Merge the latest trunk enhancements into the wal2 branch.
FossilOrigin-Name: 80e6ddd560b3041fe9164b940d684eeb6f28560a6c48b23ff49095da52e85df8
@@ -2015,7 +2015,8 @@ void sqlite3Fts5ParseSetDistance(
           );
           return;
         }
-        nNear = nNear * 10 + (p->p[i] - '0');
+        if( nNear<214748363 ) nNear = nNear * 10 + (p->p[i] - '0');
+        /*  ^^^^^^^^^^^^^^^--- Prevent integer overflow */
       }
     }else{
       nNear = FTS5_DEFAULT_NEARDIST;
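A quick sanity check of the guard value used in the new line (illustrative JavaScript, not part of the change; it assumes nNear is a 32-bit signed int, which is what the 214748363 guard implies):

const GUARD = 214748363;
const INT_MAX = 2**31 - 1;                    // 2147483647, the 32-bit signed limit
const maxBefore = GUARD - 1;                  // largest nNear that still accepts another digit
console.log(maxBefore * 10 + 9);              // 2147483629
console.log(maxBefore * 10 + 9 <= INT_MAX);   // true: the multiply-and-add can no longer overflow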
@@ -60,8 +60,7 @@
 **   step HIDDEN
 ** );
 **
-** The virtual table also has a rowid, logically equivalent to n+1 where
-** "n" is the ascending integer in the aforesaid production definition.
+** The virtual table also has a rowid which is an alias for the value.
 **
 ** Function arguments in queries against this virtual table are translated
 ** into equality constraints against successive hidden columns. In other
@@ -276,6 +275,7 @@ static int seriesConnect(
   int rc;

 /* Column numbers */
+#define SERIES_COLUMN_ROWID  (-1)
 #define SERIES_COLUMN_VALUE  0
 #define SERIES_COLUMN_START  1
 #define SERIES_COLUMN_STOP   2
@@ -363,13 +363,11 @@ static int seriesColumn(
 #endif

 /*
-** Return the rowid for the current row, logically equivalent to n+1 where
-** "n" is the ascending integer in the aforesaid production definition.
+** The rowid is the same as the value.
 */
 static int seriesRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){
   series_cursor *pCur = (series_cursor*)cur;
-  sqlite3_uint64 n = pCur->ss.uSeqIndexNow;
-  *pRowid = (sqlite3_int64)((n<LARGEST_UINT64)? n+1 : 0);
+  *pRowid = pCur->ss.iValueNow;
   return SQLITE_OK;
 }
@@ -657,7 +655,10 @@ static int seriesBestIndex(
       continue;
     }
     if( pConstraint->iColumn<SERIES_COLUMN_START ){
-      if( pConstraint->iColumn==SERIES_COLUMN_VALUE && pConstraint->usable ){
+      if( (pConstraint->iColumn==SERIES_COLUMN_VALUE ||
+           pConstraint->iColumn==SERIES_COLUMN_ROWID)
+       && pConstraint->usable
+      ){
         switch( op ){
           case SQLITE_INDEX_CONSTRAINT_EQ:
           case SQLITE_INDEX_CONSTRAINT_IS: {
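The net effect of the generate_series() hunks above: the table's rowid is now an alias for its value, and seriesBestIndex() treats a rowid equality constraint like a value equality constraint. A minimal sketch of what that looks like from SQL, written against the sqlite3 WASM oo1 API (an assumption here: the build includes the generate_series() table-valued function; the same two queries behave the same way in any SQLite shell that does):

const demoSeriesRowid = (sqlite3)=>{
  const db = new sqlite3.oo1.DB();  // transient in-memory db
  try{
    // rowid and value are the same for every row of the series.
    console.log(db.selectArrays(
      "SELECT rowid, value FROM generate_series(5,8)"
    )); // [[5,5],[6,6],[7,7],[8,8]]
    // With the seriesBestIndex() change, the rowid constraint below is
    // handled as an equality constraint instead of a full scan.
    console.log(db.selectValue(
      "SELECT value FROM generate_series(1,1000000) WHERE rowid=12345"
    )); // 12345
  }finally{
    db.close();
  }
};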
@@ -79,6 +79,48 @@ globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
         capi.SQLITE_OPEN_MAIN_JOURNAL |
         capi.SQLITE_OPEN_SUPER_JOURNAL |
         capi.SQLITE_OPEN_WAL;
+  const FLAG_COMPUTE_DIGEST_V2 = capi.SQLITE_OPEN_MEMORY
+  /* Part of the fix for
+     https://github.com/sqlite/sqlite-wasm/issues/97
+
+     Summary: prior to version 3.50.0 computeDigest() always computes
+     a value of [0,0] due to overflows, so it does not do anything
+     useful. Fixing it invalidates old persistent files, so we
+     instead only fix it for files created or updated since the bug
+     was discovered and fixed.
+
+     This flag determines whether we use the broken legacy
+     computeDigest() or the v2 variant. We only use this flag for
+     newly-created/overwritten files. Pre-existing files have the
+     broken digest stored in them so need to continue to use that.
+
+     What this means, in terms of db file compatibility between
+     versions:
+
+     - DBs created with versions older than this fix (<3.50.0)
+       can be read by post-fix versions. Such DBs which are written
+       to in-place (not replaced) by newer versions can still be read
+       by older versions, as the affected digest is only modified
+       when the SAH slot is assigned to a given filename.
+
+     - DBs created with post-fix versions will, when read by a pre-fix
+       version, be seen as having a "bad digest" and will be
+       unceremoniously replaced by that pre-fix version. When swapping
+       back to a post-fix version, that version will see that the file
+       entry is missing the FLAG_COMPUTE_DIGEST_V2 bit so will treat it
+       as a legacy file.
+
+     This flag is stored in the same memory as the various
+     SQLITE_OPEN_... flags and we must be careful here to not use a
+     flag bit which is otherwise relevant for the VFS.
+     SQLITE_OPEN_MEMORY is handled by sqlite3_open_v2() and friends,
+     not the VFS, so we'll repurpose that one. If we take a
+     currently-unused bit and it ends up, at some later point, being
+     used, we would have to invalidate existing VFS files in order to
+     move to another bit. Similarly, if the SQLITE_OPEN_MEMORY bit
+     were ever reassigned (which it won't be!), we'd invalidate all
+     VFS-side files.
+  */;

   /** Subdirectory of the VFS's space where "opaque" (randomly-named)
       files are stored. Changing this effectively invalidates the data
@@ -329,6 +371,7 @@ globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
     xOpen: function f(pVfs, zName, pFile, flags, pOutFlags){
       const pool = getPoolForVfs(pVfs);
       try{
+        flags &= ~FLAG_COMPUTE_DIGEST_V2;
         pool.log(`xOpen ${wasm.cstrToJs(zName)} ${flags}`);
         // First try to open a path that already exists in the file system.
         const path = (zName && wasm.peek8(zName))
@@ -624,7 +667,8 @@ globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){

       const fileDigest = new Uint32Array(HEADER_DIGEST_SIZE / 4);
       sah.read(fileDigest, {at: HEADER_OFFSET_DIGEST});
-      const compDigest = this.computeDigest(this.#apBody);
+      const compDigest = this.computeDigest(this.#apBody, flags);
+      //warn("getAssociatedPath() flags",'0x'+flags.toString(16), "compDigest", compDigest);
       if(fileDigest.every((v,i) => v===compDigest[i])){
         // Valid digest
         const pathBytes = this.#apBody.findIndex((v)=>0===v);
@@ -633,6 +677,7 @@ globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
           // leaving stale db data laying around.
           sah.truncate(HEADER_OFFSET_DATA);
         }
+        //warn("getAssociatedPath() flags",'0x'+flags.toString(16), "compDigest", compDigest,"pathBytes",pathBytes);
         return pathBytes
           ? textDecoder.decode(this.#apBody.subarray(0,pathBytes))
           : '';
@@ -655,10 +700,17 @@ globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
       if(HEADER_MAX_PATH_SIZE <= enc.written + 1/*NUL byte*/){
         toss("Path too long:",path);
       }
+      if(path && flags){
+        /* When creating or re-writing files, update their digest, if
+           needed, to v2. We continue to use v1 for the (!path) case
+           (empty files) because there's little reason not to use a
+           digest of 0 for empty entries. */
+        flags |= FLAG_COMPUTE_DIGEST_V2;
+      }
       this.#apBody.fill(0, enc.written, HEADER_MAX_PATH_SIZE);
       this.#dvBody.setUint32(HEADER_OFFSET_FLAGS, flags);

-      const digest = this.computeDigest(this.#apBody);
+      const digest = this.computeDigest(this.#apBody, flags);
       //console.warn("setAssociatedPath(",path,") digest",digest);
       sah.write(this.#apBody, {at: 0});
       sah.write(digest, {at: HEADER_OFFSET_DIGEST});
       sah.flush();
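Taken together, the FLAG_COMPUTE_DIGEST_V2 comment, the xOpen() change, and the setAssociatedPath() change above amount to a small bit-flag round trip: the marker is OR'd into the flags word persisted in a slot's header, and masked back out before that word is used as ordinary SQLITE_OPEN_... flags. A standalone sketch of that round trip (not the VFS code; the 0x80 value is an assumption based on SQLITE_OPEN_MEMORY's value in sqlite3.h, and the helper names are made up for illustration):

const FLAG_COMPUTE_DIGEST_V2 = 0x80;  // repurposed SQLITE_OPEN_MEMORY bit (assumed value)

// Like setAssociatedPath(): when persisting a non-empty path, opt the slot in to v2.
const flagsToPersist = (path, openFlags)=>{
  let flags = openFlags;
  if( path && flags ) flags |= FLAG_COMPUTE_DIGEST_V2;
  return flags;                       // this is the value written into the slot header
};

// Like xOpen(): the marker is private to the VFS header, so strip it before the
// value is used as ordinary open flags.
const flagsForOpen = (storedFlags)=>storedFlags & ~FLAG_COMPUTE_DIGEST_V2;

const stored = flagsToPersist("/my.db", 0x06 /* arbitrary illustrative open flags */);
console.log((stored & FLAG_COMPUTE_DIGEST_V2) !== 0);  // true: new/updated slots use v2
console.log(flagsForOpen(stored) === 0x06);            // true: the marker never leaks out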
@@ -679,15 +731,22 @@ globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
        metadata for each file as a validation check. Changing this
        algorithm invalidates all existing databases for this VFS, so
        don't do that.
+
+       See the docs for FLAG_COMPUTE_DIGEST_V2 for more details.
     */
-    computeDigest(byteArray){
-      let h1 = 0xdeadbeef;
-      let h2 = 0x41c6ce57;
-      for(const v of byteArray){
-        h1 = 31 * h1 + (v * 307);
-        h2 = 31 * h2 + (v * 307);
+    computeDigest(byteArray, fileFlags){
+      if( fileFlags & FLAG_COMPUTE_DIGEST_V2 ){
+        let h1 = 0xdeadbeef;
+        let h2 = 0x41c6ce57;
+        for(const v of byteArray){
+          h1 = Math.imul(h1 ^ v, 2654435761);
+          h2 = Math.imul(h2 ^ v, 104729);
+        }
+        return new Uint32Array([h1>>>0, h2>>>0]);
+      }else{
+        /* this is what the buggy legacy computation worked out to */
+        return new Uint32Array([0,0]);
       }
-      return new Uint32Array([h1>>>0, h2>>>0]);
     }

     /**
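A standalone illustration (not part of the VFS) of why the legacy branch can simply return [0,0]: multiplying h1 and h2 by 31 on every byte pushes them past Number.MAX_VALUE after roughly 200 bytes, and (Infinity >>> 0) is 0, so the old digest came out as [0,0] for any realistically-sized header. The v2 branch stays in 32-bit space via Math.imul, so the digest finally depends on the content. The 516-byte length below is only an assumed stand-in for the header corpus size:

const legacyDigest = (u8)=>{
  let h1 = 0xdeadbeef, h2 = 0x41c6ce57;
  for(const v of u8){
    h1 = 31 * h1 + (v * 307);   // grows without bound; reaches Infinity quickly
    h2 = 31 * h2 + (v * 307);
  }
  return new Uint32Array([h1>>>0, h2>>>0]);
};
const v2Digest = (u8)=>{
  let h1 = 0xdeadbeef, h2 = 0x41c6ce57;
  for(const v of u8){
    h1 = Math.imul(h1 ^ v, 2654435761);
    h2 = Math.imul(h2 ^ v, 104729);
  }
  return new Uint32Array([h1>>>0, h2>>>0]);
};
const a = new Uint8Array(516); a.set([1,2,3]);   // 516 = assumed header corpus size
const b = new Uint8Array(516); b.set([9,8,7]);
console.log(legacyDigest(a), legacyDigest(b));   // both come out as Uint32Array [0, 0]
console.log(v2Digest(a), v2Digest(b));           // two different, content-dependent digests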
@@ -45,6 +45,7 @@
 ** "sqlite3-wasmfs" build, only "esm" (ES6 Module) is legal.
 */
 #define JS_BUILD_MODES vanilla esm bundler-friendly node
+/* Separator to help eyeballs find the different sections */
 static const char * zBanner =
   "\n########################################################################\n";
@@ -140,8 +141,8 @@ static void mk_prologue(void){
 ** mk_lib_mode().
 **
 ** Maintenance reminder: do not combine flags within this enum,
-** e.g. LIBMODE_BUNDLER_FRIEND=0x02|LIBMODE_ESM, as that will lead to
-** breakage in some of the flag checks.
+** e.g. LIBMODE_BUNDLER_FRIENDLY=0x02|LIBMODE_ESM, as that will lead
+** to breakage in some of the flag checks.
 */
 enum LibModeFlags {
   /* Indicates an ESM module build. */
@@ -208,7 +209,7 @@ static void mk_pre_post(const char *zName /* build name */,
   pf("$(eval $(call SQLITE.CALL.C-PP.FILTER,$(extern-post-js.js.in),$(extern-post-js.js.%s-%s),"
      "$(c-pp.D.%s-%s)))\n", zNM, zNM);

-  /* Combine flags for use with emcc... */
+  /* Combined flags for use with emcc... */
   pf("pre-post-common.flags.%s-%s := "
      "$(pre-post-common.flags) "
      "--post-js=$(post-js.js.%s-%s) "
@@ -3506,7 +3506,7 @@ globalThis.sqlite3InitModule = sqlite3InitModule;
         });
         db.exec([
           "create table t(a);",
-          "insert into t(a) values(1),(2),(3);",
+          "insert into t(a) values(1),(2),(1);",
           "select auxtest(1,a), auxtest(1,a) from t order by a"
         ]);
       }finally{
ext/wasm/tests/opfs/sahpool/digest-worker.js (new file, 94 lines)
@@ -0,0 +1,94 @@
/*
  2025-01-31

  The author disclaims copyright to this source code. In place of a
  legal notice, here is a blessing:

  *   May you do good and not evil.
  *   May you find forgiveness for yourself and forgive others.
  *   May you share freely, never taking more than you give.

  ***********************************************************************

  This file is part of testing the OPFS SAHPool VFS's computeDigest()
  fix. See ./digest.html for the details.
*/
const clog = console.log.bind(console);
const wPost = (type,...args)=>postMessage({type, payload:args});
const log = (...args)=>{
  clog("Worker:",...args);
  wPost('log',...args);
}

const hasOpfs = ()=>{
  return globalThis.FileSystemHandle
    && globalThis.FileSystemDirectoryHandle
    && globalThis.FileSystemFileHandle
    && globalThis.FileSystemFileHandle.prototype.createSyncAccessHandle
    && navigator?.storage?.getDirectory;
};
if( !hasOpfs() ){
  wPost('error',"OPFS not detected");
  throw new Error("OPFS not detected");
}

clog("Importing sqlite3...");
const searchParams = new URL(self.location.href).searchParams;
importScripts(searchParams.get('sqlite3.dir') + '/sqlite3.js');

const runTests = function(sqlite3, poolUtil){
  const fname = '/my.db';
  let db = new poolUtil.OpfsSAHPoolDb(fname);
  let n = (new Date()).valueOf();
  try {
    db.exec([
      "create table if not exists t(a);"
    ]);
    db.exec({
      sql: "insert into t(a) values(?)",
      bind: n++
    });
    log(fname,"record count: ",db.selectValue("select count(*) from t"));
  }finally{
    db.close();
  }

  db = new poolUtil.OpfsSAHPoolDb(fname);
  try {
    db.exec({
      sql: "insert into t(a) values(?)",
      bind: n++
    });
    log(fname,"record count: ",db.selectValue("select count(*) from t"));
  }finally{
    db.close();
  }

  const fname2 = '/my2.db';
  db = new poolUtil.OpfsSAHPoolDb(fname2);
  try {
    db.exec([
      "create table if not exists t(a);"
    ]);
    db.exec({
      sql: "insert into t(a) values(?)",
      bind: n++
    });
    log(fname2,"record count: ",db.selectValue("select count(*) from t"));
  }finally{
    db.close();
  }
};

globalThis.sqlite3InitModule().then(async function(sqlite3){
  log("sqlite3 version:",sqlite3.version);
  const sahPoolConfig = {
    name: 'opfs-sahpool-digest',
    clearOnInit: false,
    initialCapacity: 6
  };
  return sqlite3.installOpfsSAHPoolVfs(sahPoolConfig).then(poolUtil=>{
    log('vfs acquired');
    runTests(sqlite3, poolUtil);
  });
});
ext/wasm/tests/opfs/sahpool/digest.html (new file, 151 lines)
@@ -0,0 +1,151 @@
<!doctype html>
<html lang="en-us">
  <head>
    <meta charset="utf-8">
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
    <link rel="shortcut icon" href="data:image/x-icon;," type="image/x-icon">
    <link rel="stylesheet" href="../../../common/emscripten.css"/>
    <link rel="stylesheet" href="../../../common/testing.css"/>
    <title>sqlite3 tester: OpfsSAHPool Digest</title>
    <style></style>
  </head>
  <body><h1 id='color-target'></h1>

    <p>
      This is a test app for the digest calculation of the OPFS
      SAHPool VFS. It requires running it with a new database created using
      v3.49.0 or older, then running it again with a newer version, then
      again with 3.49.0 or older.
    </p>
    <div class='input-wrapper'>
      <input type='checkbox' id='cb-log-reverse'>
      <label for='cb-log-reverse'>Reverse log order?</label>
    </div>
    <div id='test-output'></div>
    <script>
      /*
        2025-02-03

        The author disclaims copyright to this source code. In place of a
        legal notice, here is a blessing:

        *   May you do good and not evil.
        *   May you find forgiveness for yourself and forgive others.
        *   May you share freely, never taking more than you give.

        ***********************************************************************

        This is a bugfix test for the OPFS SAHPool VFS. It requires
        setting up a database created using v3.49.0 or older, then
        running it again with a newer version. In that case, the newer
        version should be able to read the older version's db files
        just fine. Reverting back to the old version should also still
        work - it should be able to read databases modified by the
        newer version. However, a database _created_ by a version with
        this fix will _not_ be legible by a version which predates
        this fix, in which case the older version will see that VFS
        file slot as corrupt and will clear it for re-use.

        This is unfortunately rather cumbersome to test properly,
        and essentially impossible to automate.
      */
      (function(){
        'use strict';
        document.querySelector('h1').innerHTML =
          document.querySelector('title').innerHTML;
        const mapToString = (v)=>{
          switch(typeof v){
            case 'number': case 'string': case 'boolean':
            case 'undefined': case 'bigint':
              return ''+v;
            default: break;
          }
          if(null===v) return 'null';
          if(v instanceof Error){
            v = {
              message: v.message,
              stack: v.stack,
              errorClass: v.name
            };
          }
          return JSON.stringify(v,undefined,2);
        };
        const normalizeArgs = (args)=>args.map(mapToString);
        const logTarget = document.querySelector('#test-output');
        const logClass = function(cssClass,...args){
          const ln = document.createElement('div');
          if(cssClass){
            for(const c of (Array.isArray(cssClass) ? cssClass : [cssClass])){
              ln.classList.add(c);
            }
          }
          ln.append(document.createTextNode(normalizeArgs(args).join(' ')));
          logTarget.append(ln);
        };
        const cbReverse = document.querySelector('#cb-log-reverse');
        //cbReverse.setAttribute('checked','checked');
        const cbReverseKey = 'tester1:cb-log-reverse';
        const cbReverseIt = ()=>{
          logTarget.classList[cbReverse.checked ? 'add' : 'remove']('reverse');
          //localStorage.setItem(cbReverseKey, cbReverse.checked ? 1 : 0);
        };
        cbReverse.addEventListener('change', cbReverseIt, true);
        /*if(localStorage.getItem(cbReverseKey)){
          cbReverse.checked = !!(+localStorage.getItem(cbReverseKey));
        }*/
        cbReverseIt();

        const log = (...args)=>{
          //console.log(...args);
          logClass('',...args);
        }
        const warn = (...args)=>{
          console.warn(...args);
          logClass('warning',...args);
        }
        const error = (...args)=>{
          console.error(...args);
          logClass('error',...args);
        };

        const toss = (...args)=>{
          error(...args);
          throw new Error(args.join(' '));
        };

        const endOfWork = (passed=true)=>{
          const eH = document.querySelector('#color-target');
          const eT = document.querySelector('title');
          if(passed){
            log("End of work chain. If you made it this far, you win.");
            eH.innerText = 'PASS: '+eH.innerText;
            eH.classList.add('tests-pass');
            eT.innerText = 'PASS: '+eT.innerText;
          }else{
            eH.innerText = 'FAIL: '+eH.innerText;
            eH.classList.add('tests-fail');
            eT.innerText = 'FAIL: '+eT.innerText;
          }
        };

        log("Running opfs-sahpool digest tests...");
        const W1 = new Worker('digest-worker.js?sqlite3.dir=../../../jswasm');
        W1.onmessage = function({data}){
          //log("onmessage:",data);
          switch(data.type){
            case 'log':
              log('worker says:', ...data.payload);
              break;
            case 'error':
              error('worker says:', ...data.payload);
              endOfWork(false);
              break;
            case 'initialized':
              log(data.workerId, ': Worker initialized',...data.payload);
              break;
          }
        };
      })();
    </script>
  </body>
</html>