
Doc cleanups and additions. Add a way for the OPFS async runner to propagate exception text to the calling thread.

FossilOrigin-Name: 5c5e80652825cf883e6c17809cb98f2bf17d5feac2d263f6f492479154730dab
stephan
2022-09-21 12:27:35 +00:00
parent 171b168b3e
commit 72ab400d4d
5 changed files with 149 additions and 100 deletions

ext/wasm/api/sqlite3-api-opfs.js

@@ -16,9 +16,7 @@
 Worker, implemented in sqlite3-opfs-async-proxy.js. This file is
 intended to be appended to the main sqlite3 JS deliverable somewhere
 after sqlite3-api-glue.js and before sqlite3-api-cleanup.js.
 */
 'use strict';
 self.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
 /**
@@ -314,55 +312,98 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
 const t = performance.now();
 Atomics.wait(state.sabOPView, state.opIds.rc, -1);
 const rc = Atomics.load(state.sabOPView, state.opIds.rc);
+if(rc){
+const err = state.s11n.deserialize();
+if(err) error(op+"() async error:",...err);
+}
 metrics[op].wait += performance.now() - t;
 return rc;
 };
 const initS11n = ()=>{
 /**
-ACHTUNG: this code is 100% duplicated in the other half of
-this proxy!
-Historical note: this impl was initially about 5% this size by using
-using JSON.stringify/parse(), but using fit-to-purpose serialization
-saves considerable runtime.
+ACHTUNG: this code is 100% duplicated in the other half of this
+proxy! The documentation is maintained in the "synchronous half".
+This proxy de/serializes cross-thread function arguments and
+output-pointer values via the state.sabIO SharedArrayBuffer,
+using the region defined by (state.sabS11nOffset,
+state.sabS11nOffset]. Only one dataset is recorded at a time.
+This is not a general-purpose format. It only supports the range
+of operations, and data sizes, needed by the sqlite3_vfs and
+sqlite3_io_methods operations.
+The data format can be succinctly summarized as:
+Nt...Td...D
+Where:
+- N = number of entries (1 byte)
+- t = type ID of first argument (1 byte)
+- ...T = type IDs of the 2nd and subsequent arguments (1 byte
+each).
+- d = raw bytes of first argument (per-type size).
+- ...D = raw bytes of the 2nd and subsequent arguments (per-type
+size).
+All types except strings have fixed sizes. Strings are stored
+using their TextEncoder/TextDecoder representations. It would
+arguably make more sense to store them as Int16Arrays of
+their JS character values, but how best/fastest to get that
+in and out of string form us an open point.
+Historical note: this impl was initially about 1% this size by
+using using JSON.stringify/parse(), but using fit-to-purpose
+serialization saves considerable runtime.
 */
 if(state.s11n) return state.s11n;
 const textDecoder = new TextDecoder(),
 textEncoder = new TextEncoder('utf-8'),
 viewU8 = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize),
 viewDV = new DataView(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
 state.s11n = Object.create(null);
+/* Only arguments and return values of these types may be
+serialized. This covers the whole range of types needed by the
+sqlite3_vfs API. */
 const TypeIds = Object.create(null);
 TypeIds.number = { id: 1, size: 8, getter: 'getFloat64', setter: 'setFloat64' };
 TypeIds.bigint = { id: 2, size: 8, getter: 'getBigInt64', setter: 'setBigInt64' };
 TypeIds.boolean = { id: 3, size: 4, getter: 'getInt32', setter: 'setInt32' };
 TypeIds.string = { id: 4 };
-const getTypeId = (v)=>{
-return TypeIds[typeof v] || toss("This value type cannot be serialized.",v);
-};
+const getTypeId = (v)=>(
+TypeIds[typeof v]
+|| toss("Maintenance required: this value type cannot be serialized.",v)
+);
 const getTypeIdById = (tid)=>{
 switch(tid){
 case TypeIds.number.id: return TypeIds.number;
 case TypeIds.bigint.id: return TypeIds.bigint;
 case TypeIds.boolean.id: return TypeIds.boolean;
 case TypeIds.string.id: return TypeIds.string;
 default: toss("Invalid type ID:",tid);
 }
 };
 /**
-Returns an array of the state serialized by the most recent
-serialize() operation (here or in the counterpart thread), or
-null if the serialization buffer is empty.
+Returns an array of the deserialized state stored by the most
+recent serialize() operation (from from this thread or the
+counterpart thread), or null if the serialization buffer is empty.
 */
 state.s11n.deserialize = function(){
 ++metrics.s11n.deserialize.count;
 const t = performance.now();
-let rc = null;
 const argc = viewU8[0];
+const rc = argc ? [] : null;
 if(argc){
-rc = [];
-let offset = 1, i, n, v, typeIds = [];
+const typeIds = [];
+let offset = 1, i, n, v;
 for(i = 0; i < argc; ++i, ++offset){
 typeIds.push(getTypeIdById(viewU8[offset]));
 }
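
The expanded comment above documents the s11n wire format (Nt...Td...D): one count byte, one type-id byte per argument, then the per-type payloads, with strings stored as length-prefixed UTF-8. The following is a rough standalone sketch of that layout for illustration only; it is not part of the commit, the 256-byte buffer and the writeArgs()/readArgs() helper names are invented here, and little-endian order is hardcoded where the real code uses state.littleEndian.

    // Sketch of the Nt...Td...D layout, reduced to numbers and strings.
    const buf = new ArrayBuffer(256);          // stand-in for the s11n region of state.sabIO
    const u8 = new Uint8Array(buf), dv = new DataView(buf);
    const enc = new TextEncoder(), dec = new TextDecoder();
    const T_NUM = 1, T_STR = 4;                // mirrors TypeIds.number.id / TypeIds.string.id

    function writeArgs(...args){               // hypothetical helper, not the committed API
      let off = 1 + args.length;
      u8[0] = args.length & 0xff;              // N: number of entries
      args.forEach((a,i)=>u8[1+i] = ('number'===typeof a) ? T_NUM : T_STR); // t...T: type IDs
      for(const a of args){                    // d...D: per-type payloads
        if('number'===typeof a){ dv.setFloat64(off, a, true); off += 8; }
        else{
          const s = enc.encode(String(a));     // strings: 4-byte length then UTF-8 bytes
          dv.setInt32(off, s.byteLength, true); off += 4;
          u8.set(s, off); off += s.byteLength;
        }
      }
    }

    function readArgs(){                       // hypothetical counterpart of deserialize()
      const n = u8[0]; if(!n) return null;     // empty buffer => null, as in deserialize()
      const types = Array.from(u8.slice(1, 1+n));
      let off = 1 + n; const out = [];
      for(const t of types){
        if(t===T_NUM){ out.push(dv.getFloat64(off, true)); off += 8; }
        else{
          const len = dv.getInt32(off, true); off += 4;
          out.push(dec.decode(u8.slice(off, off+len))); off += len;
        }
      }
      return out;
    }

    writeArgs(42, "hello");
    console.log(readArgs());                   // => [ 42, 'hello' ]
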
@@ -371,7 +412,7 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
 if(t.getter){
 v = viewDV[t.getter](offset, state.littleEndian);
 offset += t.size;
-}else{
+}else{/*String*/
 n = viewDV.getInt32(offset, state.littleEndian);
 offset += 4;
 v = textDecoder.decode(viewU8.slice(offset, offset+n));
@@ -384,6 +425,7 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
 metrics.s11n.deserialize.time += performance.now() - t;
 return rc;
 };
 /**
 Serializes all arguments to the shared buffer for consumption
 by the counterpart thread.
@@ -397,22 +439,27 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
 state.
 */
 state.s11n.serialize = function(...args){
-++metrics.s11n.serialize.count;
 const t = performance.now();
+++metrics.s11n.serialize.count;
 if(args.length){
 //log("serialize():",args);
-let i = 0, offset = 1, typeIds = [];
-viewU8[0] = args.length & 0xff;
+const typeIds = [];
+let i = 0, offset = 1;
+viewU8[0] = args.length & 0xff /* header = # of args */;
 for(; i < args.length; ++i, ++offset){
+/* Write the TypeIds.id value into the next args.length
+bytes. */
 typeIds.push(getTypeId(args[i]));
 viewU8[offset] = typeIds[i].id;
 }
 for(i = 0; i < args.length; ++i) {
+/* Deserialize the following bytes based on their
+corresponding TypeIds.id from the header. */
 const t = typeIds[i];
 if(t.setter){
 viewDV[t.setter](offset, args[i], state.littleEndian);
 offset += t.size;
-}else{
+}else{/*String*/
 const s = textEncoder.encode(args[i]);
 viewDV.setInt32(offset, s.byteLength, state.littleEndian);
 offset += 4;
@@ -774,7 +821,10 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
 but cannot report the nature of the failure.
 */
 opfsUtil.deleteEntry = function(fsEntryName,recursive=false){
-return 0===opRun('xDelete', fsEntryName, 0, recursive);
+mTimeStart('xDelete');
+const rc = opRun('xDelete', fsEntryName, 0, recursive);
+mTimeEnd();
+return 0===rc;
 };
 /**
 Synchronously creates the given directory name, recursively, in
@@ -782,7 +832,10 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
 directory already exists, else false.
 */
 opfsUtil.mkdir = function(absDirName){
-return 0===opRun('mkdir', absDirName);
+mTimeStart('mkdir');
+const rc = opRun('mkdir', absDirName);
+mTimeEnd();
+return 0===rc;
 };
 /**
 Synchronously checks whether the given OPFS filesystem exists,
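
The deleteEntry() and mkdir() changes above keep the boolean success convention (0===rc) while adding metrics timing around the opRun() call. A hedged usage sketch follows, assuming the OPFS VFS has been installed and that this utility object is exposed to clients (e.g. as sqlite3.opfs in this tree); the demoOpfsUtil() wrapper and the paths are invented for illustration.

    // Sketch: exercising the boolean-returning helpers shown above. Call as
    // demoOpfsUtil(sqlite3) once the library and the OPFS VFS are initialized.
    const demoOpfsUtil = (sqlite3)=>{
      const opfs = sqlite3.opfs /* assumption: opfsUtil is exposed here */;
      if(!opfs.mkdir('/demo/dir')){           // true on success or if the dir already exists
        console.error("mkdir('/demo/dir') failed");
        return;
      }
      /* ... create/populate a database under /demo/dir ... */
      if(!opfs.deleteEntry('/demo/dir', true)){ // recursive removal; false on failure
        console.error("deleteEntry() failed, but the nature of the failure is not reported");
      }
    };
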

ext/wasm/api/sqlite3-wasm.c

@@ -9,10 +9,10 @@
 /*
 ** WASM_KEEP is identical to EMSCRIPTEN_KEEPALIVE but is not
-** Emscripten-specific. It explicitly includes marked functions for
-** export into the target wasm file without requiring explicit listing
-** of those functions in Emscripten's -sEXPORTED_FUNCTIONS=... list
-** (or equivalent in other build platforms). Any function with neither
+** Emscripten-specific. It explicitly marks functions for export into
+** the target wasm file without requiring explicit listing of those
+** functions in Emscripten's -sEXPORTED_FUNCTIONS=... list (or
+** equivalent in other build platforms). Any function with neither
 ** this attribute nor which is listed as an explicit export will not
 ** be exported from the wasm file (but may still be used internally
 ** within the wasm file).

ext/wasm/sqlite3-opfs-async-proxy.js

@@ -136,8 +136,8 @@ const getDirForPath = async function f(absFilename, createDirs = false){
 /**
-Stores the given value at the array index reserved for the given op
-and then Atomics.notify()'s it.
+Stores the given value at state.sabOPView[state.opIds.rc] and then
+Atomics.notify()'s it.
 */
 const storeAndNotify = (opName, value)=>{
 log(opName+"() => notify(",state.opIds.rc,",",value,")");
@@ -190,10 +190,11 @@ const vfsAsyncImpls = {
 try {
 await getDirForPath(dirname+"/filepart", true);
 }catch(e){
-//error("mkdir failed",filename, e.message);
+state.s11n.serialize(e.message);
 rc = state.sq3Codes.SQLITE_IOERR;
+}finally{
+wTimeEnd();
 }
-wTimeEnd();
 storeAndNotify('mkdir', rc);
 mTimeEnd();
 },
@@ -216,9 +217,11 @@ const vfsAsyncImpls = {
 const [dh, fn] = await getDirForPath(filename);
 await dh.getFileHandle(fn);
 }catch(e){
+state.s11n.serialize(e.message);
 rc = state.sq3Codes.SQLITE_IOERR;
+}finally{
+wTimeEnd();
 }
-wTimeEnd();
 storeAndNotify('xAccess', rc);
 mTimeEnd();
 },
@@ -236,6 +239,7 @@ const vfsAsyncImpls = {
 catch(e){ warn("Ignoring dirHandle.removeEntry() failure of",fh,e) }
 }
 }else{
+state.s11n.serialize();
 rc = state.sq3Codes.SQLITE_NOTFOUND;
 }
 wTimeEnd();
@@ -274,9 +278,7 @@ const vfsAsyncImpls = {
 filename = filename.join('/');
 }
 }catch(e){
-/* Ignoring: _presumably_ the file can't be found or a dir is
-not empty. */
-//error("Delete failed",filename, e.message);
+state.s11n.serialize(e.message);
 rc = state.sq3Codes.SQLITE_IOERR_DELETE;
 }
 wTimeEnd();
@@ -292,7 +294,7 @@ const vfsAsyncImpls = {
 state.s11n.serialize(Number(sz));
 sz = 0;
 }catch(e){
-error("xFileSize():",e, fh);
+state.s11n.serialize(e.message);
 sz = state.sq3Codes.SQLITE_IOERR;
 }
 wTimeEnd();
@@ -337,6 +339,7 @@ const vfsAsyncImpls = {
 }catch(e){
 wTimeEnd();
 error(opName,e);
+state.s11n.serialize(e.message);
 storeAndNotify(opName, state.sq3Codes.SQLITE_IOERR);
 }
 mTimeEnd();
@@ -358,6 +361,7 @@ const vfsAsyncImpls = {
 }
 }catch(e){
 error("xRead() failed",e,fh);
+state.s11n.serialize(e.message);
 rc = state.sq3Codes.SQLITE_IOERR_READ;
 }
 storeAndNotify('xRead',rc);
@@ -366,12 +370,18 @@ const vfsAsyncImpls = {
 xSync: async function(fid,flags/*ignored*/){
 mTimeStart('xSync');
 const fh = __openFiles[fid];
+let rc = 0;
 if(!fh.readOnly && fh.accessHandle){
-wTimeStart('xSync');
-await fh.accessHandle.flush();
-wTimeEnd();
+try {
+wTimeStart('xSync');
+await fh.accessHandle.flush();
+}catch(e){
+state.s11n.serialize(e.message);
+}finally{
+wTimeEnd();
+}
 }
-storeAndNotify('xSync',0);
+storeAndNotify('xSync',rc);
 mTimeEnd();
 },
 xTruncate: async function(fid,size){
@@ -384,6 +394,7 @@ const vfsAsyncImpls = {
 await fh.accessHandle.truncate(size);
 }catch(e){
 error("xTruncate():",e,fh);
+state.s11n.serialize(e.message);
 rc = state.sq3Codes.SQLITE_IOERR_TRUNCATE;
 }
 wTimeEnd();
@@ -403,6 +414,7 @@ const vfsAsyncImpls = {
 ) ? 0 : state.sq3Codes.SQLITE_IOERR_WRITE;
 }catch(e){
 error("xWrite():",e,fh);
+state.s11n.serialize(e.message);
 rc = state.sq3Codes.SQLITE_IOERR_WRITE;
 }finally{
 wTimeEnd();
@@ -413,53 +425,42 @@ const vfsAsyncImpls = {
 };
 const initS11n = ()=>{
-// Achtung: this code is 100% duplicated in the other half of this proxy!
 /**
-Historical note: this impl was initially about 1% this size by using
-using JSON.stringify/parse(), but using fit-to-purpose serialization
-saves considerable runtime.
+ACHTUNG: this code is 100% duplicated in the other half of this
+proxy! The documentation is maintained in the "synchronous half".
 */
 if(state.s11n) return state.s11n;
 const textDecoder = new TextDecoder(),
 textEncoder = new TextEncoder('utf-8'),
 viewU8 = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize),
 viewDV = new DataView(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
 state.s11n = Object.create(null);
 const TypeIds = Object.create(null);
 TypeIds.number = { id: 1, size: 8, getter: 'getFloat64', setter: 'setFloat64' };
 TypeIds.bigint = { id: 2, size: 8, getter: 'getBigInt64', setter: 'setBigInt64' };
 TypeIds.boolean = { id: 3, size: 4, getter: 'getInt32', setter: 'setInt32' };
 TypeIds.string = { id: 4 };
-const getTypeId = (v)=>{
-return TypeIds[typeof v] || toss("This value type cannot be serialized.",v);
-};
+const getTypeId = (v)=>(
+TypeIds[typeof v]
+|| toss("Maintenance required: this value type cannot be serialized.",v)
+);
 const getTypeIdById = (tid)=>{
 switch(tid){
 case TypeIds.number.id: return TypeIds.number;
 case TypeIds.bigint.id: return TypeIds.bigint;
 case TypeIds.boolean.id: return TypeIds.boolean;
 case TypeIds.string.id: return TypeIds.string;
 default: toss("Invalid type ID:",tid);
 }
 };
-/**
-Returns an array of the state serialized by the most recent
-serialize() operation (here or in the counterpart thread), or
-null if the serialization buffer is empty.
-*/
 state.s11n.deserialize = function(){
 ++metrics.s11n.deserialize.count;
 const t = performance.now();
-let rc = null;
 const argc = viewU8[0];
+const rc = argc ? [] : null;
 if(argc){
-rc = [];
-let offset = 1, i, n, v, typeIds = [];
+const typeIds = [];
+let offset = 1, i, n, v;
 for(i = 0; i < argc; ++i, ++offset){
 typeIds.push(getTypeIdById(viewU8[offset]));
 }
@@ -468,7 +469,7 @@ const initS11n = ()=>{
 if(t.getter){
 v = viewDV[t.getter](offset, state.littleEndian);
 offset += t.size;
-}else{
+}else{/*String*/
 n = viewDV.getInt32(offset, state.littleEndian);
 offset += 4;
 v = textDecoder.decode(viewU8.slice(offset, offset+n));
@@ -481,36 +482,28 @@ const initS11n = ()=>{
 metrics.s11n.deserialize.time += performance.now() - t;
 return rc;
 };
-/**
-Serializes all arguments to the shared buffer for consumption
-by the counterpart thread.
-This routine is only intended for serializing OPFS VFS
-arguments and (in at least one special case) result values,
-and the buffer is sized to be able to comfortably handle
-those.
-If passed no arguments then it zeroes out the serialization
-state.
-*/
 state.s11n.serialize = function(...args){
-++metrics.s11n.serialize.count;
 const t = performance.now();
+++metrics.s11n.serialize.count;
 if(args.length){
 //log("serialize():",args);
-let i = 0, offset = 1, typeIds = [];
-viewU8[0] = args.length & 0xff;
+const typeIds = [];
+let i = 0, offset = 1;
+viewU8[0] = args.length & 0xff /* header = # of args */;
 for(; i < args.length; ++i, ++offset){
+/* Write the TypeIds.id value into the next args.length
+bytes. */
 typeIds.push(getTypeId(args[i]));
 viewU8[offset] = typeIds[i].id;
 }
 for(i = 0; i < args.length; ++i) {
+/* Deserialize the following bytes based on their
+corresponding TypeIds.id from the header. */
 const t = typeIds[i];
 if(t.setter){
 viewDV[t.setter](offset, args[i], state.littleEndian);
 offset += t.size;
-}else{
+}else{/*String*/
 const s = textEncoder.encode(args[i]);
 viewDV.setInt32(offset, s.byteLength, state.littleEndian);
 offset += 4;
@@ -548,6 +541,9 @@ const waitLoop = async function f(){
 Atomics.store(state.sabOPView, state.opIds.whichOp, 0);
 const hnd = opHandlers[opId] ?? toss("No waitLoop handler for whichOp #",opId);
 const args = state.s11n.deserialize();
+state.s11n.serialize()/* clear s11n to keep the caller from
+confusing this with an exception string
+written by the upcoming operation */;
 //warn("waitLoop() whichOp =",opId, hnd, args);
 if(hnd.f) await hnd.f(...args);
 else error("Missing callback for opId",opId);
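
Taken together with the first file, the hunks above implement the commit message: a failing async handler now serializes e.message into the shared s11n buffer before notifying the waiting thread, waitLoop() clears that buffer before dispatching each op, and the synchronous opRun() deserializes and logs the text whenever the returned rc is non-zero. Below is a rough sketch of that round-trip, not the committed code; the asyncSide()/syncSide() names and the in-memory stand-in for the SharedArrayBuffer-backed s11n helpers are invented for illustration.

    // Sketch: shape of the cross-thread error-text propagation added here.
    const s11n = (()=>{                       // toy stand-in for state.s11n
      let stored = null;
      return {
        serialize: (...args)=>{ stored = args.length ? args : null; },
        deserialize: ()=>stored
      };
    })();
    const SQLITE_IOERR = 10;                  // sqlite3's SQLITE_IOERR result code

    async function asyncSide(op){             // hypothetical async-proxy handler
      s11n.serialize();                       // clear before the op, as waitLoop() now does
      try{
        throw new Error(op+" failed: no such file");
      }catch(e){
        s11n.serialize(e.message);            // propagate the exception text
        return SQLITE_IOERR;
      }
    }

    async function syncSide(op){              // hypothetical counterpart of opRun()
      const rc = await asyncSide(op);
      if(rc){
        const err = s11n.deserialize();
        if(err) console.error(op+"() async error:", ...err);
      }
      return rc;
    }

    syncSide('xAccess');  // logs: xAccess() async error: xAccess failed: no such file
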

manifest

@@ -1,5 +1,5 @@
-C Added\ssome\sstructure\sto\sthe\swasm\sdemo\slink\slist.
-D 2022-09-21T12:25:40.497
+C Doc\scleanups\sand\sadditions.\sAdd\sa\sway\sfor\sthe\sOPFS\sasync\srunner\sto\spropagate\sexception\stext\sto\sthe\scalling\sthread.
+D 2022-09-21T12:27:35.940
 F .fossil-settings/empty-dirs dbb81e8fc0401ac46a1491ab34a7f2c7c0452f2f06b54ebb845d024ca8283ef1
 F .fossil-settings/ignore-glob 35175cdfcf539b2318cb04a9901442804be81cd677d8b889fcc9149c21f239ea
 F LICENSE.md df5091916dbb40e6e9686186587125e1b2ff51f022cc334e886c19a0e9982724
@@ -484,11 +484,11 @@ F ext/wasm/api/post-js-header.js 2e5c886398013ba2af88028ecbced1e4b22dc96a86467f1
 F ext/wasm/api/sqlite3-api-cleanup.js 8564a6077cdcaea9a9f428a019af8a05887f0131e6a2a1e72a7ff1145fadfe77
 F ext/wasm/api/sqlite3-api-glue.js 366d580c8e5bf7fcf4c6dee6f646c31f5549bd417ea03a59a0acca00e8ecce30
 F ext/wasm/api/sqlite3-api-oo1.js f974e79d9af8f26bf33928c5730b0988cc706d14f59a5fe36394739b92249841
-F ext/wasm/api/sqlite3-api-opfs.js 10be4156d7db4d6aa8a456b4fb0f31a6e35c61297766e8bb55573fc5c0d56530
+F ext/wasm/api/sqlite3-api-opfs.js dbbce38b0cd89d1eaf829546e2999241127150a40ff2e0331d842a1f31c756e5
 F ext/wasm/api/sqlite3-api-prologue.js 6f3a67c4db37e884d33a05e5cf6d9d9bc012226a18c09f33f662fefd99840a63
 F ext/wasm/api/sqlite3-api-worker1.js 2eeb2a24e1a90322d84a9b88a99919b806623de62792436446099c0988f2030b
 F ext/wasm/api/sqlite3-wasi.h 25356084cfe0d40458a902afb465df8c21fc4152c1d0a59b563a3fba59a068f9
-F ext/wasm/api/sqlite3-wasm.c 4130e2df9587f4e4c3afc04c3549d682c8a5c0cfe5b22819a0a86edb7f01b9bd
+F ext/wasm/api/sqlite3-wasm.c 9401a3f9bd191a410b4f679b7957c6b7e168a68106f52ddeafa1c776d0364e49
 F ext/wasm/batch-runner.html 2857a6db7292ac83d1581af865d643fd34235db2df830d10b43b01388c599e04
 F ext/wasm/batch-runner.js 6f5b86e0b5519a9a941d9f17ee9c5ecdc63f452f157602fe7fdf87f6275a2b49
 F ext/wasm/common/SqliteTestUtil.js 529161a624265ba84271a52db58da022649832fa1c71309fb1e02cc037327a2b
@@ -519,7 +519,7 @@ F ext/wasm/speedtest1.html 8ae6ece128151d01f90579de69cfa06f021acdb760735250ef745
 F ext/wasm/split-speedtest1-script.sh a3e271938d4d14ee49105eb05567c6a69ba4c1f1293583ad5af0cd3a3779e205 x
 F ext/wasm/sql/000-mandelbrot.sql 775337a4b80938ac8146aedf88808282f04d02d983d82675bd63d9c2d97a15f0
 F ext/wasm/sql/001-sudoku.sql 35b7cb7239ba5d5f193bc05ec379bcf66891bce6f2a5b3879f2f78d0917299b5
-F ext/wasm/sqlite3-opfs-async-proxy.js 9305d92f32d02983c4528b9c801096cfd8295ca7d24e357d90de9bbcb201d035
+F ext/wasm/sqlite3-opfs-async-proxy.js 0523e3093df2ad2c58691aa65c5e32c0aafb1bbabb6119dd9406d34a8e16dd68
 F ext/wasm/sqlite3-worker1-promiser.js 4fd0465688a28a75f1d4ee4406540ba494f49844e3cad0670d0437a001943365
 F ext/wasm/sqlite3-worker1.js 0c1e7626304543969c3846573e080c082bf43bcaa47e87d416458af84f340a9e
 F ext/wasm/test-opfs-vfs.html eb69dda21eb414b8f5e3f7c1cc0f774103cc9c0f87b2d28a33419e778abfbab5
@@ -2026,8 +2026,8 @@ F vsixtest/vsixtest.tcl 6a9a6ab600c25a91a7acc6293828957a386a8a93
 F vsixtest/vsixtest.vcxproj.data 2ed517e100c66dc455b492e1a33350c1b20fbcdc
 F vsixtest/vsixtest.vcxproj.filters 37e51ffedcdb064aad6ff33b6148725226cd608e
 F vsixtest/vsixtest_TemporaryKey.pfx e5b1b036facdb453873e7084e1cae9102ccc67a0
-P 96c734c07acfbea153d9aaf293a28a2d49d78de19fec4fb90c4c08fb1fd0ddbd
-R 861bbf4cfa9812be3b9491caf1943540
+P 777077c4c2249e1ec78390d4f65aaf281c1fbefcef4bcc7609199e995645ceb6
+R 52ae18958684abb42f4ad80857c37c9c
 U stephan
-Z 172073e3d6aad2306bf7614651c7edbb
+Z 4853fab0e74bf411129cdad2e17be435
 # Remove this line to create a well-formed Fossil manifest.

manifest.uuid

@@ -1 +1 @@
-777077c4c2249e1ec78390d4f65aaf281c1fbefcef4bcc7609199e995645ceb6
+5c5e80652825cf883e6c17809cb98f2bf17d5feac2d263f6f492479154730dab