Doc cleanups and additions. Add a way for the OPFS async runner to propagate exception text to the calling thread.
FossilOrigin-Name: 5c5e80652825cf883e6c17809cb98f2bf17d5feac2d263f6f492479154730dab
ext/wasm/api/sqlite3-api-opfs.js
@@ -16,9 +16,7 @@
Worker, implemented in sqlite3-opfs-async-proxy.js. This file is
intended to be appended to the main sqlite3 JS deliverable somewhere
after sqlite3-api-glue.js and before sqlite3-api-cleanup.js.

*/

'use strict';
self.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
/**
@@ -314,18 +312,55 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
const t = performance.now();
Atomics.wait(state.sabOPView, state.opIds.rc, -1);
const rc = Atomics.load(state.sabOPView, state.opIds.rc);
if(rc){
const err = state.s11n.deserialize();
if(err) error(op+"() async error:",...err);
}
metrics[op].wait += performance.now() - t;
return rc;
};
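/*
  Note on the flow above: the synchronous side blocks in Atomics.wait()
  until the async half stores a result code at
  state.sabOPView[state.opIds.rc] and Atomics.notify()'s it. A non-zero
  code signals failure, in which case state.s11n.deserialize() returns
  whatever the async half last serialized (with this commit, the
  exception's message text), and that is passed to error() for logging.
*/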

const initS11n = ()=>{
/**
ACHTUNG: this code is 100% duplicated in the other half of
this proxy!
ACHTUNG: this code is 100% duplicated in the other half of this
proxy! The documentation is maintained in the "synchronous half".

Historical note: this impl was initially about 5% this size by
using JSON.stringify/parse(), but using fit-to-purpose serialization
saves considerable runtime.
This proxy de/serializes cross-thread function arguments and
output-pointer values via the state.sabIO SharedArrayBuffer,
using the region defined by (state.sabS11nOffset,
state.sabS11nOffset + state.sabS11nSize]. Only one dataset is
recorded at a time.

This is not a general-purpose format. It only supports the range
of operations, and data sizes, needed by the sqlite3_vfs and
sqlite3_io_methods operations.

The data format can be succinctly summarized as:

Nt...Td...D

Where:

- N = number of entries (1 byte)

- t = type ID of first argument (1 byte)

- ...T = type IDs of the 2nd and subsequent arguments (1 byte
each).

- d = raw bytes of first argument (per-type size).

- ...D = raw bytes of the 2nd and subsequent arguments (per-type
size).

All types except strings have fixed sizes. Strings are stored
using their TextEncoder/TextDecoder representations. It would
arguably make more sense to store them as Int16Arrays of
their JS character values, but how best/fastest to get that
in and out of string form is an open point.

Historical note: this impl was initially about 1% this size by
using JSON.stringify/parse(), but using fit-to-purpose
serialization saves considerable runtime.
*/
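/*
  A minimal standalone sketch of the Nt...Td...D layout documented
  above, assuming little-endian storage and using purely hypothetical
  local names (demoBuf, demoU8, demoDV). It hand-encodes the argument
  list ("xyz", 42) the same way serialize() below does: a 1-byte entry
  count, one type-ID byte per entry, then each entry's raw bytes, with
  strings stored as a 4-byte length followed by their UTF-8 bytes.
*/
const demoBuf = new ArrayBuffer(64);
const demoU8 = new Uint8Array(demoBuf), demoDV = new DataView(demoBuf);
const demoUtf8 = new TextEncoder().encode("xyz");
let demoOffset = 3;                     /* skip the N byte plus 2 type-ID bytes */
demoU8[0] = 2;                          /* N: two entries */
demoU8[1] = 4;                          /* t: TypeIds.string.id */
demoU8[2] = 1;                          /* T: TypeIds.number.id */
demoDV.setInt32(demoOffset, demoUtf8.byteLength, true); demoOffset += 4; /* d: string length... */
demoU8.set(demoUtf8, demoOffset); demoOffset += demoUtf8.byteLength;     /* ...then its UTF-8 bytes */
demoDV.setFloat64(demoOffset, 42, true); demoOffset += 8;                /* D: number as float64 */
/* Deserialization reverses the walk: read demoU8[0] entries, map each
   type-ID byte back to its TypeIds entry, then decode each value with
   the matching DataView getter (strings via TextDecoder). */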
if(state.s11n) return state.s11n;
const textDecoder = new TextDecoder(),
@@ -333,14 +368,19 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
viewU8 = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize),
viewDV = new DataView(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
state.s11n = Object.create(null);
/* Only arguments and return values of these types may be
serialized. This covers the whole range of types needed by the
sqlite3_vfs API. */
const TypeIds = Object.create(null);
TypeIds.number = { id: 1, size: 8, getter: 'getFloat64', setter: 'setFloat64' };
TypeIds.bigint = { id: 2, size: 8, getter: 'getBigInt64', setter: 'setBigInt64' };
TypeIds.boolean = { id: 3, size: 4, getter: 'getInt32', setter: 'setInt32' };
TypeIds.string = { id: 4 };
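/* Note: string (id 4) has no size/getter/setter entry because strings
   are variable-length; the else-branches below handle them as a 4-byte
   length prefix plus UTF-8 bytes. */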
const getTypeId = (v)=>{
return TypeIds[typeof v] || toss("This value type cannot be serialized.",v);
};

const getTypeId = (v)=>(
TypeIds[typeof v]
|| toss("Maintenance required: this value type cannot be serialized.",v)
);
const getTypeIdById = (tid)=>{
switch(tid){
case TypeIds.number.id: return TypeIds.number;
@@ -350,19 +390,20 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
default: toss("Invalid type ID:",tid);
}
};

/**
Returns an array of the state serialized by the most recent
serialize() operation (here or in the counterpart thread), or
null if the serialization buffer is empty.
Returns an array of the deserialized state stored by the most
recent serialize() operation (from this thread or the
counterpart thread), or null if the serialization buffer is empty.
*/
state.s11n.deserialize = function(){
++metrics.s11n.deserialize.count;
const t = performance.now();
let rc = null;
const argc = viewU8[0];
const rc = argc ? [] : null;
if(argc){
rc = [];
let offset = 1, i, n, v, typeIds = [];
const typeIds = [];
let offset = 1, i, n, v;
for(i = 0; i < argc; ++i, ++offset){
typeIds.push(getTypeIdById(viewU8[offset]));
}
@@ -371,7 +412,7 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
if(t.getter){
v = viewDV[t.getter](offset, state.littleEndian);
offset += t.size;
}else{
}else{/*String*/
n = viewDV.getInt32(offset, state.littleEndian);
offset += 4;
v = textDecoder.decode(viewU8.slice(offset, offset+n));
@@ -384,6 +425,7 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
metrics.s11n.deserialize.time += performance.now() - t;
return rc;
};

/**
Serializes all arguments to the shared buffer for consumption
by the counterpart thread.
@@ -397,22 +439,27 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
state.
*/
state.s11n.serialize = function(...args){
++metrics.s11n.serialize.count;
const t = performance.now();
++metrics.s11n.serialize.count;
if(args.length){
//log("serialize():",args);
let i = 0, offset = 1, typeIds = [];
viewU8[0] = args.length & 0xff;
const typeIds = [];
let i = 0, offset = 1;
viewU8[0] = args.length & 0xff /* header = # of args */;
for(; i < args.length; ++i, ++offset){
/* Write the TypeIds.id value into the next args.length
bytes. */
typeIds.push(getTypeId(args[i]));
viewU8[offset] = typeIds[i].id;
}
for(i = 0; i < args.length; ++i) {
/* Serialize the following bytes based on their
corresponding TypeIds.id from the header. */
const t = typeIds[i];
if(t.setter){
viewDV[t.setter](offset, args[i], state.littleEndian);
offset += t.size;
}else{
}else{/*String*/
const s = textEncoder.encode(args[i]);
viewDV.setInt32(offset, s.byteLength, state.littleEndian);
offset += 4;
@@ -774,7 +821,10 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
but cannot report the nature of the failure.
*/
opfsUtil.deleteEntry = function(fsEntryName,recursive=false){
return 0===opRun('xDelete', fsEntryName, 0, recursive);
mTimeStart('xDelete');
const rc = opRun('xDelete', fsEntryName, 0, recursive);
mTimeEnd();
return 0===rc;
};
/**
Synchronously creates the given directory name, recursively, in
@@ -782,7 +832,10 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
directory already exists, else false.
*/
opfsUtil.mkdir = function(absDirName){
return 0===opRun('mkdir', absDirName);
mTimeStart('mkdir');
const rc = opRun('mkdir', absDirName);
mTimeEnd();
return 0===rc;
};
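/*
  Hypothetical usage sketch for the two utilities above, assuming the
  opfsUtil namespace is exposed to client code as sqlite3.opfs and that
  the OPFS VFS has been installed. Both helpers block the calling
  thread and report success as a boolean derived from the proxied
  result code.
*/
const demoDir = '/app-data/cache';              /* hypothetical path */
if(sqlite3.opfs.mkdir(demoDir)){
  log("Directory exists or was created:", demoDir);
}
if(!sqlite3.opfs.deleteEntry(demoDir, true)){
  warn("Could not remove", demoDir);
}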
/**
Synchronously checks whether the given OPFS filesystem exists,

ext/wasm/api/sqlite3-wasm.c
@@ -9,10 +9,10 @@

/*
** WASM_KEEP is identical to EMSCRIPTEN_KEEPALIVE but is not
** Emscripten-specific. It explicitly includes marked functions for
** export into the target wasm file without requiring explicit listing
** of those functions in Emscripten's -sEXPORTED_FUNCTIONS=... list
** (or equivalent in other build platforms). Any function which neither
** Emscripten-specific. It explicitly marks functions for export into
** the target wasm file without requiring explicit listing of those
** functions in Emscripten's -sEXPORTED_FUNCTIONS=... list (or
** equivalent in other build platforms). Any function which neither
** has this attribute nor is listed as an explicit export will not
** be exported from the wasm file (but may still be used internally
** within the wasm file).

ext/wasm/sqlite3-opfs-async-proxy.js
@@ -136,8 +136,8 @@ const getDirForPath = async function f(absFilename, createDirs = false){

/**
Stores the given value at the array index reserved for the given op
and then Atomics.notify()'s it.
Stores the given value at state.sabOPView[state.opIds.rc] and then
Atomics.notify()'s it.
*/
const storeAndNotify = (opName, value)=>{
log(opName+"() => notify(",state.opIds.rc,",",value,")");
@@ -190,10 +190,11 @@ const vfsAsyncImpls = {
try {
await getDirForPath(dirname+"/filepart", true);
}catch(e){
//error("mkdir failed",filename, e.message);
state.s11n.serialize(e.message);
rc = state.sq3Codes.SQLITE_IOERR;
}
}finally{
wTimeEnd();
}
storeAndNotify('mkdir', rc);
mTimeEnd();
},
@@ -216,9 +217,11 @@ const vfsAsyncImpls = {
const [dh, fn] = await getDirForPath(filename);
await dh.getFileHandle(fn);
}catch(e){
state.s11n.serialize(e.message);
rc = state.sq3Codes.SQLITE_IOERR;
}
}finally{
wTimeEnd();
}
storeAndNotify('xAccess', rc);
mTimeEnd();
},
@@ -236,6 +239,7 @@ const vfsAsyncImpls = {
catch(e){ warn("Ignoring dirHandle.removeEntry() failure of",fh,e) }
}
}else{
state.s11n.serialize();
rc = state.sq3Codes.SQLITE_NOTFOUND;
}
wTimeEnd();
@@ -274,9 +278,7 @@ const vfsAsyncImpls = {
filename = filename.join('/');
}
}catch(e){
/* Ignoring: _presumably_ the file can't be found or a dir is
not empty. */
//error("Delete failed",filename, e.message);
state.s11n.serialize(e.message);
rc = state.sq3Codes.SQLITE_IOERR_DELETE;
}
wTimeEnd();
@@ -292,7 +294,7 @@ const vfsAsyncImpls = {
state.s11n.serialize(Number(sz));
sz = 0;
}catch(e){
error("xFileSize():",e, fh);
state.s11n.serialize(e.message);
sz = state.sq3Codes.SQLITE_IOERR;
}
wTimeEnd();
@@ -337,6 +339,7 @@ const vfsAsyncImpls = {
}catch(e){
wTimeEnd();
error(opName,e);
state.s11n.serialize(e.message);
storeAndNotify(opName, state.sq3Codes.SQLITE_IOERR);
}
mTimeEnd();
@@ -358,6 +361,7 @@ const vfsAsyncImpls = {
}
}catch(e){
error("xRead() failed",e,fh);
state.s11n.serialize(e.message);
rc = state.sq3Codes.SQLITE_IOERR_READ;
}
storeAndNotify('xRead',rc);
@@ -366,12 +370,18 @@ const vfsAsyncImpls = {
xSync: async function(fid,flags/*ignored*/){
mTimeStart('xSync');
const fh = __openFiles[fid];
let rc = 0;
if(!fh.readOnly && fh.accessHandle){
try {
wTimeStart('xSync');
await fh.accessHandle.flush();
}catch(e){
state.s11n.serialize(e.message);
}finally{
wTimeEnd();
}
storeAndNotify('xSync',0);
}
storeAndNotify('xSync',rc);
mTimeEnd();
},
xTruncate: async function(fid,size){
@@ -384,6 +394,7 @@ const vfsAsyncImpls = {
await fh.accessHandle.truncate(size);
}catch(e){
error("xTruncate():",e,fh);
state.s11n.serialize(e.message);
rc = state.sq3Codes.SQLITE_IOERR_TRUNCATE;
}
wTimeEnd();
@@ -403,6 +414,7 @@ const vfsAsyncImpls = {
) ? 0 : state.sq3Codes.SQLITE_IOERR_WRITE;
}catch(e){
error("xWrite():",e,fh);
state.s11n.serialize(e.message);
rc = state.sq3Codes.SQLITE_IOERR_WRITE;
}finally{
wTimeEnd();
@@ -413,30 +425,25 @@ const vfsAsyncImpls = {
};

const initS11n = ()=>{
// Achtung: this code is 100% duplicated in the other half of this proxy!

/**
Historical note: this impl was initially about 1% this size by
using JSON.stringify/parse(), but using fit-to-purpose serialization
saves considerable runtime.
ACHTUNG: this code is 100% duplicated in the other half of this
proxy! The documentation is maintained in the "synchronous half".
*/

if(state.s11n) return state.s11n;
const textDecoder = new TextDecoder(),
textEncoder = new TextEncoder('utf-8'),
viewU8 = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize),
viewDV = new DataView(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
state.s11n = Object.create(null);

const TypeIds = Object.create(null);
TypeIds.number = { id: 1, size: 8, getter: 'getFloat64', setter: 'setFloat64' };
TypeIds.bigint = { id: 2, size: 8, getter: 'getBigInt64', setter: 'setBigInt64' };
TypeIds.boolean = { id: 3, size: 4, getter: 'getInt32', setter: 'setInt32' };
TypeIds.string = { id: 4 };

const getTypeId = (v)=>{
return TypeIds[typeof v] || toss("This value type cannot be serialized.",v);
};
const getTypeId = (v)=>(
TypeIds[typeof v]
|| toss("Maintenance required: this value type cannot be serialized.",v)
);
const getTypeIdById = (tid)=>{
switch(tid){
case TypeIds.number.id: return TypeIds.number;
@@ -446,20 +453,14 @@ const initS11n = ()=>{
default: toss("Invalid type ID:",tid);
}
};

/**
Returns an array of the state serialized by the most recent
serialize() operation (here or in the counterpart thread), or
null if the serialization buffer is empty.
*/
state.s11n.deserialize = function(){
++metrics.s11n.deserialize.count;
const t = performance.now();
let rc = null;
const argc = viewU8[0];
const rc = argc ? [] : null;
if(argc){
rc = [];
let offset = 1, i, n, v, typeIds = [];
const typeIds = [];
let offset = 1, i, n, v;
for(i = 0; i < argc; ++i, ++offset){
typeIds.push(getTypeIdById(viewU8[offset]));
}
@@ -468,7 +469,7 @@ const initS11n = ()=>{
if(t.getter){
v = viewDV[t.getter](offset, state.littleEndian);
offset += t.size;
}else{
}else{/*String*/
n = viewDV.getInt32(offset, state.littleEndian);
offset += 4;
v = textDecoder.decode(viewU8.slice(offset, offset+n));
@@ -481,36 +482,28 @@ const initS11n = ()=>{
metrics.s11n.deserialize.time += performance.now() - t;
return rc;
};

/**
Serializes all arguments to the shared buffer for consumption
by the counterpart thread.

This routine is only intended for serializing OPFS VFS
arguments and (in at least one special case) result values,
and the buffer is sized to be able to comfortably handle
those.

If passed no arguments then it zeroes out the serialization
state.
*/
state.s11n.serialize = function(...args){
++metrics.s11n.serialize.count;
const t = performance.now();
++metrics.s11n.serialize.count;
if(args.length){
//log("serialize():",args);
let i = 0, offset = 1, typeIds = [];
viewU8[0] = args.length & 0xff;
const typeIds = [];
let i = 0, offset = 1;
viewU8[0] = args.length & 0xff /* header = # of args */;
for(; i < args.length; ++i, ++offset){
/* Write the TypeIds.id value into the next args.length
bytes. */
typeIds.push(getTypeId(args[i]));
viewU8[offset] = typeIds[i].id;
}
for(i = 0; i < args.length; ++i) {
/* Serialize the following bytes based on their
corresponding TypeIds.id from the header. */
const t = typeIds[i];
if(t.setter){
viewDV[t.setter](offset, args[i], state.littleEndian);
offset += t.size;
}else{
}else{/*String*/
const s = textEncoder.encode(args[i]);
viewDV.setInt32(offset, s.byteLength, state.littleEndian);
offset += 4;
@@ -548,6 +541,9 @@ const waitLoop = async function f(){
Atomics.store(state.sabOPView, state.opIds.whichOp, 0);
const hnd = opHandlers[opId] ?? toss("No waitLoop handler for whichOp #",opId);
const args = state.s11n.deserialize();
state.s11n.serialize()/* clear s11n to keep the caller from
confusing this with an exception string
written by the upcoming operation */;
//warn("waitLoop() whichOp =",opId, hnd, args);
if(hnd.f) await hnd.f(...args);
else error("Missing callback for opId",opId);
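/*
  Note on the round trip above: the same sabS11nOffset region carries
  data in both directions. waitLoop() deserializes the caller's
  arguments from it, clears it via the argument-less serialize() call,
  and then, if the dispatched handler fails, the handler's
  state.s11n.serialize(e.message) repopulates it so the waiting thread
  can read the exception text alongside the non-zero result code.
*/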

manifest
@@ -1,5 +1,5 @@
C Added\ssome\sstructure\sto\sthe\swasm\sdemo\slink\slist.
D 2022-09-21T12:25:40.497
C Doc\scleanups\sand\sadditions.\sAdd\sa\sway\sfor\sthe\sOPFS\sasync\srunner\sto\spropagate\sexception\stext\sto\sthe\scalling\sthread.
D 2022-09-21T12:27:35.940
F .fossil-settings/empty-dirs dbb81e8fc0401ac46a1491ab34a7f2c7c0452f2f06b54ebb845d024ca8283ef1
F .fossil-settings/ignore-glob 35175cdfcf539b2318cb04a9901442804be81cd677d8b889fcc9149c21f239ea
F LICENSE.md df5091916dbb40e6e9686186587125e1b2ff51f022cc334e886c19a0e9982724
@@ -484,11 +484,11 @@ F ext/wasm/api/post-js-header.js 2e5c886398013ba2af88028ecbced1e4b22dc96a86467f1
F ext/wasm/api/sqlite3-api-cleanup.js 8564a6077cdcaea9a9f428a019af8a05887f0131e6a2a1e72a7ff1145fadfe77
F ext/wasm/api/sqlite3-api-glue.js 366d580c8e5bf7fcf4c6dee6f646c31f5549bd417ea03a59a0acca00e8ecce30
F ext/wasm/api/sqlite3-api-oo1.js f974e79d9af8f26bf33928c5730b0988cc706d14f59a5fe36394739b92249841
F ext/wasm/api/sqlite3-api-opfs.js 10be4156d7db4d6aa8a456b4fb0f31a6e35c61297766e8bb55573fc5c0d56530
F ext/wasm/api/sqlite3-api-opfs.js dbbce38b0cd89d1eaf829546e2999241127150a40ff2e0331d842a1f31c756e5
F ext/wasm/api/sqlite3-api-prologue.js 6f3a67c4db37e884d33a05e5cf6d9d9bc012226a18c09f33f662fefd99840a63
F ext/wasm/api/sqlite3-api-worker1.js 2eeb2a24e1a90322d84a9b88a99919b806623de62792436446099c0988f2030b
F ext/wasm/api/sqlite3-wasi.h 25356084cfe0d40458a902afb465df8c21fc4152c1d0a59b563a3fba59a068f9
F ext/wasm/api/sqlite3-wasm.c 4130e2df9587f4e4c3afc04c3549d682c8a5c0cfe5b22819a0a86edb7f01b9bd
F ext/wasm/api/sqlite3-wasm.c 9401a3f9bd191a410b4f679b7957c6b7e168a68106f52ddeafa1c776d0364e49
F ext/wasm/batch-runner.html 2857a6db7292ac83d1581af865d643fd34235db2df830d10b43b01388c599e04
F ext/wasm/batch-runner.js 6f5b86e0b5519a9a941d9f17ee9c5ecdc63f452f157602fe7fdf87f6275a2b49
F ext/wasm/common/SqliteTestUtil.js 529161a624265ba84271a52db58da022649832fa1c71309fb1e02cc037327a2b
@@ -519,7 +519,7 @@ F ext/wasm/speedtest1.html 8ae6ece128151d01f90579de69cfa06f021acdb760735250ef745
F ext/wasm/split-speedtest1-script.sh a3e271938d4d14ee49105eb05567c6a69ba4c1f1293583ad5af0cd3a3779e205 x
F ext/wasm/sql/000-mandelbrot.sql 775337a4b80938ac8146aedf88808282f04d02d983d82675bd63d9c2d97a15f0
F ext/wasm/sql/001-sudoku.sql 35b7cb7239ba5d5f193bc05ec379bcf66891bce6f2a5b3879f2f78d0917299b5
F ext/wasm/sqlite3-opfs-async-proxy.js 9305d92f32d02983c4528b9c801096cfd8295ca7d24e357d90de9bbcb201d035
F ext/wasm/sqlite3-opfs-async-proxy.js 0523e3093df2ad2c58691aa65c5e32c0aafb1bbabb6119dd9406d34a8e16dd68
F ext/wasm/sqlite3-worker1-promiser.js 4fd0465688a28a75f1d4ee4406540ba494f49844e3cad0670d0437a001943365
F ext/wasm/sqlite3-worker1.js 0c1e7626304543969c3846573e080c082bf43bcaa47e87d416458af84f340a9e
F ext/wasm/test-opfs-vfs.html eb69dda21eb414b8f5e3f7c1cc0f774103cc9c0f87b2d28a33419e778abfbab5
@@ -2026,8 +2026,8 @@ F vsixtest/vsixtest.tcl 6a9a6ab600c25a91a7acc6293828957a386a8a93
F vsixtest/vsixtest.vcxproj.data 2ed517e100c66dc455b492e1a33350c1b20fbcdc
F vsixtest/vsixtest.vcxproj.filters 37e51ffedcdb064aad6ff33b6148725226cd608e
F vsixtest/vsixtest_TemporaryKey.pfx e5b1b036facdb453873e7084e1cae9102ccc67a0
P 96c734c07acfbea153d9aaf293a28a2d49d78de19fec4fb90c4c08fb1fd0ddbd
R 861bbf4cfa9812be3b9491caf1943540
P 777077c4c2249e1ec78390d4f65aaf281c1fbefcef4bcc7609199e995645ceb6
R 52ae18958684abb42f4ad80857c37c9c
U stephan
Z 172073e3d6aad2306bf7614651c7edbb
Z 4853fab0e74bf411129cdad2e17be435
# Remove this line to create a well-formed Fossil manifest.

manifest.uuid
@@ -1 +1 @@
777077c4c2249e1ec78390d4f65aaf281c1fbefcef4bcc7609199e995645ceb6
5c5e80652825cf883e6c17809cb98f2bf17d5feac2d263f6f492479154730dab