Mirror of https://github.com/sqlite/sqlite.git, synced 2025-07-30 19:03:16 +03:00.
Add the remaining vfs/io_methods wrappers to the OPFS sync/async proxy, but most are not yet tested.
FossilOrigin-Name: 44db9132145b3072488ea91db53f6c06be74544beccad5fd07efd22c0f03dc04
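For orientation before the diff itself: the proxy added here pairs a blocking Atomics.wait() on the synchronous side with an Atomics.store()/Atomics.notify() on the asynchronous Worker side, one Int32Array slot per operation. The stand-alone sketch below (hypothetical names, not part of this commit) illustrates only that handshake; the real code in the hunks adds per-file SharedArrayBuffers, SQLITE_xxx result codes, and OPFS access handles.

// Hypothetical sketch of the sync/async handshake used by this commit.
// Synchronous side (must itself run in a Worker so Atomics.wait() is permitted):
const opIds = {demoOp: 0};
const opSab = new SharedArrayBuffer(4 * Object.keys(opIds).length);
const opBuf = new Int32Array(opSab);
const asyncWorker = new Worker("demo-async-half.js"); // hypothetical file name
asyncWorker.postMessage({type: 'init', payload: {opSab, opIds}});
function runOp(op, args){
  Atomics.store(opBuf, opIds[op], -1);       // mark the op as pending
  asyncWorker.postMessage({type: op, payload: args});
  Atomics.wait(opBuf, opIds[op], -1);        // block until the async side notifies
  return Atomics.load(opBuf, opIds[op]);     // result code written by the async side
}
// Asynchronous side (demo-async-half.js), after receiving {opSab, opIds} via 'init',
// would finish each op with:
//   Atomics.store(opBuf, opIds[op], resultCode);
//   Atomics.notify(opBuf, opIds[op]);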
@@ -20,267 +20,308 @@
https://github.com/rhashimoto/wa-sqlite/blob/master/src/examples/OriginPrivateFileSystemVFS.js

for demonstrating how to use the OPFS APIs.

This file is to be loaded as a Worker. It does not have any direct
access to the sqlite3 JS/WASM bits, so any bits which it needs (most
notably SQLITE_xxx integer codes) have to be imported into it via an
initialization process.
*/
'use strict';
(function(){
const toss = function(...args){throw new Error(args.join(' '))};
if(self.window === self){
toss("This code cannot run from the main thread.",
"Load it as a Worker from a separate Worker.");
}else if(!navigator.storage.getDirectory){
toss("This API requires navigator.storage.getDirectory.");
const toss = function(...args){throw new Error(args.join(' '))};
if(self.window === self){
toss("This code cannot run from the main thread.",
"Load it as a Worker from a separate Worker.");
}else if(!navigator.storage.getDirectory){
toss("This API requires navigator.storage.getDirectory.");
}
/**
Will hold state copied to this object from the synchronous side of
this API.
*/
const state = Object.create(null);
/**
verbose:

0 = no logging output
1 = only errors
2 = warnings and errors
3 = debug, warnings, and errors
*/
state.verbose = 2;

const __logPrefix = "OPFS asyncer:";
const log = (...args)=>{
if(state.verbose>2) console.log(__logPrefix,...args);
};
const warn = (...args)=>{
if(state.verbose>1) console.warn(__logPrefix,...args);
};
const error = (...args)=>{
if(state.verbose) console.error(__logPrefix,...args);
};

warn("This file is very much experimental and under construction.",self.location.pathname);

/**
Map of sqlite3_file pointers (integers) to metadata related to a
given OPFS file handles. The pointers are, in this side of the
interface, opaque file handle IDs provided by the synchronous
part of this constellation. Each value is an object with a structure
demonstrated in the xOpen() impl.
*/
const __openFiles = Object.create(null);

/**
Map of dir names to FileSystemDirectoryHandle objects.
*/
const __dirCache = new Map;

/**
Takes the absolute path to a filesystem element. Returns an array
of [handleOfContainingDir, filename]. If the 2nd argument is
truthy then each directory element leading to the file is created
along the way. Throws if any creation or resolution fails.
*/
const getDirForPath = async function f(absFilename, createDirs = false){
const url = new URL(
absFilename, 'file://xyz'
) /* use URL to resolve path pieces such as a/../b */;
const path = url.pathname.split('/').filter((v)=>!!v);
const filename = path.pop();
const allDirs = '/'+path.join('/');
let dh = __dirCache.get(allDirs);
if(!dh){
dh = state.rootDir;
for(const dirName of path){
if(dirName){
dh = await dh.getDirectoryHandle(dirName, {create: !!createDirs});
}
}
__dirCache.set(allDirs, dh);
}
const logPrefix = "OPFS worker:";
const log = (...args)=>{
console.log(logPrefix,...args);
};
const warn = (...args)=>{
console.warn(logPrefix,...args);
};
const error = (...args)=>{
console.error(logPrefix,...args);
};
return [dh, filename];
};

warn("This file is very much experimental and under construction.",self.location.pathname);
const wMsg = (type,payload)=>postMessage({type,payload});

const state = Object.create(null);
/*state.opSab;
state.sabIO;
state.opBuf;
state.opIds;
state.rootDir;*/
/**
Map of sqlite3_file pointers (integers) to metadata related to a
given OPFS file handles. The pointers are, in this side of the
interface, opaque file handle IDs provided by the synchronous
part of this constellation. Each value is an object with a structure
demonstrated in the xOpen() impl.
*/
state.openFiles = Object.create(null);
/**
Stores the given value at the array index reserved for the given op
and then Atomics.notify()'s it.
*/
const storeAndNotify = (opName, value)=>{
log(opName+"() is notify()ing w/ value:",value);
Atomics.store(state.opBuf, state.opIds[opName], value);
Atomics.notify(state.opBuf, state.opIds[opName]);
};

/**
Map of dir names to FileSystemDirectoryHandle objects.
*/
state.dirCache = new Map;
const isInt32 = function(n){
return ('bigint'!==typeof n /*TypeError: can't convert BigInt to number*/)
&& !!(n===(n|0) && n<=2147483647 && n>=-2147483648);
};
const affirm32Bits = function(n){
return isInt32(n) || toss("Number is too large (>31 bits) (FIXME!):",n);
};

const __splitPath = (absFilename)=>{
const a = absFilename.split('/').filter((v)=>!!v);
return [a, a.pop()];
};
/**
Takes the absolute path to a filesystem element. Returns an array
of [handleOfContainingDir, filename]. If the 2nd argument is
truthy then each directory element leading to the file is created
along the way. Throws if any creation or resolution fails.
*/
const getDirForPath = async function f(absFilename, createDirs = false){
const url = new URL(
absFilename, 'file://xyz'
) /* use URL to resolve path pieces such as a/../b */;
const [path, filename] = __splitPath(url.pathname);
const allDirs = path.join('/');
let dh = state.dirCache.get(allDirs);
if(!dh){
dh = state.rootDir;
for(const dirName of path){
if(dirName){
dh = await dh.getDirectoryHandle(dirName, {create: !!createDirs});
}
/**
Throws if fh is a file-holding object which is flagged as read-only.
*/
const affirmNotRO = function(opName,fh){
if(fh.readOnly) toss(opName+"(): File is read-only: "+fh.filenameAbs);
};

/**
Asynchronous wrappers for sqlite3_vfs and sqlite3_io_methods
methods. Maintenance reminder: members are in alphabetical order
to simplify finding them.
*/
const vfsAsyncImpls = {
xAccess: async function({filename, exists, readWrite}){
warn("xAccess(",arguments[0],") is TODO");
const rc = state.sq3Codes.SQLITE_IOERR;
storeAndNotify('xAccess', rc);
},
xClose: async function(fid){
const opName = 'xClose';
log(opName+"(",arguments[0],")");
const fh = __openFiles[fid];
if(fh){
delete __openFiles[fid];
if(fh.accessHandle) await fh.accessHandle.close();
if(fh.deleteOnClose){
try{ await fh.dirHandle.removeEntry(fh.filenamePart) }
catch(e){ warn("Ignoring dirHandle.removeEntry() failure of",fh,e) }
}
state.dirCache.set(allDirs, dh);
storeAndNotify(opName, 0);
}else{
storeAndNotify(opName, state.sq3Codes.SQLITE_NOTFOUND);
}
return [dh, filename];
};


/**
Generates a random ASCII string len characters long, intended for
use as a temporary file name.
*/
const randomFilename = function f(len=16){
if(!f._chars){
f._chars = "abcdefghijklmnopqrstuvwxyz"+
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"+
"012346789";
f._n = f._chars.length;
},
xDelete: async function({filename, syncDir/*ignored*/}){
log("xDelete(",arguments[0],")");
try {
const [hDir, filenamePart] = await getDirForPath(filename, false);
await hDir.removeEntry(filenamePart);
}catch(e){
/* Ignoring: _presumably_ the file can't be found. */
}
const a = [];
let i = 0;
for( ; i < len; ++i){
const ndx = Math.random() * (f._n * 64) % f._n | 0;
a[i] = f._chars[ndx];
storeAndNotify('xDelete', 0);
},
xFileSize: async function(fid){
log("xFileSize(",arguments,")");
const fh = __openFiles[fid];
let sz;
try{
sz = await fh.accessHandle.getSize();
fh.sabViewFileSize.setBigInt64(0, BigInt(sz));
sz = 0;
}catch(e){
error("xFileSize():",e, fh);
sz = state.sq3Codes.SQLITE_IOERR;
}
return a.join('');
};

const storeAndNotify = (opName, value)=>{
log(opName+"() is notify()ing w/ value:",value);
Atomics.store(state.opBuf, state.opIds[opName], value);
Atomics.notify(state.opBuf, state.opIds[opName]);
};

const isInt32 = function(n){
return ('bigint'!==typeof n /*TypeError: can't convert BigInt to number*/)
&& !!(n===(n|0) && n<=2147483647 && n>=-2147483648);
};
const affirm32Bits = function(n){
return isInt32(n) || toss("Number is too large (>31 bits):",n);
};

const ioMethods = {
xAccess: async function({filename, exists, readWrite}){
log("xAccess(",arguments,")");
const rc = 1;
storeAndNotify('xAccess', rc);
},
xClose: async function(fid){
const opName = 'xClose';
storeAndNotify('xFileSize', sz);
},
xOpen: async function({
fid/*sqlite3_file pointer*/,
sab/*file-specific SharedArrayBuffer*/,
filename,
fileType = undefined /*mainDb, mainJournal, etc.*/,
create = false, readOnly = false, deleteOnClose = false
}){
const opName = 'xOpen';
try{
if(create) readOnly = false;
log(opName+"(",arguments[0],")");
log("state.openFiles",state.openFiles);
const fh = state.openFiles[fid];
if(fh){
delete state.openFiles[fid];
//await fh.close();
if(fh.accessHandle) await fh.accessHandle.close();
if(fh.deleteOnClose){
try{
await fh.dirHandle.removeEntry(fh.filenamePart);
}
catch(e){
warn("Ignoring dirHandle.removeEntry() failure of",fh);
}
}
log("state.openFiles",state.openFiles);
storeAndNotify(opName, 0);
}else{
storeAndNotify(opName, state.errCodes.NotFound);
}
},
xDelete: async function(filename){
log("xDelete(",arguments,")");
storeAndNotify('xClose', 0);
},
xFileSize: async function(fid){
log("xFileSize(",arguments,")");
const fh = state.openFiles[fid];
const sz = await fh.getSize();
affirm32Bits(sz);
storeAndNotify('xFileSize', sz | 0);
},
xOpen: async function({
fid/*sqlite3_file pointer*/, sab/*file-specific SharedArrayBuffer*/,
filename,
fileType = undefined /*mainDb, mainJournal, etc.*/,
create = false, readOnly = false, deleteOnClose = false,
}){
const opName = 'xOpen';
try{
if(create) readOnly = false;
log(opName+"(",arguments[0],")");

let hDir, filenamePart, hFile;
try {
[hDir, filenamePart] = await getDirForPath(filename, !!create);
}catch(e){
storeAndNotify(opName, state.errCodes.NotFound);
return;
}
hFile = await hDir.getFileHandle(filenamePart, {create: !!create});
log(opName,"filenamePart =",filenamePart, 'hDir =',hDir);
const fobj = state.openFiles[fid] = Object.create(null);
fobj.filenameAbs = filename;
fobj.filenamePart = filenamePart;
fobj.dirHandle = hDir;
fobj.fileHandle = hFile;
fobj.accessHandle = undefined;
fobj.fileType = fileType;
fobj.sab = sab;
fobj.create = !!create;
fobj.readOnly = !!readOnly;
fobj.deleteOnClose = !!deleteOnClose;

/**
wa-sqlite, at this point, grabs a SyncAccessHandle and
assigns it to the accessHandle prop of the file state
object, but it's unclear why it does that.
*/
storeAndNotify(opName, 0);
let hDir, filenamePart;
try {
[hDir, filenamePart] = await getDirForPath(filename, !!create);
}catch(e){
error(opName,e);
storeAndNotify(opName, state.errCodes.IO);
storeAndNotify(opName, state.sq3Codes.SQLITE_NOTFOUND);
return;
}
},
xRead: async function({fid,n,offset}){
log("xRead(",arguments,")");
affirm32Bits(n + offset);
const fh = state.openFiles[fid];
storeAndNotify('xRead',fid);
},
xSleep: async function f({ms}){
log("xSleep(",arguments[0],")");
await new Promise((resolve)=>{
setTimeout(()=>resolve(), ms);
}).finally(()=>storeAndNotify('xSleep',0));
},
xSync: async function({fid}){
log("xSync(",arguments,")");
const fh = state.openFiles[fid];
await fh.flush();
storeAndNotify('xSync',fid);
},
xTruncate: async function({fid,size}){
log("xTruncate(",arguments,")");
affirm32Bits(size);
const fh = state.openFiles[fid];
fh.truncate(size);
storeAndNotify('xTruncate',fid);
},
xWrite: async function({fid,src,n,offset}){
log("xWrite(",arguments,")");
const fh = state.openFiles[fid];
storeAndNotify('xWrite',fid);
const hFile = await hDir.getFileHandle(filenamePart, {create: !!create});
log(opName,"filenamePart =",filenamePart, 'hDir =',hDir);
const fobj = __openFiles[fid] = Object.create(null);
fobj.filenameAbs = filename;
fobj.filenamePart = filenamePart;
fobj.dirHandle = hDir;
fobj.fileHandle = hFile;
fobj.fileType = fileType;
fobj.sab = sab;
fobj.sabViewFileSize = new DataView(sab,state.fbInt64Offset,8);
fobj.create = !!create;
fobj.readOnly = !!readOnly;
fobj.deleteOnClose = !!deleteOnClose;
/**
wa-sqlite, at this point, grabs a SyncAccessHandle and
assigns it to the accessHandle prop of the file state
object, but only for certain cases and it's unclear why it
places that limitation on it.
*/
fobj.accessHandle = await hFile.createSyncAccessHandle();
storeAndNotify(opName, 0);
}catch(e){
error(opName,e);
storeAndNotify(opName, state.sq3Codes.SQLITE_IOERR);
}
},
xRead: async function({fid,n,offset}){
log("xRead(",arguments[0],")");
let rc = 0;
const fh = __openFiles[fid];
try{
const aRead = new Uint8Array(fh.sab, n);
const nRead = fh.accessHandle.read(aRead, {at: offset});
if(nRead < n){/* Zero-fill remaining bytes */
new Uint8Array(fh.sab).fill(0, nRead, n);
rc = state.sq3Codes.SQLITE_IOERR_SHORT_READ;
}
}catch(e){
error("xRead() failed",e,fh);
rc = state.sq3Codes.SQLITE_IOERR_READ;
}
storeAndNotify('xRead',rc);
},
xSleep: async function f(ms){
log("xSleep(",ms,")");
await new Promise((resolve)=>{
setTimeout(()=>resolve(), ms);
}).finally(()=>storeAndNotify('xSleep',0));
},
xSync: async function({fid,flags/*ignored*/}){
log("xSync(",arguments[0],")");
const fh = __openFiles[fid];
if(!fh.readOnly && fh.accessHandle) await fh.accessHandle.flush();
storeAndNotify('xSync',0);
},
xTruncate: async function({fid,size}){
log("xTruncate(",arguments[0],")");
let rc = 0;
const fh = __openFiles[fid];
try{
affirmNotRO('xTruncate', fh);
await fh.accessHandle.truncate(size);
}catch(e){
error("xTruncate():",e,fh);
rc = state.sq3Codes.SQLITE_IOERR_TRUNCATE;
}
storeAndNotify('xTruncate',rc);
},
xWrite: async function({fid,src,n,offset}){
log("xWrite(",arguments[0],")");
let rc;
try{
const fh = __openFiles[fid];
affirmNotRO('xWrite', fh);
const nOut = fh.accessHandle.write(new Uint8Array(fh.sab, 0, n), {at: offset});
rc = (nOut===n) ? 0 : state.sq3Codes.SQLITE_IOERR_WRITE;
}catch(e){
error("xWrite():",e,fh);
rc = state.sq3Codes.SQLITE_IOERR_WRITE;
}
storeAndNotify('xWrite',rc);
}
};

navigator.storage.getDirectory().then(function(d){
const wMsg = (type)=>postMessage({type});
state.rootDir = d;
log("state.rootDir =",state.rootDir);
self.onmessage = async function({data}){
log("self.onmessage()",data);
switch(data.type){
case 'init':{
/* Receive shared state from synchronous partner */
const opt = data.payload;
state.verbose = opt.verbose ?? 2;
state.fileBufferSize = opt.fileBufferSize;
state.fbInt64Offset = opt.fbInt64Offset;
state.opSab = opt.opSab;
state.opBuf = new Int32Array(state.opSab);
state.opIds = opt.opIds;
state.sq3Codes = opt.sq3Codes;
Object.keys(vfsAsyncImpls).forEach((k)=>{
if(!Number.isFinite(state.opIds[k])){
toss("Maintenance required: missing state.opIds[",k,"]");
}
});
log("init state",state);
wMsg('inited');
break;
}
default:{
let err;
const m = vfsAsyncImpls[data.type] || toss("Unknown message type:",data.type);
try {
await m(data.payload).catch((e)=>err=e);
}catch(e){
err = e;
}
if(err){
error("Error handling",data.type+"():",err);
storeAndNotify(data.type, state.sq3Codes.SQLITE_ERROR);
}
break;
}
}
};

const onReady = function(){
self.onmessage = async function({data}){
log("self.onmessage",data);
switch(data.type){
case 'init':{
const opt = data.payload;
state.opSab = opt.opSab;
state.opBuf = new Int32Array(state.opSab);
state.opIds = opt.opIds;
state.errCodes = opt.errCodes;
state.sq3Codes = opt.sq3Codes;
Object.keys(ioMethods).forEach((k)=>{
if(!state.opIds[k]){
toss("Maintenance required: missing state.opIds[",k,"]");
}
});
log("init state",state);
break;
}
default:{
const m = ioMethods[data.type] || toss("Unknown message type:",data.type);
try {
await m(data.payload);
}catch(e){
error("Error handling",data.type+"():",e);
storeAndNotify(data.type, -99);
}
break;
}
}
};
wMsg('ready');
};

navigator.storage.getDirectory().then(function(d){
state.rootDir = d;
log("state.rootDir =",state.rootDir);
onReady();
});

})();
wMsg('loaded');
});
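The xFileSize() halves on the two sides of this proxy (the async implementation above, the synchronous wrapper later in this commit) exchange a 64-bit size by writing it into a reserved 8-byte slot at the end of the per-file SharedArrayBuffer. A minimal stand-alone sketch of that mechanism, with hypothetical names:

// Hypothetical sketch: pass an int64 result through a SharedArrayBuffer slot.
const sab = new SharedArrayBuffer(1024 * 64 + 8);   // data area plus 8 bytes for an int64
const int64Offset = sab.byteLength - 8;
// Producer (async side): store the size at the reserved offset.
new DataView(sab, int64Offset, 8).setBigInt64(0, BigInt(123456));
// Consumer (sync side): read it back after the op's Atomics handshake completes.
const size = new DataView(sab, int64Offset, 8).getBigInt64(0); // 123456n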
@@ -13,16 +13,10 @@
<div>This is an experiment in wrapping the
asynchronous OPFS APIs behind a fully synchronous proxy. It is
very much incomplete, under construction, and experimental.
See the dev console for all output.
<strong>See the dev console for all output.</strong>
</div>
<div id='test-output'>
</div>
<!--script src="common/whwasmutil.js"></script-->
<!--script src="common/SqliteTestUtil.js"></script-->
<script>
(function(){
new Worker("x-sync-async.js");
})();
</script>
<script>new Worker("x-sync-async.js");</script>
</body>
</html>
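Not shown in this diff: SharedArrayBuffer, and therefore the whole proxy, is only available to pages served with cross-origin isolation (COOP/COEP) enabled. A hedged sketch of a guard the test page could add before spawning the Worker; this is an assumption about deployment, not part of the commit:

<script>
// Assumption: SharedArrayBuffer requires cross-origin isolation; fail early
// with a readable message if the page was not served with COOP/COEP headers.
if(globalThis.crossOriginIsolated){
  new Worker("x-sync-async.js");
}else{
  console.error("This demo needs cross-origin isolation (COOP/COEP) for SharedArrayBuffer.");
}
</script>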
@@ -1,5 +1,29 @@
/*
2022-09-17

The author disclaims copyright to this source code. In place of a
legal notice, here is a blessing:

* May you do good and not evil.
* May you find forgiveness for yourself and forgive others.
* May you share freely, never taking more than you give.

***********************************************************************

An EXTREMELY INCOMPLETE and UNDER CONSTRUCTION experiment for OPFS.
This file holds the synchronous half of an sqlite3_vfs
implementation which proxies, in a synchronous fashion, the
asynchronous OPFS APIs using a second Worker.
*/
'use strict';
const doAtomicsStuff = function(sqlite3){
/**
This function is a placeholder for use in development. When
working, this will be moved into a file named
api/sqlite3-api-opfs.js, or similar, and hooked into the
sqlite-api build construct.
*/
const initOpfsVfs = function(sqlite3){
const toss = function(...args){throw new Error(args.join(' '))};
const logPrefix = "OPFS syncer:";
const log = (...args)=>{
console.log(logPrefix,...args);
@@ -10,46 +34,73 @@ const doAtomicsStuff = function(sqlite3){
const error = (...args)=>{
console.error(logPrefix,...args);
};
warn("This file is very much experimental and under construction.",self.location.pathname);

const capi = sqlite3.capi;
const wasm = capi.wasm;
const sqlite3_vfs = capi.sqlite3_vfs
|| toss("Missing sqlite3.capi.sqlite3_vfs object.");
const sqlite3_file = capi.sqlite3_file
|| toss("Missing sqlite3.capi.sqlite3_file object.");
const sqlite3_io_methods = capi.sqlite3_io_methods
|| toss("Missing sqlite3.capi.sqlite3_io_methods object.");
const StructBinder = sqlite3.StructBinder || toss("Missing sqlite3.StructBinder.");

const W = new Worker("sqlite3-opfs-async-proxy.js");
const wMsg = (type,payload)=>W.postMessage({type,payload});
warn("This file is very much experimental and under construction.",self.location.pathname);

/**
State which we send to the async-api Worker or share with it.
This object must initially contain only cloneable or sharable
objects. After the worker's "ready" message arrives, other types
objects. After the worker's "inited" message arrives, other types
of data may be added to it.
*/
const state = Object.create(null);
state.verbose = 3;
state.fileBufferSize = 1024 * 64 + 8 /* size of fileHandle.sab. 64k = max sqlite3 page size */;
state.fbInt64Offset = state.fileBufferSize - 8 /*spot in fileHandle.sab to store an int64*/;
state.opIds = Object.create(null);
state.opIds.xAccess = 1;
state.opIds.xClose = 2;
state.opIds.xDelete = 3;
state.opIds.xFileSize = 4;
state.opIds.xOpen = 5;
state.opIds.xRead = 6;
state.opIds.xSync = 7;
state.opIds.xTruncate = 8;
state.opIds.xWrite = 9;
state.opIds.xSleep = 10;
state.opIds.xBlock = 99 /* to block worker while this code is still handling something */;
state.opSab = new SharedArrayBuffer(64);
state.fileBufferSize = 1024 * 65 /* 64k = max sqlite3 page size */;
/* TODO: use SQLITE_xxx err codes. */
state.errCodes = Object.create(null);
state.errCodes.Error = -100;
state.errCodes.IO = -101;
state.errCodes.NotFound = -102;
state.errCodes.Misuse = -103;
{
let i = 0;
state.opIds.xAccess = i++;
state.opIds.xClose = i++;
state.opIds.xDelete = i++;
state.opIds.xFileSize = i++;
state.opIds.xOpen = i++;
state.opIds.xRead = i++;
state.opIds.xSleep = i++;
state.opIds.xSync = i++;
state.opIds.xTruncate = i++;
state.opIds.xWrite = i++;
state.opSab = new SharedArrayBuffer(i * 4);
}

// TODO: add any SQLITE_xxx symbols we need here.
state.sq3Codes = Object.create(null);

const isWorkerErrCode = (n)=>(n<=state.errCodes.Error);
state.sq3Codes._reverse = Object.create(null);
[ // SQLITE_xxx constants to export to the async worker counterpart...
'SQLITE_ERROR', 'SQLITE_IOERR',
'SQLITE_NOTFOUND', 'SQLITE_MISUSE',
'SQLITE_IOERR_READ', 'SQLITE_IOERR_SHORT_READ',
'SQLITE_IOERR_WRITE', 'SQLITE_IOERR_FSYNC',
'SQLITE_IOERR_TRUNCATE', 'SQLITE_IOERR_DELETE',
'SQLITE_IOERR_ACCESS', 'SQLITE_IOERR_CLOSE'
].forEach(function(k){
state.sq3Codes[k] = capi[k] || toss("Maintenance required: not found:",k);
state.sq3Codes._reverse[capi[k]] = k;
});

const isWorkerErrCode = (n)=>!!state.sq3Codes._reverse[n];

const opStore = (op,val=-1)=>Atomics.store(state.opBuf, state.opIds[op], val);
const opWait = (op,val=-1)=>Atomics.wait(state.opBuf, state.opIds[op], val);

/**
Runs the given operation in the async worker counterpart, waits
for its response, and returns the result which the async worker
writes to the given op's index in state.opBuf. The 2nd argument
must be a single object or primitive value, depending on the
given operation's signature in the async API counterpart.
*/
const opRun = (op,args)=>{
opStore(op);
wMsg(op, args);
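The remainder of opRun() falls outside this hunk. Based on the opStore()/opWait() helpers defined above and the docstring, the round trip it performs has roughly this shape; a sketch under those assumptions, not the committed body:

// Sketch only: opStore(op) seeds the op's slot with -1, wMsg(op, args) wakes the
// async worker, opWait(op) blocks until the worker Atomics.notify()s a new value,
// and the stored result code is then returned to the caller.
const opRunSketch = (op, args)=>{
  opStore(op);                                        // mark as pending (-1)
  wMsg(op, args);                                     // forward the op to the async half
  opWait(op);                                         // Atomics.wait() until notified
  return Atomics.load(state.opBuf, state.opIds[op]);  // result written by storeAndNotify()
};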
@@ -63,71 +114,321 @@ const doAtomicsStuff = function(sqlite3){
});
};

const vfsSyncWrappers = {
xOpen: function f(pFile, name, flags, outFlags = {}){
if(!f._){
f._ = {
// TODO: map openFlags to args.fileType names.
};
}
const args = Object.create(null);
args.fid = pFile;
args.filename = name;
args.sab = new SharedArrayBuffer(state.fileBufferSize);
args.fileType = undefined /*TODO: populate based on SQLITE_OPEN_xxx */;
// TODO: populate args object based on flags:
// args.create, args.readOnly, args.deleteOnClose
args.create = true;
args.deleteOnClose = true;
const rc = opRun('xOpen', args);
if(!rc){
outFlags.readOnly = args.readOnly;
args.ba = new Uint8Array(args.sab);
state.openFiles[pFile] = args;
}
return rc;
/**
Generates a random ASCII string len characters long, intended for
use as a temporary file name.
*/
const randomFilename = function f(len=16){
if(!f._chars){
f._chars = "abcdefghijklmnopqrstuvwxyz"+
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"+
"012346789";
f._n = f._chars.length;
}
const a = [];
let i = 0;
for( ; i < len; ++i){
const ndx = Math.random() * (f._n * 64) % f._n | 0;
a[i] = f._chars[ndx];
}
return a.join('');
};

/**
Map of sqlite3_file pointers to objects constructed by xOpen().
*/
const __openFiles = Object.create(null);

const pDVfs = capi.sqlite3_vfs_find(null)/*pointer to default VFS*/;
const dVfs = pDVfs
? new sqlite3_vfs(pDVfs)
: null /* dVfs will be null when sqlite3 is built with
SQLITE_OS_OTHER. Though we cannot currently handle
that case, the hope is to eventually be able to. */;
const opfsVfs = new sqlite3_vfs();
const opfsIoMethods = new sqlite3_io_methods();
opfsVfs.$iVersion = 2/*yes, two*/;
opfsVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof;
opfsVfs.$mxPathname = 1024/*sure, why not?*/;
opfsVfs.$zName = wasm.allocCString("opfs");
opfsVfs.ondispose = [
'$zName', opfsVfs.$zName,
'cleanup dVfs', ()=>(dVfs ? dVfs.dispose() : null)
];
if(dVfs){
opfsVfs.$xSleep = dVfs.$xSleep;
opfsVfs.$xRandomness = dVfs.$xRandomness;
}
// All C-side memory of opfsVfs is zeroed out, but just to be explicit:
opfsVfs.$xDlOpen = opfsVfs.$xDlError = opfsVfs.$xDlSym = opfsVfs.$xDlClose = null;
/**
Pedantic sidebar about opfsVfs.ondispose: the entries in that array
are items to clean up when opfsVfs.dispose() is called, but in this
environment it will never be called. The VFS instance simply
hangs around until the WASM module instance is cleaned up. We
"could" _hypothetically_ clean it up by "importing" an
sqlite3_os_end() impl into the wasm build, but the shutdown order
of the wasm engine and the JS one are undefined so there is no
guarantee that the opfsVfs instance would be available in one
environment or the other when sqlite3_os_end() is called (_if_ it
gets called at all in a wasm build, which is undefined).
*/

/**
Impls for the sqlite3_io_methods methods. Maintenance reminder:
members are in alphabetical order to simplify finding them.
*/
const ioSyncWrappers = {
xCheckReservedLock: function(pFile,pOut){
// Exclusive lock is automatically acquired when opened
//warn("xCheckReservedLock(",arguments,") is a no-op");
wasm.setMemValue(pOut,1,'i32');
return 0;
},
xClose: function(pFile){
let rc = 0;
if(state.openFiles[pFile]){
delete state.openFiles[pFile];
const f = __openFiles[pFile];
if(f){
delete __openFiles[pFile];
rc = opRun('xClose', pFile);
if(f.sq3File) f.sq3File.dispose();
}
return rc;
},
xDeviceCharacteristics: function(pFile){
//debug("xDeviceCharacteristics(",pFile,")");
return capi.SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN;
},
xFileControl: function(pFile,op,pArg){
//debug("xFileControl(",arguments,") is a no-op");
return capi.SQLITE_NOTFOUND;
},
xFileSize: function(pFile,pSz64){
const rc = opRun('xFileSize', pFile);
if(!isWorkerErrCode(rc)){
const f = __openFiles[pFile];
wasm.setMemValue(pSz64, f.sabViewFileSize.getBigInt64(0), 'i64');
}
return rc;
},
xLock: function(pFile,lockType){
//2022-09: OPFS handles lock when opened
//warn("xLock(",arguments,") is a no-op");
return 0;
},
xRead: function(pFile,pDest,n,offset){
/* int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst) */
const f = __openFiles[pFile];
let rc;
try {
// FIXME(?): block until we finish copying the xRead result buffer. How?
rc = opRun('xRead',{fid:pFile, n, offset});
if(0!==rc) return rc;
let i = 0;
for(; i < n; ++i) wasm.setMemValue(pDest + i, f.sabView[i]);
}catch(e){
error("xRead(",arguments,") failed:",e,f);
rc = capi.SQLITE_IOERR_READ;
}
return rc;
},
xSync: function(pFile,flags){
return opRun('xSync', {fid:pFile, flags});
},
xTruncate: function(pFile,sz64){
return opRun('xTruncate', {fid:pFile, size: sz64});
},
xUnlock: function(pFile,lockType){
//2022-09: OPFS handles lock when opened
//warn("xUnlock(",arguments,") is a no-op");
return 0;
},
xWrite: function(pFile,pSrc,n,offset){
/* int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst) */
const f = __openFiles[pFile];
try {
let i = 0;
// FIXME(?): block from here until we finish the xWrite. How?
for(; i < n; ++i) f.sabView[i] = wasm.getMemValue(pSrc+i);
return opRun('xWrite',{fid:pFile, n, offset});
}catch(e){
error("xWrite(",arguments,") failed:",e,f);
return capi.SQLITE_IOERR_WRITE;
}
}
};
}/*ioSyncWrappers*/;

/**
Impls for the sqlite3_vfs methods. Maintenance reminder: members
are in alphabetical order to simplify finding them.
*/
const vfsSyncWrappers = {
// TODO: xAccess
xCurrentTime: function(pVfs,pOut){
/* If it turns out that we need to adjust for timezone, see:
https://stackoverflow.com/a/11760121/1458521 */
wasm.setMemValue(pOut, 2440587.5 + (new Date().getTime()/86400000),
'double');
return 0;
},
xCurrentTimeInt64: function(pVfs,pOut){
// TODO: confirm that this calculation is correct
wasm.setMemValue(pOut, (2440587.5 * 86400000) + new Date().getTime(),
'i64');
return 0;
},
xDelete: function(pVfs, zName, doSyncDir){
return opRun('xDelete', {filename: wasm.cstringToJs(zName), syncDir: doSyncDir});
},
xFullPathname: function(pVfs,zName,nOut,pOut){
/* Until/unless we have some notion of "current dir"
in OPFS, simply copy zName to pOut... */
const i = wasm.cstrncpy(pOut, zName, nOut);
return i<nOut ? 0 : capi.SQLITE_CANTOPEN
/*CANTOPEN is required by the docs but SQLITE_RANGE would be a closer match*/;
},
xGetLastError: function(pVfs,nOut,pOut){
/* TODO: store exception.message values from the async
partner in a dedicated SharedArrayBuffer, noting that we'd have
to encode them... TextEncoder can do that for us. */
warn("OPFS xGetLastError() has nothing sensible to return.");
return 0;
},
xOpen: function f(pVfs, zName, pFile, flags, pOutFlags){
if(!f._){
f._ = {
fileTypes: {
SQLITE_OPEN_MAIN_DB: 'mainDb',
SQLITE_OPEN_MAIN_JOURNAL: 'mainJournal',
SQLITE_OPEN_TEMP_DB: 'tempDb',
SQLITE_OPEN_TEMP_JOURNAL: 'tempJournal',
SQLITE_OPEN_TRANSIENT_DB: 'transientDb',
SQLITE_OPEN_SUBJOURNAL: 'subjournal',
SQLITE_OPEN_SUPER_JOURNAL: 'superJournal',
SQLITE_OPEN_WAL: 'wal'
},
getFileType: function(filename,oflags){
const ft = f._.fileTypes;
for(let k of Object.keys(ft)){
if(oflags & capi[k]) return ft[k];
}
warn("Cannot determine fileType based on xOpen() flags for file",filename);
return '???';
}
};
}
if(0===zName){
zName = randomFilename();
}else if('number'===typeof zName){
zName = wasm.cstringToJs(zName);
}
const args = Object.create(null);
args.fid = pFile;
args.filename = zName;
args.sab = new SharedArrayBuffer(state.fileBufferSize);
args.fileType = f._.getFileType(args.filename, flags);
args.create = !!(flags & capi.SQLITE_OPEN_CREATE);
args.deleteOnClose = !!(flags & capi.SQLITE_OPEN_DELETEONCLOSE);
args.readOnly = !!(flags & capi.SQLITE_OPEN_READONLY);
const rc = opRun('xOpen', args);
if(!rc){
/* Recall that sqlite3_vfs::xClose() will be called, even on
error, unless pFile->pMethods is NULL. */
if(args.readOnly){
wasm.setMemValue(pOutFlags, capi.SQLITE_OPEN_READONLY, 'i32');
}
__openFiles[pFile] = args;
args.sabView = new Uint8Array(args.sab);
args.sabViewFileSize = new DataView(args.sab, state.fbInt64Offset, 8);
args.sq3File = new sqlite3_file(pFile);
args.sq3File.$pMethods = opfsIoMethods.pointer;
args.ba = new Uint8Array(args.sab);
}
return rc;
}/*xOpen()*/
}/*vfsSyncWrappers*/;

if(!opfsVfs.$xRandomness){
/* If the default VFS has no xRandomness(), add a basic JS impl... */
vfsSyncWrappers.xRandomness = function(pVfs, nOut, pOut){
const heap = wasm.heap8u();
let i = 0;
for(; i < nOut; ++i) heap[pOut + i] = (Math.random()*255000) & 0xFF;
return i;
};
}
if(!opfsVfs.$xSleep){
/* If we can inherit an xSleep() impl from the default VFS then
use it, otherwise install one which is certainly less accurate
because it has to go round-trip through the async worker, but
provides the only option for a synchronous sleep() in JS. */
vfsSyncWrappers.xSleep = (pVfs,ms)=>opRun('xSleep',ms);
}

const doSomething = function(){
/*
TODO: plug the above functions into opfsVfs and opfsIoMethods.
Code for doing so is in api/sqlite3-api-opfs.js.
*/

const sanityCheck = async function(){
//state.ioBuf = new Uint8Array(state.sabIo);
const fid = 37;
let rc = vfsSyncWrappers.xOpen(fid, "/foo/bar/baz.sqlite3",0, {});
log("open rc =",rc,"state.opBuf[xOpen] =",state.opBuf[state.opIds.xOpen]);
if(isWorkerErrCode(rc)){
error("open failed with code",rc);
return;
}
log("xSleep()ing before close()ing...");
opRun('xSleep',{ms: 1500});
log("wait()ing before close()ing...");
wait(1500).then(function(){
rc = vfsSyncWrappers.xClose(fid);
const scope = wasm.scopedAllocPush();
const sq3File = new sqlite3_file();
try{
const fid = sq3File.pointer;
const openFlags = capi.SQLITE_OPEN_CREATE
| capi.SQLITE_OPEN_READWRITE
| capi.SQLITE_OPEN_DELETEONCLOSE
| capi.SQLITE_OPEN_MAIN_DB;
const pOut = wasm.scopedAlloc(8);
const dbFile = "/sanity/check/file";
let rc = vfsSyncWrappers.xOpen(opfsVfs.pointer, dbFile,
fid, openFlags, pOut);
log("open rc =",rc,"state.opBuf[xOpen] =",state.opBuf[state.opIds.xOpen]);
if(isWorkerErrCode(rc)){
error("open failed with code",rc);
return;
}
rc = ioSyncWrappers.xSync(sq3File.pointer, 0);
if(rc) toss('sync failed w/ rc',rc);
rc = ioSyncWrappers.xTruncate(sq3File.pointer, 1024);
if(rc) toss('truncate failed w/ rc',rc);
wasm.setMemValue(pOut,0,'i64');
rc = ioSyncWrappers.xFileSize(sq3File.pointer, pOut);
if(rc) toss('xFileSize failed w/ rc',rc);
log("xFileSize says:",wasm.getMemValue(pOut, 'i64'));
log("xSleep()ing before close()ing...");
opRun('xSleep',1500);
rc = ioSyncWrappers.xClose(fid);
log("xClose rc =",rc,"opBuf =",state.opBuf);
});
log("Deleting file:",dbFile);
opRun('xDelete', dbFile);
}finally{
sq3File.dispose();
wasm.scopedAllocPop(scope);
}
};


W.onmessage = function({data}){
log("Worker.onmessage:",data);
switch(data.type){
case 'ready':
case 'loaded':
/*Pass our config and shared state on to the async worker.*/
wMsg('init',state);
break;
case 'inited':
/*Indicates that the async partner has received the 'init',
so we now know that the state object is no longer subject to
being copied by a pending postMessage() call.*/
state.opBuf = new Int32Array(state.opSab);
state.openFiles = Object.create(null);
doSomething();
sanityCheck();
break;
default:
error("Unexpected message from the async worker:",data);
break;
}
};
}/*doAtomicsStuff*/
}/*initOpfsVfs*/

importScripts('sqlite3.js');
self.sqlite3InitModule().then((EmscriptenModule)=>doAtomicsStuff(EmscriptenModule.sqlite3));
self.sqlite3InitModule().then((EmscriptenModule)=>initOpfsVfs(EmscriptenModule.sqlite3));
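Once the wrappers above are actually bound into opfsVfs and opfsIoMethods (the TODO noted in doSomething()), registration would presumably go through the usual C API from inside initOpfsVfs(). A hedged sketch of that step, assuming capi exposes sqlite3_vfs_register() the way the C API defines it and that sqlite3_vfs_find() accepts a VFS name string:

// Hypothetical follow-up once the method wrappers are installed on the structs:
// register the VFS (0 = do not make it the default) and confirm it is visible.
const regRc = capi.sqlite3_vfs_register(opfsVfs.pointer, 0);
if(regRc) toss("sqlite3_vfs_register(opfs) failed with rc",regRc);
if(opfsVfs.pointer !== capi.sqlite3_vfs_find("opfs")){
  toss("Unexpected sqlite3_vfs_find('opfs') result.");
}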