/*
  2022-09-16

  The author disclaims copyright to this source code. In place of a
  legal notice, here is a blessing:

  * May you do good and not evil.
  * May you find forgiveness for yourself and forgive others.
  * May you share freely, never taking more than you give.

  ***********************************************************************

  An INCOMPLETE and UNDER CONSTRUCTION experiment for OPFS: a Worker
  which manages asynchronous OPFS handles on behalf of a synchronous
  API which controls it via a combination of Worker messages,
  SharedArrayBuffer, and Atomics.

  Highly indebted to:

  https://github.com/rhashimoto/wa-sqlite/blob/master/src/examples/OriginPrivateFileSystemVFS.js

  for demonstrating how to use the OPFS APIs.

  This file is to be loaded as a Worker. It does not have any direct
  access to the sqlite3 JS/WASM bits, so any bits which it needs (most
  notably SQLITE_xxx integer codes) have to be imported into it via an
  initialization process.
*/
|
|
|
|
'use strict';
/** Throws an Error whose message is all arguments joined with a space. */
const toss = function(...args){throw new Error(args.join(' '))};
/* Sanity-check the execution environment: this code relies on
   Atomics-based blocking and OPFS, both of which require a Worker
   context (blocking Atomics.wait() is forbidden on the main thread). */
if(self.window === self){
  toss("This code cannot run from the main thread.",
       "Load it as a Worker from a separate Worker.");
}else if(!navigator.storage.getDirectory){
  toss("This API requires navigator.storage.getDirectory.");
}
|
|
|
|
/**
   Will hold state copied into this Worker from the synchronous side
   of this API.
*/
const state = Object.create(null);

/**
   verbose:

   0 = no logging output
   1 = only errors
   2 = warnings and errors
   3 = debug, warnings, and errors
*/
state.verbose = 2;

/* Console outputs indexed by logging level: [error, warn, log]. */
const loggers = [
  console.error.bind(console),
  console.warn.bind(console),
  console.log.bind(console)
];
/* Emits args via loggers[level] iff state.verbose permits that level. */
const logImpl = function(level, ...args){
  if(state.verbose > level) loggers[level]("OPFS asyncer:", ...args);
};
const log = (...args)=>logImpl(2, ...args);
const warn = (...args)=>logImpl(1, ...args);
const error = (...args)=>logImpl(0, ...args);
|
2022-09-19 20:09:09 +03:00
|
|
|
/** Per-op performance metrics, keyed by op name from state.opIds. */
const metrics = Object.create(null);
/**
   (Re)initializes one {count,time} record in the metrics object for
   each key of state.opIds. Must be re-run whenever state.opIds is
   replaced (i.e. at init time).
*/
metrics.reset = ()=>{
  let k;
  const r = (m)=>(m.count = m.time = 0);
  for(k in state.opIds){
    r(metrics[k] = Object.create(null));
  }
};
/**
   Computes each op's avgTime and dumps all metrics, plus call/time
   totals, to the console.
*/
metrics.dump = ()=>{
  /* Fix: dropped unused accumulator `w` from the original. */
  let k, n = 0, t = 0;
  for(k in state.opIds){
    const m = metrics[k];
    n += m.count;
    t += m.time;
    m.avgTime = (m.count && m.time) ? (m.time / m.count) : 0;
  }
  console.log(self.location.href,
              "metrics for",self.location.href,":",metrics,
              "\nTotal of",n,"op(s) for",t,"ms");
};
|
2022-09-17 18:08:22 +03:00
|
|
|
|
2022-09-19 12:58:01 +03:00
|
|
|
/* Loudly flag this file's experimental status in the dev console. */
warn("This file is very much experimental and under construction.",
     self.location.pathname);
|
2022-09-17 18:08:22 +03:00
|
|
|
|
2022-09-17 23:50:12 +03:00
|
|
|
/**
   Map of sqlite3_file pointers (integers) to metadata related to
   given OPFS file handles. The pointers are, in this side of the
   interface, opaque file handle IDs provided by the synchronous
   part of this constellation. Each value is an object with a structure
   demonstrated in the xOpen() impl.
*/
const __openFiles = Object.create(null);
|
|
|
|
|
|
|
|
/**
   Expects an OPFS file path. It gets resolved, such that ".."
   components are properly expanded, and returned. If the 2nd
   argument is true, the result is an array of the non-empty path
   elements, else it is an absolute path string.
*/
const getResolvedPath = function(filename,splitIt){
  const pathname = new URL(filename, 'file://irrelevant').pathname;
  return splitIt
    ? pathname.split('/').filter((part)=>part.length>0)
    : pathname;
};
|
2022-09-17 23:50:12 +03:00
|
|
|
|
|
|
|
/**
   Takes the absolute path to a filesystem element. Returns an array
   of [handleOfContainingDir, filename]. If the 2nd argument is
   truthy then each directory element leading to the file is created
   along the way. Throws if any creation or resolution fails.
*/
const getDirForPath = async function f(absFilename, createDirs = false){
  const parts = getResolvedPath(absFilename, true);
  const filename = parts.pop();
  const opts = {create: !!createDirs};
  let dirHandle = state.rootDir;
  for(const name of parts){
    if(name){
      dirHandle = await dirHandle.getDirectoryHandle(name, opts);
    }
  }
  return [dirHandle, filename];
};
|
2022-09-17 18:08:22 +03:00
|
|
|
|
|
|
|
|
2022-09-17 23:50:12 +03:00
|
|
|
/**
   Stores the given value at the Atomics array index reserved for the
   given op and then wakes up any thread waiting on that slot.
*/
const storeAndNotify = (opName, value)=>{
  log(opName+"() is notify()ing w/ value:",value);
  const slot = state.opIds[opName];
  Atomics.store(state.sabOPView, slot, value);
  Atomics.notify(state.sabOPView, slot);
};
|
2022-09-17 18:08:22 +03:00
|
|
|
|
2022-09-17 23:50:12 +03:00
|
|
|
/**
   Throws if fh is a file-holding object which is flagged as
   read-only, else is a no-op.
*/
const affirmNotRO = function(opName,fh){
  if(!fh.readOnly) return;
  toss(opName+"(): File is read-only: "+fh.filenameAbs);
};
|
2022-09-17 18:08:22 +03:00
|
|
|
|
2022-09-19 21:22:29 +03:00
|
|
|
|
|
|
|
/* Tracks which op is currently being timed, and since when. */
const opTimer = Object.create(null);
opTimer.op = undefined;
opTimer.start = undefined;
/* Starts the metrics timer for the given op name and bumps its call count. */
const mTimeStart = function(op){
  opTimer.op = op;
  opTimer.start = performance.now();
  ++metrics[op].count;
};
/* Folds the elapsed time of the op started by mTimeStart() into its metrics. */
const mTimeEnd = function(){
  return metrics[opTimer.op].time += performance.now() - opTimer.start;
};
|
|
|
|
|
2022-09-17 23:50:12 +03:00
|
|
|
/**
   Asynchronous wrappers for sqlite3_vfs and sqlite3_io_methods
   methods. Maintenance reminder: members are in alphabetical order
   to simplify finding them.

   Result codes are delivered to the synchronous side via
   storeAndNotify(); larger payloads (e.g. file sizes) go through
   state.s11n.
*/
const vfsAsyncImpls = {
  /** Creates the directory named by dirname (and any intermediate
      dirs), notifying 'mkdir' with 0 on success, SQLITE_IOERR on
      failure. */
  mkdir: async function(dirname){
    let rc = 0;
    try {
      /* Append a dummy filename part so that getDirForPath() creates
         dirname itself, not only its parents. */
      await getDirForPath(dirname+"/filepart", true);
    }catch(e){
      rc = state.sq3Codes.SQLITE_IOERR;
    }
    storeAndNotify('mkdir', rc);
  },
  /** sqlite3_vfs::xAccess() impl. */
  xAccess: async function(filename){
    log("xAccess(",arguments[0],")");
    mTimeStart('xAccess');
    /* OPFS cannot support the full range of xAccess() queries sqlite3
       calls for. We can essentially just tell if the file is
       accessible, but if it is it's automatically writable (unless
       it's locked, which we cannot(?) know without trying to open
       it). OPFS does not have the notion of read-only.

       The return semantics of this function differ from sqlite3's
       xAccess semantics because we are limited in what we can
       communicate back to our synchronous communication partner: 0 =
       accessible, non-0 means not accessible.
    */
    let rc = 0;
    try{
      const [dh, fn] = await getDirForPath(filename);
      await dh.getFileHandle(fn);
    }catch(e){
      rc = state.sq3Codes.SQLITE_IOERR;
    }
    storeAndNotify('xAccess', rc);
    mTimeEnd();
  },
  /** sqlite3_io_methods::xClose() impl: closes the access handle and
      honors the file's deleteOnClose flag. */
  xClose: async function(fid){
    const opName = 'xClose';
    mTimeStart(opName);
    log(opName+"(",arguments[0],")");
    const fh = __openFiles[fid];
    if(fh){
      delete __openFiles[fid];
      if(fh.accessHandle) await fh.accessHandle.close();
      if(fh.deleteOnClose){
        try{ await fh.dirHandle.removeEntry(fh.filenamePart) }
        catch(e){ warn("Ignoring dirHandle.removeEntry() failure of",fh,e) }
      }
      storeAndNotify(opName, 0);
    }else{
      /* Fix: was SQLITE_NOFOUND (a nonexistent code, thus undefined,
         which Atomics.store() coerces to 0 == success). The proper
         sqlite3 code is SQLITE_NOTFOUND. */
      storeAndNotify(opName, state.sq3Codes.SQLITE_NOTFOUND);
    }
    mTimeEnd();
  },
  /** sqlite3_vfs::xDelete() impl: performs xDeleteNoWait() and
      notifies the waiting sync side of the result. */
  xDelete: async function(...args){
    mTimeStart('xDelete');
    const rc = await vfsAsyncImpls.xDeleteNoWait(...args);
    storeAndNotify('xDelete', rc);
    mTimeEnd();
  },
  /** Deletion impl which returns its result instead of notifying, so
      that it can be used from other handlers (e.g. xDelete). */
  xDeleteNoWait: async function(filename, syncDir = 0, recursive = false){
    /* The syncDir flag is, for purposes of the VFS API's semantics,
       ignored here. However, if it has the value 0x1234 then: after
       deleting the given file, recursively try to delete any empty
       directories left behind in its wake (ignoring any errors and
       stopping at the first failure).

       That said: we don't know for sure that removeEntry() fails if
       the dir is not empty because the API is not documented. It has,
       however, a "recursive" flag which defaults to false, so
       presumably it will fail if the dir is not empty and that flag
       is false.
    */
    log("xDelete(",arguments[0],")");
    let rc = 0;
    try {
      while(filename){
        const [hDir, filenamePart] = await getDirForPath(filename, false);
        if(!filenamePart) break;
        await hDir.removeEntry(filenamePart, {recursive});
        if(0x1234 !== syncDir) break;
        /* Walk up one dir level and try to remove that (presumably
           now-empty) directory on the next iteration. */
        filename = getResolvedPath(filename, true);
        filename.pop();
        filename = filename.join('/');
      }
    }catch(e){
      /* Ignoring: _presumably_ the file can't be found or a dir is
         not empty. */
      rc = state.sq3Codes.SQLITE_IOERR_DELETE;
    }
    return rc;
  },
  /** sqlite3_io_methods::xFileSize() impl: serializes the size via
      state.s11n and notifies with 0, or notifies with SQLITE_IOERR. */
  xFileSize: async function(fid){
    mTimeStart('xFileSize');
    log("xFileSize(",arguments,")");
    const fh = __openFiles[fid];
    let sz;
    try{
      sz = await fh.accessHandle.getSize();
      state.s11n.serialize(Number(sz));
      sz = 0;
    }catch(e){
      error("xFileSize():",e, fh);
      sz = state.sq3Codes.SQLITE_IOERR;
    }
    storeAndNotify('xFileSize', sz);
    mTimeEnd();
  },
  /** sqlite3_vfs::xOpen() impl: opens (and optionally creates) the
      given file and records its handle state in __openFiles[fid]. */
  xOpen: async function(fid/*sqlite3_file pointer*/, filename, flags){
    const opName = 'xOpen';
    mTimeStart(opName);
    log(opName+"(",arguments[0],")");
    const deleteOnClose = (state.sq3Codes.SQLITE_OPEN_DELETEONCLOSE & flags);
    const create = (state.sq3Codes.SQLITE_OPEN_CREATE & flags);
    try{
      let hDir, filenamePart;
      try {
        [hDir, filenamePart] = await getDirForPath(filename, !!create);
      }catch(e){
        /* Fix: was state.sql3Codes (typo), which threw a TypeError
           here and left the sync side waiting forever. */
        storeAndNotify(opName, state.sq3Codes.SQLITE_NOTFOUND);
        mTimeEnd();
        return;
      }
      const hFile = await hDir.getFileHandle(filenamePart, {create});
      const fobj = Object.create(null);
      /**
         wa-sqlite, at this point, grabs a SyncAccessHandle and
         assigns it to the accessHandle prop of the file state
         object, but only for certain cases and it's unclear why it
         places that limitation on it.
      */
      fobj.accessHandle = await hFile.createSyncAccessHandle();
      __openFiles[fid] = fobj;
      fobj.filenameAbs = filename;
      fobj.filenamePart = filenamePart;
      fobj.dirHandle = hDir;
      fobj.fileHandle = hFile;
      fobj.sabView = state.sabFileBufView;
      fobj.readOnly = create ? false : (state.sq3Codes.SQLITE_OPEN_READONLY & flags);
      fobj.deleteOnClose = deleteOnClose;
      storeAndNotify(opName, 0);
    }catch(e){
      error(opName,e);
      storeAndNotify(opName, state.sq3Codes.SQLITE_IOERR);
    }
    mTimeEnd();
  },
  /** sqlite3_io_methods::xRead() impl: reads n bytes at the given
      offset into the shared file buffer, zero-filling short reads. */
  xRead: async function(fid,n,offset){
    mTimeStart('xRead');
    log("xRead(",arguments[0],")");
    let rc = 0;
    /* Fix: fh was declared inside the try block, so the catch
       handler's references to it threw a ReferenceError, masking the
       real error and skipping the notification. */
    const fh = __openFiles[fid];
    try{
      const nRead = fh.accessHandle.read(
        fh.sabView.subarray(0, n),
        {at: Number(offset)}
      );
      if(nRead < n){/* Zero-fill remaining bytes */
        fh.sabView.fill(0, nRead, n);
        rc = state.sq3Codes.SQLITE_IOERR_SHORT_READ;
      }
    }catch(e){
      error("xRead() failed",e,fh);
      rc = state.sq3Codes.SQLITE_IOERR_READ;
    }
    storeAndNotify('xRead',rc);
    mTimeEnd();
  },
  /** sqlite3_io_methods::xSync() impl: flushes the access handle
      unless the file is read-only. */
  xSync: async function(fid,flags/*ignored*/){
    mTimeStart('xSync');
    log("xSync(",arguments[0],")");
    const fh = __openFiles[fid];
    if(!fh.readOnly && fh.accessHandle) await fh.accessHandle.flush();
    storeAndNotify('xSync',0);
    mTimeEnd();
  },
  /** sqlite3_io_methods::xTruncate() impl. */
  xTruncate: async function(fid,size){
    mTimeStart('xTruncate');
    log("xTruncate(",arguments[0],")");
    let rc = 0;
    const fh = __openFiles[fid];
    try{
      affirmNotRO('xTruncate', fh);
      await fh.accessHandle.truncate(size);
    }catch(e){
      error("xTruncate():",e,fh);
      rc = state.sq3Codes.SQLITE_IOERR_TRUNCATE;
    }
    storeAndNotify('xTruncate',rc);
    mTimeEnd();
  },
  /** sqlite3_io_methods::xWrite() impl: writes n bytes from the
      shared file buffer at the given offset. */
  xWrite: async function(fid,n,offset){
    mTimeStart('xWrite');
    log("xWrite(",arguments[0],")");
    let rc;
    /* Fix: fh was declared inside the try block, making the catch
       handler's fh reference a ReferenceError (same bug as xRead). */
    const fh = __openFiles[fid];
    try{
      affirmNotRO('xWrite', fh);
      rc = (
        n === fh.accessHandle.write(fh.sabView.subarray(0, n),
                                    {at: Number(offset)})
      ) ? 0 : state.sq3Codes.SQLITE_IOERR_WRITE;
    }catch(e){
      error("xWrite():",e,fh);
      rc = state.sq3Codes.SQLITE_IOERR_WRITE;
    }
    storeAndNotify('xWrite',rc);
    mTimeEnd();
  }
};
|
|
|
|
|
2022-09-20 06:31:02 +03:00
|
|
|
|
|
|
|
/**
   Lazily initializes and returns state.s11n, the serialization
   helper used to exchange structured values with the counterpart
   thread via a region of the shared I/O buffer.
*/
const initS11n = ()=>{
  // Achtung: this code is 100% duplicated in the other half of this proxy!
  if(state.s11n) return state.s11n;
  const decoder = new TextDecoder();
  const encoder = new TextEncoder('utf-8');
  /* First 4 bytes of the s11n region hold the payload length... */
  const szView = new DataView(state.sabIO, state.sabS11nOffset, 4);
  /* ...and the rest holds the JSON payload itself. */
  const jsonView = new Uint8Array(
    state.sabIO, state.sabS11nOffset+4, state.sabS11nSize-4
  );
  state.s11n = Object.create(null);
  /**
     Returns an array of the state serialized by the most recent
     serialize() operation (here or in the counterpart thread), or
     null if the serialization buffer is empty.
  */
  state.s11n.deserialize = function(){
    const sz = szView.getInt32(0, state.littleEndian);
    /* slice() (copy) needed, instead of subarray() (reference),
       because TextDecoder throws if asked to decode from an SAB. */
    const json = sz
      ? decoder.decode(jsonView.slice(0, sz))
      : null;
    return JSON.parse(json);
  }
  /**
     Serializes all arguments to the shared buffer for consumption
     by the counterpart thread. This impl currently uses JSON for
     serialization for simplicity of implementation, but if that
     proves imperformant then a lower-level approach will be created.
  */
  state.s11n.serialize = function(...args){
    const bytes = encoder.encode(JSON.stringify(args));
    szView.setInt32(0, bytes.byteLength, state.littleEndian);
    jsonView.set(bytes);
  };
  return state.s11n;
};
|
|
|
|
|
|
|
|
/**
   INCOMPLETE/UNDER CONSTRUCTION: an Atomics.wait()-based event loop
   intended to replace postMessage()-based op dispatch. Blocks on the
   state.opIds.whichOp slot, reads the posted op id and its serialized
   arguments, and reports the op's result code via the per-op rcIds
   slot. This function never returns.
*/
const waitLoop = function(){
  /* Reverse map: numeric op id => {key: opName}. */
  const opHandlers = Object.create(null);
  for(let k of Object.keys(state.opIds)){
    const o = Object.create(null);
    opHandlers[state.opIds[k]] = o;
    o.key = k;
  }
  const sabOP = state.sabOP;
  for(;;){
    try {
      /* Clear the op slot, then block until the counterpart thread
         stores a non-zero op id there. Fix: this was Atomic.wait (a
         ReferenceError), which made this loop spin on errors. */
      Atomics.store(sabOP, state.opIds.whichOp, 0);
      Atomics.wait(sabOP, state.opIds.whichOp, 0);
      const opId = Atomics.load(sabOP, state.opIds.whichOp);
      const hnd = opHandlers[opId] ?? toss("No waitLoop handler for whichOp #",opId);
      const args = state.s11n.deserialize();
      log("whichOp =",opId,hnd,args);
      const rc = 0/*TODO: run op*/;
      Atomics.store(sabOP, state.rcIds[hnd.key], rc);
      Atomics.notify(sabOP, state.rcIds[hnd.key]);
    }catch(e){
      error('in waitLoop():',e.message);
    }
  }
};
|
|
|
|
|
2022-09-17 23:50:12 +03:00
|
|
|
/* Bootstrap: resolve the OPFS root directory, install the Worker
   message handler, and tell the synchronous side we are ready. */
navigator.storage.getDirectory().then(function(d){
  const wMsg = (type)=>postMessage({type});
  state.rootDir = d;
  log("state.rootDir =",state.rootDir);
  /**
     Message dispatcher: 'opfs-async-init' copies shared state from
     the synchronous partner; any other message type is dispatched to
     the same-named member of vfsAsyncImpls.
  */
  self.onmessage = async function({data}){
    log("self.onmessage()",data);
    switch(data.type){
      case 'opfs-async-init':{
        /* Receive shared state from synchronous partner */
        const opt = data.args;
        state.littleEndian = opt.littleEndian;
        state.verbose = opt.verbose ?? 2;
        state.fileBufferSize = opt.fileBufferSize;
        state.sabS11nOffset = opt.sabS11nOffset;
        state.sabS11nSize = opt.sabS11nSize;
        state.sabOP = opt.sabOP;
        state.sabOPView = new Int32Array(state.sabOP);
        state.sabIO = opt.sabIO;
        state.sabFileBufView = new Uint8Array(state.sabIO, 0, state.fileBufferSize);
        state.sabS11nView = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
        state.opIds = opt.opIds;
        state.rcIds = opt.rcIds;
        state.sq3Codes = opt.sq3Codes;
        /* Sanity check: every async impl must have a notification slot. */
        Object.keys(vfsAsyncImpls).forEach((k)=>{
          if(!Number.isFinite(state.opIds[k])){
            toss("Maintenance required: missing state.opIds[",k,"]");
          }
        });
        initS11n();
        metrics.reset();
        log("init state",state);
        wMsg('opfs-async-inited');
        break;
      }
      default:{
        let err;
        const m = vfsAsyncImpls[data.type] || toss("Unknown message type:",data.type);
        try {
          await m(...data.args).catch((e)=>err=e);
        }catch(e){
          err = e;
        }
        if(err){
          /* Fix: this logged `e`, which is out of scope here (the
             catch binds `err`), so a failing op threw a
             ReferenceError instead of reporting SQLITE_ERROR. */
          error("Error handling",data.type+"():",err);
          storeAndNotify(data.type, state.sq3Codes.SQLITE_ERROR);
        }
        break;
      }
    }
  };
  wMsg('opfs-async-loaded');
}).catch((e)=>error(e));
|