author:    stephan <stephan@noemail.net>  2022-11-21 03:50:52 +0000
committer: stephan <stephan@noemail.net>  2022-11-21 03:50:52 +0000
commit:    27c4cd183d91d09e34e310d6349cda2b33c255ba (patch)
tree:      f269eaaf5e12a6ae7b83bd040e2bdc9b38db3749 /ext/wasm/api
parent:    ae276719f002a92d1262fc45e67118922f4707b8 (diff)
download:  sqlite-27c4cd183d91d09e34e310d6349cda2b33c255ba.tar.gz
           sqlite-27c4cd183d91d09e34e310d6349cda2b33c255ba.zip
Add test app for experimenting with multi-worker OPFS concurrency. Tweak OPFS VFS to significantly improve the otherwise "unfortunate" concurrency situation.
FossilOrigin-Name: 96f76e7616f8157a342b9e1c42f7b1feab200d182268871a2b25f67d4ee2564c
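The concurrency tweaks center on SQLite's built-in busy handler: the commit exports sqlite3_busy_timeout() to the WASM build and has the OPFS VFS issue "pragma busy_timeout=2000" on its connections, so lock contention between tabs/workers is retried for a while instead of immediately surfacing as SQLITE_BUSY/SQLITE_LOCKED. A minimal sketch of how a client might exercise that from several workers at once, assuming the usual sqlite3InitModule() bootstrap and the oo1.OpfsDb wrapper (the worker script name and db filename below are hypothetical):

  /* demo-worker.js: run this same script from two or more Workers. */
  importScripts('sqlite3.js');
  sqlite3InitModule().then((sqlite3)=>{
    if(!sqlite3.oo1.OpfsDb){
      console.warn("OPFS VFS is not available in this environment.");
      return;
    }
    const db = new sqlite3.oo1.OpfsDb('/busy-timeout-demo.db');
    try{
      /* The VFS now defaults to busy_timeout=2000. A client expecting
         heavier contention can lengthen the timeout: */
      db.exec("pragma busy_timeout=5000;");
      db.exec("create table if not exists t(w,ts);");
      /* With the busy handler in place, concurrent writes from other
         tabs/workers are retried instead of failing outright. */
      db.exec({
        sql: "insert into t(w,ts) values(?,?)",
        bind: [self.name || 'anonymous worker', Date.now()]
      });
    }finally{
      db.close();
    }
  });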
Diffstat (limited to 'ext/wasm/api')
-rw-r--r--  ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api |   1
-rw-r--r--  ext/wasm/api/extern-post-js.js              |   3
-rw-r--r--  ext/wasm/api/sqlite3-api-opfs.js            |  12
-rw-r--r--  ext/wasm/api/sqlite3-api-prologue.js        |   1
-rw-r--r--  ext/wasm/api/sqlite3-opfs-async-proxy.js    | 141

5 files changed, 97 insertions, 61 deletions
diff --git a/ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api b/ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api
index b903bedee..1f7908e3b 100644
--- a/ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api
+++ b/ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api
@@ -7,6 +7,7 @@ _sqlite3_bind_null
 _sqlite3_bind_parameter_count
 _sqlite3_bind_parameter_index
 _sqlite3_bind_text
+_sqlite3_busy_timeout
 _sqlite3_changes
 _sqlite3_changes64
 _sqlite3_clear_bindings
diff --git a/ext/wasm/api/extern-post-js.js b/ext/wasm/api/extern-post-js.js
index cace6ed51..b32783781 100644
--- a/ext/wasm/api/extern-post-js.js
+++ b/ext/wasm/api/extern-post-js.js
@@ -59,6 +59,9 @@ const toExportForES6 =
     li.pop();
     initModuleState.sqlite3Dir = li.join('/') + '/';
   }
+  if(initModuleState.sqlite3Dir){
+    initModuleState.sqlite3Dir = initModuleState.sqlite3Dir.replace(/[/]{2,}/g,'/');
+  }
 
   self.sqlite3InitModule = (...args)=>{
     //console.warn("Using replaced sqlite3InitModule()",self.location);
diff --git a/ext/wasm/api/sqlite3-api-opfs.js b/ext/wasm/api/sqlite3-api-opfs.js
index a3f73cc7b..1fd50dcc6 100644
--- a/ext/wasm/api/sqlite3-api-opfs.js
+++ b/ext/wasm/api/sqlite3-api-opfs.js
@@ -92,7 +92,8 @@ const installOpfsVfs = function callee(options){
   }
   const urlParams = new URL(self.location.href).searchParams;
   if(undefined===options.verbose){
-    options.verbose = urlParams.has('opfs-verbose') ? 3 : 2;
+    options.verbose = urlParams.has('opfs-verbose')
+      ? (+urlParams.get('opfs-verbose') || 2) : 1;
   }
   if(undefined===options.sanityChecks){
     options.sanityChecks = urlParams.has('opfs-sanity-check');
@@ -101,6 +102,8 @@ const installOpfsVfs = function callee(options){
     options.proxyUri = callee.defaultProxyUri;
   }
 
+  //console.warn("OPFS options =",options,self.location);
+
   if('function' === typeof options.proxyUri){
     options.proxyUri = options.proxyUri();
   }
@@ -1154,7 +1157,10 @@ const installOpfsVfs = function callee(options){
         [
           /* Truncate journal mode is faster than delete or wal for
              this vfs, per speedtest1. */
-          "pragma journal_mode=truncate;"
+          "pragma journal_mode=truncate;",
+          /* Set a default busy-timeout handler to help OPFS dbs
+             deal with multi-tab/multi-worker contention. */
+          "pragma busy_timeout=2000;",
           /* This vfs benefits hugely from cache on moderate/large
              speedtest1 --size 50 and --size 100 workloads. We
              currently rely on setting a non-default cache size when
              building sqlite3.wasm. If that policy changes, the cache
              can be set here. */
-          //"pragma cache_size=-8388608;"
+          //"pragma cache_size=-16384;"
         ].join("")
       );
     }
diff --git a/ext/wasm/api/sqlite3-api-prologue.js b/ext/wasm/api/sqlite3-api-prologue.js
index fed1c5666..8b2ce0936 100644
--- a/ext/wasm/api/sqlite3-api-prologue.js
+++ b/ext/wasm/api/sqlite3-api-prologue.js
@@ -897,6 +897,7 @@ self.sqlite3ApiBootstrap = function sqlite3ApiBootstrap(
        the lines of sqlite3_prepare_v3(). The slightly problematic
        part is the final argument (text destructor). */
     ],
+    ["sqlite3_busy_timeout","int", "sqlite3*", "int"],
     ["sqlite3_close_v2", "int", "sqlite3*"],
     ["sqlite3_changes", "int", "sqlite3*"],
     ["sqlite3_clear_bindings","int", "sqlite3_stmt*"],
diff --git a/ext/wasm/api/sqlite3-opfs-async-proxy.js b/ext/wasm/api/sqlite3-opfs-async-proxy.js
index e4657484e..3701e8c30 100644
--- a/ext/wasm/api/sqlite3-opfs-async-proxy.js
+++ b/ext/wasm/api/sqlite3-opfs-async-proxy.js
@@ -53,7 +53,7 @@ const state = Object.create(null);
    2 = warnings and errors
    3 = debug, warnings, and errors
 */
-state.verbose = 2;
+state.verbose = 1;
 
 const loggers = {
   0:console.error.bind(console),
@@ -151,6 +151,57 @@ const getDirForFilename = async function f(absFilename, createDirs = false){
 };
 
 /**
+   If the given file-holding object has a sync handle attached to it,
+   that handle is remove and asynchronously closed. Though it may
+   sound sensible to continue work as soon as the close() returns
+   (noting that it's asynchronous), doing so can cause operations
+   performed soon afterwards, e.g. a call to getSyncHandle() to fail
+   because they may happen out of order from the close(). OPFS does
+   not guaranty that the actual order of operations is retained in
+   such cases. i.e. always "await" on the result of this function.
+*/
+const closeSyncHandle = async (fh)=>{
+  if(fh.syncHandle){
+    log("Closing sync handle for",fh.filenameAbs);
+    const h = fh.syncHandle;
+    delete fh.syncHandle;
+    delete fh.xLock;
+    __autoLocks.delete(fh.fid);
+    return h.close();
+  }
+};
+
+/**
+   A proxy for closeSyncHandle() which is guaranteed to not throw.
+
+   This function is part of a lock/unlock step in functions which
+   require a sync access handle but may be called without xLock()
+   having been called first. Such calls need to release that
+   handle to avoid locking the file for all of time. This is an
+   _attempt_ at reducing cross-tab contention but it may prove
+   to be more of a problem than a solution and may need to be
+   removed.
+*/
+const closeSyncHandleNoThrow = async (fh)=>{
+  try{await closeSyncHandle(fh)}
+  catch(e){
+    warn("closeSyncHandleNoThrow() ignoring:",e,fh);
+  }
+};
+
+/* Release all auto-locks. */
+const closeAutoLocks = async ()=>{
+  if(__autoLocks.size){
+    /* Release all auto-locks. */
+    for(const fid of __autoLocks){
+      const fh = __openFiles[fid];
+      await closeSyncHandleNoThrow(fh);
+      log("Auto-unlocked",fid,fh.filenameAbs);
+    }
+  }
+};
+
+/**
    An error class specifically for use with getSyncHandle(), the
    goal of which is to eventually be able to distinguish
    unambiguously between locking-related failures and other types,
    noting that we
@@ -168,7 +219,25 @@ class GetSyncHandleError extends Error {
     this.name = 'GetSyncHandleError';
   }
 };
-
+GetSyncHandleError.convertRc = (e,rc)=>{
+  if(1){
+    /* This approach returns SQLITE_LOCKED to the C API
+       when getSyncHandle() fails but makes the very
+       wild assumption that such a failure _is_ a locking
+       error. In practice that appears to be the most
+       common error, by far, but we cannot unambiguously
+       distinguish that from other errors.
+
+       This approach demonstrably reduces concurrency-related
+       errors but is highly questionable.
+    */
+    return (e instanceof GetSyncHandleError)
+      ? state.sq3Codes.SQLITE_LOCKED
+      : rc;
+  }else{
+    return rc;
+  }
+}
 /**
    Returns the sync access handle associated with the given file
    handle object (which must be a valid handle object, as created by
@@ -201,7 +270,8 @@ const getSyncHandle = async (fh)=>{
       );
     }
     warn("Error getting sync handle. Waiting",ms,
-          "ms and trying again.",fh.filenameAbs,e);
+         "ms and trying again.",fh.filenameAbs,e);
+    //await closeAutoLocks();
     Atomics.wait(state.sabOPView, state.opIds.retry, 0, ms);
   }
 }
@@ -215,45 +285,6 @@ const getSyncHandle = async (fh)=>{
 };
 
 /**
-   If the given file-holding object has a sync handle attached to it,
-   that handle is remove and asynchronously closed. Though it may
-   sound sensible to continue work as soon as the close() returns
-   (noting that it's asynchronous), doing so can cause operations
-   performed soon afterwards, e.g. a call to getSyncHandle() to fail
-   because they may happen out of order from the close(). OPFS does
-   not guaranty that the actual order of operations is retained in
-   such cases. i.e. always "await" on the result of this function.
-*/
-const closeSyncHandle = async (fh)=>{
-  if(fh.syncHandle){
-    log("Closing sync handle for",fh.filenameAbs);
-    const h = fh.syncHandle;
-    delete fh.syncHandle;
-    delete fh.xLock;
-    __autoLocks.delete(fh.fid);
-    return h.close();
-  }
-};
-
-/**
-   A proxy for closeSyncHandle() which is guaranteed to not throw.
-
-   This function is part of a lock/unlock step in functions which
-   require a sync access handle but may be called without xLock()
-   having been called first. Such calls need to release that
-   handle to avoid locking the file for all of time. This is an
-   _attempt_ at reducing cross-tab contention but it may prove
-   to be more of a problem than a solution and may need to be
-   removed.
-*/
-const closeSyncHandleNoThrow = async (fh)=>{
-  try{await closeSyncHandle(fh)}
-  catch(e){
-    warn("closeSyncHandleNoThrow() ignoring:",e,fh);
-  }
-};
-
-/**
    Stores the given value at state.sabOPView[state.opIds.rc] and then
    Atomics.notify()'s it.
 */
@@ -451,7 +482,7 @@ const vfsAsyncImpls = {
       rc = 0;
     }catch(e){
       state.s11n.storeException(2,e);
-      rc = state.sq3Codes.SQLITE_IOERR;
+      rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR);
     }
     wTimeEnd();
     storeAndNotify('xFileSize', rc);
@@ -471,7 +502,7 @@ const vfsAsyncImpls = {
       __autoLocks.delete(fid);
     }catch(e){
       state.s11n.storeException(1,e);
-      rc = state.sq3Codes.SQLITE_IOERR_LOCK;
+      rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_LOCK);
       fh.xLock = oldLockType;
     }
     wTimeEnd();
@@ -545,7 +576,7 @@ const vfsAsyncImpls = {
       if(undefined===nRead) wTimeEnd();
       error("xRead() failed",e,fh);
       state.s11n.storeException(1,e);
-      rc = state.sq3Codes.SQLITE_IOERR_READ;
+      rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_READ);
     }
     storeAndNotify('xRead',rc);
     mTimeEnd();
@@ -579,7 +610,7 @@ const vfsAsyncImpls = {
     }catch(e){
       error("xTruncate():",e,fh);
       state.s11n.storeException(2,e);
-      rc = state.sq3Codes.SQLITE_IOERR_TRUNCATE;
+      rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_TRUNCATE);
     }
     wTimeEnd();
     storeAndNotify('xTruncate',rc);
@@ -619,7 +650,7 @@ const vfsAsyncImpls = {
     }catch(e){
       error("xWrite():",e,fh);
       state.s11n.storeException(1,e);
-      rc = state.sq3Codes.SQLITE_IOERR_WRITE;
+      rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_WRITE);
     }
     wTimeEnd();
     storeAndNotify('xWrite',rc);
@@ -746,22 +777,16 @@ const waitLoop = async function f(){
   /**
      waitTime is how long (ms) to wait for each Atomics.wait().
      We need to wake up periodically to give the thread a chance
-     to do other things.
+     to do other things. If this is too high (e.g. 500ms) then
+     even two workers/tabs can easily run into locking errors.
   */
-  const waitTime = 500;
+  const waitTime = 150;
   while(!flagAsyncShutdown){
     try {
       if('timed-out'===Atomics.wait(
         state.sabOPView, state.opIds.whichOp, 0, waitTime
       )){
-        if(__autoLocks.size){
-          /* Release all auto-locks. */
-          for(const fid of __autoLocks){
-            const fh = __openFiles[fid];
-            await closeSyncHandleNoThrow(fh);
-            log("Auto-unlocked",fid,fh.filenameAbs);
-          }
-        }
+        await closeAutoLocks();
         continue;
       }
       const opId = Atomics.load(state.sabOPView, state.opIds.whichOp);
@@ -791,7 +816,7 @@ navigator.storage.getDirectory().then(function(d){
       const opt = data.args;
       state.littleEndian = opt.littleEndian;
       state.asyncS11nExceptions = opt.asyncS11nExceptions;
-      state.verbose = opt.verbose ?? 2;
+      state.verbose = opt.verbose ?? 1;
       state.fileBufferSize = opt.fileBufferSize;
       state.sabS11nOffset = opt.sabS11nOffset;
       state.sabS11nSize = opt.sabS11nSize;
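For context on the convertRc() and auto-lock changes above: OPFS permits only one sync access handle per file across all tabs and workers, so getSyncHandle() must acquire handles in a retry loop, and the proxy maps acquisition failures to SQLITE_LOCKED. The following standalone sketch shows that acquire-with-retry shape; the function and option names are hypothetical, and setTimeout() stands in for the Atomics.wait()-based pause the real proxy uses:

  /* Illustrative only, not the proxy's exact logic. fileHandle is a
     FileSystemFileHandle obtained from the OPFS directory tree. */
  const acquireSyncHandleWithRetry = async function(fileHandle, opt = {}){
    const maxTries = opt.maxTries || 6;    /* hypothetical default */
    let waitMs = opt.initialWaitMs || 300; /* hypothetical default */
    for(let i = 1; ; ++i){
      try{
        /* Throws (e.g. NoModificationAllowedError) while another
           tab/worker holds a sync access handle on this file. */
        return await fileHandle.createSyncAccessHandle();
      }catch(e){
        if(i === maxTries){
          throw new Error("Could not acquire sync handle after "
                          + i + " attempts.", {cause: e});
        }
        /* Wait with a growing delay, giving the current holder (and
           its auto-lock releaser) a chance to close the handle. */
        await new Promise((resolve)=>setTimeout(resolve, waitMs));
        waitMs = Math.min(waitMs * 2, 2000);
      }
    }
  };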