path: root/ext/wasm
Diffstat (limited to 'ext/wasm')
-rw-r--r--   ext/wasm/api/sqlite3-vfs-opfs-sahpool.c-pp.js    78
-rw-r--r--   ext/wasm/tester1.c-pp.js                          2
-rw-r--r--   ext/wasm/tests/opfs/sahpool/digest-worker.js     94
-rw-r--r--   ext/wasm/tests/opfs/sahpool/digest.html         141
4 files changed, 304 insertions, 11 deletions
diff --git a/ext/wasm/api/sqlite3-vfs-opfs-sahpool.c-pp.js b/ext/wasm/api/sqlite3-vfs-opfs-sahpool.c-pp.js
index 95843a35d..f30df3198 100644
--- a/ext/wasm/api/sqlite3-vfs-opfs-sahpool.c-pp.js
+++ b/ext/wasm/api/sqlite3-vfs-opfs-sahpool.c-pp.js
@@ -79,6 +79,48 @@ globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
capi.SQLITE_OPEN_MAIN_JOURNAL |
capi.SQLITE_OPEN_SUPER_JOURNAL |
capi.SQLITE_OPEN_WAL;
+ const FLAG_COMPUTE_DIGEST_V2 = capi.SQLITE_OPEN_MEMORY
+ /* Part of the fix for
+ https://github.com/sqlite/sqlite-wasm/issues/97
+
+ Summary: prior to version 3.50.0 computeDigest() always computes
+ a value of [0,0] due to overflows, so it does not do anything
+ useful. Fixing it invalidates old persistent files, so we
+ instead only fix it for files created or updated since the bug
+ was discovered and fixed.
+
+ This flag determines whether we use the broken legacy
+ computeDigest() or the v2 variant. We only use this flag for
+ newly-created/overwritten files. Pre-existing files have the
+ broken digest stored in them, so they need to continue using it.
+
+ What this means, in terms of db file compatibility between
+ versions:
+
+ - DBs created with versions older than this fix (<3.50.0)
+ can be read by post-fix versions. Such DBs which are written
+ to in-place (not replaced) by newer versions can still be read
+ by older versions, as the affected digest is only modified
+ when the SAH slot is assigned to a given filename.
+
+ - DBs created with post-fix versions will, when read by a pre-fix
+ version, be seen as having a "bad digest" and will be
+ unceremoniously replaced by that pre-fix version. When swapping
+ back to a post-fix version, that version will see that the file
+ entry is missing the FLAG_COMPUTE_DIGEST_V2 bit so will treat it
+ as a legacy file.
+
+ This flag is stored in the same memory as the various
+ SQLITE_OPEN_... flags and we must be careful here to not use a
+ flag bit which is otherwise relevant for the VFS.
+ SQLITE_OPEN_MEMORY is handled by sqlite3_open_v2() and friends,
+ not the VFS, so we'll repurpose that one. If we take a
+ currently-unused bit and it ends up, at some later point, being
+ used, we would have to invalidate existing VFS files in order to
+ move to another bit. Similarly, if the SQLITE_OPEN_MEMORY bit
+ were ever reassigned (which it won't be!), we'd invalidate all
+ VFS-side files.
+ */;
/** Subdirectory of the VFS's space where "opaque" (randomly-named)
files are stored. Changing this effectively invalidates the data
@@ -329,6 +371,7 @@ globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
xOpen: function f(pVfs, zName, pFile, flags, pOutFlags){
const pool = getPoolForVfs(pVfs);
try{
+ flags &= ~FLAG_COMPUTE_DIGEST_V2;
pool.log(`xOpen ${wasm.cstrToJs(zName)} ${flags}`);
// First try to open a path that already exists in the file system.
const path = (zName && wasm.peek8(zName))
@@ -624,7 +667,8 @@ globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
const fileDigest = new Uint32Array(HEADER_DIGEST_SIZE / 4);
sah.read(fileDigest, {at: HEADER_OFFSET_DIGEST});
- const compDigest = this.computeDigest(this.#apBody);
+ const compDigest = this.computeDigest(this.#apBody, flags);
+ //warn("getAssociatedPath() flags",'0x'+flags.toString(16), "compDigest", compDigest);
if(fileDigest.every((v,i) => v===compDigest[i])){
// Valid digest
const pathBytes = this.#apBody.findIndex((v)=>0===v);
@@ -655,10 +699,17 @@ globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
if(HEADER_MAX_PATH_SIZE <= enc.written + 1/*NUL byte*/){
toss("Path too long:",path);
}
+ if(path && flags){
+ /* When creating or re-writing files, update their digest, if
+ needed, to v2. We continue to use v1 for the (!path) case
+ (empty files) because there's little reason not to use a
+ digest of 0 for empty entries. */
+ flags |= FLAG_COMPUTE_DIGEST_V2;
+ }
this.#apBody.fill(0, enc.written, HEADER_MAX_PATH_SIZE);
this.#dvBody.setUint32(HEADER_OFFSET_FLAGS, flags);
-
- const digest = this.computeDigest(this.#apBody);
+ const digest = this.computeDigest(this.#apBody, flags);
+ //console.warn("setAssociatedPath(",path,") digest",digest);
sah.write(this.#apBody, {at: 0});
sah.write(digest, {at: HEADER_OFFSET_DIGEST});
sah.flush();
@@ -679,15 +730,22 @@ globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
metadata for each file as a validation check. Changing this
algorithm invalidates all existing databases for this VFS, so
don't do that.
+
+ See the docs for FLAG_COMPUTE_DIGEST_V2 for more details.
*/
- computeDigest(byteArray){
- let h1 = 0xdeadbeef;
- let h2 = 0x41c6ce57;
- for(const v of byteArray){
- h1 = 31 * h1 + (v * 307);
- h2 = 31 * h2 + (v * 307);
+ computeDigest(byteArray, fileFlags){
+ if( fileFlags & FLAG_COMPUTE_DIGEST_V2 ){
+ let h1 = 0xdeadbeef;
+ let h2 = 0x41c6ce57;
+ for(const v of byteArray){
+ h1 = Math.imul(h1 ^ v, 2654435761);
+ h2 = Math.imul(h2 ^ v, 104729);
+ }
+ return new Uint32Array([h1>>>0, h2>>>0]);
+ }else{
+ /* this is what the buggy legacy computation worked out to */
+ return new Uint32Array([0,0]);
}
- return new Uint32Array([h1>>>0, h2>>>0]);
}
/**
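
For reference, the computeDigest() change above can be reduced to the following standalone sketch (not part of the diff; the input length used here is illustrative). The legacy loop lets h1/h2 grow without bound as floating-point numbers, so for header-sized inputs the running value ends up a multiple of 2^32 and the trailing `>>>0` collapses to 0, whereas the v2 variant keeps every step within 32-bit range via Math.imul():

```js
// Standalone sketch of the two digest variants shown in the diff above.
const digestLegacy = (byteArray)=>{
  let h1 = 0xdeadbeef, h2 = 0x41c6ce57;
  for(const v of byteArray){
    // 31*h1 + v*307 grows without bound. After a few hundred bytes the
    // float value is a multiple of 2^32, so (h1>>>0) is always 0.
    h1 = 31 * h1 + (v * 307);
    h2 = 31 * h2 + (v * 307);
  }
  return new Uint32Array([h1>>>0, h2>>>0]);
};
const digestV2 = (byteArray)=>{
  let h1 = 0xdeadbeef, h2 = 0x41c6ce57;
  for(const v of byteArray){
    // Math.imul() performs 32-bit integer multiplication, so each step
    // stays in range and the result genuinely depends on the input.
    h1 = Math.imul(h1 ^ v, 2654435761);
    h2 = Math.imul(h2 ^ v, 104729);
  }
  return new Uint32Array([h1>>>0, h2>>>0]);
};
const sample = new Uint8Array(512).map((_, i)=> i & 0xff);
console.log(Array.from(digestLegacy(sample))); // [0, 0] for any input this long
console.log(Array.from(digestV2(sample)));     // two input-dependent 32-bit values
```

This is also why the compatibility behavior described for FLAG_COMPUTE_DIGEST_V2 falls out naturally: a pre-fix build recomputes [0,0] for every header, so a non-zero v2 digest written by a post-fix build looks like a "bad digest" to it and the slot gets reassigned.
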
diff --git a/ext/wasm/tester1.c-pp.js b/ext/wasm/tester1.c-pp.js
index 8638845a7..d30e59e38 100644
--- a/ext/wasm/tester1.c-pp.js
+++ b/ext/wasm/tester1.c-pp.js
@@ -3506,7 +3506,7 @@ globalThis.sqlite3InitModule = sqlite3InitModule;
});
db.exec([
"create table t(a);",
- "insert into t(a) values(1),(2),(3);",
+ "insert into t(a) values(1),(2),(1);",
"select auxtest(1,a), auxtest(1,a) from t order by a"
]);
}finally{
diff --git a/ext/wasm/tests/opfs/sahpool/digest-worker.js b/ext/wasm/tests/opfs/sahpool/digest-worker.js
new file mode 100644
index 000000000..430856667
--- /dev/null
+++ b/ext/wasm/tests/opfs/sahpool/digest-worker.js
@@ -0,0 +1,94 @@
+/*
+ 2025-01-31
+
+ The author disclaims copyright to this source code. In place of a
+ legal notice, here is a blessing:
+
+ * May you do good and not evil.
+ * May you find forgiveness for yourself and forgive others.
+ * May you share freely, never taking more than you give.
+
+ ***********************************************************************
+
+ This file is the Worker half of digest.html's test of the
+ opfs-sahpool VFS's header-digest handling.
+*/
+const clog = console.log.bind(console);
+const wPost = (type,...args)=>postMessage({type, payload:args});
+const log = (...args)=>{
+ clog("Worker:",...args);
+ wPost('log',...args);
+}
+
+const hasOpfs = ()=>{
+ return globalThis.FileSystemHandle
+ && globalThis.FileSystemDirectoryHandle
+ && globalThis.FileSystemFileHandle
+ && globalThis.FileSystemFileHandle.prototype.createSyncAccessHandle
+ && navigator?.storage?.getDirectory;
+};
+if( !hasOpfs() ){
+ wPost('error',"OPFS not detected");
+ throw new Error("OPFS not detected");
+}
+
+clog("Importing sqlite3...");
+const searchParams = new URL(self.location.href).searchParams;
+importScripts(searchParams.get('sqlite3.dir') + '/sqlite3.js');
+
+const runTests = function(sqlite3, poolUtil){
+ const fname = '/my.db';
+ let db = new poolUtil.OpfsSAHPoolDb(fname);
+ let n = (new Date()).valueOf();
+ try {
+ db.exec([
+ "create table if not exists t(a);"
+ ]);
+ db.exec({
+ sql: "insert into t(a) values(?)",
+ bind: n++
+ });
+ log(fname,"record count: ",db.selectValue("select count(*) from t"));
+ }finally{
+ db.close();
+ }
+
+ db = new poolUtil.OpfsSAHPoolDb(fname);
+ try {
+ db.exec({
+ sql: "insert into t(a) values(?)",
+ bind: n++
+ });
+ log(fname,"record count: ",db.selectValue("select count(*) from t"));
+ }finally{
+ db.close();
+ }
+
+ const fname2 = '/my2.db';
+ db = new poolUtil.OpfsSAHPoolDb(fname2);
+ try {
+ db.exec([
+ "create table if not exists t(a);"
+ ]);
+ db.exec({
+ sql: "insert into t(a) values(?)",
+ bind: n++
+ });
+ log(fname2,"record count: ",db.selectValue("select count(*) from t"));
+ }finally{
+ db.close();
+ }
+};
+
+globalThis.sqlite3InitModule().then(async function(sqlite3){
+ log("sqlite3 version:",sqlite3.version);
+ const sahPoolConfig = {
+ name: 'opfs-sahpool-digest',
+ clearOnInit: false,
+ initialCapacity: 6
+ };
+ return sqlite3.installOpfsSAHPoolVfs(sahPoolConfig).then(poolUtil=>{
+ log('vfs acquired');
+ runTests(sqlite3, poolUtil);
+ });
+});
diff --git a/ext/wasm/tests/opfs/sahpool/digest.html b/ext/wasm/tests/opfs/sahpool/digest.html
new file mode 100644
index 000000000..fdcd98ec1
--- /dev/null
+++ b/ext/wasm/tests/opfs/sahpool/digest.html
@@ -0,0 +1,141 @@
+<!doctype html>
+<html lang="en-us">
+ <head>
+ <meta charset="utf-8">
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <link rel="shortcut icon" href="data:image/x-icon;," type="image/x-icon">
+ <link rel="stylesheet" href="../../../common/emscripten.css"/>
+ <link rel="stylesheet" href="../../../common/testing.css"/>
+ <title>sqlite3 tester: OpfsSAHPool Digest</title>
+ <style></style>
+ </head>
+ <body><h1 id='color-target'></h1>
+
+ <p>
+ This is a test app for the digest calculation of the OPFS
+ SAHPool VFS. To exercise the version-compatibility handling, first
+ run it with v3.49.0 or older to create a new database, then run it
+ again with a newer version, then once more with v3.49.0 or older.
+ </p>
+ <div class='input-wrapper'>
+ <input type='checkbox' id='cb-log-reverse'>
+ <label for='cb-log-reverse'>Reverse log order?</label>
+ </div>
+ <div id='test-output'></div>
+ <script>
+ /*
+ 2025-02-03
+
+ The author disclaims copyright to this source code. In place of a
+ legal notice, here is a blessing:
+
+ * May you do good and not evil.
+ * May you find forgiveness for yourself and forgive others.
+ * May you share freely, never taking more than you give.
+
+ ***********************************************************************
+
+ This is a bugfix test for the OPFS SAHPool VFS. It requires first
+ creating a database using v3.49.0 or older, then running this page
+ again with a newer version.
+ */
+ (function(){
+ 'use strict';
+ document.querySelector('h1').innerHTML =
+ document.querySelector('title').innerHTML;
+ const mapToString = (v)=>{
+ switch(typeof v){
+ case 'number': case 'string': case 'boolean':
+ case 'undefined': case 'bigint':
+ return ''+v;
+ default: break;
+ }
+ if(null===v) return 'null';
+ if(v instanceof Error){
+ v = {
+ message: v.message,
+ stack: v.stack,
+ errorClass: v.name
+ };
+ }
+ return JSON.stringify(v,undefined,2);
+ };
+ const normalizeArgs = (args)=>args.map(mapToString);
+ const logTarget = document.querySelector('#test-output');
+ const logClass = function(cssClass,...args){
+ const ln = document.createElement('div');
+ if(cssClass){
+ for(const c of (Array.isArray(cssClass) ? cssClass : [cssClass])){
+ ln.classList.add(c);
+ }
+ }
+ ln.append(document.createTextNode(normalizeArgs(args).join(' ')));
+ logTarget.append(ln);
+ };
+ const cbReverse = document.querySelector('#cb-log-reverse');
+ //cbReverse.setAttribute('checked','checked');
+ const cbReverseKey = 'tester1:cb-log-reverse';
+ const cbReverseIt = ()=>{
+ logTarget.classList[cbReverse.checked ? 'add' : 'remove']('reverse');
+ //localStorage.setItem(cbReverseKey, cbReverse.checked ? 1 : 0);
+ };
+ cbReverse.addEventListener('change', cbReverseIt, true);
+ /*if(localStorage.getItem(cbReverseKey)){
+ cbReverse.checked = !!(+localStorage.getItem(cbReverseKey));
+ }*/
+ cbReverseIt();
+
+ const log = (...args)=>{
+ //console.log(...args);
+ logClass('',...args);
+ }
+ const warn = (...args)=>{
+ console.warn(...args);
+ logClass('warning',...args);
+ }
+ const error = (...args)=>{
+ console.error(...args);
+ logClass('error',...args);
+ };
+
+ const toss = (...args)=>{
+ error(...args);
+ throw new Error(args.join(' '));
+ };
+
+ const endOfWork = (passed=true)=>{
+ const eH = document.querySelector('#color-target');
+ const eT = document.querySelector('title');
+ if(passed){
+ log("End of work chain. If you made it this far, you win.");
+ eH.innerText = 'PASS: '+eH.innerText;
+ eH.classList.add('tests-pass');
+ eT.innerText = 'PASS: '+eT.innerText;
+ }else{
+ eH.innerText = 'FAIL: '+eH.innerText;
+ eH.classList.add('tests-fail');
+ eT.innerText = 'FAIL: '+eT.innerText;
+ }
+ };
+
+ log("Running opfs-sahpool digest tests...");
+ const W1 = new Worker('digest-worker.js?sqlite3.dir=../../../jswasm');
+ W1.onmessage = function({data}){
+ //log("onmessage:",data);
+ switch(data.type){
+ case 'log':
+ log('worker says:', ...data.payload);
+ break;
+ case 'error':
+ error('worker says:', ...data.payload);
+ endOfWork(false);
+ break;
+ case 'initialized':
+ log(data.workerId, ': Worker initialized',...data.payload);
+ break;
+ }
+ };
+ })();
+ </script>
+ </body>
+</html>