Index: ext/wasm/api/sqlite3-vfs-opfs-sahpool.c-pp.js
==================================================================
--- ext/wasm/api/sqlite3-vfs-opfs-sahpool.c-pp.js
+++ ext/wasm/api/sqlite3-vfs-opfs-sahpool.c-pp.js
@@ -77,10 +77,52 @@
   const PERSISTENT_FILE_TYPES =
         capi.SQLITE_OPEN_MAIN_DB |
         capi.SQLITE_OPEN_MAIN_JOURNAL |
         capi.SQLITE_OPEN_SUPER_JOURNAL |
         capi.SQLITE_OPEN_WAL;
+  const FLAG_COMPUTE_DIGEST_V2 = capi.SQLITE_OPEN_MEMORY
+  /* Part of the fix for
+     https://github.com/sqlite/sqlite-wasm/issues/97
+
+     Summary: prior to version 3.50.0 computeDigest() always computes
+     a value of [0,0] due to overflows, so it does not do anything
+     useful. Fixing it invalidates old persistent files, so we
+     instead only fix it for files created or updated since the bug
+     was discovered and fixed.
+
+     This flag determines whether we use the broken legacy
+     computeDigest() or the v2 variant. We only use this flag for
+     newly-created/overwritten files. Pre-existing files have the
+     broken digest stored in them so need to continue to use that.
+
+     What this means, in terms of db file compatibility between
+     versions:
+
+     - DBs created with versions older than this fix (<3.50.0)
+       can be read by post-fix versions. Such DBs which are written
+       to in-place (not replaced) by newer versions can still be read
+       by older versions, as the affected digest is only modified
+       when the SAH slot is assigned to a given filename.
+
+     - DBs created with post-fix versions will, when read by a pre-fix
+       version, be seen as having a "bad digest" and will be
+       unceremoniously replaced by that pre-fix version. When swapping
+       back to a post-fix version, that version will see that the file
+       entry is missing the FLAG_COMPUTE_DIGEST_V2 bit so will treat it
+       as a legacy file.
+
+     This flag is stored in the same memory as the various
+     SQLITE_OPEN_... flags and we must be careful here to not use a
+     flag bit which is otherwise relevant for the VFS.
+     SQLITE_OPEN_MEMORY is handled by sqlite3_open_v2() and friends,
+     not the VFS, so we'll repurpose that one. If we take a
+     currently-unused bit and it ends up, at some later point, being
+     used, we would have to invalidate existing VFS files in order to
+     move to another bit. Similarly, if the SQLITE_OPEN_MEMORY bit
+     were ever reassigned (which it won't be!), we'd invalidate all
+     VFS-side files.
+  */;
   /** Subdirectory of the VFS's space where "opaque" (randomly-named)
       files are stored. Changing this effectively invalidates the data
       stored under older names (orphaning it), so don't do that. */
   const OPAQUE_DIR_NAME = ".opaque";
@@ -327,10 +369,11 @@
     },
     //xSleep is optionally defined below
     xOpen: function f(pVfs, zName, pFile, flags, pOutFlags){
       const pool = getPoolForVfs(pVfs);
       try{
+        flags &= ~FLAG_COMPUTE_DIGEST_V2;
         pool.log(`xOpen ${wasm.cstrToJs(zName)} ${flags}`);
         // First try to open a path that already exists in the file system.
         const path = (zName && wasm.peek8(zName)) ?
               pool.getPath(zName) : getRandomName();
@@ -622,11 +665,12 @@
         return '';
       }
 
       const fileDigest = new Uint32Array(HEADER_DIGEST_SIZE / 4);
       sah.read(fileDigest, {at: HEADER_OFFSET_DIGEST});
-      const compDigest = this.computeDigest(this.#apBody);
+      const compDigest = this.computeDigest(this.#apBody, flags);
+      //warn("getAssociatedPath() flags",'0x'+flags.toString(16), "compDigest", compDigest);
       if(fileDigest.every((v,i) => v===compDigest[i])){
         // Valid digest
         const pathBytes = this.#apBody.findIndex((v)=>0===v);
         if(0===pathBytes){
           // This file is unassociated, so truncate it to avoid
@@ -653,14 +697,21 @@
     setAssociatedPath(sah, path, flags){
       const enc = textEncoder.encodeInto(path, this.#apBody);
       if(HEADER_MAX_PATH_SIZE <= enc.written + 1/*NUL byte*/){
         toss("Path too long:",path);
       }
+      if(path && flags){
+        /* When creating or re-writing files, update their digest, if
+           needed, to v2. We continue to use v1 for the (!path) case
+           (empty files) because there's little reason not to use a
+           digest of 0 for empty entries. */
+        flags |= FLAG_COMPUTE_DIGEST_V2;
+      }
       this.#apBody.fill(0, enc.written, HEADER_MAX_PATH_SIZE);
       this.#dvBody.setUint32(HEADER_OFFSET_FLAGS, flags);
-
-      const digest = this.computeDigest(this.#apBody);
+      const digest = this.computeDigest(this.#apBody, flags);
+      //console.warn("setAssociatedPath(",path,") digest",digest);
       sah.write(this.#apBody, {at: 0});
       sah.write(digest, {at: HEADER_OFFSET_DIGEST});
       sah.flush();
 
       if(path){
@@ -677,19 +728,26 @@
        Computes a digest for the given byte array and returns it as a
        two-element Uint32Array. This digest gets stored in the
        metadata for each file as a validation check. Changing this
        algorithm invalidates all existing databases for this VFS, so
        don't do that.
+
+       See the docs for FLAG_COMPUTE_DIGEST_V2 for more details.
     */
-    computeDigest(byteArray){
-      let h1 = 0xdeadbeef;
-      let h2 = 0x41c6ce57;
-      for(const v of byteArray){
-        h1 = 31 * h1 + (v * 307);
-        h2 = 31 * h2 + (v * 307);
-      }
-      return new Uint32Array([h1>>>0, h2>>>0]);
+    computeDigest(byteArray, fileFlags){
+      if( fileFlags & FLAG_COMPUTE_DIGEST_V2 ){
+        let h1 = 0xdeadbeef;
+        let h2 = 0x41c6ce57;
+        for(const v of byteArray){
+          h1 = Math.imul(h1 ^ v, 2654435761);
+          h2 = Math.imul(h2 ^ v, 104729);
+        }
+        return new Uint32Array([h1>>>0, h2>>>0]);
+      }else{
+        /* this is what the buggy legacy computation worked out to */
+        return new Uint32Array([0,0]);
+      }
     }
 
     /**
        Re-initializes the state of the SAH pool, releasing and
        re-acquiring all handles.

Index: ext/wasm/tester1.c-pp.js
==================================================================
--- ext/wasm/tester1.c-pp.js
+++ ext/wasm/tester1.c-pp.js
@@ -3504,11 +3504,11 @@
             return x;
           }
         });
         db.exec([
           "create table t(a);",
-          "insert into t(a) values(1),(2),(3);",
+          "insert into t(a) values(1),(2),(1);",
           "select auxtest(1,a), auxtest(1,a) from t order by a"
         ]);
       }finally{
         db.close();
         wasm.pstack.restore(stack);

ADDED ext/wasm/tests/opfs/sahpool/digest-worker.js
Index: ext/wasm/tests/opfs/sahpool/digest-worker.js
==================================================================
--- /dev/null
+++ ext/wasm/tests/opfs/sahpool/digest-worker.js
@@ -0,0 +1,94 @@
+/*
+  2025-01-31
+
+  The author disclaims copyright to this source code. In place of a
+  legal notice, here is a blessing:
+
+  * May you do good and not evil.
+  * May you find forgiveness for yourself and forgive others.
+  * May you share freely, never taking more than you give.
+
+  ***********************************************************************
+
+  This file is the worker half of digest.html's test of the
+  opfs-sahpool VFS's file-digest handling.
+*/
+const clog = console.log.bind(console);
+const wPost = (type,...args)=>postMessage({type, payload:args});
+const log = (...args)=>{
+  clog("Worker:",...args);
+  wPost('log',...args);
+}
+
+const hasOpfs = ()=>{
+  return globalThis.FileSystemHandle
+    && globalThis.FileSystemDirectoryHandle
+    && globalThis.FileSystemFileHandle
+    && globalThis.FileSystemFileHandle.prototype.createSyncAccessHandle
+    && navigator?.storage?.getDirectory;
+};
+if( !hasOpfs() ){
+  wPost('error',"OPFS not detected");
+  throw new Error("OPFS not detected");
+}
+
+clog("Importing sqlite3...");
+const searchParams = new URL(self.location.href).searchParams;
+importScripts(searchParams.get('sqlite3.dir') + '/sqlite3.js');
+
+const runTests = function(sqlite3, poolUtil){
+  const fname = '/my.db';
+  let db = new poolUtil.OpfsSAHPoolDb(fname);
+  let n = (new Date()).valueOf();
+  try {
+    db.exec([
+      "create table if not exists t(a);"
+    ]);
+    db.exec({
+      sql: "insert into t(a) values(?)",
+      bind: n++
+    });
+    log(fname,"record count: ",db.selectValue("select count(*) from t"));
+  }finally{
+    db.close();
+  }
+
+  db = new poolUtil.OpfsSAHPoolDb(fname);
+  try {
+    db.exec({
+      sql: "insert into t(a) values(?)",
+      bind: n++
+    });
+    log(fname,"record count: ",db.selectValue("select count(*) from t"));
+  }finally{
+    db.close();
+  }
+
+  const fname2 = '/my2.db';
+  db = new poolUtil.OpfsSAHPoolDb(fname2);
+  try {
+    db.exec([
+      "create table if not exists t(a);"
+    ]);
+    db.exec({
+      sql: "insert into t(a) values(?)",
+      bind: n++
+    });
+    log(fname2,"record count: ",db.selectValue("select count(*) from t"));
+  }finally{
+    db.close();
+  }
+};
+
+globalThis.sqlite3InitModule().then(async function(sqlite3){
+  log("sqlite3 version:",sqlite3.version);
+  const sahPoolConfig = {
+    name: 'opfs-sahpool-digest',
+    clearOnInit: false,
+    initialCapacity: 6
+  };
+  return sqlite3.installOpfsSAHPoolVfs(sahPoolConfig).then(poolUtil=>{
+    log('vfs acquired');
+    runTests(sqlite3, poolUtil);
+  });
+});

ADDED ext/wasm/tests/opfs/sahpool/digest.html
Index: ext/wasm/tests/opfs/sahpool/digest.html
==================================================================
--- /dev/null
+++ ext/wasm/tests/opfs/sahpool/digest.html
@@ -0,0 +1,141 @@
+    This is a test app for the digest calculation of the OPFS
+    SAHPool VFS. It requires running it with a new database created
+    using v3.49.0 or older, then running it again with a newer
+    version, then again with 3.49.0 or older.
+    [remainder of the page markup not shown]