diff --git a/JetStreamDriver.js b/JetStreamDriver.js
index e6995d1..0f5c613 100644
--- a/JetStreamDriver.js
+++ b/JetStreamDriver.js
@@ -1,7 +1,7 @@
 "use strict";
 
 /*
- * Copyright (C) 2018-2022 Apple Inc. All rights reserved.
+ * Copyright (C) 2018-2024 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -36,11 +36,17 @@ globalThis.dumpJSONResults ??= false;
 globalThis.customTestList ??= [];
 
 let shouldReport = false;
+let startDelay;
 if (typeof(URLSearchParams) !== "undefined") {
     const urlParameters = new URLSearchParams(window.location.search);
     shouldReport = urlParameters.has('report') && urlParameters.get('report').toLowerCase() == 'true';
+    if (shouldReport)
+        startDelay = 4000;
+    if (urlParameters.has('startDelay'))
+        startDelay = urlParameters.get('startDelay');
     if (urlParameters.has('test'))
         customTestList = urlParameters.getAll("test");
+
 }
 
 // Used for the promise representing the current benchmark run.
@@ -158,7 +164,7 @@ function uiFriendlyDuration(time)
     const minutes = time.getMinutes();
     const seconds = time.getSeconds();
     const milliSeconds = time.getMilliseconds();
-    const result = "" + minutes + ":";
+    let result = "" + minutes + ":";
     result = result + (seconds < 10 ? "0" : "") + seconds + ".";
     result = result + (milliSeconds < 10 ? "00" : (milliSeconds < 100 ? "0" : "")) + milliSeconds;
 
@@ -330,7 +336,13 @@ class Driver {
         } else
             globalObject = runString("");
 
-        globalObject.console = { log: globalObject.print, warn: (e) => { print("Warn: " + e); /*$vm.abort();*/ } }
+        globalObject.console = {
+            log: globalObject.print,
+            warn: (e) => { print("Warn: " + e); },
+            error: (e) => { print("Error: " + e); },
+            debug: (e) => { print("Debug: " + e); },
+        };
+
         globalObject.self = globalObject;
         globalObject.top = {
             currentResolve,
@@ -414,8 +426,8 @@ class Driver {
         await this.prefetchResourcesForBrowser();
         await this.fetchResources();
         this.prepareToRun();
-        if (isInBrowser && shouldReport) {
-            setTimeout(() => this.start(), 4000);
+        if (isInBrowser && startDelay !== undefined) {
+            setTimeout(() => this.start(), startDelay);
         }
     }
 
@@ -543,10 +555,11 @@ class Benchmark {
                 if (__benchmark.prepareForNextIteration)
                     __benchmark.prepareForNextIteration();
 
-                ${this.preiterationCode}
+                ${this.preIterationCode}
                 let start = performance.now();
                 __benchmark.runIteration();
                 let end = performance.now();
+                ${this.postIterationCode}
 
                 results.push(Math.max(1, end - start));
             }
@@ -565,11 +578,24 @@ class Benchmark {
 
     get prerunCode() { return null; }
 
-    get preiterationCode() {
+    get preIterationCode() {
+        let code = "";
         if (this.plan.deterministicRandom)
-            return `Math.random.__resetSeed();`;
+            code += `Math.random.__resetSeed();`;
 
-        return "";
+        if (globalThis.customPreIterationCode)
+            code += customPreIterationCode;
+
+        return code;
+    }
+
+    get postIterationCode() {
+        let code = "";
+
+        if (globalThis.customPostIterationCode)
+            code += customPostIterationCode;
+
+        return code;
     }
 
     async run() {
@@ -972,10 +998,11 @@ class AsyncBenchmark extends DefaultBenchmark {
             let __benchmark = new Benchmark();
             let results = [];
             for (let i = 0; i < ${this.iterations}; i++) {
-                ${this.preiterationCode}
+                ${this.preIterationCode}
                 let start = performance.now();
                 await __benchmark.runIteration();
                 let end = performance.now();
+                ${this.postIterationCode}
                 results.push(Math.max(1, end - start));
             }
             if (__benchmark.validate)
@@ -986,6 +1013,94 @@ class AsyncBenchmark extends DefaultBenchmark {
     }
 };
 
+// Meant for wasm benchmarks that are directly compiled with an emcc build script. It might not work for benchmarks built as
+// part of a larger project's build system or a wasm benchmark compiled from a language that doesn't compile with emcc.
+class WasmEMCCBenchmark extends AsyncBenchmark {
+    get prerunCode() {
+        let str = `
+            let verbose = false;
+
+            let globalObject = this;
+
+            abort = quit = function() {
+                if (verbose)
+                    console.log('Intercepted quit/abort');
+            };
+
+            oldPrint = globalObject.print;
+            globalObject.print = globalObject.printErr = (...args) => {
+                if (verbose)
+                    console.log('Intercepted print: ', ...args);
+            };
+
+            let Module = {
+                preRun: [],
+                postRun: [],
+                print: print,
+                printErr: printErr,
+                setStatus: function(text) {
+                },
+                totalDependencies: 0,
+                monitorRunDependencies: function(left) {
+                    this.totalDependencies = Math.max(this.totalDependencies, left);
+                    Module.setStatus(left ? 'Preparing... (' + (this.totalDependencies-left) + '/' + this.totalDependencies + ')' : 'All downloads complete.');
+                },
+            };
+            globalObject.Module = Module;
+            `;
+        return str;
+    }
+
+    get runnerCode() {
+        let str = `function loadBlob(key, path, andThen) {`;
+
+        if (isInBrowser) {
+            str += `
+                var xhr = new XMLHttpRequest();
+                xhr.open('GET', path, true);
+                xhr.responseType = 'arraybuffer';
+                xhr.onload = function() {
+                    Module[key] = new Int8Array(xhr.response);
+                    andThen();
+                };
+                xhr.send(null);
+            `;
+        } else {
+            str += `
+                Module[key] = new Int8Array(read(path, "binary"));
+
+                Module.setStatus = null;
+                Module.monitorRunDependencies = null;
+
+                Promise.resolve(42).then(() => {
+                    try {
+                        andThen();
+                    } catch(e) {
+                        console.log("error running wasm:", e);
+                        console.log(e.stack);
+                        throw e;
+                    }
+                })
+            `;
+        }
+
+        str += "}";
+
+        let keys = Object.keys(this.plan.preload);
+        for (let i = 0; i < keys.length; ++i) {
+            str += `loadBlob("${keys[i]}", "${this.plan.preload[keys[i]]}", async () => {\n`;
+        }
+
+        str += super.runnerCode;
+        for (let i = 0; i < keys.length; ++i) {
+            str += `})`;
+        }
+        str += `;`;
+
+        return str;
+    }
+};
+
 class WSLBenchmark extends Benchmark {
     constructor(...args) {
         super(...args);
@@ -1062,7 +1177,7 @@ class WSLBenchmark extends Benchmark {
     }
 };
 
-class WasmBenchmark extends Benchmark {
+class WasmLegacyBenchmark extends Benchmark {
     constructor(...args) {
         super(...args);
 
@@ -1776,18 +1891,21 @@ const testPlans = [
         preload: {
            wasmBinary: "./wasm/HashSet.wasm"
         },
-        benchmarkClass: WasmBenchmark,
+        benchmarkClass: WasmLegacyBenchmark,
         testGroup: WasmGroup
     },
     {
         name: "tsf-wasm",
         files: [
-            "./wasm/tsf.js"
+            "./wasm/TSF/build/tsf.js",
+            "./wasm/TSF/benchmark.js",
         ],
         preload: {
-            wasmBinary: "./wasm/tsf.wasm"
+            wasmBinary: "./wasm/TSF/build/tsf.wasm"
         },
-        benchmarkClass: WasmBenchmark,
+        benchmarkClass: WasmEMCCBenchmark,
+        iterations: 15,
+        worstCaseCount: 2,
         testGroup: WasmGroup
     },
     {
@@ -1798,7 +1916,7 @@ const testPlans = [
         preload: {
             wasmBinary: "./wasm/quicksort.wasm"
         },
-        benchmarkClass: WasmBenchmark,
+        benchmarkClass: WasmLegacyBenchmark,
         testGroup: WasmGroup
     },
     {
@@ -1809,7 +1927,7 @@ const testPlans = [
         preload: {
             wasmBinary: "./wasm/gcc-loops.wasm"
         },
-        benchmarkClass: WasmBenchmark,
+        benchmarkClass: WasmLegacyBenchmark,
         testGroup: WasmGroup
     },
     {
@@ -1820,7 +1938,7 @@ const testPlans = [
         preload: {
             wasmBinary: "./wasm/richards.wasm"
         },
-        benchmarkClass: WasmBenchmark,
+        benchmarkClass: WasmLegacyBenchmark,
         testGroup: WasmGroup
     },
     {
@@ -1833,7 +1951,7 @@ const testPlans = [
         preload: {
             wasmBinary: "./sqlite3/build/jswasm/speedtest1.wasm"
         },
-        benchmarkClass: WasmBenchmark,
+        benchmarkClass: WasmLegacyBenchmark,
         testGroup: WasmGroup
     },
     {
@@ -1852,7 +1970,7 @@ const testPlans = [
         preload: {
             tfjsBackendWasmBlob: "./wasm/tfjs-backend-wasm.wasm",
         },
-        benchmarkClass: WasmBenchmark,
+        benchmarkClass: WasmLegacyBenchmark,
         async: true,
         deterministicRandom: true,
         testGroup: WasmGroup
     },
     {
@@ -1873,7 +1991,7 @@ const testPlans = [
         preload: {
             tfjsBackendWasmSimdBlob: "./wasm/tfjs-backend-wasm-simd.wasm",
         },
-        benchmarkClass: WasmBenchmark,
+        benchmarkClass: WasmLegacyBenchmark,
         async: true,
         deterministicRandom: true,
         testGroup: WasmGroup
     },
     {
@@ -1888,7 +2006,7 @@ const testPlans = [
         preload: {
             argon2WasmBlob: "./wasm/argon2.wasm",
         },
-        benchmarkClass: WasmBenchmark,
+        benchmarkClass: WasmLegacyBenchmark,
         testGroup: WasmGroup
     },
     {
@@ -1901,7 +2019,7 @@ const testPlans = [
         preload: {
             argon2WasmSimdBlob: "./wasm/argon2-simd.wasm",
         },
-        benchmarkClass: WasmBenchmark,
+        benchmarkClass: WasmLegacyBenchmark,
         testGroup: WasmGroup
     },
     // WorkerTests
     {
@@ -1975,7 +2093,7 @@ const testPlans = [
             romBinary: "./8bitbench/assets/program.bin"
         },
         async: true,
-        benchmarkClass: WasmBenchmark,
+        benchmarkClass: WasmLegacyBenchmark,
         testGroup: WasmGroup
     }
 ];
diff --git a/sqlite3/build.sh b/sqlite3/build.sh
index ab9568d..ff7b067 100755
--- a/sqlite3/build.sh
+++ b/sqlite3/build.sh
@@ -8,8 +8,9 @@ rm sqlite-src-*.zip
 rm -rf sqlite-src-*/
 rm -rf build/
 
+touch build.log
 BUILD_LOG="$(realpath build.log)"
-echo -e "Built on $(date --rfc-3339=seconds)\n" | tee "$BUILD_LOG"
+echo -e "Built on $(date -u '+%Y-%m-%dT%H:%M:%SZ')\n" | tee "$BUILD_LOG"
 
 echo "Toolchain versions" | tee -a "$BUILD_LOG"
 emcc --version | head -n1 | tee -a "$BUILD_LOG"
diff --git a/wasm/TSF/benchmark.js b/wasm/TSF/benchmark.js
new file mode 100644
index 0000000..405e126
--- /dev/null
+++ b/wasm/TSF/benchmark.js
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2024 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+class Benchmark {
+    async runIteration() {
+        if (!Module._runIteration)
+            await setupModule(Module);
+
+        Module._runIteration(150);
+    }
+};
diff --git a/wasm/TSF/build.log b/wasm/TSF/build.log
new file mode 100644
index 0000000..febb95e
--- /dev/null
+++ b/wasm/TSF/build.log
@@ -0,0 +1,6 @@
+Built on 2024-12-20T14:36:37Z
+
+Toolchain versions
+emcc (Emscripten gcc/clang-like replacement + linker emulating GNU ld) 3.1.73-git
+Building...
+Building done
diff --git a/wasm/TSF/build.sh b/wasm/TSF/build.sh
index 45017b1..ac73360 100755
--- a/wasm/TSF/build.sh
+++ b/wasm/TSF/build.sh
@@ -1,6 +1,18 @@
 #!/bin/sh
+
+set -euo pipefail
+
+touch build.log
+BUILD_LOG="$(realpath build.log)"
+echo "Built on $(date -u '+%Y-%m-%dT%H:%M:%SZ')\n" | tee "$BUILD_LOG"
+
+echo "Toolchain versions" | tee -a "$BUILD_LOG"
+emcc --version | head -n1 | tee -a "$BUILD_LOG"
+
+echo "Building..." | tee -a "$BUILD_LOG"
+mkdir -p build
 emcc \
-    -o tsf.html -o tsf.js -O2 -s WASM=1 -s TOTAL_MEMORY=52428800 -g1 \
+    -o build/tsf.js -O2 -s MODULARIZE=1 -s EXPORT_NAME=setupModule -s WASM=1 -s TOTAL_MEMORY=52428800 -g1 --emit-symbol-map -s EXPORTED_FUNCTIONS=_runIteration \
     -I. -DTSF_BUILD_SYSTEM=1 \
     tsf_asprintf.c\
     tsf_buffer.c\
@@ -53,3 +65,4 @@ emcc \
     tsf_ir_different.c\
     tsf_ir_speed.c
 
+echo "Building done" | tee -a "$BUILD_LOG"
diff --git a/wasm/TSF/build/tsf.js b/wasm/TSF/build/tsf.js
new file mode 100644
index 0000000..dab0e9b
--- /dev/null
+++ b/wasm/TSF/build/tsf.js
@@ -0,0 +1,3611 @@
+
+var setupModule = (() => {
+  var _scriptName = typeof document != 'undefined' ? document.currentScript?.src : undefined;
+  if (typeof __filename != 'undefined') _scriptName = _scriptName || __filename;
+  return (
+function(moduleArg = {}) {
+  var moduleRtn;
+
+// include: shell.js
+// The Module object: Our interface to the outside world. We import
+// and export values on it. There are various ways Module can be used:
+// 1. Not defined. We create it here
+// 2. A function parameter, function(moduleArg) => Promise
+// 3. pre-run appended it, var Module = {}; ..generated code..
+// 4. External script tag defines var Module.
+// We need to check if Module already exists (e.g. case 3 above).
+// Substitution will be replaced with actual code on later stage of the build,
+// this way Closure Compiler will not mangle it (e.g. case 4. above).
+// Note that if you want to run closure, and also to use Module
+// after the generated code, you will need to define var Module = {};
+// before the code. Then that object will be used in the code, and you
+// can continue to use Module afterwards as well.
+var Module = moduleArg;
+
+// Set up the promise that indicates the Module is initialized
+var readyPromiseResolve, readyPromiseReject;
+
+var readyPromise = new Promise((resolve, reject) => {
+  readyPromiseResolve = resolve;
+  readyPromiseReject = reject;
+});
+
+// Determine the runtime environment we are in. You can customize this by
+// setting the ENVIRONMENT setting at compile time (see settings.js).
+// Attempt to auto-detect the environment
+var ENVIRONMENT_IS_WEB = typeof window == "object";
+
+var ENVIRONMENT_IS_WORKER = typeof WorkerGlobalScope != "undefined";
+
+// N.b. Electron.js environment is simultaneously a NODE-environment, but
+// also a web environment.
+
+var ENVIRONMENT_IS_NODE = typeof process == "object" && typeof process.versions == "object" && typeof process.versions.node == "string" && process.type != "renderer"; + +if (ENVIRONMENT_IS_NODE) {} + +// --pre-jses are emitted after the Module integration code, so that they can +// refer to Module (if they choose; they can also define Module) +// Sometimes an existing Module object exists with properties +// meant to overwrite the default module functionality. Here +// we collect those properties and reapply _after_ we configure +// the current environment's defaults to avoid having to be so +// defensive during initialization. +var moduleOverrides = Object.assign({}, Module); + +var arguments_ = []; + +var thisProgram = "./this.program"; + +var quit_ = (status, toThrow) => { + throw toThrow; +}; + +// `/` should be present at the end if `scriptDirectory` is not empty +var scriptDirectory = ""; + +function locateFile(path) { + if (Module["locateFile"]) { + return Module["locateFile"](path, scriptDirectory); + } + return scriptDirectory + path; +} + +// Hooks that are implemented differently in different runtime environments. +var readAsync, readBinary; + +if (ENVIRONMENT_IS_NODE) { + // These modules will usually be used on Node.js. Load them eagerly to avoid + // the complexity of lazy-loading. + var fs = require("fs"); + var nodePath = require("path"); + scriptDirectory = __dirname + "/"; + // include: node_shell_read.js + readBinary = filename => { + // We need to re-wrap `file://` strings to URLs. Normalizing isn't + // necessary in that case, the path should already be absolute. + filename = isFileURI(filename) ? new URL(filename) : nodePath.normalize(filename); + var ret = fs.readFileSync(filename); + return ret; + }; + readAsync = (filename, binary = true) => { + // See the comment in the `readBinary` function. + filename = isFileURI(filename) ? new URL(filename) : nodePath.normalize(filename); + return new Promise((resolve, reject) => { + fs.readFile(filename, binary ? undefined : "utf8", (err, data) => { + if (err) reject(err); else resolve(binary ? data.buffer : data); + }); + }); + }; + // end include: node_shell_read.js + if (!Module["thisProgram"] && process.argv.length > 1) { + thisProgram = process.argv[1].replace(/\\/g, "/"); + } + arguments_ = process.argv.slice(2); + // MODULARIZE will export the module in the proper place outside, we don't need to export here + quit_ = (status, toThrow) => { + process.exitCode = status; + throw toThrow; + }; +} else // Note that this includes Node.js workers when relevant (pthreads is enabled). +// Node.js workers are detected as a combination of ENVIRONMENT_IS_WORKER and +// ENVIRONMENT_IS_NODE. +if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) { + if (ENVIRONMENT_IS_WORKER) { + // Check worker, not web, since window could be polyfilled + scriptDirectory = self.location.href; + } else if (typeof document != "undefined" && document.currentScript) { + // web + scriptDirectory = document.currentScript.src; + } + // When MODULARIZE, this JS may be executed later, after document.currentScript + // is gone, so we saved it, and we use it here instead of any other info. + if (_scriptName) { + scriptDirectory = _scriptName; + } + // blob urls look like blob:http://site.com/etc/etc and we cannot infer anything from them. + // otherwise, slice off the final part of the url to find the script directory. 
+ // if scriptDirectory does not contain a slash, lastIndexOf will return -1, + // and scriptDirectory will correctly be replaced with an empty string. + // If scriptDirectory contains a query (starting with ?) or a fragment (starting with #), + // they are removed because they could contain a slash. + if (scriptDirectory.startsWith("blob:")) { + scriptDirectory = ""; + } else { + scriptDirectory = scriptDirectory.substr(0, scriptDirectory.replace(/[?#].*/, "").lastIndexOf("/") + 1); + } + { + // include: web_or_worker_shell_read.js + if (ENVIRONMENT_IS_WORKER) { + readBinary = url => { + var xhr = new XMLHttpRequest; + xhr.open("GET", url, false); + xhr.responseType = "arraybuffer"; + xhr.send(null); + return new Uint8Array(/** @type{!ArrayBuffer} */ (xhr.response)); + }; + } + readAsync = url => { + // Fetch has some additional restrictions over XHR, like it can't be used on a file:// url. + // See https://github.com/github/fetch/pull/92#issuecomment-140665932 + // Cordova or Electron apps are typically loaded from a file:// url. + // So use XHR on webview if URL is a file URL. + if (isFileURI(url)) { + return new Promise((resolve, reject) => { + var xhr = new XMLHttpRequest; + xhr.open("GET", url, true); + xhr.responseType = "arraybuffer"; + xhr.onload = () => { + if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { + // file URLs can return 0 + resolve(xhr.response); + return; + } + reject(xhr.status); + }; + xhr.onerror = reject; + xhr.send(null); + }); + } + return fetch(url, { + credentials: "same-origin" + }).then(response => { + if (response.ok) { + return response.arrayBuffer(); + } + return Promise.reject(new Error(response.status + " : " + response.url)); + }); + }; + } +} else // end include: web_or_worker_shell_read.js +{} + +var out = Module["print"] || console.log.bind(console); + +var err = Module["printErr"] || console.error.bind(console); + +// Merge back in the overrides +Object.assign(Module, moduleOverrides); + +// Free the object hierarchy contained in the overrides, this lets the GC +// reclaim data used. +moduleOverrides = null; + +// Emit code to handle expected values on the Module object. This applies Module.x +// to the proper local x. This has two benefits: first, we only emit it if it is +// expected to arrive, and second, by using a local everywhere else that can be +// minified. +if (Module["arguments"]) arguments_ = Module["arguments"]; + +if (Module["thisProgram"]) thisProgram = Module["thisProgram"]; + +// perform assertions in shell.js after we set up out() and err(), as otherwise if an assertion fails it cannot print the message +// end include: shell.js +// include: preamble.js +// === Preamble library stuff === +// Documentation for the public APIs defined in this file must be updated in: +// site/source/docs/api_reference/preamble.js.rst +// A prebuilt local version of the documentation is available at: +// site/build/text/docs/api_reference/preamble.js.txt +// You can also build docs locally as HTML or other formats in site/ +// An online HTML version (which may be of a different version of Emscripten) +// is up at http://kripken.github.io/emscripten-site/docs/api_reference/preamble.js.html +var wasmBinary = Module["wasmBinary"]; + +// Wasm globals +var wasmMemory; + +//======================================== +// Runtime essentials +//======================================== +// whether we are quitting the application. no code should run after this. +// set in exit() and abort() +var ABORT = false; + +// set by exit() and abort(). 
Passed to 'onExit' handler. +// NOTE: This is also used as the process return code code in shell environments +// but only when noExitRuntime is false. +var EXITSTATUS; + +// Memory management +var /** @type {!Int8Array} */ HEAP8, /** @type {!Uint8Array} */ HEAPU8, /** @type {!Int16Array} */ HEAP16, /** @type {!Uint16Array} */ HEAPU16, /** @type {!Int32Array} */ HEAP32, /** @type {!Uint32Array} */ HEAPU32, /** @type {!Float32Array} */ HEAPF32, /** @type {!Float64Array} */ HEAPF64; + +// include: runtime_shared.js +function updateMemoryViews() { + var b = wasmMemory.buffer; + Module["HEAP8"] = HEAP8 = new Int8Array(b); + Module["HEAP16"] = HEAP16 = new Int16Array(b); + Module["HEAPU8"] = HEAPU8 = new Uint8Array(b); + Module["HEAPU16"] = HEAPU16 = new Uint16Array(b); + Module["HEAP32"] = HEAP32 = new Int32Array(b); + Module["HEAPU32"] = HEAPU32 = new Uint32Array(b); + Module["HEAPF32"] = HEAPF32 = new Float32Array(b); + Module["HEAPF64"] = HEAPF64 = new Float64Array(b); +} + +// end include: runtime_shared.js +// include: runtime_stack_check.js +// end include: runtime_stack_check.js +var __ATPRERUN__ = []; + +// functions called before the runtime is initialized +var __ATINIT__ = []; + +// functions called during shutdown +var __ATPOSTRUN__ = []; + +// functions called after the main() is called +var runtimeInitialized = false; + +function preRun() { + if (Module["preRun"]) { + if (typeof Module["preRun"] == "function") Module["preRun"] = [ Module["preRun"] ]; + while (Module["preRun"].length) { + addOnPreRun(Module["preRun"].shift()); + } + } + callRuntimeCallbacks(__ATPRERUN__); +} + +function initRuntime() { + runtimeInitialized = true; + if (!Module["noFSInit"] && !FS.initialized) FS.init(); + FS.ignorePermissions = false; + TTY.init(); + callRuntimeCallbacks(__ATINIT__); +} + +function postRun() { + if (Module["postRun"]) { + if (typeof Module["postRun"] == "function") Module["postRun"] = [ Module["postRun"] ]; + while (Module["postRun"].length) { + addOnPostRun(Module["postRun"].shift()); + } + } + callRuntimeCallbacks(__ATPOSTRUN__); +} + +function addOnPreRun(cb) { + __ATPRERUN__.unshift(cb); +} + +function addOnInit(cb) { + __ATINIT__.unshift(cb); +} + +function addOnPostRun(cb) { + __ATPOSTRUN__.unshift(cb); +} + +// include: runtime_math.js +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/imul +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/fround +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/clz32 +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/trunc +// end include: runtime_math.js +// A counter of dependencies for calling run(). If we need to +// do asynchronous work before running, increment this and +// decrement it. Incrementing must happen in a place like +// Module.preRun (used by emcc to add file preloading). +// Note that you can add dependencies in preRun, even though +// it happens right before run - run will be postponed until +// the dependencies are met. 
+var runDependencies = 0; + +var runDependencyWatcher = null; + +var dependenciesFulfilled = null; + +// overridden to take different actions when all run dependencies are fulfilled +function getUniqueRunDependency(id) { + return id; +} + +function addRunDependency(id) { + runDependencies++; + Module["monitorRunDependencies"]?.(runDependencies); +} + +function removeRunDependency(id) { + runDependencies--; + Module["monitorRunDependencies"]?.(runDependencies); + if (runDependencies == 0) { + if (runDependencyWatcher !== null) { + clearInterval(runDependencyWatcher); + runDependencyWatcher = null; + } + if (dependenciesFulfilled) { + var callback = dependenciesFulfilled; + dependenciesFulfilled = null; + callback(); + } + } +} + +/** @param {string|number=} what */ function abort(what) { + Module["onAbort"]?.(what); + what = "Aborted(" + what + ")"; + // TODO(sbc): Should we remove printing and leave it up to whoever + // catches the exception? + err(what); + ABORT = true; + what += ". Build with -sASSERTIONS for more info."; + // Use a wasm runtime error, because a JS error might be seen as a foreign + // exception, which means we'd run destructors on it. We need the error to + // simply make the program stop. + // FIXME This approach does not work in Wasm EH because it currently does not assume + // all RuntimeErrors are from traps; it decides whether a RuntimeError is from + // a trap or not based on a hidden field within the object. So at the moment + // we don't have a way of throwing a wasm trap from JS. TODO Make a JS API that + // allows this in the wasm spec. + // Suppress closure compiler warning here. Closure compiler's builtin extern + // definition for WebAssembly.RuntimeError claims it takes no arguments even + // though it can. + // TODO(https://github.com/google/closure-compiler/pull/3913): Remove if/when upstream closure gets fixed. + /** @suppress {checkTypes} */ var e = new WebAssembly.RuntimeError(what); + readyPromiseReject(e); + // Throw the error whether or not MODULARIZE is set because abort is used + // in code paths apart from instantiation where an exception is expected + // to be thrown when abort is called. + throw e; +} + +// include: memoryprofiler.js +// end include: memoryprofiler.js +// include: URIUtils.js +// Prefix of data URIs emitted by SINGLE_FILE and related options. +var dataURIPrefix = "data:application/octet-stream;base64,"; + +/** + * Indicates whether filename is a base64 data URI. + * @noinline + */ var isDataURI = filename => filename.startsWith(dataURIPrefix); + +/** + * Indicates whether filename is delivered via file protocol (as opposed to http/https) + * @noinline + */ var isFileURI = filename => filename.startsWith("file://"); + +// end include: URIUtils.js +// include: runtime_exceptions.js +// end include: runtime_exceptions.js +function findWasmBinary() { + var f = "tsf.wasm"; + if (!isDataURI(f)) { + return locateFile(f); + } + return f; +} + +var wasmBinaryFile; + +function getBinarySync(file) { + if (file == wasmBinaryFile && wasmBinary) { + return new Uint8Array(wasmBinary); + } + if (readBinary) { + return readBinary(file); + } + throw "both async and sync fetching of the wasm failed"; +} + +function getBinaryPromise(binaryFile) { + // If we don't have the binary yet, load it asynchronously using readAsync. 
+ if (!wasmBinary) { + // Fetch the binary using readAsync + return readAsync(binaryFile).then(response => new Uint8Array(/** @type{!ArrayBuffer} */ (response)), // Fall back to getBinarySync if readAsync fails + () => getBinarySync(binaryFile)); + } + // Otherwise, getBinarySync should be able to get it synchronously + return Promise.resolve().then(() => getBinarySync(binaryFile)); +} + +function instantiateArrayBuffer(binaryFile, imports, receiver) { + return getBinaryPromise(binaryFile).then(binary => WebAssembly.instantiate(binary, imports)).then(receiver, reason => { + err(`failed to asynchronously prepare wasm: ${reason}`); + abort(reason); + }); +} + +function instantiateAsync(binary, binaryFile, imports, callback) { + if (!binary && typeof WebAssembly.instantiateStreaming == "function" && !isDataURI(binaryFile) && // Don't use streaming for file:// delivered objects in a webview, fetch them synchronously. + !isFileURI(binaryFile) && // Avoid instantiateStreaming() on Node.js environment for now, as while + // Node.js v18.1.0 implements it, it does not have a full fetch() + // implementation yet. + // Reference: + // https://github.com/emscripten-core/emscripten/pull/16917 + !ENVIRONMENT_IS_NODE && typeof fetch == "function") { + return fetch(binaryFile, { + credentials: "same-origin" + }).then(response => { + // Suppress closure warning here since the upstream definition for + // instantiateStreaming only allows Promise rather than + // an actual Response. + // TODO(https://github.com/google/closure-compiler/pull/3913): Remove if/when upstream closure is fixed. + /** @suppress {checkTypes} */ var result = WebAssembly.instantiateStreaming(response, imports); + return result.then(callback, function(reason) { + // We expect the most common failure cause to be a bad MIME type for the binary, + // in which case falling back to ArrayBuffer instantiation should work. + err(`wasm streaming compile failed: ${reason}`); + err("falling back to ArrayBuffer instantiation"); + return instantiateArrayBuffer(binaryFile, imports, callback); + }); + }); + } + return instantiateArrayBuffer(binaryFile, imports, callback); +} + +function getWasmImports() { + // prepare imports + return { + "env": wasmImports, + "wasi_snapshot_preview1": wasmImports + }; +} + +// Create the wasm instance. +// Receives the wasm imports, returns the exports. +function createWasm() { + // Load the wasm module and create an instance of using native support in the JS engine. + // handle a generated wasm instance, receiving its exports and + // performing other necessary setup + /** @param {WebAssembly.Module=} module*/ function receiveInstance(instance, module) { + wasmExports = instance.exports; + wasmMemory = wasmExports["memory"]; + updateMemoryViews(); + addOnInit(wasmExports["__wasm_call_ctors"]); + removeRunDependency("wasm-instantiate"); + return wasmExports; + } + // wait for the pthread pool (if any) + addRunDependency("wasm-instantiate"); + // Prefer streaming instantiation if available. + function receiveInstantiationResult(result) { + // 'result' is a ResultObject object which has both the module and instance. + // receiveInstance() will swap in the exports (to Module.asm) so they can be called + // TODO: Due to Closure regression https://github.com/google/closure-compiler/issues/3193, the above line no longer optimizes out down to the following line. + // When the regression is fixed, can restore the above PTHREADS-enabled path. 
+ receiveInstance(result["instance"]); + } + var info = getWasmImports(); + // User shell pages can write their own Module.instantiateWasm = function(imports, successCallback) callback + // to manually instantiate the Wasm module themselves. This allows pages to + // run the instantiation parallel to any other async startup actions they are + // performing. + // Also pthreads and wasm workers initialize the wasm instance through this + // path. + if (Module["instantiateWasm"]) { + try { + return Module["instantiateWasm"](info, receiveInstance); + } catch (e) { + err(`Module.instantiateWasm callback failed with error: ${e}`); + // If instantiation fails, reject the module ready promise. + readyPromiseReject(e); + } + } + wasmBinaryFile ??= findWasmBinary(); + // If instantiation fails, reject the module ready promise. + instantiateAsync(wasmBinary, wasmBinaryFile, info, receiveInstantiationResult).catch(readyPromiseReject); + return {}; +} + +// Globals used by JS i64 conversions (see makeSetValue) +var tempDouble; + +var tempI64; + +// include: runtime_debug.js +// end include: runtime_debug.js +// === Body === +// end include: preamble.js +class ExitStatus { + name="ExitStatus"; + constructor(status) { + this.message = `Program terminated with exit(${status})`; + this.status = status; + } +} + +var callRuntimeCallbacks = callbacks => { + while (callbacks.length > 0) { + // Pass the module as the first argument. + callbacks.shift()(Module); + } +}; + +var noExitRuntime = Module["noExitRuntime"] || true; + +/** @suppress {duplicate } */ var syscallGetVarargI = () => { + // the `+` prepended here is necessary to convince the JSCompiler that varargs is indeed a number. + var ret = HEAP32[((+SYSCALLS.varargs) >> 2)]; + SYSCALLS.varargs += 4; + return ret; +}; + +var syscallGetVarargP = syscallGetVarargI; + +var PATH = { + isAbs: path => path.charAt(0) === "/", + splitPath: filename => { + var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/; + return splitPathRe.exec(filename).slice(1); + }, + normalizeArray: (parts, allowAboveRoot) => { + // if the path tries to go above the root, `up` ends up > 0 + var up = 0; + for (var i = parts.length - 1; i >= 0; i--) { + var last = parts[i]; + if (last === ".") { + parts.splice(i, 1); + } else if (last === "..") { + parts.splice(i, 1); + up++; + } else if (up) { + parts.splice(i, 1); + up--; + } + } + // if the path is allowed to go above the root, restore leading ..s + if (allowAboveRoot) { + for (;up; up--) { + parts.unshift(".."); + } + } + return parts; + }, + normalize: path => { + var isAbsolute = PATH.isAbs(path), trailingSlash = path.substr(-1) === "/"; + // Normalize the path + path = PATH.normalizeArray(path.split("/").filter(p => !!p), !isAbsolute).join("/"); + if (!path && !isAbsolute) { + path = "."; + } + if (path && trailingSlash) { + path += "/"; + } + return (isAbsolute ? 
"/" : "") + path; + }, + dirname: path => { + var result = PATH.splitPath(path), root = result[0], dir = result[1]; + if (!root && !dir) { + // No dirname whatsoever + return "."; + } + if (dir) { + // It has a dirname, strip trailing slash + dir = dir.substr(0, dir.length - 1); + } + return root + dir; + }, + basename: path => { + // EMSCRIPTEN return '/'' for '/', not an empty string + if (path === "/") return "/"; + path = PATH.normalize(path); + path = path.replace(/\/$/, ""); + var lastSlash = path.lastIndexOf("/"); + if (lastSlash === -1) return path; + return path.substr(lastSlash + 1); + }, + join: (...paths) => PATH.normalize(paths.join("/")), + join2: (l, r) => PATH.normalize(l + "/" + r) +}; + +var initRandomFill = () => { + if (typeof crypto == "object" && typeof crypto["getRandomValues"] == "function") { + // for modern web browsers + return view => crypto.getRandomValues(view); + } else if (ENVIRONMENT_IS_NODE) { + // for nodejs with or without crypto support included + try { + var crypto_module = require("crypto"); + var randomFillSync = crypto_module["randomFillSync"]; + if (randomFillSync) { + // nodejs with LTS crypto support + return view => crypto_module["randomFillSync"](view); + } + // very old nodejs with the original crypto API + var randomBytes = crypto_module["randomBytes"]; + return view => (view.set(randomBytes(view.byteLength)), // Return the original view to match modern native implementations. + view); + } catch (e) {} + } + // we couldn't find a proper implementation, as Math.random() is not suitable for /dev/random, see emscripten-core/emscripten/pull/7096 + abort("initRandomDevice"); +}; + +var randomFill = view => (randomFill = initRandomFill())(view); + +var PATH_FS = { + resolve: (...args) => { + var resolvedPath = "", resolvedAbsolute = false; + for (var i = args.length - 1; i >= -1 && !resolvedAbsolute; i--) { + var path = (i >= 0) ? args[i] : FS.cwd(); + // Skip empty and invalid entries + if (typeof path != "string") { + throw new TypeError("Arguments to path.resolve must be strings"); + } else if (!path) { + return ""; + } + // an invalid portion invalidates the whole thing + resolvedPath = path + "/" + resolvedPath; + resolvedAbsolute = PATH.isAbs(path); + } + // At this point the path should be resolved to a full absolute path, but + // handle relative paths to be safe (might happen when process.cwd() fails) + resolvedPath = PATH.normalizeArray(resolvedPath.split("/").filter(p => !!p), !resolvedAbsolute).join("/"); + return ((resolvedAbsolute ? "/" : "") + resolvedPath) || "."; + }, + relative: (from, to) => { + from = PATH_FS.resolve(from).substr(1); + to = PATH_FS.resolve(to).substr(1); + function trim(arr) { + var start = 0; + for (;start < arr.length; start++) { + if (arr[start] !== "") break; + } + var end = arr.length - 1; + for (;end >= 0; end--) { + if (arr[end] !== "") break; + } + if (start > end) return []; + return arr.slice(start, end - start + 1); + } + var fromParts = trim(from.split("/")); + var toParts = trim(to.split("/")); + var length = Math.min(fromParts.length, toParts.length); + var samePartsLength = length; + for (var i = 0; i < length; i++) { + if (fromParts[i] !== toParts[i]) { + samePartsLength = i; + break; + } + } + var outputParts = []; + for (var i = samePartsLength; i < fromParts.length; i++) { + outputParts.push(".."); + } + outputParts = outputParts.concat(toParts.slice(samePartsLength)); + return outputParts.join("/"); + } +}; + +var UTF8Decoder = typeof TextDecoder != "undefined" ? 
new TextDecoder : undefined; + +/** + * Given a pointer 'idx' to a null-terminated UTF8-encoded string in the given + * array that contains uint8 values, returns a copy of that string as a + * Javascript String object. + * heapOrArray is either a regular array, or a JavaScript typed array view. + * @param {number=} idx + * @param {number=} maxBytesToRead + * @return {string} + */ var UTF8ArrayToString = (heapOrArray, idx = 0, maxBytesToRead = NaN) => { + var endIdx = idx + maxBytesToRead; + var endPtr = idx; + // TextDecoder needs to know the byte length in advance, it doesn't stop on + // null terminator by itself. Also, use the length info to avoid running tiny + // strings through TextDecoder, since .subarray() allocates garbage. + // (As a tiny code save trick, compare endPtr against endIdx using a negation, + // so that undefined/NaN means Infinity) + while (heapOrArray[endPtr] && !(endPtr >= endIdx)) ++endPtr; + if (endPtr - idx > 16 && heapOrArray.buffer && UTF8Decoder) { + return UTF8Decoder.decode(heapOrArray.subarray(idx, endPtr)); + } + var str = ""; + // If building with TextDecoder, we have already computed the string length + // above, so test loop end condition against that + while (idx < endPtr) { + // For UTF8 byte structure, see: + // http://en.wikipedia.org/wiki/UTF-8#Description + // https://www.ietf.org/rfc/rfc2279.txt + // https://tools.ietf.org/html/rfc3629 + var u0 = heapOrArray[idx++]; + if (!(u0 & 128)) { + str += String.fromCharCode(u0); + continue; + } + var u1 = heapOrArray[idx++] & 63; + if ((u0 & 224) == 192) { + str += String.fromCharCode(((u0 & 31) << 6) | u1); + continue; + } + var u2 = heapOrArray[idx++] & 63; + if ((u0 & 240) == 224) { + u0 = ((u0 & 15) << 12) | (u1 << 6) | u2; + } else { + u0 = ((u0 & 7) << 18) | (u1 << 12) | (u2 << 6) | (heapOrArray[idx++] & 63); + } + if (u0 < 65536) { + str += String.fromCharCode(u0); + } else { + var ch = u0 - 65536; + str += String.fromCharCode(55296 | (ch >> 10), 56320 | (ch & 1023)); + } + } + return str; +}; + +var FS_stdin_getChar_buffer = []; + +var lengthBytesUTF8 = str => { + var len = 0; + for (var i = 0; i < str.length; ++i) { + // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code + // unit, not a Unicode code point of the character! So decode + // UTF16->UTF32->UTF8. + // See http://unicode.org/faq/utf_bom.html#utf16-3 + var c = str.charCodeAt(i); + // possibly a lead surrogate + if (c <= 127) { + len++; + } else if (c <= 2047) { + len += 2; + } else if (c >= 55296 && c <= 57343) { + len += 4; + ++i; + } else { + len += 3; + } + } + return len; +}; + +var stringToUTF8Array = (str, heap, outIdx, maxBytesToWrite) => { + // Parameter maxBytesToWrite is not optional. Negative values, 0, null, + // undefined and false each don't write out any bytes. + if (!(maxBytesToWrite > 0)) return 0; + var startIdx = outIdx; + var endIdx = outIdx + maxBytesToWrite - 1; + // -1 for string null terminator. + for (var i = 0; i < str.length; ++i) { + // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code + // unit, not a Unicode code point of the character! So decode + // UTF16->UTF32->UTF8. 
+ // See http://unicode.org/faq/utf_bom.html#utf16-3 + // For UTF8 byte structure, see http://en.wikipedia.org/wiki/UTF-8#Description + // and https://www.ietf.org/rfc/rfc2279.txt + // and https://tools.ietf.org/html/rfc3629 + var u = str.charCodeAt(i); + // possibly a lead surrogate + if (u >= 55296 && u <= 57343) { + var u1 = str.charCodeAt(++i); + u = 65536 + ((u & 1023) << 10) | (u1 & 1023); + } + if (u <= 127) { + if (outIdx >= endIdx) break; + heap[outIdx++] = u; + } else if (u <= 2047) { + if (outIdx + 1 >= endIdx) break; + heap[outIdx++] = 192 | (u >> 6); + heap[outIdx++] = 128 | (u & 63); + } else if (u <= 65535) { + if (outIdx + 2 >= endIdx) break; + heap[outIdx++] = 224 | (u >> 12); + heap[outIdx++] = 128 | ((u >> 6) & 63); + heap[outIdx++] = 128 | (u & 63); + } else { + if (outIdx + 3 >= endIdx) break; + heap[outIdx++] = 240 | (u >> 18); + heap[outIdx++] = 128 | ((u >> 12) & 63); + heap[outIdx++] = 128 | ((u >> 6) & 63); + heap[outIdx++] = 128 | (u & 63); + } + } + // Null-terminate the pointer to the buffer. + heap[outIdx] = 0; + return outIdx - startIdx; +}; + +/** @type {function(string, boolean=, number=)} */ function intArrayFromString(stringy, dontAddNull, length) { + var len = length > 0 ? length : lengthBytesUTF8(stringy) + 1; + var u8array = new Array(len); + var numBytesWritten = stringToUTF8Array(stringy, u8array, 0, u8array.length); + if (dontAddNull) u8array.length = numBytesWritten; + return u8array; +} + +var FS_stdin_getChar = () => { + if (!FS_stdin_getChar_buffer.length) { + var result = null; + if (ENVIRONMENT_IS_NODE) { + // we will read data by chunks of BUFSIZE + var BUFSIZE = 256; + var buf = Buffer.alloc(BUFSIZE); + var bytesRead = 0; + // For some reason we must suppress a closure warning here, even though + // fd definitely exists on process.stdin, and is even the proper way to + // get the fd of stdin, + // https://github.com/nodejs/help/issues/2136#issuecomment-523649904 + // This started to happen after moving this logic out of library_tty.js, + // so it is related to the surrounding code in some unclear manner. + /** @suppress {missingProperties} */ var fd = process.stdin.fd; + try { + bytesRead = fs.readSync(fd, buf, 0, BUFSIZE); + } catch (e) { + // Cross-platform differences: on Windows, reading EOF throws an + // exception, but on other OSes, reading EOF returns 0. Uniformize + // behavior by treating the EOF exception to return 0. + if (e.toString().includes("EOF")) bytesRead = 0; else throw e; + } + if (bytesRead > 0) { + result = buf.slice(0, bytesRead).toString("utf-8"); + } + } else if (typeof window != "undefined" && typeof window.prompt == "function") { + // Browser. + result = window.prompt("Input: "); + // returns null on cancel + if (result !== null) { + result += "\n"; + } + } else {} + if (!result) { + return null; + } + FS_stdin_getChar_buffer = intArrayFromString(result, true); + } + return FS_stdin_getChar_buffer.shift(); +}; + +var TTY = { + ttys: [], + init() {}, + // https://github.com/emscripten-core/emscripten/pull/1555 + // if (ENVIRONMENT_IS_NODE) { + // // currently, FS.init does not distinguish if process.stdin is a file or TTY + // // device, it always assumes it's a TTY device. because of this, we're forcing + // // process.stdin to UTF8 encoding to at least make stdin reading compatible + // // with text files until FS.init can be refactored. 
+ // process.stdin.setEncoding('utf8'); + // } + shutdown() {}, + // https://github.com/emscripten-core/emscripten/pull/1555 + // if (ENVIRONMENT_IS_NODE) { + // // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)? + // // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation + // // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists? + // // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle + // // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call + // process.stdin.pause(); + // } + register(dev, ops) { + TTY.ttys[dev] = { + input: [], + output: [], + ops + }; + FS.registerDevice(dev, TTY.stream_ops); + }, + stream_ops: { + open(stream) { + var tty = TTY.ttys[stream.node.rdev]; + if (!tty) { + throw new FS.ErrnoError(43); + } + stream.tty = tty; + stream.seekable = false; + }, + close(stream) { + // flush any pending line data + stream.tty.ops.fsync(stream.tty); + }, + fsync(stream) { + stream.tty.ops.fsync(stream.tty); + }, + read(stream, buffer, offset, length, pos) { + /* ignored */ if (!stream.tty || !stream.tty.ops.get_char) { + throw new FS.ErrnoError(60); + } + var bytesRead = 0; + for (var i = 0; i < length; i++) { + var result; + try { + result = stream.tty.ops.get_char(stream.tty); + } catch (e) { + throw new FS.ErrnoError(29); + } + if (result === undefined && bytesRead === 0) { + throw new FS.ErrnoError(6); + } + if (result === null || result === undefined) break; + bytesRead++; + buffer[offset + i] = result; + } + if (bytesRead) { + stream.node.timestamp = Date.now(); + } + return bytesRead; + }, + write(stream, buffer, offset, length, pos) { + if (!stream.tty || !stream.tty.ops.put_char) { + throw new FS.ErrnoError(60); + } + try { + for (var i = 0; i < length; i++) { + stream.tty.ops.put_char(stream.tty, buffer[offset + i]); + } + } catch (e) { + throw new FS.ErrnoError(29); + } + if (length) { + stream.node.timestamp = Date.now(); + } + return i; + } + }, + default_tty_ops: { + get_char(tty) { + return FS_stdin_getChar(); + }, + put_char(tty, val) { + if (val === null || val === 10) { + out(UTF8ArrayToString(tty.output)); + tty.output = []; + } else { + if (val != 0) tty.output.push(val); + } + }, + // val == 0 would cut text output off in the middle. 
+ fsync(tty) { + if (tty.output && tty.output.length > 0) { + out(UTF8ArrayToString(tty.output)); + tty.output = []; + } + }, + ioctl_tcgets(tty) { + // typical setting + return { + c_iflag: 25856, + c_oflag: 5, + c_cflag: 191, + c_lflag: 35387, + c_cc: [ 3, 28, 127, 21, 4, 0, 1, 0, 17, 19, 26, 0, 18, 15, 23, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] + }; + }, + ioctl_tcsets(tty, optional_actions, data) { + // currently just ignore + return 0; + }, + ioctl_tiocgwinsz(tty) { + return [ 24, 80 ]; + } + }, + default_tty1_ops: { + put_char(tty, val) { + if (val === null || val === 10) { + err(UTF8ArrayToString(tty.output)); + tty.output = []; + } else { + if (val != 0) tty.output.push(val); + } + }, + fsync(tty) { + if (tty.output && tty.output.length > 0) { + err(UTF8ArrayToString(tty.output)); + tty.output = []; + } + } + } +}; + +var mmapAlloc = size => { + abort(); +}; + +var MEMFS = { + ops_table: null, + mount(mount) { + return MEMFS.createNode(null, "/", 16895, 0); + }, + createNode(parent, name, mode, dev) { + if (FS.isBlkdev(mode) || FS.isFIFO(mode)) { + // no supported + throw new FS.ErrnoError(63); + } + MEMFS.ops_table ||= { + dir: { + node: { + getattr: MEMFS.node_ops.getattr, + setattr: MEMFS.node_ops.setattr, + lookup: MEMFS.node_ops.lookup, + mknod: MEMFS.node_ops.mknod, + rename: MEMFS.node_ops.rename, + unlink: MEMFS.node_ops.unlink, + rmdir: MEMFS.node_ops.rmdir, + readdir: MEMFS.node_ops.readdir, + symlink: MEMFS.node_ops.symlink + }, + stream: { + llseek: MEMFS.stream_ops.llseek + } + }, + file: { + node: { + getattr: MEMFS.node_ops.getattr, + setattr: MEMFS.node_ops.setattr + }, + stream: { + llseek: MEMFS.stream_ops.llseek, + read: MEMFS.stream_ops.read, + write: MEMFS.stream_ops.write, + allocate: MEMFS.stream_ops.allocate, + mmap: MEMFS.stream_ops.mmap, + msync: MEMFS.stream_ops.msync + } + }, + link: { + node: { + getattr: MEMFS.node_ops.getattr, + setattr: MEMFS.node_ops.setattr, + readlink: MEMFS.node_ops.readlink + }, + stream: {} + }, + chrdev: { + node: { + getattr: MEMFS.node_ops.getattr, + setattr: MEMFS.node_ops.setattr + }, + stream: FS.chrdev_stream_ops + } + }; + var node = FS.createNode(parent, name, mode, dev); + if (FS.isDir(node.mode)) { + node.node_ops = MEMFS.ops_table.dir.node; + node.stream_ops = MEMFS.ops_table.dir.stream; + node.contents = {}; + } else if (FS.isFile(node.mode)) { + node.node_ops = MEMFS.ops_table.file.node; + node.stream_ops = MEMFS.ops_table.file.stream; + node.usedBytes = 0; + // The actual number of bytes used in the typed array, as opposed to contents.length which gives the whole capacity. + // When the byte data of the file is populated, this will point to either a typed array, or a normal JS array. Typed arrays are preferred + // for performance, and used by default. However, typed arrays are not resizable like normal JS arrays are, so there is a small disk size + // penalty involved for appending file writes that continuously grow a file similar to std::vector capacity vs used -scheme. 
+ node.contents = null; + } else if (FS.isLink(node.mode)) { + node.node_ops = MEMFS.ops_table.link.node; + node.stream_ops = MEMFS.ops_table.link.stream; + } else if (FS.isChrdev(node.mode)) { + node.node_ops = MEMFS.ops_table.chrdev.node; + node.stream_ops = MEMFS.ops_table.chrdev.stream; + } + node.timestamp = Date.now(); + // add the new node to the parent + if (parent) { + parent.contents[name] = node; + parent.timestamp = node.timestamp; + } + return node; + }, + getFileDataAsTypedArray(node) { + if (!node.contents) return new Uint8Array(0); + if (node.contents.subarray) return node.contents.subarray(0, node.usedBytes); + // Make sure to not return excess unused bytes. + return new Uint8Array(node.contents); + }, + expandFileStorage(node, newCapacity) { + var prevCapacity = node.contents ? node.contents.length : 0; + if (prevCapacity >= newCapacity) return; + // No need to expand, the storage was already large enough. + // Don't expand strictly to the given requested limit if it's only a very small increase, but instead geometrically grow capacity. + // For small filesizes (<1MB), perform size*2 geometric increase, but for large sizes, do a much more conservative size*1.125 increase to + // avoid overshooting the allocation cap by a very large margin. + var CAPACITY_DOUBLING_MAX = 1024 * 1024; + newCapacity = Math.max(newCapacity, (prevCapacity * (prevCapacity < CAPACITY_DOUBLING_MAX ? 2 : 1.125)) >>> 0); + if (prevCapacity != 0) newCapacity = Math.max(newCapacity, 256); + // At minimum allocate 256b for each file when expanding. + var oldContents = node.contents; + node.contents = new Uint8Array(newCapacity); + // Allocate new storage. + if (node.usedBytes > 0) node.contents.set(oldContents.subarray(0, node.usedBytes), 0); + }, + // Copy old data over to the new storage. + resizeFileStorage(node, newSize) { + if (node.usedBytes == newSize) return; + if (newSize == 0) { + node.contents = null; + // Fully decommit when requesting a resize to zero. + node.usedBytes = 0; + } else { + var oldContents = node.contents; + node.contents = new Uint8Array(newSize); + // Allocate new storage. + if (oldContents) { + node.contents.set(oldContents.subarray(0, Math.min(newSize, node.usedBytes))); + } + // Copy old data over to the new storage. + node.usedBytes = newSize; + } + }, + node_ops: { + getattr(node) { + var attr = {}; + // device numbers reuse inode numbers. + attr.dev = FS.isChrdev(node.mode) ? node.id : 1; + attr.ino = node.id; + attr.mode = node.mode; + attr.nlink = 1; + attr.uid = 0; + attr.gid = 0; + attr.rdev = node.rdev; + if (FS.isDir(node.mode)) { + attr.size = 4096; + } else if (FS.isFile(node.mode)) { + attr.size = node.usedBytes; + } else if (FS.isLink(node.mode)) { + attr.size = node.link.length; + } else { + attr.size = 0; + } + attr.atime = new Date(node.timestamp); + attr.mtime = new Date(node.timestamp); + attr.ctime = new Date(node.timestamp); + // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize), + // but this is not required by the standard. 
+ attr.blksize = 4096; + attr.blocks = Math.ceil(attr.size / attr.blksize); + return attr; + }, + setattr(node, attr) { + if (attr.mode !== undefined) { + node.mode = attr.mode; + } + if (attr.timestamp !== undefined) { + node.timestamp = attr.timestamp; + } + if (attr.size !== undefined) { + MEMFS.resizeFileStorage(node, attr.size); + } + }, + lookup(parent, name) { + throw MEMFS.doesNotExistError; + }, + mknod(parent, name, mode, dev) { + return MEMFS.createNode(parent, name, mode, dev); + }, + rename(old_node, new_dir, new_name) { + // if we're overwriting a directory at new_name, make sure it's empty. + if (FS.isDir(old_node.mode)) { + var new_node; + try { + new_node = FS.lookupNode(new_dir, new_name); + } catch (e) {} + if (new_node) { + for (var i in new_node.contents) { + throw new FS.ErrnoError(55); + } + } + } + // do the internal rewiring + delete old_node.parent.contents[old_node.name]; + old_node.parent.timestamp = Date.now(); + old_node.name = new_name; + new_dir.contents[new_name] = old_node; + new_dir.timestamp = old_node.parent.timestamp; + }, + unlink(parent, name) { + delete parent.contents[name]; + parent.timestamp = Date.now(); + }, + rmdir(parent, name) { + var node = FS.lookupNode(parent, name); + for (var i in node.contents) { + throw new FS.ErrnoError(55); + } + delete parent.contents[name]; + parent.timestamp = Date.now(); + }, + readdir(node) { + var entries = [ ".", ".." ]; + for (var key of Object.keys(node.contents)) { + entries.push(key); + } + return entries; + }, + symlink(parent, newname, oldpath) { + var node = MEMFS.createNode(parent, newname, 511 | 40960, 0); + node.link = oldpath; + return node; + }, + readlink(node) { + if (!FS.isLink(node.mode)) { + throw new FS.ErrnoError(28); + } + return node.link; + } + }, + stream_ops: { + read(stream, buffer, offset, length, position) { + var contents = stream.node.contents; + if (position >= stream.node.usedBytes) return 0; + var size = Math.min(stream.node.usedBytes - position, length); + if (size > 8 && contents.subarray) { + // non-trivial, and typed array + buffer.set(contents.subarray(position, position + size), offset); + } else { + for (var i = 0; i < size; i++) buffer[offset + i] = contents[position + i]; + } + return size; + }, + write(stream, buffer, offset, length, position, canOwn) { + if (!length) return 0; + var node = stream.node; + node.timestamp = Date.now(); + if (buffer.subarray && (!node.contents || node.contents.subarray)) { + // This write is from a typed array to a typed array? + if (canOwn) { + node.contents = buffer.subarray(offset, offset + length); + node.usedBytes = length; + return length; + } else if (node.usedBytes === 0 && position === 0) { + // If this is a simple first write to an empty file, do a fast set since we don't need to care about old data. + node.contents = buffer.slice(offset, offset + length); + node.usedBytes = length; + return length; + } else if (position + length <= node.usedBytes) { + // Writing to an already allocated and used subrange of the file? + node.contents.set(buffer.subarray(offset, offset + length), position); + return length; + } + } + // Appending to an existing file and we need to reallocate, or source data did not come as a typed array. + MEMFS.expandFileStorage(node, position + length); + if (node.contents.subarray && buffer.subarray) { + // Use typed array write which is available. 
+ node.contents.set(buffer.subarray(offset, offset + length), position); + } else { + for (var i = 0; i < length; i++) { + node.contents[position + i] = buffer[offset + i]; + } + } + node.usedBytes = Math.max(node.usedBytes, position + length); + return length; + }, + llseek(stream, offset, whence) { + var position = offset; + if (whence === 1) { + position += stream.position; + } else if (whence === 2) { + if (FS.isFile(stream.node.mode)) { + position += stream.node.usedBytes; + } + } + if (position < 0) { + throw new FS.ErrnoError(28); + } + return position; + }, + allocate(stream, offset, length) { + MEMFS.expandFileStorage(stream.node, offset + length); + stream.node.usedBytes = Math.max(stream.node.usedBytes, offset + length); + }, + mmap(stream, length, position, prot, flags) { + if (!FS.isFile(stream.node.mode)) { + throw new FS.ErrnoError(43); + } + var ptr; + var allocated; + var contents = stream.node.contents; + // Only make a new copy when MAP_PRIVATE is specified. + if (!(flags & 2) && contents && contents.buffer === HEAP8.buffer) { + // We can't emulate MAP_SHARED when the file is not backed by the + // buffer we're mapping to (e.g. the HEAP buffer). + allocated = false; + ptr = contents.byteOffset; + } else { + allocated = true; + ptr = mmapAlloc(length); + if (!ptr) { + throw new FS.ErrnoError(48); + } + if (contents) { + // Try to avoid unnecessary slices. + if (position > 0 || position + length < contents.length) { + if (contents.subarray) { + contents = contents.subarray(position, position + length); + } else { + contents = Array.prototype.slice.call(contents, position, position + length); + } + } + HEAP8.set(contents, ptr); + } + } + return { + ptr, + allocated + }; + }, + msync(stream, buffer, offset, length, mmapFlags) { + MEMFS.stream_ops.write(stream, buffer, 0, length, offset, false); + // should we check if bytesWritten and length are the same? + return 0; + } + } +}; + +/** @param {boolean=} noRunDep */ var asyncLoad = (url, onload, onerror, noRunDep) => { + var dep = !noRunDep ? getUniqueRunDependency(`al ${url}`) : ""; + readAsync(url).then(arrayBuffer => { + onload(new Uint8Array(arrayBuffer)); + if (dep) removeRunDependency(dep); + }, err => { + if (onerror) { + onerror(); + } else { + throw `Loading data file "${url}" failed.`; + } + }); + if (dep) addRunDependency(dep); +}; + +var FS_createDataFile = (parent, name, fileData, canRead, canWrite, canOwn) => { + FS.createDataFile(parent, name, fileData, canRead, canWrite, canOwn); +}; + +var preloadPlugins = Module["preloadPlugins"] || []; + +var FS_handledByPreloadPlugin = (byteArray, fullname, finish, onerror) => { + // Ensure plugins are ready. + if (typeof Browser != "undefined") Browser.init(); + var handled = false; + preloadPlugins.forEach(plugin => { + if (handled) return; + if (plugin["canHandle"](fullname)) { + plugin["handle"](byteArray, fullname, finish, onerror); + handled = true; + } + }); + return handled; +}; + +var FS_createPreloadedFile = (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn, preFinish) => { + // TODO we should allow people to just pass in a complete filename instead + // of parent and name being that we just join them anyways + var fullname = name ? 
PATH_FS.resolve(PATH.join2(parent, name)) : parent; + var dep = getUniqueRunDependency(`cp ${fullname}`); + // might have several active requests for the same fullname + function processData(byteArray) { + function finish(byteArray) { + preFinish?.(); + if (!dontCreateFile) { + FS_createDataFile(parent, name, byteArray, canRead, canWrite, canOwn); + } + onload?.(); + removeRunDependency(dep); + } + if (FS_handledByPreloadPlugin(byteArray, fullname, finish, () => { + onerror?.(); + removeRunDependency(dep); + })) { + return; + } + finish(byteArray); + } + addRunDependency(dep); + if (typeof url == "string") { + asyncLoad(url, processData, onerror); + } else { + processData(url); + } +}; + +var FS_modeStringToFlags = str => { + var flagModes = { + "r": 0, + "r+": 2, + "w": 512 | 64 | 1, + "w+": 512 | 64 | 2, + "a": 1024 | 64 | 1, + "a+": 1024 | 64 | 2 + }; + var flags = flagModes[str]; + if (typeof flags == "undefined") { + throw new Error(`Unknown file open mode: ${str}`); + } + return flags; +}; + +var FS_getMode = (canRead, canWrite) => { + var mode = 0; + if (canRead) mode |= 292 | 73; + if (canWrite) mode |= 146; + return mode; +}; + +var FS = { + root: null, + mounts: [], + devices: {}, + streams: [], + nextInode: 1, + nameTable: null, + currentPath: "/", + initialized: false, + ignorePermissions: true, + ErrnoError: class { + name="ErrnoError"; + // We set the `name` property to be able to identify `FS.ErrnoError` + // - the `name` is a standard ECMA-262 property of error objects. Kind of good to have it anyway. + // - when using PROXYFS, an error can come from an underlying FS + // as different FS objects have their own FS.ErrnoError each, + // the test `err instanceof FS.ErrnoError` won't detect an error coming from another filesystem, causing bugs. + // we'll use the reliable test `err.name == "ErrnoError"` instead + constructor(errno) { + this.errno = errno; + } + }, + filesystems: null, + syncFSRequests: 0, + readFiles: {}, + FSStream: class { + shared={}; + get object() { + return this.node; + } + set object(val) { + this.node = val; + } + get isRead() { + return (this.flags & 2097155) !== 1; + } + get isWrite() { + return (this.flags & 2097155) !== 0; + } + get isAppend() { + return (this.flags & 1024); + } + get flags() { + return this.shared.flags; + } + set flags(val) { + this.shared.flags = val; + } + get position() { + return this.shared.position; + } + set position(val) { + this.shared.position = val; + } + }, + FSNode: class { + node_ops={}; + stream_ops={}; + readMode=292 | 73; + writeMode=146; + mounted=null; + constructor(parent, name, mode, rdev) { + if (!parent) { + parent = this; + } + // root node sets parent to itself + this.parent = parent; + this.mount = parent.mount; + this.id = FS.nextInode++; + this.name = name; + this.mode = mode; + this.rdev = rdev; + } + get read() { + return (this.mode & this.readMode) === this.readMode; + } + set read(val) { + val ? this.mode |= this.readMode : this.mode &= ~this.readMode; + } + get write() { + return (this.mode & this.writeMode) === this.writeMode; + } + set write(val) { + val ? 
this.mode |= this.writeMode : this.mode &= ~this.writeMode; + } + get isFolder() { + return FS.isDir(this.mode); + } + get isDevice() { + return FS.isChrdev(this.mode); + } + }, + lookupPath(path, opts = {}) { + path = PATH_FS.resolve(path); + if (!path) return { + path: "", + node: null + }; + var defaults = { + follow_mount: true, + recurse_count: 0 + }; + opts = Object.assign(defaults, opts); + if (opts.recurse_count > 8) { + // max recursive lookup of 8 + throw new FS.ErrnoError(32); + } + // split the absolute path + var parts = path.split("/").filter(p => !!p); + // start at the root + var current = FS.root; + var current_path = "/"; + for (var i = 0; i < parts.length; i++) { + var islast = (i === parts.length - 1); + if (islast && opts.parent) { + // stop resolving + break; + } + current = FS.lookupNode(current, parts[i]); + current_path = PATH.join2(current_path, parts[i]); + // jump to the mount's root node if this is a mountpoint + if (FS.isMountpoint(current)) { + if (!islast || (islast && opts.follow_mount)) { + current = current.mounted.root; + } + } + // by default, lookupPath will not follow a symlink if it is the final path component. + // setting opts.follow = true will override this behavior. + if (!islast || opts.follow) { + var count = 0; + while (FS.isLink(current.mode)) { + var link = FS.readlink(current_path); + current_path = PATH_FS.resolve(PATH.dirname(current_path), link); + var lookup = FS.lookupPath(current_path, { + recurse_count: opts.recurse_count + 1 + }); + current = lookup.node; + if (count++ > 40) { + // limit max consecutive symlinks to 40 (SYMLOOP_MAX). + throw new FS.ErrnoError(32); + } + } + } + } + return { + path: current_path, + node: current + }; + }, + getPath(node) { + var path; + while (true) { + if (FS.isRoot(node)) { + var mount = node.mount.mountpoint; + if (!path) return mount; + return mount[mount.length - 1] !== "/" ? `${mount}/${path}` : mount + path; + } + path = path ? 
`${node.name}/${path}` : node.name; + node = node.parent; + } + }, + hashName(parentid, name) { + var hash = 0; + for (var i = 0; i < name.length; i++) { + hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0; + } + return ((parentid + hash) >>> 0) % FS.nameTable.length; + }, + hashAddNode(node) { + var hash = FS.hashName(node.parent.id, node.name); + node.name_next = FS.nameTable[hash]; + FS.nameTable[hash] = node; + }, + hashRemoveNode(node) { + var hash = FS.hashName(node.parent.id, node.name); + if (FS.nameTable[hash] === node) { + FS.nameTable[hash] = node.name_next; + } else { + var current = FS.nameTable[hash]; + while (current) { + if (current.name_next === node) { + current.name_next = node.name_next; + break; + } + current = current.name_next; + } + } + }, + lookupNode(parent, name) { + var errCode = FS.mayLookup(parent); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + var hash = FS.hashName(parent.id, name); + for (var node = FS.nameTable[hash]; node; node = node.name_next) { + var nodeName = node.name; + if (node.parent.id === parent.id && nodeName === name) { + return node; + } + } + // if we failed to find it in the cache, call into the VFS + return FS.lookup(parent, name); + }, + createNode(parent, name, mode, rdev) { + var node = new FS.FSNode(parent, name, mode, rdev); + FS.hashAddNode(node); + return node; + }, + destroyNode(node) { + FS.hashRemoveNode(node); + }, + isRoot(node) { + return node === node.parent; + }, + isMountpoint(node) { + return !!node.mounted; + }, + isFile(mode) { + return (mode & 61440) === 32768; + }, + isDir(mode) { + return (mode & 61440) === 16384; + }, + isLink(mode) { + return (mode & 61440) === 40960; + }, + isChrdev(mode) { + return (mode & 61440) === 8192; + }, + isBlkdev(mode) { + return (mode & 61440) === 24576; + }, + isFIFO(mode) { + return (mode & 61440) === 4096; + }, + isSocket(mode) { + return (mode & 49152) === 49152; + }, + flagsToPermissionString(flag) { + var perms = [ "r", "w", "rw" ][flag & 3]; + if ((flag & 512)) { + perms += "w"; + } + return perms; + }, + nodePermissions(node, perms) { + if (FS.ignorePermissions) { + return 0; + } + // return 0 if any user, group or owner bits are set. + if (perms.includes("r") && !(node.mode & 292)) { + return 2; + } else if (perms.includes("w") && !(node.mode & 146)) { + return 2; + } else if (perms.includes("x") && !(node.mode & 73)) { + return 2; + } + return 0; + }, + mayLookup(dir) { + if (!FS.isDir(dir.mode)) return 54; + var errCode = FS.nodePermissions(dir, "x"); + if (errCode) return errCode; + if (!dir.node_ops.lookup) return 2; + return 0; + }, + mayCreate(dir, name) { + try { + var node = FS.lookupNode(dir, name); + return 20; + } catch (e) {} + return FS.nodePermissions(dir, "wx"); + }, + mayDelete(dir, name, isdir) { + var node; + try { + node = FS.lookupNode(dir, name); + } catch (e) { + return e.errno; + } + var errCode = FS.nodePermissions(dir, "wx"); + if (errCode) { + return errCode; + } + if (isdir) { + if (!FS.isDir(node.mode)) { + return 54; + } + if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) { + return 10; + } + } else { + if (FS.isDir(node.mode)) { + return 31; + } + } + return 0; + }, + mayOpen(node, flags) { + if (!node) { + return 44; + } + if (FS.isLink(node.mode)) { + return 32; + } else if (FS.isDir(node.mode)) { + if (FS.flagsToPermissionString(flags) !== "r" || // opening for write + (flags & 512)) { + // TODO: check for O_SEARCH? 
(== search for dir only) + return 31; + } + } + return FS.nodePermissions(node, FS.flagsToPermissionString(flags)); + }, + MAX_OPEN_FDS: 4096, + nextfd() { + for (var fd = 0; fd <= FS.MAX_OPEN_FDS; fd++) { + if (!FS.streams[fd]) { + return fd; + } + } + throw new FS.ErrnoError(33); + }, + getStreamChecked(fd) { + var stream = FS.getStream(fd); + if (!stream) { + throw new FS.ErrnoError(8); + } + return stream; + }, + getStream: fd => FS.streams[fd], + createStream(stream, fd = -1) { + // clone it, so we can return an instance of FSStream + stream = Object.assign(new FS.FSStream, stream); + if (fd == -1) { + fd = FS.nextfd(); + } + stream.fd = fd; + FS.streams[fd] = stream; + return stream; + }, + closeStream(fd) { + FS.streams[fd] = null; + }, + dupStream(origStream, fd = -1) { + var stream = FS.createStream(origStream, fd); + stream.stream_ops?.dup?.(stream); + return stream; + }, + chrdev_stream_ops: { + open(stream) { + var device = FS.getDevice(stream.node.rdev); + // override node's stream ops with the device's + stream.stream_ops = device.stream_ops; + // forward the open call + stream.stream_ops.open?.(stream); + }, + llseek() { + throw new FS.ErrnoError(70); + } + }, + major: dev => ((dev) >> 8), + minor: dev => ((dev) & 255), + makedev: (ma, mi) => ((ma) << 8 | (mi)), + registerDevice(dev, ops) { + FS.devices[dev] = { + stream_ops: ops + }; + }, + getDevice: dev => FS.devices[dev], + getMounts(mount) { + var mounts = []; + var check = [ mount ]; + while (check.length) { + var m = check.pop(); + mounts.push(m); + check.push(...m.mounts); + } + return mounts; + }, + syncfs(populate, callback) { + if (typeof populate == "function") { + callback = populate; + populate = false; + } + FS.syncFSRequests++; + if (FS.syncFSRequests > 1) { + err(`warning: ${FS.syncFSRequests} FS.syncfs operations in flight at once, probably just doing extra work`); + } + var mounts = FS.getMounts(FS.root.mount); + var completed = 0; + function doCallback(errCode) { + FS.syncFSRequests--; + return callback(errCode); + } + function done(errCode) { + if (errCode) { + if (!done.errored) { + done.errored = true; + return doCallback(errCode); + } + return; + } + if (++completed >= mounts.length) { + doCallback(null); + } + } + // sync all mounts + mounts.forEach(mount => { + if (!mount.type.syncfs) { + return done(null); + } + mount.type.syncfs(mount, populate, done); + }); + }, + mount(type, opts, mountpoint) { + var root = mountpoint === "/"; + var pseudo = !mountpoint; + var node; + if (root && FS.root) { + throw new FS.ErrnoError(10); + } else if (!root && !pseudo) { + var lookup = FS.lookupPath(mountpoint, { + follow_mount: false + }); + mountpoint = lookup.path; + // use the absolute path + node = lookup.node; + if (FS.isMountpoint(node)) { + throw new FS.ErrnoError(10); + } + if (!FS.isDir(node.mode)) { + throw new FS.ErrnoError(54); + } + } + var mount = { + type, + opts, + mountpoint, + mounts: [] + }; + // create a root node for the fs + var mountRoot = type.mount(mount); + mountRoot.mount = mount; + mount.root = mountRoot; + if (root) { + FS.root = mountRoot; + } else if (node) { + // set as a mountpoint + node.mounted = mount; + // add the new mount to the current mount's children + if (node.mount) { + node.mount.mounts.push(mount); + } + } + return mountRoot; + }, + unmount(mountpoint) { + var lookup = FS.lookupPath(mountpoint, { + follow_mount: false + }); + if (!FS.isMountpoint(lookup.node)) { + throw new FS.ErrnoError(28); + } + // destroy the nodes for this mount, and all its child mounts + var 
node = lookup.node; + var mount = node.mounted; + var mounts = FS.getMounts(mount); + Object.keys(FS.nameTable).forEach(hash => { + var current = FS.nameTable[hash]; + while (current) { + var next = current.name_next; + if (mounts.includes(current.mount)) { + FS.destroyNode(current); + } + current = next; + } + }); + // no longer a mountpoint + node.mounted = null; + // remove this mount from the child mounts + var idx = node.mount.mounts.indexOf(mount); + node.mount.mounts.splice(idx, 1); + }, + lookup(parent, name) { + return parent.node_ops.lookup(parent, name); + }, + mknod(path, mode, dev) { + var lookup = FS.lookupPath(path, { + parent: true + }); + var parent = lookup.node; + var name = PATH.basename(path); + if (!name || name === "." || name === "..") { + throw new FS.ErrnoError(28); + } + var errCode = FS.mayCreate(parent, name); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + if (!parent.node_ops.mknod) { + throw new FS.ErrnoError(63); + } + return parent.node_ops.mknod(parent, name, mode, dev); + }, + statfs(path) { + // NOTE: None of the defaults here are true. We're just returning safe and + // sane values. + var rtn = { + bsize: 4096, + frsize: 4096, + blocks: 1e6, + bfree: 5e5, + bavail: 5e5, + files: FS.nextInode, + ffree: FS.nextInode - 1, + fsid: 42, + flags: 2, + namelen: 255 + }; + var parent = FS.lookupPath(path, { + follow: true + }).node; + if (parent?.node_ops.statfs) { + Object.assign(rtn, parent.node_ops.statfs(parent.mount.opts.root)); + } + return rtn; + }, + create(path, mode = 438) { + mode &= 4095; + mode |= 32768; + return FS.mknod(path, mode, 0); + }, + mkdir(path, mode = 511) { + mode &= 511 | 512; + mode |= 16384; + return FS.mknod(path, mode, 0); + }, + mkdirTree(path, mode) { + var dirs = path.split("/"); + var d = ""; + for (var i = 0; i < dirs.length; ++i) { + if (!dirs[i]) continue; + d += "/" + dirs[i]; + try { + FS.mkdir(d, mode); + } catch (e) { + if (e.errno != 20) throw e; + } + } + }, + mkdev(path, mode, dev) { + if (typeof dev == "undefined") { + dev = mode; + mode = 438; + } + mode |= 8192; + return FS.mknod(path, mode, dev); + }, + symlink(oldpath, newpath) { + if (!PATH_FS.resolve(oldpath)) { + throw new FS.ErrnoError(44); + } + var lookup = FS.lookupPath(newpath, { + parent: true + }); + var parent = lookup.node; + if (!parent) { + throw new FS.ErrnoError(44); + } + var newname = PATH.basename(newpath); + var errCode = FS.mayCreate(parent, newname); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + if (!parent.node_ops.symlink) { + throw new FS.ErrnoError(63); + } + return parent.node_ops.symlink(parent, newname, oldpath); + }, + rename(old_path, new_path) { + var old_dirname = PATH.dirname(old_path); + var new_dirname = PATH.dirname(new_path); + var old_name = PATH.basename(old_path); + var new_name = PATH.basename(new_path); + // parents must exist + var lookup, old_dir, new_dir; + // let the errors from non existent directories percolate up + lookup = FS.lookupPath(old_path, { + parent: true + }); + old_dir = lookup.node; + lookup = FS.lookupPath(new_path, { + parent: true + }); + new_dir = lookup.node; + if (!old_dir || !new_dir) throw new FS.ErrnoError(44); + // need to be part of the same mount + if (old_dir.mount !== new_dir.mount) { + throw new FS.ErrnoError(75); + } + // source must exist + var old_node = FS.lookupNode(old_dir, old_name); + // old path should not be an ancestor of the new path + var relative = PATH_FS.relative(old_path, new_dirname); + if (relative.charAt(0) !== ".") { + throw new 
FS.ErrnoError(28); + } + // new path should not be an ancestor of the old path + relative = PATH_FS.relative(new_path, old_dirname); + if (relative.charAt(0) !== ".") { + throw new FS.ErrnoError(55); + } + // see if the new path already exists + var new_node; + try { + new_node = FS.lookupNode(new_dir, new_name); + } catch (e) {} + // early out if nothing needs to change + if (old_node === new_node) { + return; + } + // we'll need to delete the old entry + var isdir = FS.isDir(old_node.mode); + var errCode = FS.mayDelete(old_dir, old_name, isdir); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + // need delete permissions if we'll be overwriting. + // need create permissions if new doesn't already exist. + errCode = new_node ? FS.mayDelete(new_dir, new_name, isdir) : FS.mayCreate(new_dir, new_name); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + if (!old_dir.node_ops.rename) { + throw new FS.ErrnoError(63); + } + if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) { + throw new FS.ErrnoError(10); + } + // if we are going to change the parent, check write permissions + if (new_dir !== old_dir) { + errCode = FS.nodePermissions(old_dir, "w"); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + } + // remove the node from the lookup hash + FS.hashRemoveNode(old_node); + // do the underlying fs rename + try { + old_dir.node_ops.rename(old_node, new_dir, new_name); + // update old node (we do this here to avoid each backend + // needing to) + old_node.parent = new_dir; + } catch (e) { + throw e; + } finally { + // add the node back to the hash (in case node_ops.rename + // changed its name) + FS.hashAddNode(old_node); + } + }, + rmdir(path) { + var lookup = FS.lookupPath(path, { + parent: true + }); + var parent = lookup.node; + var name = PATH.basename(path); + var node = FS.lookupNode(parent, name); + var errCode = FS.mayDelete(parent, name, true); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + if (!parent.node_ops.rmdir) { + throw new FS.ErrnoError(63); + } + if (FS.isMountpoint(node)) { + throw new FS.ErrnoError(10); + } + parent.node_ops.rmdir(parent, name); + FS.destroyNode(node); + }, + readdir(path) { + var lookup = FS.lookupPath(path, { + follow: true + }); + var node = lookup.node; + if (!node.node_ops.readdir) { + throw new FS.ErrnoError(54); + } + return node.node_ops.readdir(node); + }, + unlink(path) { + var lookup = FS.lookupPath(path, { + parent: true + }); + var parent = lookup.node; + if (!parent) { + throw new FS.ErrnoError(44); + } + var name = PATH.basename(path); + var node = FS.lookupNode(parent, name); + var errCode = FS.mayDelete(parent, name, false); + if (errCode) { + // According to POSIX, we should map EISDIR to EPERM, but + // we instead do what Linux does (and we must, as we use + // the musl linux libc). 
+ throw new FS.ErrnoError(errCode); + } + if (!parent.node_ops.unlink) { + throw new FS.ErrnoError(63); + } + if (FS.isMountpoint(node)) { + throw new FS.ErrnoError(10); + } + parent.node_ops.unlink(parent, name); + FS.destroyNode(node); + }, + readlink(path) { + var lookup = FS.lookupPath(path); + var link = lookup.node; + if (!link) { + throw new FS.ErrnoError(44); + } + if (!link.node_ops.readlink) { + throw new FS.ErrnoError(28); + } + return link.node_ops.readlink(link); + }, + stat(path, dontFollow) { + var lookup = FS.lookupPath(path, { + follow: !dontFollow + }); + var node = lookup.node; + if (!node) { + throw new FS.ErrnoError(44); + } + if (!node.node_ops.getattr) { + throw new FS.ErrnoError(63); + } + return node.node_ops.getattr(node); + }, + lstat(path) { + return FS.stat(path, true); + }, + chmod(path, mode, dontFollow) { + var node; + if (typeof path == "string") { + var lookup = FS.lookupPath(path, { + follow: !dontFollow + }); + node = lookup.node; + } else { + node = path; + } + if (!node.node_ops.setattr) { + throw new FS.ErrnoError(63); + } + node.node_ops.setattr(node, { + mode: (mode & 4095) | (node.mode & ~4095), + timestamp: Date.now() + }); + }, + lchmod(path, mode) { + FS.chmod(path, mode, true); + }, + fchmod(fd, mode) { + var stream = FS.getStreamChecked(fd); + FS.chmod(stream.node, mode); + }, + chown(path, uid, gid, dontFollow) { + var node; + if (typeof path == "string") { + var lookup = FS.lookupPath(path, { + follow: !dontFollow + }); + node = lookup.node; + } else { + node = path; + } + if (!node.node_ops.setattr) { + throw new FS.ErrnoError(63); + } + node.node_ops.setattr(node, { + timestamp: Date.now() + }); + }, + // we ignore the uid / gid for now + lchown(path, uid, gid) { + FS.chown(path, uid, gid, true); + }, + fchown(fd, uid, gid) { + var stream = FS.getStreamChecked(fd); + FS.chown(stream.node, uid, gid); + }, + truncate(path, len) { + if (len < 0) { + throw new FS.ErrnoError(28); + } + var node; + if (typeof path == "string") { + var lookup = FS.lookupPath(path, { + follow: true + }); + node = lookup.node; + } else { + node = path; + } + if (!node.node_ops.setattr) { + throw new FS.ErrnoError(63); + } + if (FS.isDir(node.mode)) { + throw new FS.ErrnoError(31); + } + if (!FS.isFile(node.mode)) { + throw new FS.ErrnoError(28); + } + var errCode = FS.nodePermissions(node, "w"); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + node.node_ops.setattr(node, { + size: len, + timestamp: Date.now() + }); + }, + ftruncate(fd, len) { + var stream = FS.getStreamChecked(fd); + if ((stream.flags & 2097155) === 0) { + throw new FS.ErrnoError(28); + } + FS.truncate(stream.node, len); + }, + utime(path, atime, mtime) { + var lookup = FS.lookupPath(path, { + follow: true + }); + var node = lookup.node; + node.node_ops.setattr(node, { + timestamp: Math.max(atime, mtime) + }); + }, + open(path, flags, mode = 438) { + if (path === "") { + throw new FS.ErrnoError(44); + } + flags = typeof flags == "string" ? 
FS_modeStringToFlags(flags) : flags; + if ((flags & 64)) { + mode = (mode & 4095) | 32768; + } else { + mode = 0; + } + var node; + if (typeof path == "object") { + node = path; + } else { + path = PATH.normalize(path); + try { + var lookup = FS.lookupPath(path, { + follow: !(flags & 131072) + }); + node = lookup.node; + } catch (e) {} + } + // perhaps we need to create the node + var created = false; + if ((flags & 64)) { + if (node) { + // if O_CREAT and O_EXCL are set, error out if the node already exists + if ((flags & 128)) { + throw new FS.ErrnoError(20); + } + } else { + // node doesn't exist, try to create it + node = FS.mknod(path, mode, 0); + created = true; + } + } + if (!node) { + throw new FS.ErrnoError(44); + } + // can't truncate a device + if (FS.isChrdev(node.mode)) { + flags &= ~512; + } + // if asked only for a directory, then this must be one + if ((flags & 65536) && !FS.isDir(node.mode)) { + throw new FS.ErrnoError(54); + } + // check permissions, if this is not a file we just created now (it is ok to + // create and write to a file with read-only permissions; it is read-only + // for later use) + if (!created) { + var errCode = FS.mayOpen(node, flags); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + } + // do truncation if necessary + if ((flags & 512) && !created) { + FS.truncate(node, 0); + } + // we've already handled these, don't pass down to the underlying vfs + flags &= ~(128 | 512 | 131072); + // register the stream with the filesystem + var stream = FS.createStream({ + node, + path: FS.getPath(node), + // we want the absolute path to the node + flags, + seekable: true, + position: 0, + stream_ops: node.stream_ops, + // used by the file family libc calls (fopen, fwrite, ferror, etc.) + ungotten: [], + error: false + }); + // call the new stream's open function + if (stream.stream_ops.open) { + stream.stream_ops.open(stream); + } + if (Module["logReadFiles"] && !(flags & 1)) { + if (!(path in FS.readFiles)) { + FS.readFiles[path] = 1; + } + } + return stream; + }, + close(stream) { + if (FS.isClosed(stream)) { + throw new FS.ErrnoError(8); + } + if (stream.getdents) stream.getdents = null; + // free readdir state + try { + if (stream.stream_ops.close) { + stream.stream_ops.close(stream); + } + } catch (e) { + throw e; + } finally { + FS.closeStream(stream.fd); + } + stream.fd = null; + }, + isClosed(stream) { + return stream.fd === null; + }, + llseek(stream, offset, whence) { + if (FS.isClosed(stream)) { + throw new FS.ErrnoError(8); + } + if (!stream.seekable || !stream.stream_ops.llseek) { + throw new FS.ErrnoError(70); + } + if (whence != 0 && whence != 1 && whence != 2) { + throw new FS.ErrnoError(28); + } + stream.position = stream.stream_ops.llseek(stream, offset, whence); + stream.ungotten = []; + return stream.position; + }, + read(stream, buffer, offset, length, position) { + if (length < 0 || position < 0) { + throw new FS.ErrnoError(28); + } + if (FS.isClosed(stream)) { + throw new FS.ErrnoError(8); + } + if ((stream.flags & 2097155) === 1) { + throw new FS.ErrnoError(8); + } + if (FS.isDir(stream.node.mode)) { + throw new FS.ErrnoError(31); + } + if (!stream.stream_ops.read) { + throw new FS.ErrnoError(28); + } + var seeking = typeof position != "undefined"; + if (!seeking) { + position = stream.position; + } else if (!stream.seekable) { + throw new FS.ErrnoError(70); + } + var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position); + if (!seeking) stream.position += bytesRead; + return bytesRead; + }, + 
write(stream, buffer, offset, length, position, canOwn) { + if (length < 0 || position < 0) { + throw new FS.ErrnoError(28); + } + if (FS.isClosed(stream)) { + throw new FS.ErrnoError(8); + } + if ((stream.flags & 2097155) === 0) { + throw new FS.ErrnoError(8); + } + if (FS.isDir(stream.node.mode)) { + throw new FS.ErrnoError(31); + } + if (!stream.stream_ops.write) { + throw new FS.ErrnoError(28); + } + if (stream.seekable && stream.flags & 1024) { + // seek to the end before writing in append mode + FS.llseek(stream, 0, 2); + } + var seeking = typeof position != "undefined"; + if (!seeking) { + position = stream.position; + } else if (!stream.seekable) { + throw new FS.ErrnoError(70); + } + var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn); + if (!seeking) stream.position += bytesWritten; + return bytesWritten; + }, + allocate(stream, offset, length) { + if (FS.isClosed(stream)) { + throw new FS.ErrnoError(8); + } + if (offset < 0 || length <= 0) { + throw new FS.ErrnoError(28); + } + if ((stream.flags & 2097155) === 0) { + throw new FS.ErrnoError(8); + } + if (!FS.isFile(stream.node.mode) && !FS.isDir(stream.node.mode)) { + throw new FS.ErrnoError(43); + } + if (!stream.stream_ops.allocate) { + throw new FS.ErrnoError(138); + } + stream.stream_ops.allocate(stream, offset, length); + }, + mmap(stream, length, position, prot, flags) { + // User requests writing to file (prot & PROT_WRITE != 0). + // Checking if we have permissions to write to the file unless + // MAP_PRIVATE flag is set. According to POSIX spec it is possible + // to write to file opened in read-only mode with MAP_PRIVATE flag, + // as all modifications will be visible only in the memory of + // the current process. + if ((prot & 2) !== 0 && (flags & 2) === 0 && (stream.flags & 2097155) !== 2) { + throw new FS.ErrnoError(2); + } + if ((stream.flags & 2097155) === 1) { + throw new FS.ErrnoError(2); + } + if (!stream.stream_ops.mmap) { + throw new FS.ErrnoError(43); + } + if (!length) { + throw new FS.ErrnoError(28); + } + return stream.stream_ops.mmap(stream, length, position, prot, flags); + }, + msync(stream, buffer, offset, length, mmapFlags) { + if (!stream.stream_ops.msync) { + return 0; + } + return stream.stream_ops.msync(stream, buffer, offset, length, mmapFlags); + }, + ioctl(stream, cmd, arg) { + if (!stream.stream_ops.ioctl) { + throw new FS.ErrnoError(59); + } + return stream.stream_ops.ioctl(stream, cmd, arg); + }, + readFile(path, opts = {}) { + opts.flags = opts.flags || 0; + opts.encoding = opts.encoding || "binary"; + if (opts.encoding !== "utf8" && opts.encoding !== "binary") { + throw new Error(`Invalid encoding type "${opts.encoding}"`); + } + var ret; + var stream = FS.open(path, opts.flags); + var stat = FS.stat(path); + var length = stat.size; + var buf = new Uint8Array(length); + FS.read(stream, buf, 0, length, 0); + if (opts.encoding === "utf8") { + ret = UTF8ArrayToString(buf); + } else if (opts.encoding === "binary") { + ret = buf; + } + FS.close(stream); + return ret; + }, + writeFile(path, data, opts = {}) { + opts.flags = opts.flags || 577; + var stream = FS.open(path, opts.flags, opts.mode); + if (typeof data == "string") { + var buf = new Uint8Array(lengthBytesUTF8(data) + 1); + var actualNumBytes = stringToUTF8Array(data, buf, 0, buf.length); + FS.write(stream, buf, 0, actualNumBytes, undefined, opts.canOwn); + } else if (ArrayBuffer.isView(data)) { + FS.write(stream, data, 0, data.byteLength, undefined, opts.canOwn); + } else { + throw new 
Error("Unsupported data type"); + } + FS.close(stream); + }, + cwd: () => FS.currentPath, + chdir(path) { + var lookup = FS.lookupPath(path, { + follow: true + }); + if (lookup.node === null) { + throw new FS.ErrnoError(44); + } + if (!FS.isDir(lookup.node.mode)) { + throw new FS.ErrnoError(54); + } + var errCode = FS.nodePermissions(lookup.node, "x"); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + FS.currentPath = lookup.path; + }, + createDefaultDirectories() { + FS.mkdir("/tmp"); + FS.mkdir("/home"); + FS.mkdir("/home/web_user"); + }, + createDefaultDevices() { + // create /dev + FS.mkdir("/dev"); + // setup /dev/null + FS.registerDevice(FS.makedev(1, 3), { + read: () => 0, + write: (stream, buffer, offset, length, pos) => length, + llseek: () => 0 + }); + FS.mkdev("/dev/null", FS.makedev(1, 3)); + // setup /dev/tty and /dev/tty1 + // stderr needs to print output using err() rather than out() + // so we register a second tty just for it. + TTY.register(FS.makedev(5, 0), TTY.default_tty_ops); + TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops); + FS.mkdev("/dev/tty", FS.makedev(5, 0)); + FS.mkdev("/dev/tty1", FS.makedev(6, 0)); + // setup /dev/[u]random + // use a buffer to avoid overhead of individual crypto calls per byte + var randomBuffer = new Uint8Array(1024), randomLeft = 0; + var randomByte = () => { + if (randomLeft === 0) { + randomLeft = randomFill(randomBuffer).byteLength; + } + return randomBuffer[--randomLeft]; + }; + FS.createDevice("/dev", "random", randomByte); + FS.createDevice("/dev", "urandom", randomByte); + // we're not going to emulate the actual shm device, + // just create the tmp dirs that reside in it commonly + FS.mkdir("/dev/shm"); + FS.mkdir("/dev/shm/tmp"); + }, + createSpecialDirectories() { + // create /proc/self/fd which allows /proc/self/fd/6 => readlink gives the + // name of the stream for fd 6 (see test_unistd_ttyname) + FS.mkdir("/proc"); + var proc_self = FS.mkdir("/proc/self"); + FS.mkdir("/proc/self/fd"); + FS.mount({ + mount() { + var node = FS.createNode(proc_self, "fd", 16895, 73); + node.node_ops = { + lookup(parent, name) { + var fd = +name; + var stream = FS.getStreamChecked(fd); + var ret = { + parent: null, + mount: { + mountpoint: "fake" + }, + node_ops: { + readlink: () => stream.path + } + }; + ret.parent = ret; + // make it look like a simple root node + return ret; + } + }; + return node; + } + }, {}, "/proc/self/fd"); + }, + createStandardStreams(input, output, error) { + // TODO deprecate the old functionality of a single + // input / output callback and that utilizes FS.createDevice + // and instead require a unique set of stream ops + // by default, we symlink the standard streams to the + // default tty devices. however, if the standard streams + // have been overwritten we create a unique device for + // them instead. 
+ if (input) { + FS.createDevice("/dev", "stdin", input); + } else { + FS.symlink("/dev/tty", "/dev/stdin"); + } + if (output) { + FS.createDevice("/dev", "stdout", null, output); + } else { + FS.symlink("/dev/tty", "/dev/stdout"); + } + if (error) { + FS.createDevice("/dev", "stderr", null, error); + } else { + FS.symlink("/dev/tty1", "/dev/stderr"); + } + // open default streams for the stdin, stdout and stderr devices + var stdin = FS.open("/dev/stdin", 0); + var stdout = FS.open("/dev/stdout", 1); + var stderr = FS.open("/dev/stderr", 1); + }, + staticInit() { + FS.nameTable = new Array(4096); + FS.mount(MEMFS, {}, "/"); + FS.createDefaultDirectories(); + FS.createDefaultDevices(); + FS.createSpecialDirectories(); + FS.filesystems = { + "MEMFS": MEMFS + }; + }, + init(input, output, error) { + FS.initialized = true; + // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here + input ??= Module["stdin"]; + output ??= Module["stdout"]; + error ??= Module["stderr"]; + FS.createStandardStreams(input, output, error); + }, + quit() { + FS.initialized = false; + // force-flush all streams, so we get musl std streams printed out + // close all of our streams + for (var i = 0; i < FS.streams.length; i++) { + var stream = FS.streams[i]; + if (!stream) { + continue; + } + FS.close(stream); + } + }, + findObject(path, dontResolveLastLink) { + var ret = FS.analyzePath(path, dontResolveLastLink); + if (!ret.exists) { + return null; + } + return ret.object; + }, + analyzePath(path, dontResolveLastLink) { + // operate from within the context of the symlink's target + try { + var lookup = FS.lookupPath(path, { + follow: !dontResolveLastLink + }); + path = lookup.path; + } catch (e) {} + var ret = { + isRoot: false, + exists: false, + error: 0, + name: null, + path: null, + object: null, + parentExists: false, + parentPath: null, + parentObject: null + }; + try { + var lookup = FS.lookupPath(path, { + parent: true + }); + ret.parentExists = true; + ret.parentPath = lookup.path; + ret.parentObject = lookup.node; + ret.name = PATH.basename(path); + lookup = FS.lookupPath(path, { + follow: !dontResolveLastLink + }); + ret.exists = true; + ret.path = lookup.path; + ret.object = lookup.node; + ret.name = lookup.node.name; + ret.isRoot = lookup.path === "/"; + } catch (e) { + ret.error = e.errno; + } + return ret; + }, + createPath(parent, path, canRead, canWrite) { + parent = typeof parent == "string" ? parent : FS.getPath(parent); + var parts = path.split("/").reverse(); + while (parts.length) { + var part = parts.pop(); + if (!part) continue; + var current = PATH.join2(parent, part); + try { + FS.mkdir(current); + } catch (e) {} + // ignore EEXIST + parent = current; + } + return current; + }, + createFile(parent, name, properties, canRead, canWrite) { + var path = PATH.join2(typeof parent == "string" ? parent : FS.getPath(parent), name); + var mode = FS_getMode(canRead, canWrite); + return FS.create(path, mode); + }, + createDataFile(parent, name, data, canRead, canWrite, canOwn) { + var path = name; + if (parent) { + parent = typeof parent == "string" ? parent : FS.getPath(parent); + path = name ? 
PATH.join2(parent, name) : parent; + } + var mode = FS_getMode(canRead, canWrite); + var node = FS.create(path, mode); + if (data) { + if (typeof data == "string") { + var arr = new Array(data.length); + for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i); + data = arr; + } + // make sure we can write to the file + FS.chmod(node, mode | 146); + var stream = FS.open(node, 577); + FS.write(stream, data, 0, data.length, 0, canOwn); + FS.close(stream); + FS.chmod(node, mode); + } + }, + createDevice(parent, name, input, output) { + var path = PATH.join2(typeof parent == "string" ? parent : FS.getPath(parent), name); + var mode = FS_getMode(!!input, !!output); + FS.createDevice.major ??= 64; + var dev = FS.makedev(FS.createDevice.major++, 0); + // Create a fake device that a set of stream ops to emulate + // the old behavior. + FS.registerDevice(dev, { + open(stream) { + stream.seekable = false; + }, + close(stream) { + // flush any pending line data + if (output?.buffer?.length) { + output(10); + } + }, + read(stream, buffer, offset, length, pos) { + /* ignored */ var bytesRead = 0; + for (var i = 0; i < length; i++) { + var result; + try { + result = input(); + } catch (e) { + throw new FS.ErrnoError(29); + } + if (result === undefined && bytesRead === 0) { + throw new FS.ErrnoError(6); + } + if (result === null || result === undefined) break; + bytesRead++; + buffer[offset + i] = result; + } + if (bytesRead) { + stream.node.timestamp = Date.now(); + } + return bytesRead; + }, + write(stream, buffer, offset, length, pos) { + for (var i = 0; i < length; i++) { + try { + output(buffer[offset + i]); + } catch (e) { + throw new FS.ErrnoError(29); + } + } + if (length) { + stream.node.timestamp = Date.now(); + } + return i; + } + }); + return FS.mkdev(path, mode, dev); + }, + forceLoadFile(obj) { + if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true; + if (typeof XMLHttpRequest != "undefined") { + throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread."); + } else { + // Command-line. + try { + obj.contents = readBinary(obj.url); + obj.usedBytes = obj.contents.length; + } catch (e) { + throw new FS.ErrnoError(29); + } + } + }, + createLazyFile(parent, name, url, canRead, canWrite) { + // Lazy chunked Uint8Array (implements get and length from Uint8Array). + // Actual getting is abstracted away for eventual reuse. + class LazyUint8Array { + lengthKnown=false; + chunks=[]; + // Loaded chunks. Index is the chunk number + get(idx) { + if (idx > this.length - 1 || idx < 0) { + return undefined; + } + var chunkOffset = idx % this.chunkSize; + var chunkNum = (idx / this.chunkSize) | 0; + return this.getter(chunkNum)[chunkOffset]; + } + setDataGetter(getter) { + this.getter = getter; + } + cacheLength() { + // Find length + var xhr = new XMLHttpRequest; + xhr.open("HEAD", url, false); + xhr.send(null); + if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". 
Status: " + xhr.status); + var datalength = Number(xhr.getResponseHeader("Content-length")); + var header; + var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes"; + var usesGzip = (header = xhr.getResponseHeader("Content-Encoding")) && header === "gzip"; + var chunkSize = 1024 * 1024; + // Chunk size in bytes + if (!hasByteServing) chunkSize = datalength; + // Function to get a range from the remote URL. + var doXHR = (from, to) => { + if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!"); + if (to > datalength - 1) throw new Error("only " + datalength + " bytes available! programmer error!"); + // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available. + var xhr = new XMLHttpRequest; + xhr.open("GET", url, false); + if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to); + // Some hints to the browser that we want binary data. + xhr.responseType = "arraybuffer"; + if (xhr.overrideMimeType) { + xhr.overrideMimeType("text/plain; charset=x-user-defined"); + } + xhr.send(null); + if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status); + if (xhr.response !== undefined) { + return new Uint8Array(/** @type{Array} */ (xhr.response || [])); + } + return intArrayFromString(xhr.responseText || "", true); + }; + var lazyArray = this; + lazyArray.setDataGetter(chunkNum => { + var start = chunkNum * chunkSize; + var end = (chunkNum + 1) * chunkSize - 1; + // including this byte + end = Math.min(end, datalength - 1); + // if datalength-1 is selected, this is the last block + if (typeof lazyArray.chunks[chunkNum] == "undefined") { + lazyArray.chunks[chunkNum] = doXHR(start, end); + } + if (typeof lazyArray.chunks[chunkNum] == "undefined") throw new Error("doXHR failed!"); + return lazyArray.chunks[chunkNum]; + }); + if (usesGzip || !datalength) { + // if the server uses gzip or doesn't supply the length, we have to download the whole file to get the (uncompressed) length + chunkSize = datalength = 1; + // this will force getter(0)/doXHR do download the whole file + datalength = this.getter(0).length; + chunkSize = datalength; + out("LazyFiles on gzip forces download of the whole file when length is accessed"); + } + this._length = datalength; + this._chunkSize = chunkSize; + this.lengthKnown = true; + } + get length() { + if (!this.lengthKnown) { + this.cacheLength(); + } + return this._length; + } + get chunkSize() { + if (!this.lengthKnown) { + this.cacheLength(); + } + return this._chunkSize; + } + } + if (typeof XMLHttpRequest != "undefined") { + if (!ENVIRONMENT_IS_WORKER) throw "Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc"; + var lazyArray = new LazyUint8Array; + var properties = { + isDevice: false, + contents: lazyArray + }; + } else { + var properties = { + isDevice: false, + url + }; + } + var node = FS.createFile(parent, name, properties, canRead, canWrite); + // This is a total hack, but I want to get this lazy file code out of the + // core of MEMFS. If we want to keep this lazy file concept I feel it should + // be its own thin LAZYFS proxying calls to MEMFS. + if (properties.contents) { + node.contents = properties.contents; + } else if (properties.url) { + node.contents = null; + node.url = properties.url; + } + // Add a function that defers querying the file size until it is asked the first time. 
+ Object.defineProperties(node, { + usedBytes: { + get: function() { + return this.contents.length; + } + } + }); + // override each stream op with one that tries to force load the lazy file first + var stream_ops = {}; + var keys = Object.keys(node.stream_ops); + keys.forEach(key => { + var fn = node.stream_ops[key]; + stream_ops[key] = (...args) => { + FS.forceLoadFile(node); + return fn(...args); + }; + }); + function writeChunks(stream, buffer, offset, length, position) { + var contents = stream.node.contents; + if (position >= contents.length) return 0; + var size = Math.min(contents.length - position, length); + if (contents.slice) { + // normal array + for (var i = 0; i < size; i++) { + buffer[offset + i] = contents[position + i]; + } + } else { + for (var i = 0; i < size; i++) { + // LazyUint8Array from sync binary XHR + buffer[offset + i] = contents.get(position + i); + } + } + return size; + } + // use a custom read function + stream_ops.read = (stream, buffer, offset, length, position) => { + FS.forceLoadFile(node); + return writeChunks(stream, buffer, offset, length, position); + }; + // use a custom mmap function + stream_ops.mmap = (stream, length, position, prot, flags) => { + FS.forceLoadFile(node); + var ptr = mmapAlloc(length); + if (!ptr) { + throw new FS.ErrnoError(48); + } + writeChunks(stream, HEAP8, ptr, length, position); + return { + ptr, + allocated: true + }; + }; + node.stream_ops = stream_ops; + return node; + } +}; + +/** + * Given a pointer 'ptr' to a null-terminated UTF8-encoded string in the + * emscripten HEAP, returns a copy of that string as a Javascript String object. + * + * @param {number} ptr + * @param {number=} maxBytesToRead - An optional length that specifies the + * maximum number of bytes to read. You can omit this parameter to scan the + * string until the first 0 byte. If maxBytesToRead is passed, and the string + * at [ptr, ptr+maxBytesToReadr[ contains a null byte in the middle, then the + * string will cut short at that byte index (i.e. maxBytesToRead will not + * produce a string of exact length [ptr, ptr+maxBytesToRead[) N.B. mixing + * frequent uses of UTF8ToString() with and without maxBytesToRead may throw + * JS JIT optimizations off, so it is worth to consider consistently using one + * @return {string} + */ var UTF8ToString = (ptr, maxBytesToRead) => ptr ? UTF8ArrayToString(HEAPU8, ptr, maxBytesToRead) : ""; + +var SYSCALLS = { + DEFAULT_POLLMASK: 5, + calculateAt(dirfd, path, allowEmpty) { + if (PATH.isAbs(path)) { + return path; + } + // relative path + var dir; + if (dirfd === -100) { + dir = FS.cwd(); + } else { + var dirstream = SYSCALLS.getStreamFromFD(dirfd); + dir = dirstream.path; + } + if (path.length == 0) { + if (!allowEmpty) { + throw new FS.ErrnoError(44); + } + return dir; + } + return PATH.join2(dir, path); + }, + doStat(func, path, buf) { + var stat = func(path); + HEAP32[((buf) >> 2)] = stat.dev; + HEAP32[(((buf) + (4)) >> 2)] = stat.mode; + HEAPU32[(((buf) + (8)) >> 2)] = stat.nlink; + HEAP32[(((buf) + (12)) >> 2)] = stat.uid; + HEAP32[(((buf) + (16)) >> 2)] = stat.gid; + HEAP32[(((buf) + (20)) >> 2)] = stat.rdev; + (tempI64 = [ stat.size >>> 0, (tempDouble = stat.size, (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? 
(+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ], + HEAP32[(((buf) + (24)) >> 2)] = tempI64[0], HEAP32[(((buf) + (28)) >> 2)] = tempI64[1]); + HEAP32[(((buf) + (32)) >> 2)] = 4096; + HEAP32[(((buf) + (36)) >> 2)] = stat.blocks; + var atime = stat.atime.getTime(); + var mtime = stat.mtime.getTime(); + var ctime = stat.ctime.getTime(); + (tempI64 = [ Math.floor(atime / 1e3) >>> 0, (tempDouble = Math.floor(atime / 1e3), + (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ], + HEAP32[(((buf) + (40)) >> 2)] = tempI64[0], HEAP32[(((buf) + (44)) >> 2)] = tempI64[1]); + HEAPU32[(((buf) + (48)) >> 2)] = (atime % 1e3) * 1e3 * 1e3; + (tempI64 = [ Math.floor(mtime / 1e3) >>> 0, (tempDouble = Math.floor(mtime / 1e3), + (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ], + HEAP32[(((buf) + (56)) >> 2)] = tempI64[0], HEAP32[(((buf) + (60)) >> 2)] = tempI64[1]); + HEAPU32[(((buf) + (64)) >> 2)] = (mtime % 1e3) * 1e3 * 1e3; + (tempI64 = [ Math.floor(ctime / 1e3) >>> 0, (tempDouble = Math.floor(ctime / 1e3), + (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ], + HEAP32[(((buf) + (72)) >> 2)] = tempI64[0], HEAP32[(((buf) + (76)) >> 2)] = tempI64[1]); + HEAPU32[(((buf) + (80)) >> 2)] = (ctime % 1e3) * 1e3 * 1e3; + (tempI64 = [ stat.ino >>> 0, (tempDouble = stat.ino, (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ], + HEAP32[(((buf) + (88)) >> 2)] = tempI64[0], HEAP32[(((buf) + (92)) >> 2)] = tempI64[1]); + return 0; + }, + doMsync(addr, stream, len, flags, offset) { + if (!FS.isFile(stream.node.mode)) { + throw new FS.ErrnoError(43); + } + if (flags & 2) { + // MAP_PRIVATE calls need not to be synced back to underlying fs + return 0; + } + var buffer = HEAPU8.slice(addr, addr + len); + FS.msync(stream, buffer, offset, len, flags); + }, + getStreamFromFD(fd) { + var stream = FS.getStreamChecked(fd); + return stream; + }, + varargs: undefined, + getStr(ptr) { + var ret = UTF8ToString(ptr); + return ret; + } +}; + +function ___syscall_fcntl64(fd, cmd, varargs) { + SYSCALLS.varargs = varargs; + try { + var stream = SYSCALLS.getStreamFromFD(fd); + switch (cmd) { + case 0: + { + var arg = syscallGetVarargI(); + if (arg < 0) { + return -28; + } + while (FS.streams[arg]) { + arg++; + } + var newStream; + newStream = FS.dupStream(stream, arg); + return newStream.fd; + } + + case 1: + case 2: + return 0; + + // FD_CLOEXEC makes no sense for a single process. + case 3: + return stream.flags; + + case 4: + { + var arg = syscallGetVarargI(); + stream.flags |= arg; + return 0; + } + + case 12: + { + var arg = syscallGetVarargP(); + var offset = 0; + // We're always unlocked. + HEAP16[(((arg) + (offset)) >> 1)] = 2; + return 0; + } + + case 13: + case 14: + return 0; + } + // Pretend that the locking is successful. 
+ return -28; + } catch (e) { + if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e; + return -e.errno; + } +} + +function ___syscall_ioctl(fd, op, varargs) { + SYSCALLS.varargs = varargs; + try { + var stream = SYSCALLS.getStreamFromFD(fd); + switch (op) { + case 21509: + { + if (!stream.tty) return -59; + return 0; + } + + case 21505: + { + if (!stream.tty) return -59; + if (stream.tty.ops.ioctl_tcgets) { + var termios = stream.tty.ops.ioctl_tcgets(stream); + var argp = syscallGetVarargP(); + HEAP32[((argp) >> 2)] = termios.c_iflag || 0; + HEAP32[(((argp) + (4)) >> 2)] = termios.c_oflag || 0; + HEAP32[(((argp) + (8)) >> 2)] = termios.c_cflag || 0; + HEAP32[(((argp) + (12)) >> 2)] = termios.c_lflag || 0; + for (var i = 0; i < 32; i++) { + HEAP8[(argp + i) + (17)] = termios.c_cc[i] || 0; + } + return 0; + } + return 0; + } + + case 21510: + case 21511: + case 21512: + { + if (!stream.tty) return -59; + return 0; + } + + // no-op, not actually adjusting terminal settings + case 21506: + case 21507: + case 21508: + { + if (!stream.tty) return -59; + if (stream.tty.ops.ioctl_tcsets) { + var argp = syscallGetVarargP(); + var c_iflag = HEAP32[((argp) >> 2)]; + var c_oflag = HEAP32[(((argp) + (4)) >> 2)]; + var c_cflag = HEAP32[(((argp) + (8)) >> 2)]; + var c_lflag = HEAP32[(((argp) + (12)) >> 2)]; + var c_cc = []; + for (var i = 0; i < 32; i++) { + c_cc.push(HEAP8[(argp + i) + (17)]); + } + return stream.tty.ops.ioctl_tcsets(stream.tty, op, { + c_iflag, + c_oflag, + c_cflag, + c_lflag, + c_cc + }); + } + return 0; + } + + // no-op, not actually adjusting terminal settings + case 21519: + { + if (!stream.tty) return -59; + var argp = syscallGetVarargP(); + HEAP32[((argp) >> 2)] = 0; + return 0; + } + + case 21520: + { + if (!stream.tty) return -59; + return -28; + } + + // not supported + case 21531: + { + var argp = syscallGetVarargP(); + return FS.ioctl(stream, op, argp); + } + + case 21523: + { + // TODO: in theory we should write to the winsize struct that gets + // passed in, but for now musl doesn't read anything on it + if (!stream.tty) return -59; + if (stream.tty.ops.ioctl_tiocgwinsz) { + var winsize = stream.tty.ops.ioctl_tiocgwinsz(stream.tty); + var argp = syscallGetVarargP(); + HEAP16[((argp) >> 1)] = winsize[0]; + HEAP16[(((argp) + (2)) >> 1)] = winsize[1]; + } + return 0; + } + + case 21524: + { + // TODO: technically, this ioctl call should change the window size. + // but, since emscripten doesn't have any concept of a terminal window + // yet, we'll just silently throw it away as we do TIOCGWINSZ + if (!stream.tty) return -59; + return 0; + } + + case 21515: + { + if (!stream.tty) return -59; + return 0; + } + + default: + return -28; + } + } // not supported + catch (e) { + if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e; + return -e.errno; + } +} + +function ___syscall_openat(dirfd, path, flags, varargs) { + SYSCALLS.varargs = varargs; + try { + path = SYSCALLS.getStr(path); + path = SYSCALLS.calculateAt(dirfd, path); + var mode = varargs ? 
syscallGetVarargI() : 0; + return FS.open(path, flags, mode).fd; + } catch (e) { + if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e; + return -e.errno; + } +} + +function ___syscall_unlinkat(dirfd, path, flags) { + try { + path = SYSCALLS.getStr(path); + path = SYSCALLS.calculateAt(dirfd, path); + if (flags === 0) { + FS.unlink(path); + } else if (flags === 512) { + FS.rmdir(path); + } else { + abort("Invalid flags passed to unlinkat"); + } + return 0; + } catch (e) { + if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e; + return -e.errno; + } +} + +var __abort_js = () => abort(""); + +var __emscripten_memcpy_js = (dest, src, num) => HEAPU8.copyWithin(dest, src, src + num); + +var _emscripten_date_now = () => Date.now(); + +var abortOnCannotGrowMemory = requestedSize => { + abort("OOM"); +}; + +var _emscripten_resize_heap = requestedSize => { + var oldSize = HEAPU8.length; + // With CAN_ADDRESS_2GB or MEMORY64, pointers are already unsigned. + requestedSize >>>= 0; + abortOnCannotGrowMemory(requestedSize); +}; + +var ENV = {}; + +var getExecutableName = () => thisProgram || "./this.program"; + +var getEnvStrings = () => { + if (!getEnvStrings.strings) { + // Default values. + // Browser language detection #8751 + var lang = ((typeof navigator == "object" && navigator.languages && navigator.languages[0]) || "C").replace("-", "_") + ".UTF-8"; + var env = { + "USER": "web_user", + "LOGNAME": "web_user", + "PATH": "/", + "PWD": "/", + "HOME": "/home/web_user", + "LANG": lang, + "_": getExecutableName() + }; + // Apply the user-provided values, if any. + for (var x in ENV) { + // x is a key in ENV; if ENV[x] is undefined, that means it was + // explicitly set to be so. We allow user code to do that to + // force variables with default values to remain unset. 
+      if (ENV[x] === undefined) delete env[x]; else env[x] = ENV[x];
+    }
+    var strings = [];
+    for (var x in env) {
+      strings.push(`${x}=${env[x]}`);
+    }
+    getEnvStrings.strings = strings;
+  }
+  return getEnvStrings.strings;
+};
+
+var stringToAscii = (str, buffer) => {
+  for (var i = 0; i < str.length; ++i) {
+    HEAP8[buffer++] = str.charCodeAt(i);
+  }
+  // Null-terminate the string
+  HEAP8[buffer] = 0;
+};
+
+var _environ_get = (__environ, environ_buf) => {
+  var bufSize = 0;
+  getEnvStrings().forEach((string, i) => {
+    var ptr = environ_buf + bufSize;
+    HEAPU32[(((__environ) + (i * 4)) >> 2)] = ptr;
+    stringToAscii(string, ptr);
+    bufSize += string.length + 1;
+  });
+  return 0;
+};
+
+var _environ_sizes_get = (penviron_count, penviron_buf_size) => {
+  var strings = getEnvStrings();
+  HEAPU32[((penviron_count) >> 2)] = strings.length;
+  var bufSize = 0;
+  strings.forEach(string => bufSize += string.length + 1);
+  HEAPU32[((penviron_buf_size) >> 2)] = bufSize;
+  return 0;
+};
+
+var runtimeKeepaliveCounter = 0;
+
+var keepRuntimeAlive = () => noExitRuntime || runtimeKeepaliveCounter > 0;
+
+var _proc_exit = code => {
+  EXITSTATUS = code;
+  if (!keepRuntimeAlive()) {
+    Module["onExit"]?.(code);
+    ABORT = true;
+  }
+  quit_(code, new ExitStatus(code));
+};
+
+/** @suppress {duplicate } */ /** @param {boolean|number=} implicit */ var exitJS = (status, implicit) => {
+  EXITSTATUS = status;
+  _proc_exit(status);
+};
+
+var _exit = exitJS;
+
+function _fd_close(fd) {
+  try {
+    var stream = SYSCALLS.getStreamFromFD(fd);
+    FS.close(stream);
+    return 0;
+  } catch (e) {
+    if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+    return e.errno;
+  }
+}
+
+/** @param {number=} offset */ var doReadv = (stream, iov, iovcnt, offset) => {
+  var ret = 0;
+  for (var i = 0; i < iovcnt; i++) {
+    var ptr = HEAPU32[((iov) >> 2)];
+    var len = HEAPU32[(((iov) + (4)) >> 2)];
+    iov += 8;
+    var curr = FS.read(stream, HEAP8, ptr, len, offset);
+    if (curr < 0) return -1;
+    ret += curr;
+    if (curr < len) break;
+    // nothing more to read
+    if (typeof offset != "undefined") {
+      offset += curr;
+    }
+  }
+  return ret;
+};
+
+function _fd_read(fd, iov, iovcnt, pnum) {
+  try {
+    var stream = SYSCALLS.getStreamFromFD(fd);
+    var num = doReadv(stream, iov, iovcnt);
+    HEAPU32[((pnum) >> 2)] = num;
+    return 0;
+  } catch (e) {
+    if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+    return e.errno;
+  }
+}
+
+var convertI32PairToI53Checked = (lo, hi) => ((hi + 2097152) >>> 0 < 4194305 - !!lo) ? (lo >>> 0) + hi * 4294967296 : NaN;
+
+function _fd_seek(fd, offset_low, offset_high, whence, newOffset) {
+  var offset = convertI32PairToI53Checked(offset_low, offset_high);
+  try {
+    if (isNaN(offset)) return 61;
+    var stream = SYSCALLS.getStreamFromFD(fd);
+    FS.llseek(stream, offset, whence);
+    (tempI64 = [ stream.position >>> 0, (tempDouble = stream.position, (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
+    HEAP32[((newOffset) >> 2)] = tempI64[0], HEAP32[(((newOffset) + (4)) >> 2)] = tempI64[1]);
+    if (stream.getdents && offset === 0 && whence === 0) stream.getdents = null;
+    // reset readdir state
+    return 0;
+  } catch (e) {
+    if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+    return e.errno;
+  }
+}
+
+/** @param {number=} offset */ var doWritev = (stream, iov, iovcnt, offset) => {
+  var ret = 0;
+  for (var i = 0; i < iovcnt; i++) {
+    var ptr = HEAPU32[((iov) >> 2)];
+    var len = HEAPU32[(((iov) + (4)) >> 2)];
+    iov += 8;
+    var curr = FS.write(stream, HEAP8, ptr, len, offset);
+    if (curr < 0) return -1;
+    ret += curr;
+    if (curr < len) {
+      // No more space to write.
+      break;
+    }
+    if (typeof offset != "undefined") {
+      offset += curr;
+    }
+  }
+  return ret;
+};
+
+function _fd_write(fd, iov, iovcnt, pnum) {
+  try {
+    var stream = SYSCALLS.getStreamFromFD(fd);
+    var num = doWritev(stream, iov, iovcnt);
+    HEAPU32[((pnum) >> 2)] = num;
+    return 0;
+  } catch (e) {
+    if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+    return e.errno;
+  }
}
+
+FS.createPreloadedFile = FS_createPreloadedFile;
+
+FS.staticInit();
+
+// This error may happen quite a bit. To avoid overhead we reuse it (and
+// suffer a lack of stack info).
+MEMFS.doesNotExistError = new FS.ErrnoError(44);
+
+/** @suppress {checkTypes} */ MEMFS.doesNotExistError.stack = "";
+
+var wasmImports = {
+  /** @export */ __syscall_fcntl64: ___syscall_fcntl64,
+  /** @export */ __syscall_ioctl: ___syscall_ioctl,
+  /** @export */ __syscall_openat: ___syscall_openat,
+  /** @export */ __syscall_unlinkat: ___syscall_unlinkat,
+  /** @export */ _abort_js: __abort_js,
+  /** @export */ _emscripten_memcpy_js: __emscripten_memcpy_js,
+  /** @export */ emscripten_date_now: _emscripten_date_now,
+  /** @export */ emscripten_resize_heap: _emscripten_resize_heap,
+  /** @export */ environ_get: _environ_get,
+  /** @export */ environ_sizes_get: _environ_sizes_get,
+  /** @export */ exit: _exit,
+  /** @export */ fd_close: _fd_close,
+  /** @export */ fd_read: _fd_read,
+  /** @export */ fd_seek: _fd_seek,
+  /** @export */ fd_write: _fd_write
+};
+
+var wasmExports = createWasm();
+
+var ___wasm_call_ctors = () => (___wasm_call_ctors = wasmExports["__wasm_call_ctors"])();
+
+var _runIteration = Module["_runIteration"] = a0 => (_runIteration = Module["_runIteration"] = wasmExports["runIteration"])(a0);
+
+var __emscripten_stack_restore = a0 => (__emscripten_stack_restore = wasmExports["_emscripten_stack_restore"])(a0);
+
+var __emscripten_stack_alloc = a0 => (__emscripten_stack_alloc = wasmExports["_emscripten_stack_alloc"])(a0);
+
+var _emscripten_stack_get_current = () => (_emscripten_stack_get_current = wasmExports["emscripten_stack_get_current"])();
+
+var dynCall_jiji = Module["dynCall_jiji"] = (a0, a1, a2, a3, a4) => (dynCall_jiji = Module["dynCall_jiji"] = wasmExports["dynCall_jiji"])(a0, a1, a2, a3, a4);
+
+// include: postamble.js
+// === Auto-generated postamble setup entry stuff ===
+var calledRun;
+
+dependenciesFulfilled = function runCaller() {
+  // If run has never been called, and we should call run (INVOKE_RUN is true, and Module.noInitialRun is not false)
+  if (!calledRun) run();
+  if (!calledRun) dependenciesFulfilled = runCaller;
+};
+
+// try this again later, after new deps are fulfilled
+function run() {
+  if (runDependencies > 0) {
+    return;
+  }
+  preRun();
+  // a preRun added a dependency, run will be called later
+  if (runDependencies > 0) {
+    return;
+  }
+  function doRun() {
+    // run may have just been called through dependencies being fulfilled just in this very frame,
+    // or while the async setStatus time below was happening
+    if (calledRun) return;
+    calledRun = true;
+    Module["calledRun"] = true;
+    if (ABORT) return;
+    initRuntime();
+    readyPromiseResolve(Module);
+    Module["onRuntimeInitialized"]?.();
+    postRun();
+  }
+  if (Module["setStatus"]) {
+    Module["setStatus"]("Running...");
+    setTimeout(() => {
+      setTimeout(() => Module["setStatus"](""), 1);
+      doRun();
+    }, 1);
+  } else {
+    doRun();
+  }
+}
+
+if (Module["preInit"]) {
+  if (typeof Module["preInit"] == "function") Module["preInit"] = [ Module["preInit"] ];
+  while (Module["preInit"].length > 0) {
+    Module["preInit"].pop()();
+  }
+}
+
+run();
+
+// end include: postamble.js
+// include: postamble_modularize.js
+// In MODULARIZE mode we wrap the generated code in a factory function
+// and return either the Module itself, or a promise of the module.
+// We assign to the `moduleRtn` global here and configure closure to see
+// this as and extern so it won't get minified.
+moduleRtn = readyPromise;
+
+
+  return moduleRtn;
+}
+);
+})();
+if (typeof exports === 'object' && typeof module === 'object')
+  module.exports = setupModule;
+else if (typeof define === 'function' && define['amd'])
+  define([], () => setupModule);
diff --git a/wasm/TSF/build/tsf.js.symbols b/wasm/TSF/build/tsf.js.symbols
new file mode 100644
index 0000000..e161e3c
--- /dev/null
+++ b/wasm/TSF/build/tsf.js.symbols
@@ -0,0 +1,259 @@
+0:exit
+1:__syscall_fcntl64
+2:__syscall_ioctl
+3:__wasi_fd_close
+4:__wasi_fd_read
+5:__wasi_fd_write
+6:_abort_js
+7:_emscripten_memcpy_js
+8:emscripten_date_now
+9:__syscall_openat
+10:__wasi_environ_sizes_get
+11:__wasi_environ_get
+12:__syscall_unlinkat
+13:emscripten_resize_heap
+14:legalimport$__wasi_fd_seek
+15:__wasm_call_ctors
+16:tsf_vasprintf
+17:tsf_asprintf
+18:tsf_buffer_initialize_custom
+19:tsf_buffer_destroy_custom
+20:read_buffer_impl
+21:write_buffer_impl
+22:tsf_set_error_fullv
+23:tsf_set_error_full
+24:tsf_set_error
+25:tsf_set_errno
+26:new_size
+27:tsf_st_init_ptrtable
+28:tsf_st_init_strtable
+29:tsf_st_free_table
+30:tsf_st_lookup
+31:tsf_st_insert
+32:tsf_st_add_direct
+33:tsf_st_delete
+34:tsf_st_foreach
+35:tsf_st_strhash
+36:ptrcmp
+37:tsf_st_ptrhash
+38:tsf_type_create
+39:check_init_types
+40:tsf_type_recompute_hash
+41:tsf_type_destroy
+42:tsf_struct_type_append
+43:tsf_type_clone
+44:tsf_choice_type_append
+45:tsf_type_read_rec
+46:tsf_choice_type_get_num_elements
+47:tsf_type_write
+48:tsf_type_dup
+49:tsf_type_own_begin
+50:tsf_type_own_commit
+51:tsf_type_get_byte_size
+52:tsf_type_get_hash
+53:tsf_type_compare
+54:tsf_type_memo
+55:tsf_type_instanceof
+56:tsf_struct_type_get_num_elements
+57:tsf_struct_type_get_element
+58:tsf_struct_type_find_node
+59:tsf_choice_type_get_element
+60:tsf_choice_type_find_node
+61:tsf_type_get_static_size
+62:tsf_choice_type_has_non_void
+63:tsf_fd_writer
+64:tsf_fd_partial_reader
+65:tsf_size_calc_writer
+66:tsf_size_aware_reader
+67:tsf_native_type_has_struct_mapping
+68:tsf_native_type_get_size
+69:tsf_native_struct_type_map
+70:tsf_native_struct_type_set_size
+71:tsf_type_compare_adaptor
+72:tsf_parser_create
+73:tsf_parser_parse_into
+74:tsf_buf_writer_destroy
+75:tsf_buf_writer_write
+76:tsf_buf_reader_read
+77:tsf_type_table_create
+78:tsf_type_table_copy
+79:tsf_type_table_append
+80:tsf_type_table_find_by_index
+81:tsf_type_table_destroy
+82:tsf_type_table_read
+83:tsf_type_table_write
+84:tsf_type_table_get_hash
+85:tsf_type_table_contains_dynamic
+86:tsf_type_table_compare
+87:tsf_type_table_find_by_name
+88:generate_size_calc
+89:generate_generator
+90:generate_parser
+91:generate_set_default
+92:gpc_code_gen_debug
+93:gpc_threaded_destroy
+94:gpc_threaded_run
+95:back_branch_action
+96:fore_branch_action
+97:st_aa_free
+98:correct_branch
+99:gpc_intable_destroy
+100:gpc_intable_run
+101:gpc_instruction_static_size
+102:gpc_instruction_size
+103:gpc_instruction_for_all_branches
+104:gpc_program_from_proto
+105:gpc_program_run
+106:gpc_proto_create
+107:gpc_proto_destroy
+108:gpc_proto_append
+109:gpc_proto_append_tablejump_local
+110:gpc_proto_append_tablejump_field
+111:gpc_intable_get_stack_heights
+112:branch_action
+113:tsf_type_in_map_create
+114:tsf_type_in_map_from_type_out_map_in
+115:table_snatch_pair
+116:tsf_type_in_map_get_empty_singleton
+117:tsf_type_in_map_destroy
+118:tsf_type_in_map_prepare
+119:tsf_type_in_map_append
+120:tsf_type_in_map_get_type
+121:tsf_type_out_map_create
+122:tsf_type_out_map_destroy
+123:table_destroy_pair
+124:tsf_type_out_map_get_type_code
+125:tsf_stream_file_input_open
+126:tsf_stream_file_input_close
+127:tsf_stream_file_input_read_existing_buffer
+128:tsf_named_type_destroy
+129:tsf_named_type_get_hash
+130:tsf_read_string
+131:tsf_write_string
+132:tsf_full_read_of_partial
+133:tsf_zip_wtr_attr_get_default
+134:tsf_zip_rdr_attr_get_default
+135:tsf_zip_writer_destroy
+136:tsf_zip_writer_write
+137:tsf_zip_abstract_deflate
+138:tsf_adaptive_reader_destroy
+139:tear_down_mode
+140:tsf_adaptive_reader_read
+141:select_mode
+142:tsf_type_create_aoe
+143:aoe_failure
+144:tsf_type_create_array_aoe
+145:tsf_type_create_struct_with_native_size_aoe
+146:tsf_type_create_struct_with_native_size_and_destructor_aoe
+147:tsf_type_create_choice_with_native_map_aoe
+148:tsf_struct_type_append_primitive_with_native_map_aoe
+149:tsf_struct_type_append_from_callback_and_dup_with_native_map_aoe
+150:tsf_choice_type_append_primitive_aoe
+151:tsf_choice_type_append_from_callback_and_dup_aoe
+152:tsf_type_set_name_aoe
+153:tsf_typed_data_read
+154:Operand__get_type
+155:Type__get_type
+156:CodeOffset__get_type
+157:VariableDecl__destruct
+158:Instruction__alloc__destruct
+159:Instruction__call__destruct
+160:ProcedureDecl__get_type
+161:ProcedureDefn__get_type
+162:ProcedureDefn__destruct
+163:Program__get_type
+164:Program__get_parser
+165:Program__destruct
+166:Program__read
+167:Program__read_into
+168:Instruction__mov__get_type
+169:Instruction__add__get_type
+170:Instruction__alloc__get_type
+171:Instruction__ret__get_type
+172:Instruction__jump__get_type
+173:Instruction__call__get_type
+174:Instruction__call__args__get_type
+175:Instruction__branchZero__get_type
+176:ProcedureDefn__variables__get_type
+177:ProcedureDefn__code__get_type
+178:ProcedureDefn__debug__get_type
+179:Program__globals__get_type
+180:DOperand__get_type
+181:DType__get_type
+182:DCodeOffset__get_type
+183:DInstruction__call__destruct
+184:DProcedureDecl__get_type
+185:DProcedureDefn__get_type
+186:DProcedureDefn__destruct
+187:DProgram__destruct
+188:DProgram__read
+189:DInstruction__mov__get_type
+190:DInstruction__add__get_type
+191:DInstruction__alloc__get_type
+192:DInstruction__ret__get_type
+193:DInstruction__jump__get_type
+194:DInstruction__call__get_type
+195:DInstruction__call__args__get_type
+196:DInstruction__branchZero__get_type
+197:DProcedureDefn__variables__get_type
+198:DProcedureDefn__code__get_type
+199:DProgram__globals__get_type
+200:runIteration
+201:__stdio_close
+202:__stdio_read
+203:__stdio_seek
+204:__stdio_write
+205:abort
+206:close
+207:__memcpy
+208:__memset
+209:__gettimeofday
+210:fflush
+211:fiprintf
+212:__towrite
+213:__overflow
+214:fputc
+215:__fwritex
+216:getenv
+217:__bswap_32
+218:__lseek
+219:open
+220:__small_printf
+221:read
+222:snprintf
+223:__emscripten_stdout_close
+224:__emscripten_stdout_seek
+225:__stpcpy
+226:strchr
+227:__strchrnul
+228:strcmp
+229:strdup
+230:__strerror_l
+231:strlen
+232:__syscall_ret
+233:tolower
+234:vasprintf
+235:frexp
+236:__vfprintf_internal
+237:printf_core
+238:out
+239:getint
+240:pop_arg
+241:fmt_u
+242:pad
+243:fmt_fp
+244:pop_arg_long_double
+245:vsnprintf
+246:sn_write
+247:__wasi_syscall_ret
+248:wctomb
+249:emscripten_builtin_malloc
+250:emscripten_builtin_free
+251:dlrealloc
+252:dispose_chunk
+253:emscripten_builtin_calloc
+254:sbrk
+255:_emscripten_stack_restore
+256:_emscripten_stack_alloc
+257:emscripten_stack_get_current
+258:legalstub$dynCall_jiji
diff --git a/wasm/TSF/build/tsf.wasm b/wasm/TSF/build/tsf.wasm
new file mode 100755
index 0000000..c56dbcc
Binary files /dev/null and b/wasm/TSF/build/tsf.wasm differ
diff --git a/wasm/TSF/tsf_ir_speed.c b/wasm/TSF/tsf_ir_speed.c
index dd75c43..05eb656 100644
--- a/wasm/TSF/tsf_ir_speed.c
+++ b/wasm/TSF/tsf_ir_speed.c
@@ -321,47 +321,19 @@ static void readConvertTest(const char *filename, unsigned numPrograms) {
     tsf_stream_file_input_close(in);
 }
 
-int main(int c, char **v) {
+int runIteration(unsigned count) {
     static const char *filename = "tsf_ir_speed_test_file.tsf";
     static const char *zipFilename = "tsf_ir_speed_test_zip_file.tsf";
-
-    unsigned count;
-
-    switch (c) {
-    case 1:
-        /* Use a small problem size suitable for regression testing. */
-        count = 1000;
-        break;
-
-    case 2:
-        if (sscanf(v[1], "%u", &count) != 1) {
-            usage();
-        }
-        break;
-
-    default:
-        usage();
-        return 1;
-    }
-
-    printf("Writing %u programs.\n", count);
-    if (count != 10000)
-        printf("WARNING: If you are benchmarking, please use count = 10000.\n");
-
+
     TIMEIT(writeTest(filename, 100, count, TSF_ZIP_NONE));
     TIMEIT(readTest(filename, count));
    TIMEIT(readMallocTest(filename, count));
     TIMEIT(readConvertTest(filename, count));
-
-    if (tsf_zlib_supported()) {
-        TIMEIT(writeTest(zipFilename, 100, count, TSF_ZIP_ZLIB));
-        TIMEIT(readTest(zipFilename, count));
-        TIMEIT(readMallocTest(filename, count));
-        TIMEIT(readConvertTest(zipFilename, count));
-    }
-
+
+    /* We don't benchmark zlib because it's not supported in JetStream */
+    /* We don't benchmark bzip2 because it's just too slow to be interesting. */
-
+
     return 0;
 }
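Reviewer note (illustrative, not part of the patch): the hunk above replaces tsf_ir_speed.c's main() with runIteration(unsigned count), which the generated glue wraps as Module["_runIteration"] and the MODULARIZE factory (assigned to setupModule at the end of tsf.js) makes callable once the runtime has initialized. A minimal call sketch under those assumptions; the actual wiring lives in wasm/TSF/benchmark.js, which this part of the patch does not show, and the iteration count below is an arbitrary example value:

    // Hypothetical driver-side usage, not the real benchmark.js.
    setupModule(Module).then((instance) => {
        // Calls runIteration(unsigned count) from tsf_ir_speed.c via the
        // Module["_runIteration"] wrapper defined in the generated code above.
        instance._runIteration(1000);
    });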