dcp-client 4.3.0-0 → 4.3.0-2

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -3967,7 +3967,7 @@ eval("// Copyright Joyent, Inc. and other Node contributors.\n//\n// Permission
  \**********************/
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
 
- eval("/**\n * @file build.js\n * Provide build information for DCP rtlink code, the same way as the dcp/build\n * module is injected for dcp-client code. That is what src/common/dcp-build.js\n * was supposed to do. We collide symbols here with a dcp-client built-in, so\n * that the code works the same everywhere but a gives different results depending\n * on how it was linked.\n *\n * This file is expressly processed by the copy-stamp rule in the core install\n * map, so that the symbols in here get substituted with their 'real' values as\n * the product is installed. A side effect of this is that dev versions running\n * out of the source tree will have symbol names instead of symbol values here.\n *\n * @author Wes Garland, wes@distributive.network\n * @date Nov 2022\n */\n\nexports.version = 'd3f7cde3c2ffc818fa9281da428a4a1f4aa70d89';\nexports.branch = 'develop';\n\n/* When/how configure.sh ran */\nexports.config = __webpack_require__(/*! ../etc/local-config.json */ \"./etc/local-config.json\");\ndelete exports.config.install;\n\n/* When/how install.sh ran (not for dcp-client) */\nexports.install = {\n timestamp: new Date(Number('__TIMESTAMP__') * 1000),\n build: '__DCP_BUILD',\n};\n\n\n\n\n//# sourceURL=webpack://dcp/./src/build.js?");
+ eval("/**\n * @file build.js\n * Provide build information for DCP rtlink code, the same way as the dcp/build\n * module is injected for dcp-client code. That is what src/common/dcp-build.js\n * was supposed to do. We collide symbols here with a dcp-client built-in, so\n * that the code works the same everywhere but a gives different results depending\n * on how it was linked.\n *\n * This file is expressly processed by the copy-stamp rule in the core install\n * map, so that the symbols in here get substituted with their 'real' values as\n * the product is installed. A side effect of this is that dev versions running\n * out of the source tree will have symbol names instead of symbol values here.\n *\n * @author Wes Garland, wes@distributive.network\n * @date Nov 2022\n */\n\nexports.version = '4214ca584c5104c0a7d8f3ebe5ca39f3f72ab3d1';\nexports.branch = 'develop';\n\n/* When/how configure.sh ran */\nexports.config = __webpack_require__(/*! ../etc/local-config.json */ \"./etc/local-config.json\");\ndelete exports.config.install;\n\n/* When/how install.sh ran (not for dcp-client) */\nexports.install = {\n timestamp: new Date(Number('__TIMESTAMP__') * 1000),\n build: '__DCP_BUILD',\n};\n\n\n\n\n//# sourceURL=webpack://dcp/./src/build.js?");
 
  /***/ }),
 
@@ -4019,7 +4019,7 @@ eval("/**\n * @file dcp-assert.js\n * Simple assertion modul
  \*********************************/
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
 
- eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file dcp-build.js Return an object describing the current DCP build.\n * @author Ryan Rossiter <ryan@kingsds.network>\n * @author Wes Garland, wes@distributive.network\n * @date July 2020\n * @date Apr 2023\n */\n\nif ((__webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\").platform) === 'nodejs')\n{\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n const fs = requireNative('fs');\n const path = requireNative('path');\n\n try\n {\n const rtlink = requireNative('dcp-rtlink');\n const localConfig = false\n || process.env.DCP_LOCAL_CONFIG_JSON /* dcp-client bundler's temp dir */\n || path.join(rtlink.runningLocation, 'etc', 'local-config.json'); /* installed or src dir config */\n Object.assign(exports, JSON.parse(fs.readFileSync(localConfig), 'utf-8'));\n }\n catch(error)\n {\n if (error.code !== 'MODULE_NOT_FOUND')\n throw error;\n /* If we arrive here, we couldn't resolve dcp-rtlink => we're on dcp-client */\n Object.assign(exports, {\"version\":\"d3f7cde3c2ffc818fa9281da428a4a1f4aa70d89\",\"branch\":\"develop\",\"dcpClient\":{\"version\":\"4.2.32\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#b4cbcb43532d7f56418cd05de1d1f299786d3f0f\",\"overridden\":false},\"built\":\"Thu Aug 24 2023 10:11:42 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Thu 24 Aug 2023 10:11:42 AM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.88.2\",\"node\":\"v16.20.2\"}.config);\n }\n}\nelse\n{\n /* For all non-node platforms, just assume that we're also on dcp-client */\n Object.assign(exports, {\"version\":\"d3f7cde3c2ffc818fa9281da428a4a1f4aa70d89\",\"branch\":\"develop\",\"dcpClient\":{\"version\":\"4.2.32\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#b4cbcb43532d7f56418cd05de1d1f299786d3f0f\",\"overridden\":false},\"built\":\"Thu Aug 24 2023 10:11:42 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Thu 24 Aug 2023 10:11:42 AM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.88.2\",\"node\":\"v16.20.2\"}.config);\n}\n\nif (typeof exports.build === 'undefined') {\n throw new Error('Could not determine build type!');\n}\n\n\n//# sourceURL=webpack://dcp/./src/common/dcp-build.js?");
+ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file dcp-build.js Return an object describing the current DCP build.\n * @author Ryan Rossiter <ryan@kingsds.network>\n * @author Wes Garland, wes@distributive.network\n * @date July 2020\n * @date Apr 2023\n */\n\nif ((__webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\").platform) === 'nodejs')\n{\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n const fs = requireNative('fs');\n const path = requireNative('path');\n\n try\n {\n const rtlink = requireNative('dcp-rtlink');\n const localConfig = false\n || process.env.DCP_LOCAL_CONFIG_JSON /* dcp-client bundler's temp dir */\n || path.join(rtlink.runningLocation, 'etc', 'local-config.json'); /* installed or src dir config */\n Object.assign(exports, JSON.parse(fs.readFileSync(localConfig), 'utf-8'));\n }\n catch(error)\n {\n if (error.code !== 'MODULE_NOT_FOUND')\n throw error;\n /* If we arrive here, we couldn't resolve dcp-rtlink => we're on dcp-client */\n Object.assign(exports, {\"version\":\"4214ca584c5104c0a7d8f3ebe5ca39f3f72ab3d1\",\"branch\":\"develop\",\"dcpClient\":{\"version\":\"4.3.0-1\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#69bc8333e2d23d82ac1d16ec00cfb97c7831a204\",\"overridden\":false},\"built\":\"Thu Aug 31 2023 14:42:36 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Thu 31 Aug 2023 02:42:36 PM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.88.2\",\"node\":\"v16.20.2\"}.config);\n }\n}\nelse\n{\n /* For all non-node platforms, just assume that we're also on dcp-client */\n Object.assign(exports, {\"version\":\"4214ca584c5104c0a7d8f3ebe5ca39f3f72ab3d1\",\"branch\":\"develop\",\"dcpClient\":{\"version\":\"4.3.0-1\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#69bc8333e2d23d82ac1d16ec00cfb97c7831a204\",\"overridden\":false},\"built\":\"Thu Aug 31 2023 14:42:36 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Thu 31 Aug 2023 02:42:36 PM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.88.2\",\"node\":\"v16.20.2\"}.config);\n}\n\nif (typeof exports.build === 'undefined') {\n throw new Error('Could not determine build type!');\n}\n\n\n//# sourceURL=webpack://dcp/./src/common/dcp-build.js?");
 
  /***/ }),
 
@@ -4293,7 +4293,7 @@ eval("/**\n * @file password.js\n * Modal providing a way to
  \**********************************************/
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
 
- eval("/**\n * @file client-modal/utils.js\n * @author KC Erb\n * @date Mar 2020\n * \n * All shared functions among the modals.\n */\nconst { fetchRelative } = __webpack_require__(/*! ./fetch-relative */ \"./src/dcp-client/client-modal/fetch-relative.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nexports.OnCloseErrorCode = 'DCP_CM:CANCELX';\n\nif (DCP_ENV.isBrowserPlatform) {\n // Provide as export for the convenience of `utils.MicroModal` instead of a separate require.\n exports.MicroModal = __webpack_require__(/*! micromodal */ \"./node_modules/micromodal/dist/micromodal.es.js\")[\"default\"];\n}\n\n/**\n * Return a unique string, formatted as a GET parameter, that changes often enough to\n * always force the browser to fetch the latest version of our resource.\n *\n * @note Currently always returns the Date-based poison due to webpack. \n */\nfunction cachePoison() {\n if (true)\n return '?ucp=d3f7cde3c2ffc818fa9281da428a4a1f4aa70d89'; /* installer token */\n return '?ucp=' + Date.now();\n}\n \n/* Detect load type - on webpack, load dynamic content relative to webpack bundle;\n * otherwise load relative to the current scheduler's configured portal.\n */\nexports.myScript = (typeof document !== 'undefined') && document.currentScript;\nexports.corsProxyHref = undefined;\nif (exports.myScript && exports.myScript === (__webpack_require__(/*! ./fetch-relative */ \"./src/dcp-client/client-modal/fetch-relative.js\").myScript)) {\n let url = new ((__webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\").DcpURL))(exports.myScript.src);\n exports.corsProxyHref = url.resolve('../cors-proxy.html');\n}\n\n/**\n * Look for modal id and required ids on page based on config, if not found, provide from dcp-client.\n * The first id in the required array must be the id of the modal's form element.\n * @param {Object} modalConfig Modal configuration object\n * @param {string} modalConfig.id Id of parent modal element\n * @param {string[]} modalConfig.required Array of required ids in parent modal element\n * @param {string[]} [modalConfig.optional] Array of optional ids in parent modal element\n * @param {string} modalConfig.path Relative path to modal html in dcp-client\n * @returns {DOMElement[]} Array of modal elements on page [config.id, ...config.required]\n */\nexports.initModal = async function (modalConfig, onClose) {\n exports.corsProxyHref = exports.corsProxyHref || dcpConfig.portal.location.resolve('dcp-client/cors-proxy.html');\n\n // Call ensure modal on any eager-loaded modals.\n if (modalConfig.eagerLoad) {\n Promise.all(\n modalConfig.eagerLoad.map(config => ensureModal(config))\n )\n };\n\n const [elements, optionalElements] = await ensureModal(modalConfig);\n\n // Wire up form to prevent default, resolve on submission, reject+reset when closed (or call onClose when closed)\n const [modal, form] = elements;\n form.reset(); // ensure that form is fresh\n let formResolve, formReject;\n let formPromise = new Promise( function(res, rej) {\n formResolve = res;\n formReject = rej;\n });\n form.onsubmit = function (submitEvent) {\n submitEvent.preventDefault();\n modal.setAttribute(\"data-state\", \"submitted\");\n formResolve(submitEvent);\n }\n\n exports.MicroModal.show(modalConfig.id, { \n disableFocus: true, \n onClose: onClose || getDefaultOnClose(formReject)\n });\n return [elements, formPromise, 
optionalElements];\n};\n\n// Ensure all required modal elements are on page according to modalConfig\nasync function ensureModal(modalConfig) {\n let allRequiredIds = [modalConfig.id, ...modalConfig.required];\n let missing = allRequiredIds.filter( id => !document.getElementById(id) );\n if (missing.length > 0) {\n if (missing.length !== allRequiredIds.length)\n console.warn(`Some of the ids needed to replace the default DCP-modal were found, but not all. So the default DCP-Modal will be used. Missing ids are: [${missing}].`);\n let contents = await fetchRelative(exports.corsProxyHref, modalConfig.path + cachePoison());\n const container = document.createElement('div');\n container.innerHTML = contents;\n document.body.appendChild(container);\n }\n\n const elements = allRequiredIds.map(id => document.getElementById(id));\n const optionalElements = (modalConfig.optional || []).map(id => document.getElementById(id));\n return [elements, optionalElements];\n};\n\n// This onClose is called by MicroModal and thus has the modal passed to it.\nfunction getDefaultOnClose (formReject) {\n return (modal) => {\n modal.offsetLeft; // forces style recalc\n const origState = modal.dataset.state;\n // reset form including data-state\n modal.setAttribute(\"data-state\", \"new\");\n // reject if closed without submitting form.\n if (origState !== \"submitted\") {\n const err = new DCPError(\"Modal was closed but modal's form was not submitted.\", exports.OnCloseErrorCode);\n formReject(err);\n }\n }\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/client-modal/utils.js?");
+ eval("/**\n * @file client-modal/utils.js\n * @author KC Erb\n * @date Mar 2020\n * \n * All shared functions among the modals.\n */\nconst { fetchRelative } = __webpack_require__(/*! ./fetch-relative */ \"./src/dcp-client/client-modal/fetch-relative.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nexports.OnCloseErrorCode = 'DCP_CM:CANCELX';\n\nif (DCP_ENV.isBrowserPlatform) {\n // Provide as export for the convenience of `utils.MicroModal` instead of a separate require.\n exports.MicroModal = __webpack_require__(/*! micromodal */ \"./node_modules/micromodal/dist/micromodal.es.js\")[\"default\"];\n}\n\n/**\n * Return a unique string, formatted as a GET parameter, that changes often enough to\n * always force the browser to fetch the latest version of our resource.\n *\n * @note Currently always returns the Date-based poison due to webpack. \n */\nfunction cachePoison() {\n if (true)\n return '?ucp=4214ca584c5104c0a7d8f3ebe5ca39f3f72ab3d1'; /* installer token */\n return '?ucp=' + Date.now();\n}\n \n/* Detect load type - on webpack, load dynamic content relative to webpack bundle;\n * otherwise load relative to the current scheduler's configured portal.\n */\nexports.myScript = (typeof document !== 'undefined') && document.currentScript;\nexports.corsProxyHref = undefined;\nif (exports.myScript && exports.myScript === (__webpack_require__(/*! ./fetch-relative */ \"./src/dcp-client/client-modal/fetch-relative.js\").myScript)) {\n let url = new ((__webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\").DcpURL))(exports.myScript.src);\n exports.corsProxyHref = url.resolve('../cors-proxy.html');\n}\n\n/**\n * Look for modal id and required ids on page based on config, if not found, provide from dcp-client.\n * The first id in the required array must be the id of the modal's form element.\n * @param {Object} modalConfig Modal configuration object\n * @param {string} modalConfig.id Id of parent modal element\n * @param {string[]} modalConfig.required Array of required ids in parent modal element\n * @param {string[]} [modalConfig.optional] Array of optional ids in parent modal element\n * @param {string} modalConfig.path Relative path to modal html in dcp-client\n * @returns {DOMElement[]} Array of modal elements on page [config.id, ...config.required]\n */\nexports.initModal = async function (modalConfig, onClose) {\n exports.corsProxyHref = exports.corsProxyHref || dcpConfig.portal.location.resolve('dcp-client/cors-proxy.html');\n\n // Call ensure modal on any eager-loaded modals.\n if (modalConfig.eagerLoad) {\n Promise.all(\n modalConfig.eagerLoad.map(config => ensureModal(config))\n )\n };\n\n const [elements, optionalElements] = await ensureModal(modalConfig);\n\n // Wire up form to prevent default, resolve on submission, reject+reset when closed (or call onClose when closed)\n const [modal, form] = elements;\n form.reset(); // ensure that form is fresh\n let formResolve, formReject;\n let formPromise = new Promise( function(res, rej) {\n formResolve = res;\n formReject = rej;\n });\n form.onsubmit = function (submitEvent) {\n submitEvent.preventDefault();\n modal.setAttribute(\"data-state\", \"submitted\");\n formResolve(submitEvent);\n }\n\n exports.MicroModal.show(modalConfig.id, { \n disableFocus: true, \n onClose: onClose || getDefaultOnClose(formReject)\n });\n return [elements, formPromise, 
optionalElements];\n};\n\n// Ensure all required modal elements are on page according to modalConfig\nasync function ensureModal(modalConfig) {\n let allRequiredIds = [modalConfig.id, ...modalConfig.required];\n let missing = allRequiredIds.filter( id => !document.getElementById(id) );\n if (missing.length > 0) {\n if (missing.length !== allRequiredIds.length)\n console.warn(`Some of the ids needed to replace the default DCP-modal were found, but not all. So the default DCP-Modal will be used. Missing ids are: [${missing}].`);\n let contents = await fetchRelative(exports.corsProxyHref, modalConfig.path + cachePoison());\n const container = document.createElement('div');\n container.innerHTML = contents;\n document.body.appendChild(container);\n }\n\n const elements = allRequiredIds.map(id => document.getElementById(id));\n const optionalElements = (modalConfig.optional || []).map(id => document.getElementById(id));\n return [elements, optionalElements];\n};\n\n// This onClose is called by MicroModal and thus has the modal passed to it.\nfunction getDefaultOnClose (formReject) {\n return (modal) => {\n modal.offsetLeft; // forces style recalc\n const origState = modal.dataset.state;\n // reset form including data-state\n modal.setAttribute(\"data-state\", \"new\");\n // reject if closed without submitting form.\n if (origState !== \"submitted\") {\n const err = new DCPError(\"Modal was closed but modal's form was not submitted.\", exports.OnCloseErrorCode);\n formReject(err);\n }\n }\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/client-modal/utils.js?");
 
  /***/ }),
 
@@ -4325,7 +4325,7 @@ eval("/**\n * @file Module that implements Compute API\n * @module dcp/comput
  /***/ ((module, exports, __webpack_require__) => {
 
  "use strict";
- eval("/* module decorator */ module = __webpack_require__.nmd(module);\n/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file dcp-client-bundle-src.js\n * Top-level file which gets webpacked into the bundle consumed by dcp-client 2.5\n * @author Wes Garland, wes@kingsds.network\n * @date July 2019\n */\n\n{\n let thisScript = typeof document !== 'undefined' ? (typeof document.currentScript !== 'undefined' && document.currentScript) || document.getElementById('_dcp_client_bundle') : {}\n let realModuleDeclare\n\n if ( false || typeof module.declare === 'undefined') {\n realModuleDeclare = ( true) ? module.declare : 0\n if (false) {}\n module.declare = function moduleUnWrapper (deps, factory) {\n factory(null, module.exports, module)\n return module.exports\n }\n }\n\n let _debugging = () => false\n if (process.env.DCP_CONFIG_USE_DEPRECATED_FUTURE)\n dcpConfig.future = (__webpack_require__(/*! ../common/config-future.js */ \"./src/common/config-future.js\").futureFactory)(_debugging, dcpConfig);\n\n /* These modules are official API and must be part of DCP Client */\n let officialApi = {\n 'protocol': __webpack_require__(/*! ../protocol-v4 */ \"./src/protocol-v4/index.js\"),\n 'compute': (__webpack_require__(/*! ./compute */ \"./src/dcp-client/compute.js\").compute),\n 'worker': __webpack_require__(/*! ./worker */ \"./src/dcp-client/worker/index.js\"),\n 'wallet': __webpack_require__(/*! ./wallet */ \"./src/dcp-client/wallet/index.js\"),\n };\n\n /* Some of these modules are API-track. Some of them need to be published to be\n * available for top-level resolution by DCP internals. Those (mostly) should have\n * been written using relative module paths.....\n */\n let modules = Object.assign({\n 'dcp-build': {\"version\":\"d3f7cde3c2ffc818fa9281da428a4a1f4aa70d89\",\"branch\":\"develop\",\"dcpClient\":{\"version\":\"4.2.32\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#b4cbcb43532d7f56418cd05de1d1f299786d3f0f\",\"overridden\":false},\"built\":\"Thu Aug 24 2023 10:11:42 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Thu 24 Aug 2023 10:11:42 AM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.88.2\",\"node\":\"v16.20.2\"},\n 'dcp-xhr': __webpack_require__(/*! ../common/dcp-xhr */ \"./src/common/dcp-xhr.js\"),\n 'dcp-env': __webpack_require__(/*! ../common/dcp-env */ \"./src/common/dcp-env.js\"),\n 'dcp-url': __webpack_require__(/*! ../common/dcp-url */ \"./src/common/dcp-url.js\"),\n 'cli': __webpack_require__(/*! ../common/cli */ \"./src/common/cli.js\"),\n 'dcp-timers': __webpack_require__(/*! ../common/dcp-timers */ \"./src/common/dcp-timers.js\"),\n 'dcp-dot-dir': __webpack_require__(/*! ../common/dcp-dot-dir */ \"./src/common/dcp-dot-dir.js\"),\n 'dcp-assert': __webpack_require__(/*! ../common/dcp-assert */ \"./src/common/dcp-assert.js\"),\n 'dcp-events': __webpack_require__(/*! ../common/dcp-events */ \"./src/common/dcp-events/index.js\"),\n 'utils': __webpack_require__(/*! ../utils */ \"./src/utils/index.js\"),\n 'debugging': __webpack_require__(/*! ../debugging */ \"./src/debugging.js\"),\n 'publish': __webpack_require__(/*! ../common/dcp-publish */ \"./src/common/dcp-publish.js\"),\n 'compute-groups': {\n ...__webpack_require__(/*! ./compute-groups */ \"./src/dcp-client/compute-groups/index.js\"),\n publicGroupOpaqueId: (__webpack_require__(/*! 
../common/scheduler-constants */ \"./src/common/scheduler-constants.js\").computeGroups[\"public\"].opaqueId),\n },\n 'bank-util': __webpack_require__(/*! ./bank-util */ \"./src/dcp-client/bank-util.js\"),\n 'protocol-v4': __webpack_require__(/*! ../protocol-v4 */ \"./src/protocol-v4/index.js\"), /* deprecated */\n 'client-modal': __webpack_require__(/*! ./client-modal */ \"./src/dcp-client/client-modal/index.js\"),\n 'eth': __webpack_require__(/*! ./wallet/eth */ \"./src/dcp-client/wallet/eth.js\"),\n 'serialize': __webpack_require__(/*! ../utils/serialize */ \"./src/utils/serialize.js\"),\n 'kvin': __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\"),\n 'job': __webpack_require__(/*! ./job */ \"./src/dcp-client/job/index.js\"),\n 'range-object': __webpack_require__(/*! ./range-object */ \"./src/dcp-client/range-object.js\"),\n 'stats-ranges': __webpack_require__(/*! ./stats-ranges */ \"./src/dcp-client/stats-ranges.js\"),\n 'job-values': __webpack_require__(/*! ./job-values */ \"./src/dcp-client/job-values.js\"),\n 'signal-handler': __webpack_require__(/*! ../node-libs/signal-handler */ \"./src/node-libs/signal-handler.js\"),\n 'standard-objects': {}\n }, officialApi);\n\n /* Export the JS Standard Classes (etc) from the global object of the bundle evaluation context,\n * in case we have code somewhere that needs to use these for instanceof checks.\n */\n ;[ Object, Function, Boolean, Symbol,\n Error, EvalError, RangeError, ReferenceError, SyntaxError, TypeError, URIError,\n Number, Math, Date,\n String, RegExp,\n Array, Int8Array, Uint8Array, Uint8ClampedArray, Int16Array, Uint16Array, Int32Array, Uint32Array, Float32Array, Float64Array,\n Map, Set, WeakMap, WeakSet,\n ArrayBuffer, DataView, JSON,\n Promise, \n Reflect, Proxy, Intl, WebAssembly, __webpack_require__\n ].forEach(function (obj) {\n if (obj.name && (typeof obj === 'function' || typeof obj === 'object'))\n modules['standard-objects'][obj.name] = obj\n })\n\n /* Export the constructors used by object literals and boxing objects. 
Usually these are the same as\n * the standard objects, but not always -- evaluation environments like node vm contexts can have\n * different standard objects supplied via the sandboxing object than the engine uses internally.\n */\n modules['engine-constructors'] = {\n Object: ({}).constructor,\n Array: ([]).constructor,\n Number: (0).constructor,\n String: ('').constructor,\n Boolean: (false).constructor,\n Function: (()=>1).constructor,\n };\n\n if (typeof BigInt !== 'undefined')\n {\n modules['standard-objects']['BigInt'] === BigInt;\n modules['engine-constructors'].BigInt = (0n).constructor;\n }\n if (typeof BigInt64Array !== 'undefined')\n modules['standard-objects']['BigInt64Array'] === BigInt64Array;\n if (typeof BigInt64Array !== 'undefined')\n modules['standard-objects']['BigUint64Array'] === BigUint64Array;\n\n module.declare([], function(require, exports, module) {\n Object.assign(exports, modules)\n exports['dcp-config'] = dcpConfig\n exports['dcp-default-config'] = {\"_serializeVerId\":\"v8\",\"what\":{\"ctr\":0,\"ps\":{\"dcp\":{\"ctr\":0,\"ps\":{\"connectionOptions\":{\"ctr\":0,\"ps\":{\"default\":{\"ctr\":0,\"ps\":{\"connectTimeout\":60,\"disconnectTimeout\":900,\"lingerTimeout\":18000,\"identityUnlockTimeout\":300,\"batchWaitTime\":0.03,\"ttl\":{\"raw\":{\"min\":15,\"max\":600,\"default\":120}},\"transports\":{\"arr\":[\"socketio\"]},\"socketio\":{\"raw\":{}}}}}},\"validitySlopValue\":10,\"validityStampCachePurgeInterval\":60,\"maxConnectionTimeout\":300000}},\"worker\":{\"raw\":{}},\"evaluator\":{\"ctr\":0,\"ps\":{\"listen\":{\"ctr\":\"dcpUrl$$DcpURL\",\"ps\":{},\"arg\":\"http://localhost:9000/\"},\"location\":{\"ctr\":\"dcpUrl$$DcpURL\",\"ps\":{},\"arg\":\"http://localhost:9000/\"},\"friendLocation\":{\"seen\":6}}},\"supervisor\":{\"raw\":{\"dcp\":{\"connectionOptions\":{\"default\":{\"identityUnlockTimeout\":900}}},\"tuning\":{\"watchdogInterval\":7,\"minSandboxStartDelay\":0.1,\"maxSandboxStartDelay\":0.7,\"minSandboxSlack\":0.2,\"maxSandboxSlack\":0.5,\"maxSandboxSliceRetries\":1,\"cachedJobsThreshold\":0,\"prefetchInterval\":30,\"pruneFrequency\":30000,\"mustPruneMultiplier\":1.3,\"defaultDelayIncrement\":50,\"maxExtraSandboxes\":8,\"maxResultSubmissionRetries\":3,\"reservedSliceLifetime\":300000},\"repoMan\":{\"frequency\":60000,\"thresholdMultiplier\":2},\"sandbox\":{\"generalTimeout\":6000,\"sliceTimeout\":86400000,\"progressTimeout\":300000,\"progressReportInterval\":1200000,\"maxSandboxUse\":1000},\"pCores\":0}},\"standaloneWorker\":{\"undefined\":true},\"scheduler\":{\"ctr\":0,\"ps\":{\"location\":{\"ctr\":\"dcpUrl$$DcpURL\",\"ps\":{},\"arg\":\"https://scheduler.distributed.computer/\"},\"worker\":{\"ctr\":0,\"ps\":{\"types\":{\"arr\":[\"v4\"]},\"operations\":{\"raw\":\"1.0.0\"}}},\"compatibility\":{\"raw\":{\"minimum\":{\"dcp\":\"^5.0.0\",\"dcp-client\":\"^4.0.0\",\"dcp-worker\":\"^2.1.0\",\"operations\":{\"work\":\"^4.1.0\",\"compute\":\"^1.0.0\",\"bank\":\"^4.0.0\"}},\"exclusions\":{}}}}}}}};\n })\n if (realModuleDeclare)\n module.declare = realModuleDeclare\n\n bundleExports = thisScript.exports = exports; /* must be last expression evaluated! */\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/index.js?");
+ eval("/* module decorator */ module = __webpack_require__.nmd(module);\n/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file dcp-client-bundle-src.js\n * Top-level file which gets webpacked into the bundle consumed by dcp-client 2.5\n * @author Wes Garland, wes@kingsds.network\n * @date July 2019\n */\n\n{\n let thisScript = typeof document !== 'undefined' ? (typeof document.currentScript !== 'undefined' && document.currentScript) || document.getElementById('_dcp_client_bundle') : {}\n let realModuleDeclare\n\n if ( false || typeof module.declare === 'undefined') {\n realModuleDeclare = ( true) ? module.declare : 0\n if (false) {}\n module.declare = function moduleUnWrapper (deps, factory) {\n factory(null, module.exports, module)\n return module.exports\n }\n }\n\n let _debugging = () => false\n if (process.env.DCP_CONFIG_USE_DEPRECATED_FUTURE)\n dcpConfig.future = (__webpack_require__(/*! ../common/config-future.js */ \"./src/common/config-future.js\").futureFactory)(_debugging, dcpConfig);\n\n /* These modules are official API and must be part of DCP Client */\n let officialApi = {\n 'protocol': __webpack_require__(/*! ../protocol-v4 */ \"./src/protocol-v4/index.js\"),\n 'compute': (__webpack_require__(/*! ./compute */ \"./src/dcp-client/compute.js\").compute),\n 'worker': __webpack_require__(/*! ./worker */ \"./src/dcp-client/worker/index.js\"),\n 'wallet': __webpack_require__(/*! ./wallet */ \"./src/dcp-client/wallet/index.js\"),\n };\n\n /* Some of these modules are API-track. Some of them need to be published to be\n * available for top-level resolution by DCP internals. Those (mostly) should have\n * been written using relative module paths.....\n */\n let modules = Object.assign({\n 'dcp-build': {\"version\":\"4214ca584c5104c0a7d8f3ebe5ca39f3f72ab3d1\",\"branch\":\"develop\",\"dcpClient\":{\"version\":\"4.3.0-1\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#69bc8333e2d23d82ac1d16ec00cfb97c7831a204\",\"overridden\":false},\"built\":\"Thu Aug 31 2023 14:42:36 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Thu 31 Aug 2023 02:42:36 PM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.88.2\",\"node\":\"v16.20.2\"},\n 'dcp-xhr': __webpack_require__(/*! ../common/dcp-xhr */ \"./src/common/dcp-xhr.js\"),\n 'dcp-env': __webpack_require__(/*! ../common/dcp-env */ \"./src/common/dcp-env.js\"),\n 'dcp-url': __webpack_require__(/*! ../common/dcp-url */ \"./src/common/dcp-url.js\"),\n 'cli': __webpack_require__(/*! ../common/cli */ \"./src/common/cli.js\"),\n 'dcp-timers': __webpack_require__(/*! ../common/dcp-timers */ \"./src/common/dcp-timers.js\"),\n 'dcp-dot-dir': __webpack_require__(/*! ../common/dcp-dot-dir */ \"./src/common/dcp-dot-dir.js\"),\n 'dcp-assert': __webpack_require__(/*! ../common/dcp-assert */ \"./src/common/dcp-assert.js\"),\n 'dcp-events': __webpack_require__(/*! ../common/dcp-events */ \"./src/common/dcp-events/index.js\"),\n 'utils': __webpack_require__(/*! ../utils */ \"./src/utils/index.js\"),\n 'debugging': __webpack_require__(/*! ../debugging */ \"./src/debugging.js\"),\n 'publish': __webpack_require__(/*! ../common/dcp-publish */ \"./src/common/dcp-publish.js\"),\n 'compute-groups': {\n ...__webpack_require__(/*! ./compute-groups */ \"./src/dcp-client/compute-groups/index.js\"),\n publicGroupOpaqueId: (__webpack_require__(/*! 
../common/scheduler-constants */ \"./src/common/scheduler-constants.js\").computeGroups[\"public\"].opaqueId),\n },\n 'bank-util': __webpack_require__(/*! ./bank-util */ \"./src/dcp-client/bank-util.js\"),\n 'protocol-v4': __webpack_require__(/*! ../protocol-v4 */ \"./src/protocol-v4/index.js\"), /* deprecated */\n 'client-modal': __webpack_require__(/*! ./client-modal */ \"./src/dcp-client/client-modal/index.js\"),\n 'eth': __webpack_require__(/*! ./wallet/eth */ \"./src/dcp-client/wallet/eth.js\"),\n 'serialize': __webpack_require__(/*! ../utils/serialize */ \"./src/utils/serialize.js\"),\n 'kvin': __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\"),\n 'job': __webpack_require__(/*! ./job */ \"./src/dcp-client/job/index.js\"),\n 'range-object': __webpack_require__(/*! ./range-object */ \"./src/dcp-client/range-object.js\"),\n 'stats-ranges': __webpack_require__(/*! ./stats-ranges */ \"./src/dcp-client/stats-ranges.js\"),\n 'job-values': __webpack_require__(/*! ./job-values */ \"./src/dcp-client/job-values.js\"),\n 'signal-handler': __webpack_require__(/*! ../node-libs/signal-handler */ \"./src/node-libs/signal-handler.js\"),\n 'standard-objects': {}\n }, officialApi);\n\n /* Export the JS Standard Classes (etc) from the global object of the bundle evaluation context,\n * in case we have code somewhere that needs to use these for instanceof checks.\n */\n ;[ Object, Function, Boolean, Symbol,\n Error, EvalError, RangeError, ReferenceError, SyntaxError, TypeError, URIError,\n Number, Math, Date,\n String, RegExp,\n Array, Int8Array, Uint8Array, Uint8ClampedArray, Int16Array, Uint16Array, Int32Array, Uint32Array, Float32Array, Float64Array,\n Map, Set, WeakMap, WeakSet,\n ArrayBuffer, DataView, JSON,\n Promise, \n Reflect, Proxy, Intl, WebAssembly, __webpack_require__\n ].forEach(function (obj) {\n if (obj.name && (typeof obj === 'function' || typeof obj === 'object'))\n modules['standard-objects'][obj.name] = obj\n })\n\n /* Export the constructors used by object literals and boxing objects. 
Usually these are the same as\n * the standard objects, but not always -- evaluation environments like node vm contexts can have\n * different standard objects supplied via the sandboxing object than the engine uses internally.\n */\n modules['engine-constructors'] = {\n Object: ({}).constructor,\n Array: ([]).constructor,\n Number: (0).constructor,\n String: ('').constructor,\n Boolean: (false).constructor,\n Function: (()=>1).constructor,\n };\n\n if (typeof BigInt !== 'undefined')\n {\n modules['standard-objects']['BigInt'] === BigInt;\n modules['engine-constructors'].BigInt = (0n).constructor;\n }\n if (typeof BigInt64Array !== 'undefined')\n modules['standard-objects']['BigInt64Array'] === BigInt64Array;\n if (typeof BigInt64Array !== 'undefined')\n modules['standard-objects']['BigUint64Array'] === BigUint64Array;\n\n module.declare([], function(require, exports, module) {\n Object.assign(exports, modules)\n exports['dcp-config'] = dcpConfig\n exports['dcp-default-config'] = {\"_serializeVerId\":\"v8\",\"what\":{\"ctr\":0,\"ps\":{\"dcp\":{\"ctr\":0,\"ps\":{\"connectionOptions\":{\"ctr\":0,\"ps\":{\"default\":{\"ctr\":0,\"ps\":{\"connectTimeout\":60,\"disconnectTimeout\":900,\"lingerTimeout\":18000,\"identityUnlockTimeout\":300,\"batchWaitTime\":0.03,\"ttl\":{\"raw\":{\"min\":15,\"max\":600,\"default\":120}},\"transports\":{\"arr\":[\"socketio\"]},\"socketio\":{\"raw\":{}}}}}},\"validitySlopValue\":10,\"validityStampCachePurgeInterval\":60,\"maxConnectionTimeout\":300000}},\"worker\":{\"raw\":{}},\"evaluator\":{\"ctr\":0,\"ps\":{\"listen\":{\"ctr\":\"dcpUrl$$DcpURL\",\"ps\":{},\"arg\":\"http://localhost:9000/\"},\"location\":{\"ctr\":\"dcpUrl$$DcpURL\",\"ps\":{},\"arg\":\"http://localhost:9000/\"},\"friendLocation\":{\"seen\":6}}},\"supervisor\":{\"raw\":{\"dcp\":{\"connectionOptions\":{\"default\":{\"identityUnlockTimeout\":900}}},\"tuning\":{\"watchdogInterval\":7,\"minSandboxStartDelay\":0.1,\"maxSandboxStartDelay\":0.7,\"minSandboxSlack\":0.2,\"maxSandboxSlack\":0.5,\"maxSandboxSliceRetries\":1,\"cachedJobsThreshold\":0,\"prefetchInterval\":30,\"pruneFrequency\":30000,\"mustPruneMultiplier\":1.3,\"defaultDelayIncrement\":50,\"maxExtraSandboxes\":8,\"maxResultSubmissionRetries\":3,\"reservedSliceLifetime\":300000},\"repoMan\":{\"frequency\":60000,\"thresholdMultiplier\":2},\"sandbox\":{\"generalTimeout\":6000,\"sliceTimeout\":86400000,\"progressTimeout\":300000,\"progressReportInterval\":1200000,\"maxSandboxUse\":1000},\"pCores\":0}},\"standaloneWorker\":{\"undefined\":true},\"scheduler\":{\"ctr\":0,\"ps\":{\"location\":{\"ctr\":\"dcpUrl$$DcpURL\",\"ps\":{},\"arg\":\"https://scheduler.distributed.computer/\"},\"worker\":{\"ctr\":0,\"ps\":{\"types\":{\"arr\":[\"v4\"]},\"operations\":{\"raw\":\"1.0.0\"}}},\"compatibility\":{\"raw\":{\"minimum\":{\"dcp\":\"^5.0.0\",\"dcp-client\":\"^4.0.0\",\"dcp-worker\":\"^2.1.0\",\"operations\":{\"work\":\"^4.1.0\",\"compute\":\"^1.0.0\",\"bank\":\"^4.0.0\"}},\"exclusions\":{}}}}}}}};\n })\n if (realModuleDeclare)\n module.declare = realModuleDeclare\n\n bundleExports = thisScript.exports = exports; /* must be last expression evaluated! */\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/index.js?");
 
  /***/ }),
 
@@ -4387,7 +4387,7 @@ eval("/**\n * @file job/slice-payment-offer.js\n * @author Ryan Ross
  \*********************************************/
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
 
- eval("/**\n * @file job/upload-slices.js\n * @author Ryan Saweczko, ryansaweczko@kingsds.network\n * Danny Akbarzadeh, danny@kingsds.network\n * \n * @date Jun 2022\n *\n * Implement functions to upload slices to the scheduler after a job has been deployed.\n * This area will have it's own connection to the job submit service which it is responsible\n * for handling.\n */\n\nconst { Connection } = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp-client');\n\nlet uploadConnection = null;\nlet uploadRefs = 0;\nlet tunedKvin;\n\nfunction createNewConnection()\n{\n uploadConnection = new Connection(dcpConfig.scheduler.services.jobSubmit, { allowBatch: false });\n uploadConnection.on('end', createNewConnection);\n return uploadConnection.connect();\n}\n\n/**\n * Helper function that tries to upload slicePile to scheduler for the job with the given address\n * If the connection throws, we will continue trying to upload until it has thrown errorTolerance times\n * However, if the upload is unsuccessful, we throw immediately.\n * @param {object} pileMessage \n * @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job \n */\nasync function safeSliceUpload(pileMessage)\n{\n let payload = undefined;\n let errorTolerance = dcpConfig.job.sliceUploadErrorTolerance; // copy number of times we will tolerate non-success when uploading slices directly from config\n\n while (true) // eslint-disable-line no-constant-condition\n {\n try\n {\n const start = Date.now();\n debugging('slice-upload') && console.log('x-dbg-uploadStart', pileMessage.signedMessage.length);\n\n payload = await pileMessage.send();\n if (!payload.success)\n {\n debugging('slice-upload') && console.log('x-dbg-uploadBackoff', {length:pileMessage.signedMessage.length});\n throw payload;\n }\n else\n {\n debugging('slice-upload') && console.log('x-dbg-uploadProgress', Date.now() - start);\n break;\n }\n }\n catch (error)\n {\n if (--errorTolerance <= 0)\n {\n debugging('slice-upload') && console.log('x-dbg-uploadError', error);\n if (uploadConnection)\n {\n uploadConnection.off('end', createNewConnection);\n uploadConnection.close(); // ensure we clean up the connection\n }\n throw error;\n }\n }\n }\n\n return payload;\n}\n\n/**\n* This function contains the actual logic behind staggered slice uploads\n* to the scheduler which makes quicker deployment possible.\n* \n* Note that we pass in mostToTake so that the uploadLogic function can update \n* it to the new value it needs to be, and then pass it back to the wrapper \n* function (addSlices) which actually does the work of picking up slices \n* and thus uses this value\n* @param {Array} pile the actual array of slices being uploaded to scheduler\n* @param {Number} mostToTake number of slices that should be taken by the wrapper function (addSlices) \n* which actually does the work of picking up slices and thus uses this value.\n* We pass in mostToTake so that the uploadLogic function can update it to the \n* new value it needs to be, and then pass it back to the wrapper\n* @param {*} jobAddress Address of job to upload the slices to \n* @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job\n*/\nasync function sliceUploadLogic(pile, mostToTake, jobAddress)\n{\n if (!uploadConnection)\n createNewConnection();\n\n const 
slicesTaken = pile.length;\n const payloadData = {\n job: jobAddress,\n dataValues: getKvin().marshal(pile)\n };\n const pileMessage = new uploadConnection.Request({\n /* payload object */\n operation: 'addSliceData',\n jsonData: JSON.stringify(payloadData) /* becomes payload.data in transit */\n });\n const pileSize = pileMessage.payload.jsonData.length;\n \n let newMostToTake;\n let uploadedSlices;\n\n // if the pile is larger than the ceiling but we only took one slice, there's no smaller pile we can make\n // so we upload it anyway but we don't try taking more next time cause we were over the ceiling (which \n // is a hard limit on upload sizes)\n if ((pileSize > dcpConfig.job.uploadSlicesCeiling) && (slicesTaken === 1))\n {\n uploadedSlices = await safeSliceUpload(pileMessage);\n newMostToTake = 1;\n }\n \n // if the pile is larger than the target but we only took one slice, there's no smaller pile we can make\n // so we upload it anyway and still try taking more\n else if ((pileSize > dcpConfig.job.uploadSlicesTarget) && (slicesTaken === 1))\n {\n uploadedSlices = await safeSliceUpload(pileMessage);\n newMostToTake = mostToTake * dcpConfig.job.uploadIncreaseFactor;\n }\n \n // otherwise, if the pile is smaller than the soft ceiling, send up the pile anyway (since piles are expensive to make) \n // but remember to include incrementFactor times as many slices in the next pile\n else if (pileSize <= dcpConfig.job.uploadSlicesTarget)\n {\n uploadedSlices = await safeSliceUpload(pileMessage);\n newMostToTake = mostToTake * dcpConfig.job.uploadIncreaseFactor;\n }\n \n // if the pile is over the ceiling then we do not upload and begin reassembling our piles from scratch\n else if (pileSize > dcpConfig.job.uploadSlicesCeiling)\n {\n newMostToTake = -1;\n }\n \n // if the pile is over the target (but implicitly under the ceiling), then upload the pile to scheduler but lower mostToTake\n // by a smaller factor than incrementFactor to allow us to begin \"centering\" sizes of piles around the target\n else if (pileSize > dcpConfig.job.uploadSlicesTarget)\n {\n uploadedSlices = await safeSliceUpload(pileMessage);\n newMostToTake = Math.ceil(mostToTake / ((2 / 3) * dcpConfig.job.uploadIncreaseFactor));\n }\n else\n throw new Error('hopefully impossible code in slice upload logic');\n\n let payload = uploadedSlices ? uploadedSlices.payload : undefined;\n return { payload, newMostToTake }; // in case the user needs lastSliceNumber's value\n}\n\n/**\n* Uploads slices to the scheduler in a staggered fashion\n* @param {Array} dataValues actual array of slices being uploaded to scheduler\n* @param {*} jobAddress Address of job these slices are for\n* @param {*} newTunedKvin undefined, or new version of kvin tuned for speed or size specifically. 
Use if defined.\n* @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job\n*/\nasync function addSlices(dataValues, jobAddress, newTunedKvin)\n{\n if (newTunedKvin)\n tunedKvin = newTunedKvin;\n\n if (!Array.isArray(dataValues))\n throw new TypeError('Only data-by-value jobs may dynamically add slices');\n\n let mostToTake = dcpConfig.job.uploadInitialNumberOfSlices; // maximum number of slices we could take in per pile\n let payload = undefined; // used in return value\n let slicesTaken = 0; // number of slices in the pile already\n let pile = [];\n uploadRefs++;\n\n for (let slice of dataValues)\n {\n pile.push(slice);\n slicesTaken++;\n if (slicesTaken === mostToTake)\n {\n let total = await sliceUploadLogic(pile, mostToTake, jobAddress);\n payload = total.payload;\n \n if (total.newMostToTake < 0)\n {\n /* if total.newMostToTake == -1 (only non-positive value returned), then the pile was not successfully\n * uploaded because it was over the ceiling and we need to upload the pile *itself* again, recursively\n */\n payload = await addSlices(pile, jobAddress);\n /* and next time, the number of slices we take is the number from this time *divided* by the incrementFactor\n * since we know invariably that number of slices was under the ceiling AND target\n * if you're curious why that's an invariant, this is because mostToTake only ever *increases* by being multiplied by \n * a factor of incrementFactor within sliceUploadLogic, and this only occurs when the pile being uploaded that time\n * was under the target\n */\n mostToTake = mostToTake / dcpConfig.job.uploadIncreaseFactor;\n }\n else\n {\n /* in all other cases (other than the pile size being over the ceiling) the sliceUploadLogic helper \n * determines the number of slices we should pick up next time, so we just use the value it spits out\n */\n mostToTake = total.newMostToTake;\n }\n \n // reset slicesTaken and pile since at this point we know for sure the pile has been uploaded\n pile = [];\n slicesTaken = 0;\n }\n }\n // upload the pile one last time in case we continued off the last slice with a non-empty pile\n if (pile.length !== 0)\n {\n let finalObj = await sliceUploadLogic(pile, mostToTake, jobAddress);\n payload = finalObj.payload;\n mostToTake = finalObj.newMostToTake;\n \n if (mostToTake < 0)\n {\n // if you need documentation on the next two lines, look inside the if (total.newMostToTake < 0) just above\n payload = await addSlices(pile, jobAddress);\n mostToTake = mostToTake / dcpConfig.job.uploadIncreaseFactor;\n }\n }\n\n // and finally assign whatever mostToTake was at the end of this run of the function to be returned \n // as part of the payload in case addSlices was called recursively\n payload.mostToTake = mostToTake;\n\n // cleanup connection when done with this function\n uploadRefs--;\n if (uploadConnection && uploadRefs === 0) {\n debugging('slice-upload') && console.debug(`250: closing uploadConnection`, uploadConnection?.id);\n uploadConnection.removeAllListeners('end');\n uploadConnection.close();\n uploadConnection = null;\n debugging('slice-upload') && console.debug(`252: closed uploadConnection`, uploadConnection);\n }\n\n /* contains the job's lastSliceNumber (the only externally-meaningful value returned from \n * the uploading of slices to the scheduler) in case the calling function needs it \n */\n return payload;\n}\n\n/**\n * marshal the value using kvin or instance of the kvin (tunedKvin)\n * tunedKvin is defined if 
job.tuning.kvin is specified.\n *\n * @param {any} value \n * @return {object} A marshaled object\n * \n */\nfunction getKvin()\n{\n return tunedKvin || __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\n}\n\n\nexports.addSlices = addSlices;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/job/upload-slices.js?");
+ eval("/**\n * @file job/upload-slices.js\n * @author Ryan Saweczko, ryansaweczko@kingsds.network\n * Danny Akbarzadeh, danny@kingsds.network\n * \n * @date Jun 2022\n *\n * Implement functions to upload slices to the scheduler after a job has been deployed.\n * This area will have it's own connection to the job submit service which it is responsible\n * for handling.\n */\n\nconst { Connection } = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp-client');\n\nlet uploadConnection = null;\nlet uploadRefs = 0;\nlet tunedKvin;\n\nfunction createNewConnection()\n{\n uploadConnection = new Connection(dcpConfig.scheduler.services.jobSubmit, { allowBatch: false });\n uploadConnection.on('end', createNewConnection);\n return uploadConnection.connect();\n}\n\n/**\n * Helper function that tries to upload slicePile to scheduler for the job with the given address\n * If the connection throws, we will continue trying to upload until it has thrown errorTolerance times\n * However, if the upload is unsuccessful, we throw immediately.\n * @param {object} pileMessage \n * @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job \n */\nasync function safeSliceUpload(pileMessage)\n{\n let payload = undefined;\n let errorTolerance = dcpConfig.job.sliceUploadErrorTolerance; // copy number of times we will tolerate non-success when uploading slices directly from config\n\n while (true) // eslint-disable-line no-constant-condition\n {\n try\n {\n const start = Date.now();\n debugging('slice-upload') && console.log('x-dbg-uploadStart', pileMessage.payload.jsonData.length);\n\n payload = await pileMessage.send();\n if (!payload.success)\n {\n debugging('slice-upload') && console.log('x-dbg-uploadBackoff', {length:pileMessage.payload.jsonData.length});\n throw payload;\n }\n else\n {\n debugging('slice-upload') && console.log('x-dbg-uploadProgress', Date.now() - start);\n break;\n }\n }\n catch (error)\n {\n if (--errorTolerance <= 0)\n {\n debugging('slice-upload') && console.log('x-dbg-uploadError', error);\n if (uploadConnection)\n {\n uploadConnection.off('end', createNewConnection);\n uploadConnection.close(); // ensure we clean up the connection\n }\n throw error;\n }\n }\n }\n\n return payload;\n}\n\n/**\n* This function contains the actual logic behind staggered slice uploads\n* to the scheduler which makes quicker deployment possible.\n* \n* Note that we pass in mostToTake so that the uploadLogic function can update \n* it to the new value it needs to be, and then pass it back to the wrapper \n* function (addSlices) which actually does the work of picking up slices \n* and thus uses this value\n* @param {Array} pile the actual array of slices being uploaded to scheduler\n* @param {Number} mostToTake number of slices that should be taken by the wrapper function (addSlices) \n* which actually does the work of picking up slices and thus uses this value.\n* We pass in mostToTake so that the uploadLogic function can update it to the \n* new value it needs to be, and then pass it back to the wrapper\n* @param {*} jobAddress Address of job to upload the slices to \n* @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job\n*/\nasync function sliceUploadLogic(pile, mostToTake, jobAddress)\n{\n if (!uploadConnection)\n createNewConnection();\n\n const 
slicesTaken = pile.length;\n const payloadData = {\n job: jobAddress,\n dataValues: getKvin().marshal(pile)\n };\n const pileMessage = new uploadConnection.Request({\n /* payload object */\n operation: 'addSliceData',\n jsonData: JSON.stringify(payloadData) /* becomes payload.data in transit */\n });\n const pileSize = pileMessage.payload.jsonData.length;\n \n let newMostToTake;\n let uploadedSlices;\n\n // if the pile is larger than the ceiling but we only took one slice, there's no smaller pile we can make\n // so we upload it anyway but we don't try taking more next time cause we were over the ceiling (which \n // is a hard limit on upload sizes)\n if ((pileSize > dcpConfig.job.uploadSlicesCeiling) && (slicesTaken === 1))\n {\n uploadedSlices = await safeSliceUpload(pileMessage);\n newMostToTake = 1;\n }\n \n // if the pile is larger than the target but we only took one slice, there's no smaller pile we can make\n // so we upload it anyway and still try taking more\n else if ((pileSize > dcpConfig.job.uploadSlicesTarget) && (slicesTaken === 1))\n {\n uploadedSlices = await safeSliceUpload(pileMessage);\n newMostToTake = mostToTake * dcpConfig.job.uploadIncreaseFactor;\n }\n \n // otherwise, if the pile is smaller than the soft ceiling, send up the pile anyway (since piles are expensive to make) \n // but remember to include incrementFactor times as many slices in the next pile\n else if (pileSize <= dcpConfig.job.uploadSlicesTarget)\n {\n uploadedSlices = await safeSliceUpload(pileMessage);\n newMostToTake = mostToTake * dcpConfig.job.uploadIncreaseFactor;\n }\n \n // if the pile is over the ceiling then we do not upload and begin reassembling our piles from scratch\n else if (pileSize > dcpConfig.job.uploadSlicesCeiling)\n {\n newMostToTake = -1;\n }\n \n // if the pile is over the target (but implicitly under the ceiling), then upload the pile to scheduler but lower mostToTake\n // by a smaller factor than incrementFactor to allow us to begin \"centering\" sizes of piles around the target\n else if (pileSize > dcpConfig.job.uploadSlicesTarget)\n {\n uploadedSlices = await safeSliceUpload(pileMessage);\n newMostToTake = Math.ceil(mostToTake / ((2 / 3) * dcpConfig.job.uploadIncreaseFactor));\n }\n else\n throw new Error('hopefully impossible code in slice upload logic');\n\n let payload = uploadedSlices ? uploadedSlices.payload : undefined;\n return { payload, newMostToTake }; // in case the user needs lastSliceNumber's value\n}\n\n/**\n* Uploads slices to the scheduler in a staggered fashion\n* @param {Array} dataValues actual array of slices being uploaded to scheduler\n* @param {*} jobAddress Address of job these slices are for\n* @param {*} newTunedKvin undefined, or new version of kvin tuned for speed or size specifically. 
Use if defined.\n* @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job\n*/\nasync function addSlices(dataValues, jobAddress, newTunedKvin)\n{\n if (newTunedKvin)\n tunedKvin = newTunedKvin;\n\n if (!Array.isArray(dataValues))\n throw new TypeError('Only data-by-value jobs may dynamically add slices');\n\n let mostToTake = dcpConfig.job.uploadInitialNumberOfSlices; // maximum number of slices we could take in per pile\n let payload = undefined; // used in return value\n let slicesTaken = 0; // number of slices in the pile already\n let pile = [];\n uploadRefs++;\n\n for (let slice of dataValues)\n {\n pile.push(slice);\n slicesTaken++;\n if (slicesTaken === mostToTake)\n {\n let total = await sliceUploadLogic(pile, mostToTake, jobAddress);\n payload = total.payload;\n \n if (total.newMostToTake < 0)\n {\n /* if total.newMostToTake == -1 (only non-positive value returned), then the pile was not successfully\n * uploaded because it was over the ceiling and we need to upload the pile *itself* again, recursively\n */\n payload = await addSlices(pile, jobAddress);\n /* and next time, the number of slices we take is the number from this time *divided* by the incrementFactor\n * since we know invariably that number of slices was under the ceiling AND target\n * if you're curious why that's an invariant, this is because mostToTake only ever *increases* by being multiplied by \n * a factor of incrementFactor within sliceUploadLogic, and this only occurs when the pile being uploaded that time\n * was under the target\n */\n mostToTake = mostToTake / dcpConfig.job.uploadIncreaseFactor;\n }\n else\n {\n /* in all other cases (other than the pile size being over the ceiling) the sliceUploadLogic helper \n * determines the number of slices we should pick up next time, so we just use the value it spits out\n */\n mostToTake = total.newMostToTake;\n }\n \n // reset slicesTaken and pile since at this point we know for sure the pile has been uploaded\n pile = [];\n slicesTaken = 0;\n }\n }\n // upload the pile one last time in case we continued off the last slice with a non-empty pile\n if (pile.length !== 0)\n {\n let finalObj = await sliceUploadLogic(pile, mostToTake, jobAddress);\n payload = finalObj.payload;\n mostToTake = finalObj.newMostToTake;\n \n if (mostToTake < 0)\n {\n // if you need documentation on the next two lines, look inside the if (total.newMostToTake < 0) just above\n payload = await addSlices(pile, jobAddress);\n mostToTake = mostToTake / dcpConfig.job.uploadIncreaseFactor;\n }\n }\n\n // and finally assign whatever mostToTake was at the end of this run of the function to be returned \n // as part of the payload in case addSlices was called recursively\n payload.mostToTake = mostToTake;\n\n // cleanup connection when done with this function\n uploadRefs--;\n if (uploadConnection && uploadRefs === 0) {\n debugging('slice-upload') && console.debug(`250: closing uploadConnection`, uploadConnection?.id);\n uploadConnection.removeAllListeners('end');\n uploadConnection.close();\n uploadConnection = null;\n debugging('slice-upload') && console.debug(`252: closed uploadConnection`, uploadConnection);\n }\n\n /* contains the job's lastSliceNumber (the only externally-meaningful value returned from \n * the uploading of slices to the scheduler) in case the calling function needs it \n */\n return payload;\n}\n\n/**\n * marshal the value using kvin or instance of the kvin (tunedKvin)\n * tunedKvin is defined if 
job.tuning.kvin is specified.\n *\n * @param {any} value \n * @return {object} A marshaled object\n * \n */\nfunction getKvin()\n{\n return tunedKvin || __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\n}\n\n\nexports.addSlices = addSlices;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/job/upload-slices.js?");
4391
4391
 
4392
4392
  /***/ }),
4393
4393
 
@@ -4447,7 +4447,7 @@ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_mod
4447
4447
  \*************************************************/
4448
4448
  /***/ ((module, __unused_webpack_exports, __webpack_require__) => {
4449
4449
 
4450
- eval("/**\n * @file /src/schedmsg/schedmsg-web.js\n * @author Ryan Rossiter, ryan@kingsds.network\n * @date March 2020\n *\n * This is the SchedMsg implementation for commands that are browser-specific\n * or have browser-specific behaviour.\n */\n\nconst { SchedMsg } = __webpack_require__(/*! ./schedmsg */ \"./src/dcp-client/schedmsg/schedmsg.js\");\n\nclass SchedMsgWeb extends SchedMsg {\n constructor(worker) {\n super(worker);\n\n this.registerHandler('announce', this.onAnnouncement.bind(this));\n this.registerHandler('openPopup', this.onOpenPopup.bind(this));\n this.registerHandler('reload', this.onReload.bind(this));\n }\n\n onAnnouncement({ message }) {\n \n window.alert('DCP Worker Announcement: ' + message);\n\n }\n\n onOpenPopup({ href }) {\n window.open(href);\n }\n\n onReload() {\n const hash = window.location.hash;\n\n let newUrl = window.location.href.replace(/#.*/, '');\n newUrl += (newUrl.indexOf('?') === -1 ? '?' : '&');\n newUrl += 'dcp=d3f7cde3c2ffc818fa9281da428a4a1f4aa70d89,' + Date.now() + hash;\n\n window.location.replace(newUrl);\n }\n}\n\nObject.assign(module.exports, {\n SchedMsgWeb\n});\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/schedmsg/schedmsg-web.js?");
4450
+ eval("/**\n * @file /src/schedmsg/schedmsg-web.js\n * @author Ryan Rossiter, ryan@kingsds.network\n * @date March 2020\n *\n * This is the SchedMsg implementation for commands that are browser-specific\n * or have browser-specific behaviour.\n */\n\nconst { SchedMsg } = __webpack_require__(/*! ./schedmsg */ \"./src/dcp-client/schedmsg/schedmsg.js\");\n\nclass SchedMsgWeb extends SchedMsg {\n constructor(worker) {\n super(worker);\n\n this.registerHandler('announce', this.onAnnouncement.bind(this));\n this.registerHandler('openPopup', this.onOpenPopup.bind(this));\n this.registerHandler('reload', this.onReload.bind(this));\n }\n\n onAnnouncement({ message }) {\n \n window.alert('DCP Worker Announcement: ' + message);\n\n }\n\n onOpenPopup({ href }) {\n window.open(href);\n }\n\n onReload() {\n const hash = window.location.hash;\n\n let newUrl = window.location.href.replace(/#.*/, '');\n newUrl += (newUrl.indexOf('?') === -1 ? '?' : '&');\n newUrl += 'dcp=4214ca584c5104c0a7d8f3ebe5ca39f3f72ab3d1,' + Date.now() + hash;\n\n window.location.replace(newUrl);\n }\n}\n\nObject.assign(module.exports, {\n SchedMsgWeb\n});\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/schedmsg/schedmsg-web.js?");
4451
4451
 
4452
4452
  /***/ }),
4453
4453
 
@@ -4541,7 +4541,7 @@ eval("/**\n * @file ks-cache.js\n * Wallet API Keystore cach
4541
4541
  \****************************************************/
4542
4542
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4543
4543
 
4544
- eval("/**\n * @file passphrase-prompt.js\n * Cross-platform passphrase prompt module for the Wallet API\n *\n * @author Wes Garland - wes@kingsds.network\n * @author Ryan Rossiter - ryan@kingsds.network\n * @date August 2019\n */\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)(\n 'dcp-client:wallet:passphrase-prompt',\n);\nconst ClientModal = __webpack_require__(/*! dcp/dcp-client/client-modal */ \"./src/dcp-client/client-modal/index.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { setTimeout } = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\n\nconst debug = (...args) => {\n if (debugging()) {\n args.unshift('dcp-client:wallet:passphrase-prompt');\n console.debug(...args);\n }\n};\n\n// This is the format for the username for password managers in the browser.\nfunction generateUsername(metaData) {\n return (metaData.label || \"Key\") + \" - \" + (metaData.address || \"Address\");\n}\n\nfunction ppServiceList() {\n const { readPass } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n\n let head = ppServiceList.pending[0]\n\n if (ppServiceList.pending.length === 0) {\n clearTimeout(ppServiceList.timer)\n ppServiceList.timer = undefined\n return\n }\n \n if (head.pending) {\n if (!ppServiceList.timer)\n ppServiceList.timer = setTimeout(ppServiceList, 100)\n return\n }\n \n head.pending = true\n function afterPrompt(ppPromise) {\n ppServiceList.timer = setTimeout(ppServiceList, 100)\n let idx = ppServiceList.pending.indexOf(head)\n if (idx !== -1)\n ppServiceList.pending.splice(idx, 1)\n head.resolve(ppPromise)\n }\n\n if (head.skipCheckFn && head.skipCheckFn())\n return afterPrompt()\n\n readPass(head.prompt).then(afterPrompt);\n}\nppServiceList.pending = [];\nppServiceList.timer = null;\n\n/**\n * This function takes a string, prompt, prints it to the screen, and then\n * returns whatever the user types after it.\n *\n * @param {Object} metaData Options\n * @param {string} metaData.label Label for the keystore\n * @param {*} urgent Read below, careful handling of event loop\n * @param {*} skipCheckFn see ppServiceList\n */\nfunction wallet$$passphrasePrompt$node(prompt, urgent, skipCheckFn) {\n return new Promise(function(resolve, reject) {\n try {\n /* force the outputting of readPass() onto the event loop, so that pending \n * console.log messages (etc) get printed first, when the loop is serviced,\n * instead of while we are typing the password.\n *\n * Also, make it so that it stalls on the event loop if there is already a\n * a password prompt being displayed.\n */\n let listO = {\n resolve: resolve,\n reject: reject,\n prompt,\n skipCheckFn: skipCheckFn,\n };\n\n if (ppServiceList.pending.length === 0 || urgent)\n setTimeout(ppServiceList, 0)\n if (urgent)\n ppServiceList.pending.unshift(listO)\n else\n ppServiceList.pending.push(listO)\n } catch(e) {\n reject(e);\n }\n });\n}\n\nasync function promptCreatePassphrase(metaData={}) {\n if (DCP_ENV.platform === 'nodejs') {\n let pp1, pp2;\n do {\n pp1 = await (__webpack_require__(/*! ../wallet */ \"./src/dcp-client/wallet/index.js\").passphrasePrompt)('Enter a password for your Keystore:', metaData);\n pp2 = await (__webpack_require__(/*! 
../wallet */ \"./src/dcp-client/wallet/index.js\").passphrasePrompt)(\"Please confirm new password by entering it again:\", metaData, true);\n } while(pp1 !== pp2);\n\n return pp1;\n } else {\n return await ClientModal.getPasswordCreation(generateUsername(metaData));\n }\n}\n\nlet createPassphraseFn = promptCreatePassphrase;\nexports.promptCreatePassphrase = function wallet$$create$passphrase(...args) {\n debugging() && console.debug('wallet - promptCreatePassphrase call site ', new Error('debug\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b '));\n return createPassphraseFn(...args);\n}\n\n// only for testing:\nexports.overrideCreatePassphrase = function wallet$$create$passphrase$override(fn) {\n createPassphraseFn = fn;\n}\n\n/** This function accepts arbitrary arguments and returns a string as provided by the user. \n * The default behaviour of this mode is in node, browser is a separate process in passphraseTries()\n * \n * @param {...any} args \n * @return {Promise<string>} \n */\nexports.passphrasePrompt = async function passphrasePrompt(...args) {\n /**\n * This function was written out of spec, and over time has had a lot of additions to it that haven't been spec'd out.\n * To avoid breaking current applications that use the functionality included in this function, the function is overloaded to\n * process different kind of parameters.\n * \n * If the first argument is a string, it will be used as the message to display when passphrasePrompt is called. \n * metaData is an object containing the label of the keystore.\n * urgent is a boolean flag for ppServiceList() that works with the event loop.\n * tryPassphrase is a function (node implementation) and a string (browser implementation), which is used to skip input from user.\n * \n */\n debug('passphrasePrompt args:', args);\n \n /**\n * Form 1:\n * passphrasePrompt(prompt, metaData, urgent/tryPassphrase)\n * Prompt is provided in the hook and urgent/tryPassphrase is set based on the value. 
Urgent can only be a boolean, so if not a boolean, it is tryPassphrase.\n * \n * Form 2:\n * passphrasePrompt(metaData, tryPassphrase)\n * A default prompt is used\n */\n let prompt, metaData, urgent = false, tryPassphrase;\n if (typeof(args[0]) === 'string')\n {\n prompt = args[0];\n metaData = args[1];\n if (typeof(args[2]) === 'boolean')\n urgent = args[2]\n else\n tryPassphrase = args[2];\n }\n else\n {\n prompt = 'Enter your passphrase here:';\n metaData = args[0];\n if (args[1])\n tryPassphrase = args[1];\n }\n \n if (DCP_ENV.isBrowserPlatform) {\n await ClientModal.getPasswordEntry(generateUsername(metaData), metaData.label, metaData.maxTries, tryPassphrase);\n } else {\n let skipCheckFn = tryPassphrase;\n let pp = await wallet$$passphrasePrompt$node(prompt, urgent, skipCheckFn);\n \n return pp;\n }\n}\n\n\n/** This function tries to get the correct passphrase\n * by asking the user up to exports.maxPassphraseTries times.\n * We give `passphrasePrompt` the callback `tryAndRememberPassphrase`\n * so that the passwordCreation.js modal knows if a password was correct or not.\n * This lets it trigger the browser to offer to save only correct passwords.\n * If all tries are used and tryFn never returns truey, this function returns false.\n *\n * @param {object} metaData metaData is passed to passphrasePrompt()\n * @param {function} tryFn function which trys to unlock the keystore with a supplied password\n */\nexports.passphraseTries = async function wallet$$passphraseTries(metaData, tryFn, skipCheckFn) {\n let res;\n metaData.maxTries = exports.maxPassphraseTries\n\n const tryAndRememberPassphrase = async function keystore$$tryPassphrase(pp) {\n const thisRes = await tryFn(pp);\n res = thisRes || res;\n return !!thisRes;\n }\n\n if (DCP_ENV.isBrowserPlatform) { //Special case for the web modal - this prompts\n await (__webpack_require__(/*! ../wallet */ \"./src/dcp-client/wallet/index.js\").passphrasePrompt)( metaData, tryAndRememberPassphrase ) // Just returns a passphrase\n } else { // Tries using normal passphrase function, the user cannot override number of passphrase tries\n let prompt = 'Enter your passphrase:'\n let ppIsCorrect;\n for(let i = 0; !ppIsCorrect && i < metaData.maxTries; i++) {\n metaData.tries = i\n const pp = await (__webpack_require__(/*! ../wallet */ \"./src/dcp-client/wallet/index.js\").passphrasePrompt)(prompt, metaData, skipCheckFn) // Just returns a passphrase\n ppIsCorrect = await tryAndRememberPassphrase(pp);\n prompt = 'Incorrect passphrase, enter your passphrase again:'\n }\n }\n return res || false;\n}\n\nexports.maxPassphraseTries = 5;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/wallet/passphrase-prompt.js?");
4544
+ eval("/**\n * @file passphrase-prompt.js\n * Cross-platform passphrase prompt module for the Wallet API\n *\n * @author Wes Garland - wes@kingsds.network\n * @author Ryan Rossiter - ryan@kingsds.network\n * @date August 2019\n */\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)(\n 'dcp-client:wallet:passphrase-prompt',\n);\nconst ClientModal = __webpack_require__(/*! dcp/dcp-client/client-modal */ \"./src/dcp-client/client-modal/index.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { setTimeout } = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\n\nconst debug = (...args) => {\n if (debugging()) {\n args.unshift('dcp-client:wallet:passphrase-prompt');\n console.debug(...args);\n }\n};\n\n// This is the format for the username for password managers in the browser.\nfunction generateUsername(metaData) {\n return (metaData.label || \"Key\") + \" - \" + (metaData.address || \"Address\");\n}\n\nfunction ppServiceList() {\n const { readPass } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n\n let head = ppServiceList.pending[0]\n\n if (ppServiceList.pending.length === 0) {\n clearTimeout(ppServiceList.timer)\n ppServiceList.timer = undefined\n return\n }\n \n if (head.pending) {\n if (!ppServiceList.timer)\n ppServiceList.timer = setTimeout(ppServiceList, 100)\n return\n }\n \n head.pending = true\n function afterPrompt(ppPromise) {\n ppServiceList.timer = setTimeout(ppServiceList, 100)\n let idx = ppServiceList.pending.indexOf(head)\n if (idx !== -1)\n ppServiceList.pending.splice(idx, 1)\n head.resolve(ppPromise)\n }\n\n if (head.skipCheckFn && head.skipCheckFn())\n return afterPrompt()\n\n readPass(head.prompt).then(afterPrompt);\n}\nppServiceList.pending = [];\nppServiceList.timer = null;\n\n/**\n * This function takes a string, prompt, prints it to the screen, and then\n * returns whatever the user types after it.\n *\n * @param {Object} metaData Options\n * @param {string} metaData.label Label for the keystore\n * @param {*} urgent Read below, careful handling of event loop\n * @param {*} skipCheckFn see ppServiceList\n */\nfunction wallet$$passphrasePrompt$node(prompt, urgent, skipCheckFn) {\n return new Promise(function(resolve, reject) {\n try {\n /* force the outputting of readPass() onto the event loop, so that pending \n * console.log messages (etc) get printed first, when the loop is serviced,\n * instead of while we are typing the password.\n *\n * Also, make it so that it stalls on the event loop if there is already a\n * a password prompt being displayed.\n */\n let listO = {\n resolve: resolve,\n reject: reject,\n prompt,\n skipCheckFn: skipCheckFn,\n };\n\n if (ppServiceList.pending.length === 0 || urgent)\n setTimeout(ppServiceList, 0)\n if (urgent)\n ppServiceList.pending.unshift(listO)\n else\n ppServiceList.pending.push(listO)\n } catch(e) {\n reject(e);\n }\n });\n}\n\nasync function promptCreatePassphrase(metaData={}) {\n if (DCP_ENV.platform === 'nodejs') {\n let pp1, pp2;\n do {\n pp1 = await (__webpack_require__(/*! ../wallet */ \"./src/dcp-client/wallet/index.js\").passphrasePrompt)('Enter a password for your Keystore:', metaData);\n pp2 = await (__webpack_require__(/*! 
../wallet */ \"./src/dcp-client/wallet/index.js\").passphrasePrompt)(\"Please confirm new password by entering it again:\", metaData, true);\n } while(pp1 !== pp2);\n\n return pp1;\n } else {\n return await ClientModal.getPasswordCreation(generateUsername(metaData));\n }\n}\n\nlet createPassphraseFn = promptCreatePassphrase;\nexports.promptCreatePassphrase = function wallet$$create$passphrase(...args) {\n debugging() && console.debug('wallet - promptCreatePassphrase call site ', new Error('debug\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b '));\n return createPassphraseFn(...args);\n}\n\n// only for testing:\nexports.overrideCreatePassphrase = function wallet$$create$passphrase$override(fn) {\n createPassphraseFn = fn;\n}\n\n/** This function accepts arbitrary arguments and returns a string as provided by the user. \n * The default behaviour of this mode is in node, browser is a separate process in passphraseTries()\n * \n * @param {...any} args \n * @return {Promise<string>} \n */\nexports.passphrasePrompt = async function passphrasePrompt(...args) {\n /**\n * This function was written out of spec, and over time has had a lot of additions to it that haven't been spec'd out.\n * To avoid breaking current applications that use the functionality included in this function, the function is overloaded to\n * process different kind of parameters.\n * \n * If the first argument is a string, it will be used as the message to display when passphrasePrompt is called. \n * metaData is an object containing the label of the keystore.\n * urgent is a boolean flag for ppServiceList() that works with the event loop.\n * tryPassphrase is a function (node implementation) and a string (browser implementation), which is used to skip input from user.\n * \n */\n debug('passphrasePrompt args:', args);\n \n /**\n * Form 1:\n * passphrasePrompt(prompt, metaData, urgent/tryPassphrase)\n * Prompt is provided in the hook and urgent/tryPassphrase is set based on the value. Urgent can only be a boolean, so if not a boolean, it is tryPassphrase.\n * \n * Form 2:\n * passphrasePrompt(metaData, tryPassphrase)\n * A default prompt is used\n */\n let prompt, metaData, urgent = false, tryPassphrase;\n if (typeof(args[0]) === 'string')\n {\n prompt = args[0];\n metaData = args[1];\n if (typeof(args[2]) === 'boolean')\n urgent = args[2]\n else\n tryPassphrase = args[2];\n }\n else\n {\n metaData = args[0];\n const ksIdentifier = metaData.label ? 
metaData.label : metaData.address;\n prompt = `Enter your passphrase here (${ksIdentifier}):`;\n if (args[1])\n tryPassphrase = args[1];\n }\n \n if (DCP_ENV.isBrowserPlatform) {\n await ClientModal.getPasswordEntry(generateUsername(metaData), metaData.label, metaData.maxTries, tryPassphrase);\n } else {\n let skipCheckFn = tryPassphrase;\n let pp = await wallet$$passphrasePrompt$node(prompt, urgent, skipCheckFn);\n \n return pp;\n }\n}\n\n\n/** This function tries to get the correct passphrase\n * by asking the user up to exports.maxPassphraseTries times.\n * We give `passphrasePrompt` the callback `tryAndRememberPassphrase`\n * so that the passwordCreation.js modal knows if a password was correct or not.\n * This lets it trigger the browser to offer to save only correct passwords.\n * If all tries are used and tryFn never returns truey, this function returns false.\n *\n * @param {object} metaData metaData is passed to passphrasePrompt()\n * @param {function} tryFn function which trys to unlock the keystore with a supplied password\n */\nexports.passphraseTries = async function wallet$$passphraseTries(metaData, tryFn, skipCheckFn) {\n let res;\n metaData.maxTries = exports.maxPassphraseTries\n\n const tryAndRememberPassphrase = async function keystore$$tryPassphrase(pp) {\n const thisRes = await tryFn(pp);\n res = thisRes || res;\n return !!thisRes;\n }\n\n if (DCP_ENV.isBrowserPlatform) { //Special case for the web modal - this prompts\n await (__webpack_require__(/*! ../wallet */ \"./src/dcp-client/wallet/index.js\").passphrasePrompt)( metaData, tryAndRememberPassphrase ) // Just returns a passphrase\n } else { // Tries using normal passphrase function, the user cannot override number of passphrase tries\n const ksIdentifier = metaData.label ? metaData.label : metaData.address;\n let prompt = `Enter your passphrase (${ksIdentifier}):`;\n let ppIsCorrect;\n for(let i = 0; !ppIsCorrect && i < metaData.maxTries; i++) {\n metaData.tries = i\n const pp = await (__webpack_require__(/*! ../wallet */ \"./src/dcp-client/wallet/index.js\").passphrasePrompt)(prompt, metaData, skipCheckFn) // Just returns a passphrase\n ppIsCorrect = await tryAndRememberPassphrase(pp);\n prompt = `Incorrect passphrase, enter your passphrase again (${ksIdentifier}):`\n }\n }\n return res || false;\n}\n\nexports.maxPassphraseTries = 5;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/wallet/passphrase-prompt.js?");
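passphraseTries() in passphrase-prompt.js above retries the prompt up to exports.maxPassphraseTries (5) times, treats any truthy return from tryFn as a successful unlock, and reports that result back so the browser modal only offers to save correct passphrases; the version in this release also folds the keystore's label or address into the prompt text. A hedged usage sketch follows; keystore.unlock and the require path are hypothetical stand-ins, not APIs confirmed by this diff.

/* Illustrative usage only; `keystore.unlock` and the module path are hypothetical. */
const { passphraseTries } = require('dcp/dcp-client/wallet/passphrase-prompt');

async function unlockKeystore(keystore)
{
  const metaData = { label: keystore.label, address: keystore.address };
  const unlocked = await passphraseTries(metaData, async function tryFn(passphrase) {
    try { return await keystore.unlock(passphrase); } /* truthy result stops the retry loop */
    catch (error) { return false; }                   /* wrong passphrase: prompt again */
  });
  if (!unlocked)
    throw new Error('keystore was not unlocked within maxPassphraseTries attempts');
  return unlocked;
}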
4545
4545
 
4546
4546
  /***/ }),
4547
4547
 
@@ -4626,7 +4626,7 @@ eval("/**\n * @file origin-access-manager.js\n * The Origin
4626
4626
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4627
4627
 
4628
4628
  "use strict";
4629
- eval("// NOTE - need timeout/postmessage function\n/**\n * @file dcp-client/worker/sandbox.js\n *\n * A sandbox that when constructed and assigned can do work for\n * a distributed slice. A sandbox runs for a single slice at a time.\n *\n * Usage:\n * let sandbox = new Sandbox()\n * await sandbox.start()\n * let results = await sandbox.run(slice)\n *\n * Debug flags:\n * Sandbox.debugWork = true // - turns off 30 second timeout to let user debug sandbox innards more easily\n * Sandbox.debugState = true // - logs all state transitions for this sandbox\n * Sandbox.debugEvents = true // - logs all events received from the sandbox\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * @date May 2019\n * @module sandbox\n */\n/* global dcpConfig */\n// @ts-check\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { assert, assertEq3 } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { rehydrateRange } = __webpack_require__(/*! dcp/dcp-client/range-object */ \"./src/dcp-client/range-object.js\");\nconst { allowOriginsPurposes } = __webpack_require__(/*! dcp/common/worker-constants */ \"./src/common/worker-constants.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\n\nlet timeDilation = 1;\nif (DCP_ENV.platform === 'nodejs') {\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n /** Make timers 10x slower when running in niim */\n timeDilation = (requireNative('module')._cache.niim instanceof requireNative('module').Module) ? 10 : 1;\n}\n\n/**\n * Wraps console.debug to emulate debug module prefixing messages on npm.\n * @param {...any} args\n */\nconst debug = (...args) => {\n if (debugging()) {\n console.debug('dcp-client:worker:sandbox', ...args);\n }\n};\n\nconst nanoid = (__webpack_require__(/*! nanoid */ \"./node_modules/nanoid/index.browser.js\").nanoid);\n\nconst EventEmitter = __webpack_require__(/*! events */ \"./node_modules/events/events.js\");\nconst { fetchURI, encodeDataURI } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst dcp_timers = __webpack_require__(/*! 
dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\n\n// Sandbox states\nconst UNREADY = 'UNREADY' // No Sandbox (web worker, saworker, etc) has been constructed yet\nconst READYING = 'READYING' // Sandbox is being constructed and environment (bravojs, env) is being set up\nconst READY_FOR_ASSIGN = 'READY_FOR_ASSIGN' // Sandbox is ready to be assigned\nconst ASSIGNING = 'ASSIGNING' // Sandbox is running through assigning steps\nconst ASSIGNED = 'ASSIGNED' // Sandbox is assigned but not working\nconst WORKING = 'WORKING' // Sandbox is working\nconst TERMINATED = 'TERMINATED'\nconst EVAL_RESULT_PREFIX = 'evalResult::';\n\nclass SandboxError extends Error {}\nclass NoProgressError extends SandboxError { constructor(...args) { super(...args); this.errorCode = 'ENOPROGRESS'; } }\nclass SliceTooSlowError extends SandboxError { constructor(...args) { super(...args); this.errorCode = 'ESLICETOOSLOW'; } }\nclass UncaughtExceptionError extends SandboxError { constructor(...args) { super(...args); this.errorCode = 'EUNCAUGHT'; } }\nclass RemoteFetchError extends SandboxError { constructor(...args) { super(...args); this.errorCode = 'EPERM_ORIGIN'; }}\n\n/** @typedef {string} opaqueId */ // 22 character base64 string\n/** @typedef {import('./slice').Slice} Slice */\n/** @typedef {import('./supervisor-cache').SupervisorCache} SupervisorCache */\n/** @typedef {import('dcp/utils/jsdoc-types').SandboxOptions} SandboxOptions */\n\n/**\n * Public event emitter.\n * https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n */\nclass SandboxHandle extends EventEmitter\n{\n /** @type {{ id: number, public: { name: string, description: string, link: string }, jobAddress: string, slice: Slice }} */\n #info;\n\n /**\n * @constructor\n * @param {Sandbox} sandbox\n */\n constructor (sandbox)\n {\n super({ captureRejections: false });\n this.#info = sandbox.info;\n }\n /** @type {number} */\n get id () { return this.#info.id; }\n /** @type {{ name: string, description: string, link: string }} */\n get public () { return this.#info.public ?? { name: '<unassigned>', description: '', link: '' }; }\n /** @type {string} */\n get jobAddress () { return this.#info.jobAddress; }\n /** @type {number} */\n get sliceNumber () { return this.#info.slice?.sliceNumber ?? -1; }\n}\nexports.SandboxHandle = SandboxHandle;\n\nclass Sandbox extends EventEmitter {\n /**\n * A Sandbox (i.e. a worker sandbox) which executes distributed slices.\n *\n * @constructor\n * @param {object} supervisor the instance of Supervisor that owns this sandbox\n * @param {SandboxOptions} options\n */\n constructor (supervisor, options) {\n super({ captureRejections: false });\n\n this.supervisor = supervisor;\n /** @type {SupervisorCache} */\n this.supervisorCache = supervisor.cache;\n /** @type {SandboxOptions} */\n this.options = {\n ignoreNoProgress: false,\n ...options,\n SandboxConstructor: options.SandboxConstructor ||\n (__webpack_require__(/*! 
./evaluators */ \"./src/dcp-client/worker/evaluators/index.js\").BrowserEvaluator),\n }\n\n /** @type {object} */\n this.evaluatorHandle = null;\n /** @type {object} */\n this.capabilities = null;\n\n /** @type {string} */\n this._state = UNREADY;\n /** @type {boolean} */\n this.allocated = false;\n /** @type {number?} */\n this.progress = 100;\n /** @type {object} */\n this.progressReports = null;\n /** @type {object} */\n this.progressTimeout = null;\n /** @type {object} */\n this.sliceTimeout = null;\n /** @type {object} */\n this.rejectionData = null;\n\n /** @type {number?} */\n this.started = null;\n /** @type {number?} */\n this.sliceStartTime = null;\n /** @type {boolean} */\n this.requiresGPU = false;\n /** @type {string|URL} */\n this.packageURL = dcpConfig.packageManager.location\n\n /** @type {{ id: number, public: { name: string, description: string, link: string }, jobAddress: string, slice: Slice }} */\n this.info = {\n id: Sandbox.getNewId(),\n public: null,\n jobAddress: null,\n slice: null,\n };\n\n /**\n * Event emitter containing info that describes the sandbox.\n * @type {SandboxHandle}\n */\n this.sandboxHandle = new SandboxHandle(this);\n\n this.ringMessageHandlers = [\n this.handleRing0Message,\n this.handleRing1Message,\n this.handleRing2Message,\n this.handleRing3Message,\n ];\n\n this.resetSliceTimeReport();\n }\n\n get identifier() {\n if (this.allocated)\n return `${this.id}.${this.jobAddress}.${this.state}.${this.allocated}`;\n return `${this.id}.${this.jobAddress}.${this.state}`;\n }\n\n static getNewId() {\n return Sandbox.idCounter++;\n }\n\n /** @type {number} */\n get id () { return this.info.id; }\n /** @type {{ name: string, description: string, link: string }} */\n get public () { return this.info.public; }\n /** @type {{ name: string, description: string, link: string }} */\n set public (data) { this.info.public = data; }\n /** @type {string} */\n get jobAddress () { return this.info.jobAddress; }\n /** @type {string} */\n set jobAddress (address) { this.info.jobAddress = address; }\n /** @type {Slice} */\n get slice () { return this.info.slice; }\n /** @type {Slice} */\n set slice (slice) { this.info.slice = slice; }\n /** @type {number} */\n get sliceNumber () { return this.slice ? this.slice.sliceNumber : -1; }\n\n get state () {\n return this._state\n }\n\n set state (value) {\n if (Sandbox.debugState) {\n console.debug(`sandbox - changing state of ${this.id}... ${this._state} -> ${value}`)\n }\n\n if (this.state === TERMINATED && value !== TERMINATED) {\n // For safety!\n throw new Error(`Sandbox set state violation, attepted to change state from ${this.state} to ${value}`);\n }\n\n this._state = value;\n }\n\n get isReadyForAssign () {\n return this.state === READY_FOR_ASSIGN;\n }\n\n get isAssigned () {\n return this.state === ASSIGNED;\n }\n\n get isWorking () {\n return this.state === WORKING;\n }\n\n get isTerminated () {\n return this.state === TERMINATED;\n }\n\n changeWorkingToAssigned () {\n if (this.isWorking) {\n this.state = ASSIGNED;\n this.sandboxHandle.emit('ready');\n }\n }\n\n setIsAssigning () {\n this.state = ASSIGNING;\n }\n\n /**\n * Readies the sandbox. 
This will result in the sandbox being ready and not assigned,\n * it will need to be assigned with a job before it is able to do work.\n *\n * @todo maybe preload specific modules or let the cache pass in what modules to load?\n * @throws on failure to ready\n */\n async start(delay = 0) {\n this.started = Date.now();\n this.state = READYING;\n\n if (delay > 0) await new Promise((resolve) => dcp_timers.setTimeout(resolve, delay * timeDilation));\n\n try {\n // RING 0\n this.evaluatorHandle = new this.options.SandboxConstructor({\n name: `DCP Sandbox #${this.id}`,\n });\n this.evaluatorHandle.onerror = this.onerror.bind(this);\n\n const messageHandler = this.onmessage.bind(this);\n this.evaluatorHandle.onmessage = function onmessage(event)\n {\n let data;\n if (event.data.serialized)\n {\n data = kvin.parse(event.data.message);\n }\n else\n {\n data = kvin.unmarshal(event.data);\n }\n messageHandler({ data });\n }\n\n const evaluatorPostMessage = this.evaluatorHandle.postMessage.bind(this.evaluatorHandle);\n this.evaluatorHandle.postMessage = function postMessage(message)\n {\n evaluatorPostMessage(kvin.marshal(message));\n }\n\n const ceci = this;\n this.evaluatorHandle.addEventListener('end', () => {\n debugging() && console.debug('Sandbox evaluatorHandle-end-handler:', this.identifier);\n ceci.terminate(true)\n });\n\n // Now in RING 1\n\n // Now in RING 2\n await this.describe();\n this.state = READY_FOR_ASSIGN;\n } catch (error) {\n console.warn('Failed to start the sandbox -', error.message);\n this.terminate(false);\n throw error;\n }\n }\n\n /**\n * This will assign the sandbox with a job, loading its sandbox code\n * into the sandbox.\n *\n * @param {string} jobAddress The address of the job to assign to\n * @throws on initialization failure\n */\n async assign(jobAddress) {\n this.jobAddress = jobAddress;\n this.job = await this.supervisorCache.fetchJob(jobAddress, this.supervisor.originManager);\n /* At this point, the worker has decided that this sandbox will be associated with a specific job. \n Therefore, we emit the SandboxHandle<job> event. 
*/\n this.sandboxHandle.emit('job', this.supervisor.jobHandles[this.jobAddress]);\n\n assertEq3(this.job.address, jobAddress);\n assert(typeof this.job === 'object');\n assert(typeof this.job.requirements === 'object');\n assert(Array.isArray(this.job.dependencies));\n assert(Array.isArray(this.job.requirePath));\n\n // Extract public data from job, with defaults\n this.public = Object.assign({\n name: `Anonymous Job ${this.job.address.slice(0, 6)}`,\n description: 'Discreetly helping make the world smarter.',\n link: 'https://distributed.computer/about',\n }, this.job.public);\n\n // Future: We may want other filename tags for appliances // RR Nov 2019\n\n // Important: The order of applying requirements before loading the sandbox code\n // is important for modules and sandbox code to set globals over the whitelist.\n await this.applySandboxRequirements(this.job.requirements);\n await this.assignEvaluator();\n this.state = ASSIGNED;\n this.sandboxHandle.emit('ready');\n }\n\n async assignEvaluator() {\n debug('Begin assigning job to evaluator');\n const ceci = this;\n\n return new Promise(function sandbox$$assignEvaluatorPromise(resolve, reject) {\n const message = {\n request: 'assign',\n job: ceci.job,\n sandboxConfig: dcpConfig.supervisor.sandbox,\n };\n\n const onSuccess = (event) => {\n // eslint-disable-next-line no-use-before-define\n ceci.removeListener('reject', onFail);\n ceci.emit('assigned', event.jobAddress);\n debug('Job assigned to evaluator');\n resolve();\n };\n\n const onFail = (error) => {\n // eslint-disable-next-line no-use-before-define\n ceci.removeListener('assigned', onSuccess);\n reject(error);\n };\n\n ceci.once('assigned', onSuccess);\n ceci.once('reject', onFail);\n\n ceci.evaluatorHandle.postMessage(message);\n });\n }\n\n /**\n * Evaluates a string inside the sandbox.\n *\n * @param {string} code - the code to evaluate in the sandbox\n * @param {string} filename - the name of the 'file' to help with debugging,\n * no longer working though?\n * @returns {Promise} - resolves with eval result on success, rejects\n * otherwise\n */\n eval(code, filename) {\n var ceci = this;\n \n return new Promise(function sandbox$$eval$Promise(resolve, reject) {\n let msgId = nanoid();\n let msg = {\n request: 'eval',\n data: code,\n filename,\n msgId, \n }\n\n const eventId = EVAL_RESULT_PREFIX + msgId;\n\n let onSuccess = (event) => {\n ceci.removeListener('reject', onFail)\n resolve(event)\n }\n\n let onFail = (event) => {\n ceci.removeListener(eventId, onSuccess)\n reject(event)\n }\n\n ceci.once(eventId, onSuccess);\n ceci.once('reject', onFail)\n\n ceci.evaluatorHandle.postMessage(msg)\n })\n }\n\n /**\n * Resets the state of the bootstrap, without resetting the sandbox function if assigned.\n * Mostly used to reset the progress status before reusing a sandbox on another slice.\n * Must be called after @start.\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n resetSandboxState () {\n var ceci = this;\n\n return new Promise(function sandbox$resetSandboxStatePromise(resolve, reject) {\n let failTimeout;\n let msg = {\n request: 'resetState',\n };\n\n function sandbox$resetSandboxState$success () {\n if (failTimeout === false)\n return; /* already rejected */\n dcp_timers.clearTimeout(failTimeout);\n failTimeout = false;\n resolve();\n }\n\n ceci.once('resetStateDone', sandbox$resetSandboxState$success);\n\n failTimeout = dcp_timers.setTimeout(function sandbox$resetSandboxState$fail() {\n if (failTimeout === false)\n return; /* already 
resolved */\n \n ceci.removeListener('resetStateDone', sandbox$resetSandboxState$success);\n ceci.terminate(false);\n failTimeout = false;\n\n reject(new Error('resetState never received resetStateDone event from sandbox'));\n }, 3000 * timeDilation); /* XXXwg need tuneable */\n\n assert(ceci.evaluatorHandle); // It is possible that ceci.terminate nulls out evaluatorHandle before getting here.\n ceci.evaluatorHandle.postMessage(msg);\n });\n }\n\n /**\n * Sends a post message to describe its capabilities.\n *\n * Side effect: Sets the capabilities property of the current sandbox.\n *\n * @returns {Promise} Resolves with the sandbox's capabilities. Rejects with\n * an error saying a response was not received.\n * @memberof Sandbox\n */\n describe() {\n debug('Beginning to describe evaluator');\n var ceci = this;\n \n return new Promise(function sandbox$describePromise(resolve, reject) {\n if (ceci.evaluatorHandle === null) {\n return reject(new Error('Evaluator has not been initialized.'));\n }\n\n /**\n * Opted to create a flag for the describe response being received so that\n * we don't have to *hoist* the timeout's id to clear it in the response\n * handler.\n */\n let didReceiveDescribeResponse = false;\n const describeResponseHandler = (data) => {\n didReceiveDescribeResponse = true;\n const { capabilities } = data;\n if (typeof capabilities === 'undefined') {\n reject(\n new Error('Did not receive capabilities from describe response.'),\n );\n }\n ceci.capabilities = capabilities;\n debug('Evaluator has been described');\n resolve(capabilities);\n };\n ceci.once('describe', describeResponseHandler); \n\n const describeResponseFailedHandler = () => {\n if (!didReceiveDescribeResponse) {\n ceci.removeListener('describe', describeResponseHandler);\n ceci.terminate(false);\n reject(\n new Error(\n 'Describe message timed-out. No describe response was received from the describe command.',\n ),\n );\n }\n };\n\n const message = {\n request: 'describe',\n };\n\n // Arbitrarily set the waiting time.\n dcp_timers.setTimeout(describeResponseFailedHandler, 6000 * timeDilation); /* XXXwg need tuneable */\n assert(ceci.evaluatorHandle); // It is possible that ceci.terminate nulls out evaluatorHandle before getting here.\n ceci.evaluatorHandle.postMessage(message);\n });\n }\n\n /**\n * Passes the job's requirements object into the sandbox so that the global\n * access lists can be updated accordingly.\n *\n * e.g. 
disallow access to OffscreenCanvas without\n * environment.offscreenCanvas=true present.\n *\n * Must be called after @start.\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n applySandboxRequirements(requirements) {\n var ceci = this;\n \n return new Promise(function sandbox$applySandboxRequirementsPromise(resolve, reject) {\n const message = {\n requirements,\n request: 'applyRequirements',\n };\n let wereRequirementsApplied = false;\n\n function sandbox$applyRequirements$success() {\n wereRequirementsApplied = true;\n resolve();\n };\n\n ceci.once('applyRequirementsDone', sandbox$applyRequirements$success);\n\n assert(typeof message.requirements === 'object');\n ceci.evaluatorHandle.postMessage(message);\n\n dcp_timers.setTimeout(function sandbox$finishApplySandboxRequirements() {\n if (!wereRequirementsApplied) {\n ceci.removeListener('applyRequirementsDone', sandbox$applyRequirements$success);\n ceci.terminate(false);\n reject(\n new Error(\n 'applyRequirements never received applyRequirementsDone response from sandbox',\n ),\n );\n }\n }, 3000 * timeDilation); /* XXXwg needs tunable */\n });\n }\n\n /**\n * Executes a slice received from the supervisor.\n * Must be called after @start.\n *\n * @param {Slice} slice - bare minimum data required for the job/job code to be executed on\n * @param {number} [delay = 0] the delay that this method should wait before beginning work, used to avoid starting all sandboxes at once\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n\n async work (slice, delay = 0) {\n var ceci = this;\n\n if (!ceci.isAssigned) {\n throw new Error(\"Sandbox.run: Sandbox is not ready to work, state=\" + ceci.state);\n }\n\n ceci.state = WORKING;\n ceci.slice = slice;\n assert(slice);\n\n this.sandboxHandle.emit('slice', slice.sliceNumber);\n\n // cf. 
DCP-1720\n this.resetSliceTimeReport();\n \n // Now wait for the delay if provided, prevents many sandboxes starting at once from crashing the supervisor\n if (delay > 0) await new Promise(resolve => dcp_timers.setTimeout(resolve, (delay + 1) * timeDilation));\n if (!ceci.isWorking) return; // sandbox.terminate could have been called during the delay timeout\n\n // Prepare the sandbox to begin work\n // will be replaced by `assign` message that should be called before emitting a `work` message\n if (ceci.jobAddress !== slice.jobAddress) {\n throw new Error(`Sandbox.run: Sandbox is already assigned and jobAddress doesn't match previous (${ceci.jobAddress} !== ${slice.jobAddress})`);\n }\n\n let sliceHnd = { job: ceci.public, sandbox: ceci };\n await ceci.resetSandboxState();\n if (!ceci.slice) {\n console.error(`Slice for job ${ceci.jobAddress} vanished during work initialization - aborting`);\n return;\n }\n\n let inputDatum;\n let dataError = false;\n try {\n if (ceci.slice.datumUri)\n inputDatum = await fetchURI(ceci.slice.datumUri, this.supervisor.originManager, allowOriginsPurposes.fetchData);\n else {\n let { mro } = await ceci.supervisorCache.fetchJob(\n ceci.jobAddress,\n this.supervisor.originManager\n );\n const ro = rehydrateRange(mro);\n // -1 to prevent an OBOE since slice numbers start at 1.\n inputDatum = ro[ceci.slice.sliceNumber - 1];\n }\n } catch (err) {\n dataError = err;\n if(err.code === 'EPERM_ORIGIN')\n dataError.errorCode = 'EPERM_ORIGIN'\n else\n dataError.errorCode = 'EUNCAUGHTERROR'\n ceci.emit('workEmit', {\n eventName: 'error',\n payload: {\n message: dataError.message,\n stack:dataError.stack,\n name: ceci.public.name\n }\n });\n }\n\n debugging('sandbox') && debug(`Fetched datum: ${inputDatum}`);\n\n if (!ceci.slice) {\n console.error(`Slice for job ${ceci.jobAddress} vanished after data fetch - aborting`);\n return;\n }\n\n ceci.resetProgressTimeout();\n ceci.resetSliceTimeout();\n\n return new Promise(function sandbox$$workPromise(resolve, reject) {\n function sandbox$$work$onSuccess (event) {\n ceci.removeListener('reject', sandbox$$work$onFail)\n resolve(event)\n };\n\n function sandbox$$work$onFail (err) {\n ceci.removeListener('resolve', sandbox$$work$onSuccess)\n reject(err)\n };\n\n ceci.once('resolve', sandbox$$work$onSuccess);\n\n ceci.once('reject', sandbox$$work$onFail);\n\n ceci.sliceStartTime = Date.now();\n ceci.progress = null;\n ceci.progressReports = {\n last: undefined,\n lastDeterministic: undefined,\n };\n\n ceci.resetProgressTimeout();\n ceci.resetSliceTimeout();\n ceci.emit('start', sliceHnd);\n \n if(dataError){\n ceci.removeListener('resolve', sandbox$$work$onSuccess);\n ceci.removeListener('reject', sandbox$$work$onFail);\n dcp_timers.setTimeout(() => reject(dataError), 0)\n\n } else {\n ceci.evaluatorHandle.postMessage({\n request: 'main',\n data: inputDatum,\n })\n }\n })\n .then(async function sandbox$$work$then(event) {\n debugging() && console.debug('Sandbox: sliceFinish', ceci.jobAddress, ceci.slice?.sliceNumber, event?.timeReport);\n ceci.sandboxHandle.emit('sliceEnd', ceci.slice?.sliceNumber); \n ceci.emit('complete', ceci.jobAddress);\n\n ceci.changeWorkingToAssigned();\n ceci.slice = false;\n return event;\n })\n .catch((err) => { \n if (err.name === 'EWORKREJECT') {\n this.rejectionData = err;\n this.rejectionData.timeReport = this.sliceTimeReport;\n this.emit('rejectedWorkMetrics', this.rejectionData);\n delete this.rejectionData;\n } else { // sandbox termination for rejected work happens in 
Supervisor.handleRejectedWork\n // Ceci is the reject callback for when the slice throws an error\n ceci.terminate(false);\n }\n\n ceci.supervisor.worker.emit('error', err, 'slice');\n\n if (err instanceof NoProgressError) {\n ceci.emit('workEmit', {\n eventName: 'noProgress',\n payload: {\n timestamp: Date.now() - ceci.sliceStartTime,\n data: ceci.slice.datumUri,\n progressReports: ceci.progressReports,\n }\n });\n }\n throw err;\n });\n }\n\n resetProgressTimeout() {\n if (this.progressTimeout) {\n dcp_timers.clearTimeout(this.progressTimeout);\n this.progressTimeout = null;\n }\n\n this.progressTimeout = dcp_timers.setTimeout(() => {\n if (this.options.ignoreNoProgress) {\n return console.warn(\"ENOPROGRESS silenced by localExec: In a remote worker, this slice would be stopped for not calling progress frequently enough.\");\n }\n\n this.emit('reject', new NoProgressError(`No progress event was received in the last ${dcpConfig.supervisor.sandbox.progressTimeout / 1000} seconds.`));\n }, +dcpConfig.supervisor.sandbox.progressTimeout * timeDilation);\n }\n\n resetSliceTimeout() {\n if (this.sliceTimeout) dcp_timers.clearTimeout(this.sliceTimeout);\n\n this.sliceTimeout = dcp_timers.setTimeout(() => {\n if (Sandbox.debugWork) return console.warn(\"Sandbox.debugWork: Ignoring slice timeout\");\n\n this.emit('reject', new SliceTooSlowError(`Slice took longer than ${dcpConfig.supervisor.sandbox.sliceTimeout / 1000} seconds.`));\n }, +dcpConfig.supervisor.sandbox.sliceTimeout * timeDilation);\n }\n \n async handleRing0Message(data) {\n debugging('event:ring-0') && debug('event:ring-0', data);\n //handling a true ring 0 message\n switch (data.request) {\n case 'scriptLoaded':\n if(data.result !== \"success\") {\n this.onerror(data);\n }\n break;\n case 'error':\n // Warning: rejecting here with just event.data.error causes issues\n // where the reject handlers modify the object so it interferes with the\n // workEmit event payload, wrapping in an Error instance copies the values\n let e = new Error(\n data.error.message,\n data.error.fileName,\n data.error.lineNumber);\n e.stack = data.error.stack;\n e.name = data.error.name;\n \n if (this.listenerCount('reject') > 0) {\n this.emit('reject', e);\n } else {\n // This will happen if the error is thrown during initialization\n throw e;\n }\n\n break;\n default:\n let error = new Error('Received unhandled request from sandbox: ' + data.request + '\\n\\t' + JSON.stringify(data));\n console.error(error);\n break; \n }\n }\n\n async handleRing1Message(data) {\n switch (data.request) {\n case 'applyRequirementsDone':\n // emit internally\n this.emit(data.request, data)\n break;\n default:\n let error = new Error('Received unhandled request from sandbox ring 1: ' + data.request + '\\n\\t' + JSON.stringify(data));\n console.error(error)\n break; \n }\n }\n\n async handleRing2Message(data) {\n debugging('event:ring-2') && debug('event:ring-2', data);\n switch (data.request) {\n case 'dependency': {\n let moduleData;\n try {\n moduleData = await this.supervisorCache.fetchModule(data.data);\n } catch (error) {\n /*\n * In the event of an error here, we want to let the client know there was a problem in\n * loading their module. However, there hasn't yet been an actual slice assigned to the sandbox.\n * Therefore, we assign 'slice 0' to the sandbox, a slice that will never exist, and is used\n * purely for this purpose. 
\n */\n if (!this.slice)\n this.slice = {\n jobAddress: this.jobAddress,\n sliceNumber: 0,\n };\n\n const payload = {\n name: error.name,\n timestamp: error.timestamp,\n message: error.message,\n };\n\n this.emit('workEmit', {\n eventName: 'error',\n payload,\n });\n this.emit('reject', error);\n break;\n }\n this.evaluatorHandle.postMessage({\n request: 'moduleGroup',\n data: moduleData,\n id: data.id,\n });\n break;\n }\n case 'error':\n /*\n * Ring 2 error messages will only fire for problems inside of the worker that are separate from\n * the work function. In most cases there are other handlers for situations where 'error' may be emitted\n * such as timeouts if the expected message isn't recieved. Thus, we will output the error, but nothing else.\n */\n console.error(data.error);\n break;\n case 'describe':\n case 'evalResult':\n case 'resetStateDone':\n case 'assigned':\n // emit internally\n this.emit(data.request, data);\n break;\n case 'reject':\n // emit internally\n this.emit(data.request, data.error);\n break;\n default: {\n const error = new Error(\n `Received unhandled request from sandbox ring 2. Data: ${JSON.stringify(\n data,\n null,\n 2,\n )}`,\n );\n console.error(error);\n break;\n }\n }\n }\n\n async handleRing3Message(data) {\n debugging('sandbox-ring-3') && console.debug('sandbox-ring-3 data:', data);\n switch (data.request) {\n case 'complete':\n dcp_timers.clearTimeout(this.progressTimeout);\n dcp_timers.clearTimeout(this.sliceTimeout);\n this.progressTimeout = this.sliceTimeout = null;\n\n if (this.progress === null) {\n if (this.options.ignoreNoProgress) {\n console.warn(\"ENOPROGRESS silenced by localExec: Progress was not called during this slice's execution, in a remote sandbox this would cause the slice to be failed\");\n } else {\n // If a progress update was never received (progress === null) then reject\n this.emit('reject', new NoProgressError('Sandbox never emitted a progress event.'));\n this.rejectionData.timeReport = this.sliceTimeReport;\n this.emit('rejectedWorkMetrics', this.rejectionData);\n delete this.rejectionData; \n break;\n }\n }\n \n this.progress = 100;\n this.completeData = data;\n\n this.completeData.timeReport = this.sliceTimeReport;\n this.emit('resolve', this.completeData);\n delete this.completeData;\n break;\n case 'progress':\n let { progress, indeterminate, throttledReports, value } = data;\n this.progress = progress;\n const progressReport = {\n timestamp: Date.now() - this.sliceStartTime,\n progress,\n value,\n throttledReports,\n }\n this.progressReports.last = progressReport;\n if (!indeterminate) {\n this.progressReports.lastDeterministic = progressReport;\n }\n\n this.resetProgressTimeout();\n \n this.sandboxHandle.emit('progress', data.indeterminate || data.progress < 0 || data.progress > 100 ? 
undefined : data.progress);\n break;\n\n case 'noProgress':\n let { message } = data;\n\n this.emit('reject', new NoProgressError(message));\n break;\n case 'console':\n data.payload.message = kvin.marshal(data.payload.message); \n this.emit('workEmit', {\n eventName: 'console',\n payload: data.payload, \n });\n break;\n\n case 'emitEvent':/* ad-hoc event from the sandbox (work.emit) */\n this.emit('workEmit', {\n eventName: 'custom',\n payload: data.payload\n })\n break;\n case 'measurement':\n this.updateTime(data);\n break;\n case 'sandboxError': /* the sandbox itself has an error condition */\n {\n this.emit('sandboxError', data.error);\n break;\n }\n case 'workError': { /* the work function threw/rejected */\n this.emit('workEmit', {\n eventName: 'error',\n payload: data.error,\n });\n\n // Warning: rejecting here with just .data.error causes issues\n // where the reject handlers modify the object so it interferes with the\n // workEmit payload, wrapping in an Error instance copies the values\n const wrappedError = new UncaughtExceptionError(\n data.error.message,\n data.error.fileName,\n data.error.lineNumber,\n );\n wrappedError.stack = data.error.stack;\n wrappedError.name = data.error.name;\n\n if (this.listenerCount('reject') > 0) {\n this.emit('reject', wrappedError);\n } else {\n // This will happen if the error is thrown during initialization\n throw wrappedError;\n }\n break;\n }\n default:\n let error = new Error('Received unhandled request from sandbox ring 3: ' + data.request + '\\n\\t' + JSON.stringify(data));\n console.error(error)\n break; \n }\n }\n\n /**\n * Handles progress and completion events from sandbox.\n * Unless explicitly returned out of this function will re-emit the event\n * where the name of the event is event.data.request.\n *\n * @param {object} event - event received from the sandbox\n */\n async onmessage(event) {\n debugging('event') && debug('event', event);\n if (Sandbox.debugEvents) {\n console.debug('sandbox - eventDebug:', {\n id: this.id,\n state: this.state,\n event: JSON.stringify(event)\n })\n }\n\n const { data } = event;\n const ringLevel = data.ringSource\n\n // Give the data to a handler depending on ring level\n if (ringLevel === -1) {\n console.error('Message sent directly from raw postMessage. 
Terminating worker...');\n console.debug(event);\n return this.terminate(true);\n } else {\n const handler = this.ringMessageHandlers[ringLevel];\n if (handler) {\n handler.call(this, data.value);\n } else {\n console.warn(`No handler defined for message from ring ${ringLevel}`);\n console.debug(event);\n }\n }\n }\n\n /**\n * Error handler for the internal sandbox.\n * Currently just logs the errors that the sandbox spits out.\n */\n onerror(event) {\n console.error('Sandbox emitted an error:', event);\n this.terminate(true, true);\n }\n\n /**\n * Clears the timeout and terminates the sandbox and sometimes emits a reject event.\n *\n * @param {boolean} [reject = true] - if true emit reject event\n * @param {boolean} [immediate = false] - passed to terminate, used by standaloneWorker to immediately close the connection\n */\n terminate (reject = true, immediate = false) {\n debugging() && console.debug(`Terminate sandbox ${this.identifier}`);\n const oldState = this.state;\n this.state = TERMINATED;\n \n dcp_timers.clearTimeout(this.progressTimeout);\n dcp_timers.clearTimeout(this.sliceTimeout);\n this.progressTimeout = this.sliceTimeout = null;\n \n if (this.evaluatorHandle && typeof this.evaluatorHandle.terminate === 'function') {\n try {\n this.evaluatorHandle.terminate(immediate);\n this.evaluatorHandle = null;\n } catch (e) {\n console.error(`Error terminating sandbox ${this.id} (${oldState}):`, e);\n } finally {\n this.emit('terminate', this);\n }\n }\n\n if (reject) {\n this.emit('reject', new Error(`Sandbox ${this.id} (${oldState}) was terminated.`));\n }\n\n this.sandboxHandle.emit('end');\n }\n\n /**\n * Attempts to stop the sandbox from doing completing its current\n * set of work without terminating the working.\n */\n stop () {\n throw new Error('Sandbox.stop is not yet implemented.')\n }\n\n /**\n * ringNPostMessage can send a `measurement` request and update these\n * totals.\n */\n updateTime (measurementEvent) {\n ['total', 'CPU', 'webGL', 'webGPU'].forEach((key) => {\n if (measurementEvent[key]) this.sliceTimeReport[key] += measurementEvent[key];\n })\n }\n\n resetSliceTimeReport () {\n this.sliceTimeReport = {\n total: 0,\n CPU: 0,\n webGL: 0,\n webGPU: 0,\n }\n }\n}\n\nSandbox.idCounter = 1;\nSandbox.debugWork = false;\nSandbox.debugState = false;\nSandbox.debugEvents = false;\n\nexports.Sandbox = Sandbox;\nexports.SandboxError = SandboxError;\nexports.NoProgressError = NoProgressError;\nexports.SliceTooSlowError = SliceTooSlowError;\nexports.UncaughtExceptionError = UncaughtExceptionError;\nexports.RemoteFetchError = RemoteFetchError;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/sandbox.js?");
4629
+ eval("// NOTE - need timeout/postmessage function\n/**\n * @file dcp-client/worker/sandbox.js\n *\n * A sandbox that when constructed and assigned can do work for\n * a distributed slice. A sandbox runs for a single slice at a time.\n *\n * Usage:\n * let sandbox = new Sandbox()\n * await sandbox.start()\n * let results = await sandbox.run(slice)\n *\n * Debug flags:\n * Sandbox.debugWork = true // - turns off 30 second timeout to let user debug sandbox innards more easily\n * Sandbox.debugState = true // - logs all state transitions for this sandbox\n * Sandbox.debugEvents = true // - logs all events received from the sandbox\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * @date May 2019\n * @module sandbox\n */\n/* global dcpConfig */\n// @ts-check\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { assert, assertEq3 } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { rehydrateRange } = __webpack_require__(/*! dcp/dcp-client/range-object */ \"./src/dcp-client/range-object.js\");\nconst { allowOriginsPurposes } = __webpack_require__(/*! dcp/common/worker-constants */ \"./src/common/worker-constants.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\n\nlet timeDilation = 1;\nif (DCP_ENV.platform === 'nodejs') {\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n /** Make timers 10x slower when running in niim */\n timeDilation = (requireNative('module')._cache.niim instanceof requireNative('module').Module) ? 10 : 1;\n}\n\n/**\n * Wraps console.debug to emulate debug module prefixing messages on npm.\n * @param {...any} args\n */\nconst debug = (...args) => {\n if (debugging()) {\n console.debug('dcp-client:worker:sandbox', ...args);\n }\n};\n\nconst nanoid = (__webpack_require__(/*! nanoid */ \"./node_modules/nanoid/index.browser.js\").nanoid);\n\nconst EventEmitter = __webpack_require__(/*! events */ \"./node_modules/events/events.js\");\nconst { fetchURI, encodeDataURI } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst dcp_timers = __webpack_require__(/*! 
dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\n\n// Sandbox states\nconst UNREADY = 'UNREADY' // No Sandbox (web worker, saworker, etc) has been constructed yet\nconst READYING = 'READYING' // Sandbox is being constructed and environment (bravojs, env) is being set up\nconst READY_FOR_ASSIGN = 'READY_FOR_ASSIGN' // Sandbox is ready to be assigned\nconst ASSIGNING = 'ASSIGNING' // Sandbox is running through assigning steps\nconst ASSIGNED = 'ASSIGNED' // Sandbox is assigned but not working\nconst WORKING = 'WORKING' // Sandbox is working\nconst TERMINATED = 'TERMINATED'\nconst EVAL_RESULT_PREFIX = 'evalResult::';\n\nclass SandboxError extends Error {}\nclass NoProgressError extends SandboxError { constructor(...args) { super(...args); this.errorCode = 'ENOPROGRESS'; } }\nclass SliceTooSlowError extends SandboxError { constructor(...args) { super(...args); this.errorCode = 'ESLICETOOSLOW'; } }\nclass UncaughtExceptionError extends SandboxError { constructor(...args) { super(...args); this.errorCode = 'EUNCAUGHT'; } }\nclass RemoteFetchError extends SandboxError { constructor(...args) { super(...args); this.errorCode = 'EPERM_ORIGIN'; }}\n\n/** @typedef {string} opaqueId */ // 22 character base64 string\n/** @typedef {import('./slice').Slice} Slice */\n/** @typedef {import('./supervisor-cache').SupervisorCache} SupervisorCache */\n/** @typedef {import('dcp/utils/jsdoc-types').SandboxOptions} SandboxOptions */\n\n/**\n * Public event emitter.\n * https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n */\nclass SandboxHandle extends EventEmitter\n{\n /** @type {{ id: number, public: { name: string, description: string, link: string }, jobAddress: string, slice: Slice }} */\n #info;\n\n /**\n * @constructor\n * @param {Sandbox} sandbox\n */\n constructor (sandbox)\n {\n super({ captureRejections: false });\n this.#info = sandbox.info;\n }\n /** @type {number} */\n get id () { return this.#info.id; }\n /** @type {{ name: string, description: string, link: string }} */\n get public () { return this.#info.public ?? { name: '<unassigned>', description: '', link: '' }; }\n /** @type {string} */\n get jobAddress () { return this.#info.jobAddress; }\n /** @type {number} */\n get sliceNumber () { return this.#info.slice?.sliceNumber ?? -1; }\n}\nexports.SandboxHandle = SandboxHandle;\n\nclass Sandbox extends EventEmitter {\n /**\n * A Sandbox (i.e. a worker sandbox) which executes distributed slices.\n *\n * @constructor\n * @param {object} supervisor the instance of Supervisor that owns this sandbox\n * @param {SandboxOptions} options\n */\n constructor (supervisor, options) {\n super({ captureRejections: false });\n\n this.supervisor = supervisor;\n /** @type {SupervisorCache} */\n this.supervisorCache = supervisor.cache;\n /** @type {SandboxOptions} */\n this.options = {\n ignoreNoProgress: false,\n ...options,\n SandboxConstructor: options.SandboxConstructor ||\n (__webpack_require__(/*! 
./evaluators */ \"./src/dcp-client/worker/evaluators/index.js\").BrowserEvaluator),\n }\n\n /** @type {object} */\n this.evaluatorHandle = null;\n /** @type {object} */\n this.capabilities = null;\n\n /** @type {string} */\n this._state = UNREADY;\n /** @type {boolean} */\n this.allocated = false;\n /** @type {number?} */\n this.progress = 100;\n /** @type {object} */\n this.progressReports = null;\n /** @type {object} */\n this.progressTimeout = null;\n /** @type {object} */\n this.sliceTimeout = null;\n /** @type {object} */\n this.rejectionData = null;\n\n /** @type {number?} */\n this.started = null;\n /** @type {number?} */\n this.sliceStartTime = null;\n /** @type {boolean} */\n this.requiresGPU = false;\n /** @type {string|URL} */\n this.packageURL = dcpConfig.packageManager.location\n\n /** @type {{ id: number, public: { name: string, description: string, link: string }, jobAddress: string, slice: Slice }} */\n this.info = {\n id: Sandbox.getNewId(),\n public: null,\n jobAddress: null,\n slice: null,\n };\n\n /**\n * Event emitter containing info that describes the sandbox.\n * @type {SandboxHandle}\n */\n this.sandboxHandle = new SandboxHandle(this);\n\n this.ringMessageHandlers = [\n this.handleRing0Message,\n this.handleRing1Message,\n this.handleRing2Message,\n this.handleRing3Message,\n ];\n\n this.resetSliceTimeReport();\n }\n\n get identifier() {\n if (this.allocated)\n return `${this.id}.${this.jobAddress}.${this.state}.${this.allocated}`;\n return `${this.id}.${this.jobAddress}.${this.state}`;\n }\n\n static getNewId() {\n return Sandbox.idCounter++;\n }\n\n /** @type {number} */\n get id () { return this.info.id; }\n /** @type {{ name: string, description: string, link: string }} */\n get public () { return this.info.public; }\n /** @type {{ name: string, description: string, link: string }} */\n set public (data) { this.info.public = data; }\n /** @type {string} */\n get jobAddress () { return this.info.jobAddress; }\n /** @type {string} */\n set jobAddress (address) { this.info.jobAddress = address; }\n /** @type {Slice} */\n get slice () { return this.info.slice; }\n /** @type {Slice} */\n set slice (slice) { this.info.slice = slice; }\n /** @type {number} */\n get sliceNumber () { return this.slice ? this.slice.sliceNumber : -1; }\n\n get state () {\n return this._state\n }\n\n set state (value) {\n if (Sandbox.debugState) {\n console.debug(`sandbox - changing state of ${this.id}... ${this._state} -> ${value}`)\n }\n\n if (this.state === TERMINATED && value !== TERMINATED) {\n // For safety!\n throw new Error(`Sandbox set state violation, attepted to change state from ${this.state} to ${value}`);\n }\n\n this._state = value;\n }\n\n get isReadyForAssign () {\n return this.state === READY_FOR_ASSIGN;\n }\n\n get isAssigned () {\n return this.state === ASSIGNED;\n }\n\n get isWorking () {\n return this.state === WORKING;\n }\n\n get isTerminated () {\n return this.state === TERMINATED;\n }\n\n changeWorkingToAssigned () {\n if (this.isWorking) {\n this.state = ASSIGNED;\n this.sandboxHandle.emit('ready');\n }\n }\n\n setIsAssigning () {\n this.state = ASSIGNING;\n }\n\n /**\n * Readies the sandbox. 
This will result in the sandbox being ready and not assigned,\n * it will need to be assigned with a job before it is able to do work.\n *\n * @todo maybe preload specific modules or let the cache pass in what modules to load?\n * @throws on failure to ready\n */\n async start(delay = 0) {\n this.started = Date.now();\n this.state = READYING;\n\n if (delay > 0) await new Promise((resolve) => dcp_timers.setTimeout(resolve, delay * timeDilation));\n\n try {\n // RING 0\n this.evaluatorHandle = new this.options.SandboxConstructor({\n name: `DCP Sandbox #${this.id}`,\n });\n this.evaluatorHandle.onerror = this.onerror.bind(this);\n\n const messageHandler = this.onmessage.bind(this);\n this.evaluatorHandle.onmessage = function onmessage(event)\n {\n let data;\n if (event.data.serialized)\n {\n data = kvin.parse(event.data.message);\n }\n else\n {\n data = kvin.unmarshal(event.data);\n }\n messageHandler({ data });\n }\n\n const evaluatorPostMessage = this.evaluatorHandle.postMessage.bind(this.evaluatorHandle);\n this.evaluatorHandle.postMessage = function postMessage(message)\n {\n evaluatorPostMessage(kvin.marshal(message));\n }\n\n const ceci = this;\n this.evaluatorHandle.addEventListener('end', () => {\n debugging() && console.debug('Sandbox evaluatorHandle-end-handler:', this.identifier);\n ceci.terminate(true)\n });\n\n // Don't let an open sockets prevent clean worker exit.\n if (this.evaluatorHandle.unref)\n this.evaluatorHandle.unref();\n\n // Now in RING 1\n\n // Now in RING 2\n await this.describe();\n this.state = READY_FOR_ASSIGN;\n } catch (error) {\n console.warn('Failed to start the sandbox -', error.message);\n this.terminate(false);\n throw error;\n }\n }\n\n /**\n * This will assign the sandbox with a job, loading its sandbox code\n * into the sandbox.\n *\n * @param {string} jobAddress The address of the job to assign to\n * @throws on initialization failure\n */\n async assign(jobAddress) {\n this.jobAddress = jobAddress;\n this.job = await this.supervisorCache.fetchJob(jobAddress, this.supervisor.originManager);\n /* At this point, the worker has decided that this sandbox will be associated with a specific job. \n Therefore, we emit the SandboxHandle<job> event. 
*/\n this.sandboxHandle.emit('job', this.supervisor.jobHandles[this.jobAddress]);\n\n assertEq3(this.job.address, jobAddress);\n assert(typeof this.job === 'object');\n assert(typeof this.job.requirements === 'object');\n assert(Array.isArray(this.job.dependencies));\n assert(Array.isArray(this.job.requirePath));\n\n // Extract public data from job, with defaults\n this.public = Object.assign({\n name: `Anonymous Job ${this.job.address.slice(0, 6)}`,\n description: 'Discreetly helping make the world smarter.',\n link: 'https://distributed.computer/about',\n }, this.job.public);\n\n // Future: We may want other filename tags for appliances // RR Nov 2019\n\n // Important: The order of applying requirements before loading the sandbox code\n // is important for modules and sandbox code to set globals over the whitelist.\n await this.applySandboxRequirements(this.job.requirements);\n await this.assignEvaluator();\n this.state = ASSIGNED;\n this.sandboxHandle.emit('ready');\n }\n\n async assignEvaluator() {\n debug('Begin assigning job to evaluator');\n const ceci = this;\n\n return new Promise(function sandbox$$assignEvaluatorPromise(resolve, reject) {\n const message = {\n request: 'assign',\n job: ceci.job,\n sandboxConfig: dcpConfig.supervisor.sandbox,\n };\n\n const onSuccess = (event) => {\n // eslint-disable-next-line no-use-before-define\n ceci.removeListener('reject', onFail);\n ceci.emit('assigned', event.jobAddress);\n debug('Job assigned to evaluator');\n resolve();\n };\n\n const onFail = (error) => {\n // eslint-disable-next-line no-use-before-define\n ceci.removeListener('assigned', onSuccess);\n reject(error);\n };\n\n ceci.once('assigned', onSuccess);\n ceci.once('reject', onFail);\n\n ceci.evaluatorHandle.postMessage(message);\n });\n }\n\n /**\n * Evaluates a string inside the sandbox.\n *\n * @param {string} code - the code to evaluate in the sandbox\n * @param {string} filename - the name of the 'file' to help with debugging,\n * no longer working though?\n * @returns {Promise} - resolves with eval result on success, rejects\n * otherwise\n */\n eval(code, filename) {\n var ceci = this;\n \n return new Promise(function sandbox$$eval$Promise(resolve, reject) {\n let msgId = nanoid();\n let msg = {\n request: 'eval',\n data: code,\n filename,\n msgId, \n }\n\n const eventId = EVAL_RESULT_PREFIX + msgId;\n\n let onSuccess = (event) => {\n ceci.removeListener('reject', onFail)\n resolve(event)\n }\n\n let onFail = (event) => {\n ceci.removeListener(eventId, onSuccess)\n reject(event)\n }\n\n ceci.once(eventId, onSuccess);\n ceci.once('reject', onFail)\n\n ceci.evaluatorHandle.postMessage(msg)\n })\n }\n\n /**\n * Resets the state of the bootstrap, without resetting the sandbox function if assigned.\n * Mostly used to reset the progress status before reusing a sandbox on another slice.\n * Must be called after @start.\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n resetSandboxState () {\n var ceci = this;\n\n return new Promise(function sandbox$resetSandboxStatePromise(resolve, reject) {\n let failTimeout;\n let msg = {\n request: 'resetState',\n };\n\n function sandbox$resetSandboxState$success () {\n if (failTimeout === false)\n return; /* already rejected */\n dcp_timers.clearTimeout(failTimeout);\n failTimeout = false;\n resolve();\n }\n\n ceci.once('resetStateDone', sandbox$resetSandboxState$success);\n\n failTimeout = dcp_timers.setTimeout(function sandbox$resetSandboxState$fail() {\n if (failTimeout === false)\n return; /* already 
resolved */\n \n ceci.removeListener('resetStateDone', sandbox$resetSandboxState$success);\n ceci.terminate(false);\n failTimeout = false;\n\n reject(new Error('resetState never received resetStateDone event from sandbox'));\n }, 3000 * timeDilation); /* XXXwg need tuneable */\n\n assert(ceci.evaluatorHandle); // It is possible that ceci.terminate nulls out evaluatorHandle before getting here.\n ceci.evaluatorHandle.postMessage(msg);\n });\n }\n\n /**\n * Sends a post message to describe its capabilities.\n *\n * Side effect: Sets the capabilities property of the current sandbox.\n *\n * @returns {Promise} Resolves with the sandbox's capabilities. Rejects with\n * an error saying a response was not received.\n * @memberof Sandbox\n */\n describe() {\n debug('Beginning to describe evaluator');\n var ceci = this;\n \n return new Promise(function sandbox$describePromise(resolve, reject) {\n if (ceci.evaluatorHandle === null) {\n return reject(new Error('Evaluator has not been initialized.'));\n }\n\n /**\n * Opted to create a flag for the describe response being received so that\n * we don't have to *hoist* the timeout's id to clear it in the response\n * handler.\n */\n let didReceiveDescribeResponse = false;\n const describeResponseHandler = (data) => {\n didReceiveDescribeResponse = true;\n const { capabilities } = data;\n if (typeof capabilities === 'undefined') {\n reject(\n new Error('Did not receive capabilities from describe response.'),\n );\n }\n ceci.capabilities = capabilities;\n debug('Evaluator has been described');\n resolve(capabilities);\n };\n ceci.once('describe', describeResponseHandler); \n\n const describeResponseFailedHandler = () => {\n if (!didReceiveDescribeResponse) {\n ceci.removeListener('describe', describeResponseHandler);\n ceci.terminate(false);\n reject(\n new Error(\n 'Describe message timed-out. No describe response was received from the describe command.',\n ),\n );\n }\n };\n\n const message = {\n request: 'describe',\n };\n\n // Arbitrarily set the waiting time.\n dcp_timers.setTimeout(describeResponseFailedHandler, 6000 * timeDilation); /* XXXwg need tuneable */\n assert(ceci.evaluatorHandle); // It is possible that ceci.terminate nulls out evaluatorHandle before getting here.\n ceci.evaluatorHandle.postMessage(message);\n });\n }\n\n /**\n * Passes the job's requirements object into the sandbox so that the global\n * access lists can be updated accordingly.\n *\n * e.g. 
disallow access to OffscreenCanvas without\n * environment.offscreenCanvas=true present.\n *\n * Must be called after @start.\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n applySandboxRequirements(requirements) {\n var ceci = this;\n \n return new Promise(function sandbox$applySandboxRequirementsPromise(resolve, reject) {\n const message = {\n requirements,\n request: 'applyRequirements',\n };\n let wereRequirementsApplied = false;\n\n function sandbox$applyRequirements$success() {\n wereRequirementsApplied = true;\n resolve();\n };\n\n ceci.once('applyRequirementsDone', sandbox$applyRequirements$success);\n\n assert(typeof message.requirements === 'object');\n ceci.evaluatorHandle.postMessage(message);\n\n dcp_timers.setTimeout(function sandbox$finishApplySandboxRequirements() {\n if (!wereRequirementsApplied) {\n ceci.removeListener('applyRequirementsDone', sandbox$applyRequirements$success);\n ceci.terminate(false);\n reject(\n new Error(\n 'applyRequirements never received applyRequirementsDone response from sandbox',\n ),\n );\n }\n }, 3000 * timeDilation); /* XXXwg needs tunable */\n });\n }\n\n /**\n * Executes a slice received from the supervisor.\n * Must be called after @start.\n *\n * @param {Slice} slice - bare minimum data required for the job/job code to be executed on\n * @param {number} [delay = 0] the delay that this method should wait before beginning work, used to avoid starting all sandboxes at once\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n\n async work (slice, delay = 0) {\n var ceci = this;\n\n if (!ceci.isAssigned) {\n throw new Error(\"Sandbox.run: Sandbox is not ready to work, state=\" + ceci.state);\n }\n\n ceci.state = WORKING;\n ceci.slice = slice;\n assert(slice);\n\n this.sandboxHandle.emit('slice', slice.sliceNumber);\n\n // cf. 
DCP-1720\n this.resetSliceTimeReport();\n \n // Now wait for the delay if provided, prevents many sandboxes starting at once from crashing the supervisor\n if (delay > 0) await new Promise(resolve => dcp_timers.setTimeout(resolve, (delay + 1) * timeDilation));\n if (!ceci.isWorking) return; // sandbox.terminate could have been called during the delay timeout\n\n // Prepare the sandbox to begin work\n // will be replaced by `assign` message that should be called before emitting a `work` message\n if (ceci.jobAddress !== slice.jobAddress) {\n throw new Error(`Sandbox.run: Sandbox is already assigned and jobAddress doesn't match previous (${ceci.jobAddress} !== ${slice.jobAddress})`);\n }\n\n let sliceHnd = { job: ceci.public, sandbox: ceci };\n await ceci.resetSandboxState();\n if (!ceci.slice) {\n console.error(`Slice for job ${ceci.jobAddress} vanished during work initialization - aborting`);\n return;\n }\n\n let inputDatum;\n let dataError = false;\n try {\n if (ceci.slice.datumUri)\n inputDatum = await fetchURI(ceci.slice.datumUri, this.supervisor.originManager, allowOriginsPurposes.fetchData);\n else {\n let { mro } = await ceci.supervisorCache.fetchJob(\n ceci.jobAddress,\n this.supervisor.originManager\n );\n const ro = rehydrateRange(mro);\n // -1 to prevent an OBOE since slice numbers start at 1.\n inputDatum = ro[ceci.slice.sliceNumber - 1];\n }\n } catch (err) {\n dataError = err;\n if(err.code === 'EPERM_ORIGIN')\n dataError.errorCode = 'EPERM_ORIGIN'\n else\n dataError.errorCode = 'EUNCAUGHTERROR'\n ceci.emit('workEmit', {\n eventName: 'error',\n payload: {\n message: dataError.message,\n stack:dataError.stack,\n name: ceci.public.name\n }\n });\n }\n\n debugging('sandbox') && debug(`Fetched datum: ${inputDatum}`);\n\n if (!ceci.slice) {\n console.error(`Slice for job ${ceci.jobAddress} vanished after data fetch - aborting`);\n return;\n }\n\n ceci.resetProgressTimeout();\n ceci.resetSliceTimeout();\n\n return new Promise(function sandbox$$workPromise(resolve, reject) {\n function sandbox$$work$onSuccess (event) {\n ceci.removeListener('reject', sandbox$$work$onFail)\n resolve(event)\n };\n\n function sandbox$$work$onFail (err) {\n ceci.removeListener('resolve', sandbox$$work$onSuccess)\n reject(err)\n };\n\n ceci.once('resolve', sandbox$$work$onSuccess);\n\n ceci.once('reject', sandbox$$work$onFail);\n\n ceci.sliceStartTime = Date.now();\n ceci.progress = null;\n ceci.progressReports = {\n last: undefined,\n lastDeterministic: undefined,\n };\n\n ceci.resetProgressTimeout();\n ceci.resetSliceTimeout();\n ceci.emit('start', sliceHnd);\n \n if(dataError){\n ceci.removeListener('resolve', sandbox$$work$onSuccess);\n ceci.removeListener('reject', sandbox$$work$onFail);\n dcp_timers.setTimeout(() => reject(dataError), 0)\n\n } else {\n ceci.evaluatorHandle.postMessage({\n request: 'main',\n data: inputDatum,\n })\n }\n })\n .then(async function sandbox$$work$then(event) {\n debugging() && console.debug('Sandbox: sliceFinish', ceci.jobAddress, ceci.slice?.sliceNumber, event?.timeReport);\n ceci.sandboxHandle.emit('sliceEnd', ceci.slice?.sliceNumber); \n ceci.emit('complete', ceci.jobAddress);\n\n ceci.changeWorkingToAssigned();\n ceci.slice = false;\n return event;\n })\n .catch((err) => { \n if (err.name === 'EWORKREJECT') {\n this.rejectionData = err;\n this.rejectionData.timeReport = this.sliceTimeReport;\n this.emit('rejectedWorkMetrics', this.rejectionData);\n delete this.rejectionData;\n } else { // sandbox termination for rejected work happens in 
Supervisor.handleRejectedWork\n // Ceci is the reject callback for when the slice throws an error\n ceci.terminate(false);\n }\n\n ceci.supervisor.worker.emit('error', err, 'slice');\n\n if (err instanceof NoProgressError) {\n ceci.emit('workEmit', {\n eventName: 'noProgress',\n payload: {\n timestamp: Date.now() - ceci.sliceStartTime,\n data: ceci.slice.datumUri,\n progressReports: ceci.progressReports,\n }\n });\n }\n throw err;\n });\n }\n\n resetProgressTimeout() {\n if (this.progressTimeout) {\n dcp_timers.clearTimeout(this.progressTimeout);\n this.progressTimeout = null;\n }\n\n this.progressTimeout = dcp_timers.setTimeout(() => {\n if (this.options.ignoreNoProgress) {\n return console.warn(\"ENOPROGRESS silenced by localExec: In a remote worker, this slice would be stopped for not calling progress frequently enough.\");\n }\n\n this.emit('reject', new NoProgressError(`No progress event was received in the last ${dcpConfig.supervisor.sandbox.progressTimeout / 1000} seconds.`));\n }, +dcpConfig.supervisor.sandbox.progressTimeout * timeDilation);\n }\n\n resetSliceTimeout() {\n if (this.sliceTimeout) dcp_timers.clearTimeout(this.sliceTimeout);\n\n this.sliceTimeout = dcp_timers.setTimeout(() => {\n if (Sandbox.debugWork) return console.warn(\"Sandbox.debugWork: Ignoring slice timeout\");\n\n this.emit('reject', new SliceTooSlowError(`Slice took longer than ${dcpConfig.supervisor.sandbox.sliceTimeout / 1000} seconds.`));\n }, +dcpConfig.supervisor.sandbox.sliceTimeout * timeDilation);\n }\n \n async handleRing0Message(data) {\n debugging('event:ring-0') && debug('event:ring-0', data);\n //handling a true ring 0 message\n switch (data.request) {\n case 'scriptLoaded':\n if(data.result !== \"success\") {\n this.onerror(data);\n }\n break;\n case 'error':\n // Warning: rejecting here with just event.data.error causes issues\n // where the reject handlers modify the object so it interferes with the\n // workEmit event payload, wrapping in an Error instance copies the values\n let e = new Error(\n data.error.message,\n data.error.fileName,\n data.error.lineNumber);\n e.stack = data.error.stack;\n e.name = data.error.name;\n \n if (this.listenerCount('reject') > 0) {\n this.emit('reject', e);\n } else {\n // This will happen if the error is thrown during initialization\n throw e;\n }\n\n break;\n default:\n let error = new Error('Received unhandled request from sandbox: ' + data.request + '\\n\\t' + JSON.stringify(data));\n console.error(error);\n break; \n }\n }\n\n async handleRing1Message(data) {\n switch (data.request) {\n case 'applyRequirementsDone':\n // emit internally\n this.emit(data.request, data)\n break;\n default:\n let error = new Error('Received unhandled request from sandbox ring 1: ' + data.request + '\\n\\t' + JSON.stringify(data));\n console.error(error)\n break; \n }\n }\n\n async handleRing2Message(data) {\n debugging('event:ring-2') && debug('event:ring-2', data);\n switch (data.request) {\n case 'dependency': {\n let moduleData;\n try {\n moduleData = await this.supervisorCache.fetchModule(data.data);\n } catch (error) {\n /*\n * In the event of an error here, we want to let the client know there was a problem in\n * loading their module. However, there hasn't yet been an actual slice assigned to the sandbox.\n * Therefore, we assign 'slice 0' to the sandbox, a slice that will never exist, and is used\n * purely for this purpose. 
\n */\n if (!this.slice)\n this.slice = {\n jobAddress: this.jobAddress,\n sliceNumber: 0,\n };\n\n const payload = {\n name: error.name,\n timestamp: error.timestamp,\n message: error.message,\n };\n\n this.emit('workEmit', {\n eventName: 'error',\n payload,\n });\n this.emit('reject', error);\n break;\n }\n this.evaluatorHandle.postMessage({\n request: 'moduleGroup',\n data: moduleData,\n id: data.id,\n });\n break;\n }\n case 'error':\n /*\n * Ring 2 error messages will only fire for problems inside of the worker that are separate from\n * the work function. In most cases there are other handlers for situations where 'error' may be emitted\n * such as timeouts if the expected message isn't recieved. Thus, we will output the error, but nothing else.\n */\n console.error(data.error);\n break;\n case 'describe':\n case 'evalResult':\n case 'resetStateDone':\n case 'assigned':\n // emit internally\n this.emit(data.request, data);\n break;\n case 'reject':\n // emit internally\n this.emit(data.request, data.error);\n break;\n default: {\n const error = new Error(\n `Received unhandled request from sandbox ring 2. Data: ${JSON.stringify(\n data,\n null,\n 2,\n )}`,\n );\n console.error(error);\n break;\n }\n }\n }\n\n async handleRing3Message(data) {\n debugging('sandbox-ring-3') && console.debug('sandbox-ring-3 data:', data);\n switch (data.request) {\n case 'complete':\n dcp_timers.clearTimeout(this.progressTimeout);\n dcp_timers.clearTimeout(this.sliceTimeout);\n this.progressTimeout = this.sliceTimeout = null;\n\n if (this.progress === null) {\n if (this.options.ignoreNoProgress) {\n console.warn(\"ENOPROGRESS silenced by localExec: Progress was not called during this slice's execution, in a remote sandbox this would cause the slice to be failed\");\n } else {\n // If a progress update was never received (progress === null) then reject\n this.emit('reject', new NoProgressError('Sandbox never emitted a progress event.'));\n this.rejectionData.timeReport = this.sliceTimeReport;\n this.emit('rejectedWorkMetrics', this.rejectionData);\n delete this.rejectionData; \n break;\n }\n }\n \n this.progress = 100;\n this.completeData = data;\n\n this.completeData.timeReport = this.sliceTimeReport;\n this.emit('resolve', this.completeData);\n delete this.completeData;\n break;\n case 'progress':\n let { progress, indeterminate, throttledReports, value } = data;\n this.progress = progress;\n const progressReport = {\n timestamp: Date.now() - this.sliceStartTime,\n progress,\n value,\n throttledReports,\n }\n this.progressReports.last = progressReport;\n if (!indeterminate) {\n this.progressReports.lastDeterministic = progressReport;\n }\n\n this.resetProgressTimeout();\n \n this.sandboxHandle.emit('progress', data.indeterminate || data.progress < 0 || data.progress > 100 ? 
undefined : data.progress);\n break;\n\n case 'noProgress':\n let { message } = data;\n\n this.emit('reject', new NoProgressError(message));\n break;\n case 'console':\n data.payload.message = kvin.marshal(data.payload.message); \n this.emit('workEmit', {\n eventName: 'console',\n payload: data.payload, \n });\n break;\n\n case 'emitEvent':/* ad-hoc event from the sandbox (work.emit) */\n this.emit('workEmit', {\n eventName: 'custom',\n payload: data.payload\n })\n break;\n case 'measurement':\n this.updateTime(data);\n break;\n case 'sandboxError': /* the sandbox itself has an error condition */\n {\n this.emit('sandboxError', data.error);\n break;\n }\n case 'workError': { /* the work function threw/rejected */\n this.emit('workEmit', {\n eventName: 'error',\n payload: data.error,\n });\n\n // Warning: rejecting here with just .data.error causes issues\n // where the reject handlers modify the object so it interferes with the\n // workEmit payload, wrapping in an Error instance copies the values\n const wrappedError = new UncaughtExceptionError(\n data.error.message,\n data.error.fileName,\n data.error.lineNumber,\n );\n wrappedError.stack = data.error.stack;\n wrappedError.name = data.error.name;\n\n if (this.listenerCount('reject') > 0) {\n this.emit('reject', wrappedError);\n } else {\n // This will happen if the error is thrown during initialization\n throw wrappedError;\n }\n break;\n }\n default:\n let error = new Error('Received unhandled request from sandbox ring 3: ' + data.request + '\\n\\t' + JSON.stringify(data));\n console.error(error)\n break; \n }\n }\n\n /**\n * Handles progress and completion events from sandbox.\n * Unless explicitly returned out of this function will re-emit the event\n * where the name of the event is event.data.request.\n *\n * @param {object} event - event received from the sandbox\n */\n async onmessage(event) {\n debugging('event') && debug('event', event);\n if (Sandbox.debugEvents) {\n console.debug('sandbox - eventDebug:', {\n id: this.id,\n state: this.state,\n event: JSON.stringify(event)\n })\n }\n\n const { data } = event;\n const ringLevel = data.ringSource\n\n // Give the data to a handler depending on ring level\n if (ringLevel === -1) {\n console.error('Message sent directly from raw postMessage. 
Terminating worker...');\n console.debug(event);\n return this.terminate(true);\n } else {\n const handler = this.ringMessageHandlers[ringLevel];\n if (handler) {\n handler.call(this, data.value);\n } else {\n console.warn(`No handler defined for message from ring ${ringLevel}`);\n console.debug(event);\n }\n }\n }\n\n /**\n * Error handler for the internal sandbox.\n * Currently just logs the errors that the sandbox spits out.\n */\n onerror(event) {\n console.error('Sandbox emitted an error:', event);\n this.terminate(true, true);\n }\n\n /**\n * Clears the timeout and terminates the sandbox and sometimes emits a reject event.\n *\n * @param {boolean} [reject = true] - if true emit reject event\n * @param {boolean} [immediate = false] - passed to terminate, used by standaloneWorker to immediately close the connection\n */\n terminate (reject = true, immediate = false) {\n debugging() && console.debug(`Terminate sandbox ${this.identifier}`);\n const oldState = this.state;\n this.state = TERMINATED;\n \n dcp_timers.clearTimeout(this.progressTimeout);\n dcp_timers.clearTimeout(this.sliceTimeout);\n this.progressTimeout = this.sliceTimeout = null;\n \n if (this.evaluatorHandle && typeof this.evaluatorHandle.terminate === 'function') {\n try {\n this.evaluatorHandle.terminate(immediate);\n this.evaluatorHandle = null;\n } catch (e) {\n console.error(`Error terminating sandbox ${this.id} (${oldState}):`, e);\n } finally {\n this.emit('terminate', this);\n }\n }\n\n if (reject) {\n this.emit('reject', new Error(`Sandbox ${this.id} (${oldState}) was terminated.`));\n }\n\n this.sandboxHandle.emit('end');\n }\n\n /**\n * Attempts to stop the sandbox from doing completing its current\n * set of work without terminating the working.\n */\n stop () {\n throw new Error('Sandbox.stop is not yet implemented.')\n }\n\n /**\n * ringNPostMessage can send a `measurement` request and update these\n * totals.\n */\n updateTime (measurementEvent) {\n ['total', 'CPU', 'webGL', 'webGPU'].forEach((key) => {\n if (measurementEvent[key]) this.sliceTimeReport[key] += measurementEvent[key];\n })\n }\n\n resetSliceTimeReport () {\n this.sliceTimeReport = {\n total: 0,\n CPU: 0,\n webGL: 0,\n webGPU: 0,\n }\n }\n}\n\nSandbox.idCounter = 1;\nSandbox.debugWork = false;\nSandbox.debugState = false;\nSandbox.debugEvents = false;\n\nexports.Sandbox = Sandbox;\nexports.SandboxError = SandboxError;\nexports.NoProgressError = NoProgressError;\nexports.SliceTooSlowError = SliceTooSlowError;\nexports.UncaughtExceptionError = UncaughtExceptionError;\nexports.RemoteFetchError = RemoteFetchError;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/sandbox.js?");
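A minimal usage sketch of the lifecycle documented in the sandbox.js header above (construct, start, assign, then work; the header's sandbox.run(slice) corresponds to the work(slice) method the class actually defines). This is a hedged illustration only, not part of the packaged code: it assumes a supervisor-like owner exposing cache, originManager, jobHandles and worker, a populated global dcpConfig, and a slice object carrying jobAddress and sliceNumber.

// Hypothetical sketch; the require path mirrors how supervisor.js loads this module inside the bundle.
const { Sandbox } = require('./sandbox');

async function runOneSlice(supervisor, slice) { // supervisor and slice are assumed inputs (see note above)
  Sandbox.debugState = true;                    // optional: log state transitions, per the debug flags above
  const sandbox = new Sandbox(supervisor, { ignoreNoProgress: false });
  await sandbox.start();                        // UNREADY -> READYING -> READY_FOR_ASSIGN
  await sandbox.assign(slice.jobAddress);       // fetches the job, applies requirements; state becomes ASSIGNED
  const result = await sandbox.work(slice);     // WORKING; resolves with the completion data ('resolve' event)
  sandbox.terminate(false);                     // tear down without emitting a 'reject'
  return result;
}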
4630
4630
 
4631
4631
  /***/ }),
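For context, the SandboxHandle defined in this module is the public, read-only view of a sandbox; the Supervisor in the next hunk publishes it via worker.emit('sandbox', sandbox.sandboxHandle) inside readySandboxes. A minimal listener sketch follows, assuming "worker" is an already-constructed Worker instance from this bundle; the event names are the ones the code above emits on the handle.

// Illustrative only; handle.id and handle.jobAddress are the getters defined on SandboxHandle above.
worker.on('sandbox', (handle) => {
  handle.on('job',      ()            => console.log('sandbox', handle.id, 'assigned to job', handle.jobAddress));
  handle.on('ready',    ()            => console.log('sandbox', handle.id, 'ready for a slice'));
  handle.on('slice',    (sliceNumber) => console.log('sandbox', handle.id, 'working slice', sliceNumber));
  handle.on('progress', (p)           => console.log('progress', p)); // undefined when indeterminate or outside 0-100
  handle.on('sliceEnd', (sliceNumber) => console.log('sandbox', handle.id, 'finished slice', sliceNumber));
  handle.on('end',      ()            => console.log('sandbox', handle.id, 'terminated'));
});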
4632
4632
 
@@ -4658,7 +4658,7 @@ eval("/**\n * @file worker/supervisor-cache.js\n *\n * A cache for the superviso
4658
4658
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4659
4659
 
4660
4660
  "use strict";
4661
- eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file worker/supervisor.js\n *\n * The component that controls each of the sandboxes\n * and distributes work to them. Also communicates with the\n * scheduler to fetch said work.\n *\n * The supervisor readies sandboxes before/while fetching slices.\n * This means sometimes there are extra instantiated WebWorkers\n * that are idle (in this.readiedSandboxes). Readied sandboxes can\n * be used for any slice. After a readied sandbox is given a slice\n * it becomes assigned to slice's job and can only do work\n * for that job.\n *\n * After a sandbox completes its work, the sandbox becomes cached\n * and can be reused if another slice with a matching job is fetched.\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * @date May 2019\n */\n\n/* global dcpConfig */ // eslint-disable-line no-redeclare\n// @ts-check\n\n\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst { Address } = __webpack_require__(/*! dcp/dcp-client/wallet/eth */ \"./src/dcp-client/wallet/eth.js\");\nconst { Keystore } = __webpack_require__(/*! dcp/dcp-client/wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\");\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { OriginAccessManager } = __webpack_require__(/*! dcp/dcp-client/worker/origin-access-manager */ \"./src/dcp-client/worker/origin-access-manager.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst EventEmitter = __webpack_require__(/*! events */ \"./node_modules/events/events.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst { localStorage } = __webpack_require__(/*! dcp/common/dcp-localstorage */ \"./src/common/dcp-localstorage.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst dcp_timers = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\nconst { sliceStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { booley, encodeDataURI, leafMerge, a$sleepMs, compressJobMap, toJobMap, compressSandboxes,\n compressSlices, truncateAddress, dumpSandboxesIfNotUnique, dumpSlicesIfNotUnique, generateOpaqueId } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n\nconst { Sandbox, SandboxError } = __webpack_require__(/*! ./sandbox */ \"./src/dcp-client/worker/sandbox.js\");\nconst { Slice, SLICE_STATUS_UNASSIGNED, SLICE_STATUS_FAILED } = __webpack_require__(/*! ./slice */ \"./src/dcp-client/worker/slice.js\");\nconst { SupervisorCache } = __webpack_require__(/*! ./supervisor-cache */ \"./src/dcp-client/worker/supervisor-cache.js\");\nconst RingBuffer = __webpack_require__(/*! dcp/utils/ringBuffer */ \"./src/utils/ringBuffer.js\");\nconst supShared = __webpack_require__(/*! 
./SupShared */ \"./src/dcp-client/worker/SupShared.js\");\n\n/** Make timers 10x slower when running in niim */\nlet timeDilation = 1;\nif (DCP_ENV.platform === 'nodejs') {\n /** Make timers 10x slower when running in niim */\n timeDilation = (requireNative('module')._cache.niim instanceof requireNative('module').Module) ? 10 : 1;\n}\n\n/** @typedef {string} opaqueId */ // 22 character base64 string\n/** @typedef {import('./index').Worker} Worker */\n/** @typedef {import('dcp/dcp-client/wallet/keystore').Keystore} Keystore */\n/** @typedef {import('dcp/utils/jsdoc-types').SliceMessage} SliceMessage */\n/** @typedef {import('dcp/utils/jsdoc-types').Auth} Auth */\n/** @typedef {import('dcp/utils/jsdoc-types').Signature} Signature */\n/** @typedef {import('dcp/utils/jsdoc-types').SupervisorOptions} SupervisorOptions */\n\n/**\n * @typedef {object} SandboxSlice\n * @property {Sandbox} sandbox\n * @property {Slice} slice\n */\n\n// Slightly different from descriptor in dcp/utils/jsdoc-types\n/**\n * @typedef {object} TDPayload\n * @property {Address} owner\n * @property {Signature} signature\n * @property {Auth} auth\n * @property {Body} body\n */\n\n// Slightly different from descriptor in dcp/utils/jsdoc-types\n/**\n * @access public\n * @typedef {object} Body\n * @property {Object<string, object>} [newJobs]\n * @property {SliceMessage[]} task\n * @property {Object<string, string[]>} [computeGroupJobs]\n * @property {Object<string, Object<string, string[]>>} [computeGroupOrigins]\n * @property {{ targetTaskDuration: number }} [schedulerConfig]\n */\n\nclass Supervisor extends EventEmitter {\n /**\n * @constructor\n * @param {Worker} worker\n * @param {Keystore} identity\n * @param {SupervisorOptions} options\n */\n constructor (worker, identity, options) {\n super({ captureRejections: false });\n\n if (!(identity instanceof Keystore))\n throw new Error(`identity ${JSON.stringify(identity)} must be an instance of Keystore`);\n\n debugging('supervisor') && console.debug('Supervisor.options', options);\n assert(options === worker.workerOptions);\n\n /** @type {Worker} */\n this.worker = worker;\n this.setDefaultIdentityKeystore(identity);\n // Sup1/Sup2 compatibility.\n this._identityKeystore = identity;\n /** @type {SupervisorOptions} */\n this.options = options;\n\n debugging() && console.debug('Supervisor 1: cores.cpu, cores.gpu, maxSandboxes', options.cores?.cpu, options.cores?.gpu, this.maxSandboxes);\n\n // Verify options.jobAddresses exist\n if (!this.options.jobAddresses)\n this.options.jobAddresses = [];\n\n this.tuning = {\n watchdogInterval: 7, /**< seconds - time between fetches when ENOTASK(? 
/wg nov 2019) */\n minSandboxStartDelay: 0.1, /**< seconds - minimum time between WebWorker starts */\n maxSandboxStartDelay: 0.7, /**< seconds - maximum delay time between WebWorker starts */\n ...dcpConfig.supervisor.tuning\n };\n \n /** @type {object} */\n this.jobHandles = {};\n \n /** @type {Sandbox[]} */\n this.sandboxes = [];\n\n /** @type {Sandbox[]} */\n this.readiedSandboxes = [];\n\n /** @type {Sandbox[]} */\n this.assignedSandboxes = [];\n\n /** @type {Slice[]} */\n this.slices = [];\n\n /** @type {Slice[]} */\n this.queuedSlices = [];\n\n /** @type {Slice[]} */\n this.lostSlices = [];\n\n /** @type {boolean} */\n this.matching = false;\n\n /** @type {boolean} */\n this.isFetchingNewWork = false;\n\n /** @type {number} */\n this.numberOfCoresReserved = 0;\n\n /** @type {number} */\n this.addressTruncationLength = 20; // Set to -1 for no truncation.\n\n /** @type {Array<{ address: string, reasons: string[] }>} */\n this.rejectedJobs = [];\n /** @type {Object<string, string[]>} */\n this.rejectedJobReasons = {};\n\n /** @type {number} */\n this.defaultMaxGPUs = 1;\n\n /**\n * TODO: Remove this when the supervisor sends all of the sandbox\n * capabilities to the scheduler when fetching work.\n * @type {object}\n */\n this.capabilities = null;\n\n /** @type {number} */\n this.lastProgressReport = 0;\n this._t0 = Date.now();\n\n /** \n * An N-slot ring buffer of job addresses. Stores all jobs that have had no more than 1 slice run in the ring buffer.\n * Required for the implementation of discrete jobs \n * @type {RingBuffer} \n */\n this.ringBufferofJobs = new RingBuffer(200); // N = 200 should be more than enough.\n \n /** @type {OriginAccessManager} */\n this.originManager = OriginAccessManager.construct(this.options.allowOrigins);\n \n /** @type {SupervisorCache} */\n this.cache = new SupervisorCache(this);\n /** @type {object} */\n this._connections = {}; /* active DCPv4 connections */\n // Call the watchdog every 7 seconds.\n this.watchdogInterval = dcp_timers.setInterval(() => this.watchdog(), this.tuning.watchdogInterval * 1000);\n this.watchdogInterval.unref();\n\n const ceci = this;\n\n // Initialize to null so these properties are recognized for the Supervisor class\n this.taskDistributorConnection = null;\n this.eventRouterConnection = null;\n this.resultSubmitterConnection = null;\n this.packageManagerConnection = null;\n this.openTaskDistributorConn = function openTaskDistributorConn()\n {\n let config = dcpConfig.scheduler.services.taskDistributor;\n ceci.taskDistributorConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'taskDistributor'));\n ceci.taskDistributorConnection.on('end', ceci.openTaskDistributorConn);\n\n // Worker API connect and disconnect event propogation\n ceci.taskDistributorConnection.on('connect', (event) => this.worker.emit('connect', event));\n ceci.taskDistributorConnection.on('disconnect', (event) => this.worker.emit('disconnect', event));\n }\n\n this.openEventRouterConn = function openEventRouterConn()\n {\n let config = dcpConfig.scheduler.services.eventRouter;\n ceci.eventRouterConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'eventRouter'));\n ceci.eventRouterConnection.on('end', ceci.openEventRouterConn);\n if (ceci.eventRouterMessageQueue.length)\n ceci.resendRejectedMessages(ceci.eventRouterConnection, ceci.eventRouterMessageQueue);\n\n // Worker API connect and disconnect event propogation\n 
ceci.eventRouterConnection.on('connect', (event) => this.worker.emit('connect', event));\n ceci.eventRouterConnection.on('disconnect', (event) => this.worker.emit('disconnect', event));\n }\n this.eventRouterMessageQueue = [];\n \n this.openResultSubmitterConn = function openResultSubmitterConn()\n {\n let config = dcpConfig.scheduler.services.resultSubmitter;\n ceci.resultSubmitterConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'resultSubmitter'));\n ceci.resultSubmitterConnection.on('end', ceci.openResultSubmitterConn);\n if (ceci.resultSubmitterMessageQueue.length)\n ceci.resendRejectedMessages(ceci.resultSubmitterConnection, ceci.resultSubmitterMessageQueue);\n\n // Worker API connect and disconnect event propogation\n ceci.resultSubmitterConnection.on('connect', (event) => this.worker.emit('connect', event));\n ceci.resultSubmitterConnection.on('disconnect', (event) => this.worker.emit('disconnect', event));\n }\n this.resultSubmitterMessageQueue = [];\n\n this.openPackageManagerConn = function openPackageManagerConn()\n {\n let config = dcpConfig.packageManager;\n ceci.packageManagerConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'packageManager'));\n ceci.packageManagerConnection.on('end', ceci.openPackageManagerConn);\n if (ceci.packageManagerMessageQueue.length)\n ceci.resendRejectedMessages(ceci.packageManagerConnection, ceci.packageManagerMessageQueue);\n\n // Worker API connect and disconnect event propogation\n ceci.packageManagerConnection.on('connect', (event) => this.worker.emit('connect', event));\n ceci.packageManagerConnection.on('disconnect', (event) => this.worker.emit('disconnect', event));\n }\n this.packageManagerMessageQueue = [];\n\n //\n // Some properties that used to be static, but should not have been\n //\n /** @type {number | boolean} */\n this.lastAssignFailTimerMs = false;\n /** @type {boolean} */\n this.startSandboxWork_beenCalled = false;\n /** @type {boolean} */\n this.debugBuild = ((__webpack_require__(/*! 
dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug');\n /**\n * When this.sliceTiming is set to be true, it displays the timings of a every slice\n * slice['pairingDelta'] = timespan of when slice is paired with sandbox until execution starts\n * slice['executionDelta'] = timespan of execution in sandbox\n * slice['resultDelta'] = timespan of when sandbox finishes executing until recordResult completes.\n * @type {boolean}\n */\n this.sliceTiming = false;\n }\n\n /**\n * This getter is the absolute source-of-truth for what the\n * identity keystore is for this instance of the Supervisor.\n * @type {Keystore}\n */\n get identityKeystore() {\n return this._identityKeystore || this.defaultIdentityKeystore;\n }\n /** @type {Keystore} */\n set identityKeystore(ks) {\n if (!(ks instanceof Keystore))\n throw new Error('Supervisor.identityKeystore: must be an instance of Keystore.');\n this._identityKeystore = ks;\n }\n \n /**\n * Maximum sandboxes allowed to work at a given time.\n * @type {number}\n */\n get maxSandboxes() { return Math.floor(this.options.cores?.cpu || 1); }\n /**\n * Maximum sandboxes allowed to work at a given time.\n * @type {number}\n */\n set maxSandboxes(newVal) { this.options.cores.cpu = Math.floor(newVal); }\n\n /**\n * Return worker opaqueId.\n * @type {opaqueId}\n */\n get workerOpaqueId() {\n if (!this._workerOpaqueId)\n this._workerOpaqueId = localStorage.getItem('workerOpaqueId');\n\n if (!this._workerOpaqueId || this._workerOpaqueId.length !== constants.workerIdLength)\n this.workerOpaqueId = generateOpaqueId();\n\n return this._workerOpaqueId;\n }\n /** @type {opaqueId} */\n set workerOpaqueId(newId) {\n this._workerOpaqueId = newId;\n localStorage.setItem('workerOpaqueId', this._workerOpaqueId);\n }\n\n /**\n * Sup1/Sup2 compatibility helpers.\n * @deprecated Rip this out when we kill sup1.\n * @todo XXXpfr Rip out this sup2/sup1 special-casing when we finally kill sup1.\n * We want to present a consistent interface, regardless whether it's sup1 or sup2.\n */\n /** @type {boolean} */\n get working() { return this.worker.working; }\n /** @type {opaqueId} */\n get workerId() { return this.workerOpaqueId; }\n /** @type {opaqueId} */\n set workerId(newWorkerId) { this.workerOpaqueId = newWorkerId; }\n //get fetchTaskBarrier () { return this.isFetchingNewWork; }\n //set fetchTaskBarrier (value) { this.isFetchingNewWork = value; }\n get version() { return '1.5.0' }\n /** @type {boolean} */\n get isSupervisor1() { return true; }\n /** @type {boolean} */\n get isSupervisor2() { return false; }\n /** @type {Slice[]} */\n get workingSlices() { return this.allocatedSlices; }\n /**\n * Get the job descriptor for jobAddress;\n * viz., the object value corresponding to the key jobAddress,\n * in the object returned by getJobsForTask in task-jobs.js.\n * @param {string} jobAddress\n * @returns {object}\n */\n jobDescriptor (jobAddress) {\n if (!this.cache.cache.job[jobAddress])\n throw new Error(`Cannot find the job descriptor corresponding to jobAddress ${jobAddress}`);\n return this.cache.fetch('job', jobAddress);\n }\n\n /**\n * Open all connections. 
Used when supervisor is instantiated or stopped/started\n * to initially open connections.\n */\n instantiateAllConnections() {\n if (!this.taskDistributorConnection)\n this.openTaskDistributorConn();\n \n if (!this.eventRouterConnection)\n this.openEventRouterConn();\n \n if (!this.resultSubmitterConnection)\n this.openResultSubmitterConn();\n\n if (!this.packageManagerConnection)\n this.openPackageManagerConn();\n }\n \n /**\n * Asynchronously send a result to the result submitter that was previously rejected.\n * Different from resendRejectedMessages below in the sense that the function only resolves\n * once we've delivered the result, or gone past our max number of attempts.\n * @param {object} result \n * @returns the response payload from the result operation\n */\n async resendResult(result) {\n var protocolError = false;\n if (!result.sendRetries)\n result.sendRetries = 1;\n else\n result.sendRetries++;\n \n if (result.sendRetries > this.tuning.maxResultSubmissionRetries)\n throw new DCPError(`Could not submit result after ${dcpConfig.supervisor.tuning.maxResultSubmissionRetries} attempts. Aborting.`);\n \n debugging() && console.debug(`supervisor - failed to submit result ${result.sendRetries} time(s), trying again `)\n let res = await this.resultSubmitterConnection.request('result', result).catch(async (e) => {\n debugging('supervisor') && console.error(`Failed to submit result to scheduler for slice ${result.slice} of job ${result.job}:\\n ${e} \\nWill try again on new connection.`);\n this.resultSubmitterConnection.close();\n await a$sleepMs(10); /* let connection recycle */\n protocolError = true;\n });\n if ((!res.success && res.payload && res.payload.code === 'DCPS-01002') || protocolError)\n return this.resendResult(result)\n else\n return res;\n }\n \n /**\n * Try sending messages that were rejected on an old instance of the given connection.\n * These are messages that a) were rejected due to a protocol error and b) don't care when exactly\n * they're sent in the grand scheme of things.\n */\n resendRejectedMessages(connection, messageQueue) {\n var message, quitLoop;\n if (connection.resendingMessages) /* if the passed connection is already in the loop, exit */\n return;\n \n message = messageQueue.shift();\n\n do {\n connection.resendingMessages = true;\n quitLoop = false;\n \n connection.request(message.operation, message.data)\n .catch((e) =>\n {\n /* Protocol Error; Close connection (this will trigger the opening of a new connection that will try sending again) */\n debugging('supervisor') && console.error(`Failed to send message ${message.operation} to scheduler: ${e}. Will try again on a new connection.`);\n messageQueue.unshift(message);\n connection.close();\n quitLoop = true;\n });\n \n message = messageQueue.shift();\n \n } while (message && !quitLoop)\n\n connection.resendingMessages = false;\n }\n\n /** Set the default identity keystore -- needs to happen before anything that talks\n * to the scheduler for work gets called. This is a wart and should be removed by\n * refactoring.\n *\n * The default identity keystore will be used if the Supervisor was not provided\n * with an alternate. This keystore will be located via the Wallet API, and \n * if not found, a randomized default identity will be generated. 
\n *\n * @param {Keystore} [ks] - An instance of wallet::Keystore -- if undefined, we pick the best default we can.\n * @returns {Promise<void>}\n */\n async setDefaultIdentityKeystore(ks) {\n try {\n if (ks) {\n this.defaultIdentityKeystore = ks;\n return;\n }\n\n if (this.defaultIdentityKeystore)\n return;\n\n try {\n this.defaultIdentityKeystore = await wallet.getId();\n } catch(e) {\n debugging('supervisor') && console.debug('Error generating default identity, try to do it another way.');\n this.defaultIdentityKeystore = await new wallet.IdKeystore(null, '');\n }\n } finally {\n if (this.defaultIdentityKeystore)\n debugging('supervisor') && console.debug('Set default identity =', this.defaultIdentityKeystore.address);\n else\n debugging('supervisor') && console.debug('Failed to set default identity, worker cannot work.');\n }\n }\n\n //\n // What follows is a bunch of utility properties and functions for creating filtered views\n // of the slices and sandboxes array.\n //\n\n /**\n * Please do not use this.workingSandboxes; use this.allocatedSandboxes instead.\n * Sandboxes that are in WORKING state.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Sandbox[]}\n */\n get workingSandboxes() {\n return this.sandboxes.filter(sandbox => sandbox.isWorking);\n }\n\n /**\n * Use instead of this.workingSandboxes.\n *\n * When a sandbox is paired with a slice, execution is pending and sandbox.allocated=true and\n * sandbox.slice=slice and sandbox.jobAddress=slice.jobAddress. This is what 'allocated' means.\n * Immediately upon the exit of sandbox.work, sandbox.allocated=false is set and if an exception\n * wasn't thrown the sandbox is placed in this.assignedSandboxes.\n * Thus from the pov of supervisor, this.allocatedSandboxes is deterministic and this.workingSandboxes is not.\n * Please try to not use this.workingSandboxes. 
It is deprecated.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Sandbox[]}\n */\n get allocatedSandboxes() {\n return this.sandboxes.filter(sandbox => sandbox.allocated);\n }\n\n /**\n * Slices that are allocated.\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Slice[]}\n */\n get allocatedSlices() {\n return this.slices.filter(slice => slice.allocated);\n }\n\n /**\n * This property is used as the target number of sandboxes to be associated with slices and start working.\n *\n * It is used in this.watchdog as to prevent a call to this.work when unallocatedSpace <= 0.\n * It is also used in this.distributeQueuedSlices where it is passed as an argument to this.matchSlicesWithSandboxes to indicate how many sandboxes\n * to associate with slices and start working.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {number}\n */\n get unallocatedSpace() {\n return this.maxSandboxes - this.allocatedSandboxes.length - this.numberOfCoresReserved;\n }\n \n /**\n * Call acquire(numberOfCoresToReserve) to reserve numberOfCoresToReserve unallocated sandboxes as measured by unallocatedSpace.\n * Call release() to undo the previous acquire.\n * This pseudo-mutex technique helps prevent races in scheduling slices in Supervisor.\n * @param {number} numberOfCoresToReserve\n */\n acquire(numberOfCoresToReserve) { \n this.numberOfCoresReserved = numberOfCoresToReserve; \n }\n release() { \n this.numberOfCoresReserved = 0; \n }\n\n /**\n * Remove from this.slices.\n * @param {Slice} slice\n */\n removeSlice(slice) {\n this.removeElement(this.slices, slice);\n if (this.debugBuild) {\n if (this.queuedSlices.indexOf(slice) !== -1)\n throw new Error(`removeSlice: slice ${slice.identifier} is in queuedSlices; inconsistent state.`);\n if (this.lostSlices.length > 0) {\n debugging() && console.warn(`removeSlice: slice ${slice.identifier}, found lostSlices ${this.lostSlices.map(s => s.identifier)}`);\n if (this.lostSlices.indexOf(slice) !== -1)\n throw new Error(`removeSlice: slice ${slice.identifier} is in lostSlices; inconsistent state.`);\n }\n }\n }\n\n /**\n * Remove from this.slices.\n * @param {Slice[]} slices\n */\n removeSlices(slices) {\n this.slices = this.slices.filter(slice => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove from this.queuedSlices.\n * @param {Slice[]} slices\n */\n removeQueuedSlices(slices) {\n this.queuedSlices = this.queuedSlices.filter(slice => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove all terminated sandboxes.\n */\n removeTerminatedSanboxes () {\n this.sandboxes = this.sandboxes.filter((sbx) => !sbx.isTerminated);\n this.assignedSandboxes = this.assignedSandboxes.filter((sbx) => !sbx.isTerminated);\n this.readiedSandboxes = this.readiedSandboxes.filter((sbx) => !sbx.isTerminated);\n }\n\n /**\n * Remove from this.sandboxes, this.assignedSandboxes and this.readiedSandboxes.\n * @param {Sandbox} sandbox\n */\n removeSandbox(sandbox) {\n debugging('scheduler') && console.log(`removeSandbox ${sandbox.identifier}`);\n this.removeElement(this.sandboxes, sandbox);\n this.removeElement(this.assignedSandboxes, sandbox);\n this.removeElement(this.readiedSandboxes, sandbox);\n }\n\n /**\n * Remove from this.sandboxes and this.assignedSandboxes .\n * @param {Sandbox[]} sandboxes\n */\n async removeSandboxes(sandboxes) {\n debugging('scheduler') && 
console.log(`removeSandboxes: Remove ${sandboxes.length} sandboxes ${this.dumpSandboxes(sandboxes)}`);\n this.sandboxes = this.sandboxes.filter(sandbox => sandboxes.indexOf(sandbox) === -1);\n this.assignedSandboxes = this.assignedSandboxes.filter(sandbox => sandboxes.indexOf(sandbox) === -1);\n\n if (this.debugBuild) {\n const readied = this.readiedSandboxes.filter(sandbox => sandboxes.indexOf(sandbox) !== -1);\n if (readied.length > 0)\n throw new Error(`removeSandboxes: sandboxes ${readied.map(s => s.identifier)} are in readiedSandboxes; inconsistent state.`);\n }\n }\n\n /**\n * Remove element from theArray.\n * @param {Array<*>} theArray\n * @param {object|number} element\n */\n removeElement(theArray, element) {\n let index = theArray.indexOf(element);\n if (index !== -1) theArray.splice(index, 1);\n }\n\n /**\n * Log sliceArray.\n * @param {Slice[]} sliceArray\n * @param {string} [header]\n * @returns {string}\n */\n dumpSlices(sliceArray, header) {\n if (header) console.log(`\\n${header}`);\n return compressSlices(sliceArray, this.addressTruncationLength);\n }\n\n /**\n * Log sandboxArray.\n * @param {Sandbox[]} sandboxArray\n * @param {string} [header]\n * @returns {string}\n */\n dumpSandboxes(sandboxArray, header) {\n if (header) console.log(`\\n${header}`);\n return compressSandboxes(sandboxArray, this.addressTruncationLength);\n }\n\n /**\n * If the elements of sandboxSliceArray are not unique, log the duplicates and dump the array.\n * @param {SandboxSlice[]} sandboxSliceArray\n * @param {string} header\n */\n dumpSandboxSlicesIfNotUnique(sandboxSliceArray, header) {\n if (!this.isUniqueSandboxSlices(sandboxSliceArray, header))\n console.log(this.dumpSandboxSlices(sandboxSliceArray));\n }\n\n /**\n * Log { sandbox, slice }.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @returns {string}\n */\n dumpSandboxAndSlice(sandbox, slice) {\n return `${sandbox.id}~${slice.sliceNumber}.${this.dumpJobAddress(slice.jobAddress)}`;\n }\n\n /**\n * Log { sandbox, slice } with state/status.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @returns {string}\n */\n dumpStatefulSandboxAndSlice(sandbox, slice) {\n return `${sandbox.id}.${sandbox.state}~${slice.sliceNumber}.${this.dumpJobAddress(slice.jobAddress)}.${slice.status}`;\n }\n\n /**\n * Truncates jobAddress.toString() to this.addressTruncationLength digits.\n * @param {string} jobAddress\n * @returns {string}\n */\n dumpJobAddress(jobAddress) {\n return truncateAddress(jobAddress, this.addressTruncationLength /* digits*/);\n }\n\n /**\n * Dump sandboxSliceArray.\n * @param {SandboxSlice[]} sandboxSliceArray - input array of { sandbox, slice }\n * @param {string} [header] - optional header\n * @param {boolean} [stateFul] - when true, also includes slice.status and sandbox.state.\n * @returns {string}\n */\n dumpSandboxSlices(sandboxSliceArray, header, stateFul=false) {\n if (header) console.log(`\\n${header}`);\n const jobMap = {};\n sandboxSliceArray.forEach(ss => {\n const sss = stateFul ? 
`${ss.sandbox.id}.${ss.sandbox.state}~${ss.slice.sliceNumber}.${ss.slice.status}` : `${ss.sandbox.id}~${ss.slice.sliceNumber}`;\n if (!jobMap[ss.slice.jobAddress]) jobMap[ss.slice.jobAddress] = sss;\n else jobMap[ss.slice.jobAddress] += `,${sss}`;\n });\n let output = '';\n for (const [jobAddress, sss] of Object.entries(jobMap))\n output += `${this.dumpJobAddress(jobAddress)}:[${sss}]:`;\n return output;\n }\n\n /**\n * Check sandboxSliceArray for duplicates.\n * @param {SandboxSlice[]} sandboxSliceArray\n * @param {string} [header]\n * @param {function} [log]\n * @returns {boolean}\n */\n isUniqueSandboxSlices(sandboxSliceArray, header, log) {\n const result = [], slices = [], sandboxes = [];\n let once = true;\n sandboxSliceArray.forEach(x => {\n const sliceIndex = slices.indexOf(x.slice);\n const sandboxIndex = sandboxes.indexOf(x.sandbox);\n\n if (sandboxIndex >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x.sandbox) : console.log(`\\tWarning: Found duplicate sandbox ${x.sandbox.identifier}.`);\n } else sandboxes.push(x.sandbox);\n\n if (sliceIndex >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x.slice) : console.log(`\\tWarning: Found duplicate slice ${x.slice.identifier}.`);\n } else {\n slices.push(x.slice);\n if (sandboxIndex < 0) result.push(x);\n }\n });\n return sandboxSliceArray.length === result.length;\n }\n\n /**\n * Attempts to create and start a given number of sandboxes.\n * The sandboxes that are created can then be assigned for a\n * specific job at a later time. All created sandboxes\n * get put into the @this.readiedSandboxes array when allocateLocalSandboxes is false.\n *\n * @param {number} numSandboxes - the number of sandboxes to create\n * @param {boolean} [allocateLocalSandboxes=false] - when true, do not place in this.readiedSandboxes\n * @returns {Promise<Sandbox[]>} - resolves with array of created sandboxes, rejects otherwise\n * @throws when given a numSandboxes is not a number or if numSandboxes is Infinity\n */\n async readySandboxes (numSandboxes, allocateLocalSandboxes = false) {\n debugging('supervisor') && console.debug(`readySandboxes: Readying ${numSandboxes} sandboxes, total sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n \n if (typeof numSandboxes !== 'number' || Number.isNaN(numSandboxes) || numSandboxes === Infinity) {\n throw new Error(`${numSandboxes} is not a number of sandboxes that can be readied.`);\n }\n if (numSandboxes <= 0) {\n return [];\n }\n\n const sandboxStartPromises = [];\n const sandboxes = [];\n const errors = [];\n for (let i = 0; i < numSandboxes; i++) {\n const sandbox = new Sandbox(this, {\n ...this.options.sandboxOptions,\n });\n \n sandbox.addListener('start', () => {\n // When sliceNumber == 0, result-submitter status skips the slice,\n // so don't send it in the first place.\n // The 'start' event is fired when a worker starts up, hence there's no way\n // to determine whether sandbox has a valid slice without checking.\n if (sandbox.slice) {\n const jobAddress = sandbox.jobAddress;\n const sliceNumber = sandbox.slice.sliceNumber;\n // !authorizationMessage <==> sliceNumber === 0.\n const authorizationMessage = sandbox.slice.getAuthorizationMessage();\n\n if (authorizationMessage) {\n let statusPayload = {\n worker: this.workerOpaqueId,\n slices: [{\n job: jobAddress,\n sliceNumber: sliceNumber,\n status: 'begin',\n authorizationMessage,\n }],\n }\n \n try /* resultSubmitterConnection can 
be null if worker is stopped */\n {\n this.resultSubmitterConnection.request('status', statusPayload).catch((error) => {\n debugging('supervisor') && console.error(`Error sending 'status' for slice ${sliceNumber} of job ${jobAddress}:\\n ${error}\\nWill try again on a new connection`);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: statusPayload });\n this.resultSubmitterConnection.close();\n });\n } catch (error)\n {\n debugging('supervisor') && console.error(`Failed to send 'status' for slice ${sliceNumber} of job ${jobAddress}:, no connection to result submitter:`, error);\n }\n }\n }\n });\n sandbox.addListener('workEmit', ({ eventName, payload }) => {\n // Need to check if the sandbox hasn't been assigned a slice yet.\n if (!sandbox.slice) {\n if (this.debugBuild) {\n console.error(\n `Sandbox not assigned a slice before sending workEmit message to scheduler. 'workEmit' event originates from \"${eventName}\" event`, \n payload,\n );\n }\n }\n else\n {\n const jobAddress = sandbox.slice.jobAddress;\n const sliceNumber = sandbox.slice.sliceNumber;\n // sliceNumber can be zero if it came from a problem with loading modules.\n assert(jobAddress && (sliceNumber || sliceNumber === 0));\n // Send a work emit message from the sandbox to the event router\n // !authorizationMessage <==> sliceNumber === 0.\n let authorizationMessage;\n try {\n // Sometimes a sliceNumber===0 workEmit comes in before the client bundle is properly loaded.\n // Also happens with minor dcp-client version mismatches.\n authorizationMessage = sandbox.slice.getAuthorizationMessage();\n } catch(e) {\n authorizationMessage = null;\n }\n\n if (!authorizationMessage)\n {\n this.worker.emit('warning', `workEmit: missing authorization message for job ${jobAddress}, slice: ${sliceNumber}`);\n return Promise.resolve();\n }\n \n let workEmitPayload = {\n eventName,\n payload,\n job: jobAddress,\n slice: sliceNumber,\n worker: this.workerOpaqueId,\n authorizationMessage,\n }\n \n const workEmitPromise = this.eventRouterConnection.request('workEmit', workEmitPayload).catch(error => {\n debugging('supervisor') && console.warn(`workEmit: unable to send ${eventName} for slice ${sliceNumber} of job ${jobAddress}: ${error.message}.\\nTrying again on a new connection.`, error);\n this.eventRouterMessageQueue.push({ operation: 'workEmit', data: workEmitPayload })\n this.eventRouterConnection.close();\n });\n\n if (this.debugBuild) {\n workEmitPromise.then(result => {\n if (!result || !result.success)\n this.worker.emit('warning', `workEmit: event router did not accept event: ${result}`);\n });\n }\n }\n });\n\n // When any sbx completes, \n sandbox.addListener('complete', () => {\n this.watchdog();\n });\n\n sandbox.on('sandboxError', (error) => handleSandboxError(this, sandbox, error));\n \n sandbox.on('rejectedWorkMetrics', (data) =>{\n function updateRejectedMetrics(report) {\n ['total', 'CPU', 'webGL', 'webGPU'].forEach((key) => {\n if (report[key]) sandbox.slice.rejectedTimeReport[key] += report[key];\n })\n }\n \n // If the slice already has rejected metrics, add this data to it. If not, assign this data to slices rejected metrics property\n if (sandbox.slice && data.timeReport) {\n (sandbox.slice.rejectedTimeReport) ? 
updateRejectedMetrics(data.timeReport) : sandbox.slice.rejectedTimeReport = data.timeReport;\n }\n })\n \n // If the sandbox terminated and we are not shutting down, then should return all work which is currently\n // not being computed if all sandboxes are dead and the attempt to create a new one fails.\n sandbox.sandboxHandle.on('end', async () => {\n if (this.sandboxes.length > 0) {\n let terminatedSandboxes = this.sandboxes.filter(sbx => sbx.isTerminated);\n if (terminatedSandboxes.length === this.sandboxes.length) {\n debugging() && console.debug(`readySandboxes: Create 1 sandbox in the sandbox-terminated-handler, total sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n try {\n await this.readySandboxes(1);\n } catch (e) {\n // If we cannot create a new sandbox, that probably means we're on a screensaver worker\n // and the screensaver is down. So return the slices to the scheduler.\n debugging() && console.debug('Terminated handler: Evaluator is down');\n this.removeTerminatedSanboxes();\n this.returnSlices(this.queuedSlices).then(() => {\n this.queuedSlices.length = 0;\n });\n }\n }\n }\n })\n\n const delayMs =\n 1000 *\n (this.tuning.minSandboxStartDelay +\n Math.random() *\n (this.tuning.maxSandboxStartDelay - this.tuning.minSandboxStartDelay));\n \n sandboxStartPromises.push(\n sandbox\n .start(delayMs)\n .then(() => {\n this.worker.emit('sandbox', sandbox.sandboxHandle);\n if (!allocateLocalSandboxes) this.readiedSandboxes.push(sandbox);\n this.sandboxes.push(sandbox);\n sandboxes.push(sandbox);\n }).catch((err) => {\n errors.push(err);\n this.returnSandbox(sandbox);\n if (err.code === 'ENOWORKER') {\n throw new DCPError(\"Cannot use localExec without dcp-worker installed. Use the command 'npm install dcp-worker' to install the neccessary modules.\", 'ENOWORKER');\n }\n }));\n }\n \n await Promise.all(sandboxStartPromises);\n\n if (errors.length) {\n debugging() && console.warn(`Failed to ready ${errors.length} of ${numSandboxes} sandboxes.`, errors);\n errors.forEach((e) => { this.worker.emit('error', e); });\n throw new Error('Failed to ready sandboxes.');\n }\n\n debugging('supervisor') && console.log(`readySandboxes: Readied ${sandboxes.length} sandboxes ${JSON.stringify(sandboxes.map(sandbox => sandbox.id))}`);\n \n return sandboxes;\n }\n\n /**\n * Accepts a sandbox after it has finished working or encounters an error.\n * If the sandbox was terminated or if \"!slice || slice.failed\" then\n * the sandbox will be removed from the sandboxes array and terminated if necessary.\n * Otherwise it will try to distribute a slice to the sandbox immediately.\n *\n * @param {Sandbox} sandbox - the sandbox to return\n * @param {Slice} [slice] - the slice just worked on; !slice => terminate\n */\n returnSandbox (sandbox, slice) {\n if (!slice || slice.failed || sandbox.isTerminated) {\n \n this.removeSandbox(sandbox);\n \n if (!sandbox.isTerminated) {\n debugging('supervisor') && console.log(`Supervisor.returnSandbox: Terminating ${sandbox.identifier}~${slice ? slice.identifier : '<null>'}, # of sandboxes ${this.sandboxes.length}`);\n sandbox.terminate(false);\n } else {\n debugging() && console.log(`Supervisor.returnSandbox: Already terminated ${sandbox.identifier}~${slice ? 
slice.identifier : '<null>'}, # of sandboxes ${this.sandboxes.length}`);\n }\n }\n }\n\n /**\n * Terminates sandboxes, in order of creation, when the total started sandboxes exceeds the total allowed sandboxes.\n *\n * @returns {Promise<void>}\n */\n pruneSandboxes () {\n let numOver = this.sandboxes.length - this.maxSandboxes;\n if (numOver <= 0) return;\n\n // Don't kill readied sandboxes while creating readied sandboxes.\n for (let index = 0; index < this.readiedSandboxes.length; ) {\n const sandbox = this.readiedSandboxes[index];\n // If the sandbox is allocated, advance to the next one in the list.\n if (sandbox.allocated) {\n index++;\n continue;\n }\n // Otherwise, remove this sandbox but look at the same array index in the next loop.\n debugging('supervisor') && console.log(`pruneSandboxes: Terminating readied sandbox ${sandbox.identifier}`);\n this.readiedSandboxes.splice(index, 1);\n this.returnSandbox(sandbox);\n\n if (--numOver <= 0) break;\n }\n\n if (numOver <= 0) return;\n for (let index = 0; index < this.assignedSandboxes.length; ) {\n const sandbox = this.assignedSandboxes[index];\n // If the sandbox is allocated, advance to the next one in the list.\n if (sandbox.allocated) {\n index++;\n continue;\n }\n // Otherwise, remove this sandbox but look at the same array index in the next loop.\n debugging('supervisor') && console.log(`pruneSandboxes: Terminating assigned sandbox ${sandbox.identifier}`);\n this.assignedSandboxes.splice(index, 1);\n this.returnSandbox(sandbox);\n\n if (--numOver <= 0) break;\n }\n }\n \n /**\n * Basic watch dog to check if there are idle sandboxes and\n * attempts to nudge the supervisor to feed them work.\n *\n * Run in an interval created in the constructor .\n * @returns {Promise<void>}\n */\n async watchdog () {\n if (!this.watchdogState)\n this.watchdogState = {};\n\n // Every 5 minutes, report progress of all working slices to the scheduler\n if (Date.now() > ( (this.lastProgressReport || 0) + (dcpConfig.supervisor?.sandbox?.progressReportInterval || 20 * 60 * 1000) )) {\n // console.log('454: Assembling progress update...');\n this.lastProgressReport = Date.now();\n\n //\n // Note: this.slices is the disjoint union of:\n // this.allocatedSlices, \n // this.queuedSlices, \n // this.slices.filter(slice => !slice.isUnassigned) .\n // When a slice is not in these 3 arrays, the slice is lost.\n //\n \n const currentLostSlices = this.slices.filter(slice => slice.isUnassigned \n && this.queuedSlices.indexOf(slice) === -1\n && this.allocatedSlices.indexOf(slice) === -1);\n\n if (currentLostSlices.length > 0) {\n this.lostSlices.push(...currentLostSlices);\n // Try to recover.\n // Needs more work and testing.\n // Test when we can come up with a decent lost slice repro case.\n // --> this.queuedSlices.push(...currentLostSlices);\n }\n\n if (this.lostSlices.length > 0) {\n if (debugging()) {\n console.warn('Supervisor.watchdog: Found lost slices!');\n for (const slice of this.lostSlices)\n console.warn('\\t', slice.identifier);\n }\n this.lostSlices = this.lostSlices.filter(slice => slice.isUnassigned);\n }\n\n const slices = [];\n this.queuedSlices.forEach(slice => {\n assert(slice && slice.sliceNumber > 0);\n addToSlicePayload(slices, slice, sliceStatus.scheduled);\n });\n\n this.allocatedSlices.forEach(slice => {\n assert(slice && slice.sliceNumber > 0);\n addToSlicePayload(slices, slice, 'progress'); // Beacon.\n });\n\n if (slices.length) {\n // console.log('471: sending progress update...');\n const progressReportPayload = {\n worker: 
this.workerOpaqueId,\n slices,\n };\n\n this.resultSubmitterConnection.request('status', progressReportPayload)\n .catch(error => {\n debugging('supervisor') && console.error('479: Failed to send status update:', error/*.message*/);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: progressReportPayload })\n this.resultSubmitterConnection.close();\n });\n }\n }\n\n if (this.worker.working) {\n if (this.unallocatedSpace > 0) {\n await this.work().catch(err => {\n if (!this.watchdogState[err.code || '0'])\n this.watchdogState[err.code || '0'] = 0;\n if (Date.now() - this.watchdogState[err.code || '0'] > ((this.tuning.watchdogLogInterval * timeDilation || 120) * 1000))\n this.worker.emit('error', `301: Failed to start work: ${err.message}`);\n this.watchdogState[err.code || '0'] = Date.now();\n });\n }\n\n this.pruneSandboxes();\n }\n }\n\n /**\n * Gets the logical and physical number of cores and also\n * the total number of sandboxes the worker is allowed to run\n *\n */\n getStatisticsCPU() {\n const pCores = dcpConfig.supervisor?.pCores;\n if (DCP_ENV.isBrowserPlatform) {\n return {\n worker: this.workerOpaqueId,\n lCores: pCores || window.navigator.hardwareConcurrency,\n pCores: pCores || window.navigator.hardwareConcurrency,\n sandbox: this.maxSandboxes\n }\n }\n\n return {\n worker: this.workerOpaqueId,\n lCores: pCores || requireNative('os').cpus().length,\n pCores: pCores || requireNative('physical-cpu-count'),\n sandbox: this.maxSandboxes\n }\n }\n\n static getLogicalCores () {\n if (DCP_ENV.isBrowserPlatform)\n return window.navigator.hardwareConcurrency;\n return requireNative('os').cpus().length;\n }\n\n /**\n * Returns the number of unallocated sandbox slots to send to fetchTask.\n *\n * @returns {number}\n */\n numberOfAvailableSandboxSlots() {\n let numCores;\n if (this.queuedSlices.length > 1) {\n // We have slices queued, no need to fetch\n numCores = 0;\n } else {\n // The queue is almost empty (there may be 0 or 1 element), fetch a full task.\n // The task is full, in the sense that it will contain slices whose\n // aggregate execution time is this.maxSandboxes * 5-minutes.\n // However, there can only be this.unallocatedSpace # of long slices.\n // Thus we need to know whether the last slice in this.queuedSlices is long or not.\n // (A long slice has estimated execution time >= 5-minutes.)\n const longSliceCount = (this.queuedSlices.length > 0 && this.queuedSlices[0].isLongSlice) ? 1 : 0;\n numCores = this.unallocatedSpace - longSliceCount;\n }\n return numCores;\n }\n\n /**\n * Retry wrapper around readySandboxes.\n * @param {number} count\n * @param {boolean} [allocateLocalSandboxes=false] - When true does not populate this.readiedSandboxes .\n */\n async createSandboxes (count, allocateLocalSandboxes = false) {\n /**\n * If the evaluator cannot start (ie. 
if the evalServer is not running),\n * then the while loop will keep retrying until the evalServer comes online.\n */\n let sandboxes, retry = 0;\n while (true) {\n try {\n debugging() && console.debug(`Supervisor.createSandboxes(${allocateLocalSandboxes}): ready ${count} sandbox(es), unallocatedSpace ${this.unallocatedSpace}, queued slices ${this.queuedSlices.length}, # of sandboxes ${this.sandboxes.length}`);\n sandboxes = await this.readySandboxes(1, allocateLocalSandboxes);\n count--;\n const sbxes = await this.readySandboxes(count, allocateLocalSandboxes);\n sandboxes.push(...sbxes);\n debugging('supervisor') && console.debug(`Supervisor.createSandboxes(${allocateLocalSandboxes}): success`, sandboxes.length);\n break;\n } catch (error) {\n debugging() && console.warn(`906: failed to ready sandboxes(${allocateLocalSandboxes}); will retry`, this.sandboxes.length, error.code, error.message);\n // The evaluator may be down or shutting down, keep retrying.\n if ((retry % 60) === 0)\n this.worker.emit('warning', error);\n await a$sleepMs(1000 * Math.min(5, ++retry));\n }\n }\n debugging() && console.debug(`createSandboxes(${allocateLocalSandboxes}): created ${sandboxes.length} sandboxes`);\n return sandboxes;\n }\n\n /**\n * Call to start doing work on the network.\n * This is the one place where requests to fetch new slices are made.\n * After the initial slices are fetched it calls this.distributeQueuedSlices.\n *\n * @returns {Promise<void>}, unallocatedSpace ${this.unallocatedSpace}\n */\n async work ()\n {\n // When inside matchSlicesWithSandboxes, don't reenter Supervisor.work to fetch new work or create new sandboxes.\n if (this.matching) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.work: Do not interleave work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n return Promise.resolve();\n }\n\n // Instantiate connections that don't exist.\n this.instantiateAllConnections();\n\n const numCores = this.numberOfAvailableSandboxSlots();\n\n debugging() && console.log(`Supervisor.work: Try to get ${numCores} slices in working sandboxes, unallocatedSpace ${this.unallocatedSpace}, queued slices ${this.queuedSlices.length}, # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching: ${this.isFetchingNewWork}`);\n \n // Fetch a new task if we have no more slices queued, then start workers\n try {\n if (numCores > 0 && !this.isFetchingNewWork) {\n this.isFetchingNewWork = true;\n\n /**\n * This will only ready sandboxes up to a total count of\n * maxSandboxes (in any state). It is not possible to know the\n * actual number of sandboxes required until we have the slices because we\n * may have sandboxes assigned for the slice's job already.\n *\n * If the evaluator cannot start (ie. 
if the evalServer is not running),\n * then the while loop will keep retrying until the evalServer comes online.\n */\n if (this.maxSandboxes > this.sandboxes.length) {\n await this.createSandboxes(this.maxSandboxes - this.sandboxes.length);\n }\n /**\n * Temporary change: Assign the capabilities of one of readied sandboxes\n * before fetching slices from the scheduler.\n *\n * TODO: Remove this once fetchTask uses the capabilities of every\n * sandbox to fetch slices.\n */\n if (!this.capabilities)\n this.capabilities = this.sandboxes[0].capabilities;\n\n if (DCP_ENV.isBrowserPlatform && this.capabilities.browser)\n this.capabilities.browser.chrome = DCP_ENV.isBrowserChrome;\n\n const fetchTimeout = dcp_timers.setTimeout(() => {\n this.worker.emit('warning', 'Fetch exceeded timeout, will reconnect at next watchdog interval');\n \n this.taskDistributorConnection.close().catch(error => {\n this.worker.emit('error', `Failed to close task-distributor connection ${error.message}`);\n });\n this.resultSubmitterConnection.close().catch(error => {\n this.worker.emit('error', `Failed to close result-submitter connection ${error.message}`);\n });\n this.isFetchingNewWork = false;\n this.instantiateAllConnections();\n }, 3 * 60 * 1000); // max out at 3 minutes to fetch\n\n // ensure result submitter and task distributor connections before fetching tasks\n try\n {\n await this.resultSubmitterConnection.keepalive();\n await this.taskDistributorConnection.keepalive();\n }\n catch (e)\n {\n this.worker.emit('error', `Failed to connect to result submitter, refusing to fetch slices. Will try again at next fetch cycle ${e.message}`);\n debugging('supervisor') && console.log(`Error: ${e}`);\n this.isFetchingNewWork = false; // <-- done in the `finally` block, below\n dcp_timers.clearTimeout(fetchTimeout);\n this.taskDistributorConnection.close().catch(error => {\n this.worker.emit('error', `Failed to close task-distributor connection ${error.message}`);\n });\n this.resultSubmitterConnection.close().catch(error => {\n this.worker.emit('error', `Failed to close result-submitter connection ${error.message}`);\n });\n return Promise.resolve();\n }\n await this.fetchTask(numCores).finally(() => {\n dcp_timers.clearTimeout(fetchTimeout);\n this.isFetchingNewWork = false;\n });\n }\n\n this.distributeQueuedSlices().then(() => debugging('supervisor') && 'supervisor: finished distributeQueuedSlices()').catch((e) => {\n // We should never get here, because distributeQueuedSlices was changed\n // to try to catch everything and return slices and sandboxes.\n // If we do catch here it may mean a slice was lost. \n debugging() && console.error('Supervisor.work catch handler for distributeQueuedSlices.', e);\n this.worker.emit('error', e);\n });\n // No catch(), because it will bubble outward to the caller\n } finally {\n }\n }\n\n /**\n * Generate the workerComputeGroups property of the requestTask message. \n * \n * Concatenate the compute groups object from dcpConfig with the list of compute groups\n * from the supervisor, and remove the public group if accidentally present. Finally,\n * we transform joinSecrets/joinHash into joinHashHash for secure transmission.\n *\n * @note computeGroup objects with joinSecrets are mutated to record their hashes. This\n * affects the supervisor options and dcpConfig. 
Re-adding a joinSecret property\n * to one of these will cause the hash to be recomputed.\n */\n generateWorkerComputeGroups()\n {\n return supShared.generateWorkerComputeGroups(this, this.taskDistributorConnection);\n }\n\n /**\n * Remove all unreferenced jobs in `this.cache`.\n *\n * @param {Object<string, object>} newJobs - Jobs that should not be removed from `this.cache`.\n */\n cleanJobCache(newJobs) {\n /* Delete all jobs in the supervisorCache that are not represented in this newJobs,\n * or in this.queuedSlices, or there is no sandbox assigned to these jobs.\n * Note: There can easily be 200+ places to check; using a lookup structure to maintain O(n).\n */\n if (this.cache.jobs.length > 0) {\n const jobAddressMap = {};\n Object.keys(newJobs).forEach(jobAddress => { jobAddressMap[jobAddress] = 1; });\n this.slices.forEach(slice => { if (!jobAddressMap[slice.jobAddress]) jobAddressMap[slice.jobAddress] = 1; });\n this.cache.jobs.forEach(jobAddress => {\n if (!jobAddressMap[jobAddress]) {\n this.cache.remove('job', jobAddress);\n this.jobHandles[jobAddress].emit('flush');\n // Remove and return the corresponding sandboxes from this.sandboxes.\n const deadSandboxes = this.sandboxes.filter(sb => sb.jobAddress === jobAddress);\n if (deadSandboxes.length > 0) {\n deadSandboxes.forEach(sandbox => { this.returnSandbox(sandbox); });\n debugging('supervisor') && console.log(`Supervisor.fetchTask: Deleting job ${jobAddress} from cache and assigned sandboxes ${deadSandboxes.map(s => s.id)}, # of sandboxes ${this.sandboxes.length}.`);\n }\n }\n });\n }\n }\n\n /**\n * Fetches a task, which contains job information and slices for sandboxes and\n * manages events related to fetching tasks so the UI can more clearly display\n * to user what is actually happening.\n * @param {number} [numCores]\n * @returns {Promise<void>} The requestTask request, resolve on success, rejects otherwise.\n * @emits Supervisor#beforeFetch\n * @emits Supervisor#fetch\n */\n async fetchTask(numCores) {\n // Don't reenter\n if (this.matching || numCores <= 0) {\n // Interesting and noisy.\n debugging('supervisor') && console.log(`Supervisor.fetchTask: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return;\n }\n\n //\n // Oversubscription mitigation.\n // Update when there are less available sandbox slots than numCores.\n const checkNumCores = this.numberOfAvailableSandboxSlots();\n if (!numCores || numCores > checkNumCores) numCores = checkNumCores;\n\n if (!(this.options.paymentAddress instanceof Address))\n throw new Error(`options.paymentAddress ${JSON.stringify(this.options.paymentAddress)} must be an instance of Address`);\n\n const fetchStartTime = Date.now();\n // The 'beforeFetch' event allows the user to cancel the requestTask request.\n let canceled = false;\n /**\n * The 'beforeFetch' event fires before the request is sent to requestTask in task distributor.\n * @link https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n * @event Supervisor#beforeFetch\n */\n this.worker.emit('beforeFetch', () => { canceled = true; })\n debugging() && canceled && console.log('User canceled the fetch task.');\n if (canceled)\n return;\n\n debugging('supervisor') && console.debug('supervisor: fetching task', numCores);\n const requestPayload = {\n supervisor: this.version,\n numCores,\n coreStats: 
this.getStatisticsCPU(),\n numGPUs: this.defaultMaxGPUs,\n capabilities: this.capabilities,\n paymentAddress: this.options.paymentAddress,\n jobAddresses: this.options.jobAddresses || [], // force array; when set, only fetches slices for these jobs\n workerComputeGroups: this.generateWorkerComputeGroups(),\n minimumWage: this.options.minimumWage || { CPU: 0, GPU: 0, in: 0, out: 0 },\n readyJobs: [ /* list of jobs addresses XXXwg */ ],\n previouslyWorkedJobs: this.ringBufferofJobs.buf, //Only discrete jobs\n rejectedJobs: this.rejectedJobs,\n };\n // workers should be part of the public compute group by default\n if (!booley(this.options.leavePublicGroup))\n requestPayload.workerComputeGroups.push(constants.computeGroups.public);\n debugging('computeGroups') && console.log(`Fetching work for ${requestPayload.workerComputeGroups.length} ComputeGroups: `, requestPayload.workerComputeGroups);\n debugging('supervisor') && console.log(`fetchTask wants ${numCores} slice(s), unallocatedSpace ${this.unallocatedSpace}, queuedSlices ${this.queuedSlices.length}`);\n try {\n debugging('requestTask') && console.debug('fetchTask: requestPayload', requestPayload);\n\n let result = await this.taskDistributorConnection.request('requestTask', requestPayload).catch((error) => {\n debugging('supervisor') && console.error(`Unable to request task from scheduler: ${error}. Will try again on a new connection.`);\n this.taskDistributorConnection.close();\n throw error; /* caught below */\n });\n /** @type {TDPayload} */\n const responsePayload = result.payload;\n\n if (!result.success) {\n debugging() && console.log('Task fetch failure; request=', requestPayload);\n debugging() && console.log('Task fetch failure; response=', responsePayload);\n throw new DCPError('Unable to fetch task for worker', responsePayload);\n }\n \n if (!responsePayload.body?.task?.length) {\n /**\n * The 'fetch' event fires when the supervisor has found no work from the task distributor.\n * @link https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n * @event Supervisor#fetch\n */\n this.worker.emit('fetch', {\n fetchStart: fetchStartTime,\n fetchEnd: Date.now(),\n fetchSize: 0, \n jobs: {},\n slices: {}\n })\n return;\n }\n\n /*\n * payload: { TDPayload }\n * TDPayload: { owner: Address, signature: Signature, auth: Auth, body: Body };\n * Auth: { workerId: string, authSlices: Object<string, SliceMessage[]>, schedulerId: { address: Address }, jobCommissions: Object<string, { rate: number, account: number }> }\n * Body: { newJobs: Object<string, object>, task: SliceMessage[], computeGroupJobs: Object<string, string[]>, computeGroupOrigins: Object<string, Object<string, string[]>>, schedulerConfig: {{ targetTaskDuration: number }} }\n */\n\n const { body, ...authorizationMessage } = responsePayload;\n let { newJobs, task } = body;\n assert(newJobs); // It should not be possible to have !newJobs -- we throw on !success.\n \n /*\n * If we have specified specific jobs the worker may work on, the received jobs must be in the specified job list\n * Otherwise panic since the scheduler cannot be trusted at that point.\n */\n if (this.options.jobAddresses?.length > 0 && !Object.keys(newJobs).every((ele) => this.options.jobAddresses.includes(ele)))\n {\n this.worker.emit('error', \"Worker received slices it shouldn't have. 
Rejecting the work and stopping.\");\n process.exit(1);\n }\n\n debugging() && console.log(`Supervisor.fetchTask: task: ${task.length}/${numCores}, jobs: ${Object.keys(newJobs).length}, authSlices: ${compressJobMap(authorizationMessage.auth.authSlices, this.addressTruncationLength /* digits*/)}`);\n // Delete all jobs in the supervisorCache that are not represented in this task,\n // or in this.queuedSlices, or there is no sandbox assigned to these jobs.\n this.cleanJobCache(newJobs);\n\n for (const jobAddress of Object.keys(newJobs))\n {\n const jobHandle = new EventEmitter({ captureRejections: false });\n Object.assign(jobHandle, {\n address: newJobs[jobAddress].address,\n name: newJobs[jobAddress].public.name,\n description: newJobs[jobAddress].public.description,\n link: newJobs[jobAddress].public.link,\n });\n\n this.jobHandles[jobAddress] = jobHandle;\n this.worker.emit('job', jobHandle);\n \n if (!this.cache.cache.job[jobAddress])\n this.cache.store('job', jobAddress, newJobs[jobAddress]);\n }\n\n // Memoize authMessage onto the Slice object, this should\n // follow it for its entire life in the worker.\n const tmpQueuedSlices = task.map(taskElement => new Slice(taskElement, authorizationMessage));\n\n // Make sure old stuff is up front.\n // matchSlicesWithSandboxes dequeues this.queuedSlices as follows:\n // slicesToMatch = this.queuedSlices.slice(0, numCores);\n this.slices.push(...tmpQueuedSlices);\n this.queuedSlices.push(...tmpQueuedSlices);\n \n // Populating the ring buffer based on job's discrete property \n Object.values(newJobs).forEach(job => {\n if(job.requirements.discrete && this.ringBufferofJobs.find(element => element === job.address) === undefined) {\n this.ringBufferofJobs.push(job.address);\n }\n });\n\n // Construct the fetchHandle for 'fetch' event.\n const jobs = {}, slices = {};\n for (const jobAddress in newJobs)\n {\n jobs[jobAddress] = this.jobHandles[jobAddress]\n slices[jobAddress] = authorizationMessage.auth.authSlices[jobAddress].length;\n }\n const fetchHandle = {\n fetchStart: fetchStartTime,\n fetchEnd: Date.now(),\n fetchSize: 0, // need change to protocol branch to efficiently implement\n jobs: jobs,\n slices\n };\n /**\n * The 'fetch' event fires when the supervisor has found work from the task distributor.\n * @link https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n * @event Supervisor#fetch\n */\n this.worker.emit('fetch', fetchHandle);\n\n } catch (error) {\n this.worker.emit('fetch', error);\n debugging('supervisor') && console.debug(`Supervisor.fetchTask failed!: error: ${error}`);\n }\n }\n\n /**\n * For each slice in this.queuedSlices, match with a sandbox in the following order:\n * 1. Try to find an already assigned sandbox in this.assignedSandboxes for the slice's job.\n * 2. Find a ready sandbox in this.readiedSandboxes that is unassigned.\n * 3. Ready a new sandbox and use that.\n *\n * Take great care in assuring sandboxes and slices are uniquely associated, viz.,\n * a given slice cannot be associated with multiple sandboxes and a given sandbox cannot be associated with multiple slices.\n * The lack of such uniqueness has been the root cause of several difficult bugs.\n *\n * Note: When a sandbox is paired with a slice, execution is pending and sandbox.allocated=true and\n * sandbox.slice=slice and sandbox.jobAddress=slice.jobAddress. 
This is what 'allocated' means.\n * Immediately upon the exit of sandbox.work, sandbox.allocated=false is set and if an exception\n * wasn't thrown, the paired slice is placed in this.assignedSandboxes.\n * Thus from the pov of supervisor, this.allocatedSandboxes is deterministic and this.workingSandboxes is not.\n * Please try to not use this.workingSandboxes. It is deprecated.\n *\n * The input is numCores, this,queuedSlices, this.assignedSandboxes and this.readiedSandboxes.\n * If there are not enough sandboxes, new readied sandboxes will be created using\n * await this.readySandboxes(...)\n * And it is this await boundary that has caused many bugs.\n * We try not to make assumptions about non-local state across the await boundary.\n *\n * @param {number} numCores - The number of available sandbox slots.\n * @param {boolean} [throwExceptions=true] - Whether to throw exceptions when checking for sanity.\n * @returns {Promise<SandboxSlice[]>} Returns SandboxSlice[], may have length zero.\n */\n async matchSlicesWithSandboxes (numCores, throwExceptions = true) {\n\n const sandboxSlices = [];\n if (this.queuedSlices.length === 0 || this.matching || numCores <= 0) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.matchSlicesWithSandboxes: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return sandboxSlices;\n }\n\n //\n // Oversubscription mitigation.\n // Update when there are less available sandbox slots than numCores.\n // We cannot use this.unallocatedSpace here because its value is artificially low or zero, because in\n // this.distributedQueuedSlices we use the pseudo-mutex trick: this.acquire(howManySandboxSlotsToReserve)/this.release().\n // Note: Do not use this.numberOfCoresReserved outside of a function locked with this.acquire(howManySandboxSlotsToReserve) .\n const checkNumCores = this.numberOfCoresReserved; // # of locked sandbox slots.\n if (numCores > checkNumCores) numCores = checkNumCores;\n if (numCores <= 0) return sandboxSlices;\n\n // Don't ask for more than we have.\n if (numCores > this.queuedSlices.length)\n numCores = this.queuedSlices.length;\n\n // When evaluator goes down, all sandboxes are terminated.\n this.removeTerminatedSanboxes();\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: numCores ${numCores}, queued slices ${this.queuedSlices.length}: assigned ${this.assignedSandboxes.length}, readied ${this.readiedSandboxes.length}, unallocated ${this.unallocatedSpace}, # of sandboxes: ${this.sandboxes.length}`);\n\n if (debugging('supervisor')) {\n dumpSlicesIfNotUnique(this.queuedSlices, 'Warning: this.queuedSlices slices are not unique -- this is ok when slice is rescheduled.');\n dumpSandboxesIfNotUnique(this.readiedSandboxes, 'Warning: this.readiedSandboxes sandboxes are not unique!');\n dumpSandboxesIfNotUnique(this.assignedSandboxes, 'Warning: this.assignedSandboxes sandboxes are not unique!');\n }\n\n // Three functions to validate slice and sandbox.\n function checkSlice(slice, checkAllocated=true) {\n if (!slice.isUnassigned) throw new DCPError(`Slice must be unassigned: ${slice.identifier}`);\n if (checkAllocated && slice.allocated) throw new DCPError(`Slice must not already be allocated: ${slice.identifier}`);\n }\n function checkSandbox(sandbox, isAssigned) {\n if (sandbox.allocated) throw new DCPError(`Assigned 
sandbox must not be already allocated: ${sandbox.identifier}`);\n if (isAssigned && !sandbox.isAssigned) throw new DCPError(`Assigned sandbox is not marked as assigned: ${sandbox.identifier}`);\n if (!isAssigned && !sandbox.isReadyForAssign) throw new DCPError(`Readied sandbox is not marked as ready for assign: ${sandbox.identifier}`);\n }\n\n // Sanity checks.\n if (throwExceptions) {\n this.assignedSandboxes.forEach(sandbox => { checkSandbox(sandbox, true /* isAssigned*/); });\n this.readiedSandboxes.forEach(sandbox => { checkSandbox(sandbox, false /* isAssigned*/); });\n this.queuedSlices.forEach(slice => { checkSlice(slice); });\n } else {\n this.assignedSandboxes = this.assignedSandboxes.filter(sandbox => !sandbox.allocated && sandbox.isAssigned);\n this.readiedSandboxes = this.readiedSandboxes.filter(sandbox => !sandbox.allocated && sandbox.isReadyForAssign);\n this.queuedSlices = this.queuedSlices.filter(slice => !slice.allocated && slice.isUnassigned);\n }\n\n const sandboxKind = {\n assigned: 0,\n ready: 1,\n new: 2,\n };\n\n const ceci = this;\n /**\n * Auxiliary function to pair a sandbox with a slice and mark the sandbox as allocated.\n * An allocated sandbox is reserved and will not be released until the slice completes execution on the sandbox.\n *\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @param {number} kind\n */\n function pair(sandbox, slice, kind) {\n checkSandbox(sandbox, kind === sandboxKind.assigned);\n checkSlice(slice, kind === sandboxKind.assigned);\n slice.allocated = true;\n sandbox.allocated = true;\n sandbox.jobAddress = slice.jobAddress; // So we can know which jobs to not delete from this.cache .\n sandbox.slice = slice;\n\n sandboxSlices.push({ sandbox, slice });\n if (ceci.sliceTiming) slice['pairingDelta'] = Date.now();\n if (debugging('supervisor')) {\n let fragment = 'New readied';\n if (kind === sandboxKind.assigned) fragment = 'Assigned';\n else if (kind === sandboxKind.ready) fragment = 'Readied';\n console.log(`matchSlicesWithSandboxes.pair: ${fragment} sandbox matched ${ceci.dumpSandboxAndSlice(sandbox, slice)}`);\n }\n }\n\n // These three arrays are used to track/store slices and sandboxes,\n // so that when an exception occurs, the following arrays are restored:\n // this.queuedSlices, this.assignedSandboxes, this.realizedSandboxes.\n let slicesToMatch = [];\n let trackAssignedSandboxes = [];\n let trackReadiedSandboxes = [];\n try\n {\n this.matching = true;\n\n let assignedCounter = 0; // How many assigned sandboxes are being used.\n let readyCounter = 0; // How many sandboxes used from the existing this.readiedSandboxes.\n let newCounter = 0; // How many sandboxes that needed to be newly created.\n\n //\n // The Ideas:\n // 1) We match each slice with a sandbox. First we match with assigned sandboxes in the order\n // that they appear in this.queuedSlices. Then we match in-order with existing this.readiedSandboxes\n // Then we match in-order with new new readied sandboxes created through\n // await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n // This allows us to try different orderings of execution of slices. E.g. 
Wes suggested\n // trying to execute slices from different jobs with maximal job diversity -- specifically\n // if there are 3 jobs j1,j2,j3, with slices s11, s12 from j1, s21, s22, s23 from j2 and\n // s31, s32 from j3, then we try to schedule, in order s11, s21, s31, s12, s22, s32, s23.\n //\n // 2) Before matching slices with sandboxes, we allocate available assigned and readied sandboxes\n // and if more are needed then we create and allocate new ones.\n //\n // 3) Finally we match slices with sandboxes and return an array of sandboxSlice pairs.\n //\n // Note: The ordering of sandboxSlices only partially corresponds to the order of this.queuedSlices.\n // It's easy to do. When pairing with assigned sandboxes, any slice in this.queuedSlices which doesn't\n // have an assigned sandbox, will add null to the sandboxSlices array. Then when pairing with readied sandboxes,\n // we fill-in the null entries in the sandboxSlices array.\n //\n\n // Get the slices that are being matched.\n slicesToMatch = this.queuedSlices.slice(0, numCores);\n this.queuedSlices = this.queuedSlices.slice(numCores);\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: slicesToMatch ${this.dumpSlices(slicesToMatch)}`);\n\n // Create object map: jobAddress -> sandboxes with sandboxes.jobAddress === jobAddress .\n const jobSandboxMap = toJobMap(this.assignedSandboxes, sandbox => sandbox);\n \n // Create array to hold slices which do not have assigned sandboxes.\n // These slices will need to be paired with existing and possibly new readied sandboxes.\n // Specifically, the sandboxes from existing this.readiedSandboxes and new sandboxes\n // created through await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n const slicesThatNeedSandboxes = [];\n\n // Pair assigned sandboxes with slices.\n for (const slice of slicesToMatch) {\n const assigned = jobSandboxMap[slice.jobAddress];\n if (assigned && assigned.length > 0) {\n // Pair.\n const sandbox = assigned.pop();\n pair(sandbox, slice, sandboxKind.assigned);\n this.removeElement(this.assignedSandboxes, sandbox);\n // Track.\n trackAssignedSandboxes.push(sandbox);\n assignedCounter++;\n } else {\n // Don't lose track of these slices.\n slice.allocated = true;\n slicesThatNeedSandboxes.push(slice);\n }\n }\n\n // Pair readied sandboxes with slices.\n readyCounter = Math.min(slicesThatNeedSandboxes.length, this.readiedSandboxes.length);\n newCounter = slicesThatNeedSandboxes.length - readyCounter;\n // Track.\n trackReadiedSandboxes = this.readiedSandboxes.slice(0, readyCounter);\n this.readiedSandboxes = this.readiedSandboxes.slice(readyCounter);\n for (const sandbox of trackReadiedSandboxes) {\n // Pair.\n const slice = slicesThatNeedSandboxes.pop();\n pair(sandbox, slice, sandboxKind.ready);\n }\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: assignedCounter ${assignedCounter}, readyCounter ${readyCounter}, newCounter ${newCounter}, numCores ${numCores}`)\n\n // Validate algorithm consistency.\n if (this.debugBuild && assignedCounter + readyCounter + newCounter !== numCores) {\n // Structured assert.\n throw new DCPError(`matchSlicesWithSandboxes: Algorithm is corrupt ${assignedCounter} + ${readyCounter} + ${newCounter} !== ${numCores}`);\n }\n\n // Here is an await boundary.\n // Accessing non-local data across an await boundary may result in the unexpected.\n\n // Create new readied sandboxes to associate with slicesThatNeedSandboxes.\n if (newCounter > 0) {\n // When allocateLocalSandboxes is true, 
this.readySandboxes does not place the new sandboxes\n // on this.readiedSandboxes. Hence the new sandboxes are private and nobody else can see them.\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: creating ${newCounter} new sandboxes, # of sandboxes ${this.sandboxes.length}`);\n const readied = await this.createSandboxes(newCounter, true /* allocateLocalSandboxes*/);\n // Track.\n trackReadiedSandboxes.push(...readied);\n\n for (const sandbox of readied) {\n assert(slicesThatNeedSandboxes.length > 0);\n // Pair\n const slice = slicesThatNeedSandboxes.pop();\n pair(sandbox, slice, sandboxKind.new);\n }\n \n // Put back any extras. There should not be any unless readySandboxes returned less than asked for.\n if (slicesThatNeedSandboxes.length > 0) {\n slicesThatNeedSandboxes.forEach(slice => {\n slice.allocated = false;\n this.queuedSlices.push(slice);\n });\n }\n }\n\n if (debugging()) {\n console.log(`matchSlicesWithSandboxes: Matches: ${ this.dumpSandboxSlices(sandboxSlices) }`);\n this.dumpSandboxSlicesIfNotUnique(sandboxSlices, 'Warning: sandboxSlices; { sandbox, slice } pairs are not unique!');\n }\n } catch (e) {\n // Clear allocations.\n slicesToMatch.forEach(slice => { slice.allocated = false; });\n trackAssignedSandboxes.forEach(sandbox => { sandbox.allocated = false; sandbox.slice = null; });\n trackReadiedSandboxes.forEach(sandbox => { sandbox.allocated = false; sandbox.slice = null; sandbox.jobAddress = null; });\n \n // Filter out redundancies -- there shouldn't be any...\n slicesToMatch = slicesToMatch.filter(slice => this.queuedSlices.indexOf(slice) === -1);\n trackAssignedSandboxes = trackAssignedSandboxes.filter(sb => this.assignedSandboxes.indexOf(sb) === -1);\n trackReadiedSandboxes = trackReadiedSandboxes.filter(sb => this.readiedSandboxes.indexOf(sb) === -1);\n\n // Sanity checks.\n slicesToMatch.forEach(slice => { checkSlice(slice) });\n trackAssignedSandboxes.forEach(sandbox => { checkSandbox(sandbox, true /* isAssigned*/); });\n trackReadiedSandboxes.forEach(sandbox => { checkSandbox(sandbox, false /* isAssigned*/); });\n\n // Restore arrays.\n this.queuedSlices.push(...slicesToMatch);\n this.assignedSandboxes.push(...trackAssignedSandboxes);\n this.readiedSandboxes.push(...trackReadiedSandboxes);\n \n debugging() && console.error('Error in matchSlicesWithSandboxes: Attempting to recover slices and sandboxes.', e);\n return [];\n } finally {\n this.matching = false;\n }\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: allocated ${sandboxSlices.length} sandboxes, queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, # of sandboxes: ${this.sandboxes.length}.`);\n\n return sandboxSlices;\n }\n\n disassociateSandboxAndSlice(sandbox, slice) {\n this.returnSandbox(sandbox);\n sandbox.slice = null;\n this.returnSlice(slice, 'none');\n }\n\n /**\n * This method will call this.startSandboxWork(sandbox, slice) for each element { sandbox, slice }\n * of the array returned by this.matchSlicesWithSandboxes(availableSandboxes) until all allocated sandboxes\n * are working. It is possible for a sandbox to interleave with calling distributeQueuedSlices and leave a sandbox\n * that is not working. 
Moreover, this.queuedSlices may be exhausted before all sandboxes are working.\n * @returns {Promise<void>}\n */\n async distributeQueuedSlices () {\n const numCores = this.unallocatedSpace;\n\n // If there's nothing there, or we're reentering, bail out.\n if (this.queuedSlices.length === 0 || numCores <= 0 || this.matching) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.distributeQueuedSlices: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return Promise.resolve();\n }\n\n //\n // Use the pseudo-mutex to prevent uncontrolled interleaving with fetchTask,\n // matchSlicesWithSandboxes and distributeQueuedSlices\n let sandboxSlices;\n this.acquire(numCores);\n try {\n sandboxSlices = await this.matchSlicesWithSandboxes(numCores);\n } finally {\n this.release();\n }\n\n debugging('supervisor') && console.log(`distributeQueuedSlices: ${sandboxSlices.length} sandboxSlices ${this.dumpSandboxSlices(sandboxSlices)}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n\n for (let sandboxSlice of sandboxSlices) {\n\n const { sandbox, slice } = sandboxSlice;\n try {\n if (sandbox.isReadyForAssign) {\n try {\n let timeoutMs = Math.floor(Math.min(+this.lastAssignFailTimerMs || 0, 10 * 60 * 1000 /* 10m */));\n await a$sleepMs(timeoutMs);\n await this.assignJobToSandbox(sandbox, slice.jobAddress);\n } catch (e) {\n debugging() && console.error(`Supervisor.distributeQueuedSlices: Could not assign slice ${slice.identifier} to sandbox ${sandbox.identifier}.`, e);\n this.worker.emit('error', e);\n this.lastAssignFailTimerMs = this.lastAssignFailTimerMs ? +this.lastAssignFailTimerMs * 1.25 : Math.random() * 200;\n this.disassociateSandboxAndSlice(sandbox, slice);\n continue;\n }\n }\n\n if (!this.lastAssignFailTimerMs)\n this.lastAssignFailTimerMs = Math.random() * 200;\n this.startSandboxWork(sandbox, slice);\n this.lastAssignFailTimerMs = false;\n\n } catch (e) {\n // We should never get here.\n debugging() && console.error(`Supervisor.distributeQueuedSlices: Failed to execute slice ${slice.identifier} in sandbox ${sandbox.identifier}.`, e);\n this.worker.emit('error', e);\n this.disassociateSandboxAndSlice(sandbox, slice);\n }\n }\n }\n\n /**\n *\n * @param {Sandbox} sandbox\n * @param {opaqueId} jobAddress\n * @returns {Promise<void>}\n */\n assignJobToSandbox(sandbox, jobAddress) {\n // Any error will be caught in distributeQueuedSlices.\n return sandbox.assign(jobAddress);\n }\n\n /**\n * Handles reassigning or returning a slice that was rejected by a sandbox.\n * \n * The sandbox will be terminated by this.returnSandbox in finalizeSandboxAndSlice. In this case,\n * if the slice does not have a rejected property already, reassign the slice to a new sandbox\n * and add a rejected property to the slice to indicate it has already rejected once, then set slice = null\n * in the return SandboxSlice so that finalizeSandboxAndSlice won't return slice to scheduler.\n * \n * If the slice rejects with a reason, or has a rejected time stamp (ie. 
has been rejected once already)\n * then return the slice and all slices from the job to the scheduler and\n * terminate all sandboxes with that jobAddress.\n * @param {Sandbox} sandbox \n * @param {Slice} slice\n * @param {string} rejectReason\n * @returns {Promise<SandboxSlice>}\n */\n async handleWorkReject(sandbox, slice, rejectReason) {\n if (!this.rejectedJobReasons[slice.jobAddress])\n this.rejectedJobReasons[slice.jobAddress] = [];\n\n this.rejectedJobReasons[slice.jobAddress].push(rejectReason); // memoize reasons\n\n debugging() && console.debug('handleWorkReject', rejectReason, slice.rejected, this.rejectedJobReasons, slice.identifier);\n\n // First time rejecting without a reason. Try assigning slice to a new sandbox.\n if (rejectReason === 'false' && !slice.rejected) {\n // Set rejected.\n slice.rejected = Date.now();\n // Schedule the slice for execution.\n this.scheduleSlice(slice, true /* placeInTheFrontOfTheQueue*/, false /* noDuplicateExecution*/);\n \n // Null out slice so this.returnSlice will not be called in finalizeSandboxAndSlice.\n // But we still want this.returnSandbox to terminate the sandbox.\n slice = null;\n } else { // Slice has a reason OR rejected without a reason already and got stamped.\n \n // Purge all slices and sandboxes associated with slice.jobAddress .\n this.purgeAllWork(slice.jobAddress);\n // Clear jobAddress from this.cache .\n this.cleanJobCache({});\n\n // Add to array of rejected jobs.\n let rejectedJob = {\n address: slice.jobAddress,\n reasons: this.rejectedJobReasons[slice.jobAddress],\n }\n this.rejectedJobs.push(rejectedJob);\n\n if (slice.rejected)\n this.worker.emit('warning', `Supervisor.handleWorkReject: The slice ${slice.identifier} was rejected twice.`);\n else\n this.worker.emit('warning', `Supervisor.handleWorkReject: The slice ${slice.identifier} was rejected with reason ${rejectReason}.`);\n this.worker.emit('warning', ' All slices with the same jobAddress returned to the scheduler.');\n this.worker.emit('warning', ' All sandboxes with the same jobAddress are terminated.');\n //\n // this.purgeAllWork(jobAddress) terminates all sandboxes with jobAddress,\n // and it also returns to scheduler all slices with jobAddress.\n // Therefore null out slice and sandbox so finalizeSandboxAndSlice doesn't do anything.\n // \n sandbox = null;\n slice = null;\n }\n return { sandbox, slice };\n }\n\n /**\n * Schedule the slice to be executed.\n * If slice is already executing and noDuplicateExecution is true, return the slice with reason.\n * @param {Slice} slice\n * @param {boolean} [placeInTheFrontOfTheQueue=false]\n * @param {boolean} [noDuplicateExecution=true]\n * @param {string} [reason]\n */\n scheduleSlice(slice, placeInTheFrontOfTheQueue = false, noDuplicateExecution = true, reason) {\n // When noDuplicateExecution, if slice is already executing, do nothing.\n let allocatedSlices = [];\n if (noDuplicateExecution)\n allocatedSlices = this.allocatedSlices;\n\n if (!allocatedSlices.indexOf(slice)) {\n // Reset slice state to allow execution.\n slice.status = SLICE_STATUS_UNASSIGNED;\n slice.allocated = false;\n // Enqueue in the to-be-executed queue.\n if (placeInTheFrontOfTheQueue) this.queuedSlices.unshift(slice);\n else this.queuedSlices.push(slice);\n }\n }\n\n /**\n * Purge all slices and sandboxes with this jobAddress.\n * @param {string} jobAddress\n * @param {boolean} [onlyPurgeQueuedAndAllocated=false]\n */\n purgeAllWork(jobAddress, onlyPurgeQueuedAndAllocated = false) {\n // Purge all slices and sandboxes associated with 
jobAddress .\n const deadSandboxes = this.sandboxes.filter(sandbox => sandbox.jobAddress === jobAddress);\n\n if (deadSandboxes.length > 0) {\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): sandboxes purged ${deadSandboxes.map(s => s.id)}, # of sandboxes ${this.sandboxes.length}`);\n deadSandboxes.forEach(sandbox => this.returnSandbox(sandbox));\n }\n\n let deadSlices;\n if (onlyPurgeQueuedAndAllocated) {\n deadSlices = this.queuedSlices.filter(slice => slice.jobAddress === jobAddress);\n if (deadSlices.length > 0 || this.allocatedSlices.length > 0)\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): dead queuedSlices ${deadSlices.map(s => s.sliceNumber)}, dead allocatedSlices ${this.allocatedSlices.map(s => s.sliceNumber)}`);\n deadSlices.push(...this.allocatedSlices);\n } else {\n deadSlices = this.slices.filter(slice => slice.jobAddress === jobAddress);\n }\n\n if (deadSlices.length > 0) {\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): slices purged ${deadSlices.map(s => s.sliceNumber)}, # of sandboxes ${this.sandboxes.length}`);\n this.returnSlices(deadSlices);\n this.removeQueuedSlices(deadSlices);\n }\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): Finished: slices ${this.slices.length}, queuedSlices ${this.queuedSlices.length}, assigned ${this.assignedSandboxes.length}, readied ${this.readiedSandboxes.length}, # of sandboxes ${this.sandboxes.length}`);\n }\n\n /**\n * Gives a slice to a sandbox which begins working. Handles collecting\n * the slice result (complete/fail) from the sandbox and submitting the result to the scheduler.\n * It will also return the sandbox to @this.returnSandbox when completed so the sandbox can be re-assigned.\n *\n * @param {Sandbox} sandbox - the sandbox to give the slice\n * @param {Slice} slice - the slice to distribute\n * @returns {Promise<void>} Promise returned from sandbox.run\n */\n async startSandboxWork (sandbox, slice) {\n var startDelayMs, reason = 'unknown';\n\n try {\n slice.markAsWorking();\n } catch (e) {\n // This will occur when the same slice is distributed twice.\n // It is normal because two sandboxes could finish at the same time and be assigned the\n // same slice before the slice is marked as working.\n debugging() && console.debug('startSandboxWork: slice.markAsWorking exception:', e);\n return Promise.resolve();\n }\n\n // sandbox.requiresGPU = slice.requiresGPU;\n // if (sandbox.requiresGPU) {\n // this.GPUsAssigned++;\n // }\n\n if (this.startSandboxWork_beenCalled)\n startDelayMs = 1000 * (this.tuning.minSandboxStartDelay + (Math.random() * (this.tuning.maxSandboxStartDelay - this.tuning.minSandboxStartDelay)));\n else {\n startDelayMs = 1000 * this.tuning.minSandboxStartDelay;\n this.startSandboxWork_beenCalled = true;\n }\n\n try {\n debugging() && console.log(`startSandboxWork: Started ${this.dumpStatefulSandboxAndSlice(sandbox, slice)}, sandboxes: ${this.sandboxes.length}, allocated slices ${this.allocatedSlices.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`, Date.now() - this._t0);\n if (this.sliceTiming) {\n slice['pairingDelta'] = Date.now() - slice['pairingDelta'];\n slice['executionDelta'] = Date.now();\n }\n let result;\n try {\n result = await sandbox.work(slice, startDelayMs);\n } finally {\n sandbox.allocated = false;\n slice.allocated = false;\n }\n if (this.sliceTiming) {\n slice['executionDelta'] = Date.now() - 
slice['executionDelta'];\n slice['resultDelta'] = Date.now();\n }\n slice.collectResult(result, true);\n // In watchdog, all sandboxes in working state, have their slice status sent to result submitter.\n // However, this can happen after the sandbox/slice has already sent results\n // to result submitter, in which case, the activeSlices table has already removed the row\n // corresponding to slice and hence is incapable of updating status.\n sandbox.changeWorkingToAssigned();\n this.assignedSandboxes.push(sandbox);\n debugging() && console.log(`startSandboxWork: Finished ${this.dumpStatefulSandboxAndSlice(sandbox, slice)}, sandboxes: ${this.sandboxes.length}, allocated slices ${this.allocatedSlices.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n } catch(error) {\n let logLevel;\n\n if (error instanceof SandboxError) {\n logLevel = 'warn';\n // The message and stack properties of error objects are not enumerable,\n // so they have to be copied into a plain object this way\n const errorResult = Object.getOwnPropertyNames(error).reduce((o, p) => {\n o[p] = error[p]; return o;\n }, { message: 'Unexpected worker error' });\n slice.collectResult(errorResult, false);\n } else {\n logLevel = 'error';\n // This error was unrelated to the work being done, so just return the slice in the finally block.\n // For extra safety the sandbox is terminated.\n slice.result = null;\n slice.status = SLICE_STATUS_FAILED;\n }\n\n let errorString;\n switch (error.errorCode) {\n case 'ENOPROGRESS':\n reason = 'ENOPROGRESS';\n errorString = 'Supervisor.startSandboxWork - No progress error in sandbox.\\n';\n break;\n case 'ESLICETOOSLOW':\n reason = 'ESLICETOOSLOW';\n errorString = 'Supervisor.startSandboxWork - Slice too slow error in sandbox.\\n';\n break;\n case 'EUNCAUGHT':\n reason = 'EUNCAUGHT';\n errorString = `Supervisor.startSandboxWork - Uncaught error in sandbox ${error.message}.\\n`;\n break;\n case 'EFETCH':\n // reason = 'EFETCH'; The status.js processing cannot handle 'EFETCH'\n reason = 'unknown';\n errorString = `Supervisor.startSandboxWork - Could not fetch data: ${error.message}.\\n`;\n break;\n }\n \n const { getenv } = __webpack_require__(/*! ../../common/dcp-env */ \"./src/common/dcp-env.js\");\n // Always display max info under debug builds, otherwise maximal error\n // messages are displayed to the worker, only if both worker and client agree.\n let workerConsole = sandbox.supervisorCache.cache.job[slice.jobAddress].workerConsole;\n const displayMaxInfo = Boolean(getenv('DCP_SUPERVISOR_DEBUG_DISPLAY_MAX_INFO')) || (workerConsole && this.options.allowConsoleAccess);\n\n const errorObject = {\n jobAddress: slice.jobAddress.substr(0,10),\n sliceNumber: slice.sliceNumber,\n sandbox: sandbox.id,\n jobName: sandbox.public ? 
sandbox.public.name : 'unnamed',\n };\n \n if (error.name === 'EWORKREJECT') {\n error.stack = 'Sandbox was terminated by work.reject()';\n const ss = await this.handleWorkReject(sandbox, slice, error.message);\n sandbox = ss.sandbox; slice = ss.slice;\n }\n\n if (!displayMaxInfo && error.errorCode === 'EUNCAUGHTERROR') {\n this.worker.emit('error', `Supervisor.startSandboxWork - Uncaught error in sandbox, could not compute ${errorObject}`);\n } else if (!displayMaxInfo && error.errorCode === 'EPERM_ORIGIN') {\n this.worker.emit('error', `Supervisor.startSandboxWork - Could not fetch data: ${error.message}`);\n } else if (!displayMaxInfo && errorString) {\n this.worker.emit('error', `${errorString}: ${errorObject}`);\n } else if (!displayMaxInfo && error.name === 'EWORKREJECT') {\n this.worker.emit('error', `Supervisor.startSandboxWork - Sandbox rejected work: ${error.message}`);\n } else {\n if (displayMaxInfo && error.stack)\n errorObject.stack += '\\n --------------------\\n' + (error.stack.split('\\n').slice(1).join('\\n'));\n this.worker.emit('error', `Supervisor.startSandboxWork - Sandbox failed: ${error.message}: ${errorObject}`);\n }\n } finally {\n await this.finalizeSandboxAndSlice(sandbox, slice, reason);\n }\n }\n\n /**\n * If slice && slice.result, then call await this.recordResult(slice) and this.returnSandbox(sandbox, slice) will have no effect.\n * If slice && !slice.result, then call this.returnSlice(slice, reason) and then this.returnSandbox(sandbox, slice) which terminates sandbox.\n * If !slice && sandbox, then terminate the sandbox with this.returnSandbox(sandbox, slice) .\n * If !slice && !sandbox, then do nothing.\n * @param {Sandbox} [sandbox]\n * @param {Slice} [slice]\n * @param {string} [reason]\n */\n async finalizeSandboxAndSlice(sandbox, slice, reason) {\n debugging('supervisor') && console.log(`finalizeSandboxAndSlice: sandbox ${sandbox ? sandbox.identifier : 'nade'}, slice ${slice ? slice.identifier : 'nade'}`);\n if (slice) {\n if (slice.result) await this.recordResult(slice, sandbox.sandboxHandle);\n else this.returnSlice(slice, reason);\n }\n // It is possible that sandbox is already terminated\n // Because sandbox.allocated=false as soon as sandbox.work(...) 
completes.\n // But the await at or in finalizeSandboxAndSlice may allow pruneSandboxes to slither in.\n if (sandbox) this.returnSandbox(sandbox, slice);\n }\n\n /**\n * Terminates sandboxes and returns slices.\n * Sets the working flag to false, call @this.work to start working again.\n * \n * If forceTerminate is true: Terminates all sandboxes and returns all slices.\n * If forceTerminate is false: Terminates non-allocated sandboxes and returns queued slices.\n *\n * @param {boolean} [forceTerminate = true] - true if you want to stop the sandboxes from completing their current slice.\n * @returns {Promise<void>}\n */\n async stopWork (forceTerminate = true) {\n debugging('supervisor') && console.log('stopWork(${forceTerminate}): terminating sandboxes and returning slices to scheduler.');\n \n const jobs = [];\n for (const slice of this.slices)\n if (jobs.indexOf(slice.jobAddress) === -1)\n jobs.push(slice.jobAddress);\n \n if (forceTerminate) {\n while (this.sandboxes.length) {\n this.returnSandbox(this.sandboxes[0], null);\n }\n\n await this.returnSlices(this.slices).then(() => {\n this.queuedSlices.length = 0;\n });\n } else {\n // Only terminate idle sandboxes and return only queued slices\n let idleSandboxes = this.sandboxes.filter(w => !w.allocated);\n for (const sandbox of idleSandboxes) {\n this.returnSandbox(sandbox, null);\n }\n\n await this.returnSlices(this.queuedSlices).then(() => {\n this.queuedSlices.length = 0;\n });\n\n await new Promise((resolve, reject) => {\n let sandboxesRemaining = this.allocatedSandboxes.length;\n if (sandboxesRemaining === 0)\n {\n resolve();\n }\n // Resolve and finish work once all sandboxes have finished submitting their results.\n this.on('result', () => {\n sandboxesRemaining--;\n if (sandboxesRemaining === 0)\n {\n debugging() && console.log('All sandboxes empty, stopping worker and closing all connections');\n resolve();\n }\n });\n });\n }\n \n for (const jobAddress of jobs)\n this.jobHandles[jobAddress].emit('flush');\n\n if (this.resultSubmitterConnection) {\n this.resultSubmitterConnection.off('end', this.openResultSubmitterConn);\n this.resultSubmitterConnection.close();\n this.resultSubmitterConnection = null;\n }\n\n if (this.taskDistributorConnection) {\n this.taskDistributorConnection.off('end', this.openTaskDistributorConn);\n this.taskDistributorConnection.close();\n this.taskDistributorConnection = null;\n }\n\n if (this.packageManagerConnection) {\n this.packageManagerConnection.off('end', this.openPackageManagerConn);\n this.packageManagerConnection.close();\n this.packageManagerConnection = null;\n }\n\n if (this.eventRouterConnection) {\n this.eventRouterConnection.off('end', this.openEventRouterConn);\n this.eventRouterConnection.close();\n this.eventRouterConnection = null;\n }\n }\n\n /**\n * Takes a slice and returns it to the scheduler to be redistributed.\n * Usually called when an exception is thrown by sandbox.work(slice, startDelayMs) .\n * Or when the supervisor tells it to forcibly stop working.\n *\n * @param {Slice} slice - The slice to return to the scheduler.\n * @param {string} [reason] - Optional reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @returns {Promise<*>} - Response from the scheduler.\n */\n returnSlice (slice, reason) {\n // When sliceNumber === 0 don't send a status message.\n if (slice.sliceNumber === 0) return Promise.resolve();\n \n debugging() && console.log(`Supervisor.returnSlice: Returning slice ${slice.identifier} with reason ${reason}.`);\n\n 
this.jobHandles[slice.jobAddress].emit('beforeReturn', slice.sliceNumber);\n\n const payload = slice.getReturnMessagePayload(this.workerOpaqueId, reason);\n try\n {\n return this.resultSubmitterConnection.request('status', payload) /* resultSubmitterConnection can be null if worker is stopped */\n .then(response => {\n this.jobHandles[slice.jobAddress].emit('return', slice.sliceNumber);\n return response;\n }).catch(error => {\n this.jobHandles[slice.jobAddress].emit('return', error);\n debugging('supervisor') && console.error('Failed to return slice', {\n sliceNumber: slice.sliceNumber,\n jobAddress: slice.jobAddress,\n status: slice.status,\n error,\n }, 'Will try again on a new connection.');\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: payload });\n this.resultSubmitterConnection.close();\n });\n }\n catch (error)\n {\n debugging('supervisor') && console.error(`Failed to return slice ${slice.identifier}, no connection to result submitter:`, error);\n }\n }\n\n /**\n * Bulk-return multiple slices, possibly for assorted jobs.\n * Returns slices to the scheduler to be redistributed.\n * Called in the sandbox terminate handler and purgeAllWork(jobAddress)\n * and stopWork(forceTerminate).\n *\n * @param {Slice[]} slices - The slices to return to the scheduler.\n * @returns {Promise<void>} - Response from the scheduler.\n */\n async returnSlices(slices) {\n if (!slices || !slices.length) return Promise.resolve();\n \n const slicePayload = [];\n slices.forEach((slice) => {\n this.jobHandles[slice.jobAddress].emit('beforeReturn', slice.sliceNumber);\n addToReturnSlicePayload(slicePayload, slice);\n });\n this.removeSlices(slices);\n\n debugging('supervisor') && console.log(`Supervisor.returnSlices: Returning slices ${this.dumpSlices(slices)}.`);\n\n return this.resultSubmitterConnection.request('status', {\n worker: this.workerOpaqueId,\n slices: slicePayload,\n }).then(response => {\n slices.forEach((slice) => { this.jobHandles[slice.jobAddress].emit('return', slice.sliceNumber); });\n return response;\n }).catch(error => {\n slices.forEach((slice) => { this.jobHandles[slice.jobAddress].emit('return', error); });\n const errorInfo = slices.map(slice => slice.identifier);\n debugging('supervisor') && console.error('Failed to return slice(s)', { errorInfo, error }, 'Will try again on new connection.');\n // Skipping emitting 'return' event when re-sending returned slices for Sup1.\n // It's done right in Sup2.\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: { worker: this.workerOpaqueId, slices: slicePayload } })\n this.resultSubmitterConnection.close();\n // Just in case the caller is expecing a DCP response\n return { success: false, payload: {} };\n });\n }\n\n /**\n * Submits the slice results to the scheduler, either to the\n * work submit or fail endpoints based on the slice status.\n * Then remove the slice from the @this.slices cache.\n *\n * @param {Slice} slice - The slice to submit.\n * @param {EventEmitter} sandboxHandle - The sandbox handle associated to the slice.\n * @returns {Promise<void>}\n */\n async recordResult (slice, sandboxHandle) {\n // It is possible for slice.result to be undefined when there are upstream errors.\n if ( !(slice && slice.result))\n throw new Error(`recordResult: slice.result is undefined for slice ${slice.identifier}. 
This is ok when there are upstream errors.`);\n\n if (!(this.options.paymentAddress instanceof Address))\n throw new Error(`options.paymentAddress ${JSON.stringify(this.options.paymentAddress)} must be an instance of Address`);\n\n debugging('supervisor') && console.log(`supervisor: recording result for slice ${slice.identifier}.`);\n\n const jobAddress = slice.jobAddress;\n const sliceNumber = slice.sliceNumber;\n const authorizationMessage = slice.getAuthorizationMessage();\n\n /* @see result-submitter::result for full message details */\n const metrics = { GPUTime: 0, CPUTime: 0, CPUDensity: 0, GPUDensity: 0, InDataSize: 0, OutDataSize: 0 };\n const payloadData = {\n slice: sliceNumber,\n job: jobAddress,\n worker: this.workerOpaqueId,\n paymentAddress: this.options.paymentAddress,\n metrics,\n authorizationMessage,\n }\n\n const timeReport = slice.timeReport;\n if (timeReport && timeReport.total > 0) {\n metrics.GPUTime = timeReport.webGL + timeReport.webGPU;\n metrics.CPUTime = timeReport.CPU;\n metrics.CPUDensity = metrics.CPUTime / timeReport.total;\n metrics.GPUDensity = metrics.GPUTime / timeReport.total;\n metrics.CPUTime = 1 + Math.floor(metrics.CPUTime);\n if (metrics.GPUTime > 0)\n metrics.GPUTime = 1 + Math.floor(metrics.GPUTime);\n\n // Create dummy properties to keep op-router happy.\n metrics.InDataSize = 0;\n metrics.OutDataSize = 0;\n\n const eventMeasurements = {\n elapsed: timeReport.total / 1000,\n CPU: timeReport.CPU / 1000,\n GPU: metrics.GPUTime / 1000,\n in: 0,\n out: 0,\n };\n\n this.jobHandles[slice.jobAddress].emit('metrics', slice.sliceNumber, eventMeasurements);\n sandboxHandle.emit('metrics', slice.sliceNumber, eventMeasurements);\n }\n \n let canceled = false;\n const resultUrl = (slice.resultStorageType !== 'pattern') ? slice.resultStorageDetails : false;\n this.worker.emit('beforeResult', () => { canceled = true; }, resultUrl);\n this.jobHandles[slice.jobAddress].emit('beforeResult', () => { canceled = true; }, resultUrl);\n if (canceled) \n {\n debugging('supervisor') && console.log(`Sup1: User canceled the result submission operation for slice ${slice.identifier}.`);\n this.removeSlice(slice);\n return this.returnSlice(slice, 'Canceled via beforeResult event');\n }\n \n if (!slice.isFinished)\n throw new Error('Cannot record result for slice that is not finished');\n\n if (slice.resultStorageType === 'pattern') /* This is a remote-storage slice. 
*/\n payloadData.result = await this.sendResultToRemote(slice);\n else\n payloadData.result = encodeDataURI(slice.result.result); /* XXXwg - result.result is awful */\n debugging('supervisor') && console.log('Supervisor.recordResult: payloadData.result', payloadData.result.slice(0, 512));\n\n try {\n if (slice.completed) {\n\n /* work function returned a result */\n let resp = await this.resultSubmitterConnection.request(\n 'result',\n payloadData,\n )\n \n if (!resp.success) {\n if (resp.payload && resp.payload.code === 'DCPS-01002') { /* result submitter cannot connect to event router; try again */\n try {\n resp = await this.resendResult(payloadData)\n } catch (error) {\n debugging('supervisor') && console.error(`supervisor - failed to submit result for job ${jobAddress} after ${payloadData.sendRetries} attempts`)\n throw error;\n }\n }\n else\n {\n throw new Error(`failed to submit result for slice ${slice.sliceNumber} of job ${jobAddress}`);\n }\n }\n\n if (false) {}\n\n const paymentAddress = payloadData.paymentAddress.toString(); \n const payment = resp.payload.slicePaymentAmount;\n this.worker.emit('payment', payment, paymentAddress, jobAddress, slice.sliceNumber);\n this.jobHandles[jobAddress].emit('payment', payment, paymentAddress, slice.sliceNumber);\n sandboxHandle.emit('payment', payment, paymentAddress);\n\n /** @todo: Implement size once I/O added. For now its just 0. */\n this.worker.emit('result', resultUrl, 0);\n this.jobHandles[jobAddress].emit('result', resultUrl, 0);\n } else {\n /* slice did not complete for some reason */\n \n // If the slice from a job never completes and the job address exists in the ringBufferofJobs, \n // then we remove it to allow for another slice (from the same job) to be obtained by fetchTask\n this.ringBufferofJobs.buf = this.ringBufferofJobs.filter(element => element !== jobAddress);\n \n await this.returnSlice(slice);\n }\n } catch(error) {\n debugging() && console.info(`1014: Failed to submit results for slice ${payloadData.slice} of job ${payloadData.job}`, error);\n this.worker.emit('result', error);\n this.jobHandles[jobAddress].emit('result', error);\n this.worker.emit('error', error);\n } finally {\n // Remove the slice from the slices array.\n this.removeSlice(slice);\n if (!this.sliceTiming)\n debugging('supervisor') && console.log(`recordResult: Completed slice ${slice.identifier}.`);\n else\n {\n slice['resultDelta'] = Date.now() - slice['resultDelta'];\n console.log(`recordResult(${slice['pairingDelta']}, ${slice['executionDelta']}, ${slice['resultDelta']}): Completed slice ${slice.identifier}.`);\n }\n }\n }\n\n /**\n * Send a work function's result to a server that speaks our DCP Remote Data Server protocol.\n * E.g. 
https://gitlab.com/Distributed-Compute-Protocol/dcp-rds\n *\n * @param {Slice} slice - Slice object whose result we are sending.\n * @returns {Promise<string>}\n * @throws When HTTP status not in the 2xx range.\n */\n sendResultToRemote(slice) {\n return supShared.sendResultToRemote(this, slice);\n }\n}\n\n/**\n * Sandbox has had an error which is not from the work function: kill it\n * and try to redo the slice.\n */\nfunction handleSandboxError(supervisor, sandbox, error) {\n const slice = sandbox.slice;\n\n slice.sandboxErrorCount = (slice.sandboxErrorCount || 0) + 1;\n sandbox.slice = null;\n supervisor.returnSandbox(sandbox); /* terminate the sandbox */\n slice.status = SLICE_STATUS_UNASSIGNED; /* ToT */\n slice.allocated = false;\n debugging() && console.warn(`Supervisor.handleSandboxError: Sandbox ${sandbox.identifier}...(${sandbox.public.name}/${slice.sandboxErrorCount}) with slice ${slice.identifier} had error.`, error);\n supervisor.worker.emit('error', error);\n\n if (slice.sandboxErrorCount <= supervisor.tuning.maxSandboxSliceRetries)\n supervisor.queuedSlices.push(slice);\n else {\n slice.error = error;\n supervisor.returnSlice(slice);\n }\n}\n\n/**\n * Add a slice to the slice payload being built. If a sliceList already exists for the\n * job-status-authMessage tuple, then the slice will be added to that, otherwise a new\n * sliceList will be added to the payload.\n *\n * @param {Object[]} slicePayload - Slice payload being built. Will be mutated in place.\n * @param {Slice} slice - The slice.\n * @param {String} status - Status update, eg. progress or scheduled.\n *\n * @returns {Object[]} mutated slicePayload array\n */\nfunction addToSlicePayload(slicePayload, slice, status) {\n // getAuthorizationMessage helps enforces the equivalence\n // !authorizationMessage <==> sliceNumber === 0\n const authorizationMessage = slice.getAuthorizationMessage();\n if (!authorizationMessage) return;\n\n // Try to find a sliceList in the payload which matches the job, status, and auth message\n let sliceList = slicePayload.find(desc => {\n return desc.job === slice.jobAddress\n && desc.status === status\n && desc.authorizationMessage === authorizationMessage;\n });\n\n // If we didn't find a sliceList, start a new one and add it to the payload\n if (!sliceList) {\n sliceList = {\n job: slice.jobAddress,\n sliceNumbers: [],\n status,\n authorizationMessage,\n };\n slicePayload.push(sliceList);\n }\n\n sliceList.sliceNumbers.push(slice.sliceNumber);\n\n return slicePayload;\n}\n\n/**\n * Add a slice to the returnSlice payload being built. If a sliceList already exists for the\n * job-isEstimation-authMessage-reason tuple, then the slice will be added to that, otherwise a new\n * sliceList will be added to the payload.\n *\n * @param {Object[]} slicePayload - Slice payload being built. Will be mutated in place.\n * @param {Slice} slice - The slice.\n * @param {String} [reason] - Optional reason to further characterize status; e.g. 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n *\n * @returns {Object[]} mutated slicePayload array\n */\nfunction addToReturnSlicePayload(slicePayload, slice, reason) {\n // getAuthorizationMessage helps enforces the equivalence\n // !authorizationMessage <==> sliceNumber === 0\n const authorizationMessage = slice.getAuthorizationMessage();\n if (!authorizationMessage) return;\n\n if (!reason) reason = slice.error ? 
'EUNCAUGHT' : 'unknown';\n\n // Try to find a sliceList in the payload which matches the job, status, and auth message\n let sliceList = slicePayload.find(desc => {\n return desc.job === slice.jobAddress\n && desc.isEstimationSlice === slice.isEstimationSlice\n && desc.authorizationMessage === authorizationMessage\n && desc.reason === reason;\n });\n\n // If we didn't find a sliceList, start a new one and add it to the payload\n if (!sliceList) {\n sliceList = {\n job: slice.jobAddress,\n sliceNumbers: [],\n status: 'return',\n isEstimationSlice: slice.isEstimationSlice,\n authorizationMessage,\n reason,\n };\n slicePayload.push(sliceList);\n }\n\n sliceList.sliceNumbers.push(slice.sliceNumber);\n\n return slicePayload;\n}\n\n/**\n * Return DCPv4-specific connection options, composed of type-specific, URL-specific, \n * and worker-specific options, any/all of which can override the dcpConfig.dcp.connectOptions.\n * The order of precedence is the order of specificity.\n */\nfunction connectionOptions(url, label) {\n return leafMerge(/* ordered from most to least specific */\n dcpConfig.supervisor.dcp.connectionOptions.default,\n dcpConfig.supervisor.dcp.connectionOptions[label],\n dcpConfig.supervisor.dcp.connectionOptions[url.href]);\n}\n\nexports.Supervisor = Supervisor;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor.js?");
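The addToReturnSlicePayload helper defined near the end of the supervisor.js source above batches returned slices into one sliceList per (job, isEstimationSlice, authorizationMessage, reason) tuple, so a bulk 'status' request carries one entry per group rather than one message per slice. Below is a minimal, self-contained sketch of that grouping idea under simplified assumptions (plain input objects that carry authorizationMessage directly, and a console.log demo); it is illustrative only and is not the package's code:

  // Illustrative sketch: group slice-like records into per-job sliceLists,
  // mirroring the shape addToReturnSlicePayload builds in the supervisor above.
  // The input objects and the demo at the bottom are assumptions for this example.
  function buildReturnPayload(slices) {
    const payload = [];
    for (const slice of slices) {
      const authorizationMessage = slice.authorizationMessage;
      if (!authorizationMessage)
        continue; // !authorizationMessage <==> sliceNumber === 0, so nothing to report
      const reason = slice.reason || (slice.error ? 'EUNCAUGHT' : 'unknown');
      // Reuse an existing sliceList for the same job/estimation/auth/reason tuple
      let sliceList = payload.find(d =>
        d.job === slice.jobAddress &&
        d.isEstimationSlice === slice.isEstimationSlice &&
        d.authorizationMessage === authorizationMessage &&
        d.reason === reason);
      if (!sliceList) {
        sliceList = {
          job: slice.jobAddress,
          sliceNumbers: [],
          status: 'return',
          isEstimationSlice: slice.isEstimationSlice,
          authorizationMessage,
          reason,
        };
        payload.push(sliceList);
      }
      sliceList.sliceNumbers.push(slice.sliceNumber);
    }
    return payload;
  }

  // Example: two slices of the same job and reason collapse into a single sliceList.
  console.log(buildReturnPayload([
    { jobAddress: '0xabc', sliceNumber: 1, isEstimationSlice: false, authorizationMessage: 'auth1' },
    { jobAddress: '0xabc', sliceNumber: 2, isEstimationSlice: false, authorizationMessage: 'auth1' },
  ]));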
4661
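The replacement supervisor.js that follows keeps a persistent opaque worker id: its workerOpaqueId getter lazily loads the id from localStorage, regenerates it when it is missing or not the expected length (the source's opaqueId typedef notes a 22-character base64 string), and the setter writes the new value back. The following is a rough sketch of that lazy-persisted-id pattern; the in-memory Map, the fixed ID_LENGTH, and the generateOpaqueId stand-in are assumptions used here in place of localStorage, constants.workerIdLength, and the dcp/utils helper:

  // Illustrative sketch of a lazily persisted worker id, patterned on the getter/setter below.
  const ID_LENGTH = 22;          // assumed; mirrors the 22-character opaqueId noted in the source
  const storage = new Map();     // stand-in for localStorage in this example

  function generateOpaqueId() {
    // Stand-in generator (not the dcp/utils implementation): 22 URL-safe characters.
    return require('crypto').randomBytes(17).toString('base64url').slice(0, ID_LENGTH);
  }

  class IdHolder {
    get workerOpaqueId() {
      if (!this._workerOpaqueId)
        this._workerOpaqueId = storage.get('workerOpaqueId');
      if (!this._workerOpaqueId || this._workerOpaqueId.length !== ID_LENGTH)
        this.workerOpaqueId = generateOpaqueId();   // assignment invokes the setter, which persists
      return this._workerOpaqueId;
    }
    set workerOpaqueId(newId) {
      this._workerOpaqueId = newId;
      storage.set('workerOpaqueId', newId);
    }
  }

  const holder = new IdHolder();
  console.log(holder.workerOpaqueId);                           // generated and persisted on first access
  console.log(holder.workerOpaqueId === holder.workerOpaqueId); // true: stable on later reads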
+ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file worker/supervisor.js\n *\n * The component that controls each of the sandboxes\n * and distributes work to them. Also communicates with the\n * scheduler to fetch said work.\n *\n * The supervisor readies sandboxes before/while fetching slices.\n * This means sometimes there are extra instantiated WebWorkers\n * that are idle (in this.readiedSandboxes). Readied sandboxes can\n * be used for any slice. After a readied sandbox is given a slice\n * it becomes assigned to slice's job and can only do work\n * for that job.\n *\n * After a sandbox completes its work, the sandbox becomes cached\n * and can be reused if another slice with a matching job is fetched.\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * @date May 2019\n */\n\n/* global dcpConfig */ // eslint-disable-line no-redeclare\n// @ts-check\n\n\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst { Address } = __webpack_require__(/*! dcp/dcp-client/wallet/eth */ \"./src/dcp-client/wallet/eth.js\");\nconst { Keystore } = __webpack_require__(/*! dcp/dcp-client/wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\");\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { OriginAccessManager } = __webpack_require__(/*! dcp/dcp-client/worker/origin-access-manager */ \"./src/dcp-client/worker/origin-access-manager.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst EventEmitter = __webpack_require__(/*! events */ \"./node_modules/events/events.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst { localStorage } = __webpack_require__(/*! dcp/common/dcp-localstorage */ \"./src/common/dcp-localstorage.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst dcp_timers = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\nconst { sliceStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { booley, encodeDataURI, leafMerge, a$sleepMs, compressJobMap, toJobMap, compressSandboxes,\n compressSlices, truncateAddress, dumpSandboxesIfNotUnique, dumpSlicesIfNotUnique, generateOpaqueId } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n\nconst { Sandbox, SandboxError } = __webpack_require__(/*! ./sandbox */ \"./src/dcp-client/worker/sandbox.js\");\nconst { Slice, SLICE_STATUS_UNASSIGNED, SLICE_STATUS_FAILED } = __webpack_require__(/*! ./slice */ \"./src/dcp-client/worker/slice.js\");\nconst { SupervisorCache } = __webpack_require__(/*! ./supervisor-cache */ \"./src/dcp-client/worker/supervisor-cache.js\");\nconst RingBuffer = __webpack_require__(/*! dcp/utils/ringBuffer */ \"./src/utils/ringBuffer.js\");\nconst supShared = __webpack_require__(/*! 
./SupShared */ \"./src/dcp-client/worker/SupShared.js\");\n\n/** Make timers 10x slower when running in niim */\nlet timeDilation = 1;\nif (DCP_ENV.platform === 'nodejs') {\n /** Make timers 10x slower when running in niim */\n timeDilation = (requireNative('module')._cache.niim instanceof requireNative('module').Module) ? 10 : 1;\n}\n\n/** @typedef {string} opaqueId */ // 22 character base64 string\n/** @typedef {import('./index').Worker} Worker */\n/** @typedef {import('dcp/dcp-client/wallet/keystore').Keystore} Keystore */\n/** @typedef {import('dcp/utils/jsdoc-types').SliceMessage} SliceMessage */\n/** @typedef {import('dcp/utils/jsdoc-types').Auth} Auth */\n/** @typedef {import('dcp/utils/jsdoc-types').Signature} Signature */\n/** @typedef {import('dcp/utils/jsdoc-types').SupervisorOptions} SupervisorOptions */\n\n/**\n * @typedef {object} SandboxSlice\n * @property {Sandbox} sandbox\n * @property {Slice} slice\n */\n\n// Slightly different from descriptor in dcp/utils/jsdoc-types\n/**\n * @typedef {object} TDPayload\n * @property {Address} owner\n * @property {Signature} signature\n * @property {Auth} auth\n * @property {Body} body\n */\n\n// Slightly different from descriptor in dcp/utils/jsdoc-types\n/**\n * @access public\n * @typedef {object} Body\n * @property {Object<string, object>} [newJobs]\n * @property {SliceMessage[]} task\n * @property {Object<string, string[]>} [computeGroupJobs]\n * @property {Object<string, Object<string, string[]>>} [computeGroupOrigins]\n * @property {{ targetTaskDuration: number }} [schedulerConfig]\n */\n\nclass Supervisor extends EventEmitter {\n /**\n * @constructor\n * @param {Worker} worker\n * @param {Keystore} identity\n * @param {SupervisorOptions} options\n */\n constructor (worker, identity, options) {\n super({ captureRejections: false });\n\n if (!(identity instanceof Keystore))\n throw new Error(`identity ${JSON.stringify(identity)} must be an instance of Keystore`);\n\n debugging('supervisor') && console.debug('Supervisor.options', options);\n assert(options === worker.workerOptions);\n\n /** @type {Worker} */\n this.worker = worker;\n this.setDefaultIdentityKeystore(identity);\n // Sup1/Sup2 compatibility.\n this._identityKeystore = identity;\n /** @type {SupervisorOptions} */\n this.options = options;\n\n debugging() && console.debug('Supervisor 1: cores.cpu, cores.gpu, maxSandboxes', options.cores?.cpu, options.cores?.gpu, this.maxSandboxes);\n\n // Verify options.jobAddresses exist\n if (!this.options.jobAddresses)\n this.options.jobAddresses = [];\n\n this.tuning = {\n watchdogInterval: 7, /**< seconds - time between fetches when ENOTASK(? 
/wg nov 2019) */\n minSandboxStartDelay: 0.1, /**< seconds - minimum time between WebWorker starts */\n maxSandboxStartDelay: 0.7, /**< seconds - maximum delay time between WebWorker starts */\n ...dcpConfig.supervisor.tuning\n };\n \n /** @type {object} */\n this.jobHandles = {};\n \n /** @type {Sandbox[]} */\n this.sandboxes = [];\n\n /** @type {Sandbox[]} */\n this.readiedSandboxes = [];\n\n /** @type {Sandbox[]} */\n this.assignedSandboxes = [];\n\n /** @type {Slice[]} */\n this.slices = [];\n\n /** @type {Slice[]} */\n this.queuedSlices = [];\n\n /** @type {Slice[]} */\n this.lostSlices = [];\n\n /** @type {boolean} */\n this.matching = false;\n\n /** @type {boolean} */\n this.isFetchingNewWork = false;\n\n /** @type {number} */\n this.numberOfCoresReserved = 0;\n\n /** @type {number} */\n this.addressTruncationLength = 20; // Set to -1 for no truncation.\n\n /** @type {Array<{ address: string, reasons: string[] }>} */\n this.rejectedJobs = [];\n /** @type {Object<string, string[]>} */\n this.rejectedJobReasons = {};\n\n /** @type {number} */\n this.defaultMaxGPUs = 1;\n\n /** @type {boolean} */\n this.pauseSandboxHandleEndHandler = false;\n\n /**\n * TODO: Remove this when the supervisor sends all of the sandbox\n * capabilities to the scheduler when fetching work.\n * @type {object}\n */\n this.capabilities = null;\n\n /** @type {number} */\n this.lastProgressReport = 0;\n this._t0 = Date.now();\n\n /** \n * An N-slot ring buffer of job addresses. Stores all jobs that have had no more than 1 slice run in the ring buffer.\n * Required for the implementation of discrete jobs \n * @type {RingBuffer} \n */\n this.ringBufferofJobs = new RingBuffer(200); // N = 200 should be more than enough.\n \n /** @type {OriginAccessManager} */\n this.originManager = OriginAccessManager.construct(this.options.allowOrigins);\n \n /** @type {SupervisorCache} */\n this.cache = new SupervisorCache(this);\n /** @type {object} */\n this._connections = {}; /* active DCPv4 connections */\n // Call the watchdog every 7 seconds.\n this.watchdogInterval = dcp_timers.setInterval(() => this.watchdog(), this.tuning.watchdogInterval * 1000);\n this.watchdogInterval.unref();\n\n const ceci = this;\n\n // Initialize to null so these properties are recognized for the Supervisor class\n this.taskDistributorConnection = null;\n this.eventRouterConnection = null;\n this.resultSubmitterConnection = null;\n this.packageManagerConnection = null;\n this.openTaskDistributorConn = function openTaskDistributorConn()\n {\n let config = dcpConfig.scheduler.services.taskDistributor;\n ceci.taskDistributorConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'taskDistributor'));\n ceci.taskDistributorConnection.on('end', ceci.openTaskDistributorConn);\n\n // Worker API connect and disconnect event propogation\n ceci.taskDistributorConnection.on('connect', (event) => this.worker.emit('connect', event));\n ceci.taskDistributorConnection.on('disconnect', (event) => this.worker.emit('disconnect', event));\n }\n\n this.openEventRouterConn = function openEventRouterConn()\n {\n let config = dcpConfig.scheduler.services.eventRouter;\n ceci.eventRouterConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'eventRouter'));\n ceci.eventRouterConnection.on('end', ceci.openEventRouterConn);\n if (ceci.eventRouterMessageQueue.length)\n ceci.resendRejectedMessages(ceci.eventRouterConnection, ceci.eventRouterMessageQueue);\n\n // Worker API connect 
and disconnect event propogation\n ceci.eventRouterConnection.on('connect', (event) => this.worker.emit('connect', event));\n ceci.eventRouterConnection.on('disconnect', (event) => this.worker.emit('disconnect', event));\n }\n this.eventRouterMessageQueue = [];\n \n this.openResultSubmitterConn = function openResultSubmitterConn()\n {\n let config = dcpConfig.scheduler.services.resultSubmitter;\n ceci.resultSubmitterConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'resultSubmitter'));\n ceci.resultSubmitterConnection.on('end', ceci.openResultSubmitterConn);\n if (ceci.resultSubmitterMessageQueue.length)\n ceci.resendRejectedMessages(ceci.resultSubmitterConnection, ceci.resultSubmitterMessageQueue);\n\n // Worker API connect and disconnect event propogation\n ceci.resultSubmitterConnection.on('connect', (event) => this.worker.emit('connect', event));\n ceci.resultSubmitterConnection.on('disconnect', (event) => this.worker.emit('disconnect', event));\n }\n this.resultSubmitterMessageQueue = [];\n\n this.openPackageManagerConn = function openPackageManagerConn()\n {\n let config = dcpConfig.packageManager;\n ceci.packageManagerConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'packageManager'));\n ceci.packageManagerConnection.on('end', ceci.openPackageManagerConn);\n if (ceci.packageManagerMessageQueue.length)\n ceci.resendRejectedMessages(ceci.packageManagerConnection, ceci.packageManagerMessageQueue);\n\n // Worker API connect and disconnect event propogation\n ceci.packageManagerConnection.on('connect', (event) => this.worker.emit('connect', event));\n ceci.packageManagerConnection.on('disconnect', (event) => this.worker.emit('disconnect', event));\n }\n this.packageManagerMessageQueue = [];\n\n //\n // Some properties that used to be static, but should not have been\n //\n /** @type {number | boolean} */\n this.lastAssignFailTimerMs = false;\n /** @type {boolean} */\n this.startSandboxWork_beenCalled = false;\n /** @type {boolean} */\n this.debugBuild = ((__webpack_require__(/*! 
dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug');\n /**\n * When this.sliceTiming is set to be true, it displays the timings of a every slice\n * slice['pairingDelta'] = timespan of when slice is paired with sandbox until execution starts\n * slice['executionDelta'] = timespan of execution in sandbox\n * slice['resultDelta'] = timespan of when sandbox finishes executing until recordResult completes.\n * @type {boolean}\n */\n this.sliceTiming = false;\n }\n\n /**\n * This getter is the absolute source-of-truth for what the\n * identity keystore is for this instance of the Supervisor.\n * @type {Keystore}\n */\n get identityKeystore() {\n return this._identityKeystore || this.defaultIdentityKeystore;\n }\n /** @type {Keystore} */\n set identityKeystore(ks) {\n if (!(ks instanceof Keystore))\n throw new Error('Supervisor.identityKeystore: must be an instance of Keystore.');\n this._identityKeystore = ks;\n }\n \n /**\n * Maximum sandboxes allowed to work at a given time.\n * @type {number}\n */\n get maxSandboxes() { return Math.floor(this.options.cores?.cpu || 1); }\n /**\n * Maximum sandboxes allowed to work at a given time.\n * @type {number}\n */\n set maxSandboxes(newVal) { this.options.cores.cpu = Math.floor(newVal); }\n\n /**\n * Return worker opaqueId.\n * @type {opaqueId}\n */\n get workerOpaqueId() {\n if (!this._workerOpaqueId)\n this._workerOpaqueId = localStorage.getItem('workerOpaqueId');\n\n if (!this._workerOpaqueId || this._workerOpaqueId.length !== constants.workerIdLength)\n this.workerOpaqueId = generateOpaqueId();\n\n return this._workerOpaqueId;\n }\n /** @type {opaqueId} */\n set workerOpaqueId(newId) {\n this._workerOpaqueId = newId;\n localStorage.setItem('workerOpaqueId', this._workerOpaqueId);\n }\n\n /**\n * Sup1/Sup2 compatibility helpers.\n * @deprecated Rip this out when we kill sup1.\n * @todo XXXpfr Rip out this sup2/sup1 special-casing when we finally kill sup1.\n * We want to present a consistent interface, regardless whether it's sup1 or sup2.\n */\n /** @type {boolean} */\n get working() { return this.worker.working; }\n /** @type {opaqueId} */\n get workerId() { return this.workerOpaqueId; }\n /** @type {opaqueId} */\n set workerId(newWorkerId) { this.workerOpaqueId = newWorkerId; }\n //get fetchTaskBarrier () { return this.isFetchingNewWork; }\n //set fetchTaskBarrier (value) { this.isFetchingNewWork = value; }\n get version() { return '1.5.0' }\n /** @type {boolean} */\n get isSupervisor1() { return true; }\n /** @type {boolean} */\n get isSupervisor2() { return false; }\n /** @type {Slice[]} */\n get workingSlices() { return this.allocatedSlices; }\n /**\n * Get the job descriptor for jobAddress;\n * viz., the object value corresponding to the key jobAddress,\n * in the object returned by getJobsForTask in task-jobs.js.\n * @param {string} jobAddress\n * @returns {object}\n */\n jobDescriptor (jobAddress) {\n if (!this.cache.cache.job[jobAddress])\n throw new Error(`Cannot find the job descriptor corresponding to jobAddress ${jobAddress}`);\n return this.cache.fetch('job', jobAddress);\n }\n\n /**\n * Open all connections. 
Used when supervisor is instantiated or stopped/started\n * to initially open connections.\n */\n instantiateAllConnections() {\n if (!this.taskDistributorConnection)\n this.openTaskDistributorConn();\n \n if (!this.eventRouterConnection)\n this.openEventRouterConn();\n \n if (!this.resultSubmitterConnection)\n this.openResultSubmitterConn();\n\n if (!this.packageManagerConnection)\n this.openPackageManagerConn();\n }\n \n /**\n * Asynchronously send a result to the result submitter that was previously rejected.\n * Different from resendRejectedMessages below in the sense that the function only resolves\n * once we've delivered the result, or gone past our max number of attempts.\n * @param {object} result \n * @returns the response payload from the result operation\n */\n async resendResult(result) {\n var protocolError = false;\n if (!result.sendRetries)\n result.sendRetries = 1;\n else\n result.sendRetries++;\n \n if (result.sendRetries > this.tuning.maxResultSubmissionRetries)\n throw new DCPError(`Could not submit result after ${dcpConfig.supervisor.tuning.maxResultSubmissionRetries} attempts. Aborting.`);\n \n debugging() && console.debug(`supervisor - failed to submit result ${result.sendRetries} time(s), trying again `)\n let res = await this.resultSubmitterConnection.request('result', result).catch(async (e) => {\n debugging('supervisor') && console.error(`Failed to submit result to scheduler for slice ${result.slice} of job ${result.job}:\\n ${e} \\nWill try again on new connection.`);\n this.resultSubmitterConnection?.close();\n await a$sleepMs(10); /* let connection recycle */\n protocolError = true;\n });\n if ((!res.success && res.payload && res.payload.code === 'DCPS-01002') || protocolError)\n return this.resendResult(result)\n else\n return res;\n }\n \n /**\n * Try sending messages that were rejected on an old instance of the given connection.\n * These are messages that a) were rejected due to a protocol error and b) don't care when exactly\n * they're sent in the grand scheme of things.\n */\n resendRejectedMessages(connection, messageQueue) {\n var message, quitLoop;\n if (connection.resendingMessages) /* if the passed connection is already in the loop, exit */\n return;\n \n message = messageQueue.shift();\n\n do {\n connection.resendingMessages = true;\n quitLoop = false;\n \n connection.request(message.operation, message.data)\n .catch((e) =>\n {\n /* Protocol Error; Close connection (this will trigger the opening of a new connection that will try sending again) */\n debugging('supervisor') && console.error(`Failed to send message ${message.operation} to scheduler: ${e}. Will try again on a new connection.`);\n messageQueue.unshift(message);\n connection.close();\n quitLoop = true;\n });\n \n message = messageQueue.shift();\n \n } while (message && !quitLoop)\n\n connection.resendingMessages = false;\n }\n\n /** Set the default identity keystore -- needs to happen before anything that talks\n * to the scheduler for work gets called. This is a wart and should be removed by\n * refactoring.\n *\n * The default identity keystore will be used if the Supervisor was not provided\n * with an alternate. This keystore will be located via the Wallet API, and \n * if not found, a randomized default identity will be generated. 
\n *\n * @param {Keystore} [ks] - An instance of wallet::Keystore -- if undefined, we pick the best default we can.\n * @returns {Promise<void>}\n */\n async setDefaultIdentityKeystore(ks) {\n try {\n if (ks) {\n this.defaultIdentityKeystore = ks;\n return;\n }\n\n if (this.defaultIdentityKeystore)\n return;\n\n try {\n this.defaultIdentityKeystore = await wallet.getId();\n } catch(e) {\n debugging('supervisor') && console.debug('Error generating default identity, try to do it another way.');\n this.defaultIdentityKeystore = await new wallet.IdKeystore(null, '');\n }\n } finally {\n if (this.defaultIdentityKeystore)\n debugging('supervisor') && console.debug('Set default identity =', this.defaultIdentityKeystore.address);\n else\n debugging('supervisor') && console.debug('Failed to set default identity, worker cannot work.');\n }\n }\n\n //\n // What follows is a bunch of utility properties and functions for creating filtered views\n // of the slices and sandboxes array.\n //\n\n /**\n * Please do not use this.workingSandboxes; use this.allocatedSandboxes instead.\n * Sandboxes that are in WORKING state.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Sandbox[]}\n */\n get workingSandboxes() {\n return this.sandboxes.filter(sandbox => sandbox.isWorking);\n }\n\n /**\n * Use instead of this.workingSandboxes.\n *\n * When a sandbox is paired with a slice, execution is pending and sandbox.allocated=true and\n * sandbox.slice=slice and sandbox.jobAddress=slice.jobAddress. This is what 'allocated' means.\n * Immediately upon the exit of sandbox.work, sandbox.allocated=false is set and if an exception\n * wasn't thrown the sandbox is placed in this.assignedSandboxes.\n * Thus from the pov of supervisor, this.allocatedSandboxes is deterministic and this.workingSandboxes is not.\n * Please try to not use this.workingSandboxes. 
It is deprecated.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Sandbox[]}\n */\n get allocatedSandboxes() {\n return this.sandboxes.filter(sandbox => sandbox.allocated);\n }\n\n /**\n * Slices that are allocated.\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Slice[]}\n */\n get allocatedSlices() {\n return this.slices.filter(slice => slice.allocated);\n }\n\n /**\n * This property is used as the target number of sandboxes to be associated with slices and start working.\n *\n * It is used in this.watchdog as to prevent a call to this.work when unallocatedSpace <= 0.\n * It is also used in this.distributeQueuedSlices where it is passed as an argument to this.matchSlicesWithSandboxes to indicate how many sandboxes\n * to associate with slices and start working.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {number}\n */\n get unallocatedSpace() {\n return this.maxSandboxes - this.allocatedSandboxes.length - this.numberOfCoresReserved;\n }\n \n /**\n * Call acquire(numberOfCoresToReserve) to reserve numberOfCoresToReserve unallocated sandboxes as measured by unallocatedSpace.\n * Call release() to undo the previous acquire.\n * This pseudo-mutex technique helps prevent races in scheduling slices in Supervisor.\n * @param {number} numberOfCoresToReserve\n */\n acquire(numberOfCoresToReserve) { \n this.numberOfCoresReserved = numberOfCoresToReserve; \n }\n release() { \n this.numberOfCoresReserved = 0; \n }\n\n /**\n * Remove from this.slices.\n * @param {Slice} slice\n */\n removeSlice(slice) {\n this.removeElement(this.slices, slice);\n if (this.debugBuild) {\n if (this.queuedSlices.indexOf(slice) !== -1)\n throw new Error(`removeSlice: slice ${slice.identifier} is in queuedSlices; inconsistent state.`);\n if (this.lostSlices.length > 0) {\n debugging() && console.warn(`removeSlice: slice ${slice.identifier}, found lostSlices ${this.lostSlices.map(s => s.identifier)}`);\n if (this.lostSlices.indexOf(slice) !== -1)\n throw new Error(`removeSlice: slice ${slice.identifier} is in lostSlices; inconsistent state.`);\n }\n }\n }\n\n /**\n * Remove from this.slices.\n * @param {Slice[]} slices\n */\n removeSlices(slices) {\n this.slices = this.slices.filter(slice => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove from this.queuedSlices.\n * @param {Slice[]} slices\n */\n removeQueuedSlices(slices) {\n this.queuedSlices = this.queuedSlices.filter(slice => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove all terminated sandboxes.\n */\n removeTerminatedSanboxes () {\n this.sandboxes = this.sandboxes.filter((sbx) => !sbx.isTerminated);\n this.assignedSandboxes = this.assignedSandboxes.filter((sbx) => !sbx.isTerminated);\n this.readiedSandboxes = this.readiedSandboxes.filter((sbx) => !sbx.isTerminated);\n }\n\n /**\n * Remove from this.sandboxes, this.assignedSandboxes and this.readiedSandboxes.\n * @param {Sandbox} sandbox\n */\n removeSandbox(sandbox) {\n debugging('scheduler') && console.log(`removeSandbox ${sandbox.identifier}`);\n this.removeElement(this.sandboxes, sandbox);\n this.removeElement(this.assignedSandboxes, sandbox);\n this.removeElement(this.readiedSandboxes, sandbox);\n }\n\n /**\n * Remove from this.sandboxes and this.assignedSandboxes .\n * @param {Sandbox[]} sandboxes\n */\n async removeSandboxes(sandboxes) {\n debugging('scheduler') && 
console.log(`removeSandboxes: Remove ${sandboxes.length} sandboxes ${this.dumpSandboxes(sandboxes)}`);\n this.sandboxes = this.sandboxes.filter(sandbox => sandboxes.indexOf(sandbox) === -1);\n this.assignedSandboxes = this.assignedSandboxes.filter(sandbox => sandboxes.indexOf(sandbox) === -1);\n\n if (this.debugBuild) {\n const readied = this.readiedSandboxes.filter(sandbox => sandboxes.indexOf(sandbox) !== -1);\n if (readied.length > 0)\n throw new Error(`removeSandboxes: sandboxes ${readied.map(s => s.identifier)} are in readiedSandboxes; inconsistent state.`);\n }\n }\n\n /**\n * Remove element from theArray.\n * @param {Array<*>} theArray\n * @param {object|number} element\n */\n removeElement(theArray, element) {\n let index = theArray.indexOf(element);\n if (index !== -1) theArray.splice(index, 1);\n }\n\n /**\n * Log sliceArray.\n * @param {Slice[]} sliceArray\n * @param {string} [header]\n * @returns {string}\n */\n dumpSlices(sliceArray, header) {\n if (header) console.log(`\\n${header}`);\n return compressSlices(sliceArray, this.addressTruncationLength);\n }\n\n /**\n * Log sandboxArray.\n * @param {Sandbox[]} sandboxArray\n * @param {string} [header]\n * @returns {string}\n */\n dumpSandboxes(sandboxArray, header) {\n if (header) console.log(`\\n${header}`);\n return compressSandboxes(sandboxArray, this.addressTruncationLength);\n }\n\n /**\n * If the elements of sandboxSliceArray are not unique, log the duplicates and dump the array.\n * @param {SandboxSlice[]} sandboxSliceArray\n * @param {string} header\n */\n dumpSandboxSlicesIfNotUnique(sandboxSliceArray, header) {\n if (!this.isUniqueSandboxSlices(sandboxSliceArray, header))\n console.log(this.dumpSandboxSlices(sandboxSliceArray));\n }\n\n /**\n * Log { sandbox, slice }.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @returns {string}\n */\n dumpSandboxAndSlice(sandbox, slice) {\n return `${sandbox.id}~${slice.sliceNumber}.${this.dumpJobAddress(slice.jobAddress)}`;\n }\n\n /**\n * Log { sandbox, slice } with state/status.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @returns {string}\n */\n dumpStatefulSandboxAndSlice(sandbox, slice) {\n return `${sandbox.id}.${sandbox.state}~${slice.sliceNumber}.${this.dumpJobAddress(slice.jobAddress)}.${slice.status}`;\n }\n\n /**\n * Truncates jobAddress.toString() to this.addressTruncationLength digits.\n * @param {string} jobAddress\n * @returns {string}\n */\n dumpJobAddress(jobAddress) {\n return truncateAddress(jobAddress, this.addressTruncationLength /* digits*/);\n }\n\n /**\n * Dump sandboxSliceArray.\n * @param {SandboxSlice[]} sandboxSliceArray - input array of { sandbox, slice }\n * @param {string} [header] - optional header\n * @param {boolean} [stateFul] - when true, also includes slice.status and sandbox.state.\n * @returns {string}\n */\n dumpSandboxSlices(sandboxSliceArray, header, stateFul=false) {\n if (header) console.log(`\\n${header}`);\n const jobMap = {};\n sandboxSliceArray.forEach(ss => {\n const sss = stateFul ? 
`${ss.sandbox.id}.${ss.sandbox.state}~${ss.slice.sliceNumber}.${ss.slice.status}` : `${ss.sandbox.id}~${ss.slice.sliceNumber}`;\n if (!jobMap[ss.slice.jobAddress]) jobMap[ss.slice.jobAddress] = sss;\n else jobMap[ss.slice.jobAddress] += `,${sss}`;\n });\n let output = '';\n for (const [jobAddress, sss] of Object.entries(jobMap))\n output += `${this.dumpJobAddress(jobAddress)}:[${sss}]:`;\n return output;\n }\n\n /**\n * Check sandboxSliceArray for duplicates.\n * @param {SandboxSlice[]} sandboxSliceArray\n * @param {string} [header]\n * @param {function} [log]\n * @returns {boolean}\n */\n isUniqueSandboxSlices(sandboxSliceArray, header, log) {\n const result = [], slices = [], sandboxes = [];\n let once = true;\n sandboxSliceArray.forEach(x => {\n const sliceIndex = slices.indexOf(x.slice);\n const sandboxIndex = sandboxes.indexOf(x.sandbox);\n\n if (sandboxIndex >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x.sandbox) : console.log(`\\tWarning: Found duplicate sandbox ${x.sandbox.identifier}.`);\n } else sandboxes.push(x.sandbox);\n\n if (sliceIndex >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x.slice) : console.log(`\\tWarning: Found duplicate slice ${x.slice.identifier}.`);\n } else {\n slices.push(x.slice);\n if (sandboxIndex < 0) result.push(x);\n }\n });\n return sandboxSliceArray.length === result.length;\n }\n\n /**\n * Attempts to create and start a given number of sandboxes.\n * The sandboxes that are created can then be assigned for a\n * specific job at a later time. All created sandboxes\n * get put into the @this.readiedSandboxes array when allocateLocalSandboxes is false.\n *\n * @param {number} numSandboxes - the number of sandboxes to create\n * @param {boolean} [allocateLocalSandboxes=false] - when true, do not place in this.readiedSandboxes\n * @returns {Promise<Sandbox[]>} - resolves with array of created sandboxes, rejects otherwise\n * @throws when given a numSandboxes is not a number or if numSandboxes is Infinity\n */\n async readySandboxes (numSandboxes, allocateLocalSandboxes = false) {\n debugging('supervisor') && console.debug(`readySandboxes: Readying ${numSandboxes} sandboxes, total sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n \n if (typeof numSandboxes !== 'number' || Number.isNaN(numSandboxes) || numSandboxes === Infinity) {\n throw new Error(`${numSandboxes} is not a number of sandboxes that can be readied.`);\n }\n if (numSandboxes <= 0) {\n return [];\n }\n\n const sandboxStartPromises = [];\n const sandboxes = [];\n const errors = [];\n for (let i = 0; i < numSandboxes; i++) {\n const sandbox = new Sandbox(this, {\n ...this.options.sandboxOptions,\n });\n \n sandbox.addListener('start', () => {\n // When sliceNumber == 0, result-submitter status skips the slice,\n // so don't send it in the first place.\n // The 'start' event is fired when a worker starts up, hence there's no way\n // to determine whether sandbox has a valid slice without checking.\n if (sandbox.slice) {\n const jobAddress = sandbox.jobAddress;\n const sliceNumber = sandbox.slice.sliceNumber;\n // !authorizationMessage <==> sliceNumber === 0.\n const authorizationMessage = sandbox.slice.getAuthorizationMessage();\n\n if (authorizationMessage) {\n let statusPayload = {\n worker: this.workerOpaqueId,\n slices: [{\n job: jobAddress,\n sliceNumber: sliceNumber,\n status: 'begin',\n authorizationMessage,\n }],\n }\n \n try /* resultSubmitterConnection can 
be null if worker is stopped */\n {\n this.resultSubmitterConnection.request('status', statusPayload).catch((error) => {\n debugging('supervisor') && console.error(`Error sending 'status' for slice ${sliceNumber} of job ${jobAddress}:\\n ${error}\\nWill try again on a new connection`);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: statusPayload });\n this.resultSubmitterConnection?.close();\n });\n } catch (error)\n {\n debugging('supervisor') && console.error(`Failed to send 'status' for slice ${sliceNumber} of job ${jobAddress}:, no connection to result submitter:`, error);\n }\n }\n }\n });\n sandbox.addListener('workEmit', ({ eventName, payload }) => {\n // Need to check if the sandbox hasn't been assigned a slice yet.\n if (!sandbox.slice) {\n if (this.debugBuild) {\n console.error(\n `Sandbox not assigned a slice before sending workEmit message to scheduler. 'workEmit' event originates from \"${eventName}\" event`, \n payload,\n );\n }\n }\n else\n {\n const jobAddress = sandbox.slice.jobAddress;\n const sliceNumber = sandbox.slice.sliceNumber;\n // sliceNumber can be zero if it came from a problem with loading modules.\n assert(jobAddress && (sliceNumber || sliceNumber === 0));\n // Send a work emit message from the sandbox to the event router\n // !authorizationMessage <==> sliceNumber === 0.\n let authorizationMessage;\n try {\n // Sometimes a sliceNumber===0 workEmit comes in before the client bundle is properly loaded.\n // Also happens with minor dcp-client version mismatches.\n authorizationMessage = sandbox.slice.getAuthorizationMessage();\n } catch(e) {\n authorizationMessage = null;\n }\n\n if (!authorizationMessage)\n {\n this.worker.emit('warning', `workEmit: missing authorization message for job ${jobAddress}, slice: ${sliceNumber}`);\n return Promise.resolve();\n }\n \n let workEmitPayload = {\n eventName,\n payload,\n job: jobAddress,\n slice: sliceNumber,\n worker: this.workerOpaqueId,\n authorizationMessage,\n }\n \n const workEmitPromise = this.eventRouterConnection.request('workEmit', workEmitPayload).catch(error => {\n debugging('supervisor') && console.warn(`workEmit: unable to send ${eventName} for slice ${sliceNumber} of job ${jobAddress}: ${error.message}.\\nTrying again on a new connection.`, error);\n this.eventRouterMessageQueue.push({ operation: 'workEmit', data: workEmitPayload })\n this.eventRouterConnection?.close();\n });\n\n if (this.debugBuild) {\n workEmitPromise.then(result => {\n if (!result || !result.success)\n this.worker.emit('warning', `workEmit: event router did not accept event: ${result}`);\n });\n }\n }\n });\n\n // When any sbx completes, \n sandbox.addListener('complete', () => {\n this.watchdog();\n });\n\n sandbox.on('sandboxError', (error) => handleSandboxError(this, sandbox, error));\n \n sandbox.on('rejectedWorkMetrics', (data) =>{\n function updateRejectedMetrics(report) {\n ['total', 'CPU', 'webGL', 'webGPU'].forEach((key) => {\n if (report[key]) sandbox.slice.rejectedTimeReport[key] += report[key];\n })\n }\n \n // If the slice already has rejected metrics, add this data to it. If not, assign this data to slices rejected metrics property\n if (sandbox.slice && data.timeReport) {\n (sandbox.slice.rejectedTimeReport) ? 
updateRejectedMetrics(data.timeReport) : sandbox.slice.rejectedTimeReport = data.timeReport;\n }\n })\n \n // If the sandbox terminated and we are not shutting down, then should return all work which is currently\n // not being computed if all sandboxes are dead and the attempt to create a new one fails.\n sandbox.sandboxHandle.on('end', async () => {\n if (this.sandboxes.length > 0 && !this.pauseSandboxHandleEndHandler) {\n let terminatedSandboxes = this.sandboxes.filter(sbx => sbx.isTerminated);\n if (terminatedSandboxes.length === this.sandboxes.length) {\n debugging() && console.debug(`readySandboxes: Create 1 sandbox in the sandbox-terminated-handler, total sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n try {\n await this.readySandboxes(1);\n } catch (e) {\n // If we cannot create a new sandbox, that probably means we're on a screensaver worker\n // and the screensaver is down. So return the slices to the scheduler.\n debugging() && console.debug('Terminated handler: Evaluator is down');\n this.removeTerminatedSanboxes();\n this.returnSlices(this.queuedSlices).then(() => {\n this.queuedSlices.length = 0;\n });\n }\n }\n }\n })\n\n const delayMs =\n 1000 *\n (this.tuning.minSandboxStartDelay +\n Math.random() *\n (this.tuning.maxSandboxStartDelay - this.tuning.minSandboxStartDelay));\n \n sandboxStartPromises.push(\n sandbox\n .start(delayMs)\n .then(() => {\n this.worker.emit('sandbox', sandbox.sandboxHandle);\n if (!allocateLocalSandboxes) this.readiedSandboxes.push(sandbox);\n this.sandboxes.push(sandbox);\n sandboxes.push(sandbox);\n }).catch((err) => {\n errors.push(err);\n this.returnSandbox(sandbox);\n if (err.code === 'ENOWORKER') {\n throw new DCPError(\"Cannot use localExec without dcp-worker installed. Use the command 'npm install dcp-worker' to install the neccessary modules.\", 'ENOWORKER');\n }\n }));\n }\n \n await Promise.all(sandboxStartPromises);\n\n if (errors.length) {\n debugging() && console.warn(`Failed to ready ${errors.length} of ${numSandboxes} sandboxes.`, errors);\n errors.forEach((e) => { this.worker.emit('error', e); });\n throw new Error('Failed to ready sandboxes.');\n }\n\n debugging('supervisor') && console.log(`readySandboxes: Readied ${sandboxes.length} sandboxes ${JSON.stringify(sandboxes.map(sandbox => sandbox.id))}`);\n \n return sandboxes;\n }\n\n /**\n * Accepts a sandbox after it has finished working or encounters an error.\n * If the sandbox was terminated or if \"!slice || slice.failed\" then\n * the sandbox will be removed from the sandboxes array and terminated if necessary.\n * Otherwise it will try to distribute a slice to the sandbox immediately.\n *\n * @param {Sandbox} sandbox - the sandbox to return\n * @param {Slice} [slice] - the slice just worked on; !slice => terminate\n */\n returnSandbox (sandbox, slice) {\n if (!slice || slice.failed || sandbox.isTerminated) {\n \n this.removeSandbox(sandbox);\n \n if (!sandbox.isTerminated) {\n debugging('supervisor') && console.log(`Supervisor.returnSandbox: Terminating ${sandbox.identifier}~${slice ? slice.identifier : '<null>'}, # of sandboxes ${this.sandboxes.length}`);\n sandbox.terminate(false);\n } else {\n debugging() && console.log(`Supervisor.returnSandbox: Already terminated ${sandbox.identifier}~${slice ? 
slice.identifier : '<null>'}, # of sandboxes ${this.sandboxes.length}`);\n }\n }\n }\n\n /**\n * Terminates sandboxes, in order of creation, when the total started sandboxes exceeds the total allowed sandboxes.\n *\n * @returns {Promise<void>}\n */\n pruneSandboxes () {\n let numOver = this.sandboxes.length - this.maxSandboxes;\n if (numOver <= 0) return;\n\n // Don't kill readied sandboxes while creating readied sandboxes.\n for (let index = 0; index < this.readiedSandboxes.length; ) {\n const sandbox = this.readiedSandboxes[index];\n // If the sandbox is allocated, advance to the next one in the list.\n if (sandbox.allocated) {\n index++;\n continue;\n }\n // Otherwise, remove this sandbox but look at the same array index in the next loop.\n debugging('supervisor') && console.log(`pruneSandboxes: Terminating readied sandbox ${sandbox.identifier}`);\n this.readiedSandboxes.splice(index, 1);\n this.returnSandbox(sandbox);\n\n if (--numOver <= 0) break;\n }\n\n if (numOver <= 0) return;\n for (let index = 0; index < this.assignedSandboxes.length; ) {\n const sandbox = this.assignedSandboxes[index];\n // If the sandbox is allocated, advance to the next one in the list.\n if (sandbox.allocated) {\n index++;\n continue;\n }\n // Otherwise, remove this sandbox but look at the same array index in the next loop.\n debugging('supervisor') && console.log(`pruneSandboxes: Terminating assigned sandbox ${sandbox.identifier}`);\n this.assignedSandboxes.splice(index, 1);\n this.returnSandbox(sandbox);\n\n if (--numOver <= 0) break;\n }\n }\n \n /**\n * Basic watch dog to check if there are idle sandboxes and\n * attempts to nudge the supervisor to feed them work.\n *\n * Run in an interval created in the constructor .\n * @returns {Promise<void>}\n */\n async watchdog () {\n if (!this.watchdogState)\n this.watchdogState = {};\n\n // Every 5 minutes, report progress of all working slices to the scheduler\n if (Date.now() > ( (this.lastProgressReport || 0) + (dcpConfig.supervisor?.sandbox?.progressReportInterval || 20 * 60 * 1000) )) {\n // console.log('454: Assembling progress update...');\n this.lastProgressReport = Date.now();\n\n //\n // Note: this.slices is the disjoint union of:\n // this.allocatedSlices, \n // this.queuedSlices, \n // this.slices.filter(slice => !slice.isUnassigned) .\n // When a slice is not in these 3 arrays, the slice is lost.\n //\n \n const currentLostSlices = this.slices.filter(slice => slice.isUnassigned \n && this.queuedSlices.indexOf(slice) === -1\n && this.allocatedSlices.indexOf(slice) === -1);\n\n if (currentLostSlices.length > 0) {\n this.lostSlices.push(...currentLostSlices);\n // Try to recover.\n // Needs more work and testing.\n // Test when we can come up with a decent lost slice repro case.\n // --> this.queuedSlices.push(...currentLostSlices);\n }\n\n if (this.lostSlices.length > 0) {\n if (debugging()) {\n console.warn('Supervisor.watchdog: Found lost slices!');\n for (const slice of this.lostSlices)\n console.warn('\\t', slice.identifier);\n }\n this.lostSlices = this.lostSlices.filter(slice => slice.isUnassigned);\n }\n\n const slices = [];\n this.queuedSlices.forEach(slice => {\n assert(slice && slice.sliceNumber > 0);\n addToSlicePayload(slices, slice, sliceStatus.scheduled);\n });\n\n this.allocatedSlices.forEach(slice => {\n assert(slice && slice.sliceNumber > 0);\n addToSlicePayload(slices, slice, 'progress'); // Beacon.\n });\n\n if (slices.length) {\n // console.log('471: sending progress update...');\n const progressReportPayload = {\n worker: 
this.workerOpaqueId,\n slices,\n };\n\n this.resultSubmitterConnection.request('status', progressReportPayload)\n .catch(error => {\n debugging('supervisor') && console.error('479: Failed to send status update:', error/*.message*/);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: progressReportPayload })\n this.resultSubmitterConnection?.close();\n });\n }\n }\n\n if (this.worker.working) {\n if (this.unallocatedSpace > 0) {\n await this.work().catch(err => {\n if (!this.watchdogState[err.code || '0'])\n this.watchdogState[err.code || '0'] = 0;\n if (Date.now() - this.watchdogState[err.code || '0'] > ((this.tuning.watchdogLogInterval * timeDilation || 120) * 1000))\n this.worker.emit('error', `301: Failed to start work: ${err.message}`);\n this.watchdogState[err.code || '0'] = Date.now();\n });\n }\n\n this.pruneSandboxes();\n }\n }\n\n /**\n * Gets the logical and physical number of cores and also\n * the total number of sandboxes the worker is allowed to run\n *\n */\n getStatisticsCPU() {\n const pCores = dcpConfig.supervisor?.pCores;\n if (DCP_ENV.isBrowserPlatform) {\n return {\n worker: this.workerOpaqueId,\n lCores: pCores || window.navigator.hardwareConcurrency,\n pCores: pCores || window.navigator.hardwareConcurrency,\n sandbox: this.maxSandboxes\n }\n }\n\n return {\n worker: this.workerOpaqueId,\n lCores: pCores || requireNative('os').cpus().length,\n pCores: pCores || requireNative('physical-cpu-count'),\n sandbox: this.maxSandboxes\n }\n }\n\n static getLogicalCores () {\n if (DCP_ENV.isBrowserPlatform)\n return window.navigator.hardwareConcurrency;\n return requireNative('os').cpus().length;\n }\n\n /**\n * Returns the number of unallocated sandbox slots to send to fetchTask.\n *\n * @returns {number}\n */\n numberOfAvailableSandboxSlots() {\n let numCores;\n if (this.queuedSlices.length > 1) {\n // We have slices queued, no need to fetch\n numCores = 0;\n } else {\n // The queue is almost empty (there may be 0 or 1 element), fetch a full task.\n // The task is full, in the sense that it will contain slices whose\n // aggregate execution time is this.maxSandboxes * 5-minutes.\n // However, there can only be this.unallocatedSpace # of long slices.\n // Thus we need to know whether the last slice in this.queuedSlices is long or not.\n // (A long slice has estimated execution time >= 5-minutes.)\n const longSliceCount = (this.queuedSlices.length > 0 && this.queuedSlices[0].isLongSlice) ? 1 : 0;\n numCores = this.unallocatedSpace - longSliceCount;\n }\n return numCores;\n }\n\n /**\n * Retry wrapper around readySandboxes.\n * @param {number} count\n * @param {boolean} [allocateLocalSandboxes=false] - When true does not populate this.readiedSandboxes .\n */\n async createSandboxes (count, allocateLocalSandboxes = false) {\n /**\n * If the evaluator cannot start (ie. 
if the evalServer is not running),\n * then the while loop will keep retrying until the evalServer comes online.\n */\n let sandboxes, retry = 0;\n while (true) {\n try {\n debugging() && console.debug(`Supervisor.createSandboxes(${allocateLocalSandboxes}): ready ${count} sandbox(es), unallocatedSpace ${this.unallocatedSpace}, queued slices ${this.queuedSlices.length}, # of sandboxes ${this.sandboxes.length}`);\n sandboxes = await this.readySandboxes(1, allocateLocalSandboxes);\n count--;\n const sbxes = await this.readySandboxes(count, allocateLocalSandboxes);\n sandboxes.push(...sbxes);\n debugging('supervisor') && console.debug(`Supervisor.createSandboxes(${allocateLocalSandboxes}): success`, sandboxes.length);\n break;\n } catch (error) {\n debugging() && console.warn(`906: failed to ready sandboxes(${allocateLocalSandboxes}); will retry`, this.sandboxes.length, error.code, error.message);\n // The evaluator may be down or shutting down, keep retrying.\n if ((retry % 60) === 0)\n this.worker.emit('warning', error);\n await a$sleepMs(1000 * Math.min(5, ++retry));\n }\n }\n debugging() && console.debug(`createSandboxes(${allocateLocalSandboxes}): created ${sandboxes.length} sandboxes`);\n return sandboxes;\n }\n\n /**\n * Call to start doing work on the network.\n * This is the one place where requests to fetch new slices are made.\n * After the initial slices are fetched it calls this.distributeQueuedSlices.\n *\n * @returns {Promise<void>}, unallocatedSpace ${this.unallocatedSpace}\n */\n async work ()\n {\n // When inside matchSlicesWithSandboxes, don't reenter Supervisor.work to fetch new work or create new sandboxes.\n if (this.matching) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.work: Do not interleave work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n return Promise.resolve();\n }\n\n // Reset sandbox handle end handler flag.\n this.pauseSandboxHandleEndHandler = false;\n // Instantiate connections that don't exist.\n this.instantiateAllConnections();\n\n const numCores = this.numberOfAvailableSandboxSlots();\n\n debugging() && console.log(`Supervisor.work: Try to get ${numCores} slices in working sandboxes, unallocatedSpace ${this.unallocatedSpace}, queued slices ${this.queuedSlices.length}, # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching: ${this.isFetchingNewWork}`);\n \n // Fetch a new task if we have no more slices queued, then start workers\n try {\n if (numCores > 0 && !this.isFetchingNewWork) {\n this.isFetchingNewWork = true;\n\n /**\n * This will only ready sandboxes up to a total count of\n * maxSandboxes (in any state). It is not possible to know the\n * actual number of sandboxes required until we have the slices because we\n * may have sandboxes assigned for the slice's job already.\n *\n * If the evaluator cannot start (ie. 
if the evalServer is not running),\n * then the while loop will keep retrying until the evalServer comes online.\n */\n if (this.maxSandboxes > this.sandboxes.length) {\n await this.createSandboxes(this.maxSandboxes - this.sandboxes.length);\n }\n /**\n * Temporary change: Assign the capabilities of one of readied sandboxes\n * before fetching slices from the scheduler.\n *\n * TODO: Remove this once fetchTask uses the capabilities of every\n * sandbox to fetch slices.\n */\n if (!this.capabilities)\n this.capabilities = this.sandboxes[0].capabilities;\n\n if (DCP_ENV.isBrowserPlatform && this.capabilities.browser)\n this.capabilities.browser.chrome = DCP_ENV.isBrowserChrome;\n\n const fetchTimeout = dcp_timers.setTimeout(() => {\n this.worker.emit('warning', 'Fetch exceeded timeout, will reconnect at next watchdog interval');\n \n this.taskDistributorConnection?.close().catch(error => {\n this.worker.emit('error', `Failed to close task-distributor connection ${error.message}`);\n });\n this.resultSubmitterConnection?.close().catch(error => {\n this.worker.emit('error', `Failed to close result-submitter connection ${error.message}`);\n });\n this.isFetchingNewWork = false;\n this.instantiateAllConnections();\n }, 3 * 60 * 1000); // max out at 3 minutes to fetch\n\n // ensure result submitter and task distributor connections before fetching tasks\n try\n {\n await this.resultSubmitterConnection.keepalive();\n await this.taskDistributorConnection.keepalive();\n }\n catch (e)\n {\n this.worker.emit('error', `Failed to connect to result submitter, refusing to fetch slices. Will try again at next fetch cycle ${e.message}`);\n debugging('supervisor') && console.log(`Error: ${e}`);\n this.isFetchingNewWork = false; // <-- done in the `finally` block, below\n dcp_timers.clearTimeout(fetchTimeout);\n this.taskDistributorConnection?.close().catch(error => {\n this.worker.emit('error', `Failed to close task-distributor connection ${error.message}`);\n });\n this.resultSubmitterConnection?.close().catch(error => {\n this.worker.emit('error', `Failed to close result-submitter connection ${error.message}`);\n });\n return Promise.resolve();\n }\n await this.fetchTask(numCores).finally(() => {\n dcp_timers.clearTimeout(fetchTimeout);\n this.isFetchingNewWork = false;\n });\n }\n\n this.distributeQueuedSlices().then(() => debugging('supervisor') && 'supervisor: finished distributeQueuedSlices()').catch((e) => {\n // We should never get here, because distributeQueuedSlices was changed\n // to try to catch everything and return slices and sandboxes.\n // If we do catch here it may mean a slice was lost. \n debugging() && console.error('Supervisor.work catch handler for distributeQueuedSlices.', e);\n this.worker.emit('error', e);\n });\n // No catch(), because it will bubble outward to the caller\n } finally {\n }\n }\n\n /**\n * Generate the workerComputeGroups property of the requestTask message. \n * \n * Concatenate the compute groups object from dcpConfig with the list of compute groups\n * from the supervisor, and remove the public group if accidentally present. Finally,\n * we transform joinSecrets/joinHash into joinHashHash for secure transmission.\n *\n * @note computeGroup objects with joinSecrets are mutated to record their hashes. This\n * affects the supervisor options and dcpConfig. 
Re-adding a joinSecret property\n * to one of these will cause the hash to be recomputed.\n */\n generateWorkerComputeGroups()\n {\n return supShared.generateWorkerComputeGroups(this, this.taskDistributorConnection);\n }\n\n /**\n * Remove all unreferenced jobs in `this.cache`.\n *\n * @param {Object<string, object>} newJobs - Jobs that should not be removed from `this.cache`.\n */\n cleanJobCache(newJobs) {\n /* Delete all jobs in the supervisorCache that are not represented in this newJobs,\n * or in this.queuedSlices, or there is no sandbox assigned to these jobs.\n * Note: There can easily be 200+ places to check; using a lookup structure to maintain O(n).\n */\n if (this.cache.jobs.length > 0) {\n const jobAddressMap = {};\n Object.keys(newJobs).forEach(jobAddress => { jobAddressMap[jobAddress] = 1; });\n this.slices.forEach(slice => { if (!jobAddressMap[slice.jobAddress]) jobAddressMap[slice.jobAddress] = 1; });\n this.cache.jobs.forEach(jobAddress => {\n if (!jobAddressMap[jobAddress]) {\n this.cache.remove('job', jobAddress);\n this.jobHandles[jobAddress].emit('flush');\n // Remove and return the corresponding sandboxes from this.sandboxes.\n const deadSandboxes = this.sandboxes.filter(sb => sb.jobAddress === jobAddress);\n if (deadSandboxes.length > 0) {\n deadSandboxes.forEach(sandbox => { this.returnSandbox(sandbox); });\n debugging('supervisor') && console.log(`Supervisor.fetchTask: Deleting job ${jobAddress} from cache and assigned sandboxes ${deadSandboxes.map(s => s.id)}, # of sandboxes ${this.sandboxes.length}.`);\n }\n }\n });\n }\n }\n\n /**\n * Fetches a task, which contains job information and slices for sandboxes and\n * manages events related to fetching tasks so the UI can more clearly display\n * to user what is actually happening.\n * @param {number} [numCores]\n * @returns {Promise<void>} The requestTask request, resolve on success, rejects otherwise.\n * @emits Supervisor#beforeFetch\n * @emits Supervisor#fetch\n */\n async fetchTask(numCores) {\n // Don't reenter\n if (this.matching || numCores <= 0) {\n // Interesting and noisy.\n debugging('supervisor') && console.log(`Supervisor.fetchTask: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return;\n }\n\n //\n // Oversubscription mitigation.\n // Update when there are less available sandbox slots than numCores.\n const checkNumCores = this.numberOfAvailableSandboxSlots();\n if (!numCores || numCores > checkNumCores) numCores = checkNumCores;\n\n if (!(this.options.paymentAddress instanceof Address))\n throw new Error(`options.paymentAddress ${JSON.stringify(this.options.paymentAddress)} must be an instance of Address`);\n\n const fetchStartTime = Date.now();\n // The 'beforeFetch' event allows the user to cancel the requestTask request.\n let canceled = false;\n /**\n * The 'beforeFetch' event fires before the request is sent to requestTask in task distributor.\n * @link https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n * @event Supervisor#beforeFetch\n */\n this.worker.emit('beforeFetch', () => { canceled = true; })\n debugging() && canceled && console.log('User canceled the fetch task.');\n if (canceled)\n return;\n\n debugging('supervisor') && console.debug('supervisor: fetching task', numCores);\n const requestPayload = {\n supervisor: this.version,\n numCores,\n coreStats: 
this.getStatisticsCPU(),\n numGPUs: this.defaultMaxGPUs,\n capabilities: this.capabilities,\n paymentAddress: this.options.paymentAddress,\n jobAddresses: this.options.jobAddresses || [], // force array; when set, only fetches slices for these jobs\n workerComputeGroups: this.generateWorkerComputeGroups(),\n minimumWage: this.options.minimumWage || { CPU: 0, GPU: 0, in: 0, out: 0 },\n readyJobs: [ /* list of jobs addresses XXXwg */ ],\n previouslyWorkedJobs: this.ringBufferofJobs.buf, //Only discrete jobs\n rejectedJobs: this.rejectedJobs,\n };\n // workers should be part of the public compute group by default\n if (!booley(this.options.leavePublicGroup))\n requestPayload.workerComputeGroups.push(constants.computeGroups.public);\n debugging('computeGroups') && console.log(`Fetching work for ${requestPayload.workerComputeGroups.length} ComputeGroups: `, requestPayload.workerComputeGroups);\n debugging('supervisor') && console.log(`fetchTask wants ${numCores} slice(s), unallocatedSpace ${this.unallocatedSpace}, queuedSlices ${this.queuedSlices.length}`);\n try {\n debugging('requestTask') && console.debug('fetchTask: requestPayload', requestPayload);\n\n let result = await this.taskDistributorConnection.request('requestTask', requestPayload).catch((error) => {\n debugging('supervisor') && console.error(`Unable to request task from scheduler: ${error}. Will try again on a new connection.`);\n this.taskDistributorConnection?.close();\n throw error; /* caught below */\n });\n /** @type {TDPayload} */\n const responsePayload = result.payload;\n\n if (!result.success) {\n debugging() && console.log('Task fetch failure; request=', requestPayload);\n debugging() && console.log('Task fetch failure; response=', responsePayload);\n throw new DCPError('Unable to fetch task for worker', responsePayload);\n }\n \n if (!responsePayload.body?.task?.length) {\n /**\n * The 'fetch' event fires when the supervisor has found no work from the task distributor.\n * @link https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n * @event Supervisor#fetch\n */\n this.worker.emit('fetch', {\n fetchStart: fetchStartTime,\n fetchEnd: Date.now(),\n fetchSize: 0, \n jobs: {},\n slices: {}\n })\n return;\n }\n\n /*\n * payload: { TDPayload }\n * TDPayload: { owner: Address, signature: Signature, auth: Auth, body: Body };\n * Auth: { workerId: string, authSlices: Object<string, SliceMessage[]>, schedulerId: { address: Address }, jobCommissions: Object<string, { rate: number, account: number }> }\n * Body: { newJobs: Object<string, object>, task: SliceMessage[], computeGroupJobs: Object<string, string[]>, computeGroupOrigins: Object<string, Object<string, string[]>>, schedulerConfig: {{ targetTaskDuration: number }} }\n */\n\n const { body, ...authorizationMessage } = responsePayload;\n let { newJobs, task } = body;\n assert(newJobs); // It should not be possible to have !newJobs -- we throw on !success.\n \n /*\n * If we have specified specific jobs the worker may work on, the received jobs must be in the specified job list\n * Otherwise panic since the scheduler cannot be trusted at that point.\n */\n if (this.options.jobAddresses?.length > 0 && !Object.keys(newJobs).every((ele) => this.options.jobAddresses.includes(ele)))\n {\n this.worker.emit('error', \"Worker received slices it shouldn't have. 
Rejecting the work and stopping.\");\n process.exit(1);\n }\n\n debugging() && console.log(`Supervisor.fetchTask: task: ${task.length}/${numCores}, jobs: ${Object.keys(newJobs).length}, authSlices: ${compressJobMap(authorizationMessage.auth.authSlices, this.addressTruncationLength /* digits*/)}`);\n // Delete all jobs in the supervisorCache that are not represented in this task,\n // or in this.queuedSlices, or there is no sandbox assigned to these jobs.\n this.cleanJobCache(newJobs);\n\n for (const jobAddress of Object.keys(newJobs))\n {\n const jobHandle = new EventEmitter({ captureRejections: false });\n Object.assign(jobHandle, {\n address: newJobs[jobAddress].address,\n name: newJobs[jobAddress].public.name,\n description: newJobs[jobAddress].public.description,\n link: newJobs[jobAddress].public.link,\n });\n\n this.jobHandles[jobAddress] = jobHandle;\n this.worker.emit('job', jobHandle);\n \n if (!this.cache.cache.job[jobAddress])\n this.cache.store('job', jobAddress, newJobs[jobAddress]);\n }\n\n // Memoize authMessage onto the Slice object, this should\n // follow it for its entire life in the worker.\n const tmpQueuedSlices = task.map(taskElement => new Slice(taskElement, authorizationMessage));\n\n // Make sure old stuff is up front.\n // matchSlicesWithSandboxes dequeues this.queuedSlices as follows:\n // slicesToMatch = this.queuedSlices.slice(0, numCores);\n this.slices.push(...tmpQueuedSlices);\n this.queuedSlices.push(...tmpQueuedSlices);\n \n // Populating the ring buffer based on job's discrete property \n Object.values(newJobs).forEach(job => {\n if(job.requirements.discrete && this.ringBufferofJobs.find(element => element === job.address) === undefined) {\n this.ringBufferofJobs.push(job.address);\n }\n });\n\n // Construct the fetchHandle for 'fetch' event.\n const jobs = {}, slices = {};\n for (const jobAddress in newJobs)\n {\n jobs[jobAddress] = this.jobHandles[jobAddress]\n slices[jobAddress] = authorizationMessage.auth.authSlices[jobAddress].length;\n }\n const fetchHandle = {\n fetchStart: fetchStartTime,\n fetchEnd: Date.now(),\n fetchSize: 0, // need change to protocol branch to efficiently implement\n jobs: jobs,\n slices\n };\n /**\n * The 'fetch' event fires when the supervisor has found work from the task distributor.\n * @link https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n * @event Supervisor#fetch\n */\n this.worker.emit('fetch', fetchHandle);\n\n } catch (error) {\n this.worker.emit('fetch', error);\n debugging('supervisor') && console.debug(`Supervisor.fetchTask failed!: error: ${error}`);\n }\n }\n\n /**\n * For each slice in this.queuedSlices, match with a sandbox in the following order:\n * 1. Try to find an already assigned sandbox in this.assignedSandboxes for the slice's job.\n * 2. Find a ready sandbox in this.readiedSandboxes that is unassigned.\n * 3. Ready a new sandbox and use that.\n *\n * Take great care in assuring sandboxes and slices are uniquely associated, viz.,\n * a given slice cannot be associated with multiple sandboxes and a given sandbox cannot be associated with multiple slices.\n * The lack of such uniqueness has been the root cause of several difficult bugs.\n *\n * Note: When a sandbox is paired with a slice, execution is pending and sandbox.allocated=true and\n * sandbox.slice=slice and sandbox.jobAddress=slice.jobAddress. 
This is what 'allocated' means.\n * Immediately upon the exit of sandbox.work, sandbox.allocated=false is set and if an exception\n * wasn't thrown, the paired slice is placed in this.assignedSandboxes.\n * Thus from the pov of supervisor, this.allocatedSandboxes is deterministic and this.workingSandboxes is not.\n * Please try to not use this.workingSandboxes. It is deprecated.\n *\n * The input is numCores, this,queuedSlices, this.assignedSandboxes and this.readiedSandboxes.\n * If there are not enough sandboxes, new readied sandboxes will be created using\n * await this.readySandboxes(...)\n * And it is this await boundary that has caused many bugs.\n * We try not to make assumptions about non-local state across the await boundary.\n *\n * @param {number} numCores - The number of available sandbox slots.\n * @param {boolean} [throwExceptions=true] - Whether to throw exceptions when checking for sanity.\n * @returns {Promise<SandboxSlice[]>} Returns SandboxSlice[], may have length zero.\n */\n async matchSlicesWithSandboxes (numCores, throwExceptions = true) {\n\n const sandboxSlices = [];\n if (this.queuedSlices.length === 0 || this.matching || numCores <= 0) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.matchSlicesWithSandboxes: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return sandboxSlices;\n }\n\n //\n // Oversubscription mitigation.\n // Update when there are less available sandbox slots than numCores.\n // We cannot use this.unallocatedSpace here because its value is artificially low or zero, because in\n // this.distributedQueuedSlices we use the pseudo-mutex trick: this.acquire(howManySandboxSlotsToReserve)/this.release().\n // Note: Do not use this.numberOfCoresReserved outside of a function locked with this.acquire(howManySandboxSlotsToReserve) .\n const checkNumCores = this.numberOfCoresReserved; // # of locked sandbox slots.\n if (numCores > checkNumCores) numCores = checkNumCores;\n if (numCores <= 0) return sandboxSlices;\n\n // Don't ask for more than we have.\n if (numCores > this.queuedSlices.length)\n numCores = this.queuedSlices.length;\n\n // When evaluator goes down, all sandboxes are terminated.\n this.removeTerminatedSanboxes();\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: numCores ${numCores}, queued slices ${this.queuedSlices.length}: assigned ${this.assignedSandboxes.length}, readied ${this.readiedSandboxes.length}, unallocated ${this.unallocatedSpace}, # of sandboxes: ${this.sandboxes.length}`);\n\n if (debugging('supervisor')) {\n dumpSlicesIfNotUnique(this.queuedSlices, 'Warning: this.queuedSlices slices are not unique -- this is ok when slice is rescheduled.');\n dumpSandboxesIfNotUnique(this.readiedSandboxes, 'Warning: this.readiedSandboxes sandboxes are not unique!');\n dumpSandboxesIfNotUnique(this.assignedSandboxes, 'Warning: this.assignedSandboxes sandboxes are not unique!');\n }\n\n // Three functions to validate slice and sandbox.\n function checkSlice(slice, checkAllocated=true) {\n if (!slice.isUnassigned) throw new DCPError(`Slice must be unassigned: ${slice.identifier}`);\n if (checkAllocated && slice.allocated) throw new DCPError(`Slice must not already be allocated: ${slice.identifier}`);\n }\n function checkSandbox(sandbox, isAssigned) {\n if (sandbox.allocated) throw new DCPError(`Assigned 
sandbox must not be already allocated: ${sandbox.identifier}`);\n if (isAssigned && !sandbox.isAssigned) throw new DCPError(`Assigned sandbox is not marked as assigned: ${sandbox.identifier}`);\n if (!isAssigned && !sandbox.isReadyForAssign) throw new DCPError(`Readied sandbox is not marked as ready for assign: ${sandbox.identifier}`);\n }\n\n // Sanity checks.\n if (throwExceptions) {\n this.assignedSandboxes.forEach(sandbox => { checkSandbox(sandbox, true /* isAssigned*/); });\n this.readiedSandboxes.forEach(sandbox => { checkSandbox(sandbox, false /* isAssigned*/); });\n this.queuedSlices.forEach(slice => { checkSlice(slice); });\n } else {\n this.assignedSandboxes = this.assignedSandboxes.filter(sandbox => !sandbox.allocated && sandbox.isAssigned);\n this.readiedSandboxes = this.readiedSandboxes.filter(sandbox => !sandbox.allocated && sandbox.isReadyForAssign);\n this.queuedSlices = this.queuedSlices.filter(slice => !slice.allocated && slice.isUnassigned);\n }\n\n const sandboxKind = {\n assigned: 0,\n ready: 1,\n new: 2,\n };\n\n const ceci = this;\n /**\n * Auxiliary function to pair a sandbox with a slice and mark the sandbox as allocated.\n * An allocated sandbox is reserved and will not be released until the slice completes execution on the sandbox.\n *\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @param {number} kind\n */\n function pair(sandbox, slice, kind) {\n checkSandbox(sandbox, kind === sandboxKind.assigned);\n checkSlice(slice, kind === sandboxKind.assigned);\n slice.allocated = true;\n sandbox.allocated = true;\n sandbox.jobAddress = slice.jobAddress; // So we can know which jobs to not delete from this.cache .\n sandbox.slice = slice;\n\n sandboxSlices.push({ sandbox, slice });\n if (ceci.sliceTiming) slice['pairingDelta'] = Date.now();\n if (debugging('supervisor')) {\n let fragment = 'New readied';\n if (kind === sandboxKind.assigned) fragment = 'Assigned';\n else if (kind === sandboxKind.ready) fragment = 'Readied';\n console.log(`matchSlicesWithSandboxes.pair: ${fragment} sandbox matched ${ceci.dumpSandboxAndSlice(sandbox, slice)}`);\n }\n }\n\n // These three arrays are used to track/store slices and sandboxes,\n // so that when an exception occurs, the following arrays are restored:\n // this.queuedSlices, this.assignedSandboxes, this.realizedSandboxes.\n let slicesToMatch = [];\n let trackAssignedSandboxes = [];\n let trackReadiedSandboxes = [];\n try\n {\n this.matching = true;\n\n let assignedCounter = 0; // How many assigned sandboxes are being used.\n let readyCounter = 0; // How many sandboxes used from the existing this.readiedSandboxes.\n let newCounter = 0; // How many sandboxes that needed to be newly created.\n\n //\n // The Ideas:\n // 1) We match each slice with a sandbox. First we match with assigned sandboxes in the order\n // that they appear in this.queuedSlices. Then we match in-order with existing this.readiedSandboxes\n // Then we match in-order with new new readied sandboxes created through\n // await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n // This allows us to try different orderings of execution of slices. E.g. 
Wes suggested\n // trying to execute slices from different jobs with maximal job diversity -- specifically\n // if there are 3 jobs j1,j2,j3, with slices s11, s12 from j1, s21, s22, s23 from j2 and\n // s31, s32 from j3, then we try to schedule, in order s11, s21, s31, s12, s22, s32, s23.\n //\n // 2) Before matching slices with sandboxes, we allocate available assigned and readied sandboxes\n // and if more are needed then we create and allocate new ones.\n //\n // 3) Finally we match slices with sandboxes and return an array of sandboxSlice pairs.\n //\n // Note: The ordering of sandboxSlices only partially corresponds to the order of this.queuedSlices.\n // It's easy to do. When pairing with assigned sandboxes, any slice in this.queuedSlices which doesn't\n // have an assigned sandbox, will add null to the sandboxSlices array. Then when pairing with readied sandboxes,\n // we fill-in the null entries in the sandboxSlices array.\n //\n\n // Get the slices that are being matched.\n slicesToMatch = this.queuedSlices.slice(0, numCores);\n this.queuedSlices = this.queuedSlices.slice(numCores);\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: slicesToMatch ${this.dumpSlices(slicesToMatch)}`);\n\n // Create object map: jobAddress -> sandboxes with sandboxes.jobAddress === jobAddress .\n const jobSandboxMap = toJobMap(this.assignedSandboxes, sandbox => sandbox);\n \n // Create array to hold slices which do not have assigned sandboxes.\n // These slices will need to be paired with existing and possibly new readied sandboxes.\n // Specifically, the sandboxes from existing this.readiedSandboxes and new sandboxes\n // created through await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n const slicesThatNeedSandboxes = [];\n\n // Pair assigned sandboxes with slices.\n for (const slice of slicesToMatch) {\n const assigned = jobSandboxMap[slice.jobAddress];\n if (assigned && assigned.length > 0) {\n // Pair.\n const sandbox = assigned.pop();\n pair(sandbox, slice, sandboxKind.assigned);\n this.removeElement(this.assignedSandboxes, sandbox);\n // Track.\n trackAssignedSandboxes.push(sandbox);\n assignedCounter++;\n } else {\n // Don't lose track of these slices.\n slice.allocated = true;\n slicesThatNeedSandboxes.push(slice);\n }\n }\n\n // Pair readied sandboxes with slices.\n readyCounter = Math.min(slicesThatNeedSandboxes.length, this.readiedSandboxes.length);\n newCounter = slicesThatNeedSandboxes.length - readyCounter;\n // Track.\n trackReadiedSandboxes = this.readiedSandboxes.slice(0, readyCounter);\n this.readiedSandboxes = this.readiedSandboxes.slice(readyCounter);\n for (const sandbox of trackReadiedSandboxes) {\n // Pair.\n const slice = slicesThatNeedSandboxes.pop();\n pair(sandbox, slice, sandboxKind.ready);\n }\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: assignedCounter ${assignedCounter}, readyCounter ${readyCounter}, newCounter ${newCounter}, numCores ${numCores}`)\n\n // Validate algorithm consistency.\n if (this.debugBuild && assignedCounter + readyCounter + newCounter !== numCores) {\n // Structured assert.\n throw new DCPError(`matchSlicesWithSandboxes: Algorithm is corrupt ${assignedCounter} + ${readyCounter} + ${newCounter} !== ${numCores}`);\n }\n\n // Here is an await boundary.\n // Accessing non-local data across an await boundary may result in the unexpected.\n\n // Create new readied sandboxes to associate with slicesThatNeedSandboxes.\n if (newCounter > 0) {\n // When allocateLocalSandboxes is true, 
this.readySandboxes does not place the new sandboxes\n // on this.readiedSandboxes. Hence the new sandboxes are private and nobody else can see them.\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: creating ${newCounter} new sandboxes, # of sandboxes ${this.sandboxes.length}`);\n const readied = await this.createSandboxes(newCounter, true /* allocateLocalSandboxes*/);\n // Track.\n trackReadiedSandboxes.push(...readied);\n\n for (const sandbox of readied) {\n assert(slicesThatNeedSandboxes.length > 0);\n // Pair\n const slice = slicesThatNeedSandboxes.pop();\n pair(sandbox, slice, sandboxKind.new);\n }\n \n // Put back any extras. There should not be any unless readySandboxes returned less than asked for.\n if (slicesThatNeedSandboxes.length > 0) {\n slicesThatNeedSandboxes.forEach(slice => {\n slice.allocated = false;\n this.queuedSlices.push(slice);\n });\n }\n }\n\n if (debugging()) {\n console.log(`matchSlicesWithSandboxes: Matches: ${ this.dumpSandboxSlices(sandboxSlices) }`);\n this.dumpSandboxSlicesIfNotUnique(sandboxSlices, 'Warning: sandboxSlices; { sandbox, slice } pairs are not unique!');\n }\n } catch (e) {\n // Clear allocations.\n slicesToMatch.forEach(slice => { slice.allocated = false; });\n trackAssignedSandboxes.forEach(sandbox => { sandbox.allocated = false; sandbox.slice = null; });\n trackReadiedSandboxes.forEach(sandbox => { sandbox.allocated = false; sandbox.slice = null; sandbox.jobAddress = null; });\n \n // Filter out redundancies -- there shouldn't be any...\n slicesToMatch = slicesToMatch.filter(slice => this.queuedSlices.indexOf(slice) === -1);\n trackAssignedSandboxes = trackAssignedSandboxes.filter(sb => this.assignedSandboxes.indexOf(sb) === -1);\n trackReadiedSandboxes = trackReadiedSandboxes.filter(sb => this.readiedSandboxes.indexOf(sb) === -1);\n\n // Sanity checks.\n slicesToMatch.forEach(slice => { checkSlice(slice) });\n trackAssignedSandboxes.forEach(sandbox => { checkSandbox(sandbox, true /* isAssigned*/); });\n trackReadiedSandboxes.forEach(sandbox => { checkSandbox(sandbox, false /* isAssigned*/); });\n\n // Restore arrays.\n this.queuedSlices.push(...slicesToMatch);\n this.assignedSandboxes.push(...trackAssignedSandboxes);\n this.readiedSandboxes.push(...trackReadiedSandboxes);\n \n debugging() && console.error('Error in matchSlicesWithSandboxes: Attempting to recover slices and sandboxes.', e);\n return [];\n } finally {\n this.matching = false;\n }\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: allocated ${sandboxSlices.length} sandboxes, queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, # of sandboxes: ${this.sandboxes.length}.`);\n\n return sandboxSlices;\n }\n\n disassociateSandboxAndSlice(sandbox, slice) {\n this.returnSandbox(sandbox);\n sandbox.slice = null;\n this.returnSlice(slice, 'none');\n }\n\n /**\n * This method will call this.startSandboxWork(sandbox, slice) for each element { sandbox, slice }\n * of the array returned by this.matchSlicesWithSandboxes(availableSandboxes) until all allocated sandboxes\n * are working. It is possible for a sandbox to interleave with calling distributeQueuedSlices and leave a sandbox\n * that is not working. 
Moreover, this.queuedSlices may be exhausted before all sandboxes are working.\n * @returns {Promise<void>}\n */\n async distributeQueuedSlices () {\n const numCores = this.unallocatedSpace;\n\n // If there's nothing there, or we're reentering, bail out.\n if (this.queuedSlices.length === 0 || numCores <= 0 || this.matching) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.distributeQueuedSlices: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return Promise.resolve();\n }\n\n //\n // Use the pseudo-mutex to prevent uncontrolled interleaving with fetchTask,\n // matchSlicesWithSandboxes and distributeQueuedSlices\n let sandboxSlices;\n this.acquire(numCores);\n try {\n sandboxSlices = await this.matchSlicesWithSandboxes(numCores);\n } finally {\n this.release();\n }\n\n debugging('supervisor') && console.log(`distributeQueuedSlices: ${sandboxSlices.length} sandboxSlices ${this.dumpSandboxSlices(sandboxSlices)}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n\n for (let sandboxSlice of sandboxSlices) {\n\n const { sandbox, slice } = sandboxSlice;\n try {\n if (sandbox.isReadyForAssign) {\n try {\n let timeoutMs = Math.floor(Math.min(+this.lastAssignFailTimerMs || 0, 10 * 60 * 1000 /* 10m */));\n await a$sleepMs(timeoutMs);\n await this.assignJobToSandbox(sandbox, slice.jobAddress);\n } catch (e) {\n debugging() && console.error(`Supervisor.distributeQueuedSlices: Could not assign slice ${slice.identifier} to sandbox ${sandbox.identifier}.`, e);\n this.worker.emit('error', e);\n this.lastAssignFailTimerMs = this.lastAssignFailTimerMs ? +this.lastAssignFailTimerMs * 1.25 : Math.random() * 200;\n this.disassociateSandboxAndSlice(sandbox, slice);\n continue;\n }\n }\n\n if (!this.lastAssignFailTimerMs)\n this.lastAssignFailTimerMs = Math.random() * 200;\n this.startSandboxWork(sandbox, slice);\n this.lastAssignFailTimerMs = false;\n\n } catch (e) {\n // We should never get here.\n debugging() && console.error(`Supervisor.distributeQueuedSlices: Failed to execute slice ${slice.identifier} in sandbox ${sandbox.identifier}.`, e);\n this.worker.emit('error', e);\n this.disassociateSandboxAndSlice(sandbox, slice);\n }\n }\n }\n\n /**\n *\n * @param {Sandbox} sandbox\n * @param {opaqueId} jobAddress\n * @returns {Promise<void>}\n */\n assignJobToSandbox(sandbox, jobAddress) {\n // Any error will be caught in distributeQueuedSlices.\n return sandbox.assign(jobAddress);\n }\n\n /**\n * Handles reassigning or returning a slice that was rejected by a sandbox.\n * \n * The sandbox will be terminated by this.returnSandbox in finalizeSandboxAndSlice. In this case,\n * if the slice does not have a rejected property already, reassign the slice to a new sandbox\n * and add a rejected property to the slice to indicate it has already rejected once, then set slice = null\n * in the return SandboxSlice so that finalizeSandboxAndSlice won't return slice to scheduler.\n * \n * If the slice rejects with a reason, or has a rejected time stamp (ie. 
has been rejected once already)\n * then return the slice and all slices from the job to the scheduler and\n * terminate all sandboxes with that jobAddress.\n * @param {Sandbox} sandbox \n * @param {Slice} slice\n * @param {string} rejectReason\n * @returns {Promise<SandboxSlice>}\n */\n async handleWorkReject(sandbox, slice, rejectReason) {\n if (!this.rejectedJobReasons[slice.jobAddress])\n this.rejectedJobReasons[slice.jobAddress] = [];\n\n this.rejectedJobReasons[slice.jobAddress].push(rejectReason); // memoize reasons\n\n debugging() && console.debug('handleWorkReject', rejectReason, slice.rejected, this.rejectedJobReasons, slice.identifier);\n\n // First time rejecting without a reason. Try assigning slice to a new sandbox.\n if (rejectReason === 'false' && !slice.rejected) {\n // Set rejected.\n slice.rejected = Date.now();\n // Schedule the slice for execution.\n this.scheduleSlice(slice, true /* placeInTheFrontOfTheQueue*/, false /* noDuplicateExecution*/);\n \n // Null out slice so this.returnSlice will not be called in finalizeSandboxAndSlice.\n // But we still want this.returnSandbox to terminate the sandbox.\n slice = null;\n } else { // Slice has a reason OR rejected without a reason already and got stamped.\n \n // Purge all slices and sandboxes associated with slice.jobAddress .\n this.purgeAllWork(slice.jobAddress);\n // Clear jobAddress from this.cache .\n this.cleanJobCache({});\n\n // Add to array of rejected jobs.\n let rejectedJob = {\n address: slice.jobAddress,\n reasons: this.rejectedJobReasons[slice.jobAddress],\n }\n this.rejectedJobs.push(rejectedJob);\n\n if (slice.rejected)\n this.worker.emit('warning', `Supervisor.handleWorkReject: The slice ${slice.identifier} was rejected twice.`);\n else\n this.worker.emit('warning', `Supervisor.handleWorkReject: The slice ${slice.identifier} was rejected with reason ${rejectReason}.`);\n this.worker.emit('warning', ' All slices with the same jobAddress returned to the scheduler.');\n this.worker.emit('warning', ' All sandboxes with the same jobAddress are terminated.');\n //\n // this.purgeAllWork(jobAddress) terminates all sandboxes with jobAddress,\n // and it also returns to scheduler all slices with jobAddress.\n // Therefore null out slice and sandbox so finalizeSandboxAndSlice doesn't do anything.\n // \n sandbox = null;\n slice = null;\n }\n return { sandbox, slice };\n }\n\n /**\n * Schedule the slice to be executed.\n * If slice is already executing and noDuplicateExecution is true, return the slice with reason.\n * @param {Slice} slice\n * @param {boolean} [placeInTheFrontOfTheQueue=false]\n * @param {boolean} [noDuplicateExecution=true]\n * @param {string} [reason]\n */\n scheduleSlice(slice, placeInTheFrontOfTheQueue = false, noDuplicateExecution = true, reason) {\n // When noDuplicateExecution, if slice is already executing, do nothing.\n let allocatedSlices = [];\n if (noDuplicateExecution)\n allocatedSlices = this.allocatedSlices;\n\n if (!allocatedSlices.indexOf(slice)) {\n // Reset slice state to allow execution.\n slice.status = SLICE_STATUS_UNASSIGNED;\n slice.allocated = false;\n // Enqueue in the to-be-executed queue.\n if (placeInTheFrontOfTheQueue) this.queuedSlices.unshift(slice);\n else this.queuedSlices.push(slice);\n }\n }\n\n /**\n * Purge all slices and sandboxes with this jobAddress.\n * @param {string} jobAddress\n * @param {boolean} [onlyPurgeQueuedAndAllocated=false]\n */\n purgeAllWork(jobAddress, onlyPurgeQueuedAndAllocated = false) {\n // Purge all slices and sandboxes associated with 
jobAddress .\n const deadSandboxes = this.sandboxes.filter(sandbox => sandbox.jobAddress === jobAddress);\n\n if (deadSandboxes.length > 0) {\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): sandboxes purged ${deadSandboxes.map(s => s.id)}, # of sandboxes ${this.sandboxes.length}`);\n deadSandboxes.forEach(sandbox => this.returnSandbox(sandbox));\n }\n\n let deadSlices;\n if (onlyPurgeQueuedAndAllocated) {\n deadSlices = this.queuedSlices.filter(slice => slice.jobAddress === jobAddress);\n if (deadSlices.length > 0 || this.allocatedSlices.length > 0)\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): dead queuedSlices ${deadSlices.map(s => s.sliceNumber)}, dead allocatedSlices ${this.allocatedSlices.map(s => s.sliceNumber)}`);\n deadSlices.push(...this.allocatedSlices);\n } else {\n deadSlices = this.slices.filter(slice => slice.jobAddress === jobAddress);\n }\n\n if (deadSlices.length > 0) {\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): slices purged ${deadSlices.map(s => s.sliceNumber)}, # of sandboxes ${this.sandboxes.length}`);\n this.returnSlices(deadSlices);\n this.removeQueuedSlices(deadSlices);\n }\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): Finished: slices ${this.slices.length}, queuedSlices ${this.queuedSlices.length}, assigned ${this.assignedSandboxes.length}, readied ${this.readiedSandboxes.length}, # of sandboxes ${this.sandboxes.length}`);\n }\n\n /**\n * Gives a slice to a sandbox which begins working. Handles collecting\n * the slice result (complete/fail) from the sandbox and submitting the result to the scheduler.\n * It will also return the sandbox to @this.returnSandbox when completed so the sandbox can be re-assigned.\n *\n * @param {Sandbox} sandbox - the sandbox to give the slice\n * @param {Slice} slice - the slice to distribute\n * @returns {Promise<void>} Promise returned from sandbox.run\n */\n async startSandboxWork (sandbox, slice) {\n var startDelayMs, reason = 'unknown';\n\n try {\n slice.markAsWorking();\n } catch (e) {\n // This will occur when the same slice is distributed twice.\n // It is normal because two sandboxes could finish at the same time and be assigned the\n // same slice before the slice is marked as working.\n debugging() && console.debug('startSandboxWork: slice.markAsWorking exception:', e);\n return Promise.resolve();\n }\n\n // sandbox.requiresGPU = slice.requiresGPU;\n // if (sandbox.requiresGPU) {\n // this.GPUsAssigned++;\n // }\n\n if (this.startSandboxWork_beenCalled)\n startDelayMs = 1000 * (this.tuning.minSandboxStartDelay + (Math.random() * (this.tuning.maxSandboxStartDelay - this.tuning.minSandboxStartDelay)));\n else {\n startDelayMs = 1000 * this.tuning.minSandboxStartDelay;\n this.startSandboxWork_beenCalled = true;\n }\n\n try {\n debugging() && console.log(`startSandboxWork: Started ${this.dumpStatefulSandboxAndSlice(sandbox, slice)}, sandboxes: ${this.sandboxes.length}, allocated slices ${this.allocatedSlices.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`, Date.now() - this._t0);\n if (this.sliceTiming) {\n slice['pairingDelta'] = Date.now() - slice['pairingDelta'];\n slice['executionDelta'] = Date.now();\n }\n let result;\n try {\n result = await sandbox.work(slice, startDelayMs);\n } finally {\n sandbox.allocated = false;\n slice.allocated = false;\n }\n if (this.sliceTiming) {\n slice['executionDelta'] = Date.now() - 
slice['executionDelta'];\n slice['resultDelta'] = Date.now();\n }\n slice.collectResult(result, true);\n // In watchdog, all sandboxes in working state, have their slice status sent to result submitter.\n // However, this can happen after the sandbox/slice has already sent results\n // to result submitter, in which case, the activeSlices table has already removed the row\n // corresponding to slice and hence is incapable of updating status.\n sandbox.changeWorkingToAssigned();\n this.assignedSandboxes.push(sandbox);\n debugging() && console.log(`startSandboxWork: Finished ${this.dumpStatefulSandboxAndSlice(sandbox, slice)}, sandboxes: ${this.sandboxes.length}, allocated slices ${this.allocatedSlices.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n } catch(error) {\n let logLevel;\n\n if (error instanceof SandboxError) {\n logLevel = 'warn';\n // The message and stack properties of error objects are not enumerable,\n // so they have to be copied into a plain object this way\n const errorResult = Object.getOwnPropertyNames(error).reduce((o, p) => {\n o[p] = error[p]; return o;\n }, { message: 'Unexpected worker error' });\n slice.collectResult(errorResult, false);\n } else {\n logLevel = 'error';\n // This error was unrelated to the work being done, so just return the slice in the finally block.\n // For extra safety the sandbox is terminated.\n slice.result = null;\n slice.status = SLICE_STATUS_FAILED;\n }\n\n let errorString;\n switch (error.errorCode) {\n case 'ENOPROGRESS':\n reason = 'ENOPROGRESS';\n errorString = 'Supervisor.startSandboxWork - No progress error in sandbox.\\n';\n break;\n case 'ESLICETOOSLOW':\n reason = 'ESLICETOOSLOW';\n errorString = 'Supervisor.startSandboxWork - Slice too slow error in sandbox.\\n';\n break;\n case 'EUNCAUGHT':\n reason = 'EUNCAUGHT';\n errorString = `Supervisor.startSandboxWork - Uncaught error in sandbox ${error.message}.\\n`;\n break;\n case 'EFETCH':\n // reason = 'EFETCH'; The status.js processing cannot handle 'EFETCH'\n reason = 'unknown';\n errorString = `Supervisor.startSandboxWork - Could not fetch data: ${error.message}.\\n`;\n break;\n }\n \n const { getenv } = __webpack_require__(/*! ../../common/dcp-env */ \"./src/common/dcp-env.js\");\n // Always display max info under debug builds, otherwise maximal error\n // messages are displayed to the worker, only if both worker and client agree.\n let workerConsole = sandbox.supervisorCache.cache.job[slice.jobAddress].workerConsole;\n const displayMaxInfo = Boolean(getenv('DCP_SUPERVISOR_DEBUG_DISPLAY_MAX_INFO')) || (workerConsole && this.options.allowConsoleAccess);\n\n const errorObject = {\n jobAddress: slice.jobAddress.substr(0,10),\n sliceNumber: slice.sliceNumber,\n sandbox: sandbox.id,\n jobName: sandbox.public ? 
sandbox.public.name : 'unnamed',\n };\n \n if (error.name === 'EWORKREJECT') {\n error.stack = 'Sandbox was terminated by work.reject()';\n const ss = await this.handleWorkReject(sandbox, slice, error.message);\n sandbox = ss.sandbox; slice = ss.slice;\n }\n\n if (!displayMaxInfo && error.errorCode === 'EUNCAUGHTERROR') {\n this.worker.emit('error', `Supervisor.startSandboxWork - Uncaught error in sandbox, could not compute ${errorObject}`);\n } else if (!displayMaxInfo && error.errorCode === 'EPERM_ORIGIN') {\n this.worker.emit('error', `Supervisor.startSandboxWork - Could not fetch data: ${error.message}`);\n } else if (!displayMaxInfo && errorString) {\n this.worker.emit('error', `${errorString}: ${errorObject}`);\n } else if (!displayMaxInfo && error.name === 'EWORKREJECT') {\n this.worker.emit('error', `Supervisor.startSandboxWork - Sandbox rejected work: ${error.message}`);\n } else {\n if (displayMaxInfo && error.stack)\n errorObject.stack += '\\n --------------------\\n' + (error.stack.split('\\n').slice(1).join('\\n'));\n this.worker.emit('error', `Supervisor.startSandboxWork - Sandbox failed: ${error.message}: ${errorObject}`);\n }\n } finally {\n await this.finalizeSandboxAndSlice(sandbox, slice, reason);\n }\n }\n\n /**\n * If slice && slice.result, then call await this.recordResult(slice) and this.returnSandbox(sandbox, slice) will have no effect.\n * If slice && !slice.result, then call this.returnSlice(slice, reason) and then this.returnSandbox(sandbox, slice) which terminates sandbox.\n * If !slice && sandbox, then terminate the sandbox with this.returnSandbox(sandbox, slice) .\n * If !slice && !sandbox, then do nothing.\n * @param {Sandbox} [sandbox]\n * @param {Slice} [slice]\n * @param {string} [reason]\n */\n async finalizeSandboxAndSlice(sandbox, slice, reason) {\n debugging('supervisor') && console.log(`finalizeSandboxAndSlice: sandbox ${sandbox ? sandbox.identifier : 'nade'}, slice ${slice ? slice.identifier : 'nade'}`);\n if (slice) {\n if (slice.result) await this.recordResult(slice, sandbox.sandboxHandle);\n else this.returnSlice(slice, reason);\n }\n // It is possible that sandbox is already terminated\n // Because sandbox.allocated=false as soon as sandbox.work(...) 
completes.\n // But the await at or in finalizeSandboxAndSlice may allow pruneSandboxes to slither in.\n if (sandbox) this.returnSandbox(sandbox, slice);\n }\n\n /**\n * Terminates sandboxes and returns slices.\n * Sets the working flag to false, call @this.work to start working again.\n * \n * If forceTerminate is true: Terminates all sandboxes and returns all slices.\n * If forceTerminate is false: Terminates non-allocated sandboxes and returns queued slices.\n *\n * @param {boolean} [forceTerminate = true] - true if you want to stop the sandboxes from completing their current slice.\n * @returns {Promise<void>}\n */\n async stopWork (forceTerminate = true) {\n debugging('supervisor') && console.log('stopWork(${forceTerminate}): terminating sandboxes and returning slices to scheduler.');\n\n // Do not enter is-screen-saver-active logic in the sandbox handle 'end' event handler.\n this.pauseSandboxHandleEndHandler = true;\n\n const jobs = [];\n for (const slice of this.slices)\n if (jobs.indexOf(slice.jobAddress) === -1)\n jobs.push(slice.jobAddress);\n \n if (forceTerminate) {\n while (this.sandboxes.length) {\n this.returnSandbox(this.sandboxes[0], null);\n }\n\n await this.returnSlices(this.slices).then(() => {\n this.queuedSlices.length = 0;\n });\n } else {\n // Only terminate idle sandboxes and return only queued slices\n let idleSandboxes = this.sandboxes.filter(w => !w.allocated);\n for (const sandbox of idleSandboxes) {\n this.returnSandbox(sandbox, null);\n }\n\n await this.returnSlices(this.queuedSlices).then(() => {\n this.queuedSlices.length = 0;\n });\n\n await new Promise((resolve, reject) => {\n let sandboxesRemaining = this.allocatedSandboxes.length;\n if (sandboxesRemaining === 0)\n {\n resolve();\n }\n // Resolve and finish work once all sandboxes have finished submitting their results.\n this.on('result', () => {\n sandboxesRemaining--;\n if (sandboxesRemaining === 0)\n {\n debugging() && console.log('All sandboxes empty, stopping worker and closing all connections');\n resolve();\n }\n });\n });\n }\n\n // Re-enable is-screen-saver-active logic for the sandbox handle 'end' event handler.\n this.pauseSandboxHandleEndHandler = false;\n\n for (const jobAddress of jobs)\n this.jobHandles[jobAddress].emit('flush');\n\n if (this.resultSubmitterConnection) {\n this.resultSubmitterConnection.off('end', this.openResultSubmitterConn);\n this.resultSubmitterConnection.close();\n this.resultSubmitterConnection = null;\n }\n\n if (this.taskDistributorConnection) {\n this.taskDistributorConnection.off('end', this.openTaskDistributorConn);\n this.taskDistributorConnection.close();\n this.taskDistributorConnection = null;\n }\n\n if (this.packageManagerConnection) {\n this.packageManagerConnection.off('end', this.openPackageManagerConn);\n this.packageManagerConnection.close();\n this.packageManagerConnection = null;\n }\n\n if (this.eventRouterConnection) {\n this.eventRouterConnection.off('end', this.openEventRouterConn);\n this.eventRouterConnection.close();\n this.eventRouterConnection = null;\n }\n }\n\n /**\n * Takes a slice and returns it to the scheduler to be redistributed.\n * Usually called when an exception is thrown by sandbox.work(slice, startDelayMs) .\n * Or when the supervisor tells it to forcibly stop working.\n *\n * @param {Slice} slice - The slice to return to the scheduler.\n * @param {string} [reason] - Optional reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @returns {Promise<*>} - Response from the scheduler.\n */\n 
returnSlice (slice, reason) {\n // When sliceNumber === 0 don't send a status message.\n if (slice.sliceNumber === 0) return Promise.resolve();\n \n debugging() && console.log(`Supervisor.returnSlice: Returning slice ${slice.identifier} with reason ${reason}.`);\n\n this.jobHandles[slice.jobAddress].emit('beforeReturn', slice.sliceNumber);\n\n const payload = slice.getReturnMessagePayload(this.workerOpaqueId, reason);\n try\n {\n return this.resultSubmitterConnection.request('status', payload) /* resultSubmitterConnection can be null if worker is stopped */\n .then(response => {\n this.jobHandles[slice.jobAddress].emit('return', slice.sliceNumber);\n return response;\n }).catch(error => {\n this.jobHandles[slice.jobAddress].emit('return', error);\n debugging('supervisor') && console.error('Failed to return slice', {\n sliceNumber: slice.sliceNumber,\n jobAddress: slice.jobAddress,\n status: slice.status,\n error,\n }, 'Will try again on a new connection.');\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: payload });\n this.resultSubmitterConnection?.close();\n });\n }\n catch (error)\n {\n debugging('supervisor') && console.error(`Failed to return slice ${slice.identifier}, no connection to result submitter:`, error);\n }\n }\n\n /**\n * Bulk-return multiple slices, possibly for assorted jobs.\n * Returns slices to the scheduler to be redistributed.\n * Called in the sandbox terminate handler and purgeAllWork(jobAddress)\n * and stopWork(forceTerminate).\n *\n * @param {Slice[]} slices - The slices to return to the scheduler.\n * @returns {Promise<void>} - Response from the scheduler.\n */\n async returnSlices(slices) {\n if (!slices || !slices.length) return Promise.resolve();\n \n const slicePayload = [];\n slices.forEach((slice) => {\n this.jobHandles[slice.jobAddress].emit('beforeReturn', slice.sliceNumber);\n addToReturnSlicePayload(slicePayload, slice);\n });\n this.removeSlices(slices);\n\n debugging('supervisor') && console.log(`Supervisor.returnSlices: Returning slices ${this.dumpSlices(slices)}.`);\n\n return this.resultSubmitterConnection.request('status', {\n worker: this.workerOpaqueId,\n slices: slicePayload,\n }).then(response => {\n slices.forEach((slice) => { this.jobHandles[slice.jobAddress].emit('return', slice.sliceNumber); });\n return response;\n }).catch(error => {\n slices.forEach((slice) => { this.jobHandles[slice.jobAddress].emit('return', error); });\n const errorInfo = slices.map(slice => slice.identifier);\n debugging('supervisor') && console.error('Failed to return slice(s)', { errorInfo, error }, 'Will try again on new connection.');\n // Skipping emitting 'return' event when re-sending returned slices for Sup1.\n // It's done right in Sup2.\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: { worker: this.workerOpaqueId, slices: slicePayload } })\n this.resultSubmitterConnection?.close();\n // Just in case the caller is expecing a DCP response\n return { success: false, payload: {} };\n });\n }\n\n /**\n * Submits the slice results to the scheduler, either to the\n * work submit or fail endpoints based on the slice status.\n * Then remove the slice from the @this.slices cache.\n *\n * @param {Slice} slice - The slice to submit.\n * @param {EventEmitter} sandboxHandle - The sandbox handle associated to the slice.\n * @returns {Promise<void>}\n */\n async recordResult (slice, sandboxHandle) {\n // It is possible for slice.result to be undefined when there are upstream errors.\n if ( !(slice && slice.result))\n throw new 
Error(`recordResult: slice.result is undefined for slice ${slice.identifier}. This is ok when there are upstream errors.`);\n\n if (!(this.options.paymentAddress instanceof Address))\n throw new Error(`options.paymentAddress ${JSON.stringify(this.options.paymentAddress)} must be an instance of Address`);\n\n debugging('supervisor') && console.log(`supervisor: recording result for slice ${slice.identifier}.`);\n\n const jobAddress = slice.jobAddress;\n const sliceNumber = slice.sliceNumber;\n const authorizationMessage = slice.getAuthorizationMessage();\n\n /* @see result-submitter::result for full message details */\n const metrics = { GPUTime: 0, CPUTime: 0, CPUDensity: 0, GPUDensity: 0, InDataSize: 0, OutDataSize: 0 };\n const payloadData = {\n slice: sliceNumber,\n job: jobAddress,\n worker: this.workerOpaqueId,\n paymentAddress: this.options.paymentAddress,\n metrics,\n authorizationMessage,\n }\n\n const timeReport = slice.timeReport;\n if (timeReport && timeReport.total > 0) {\n metrics.GPUTime = timeReport.webGL + timeReport.webGPU;\n metrics.CPUTime = timeReport.CPU;\n metrics.CPUDensity = metrics.CPUTime / timeReport.total;\n metrics.GPUDensity = metrics.GPUTime / timeReport.total;\n metrics.CPUTime = 1 + Math.floor(metrics.CPUTime);\n if (metrics.GPUTime > 0)\n metrics.GPUTime = 1 + Math.floor(metrics.GPUTime);\n\n // Create dummy properties to keep op-router happy.\n metrics.InDataSize = 0;\n metrics.OutDataSize = 0;\n\n const eventMeasurements = {\n elapsed: timeReport.total / 1000,\n CPU: timeReport.CPU / 1000,\n GPU: metrics.GPUTime / 1000,\n in: 0,\n out: 0,\n };\n\n this.jobHandles[slice.jobAddress].emit('metrics', slice.sliceNumber, eventMeasurements);\n sandboxHandle.emit('metrics', slice.sliceNumber, eventMeasurements);\n }\n \n let canceled = false;\n const resultUrl = (slice.resultStorageType !== 'pattern') ? slice.resultStorageDetails : false;\n this.worker.emit('beforeResult', () => { canceled = true; }, resultUrl);\n this.jobHandles[slice.jobAddress].emit('beforeResult', () => { canceled = true; }, resultUrl);\n if (canceled) \n {\n debugging('supervisor') && console.log(`Sup1: User canceled the result submission operation for slice ${slice.identifier}.`);\n this.removeSlice(slice);\n return this.returnSlice(slice, 'Canceled via beforeResult event');\n }\n \n if (!slice.isFinished)\n throw new Error('Cannot record result for slice that is not finished');\n\n if (slice.resultStorageType === 'pattern') /* This is a remote-storage slice. 
*/\n payloadData.result = await this.sendResultToRemote(slice);\n else\n payloadData.result = encodeDataURI(slice.result.result); /* XXXwg - result.result is awful */\n debugging('supervisor') && console.log('Supervisor.recordResult: payloadData.result', payloadData.result.slice(0, 512));\n\n try {\n if (slice.completed) {\n\n /* work function returned a result */\n let resp = await this.resultSubmitterConnection.request(\n 'result',\n payloadData,\n )\n \n if (!resp.success) {\n if (resp.payload && resp.payload.code === 'DCPS-01002') { /* result submitter cannot connect to event router; try again */\n try {\n resp = await this.resendResult(payloadData)\n } catch (error) {\n debugging('supervisor') && console.error(`supervisor - failed to submit result for job ${jobAddress} after ${payloadData.sendRetries} attempts`)\n throw error;\n }\n }\n else\n {\n throw new Error(`failed to submit result for slice ${slice.sliceNumber} of job ${jobAddress}`);\n }\n }\n\n if (false) {}\n\n const paymentAddress = payloadData.paymentAddress.toString(); \n const payment = resp.payload.slicePaymentAmount;\n this.worker.emit('payment', payment, paymentAddress, jobAddress, slice.sliceNumber);\n this.jobHandles[jobAddress].emit('payment', payment, paymentAddress, slice.sliceNumber);\n sandboxHandle.emit('payment', payment, paymentAddress);\n\n /** @todo: Implement size once I/O added. For now its just 0. */\n this.worker.emit('result', resultUrl, 0);\n this.jobHandles[jobAddress].emit('result', resultUrl, 0);\n } else {\n /* slice did not complete for some reason */\n \n // If the slice from a job never completes and the job address exists in the ringBufferofJobs, \n // then we remove it to allow for another slice (from the same job) to be obtained by fetchTask\n this.ringBufferofJobs.buf = this.ringBufferofJobs.filter(element => element !== jobAddress);\n \n await this.returnSlice(slice);\n }\n } catch(error) {\n debugging() && console.info(`1014: Failed to submit results for slice ${payloadData.slice} of job ${payloadData.job}`, error);\n this.worker.emit('result', error);\n this.jobHandles[jobAddress].emit('result', error);\n this.worker.emit('error', error);\n } finally {\n // Remove the slice from the slices array.\n this.removeSlice(slice);\n if (!this.sliceTiming)\n debugging('supervisor') && console.log(`recordResult: Completed slice ${slice.identifier}.`);\n else\n {\n slice['resultDelta'] = Date.now() - slice['resultDelta'];\n console.log(`recordResult(${slice['pairingDelta']}, ${slice['executionDelta']}, ${slice['resultDelta']}): Completed slice ${slice.identifier}.`);\n }\n }\n }\n\n /**\n * Send a work function's result to a server that speaks our DCP Remote Data Server protocol.\n * E.g. 
https://gitlab.com/Distributed-Compute-Protocol/dcp-rds\n *\n * @param {Slice} slice - Slice object whose result we are sending.\n * @returns {Promise<string>}\n * @throws When HTTP status not in the 2xx range.\n */\n sendResultToRemote(slice) {\n return supShared.sendResultToRemote(this, slice);\n }\n}\n\n/**\n * Sandbox has had an error which is not from the work function: kill it\n * and try to redo the slice.\n */\nfunction handleSandboxError(supervisor, sandbox, error) {\n const slice = sandbox.slice;\n\n slice.sandboxErrorCount = (slice.sandboxErrorCount || 0) + 1;\n sandbox.slice = null;\n supervisor.returnSandbox(sandbox); /* terminate the sandbox */\n slice.status = SLICE_STATUS_UNASSIGNED; /* ToT */\n slice.allocated = false;\n debugging() && console.warn(`Supervisor.handleSandboxError: Sandbox ${sandbox.identifier}...(${sandbox.public.name}/${slice.sandboxErrorCount}) with slice ${slice.identifier} had error.`, error);\n supervisor.worker.emit('error', error);\n\n if (slice.sandboxErrorCount <= supervisor.tuning.maxSandboxSliceRetries)\n supervisor.queuedSlices.push(slice);\n else {\n slice.error = error;\n supervisor.returnSlice(slice);\n }\n}\n\n/**\n * Add a slice to the slice payload being built. If a sliceList already exists for the\n * job-status-authMessage tuple, then the slice will be added to that, otherwise a new\n * sliceList will be added to the payload.\n *\n * @param {Object[]} slicePayload - Slice payload being built. Will be mutated in place.\n * @param {Slice} slice - The slice.\n * @param {String} status - Status update, eg. progress or scheduled.\n *\n * @returns {Object[]} mutated slicePayload array\n */\nfunction addToSlicePayload(slicePayload, slice, status) {\n // getAuthorizationMessage helps enforces the equivalence\n // !authorizationMessage <==> sliceNumber === 0\n const authorizationMessage = slice.getAuthorizationMessage();\n if (!authorizationMessage) return;\n\n // Try to find a sliceList in the payload which matches the job, status, and auth message\n let sliceList = slicePayload.find(desc => {\n return desc.job === slice.jobAddress\n && desc.status === status\n && desc.authorizationMessage === authorizationMessage;\n });\n\n // If we didn't find a sliceList, start a new one and add it to the payload\n if (!sliceList) {\n sliceList = {\n job: slice.jobAddress,\n sliceNumbers: [],\n status,\n authorizationMessage,\n };\n slicePayload.push(sliceList);\n }\n\n sliceList.sliceNumbers.push(slice.sliceNumber);\n\n return slicePayload;\n}\n\n/**\n * Add a slice to the returnSlice payload being built. If a sliceList already exists for the\n * job-isEstimation-authMessage-reason tuple, then the slice will be added to that, otherwise a new\n * sliceList will be added to the payload.\n *\n * @param {Object[]} slicePayload - Slice payload being built. Will be mutated in place.\n * @param {Slice} slice - The slice.\n * @param {String} [reason] - Optional reason to further characterize status; e.g. 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n *\n * @returns {Object[]} mutated slicePayload array\n */\nfunction addToReturnSlicePayload(slicePayload, slice, reason) {\n // getAuthorizationMessage helps enforces the equivalence\n // !authorizationMessage <==> sliceNumber === 0\n const authorizationMessage = slice.getAuthorizationMessage();\n if (!authorizationMessage) return;\n\n if (!reason) reason = slice.error ? 
'EUNCAUGHT' : 'unknown';\n\n // Try to find a sliceList in the payload which matches the job, status, and auth message\n let sliceList = slicePayload.find(desc => {\n return desc.job === slice.jobAddress\n && desc.isEstimationSlice === slice.isEstimationSlice\n && desc.authorizationMessage === authorizationMessage\n && desc.reason === reason;\n });\n\n // If we didn't find a sliceList, start a new one and add it to the payload\n if (!sliceList) {\n sliceList = {\n job: slice.jobAddress,\n sliceNumbers: [],\n status: 'return',\n isEstimationSlice: slice.isEstimationSlice,\n authorizationMessage,\n reason,\n };\n slicePayload.push(sliceList);\n }\n\n sliceList.sliceNumbers.push(slice.sliceNumber);\n\n return slicePayload;\n}\n\n/**\n * Return DCPv4-specific connection options, composed of type-specific, URL-specific, \n * and worker-specific options, any/all of which can override the dcpConfig.dcp.connectOptions.\n * The order of precedence is the order of specificity.\n */\nfunction connectionOptions(url, label) {\n return leafMerge(/* ordered from most to least specific */\n dcpConfig.supervisor.dcp.connectionOptions.default,\n dcpConfig.supervisor.dcp.connectionOptions[label],\n dcpConfig.supervisor.dcp.connectionOptions[url.href]);\n}\n\nexports.Supervisor = Supervisor;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor.js?");
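
The returnSlices() path in the module above batches returned slices through addToReturnSlicePayload, which groups them into per-job sliceLists keyed by job address, isEstimationSlice, authorization message, and reason, and then posts the whole array as a single 'status' request ({ worker, slices: slicePayload }). A minimal standalone sketch of that grouping, using hypothetical stand-in slice objects (the real Slice class carries far more state than shown here):

    // Sketch only: fakeSlice objects are hypothetical stand-ins for the real Slice class.
    const payload = [];
    const fakeSlice = (sliceNumber) => ({
      jobAddress: '0xjob1',
      sliceNumber,
      isEstimationSlice: false,
      error: null,
      getAuthorizationMessage: () => 'auth-for-0xjob1',
    });

    for (const slice of [fakeSlice(3), fakeSlice(7)]) {
      const authorizationMessage = slice.getAuthorizationMessage();
      const reason = slice.error ? 'EUNCAUGHT' : 'unknown';
      // Find an existing sliceList matching the job/isEstimation/auth/reason tuple...
      let sliceList = payload.find((d) =>
        d.job === slice.jobAddress &&
        d.isEstimationSlice === slice.isEstimationSlice &&
        d.authorizationMessage === authorizationMessage &&
        d.reason === reason);
      // ...or start a new one, exactly as addToReturnSlicePayload does above.
      if (!sliceList) {
        sliceList = { job: slice.jobAddress, sliceNumbers: [], status: 'return',
                      isEstimationSlice: slice.isEstimationSlice, authorizationMessage, reason };
        payload.push(sliceList);
      }
      sliceList.sliceNumbers.push(slice.sliceNumber);
    }

    console.log(payload);
    // -> [ { job: '0xjob1', sliceNumbers: [ 3, 7 ], status: 'return',
    //        isEstimationSlice: false, authorizationMessage: 'auth-for-0xjob1', reason: 'unknown' } ]

Both slices share the same key tuple, so they collapse into one sliceList; slices from other jobs, or with a different failure reason, would each get their own entry in the payload array.
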
4662
4662
 
4663
4663
  /***/ }),
4664
4664
 
@@ -4702,7 +4702,7 @@ eval("\n/**\n * @file dcp/src/dcp-client/worker/supervisor2/delay-manager.js\n *
4702
4702
  /***/ ((__unused_webpack_module, exports) => {
4703
4703
 
4704
4704
  "use strict";
4705
- eval("\n/**\n * @file dcp-client/worker/supervisor2/evaluator-manager.js\n *\n * A support class for Supervisor2.\n * It is for managing when the evaluator goes and comes back.\n * The screensaver worker is the main scenario.\n *\n * @author Paul, paul@distributive.network\n * @date Feb-April 2023,\n * @module evaluator-manager\n * @copyright Copyright (c) 2018-2023, Distributive Corp. All Rights Reserved\n */\n\n// @ts-check\n\n\n/**\n * Evaluator down management.\n **/\nclass EvaluatorManager\n{\n constructor()\n {\n /**\n * SAVE: WIll use soon.\n * Possible states for EvaluatorManager\n * Transitions:\n * normal -> shuttingDown -> down -> normal\n * The recovering state is currently unused.\n * @type {Object<string, number>}\n */\n this.state = {\n normal: 0,\n // When true, screensaver or evaluator is in the process of shutting down.\n // The sandbox.evaluator handle 'end' event has been seen.\n shuttingDown: 1,\n // The sandbox terminated-handler is unable to create a new sandbox.\n // Therefore evaluator is down.\n down: 2,\n // After evaluator down it is restarted and sandboxes can be created again.\n recovering: 3,\n };\n /**\n * Indicate the state of evaluator restarting.\n */\n this.reallyDown = false;\n /**\n * When true, screensaver or evaluator is in the process of shutting down.\n * The sandbox.evaluator handle 'end' event has been seen.\n * @type {boolean}\n */\n this.shuttingDown = false;\n /**\n * Used to mitigate terminated handler being called too many times.\n * @type {boolean}\n */\n this.downInterlock = false;\n /**\n * The # of spinning createSandbox instances.\n * @type {number}\n */\n this.createSandboxRefCount = 0;\n }\n /**\n * Indicates whether the evaulator is down or in the process of shutting down.\n * @type {boolean}\n */\n get down ()\n {\n return this.reallyDown || this.shuttingDown\n }\n}\nexports.EvaluatorManager = EvaluatorManager;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/evaluator-manager.js?");
4705
+ eval("\n/**\n * @file dcp-client/worker/supervisor2/evaluator-manager.js\n *\n * A support class for Supervisor2.\n * It is for managing when the evaluator goes and comes back.\n * The screensaver worker is the main scenario.\n *\n * @author Paul, paul@distributive.network\n * @date Feb-April 2023,\n * @module evaluator-manager\n * @copyright Copyright (c) 2018-2023, Distributive Corp. All Rights Reserved\n */\n\n// @ts-check\n\n\n/**\n * Evaluator down management.\n **/\nclass EvaluatorManager\n{\n constructor()\n {\n /**\n * SAVE: WIll use soon.\n * Possible states for EvaluatorManager\n * Transitions:\n * normal -> shuttingDown -> down -> normal\n * The recovering state is currently unused.\n * @type {Object<string, number>}\n */\n this.state = {\n normal: 0,\n // When true, screensaver or evaluator is in the process of shutting down.\n // The sandbox.evaluator handle 'end' event has been seen.\n shuttingDown: 1,\n // The sandbox terminated-handler is unable to create a new sandbox.\n // Therefore evaluator is down.\n down: 2,\n // After evaluator down it is restarted and sandboxes can be created again.\n recovering: 3,\n };\n /**\n * Indicate the state of evaluator restarting.\n */\n this.reallyDown = false;\n /**\n * When true, screensaver or evaluator is in the process of shutting down.\n * The sandbox.evaluator handle 'end' event has been seen.\n * @type {boolean}\n */\n this.shuttingDown = false;\n /**\n * Used to mitigate terminated handler being called too many times.\n * @type {boolean}\n */\n this.downInterlock = false;\n /**\n * Used to pause the sandbox handle 'end' event handler, which prevents the\n * evaluator shutdown (viz., is-screen-saver-active) logic from executing.\n * @type {boolean}\n */\n this.pauseSandboxHandleEndHandler = false;\n /**\n * The # of spinning createSandbox instances.\n * @type {number}\n */\n this.createSandboxRefCount = 0;\n }\n /**\n * Indicates whether the evaulator is down or in the process of shutting down.\n * @type {boolean}\n */\n get down ()\n {\n return this.reallyDown || this.shuttingDown\n }\n /**\n * Reset all properties to their default state.\n */\n initialize ()\n {\n this.reallyDown = false;\n this.shuttingDown = false;\n this.downInterlock = false;\n this.pauseSandboxHandleEndHandler = false;\n }\n}\nexports.EvaluatorManager = EvaluatorManager;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/evaluator-manager.js?");
4706
4706
 
4707
4707
  /***/ }),
4708
4708
 
@@ -4724,7 +4724,7 @@ eval("/**\n * @file dcp/src/dcp-client/worker/supervisor2/gpu_support.js\n *\n *
4724
4724
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4725
4725
 
4726
4726
  "use strict";
4727
- eval("/**\n * @file dcp/src/dcp-client/worker/supervisor2/index.js\n * Code managing sandboxes, tasks, jobs, and slices within in a DCP Worker.\n * @author Wes Garland, wes@distributive.network\n * Paul, paul@distributive.network\n * @date Dec 2020\n * June 2022, Jan-April 2023\n * @module supervisor\n * @copyright Copyright (c) 2018-2023, Distributive Corp. All Rights Reserved\n */\n/*\n * initial ready reconnecting stopping stopped paused broken\n * |-- ctor ----------------------------------------------------------------------------------------------------------------->\n * |-- work ----------------------------------------------------------------------------------------------------------------->\n * |-- work --------------------------------------------------------------------------------------------------->\n * |-- work -------------------------------------------------------------------------------->\n * |-- work --------------------------------------------------------->\n * |-- work --------------------------------->\n * |-- Worker.pause --------------------------------------------------------------------------->\n * <-- Worker.unpause -------------------------------------------------------------------------|\n * |-- work ----->\n * |-- stopWork ---------------------------->\n * |-- postStopShutdown --->\n * |-- PM.connectTo --> (ProtocolManager)\n * <-- PM.connectTo --| (ProtocolManager)\n * |-- stopWork ------------------------------------------->\n * <-- work -----------------------------------------------------------------------|\n * <-- stopWork -----------------------------------------------------|\n */\n/* global dcpConfig */ // eslint-disable-line no-redeclare\n// @ts-check\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst dcp_timers = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\nconst EventEmitter = __webpack_require__(/*! events */ \"./node_modules/events/events.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { Address } = __webpack_require__(/*! dcp/dcp-client/wallet/eth */ \"./src/dcp-client/wallet/eth.js\");\nconst { Keystore } = __webpack_require__(/*! dcp/dcp-client/wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\");\nconst RingBuffer = __webpack_require__(/*! dcp/utils/ringBuffer */ \"./src/utils/ringBuffer.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst { JobManager } = __webpack_require__(/*! ./job-manager */ \"./src/dcp-client/worker/supervisor2/job-manager.js\");\nconst { Sandbox, SandboxError } = __webpack_require__(/*! ./sandbox2 */ \"./src/dcp-client/worker/supervisor2/sandbox2.js\");\nconst { sliceStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { OriginAccessManager } = __webpack_require__(/*! dcp/dcp-client/worker/origin-access-manager */ \"./src/dcp-client/worker/origin-access-manager.js\");\nconst { a$sleepMs, booley, toJobMap, encodeDataURI, stringify, nextEma } = __webpack_require__(/*! 
dcp/utils */ \"./src/utils/index.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\n\nconst { ModuleCache } = __webpack_require__(/*! ./module-cache */ \"./src/dcp-client/worker/supervisor2/module-cache.js\");\nconst { Promise_any } = __webpack_require__(/*! ./promise_any */ \"./src/dcp-client/worker/supervisor2/promise_any.js\");\nconst { ProtocolManager } = __webpack_require__(/*! ./protocol-manager */ \"./src/dcp-client/worker/supervisor2/protocol-manager.js\");\nconst { EvaluatorManager } = __webpack_require__(/*! ./evaluator-manager */ \"./src/dcp-client/worker/supervisor2/evaluator-manager.js\");\nconst { DelayManager } = __webpack_require__(/*! ./delay-manager */ \"./src/dcp-client/worker/supervisor2/delay-manager.js\");\nconst { Options } = __webpack_require__(/*! ./options */ \"./src/dcp-client/worker/supervisor2/options.js\");\nconst common = __webpack_require__(/*! ./common */ \"./src/dcp-client/worker/supervisor2/common.js\");\nconst { debugBuild, selectiveDebug, selectiveDebug2, minimalDiag, selectiveSupEx } = common;\nconst supShared = __webpack_require__(/*! ../SupShared */ \"./src/dcp-client/worker/SupShared.js\");\nconst { canScheduleGPU } = __webpack_require__(/*! ./gpu_support */ \"./src/dcp-client/worker/supervisor2/gpu_support.js\");\n\n/** @typedef {string} opaqueId */ // 22 character base64 string\n/** @typedef {import('./slice2').Slice} Slice */\n/** @typedef {import('dcp/utils/jsdoc-types').Auth} Auth */\n/** @typedef {import('dcp/utils/jsdoc-types').Body} Body */\n/** @typedef {import('./sandbox2').SandboxHandle} SandboxHandle */\n/** @typedef {import('dcp/utils/jsdoc-types').SliceObj} SliceObj */\n/** @typedef {import('dcp/dcp-client/worker/index').Worker} Worker */\n/** @typedef {import('dcp/utils/jsdoc-types').TDPayload} TDPayload */\n/** @typedef {import('dcp/utils/jsdoc-types').Signature} Signature */\n/** @typedef {import('dcp/utils/jsdoc-types').SliceMessage} SliceMessage */\n/** @typedef {import('dcp/dcp-client/wallet/keystore').Keystore} Keystore */\n/** @typedef {import('dcp/utils/jsdoc-types').SupervisorOptions} SupervisorOptions */\n/** @typedef {import('dcp/protocol-v4/connection/connection').Connection} Connection */\n\n//\n// Index to functionality -- search for '_Idx' to toggle through the index.\n//\n// 1) class Supervisor\n// 2) work, checkCapabilities\n// 3) safeEmit, workerEmit, jobEmit, error, warning, mungeError, jobDescriptor, setState\n// 4) returnAllSlices, postStopShutdown, abort, stopWork, purgeJob\n// 5) roundRobinSlices, makeJobSelectionCursor, handleSandboxWorkError, handleFailedSlice\n// 6) returnSlices, returnSlice, emitProgressReport\n// 7) jobQuanta, repoMan, predictLoad(viz., clairvoyance), waitUntilWorkIsReady, generateWorkerComputeGroups\n// 8) availableSandboxSpace, fetchTask, addTaskToWorkload, fetchFromTD, clearUnusedJobManagersAndModuleCache\n// 9) createSandbox, returnSandbox, hookUpSandboxListeners, pruneSandboxes\n// 10) recordResult, sendToResultSubmitter, sendResultToRemote\n// 11) handleWorkReject\n//\n\n// _Idx\n//\n// class Supervisor\n//\n\n/**\n * Supervisor constructor\n *\n * A supervisor manages the communication with the scheduler, manages sandboxes, and\n * decides which workload should be sent to which sandboxes when.\n *\n * Possible states: 'initial', 'ready', 'reconnecting', 'stopping', 'stopped', 'broken'\n * Start state:\n * - initial\n *\n * Intermediate states:\n * - ready\n * - reconnecting\n * - stopping\n *\n * Terminal states:\n * - stopped\n * - broken\n *\n * 
Valid transitions:\n * - initial -> ready -> reconnecting -> ready\n * - ready -> stopping -> stopped\n * - initial -> broken\n */\nclass Supervisor extends EventEmitter\n{\n /**\n * @constructor\n * @param {Worker} worker\n * @param {Keystore} identity\n * @param {SupervisorOptions} options\n */\n constructor (worker, identity, options)\n {\n super({ captureRejections: false });\n\n if (!(identity instanceof Keystore))\n throw new Error(`identity ${JSON.stringify(identity)} must be an instance of Keystore`);\n\n debugging('supervisor') && console.debug('Supervisor.options', options);\n assert(options === worker.workerOptions);\n \n /** @type {Worker} */\n this.worker = worker;\n /** @type {Keystore} */\n this.identityKeystore = identity;\n /** @type {Options} */\n this.options = new Options(options, worker);\n\n selectiveDebug() && console.debug('Supervisor: cores.cpu, cores.gpu, maxSandboxes', options.cores?.cpu, options.cores?.gpu, this.options.maxSandboxes);\n\n /** @type {ModuleCache} */\n this.moduleCache = new ModuleCache(this);\n\n // Manage delays and exponential backoff.\n this.delayManager = new DelayManager(this, this.options.defaultDelayIncrement);\n\n /* See https://distributive.atlassian.net/browse/DCP-3175 */\n /** @type {OriginAccessManager} */\n this.originManager = OriginAccessManager.construct(this.options.allowOrigins);\n\n /** @type {ProtocolManager} */\n this.dcp4 = new ProtocolManager(this);\n\n /** @type {common.DebuggingTools} */\n this.dbg = new common.DebuggingTools(this);\n\n // Turn on for max speed debugging.\n if (false)\n {}\n\n /** @type { Synchronizer } */\n this.state = new Synchronizer('initial', [ 'initial', 'ready', 'reconnecting', 'stopping', 'stopped', 'paused', 'broken']);\n /** @type {Object<string, JobManager>} */\n this.jobMap = {}; // jobAddress => jobManager\n\n /** @type {JobManager[]} */\n this.jobManagerInventory = common.InventoryArray('jobManagers');\n /** @type {Sandbox[]} */\n this.sandboxInventory = []; // All sandboxes that are being used by the job managers. Makes sure we don't lose sandboxes.\n /** @type {{ next: cbNext, push: cbPush }} */\n this.cursor = null;\n /** @type {number} */\n this.defaultQuanta = 1.0;\n\n /**\n * Evaluator down management.\n **/\n this.evaluator = new EvaluatorManager();\n\n // There are 2 kinds of barriers.\n // 1) fetchTaskBarrier is a barrier for the task fetching from task distributor path.\n // 2) roundRobinBarrier is a barrier for the slice execution path.\n /** @type {boolean} */\n this.fetchTaskBarrier = false;\n /** @type {boolean} */\n this.roundRobinBarrier = false;\n\n /** @type {object[]} */\n this.rejectedJobs = [];\n /**\n * An N-slot ring buffer of job addresses. 
Stores all jobs that have had no more than 1 slice run in the ring buffer.\n * Required for the implementation of discrete jobs\n * @type {RingBuffer}\n */\n this.ringBufferofJobs = new RingBuffer(200); // N = 200 should be more than enough.\n /**\n * When true we await waitUntilWorkIsReady until at least 1 job is ready with at least 1 ready slice.\n * waitUntilWorkIsReady\n * @type {boolean}\n */\n this.waitForWork = true;\n /**\n * Last repoMan time stamp.\n * @type {number}\n **/\n this.lastRepoMan = Date.now();\n /**\n * Last prune time stamp.\n * @type {number}\n **/\n this.lastPrune = Date.now();\n /**\n * General time stamp.\n * @type {number}\n **/\n this.lastTime = Date.now();\n /**\n * Fetch started time stamp.\n * @type {number}\n **/\n this.fetchTaskStarted = 0;\n /**\n * The capabilities of a random sandbox.\n * @todo XXXpfr Re-work this once fetchTask uses the capabilities of every sandbox to fetch slices.\n * @type {object}\n */\n this.capabilities = null;\n /**\n * EMA times series of CPUTime + GPUTime over all jobs.\n * Each EMA entry is computed right before calling fetchTask.\n * @type {number}\n */\n this.localTime = 0;\n /**\n * EMA times series of sliceCPUTime + sliceGPUTime over all jobs.\n * Each EMA entry is computed right before calling fetchTask.\n * @type {number}\n */\n this.globalTime = 0;\n /**\n * When this.sliceTiming is set to be true, it displays the timings of a every slice\n * slice['queueingDelta'] = timespan of when slice is passed to jobManager.runQueuedSlice until sandbox.work\n * slice['executionDelta'] = timespan of execution in sandbox\n * slice['resultDelta'] = timespan of when sandbox finishes executing until recordResult completes.\n * @type {boolean}\n */\n this.sliceTiming = false;\n\n try\n {\n // Start up the connections.\n this.dcp4.instantiateAllConnections();\n }\n catch(error)\n {\n this.error('Failed to set up DCP connections:', error);\n this.setState('initial', 'broken');\n this.worker.stop(true).finally(() => { throw error; });\n }\n }\n\n //\n // Compatibility layer between Sup1, Sup2 and the Sup interface exposed by Worker.\n //\n /**\n * Get all sandboxes.\n * @type {Sandbox[]}\n */\n get sandboxes () { return this.sandboxInventory.filter((sandbox) => !sandbox.isTerminated); }\n /**\n * Get all working sandboxes.\n * @type {Sandbox[]}\n */\n get workingSandboxes () { return this.sandboxInventory.filter((sandbox) => sandbox.isWorking); }\n /**\n * Get the number of working sandboxes.\n * @type {number}\n */\n get workingSandboxCount () { return this.workingSandboxes.length; }\n /**\n * Get all slices over all jobs..\n * @type {Slice[]}\n */\n get slices () {\n const slices = [];\n this.jobManagerInventory.forEach((jobManager) => { slices.push(...jobManager.sliceInventory); });\n return slices;\n }\n /**\n * Get all queued slices over all jobs..\n * @type {Slice[]}\n */\n get queuedSlices () {\n const slices = [];\n this.jobManagerInventory.forEach((jobManager) => { slices.push(...jobManager.queuedSlices); });\n return slices;\n }\n /**\n * Get all queued slices over all jobs..\n * @type {Slice[]}\n */\n get workingSlices () {\n const slices = [];\n this.jobManagerInventory.forEach((jobManager) => { slices.push(...jobManager.workingSlices); });\n return slices;\n }\n /** @type {opaqueId} */\n get workerId () { return this.options.workerId; }\n /** @type {opaqueId} */\n set workerId (id) { this.options.workerId = id; }\n get version() { return '2.0.0' }\n /**\n * @deprecated\n * @todo XXXpfr Rip out this sup2/sup1 special-casing 
when we finally kill sup1.\n * @type {boolean}\n */\n get isSupervisor1 () { return false; }\n /**\n * @deprecated\n * @todo XXXpfr Rip out this sup2/sup1 special-casing when we finally kill sup1.\n * @type {boolean}\n */\n get isSupervisor2 () { return true; }\n\n //\n // Miscellaneous properties.\n //\n\n /**\n * Dynamic maxWorkingCores.\n * The maximum number of cores that can be executing slices. Slices are scheduled\n * using density. E.g. suppose a job has GPUDensity is 0 and CPUDensity is 0.5,\n * then 2 slices of this job can be scheduled on a single core.\n * @type {number}\n */\n get maxWorkingCores () { return this.options.cores?.cpu; }\n /**\n * Dynamic maxWorkingGPUs.\n * The maximum number of GPUs that can be executing slices. Slices are scheduled\n * using density. E.g. suppose a job has GPUDensity is 0.5 and CPUDensity is 0.5,\n * then 2 slices of this job can be scheduled on a single GPU core and a single CPU core.\n * @type {number}\n */\n get maxWorkingGPUs () { return this.options.cores?.gpu; }\n /**\n * @deprecated\n * @todo XXXpfr Get rid of this after Sup1 dies.\n */\n get lastDcpsid () { return this.dcp4.lastDcpsid; }\n /**\n * @deprecated\n * @todo XXXpfr Get rid of this after Sup1 dies.\n */\n set lastDcpsid (dcpsid) { this.dcp4.lastDcpsid = dcpsid; }\n /**\n * Indicates whether supervisor is ready for business.\n * @type {boolean}\n */\n get isReady () { return this.worker.working && this.state.is('ready'); }\n /**\n * The # of sandboxes not being used.\n * @type {number}\n */\n get unusedSandboxCount () { return this.options.maxSandboxes - this.workingSliceCount; }\n /**\n * The unused amount of CPU density in the cores.\n * @type {number}\n */\n get unusedCoreSpace () { return this.maxWorkingCores - this.workingSliceDensity; }\n /**\n * The unused amount of GPU density in the cores.\n * Use Math.max(1, this.maxWorkingGPUs) so there's always enough room to schedule\n * a GPU slice when this.workingGPUDensity = 0. 
In RoundRobinSlices we use the accumulated\n * recent history ( canScheduleGPU(maxWorkingGPUs) ) to check whether the average recent\n * density is within this.maxWorkingGPUs.\n * @type {number}\n */\n get unusedGPUSpace () { return Math.max(1, this.maxWorkingGPUs) - this.workingGPUDensity; }\n /** @type {number} */\n get workingSliceDensity ()\n {\n let density = 0;\n for (const jobMan of this.jobManagerInventory)\n density += jobMan.workingSliceDensity;\n return density;\n }\n /** @type {number} */\n get workingGPUDensity ()\n {\n let density = 0;\n for (const jobMan of this.jobManagerInventory)\n density += jobMan.workingGPUDensity;\n return density;\n }\n /** @type {number} */\n get workingSliceCount ()\n {\n let count = 0;\n for (const jobMan of this.jobManagerInventory)\n count += jobMan.workingSliceCount;\n return count;\n }\n /**\n * Compute the estimated time to completion of all work.\n * The time is measured as if there were only a single slice running at a time.\n * workRemaining is the amount of time until completion.\n * @type {number}\n */\n get workRemaining ()\n {\n let workRemaining = 0;\n for (const jobMan of this.jobManagerInventory)\n workRemaining += jobMan.workRemaining;\n return workRemaining;\n }\n\n // _Idx\n //\n // work, checkCapabilities\n //\n\n /**\n * Set up sandboxes and interval timers, then start to search for work.\n * Called in Worker.start().\n * Initial entry point after Worker constructor.\n * We need to start searching for work here to allow starting and stopping a worker.\n */\n work ()\n {\n const abort = async (error) => {\n // May be in a stopping/stopped state, because dcp-worker was hit with ctrl-C.\n this.setState(['ready', 'stopping', 'stopped', 'reconnecting'], 'broken');\n await this.worker.stop(true);\n throw error;\n };\n /* Provide opportunity for calling code to hook ready/error events. */\n dcp_timers.setImmediate(() => {\n try\n {\n // [ 'initial', 'ready', 'reconnecting', 'stopping', 'stopped', 'paused', 'broken' ]\n if (this.state.isNot('initial'))\n {\n if (this.state.in(['ready', 'stopping', 'reconnecting']))\n {\n this.warning(`Supervisor.work was called when supervisor is already ${this.state.valueOf()}.`, 'Please either wait and try again or restart worker.');\n return;\n }\n else if (this.state.is('broken'))\n {\n this.warning(\"Cannot call Supervisor.work when supervisor is in a 'broken' state. 
Please restart worker.\");\n return;\n }\n this.state.set(['stopped', 'paused'], 'initial');\n }\n this.dcp4.instantiateAllConnections();\n\n // Beacon interval timer.\n this.progressReportTimer = dcp_timers.setInterval(() => this.emitProgressReport(), this.options.progressReportInterval);\n // Watchdog: fetchTask-driven interval timer.\n this.watchdogTimer = dcp_timers.setInterval(() => this.fetchTask(), this.options.watchdogInterval);\n\n // Interval timers helps keep workers and localExec alive forever.\n this.progressReportTimer.unref();\n this.watchdogTimer.unref();\n\n if ( false || debugging('supervisor'))\n {\n this.sliceDebuggingTimer = setInterval(() => {\n this.jobManagerInventory.forEach((jobMan) => {\n const { unassigned, ready, reserved, working, workdone, complete, failed, finished } = jobMan.dumpSlices ('RRS', false, false);\n console.debug(`RRS(${jobMan.identifier},${this.unusedSandboxCount},${this.unusedCoreSpace},${this.workingSliceCount},${this.workingSliceDensity}): u/r/rsv/w/wd/c/f/fsh ${unassigned}/${ready}/${reserved}/${working}/${workdone}/${complete}/${failed}/${finished}`, jobMan.identifier, this.sandboxInventory.length);\n });\n }, 30 * 1000);\n if (this.sliceDebuggingTimer.unref)\n this.sliceDebuggingTimer.unref();\n }\n\n this.state.set('initial', 'ready');\n\n // Create 1 sandbox now to get the capabilities which are sent to Task Distributor by fetchTask.\n this.createSandbox()\n .then((sandbox) => {\n this.sandboxInventory.push(sandbox);\n debugging('supervisor') && console.debug('work() after createSandbox', this.sandboxInventory.length, sandbox.identifier, Date.now() - this.lastTime, this.options.watchdogInterval);\n this.fetchTask() // Don't wait for watchdog.\n .catch (async (error) => {\n this.error('work() failed when calling fetchTask', error);\n await abort(error);\n });\n })\n .catch(async (error) => {\n this.error('work() failed when calling createSandbox, exiting...', error);\n await abort(error);\n });\n }\n catch(error)\n {\n this.error('work() failed', error);\n if (this.state.is('initial')) this.state.set('initial', 'broken');\n else if (!this.state.is('broken')) this.setState('ready', 'broken');\n this.worker.stop(true).finally(() => { throw error; });\n }\n });\n }\n\n /** Construct capabilities when necessary. 
*/\n checkCapabilities (sandbox)\n {\n /**\n * Assign the capabilities of one the sandboxes before fetching slices from the scheduler.\n * @todo XXXpfr Re-work this once fetchTask uses the capabilities of every sandbox to fetch slices.\n */\n this.capabilities = sandbox.capabilities;\n if (DCP_ENV.isBrowserPlatform && this.capabilities.browser)\n this.capabilities.browser.chrome = DCP_ENV.isBrowserChrome;\n\n debugging('supervisor') && console.debug('Supervisor.checkCapabilities computed', Date.now() - this.lastTime);\n }\n\n // _Idx\n //\n // safeEmit, workerEmit, jobEmit,\n // error, warning, mungeError, jobDescriptor, setState\n //\n\n /**\n * Safe event emitter.\n * @param {EventEmitter} emitter\n * @param {string} event\n * @param {...any} args\n */\n safeEmit(emitter, event, ...args)\n {\n try\n {\n emitter.emit(event, ...args);\n }\n catch (error)\n {\n this.error(`Event handler for event ${event} threw an exception`, error);\n }\n }\n\n /**\n * Safe event emitter on worker.\n * @param {string} event\n * @param {...any} args\n */\n workerEmit(event, ...args)\n {\n this.safeEmit(this.worker, event, ...args);\n }\n\n /**\n * Safe event emitter on slice.jobHandle.\n * @param {Slice} slice\n * @param {string} event\n * @param {...any} args\n */\n jobEmit(slice, event, ...args)\n {\n this.safeEmit(slice.jobHandle, event, ...args);\n }\n\n /**\n * Error feedback to user.\n * @param {string} message\n * @param {Array<Error>|Error|string} [coreError]\n * @param {string} [additionalInfo]\n * @param {boolean} [supressStack=false]\n */\n error (message, coreError, additionalInfo, supressStack = false)\n {\n const isString = (s) => { return (typeof s === 'string' || s instanceof String); };\n if (coreError instanceof AggregateError)\n coreError = coreError.errors;\n if (Array.isArray(coreError) && coreError.length > 0) // Emit error for every element of array.\n return coreError.flat().forEach((c_err) => this.error(message, c_err, additionalInfo));\n\n debugging('supervisor') && console.debug('Supervisor.error:', message, coreError, additionalInfo);\n if (!message)\n message = 'Supervisor.error called w/o valid message';\n if (additionalInfo)\n {\n if (typeof additionalInfo === 'object')\n // @ts-ignore\n additionalInfo = (additionalInfo instanceof Error) ? 
additionalInfo.message : JSON.stringify(additionalInfo);\n else if (typeof additionalInfo !== 'string')\n additionalInfo = String(additionalInfo);\n\n if (!isString(additionalInfo))\n additionalInfo = additionalInfo.toString();\n if (!coreError)\n coreError = '';\n else if (!isString(coreError))\n coreError = String(coreError);\n }\n\n let dcpError;\n if (additionalInfo)\n dcpError = new DCPError(message, coreError, additionalInfo, supressStack);\n else if (coreError && (coreError instanceof Error))\n dcpError = new DCPError(message, coreError, '', supressStack);\n else\n dcpError = new DCPError(message, '', '', supressStack);\n\n this.worker.emit('error', dcpError);\n }\n\n /**\n * Warning feedback to user.\n * @param {string[]} messages\n */\n warning (...messages)\n {\n debugging('supervisor') && console.debug('Supervisor.warning:', messages);\n if (messages.length < 1)\n messages = [ 'Supervisor.warning called w/o valid message(s)' ];\n messages.forEach((message) => this.worker.emit('warning', message));\n }\n\n /**\n * @deprecated\n * Create new object and copy the interesting properties from error.\n * Only show the stack for debug builds.\n * If timestamp isn't set, assign new Date().\n * @param {{ message }|string|object} error\n * @param {*} [errorCtor]\n * @returns {string|{ message }}\n */\n __mungeError (error, errorCtor)\n {\n if (typeof error === 'string')\n {\n const errorLines = error.split('\\n');\n return common.displayMaxDiagInfo() ? error : errorLines[0];\n }\n\n if (!error || typeof error !== 'object' || !('message' in error) || Array.isArray(error))\n return error;\n\n if (minimalDiag)\n return error.message;\n\n const errorObj = errorCtor ? new errorCtor(error.message) : { message: error.message };\n\n const props = common.displayMaxDiagInfo()\n ? 
[ 'type', 'process', 'name', 'origin', 'info', 'code', 'errorCode', 'operation', 'fileName', 'lineNumber', 'timestamp' ]\n : [ 'code', 'errorCode', 'fileName', 'lineNumber', 'timestamp' ]\n const predCopy = (prop) => {\n if (error[prop])\n errorObj[prop] = error[prop];\n };\n\n props.forEach((prop) => { predCopy(prop); });\n\n if (common.displayMaxDiagInfo())\n {\n predCopy('stack');\n if (errorObj['name'] === 'Error')\n delete errorObj['name'];\n }\n if (!errorObj['timestamp'])\n errorObj['timestamp'] = new Date();\n\n return errorObj;\n }\n\n /**\n * Get the job descriptor for the appropriate job manager,\n * which is the object value corresponding to jobAddress, in\n * the object returned by getJobsForTask in task-jobs.js.\n * @param {string} jobAddress\n * @returns {object}\n */\n jobDescriptor (jobAddress)\n {\n const jobManager = this.jobMap[jobAddress];\n if (!jobManager)\n throw new Error(`Cannot find the job descriptor corresponding to jobAddress ${jobAddress}`);\n return jobManager.jobMessage;\n }\n\n /**\n * Protect this.state when transitioning from currState -> nextState\n * It's dangerous to place this.state.set in a catch block with this.error or this.warning\n * because an uncaught exception will kill process before emitting the event-based diagnostic.\n * @param {string|string[]} currState\n * @param {string} nextState\n */\n setState(currState, nextState)\n {\n try { this.state.set(currState, nextState); }\n catch (e) { this.error('Supervisor.state.set error', e); }\n }\n\n // _Idx\n //\n // returnAllSlices, postStopShutdown, abort\n // stopWork, purgeJob\n //\n\n /** @returns {Promise<*>} */\n returnAllSlices ()\n {\n if (selectiveDebug())\n {\n const activeSlices = this.jobManagerInventory.map((jm) => jm.activeSlices).flat();\n if (activeSlices.length > 0)\n this.warning(`Returning active slices : ${stringify(activeSlices.map((slice) => slice.identifier), -1, 2)}`);\n }\n // The promises are all about returning the slices to the scheduler and there's no reason to await that.\n return Promise.all(this.jobManagerInventory.map((jm) => jm.destroy()));\n }\n\n /** @returns {Promise<*>} */\n postStopShutdown ()\n {\n for (const sandbox of this.sandboxInventory)\n sandbox.terminate(false);\n this.sandboxInventory = [];\n\n // There shouldn't be anything in the job managers, but just to be safe call returnAllSlices.\n // Clear jobManagerInventory, close all connections and set state to 'stopped'.\n return this.returnAllSlices()\n .finally(() => {\n this.jobManagerInventory = common.InventoryArray('jobManagers');\n return this.dcp4.closeConnections()\n .finally (() => {\n if (this.state.isNot('stopped'))\n this.setState('stopping', 'stopped');\n // This log message assume slices were returned to scheduler in a previous operation, which is the only current use case.\n // If we use this function in a different way in the future, update the log message.\n selectiveDebug() && console.debug(`Supervisor.postStopShutdown(${this.state}): terminated all sandboxes and returned all slices to scheduler...`);\n });\n });\n }\n\n /**\n * Stop the worker immediately and return all unfinished slices.\n * @returns {Promise<*>}\n */\n abort ()\n {\n return this.returnAllSlices()\n .finally (() => {\n return this.postStopShutdown();\n });\n }\n\n /**\n * Terminates sandboxes and returns slices.\n * Sets the working flag to false, call @this.work to start working again.\n *\n * If forceTerminate is true: Terminates all sandboxes and returns all slices.\n * If forceTerminate is false: Terminates 
non-working sandboxes and returns initial and ready slices.\n *\n * @param {boolean} [forceTerminate = true] - true if you want to stop the sandboxes from completing their current slice.\n * @returns {Promise<*>}\n */\n async stopWork (forceTerminate = true)\n {\n /** @returns {boolean} */\n const doNotWaitForWork = () => {\n return (this.evaluator.reallyDown || !this.sandboxInventory.filter(sbx => !sbx.isTerminated).length);\n }\n selectiveDebug() && console.debug(`Supervisor.stopWork(${forceTerminate}, ${this.state}): terminating sandboxes and returning slices to scheduler.`);\n\n // [ 'initial', 'ready', 'reconnecting', 'stopping', 'stopped', 'paused', 'broken']\n if (this.state.in(['stopping', 'stopped', 'reconnecting']))\n {\n this.warning(`Supervisor.stopWork was called when supervisor is in state ${this.state.valueOf()}.`, 'Please either wait and try again or restart worker.');\n return;\n }\n else if (this.state.is('initial'))\n {\n this.warning('Cannot call stopWork before worker has started. Please either wait and try again or restart worker.');\n return;\n }\n this.state.set(['ready', 'paused', 'broken'], 'stopping');\n\n this.dcp4.instantiateAllConnections();\n\n if (forceTerminate)\n return this.abort();\n else\n {\n const slicesToReturn = [];\n for (const jm of this.jobManagerInventory)\n slicesToReturn.push(...jm.queuedSlices);\n\n const reason = `stopWork returning all non-finished slices that are not working`;\n this.returnSlices(slicesToReturn, reason);\n\n for (let k = 0; k < 3; k++)\n {\n await new Promise((resolve) => {\n // Count the slices that have been working or close-to-working but haven't submitted results yet.\n let activeSliceCount = 0;\n for (const jm of this.jobManagerInventory)\n activeSliceCount += jm.activeSlices.length;\n // When no active slices we're done.\n if (activeSliceCount === 0)\n resolve();\n // When no work can be completed we return all slices and leave.\n if (doNotWaitForWork())\n {\n this.returnAllSlices();\n resolve();\n }\n selectiveDebug() && console.debug(`StopWork: waiting for ${activeSliceCount} working slices to finish`, k);\n // Resolve and finish stopWork once all sandboxes have finished submitting their results.\n this.worker.on('result', () => {\n selectiveDebug() && console.debug(`StopWork: result handler, activeSliceCount ${activeSliceCount-1}`);\n if (--activeSliceCount === 0)\n {\n this.warning('All sandboxes empty, stopping worker and closing all connections');\n resolve();\n }\n });\n this.on('evalDown', () => {\n this.warning('Evaluator is down.', 'Force return all slices to scheduler, stopping worker and closing all connections.');\n this.returnAllSlices();\n resolve();\n });\n });\n }\n\n for (const jm of this.jobManagerInventory)\n this.safeEmit(jm.jobHandle, 'flush');\n\n if (selectiveDebug())\n {\n console.debug(`stopWork(${this.state.valueOf()}): After waiting for working slices to finish: workingSbxes: ${this.workingSandboxCount}, totalSbxes: ${this.sandboxInventory.length}, jobs: ${this.jobManagerInventory.length}`);\n this.jobManagerInventory.forEach((jm) => {\n console.debug('stopWork job', jm.identifier);\n console.debug(jm.countSliceStr('stopWork'));\n });\n }\n }\n\n return this.postStopShutdown();\n }\n\n /**\n * Purge all traces of the job.\n * @param {JobManager} jobManager\n */\n purgeJob (jobManager)\n {\n selectiveDebug() && console.debug(`Supervisor.purgeJob ${jobManager.identifier}.`);\n // If the slice from a job never completes and the job address exists in the ringBufferofJobs,\n // then we remove 
it to allow for another slice (from the same job) to be obtained by fetchTask\n this.ringBufferofJobs.buf = this.ringBufferofJobs.filter(element => element !== jobManager.address);\n this.jobManagerInventory.delete(jobManager);\n this.moduleCache.removeJob(jobManager.address);\n this.dbg.cleanUpDeadJob(jobManager.address);\n jobManager.destroy();\n }\n\n // _Idx\n //\n // roundRobinSlices, makeJobSelectionCursor, handleSandboxWorkError, handleFailedSlice\n //\n\n /**\n * Round-robin through the job managers, picking 1 slice to run each time.\n * Try to have the same number of working sandboxes for each job.\n * Try to run a slice on every available sandbox.\n *\n * The basic idea behind the scheduling of slices in this implementation is to keep as\n * many slices from different jobs running as possible, so as to reduce the likelihood\n * of resource contention between sandboxes.\n *\n * Slices are scheduled based on the following ruleset:\n * 1) cursor = makeJobSelectionCursor(), then cursor.next() returns a slice chosen as follows.\n * 2) Let concurrency range from 1 to maxWorkingCores.\n * 3) For a given concurrency, let readyJobs be all jobs such that jobMan.workingSliceDensity < concurrency.\n * 4) Do an ascending sort of readyJobs wrt jobMan.emaTotalTime.\n * 5) Pick a slice from the longest job in readyJobs that doesn't have any executing slices.\n * 6) Alternately shift a slice from readySlices vs choose a slice from a random nearly finished job, and remove slice from readySlices.\n * 7) When there are no more almost finished jobs with slices, shift slices from readyJobs.\n * 8) Jobs which have slicePriority set by the task-distributor may have slices chosen ahead of the above algorithm.\n * 9) Jobs with a slicePriority closer to 1 are more likely to be chosen.\n * 10) After finishing concurrency at maxWorkingCores, cursor.next() returns null, so create a new cursor.\n * @returns {Promise<any>}\n */\n roundRobinSlices ()\n {\n //\n // Should we try to put all runSlice promises in an array and return Promise.all(runslice-promises) ?\n //\n try\n {\n /**\n * The amount of space available for the CPU-component of slices to run in sandboxes.\n * If space is 2.5 and there are 6 slices with density 0.4, and there are enough non-working usable\n * sandboxes, then all 6 slices will be scheduled to run.\n * @type {number}\n */\n const unusedCoreSpace = this.unusedCoreSpace;\n /**\n * The number of sandboxes not currently being used.\n * @type {number}\n */\n const unusedSandboxCount = this.unusedSandboxCount;\n /**\n * The amount of space available for the GPU-component of slices to run in sandboxes.\n * @type {number}\n */\n const unusedGPUSpace = this.unusedGPUSpace;\n if (unusedCoreSpace < common.doNotSchedule || this.roundRobinBarrier || unusedSandboxCount < 1)\n {\n selectiveDebug2() && console.debug('RRS: bail early space/barrier/unusedSlots', unusedCoreSpace, this.roundRobinBarrier, unusedSandboxCount);\n return;\n }\n // roundRobinBarrier is a barrier for the slice execution path.\n this.roundRobinBarrier = true;\n if (this.evaluator.down && this.evaluator.createSandboxRefCount > 0)\n return;\n selectiveDebug2() && console.debug('BarrierState:RRS:', this.fetchTaskBarrier, this.roundRobinBarrier);\n\n if (selectiveDebug2())\n {\n this.jobManagerInventory.forEach((jobMan) => {\n const { unassigned, ready, reserved, working, workdone, complete, failed, finished } = jobMan.dumpSlices ('RRS', false, false);\n 
console.debug(`RRS(${jobMan.identifier},${unusedSandboxCount},${unusedCoreSpace}): u/r/rsv/w/wd/c/f/fsh ${unassigned}/${ready}/${reserved}/${working}/${workdone}/${complete}/${failed}/${finished}`, jobMan.identifier, this.sandboxInventory.length);\n });\n }\n\n if ( false || selectiveDebug())\n {\n let totalReady = 0, totalReadyDensity = 0;\n for (const jobMan of this.jobManagerInventory)\n {\n const currentReady = jobMan.readySlices.length;\n const currentReadyDensity = jobMan.readySlices.length * jobMan.estimateDensity;\n totalReady += currentReady;\n totalReadyDensity += currentReadyDensity;\n console.debug(`RRS: job ${jobMan.identifier}, density ${jobMan.estimateDensity}, readySlices ${currentReady}, readyDensity ${currentReadyDensity}`);\n }\n console.debug(`RRS: space ${unusedCoreSpace}, unusedSandboxCount ${unusedSandboxCount}, totalReady ${totalReady}, totalReadyDensity ${totalReadyDensity}`);\n }\n\n /** @type {Slice[]} */\n const slices = [];\n /** @type {number} */\n let density = 0;\n /** @type {number} */\n let gpuDensity = 0;\n\n const isSpaceAvailable = (density) => {\n const result = density < unusedCoreSpace && slices.length < unusedSandboxCount;\n selectiveDebug2() && console.debug('RRS: isSpaceAvailable', density < unusedCoreSpace, slices.length < unusedSandboxCount);\n return result;\n }\n\n // When the cursor is almost done and RRS tries to schedule slices,\n // it makes sense to recreate the cursor once to ensure enough slices can be pulled from cursor.\n let recreateCursorCount = 0;\n\n while (isSpaceAvailable(density + common.schedulingSlop))\n {\n // Get existing cursor or create new one.\n if (!this.cursor)\n this.cursor = this.makeJobSelectionCursor();\n\n // Get the next slice, then check to see whether it can be used.\n const slice = this.cursor.next();\n if (!slice)\n {\n if (/*!okToSchedule ||*/ ++recreateCursorCount > 1)\n {\n this.cursor = null;\n break;\n }\n // Start a new cursor.\n this.cursor = this.makeJobSelectionCursor();\n continue;\n }\n let okToSchedule = true\n const job = slice.jobManager;\n density += job.estimateDensity;\n if (job.useGPU)\n {\n okToSchedule = canScheduleGPU(this.maxWorkingGPUs);\n if (okToSchedule)\n {\n gpuDensity += job.estimateGPUDensity;\n okToSchedule = (gpuDensity <= unusedGPUSpace);\n selectiveDebug2() && console.debug(`RRS: GPU scheduling(${okToSchedule},${this.workingSliceCount},${density.toFixed(7)},${unusedCoreSpace.toFixed(7)}): gpuDensity/gpuSpace ${gpuDensity.toFixed(7)}/${unusedGPUSpace.toFixed(7)}, jobGPUDensity/jobCPUDensity ${job.estimateGPUDensity.toFixed(7)}/${job.estimateDensity.toFixed(7)}`);\n }\n }\n if (okToSchedule && density <= unusedCoreSpace + common.schedulingSlop) // Ok, if it's only over by a little bit.\n slices.push(slice);\n else\n {\n slice.unReserve();\n density -= job.estimateDensity;\n if (job.useGPU)\n gpuDensity -= job.estimateGPUDensity;\n else\n this.cursor.push(slice); // If useGPU, then skip pulling a slice from job\n break;\n }\n selectiveDebug2() && console.debug('RRS: density/space/numSlices/unusedSlots/jobDensity', density, unusedCoreSpace, slices.length, unusedSandboxCount, job.estimateDensity);\n }\n\n selectiveSupEx() && density > 0 && console.debug(`roundRobinSlices(${this.workingSliceCount},${this.workingSliceDensity}): Found density ${density.toFixed(7)}/${unusedCoreSpace} with ${slices.length} slices:`, slices.map((slice) => slice.identifier), this.jobManagerInventory.map((jm) => `${jm.identifier}:${jm.estimateDensity.toFixed(7)}:${jm.emaSliceTime.toFixed(0)}`));\n\n // 
Execute the slices.\n if (slices.length > 0)\n {\n const lastSlice = slices.pop();\n for (const slice of slices)\n slice.jobManager.runSlice(slice);\n return lastSlice.jobManager.runSlice(lastSlice);\n }\n }\n finally\n {\n this.roundRobinBarrier = false;\n }\n }\n\n /**\n * @private\n * @callback cbNext\n * @returns {Slice}\n */\n /**\n * @private\n * @callback cbPush\n * @param {Slice} slice\n */\n\n /**\n * Factory function which instantiates a JobSelectionCursor. A JobSelectionCursor\n * steps the order that job slices should be selected for execution in the supervisor,\n * given the current state of the supervisor and the availability of jobs when the\n * inventory was snapshot. The entire slice scheduling algorithm is represented by\n * this cursor.\n *\n * The basic idea behind the scheduling of slices in this implementation is to keep as\n * many slices from different jobs running as possible, so as to reduce the likelihood\n * of resource contention between sandboxes.\n *\n * Slices are scheduled based on the following ruleset:\n * 1) cursor = makeJobSelectionCursor(), then cursor.next() returns a slice chosen as follows.\n * 2) Let concurrency range from 1 to maxWorkingCores.\n * 3) For a given concurrency, let readyJobs be all jobs such that jobMan.workingSliceDensity < concurrency.\n * 4) Do an ascending sort of readyJobs wrt jobMan.emaTotalTime.\n * 5) Pick a slice from the longest job in readyJobs that doesn't have any executing slices.\n * 6) Alternately shift a slice from readySlices vs choose a slice from a random nearly finished job, and remove slice from readySlices.\n * 7) When there are no more almost finished jobs with slices, shift slices from readyJobs.\n * 8) Jobs which have slicePriority set by the task-distributor may have slices chosen ahead of the above algorithm.\n * 9) Jobs with a slicePriority closer to 1 are more likely to be chosen.\n * 10) After finishing concurrency at maxWorkingCores, cursor.next() returns null, so create a new cursor.\n *\n * A custom selection of jobs can be passed in via the argument jobManagers.\n *\n * @param {JobManager[]} [jobManagers]\n * @returns {{ next: cbNext, push: cbPush }}\n */\n makeJobSelectionCursor (jobManagers)\n {\n /* Variables in this scope function as state information for next() */\n /** @type {JobManager[]} */\n var candidateJobs; // The jobs available with slices ready to execute.\n /** @type {JobManager[]} */\n var readyJobs; // The jobs from which slices are selected for a given concurrency level.\n /** @type {JobManager[]} */\n var preferedJobs = []; // Those jobs in readyJobs with a slicePreference property.\n /** @type {JobManager[]} */\n var lowDensityJobs = []; // Jobs with density <= 0.6, will be scheduled again.\n /** @type {Slice[]} */\n var pendingSlices = [];\n /**\n * Upper bound of the sum of the working slices densities allowed for a given job.\n * type {number}\n **/\n var concurrency = 0;\n /** type {number} */\n var jobIdx = 0;\n /** @type {boolean} */\n var lowDensityPass = false;\n\n const that = this;\n if (!jobManagers)\n jobManagers = this.jobManagerInventory;\n\n const jobStateStr = (jobs) => {\n return jobs.map((jm) => `${jm.identifier} : ${jm.readySlices.length} : ${jm.workingSliceDensity} : ${Math.round(jm.emaTotalTime)}`);\n }\n const jobState = (hdr, jobs) => { console.debug(hdr, jobStateStr(jobs)); }\n\n /**\n * Populate readyJobs with jobs which are ready and have at least one slice which is ready,\n * and whose # of working slice density is less than concurrency. 
A reserved slice has a\n * finite lifetime and if exceeded, transition it back to ready.\n * @param {JobManager[]} jobManagers\n * @param {number} concurrency\n */\n function filterJobsAndCheckOldReservedSlices (jobManagers, concurrency) // eslint-disable-line no-shadow\n {\n candidateJobs = [], readyJobs = [];\n const fiveMinutesAgo = Date.now() - that.options.reservedSliceLifetime;\n for (const jobMan of jobManagers)\n {\n if (!jobMan.ready) continue;\n let readyCount = 0;\n for (const slice of jobMan.sliceInventory)\n {\n if (slice.isReady) readyCount++;\n else if (slice.isReserved && fiveMinutesAgo > slice.startTime)\n {\n slice.unReserve();\n readyCount++;\n }\n }\n if (readyCount > 0)\n {\n candidateJobs.push(jobMan);\n if (jobMan.workingSliceDensity < concurrency) readyJobs.push(jobMan);\n }\n }\n }\n\n function seed (concurrency) // eslint-disable-line no-shadow\n {\n /* Reset. */\n jobIdx = 0;\n\n /* Populate readyJobs with jobs which are ready and have at least one slice which is ready,\n and whose # of working slice density is less than concurrency. */\n filterJobsAndCheckOldReservedSlices(jobManagers, concurrency);\n // candidateJobs = jobManagers.filter((jobMan) => jobMan.readySlices.length > 0);\n // readyJobs = candidateJobs.filter((jobMan) => jobMan.workingSliceDensity < concurrency);\n\n if (!lowDensityPass && lowDensityJobs.length === 0)\n lowDensityJobs = jobManagers.filter((jm) => jm.estimateDensity > 0 && jm.estimateDensity <= 0.6);\n\n if (readyJobs.length > 1)\n {\n /* Asc sort by shortest average slice completion time. */\n const shortestSliceJobs = readyJobs.sort((a, b) => Math.round(a.emaTotalTime) - Math.round(b.emaTotalTime));\n const almostDoneIndices = shortestSliceJobs.filter((jm) => jm.almostDone).map((_, idx) => idx);\n readyJobs = [];\n\n /* Find longest job that isn't working. */\n for (let k = shortestSliceJobs.length - 1; k >= 0; k--)\n {\n const jobMan = shortestSliceJobs[k];\n if (jobMan.isNotWorking)\n {\n readyJobs.push(jobMan);\n shortestSliceJobs.splice(k, 1);\n break;\n }\n }\n\n /* Alternate the next shortest slice with a random almost done job. */\n if (almostDoneIndices.length > 0)\n {\n while (shortestSliceJobs.length > 0)\n {\n readyJobs.push(shortestSliceJobs.shift());\n if (almostDoneIndices.length < 1)\n break;\n else\n {\n const almostDoneIdx = almostDoneIndices[Math.floor(Math.random() * almostDoneIndices.length)];\n readyJobs.push(shortestSliceJobs[almostDoneIdx]);\n shortestSliceJobs.splice(almostDoneIdx, 1);\n }\n }\n }\n if (shortestSliceJobs.length > 0)\n readyJobs.push(...shortestSliceJobs);\n }\n /* Populate preferedJobs with jobs from readyJobs which also have a slicePreference set. 
*/\n preferedJobs = candidateJobs.filter((jm) => jm.hasOwnProperty('slicePreference'));\n selectiveDebug2() && jobState(`makeJobSelectionCursor:seed(${concurrency}): readyJobs:`, readyJobs);\n }\n\n /**\n * Each invocation of next() identifies one slice to run, or returns false if none can run.\n * @returns {Slice}\n */\n function next ()\n {\n if (pendingSlices.length > 0)\n {\n const slice = pendingSlices.pop();\n return slice.markAsReserved();\n }\n\n if (concurrency === 0)\n seed(++concurrency);\n\n selectiveDebug2() && console.debug(`makeJobSelectionCursor(cc/idx/ready/working):next(${concurrency},${jobIdx},${readyJobs.length},${that.workingSliceDensity}): maxWorkingCores ${that.maxWorkingCores}: begin`);\n while (true)\n {\n if (jobIdx >= readyJobs.length)\n {\n if (++concurrency > that.maxWorkingCores)\n break;\n seed(concurrency);\n }\n\n if (readyJobs.length < 1)\n {\n if (candidateJobs.length < 1)\n break;\n continue; /* No ready jobs at current concurrency level. */\n }\n\n selectiveDebug2() && console.debug(`makeJobSelectionCursor:next(${concurrency},${jobIdx},${readyJobs.length},${that.workingSliceDensity}): before loop`);\n\n /* Schedule a prefered job slice based on random chance. */\n if (preferedJobs.length > 0)\n {\n let prioRan = Math.random();\n let list = preferedJobs.filter((jm) => jm['slicePreference'] >= prioRan);\n\n if (list.length > 0)\n {\n const jobMan = list[list.length * Math.random()];\n const slice = jobMan.reserveOneSlice();\n if (slice)\n return slice;\n }\n }\n\n /* Schedule a slice from next job; jobs are in increasing order of estimated run time. */\n while (jobIdx < readyJobs.length)\n {\n const jobMan = readyJobs[jobIdx];\n const slice = jobMan.reserveOneSlice();\n if ( false || selectiveDebug2())\n {\n slice && console.debug(`makeJobSelectionCursor:next(${concurrency},${lowDensityPass},${jobIdx},${readyJobs.length},${that.workingSliceDensity}): found slice(slice:ready:working)`, `${slice.identifier} : ${slice.jobManager.readySlices.length} : ${slice.jobManager.workingSliceDensity}`);\n !slice && console.debug(`makeJobSelectionCursor:next(${concurrency},${lowDensityPass},${jobIdx},${readyJobs.length},${that.workingSliceDensity}): no slices ready for job`, jobMan.identifier);\n }\n jobIdx++;\n if (slice)\n return slice;\n }\n\n /*\n * We did not schedule a slice with current seed. We need to re-seed to look for newly-available work\n * and sandboxes, ratcheting up the concurrency (max # of each job running) until we find something.\n */\n selectiveDebug2() && console.debug(`makeJobSelectionCursor:next(${concurrency},${jobIdx},${readyJobs.length},${that.workingSliceDensity}): after loop`);\n }\n if (!lowDensityPass && lowDensityJobs.length > 0)\n {\n jobManagers = lowDensityJobs;\n concurrency = 0;\n lowDensityPass = true;\n return next();\n }\n selectiveDebug2() && console.debug(`makeJobSelectionCursor:next(${concurrency},${jobIdx},${readyJobs.length},${that.workingSliceDensity}): null`, lowDensityPass);\n return null; /* Did not find any more work that fits. */\n }\n function push (slice) { pendingSlices.push(slice); }\n\n return { next, push };\n }\n\n\n /**\n * Handle sandbox.work(...) errors.\n * @todo The orginal code from 2019 did not terminate sandbox when not SandboxError and Sandbox code didn't already terminate. 
Do we want to try that?\n * The old 2019 sandbox code terminated upon error in start, assign, resetState, describe, applyRequirements and work.\n * So maybe that 2019 terminate yoga was a bunch of hooie.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @param {Error} error\n * @returns {string}\n */\n handleSandboxWorkError (sandbox, slice, error)\n {\n if (debugBuild && !(slice.isWorking || slice.isWorkDone)) // Sanity. Exception should never fire.\n throw new Error(`handleSandboxWorkError: slice ${slice.identifier} must be WORKING.`);\n\n /** @type {boolean} */\n const isSandboxError = error instanceof SandboxError;\n /** @type {string} */\n let reason;\n const jobAddress = common.truncateAddress(slice.jobAddress);\n\n if (isSandboxError)\n reason = error['errorCode']\n else\n {\n // This error was unrelated to the work being done.\n reason = 'Slice has failed to complete execution';\n if (!error)\n error = new Error(`Slice ${slice.sliceNumber} in state ${slice.state} of job ${jobAddress} failed to complete execution`);\n }\n selectiveDebug() && console.debug('handleSandboxWorkError', slice.identifier, error);\n\n let errorString, onlyDisplayErrorString = true;\n if (error.name === 'EWORKREJECT')\n {\n reason = 'EWORKREJECT'; // The status.js processing does not have a case for 'EWORKREJECT'.\n errorString = !slice.hasBeenRejected\n ? `Slice rejected work: ${error.message}.`\n : `Slice rejected work twice; terminate job: ${error.message}.`\n error.stack = 'Sandbox was terminated by work.reject()';\n this.handleWorkReject(slice, error);\n }\n else\n {\n if (!this.evaluator.down)\n {\n /** Do we to be more selective when we retry a slice? */\n if (/*!isSandboxError ||*/ slice['useRetryLogic'])\n {\n slice['sandboxErrorCount'] = ( slice['sandboxErrorCount'] ?? 0) + 1;\n if (slice['sandboxErrorCount'] <= this.options.maxSliceRetries)\n slice.resetState(); // Try to reuse the slice.\n }\n }\n if (!slice.isReady)\n {\n selectiveDebug() && console.debug(`handleSandboxWorkError: returning slice ${slice.identifier}`);\n this.returnSlice(slice, reason)\n .finally (() => {\n this.handleFailedSlice(slice, error)\n });\n }\n\n switch (reason)\n {\n case 'ENOPROGRESS':\n errorString = 'No progress error in sandbox.';\n break;\n case 'ESLICETOOSLOW':\n errorString = 'Slice too slow error in sandbox.';\n break;\n case 'EPERM_ORIGIN':\n errorString = `Could not fetch data; origin not allowed: ${error.message}.`;\n break;\n case 'EFETCH':\n errorString = `Could not fetch data: ${error.message}.`;\n break;\n case 'EUNCAUGHT':\n onlyDisplayErrorString = false;\n errorString = `Uncaught error in sandbox: ${error.message}.`;\n break;\n default:\n onlyDisplayErrorString = false;\n errorString = `Slice failed in sandbox: ${error.message}.`;\n break;\n }\n }\n\n // Always terminate sandbox.\n this.returnSandbox(sandbox);\n\n // Always display max info under debug builds, otherwise maximal error.\n // messages are displayed to the worker, only if both worker and client agree.\n const displayMaxInfo = slice.jobManager.displayMaxDiagInfo;\n\n const errorObject = {\n jobAddress,\n sliceNumber: slice.sliceNumber,\n sandbox: sandbox.id,\n jobName: sandbox.public ? 
sandbox.public.name : 'unnamed',\n };\n\n if (!displayMaxInfo && onlyDisplayErrorString)\n this.error(errorString, '', '', true);\n else\n {\n Object.entries(errorObject).forEach(([k,v]) => (errorString += `\\n ${k}: ${v}`));\n this.error(errorString, error, '', true);\n }\n\n return reason;\n }\n\n /**\n * Slice has thrown error during execution:\n * Mark slice as failed, compensate when job is dicrete, emit events.\n * @param {Slice} slice\n * @param {Error} error\n */\n handleFailedSlice (slice, error)\n {\n debugging('supervisor') && console.debug(`handleFailedSlice: ${slice.identifier}`, error);\n slice.collectResult(error, false /*success*/);\n\n // If the slice from a job never completes and the job address exists in the ringBufferofJobs,\n // then we remove it to allow for another slice (from the same job) to be obtained by fetchTask\n this.ringBufferofJobs.buf = this.ringBufferofJobs.filter(element => element !== slice.jobAddress);\n\n this.workerEmit( 'result', error);\n this.jobEmit(slice, 'result', error);\n }\n\n // _Idx\n //\n // returnSlices, returnSlice, emitProgressReport\n //\n\n /**\n * Bulk-return multiple slices, possibly for assorted jobs.\n * Returns slices to the scheduler to be redistributed.\n * Called in the sandbox terminate handler and purgeAllWork(jobAddress)\n * and stopWork(forceTerminate).\n *\n * @param {Slice[]} slices - The slice candidates to check if they can be returned to the scheduler.\n * @param {string} reason - Reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'EPERM_ORIGIN', 'EFETCH', 'unknown'.\n * @returns {Promise<*>} - Response from the scheduler.\n */\n returnSlices (slices, reason)\n {\n /** @param {Slice[]} verifiedSlices */\n const compressPayload = (verifiedSlices) => {\n assert(verifiedSlices?.length > 0);\n if (verifiedSlices.length > 1)\n return {\n worker: this.workerId,\n slices: common.constructReturnSliceBuckets(verifiedSlices, reason),\n };\n return verifiedSlices[0].getReturnMessagePayload(this.workerId, reason);\n }\n\n if (!slices || !slices.length)\n return Promise.resolve();\n\n debugging('supervisor') && console.debug(`Supervisor.returnSlices(${this.state}): Returning slices`, slices.map(slice => slice.identifier));\n\n // Only return those slices which still exist in their respective jobManagers sliceInventory .\n const verifiedSlices = slices.filter((slice) => slice.jobManager.removeSlice(slice));\n if (verifiedSlices.length > 0)\n {\n selectiveSupEx() && console.debug('Supervisor.returnSlices: Returning slices', verifiedSlices.map(slice => slice.identifier));\n return this.dcp4.sliceReturn(compressPayload(verifiedSlices), slices, reason);\n }\n return Promise.resolve();\n }\n\n /**\n * Takes a slice and returns it to the scheduler to be redistributed.\n * Usually called when an exception is thrown by sandbox.work(...) 
.\n * Or when the supervisor tells it to forcibly stop working.\n *\n * @param {Slice} slice - The slice to return to the scheduler.\n * @param {string} reason - Reason for the return: ''ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'EPERM_ORIGIN', 'EFETCH', 'unknown'.\n * @returns {Promise<*>} - Response from the scheduler.\n */\n returnSlice (slice, reason) { return this.returnSlices([ slice ], reason); }\n\n /**\n * Send beacon to status.js for 'progress' and sliceStatus.scheduled.\n *\n * Run in an interval created in the ctor.\n * @returns {void|Promise<*>}\n */\n emitProgressReport ()\n {\n const readySlices = [], workingSlices = [];\n this.jobManagerInventory.forEach((jobManager) => {\n readySlices.push(...jobManager.readySlices);\n workingSlices.push(...jobManager.workingSlices);\n });\n /** @type {SliceObj[]} */\n const slices = common.constructSliceBuckets( readySlices, sliceStatus.scheduled );\n common.constructSliceBuckets( workingSlices, 'progress', slices );\n\n debugging('supervisor') && console.debug('emitProgressReport:', stringify(slices));\n\n if (slices.length > 0)\n {\n const payload = { worker: this.workerId, slices };\n return this.dcp4.safeRSStatus(payload, 'Failed to emit progress report');\n }\n }\n\n // _Idx\n //\n // jobQuanta, repoMan, predictLoad(viz., clairvoyance), waitUntilWorkIsReady, generateWorkerComputeGroups\n //\n\n /**\n * For a given job, the scheduler stores an EMA approximation of slice completion time.\n * However, each worker also tracks the same information and the ratio of local-info to\n * scheduler-info (viz., global-info) is returned by this.jobQuanta so we can tell the\n * task distributor how much work to return from fetchTask so that the work actually takes\n * 5 minutes to complete when using all the worker sandboxes.\n * @returns {Object<string, number>}\n */\n jobQuanta ()\n {\n //\n // Prevent wild swings of this.defaultQuanta, which is roughly the ratio of\n // local_worker_slice_time / jobPerfData_measured_slice_time.\n // We limit this ratio to be between 1/8th and 8.\n const minQuanta = 0.125, maxQuanta = 8.0;\n //\n // Because there will be slgiht differences between local_worker_slice_time and\n // jobPerfData_measured_slice_time even when the worker is the only worker hooked\n // up to the DCP scheduler, we prove a little rounding. The rounding is 1/32 buckets.\n const discreteIncrement = 0.03125, discreteIncrementInverse = 32;\n\n /** @type {Object<string, number>} */\n const quanta = { 0: 1 };\n let averageLocalTime = 0, averageGlobalTime = 0;\n for (const jobMan of this.jobManagerInventory)\n {\n if (jobMan.emaSliceTime > 0 && jobMan.globalTime > 0)\n {\n quanta[jobMan.address] = jobMan.emaSliceTime;\n /** XXXpfr @todo Should we be using TotalTime here? */\n averageLocalTime += jobMan.emaSliceTime;\n averageGlobalTime += jobMan.globalTime;\n selectiveDebug2() && console.debug('jobQuanta: job state', this.dbg.sliceSandboxStr, `l-density/g-density, ${jobMan.estimateDensity}/${jobMan.metrics?.sliceCPUDensity}`, `local/global, ${jobMan.emaSliceTime}/${jobMan.globalTime}`);\n }\n }\n\n if (averageLocalTime && averageGlobalTime)\n {\n /** @todo XXXpfr Add 1 stddev? */\n // alpha=0.1 gives an effective period of 19\n const alpha = 0.1;\n this.localTime = nextEma(this.localTime, averageLocalTime, alpha);\n this.globalTime = nextEma(this.globalTime, averageGlobalTime, alpha);\n this.defaultQuanta = this.localTime / this.globalTime;\n\n // Discretize by discreteIncrement increments.\n this.defaultQuanta = (this.defaultQuanta > 1\n ? 
Math.floor(discreteIncrementInverse * this.defaultQuanta)\n : Math.ceil(discreteIncrementInverse * this.defaultQuanta)) * discreteIncrement;\n\n // Enforce reasonable cap and floor to keep things from getting too crazy.\n this.defaultQuanta = Math.min(Math.max(this.defaultQuanta, minQuanta), maxQuanta);\n\n // Fake jobAddress '0' to represent unknown jobs.\n quanta['0'] = this.defaultQuanta;\n }\n else\n this.defaultQuanta = 1.0;\n\n selectiveDebug() && console.debug(`jobQuanta: defaultQuanta ${quanta['0']}, this.localTime ${this.localTime}/${averageLocalTime}, this.globalTime ${this.globalTime}/${averageGlobalTime}, quanta:`, quanta);\n if (common.debugQuanta())\n {\n console.debug('localRawData:', this.dbg.localRawData);\n console.debug('localData:', this.dbg.localData);\n console.debug('globalData:', this.dbg.globalData);\n }\n return quanta;\n }\n\n /**\n * @todo XXXpfr Should we not schedule long slices to a worker with too low defaultQuanta?\n *\n * When the estimated time to completion of all work is more than\n * repoManMultiplier * targetTaskDuration * this.maxWorkingCores,\n * return slices until the excess is removed.\n * Be fair. Round-robin over all jobs until excess is eliminated.\n * Kill the long jobs 1st.\n */\n repoMan()\n {\n const threshold = this.options.repoManMultiplier * this.options.targetTaskDuration * this.maxWorkingCores;\n const workRemaining = this.workRemaining;\n let excess = workRemaining - threshold;\n selectiveDebug() && console.debug(`repoMan: excess ${excess}, workerRemaining ${workRemaining}, threshold ${threshold}`);\n if (excess > 0)\n {\n const slices = [];\n /** @param {JobManager[]} jmi */\n const returnFrom = (jmi) => {\n while (true)\n {\n const _excess = excess;\n for (const jobMan of jmi)\n {\n const _readySlices = jobMan.readySlices;\n if (_readySlices.length > 0)\n {\n const slice = _readySlices[0];\n slice.repoMan(); // Mark as FINISHED\n slices.push(slice);\n excess -= jobMan.adjSliceTime;\n if (excess <= 0)\n break;\n }\n }\n if (_excess === excess || excess <= 0)\n break;\n }\n }\n // Be fair. 
Round-robin over all jobs until excess is eliminated.\n // Except the long jobs are killed 1st.\n const longJobs = this.jobManagerInventory.filter((jobMan) => jobMan.emaSliceTime >= this.options.targetTaskDuration);\n if (longJobs.length > 0)\n returnFrom(longJobs);\n if (excess > 0)\n returnFrom(this.jobManagerInventory);\n selectiveDebug() && (slices.length > 0) && console.debug(`repoMan: excess ${excess}, workerRemaining ${workRemaining}, threshold ${threshold}, returned-slice-count ${slices.length}`);\n this.returnSlices(slices, 'repoMan');\n }\n }\n\n /**\n * Predict the total reduction in density of working sandboxes timeSpanMs from now.\n * This function is called right before fetchTask, in order to calculate how much space is available.\n * @returns {{ queued: Slice[], working: number }}\n */\n predictLoad()\n {\n const timeSpanMs = this.options.prefetchInterval\n const queued = [];\n let working = 0;\n for (const jobMan of this.jobManagerInventory)\n {\n const { queued: jmQueued, working: jmWorking } = jobMan.predictLoad(timeSpanMs);\n queued.push(...jmQueued);\n // Optimize to short-circuit when queued > 1, because we won't call fetchWork in that case.\n if (queued.length > 1)\n break;\n working += jmWorking;\n }\n selectiveDebug() && console.debug(`Supervisor.predictLoad: queued ${queued.length}/${this.queuedSlices.length}, working ${working}/${this.workingSlices.length}`)\n return { queued, working };\n }\n\n /**\n * On the first call to fetchTask\n * or when the last call to fetchTask found nothing,\n * or when there are no ready slices,\n * wait until at least 1 job is ready with at least 1 ready slice.\n * @param {Array<Promise<any>>} jobManagerPromises\n * @returns {Promise<any>}\n */\n waitUntilWorkIsReady (jobManagerPromises)\n {\n if (this.waitForWork)\n {\n debugging('supervisor') && console.debug(`waitUntilWorkIsReady: promise count ${jobManagerPromises?.length}`);\n this.waitForWork = false;\n // Promise.any is supported in Node 15, Chrome 85, Edge 85, Firefox 79, Safari 14, Opera 71.\n // It was implemented in node and browsers in 2nd half of 2020, so there's a good chance many\n // customers will not have browsers that support it. And currently (Jan. 2023) DCP uses node 14.\n return Promise_any(jobManagerPromises);\n }\n // Flush microtask queue\n return a$sleepMs(0);\n }\n\n /**\n * Generate the workerComputeGroups property of the requestTask message.\n *\n * Concatenate the compute groups object from dcpConfig with the list of compute groups\n * from the supervisor, and remove the public group if accidentally present. Finally,\n * we transform joinSecrets/joinHash into joinHashHash for secure transmission.\n *\n * @note computeGroup objects with joinSecrets are mutated to record their hashes. This\n * affects the supervisor options and dcpConfig. 
Re-adding a joinSecret property\n * to one of these will cause the hash to be recomputed.\n */\n generateWorkerComputeGroups ()\n {\n return supShared.generateWorkerComputeGroups(this, this.dcp4.taskDistributor);\n }\n\n // _Idx\n //\n // availableSandboxSpace, fetchTask, addTaskToWorkload, fetchFromTD, clearUnusedJobManagersAndModuleCache\n //\n\n /**\n * Returns the number of unused sandbox slots to fill -- sent to fetchTask.\n * @param {Slice[]} queued\n * @param {number} working\n * @returns {number}\n */\n availableSandboxSpace (queued, working)\n {\n // If we find more than 1 queued slices, bail early.\n if (queued.length > 1)\n return 0; // We have more than 1 ready slices, no need to fetch.\n\n let longSliceCount = 0;\n if (queued.length < 1)\n this.waitForWork = true; // There are no ready slices.\n else if (queued[0].isLong)\n longSliceCount = 1;\n\n // There are almost no ready slices (there may be 0 or 1), fetch a full task.\n // The task is full, in the sense that it will contain slices whose\n // aggregate execution time is roughly this.maxWorkingCores * 5-minutes.\n // However, there can only be this.maxWorkingCores # of long slices on a worker,\n // Thus we need to know whether the last slice in this.readySlices() is long or not.\n // (A long slice has estimated execution time >= 5-minutes or is an estimation slice.)\n\n const numCores = this.maxWorkingCores - working - longSliceCount;\n selectiveDebug2() && console.debug('availableSandboxSpace', numCores, working, longSliceCount);\n return numCores;\n }\n\n /**\n * Ask the scheduler (task distributor) for work (Rq).\n * @param {object[]} [jobs=[]]\n * @returns {Promise<*>}\n */\n async fetchTask (jobs = [])\n {\n if (!this.isReady)\n return;\n\n const now = Date.now();\n const { queued, working } = this.predictLoad();\n const unusedFutureCoreSpace = this.maxWorkingCores - working;\n if (unusedFutureCoreSpace < common.doNotSchedule)\n {\n debugging('supervisor') && console.debug('fetchTask: There are no unused sandbox slots.', now - this.lastTime);\n return;\n }\n \n // Record fetch start time.\n this.fetchTaskStarted = now;\n\n // We check for pruning about every 25 seconds, or when must prune level is reached.\n if (this.sandboxInventory.length > this.options.mustPruneSandboxLevel\n || now > this.lastPrune + this.options.pruneFrequency)\n {\n this.lastPrune = now;\n this.pruneSandboxes();\n }\n\n // Every 60 seconds check to see if the estimated time to completion of all work is more than\n // repoManMultiplier * this.targetTaskDuration() * this.maxWorkingCores,\n // and then return slices until the excess is removed.\n // Be fair. Round-robin over all jobs until excess is eliminated. 
Kill the long jobs 1st.\n if (now > this.lastRepoMan + this.options.repoManFrequency)\n {\n this.lastRepoMan = now;\n this.repoMan();\n }\n\n // There are 2 barriers wrt fetchTask,\n // 1) fetchTaskBarrier is a barrier for the task fetching from task distributor path.\n // 2) roundRobinBarrier is a barrier for the slice execution path.\n\n try\n {\n const cpuSpaceToFill = this.availableSandboxSpace(queued, working);\n selectiveDebug2() && console.debug('Supervisor.fetchTask', cpuSpaceToFill, queued.length, working);\n if (cpuSpaceToFill < 1)\n {\n debugging('supervisor') && console.debug('Supervisor.fetchTask: Sufficient slices exist, so start executing.', now - this.lastTime, cpuSpaceToFill, queued.length, working);\n return this.roundRobinSlices();\n }\n selectiveDebug2() && console.debug('fetchTask begin q/w/slots/space/future-space', queued.length, working, this.unusedSandboxCount, this.unusedCoreSpace, unusedFutureCoreSpace);\n\n if (this.fetchTaskBarrier)\n return;\n // fetchTaskBarrier is a barrier for the task fetching from task distributor path.\n this.fetchTaskBarrier = true;\n\n /* @todo XXXpfr Think about how to do targetLoad.longSlices better.\n * Ideas:\n * 1) While branchy Javascript is CPU bound, that doesn't mean hyperthreading isn't useful.\n * When a branch is mispredicted the whole CPU instruction pipeline is flushed, which is\n * a huge perf hit and while waiting to fill the pipeline again, a hyperthread can get\n * a whole bunch of work done.\n * 2) Setting the Sup2.cores.cpu to #lCores is probably too much, but I've had great success with\n * Sup2.cores.cpu = dcpConfig.supervisor.tuning.coreRatio.cpu * #lCores\n * Which is very close to optimal throughput of work done.\n * 3) When the scheduler is 1/2 very long slices, the short slices will tend to get starved.\n * The config property dcpConfig.scheduler.preventSliceStarvation when set to true (default false)\n * will always leave one vCore open for short slices in every worker. In the future, I want the\n * scheduler to detect short slice starvation and dynamical turn preventSliceStarvation on until\n * short slice starvation is alleviated and then turn it back off.\n * 4) maxSandboxes is currently set to\n * factor * Sup2.cores.cpu\n * where 1.2 <= factor <= 1.5 depending upon how many lCores a worker has. Where I assume that a\n * machine with a large number of lCores has sufficient memory to handle a bigger factor.\n * The factor boundaries can be adjusted in dcpConfig.supervisor, but I intend to also allow them\n * to be overridden at the dcpConfig.worker level, so if somebody has a 32 lCore machine with\n * 8GB of RAM they can adjust factor to be closer to 1. Ideally we could adjust the factor\n * boundaries at the job/CG level.\n */\n\n const request = {\n supervisor: this.version,\n numCores: cpuSpaceToFill, /** @deprecated This is for legacy schedulers. */\n numGPUs: this.maxWorkingGPUs, /** @deprecated This is for legacy schedulers. 
*/\n targetLoad: { cpu: cpuSpaceToFill, gpu: this.maxWorkingGPUs, longSlices: Math.floor(cpuSpaceToFill) },\n coreStats: this.options.getStatisticsCPU(),\n jobQuanta: this.jobQuanta(),\n capabilities: this.capabilities,\n paymentAddress: this.options.paymentAddress,\n jobAddresses: jobs.concat(this.options.jobAddresses || []), // When set, only fetches slices for these jobs.\n workerComputeGroups: this.generateWorkerComputeGroups(),\n minimumWage: this.options.minimumWage,\n loadedJobs: this.jobManagerInventory.map(jobMan => jobMan.address),\n readyJobs: this.jobManagerInventory.filter(jobMan => jobMan.ready).map(jobMan => jobMan.address),\n previouslyWorkedJobs: this.ringBufferofJobs.buf, // Only discrete jobs.\n rejectedJobs: this.rejectedJobs,\n };\n // Workers should be part of the public compute group by default.\n if (!booley(this.options.leavePublicGroup))\n request.workerComputeGroups.push(constants.computeGroups.public);\n\n debugging('supervisor') && console.debug('fetchTask is calling fetchFromTD', Date.now() - this.lastTime);\n\n // Call Task Distributor and handle response with this.addTaskToWorkload.\n return this.fetchFromTD(request, (response) => this.addTaskToWorkload(request, response));\n }\n catch (error)\n {\n this.fetchTaskBarrier = false;\n this.error('Supervisor.fetchTask failed!', error);\n }\n }\n\n /**\n * Callback for fetchFromTD.\n * @param {object} request\n * @param {object} response\n */\n async addTaskToWorkload (request, response)\n {\n const constructFetchHandle = (size, jobs, slices) => {\n return { \n fetchStart: this.fetchTaskStarted,\n fetchEnd: Date.now(),\n fetchSize: size,\n jobs,\n slices,\n };\n };\n\n try\n {\n /** @type {TDPayload} */\n const payload = response.payload;\n if (!response.success)\n {\n debugging() && console.debug('Task fetch failure; request=', request);\n debugging() && console.debug('Task fetch failure; response=', payload);\n this.error(`Unable to request task from scheduler; will try again on a new connection: payload ${stringify(payload)}`);\n return;\n }\n\n if (!payload.body?.newJobs) // No slices found.\n {\n // Reset first fetch logic.\n this.waitForWork = true;\n /**\n * The 'fetch' event fires when the stask distributor found no work.\n * @link https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n * @event Supervisor#fetch\n */\n this.workerEmit('fetch', constructFetchHandle(0, {}, {}));\n // There may be an extra slice to process.\n debugging('supervisor') && console.debug('Task distributor found no slices...');\n return this.roundRobinSlices();\n }\n\n /** @todo XXXpfr At this poin the line #'s are short by 42 -- figure out why. 
*/\n\n /*\n * payload: { TDPayload }\n * TDPayload: { owner: Address, signature: Signature, auth: Auth, body: Body };\n * Auth: { workerId: string, authSlices: Object<string, SliceMessage[]>, schedulerId: { address: Address }, jobCommissions: Object<string, { rate: number, account: number }> }\n * Body: { newJobs: Object<string, object>, task: Object<string, SliceMessage[]>, computeGroupJobs: Object<string, string[]>, computeGroupOrigins: Object<string, Object<string, string[]>>, schedulerConfig: {{ targetTaskDuration: number }} }\n *\n * NOTE: authorizationMessage has type AuthMessage\n */\n\n const { body, ...authorizationMessage } = payload;\n const { newJobs, task, schedulerConfig } = body;\n const newJobKeys = Object.keys(newJobs);\n const jobCount = newJobKeys.length;\n\n let jobSliceMap = task;\n if (jobSliceMap.length) /** @deprecated Task came from legacy scheduler */\n // @ts-ignore\n jobSliceMap = toJobMap(task, sliceMsg => sliceMsg);\n\n if (schedulerConfig) // Otherwise the default is 300 seconds.\n this.options.targetTaskDuration = schedulerConfig.targetTaskDuration;\n\n /*\n * Ensure all jobs received from the scheduler (task distributor) are:\n * 1. If we have specified specific jobs the worker may work on, the received jobs are in the specified job list\n * 2. If we are in localExec, at most 1 unique job type was received (since localExec workers are designated for only one job)\n * If the received jobs are not within these parameters, stop the worker since the scheduler cannot be trusted at that point.\n */\n if (request.jobAddresses?.length > 0 && !newJobKeys.every((ele) => request.jobAddresses.includes(ele)))\n {\n // \"fetchTask:\" because that should make sense to somebody that doesn't know the internals of Supervisor.\n this.error(\"fetchTask: Worker received slices it shouldn't have; rejecting the work and stopping.\");\n this.stopWork(true);\n return;\n }\n\n // Clear out job managers w/o any queued slices,\n // and remove corresponding job references from module cache.\n // When a cached module no longer has any job references it is removed from the cache.\n this.clearUnusedJobManagersAndModuleCache(newJobs);\n\n /** @todo XXXpfr Figure out how not to construct this every time. 
*/\n this.jobMap = {};\n this.jobManagerInventory.forEach(jobManager => {\n this.jobMap[jobManager.address] = jobManager;\n });\n\n selectiveDebug2() && console.debug(`addTaskToWorkload(${Date.now() - this.lastTime}): newJobs ${common.truncateAddress(newJobKeys)}, jobSliceMap ${common.compressJobMap(jobSliceMap, (s) => s.sliceNumber)}`);\n\n let sliceCount = 0;\n /** @type {Array<Promise<*>>} */\n const jobManagerPromises = [], jobs = {}, slices = {};\n // Populate the job managers with slices, creating new job managers when necessary.\n // Set up discrete job ring buffer.\n for (const [jobAddress, jobMessage] of Object.entries(newJobs))\n {\n /** @type {JobManager} */\n let jobManager;\n const sliceMessages = jobSliceMap[jobAddress];\n sliceCount += sliceMessages.length;\n\n if (this.jobMap.hasOwnProperty(jobAddress))\n {\n jobManager = this.jobMap[jobAddress];\n jobManager.update(jobMessage, sliceMessages, authorizationMessage);\n }\n else\n {\n // Add the slice messages to the job manager ctor, so that slice construction is after job manager is ready.\n jobManager = new JobManager(this, jobMessage, sliceMessages, authorizationMessage);\n this.jobMap[jobAddress] = jobManager;\n this.jobManagerInventory.push(jobManager);\n\n // Populate the ring buffer based on job's discrete property.\n if (jobMessage.requirements.discrete && this.ringBufferofJobs.find(address => address === jobAddress) === undefined)\n this.ringBufferofJobs.push(jobAddress);\n }\n jobs[jobAddress] = jobManager.jobHandle;\n slices[jobAddress] = task[jobAddress].length;\n\n jobManagerPromises.push(jobManager.jobPromise);\n }\n\n const payloadLength = kvin.stringify(payload).length; /** @TODO - fix per DCP-3750 */\n /**\n * The 'fetch' event fires when the supervisor has found work from the task distributor.\n * @link https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n * @event Supervisor#fetch\n */\n this.workerEmit('fetch', constructFetchHandle(payloadLength, jobs, slices));\n\n const compressTask = () => { return common.compressJobMap(authorizationMessage.auth.authSlices); }\n selectiveSupEx() && console.debug(`addTaskToWorkload(${Date.now() - this.lastTime}): task: ${sliceCount}/${request.targetLoad.cpu}/${this.maxWorkingCores}, jobs: ${jobCount}, authSlices: ${compressTask()}, conversion:`, request.jobQuanta);\n\n // On the first call to fetchTask,\n // or when the last call to fetchTask found nothing,\n // or when there are no ready slices,\n // wait until at least 1 job with 1 slice is ready.\n await this.waitUntilWorkIsReady(jobManagerPromises);\n\n debugging('supervisor') && console.debug('addTaskToWorkload: Before calling roundRobinSlices; job states', this.jobManagerInventory.map((jm) => jm.identifier));\n\n // Start working on the new slices.\n return dcp_timers.setImmediate(() => this.roundRobinSlices());\n }\n catch (error)\n {\n this.workerEmit('fetch', error);\n this.error('Supervisor.fetchTask failed!', error);\n }\n finally\n {\n this.fetchTaskBarrier = false;\n }\n }\n\n /**\n * @private\n * @callback cbAddTaskToWorkload\n * @param {Response} response\n * @returns {Promise<void>}\n */\n\n /**\n * Call to fetch new slices from task distributor.\n * @param {*} request\n * @param {cbAddTaskToWorkload} addTaskToWorkload\n * @returns {Promise<any>}\n */\n async fetchFromTD (request, addTaskToWorkload)\n {\n selectiveDebug2() && console.debug('fetchFromTD begin; BarrierState:', this.fetchTaskBarrier, this.roundRobinBarrier);\n // Fetch a new task if we have 
insufficient slices queued, then start workers\n if (!this.fetchTaskBarrier)\n throw new Error('fetchTaskBarrier must be set when entering fetchFromTD.');\n\n this.dcp4.instantiateAllConnections();\n\n let fetchTimeout = dcp_timers.setTimeout(() => {\n this.fetchTaskBarrier = false;\n this.warning('Fetch exceeded timeout, will reconnect at next watchdog interval');\n this.dcp4.resetConnection('taskDistributor').catch(error => {\n this.error('Failed to close task-distributor connection', error);\n });\n this.dcp4.resetConnection('resultSubmitter').catch(error => {\n this.error('Failed to close result-submitter connection', error);\n });\n this.dcp4.instantiateAllConnections();\n }, 3 * 60 * 1000); // Max out at 3 minutes to fetch.\n // Allow workers and localExec to exit.\n fetchTimeout.unref();\n\n const finalize = () => {\n this.fetchTaskBarrier = false;\n if (fetchTimeout)\n dcp_timers.clearTimeout(fetchTimeout);\n fetchTimeout = null;\n }\n\n // Ensure result submitter and task distributor connections before fetching tasks.\n try\n {\n await Promise.all([\n this.dcp4.taskDistributor.keepalive(),\n this.dcp4.resultSubmitter.keepalive(),\n ]);\n }\n catch (error)\n {\n selectiveDebug() && console.debug('fetchTaskFromTD: Keep slices failed', error);\n this.warning('Failed to connect to result submitter, refusing to fetch slices.', 'Will try again at next fetch cycle.');\n this.dcp4.resetConnection('taskDistributor').catch(e => {\n this.error('Failed to close task-distributor connection', e);\n });\n this.dcp4.resetConnection('resultSubmitter').catch(e => {\n this.error('Failed to close result-submitter connection', e);\n });\n return finalize();\n }\n\n if (!this.dcp4.taskDistributor)\n {\n const msg = 'Unable to request task from scheduler; no connection to task distributor';\n this.warning(msg);\n this.workerEmit('fetch', new Error(msg));\n return finalize();\n }\n \n // The 'beforeFetch' event allows the user to cancel the requestTask request.\n let canceled = false;\n /**\n * The 'beforeFetch' event fires before the request is sent to requestTask in task distributor.\n * @link https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n * @event Supervisor#beforeFetch\n */\n this.workerEmit('beforeFetch', () => { canceled = true; })\n selectiveDebug() && canceled && console.debug('User canceled the fetch task.');\n if (canceled)\n return finalize()\n\n return this.dcp4.taskDistributor.request('requestTask', request)\n .then((response) => {\n addTaskToWorkload(response);\n // Success! Restore this.dcp4.taskDistributor delay to retryMinSleepMs (currently 32ms.)\n // Is there a better way to reset than explicit calls?\n this.delayManager.resetEBO('taskDistributor');\n return response;\n })\n .catch((error) => {\n this.workerEmit('fetch', error);\n this.error('Unable to request task from scheduler. 
Will try again on a new connection.', error);\n this.dcp4.resetConnection('taskDistributor');\n })\n .finally(() => {\n return finalize();\n });\n }\n\n /**\n * Remove all unreferenced jobs in this.jobManagerInventory and this.moduleCache.\n * Since job-managers are inserted into this.jobManagerInventory with a push, the job managers at the beginning are oldest.\n * Only delete #deleteCount of the oldest job-managers:\n * let deleteCount = this.jobManagerInventory.length - cachedJobsThreshold;\n * Edit cachedJobsThreshold to adjust the cache cleanup threshold.\n * @param {Object<string, number[]>} newJobMap - Jobs that should not be removed from this.jobManagerInventory and this.moduleCache.\n */\n clearUnusedJobManagersAndModuleCache (newJobMap)\n {\n const emptyJobs = [];\n for (const jobMan of this.jobManagerInventory) // Grab oldest 1st\n {\n if (!newJobMap[jobMan.address])\n {\n let isEmpty = true;\n for (const slice of jobMan.sliceInventory)\n if (slice.isQueuedOrActive)\n {\n isEmpty = false;\n break;\n }\n if (isEmpty)\n {\n // Walk through whole list to purge empty jobs with no assigned sandboxes to save.\n if (jobMan.assignedSandboxes.length < 1)\n this.purgeJob(jobMan);\n else\n emptyJobs.push(jobMan)\n }\n }\n }\n let deleteCount = this.jobManagerInventory.length - this.options.cachedJobsThreshold;\n if (deleteCount > 0)\n {\n selectiveDebug() && console.debug(`Supervisor.clearUnusedJobManagersAndModuleCache: deleteCount ${deleteCount}/${this.jobManagerInventory.length}/${this.options.cachedJobsThreshold}.`);\n for (const jobMan of emptyJobs) // Grab oldest 1st\n {\n this.purgeJob(jobMan);\n if (--deleteCount < 1)\n break;\n }\n }\n }\n\n // _Idx\n //\n // createSandbox, returnSandbox, hookUpSandboxListeners, pruneSandboxes\n //\n\n /**\n * Automatically handle when the evaluator is down.\n *\n * With the screensaver worker, when the screensaver goes down, so does the evaluator.\n * And when the screensaver starts running again, so does the evaluator. The evaluator\n * may be stopped and started again with sa worker running, and have good behavior.\n * However, browser workers cannot have their evaluators stopped without also stopping\n * the worker (otherwise file-a-bug...)\n *\n * @param {boolean} [throwError=false]\n * @returns {Promise<Sandbox>}\n */\n async createSandbox (throwError = false)\n {\n selectiveDebug2() && console.debug('createSandbox', this.sandboxInventory.length, Date.now() - this.lastTime);\n // See if there are any READY_FOR_ASSIGN sandboxes (viz., sandbox.isReadyForAssign is true.)\n // If the evaluator just came back up (while worker is still running) there should not be any non-assigned sandboxes.\n // We're only considering sa worker (e.g. screensaver worker), because browser workers cannot stop the\n // evaluator w/o stopping the worker (I think -- if not true, file-a-bug.)\n if (this.sandboxInventory.length > 0 && this.sandboxInventory[0].isReadyForAssign)\n {\n selectiveDebug2() && console.debug(`Supervisor.createSandbox: Found ready-for-assign sandbox ${this.sandboxInventory[0].identifier}`);\n return this.sandboxInventory.shift();\n }\n\n // If the evaluator cannot start (e.g. 
if the evalServer is not running),\n // then the while loop will keep retrying until the evalServer comes online.\n try\n {\n this.evaluator.createSandboxRefCount++;\n\n if (this.evaluator.down)\n {\n selectiveDebug2() && console.debug('createSandbox-eval-down', this.evaluator.createSandboxRefCount, new Date());\n this.capabilities = null;\n // Will fail when getting close to OOM.\n // if (this.evaluator.createSandboxRefCount > 2) // Leave 2 running for a little redundancy.\n // throw new Error(`Noise reduction for evaluator-down error recovery: ref-count ${this.evaluator.createSandboxRefCount}`);\n }\n\n let retry = 0;\n while (true)\n {\n let sandbox;\n try\n {\n sandbox = new Sandbox(this, { ...this.options.sandboxOptions });\n selectiveDebug2() && console.debug(`Supervisor.createSandbox(${sandbox.id}): Calling sandbox.start: ${this.evaluator.createSandboxRefCount}, eval-down ${this.evaluator.down}`);\n this.hookUpSandboxListeners(sandbox);\n await sandbox.start();\n if (!this.capabilities)\n this.checkCapabilities(sandbox);\n if (this.evaluator.reallyDown)\n {\n this.evaluator.reallyDown = false;\n selectiveDebug() && console.debug('Supervisor.createSandbox: Evaluator is up again.', this.evaluator.createSandboxRefCount);\n this.jobManagerInventory.forEach((jobManager) => jobManager.resetSlices('createSandbox'));\n }\n return sandbox;\n }\n catch (error)\n {\n if (throwError)\n throw error;\n selectiveDebug() && console.debug(`Supervisor.createSandbox: Failed to start sandbox ${sandbox.identifier}`, this.evaluator.createSandboxRefCount, this.evaluator.down, error.message);\n if (error.code === 'ENOWORKER')\n throw new DCPError(\"Cannot use localExec without dcp-worker installed. Use the command 'npm install dcp-worker' to install the neccessary modules.\", 'ENOWORKER');\n\n if (throwError)\n throw error;\n\n // The evaluator may be down or shutting down, keep retrying.\n if ((retry % 60) === 0)\n this.warning('Failed to start a sandbox; will keep retrying; screensaver worker or evaluator may be down...');\n await a$sleepMs(1000 * Math.min(5, ++retry));\n }\n }\n }\n finally\n {\n this.evaluator.createSandboxRefCount--;\n }\n }\n\n /**\n * Remove sandbox from inventory and terminate.\n * @param {Sandbox} sandbox\n */\n returnSandbox (sandbox)\n {\n // If sandbox is not in this.sandboxInventory then sandbox must already be terminated\n // <==> this.sandboxInventory.includes(sandbox) || sandbox.isTerminated().\n selectiveDebug2() && console.debug(`returnSandbox: ${sandbox.identifier}`);\n if (common.removeElement(this.sandboxInventory, sandbox))\n sandbox.terminate(false);\n else\n {\n // If sandbox is not in this.sandboxInventory then sandbox must already be terminated\n if (common.displayMaxDiagInfo() && !sandbox.isTerminated) // Design assumption.\n throw new Error(`returnSandbox: Sandbox ${sandbox.identifier} has already been removed.`);\n }\n }\n\n /**\n * For a given sandbox, hook up all the Sandbox listeners.\n * @param {Sandbox} sandbox\n */\n hookUpSandboxListeners (sandbox)\n {\n sandbox.addListener('start', () => {\n if (!sandbox.slice) return;\n const payload = sandbox.slice.getMessagePayload(this.workerId, 'begin');\n return this.dcp4.safeRSStatus(payload, `Failed to send 'begin' status for slice ${sandbox.slice.identifier}`);\n });\n\n const that = this;\n // Sandbox error handler.\n sandbox.on('sandboxError', function Supervisor$sandboxError(error) {\n selectiveDebug() && console.debug(`Sandbox ${sandbox.identifier} sandboxError-handler; error while executing work function`, 
error);\n const slice = sandbox.slice;\n if (!slice?.isWorking) // Sanity -- warning should never fire.\n this.warning(`handleSandboxError: slice ${slice?.identifier} must be WORKING.`);\n if (slice)\n slice['useRetryLogic'] = true;\n that.returnSandbox(sandbox);\n });\n\n // Sandbox complete handler.\n // When any sandbox completes, go through the Supervisor.fetchTask protocol.\n sandbox.addListener('complete', () => {\n // Try not to call fetchTask unless there's something there.\n selectiveDebug2() && console.debug('Sandbox complete listener', this.fetchTaskBarrier, this.roundRobinBarrier, this.unusedSandboxCount, Date.now() - this.lastTime);\n if (!this.fetchTaskBarrier)\n this.fetchTask();\n });\n\n // If the sandbox terminated and we are not shutting down, then we should return all work which is\n // currently not being computed if all sandboxes are dead and the attempt to create a new one fails.\n sandbox.sandboxHandle.on('end', async () => {\n if (this.sandboxInventory.length > 0)\n {\n selectiveDebug() && console.debug(`hookUpSandboxListeners: Sandbox \"${sandbox.identifier}\" terminated handler`, this.sandboxInventory.length, Date.now() - this.lastTime);\n\n // Does there exist a non-terminated sandbox?\n let allSandboxesTerminated = true;\n for (const sbx of this.sandboxInventory)\n if (!sbx.isTerminated)\n {\n allSandboxesTerminated = false;\n break;\n }\n\n if (allSandboxesTerminated && !this.evaluator.downInterlock)\n {\n //\n // When we get here, all sandboxes have been terminated.\n //\n this.evaluator.downInterlock = true;\n selectiveDebug() && console.debug('hookUpSandboxListeners: Try to create 1 sandbox in the sandbox-terminated-handler...', sandbox.identifier);\n await this.createSandbox(true /*throwError*/)\n .then((sbx) => {\n this.evaluator.reallyDown = false;\n // This is the only place where non-assigned sandboxes are added to this.sandboxInventory.\n this.sandboxInventory.unshift(sbx);\n selectiveDebug() && console.debug('Sandbox terminate handler was able to create new sandbox', sandbox.identifier);\n })\n .catch(() => {\n //\n // Since all sandboxes have been terminated, if we cannot create a new sandbox,\n // that probably means we're on a screensaver worker and the screensaver is down.\n // Try to submit results for completed slices, but return all other non-finished\n // slices to the scheduler -- after a brief delay.\n //\n selectiveDebug() && console.debug('Sandbox terminate handler cannot create new sandbox; evaluator is down', sandbox.identifier);\n this.evaluator.reallyDown = true;\n this.emit('evalDown');\n const delay = 60; // seconds\n this.jobManagerInventory.forEach((jm) => jm.evaluatorDownCleanup(delay));\n this.warning('Stopping all work.', 'Screensaver worker or evaluator may be down.');\n })\n .finally(() => {\n this.sandboxInventory = this.sandboxInventory.filter(sbx => !sbx.isTerminated);\n this.evaluator.shuttingDown = false;\n this.evaluator.downInterlock = false;\n });\n }\n }\n });\n }\n\n /**\n * Terminate extra sandboxes over the limit.\n * @todo XXXpfr Prioritize keeping expensive to assign sandboxes.\n */\n pruneSandboxes ()\n {\n this.sandboxInventory = this.sandboxInventory.filter((sandbox) => !sandbox.isTerminated);\n let pruneCount = this.sandboxInventory.length - this.options.maxSandboxes;\n if (pruneCount <= 0)\n return;\n\n selectiveDebug() && console.debug(`Supervisor.pruneSandboxes START: pruneCount ${pruneCount}/${this.sandboxInventory.length}/${this.options.maxSandboxes}.`, this.dbg.dumpSandboxState());\n selectiveDebug2() 
&& console.debug(this.sandboxInventory.map((sbx) => sbx.identifier));\n\n // Prune ready-for-assign sandboxes first.\n while (pruneCount > 0)\n {\n if (this.sandboxInventory[0].isReadyForAssign)\n {\n const startedSandbox = this.sandboxInventory.shift();\n startedSandbox.terminate(false);\n pruneCount--;\n }\n else\n break;\n }\n\n // Don't purge jobs here: can accidentally purge a job that TD just fetched (XXXpfr)\n\n /**\n * Do we really want to do a bunch of work to keep empty job assigned sandboxes around?\n * When in a private compute group, there will be fewer jobs and it's likely\n * that a given job will be seen again.\n * @todo XXXpfr Prioritize keeping expensive to assign sandboxes.\n */\n const liveJobs = [], emptyJobs = [];\n let maxAssignedSandboxCount = 0;\n for (const jobMan of this.jobManagerInventory)\n {\n let isAlive = false;\n for (const slice of jobMan.sliceInventory)\n if (slice.isQueuedOrActive)\n {\n isAlive = true;\n break;\n }\n if (isAlive)\n liveJobs.push(jobMan);\n else\n {\n const _assignedSandboxes = jobMan.assignedSandboxes;\n if (maxAssignedSandboxCount < _assignedSandboxes.length)\n maxAssignedSandboxCount = _assignedSandboxes.length;\n emptyJobs.push(jobMan);\n }\n }\n\n if (emptyJobs.length > 0)\n {\n // Prune the sandboxes from all jobs with no current work.\n // Try to keep approximately the same # of assigned sandboxes per job.\n for (let k = maxAssignedSandboxCount; k >= 0; k--)\n {\n for (const jobMan of emptyJobs)\n {\n const _assignedSandboxes = jobMan.assignedSandboxes;\n if (_assignedSandboxes.length > k)\n {\n debugging('supervisor') && console.debug(`pruneSandboxes(empty): sandbox${_assignedSandboxes[0].id}`, Date.now() - this.lastTime);\n // Terminate and remove from this.sandboxInventory.\n this.returnSandbox(_assignedSandboxes[0]);\n if (--pruneCount < 1)\n {\n debugging('supervisor') && console.debug(`Supervisor.pruneSandboxes FINISH: ${pruneCount}/${this.sandboxInventory.length}/${this.options.maxSandboxes}.`);\n return;\n }\n }\n }\n }\n }\n\n // Round-robin prune 1 extra assigned sandbox from each non-empty jobmanager.\n while (pruneCount > 0)\n {\n const _pruneCount = pruneCount;\n for (const jobMan of liveJobs)\n {\n const _assignedSandboxes = jobMan.assignedSandboxes;\n if (_assignedSandboxes.length > 0)\n {\n debugging('supervisor') && console.debug(`pruneSandboxes(non-empty): sandbox${_assignedSandboxes[0].id}`, Date.now() - this.lastTime);\n // Terminate and remove from this.sandboxInventory.\n this.returnSandbox(_assignedSandboxes[0]);\n if (--pruneCount < 1)\n {\n debugging('supervisor') && console.debug(`Supervisor.pruneSandboxes FINISH: ${pruneCount}/${this.sandboxInventory.length}/${this.options.maxSandboxes}.`);\n return;\n }\n }\n }\n if (_pruneCount === pruneCount) // Nothing left to prune.\n break;\n }\n\n debugging('supervisor') && console.debug(`Supervisor.pruneSandboxes FINISH: incomplete-prune ${pruneCount}/${this.sandboxInventory.length}/${this.options.maxSandboxes}.`);\n }\n\n // _Idx\n //\n // recordResult, sendToResultSubmitter, sendResultToRemote\n //\n\n /**\n * Submits the slice results to the result-submitter service.\n * Then remove the slice from the its job manager.\n *\n * @param {Slice} slice - The slice to submit.\n * @param {Sandbox} sandbox - The sandbox handle associated to the slice.\n * @returns {Promise<any>}\n */\n recordResult (slice, sandbox)\n {\n // It is possible for slice.result to be undefined when there are upstream errors.\n if (!slice.result)\n throw new Error(`Slice ${slice.identifier} 
completed work, but there is no result. This is ok when there are upstream errors.`);\n if (!slice.isComplete)\n throw new Error(`Cannot record result for slice ${slice.identifier} that has not completed execution successfully.`);\n if (!slice.timeReport)\n throw new Error(`Invalid time report for slice ${slice.identifier} in recordResult`);\n if (!slice.dataReport)\n throw new Error(`Invalid data report for slice ${slice.identifier} in recordResult`);\n\n const metrics = slice.jobManager.updateStatistics(slice, sandbox);\n selectiveDebug() && console.debug(`Supervisor: recording result for slice ${slice.identifier} with metrics`, this.dbg.justCPU(metrics));\n\n /** @see result-submitter::result for full message details */\n const payloadData = {\n slice: slice.sliceNumber,\n job: slice.jobAddress,\n worker: this.workerId,\n paymentAddress: this.options.paymentAddress,\n metrics,\n authorizationMessage: slice.authorizationMessage,\n };\n\n let canceled = false;\n const resultUrl = (slice.resultStorageType !== 'pattern') ? slice.resultStorageDetails : false;\n this.workerEmit( 'beforeResult', () => { canceled = true; }, resultUrl);\n this.jobEmit(slice, 'beforeResult', () => { canceled = true; }, resultUrl);\n selectiveDebug && canceled && console.debug(`User canceled the result submission operation for slice ${slice.identifier}.`);\n if (canceled)\n return this.returnSlice(slice, 'Canceled via beforeResult event');\n\n if (slice.resultStorageType === 'pattern')\n return this.sendResultToRemote(slice)\n .then((response) => {\n return this.sendToResultSubmitter(slice, sandbox.sandboxHandle, payloadData, response);\n });\n\n return this.sendToResultSubmitter(slice, sandbox.sandboxHandle, payloadData, encodeDataURI(slice.result.result));\n }\n\n /**\n * Send result to result submitter.\n * @param {Slice} slice\n * @param {SandboxHandle} sandboxHandle\n * @param {*} payloadData\n * @param {string} [result]\n * @returns {Promise<any>}\n */\n async sendToResultSubmitter (slice, sandboxHandle, payloadData, result)\n {\n // When handleRSError is hit, { slice, payload } is added to the queue this.dcp4.submitResultsQueueMap[slice.key] .\n // For a given slice, the queue is retried independent of other slices that failed to submit.\n // When a given slice hits the retry limit (6 retries) the slice is returned to scheduler.\n const handleRSError = (error, slice, payloadData) => { // eslint-disable-line no-shadow\n const msg = `Failed to submit results to scheduler for slice ${slice.identifier}`;\n if (!error) error = new Error(msg);\n this.error(msg, error);\n\n slice['retrySubmitResults'] = (slice['retrySubmitResults'] ?? 
0) + 1;\n if (slice['retrySubmitResults'] > this.options.maxResultSubmissionRetries)\n {\n this.handleFailedSlice(slice, error);\n throw new Error(`Failed to submit results 6 times for slice ${slice.identifier}`);\n }\n\n // For a given slice, there's never more than one element in the corresponding queue.\n this.dcp4.submitResultsQueueMap[slice.key] = [ { slice, sandboxHandle, payloadData } ];\n return this.dcp4.resetConnection('resultSubmitter');\n }\n\n try\n {\n debugging('supervisor') && console.debug('Supervisor.recordResult: payloadData', result.slice(0, 256), slice.identifier);\n if (result)\n payloadData.result = result;\n\n await this.delayManager.nextDelay('recordResult', 2);\n //->console.log('recordResult', slice.identifier, this.evaluator.down, Date.now() - this.lastTime); // SAVE\n\n return this.dcp4.resultSubmitter.request('result', payloadData)\n .then((resp) => {\n const payload = resp.payload;\n if (!resp.success)\n {\n if (payload)\n {\n selectiveDebug() && console.debug('resultSubmitter.send failed', payload);\n throw new DCPError(`Call to result submitter failed when recording results for ${slice.identifier}.`, payload);\n }\n if (debugBuild)\n {\n selectiveDebug() && console.debug('resultSubmitter.send failed with no payload', slice.identifier);\n // Look inside\n for (const [ key, value ] of Object.entries(resp)) {\n if (key !== 'connection')\n console.debug(`${key}:`, value);\n }\n }\n throw new Error(`Call to result submitter failed when recording results for ${slice.identifier}.`);\n }\n\n debugging('supervisor') && console.debug('Successfully submitted results', slice.identifier);\n\n // Success! Restore this['resultSubmitter'] delay to retryMinSleepMs (currently 32ms.)\n // Is there a better way to reset than explicit calls?\n this.delayManager.resetEBO('resultSubmitter');\n\n common.debugQuanta() && this.dbg.addGlobal(slice, payload.metrics);\n slice.jobManager.update({ metrics: payload.metrics }); // Update metrics\n\n // Emit the 3 'payment' events.\n const paymentAddress = payloadData.paymentAddress.toString();\n this.workerEmit( 'payment', payload.slicePaymentAmount, paymentAddress, slice.jobAddress, slice.sliceNumber);\n this.jobEmit(slice, 'payment', payload.slicePaymentAmount, paymentAddress, slice.sliceNumber);\n this.safeEmit(sandboxHandle, 'payment', payload.slicePaymentAmount, paymentAddress);\n\n const payloadLength = kvin.stringify(payloadData).length; /** @TODO - fix per DCP-3750 */\n const resultUrl = (slice.resultStorageType !== 'pattern') ? slice.resultStorageDetails : false;\n this.workerEmit( 'result', resultUrl, payloadLength);\n this.jobEmit(slice, 'result', resultUrl, payloadLength);\n\n slice.markAsFinished();\n\n // Remove the slice from the job manager.\n slice.jobManager.removeSlice(slice);\n\n if (this.sliceTiming)\n {\n slice['resultDelta'] = Date.now() - slice['resultDelta'];\n console.debug(`recordResult(${slice['queueingDelta']}, ${slice['executionDelta']}, ${slice['resultDelta']}): Completed slice ${slice.identifier}.`, Date.now() - this.lastTime);\n }\n if (false)\n {}\n\n return resp;\n })\n .catch ((error) => {\n handleRSError (error, slice, payloadData);\n });\n }\n catch (error)\n {\n handleRSError (error, slice, payloadData);\n }\n }\n\n /**\n * Send a work function's result to a server that speaks our DCP Remote Data Server protocol.\n * E.g. 
https://gitlab.com/Distributed-Compute-Protocol/dcp-rds\n *\n * @param {Slice} slice - Slice object whose result we are sending.\n * @returns {Promise<string>}\n * @throws When HTTP status not in the 2xx range.\n */\n sendResultToRemote (slice)\n {\n return supShared.sendResultToRemote(this, slice);\n }\n\n // _Idx\n //\n // handleWorkReject\n //\n\n /**\n * Handles reassigning or returning a slice that rejected.\n *\n * If error.message === 'false' and slice.hasBeenRejected is false, reschedule the slice.\n * Set the slice.hasBeenRejected to be true.\n *\n * If error.message !== 'false' or slice.hasBeenRejected is true (i.e. has been rejected once already)\n * zthen return all slices from the job to the scheduler and terminate all sandboxes with that jobAddress.\n *\n * @param {Slice} slice\n * @param {Error} error\n */\n handleWorkReject (slice, error)\n {\n debugging() && console.debug('handleWorkReject', error.message, slice.hasBeenRejected, slice.identifier);\n\n const jobManager = slice.jobManager;\n jobManager.rejectedJobReasons.push(error.message); // memoize reasons\n\n // First time rejecting without a reason; try rescheduling the slice.\n if (error.message === 'false' && !slice.hasBeenRejected)\n {\n // Mark slice as rejected.\n slice.hasBeenRejected = true;\n // Reset slice state to allow re-execution.\n slice.resetState();\n }\n else\n {\n // Slice has been rejected twice, so add to array of rejected jobs.\n const rejectedJob = {\n address: slice.jobAddress,\n reasons: jobManager.rejectedJobReasons,\n };\n this.rejectedJobs.push(rejectedJob);\n // Broadcast failure.\n this.workerEmit( 'result', error);\n this.jobEmit(slice, 'result', error);\n // Purge the job.\n this.purgeJob(jobManager);\n // Tell everyone all about it, when allowed.\n if (jobManager.displayMaxDiagInfo)\n {\n const suffixMsg = 'All slices and sandboxes with the same jobAddress returned to the scheduler or terminated.';\n if (slice.hasBeenRejected)\n this.warning(`work.reject: The slice ${slice.identifier} was rejected twice.`, suffixMsg);\n else\n this.warning(`work.reject: The slice ${slice.identifier} was rejected with reason: ${error.message}.`, suffixMsg);\n }\n }\n }\n\n}\nexports.Supervisor = Supervisor;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/index.js?");
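The jobQuanta() comments in the bundled supervisor above describe smoothing defaultQuanta (the ratio of the worker's local EMA slice time to the scheduler's global EMA) with alpha = 0.1, rounding it toward 1 in 1/32 buckets, and clamping it to [0.125, 8]. The following is a minimal standalone sketch of that arithmetic only; updateQuanta, nextEma, and the state object are illustrative stand-ins and not the bundle's actual dcp/utils exports.

// Sketch of the defaultQuanta smoothing described in jobQuanta() above.
// Constants mirror the comments: clamp to [1/8, 8], discretize in 1/32 buckets.
const MIN_QUANTA = 0.125, MAX_QUANTA = 8.0;
const BUCKET = 0.03125, BUCKET_INV = 32;

// Plain EMA step (stand-in for the bundle's nextEma helper).
function nextEma(prev, sample, alpha) {
  return prev === undefined ? sample : prev + alpha * (sample - prev);
}

function updateQuanta(state, averageLocalTime, averageGlobalTime, alpha = 0.1) {
  state.localTime  = nextEma(state.localTime,  averageLocalTime,  alpha);
  state.globalTime = nextEma(state.globalTime, averageGlobalTime, alpha);

  let q = state.localTime / state.globalTime;
  // Round toward 1 in 1/32 increments: floor when above 1, ceil when below.
  q = (q > 1 ? Math.floor(BUCKET_INV * q) : Math.ceil(BUCKET_INV * q)) * BUCKET;
  // Cap and floor the ratio so one noisy sample cannot swing it wildly.
  return Math.min(Math.max(q, MIN_QUANTA), MAX_QUANTA);
}

// e.g. a worker whose slices run ~2.3x slower than the global average:
// updateQuanta({}, 230, 100) === 2.28125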
4727
+ eval("/**\n * @file dcp/src/dcp-client/worker/supervisor2/index.js\n * Code managing sandboxes, tasks, jobs, and slices within in a DCP Worker.\n * @author Wes Garland, wes@distributive.network\n * Paul, paul@distributive.network\n * @date Dec 2020\n * June 2022, Jan-April 2023\n * @module supervisor\n * @copyright Copyright (c) 2018-2023, Distributive Corp. All Rights Reserved\n */\n/*\n * initial ready reconnecting stopping stopped paused broken\n * |-- ctor ----------------------------------------------------------------------------------------------------------------->\n * |-- work ----------------------------------------------------------------------------------------------------------------->\n * |-- work --------------------------------------------------------------------------------------------------->\n * |-- work -------------------------------------------------------------------------------->\n * |-- work --------------------------------------------------------->\n * |-- work --------------------------------->\n * |-- Worker.pause --------------------------------------------------------------------------->\n * <-- Worker.unpause -------------------------------------------------------------------------|\n * |-- work ----->\n * |-- stopWork ---------------------------->\n * |-- postStopShutdown --->\n * |-- PM.connectTo --> (ProtocolManager)\n * <-- PM.connectTo --| (ProtocolManager)\n * |-- stopWork ------------------------------------------->\n * <-- work -----------------------------------------------------------------------|\n * <-- stopWork -----------------------------------------------------|\n */\n/* global dcpConfig */ // eslint-disable-line no-redeclare\n// @ts-check\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst dcp_timers = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\nconst EventEmitter = __webpack_require__(/*! events */ \"./node_modules/events/events.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { Address } = __webpack_require__(/*! dcp/dcp-client/wallet/eth */ \"./src/dcp-client/wallet/eth.js\");\nconst { Keystore } = __webpack_require__(/*! dcp/dcp-client/wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\");\nconst RingBuffer = __webpack_require__(/*! dcp/utils/ringBuffer */ \"./src/utils/ringBuffer.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst { JobManager } = __webpack_require__(/*! ./job-manager */ \"./src/dcp-client/worker/supervisor2/job-manager.js\");\nconst { Sandbox, SandboxError } = __webpack_require__(/*! ./sandbox2 */ \"./src/dcp-client/worker/supervisor2/sandbox2.js\");\nconst { sliceStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { OriginAccessManager } = __webpack_require__(/*! dcp/dcp-client/worker/origin-access-manager */ \"./src/dcp-client/worker/origin-access-manager.js\");\nconst { a$sleepMs, booley, toJobMap, encodeDataURI, stringify, nextEma } = __webpack_require__(/*! 
dcp/utils */ \"./src/utils/index.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\n\nconst { ModuleCache } = __webpack_require__(/*! ./module-cache */ \"./src/dcp-client/worker/supervisor2/module-cache.js\");\nconst { Promise_any } = __webpack_require__(/*! ./promise_any */ \"./src/dcp-client/worker/supervisor2/promise_any.js\");\nconst { ProtocolManager } = __webpack_require__(/*! ./protocol-manager */ \"./src/dcp-client/worker/supervisor2/protocol-manager.js\");\nconst { EvaluatorManager } = __webpack_require__(/*! ./evaluator-manager */ \"./src/dcp-client/worker/supervisor2/evaluator-manager.js\");\nconst { DelayManager } = __webpack_require__(/*! ./delay-manager */ \"./src/dcp-client/worker/supervisor2/delay-manager.js\");\nconst { Options } = __webpack_require__(/*! ./options */ \"./src/dcp-client/worker/supervisor2/options.js\");\nconst common = __webpack_require__(/*! ./common */ \"./src/dcp-client/worker/supervisor2/common.js\");\nconst { debugBuild, selectiveDebug, selectiveDebug2, minimalDiag, selectiveSupEx } = common;\nconst supShared = __webpack_require__(/*! ../SupShared */ \"./src/dcp-client/worker/SupShared.js\");\nconst { canScheduleGPU } = __webpack_require__(/*! ./gpu_support */ \"./src/dcp-client/worker/supervisor2/gpu_support.js\");\n\n/** @typedef {string} opaqueId */ // 22 character base64 string\n/** @typedef {import('./slice2').Slice} Slice */\n/** @typedef {import('dcp/utils/jsdoc-types').Auth} Auth */\n/** @typedef {import('dcp/utils/jsdoc-types').Body} Body */\n/** @typedef {import('./sandbox2').SandboxHandle} SandboxHandle */\n/** @typedef {import('dcp/utils/jsdoc-types').SliceObj} SliceObj */\n/** @typedef {import('dcp/dcp-client/worker/index').Worker} Worker */\n/** @typedef {import('dcp/utils/jsdoc-types').TDPayload} TDPayload */\n/** @typedef {import('dcp/utils/jsdoc-types').Signature} Signature */\n/** @typedef {import('dcp/utils/jsdoc-types').SliceMessage} SliceMessage */\n/** @typedef {import('dcp/dcp-client/wallet/keystore').Keystore} Keystore */\n/** @typedef {import('dcp/utils/jsdoc-types').SupervisorOptions} SupervisorOptions */\n/** @typedef {import('dcp/protocol-v4/connection/connection').Connection} Connection */\n\n//\n// Index to functionality -- search for '_Idx' to toggle through the index.\n//\n// 1) class Supervisor\n// 2) work, checkCapabilities\n// 3) safeEmit, workerEmit, jobEmit, error, warning, mungeError, jobDescriptor, setState\n// 4) returnAllSlices, postStopShutdown, abort, stopWork, purgeJob\n// 5) roundRobinSlices, makeJobSelectionCursor, handleSandboxWorkError, handleFailedSlice\n// 6) returnSlices, returnSlice, emitProgressReport\n// 7) jobQuanta, repoMan, predictLoad(viz., clairvoyance), waitUntilWorkIsReady, generateWorkerComputeGroups\n// 8) availableSandboxSpace, fetchTask, addTaskToWorkload, fetchFromTD, clearUnusedJobManagersAndModuleCache\n// 9) createSandbox, returnSandbox, hookUpSandboxListeners, pruneSandboxes\n// 10) recordResult, sendToResultSubmitter, sendResultToRemote\n// 11) handleWorkReject\n//\n\n// _Idx\n//\n// class Supervisor\n//\n\n/**\n * Supervisor constructor\n *\n * A supervisor manages the communication with the scheduler, manages sandboxes, and\n * decides which workload should be sent to which sandboxes when.\n *\n * Possible states: 'initial', 'ready', 'reconnecting', 'stopping', 'stopped', 'broken'\n * Start state:\n * - initial\n *\n * Intermediate states:\n * - ready\n * - reconnecting\n * - stopping\n *\n * Terminal states:\n * - stopped\n * - broken\n *\n * 
Valid transitions:\n * - initial -> ready -> reconnecting -> ready\n * - ready -> stopping -> stopped\n * - initial -> broken\n */\nclass Supervisor extends EventEmitter\n{\n /**\n * @constructor\n * @param {Worker} worker\n * @param {Keystore} identity\n * @param {SupervisorOptions} options\n */\n constructor (worker, identity, options)\n {\n super({ captureRejections: false });\n\n if (!(identity instanceof Keystore))\n throw new Error(`identity ${JSON.stringify(identity)} must be an instance of Keystore`);\n\n debugging('supervisor') && console.debug('Supervisor.options', options);\n assert(options === worker.workerOptions);\n \n /** @type {Worker} */\n this.worker = worker;\n /** @type {Keystore} */\n this.identityKeystore = identity;\n /** @type {Options} */\n this.options = new Options(options, worker);\n\n selectiveDebug() && console.debug('Supervisor: cores.cpu, cores.gpu, maxSandboxes', options.cores?.cpu, options.cores?.gpu, this.options.maxSandboxes);\n\n /** @type {ModuleCache} */\n this.moduleCache = new ModuleCache(this);\n\n // Manage delays and exponential backoff.\n this.delayManager = new DelayManager(this, this.options.defaultDelayIncrement);\n\n /* See https://distributive.atlassian.net/browse/DCP-3175 */\n /** @type {OriginAccessManager} */\n this.originManager = OriginAccessManager.construct(this.options.allowOrigins);\n\n /** @type {ProtocolManager} */\n this.dcp4 = new ProtocolManager(this);\n\n /** @type {common.DebuggingTools} */\n this.dbg = new common.DebuggingTools(this);\n\n // Turn on for max speed debugging.\n if (false)\n {}\n\n /** @type { Synchronizer } */\n this.state = new Synchronizer('initial', [ 'initial', 'ready', 'reconnecting', 'stopping', 'stopped', 'paused', 'broken']);\n /** @type {Object<string, JobManager>} */\n this.jobMap = {}; // jobAddress => jobManager\n\n /** @type {JobManager[]} */\n this.jobManagerInventory = common.InventoryArray('jobManagers');\n /** @type {Sandbox[]} */\n this.sandboxInventory = []; // All sandboxes that are being used by the job managers. Makes sure we don't lose sandboxes.\n /** @type {{ next: cbNext, push: cbPush }} */\n this.cursor = null;\n /** @type {number} */\n this.defaultQuanta = 1.0;\n\n /**\n * Evaluator down management.\n **/\n this.evaluator = new EvaluatorManager();\n\n // There are 2 kinds of barriers.\n // 1) fetchTaskBarrier is a barrier for the task fetching from task distributor path.\n // 2) roundRobinBarrier is a barrier for the slice execution path.\n /** @type {boolean} */\n this.fetchTaskBarrier = false;\n /** @type {boolean} */\n this.roundRobinBarrier = false;\n\n /** @type {object[]} */\n this.rejectedJobs = [];\n /**\n * An N-slot ring buffer of job addresses. 
Stores all jobs that have had no more than 1 slice run in the ring buffer.\n * Required for the implementation of discrete jobs\n * @type {RingBuffer}\n */\n this.ringBufferofJobs = new RingBuffer(200); // N = 200 should be more than enough.\n /**\n * When true we await waitUntilWorkIsReady until at least 1 job is ready with at least 1 ready slice.\n * waitUntilWorkIsReady\n * @type {boolean}\n */\n this.waitForWork = true;\n /**\n * Last repoMan time stamp.\n * @type {number}\n **/\n this.lastRepoMan = Date.now();\n /**\n * Last prune time stamp.\n * @type {number}\n **/\n this.lastPrune = Date.now();\n /**\n * General time stamp.\n * @type {number}\n **/\n this.lastTime = Date.now();\n /**\n * Fetch started time stamp.\n * @type {number}\n **/\n this.fetchTaskStarted = 0;\n /**\n * The capabilities of a random sandbox.\n * @todo XXXpfr Re-work this once fetchTask uses the capabilities of every sandbox to fetch slices.\n * @type {object}\n */\n this.capabilities = null;\n /**\n * EMA times series of CPUTime + GPUTime over all jobs.\n * Each EMA entry is computed right before calling fetchTask.\n * @type {number}\n */\n this.localTime = 0;\n /**\n * EMA times series of sliceCPUTime + sliceGPUTime over all jobs.\n * Each EMA entry is computed right before calling fetchTask.\n * @type {number}\n */\n this.globalTime = 0;\n /**\n * When this.sliceTiming is set to be true, it displays the timings of a every slice\n * slice['queueingDelta'] = timespan of when slice is passed to jobManager.runQueuedSlice until sandbox.work\n * slice['executionDelta'] = timespan of execution in sandbox\n * slice['resultDelta'] = timespan of when sandbox finishes executing until recordResult completes.\n * @type {boolean}\n */\n this.sliceTiming = false;\n\n try\n {\n // Start up the connections.\n this.dcp4.instantiateAllConnections();\n }\n catch(error)\n {\n this.error('Failed to set up DCP connections:', error);\n this.setState('initial', 'broken');\n this.worker.stop(true).finally(() => { throw error; });\n }\n }\n\n //\n // Compatibility layer between Sup1, Sup2 and the Sup interface exposed by Worker.\n //\n /**\n * Get all sandboxes.\n * @type {Sandbox[]}\n */\n get sandboxes () { return this.sandboxInventory.filter((sandbox) => !sandbox.isTerminated); }\n /**\n * Get all working sandboxes.\n * @type {Sandbox[]}\n */\n get workingSandboxes () { return this.sandboxInventory.filter((sandbox) => sandbox.isWorking); }\n /**\n * Get the number of working sandboxes.\n * @type {number}\n */\n get workingSandboxCount () { return this.workingSandboxes.length; }\n /**\n * Get all slices over all jobs..\n * @type {Slice[]}\n */\n get slices () {\n const slices = [];\n this.jobManagerInventory.forEach((jobManager) => { slices.push(...jobManager.sliceInventory); });\n return slices;\n }\n /**\n * Get all queued slices over all jobs..\n * @type {Slice[]}\n */\n get queuedSlices () {\n const slices = [];\n this.jobManagerInventory.forEach((jobManager) => { slices.push(...jobManager.queuedSlices); });\n return slices;\n }\n /**\n * Get all queued slices over all jobs..\n * @type {Slice[]}\n */\n get workingSlices () {\n const slices = [];\n this.jobManagerInventory.forEach((jobManager) => { slices.push(...jobManager.workingSlices); });\n return slices;\n }\n /** @type {opaqueId} */\n get workerId () { return this.options.workerId; }\n /** @type {opaqueId} */\n set workerId (id) { this.options.workerId = id; }\n get version() { return '2.0.0' }\n /**\n * @deprecated\n * @todo XXXpfr Rip out this sup2/sup1 special-casing 
when we finally kill sup1.\n * @type {boolean}\n */\n get isSupervisor1 () { return false; }\n /**\n * @deprecated\n * @todo XXXpfr Rip out this sup2/sup1 special-casing when we finally kill sup1.\n * @type {boolean}\n */\n get isSupervisor2 () { return true; }\n\n //\n // Miscellaneous properties.\n //\n\n /**\n * Dynamic maxWorkingCores.\n * The maximum number of cores that can be executing slices. Slices are scheduled\n * using density. E.g. suppose a job has GPUDensity is 0 and CPUDensity is 0.5,\n * then 2 slices of this job can be scheduled on a single core.\n * @type {number}\n */\n get maxWorkingCores () { return this.options.cores?.cpu; }\n /**\n * Dynamic maxWorkingGPUs.\n * The maximum number of GPUs that can be executing slices. Slices are scheduled\n * using density. E.g. suppose a job has GPUDensity is 0.5 and CPUDensity is 0.5,\n * then 2 slices of this job can be scheduled on a single GPU core and a single CPU core.\n * @type {number}\n */\n get maxWorkingGPUs () { return this.options.cores?.gpu; }\n /**\n * @deprecated\n * @todo XXXpfr Get rid of this after Sup1 dies.\n */\n get lastDcpsid () { return this.dcp4.lastDcpsid; }\n /**\n * @deprecated\n * @todo XXXpfr Get rid of this after Sup1 dies.\n */\n set lastDcpsid (dcpsid) { this.dcp4.lastDcpsid = dcpsid; }\n /**\n * Indicates whether supervisor is ready for business.\n * @type {boolean}\n */\n get isReady () { return this.worker.working && this.state.is('ready'); }\n /**\n * The # of sandboxes not being used.\n * @type {number}\n */\n get unusedSandboxCount () { return this.options.maxSandboxes - this.workingSliceCount; }\n /**\n * The unused amount of CPU density in the cores.\n * @type {number}\n */\n get unusedCoreSpace () { return this.maxWorkingCores - this.workingSliceDensity; }\n /**\n * The unused amount of GPU density in the cores.\n * Use Math.max(1, this.maxWorkingGPUs) so there's always enough room to schedule\n * a GPU slice when this.workingGPUDensity = 0. 
In RoundRobinSlices we use the accumulated\n * recent history ( canScheduleGPU(maxWorkingGPUs) ) to check whether the average recent\n * density is within this.maxWorkingGPUs.\n * @type {number}\n */\n get unusedGPUSpace () { return Math.max(1, this.maxWorkingGPUs) - this.workingGPUDensity; }\n /** @type {number} */\n get workingSliceDensity ()\n {\n let density = 0;\n for (const jobMan of this.jobManagerInventory)\n density += jobMan.workingSliceDensity;\n return density;\n }\n /** @type {number} */\n get workingGPUDensity ()\n {\n let density = 0;\n for (const jobMan of this.jobManagerInventory)\n density += jobMan.workingGPUDensity;\n return density;\n }\n /** @type {number} */\n get workingSliceCount ()\n {\n let count = 0;\n for (const jobMan of this.jobManagerInventory)\n count += jobMan.workingSliceCount;\n return count;\n }\n /**\n * Compute the estimated time to completion of all work.\n * The time is measured as if there were only a single slice running at a time.\n * workRemaining is the amount of time until completion.\n * @type {number}\n */\n get workRemaining ()\n {\n let workRemaining = 0;\n for (const jobMan of this.jobManagerInventory)\n workRemaining += jobMan.workRemaining;\n return workRemaining;\n }\n\n // _Idx\n //\n // work, checkCapabilities\n //\n\n /**\n * Set up sandboxes and interval timers, then start to search for work.\n * Called in Worker.start().\n * Initial entry point after Worker constructor.\n * We need to start searching for work here to allow starting and stopping a worker.\n */\n work ()\n {\n const abort = async (error) => {\n // May be in a stopping/stopped state, because dcp-worker was hit with ctrl-C.\n this.setState(['ready', 'stopping', 'stopped', 'reconnecting'], 'broken');\n await this.worker.stop(true);\n throw error;\n };\n /* Provide opportunity for calling code to hook ready/error events. */\n dcp_timers.setImmediate(() => {\n try\n {\n // [ 'initial', 'ready', 'reconnecting', 'stopping', 'stopped', 'paused', 'broken' ]\n if (this.state.isNot('initial'))\n {\n if (this.state.in(['ready', 'stopping', 'reconnecting']))\n {\n this.warning(`Supervisor.work was called when supervisor is already ${this.state.valueOf()}.`, 'Please either wait and try again or restart worker.');\n return;\n }\n else if (this.state.is('broken'))\n {\n this.warning(\"Cannot call Supervisor.work when supervisor is in a 'broken' state. 
Please restart worker.\");\n return;\n }\n this.state.set(['stopped', 'paused'], 'initial');\n }\n this.evaluator.initialize();\n this.dcp4.instantiateAllConnections();\n\n // Beacon interval timer.\n this.progressReportTimer = dcp_timers.setInterval(() => this.emitProgressReport(), this.options.progressReportInterval);\n // Watchdog: fetchTask-driven interval timer.\n this.watchdogTimer = dcp_timers.setInterval(() => this.fetchTask(), this.options.watchdogInterval);\n\n // Interval timers helps keep workers and localExec alive forever.\n this.progressReportTimer.unref();\n this.watchdogTimer.unref();\n\n if ( false || debugging('supervisor'))\n {\n this.sliceDebuggingTimer = setInterval(() => {\n this.jobManagerInventory.forEach((jobMan) => {\n const { unassigned, ready, reserved, working, workdone, complete, failed, finished } = jobMan.dumpSlices ('RRS', false, false);\n console.debug(`RRS(${jobMan.identifier},${this.unusedSandboxCount},${this.unusedCoreSpace},${this.workingSliceCount},${this.workingSliceDensity}): u/r/rsv/w/wd/c/f/fsh ${unassigned}/${ready}/${reserved}/${working}/${workdone}/${complete}/${failed}/${finished}`, jobMan.identifier, this.sandboxInventory.length);\n });\n }, 30 * 1000);\n if (this.sliceDebuggingTimer.unref)\n this.sliceDebuggingTimer.unref();\n }\n\n this.state.set('initial', 'ready');\n\n // Create 1 sandbox now to get the capabilities which are sent to Task Distributor by fetchTask.\n this.createSandbox()\n .then((sandbox) => {\n this.sandboxInventory.push(sandbox);\n debugging('supervisor') && console.debug('work() after createSandbox', this.sandboxInventory.length, sandbox.identifier, Date.now() - this.lastTime, this.options.watchdogInterval);\n this.fetchTask() // Don't wait for watchdog.\n .catch (async (error) => {\n this.error('work() failed when calling fetchTask', error);\n await abort(error);\n });\n })\n .catch(async (error) => {\n this.error('work() failed when calling createSandbox, exiting...', error);\n await abort(error);\n });\n }\n catch(error)\n {\n this.error('work() failed', error);\n if (this.state.is('initial')) this.state.set('initial', 'broken');\n else if (!this.state.is('broken')) this.setState('ready', 'broken');\n this.worker.stop(true).finally(() => { throw error; });\n }\n });\n }\n\n /** Construct capabilities when necessary. 
*/\n checkCapabilities (sandbox)\n {\n /**\n * Assign the capabilities of one the sandboxes before fetching slices from the scheduler.\n * @todo XXXpfr Re-work this once fetchTask uses the capabilities of every sandbox to fetch slices.\n */\n this.capabilities = sandbox.capabilities;\n if (DCP_ENV.isBrowserPlatform && this.capabilities.browser)\n this.capabilities.browser.chrome = DCP_ENV.isBrowserChrome;\n\n debugging('supervisor') && console.debug('Supervisor.checkCapabilities computed', Date.now() - this.lastTime);\n }\n\n // _Idx\n //\n // safeEmit, workerEmit, jobEmit,\n // error, warning, mungeError, jobDescriptor, setState\n //\n\n /**\n * Safe event emitter.\n * @param {EventEmitter} emitter\n * @param {string} event\n * @param {...any} args\n */\n safeEmit(emitter, event, ...args)\n {\n try\n {\n emitter.emit(event, ...args);\n }\n catch (error)\n {\n this.error(`Event handler for event ${event} threw an exception`, error);\n }\n }\n\n /**\n * Safe event emitter on worker.\n * @param {string} event\n * @param {...any} args\n */\n workerEmit(event, ...args)\n {\n this.safeEmit(this.worker, event, ...args);\n }\n\n /**\n * Safe event emitter on slice.jobHandle.\n * @param {Slice} slice\n * @param {string} event\n * @param {...any} args\n */\n jobEmit(slice, event, ...args)\n {\n this.safeEmit(slice.jobHandle, event, ...args);\n }\n\n /**\n * Error feedback to user.\n * @param {string} message\n * @param {Array<Error>|Error|string} [coreError]\n * @param {string} [additionalInfo]\n * @param {boolean} [supressStack=false]\n */\n error (message, coreError, additionalInfo, supressStack = false)\n {\n const isString = (s) => { return (typeof s === 'string' || s instanceof String); };\n if (coreError instanceof AggregateError)\n coreError = coreError.errors;\n if (Array.isArray(coreError) && coreError.length > 0) // Emit error for every element of array.\n return coreError.flat().forEach((c_err) => this.error(message, c_err, additionalInfo));\n\n debugging('supervisor') && console.debug('Supervisor.error:', message, coreError, additionalInfo);\n if (!message)\n message = 'Supervisor.error called w/o valid message';\n if (additionalInfo)\n {\n if (typeof additionalInfo === 'object')\n // @ts-ignore\n additionalInfo = (additionalInfo instanceof Error) ? 
additionalInfo.message : JSON.stringify(additionalInfo);\n else if (typeof additionalInfo !== 'string')\n additionalInfo = String(additionalInfo);\n\n if (!isString(additionalInfo))\n additionalInfo = additionalInfo.toString();\n if (!coreError)\n coreError = '';\n else if (!isString(coreError))\n coreError = String(coreError);\n }\n\n let dcpError;\n if (additionalInfo)\n dcpError = new DCPError(message, coreError, additionalInfo, supressStack);\n else if (coreError && (coreError instanceof Error))\n dcpError = new DCPError(message, coreError, '', supressStack);\n else\n dcpError = new DCPError(message, '', '', supressStack);\n\n this.worker.emit('error', dcpError);\n }\n\n /**\n * Warning feedback to user.\n * @param {string[]} messages\n */\n warning (...messages)\n {\n debugging('supervisor') && console.debug('Supervisor.warning:', messages);\n if (messages.length < 1)\n messages = [ 'Supervisor.warning called w/o valid message(s)' ];\n messages.forEach((message) => this.worker.emit('warning', message));\n }\n\n /**\n * @deprecated\n * Create new object and copy the interesting properties from error.\n * Only show the stack for debug builds.\n * If timestamp isn't set, assign new Date().\n * @param {{ message }|string|object} error\n * @param {*} [errorCtor]\n * @returns {string|{ message }}\n */\n __mungeError (error, errorCtor)\n {\n if (typeof error === 'string')\n {\n const errorLines = error.split('\\n');\n return common.displayMaxDiagInfo() ? error : errorLines[0];\n }\n\n if (!error || typeof error !== 'object' || !('message' in error) || Array.isArray(error))\n return error;\n\n if (minimalDiag)\n return error.message;\n\n const errorObj = errorCtor ? new errorCtor(error.message) : { message: error.message };\n\n const props = common.displayMaxDiagInfo()\n ? 
[ 'type', 'process', 'name', 'origin', 'info', 'code', 'errorCode', 'operation', 'fileName', 'lineNumber', 'timestamp' ]\n : [ 'code', 'errorCode', 'fileName', 'lineNumber', 'timestamp' ]\n const predCopy = (prop) => {\n if (error[prop])\n errorObj[prop] = error[prop];\n };\n\n props.forEach((prop) => { predCopy(prop); });\n\n if (common.displayMaxDiagInfo())\n {\n predCopy('stack');\n if (errorObj['name'] === 'Error')\n delete errorObj['name'];\n }\n if (!errorObj['timestamp'])\n errorObj['timestamp'] = new Date();\n\n return errorObj;\n }\n\n /**\n * Get the job descriptor for the appropriate job manager,\n * which is the object value corresponding to jobAddress, in\n * the object returned by getJobsForTask in task-jobs.js.\n * @param {string} jobAddress\n * @returns {object}\n */\n jobDescriptor (jobAddress)\n {\n const jobManager = this.jobMap[jobAddress];\n if (!jobManager)\n throw new Error(`Cannot find the job descriptor corresponding to jobAddress ${jobAddress}`);\n return jobManager.jobMessage;\n }\n\n /**\n * Protect this.state when transitioning from currState -> nextState\n * It's dangerous to place this.state.set in a catch block with this.error or this.warning\n * because an uncaught exception will kill process before emitting the event-based diagnostic.\n * @param {string|string[]} currState\n * @param {string} nextState\n */\n setState(currState, nextState)\n {\n try { this.state.set(currState, nextState); }\n catch (e) { this.error('Supervisor.state.set error', e); }\n }\n\n // _Idx\n //\n // returnAllSlices, postStopShutdown, abort\n // stopWork, purgeJob\n //\n\n /** @returns {Promise<*>} */\n returnAllSlices ()\n {\n if (selectiveDebug())\n {\n const activeSlices = this.jobManagerInventory.map((jm) => jm.activeSlices).flat();\n if (activeSlices.length > 0)\n this.warning(`Returning active slices : ${stringify(activeSlices.map((slice) => slice.identifier), -1, 2)}`);\n }\n // The promises are all about returning the slices to the scheduler and there's no reason to await that.\n return Promise.all(this.jobManagerInventory.map((jm) => jm.destroy()));\n }\n\n /** @returns {Promise<*>} */\n postStopShutdown ()\n {\n for (const sandbox of this.sandboxInventory)\n sandbox.terminate(false);\n this.sandboxInventory = [];\n\n // There shouldn't be anything in the job managers, but just to be safe call returnAllSlices.\n // Clear jobManagerInventory, close all connections and set state to 'stopped'.\n return this.returnAllSlices()\n .finally(() => {\n // Re-enable is-screen-saver-active logic for the sandbox handle 'end' event handler.\n this.evaluator.pauseSandboxHandleEndHandler = false;\n this.jobManagerInventory = common.InventoryArray('jobManagers');\n return this.dcp4.closeConnections()\n .finally (() => {\n if (this.state.isNot('stopped'))\n this.setState('stopping', 'stopped');\n // This log message assume slices were returned to scheduler in a previous operation, which is the only current use case.\n // If we use this function in a different way in the future, update the log message.\n selectiveDebug() && console.debug(`Supervisor.postStopShutdown(${this.state}): terminated all sandboxes and returned all slices to scheduler...`);\n });\n });\n }\n\n /**\n * Stop the worker immediately and return all unfinished slices.\n * @returns {Promise<*>}\n */\n abort ()\n {\n return this.returnAllSlices()\n .finally (() => {\n return this.postStopShutdown();\n });\n }\n\n /**\n * Terminates sandboxes and returns slices.\n * Sets the working flag to false, call @this.work to start 
working again.\n *\n * If forceTerminate is true: Terminates all sandboxes and returns all slices.\n * If forceTerminate is false: Terminates non-working sandboxes and returns initial and ready slices.\n *\n * @param {boolean} [forceTerminate = true] - true if you want to stop the sandboxes from completing their current slice.\n * @returns {Promise<*>}\n */\n async stopWork (forceTerminate = true)\n {\n /** @returns {boolean} */\n const doNotWaitForWork = () => {\n return (this.evaluator.reallyDown || !this.sandboxInventory.filter(sbx => !sbx.isTerminated).length);\n }\n selectiveDebug() && console.debug(`Supervisor.stopWork(${forceTerminate}, ${this.state}): terminating sandboxes and returning slices to scheduler.`);\n\n // [ 'initial', 'ready', 'reconnecting', 'stopping', 'stopped', 'paused', 'broken']\n if (this.state.in(['stopping', 'stopped', 'reconnecting']))\n {\n this.warning(`Supervisor.stopWork was called when supervisor is in state ${this.state.valueOf()}.`, 'Please either wait and try again or restart worker.');\n return;\n }\n else if (this.state.is('initial'))\n {\n this.warning('Cannot call stopWork before worker has started. Please either wait and try again or restart worker.');\n return;\n }\n this.state.set(['ready', 'paused', 'broken'], 'stopping');\n\n this.dcp4.instantiateAllConnections();\n\n // Do not enter is-screen-saver-active logic in the sandbox handle 'end' event handler.\n this.evaluator.pauseSandboxHandleEndHandler = true;\n\n if (forceTerminate)\n return this.abort();\n else\n {\n const slicesToReturn = [];\n for (const jm of this.jobManagerInventory)\n slicesToReturn.push(...jm.queuedSlices);\n\n const reason = `stopWork returning all non-finished slices that are not working`;\n this.returnSlices(slicesToReturn, reason);\n\n for (let k = 0; k < 3; k++)\n {\n await new Promise((resolve) => {\n // Count the slices that have been working or close-to-working but haven't submitted results yet.\n let activeSliceCount = 0;\n for (const jm of this.jobManagerInventory)\n activeSliceCount += jm.activeSlices.length;\n // When no active slices we're done.\n if (activeSliceCount === 0)\n resolve();\n // When no work can be completed we return all slices and leave.\n if (doNotWaitForWork())\n {\n this.returnAllSlices();\n resolve();\n }\n selectiveDebug() && console.debug(`StopWork: waiting for ${activeSliceCount} working slices to finish`, k);\n // Resolve and finish stopWork once all sandboxes have finished submitting their results.\n this.worker.on('result', () => {\n selectiveDebug() && console.debug(`StopWork: result handler, activeSliceCount ${activeSliceCount-1}`);\n if (--activeSliceCount === 0)\n {\n this.warning('All sandboxes empty, stopping worker and closing all connections');\n resolve();\n }\n });\n this.on('evalDown', () => {\n this.warning('Evaluator is down.', 'Force return all slices to scheduler, stopping worker and closing all connections.');\n this.returnAllSlices();\n resolve();\n });\n });\n }\n\n for (const jm of this.jobManagerInventory)\n this.safeEmit(jm.jobHandle, 'flush');\n\n if (selectiveDebug())\n {\n console.debug(`stopWork(${this.state.valueOf()}): After waiting for working slices to finish: workingSbxes: ${this.workingSandboxCount}, totalSbxes: ${this.sandboxInventory.length}, jobs: ${this.jobManagerInventory.length}`);\n this.jobManagerInventory.forEach((jm) => {\n console.debug('stopWork job', jm.identifier);\n console.debug(jm.countSliceStr('stopWork'));\n });\n }\n }\n\n return this.postStopShutdown();\n }\n\n /**\n * Purge all 
traces of the job.\n * @param {JobManager} jobManager\n */\n purgeJob (jobManager)\n {\n selectiveDebug() && console.debug(`Supervisor.purgeJob ${jobManager.identifier}.`);\n // If the slice from a job never completes and the job address exists in the ringBufferofJobs,\n // then we remove it to allow for another slice (from the same job) to be obtained by fetchTask\n this.ringBufferofJobs.buf = this.ringBufferofJobs.filter(element => element !== jobManager.address);\n this.jobManagerInventory.delete(jobManager);\n this.moduleCache.removeJob(jobManager.address);\n this.dbg.cleanUpDeadJob(jobManager.address);\n jobManager.destroy();\n }\n\n // _Idx\n //\n // roundRobinSlices, makeJobSelectionCursor, handleSandboxWorkError, handleFailedSlice\n //\n\n /**\n * Round-robin through the job managers, picking 1 slice to run each time.\n * Try to have the same number of working sandboxes for each job.\n * Try to run a slice on every available sandbox.\n *\n * The basic idea behind the scheduling of slices in this implementation is to keep as\n * many slices from different jobs running as possible, so as to reduce the likelihood\n * of resource contention between sandboxes.\n *\n * Slices are scheduled based on the following ruleset:\n * 1) cursor = makeJobSelectionCursor(), then cursor.next() returns a slice chosen as follows.\n * 2) Let concurrency range from 1 to maxWorkingCores.\n * 3) For a given concurrency, let readyJobs be all jobs such that jobMan.workingSliceDensity < concurrency.\n * 4) Do an ascending sort of readyJobs wrt jobMan.emaTotalTime.\n * 5) Pick a slice from the longest job in readyJobs that doesn't have any executing slices.\n * 6) Alternately shift a slice from readySlices vs choose a slice from a random nearly finished job, and remove slice from readySlices.\n * 7) When there are no more almost finished jobs with slices, shift slices from readyJobs.\n * 8) Jobs which have slicePriority set by the task-distributor may have slices chosen ahead of the above algorithm.\n * 9) Jobs with a slicePriority closer to 1 are more likely to be chosen.\n * 10) After finishing concurrency at maxWorkingCores, cursor.next() returns null, so create a new cursor.\n * @returns {Promise<any>}\n */\n roundRobinSlices ()\n {\n //\n // Should we try to put all runSlice promises in an array and return Promise.all(runslice-promises) ?\n //\n try\n {\n /**\n * The amount of space available for the CPU-component of slices to run in sandboxes.\n * If space is 2.5 and there are 6 slices with density 0.4, and there are enough non-working usable\n * sandboxes, then all 6 slices will be scheduled to run.\n * @type {number}\n */\n const unusedCoreSpace = this.unusedCoreSpace;\n /**\n * The number of sandboxes not currently being used.\n * @type {number}\n */\n const unusedSandboxCount = this.unusedSandboxCount;\n /**\n * The amount of space available for the GPU-component of slices to run in sandboxes.\n * @type {number}\n */\n const unusedGPUSpace = this.unusedGPUSpace;\n if (unusedCoreSpace < common.doNotSchedule || this.roundRobinBarrier || unusedSandboxCount < 1)\n {\n selectiveDebug2() && console.debug('RRS: bail early space/barrier/unusedSlots', unusedCoreSpace, this.roundRobinBarrier, unusedSandboxCount);\n return;\n }\n // roundRobinBarrier is a barrier for the slice execution path.\n this.roundRobinBarrier = true;\n if (this.evaluator.down && this.evaluator.createSandboxRefCount > 0)\n return;\n selectiveDebug2() && console.debug('BarrierState:RRS:', this.fetchTaskBarrier, 
this.roundRobinBarrier);\n\n if (selectiveDebug2())\n {\n this.jobManagerInventory.forEach((jobMan) => {\n const { unassigned, ready, reserved, working, workdone, complete, failed, finished } = jobMan.dumpSlices ('RRS', false, false);\n console.debug(`RRS(${jobMan.identifier},${unusedSandboxCount},${unusedCoreSpace}): u/r/rsv/w/wd/c/f/fsh ${unassigned}/${ready}/${reserved}/${working}/${workdone}/${complete}/${failed}/${finished}`, jobMan.identifier, this.sandboxInventory.length);\n });\n }\n\n if ( false || selectiveDebug())\n {\n let totalReady = 0, totalReadyDensity = 0;\n for (const jobMan of this.jobManagerInventory)\n {\n const currentReady = jobMan.readySlices.length;\n const currentReadyDensity = jobMan.readySlices.length * jobMan.estimateDensity;\n totalReady += currentReady;\n totalReadyDensity += currentReadyDensity;\n console.debug(`RRS: job ${jobMan.identifier}, density ${jobMan.estimateDensity}, readySlices ${currentReady}, readyDensity ${currentReadyDensity}`);\n }\n console.debug(`RRS: space ${unusedCoreSpace}, unusedSandboxCount ${unusedSandboxCount}, totalReady ${totalReady}, totalReadyDensity ${totalReadyDensity}`);\n }\n\n /** @type {Slice[]} */\n const slices = [];\n /** @type {number} */\n let density = 0;\n /** @type {number} */\n let gpuDensity = 0;\n\n const isSpaceAvailable = (density) => {\n const result = density < unusedCoreSpace && slices.length < unusedSandboxCount;\n selectiveDebug2() && console.debug('RRS: isSpaceAvailable', density < unusedCoreSpace, slices.length < unusedSandboxCount);\n return result;\n }\n\n // When the cursor is almost done and RRS tries to schedule slices,\n // it makes sense to recreate the cursor once to ensure enough slices can be pulled from cursor.\n let recreateCursorCount = 0;\n\n while (isSpaceAvailable(density + common.schedulingSlop))\n {\n // Get existing cursor or create new one.\n if (!this.cursor)\n this.cursor = this.makeJobSelectionCursor();\n\n // Get the next slice, then check to see whether it can be used.\n const slice = this.cursor.next();\n if (!slice)\n {\n if (/*!okToSchedule ||*/ ++recreateCursorCount > 1)\n {\n this.cursor = null;\n break;\n }\n // Start a new cursor.\n this.cursor = this.makeJobSelectionCursor();\n continue;\n }\n let okToSchedule = true\n const job = slice.jobManager;\n density += job.estimateDensity;\n if (job.useGPU)\n {\n okToSchedule = canScheduleGPU(this.maxWorkingGPUs);\n if (okToSchedule)\n {\n gpuDensity += job.estimateGPUDensity;\n okToSchedule = (gpuDensity <= unusedGPUSpace);\n selectiveDebug2() && console.debug(`RRS: GPU scheduling(${okToSchedule},${this.workingSliceCount},${density.toFixed(7)},${unusedCoreSpace.toFixed(7)}): gpuDensity/gpuSpace ${gpuDensity.toFixed(7)}/${unusedGPUSpace.toFixed(7)}, jobGPUDensity/jobCPUDensity ${job.estimateGPUDensity.toFixed(7)}/${job.estimateDensity.toFixed(7)}`);\n }\n }\n if (okToSchedule && density <= unusedCoreSpace + common.schedulingSlop) // Ok, if it's only over by a little bit.\n slices.push(slice);\n else\n {\n slice.unReserve();\n density -= job.estimateDensity;\n if (job.useGPU)\n gpuDensity -= job.estimateGPUDensity;\n else\n this.cursor.push(slice); // If useGPU, then skip pulling a slice from job\n break;\n }\n selectiveDebug2() && console.debug('RRS: density/space/numSlices/unusedSlots/jobDensity', density, unusedCoreSpace, slices.length, unusedSandboxCount, job.estimateDensity);\n }\n\n selectiveSupEx() && density > 0 && console.debug(`roundRobinSlices(${this.workingSliceCount},${this.workingSliceDensity}): Found density 
${density.toFixed(7)}/${unusedCoreSpace} with ${slices.length} slices:`, slices.map((slice) => slice.identifier), this.jobManagerInventory.map((jm) => `${jm.identifier}:${jm.estimateDensity.toFixed(7)}:${jm.emaSliceTime.toFixed(0)}`));\n\n // Execute the slices.\n if (slices.length > 0)\n {\n const lastSlice = slices.pop();\n for (const slice of slices)\n slice.jobManager.runSlice(slice);\n return lastSlice.jobManager.runSlice(lastSlice);\n }\n }\n finally\n {\n this.roundRobinBarrier = false;\n }\n }\n\n /**\n * @private\n * @callback cbNext\n * @returns {Slice}\n */\n /**\n * @private\n * @callback cbPush\n * @param {Slice} slice\n */\n\n /**\n * Factory function which instantiates a JobSelectionCursor. A JobSelectionCursor\n * steps the order that job slices should be selected for execution in the supervisor,\n * given the current state of the supervisor and the availability of jobs when the\n * inventory was snapshot. The entire slice scheduling algorithm is represented by\n * this cursor.\n *\n * The basic idea behind the scheduling of slices in this implementation is to keep as\n * many slices from different jobs running as possible, so as to reduce the likelihood\n * of resource contention between sandboxes.\n *\n * Slices are scheduled based on the following ruleset:\n * 1) cursor = makeJobSelectionCursor(), then cursor.next() returns a slice chosen as follows.\n * 2) Let concurrency range from 1 to maxWorkingCores.\n * 3) For a given concurrency, let readyJobs be all jobs such that jobMan.workingSliceDensity < concurrency.\n * 4) Do an ascending sort of readyJobs wrt jobMan.emaTotalTime.\n * 5) Pick a slice from the longest job in readyJobs that doesn't have any executing slices.\n * 6) Alternately shift a slice from readySlices vs choose a slice from a random nearly finished job, and remove slice from readySlices.\n * 7) When there are no more almost finished jobs with slices, shift slices from readyJobs.\n * 8) Jobs which have slicePriority set by the task-distributor may have slices chosen ahead of the above algorithm.\n * 9) Jobs with a slicePriority closer to 1 are more likely to be chosen.\n * 10) After finishing concurrency at maxWorkingCores, cursor.next() returns null, so create a new cursor.\n *\n * A custom selection of jobs can be passed in via the argument jobManagers.\n *\n * @param {JobManager[]} [jobManagers]\n * @returns {{ next: cbNext, push: cbPush }}\n */\n makeJobSelectionCursor (jobManagers)\n {\n /* Variables in this scope function as state information for next() */\n /** @type {JobManager[]} */\n var candidateJobs; // The jobs available with slices ready to execute.\n /** @type {JobManager[]} */\n var readyJobs; // The jobs from which slices are selected for a given concurrency level.\n /** @type {JobManager[]} */\n var preferedJobs = []; // Those jobs in readyJobs with a slicePreference property.\n /** @type {JobManager[]} */\n var lowDensityJobs = []; // Jobs with density <= 0.6, will be scheduled again.\n /** @type {Slice[]} */\n var pendingSlices = [];\n /**\n * Upper bound of the sum of the working slices densities allowed for a given job.\n * type {number}\n **/\n var concurrency = 0;\n /** type {number} */\n var jobIdx = 0;\n /** @type {boolean} */\n var lowDensityPass = false;\n\n const that = this;\n if (!jobManagers)\n jobManagers = this.jobManagerInventory;\n\n const jobStateStr = (jobs) => {\n return jobs.map((jm) => `${jm.identifier} : ${jm.readySlices.length} : ${jm.workingSliceDensity} : ${Math.round(jm.emaTotalTime)}`);\n }\n const jobState 
= (hdr, jobs) => { console.debug(hdr, jobStateStr(jobs)); }\n\n /**\n * Populate readyJobs with jobs which are ready and have at least one slice which is ready,\n * and whose # of working slice density is less than concurrency. A reserved slice has a\n * finite lifetime and if exceeded, transition it back to ready.\n * @param {JobManager[]} jobManagers\n * @param {number} concurrency\n */\n function filterJobsAndCheckOldReservedSlices (jobManagers, concurrency) // eslint-disable-line no-shadow\n {\n candidateJobs = [], readyJobs = [];\n const fiveMinutesAgo = Date.now() - that.options.reservedSliceLifetime;\n for (const jobMan of jobManagers)\n {\n if (!jobMan.ready) continue;\n let readyCount = 0;\n for (const slice of jobMan.sliceInventory)\n {\n if (slice.isReady) readyCount++;\n else if (slice.isReserved && fiveMinutesAgo > slice.startTime)\n {\n slice.unReserve();\n readyCount++;\n }\n }\n if (readyCount > 0)\n {\n candidateJobs.push(jobMan);\n if (jobMan.workingSliceDensity < concurrency) readyJobs.push(jobMan);\n }\n }\n }\n\n function seed (concurrency) // eslint-disable-line no-shadow\n {\n /* Reset. */\n jobIdx = 0;\n\n /* Populate readyJobs with jobs which are ready and have at least one slice which is ready,\n and whose # of working slice density is less than concurrency. */\n filterJobsAndCheckOldReservedSlices(jobManagers, concurrency);\n // candidateJobs = jobManagers.filter((jobMan) => jobMan.readySlices.length > 0);\n // readyJobs = candidateJobs.filter((jobMan) => jobMan.workingSliceDensity < concurrency);\n\n if (!lowDensityPass && lowDensityJobs.length === 0)\n lowDensityJobs = jobManagers.filter((jm) => jm.estimateDensity > 0 && jm.estimateDensity <= 0.6);\n\n if (readyJobs.length > 1)\n {\n /* Asc sort by shortest average slice completion time. */\n const shortestSliceJobs = readyJobs.sort((a, b) => Math.round(a.emaTotalTime) - Math.round(b.emaTotalTime));\n const almostDoneIndices = shortestSliceJobs.filter((jm) => jm.almostDone).map((_, idx) => idx);\n readyJobs = [];\n\n /* Find longest job that isn't working. */\n for (let k = shortestSliceJobs.length - 1; k >= 0; k--)\n {\n const jobMan = shortestSliceJobs[k];\n if (jobMan.isNotWorking)\n {\n readyJobs.push(jobMan);\n shortestSliceJobs.splice(k, 1);\n break;\n }\n }\n\n /* Alternate the next shortest slice with a random almost done job. */\n if (almostDoneIndices.length > 0)\n {\n while (shortestSliceJobs.length > 0)\n {\n readyJobs.push(shortestSliceJobs.shift());\n if (almostDoneIndices.length < 1)\n break;\n else\n {\n const almostDoneIdx = almostDoneIndices[Math.floor(Math.random() * almostDoneIndices.length)];\n readyJobs.push(shortestSliceJobs[almostDoneIdx]);\n shortestSliceJobs.splice(almostDoneIdx, 1);\n }\n }\n }\n if (shortestSliceJobs.length > 0)\n readyJobs.push(...shortestSliceJobs);\n }\n /* Populate preferedJobs with jobs from readyJobs which also have a slicePreference set. 
*/\n preferedJobs = candidateJobs.filter((jm) => jm.hasOwnProperty('slicePreference'));\n selectiveDebug2() && jobState(`makeJobSelectionCursor:seed(${concurrency}): readyJobs:`, readyJobs);\n }\n\n /**\n * Each invocation of next() identifies one slice to run, or returns false if none can run.\n * @returns {Slice}\n */\n function next ()\n {\n if (pendingSlices.length > 0)\n {\n const slice = pendingSlices.pop();\n return slice.markAsReserved();\n }\n\n if (concurrency === 0)\n seed(++concurrency);\n\n selectiveDebug2() && console.debug(`makeJobSelectionCursor(cc/idx/ready/working):next(${concurrency},${jobIdx},${readyJobs.length},${that.workingSliceDensity}): maxWorkingCores ${that.maxWorkingCores}: begin`);\n while (true)\n {\n if (jobIdx >= readyJobs.length)\n {\n if (++concurrency > that.maxWorkingCores)\n break;\n seed(concurrency);\n }\n\n if (readyJobs.length < 1)\n {\n if (candidateJobs.length < 1)\n break;\n continue; /* No ready jobs at current concurrency level. */\n }\n\n selectiveDebug2() && console.debug(`makeJobSelectionCursor:next(${concurrency},${jobIdx},${readyJobs.length},${that.workingSliceDensity}): before loop`);\n\n /* Schedule a prefered job slice based on random chance. */\n if (preferedJobs.length > 0)\n {\n let prioRan = Math.random();\n let list = preferedJobs.filter((jm) => jm['slicePreference'] >= prioRan);\n\n if (list.length > 0)\n {\n const jobMan = list[list.length * Math.random()];\n const slice = jobMan.reserveOneSlice();\n if (slice)\n return slice;\n }\n }\n\n /* Schedule a slice from next job; jobs are in increasing order of estimated run time. */\n while (jobIdx < readyJobs.length)\n {\n const jobMan = readyJobs[jobIdx];\n const slice = jobMan.reserveOneSlice();\n if ( false || selectiveDebug2())\n {\n slice && console.debug(`makeJobSelectionCursor:next(${concurrency},${lowDensityPass},${jobIdx},${readyJobs.length},${that.workingSliceDensity}): found slice(slice:ready:working)`, `${slice.identifier} : ${slice.jobManager.readySlices.length} : ${slice.jobManager.workingSliceDensity}`);\n !slice && console.debug(`makeJobSelectionCursor:next(${concurrency},${lowDensityPass},${jobIdx},${readyJobs.length},${that.workingSliceDensity}): no slices ready for job`, jobMan.identifier);\n }\n jobIdx++;\n if (slice)\n return slice;\n }\n\n /*\n * We did not schedule a slice with current seed. We need to re-seed to look for newly-available work\n * and sandboxes, ratcheting up the concurrency (max # of each job running) until we find something.\n */\n selectiveDebug2() && console.debug(`makeJobSelectionCursor:next(${concurrency},${jobIdx},${readyJobs.length},${that.workingSliceDensity}): after loop`);\n }\n if (!lowDensityPass && lowDensityJobs.length > 0)\n {\n jobManagers = lowDensityJobs;\n concurrency = 0;\n lowDensityPass = true;\n return next();\n }\n selectiveDebug2() && console.debug(`makeJobSelectionCursor:next(${concurrency},${jobIdx},${readyJobs.length},${that.workingSliceDensity}): null`, lowDensityPass);\n return null; /* Did not find any more work that fits. */\n }\n function push (slice) { pendingSlices.push(slice); }\n\n return { next, push };\n }\n\n\n /**\n * Handle sandbox.work(...) errors.\n * @todo The orginal code from 2019 did not terminate sandbox when not SandboxError and Sandbox code didn't already terminate. 
Do we want to try that?\n * The old 2019 sandbox code terminated upon error in start, assign, resetState, describe, applyRequirements and work.\n * So maybe that 2019 terminate yoga was a bunch of hooie.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @param {Error} error\n * @returns {string}\n */\n handleSandboxWorkError (sandbox, slice, error)\n {\n if (debugBuild && !(slice.isWorking || slice.isWorkDone)) // Sanity. Exception should never fire.\n throw new Error(`handleSandboxWorkError: slice ${slice.identifier} must be WORKING.`);\n\n /** @type {boolean} */\n const isSandboxError = error instanceof SandboxError;\n /** @type {string} */\n let reason;\n const jobAddress = common.truncateAddress(slice.jobAddress);\n\n if (isSandboxError)\n reason = error['errorCode']\n else\n {\n // This error was unrelated to the work being done.\n reason = 'Slice has failed to complete execution';\n if (!error)\n error = new Error(`Slice ${slice.sliceNumber} in state ${slice.state} of job ${jobAddress} failed to complete execution`);\n }\n selectiveDebug() && console.debug('handleSandboxWorkError', slice.identifier, error);\n\n let errorString, onlyDisplayErrorString = true;\n if (error.name === 'EWORKREJECT')\n {\n reason = 'EWORKREJECT'; // The status.js processing does not have a case for 'EWORKREJECT'.\n errorString = !slice.hasBeenRejected\n ? `Slice rejected work: ${error.message}.`\n : `Slice rejected work twice; terminate job: ${error.message}.`\n error.stack = 'Sandbox was terminated by work.reject()';\n this.handleWorkReject(slice, error);\n }\n else\n {\n if (!this.evaluator.down)\n {\n /** Do we to be more selective when we retry a slice? */\n if (/*!isSandboxError ||*/ slice['useRetryLogic'])\n {\n slice['sandboxErrorCount'] = ( slice['sandboxErrorCount'] ?? 0) + 1;\n if (slice['sandboxErrorCount'] <= this.options.maxSliceRetries)\n slice.resetState(); // Try to reuse the slice.\n }\n }\n if (!slice.isReady)\n {\n selectiveDebug() && console.debug(`handleSandboxWorkError: returning slice ${slice.identifier}`);\n this.returnSlice(slice, reason)\n .finally (() => {\n this.handleFailedSlice(slice, error)\n });\n }\n\n switch (reason)\n {\n case 'ENOPROGRESS':\n errorString = 'No progress error in sandbox.';\n break;\n case 'ESLICETOOSLOW':\n errorString = 'Slice too slow error in sandbox.';\n break;\n case 'EPERM_ORIGIN':\n errorString = `Could not fetch data; origin not allowed: ${error.message}.`;\n break;\n case 'EFETCH':\n errorString = `Could not fetch data: ${error.message}.`;\n break;\n case 'EUNCAUGHT':\n onlyDisplayErrorString = false;\n errorString = `Uncaught error in sandbox: ${error.message}.`;\n break;\n default:\n onlyDisplayErrorString = false;\n errorString = `Slice failed in sandbox: ${error.message}.`;\n break;\n }\n }\n\n // Always terminate sandbox.\n this.returnSandbox(sandbox);\n\n // Always display max info under debug builds, otherwise maximal error.\n // messages are displayed to the worker, only if both worker and client agree.\n const displayMaxInfo = slice.jobManager.displayMaxDiagInfo;\n\n const errorObject = {\n jobAddress,\n sliceNumber: slice.sliceNumber,\n sandbox: sandbox.id,\n jobName: sandbox.public ? 
sandbox.public.name : 'unnamed',\n };\n\n if (!displayMaxInfo && onlyDisplayErrorString)\n this.error(errorString, '', '', true);\n else\n {\n Object.entries(errorObject).forEach(([k,v]) => (errorString += `\\n ${k}: ${v}`));\n this.error(errorString, error, '', true);\n }\n\n return reason;\n }\n\n /**\n * Slice has thrown error during execution:\n * Mark slice as failed, compensate when job is dicrete, emit events.\n * @param {Slice} slice\n * @param {Error} error\n */\n handleFailedSlice (slice, error)\n {\n debugging('supervisor') && console.debug(`handleFailedSlice: ${slice.identifier}`, error);\n slice.collectResult(error, false /*success*/);\n\n // If the slice from a job never completes and the job address exists in the ringBufferofJobs,\n // then we remove it to allow for another slice (from the same job) to be obtained by fetchTask\n this.ringBufferofJobs.buf = this.ringBufferofJobs.filter(element => element !== slice.jobAddress);\n\n this.workerEmit( 'result', error);\n this.jobEmit(slice, 'result', error);\n }\n\n // _Idx\n //\n // returnSlices, returnSlice, emitProgressReport\n //\n\n /**\n * Bulk-return multiple slices, possibly for assorted jobs.\n * Returns slices to the scheduler to be redistributed.\n * Called in the sandbox terminate handler and purgeAllWork(jobAddress)\n * and stopWork(forceTerminate).\n *\n * @param {Slice[]} slices - The slice candidates to check if they can be returned to the scheduler.\n * @param {string} reason - Reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'EPERM_ORIGIN', 'EFETCH', 'unknown'.\n * @returns {Promise<*>} - Response from the scheduler.\n */\n returnSlices (slices, reason)\n {\n /** @param {Slice[]} verifiedSlices */\n const compressPayload = (verifiedSlices) => {\n assert(verifiedSlices?.length > 0);\n if (verifiedSlices.length > 1)\n return {\n worker: this.workerId,\n slices: common.constructReturnSliceBuckets(verifiedSlices, reason),\n };\n return verifiedSlices[0].getReturnMessagePayload(this.workerId, reason);\n }\n\n if (!slices || !slices.length)\n return Promise.resolve();\n\n debugging('supervisor') && console.debug(`Supervisor.returnSlices(${this.state}): Returning slices`, slices.map(slice => slice.identifier));\n\n // Only return those slices which still exist in their respective jobManagers sliceInventory .\n const verifiedSlices = slices.filter((slice) => slice.jobManager.removeSlice(slice));\n if (verifiedSlices.length > 0)\n {\n selectiveSupEx() && console.debug('Supervisor.returnSlices: Returning slices', verifiedSlices.map(slice => slice.identifier));\n return this.dcp4.sliceReturn(compressPayload(verifiedSlices), slices, reason);\n }\n return Promise.resolve();\n }\n\n /**\n * Takes a slice and returns it to the scheduler to be redistributed.\n * Usually called when an exception is thrown by sandbox.work(...) 
.\n * Or when the supervisor tells it to forcibly stop working.\n *\n * @param {Slice} slice - The slice to return to the scheduler.\n * @param {string} reason - Reason for the return: ''ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'EPERM_ORIGIN', 'EFETCH', 'unknown'.\n * @returns {Promise<*>} - Response from the scheduler.\n */\n returnSlice (slice, reason) { return this.returnSlices([ slice ], reason); }\n\n /**\n * Send beacon to status.js for 'progress' and sliceStatus.scheduled.\n *\n * Run in an interval created in the ctor.\n * @returns {void|Promise<*>}\n */\n emitProgressReport ()\n {\n const readySlices = [], workingSlices = [];\n this.jobManagerInventory.forEach((jobManager) => {\n readySlices.push(...jobManager.readySlices);\n workingSlices.push(...jobManager.workingSlices);\n });\n /** @type {SliceObj[]} */\n const slices = common.constructSliceBuckets( readySlices, sliceStatus.scheduled );\n common.constructSliceBuckets( workingSlices, 'progress', slices );\n\n debugging('supervisor') && console.debug('emitProgressReport:', stringify(slices));\n\n if (slices.length > 0)\n {\n const payload = { worker: this.workerId, slices };\n return this.dcp4.safeRSStatus(payload, 'Failed to emit progress report');\n }\n }\n\n // _Idx\n //\n // jobQuanta, repoMan, predictLoad(viz., clairvoyance), waitUntilWorkIsReady, generateWorkerComputeGroups\n //\n\n /**\n * For a given job, the scheduler stores an EMA approximation of slice completion time.\n * However, each worker also tracks the same information and the ratio of local-info to\n * scheduler-info (viz., global-info) is returned by this.jobQuanta so we can tell the\n * task distributor how much work to return from fetchTask so that the work actually takes\n * 5 minutes to complete when using all the worker sandboxes.\n * @returns {Object<string, number>}\n */\n jobQuanta ()\n {\n //\n // Prevent wild swings of this.defaultQuanta, which is roughly the ratio of\n // local_worker_slice_time / jobPerfData_measured_slice_time.\n // We limit this ratio to be between 1/8th and 8.\n const minQuanta = 0.125, maxQuanta = 8.0;\n //\n // Because there will be slgiht differences between local_worker_slice_time and\n // jobPerfData_measured_slice_time even when the worker is the only worker hooked\n // up to the DCP scheduler, we prove a little rounding. The rounding is 1/32 buckets.\n const discreteIncrement = 0.03125, discreteIncrementInverse = 32;\n\n /** @type {Object<string, number>} */\n const quanta = { 0: 1 };\n let averageLocalTime = 0, averageGlobalTime = 0;\n for (const jobMan of this.jobManagerInventory)\n {\n if (jobMan.emaSliceTime > 0 && jobMan.globalTime > 0)\n {\n quanta[jobMan.address] = jobMan.emaSliceTime;\n /** XXXpfr @todo Should we be using TotalTime here? */\n averageLocalTime += jobMan.emaSliceTime;\n averageGlobalTime += jobMan.globalTime;\n selectiveDebug2() && console.debug('jobQuanta: job state', this.dbg.sliceSandboxStr, `l-density/g-density, ${jobMan.estimateDensity}/${jobMan.metrics?.sliceCPUDensity}`, `local/global, ${jobMan.emaSliceTime}/${jobMan.globalTime}`);\n }\n }\n\n if (averageLocalTime && averageGlobalTime)\n {\n /** @todo XXXpfr Add 1 stddev? */\n // alpha=0.1 gives an effective period of 19\n const alpha = 0.1;\n this.localTime = nextEma(this.localTime, averageLocalTime, alpha);\n this.globalTime = nextEma(this.globalTime, averageGlobalTime, alpha);\n this.defaultQuanta = this.localTime / this.globalTime;\n\n // Discretize by discreteIncrement increments.\n this.defaultQuanta = (this.defaultQuanta > 1\n ? 
Math.floor(discreteIncrementInverse * this.defaultQuanta)\n : Math.ceil(discreteIncrementInverse * this.defaultQuanta)) * discreteIncrement;\n\n // Enforce reasonable cap and floor to keep things from getting too crazy.\n this.defaultQuanta = Math.min(Math.max(this.defaultQuanta, minQuanta), maxQuanta);\n\n // Fake jobAddress '0' to represent unknown jobs.\n quanta['0'] = this.defaultQuanta;\n }\n else\n this.defaultQuanta = 1.0;\n\n selectiveDebug() && console.debug(`jobQuanta: defaultQuanta ${quanta['0']}, this.localTime ${this.localTime}/${averageLocalTime}, this.globalTime ${this.globalTime}/${averageGlobalTime}, quanta:`, quanta);\n if (common.debugQuanta())\n {\n console.debug('localRawData:', this.dbg.localRawData);\n console.debug('localData:', this.dbg.localData);\n console.debug('globalData:', this.dbg.globalData);\n }\n return quanta;\n }\n\n /**\n * @todo XXXpfr Should we not schedule long slices to a worker with too low defaultQuanta?\n *\n * When the estimated time to completion of all work is more than\n * repoManMultiplier * targetTaskDuration * this.maxWorkingCores,\n * return slices until the excess is removed.\n * Be fair. Round-robin over all jobs until excess is eliminated.\n * Kill the long jobs 1st.\n */\n repoMan()\n {\n const threshold = this.options.repoManMultiplier * this.options.targetTaskDuration * this.maxWorkingCores;\n const workRemaining = this.workRemaining;\n let excess = workRemaining - threshold;\n selectiveDebug() && console.debug(`repoMan: excess ${excess}, workerRemaining ${workRemaining}, threshold ${threshold}`);\n if (excess > 0)\n {\n const slices = [];\n /** @param {JobManager[]} jmi */\n const returnFrom = (jmi) => {\n while (true)\n {\n const _excess = excess;\n for (const jobMan of jmi)\n {\n const _readySlices = jobMan.readySlices;\n if (_readySlices.length > 0)\n {\n const slice = _readySlices[0];\n slice.repoMan(); // Mark as FINISHED\n slices.push(slice);\n excess -= jobMan.adjSliceTime;\n if (excess <= 0)\n break;\n }\n }\n if (_excess === excess || excess <= 0)\n break;\n }\n }\n // Be fair. 
Round-robin over all jobs until excess is eliminated.\n // Except the long jobs are killed 1st.\n const longJobs = this.jobManagerInventory.filter((jobMan) => jobMan.emaSliceTime >= this.options.targetTaskDuration);\n if (longJobs.length > 0)\n returnFrom(longJobs);\n if (excess > 0)\n returnFrom(this.jobManagerInventory);\n selectiveDebug() && (slices.length > 0) && console.debug(`repoMan: excess ${excess}, workerRemaining ${workRemaining}, threshold ${threshold}, returned-slice-count ${slices.length}`);\n this.returnSlices(slices, 'repoMan');\n }\n }\n\n /**\n * Predict the total reduction in density of working sandboxes timeSpanMs from now.\n * This function is called right before fetchTask, in order to calculate how much space is available.\n * @returns {{ queued: Slice[], working: number }}\n */\n predictLoad()\n {\n const timeSpanMs = this.options.prefetchInterval\n const queued = [];\n let working = 0;\n for (const jobMan of this.jobManagerInventory)\n {\n const { queued: jmQueued, working: jmWorking } = jobMan.predictLoad(timeSpanMs);\n queued.push(...jmQueued);\n // Optimize to short-circuit when queued > 1, because we won't call fetchWork in that case.\n if (queued.length > 1)\n break;\n working += jmWorking;\n }\n selectiveDebug() && console.debug(`Supervisor.predictLoad: queued ${queued.length}/${this.queuedSlices.length}, working ${working}/${this.workingSlices.length}`)\n return { queued, working };\n }\n\n /**\n * On the first call to fetchTask\n * or when the last call to fetchTask found nothing,\n * or when there are no ready slices,\n * wait until at least 1 job is ready with at least 1 ready slice.\n * @param {Array<Promise<any>>} jobManagerPromises\n * @returns {Promise<any>}\n */\n waitUntilWorkIsReady (jobManagerPromises)\n {\n if (this.waitForWork)\n {\n debugging('supervisor') && console.debug(`waitUntilWorkIsReady: promise count ${jobManagerPromises?.length}`);\n this.waitForWork = false;\n // Promise.any is supported in Node 15, Chrome 85, Edge 85, Firefox 79, Safari 14, Opera 71.\n // It was implemented in node and browsers in 2nd half of 2020, so there's a good chance many\n // customers will not have browsers that support it. And currently (Jan. 2023) DCP uses node 14.\n return Promise_any(jobManagerPromises);\n }\n // Flush microtask queue\n return a$sleepMs(0);\n }\n\n /**\n * Generate the workerComputeGroups property of the requestTask message.\n *\n * Concatenate the compute groups object from dcpConfig with the list of compute groups\n * from the supervisor, and remove the public group if accidentally present. Finally,\n * we transform joinSecrets/joinHash into joinHashHash for secure transmission.\n *\n * @note computeGroup objects with joinSecrets are mutated to record their hashes. This\n * affects the supervisor options and dcpConfig. 
Re-adding a joinSecret property\n * to one of these will cause the hash to be recomputed.\n */\n generateWorkerComputeGroups ()\n {\n return supShared.generateWorkerComputeGroups(this, this.dcp4.taskDistributor);\n }\n\n // _Idx\n //\n // availableSandboxSpace, fetchTask, addTaskToWorkload, fetchFromTD, clearUnusedJobManagersAndModuleCache\n //\n\n /**\n * Returns the number of unused sandbox slots to fill -- sent to fetchTask.\n * @param {Slice[]} queued\n * @param {number} working\n * @returns {number}\n */\n availableSandboxSpace (queued, working)\n {\n // If we find more than 1 queued slices, bail early.\n if (queued.length > 1)\n return 0; // We have more than 1 ready slices, no need to fetch.\n\n let longSliceCount = 0;\n if (queued.length < 1)\n this.waitForWork = true; // There are no ready slices.\n else if (queued[0].isLong)\n longSliceCount = 1;\n\n // There are almost no ready slices (there may be 0 or 1), fetch a full task.\n // The task is full, in the sense that it will contain slices whose\n // aggregate execution time is roughly this.maxWorkingCores * 5-minutes.\n // However, there can only be this.maxWorkingCores # of long slices on a worker,\n // Thus we need to know whether the last slice in this.readySlices() is long or not.\n // (A long slice has estimated execution time >= 5-minutes or is an estimation slice.)\n\n const numCores = this.maxWorkingCores - working - longSliceCount;\n selectiveDebug2() && console.debug('availableSandboxSpace', numCores, working, longSliceCount);\n return numCores;\n }\n\n /**\n * Ask the scheduler (task distributor) for work (Rq).\n * @param {object[]} [jobs=[]]\n * @returns {Promise<*>}\n */\n async fetchTask (jobs = [])\n {\n if (!this.isReady)\n return;\n\n const now = Date.now();\n const { queued, working } = this.predictLoad();\n const unusedFutureCoreSpace = this.maxWorkingCores - working;\n if (unusedFutureCoreSpace < common.doNotSchedule)\n {\n debugging('supervisor') && console.debug('fetchTask: There are no unused sandbox slots.', now - this.lastTime);\n return;\n }\n \n // Record fetch start time.\n this.fetchTaskStarted = now;\n\n // We check for pruning about every 25 seconds, or when must prune level is reached.\n if (this.sandboxInventory.length > this.options.mustPruneSandboxLevel\n || now > this.lastPrune + this.options.pruneFrequency)\n {\n this.lastPrune = now;\n this.pruneSandboxes();\n }\n\n // Every 60 seconds check to see if the estimated time to completion of all work is more than\n // repoManMultiplier * this.targetTaskDuration() * this.maxWorkingCores,\n // and then return slices until the excess is removed.\n // Be fair. Round-robin over all jobs until excess is eliminated. 
Kill the long jobs 1st.\n if (now > this.lastRepoMan + this.options.repoManFrequency)\n {\n this.lastRepoMan = now;\n this.repoMan();\n }\n\n // There are 2 barriers wrt fetchTask,\n // 1) fetchTaskBarrier is a barrier for the task fetching from task distributor path.\n // 2) roundRobinBarrier is a barrier for the slice execution path.\n\n try\n {\n const cpuSpaceToFill = this.availableSandboxSpace(queued, working);\n selectiveDebug2() && console.debug('Supervisor.fetchTask', cpuSpaceToFill, queued.length, working);\n if (cpuSpaceToFill < 1)\n {\n debugging('supervisor') && console.debug('Supervisor.fetchTask: Sufficient slices exist, so start executing.', now - this.lastTime, cpuSpaceToFill, queued.length, working);\n return this.roundRobinSlices();\n }\n selectiveDebug2() && console.debug('fetchTask begin q/w/slots/space/future-space', queued.length, working, this.unusedSandboxCount, this.unusedCoreSpace, unusedFutureCoreSpace);\n\n if (this.fetchTaskBarrier)\n return;\n // fetchTaskBarrier is a barrier for the task fetching from task distributor path.\n this.fetchTaskBarrier = true;\n\n /* @todo XXXpfr Think about how to do targetLoad.longSlices better.\n * Ideas:\n * 1) While branchy Javascript is CPU bound, that doesn't mean hyperthreading isn't useful.\n * When a branch is mispredicted the whole CPU instruction pipeline is flushed, which is\n * a huge perf hit and while waiting to fill the pipeline again, a hyperthread can get\n * a whole bunch of work done.\n * 2) Setting the Sup2.cores.cpu to #lCores is probably too much, but I've had great success with\n * Sup2.cores.cpu = dcpConfig.supervisor.tuning.coreRatio.cpu * #lCores\n * Which is very close to optimal throughput of work done.\n * 3) When the scheduler is 1/2 very long slices, the short slices will tend to get starved.\n * The config property dcpConfig.scheduler.preventSliceStarvation when set to true (default false)\n * will always leave one vCore open for short slices in every worker. In the future, I want the\n * scheduler to detect short slice starvation and dynamical turn preventSliceStarvation on until\n * short slice starvation is alleviated and then turn it back off.\n * 4) maxSandboxes is currently set to\n * factor * Sup2.cores.cpu\n * where 1.2 <= factor <= 1.5 depending upon how many lCores a worker has. Where I assume that a\n * machine with a large number of lCores has sufficient memory to handle a bigger factor.\n * The factor boundaries can be adjusted in dcpConfig.supervisor, but I intend to also allow them\n * to be overridden at the dcpConfig.worker level, so if somebody has a 32 lCore machine with\n * 8GB of RAM they can adjust factor to be closer to 1. Ideally we could adjust the factor\n * boundaries at the job/CG level.\n */\n\n const request = {\n supervisor: this.version,\n numCores: cpuSpaceToFill, /** @deprecated This is for legacy schedulers. */\n numGPUs: this.maxWorkingGPUs, /** @deprecated This is for legacy schedulers. 
*/\n targetLoad: { cpu: cpuSpaceToFill, gpu: this.maxWorkingGPUs, longSlices: Math.floor(cpuSpaceToFill) },\n coreStats: this.options.getStatisticsCPU(),\n jobQuanta: this.jobQuanta(),\n capabilities: this.capabilities,\n paymentAddress: this.options.paymentAddress,\n jobAddresses: jobs.concat(this.options.jobAddresses || []), // When set, only fetches slices for these jobs.\n workerComputeGroups: this.generateWorkerComputeGroups(),\n minimumWage: this.options.minimumWage,\n loadedJobs: this.jobManagerInventory.map(jobMan => jobMan.address),\n readyJobs: this.jobManagerInventory.filter(jobMan => jobMan.ready).map(jobMan => jobMan.address),\n previouslyWorkedJobs: this.ringBufferofJobs.buf, // Only discrete jobs.\n rejectedJobs: this.rejectedJobs,\n };\n // Workers should be part of the public compute group by default.\n if (!booley(this.options.leavePublicGroup))\n request.workerComputeGroups.push(constants.computeGroups.public);\n\n debugging('supervisor') && console.debug('fetchTask is calling fetchFromTD', Date.now() - this.lastTime);\n\n // Call Task Distributor and handle response with this.addTaskToWorkload.\n return this.fetchFromTD(request, (response) => this.addTaskToWorkload(request, response));\n }\n catch (error)\n {\n this.fetchTaskBarrier = false;\n this.error('Supervisor.fetchTask failed!', error);\n }\n }\n\n /**\n * Callback for fetchFromTD.\n * @param {object} request\n * @param {object} response\n */\n async addTaskToWorkload (request, response)\n {\n const constructFetchHandle = (size, jobs, slices) => {\n return { \n fetchStart: this.fetchTaskStarted,\n fetchEnd: Date.now(),\n fetchSize: size,\n jobs,\n slices,\n };\n };\n\n try\n {\n /** @type {TDPayload} */\n const payload = response.payload;\n if (!response.success)\n {\n debugging() && console.debug('Task fetch failure; request=', request);\n debugging() && console.debug('Task fetch failure; response=', payload);\n this.error(`Unable to request task from scheduler; will try again on a new connection: payload ${stringify(payload)}`);\n return;\n }\n\n if (!payload.body?.newJobs) // No slices found.\n {\n // Reset first fetch logic.\n this.waitForWork = true;\n /**\n * The 'fetch' event fires when the stask distributor found no work.\n * @link https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n * @event Supervisor#fetch\n */\n this.workerEmit('fetch', constructFetchHandle(0, {}, {}));\n // There may be an extra slice to process.\n debugging('supervisor') && console.debug('Task distributor found no slices...');\n return this.roundRobinSlices();\n }\n\n /** @todo XXXpfr At this poin the line #'s are short by 42 -- figure out why. 
*/\n\n /*\n * payload: { TDPayload }\n * TDPayload: { owner: Address, signature: Signature, auth: Auth, body: Body };\n * Auth: { workerId: string, authSlices: Object<string, SliceMessage[]>, schedulerId: { address: Address }, jobCommissions: Object<string, { rate: number, account: number }> }\n * Body: { newJobs: Object<string, object>, task: Object<string, SliceMessage[]>, computeGroupJobs: Object<string, string[]>, computeGroupOrigins: Object<string, Object<string, string[]>>, schedulerConfig: {{ targetTaskDuration: number }} }\n *\n * NOTE: authorizationMessage has type AuthMessage\n */\n\n const { body, ...authorizationMessage } = payload;\n const { newJobs, task, schedulerConfig } = body;\n const newJobKeys = Object.keys(newJobs);\n const jobCount = newJobKeys.length;\n\n let jobSliceMap = task;\n if (jobSliceMap.length) /** @deprecated Task came from legacy scheduler */\n // @ts-ignore\n jobSliceMap = toJobMap(task, sliceMsg => sliceMsg);\n\n if (schedulerConfig) // Otherwise the default is 300 seconds.\n this.options.targetTaskDuration = schedulerConfig.targetTaskDuration;\n\n /*\n * Ensure all jobs received from the scheduler (task distributor) are:\n * 1. If we have specified specific jobs the worker may work on, the received jobs are in the specified job list\n * 2. If we are in localExec, at most 1 unique job type was received (since localExec workers are designated for only one job)\n * If the received jobs are not within these parameters, stop the worker since the scheduler cannot be trusted at that point.\n */\n if (request.jobAddresses?.length > 0 && !newJobKeys.every((ele) => request.jobAddresses.includes(ele)))\n {\n // \"fetchTask:\" because that should make sense to somebody that doesn't know the internals of Supervisor.\n this.error(\"fetchTask: Worker received slices it shouldn't have; rejecting the work and stopping.\");\n this.stopWork(true);\n return;\n }\n\n // Clear out job managers w/o any queued slices,\n // and remove corresponding job references from module cache.\n // When a cached module no longer has any job references it is removed from the cache.\n this.clearUnusedJobManagersAndModuleCache(newJobs);\n\n /** @todo XXXpfr Figure out how not to construct this every time. 
*/\n this.jobMap = {};\n this.jobManagerInventory.forEach(jobManager => {\n this.jobMap[jobManager.address] = jobManager;\n });\n\n selectiveDebug2() && console.debug(`addTaskToWorkload(${Date.now() - this.lastTime}): newJobs ${common.truncateAddress(newJobKeys)}, jobSliceMap ${common.compressJobMap(jobSliceMap, (s) => s.sliceNumber)}`);\n\n let sliceCount = 0;\n /** @type {Array<Promise<*>>} */\n const jobManagerPromises = [], jobs = {}, slices = {};\n // Populate the job managers with slices, creating new job managers when necessary.\n // Set up discrete job ring buffer.\n for (const [jobAddress, jobMessage] of Object.entries(newJobs))\n {\n /** @type {JobManager} */\n let jobManager;\n const sliceMessages = jobSliceMap[jobAddress];\n sliceCount += sliceMessages.length;\n\n if (this.jobMap.hasOwnProperty(jobAddress))\n {\n jobManager = this.jobMap[jobAddress];\n jobManager.update(jobMessage, sliceMessages, authorizationMessage);\n }\n else\n {\n // Add the slice messages to the job manager ctor, so that slice construction is after job manager is ready.\n jobManager = new JobManager(this, jobMessage, sliceMessages, authorizationMessage);\n this.jobMap[jobAddress] = jobManager;\n this.jobManagerInventory.push(jobManager);\n\n // Populate the ring buffer based on job's discrete property.\n if (jobMessage.requirements.discrete && this.ringBufferofJobs.find(address => address === jobAddress) === undefined)\n this.ringBufferofJobs.push(jobAddress);\n }\n jobs[jobAddress] = jobManager.jobHandle;\n slices[jobAddress] = task[jobAddress].length;\n\n jobManagerPromises.push(jobManager.jobPromise);\n }\n\n const payloadLength = kvin.stringify(payload).length; /** @TODO - fix per DCP-3750 */\n /**\n * The 'fetch' event fires when the supervisor has found work from the task distributor.\n * @link https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n * @event Supervisor#fetch\n */\n this.workerEmit('fetch', constructFetchHandle(payloadLength, jobs, slices));\n\n const compressTask = () => { return common.compressJobMap(authorizationMessage.auth.authSlices); }\n selectiveSupEx() && console.debug(`addTaskToWorkload(${Date.now() - this.lastTime}): task: ${sliceCount}/${request.targetLoad.cpu}/${this.maxWorkingCores}, jobs: ${jobCount}, authSlices: ${compressTask()}, conversion:`, request.jobQuanta);\n\n // On the first call to fetchTask,\n // or when the last call to fetchTask found nothing,\n // or when there are no ready slices,\n // wait until at least 1 job with 1 slice is ready.\n await this.waitUntilWorkIsReady(jobManagerPromises);\n\n debugging('supervisor') && console.debug('addTaskToWorkload: Before calling roundRobinSlices; job states', this.jobManagerInventory.map((jm) => jm.identifier));\n\n // Start working on the new slices.\n return dcp_timers.setImmediate(() => this.roundRobinSlices());\n }\n catch (error)\n {\n this.workerEmit('fetch', error);\n this.error('Supervisor.fetchTask failed!', error);\n }\n finally\n {\n this.fetchTaskBarrier = false;\n }\n }\n\n /**\n * @private\n * @callback cbAddTaskToWorkload\n * @param {Response} response\n * @returns {Promise<void>}\n */\n\n /**\n * Call to fetch new slices from task distributor.\n * @param {*} request\n * @param {cbAddTaskToWorkload} addTaskToWorkload\n * @returns {Promise<any>}\n */\n async fetchFromTD (request, addTaskToWorkload)\n {\n selectiveDebug2() && console.debug('fetchFromTD begin; BarrierState:', this.fetchTaskBarrier, this.roundRobinBarrier);\n // Fetch a new task if we have 
insufficient slices queued, then start workers\n if (!this.fetchTaskBarrier)\n throw new Error('fetchTaskBarrier must be set when entering fetchFromTD.');\n\n this.dcp4.instantiateAllConnections();\n\n let fetchTimeout = dcp_timers.setTimeout(() => {\n this.fetchTaskBarrier = false;\n this.warning('Fetch exceeded timeout, will reconnect at next watchdog interval');\n this.dcp4.resetConnection('taskDistributor').catch(error => {\n this.error('Failed to close task-distributor connection', error);\n });\n this.dcp4.resetConnection('resultSubmitter').catch(error => {\n this.error('Failed to close result-submitter connection', error);\n });\n this.dcp4.instantiateAllConnections();\n }, 3 * 60 * 1000); // Max out at 3 minutes to fetch.\n // Allow workers and localExec to exit.\n fetchTimeout.unref();\n\n const finalize = () => {\n this.fetchTaskBarrier = false;\n if (fetchTimeout)\n dcp_timers.clearTimeout(fetchTimeout);\n fetchTimeout = null;\n }\n\n // Ensure result submitter and task distributor connections before fetching tasks.\n try\n {\n await Promise.all([\n this.dcp4.taskDistributor.keepalive(),\n this.dcp4.resultSubmitter.keepalive(),\n ]);\n }\n catch (error)\n {\n selectiveDebug() && console.debug('fetchTaskFromTD: Keep slices failed', error);\n this.warning('Failed to connect to result submitter, refusing to fetch slices.', 'Will try again at next fetch cycle.');\n this.dcp4.resetConnection('taskDistributor').catch(e => {\n this.error('Failed to close task-distributor connection', e);\n });\n this.dcp4.resetConnection('resultSubmitter').catch(e => {\n this.error('Failed to close result-submitter connection', e);\n });\n return finalize();\n }\n\n if (!this.dcp4.taskDistributor)\n {\n const msg = 'Unable to request task from scheduler; no connection to task distributor';\n this.warning(msg);\n this.workerEmit('fetch', new Error(msg));\n return finalize();\n }\n \n // The 'beforeFetch' event allows the user to cancel the requestTask request.\n let canceled = false;\n /**\n * The 'beforeFetch' event fires before the request is sent to requestTask in task distributor.\n * @link https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n * @event Supervisor#beforeFetch\n */\n this.workerEmit('beforeFetch', () => { canceled = true; })\n selectiveDebug() && canceled && console.debug('User canceled the fetch task.');\n if (canceled)\n return finalize()\n\n return this.dcp4.taskDistributor.request('requestTask', request)\n .then((response) => {\n addTaskToWorkload(response);\n // Success! Restore this.dcp4.taskDistributor delay to retryMinSleepMs (currently 32ms.)\n // Is there a better way to reset than explicit calls?\n this.delayManager.resetEBO('taskDistributor');\n return response;\n })\n .catch((error) => {\n this.workerEmit('fetch', error);\n this.error('Unable to request task from scheduler. 
Will try again on a new connection.', error);\n this.dcp4.resetConnection('taskDistributor');\n })\n .finally(() => {\n return finalize();\n });\n }\n\n /**\n * Remove all unreferenced jobs in this.jobManagerInventory and this.moduleCache.\n * Since job-managers are inserted into this.jobManagerInventory with a push, the job managers at the beginning are oldest.\n * Only delete #deleteCount of the oldest job-managers:\n * let deleteCount = this.jobManagerInventory.length - cachedJobsThreshold;\n * Edit cachedJobsThreshold to adjust the cache cleanup threshold.\n * @param {Object<string, number[]>} newJobMap - Jobs that should not be removed from this.jobManagerInventory and this.moduleCache.\n */\n clearUnusedJobManagersAndModuleCache (newJobMap)\n {\n const emptyJobs = [];\n for (const jobMan of this.jobManagerInventory) // Grab oldest 1st\n {\n if (!newJobMap[jobMan.address])\n {\n let isEmpty = true;\n for (const slice of jobMan.sliceInventory)\n if (slice.isQueuedOrActive)\n {\n isEmpty = false;\n break;\n }\n if (isEmpty)\n {\n // Walk through whole list to purge empty jobs with no assigned sandboxes to save.\n if (jobMan.assignedSandboxes.length < 1)\n this.purgeJob(jobMan);\n else\n emptyJobs.push(jobMan)\n }\n }\n }\n let deleteCount = this.jobManagerInventory.length - this.options.cachedJobsThreshold;\n if (deleteCount > 0)\n {\n selectiveDebug() && console.debug(`Supervisor.clearUnusedJobManagersAndModuleCache: deleteCount ${deleteCount}/${this.jobManagerInventory.length}/${this.options.cachedJobsThreshold}.`);\n for (const jobMan of emptyJobs) // Grab oldest 1st\n {\n this.purgeJob(jobMan);\n if (--deleteCount < 1)\n break;\n }\n }\n }\n\n // _Idx\n //\n // createSandbox, returnSandbox, hookUpSandboxListeners, pruneSandboxes\n //\n\n /**\n * Automatically handle when the evaluator is down.\n *\n * With the screensaver worker, when the screensaver goes down, so does the evaluator.\n * And when the screensaver starts running again, so does the evaluator. The evaluator\n * may be stopped and started again with sa worker running, and have good behavior.\n * However, browser workers cannot have their evaluators stopped without also stopping\n * the worker (otherwise file-a-bug...)\n *\n * @param {boolean} [throwError=false]\n * @returns {Promise<Sandbox>}\n */\n async createSandbox (throwError = false)\n {\n selectiveDebug2() && console.debug('createSandbox', this.sandboxInventory.length, Date.now() - this.lastTime);\n // See if there are any READY_FOR_ASSIGN sandboxes (viz., sandbox.isReadyForAssign is true.)\n // If the evaluator just came back up (while worker is still running) there should not be any non-assigned sandboxes.\n // We're only considering sa worker (e.g. screensaver worker), because browser workers cannot stop the\n // evaluator w/o stopping the worker (I think -- if not true, file-a-bug.)\n if (this.sandboxInventory.length > 0 && this.sandboxInventory[0].isReadyForAssign)\n {\n selectiveDebug2() && console.debug(`Supervisor.createSandbox: Found ready-for-assign sandbox ${this.sandboxInventory[0].identifier}`);\n return this.sandboxInventory.shift();\n }\n\n // If the evaluator cannot start (e.g. 
if the evalServer is not running),\n // then the while loop will keep retrying until the evalServer comes online.\n try\n {\n this.evaluator.createSandboxRefCount++;\n\n let retry = 0;\n while (true)\n {\n let sandbox;\n try\n {\n sandbox = new Sandbox(this, { ...this.options.sandboxOptions });\n selectiveDebug2() && console.debug(`Supervisor.createSandbox(${sandbox.id}): Calling sandbox.start: ${this.evaluator.createSandboxRefCount}, eval-down ${this.evaluator.down}`);\n this.hookUpSandboxListeners(sandbox);\n await sandbox.start();\n if (!this.capabilities)\n this.checkCapabilities(sandbox);\n if (this.evaluator.reallyDown)\n {\n this.evaluator.reallyDown = false;\n selectiveDebug() && console.debug('Supervisor.createSandbox: Evaluator is up again.', this.evaluator.createSandboxRefCount);\n this.jobManagerInventory.forEach((jobManager) => jobManager.resetSlices('createSandbox'));\n }\n return sandbox;\n }\n catch (error)\n {\n if (throwError)\n throw error;\n selectiveDebug() && console.debug(`Supervisor.createSandbox: Failed to start sandbox ${sandbox.identifier}`, this.evaluator.createSandboxRefCount, this.evaluator.down, error.message);\n if (error.code === 'ENOWORKER')\n throw new DCPError(\"Cannot use localExec without dcp-worker installed. Use the command 'npm install dcp-worker' to install the neccessary modules.\", 'ENOWORKER');\n\n if (throwError)\n throw error;\n\n // The evaluator may be down or shutting down, keep retrying.\n if ((retry % 60) === 0)\n this.warning('Failed to start a sandbox; will keep retrying; screensaver worker or evaluator may be down...');\n await a$sleepMs(1000 * Math.min(5, ++retry));\n }\n }\n }\n finally\n {\n this.evaluator.createSandboxRefCount--;\n }\n }\n\n /**\n * Remove sandbox from inventory and terminate.\n * @param {Sandbox} sandbox\n */\n returnSandbox (sandbox)\n {\n // If sandbox is not in this.sandboxInventory then sandbox must already be terminated\n // <==> this.sandboxInventory.includes(sandbox) || sandbox.isTerminated().\n selectiveDebug2() && console.debug(`returnSandbox: ${sandbox.identifier}`);\n if (common.removeElement(this.sandboxInventory, sandbox))\n sandbox.terminate(false);\n else\n {\n // If sandbox is not in this.sandboxInventory then sandbox must already be terminated\n if (common.displayMaxDiagInfo() && !sandbox.isTerminated) // Design assumption.\n throw new Error(`returnSandbox: Sandbox ${sandbox.identifier} has already been removed.`);\n }\n }\n\n /**\n * For a given sandbox, hook up all the Sandbox listeners.\n * @param {Sandbox} sandbox\n */\n hookUpSandboxListeners (sandbox)\n {\n sandbox.addListener('start', () => {\n if (!sandbox.slice) return;\n const payload = sandbox.slice.getMessagePayload(this.workerId, 'begin');\n return this.dcp4.safeRSStatus(payload, `Failed to send 'begin' status for slice ${sandbox.slice.identifier}`);\n });\n\n const that = this;\n // Sandbox error handler.\n sandbox.on('sandboxError', function Supervisor$sandboxError(error) {\n selectiveDebug() && console.debug(`Sandbox ${sandbox.identifier} sandboxError-handler; error while executing work function`, error);\n const slice = sandbox.slice;\n if (!slice?.isWorking) // Sanity -- warning should never fire.\n this.warning(`handleSandboxError: slice ${slice?.identifier} must be WORKING.`);\n if (slice)\n slice['useRetryLogic'] = true;\n that.returnSandbox(sandbox);\n });\n\n // Sandbox complete handler.\n // When any sandbox completes, go through the Supervisor.fetchTask protocol.\n sandbox.addListener('complete', () => {\n // Try not to 
call fetchTask unless there's something there.\n selectiveDebug2() && console.debug('Sandbox complete listener', this.fetchTaskBarrier, this.roundRobinBarrier, this.unusedSandboxCount, Date.now() - this.lastTime);\n if (!this.fetchTaskBarrier)\n this.fetchTask();\n });\n\n // If the sandbox terminated and we are not shutting down, then we should return all work which is\n // currently not being computed if all sandboxes are dead and the attempt to create a new one fails.\n sandbox.sandboxHandle.on('end', async () => {\n if (this.sandboxInventory.length > 0 && !this.evaluator.pauseSandboxHandleEndHandler)\n {\n selectiveDebug() && console.debug(`hookUpSandboxListeners: Sandbox \"${sandbox.identifier}\" terminated handler`, this.sandboxInventory.length, Date.now() - this.lastTime);\n\n // Does there exist a non-terminated sandbox?\n let allSandboxesTerminated = true;\n for (const sbx of this.sandboxInventory)\n if (!sbx.isTerminated)\n {\n allSandboxesTerminated = false;\n break;\n }\n\n if (allSandboxesTerminated && !this.evaluator.downInterlock)\n {\n //\n // When we get here, all sandboxes have been terminated.\n //\n this.evaluator.downInterlock = true;\n selectiveDebug() && console.debug('hookUpSandboxListeners: Try to create 1 sandbox in the sandbox-terminated-handler...', sandbox.identifier);\n await this.createSandbox(true /*throwError*/)\n .then((sbx) => {\n this.evaluator.reallyDown = false;\n // This is the only place where non-assigned sandboxes are added to this.sandboxInventory.\n this.sandboxInventory.unshift(sbx);\n selectiveDebug() && console.debug('Sandbox terminate handler was able to create new sandbox', sandbox.identifier);\n })\n .catch(() => {\n //\n // Since all sandboxes have been terminated, if we cannot create a new sandbox,\n // that probably means we're on a screensaver worker and the screensaver is down.\n // Try to submit results for completed slices, but return all other non-finished\n // slices to the scheduler -- after a brief delay.\n //\n selectiveDebug() && console.debug('Sandbox terminate handler cannot create new sandbox; evaluator is down', sandbox.identifier);\n this.evaluator.reallyDown = true;\n this.emit('evalDown');\n const delay = 60; // seconds\n this.jobManagerInventory.forEach((jm) => jm.evaluatorDownCleanup(delay));\n this.warning('Stopping all work.', 'Screensaver worker or evaluator may be down.');\n })\n .finally(() => {\n this.sandboxInventory = this.sandboxInventory.filter(sbx => !sbx.isTerminated);\n this.evaluator.shuttingDown = false;\n this.evaluator.downInterlock = false;\n });\n }\n }\n });\n }\n\n /**\n * Terminate extra sandboxes over the limit.\n * @todo XXXpfr Prioritize keeping expensive to assign sandboxes.\n */\n pruneSandboxes ()\n {\n this.sandboxInventory = this.sandboxInventory.filter((sandbox) => !sandbox.isTerminated);\n let pruneCount = this.sandboxInventory.length - this.options.maxSandboxes;\n if (pruneCount <= 0)\n return;\n\n selectiveDebug() && console.debug(`Supervisor.pruneSandboxes START: pruneCount ${pruneCount}/${this.sandboxInventory.length}/${this.options.maxSandboxes}.`, this.dbg.dumpSandboxState());\n selectiveDebug2() && console.debug(this.sandboxInventory.map((sbx) => sbx.identifier));\n\n // Prune ready-for-assign sandboxes first.\n while (pruneCount > 0)\n {\n if (this.sandboxInventory[0].isReadyForAssign)\n {\n const startedSandbox = this.sandboxInventory.shift();\n startedSandbox.terminate(false);\n pruneCount--;\n }\n else\n break;\n }\n\n // Don't purge jobs here: can accidentally purge a job 
that TD just fetched (XXXpfr)\n\n /**\n * Do we really want to do a bunch of work to keep empty job assigned sandboxes around?\n * When in a private compute group, there will be fewer jobs and it's likely\n * that a given job will be seen again.\n * @todo XXXpfr Prioritize keeping expensive to assign sandboxes.\n */\n const liveJobs = [], emptyJobs = [];\n let maxAssignedSandboxCount = 0;\n for (const jobMan of this.jobManagerInventory)\n {\n let isAlive = false;\n for (const slice of jobMan.sliceInventory)\n if (slice.isQueuedOrActive)\n {\n isAlive = true;\n break;\n }\n if (isAlive)\n liveJobs.push(jobMan);\n else\n {\n const _assignedSandboxes = jobMan.assignedSandboxes;\n if (maxAssignedSandboxCount < _assignedSandboxes.length)\n maxAssignedSandboxCount = _assignedSandboxes.length;\n emptyJobs.push(jobMan);\n }\n }\n\n if (emptyJobs.length > 0)\n {\n // Prune the sandboxes from all jobs with no current work.\n // Try to keep approximately the same # of assigned sandboxes per job.\n for (let k = maxAssignedSandboxCount; k >= 0; k--)\n {\n for (const jobMan of emptyJobs)\n {\n const _assignedSandboxes = jobMan.assignedSandboxes;\n if (_assignedSandboxes.length > k)\n {\n debugging('supervisor') && console.debug(`pruneSandboxes(empty): sandbox${_assignedSandboxes[0].id}`, Date.now() - this.lastTime);\n // Terminate and remove from this.sandboxInventory.\n this.returnSandbox(_assignedSandboxes[0]);\n if (--pruneCount < 1)\n {\n debugging('supervisor') && console.debug(`Supervisor.pruneSandboxes FINISH: ${pruneCount}/${this.sandboxInventory.length}/${this.options.maxSandboxes}.`);\n return;\n }\n }\n }\n }\n }\n\n // Round-robin prune 1 extra assigned sandbox from each non-empty jobmanager.\n while (pruneCount > 0)\n {\n const _pruneCount = pruneCount;\n for (const jobMan of liveJobs)\n {\n const _assignedSandboxes = jobMan.assignedSandboxes;\n if (_assignedSandboxes.length > 0)\n {\n debugging('supervisor') && console.debug(`pruneSandboxes(non-empty): sandbox${_assignedSandboxes[0].id}`, Date.now() - this.lastTime);\n // Terminate and remove from this.sandboxInventory.\n this.returnSandbox(_assignedSandboxes[0]);\n if (--pruneCount < 1)\n {\n debugging('supervisor') && console.debug(`Supervisor.pruneSandboxes FINISH: ${pruneCount}/${this.sandboxInventory.length}/${this.options.maxSandboxes}.`);\n return;\n }\n }\n }\n if (_pruneCount === pruneCount) // Nothing left to prune.\n break;\n }\n\n debugging('supervisor') && console.debug(`Supervisor.pruneSandboxes FINISH: incomplete-prune ${pruneCount}/${this.sandboxInventory.length}/${this.options.maxSandboxes}.`);\n }\n\n // _Idx\n //\n // recordResult, sendToResultSubmitter, sendResultToRemote\n //\n\n /**\n * Submits the slice results to the result-submitter service.\n * Then remove the slice from the its job manager.\n *\n * @param {Slice} slice - The slice to submit.\n * @param {Sandbox} sandbox - The sandbox handle associated to the slice.\n * @returns {Promise<any>}\n */\n recordResult (slice, sandbox)\n {\n // It is possible for slice.result to be undefined when there are upstream errors.\n if (!slice.result)\n throw new Error(`Slice ${slice.identifier} completed work, but there is no result. 
This is ok when there are upstream errors.`);\n if (!slice.isComplete)\n throw new Error(`Cannot record result for slice ${slice.identifier} that has not completed execution successfully.`);\n if (!slice.timeReport)\n throw new Error(`Invalid time report for slice ${slice.identifier} in recordResult`);\n if (!slice.dataReport)\n throw new Error(`Invalid data report for slice ${slice.identifier} in recordResult`);\n\n const metrics = slice.jobManager.updateStatistics(slice, sandbox);\n selectiveDebug() && console.debug(`Supervisor: recording result for slice ${slice.identifier} with metrics`, this.dbg.justCPU(metrics));\n\n /** @see result-submitter::result for full message details */\n const payloadData = {\n slice: slice.sliceNumber,\n job: slice.jobAddress,\n worker: this.workerId,\n paymentAddress: this.options.paymentAddress,\n metrics,\n authorizationMessage: slice.authorizationMessage,\n };\n\n let canceled = false;\n const resultUrl = (slice.resultStorageType !== 'pattern') ? slice.resultStorageDetails : false;\n this.workerEmit( 'beforeResult', () => { canceled = true; }, resultUrl);\n this.jobEmit(slice, 'beforeResult', () => { canceled = true; }, resultUrl);\n selectiveDebug && canceled && console.debug(`User canceled the result submission operation for slice ${slice.identifier}.`);\n if (canceled)\n return this.returnSlice(slice, 'Canceled via beforeResult event');\n\n if (slice.resultStorageType === 'pattern')\n return this.sendResultToRemote(slice)\n .then((response) => {\n return this.sendToResultSubmitter(slice, sandbox.sandboxHandle, payloadData, response);\n });\n\n return this.sendToResultSubmitter(slice, sandbox.sandboxHandle, payloadData, encodeDataURI(slice.result.result));\n }\n\n /**\n * Send result to result submitter.\n * @param {Slice} slice\n * @param {SandboxHandle} sandboxHandle\n * @param {*} payloadData\n * @param {string} [result]\n * @returns {Promise<any>}\n */\n async sendToResultSubmitter (slice, sandboxHandle, payloadData, result)\n {\n // When handleRSError is hit, { slice, payload } is added to the queue this.dcp4.submitResultsQueueMap[slice.key] .\n // For a given slice, the queue is retried independent of other slices that failed to submit.\n // When a given slice hits the retry limit (6 retries) the slice is returned to scheduler.\n const handleRSError = (error, slice, payloadData) => { // eslint-disable-line no-shadow\n const msg = `Failed to submit results to scheduler for slice ${slice.identifier}`;\n if (!error) error = new Error(msg);\n this.error(msg, error);\n\n slice['retrySubmitResults'] = (slice['retrySubmitResults'] ?? 
0) + 1;\n if (slice['retrySubmitResults'] > this.options.maxResultSubmissionRetries)\n {\n this.handleFailedSlice(slice, error);\n throw new Error(`Failed to submit results 6 times for slice ${slice.identifier}`);\n }\n\n // For a given slice, there's never more than one element in the corresponding queue.\n this.dcp4.submitResultsQueueMap[slice.key] = [ { slice, sandboxHandle, payloadData } ];\n return this.dcp4.resetConnection('resultSubmitter');\n }\n\n try\n {\n debugging('supervisor') && console.debug('Supervisor.recordResult: payloadData', result.slice(0, 256), slice.identifier);\n if (result)\n payloadData.result = result;\n\n await this.delayManager.nextDelay('recordResult', 2);\n //->console.log('recordResult', slice.identifier, this.evaluator.down, Date.now() - this.lastTime); // SAVE\n\n return this.dcp4.resultSubmitter.request('result', payloadData)\n .then((resp) => {\n const payload = resp.payload;\n if (!resp.success)\n {\n if (payload)\n {\n selectiveDebug() && console.debug('resultSubmitter.send failed', payload);\n throw new DCPError(`Call to result submitter failed when recording results for ${slice.identifier}.`, payload);\n }\n if (debugBuild)\n {\n selectiveDebug() && console.debug('resultSubmitter.send failed with no payload', slice.identifier);\n // Look inside\n for (const [ key, value ] of Object.entries(resp)) {\n if (key !== 'connection')\n console.debug(`${key}:`, value);\n }\n }\n throw new Error(`Call to result submitter failed when recording results for ${slice.identifier}.`);\n }\n\n debugging('supervisor') && console.debug('Successfully submitted results', slice.identifier);\n\n // Success! Restore this['resultSubmitter'] delay to retryMinSleepMs (currently 32ms.)\n // Is there a better way to reset than explicit calls?\n this.delayManager.resetEBO('resultSubmitter');\n\n common.debugQuanta() && this.dbg.addGlobal(slice, payload.metrics);\n slice.jobManager.update({ metrics: payload.metrics }); // Update metrics\n\n // Emit the 3 'payment' events.\n const paymentAddress = payloadData.paymentAddress.toString();\n this.workerEmit( 'payment', payload.slicePaymentAmount, paymentAddress, slice.jobAddress, slice.sliceNumber);\n this.jobEmit(slice, 'payment', payload.slicePaymentAmount, paymentAddress, slice.sliceNumber);\n this.safeEmit(sandboxHandle, 'payment', payload.slicePaymentAmount, paymentAddress);\n\n const payloadLength = kvin.stringify(payloadData).length; /** @TODO - fix per DCP-3750 */\n const resultUrl = (slice.resultStorageType !== 'pattern') ? slice.resultStorageDetails : false;\n this.workerEmit( 'result', resultUrl, payloadLength);\n this.jobEmit(slice, 'result', resultUrl, payloadLength);\n\n slice.markAsFinished();\n\n // Remove the slice from the job manager.\n slice.jobManager.removeSlice(slice);\n\n if (this.sliceTiming)\n {\n slice['resultDelta'] = Date.now() - slice['resultDelta'];\n console.debug(`recordResult(${slice['queueingDelta']}, ${slice['executionDelta']}, ${slice['resultDelta']}): Completed slice ${slice.identifier}.`, Date.now() - this.lastTime);\n }\n if (false)\n {}\n\n return resp;\n })\n .catch ((error) => {\n handleRSError (error, slice, payloadData);\n });\n }\n catch (error)\n {\n handleRSError (error, slice, payloadData);\n }\n }\n\n /**\n * Send a work function's result to a server that speaks our DCP Remote Data Server protocol.\n * E.g. 
https://gitlab.com/Distributed-Compute-Protocol/dcp-rds\n *\n * @param {Slice} slice - Slice object whose result we are sending.\n * @returns {Promise<string>}\n * @throws When HTTP status not in the 2xx range.\n */\n sendResultToRemote (slice)\n {\n return supShared.sendResultToRemote(this, slice);\n }\n\n // _Idx\n //\n // handleWorkReject\n //\n\n /**\n * Handles reassigning or returning a slice that rejected.\n *\n * If error.message === 'false' and slice.hasBeenRejected is false, reschedule the slice.\n * Set the slice.hasBeenRejected to be true.\n *\n * If error.message !== 'false' or slice.hasBeenRejected is true (i.e. has been rejected once already)\n * zthen return all slices from the job to the scheduler and terminate all sandboxes with that jobAddress.\n *\n * @param {Slice} slice\n * @param {Error} error\n */\n handleWorkReject (slice, error)\n {\n debugging() && console.debug('handleWorkReject', error.message, slice.hasBeenRejected, slice.identifier);\n\n const jobManager = slice.jobManager;\n jobManager.rejectedJobReasons.push(error.message); // memoize reasons\n\n // First time rejecting without a reason; try rescheduling the slice.\n if (error.message === 'false' && !slice.hasBeenRejected)\n {\n // Mark slice as rejected.\n slice.hasBeenRejected = true;\n // Reset slice state to allow re-execution.\n slice.resetState();\n }\n else\n {\n // Slice has been rejected twice, so add to array of rejected jobs.\n const rejectedJob = {\n address: slice.jobAddress,\n reasons: jobManager.rejectedJobReasons,\n };\n this.rejectedJobs.push(rejectedJob);\n // Broadcast failure.\n this.workerEmit( 'result', error);\n this.jobEmit(slice, 'result', error);\n // Purge the job.\n this.purgeJob(jobManager);\n // Tell everyone all about it, when allowed.\n if (jobManager.displayMaxDiagInfo)\n {\n const suffixMsg = 'All slices and sandboxes with the same jobAddress returned to the scheduler or terminated.';\n if (slice.hasBeenRejected)\n this.warning(`work.reject: The slice ${slice.identifier} was rejected twice.`, suffixMsg);\n else\n this.warning(`work.reject: The slice ${slice.identifier} was rejected with reason: ${error.message}.`, suffixMsg);\n }\n }\n }\n\n}\nexports.Supervisor = Supervisor;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/index.js?");
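For reference, the quanta smoothing described in the jobQuanta() comments in the supervisor module above (EMA with alpha 0.1, rounding into 1/32 buckets, clamping to the range 1/8..8) can be restated as a small standalone sketch. The nextEma() helper below is an assumed conventional exponential moving average and the plain state object stands in for the Supervisor instance; the constants are taken from the bundled source, but this snippet is illustrative only and is not part of the packaged module.

'use strict';

const MIN_QUANTA = 0.125;              // lower clamp (1/8)
const MAX_QUANTA = 8.0;                // upper clamp
const DISCRETE_INCREMENT = 0.03125;    // round to 1/32 buckets
const DISCRETE_INCREMENT_INVERSE = 32;
const ALPHA = 0.1;                     // EMA weight; effective period ~19 samples

/* Assumed conventional EMA: seed with the first sample, then blend. */
function nextEma(previous, sample, alpha)
{
  return previous ? alpha * sample + (1 - alpha) * previous : sample;
}

/* Smooth local vs. global slice times, then discretize and clamp the ratio. */
function updateDefaultQuanta(state, averageLocalTime, averageGlobalTime)
{
  state.localTime  = nextEma(state.localTime,  averageLocalTime,  ALPHA);
  state.globalTime = nextEma(state.globalTime, averageGlobalTime, ALPHA);

  let q = state.localTime / state.globalTime;
  q = (q > 1 ? Math.floor(DISCRETE_INCREMENT_INVERSE * q)
             : Math.ceil(DISCRETE_INCREMENT_INVERSE * q)) * DISCRETE_INCREMENT;
  return Math.min(Math.max(q, MIN_QUANTA), MAX_QUANTA);
}

/* Example: a worker whose slices run ~20% slower than the network average. */
const state = { localTime: 0, globalTime: 0 };
console.log(updateDefaultQuanta(state, 120, 100)); // => 1.1875 (38/32)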
4728
4728
 
4729
4729
  /***/ }),
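The repoMan() backlog check in the supervisor code above (threshold = repoManMultiplier * targetTaskDuration * maxWorkingCores; drain long jobs first, then round-robin over all jobs until the excess is gone) can be sketched in isolation as follows. The plain job objects and the shift()-based slice removal are hypothetical simplifications standing in for the bundle's JobManager bookkeeping, which marks slices FINISHED instead of removing them directly.

'use strict';

/* Pick queued slices to return until the estimated backlog drops below the threshold. */
function selectSlicesToReturn(jobs, { repoManMultiplier, targetTaskDuration, maxWorkingCores })
{
  const threshold = repoManMultiplier * targetTaskDuration * maxWorkingCores;
  const workRemaining = jobs.reduce(
    (sum, job) => sum + job.readySlices.length * job.adjSliceTime, 0);
  let excess = workRemaining - threshold;
  const toReturn = [];

  const drain = (candidates) => {
    let progressed = true;
    while (excess > 0 && progressed)
    {
      progressed = false;
      for (const job of candidates)
      {
        if (excess <= 0)
          break;
        const slice = job.readySlices.shift(); // one slice per job per pass (round-robin)
        if (slice === undefined)
          continue;
        toReturn.push(slice);
        excess -= job.adjSliceTime;
        progressed = true;
      }
    }
  };

  drain(jobs.filter((job) => job.emaSliceTime >= targetTaskDuration)); // long jobs first
  if (excess > 0)
    drain(jobs);                                                       // then everything
  return toReturn;
}

/* Example: 4 cores, 300 s target, 1.5x multiplier => 1800 s threshold; backlog is 1920 s. */
const jobs = [
  { emaSliceTime: 600, adjSliceTime: 600, readySlices: [ 'a1', 'a2', 'a3' ] },
  { emaSliceTime: 60,  adjSliceTime: 60,  readySlices: [ 'b1', 'b2' ] },
];
console.log(selectSlicesToReturn(jobs,
  { repoManMultiplier: 1.5, targetTaskDuration: 300, maxWorkingCores: 4 })); // => [ 'a1' ]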
4730
4730
 
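availableSandboxSpace() in the supervisor code above reduces to a simple core-counting rule: skip the fetch when more than one slice is already queued, reserve one slot when the single queued slice is long, and otherwise ask for maxWorkingCores minus the number of working sandboxes. A minimal sketch with hypothetical plain-object slices, illustrative only:

'use strict';

/* How many cores' worth of work to request from the task distributor. */
function availableSandboxSpace(queued, working, maxWorkingCores)
{
  if (queued.length > 1)
    return 0;                         // enough slices queued already; no fetch needed

  let longSliceCount = 0;
  if (queued.length === 1 && queued[0].isLong)
    longSliceCount = 1;               // a long slice occupies a core for the whole task

  return maxWorkingCores - working - longSliceCount;
}

console.log(availableSandboxSpace([], 2, 8));                   // => 6
console.log(availableSandboxSpace([ { isLong: true } ], 2, 8)); // => 5
console.log(availableSandboxSpace([ {}, {} ], 2, 8));           // => 0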
@@ -4798,7 +4798,7 @@ eval("/**\n * @file dcp/src/dcp-client/worker/supervisor2/rolling-statistics.js\
4798
4798
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4799
4799
 
4800
4800
  "use strict";
4801
- eval("/**\n * @file dcp/src/dcp-client/worker/supervisor2/sandbox2.js\n *\n * A sandbox that when constructed and assigned can do work for\n * a distributed slice. A sandbox runs for a single slice at a time.\n *\n * Usage (simplified...):\n * const sandbox = new Sandbox(this, { ...this.options.sandboxOptions });\n * await sandbox.start();\n * sandbox.slice = slice;\n * await sandbox.assign(jobManager);\n * return sandbox.work()\n * .then((result) => {\n * slice.collectResult(result, true);\n * sandbox.checkSandboxReUse();\n * this.supervisor.recordResult(slice)\n * })\n * .catch((error) => {\n * slice.collectResult(error, false);\n * const reason = this.supervisor.handleSandboxWorkError(sandbox, slice, error);\n * this.supervisor.returnSlice(slice, reason);\n * this.returnSandbox(sandbox);\n * });\n *\n * Debug flags:\n * Sandbox.debugWork = true // - turns off 30 second timeout to let user debug sandbox innards more easily\n * Sandbox.debugState = true // - logs all state transitions for this sandbox\n * Sandbox.debugEvents = true // - logs all events received from the sandbox\n *\n * Initial states:\n * UNREADY\n *\n * Terminal states:\n * TERMINATED\n *\n * Valid transitions:\n * ( sandbox.start )\n * UNREADY -> READYING -> READY_FOR_ASSIGN\n * READYING -> TERMINATED\n * ( sandbox.assign )\n * READY_FOR_ASSIGN -> ASSIGNING -> ASSIGNED\n * ASSIGNING -> TERMINATED\n * ( sandbox.markAsWorking )\n * ASSIGEND -> WORKING\n * ( sandbox.work )\n * WORKING -> ASSIGNED\n * -> TERMINATED\n * ( sandbox.terminate )\n * any -> TERMINATED\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * Wes Garland, wes@distributive.network\n * Paul, paul@distributive.network\n * @date May 2019\n * May 2019\n * Decemeber 2020\n * June, Dec 2022, Jan-May 2023\n * @module sandbox\n * @copyright Copyright (c) 2018-2023, Distributive Corp. All Rights Reserved\n */\n// @ts-check\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst dcp_timers = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\nconst { assert, assertEq3 } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst nanoid = (__webpack_require__(/*! nanoid */ \"./node_modules/nanoid/index.browser.js\").nanoid);\nconst EventEmitter = __webpack_require__(/*! events */ \"./node_modules/events/events.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\nconst { Config } = __webpack_require__(/*! ./config */ \"./src/dcp-client/worker/supervisor2/config.js\");\nconst common = __webpack_require__(/*! ./common */ \"./src/dcp-client/worker/supervisor2/common.js\");\nconst { selectiveDebug, truncateAddress, timeDilation, selectiveDebug2 } = common;\nconst { stringify } = __webpack_require__(/*! 
dcp/utils */ \"./src/utils/index.js\");\n\n/**\n * Wraps console.debug to emulate debug module prefixing messages on npm.\n * @param {...any} args\n */\nconst debug = (...args) => {\n if (debugging())\n console.debug('Sandbox:', ...args);\n};\n\n// Sandbox states\nconst UNREADY = 'UNREADY' // No Sandbox (web worker, saworker, etc) has been constructed yet\nconst READYING = 'READYING' // Sandbox is being constructed and environment (bravojs, env) is being set up\nconst READY_FOR_ASSIGN = 'READY_FOR_ASSIGN' // Sandbox is ready to be assigned\nconst ASSIGNED = 'ASSIGNED' // Sandbox is assigned but not working\nconst ASSIGNING = 'ASSIGNING' // Sandbox is in the process of being ASSIGNED\nconst WORKING = 'WORKING' // Sandbox is working\nconst TERMINATED = 'TERMINATED' // Sandbox is terminated.\nconst EVAL_RESULT_PREFIX = 'evalResult::';\n\nclass SandboxError extends Error\n{\n /**\n * @param {string} errorCode\n * @param {string|Error} msg\n */\n constructor(errorCode, msg)\n {\n super((msg.constructor?.name === 'String') ? msg : msg['message']);\n /** @type {string} */\n this.errorCode = errorCode;\n if (msg.constructor?.name !== 'String')\n for (const prop of [ 'name', 'code', 'stack', 'lineNumber', 'columnNumber' ])\n if (msg[prop]) this[prop] = msg[prop];\n }\n}\nclass NoProgressError extends SandboxError { constructor(msg) { super('ENOPROGRESS', msg); } }\nclass SliceTooSlowError extends SandboxError { constructor(msg) { super('ESLICETOOSLOW', msg); } }\nclass UncaughtExceptionError extends SandboxError { constructor(msg) { super('EUNCAUGHT', msg); } }\n\n/** @typedef {import('./slice2').Slice} Slice */\n/** @typedef {import('./index').Supervisor} Supervisor */\n/** @typedef {import('./job-manager').JobManager} JobManager */\n/** @typedef {import('./module-cache').ModuleCache} ModuleCache */\n/** @typedef {import('dcp/utils/jsdoc-types').SandboxOptions} SandboxOptions */\n\n/**\n * Public event emitter.\n * https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n */\nclass SandboxHandle extends EventEmitter\n{\n /** @type {{ id: number, public: { name: string, description: string, link: string }, jobManager: JobManager, slice: Slice }} */\n #info;\n\n /**\n * @constructor\n * @param {Sandbox} sandbox\n */\n constructor (sandbox)\n {\n super({ captureRejections: false });\n this.#info = sandbox.info;\n }\n /** @type {number} */\n get id () { return this.#info.id; }\n /** @type {{ name: string, description: string, link: string }} */\n get public () { return this.#info.public ?? { name: '<unassigned>', description: '', link: '' }; }\n /** @type {string} */\n get jobAddress () { return this.#info.jobManager?.address; }\n /** @type {number} */\n get sliceNumber () { return this.#info.slice?.sliceNumber ?? -1; }\n}\nexports.SandboxHandle = SandboxHandle;\n\n//\n// Index to functionality -- search for '_Idx' to toggle through the index.\n//\n// 1) class Sandbox\n// 2) checkSandboxReUse, postMessageToEvaluator, changeState,\n// and punctuatedTimer is expiremental for replacing hard-coded timeouts.\n// 3) start, describe, assign, applyRequirements, assignEvaluator\n// 4) eval, resetState, work, resetProgressTimeout, resetSliceTimeout\n// 5) handleRing0Message, handleRing1Message, handleRing2Message, handleRing3Message\n// 6) onmessage, onerror, terminate\n// 7) updateTime, resetSliceReport, sandboxEmit, error, warning\n//\n\n// _Idx\n//\n// class Sandbox\n//\n\nclass Sandbox extends EventEmitter\n{\n /**\n * A Sandbox (i.e. 
a worker sandbox) which executes distributed slices.\n *\n * @constructor\n * @param {Supervisor} supervisor\n * @param {SandboxOptions} options\n */\n constructor (supervisor, options)\n {\n super({ captureRejections: false });\n /** @type {Supervisor} */\n this.supervisor = supervisor;\n /** @type {ModuleCache} */\n this.moduleCache = supervisor.moduleCache;\n /** @type {SandboxOptions} */\n this.options = {\n ignoreNoProgress: false,\n ...options,\n SandboxConstructor: options.SandboxConstructor || (__webpack_require__(/*! ../evaluators */ \"./src/dcp-client/worker/evaluators/index.js\").BrowserEvaluator),\n }\n /** @type {Synchronizer} */\n this.state = new Synchronizer(UNREADY, [ UNREADY, READYING, READY_FOR_ASSIGN, ASSIGNING, ASSIGNED, WORKING, TERMINATED ]);\n\n /** @type {{ id: number, public: { name: string, description: string, link: string }, jobManager: JobManager, slice: Slice }} */\n this.info = {\n id: Sandbox.getNewId(),\n public: null,\n jobManager: null,\n slice: null,\n };\n\n /**\n * Event emitter containing info that describes the sandbox.\n * @type {SandboxHandle}\n */\n this.sandboxHandle = new SandboxHandle(this);\n\n /** Properties of type object. */\n this.evaluatorHandle = null;\n this.capabilities = null;\n this.progressTimeout = null;\n this.sliceTimeout = null;\n this.rejectionData = null;\n\n /** @type {number?} */\n this.progress = 100;\n /** @type {{ last: { deltaMs: number, value: any, throttledReports: number }, lastDeterministic: { deltaMs: number, progress: number, value: any, throttledReports: number } }} */\n this.progressReports = null; // cf. job-noProgress.js\n /** @type {object} */\n this.progressTimeout = null;\n /** @type {object} */\n this.sliceTimeout = null;\n\n /** @type {{ total: number, CPU: number, webGL: number, webGPU: number }} */\n this.sliceTimeReport = null;\n /** @type {number} */\n this.moduleInDataSize = 0; // Sandbox level input size; set during assign, never reset.\n /** @type {number} */\n this.sliceOutDataSize = 0; // Slice level output size; reset for every slice executed.\n\n /** @type {number?} */\n this.sliceStartTime = null;\n /** @type {number} */\n this.useCounter = 1; // Anticipating the initial use.\n /** @type {Config} */\n this.hive = new Config();\n\n ///** @type {((data: any) => Promise<void>)[]} */\n this.ringMessageHandlers = [\n this.handleRing0Message,\n this.handleRing1Message,\n this.handleRing2Message,\n this.handleRing3Message,\n ];\n\n this.resetSliceReport();\n }\n\n /** @type {number} */\n get id () { return this.info.id; }\n /** @type {{ name: string, description: string, link: string }} */\n get public () { return this.info.public; }\n /** @type {{ name: string, description: string, link: string }} */\n set public (data) { this.info.public = data; }\n /** @type {JobManager} */\n get jobManager () { return this.info.jobManager; }\n /** @type {string} */\n get jobAddress () { return this.jobManager?.address; }\n /** @type {Slice} */\n get slice () { return this.info.slice; }\n /** @type {Slice} */\n set slice (slice) { this.info.slice = slice; }\n /** @type {number} */\n get sliceNumber () { return this.slice ? 
this.slice.sliceNumber : -1; }\n /** @type {number} */\n get generalTimeout () { return 2 * this.hive.generalTimeout; }\n /** @type {number} */\n get punctuatedTimeout () { return this.hive.generalTimeout; }\n\n /**\n * Debug string that characterizes sandbox.\n * @type {string}\n */\n get identifier()\n {\n if (!this.jobAddress)\n return `${this.id}.${this.state}`;\n const address = truncateAddress(this.jobAddress);\n if (this.slice)\n return `${this.id}.${address}.${this.state}~${this.slice.sliceNumber}`;\n return `${this.id}.${address}.${this.state}`;\n }\n\n /** @returns {number} */\n static getNewId() { return Sandbox.idCounter++; }\n\n /** @type {boolean} */\n get isReadyForAssign () { return this.state.is(READY_FOR_ASSIGN); }\n /** @type {boolean} */\n get isAssigned () { return this.state.is(ASSIGNED); }\n /** @type {boolean} */\n get isWorking () { return this.state.is(WORKING); }\n /** @type {boolean} */\n get isTerminated () { return this.state.is(TERMINATED); }\n\n // _Idx\n //\n // checkSandboxReUse, postMessageToEvaluator, changeState,\n // punctuatedTimer is expiremental for replacing hard-coded timeouts.\n //\n\n /**\n * Mark WORKING sandbox as ASSIGNED in preparation for possible reuse.\n * Allow use of sandbox on a given job up to a limit of dcpConfig.supervisor.sandbox.maxSandboxUse .\n */\n checkSandboxReUse ()\n {\n selectiveDebug2() && console.debug(`Sandbox2.checkSandboxReUse: useCounter ${this.useCounter}, ${this.identifier}`);\n if (this.useCounter++ < this.hive.maxSandboxUse)\n {\n this.state.set(WORKING, ASSIGNED);\n this.sandboxEmit('ready');\n }\n else\n {\n this.terminate(false);\n common.removeElement(this.supervisor.sandboxInventory, this);\n }\n }\n\n /** Transitions: ASSIGNED --> WORKING. */\n markAsWorking ()\n {\n if (!this.isAssigned)\n throw new Error(`Sandbox ${this.identifier} is not ready to work`);\n this.state.set(ASSIGNED, WORKING);\n }\n \n /**\n * Safely post message to evaluator.\n * @param {object} message\n */\n postMessageToEvaluator (message)\n {\n if (this.isTerminated) // When evaluator goes down, all sandboxes are terminated.\n throw new Error(`postMessageToEvaluator: Sandbox ${this.identifier} has been terminated.`);\n return this.evaluatorHandle.postMessage(message);\n }\n \n /**\n * Safely change state.\n * @param {string} currentState\n * @param {string} nextState\n */\n changeState (currentState, nextState)\n {\n if (this.isTerminated) // When evaluator goes down, all sandboxes are terminated.\n throw new Error(`changeState: Sandbox ${this.identifier} has been terminated.`);\n this.state.set(currentState, nextState);\n }\n\n /** Upon fatal error return slice to scheduler. 
*/\n returnSlice ()\n {\n selectiveDebug() && console.debug('Sandbox.returnSlice', this.identifier);\n return this.supervisor.returnSlice(this.slice, 'Sandbox.returnSlice');\n }\n\n /**\n * @callback cbFn\n * @returns {void}\n */\n\n /**\n * UNUSED.\n * Future work.\n * Replaces the timers in:\n * describe,\n * applyRequirements,\n * resetState,\n * The idea is to have a long timeout with a warning every\n * 6 seconds saying why it is waiting.\n * @param {cbFn} body\n * @param {string} waitMessage\n * @param {string} timerExpiredMessage\n * @returns {Promise<{ closeIntervalTimer: cbFn }>}\n */\n punctuatedTimer(body, waitMessage, timerExpiredMessage)\n {\n const that = this;\n return new Promise((resolve, reject) => {\n let intervalCounter = 0;\n let intervalHandle = null;\n function closeIntervalTimer()\n {\n if (intervalHandle !== null)\n dcp_timers.clearTimeout(intervalHandle);\n intervalHandle = null;\n }\n intervalHandle = dcp_timers.setInterval(() => {\n if (++intervalCounter > 12)\n {\n closeIntervalTimer();\n that.error(timerExpiredMessage);\n }\n that.warning(waitMessage);\n body();\n }, this.punctuatedTimeout)\n // Allow workers and localExec to exit.\n intervalHandle.unref();\n resolve({ closeIntervalTimer });\n });\n }\n\n // _Idx\n //\n // start, describe, assign, applyRequirements, assignEvaluator\n //\n\n /**\n * Readies the sandbox. This will result in the sandbox being ready and not assigned.\n * It will need to be assigned with a job before it is able to do work.\n * Sandbox.start will terminate the sandbox upon failure.\n * @todo maybe preload specific modules or let the cache pass in what modules to load?\n *\n * @returns {Promise<void>}\n * @throws on failure to ready\n */\n async start ()\n {\n debug('Sandbox.start begin');\n await this.supervisor.delayManager.nextDelay('sandboxStart');\n this.changeState(UNREADY, READYING);\n\n try\n {\n // RING 0\n this.evaluatorHandle = new this.options.SandboxConstructor({\n name: `DCP Sandbox #${this.id}`,\n });\n // Annoying! onerror terminates sandbox which can happen independent of whether the slice\n // is ok or not. Since we don't know, we have to return the slice when onerror is called\n // during sandbox.work .\n /** @todo XXXpfr Beware of onerror firing often. */\n this.evaluatorHandle.onerror = this.onerror.bind(this);\n\n const messageHandler = this.onmessage.bind(this);\n this.evaluatorHandle.onmessage = function onmessage(event)\n {\n const data = (event.data.serialized)\n ? 
kvin.parse(event.data.message)\n : kvin.unmarshal(event.data);\n messageHandler({ data });\n }\n\n const evaluatorPostMessage = this.evaluatorHandle.postMessage.bind(this.evaluatorHandle);\n this.evaluatorHandle.postMessage = function postMessage(message)\n {\n evaluatorPostMessage(kvin.marshal(message));\n }\n\n const that = this;\n this.evaluatorHandle.addEventListener('end', function sandbox$start$addEventListener() {\n selectiveDebug() && console.debug(\"END:Sandbox evaluatorHandle end-handler\", that.identifier, new Date());\n that.supervisor.evaluator.shuttingDown = true;\n that.terminate(true);\n });\n\n // Now in RING 1\n\n // Now in RING 2\n await this.describe();\n this.changeState(READYING, READY_FOR_ASSIGN);\n\n // Emit the 'sandbox' event on the worker event emitter.\n this.supervisor.safeEmit(this.supervisor.worker, 'sandbox', this.sandboxHandle);\n }\n catch (error)\n {\n if (this.isTerminated)\n debug(`Failed to start sandbox because it is already terminated: ${this.identifier}.\\n\\tMay be due to screensaver worker being down or evaluator was stopped.`);\n else\n {\n debug(`Failed to start sandbox ${this.identifier}.`, error.message); // FIX s.b. error\n this.terminate(false);\n }\n throw error;\n }\n }\n\n /**\n * Sends a post message to describe its capabilities.\n * Side effect: Sets the capabilities property of the current sandbox.\n *\n * @returns {Promise<any>} Resolves with the sandbox's capabilities.\n * Rejects with an error saying a response was not received.\n * @memberof Sandbox\n */\n describe ()\n {\n debugging('sandbox') && debug('Beginning to describe evaluator', this.identifier);\n const that = this;\n\n return new Promise(function sandbox$describePromise(resolve, reject) {\n let describeTimeout;\n\n if (that.isTerminated) // When evaluator goes down, all sandboxes are terminated.\n reject(new Error(`Sandbox ${that.identifier} has been terminated.`));\n\n if (that.evaluatorHandle === null)\n reject(new Error(`Evaluator has not been initialized: ${that.identifier}`));\n\n function sandbox$describe$success(data)\n {\n if (describeTimeout !== false)\n {\n dcp_timers.clearTimeout(describeTimeout);\n describeTimeout = false;\n\n const { capabilities } = data;\n if (typeof capabilities === 'undefined')\n reject(new Error(`Did not receive capabilities from describe response: ${that.identifier}`));\n that.capabilities = capabilities;\n\n debugging('sandbox') && debug('Evaluator has been described');\n resolve(capabilities);\n }\n }\n // Emitted by handleRing2Message.\n that.once('describe', sandbox$describe$success);\n\n describeTimeout = dcp_timers.setTimeout(function sandbox$describe$fail() {\n if (describeTimeout !== false)\n {\n describeTimeout = false;\n that.removeListener('describe', sandbox$describe$success);\n reject(new Error( `Describe message timed-out. 
No describe response was received from the describe command: ${that.identifier}`));\n }\n }, that.generalTimeout);\n // Allow workers and localExec to exit.\n describeTimeout.unref();\n\n const message = {\n request: 'describe',\n };\n that.postMessageToEvaluator(message);\n });\n }\n\n /**\n * This will assign the sandbox with a job, loading its sandbox code into the sandbox.\n * Sandbox.assign will not terminate the sandbox upon failure.\n * The sandbox will be terminated in JobManager.assignSandbox .\n * @param {JobManager} jobManager - The job manager that will be the owner of this sandbox.\n * @returns {Promise<Sandbox>}\n * @throws on initialization failure\n */\n async assign (jobManager)\n {\n if (!this.slice) // Design assumption.\n throw new Error(`Must have valid sandbox.slice before sandbox.assign is called: ${this.identifier}`);\n\n await this.supervisor.delayManager.nextDelay('sandboxAssign');\n debug('Sandbox.assign', this.identifier, Date.now() - this.supervisor.lastTime);\n\n try\n {\n this.changeState(READY_FOR_ASSIGN, ASSIGNING);\n this.info.jobManager = jobManager;\n this.job = this.jobManager.jobMessage;\n\n /* At this point, the worker has decided that this sandbox will be associated with a specific job. \n Therefore, we emit the SandboxHandle<job> event*/\n this.sandboxEmit('job', jobManager.jobHandle);\n\n assertEq3(this.job.address, this.jobAddress);\n assert(typeof this.job === 'object');\n assert(typeof this.job.requirements === 'object');\n assert(Array.isArray(this.job.dependencies));\n assert(Array.isArray(this.job.requirePath));\n\n // Extract public data from job, with defaults\n this.public = Object.assign({\n name: `Anonymous Job ${truncateAddress(this.jobAddress)}`,\n description: 'Discreetly helping make the world smarter.',\n link: 'https://distributed.computer/about',\n }, this.job.public);\n\n // Future: We may want other filename tags for appliances // RR Nov 2019\n\n // Important: The order of applying requirements before loading the sandbox code\n // is important for modules and sandbox code to set globals over the whitelist.\n await this.applyRequirements(this.job.requirements);\n //const _t0 = Date.now();\n await this.assignEvaluator();\n //console.log('Finished Sandbox.assignEvaluator', Date.now() - _t0);\n this.changeState(ASSIGNING, ASSIGNED);\n this.sandboxEmit('ready');\n }\n catch (error)\n {\n if (this.isTerminated)\n debug(`Failed to assign sandbox ${this.identifier} to evaluator because it is already terminated.\\n\\tMay be due to screensaver worker being down or evaluator was stopped.`);\n else\n {\n debug(`Failed to assign sandbox ${this.identifier} to evaluator.`);\n this.terminate(false);\n }\n throw error;\n }\n\n return this;\n }\n\n /**\n * Passes the job's requirements object into the sandbox so that the global access lists can be updated accordingly.\n * E.g. 
disallow access to OffscreenCanvas without environment.offscreenCanvas=true present.\n * Must be called after @start.\n *\n * @returns {Promise<void>} - resolves on success, rejects otherwise\n */\n applyRequirements (requirements)\n {\n assert(typeof requirements === 'object');\n const that = this;\n\n return new Promise(function sandbox$applyRequirementsPromise(resolve, reject) {\n let requirementTimeout;\n\n function sandbox$applyRequirements$success()\n {\n if (requirementTimeout !== false)\n {\n dcp_timers.clearTimeout(requirementTimeout);\n requirementTimeout = false;\n resolve();\n }\n }\n // Emitted by handleRing1Message.\n that.once('applyRequirementsDone', sandbox$applyRequirements$success);\n\n requirementTimeout = dcp_timers.setTimeout(function sandbox$finishApplySandboxRequirements$fail() {\n if (requirementTimeout !== false)\n {\n requirementTimeout = false;\n that.removeListener('applyRequirementsDone', sandbox$applyRequirements$success);\n reject(new Error(`applyRequirements never received 'applyRequirementsDone' response from sandbox: ${that.identifier}`));\n }\n }, that.generalTimeout);\n // Allow workers and localExec to exit.\n requirementTimeout.unref();\n\n const message = {\n requirements,\n request: 'applyRequirements',\n };\n that.postMessageToEvaluator(message);\n });\n }\n\n /**\n * Assign job to the evaluator.\n * @returns {Promise<any>} - resolves on success, rejects otherwise\n */\n assignEvaluator ()\n {\n debugging('sandbox') && console.debug('Begin assigning job to evaluator', this.identifier);\n const that = this;\n\n return new Promise(function sandbox$$assignEvaluatorPromise(resolve, reject) {\n function sandbox$assignEvaluator$success(event)\n {\n that.removeListener('reject', sandbox$assignEvaluator$fail);\n debugging('sandbox') && debug('Job assigned to evaluator');\n resolve(event);\n }\n\n function sandbox$assignEvaluator$fail(error)\n {\n that.removeListener('assigned', sandbox$assignEvaluator$success);\n that.error(`assignEvaluator failed(${that.identifier}): evaluator may be out of memory or the screensaver may be down.`, error);\n selectiveDebug() && console.debug('assignEvaluator failed', that.identifier, error);\n if (that.slice) // Normally the slice hasn't been set yet.\n that.returnSlice();\n reject(error);\n }\n\n // Emitted by handleRing2Message.\n that.once('assigned', sandbox$assignEvaluator$success);\n that.once('reject', sandbox$assignEvaluator$fail);\n\n // Had to add useStrict -- not sure if anything else was missed.\n const jobMessage = {\n address: that.job.address,\n arguments: that.job.arguments,\n dependencies: that.job.dependencies,\n modulePath: that.job.modulePath,\n public: that.job.public,\n requireModules: that.job.requireModules,\n requirePath: that.job.requirePath,\n workFunction: that.job.workFunction,\n useStrict: that.job.useStrict,\n };\n\n const message = {\n request: 'assign',\n job: jobMessage,\n sandboxConfig: that.hive.sandboxConfig,\n };\n that.postMessageToEvaluator(message);\n });\n }\n\n // _Idx\n //\n // eval, resetState, work, resetProgressTimeout, resetSliceTimeout\n //\n\n /**\n * Evaluates a string inside the sandbox.\n * @todo XXXpfr -- I don't understand how this gets called?\n * There's an old comment saying: \"no longer working though?\"\n *\n * @param {string} code - the code to evaluate in the sandbox\n * @param {string} filename - the name of the 'file' to help with debugging,\n * @returns {Promise<any>} - resolves with eval result on success, rejects otherwise\n */\n eval (code, filename)\n {\n 
const that = this;\n const msgId = nanoid();\n\n return new Promise(function sandbox$$eval$Promise(resolve, reject) {\n const eventId = EVAL_RESULT_PREFIX + msgId;\n\n function sandbox$eval$success(event)\n {\n that.removeListener('reject', sandbox$eval$fail);\n resolve(event);\n };\n\n function sandbox$eval$fail(error)\n {\n that.removeListener(eventId, sandbox$eval$success);\n reject(error);\n };\n\n that.once(eventId, sandbox$eval$success);\n that.once('reject', sandbox$eval$fail);\n\n const message = {\n request: 'eval',\n data: code,\n filename,\n msgId,\n };\n that.postMessageToEvaluator(message);\n });\n }\n\n /**\n * Resets the state of the bootstrap, without resetting the sandbox function if assigned.\n * Mostly used to reset the progress status before reusing a sandbox on another slice.\n * Must be called after @start.\n *\n * @returns {Promise<void>} - resolves with result on success, rejects otherwise\n */\n resetState ()\n {\n const that = this;\n assert(this.isWorking); // Design assumption.\n\n return new Promise(function sandbox$resetStatePromise(resolve, reject) {\n let resetStateTimeout;\n\n function sandbox$resetState$success ()\n {\n if (resetStateTimeout !== false)\n {\n dcp_timers.clearTimeout(resetStateTimeout);\n resetStateTimeout = false;\n resolve();\n }\n }\n that.once('resetStateDone', sandbox$resetState$success);\n\n resetStateTimeout = dcp_timers.setTimeout(function sandbox$resetState$fail() {\n if (resetStateTimeout !== false)\n {\n resetStateTimeout = false;\n that.removeListener('resetStateDone', sandbox$resetState$success);\n reject(new Error(`resetState never received resetStateDone event from sandbox: ${that.identifier}`));\n }\n }, that.generalTimeout);\n // Allow workers and localExec to exit.\n resetStateTimeout.unref();\n\n const message = {\n request: 'resetState',\n };\n that.postMessageToEvaluator(message);\n });\n }\n\n /**\n * Executes a slice received from the supervisor.\n * Must be called after this.start, this.assign and this.markAsWorking .\n * Sandbox.work will not terminate the sandbox upon failure.\n * The sandbox will be terminated in Supervisor.handleSandboxWorkError .\n * @returns {Promise<any>} - resolves with result on success, rejects otherwise\n */\n async work ()\n {\n const that = this;\n\n if (!this.slice) // Design assumption\n throw new Error(`Must have valid sandbox.slice before sandbox.assign is called: ${this.identifier}`);\n\n await this.supervisor.delayManager.nextDelay('sandboxWork');\n debug('Sandbox.work begin', this.identifier, Date.now() - this.supervisor.lastTime);\n\n if (this.isTerminated) // When evaluator goes down, all sandboxes are terminated.\n throw new Error(`Sandbox ${this.identifier} has been terminated.`);\n if (!this.isWorking)\n throw new Error(`Sandbox ${this.identifier} in Sandbox.work must be marked as working.`)\n\n // cf. DCP-1719,1720\n this.resetSliceReport();\n\n // Check that sandbox and slice have the same job.\n if (this.jobAddress !== this.slice.jobAddress)\n throw new Error(`Sandbox.work: sandbox ${this.identifier} and slice ${this.slice.identifier} are from different jobsz`);\n\n /** @todo Should sliceHnd just be replaced with { sandbox: this } since this.public is part of this? 
*/\n let sliceHnd = { job: this.public, sandbox: this };\n await this.resetState();\n if (!this.slice)\n {\n this.error(`Slice for job ${this.jobAddress} vanished during work initialization - aborting`);\n return;\n }\n\n const { datum: inputDatum, error: dataError } = this.slice;\n if (dataError)\n {\n that.postWorkEmit('error', {\n message: dataError.message,\n stack: dataError.stack,\n name: this.public.name\n });\n }\n\n this.resetProgressTimeout();\n this.resetSliceTimeout();\n\n return new Promise(function sandbox$$workPromise(resolve, reject) {\n function sandbox$$work$success (event)\n {\n that.removeListener('reject', sandbox$$work$fail);\n resolve(event);\n }\n\n function sandbox$$work$fail (error)\n {\n that.removeListener('resolve', sandbox$$work$success);\n reject(error);\n }\n\n that.once('resolve', sandbox$$work$success);\n that.once('reject', sandbox$$work$fail);\n\n that.sliceStartTime = Date.now();\n that.slice.startTime = that.sliceStartTime;\n that.progress = null;\n that.progressReports = {\n last: undefined,\n lastDeterministic: undefined,\n };\n\n that.resetProgressTimeout();\n that.resetSliceTimeout();\n that.emit('start', sliceHnd);\n\n if (dataError)\n {\n that.removeListener('resolve', sandbox$$work$success);\n that.removeListener('reject', sandbox$$work$fail);\n dcp_timers.setTimeout(() => reject(dataError), 0)\n }\n else\n {\n // Do the work.\n const message = { request: 'main', data: inputDatum, };\n that.postMessageToEvaluator(message);\n }\n })\n .then(async function sandbox$$work$then(event) {\n // Tell supervisor sandbox slot is available.\n that.slice.markAsWorkDone();\n\n selectiveDebug2() && console.debug('Sandbox.sliceFinish', that.identifier, event?.timeReport);\n that.sandboxEmit('sliceEnd', that.slice?.sliceNumber)\n that.emit('complete', that.jobAddress);\n\n // Reset slice property.\n that.slice = null;\n\n // JobManager.runSliceOnSandbox will transition WORKDONE -> ASSIGNED\n return event;\n })\n .catch(async function sandbox$$work$catch(error) {\n selectiveDebug() && console.debug('Sandbox.work catch', that.identifier, error);\n // Tell supervisor sandbox slot is available.\n if (that.slice)\n that.slice.markAsWorkDone();\n // Current sandbox will not be reused.\n // Do not overwrite that.slice because it is needed in subsequent error reporting.\n\n if (error instanceof NoProgressError)\n {\n const payload = {\n name: that.public.name,\n message: error.message,\n timestamp: Date.now() - that.sliceStartTime,\n };\n that.postWorkEmit('error', payload);\n that.postWorkEmit('noProgress', { ...payload, progressReports: that.progressReports });\n }\n if (error.name === 'EWORKREJECT')\n that.handleRejectedWork(that.sliceTimeReport);\n\n // Otherwise sandbox will be terminated in Supervisor.handleSandboxWorkError\n debugging('sandbox') && debug(`Sandbox ${that.identifier} failed to execute slice`, error);\n\n throw error;\n });\n }\n\n resetProgressTimeout()\n {\n const that = this;\n\n if (this.progressTimeout)\n dcp_timers.clearTimeout(this.progressTimeout);\n\n this.progressTimeout = dcp_timers.setTimeout(function sandbox$ProgressTimeout() {\n if (that.options.ignoreNoProgress)\n return that.warning('ENOPROGRESS silenced by localExec: In a remote worker, this slice would be stopped for not calling progress frequently enough.');\n\n that.emit('reject', new NoProgressError(`No progress event was received in the last ${that.hive.progressTimeout / 1000} seconds.`));\n }, this.hive.progressTimeout * timeDilation);\n // Allow workers and localExec to 
exit.\n this.progressTimeout.unref();\n }\n\n resetSliceTimeout()\n {\n const that = this;\n\n if (this.sliceTimeout)\n dcp_timers.clearTimeout(this.sliceTimeout);\n\n this.sliceTimeout = dcp_timers.setTimeout(function sandbox$SliceTimeout() {\n if (Sandbox.debugWork)\n return that.warning('Sandbox.debugWork: Ignoring slice timeout');\n\n that.emit('reject', new SliceTooSlowError(`Slice took longer than ${that.hive.sliceTimeout / 1000} seconds.`));\n }, this.hive.sliceTimeout * timeDilation);\n // Allow workers and localExec to exit.\n this.sliceTimeout.unref();\n }\n\n /**\n * Send payload to the workEmit endpoint in the event router.\n * @param {string} eventName\n * @param {*} payload\n * @returns {Promise<*>}\n */\n postWorkEmit (eventName, payload)\n {\n // Need to check if the sandbox hasn't been assigned a slice yet.\n if (!this.slice)\n this.error('Sandbox not assigned a slice before sending workEmit message to scheduler', payload, `'workEmit' event originates from '${eventName}' event`);\n else\n {\n const slice = this.slice;\n // Authorization should always be valid.\n if (!slice.authorizationMessage)\n this.warning(`workEmit: missing authorization message for slice ${slice.identifier}`);\n else\n {\n const workEmitPayload = {\n eventName,\n payload,\n job: slice.jobAddress,\n slice: slice.sliceNumber,\n worker: this.supervisor.workerId,\n authorizationMessage : slice.authorizationMessage,\n };\n return this.supervisor.dcp4.safeWorkEmit(workEmitPayload, `Failed to send workEmit (${eventName}) payload for slice ${slice.identifier}`)\n .then((success) => {\n if (!success)\n this.warning(`Message sent to workEmit is unauthorized; not accepted '${eventName}'`);\n });\n }\n }\n }\n\n /**\n * Save rejected slice timeReport data in this.slice.rejectedTimeReport, then when needed in\n * Supervisor.recordResult, merge this.slice.rejectedTimeReport into this.slice.timeReport.\n * @param {{ total: number, CPU: number, webGL: number, webGPU: number }} timeReport\n */\n handleRejectedWork (timeReport)\n {\n selectiveDebug() && console.debug('handleRejectedWork', this.identifier);\n // If the slice already has rejectedTimeReport, add this timeReport to it.\n // If not, assign this timeReport to slices rejectedTimeReport property\n if (this.slice)\n {\n if (!this.slice.rejectedTimeReport)\n this.slice.rejectedTimeReport = timeReport;\n else\n {\n ['total', 'CPU', 'webGL', 'webGPU'].forEach((key) => {\n if (timeReport[key])\n this.slice.rejectedTimeReport[key] += timeReport[key];\n });\n }\n }\n }\n\n /**\n * Attach CGIO to result returned by a slice workFn.\n * @param {*} completeData - results\n */\n attachCGIOToResult (completeData)\n {\n if (!completeData)\n throw new Error('Slice result is not ready'); // Should never fire.\n if (completeData['timeReport'])\n throw new Error('Slice result already has timeReport'); // Should never fire.\n if (completeData['dataReport'])\n throw new Error('Slice result already has dataReport'); // Should never fire.\n if (this.listenerCount('resolve') > 0)\n {\n completeData['timeReport'] = this.sliceTimeReport;\n completeData['dataReport'] = {\n InDataSize: this.moduleInDataSize + this.jobManager.inputDataSize + this.slice.inputDataSize,\n OutDataSize: this.sliceOutDataSize,\n };\n this.emit('resolve', completeData);\n selectiveDebug() && console.debug('attachCGIOToResult', this.moduleInDataSize, this.jobManager.inputDataSize, this.slice.inputDataSize, completeData['dataReport'].InDataSize);\n }\n else\n {\n // If there is no internal listener for 'resolve', 
the slice was rejected\n // and we need to update this.slice.rejectedTimeReport appropriately.\n this.handleRejectedWork(this.sliceTimeReport);\n }\n // Clear time and data reports so we can catch mistaken writes.\n this.sliceTimeReport = null;\n this.sliceOutDataSize = 0;\n }\n\n // _Idx\n //\n // handleRing0Message, handleRing1Message, handleRing2Message, handleRing3Message\n //\n\n async handleRing0Message(data) // eslint-disable-line require-await\n {\n debugging('ring0') && debug('Ring0', this.identifier, data.request);\n\n switch (data.request)\n {\n case 'scriptLoaded':\n if(data.result !== \"success\")\n this.onerror(data);\n break;\n case 'error':\n debug('Sandbox error in ring0', data.error);\n this.rejectWithCleanup('during initialization', data.error);\n break;\n default:\n this.error('Received unhandled request from sandbox: ' + data.request, null, `data: ${ JSON.stringify(data)}`);\n break;\n }\n }\n\n async handleRing1Message(data) // eslint-disable-line require-await\n {\n debugging('ring1') && debug('Ring1', this.identifier, data.request);\n\n switch (data.request)\n {\n case 'applyRequirementsDone':\n // emit internally\n this.emit(data.request, data)\n break;\n default:\n this.error('Received unhandled request from sandbox ring 1: ' + data.request, null, `data: ${ JSON.stringify(data)}`);\n break; \n }\n }\n\n async handleRing2Message(data)\n {\n debugging('ring2') && debug('Ring2', this.identifier, data.request);\n\n switch (data.request)\n {\n case 'dependency': {\n try\n {\n const moduleData = await this.moduleCache.fetchModule(data.data, this.jobAddress);\n // Success! Restore this['packageManager'] delay to retryMinSleepMs (currently 32ms.)\n // Is there a better way to reset than explicit calls?\n this.supervisor.delayManager.resetEBO('packageManager');\n // Send module data to be evaluator.\n const message = {\n request: 'moduleGroup',\n data: moduleData,\n id: data.id,\n };\n // Module data is dynamic since it may only be required in a conditional branch.\n // Moreover, on a long job, the published module itself may be updated on the scheduler.\n const moduleLength = kvin.stringify(moduleData).length; /** @TODO - fix per DCP-3750 */\n this.moduleInDataSize += moduleLength;\n selectiveDebug() && console.debug('Sandbox.Ring2.fetchModule size', this.moduleInDataSize, moduleLength);\n this.postMessageToEvaluator(message);\n }\n catch (error)\n {\n /*\n * In the event of an error here, we want to let the client know there was a problem in\n * loading their module. In principle we shouldn't need a valid sandbox.slice at sandbox.assign.\n * However, in the implementation of Sup2 there is precisely 1 callsite of sandbox.assign and\n * we do have an associated slice at this point. So we make the assumption that sandbox.slice\n * is valid here.\n */\n if (!this.slice) // Design assumption\n throw new Error(`Must have valid slice in sandbox before sandbox.assign is called: ${this.identifier}`);\n\n const payload = {\n name: error.name,\n message: error.message,\n timestamp: error.timestamp ? 
error.timestamp : new Date(),\n };\n\n this.postWorkEmit('error', payload);\n this.emit('reject', error);\n\n debugging() && console.debug(`Sandbox.Ring2: fetchModule failed ${this.identifier}`, payload, error, Date.now() - this.supervisor.lastTime);\n\n // Close packageManager to start the connection reconnect logic.\n // Should we do a retry loop with fetchModule too?\n this.supervisor.dcp4.resetConnection('packageManager');\n }\n break;\n }\n case 'error':\n /*\n * Ring 2 error messages will only fire for problems inside of the worker that are separate from\n * the work function. In most cases there are other handlers for situations where 'error' may be emitted\n * such as timeouts if the expected message isn't recieved.\n */\n debug('Sandbox error in ring2', data.error);\n this.rejectWithCleanup('during assignment and dependency resolution', data.error);\n break;\n case 'describe':\n case 'evalResult':\n case 'resetStateDone':\n case 'assigned':\n this.emit(data.request, data); // emit internally\n break;\n case 'reject':\n this.emit('reject', data.error); // emit internally\n break;\n default:\n this.error(`Received unhandled request from sandbox ring 2. Data: ${JSON.stringify(data, null, 2)}`);\n break;\n }\n }\n\n async handleRing3Message(data) // eslint-disable-line require-await\n {\n debugging('ring3') && debug('Ring3', this.identifier, data.request);\n\n switch (data.request)\n {\n case 'complete':\n dcp_timers.clearTimeout(this.progressTimeout);\n dcp_timers.clearTimeout(this.sliceTimeout);\n this.progressTimeout = this.sliceTimeout = null;\n\n if (this.progress === null)\n {\n if (this.options.ignoreNoProgress)\n this.warning(\"ENOPROGRESS silenced by localExec: Progress was not called during this slice's execution, in a remote sandbox this would cause the slice to fail.\");\n else\n {\n // If a progress update was never received (progress === null) then reject\n this.emit('reject', new NoProgressError('Sandbox never emitted a progress event.'));\n this.handleRejectedWork(this.sliceTimeReport);\n break;\n }\n }\n \n this.progress = 100;\n this.sliceOutDataSize += kvin.stringify(data.result).length; /** @TODO - fix per DCP-3750 */\n this.attachCGIOToResult(data);\n break;\n case 'progress':\n {\n const { progress, indeterminate, throttledReports, value } = data;\n this.progress = progress;\n // cf. job-noProgress.js\n const progressReport = {\n deltaMs: Date.now() - this.sliceStartTime,\n progress,\n value,\n throttledReports,\n }\n this.progressReports.last = progressReport;\n if (!indeterminate)\n this.progressReports.lastDeterministic = progressReport;\n\n this.resetProgressTimeout();\n this.sandboxEmit('progress', indeterminate || progress < 0 || progress > 100 ? 
undefined : progress);\n break;\n }\n case 'noProgress':\n this.emit('reject', new NoProgressError(data.message));\n break;\n case 'console':\n data.payload.message = kvin.marshal(data.payload.message);\n this.sliceOutDataSize += JSON.stringify(data.payload.message).length; /** @TODO - fix per DCP-3750 */\n this.postWorkEmit('console', data.payload);\n break;\n case 'emitEvent': /* ad-hoc event from the sandbox (work.emit) */\n this.postWorkEmit('custom', data.payload);\n break;\n case 'measurement':\n this.updateTime(data);\n break;\n case 'sandboxError': /* the sandbox itself has an error condition */\n debug(`Ring3 received a 'sandboxError' event for sandbox ${this.identifier}`, data.error);\n this.emit('sandboxError', data.error);\n this.rejectWithCleanup('internal sandbox error while executing work function', data.error);\n break;\n case 'workError': /* the work function threw/rejected */\n debug(`Ring3 received a 'workError' event for sandbox ${this.identifier}`, data.error);\n this.postWorkEmit('error', data.error);\n const wrappedError = new UncaughtExceptionError(data.error);\n this.rejectWithCleanup('error while executing work function', wrappedError);\n break;\n default:\n this.error('Received unhandled request from sandbox ring 3: ' + data.request, null, `data: ${ JSON.stringify(data)}`);\n break; \n }\n }\n\n /**\n * Try to send the error back to the reject handler in Sandbox.work.\n * But if the reject handler is not available (s.b. rare) then cleanup, emit error and throw.\n * @param {string} message\n * @param {Error|string} error\n */\n rejectWithCleanup (message, error)\n {\n if (this.listenerCount('reject') > 0)\n this.emit('reject', error);\n else\n {\n this.terminate(false);\n this.error(`Sandbox ${this.identifier} ${message}`, error);\n throw error;\n }\n }\n\n // _Idx\n //\n // onmessage, onerror, terminate\n //\n\n /**\n * Handles progress and completion events from sandbox.\n * Unless explicitly returned out of this function will re-emit the event\n * where the name of the event is event.data.request.\n *\n * @param {object} event - event received from the evaaluator sandbox\n * @returns {Promise<void>}\n */\n onmessage (event)\n {\n debugging('event') && debug('onmessage-event', event.data.ringSource);\n if (Sandbox.debugEvents)\n console.debug('sandbox - eventDebug:', { id: this.id, state: this.state.valueOf(), event: JSON.stringify(event) });\n\n const { data } = event;\n const ringLevel = data.ringSource\n\n // Give the data to a handler depending on ring level\n if (ringLevel === -1)\n {\n /** @todo XXXpfr Beware of this happening often. */\n this.error(`Message sent directly from raw postMessage for event ${event}. Terminating worker...`);\n this.terminate(true);\n }\n else\n {\n const handler = this.ringMessageHandlers[ringLevel];\n if (handler)\n return handler.call(this, data.value);\n this.warning(`No handler defined for message from ring ${ringLevel} for event ${event}.`);\n }\n }\n\n /**\n * Error handler for the internal sandbox.\n * Emits error event that gets handled up in the Worker class.\n */\n onerror (event)\n {\n /** @todo XXXpfr Beware of onerror firing often. 
*/\n this.error(`Sandbox.onerror emitted an error: ${event}`);\n this.terminate(true, true);\n }\n\n /**\n * Clears the timeout and terminates the sandbox and sometimes emits a reject event.\n *\n * @param {boolean} [reject=true] - if true emit reject event\n * @param {boolean} [immediate=false] - passed to terminate, used by standaloneWorker to immediately close the connection\n */\n terminate (reject = true, immediate = false)\n {\n if (this.slice)\n this.returnSlice();\n if (this.isTerminated)\n return;\n\n selectiveDebug() && console.debug(`Terminate sandbox ${this.identifier}`);\n\n this.state = new Synchronizer(TERMINATED, [ UNREADY, READYING, READY_FOR_ASSIGN, ASSIGNING, ASSIGNED, WORKING, TERMINATED ]);\n\n dcp_timers.clearTimeout(this.progressTimeout);\n dcp_timers.clearTimeout(this.sliceTimeout);\n this.progressTimeout = this.sliceTimeout = null;\n\n if (this.evaluatorHandle && typeof this.evaluatorHandle.terminate === 'function')\n {\n try\n {\n this.evaluatorHandle.terminate(immediate);\n }\n catch (e)\n {\n this.error(`Error terminating sandbox ${this.id}:`, e);\n }\n finally\n {\n this.evaluatorHandle = null;\n }\n }\n\n if (reject)\n this.emit('reject', new Error(`Sandbox ${this.identifier} was terminated.`));\n\n this.sandboxEmit('end');\n }\n\n // _Idx\n //\n // updateTime, resetSliceReport, sandboxEmit, error, warning\n //\n\n /**\n * ringNPostMessage can send a `measurement` request and update these\n * totals.\n */\n updateTime (measurementEvent)\n {\n ['total', 'CPU', 'webGL', 'webGPU'].forEach((key) => {\n if (measurementEvent[key])\n this.sliceTimeReport[key] += measurementEvent[key];\n });\n }\n\n /**\n * Start over sandbox work timers.\n */\n resetSliceReport ()\n {\n this.sliceTimeReport = {\n total: 0,\n CPU: 0,\n webGL: 0,\n webGPU: 0,\n };\n this.sliceOutDataSize = 0;\n }\n\n /**\n * Safe event emitter on sandboxHandle.\n * @param {string} event\n * @param {...any} args\n */\n sandboxEmit(event, ...args)\n {\n this.supervisor.safeEmit(this.sandboxHandle, event, ...args);\n }\n\n /**\n * Error feedback to user.\n * @param {string} message\n * @param {Array<Error>|Error|string} [coreError]\n * @param {string} [additionalInfo]\n * @param {boolean} [supressStack=false]\n */\n error (message, coreError, additionalInfo, supressStack = false)\n {\n this.supervisor.error(message, coreError, additionalInfo, supressStack);\n }\n\n /**\n * Warning feedback to user.\n * @param {string[]} messages\n */\n warning (...messages)\n {\n this.supervisor.warning(...messages);\n }\n}\n\nSandbox.idCounter = 1;\nSandbox.debugWork = false;\nSandbox.debugState = false;\nSandbox.debugEvents = false;\n\nexports.Sandbox = Sandbox;\nexports.SandboxError = SandboxError;\nexports.NoProgressError = NoProgressError;\nexports.SliceTooSlowError = SliceTooSlowError;\nexports.UncaughtExceptionError = UncaughtExceptionError;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/sandbox2.js?");
4801
+ eval("/**\n * @file dcp/src/dcp-client/worker/supervisor2/sandbox2.js\n *\n * A sandbox that when constructed and assigned can do work for\n * a distributed slice. A sandbox runs for a single slice at a time.\n *\n * Usage (simplified...):\n * const sandbox = new Sandbox(this, { ...this.options.sandboxOptions });\n * await sandbox.start();\n * sandbox.slice = slice;\n * await sandbox.assign(jobManager);\n * return sandbox.work()\n * .then((result) => {\n * slice.collectResult(result, true);\n * sandbox.checkSandboxReUse();\n * this.supervisor.recordResult(slice)\n * })\n * .catch((error) => {\n * slice.collectResult(error, false);\n * const reason = this.supervisor.handleSandboxWorkError(sandbox, slice, error);\n * this.supervisor.returnSlice(slice, reason);\n * this.returnSandbox(sandbox);\n * });\n *\n * Debug flags:\n * Sandbox.debugWork = true // - turns off 30 second timeout to let user debug sandbox innards more easily\n * Sandbox.debugState = true // - logs all state transitions for this sandbox\n * Sandbox.debugEvents = true // - logs all events received from the sandbox\n *\n * Initial states:\n * UNREADY\n *\n * Terminal states:\n * TERMINATED\n *\n * Valid transitions:\n * ( sandbox.start )\n * UNREADY -> READYING -> READY_FOR_ASSIGN\n * READYING -> TERMINATED\n * ( sandbox.assign )\n * READY_FOR_ASSIGN -> ASSIGNING -> ASSIGNED\n * ASSIGNING -> TERMINATED\n * ( sandbox.markAsWorking )\n * ASSIGEND -> WORKING\n * ( sandbox.work )\n * WORKING -> ASSIGNED\n * -> TERMINATED\n * ( sandbox.terminate )\n * any -> TERMINATED\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * Wes Garland, wes@distributive.network\n * Paul, paul@distributive.network\n * @date May 2019\n * May 2019\n * Decemeber 2020\n * June, Dec 2022, Jan-May 2023\n * @module sandbox\n * @copyright Copyright (c) 2018-2023, Distributive Corp. All Rights Reserved\n */\n// @ts-check\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst dcp_timers = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\nconst { assert, assertEq3 } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst nanoid = (__webpack_require__(/*! nanoid */ \"./node_modules/nanoid/index.browser.js\").nanoid);\nconst EventEmitter = __webpack_require__(/*! events */ \"./node_modules/events/events.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\nconst { Config } = __webpack_require__(/*! ./config */ \"./src/dcp-client/worker/supervisor2/config.js\");\nconst common = __webpack_require__(/*! ./common */ \"./src/dcp-client/worker/supervisor2/common.js\");\nconst { selectiveDebug, truncateAddress, timeDilation, selectiveDebug2 } = common;\nconst { stringify } = __webpack_require__(/*! 
dcp/utils */ \"./src/utils/index.js\");\n\n/**\n * Wraps console.debug to emulate debug module prefixing messages on npm.\n * @param {...any} args\n */\nconst debug = (...args) => {\n if (debugging())\n console.debug('Sandbox:', ...args);\n};\n\n// Sandbox states\nconst UNREADY = 'UNREADY' // No Sandbox (web worker, saworker, etc) has been constructed yet\nconst READYING = 'READYING' // Sandbox is being constructed and environment (bravojs, env) is being set up\nconst READY_FOR_ASSIGN = 'READY_FOR_ASSIGN' // Sandbox is ready to be assigned\nconst ASSIGNED = 'ASSIGNED' // Sandbox is assigned but not working\nconst ASSIGNING = 'ASSIGNING' // Sandbox is in the process of being ASSIGNED\nconst WORKING = 'WORKING' // Sandbox is working\nconst TERMINATED = 'TERMINATED' // Sandbox is terminated.\nconst EVAL_RESULT_PREFIX = 'evalResult::';\n\nclass SandboxError extends Error\n{\n /**\n * @param {string} errorCode\n * @param {string|Error} msg\n */\n constructor(errorCode, msg)\n {\n super((msg.constructor?.name === 'String') ? msg : msg['message']);\n /** @type {string} */\n this.errorCode = errorCode;\n if (msg.constructor?.name !== 'String')\n for (const prop of [ 'name', 'code', 'stack', 'lineNumber', 'columnNumber' ])\n if (msg[prop]) this[prop] = msg[prop];\n }\n}\nclass NoProgressError extends SandboxError { constructor(msg) { super('ENOPROGRESS', msg); } }\nclass SliceTooSlowError extends SandboxError { constructor(msg) { super('ESLICETOOSLOW', msg); } }\nclass UncaughtExceptionError extends SandboxError { constructor(msg) { super('EUNCAUGHT', msg); } }\n\n/** @typedef {import('./slice2').Slice} Slice */\n/** @typedef {import('./index').Supervisor} Supervisor */\n/** @typedef {import('./job-manager').JobManager} JobManager */\n/** @typedef {import('./module-cache').ModuleCache} ModuleCache */\n/** @typedef {import('dcp/utils/jsdoc-types').SandboxOptions} SandboxOptions */\n\n/**\n * Public event emitter.\n * https://gitlab.com/Distributed-Compute-Protocol/dcp-docs-wes/-/blob/wip/worker/worker-events.md\n */\nclass SandboxHandle extends EventEmitter\n{\n /** @type {{ id: number, public: { name: string, description: string, link: string }, jobManager: JobManager, slice: Slice }} */\n #info;\n\n /**\n * @constructor\n * @param {Sandbox} sandbox\n */\n constructor (sandbox)\n {\n super({ captureRejections: false });\n this.#info = sandbox.info;\n }\n /** @type {number} */\n get id () { return this.#info.id; }\n /** @type {{ name: string, description: string, link: string }} */\n get public () { return this.#info.public ?? { name: '<unassigned>', description: '', link: '' }; }\n /** @type {string} */\n get jobAddress () { return this.#info.jobManager?.address; }\n /** @type {number} */\n get sliceNumber () { return this.#info.slice?.sliceNumber ?? -1; }\n}\nexports.SandboxHandle = SandboxHandle;\n\n//\n// Index to functionality -- search for '_Idx' to toggle through the index.\n//\n// 1) class Sandbox\n// 2) checkSandboxReUse, postMessageToEvaluator, changeState,\n// and punctuatedTimer is expiremental for replacing hard-coded timeouts.\n// 3) start, describe, assign, applyRequirements, assignEvaluator\n// 4) eval, resetState, work, resetProgressTimeout, resetSliceTimeout\n// 5) handleRing0Message, handleRing1Message, handleRing2Message, handleRing3Message\n// 6) onmessage, onerror, terminate\n// 7) updateTime, resetSliceReport, sandboxEmit, error, warning\n//\n\n// _Idx\n//\n// class Sandbox\n//\n\nclass Sandbox extends EventEmitter\n{\n /**\n * A Sandbox (i.e. 
a worker sandbox) which executes distributed slices.\n *\n * @constructor\n * @param {Supervisor} supervisor\n * @param {SandboxOptions} options\n */\n constructor (supervisor, options)\n {\n super({ captureRejections: false });\n /** @type {Supervisor} */\n this.supervisor = supervisor;\n /** @type {ModuleCache} */\n this.moduleCache = supervisor.moduleCache;\n /** @type {SandboxOptions} */\n this.options = {\n ignoreNoProgress: false,\n ...options,\n SandboxConstructor: options.SandboxConstructor || (__webpack_require__(/*! ../evaluators */ \"./src/dcp-client/worker/evaluators/index.js\").BrowserEvaluator),\n }\n /** @type {Synchronizer} */\n this.state = new Synchronizer(UNREADY, [ UNREADY, READYING, READY_FOR_ASSIGN, ASSIGNING, ASSIGNED, WORKING, TERMINATED ]);\n\n /** @type {{ id: number, public: { name: string, description: string, link: string }, jobManager: JobManager, slice: Slice }} */\n this.info = {\n id: Sandbox.getNewId(),\n public: null,\n jobManager: null,\n slice: null,\n };\n\n /**\n * Event emitter containing info that describes the sandbox.\n * @type {SandboxHandle}\n */\n this.sandboxHandle = new SandboxHandle(this);\n\n /** Properties of type object. */\n this.evaluatorHandle = null;\n this.capabilities = null;\n this.progressTimeout = null;\n this.sliceTimeout = null;\n this.rejectionData = null;\n\n /** @type {number?} */\n this.progress = 100;\n /** @type {{ last: { deltaMs: number, value: any, throttledReports: number }, lastDeterministic: { deltaMs: number, progress: number, value: any, throttledReports: number } }} */\n this.progressReports = null; // cf. job-noProgress.js\n /** @type {object} */\n this.progressTimeout = null;\n /** @type {object} */\n this.sliceTimeout = null;\n\n /** @type {{ total: number, CPU: number, webGL: number, webGPU: number }} */\n this.sliceTimeReport = null;\n /** @type {number} */\n this.moduleInDataSize = 0; // Sandbox level input size; set during assign, never reset.\n /** @type {number} */\n this.sliceOutDataSize = 0; // Slice level output size; reset for every slice executed.\n\n /** @type {number?} */\n this.sliceStartTime = null;\n /** @type {number} */\n this.useCounter = 1; // Anticipating the initial use.\n /** @type {Config} */\n this.hive = new Config();\n\n ///** @type {((data: any) => Promise<void>)[]} */\n this.ringMessageHandlers = [\n this.handleRing0Message,\n this.handleRing1Message,\n this.handleRing2Message,\n this.handleRing3Message,\n ];\n\n this.resetSliceReport();\n }\n\n /** @type {number} */\n get id () { return this.info.id; }\n /** @type {{ name: string, description: string, link: string }} */\n get public () { return this.info.public; }\n /** @type {{ name: string, description: string, link: string }} */\n set public (data) { this.info.public = data; }\n /** @type {JobManager} */\n get jobManager () { return this.info.jobManager; }\n /** @type {string} */\n get jobAddress () { return this.jobManager?.address; }\n /** @type {Slice} */\n get slice () { return this.info.slice; }\n /** @type {Slice} */\n set slice (slice) { this.info.slice = slice; }\n /** @type {number} */\n get sliceNumber () { return this.slice ? 
this.slice.sliceNumber : -1; }\n /** @type {number} */\n get generalTimeout () { return 2 * this.hive.generalTimeout; }\n /** @type {number} */\n get punctuatedTimeout () { return this.hive.generalTimeout; }\n\n /**\n * Debug string that characterizes sandbox.\n * @type {string}\n */\n get identifier()\n {\n if (!this.jobAddress)\n return `${this.id}.${this.state}`;\n const address = truncateAddress(this.jobAddress);\n if (this.slice)\n return `${this.id}.${address}.${this.state}~${this.slice.sliceNumber}`;\n return `${this.id}.${address}.${this.state}`;\n }\n\n /** @returns {number} */\n static getNewId() { return Sandbox.idCounter++; }\n\n /** @type {boolean} */\n get isReadyForAssign () { return this.state.is(READY_FOR_ASSIGN); }\n /** @type {boolean} */\n get isAssigned () { return this.state.is(ASSIGNED); }\n /** @type {boolean} */\n get isWorking () { return this.state.is(WORKING); }\n /** @type {boolean} */\n get isTerminated () { return this.state.is(TERMINATED); }\n\n // _Idx\n //\n // checkSandboxReUse, postMessageToEvaluator, changeState,\n // punctuatedTimer is expiremental for replacing hard-coded timeouts.\n //\n\n /**\n * Mark WORKING sandbox as ASSIGNED in preparation for possible reuse.\n * Allow use of sandbox on a given job up to a limit of dcpConfig.supervisor.sandbox.maxSandboxUse .\n */\n checkSandboxReUse ()\n {\n selectiveDebug2() && console.debug(`Sandbox2.checkSandboxReUse: useCounter ${this.useCounter}, ${this.identifier}`);\n if (this.useCounter++ < this.hive.maxSandboxUse)\n {\n this.state.set(WORKING, ASSIGNED);\n this.sandboxEmit('ready');\n }\n else\n {\n this.terminate(false);\n common.removeElement(this.supervisor.sandboxInventory, this);\n }\n }\n\n /** Transitions: ASSIGNED --> WORKING. */\n markAsWorking ()\n {\n if (!this.isAssigned)\n throw new Error(`Sandbox ${this.identifier} is not ready to work`);\n this.state.set(ASSIGNED, WORKING);\n }\n \n /**\n * Safely post message to evaluator.\n * @param {object} message\n */\n postMessageToEvaluator (message)\n {\n if (this.isTerminated) // When evaluator goes down, all sandboxes are terminated.\n throw new Error(`postMessageToEvaluator: Sandbox ${this.identifier} has been terminated.`);\n return this.evaluatorHandle.postMessage(message);\n }\n \n /**\n * Safely change state.\n * @param {string} currentState\n * @param {string} nextState\n */\n changeState (currentState, nextState)\n {\n if (this.isTerminated) // When evaluator goes down, all sandboxes are terminated.\n throw new Error(`changeState: Sandbox ${this.identifier} has been terminated.`);\n this.state.set(currentState, nextState);\n }\n\n /** Upon fatal error return slice to scheduler. 
*/\n returnSlice ()\n {\n selectiveDebug() && console.debug('Sandbox.returnSlice', this.identifier);\n return this.supervisor.returnSlice(this.slice, 'Sandbox.returnSlice');\n }\n\n /**\n * @callback cbFn\n * @returns {void}\n */\n\n /**\n * UNUSED.\n * Future work.\n * Replaces the timers in:\n * describe,\n * applyRequirements,\n * resetState,\n * The idea is to have a long timeout with a warning every\n * 6 seconds saying why it is waiting.\n * @param {cbFn} body\n * @param {string} waitMessage\n * @param {string} timerExpiredMessage\n * @returns {Promise<{ closeIntervalTimer: cbFn }>}\n */\n punctuatedTimer(body, waitMessage, timerExpiredMessage)\n {\n const that = this;\n return new Promise((resolve, reject) => {\n let intervalCounter = 0;\n let intervalHandle = null;\n function closeIntervalTimer()\n {\n if (intervalHandle !== null)\n dcp_timers.clearTimeout(intervalHandle);\n intervalHandle = null;\n }\n intervalHandle = dcp_timers.setInterval(() => {\n if (++intervalCounter > 12)\n {\n closeIntervalTimer();\n that.error(timerExpiredMessage);\n }\n that.warning(waitMessage);\n body();\n }, this.punctuatedTimeout)\n // Allow workers and localExec to exit.\n intervalHandle.unref();\n resolve({ closeIntervalTimer });\n });\n }\n\n // _Idx\n //\n // start, describe, assign, applyRequirements, assignEvaluator\n //\n\n /**\n * Readies the sandbox. This will result in the sandbox being ready and not assigned.\n * It will need to be assigned with a job before it is able to do work.\n * Sandbox.start will terminate the sandbox upon failure.\n * @todo maybe preload specific modules or let the cache pass in what modules to load?\n *\n * @returns {Promise<void>}\n * @throws on failure to ready\n */\n async start ()\n {\n debug('Sandbox.start begin');\n await this.supervisor.delayManager.nextDelay('sandboxStart');\n this.changeState(UNREADY, READYING);\n\n try\n {\n // RING 0\n this.evaluatorHandle = new this.options.SandboxConstructor({\n name: `DCP Sandbox #${this.id}`,\n });\n // Annoying! onerror terminates sandbox which can happen independent of whether the slice\n // is ok or not. Since we don't know, we have to return the slice when onerror is called\n // during sandbox.work .\n /** @todo XXXpfr Beware of onerror firing often. */\n this.evaluatorHandle.onerror = this.onerror.bind(this);\n\n const messageHandler = this.onmessage.bind(this);\n this.evaluatorHandle.onmessage = function onmessage(event)\n {\n const data = (event.data.serialized)\n ? 
kvin.parse(event.data.message)\n : kvin.unmarshal(event.data);\n messageHandler({ data });\n }\n\n const evaluatorPostMessage = this.evaluatorHandle.postMessage.bind(this.evaluatorHandle);\n this.evaluatorHandle.postMessage = function postMessage(message)\n {\n evaluatorPostMessage(kvin.marshal(message));\n }\n\n const that = this;\n this.evaluatorHandle.addEventListener('end', function sandbox$start$addEventListener() {\n selectiveDebug() && console.debug(\"END:Sandbox evaluatorHandle end-handler\", that.identifier, new Date());\n that.supervisor.evaluator.shuttingDown = true;\n that.terminate(true);\n });\n\n // Don't let an open sockets prevent clean worker exit.\n if (this.evaluatorHandle.unref)\n this.evaluatorHandle.unref();\n\n // Now in RING 1\n\n // Now in RING 2\n await this.describe();\n this.changeState(READYING, READY_FOR_ASSIGN);\n\n // Emit the 'sandbox' event on the worker event emitter.\n this.supervisor.safeEmit(this.supervisor.worker, 'sandbox', this.sandboxHandle);\n }\n catch (error)\n {\n if (this.isTerminated)\n debug(`Failed to start sandbox because it is already terminated: ${this.identifier}.\\n\\tMay be due to screensaver worker being down or evaluator was stopped.`);\n else\n {\n debug(`Failed to start sandbox ${this.identifier}.`, error.message); // FIX s.b. error\n this.terminate(false);\n }\n throw error;\n }\n }\n\n /**\n * Sends a post message to describe its capabilities.\n * Side effect: Sets the capabilities property of the current sandbox.\n *\n * @returns {Promise<any>} Resolves with the sandbox's capabilities.\n * Rejects with an error saying a response was not received.\n * @memberof Sandbox\n */\n describe ()\n {\n debugging('sandbox') && debug('Beginning to describe evaluator', this.identifier);\n const that = this;\n\n return new Promise(function sandbox$describePromise(resolve, reject) {\n let describeTimeout;\n\n if (that.isTerminated) // When evaluator goes down, all sandboxes are terminated.\n reject(new Error(`Sandbox ${that.identifier} has been terminated.`));\n\n if (that.evaluatorHandle === null)\n reject(new Error(`Evaluator has not been initialized: ${that.identifier}`));\n\n function sandbox$describe$success(data)\n {\n if (describeTimeout !== false)\n {\n dcp_timers.clearTimeout(describeTimeout);\n describeTimeout = false;\n\n const { capabilities } = data;\n if (typeof capabilities === 'undefined')\n reject(new Error(`Did not receive capabilities from describe response: ${that.identifier}`));\n that.capabilities = capabilities;\n\n debugging('sandbox') && debug('Evaluator has been described');\n resolve(capabilities);\n }\n }\n // Emitted by handleRing2Message.\n that.once('describe', sandbox$describe$success);\n\n describeTimeout = dcp_timers.setTimeout(function sandbox$describe$fail() {\n if (describeTimeout !== false)\n {\n describeTimeout = false;\n that.removeListener('describe', sandbox$describe$success);\n reject(new Error( `Describe message timed-out. 
No describe response was received from the describe command: ${that.identifier}`));\n }\n }, that.generalTimeout);\n // Allow workers and localExec to exit.\n describeTimeout.unref();\n\n const message = {\n request: 'describe',\n };\n that.postMessageToEvaluator(message);\n });\n }\n\n /**\n * This will assign the sandbox with a job, loading its sandbox code into the sandbox.\n * Sandbox.assign will not terminate the sandbox upon failure.\n * The sandbox will be terminated in JobManager.assignSandbox .\n * @param {JobManager} jobManager - The job manager that will be the owner of this sandbox.\n * @returns {Promise<Sandbox>}\n * @throws on initialization failure\n */\n async assign (jobManager)\n {\n if (!this.slice) // Design assumption.\n throw new Error(`Must have valid sandbox.slice before sandbox.assign is called: ${this.identifier}`);\n\n await this.supervisor.delayManager.nextDelay('sandboxAssign');\n debug('Sandbox.assign', this.identifier, Date.now() - this.supervisor.lastTime);\n\n try\n {\n this.changeState(READY_FOR_ASSIGN, ASSIGNING);\n this.info.jobManager = jobManager;\n this.job = this.jobManager.jobMessage;\n\n /* At this point, the worker has decided that this sandbox will be associated with a specific job. \n Therefore, we emit the SandboxHandle<job> event*/\n this.sandboxEmit('job', jobManager.jobHandle);\n\n assertEq3(this.job.address, this.jobAddress);\n assert(typeof this.job === 'object');\n assert(typeof this.job.requirements === 'object');\n assert(Array.isArray(this.job.dependencies));\n assert(Array.isArray(this.job.requirePath));\n\n // Extract public data from job, with defaults\n this.public = Object.assign({\n name: `Anonymous Job ${truncateAddress(this.jobAddress)}`,\n description: 'Discreetly helping make the world smarter.',\n link: 'https://distributed.computer/about',\n }, this.job.public);\n\n // Future: We may want other filename tags for appliances // RR Nov 2019\n\n // Important: The order of applying requirements before loading the sandbox code\n // is important for modules and sandbox code to set globals over the whitelist.\n await this.applyRequirements(this.job.requirements);\n //const _t0 = Date.now();\n await this.assignEvaluator();\n //console.log('Finished Sandbox.assignEvaluator', Date.now() - _t0);\n this.changeState(ASSIGNING, ASSIGNED);\n this.sandboxEmit('ready');\n }\n catch (error)\n {\n if (this.isTerminated)\n debug(`Failed to assign sandbox ${this.identifier} to evaluator because it is already terminated.\\n\\tMay be due to screensaver worker being down or evaluator was stopped.`);\n else\n {\n debug(`Failed to assign sandbox ${this.identifier} to evaluator.`);\n this.terminate(false);\n }\n throw error;\n }\n\n return this;\n }\n\n /**\n * Passes the job's requirements object into the sandbox so that the global access lists can be updated accordingly.\n * E.g. 
disallow access to OffscreenCanvas without environment.offscreenCanvas=true present.\n * Must be called after @start.\n *\n * @returns {Promise<void>} - resolves on success, rejects otherwise\n */\n applyRequirements (requirements)\n {\n assert(typeof requirements === 'object');\n const that = this;\n\n return new Promise(function sandbox$applyRequirementsPromise(resolve, reject) {\n let requirementTimeout;\n\n function sandbox$applyRequirements$success()\n {\n if (requirementTimeout !== false)\n {\n dcp_timers.clearTimeout(requirementTimeout);\n requirementTimeout = false;\n resolve();\n }\n }\n // Emitted by handleRing1Message.\n that.once('applyRequirementsDone', sandbox$applyRequirements$success);\n\n requirementTimeout = dcp_timers.setTimeout(function sandbox$finishApplySandboxRequirements$fail() {\n if (requirementTimeout !== false)\n {\n requirementTimeout = false;\n that.removeListener('applyRequirementsDone', sandbox$applyRequirements$success);\n reject(new Error(`applyRequirements never received 'applyRequirementsDone' response from sandbox: ${that.identifier}`));\n }\n }, that.generalTimeout);\n // Allow workers and localExec to exit.\n requirementTimeout.unref();\n\n const message = {\n requirements,\n request: 'applyRequirements',\n };\n that.postMessageToEvaluator(message);\n });\n }\n\n /**\n * Assign job to the evaluator.\n * @returns {Promise<any>} - resolves on success, rejects otherwise\n */\n assignEvaluator ()\n {\n debugging('sandbox') && console.debug('Begin assigning job to evaluator', this.identifier);\n const that = this;\n\n return new Promise(function sandbox$$assignEvaluatorPromise(resolve, reject) {\n function sandbox$assignEvaluator$success(event)\n {\n that.removeListener('reject', sandbox$assignEvaluator$fail);\n debugging('sandbox') && debug('Job assigned to evaluator');\n resolve(event);\n }\n\n function sandbox$assignEvaluator$fail(error)\n {\n that.removeListener('assigned', sandbox$assignEvaluator$success);\n that.error(`assignEvaluator failed(${that.identifier}): evaluator may be out of memory or the screensaver may be down.`, error);\n selectiveDebug() && console.debug('assignEvaluator failed', that.identifier, error);\n if (that.slice) // Normally the slice hasn't been set yet.\n that.returnSlice();\n reject(error);\n }\n\n // Emitted by handleRing2Message.\n that.once('assigned', sandbox$assignEvaluator$success);\n that.once('reject', sandbox$assignEvaluator$fail);\n\n // Had to add useStrict -- not sure if anything else was missed.\n const jobMessage = {\n address: that.job.address,\n arguments: that.job.arguments,\n dependencies: that.job.dependencies,\n modulePath: that.job.modulePath,\n public: that.job.public,\n requireModules: that.job.requireModules,\n requirePath: that.job.requirePath,\n workFunction: that.job.workFunction,\n useStrict: that.job.useStrict,\n };\n\n const message = {\n request: 'assign',\n job: jobMessage,\n sandboxConfig: that.hive.sandboxConfig,\n };\n that.postMessageToEvaluator(message);\n });\n }\n\n // _Idx\n //\n // eval, resetState, work, resetProgressTimeout, resetSliceTimeout\n //\n\n /**\n * Evaluates a string inside the sandbox.\n * @todo XXXpfr -- I don't understand how this gets called?\n * There's an old comment saying: \"no longer working though?\"\n *\n * @param {string} code - the code to evaluate in the sandbox\n * @param {string} filename - the name of the 'file' to help with debugging,\n * @returns {Promise<any>} - resolves with eval result on success, rejects otherwise\n */\n eval (code, filename)\n {\n 
const that = this;\n const msgId = nanoid();\n\n return new Promise(function sandbox$$eval$Promise(resolve, reject) {\n const eventId = EVAL_RESULT_PREFIX + msgId;\n\n function sandbox$eval$success(event)\n {\n that.removeListener('reject', sandbox$eval$fail);\n resolve(event);\n };\n\n function sandbox$eval$fail(error)\n {\n that.removeListener(eventId, sandbox$eval$success);\n reject(error);\n };\n\n that.once(eventId, sandbox$eval$success);\n that.once('reject', sandbox$eval$fail);\n\n const message = {\n request: 'eval',\n data: code,\n filename,\n msgId,\n };\n that.postMessageToEvaluator(message);\n });\n }\n\n /**\n * Resets the state of the bootstrap, without resetting the sandbox function if assigned.\n * Mostly used to reset the progress status before reusing a sandbox on another slice.\n * Must be called after @start.\n *\n * @returns {Promise<void>} - resolves with result on success, rejects otherwise\n */\n resetState ()\n {\n const that = this;\n assert(this.isWorking); // Design assumption.\n\n return new Promise(function sandbox$resetStatePromise(resolve, reject) {\n let resetStateTimeout;\n\n function sandbox$resetState$success ()\n {\n if (resetStateTimeout !== false)\n {\n dcp_timers.clearTimeout(resetStateTimeout);\n resetStateTimeout = false;\n resolve();\n }\n }\n that.once('resetStateDone', sandbox$resetState$success);\n\n resetStateTimeout = dcp_timers.setTimeout(function sandbox$resetState$fail() {\n if (resetStateTimeout !== false)\n {\n resetStateTimeout = false;\n that.removeListener('resetStateDone', sandbox$resetState$success);\n reject(new Error(`resetState never received resetStateDone event from sandbox: ${that.identifier}`));\n }\n }, that.generalTimeout);\n // Allow workers and localExec to exit.\n resetStateTimeout.unref();\n\n const message = {\n request: 'resetState',\n };\n that.postMessageToEvaluator(message);\n });\n }\n\n /**\n * Executes a slice received from the supervisor.\n * Must be called after this.start, this.assign and this.markAsWorking .\n * Sandbox.work will not terminate the sandbox upon failure.\n * The sandbox will be terminated in Supervisor.handleSandboxWorkError .\n * @returns {Promise<any>} - resolves with result on success, rejects otherwise\n */\n async work ()\n {\n const that = this;\n\n if (!this.slice) // Design assumption\n throw new Error(`Must have valid sandbox.slice before sandbox.assign is called: ${this.identifier}`);\n\n await this.supervisor.delayManager.nextDelay('sandboxWork');\n debug('Sandbox.work begin', this.identifier, Date.now() - this.supervisor.lastTime);\n\n if (this.isTerminated) // When evaluator goes down, all sandboxes are terminated.\n throw new Error(`Sandbox ${this.identifier} has been terminated.`);\n if (!this.isWorking)\n throw new Error(`Sandbox ${this.identifier} in Sandbox.work must be marked as working.`)\n\n // cf. DCP-1719,1720\n this.resetSliceReport();\n\n // Check that sandbox and slice have the same job.\n if (this.jobAddress !== this.slice.jobAddress)\n throw new Error(`Sandbox.work: sandbox ${this.identifier} and slice ${this.slice.identifier} are from different jobsz`);\n\n /** @todo Should sliceHnd just be replaced with { sandbox: this } since this.public is part of this? 
*/\n let sliceHnd = { job: this.public, sandbox: this };\n await this.resetState();\n if (!this.slice)\n {\n this.error(`Slice for job ${this.jobAddress} vanished during work initialization - aborting`);\n return;\n }\n\n const { datum: inputDatum, error: dataError } = this.slice;\n if (dataError)\n {\n that.postWorkEmit('error', {\n message: dataError.message,\n stack: dataError.stack,\n name: this.public.name\n });\n }\n\n this.resetProgressTimeout();\n this.resetSliceTimeout();\n\n return new Promise(function sandbox$$workPromise(resolve, reject) {\n function sandbox$$work$success (event)\n {\n that.removeListener('reject', sandbox$$work$fail);\n resolve(event);\n }\n\n function sandbox$$work$fail (error)\n {\n that.removeListener('resolve', sandbox$$work$success);\n reject(error);\n }\n\n that.once('resolve', sandbox$$work$success);\n that.once('reject', sandbox$$work$fail);\n\n that.sliceStartTime = Date.now();\n that.slice.startTime = that.sliceStartTime;\n that.progress = null;\n that.progressReports = {\n last: undefined,\n lastDeterministic: undefined,\n };\n\n that.resetProgressTimeout();\n that.resetSliceTimeout();\n that.emit('start', sliceHnd);\n\n if (dataError)\n {\n that.removeListener('resolve', sandbox$$work$success);\n that.removeListener('reject', sandbox$$work$fail);\n dcp_timers.setTimeout(() => reject(dataError), 0)\n }\n else\n {\n // Do the work.\n const message = { request: 'main', data: inputDatum, };\n that.postMessageToEvaluator(message);\n }\n })\n .then(async function sandbox$$work$then(event) {\n // Tell supervisor sandbox slot is available.\n that.slice.markAsWorkDone();\n\n selectiveDebug2() && console.debug('Sandbox.sliceFinish', that.identifier, event?.timeReport);\n that.sandboxEmit('sliceEnd', that.slice?.sliceNumber)\n that.emit('complete', that.jobAddress);\n\n // Reset slice property.\n that.slice = null;\n\n // JobManager.runSliceOnSandbox will transition WORKDONE -> ASSIGNED\n return event;\n })\n .catch(async function sandbox$$work$catch(error) {\n selectiveDebug() && console.debug('Sandbox.work catch', that.identifier, error);\n // Tell supervisor sandbox slot is available.\n if (that.slice)\n that.slice.markAsWorkDone();\n // Current sandbox will not be reused.\n // Do not overwrite that.slice because it is needed in subsequent error reporting.\n\n if (error instanceof NoProgressError)\n {\n const payload = {\n name: that.public.name,\n message: error.message,\n timestamp: Date.now() - that.sliceStartTime,\n };\n that.postWorkEmit('error', payload);\n that.postWorkEmit('noProgress', { ...payload, progressReports: that.progressReports });\n }\n if (error.name === 'EWORKREJECT')\n that.handleRejectedWork(that.sliceTimeReport);\n\n // Otherwise sandbox will be terminated in Supervisor.handleSandboxWorkError\n debugging('sandbox') && debug(`Sandbox ${that.identifier} failed to execute slice`, error);\n\n throw error;\n });\n }\n\n resetProgressTimeout()\n {\n const that = this;\n\n if (this.progressTimeout)\n dcp_timers.clearTimeout(this.progressTimeout);\n\n this.progressTimeout = dcp_timers.setTimeout(function sandbox$ProgressTimeout() {\n if (that.options.ignoreNoProgress)\n return that.warning('ENOPROGRESS silenced by localExec: In a remote worker, this slice would be stopped for not calling progress frequently enough.');\n\n that.emit('reject', new NoProgressError(`No progress event was received in the last ${that.hive.progressTimeout / 1000} seconds.`));\n }, this.hive.progressTimeout * timeDilation);\n // Allow workers and localExec to 
exit.\n this.progressTimeout.unref();\n }\n\n resetSliceTimeout()\n {\n const that = this;\n\n if (this.sliceTimeout)\n dcp_timers.clearTimeout(this.sliceTimeout);\n\n this.sliceTimeout = dcp_timers.setTimeout(function sandbox$SliceTimeout() {\n if (Sandbox.debugWork)\n return that.warning('Sandbox.debugWork: Ignoring slice timeout');\n\n that.emit('reject', new SliceTooSlowError(`Slice took longer than ${that.hive.sliceTimeout / 1000} seconds.`));\n }, this.hive.sliceTimeout * timeDilation);\n // Allow workers and localExec to exit.\n this.sliceTimeout.unref();\n }\n\n /**\n * Send payload to the workEmit endpoint in the event router.\n * @param {string} eventName\n * @param {*} payload\n * @returns {Promise<*>}\n */\n postWorkEmit (eventName, payload)\n {\n // Need to check if the sandbox hasn't been assigned a slice yet.\n if (!this.slice)\n this.error('Sandbox not assigned a slice before sending workEmit message to scheduler', payload, `'workEmit' event originates from '${eventName}' event`);\n else\n {\n const slice = this.slice;\n // Authorization should always be valid.\n if (!slice.authorizationMessage)\n this.warning(`workEmit: missing authorization message for slice ${slice.identifier}`);\n else\n {\n const workEmitPayload = {\n eventName,\n payload,\n job: slice.jobAddress,\n slice: slice.sliceNumber,\n worker: this.supervisor.workerId,\n authorizationMessage : slice.authorizationMessage,\n };\n return this.supervisor.dcp4.safeWorkEmit(workEmitPayload, `Failed to send workEmit (${eventName}) payload for slice ${slice.identifier}`)\n .then((success) => {\n if (!success)\n this.warning(`Message sent to workEmit is unauthorized; not accepted '${eventName}'`);\n });\n }\n }\n }\n\n /**\n * Save rejected slice timeReport data in this.slice.rejectedTimeReport, then when needed in\n * Supervisor.recordResult, merge this.slice.rejectedTimeReport into this.slice.timeReport.\n * @param {{ total: number, CPU: number, webGL: number, webGPU: number }} timeReport\n */\n handleRejectedWork (timeReport)\n {\n selectiveDebug() && console.debug('handleRejectedWork', this.identifier);\n // If the slice already has rejectedTimeReport, add this timeReport to it.\n // If not, assign this timeReport to slices rejectedTimeReport property\n if (this.slice)\n {\n if (!this.slice.rejectedTimeReport)\n this.slice.rejectedTimeReport = timeReport;\n else\n {\n ['total', 'CPU', 'webGL', 'webGPU'].forEach((key) => {\n if (timeReport[key])\n this.slice.rejectedTimeReport[key] += timeReport[key];\n });\n }\n }\n }\n\n /**\n * Attach CGIO to result returned by a slice workFn.\n * @param {*} completeData - results\n */\n attachCGIOToResult (completeData)\n {\n if (!completeData)\n throw new Error('Slice result is not ready'); // Should never fire.\n if (completeData['timeReport'])\n throw new Error('Slice result already has timeReport'); // Should never fire.\n if (completeData['dataReport'])\n throw new Error('Slice result already has dataReport'); // Should never fire.\n if (this.listenerCount('resolve') > 0)\n {\n completeData['timeReport'] = this.sliceTimeReport;\n completeData['dataReport'] = {\n InDataSize: this.moduleInDataSize + this.jobManager.inputDataSize + this.slice.inputDataSize,\n OutDataSize: this.sliceOutDataSize,\n };\n this.emit('resolve', completeData);\n selectiveDebug() && console.debug('attachCGIOToResult', this.moduleInDataSize, this.jobManager.inputDataSize, this.slice.inputDataSize, completeData['dataReport'].InDataSize);\n }\n else\n {\n // If there is no internal listener for 'resolve', 
the slice was rejected\n // and we need to update this.slice.rejectedTimeReport appropriately.\n this.handleRejectedWork(this.sliceTimeReport);\n }\n // Clear time and data reports so we can catch mistaken writes.\n this.sliceTimeReport = null;\n this.sliceOutDataSize = 0;\n }\n\n // _Idx\n //\n // handleRing0Message, handleRing1Message, handleRing2Message, handleRing3Message\n //\n\n async handleRing0Message(data) // eslint-disable-line require-await\n {\n debugging('ring0') && debug('Ring0', this.identifier, data.request);\n\n switch (data.request)\n {\n case 'scriptLoaded':\n if(data.result !== \"success\")\n this.onerror(data);\n break;\n case 'error':\n debug('Sandbox error in ring0', data.error);\n this.rejectWithCleanup('during initialization', data.error);\n break;\n default:\n this.error('Received unhandled request from sandbox: ' + data.request, null, `data: ${ JSON.stringify(data)}`);\n break;\n }\n }\n\n async handleRing1Message(data) // eslint-disable-line require-await\n {\n debugging('ring1') && debug('Ring1', this.identifier, data.request);\n\n switch (data.request)\n {\n case 'applyRequirementsDone':\n // emit internally\n this.emit(data.request, data)\n break;\n default:\n this.error('Received unhandled request from sandbox ring 1: ' + data.request, null, `data: ${ JSON.stringify(data)}`);\n break; \n }\n }\n\n async handleRing2Message(data)\n {\n debugging('ring2') && debug('Ring2', this.identifier, data.request);\n\n switch (data.request)\n {\n case 'dependency': {\n try\n {\n const moduleData = await this.moduleCache.fetchModule(data.data, this.jobAddress);\n // Success! Restore this['packageManager'] delay to retryMinSleepMs (currently 32ms.)\n // Is there a better way to reset than explicit calls?\n this.supervisor.delayManager.resetEBO('packageManager');\n // Send module data to be evaluator.\n const message = {\n request: 'moduleGroup',\n data: moduleData,\n id: data.id,\n };\n // Module data is dynamic since it may only be required in a conditional branch.\n // Moreover, on a long job, the published module itself may be updated on the scheduler.\n const moduleLength = kvin.stringify(moduleData).length; /** @TODO - fix per DCP-3750 */\n this.moduleInDataSize += moduleLength;\n selectiveDebug() && console.debug('Sandbox.Ring2.fetchModule size', this.moduleInDataSize, moduleLength);\n this.postMessageToEvaluator(message);\n }\n catch (error)\n {\n /*\n * In the event of an error here, we want to let the client know there was a problem in\n * loading their module. In principle we shouldn't need a valid sandbox.slice at sandbox.assign.\n * However, in the implementation of Sup2 there is precisely 1 callsite of sandbox.assign and\n * we do have an associated slice at this point. So we make the assumption that sandbox.slice\n * is valid here.\n */\n if (!this.slice) // Design assumption\n throw new Error(`Must have valid slice in sandbox before sandbox.assign is called: ${this.identifier}`);\n\n const payload = {\n name: error.name,\n message: error.message,\n timestamp: error.timestamp ? 
error.timestamp : new Date(),\n };\n\n this.postWorkEmit('error', payload);\n this.emit('reject', error);\n\n debugging() && console.debug(`Sandbox.Ring2: fetchModule failed ${this.identifier}`, payload, error, Date.now() - this.supervisor.lastTime);\n\n // Close packageManager to start the connection reconnect logic.\n // Should we do a retry loop with fetchModule too?\n this.supervisor.dcp4.resetConnection('packageManager');\n }\n break;\n }\n case 'error':\n /*\n * Ring 2 error messages will only fire for problems inside of the worker that are separate from\n * the work function. In most cases there are other handlers for situations where 'error' may be emitted\n * such as timeouts if the expected message isn't recieved.\n */\n debug('Sandbox error in ring2', data.error);\n this.rejectWithCleanup('during assignment and dependency resolution', data.error);\n break;\n case 'describe':\n case 'evalResult':\n case 'resetStateDone':\n case 'assigned':\n this.emit(data.request, data); // emit internally\n break;\n case 'reject':\n this.emit('reject', data.error); // emit internally\n break;\n default:\n this.error(`Received unhandled request from sandbox ring 2. Data: ${JSON.stringify(data, null, 2)}`);\n break;\n }\n }\n\n async handleRing3Message(data) // eslint-disable-line require-await\n {\n debugging('ring3') && debug('Ring3', this.identifier, data.request);\n\n switch (data.request)\n {\n case 'complete':\n dcp_timers.clearTimeout(this.progressTimeout);\n dcp_timers.clearTimeout(this.sliceTimeout);\n this.progressTimeout = this.sliceTimeout = null;\n\n if (this.progress === null)\n {\n if (this.options.ignoreNoProgress)\n this.warning(\"ENOPROGRESS silenced by localExec: Progress was not called during this slice's execution, in a remote sandbox this would cause the slice to fail.\");\n else\n {\n // If a progress update was never received (progress === null) then reject\n this.emit('reject', new NoProgressError('Sandbox never emitted a progress event.'));\n this.handleRejectedWork(this.sliceTimeReport);\n break;\n }\n }\n \n this.progress = 100;\n this.sliceOutDataSize += kvin.stringify(data.result).length; /** @TODO - fix per DCP-3750 */\n this.attachCGIOToResult(data);\n break;\n case 'progress':\n {\n const { progress, indeterminate, throttledReports, value } = data;\n this.progress = progress;\n // cf. job-noProgress.js\n const progressReport = {\n deltaMs: Date.now() - this.sliceStartTime,\n progress,\n value,\n throttledReports,\n }\n this.progressReports.last = progressReport;\n if (!indeterminate)\n this.progressReports.lastDeterministic = progressReport;\n\n this.resetProgressTimeout();\n this.sandboxEmit('progress', indeterminate || progress < 0 || progress > 100 ? 
undefined : progress);\n break;\n }\n case 'noProgress':\n this.emit('reject', new NoProgressError(data.message));\n break;\n case 'console':\n data.payload.message = kvin.marshal(data.payload.message);\n this.sliceOutDataSize += JSON.stringify(data.payload.message).length; /** @TODO - fix per DCP-3750 */\n this.postWorkEmit('console', data.payload);\n break;\n case 'emitEvent': /* ad-hoc event from the sandbox (work.emit) */\n this.postWorkEmit('custom', data.payload);\n break;\n case 'measurement':\n this.updateTime(data);\n break;\n case 'sandboxError': /* the sandbox itself has an error condition */\n debug(`Ring3 received a 'sandboxError' event for sandbox ${this.identifier}`, data.error);\n this.emit('sandboxError', data.error);\n this.rejectWithCleanup('internal sandbox error while executing work function', data.error);\n break;\n case 'workError': /* the work function threw/rejected */\n debug(`Ring3 received a 'workError' event for sandbox ${this.identifier}`, data.error);\n this.postWorkEmit('error', data.error);\n const wrappedError = new UncaughtExceptionError(data.error);\n this.rejectWithCleanup('error while executing work function', wrappedError);\n break;\n default:\n this.error('Received unhandled request from sandbox ring 3: ' + data.request, null, `data: ${ JSON.stringify(data)}`);\n break; \n }\n }\n\n /**\n * Try to send the error back to the reject handler in Sandbox.work.\n * But if the reject handler is not available (s.b. rare) then cleanup, emit error and throw.\n * @param {string} message\n * @param {Error|string} error\n */\n rejectWithCleanup (message, error)\n {\n if (this.listenerCount('reject') > 0)\n this.emit('reject', error);\n else\n {\n this.terminate(false);\n this.error(`Sandbox ${this.identifier} ${message}`, error);\n throw error;\n }\n }\n\n // _Idx\n //\n // onmessage, onerror, terminate\n //\n\n /**\n * Handles progress and completion events from sandbox.\n * Unless explicitly returned out of this function will re-emit the event\n * where the name of the event is event.data.request.\n *\n * @param {object} event - event received from the evaaluator sandbox\n * @returns {Promise<void>}\n */\n onmessage (event)\n {\n debugging('event') && debug('onmessage-event', event.data.ringSource);\n if (Sandbox.debugEvents)\n console.debug('sandbox - eventDebug:', { id: this.id, state: this.state.valueOf(), event: JSON.stringify(event) });\n\n const { data } = event;\n const ringLevel = data.ringSource\n\n // Give the data to a handler depending on ring level\n if (ringLevel === -1)\n {\n /** @todo XXXpfr Beware of this happening often. */\n this.error(`Message sent directly from raw postMessage for event ${event}. Terminating worker...`);\n this.terminate(true);\n }\n else\n {\n const handler = this.ringMessageHandlers[ringLevel];\n if (handler)\n return handler.call(this, data.value);\n this.warning(`No handler defined for message from ring ${ringLevel} for event ${event}.`);\n }\n }\n\n /**\n * Error handler for the internal sandbox.\n * Emits error event that gets handled up in the Worker class.\n */\n onerror (event)\n {\n /** @todo XXXpfr Beware of onerror firing often. 
*/\n this.error(`Sandbox.onerror emitted an error: ${event}`);\n this.terminate(true, true);\n }\n\n /**\n * Clears the timeout and terminates the sandbox and sometimes emits a reject event.\n *\n * @param {boolean} [reject=true] - if true emit reject event\n * @param {boolean} [immediate=false] - passed to terminate, used by standaloneWorker to immediately close the connection\n */\n terminate (reject = true, immediate = false)\n {\n if (this.slice)\n this.returnSlice();\n if (this.isTerminated)\n return;\n\n selectiveDebug() && console.debug(`Terminate sandbox ${this.identifier}`);\n\n this.state = new Synchronizer(TERMINATED, [ UNREADY, READYING, READY_FOR_ASSIGN, ASSIGNING, ASSIGNED, WORKING, TERMINATED ]);\n\n dcp_timers.clearTimeout(this.progressTimeout);\n dcp_timers.clearTimeout(this.sliceTimeout);\n this.progressTimeout = this.sliceTimeout = null;\n\n if (this.evaluatorHandle && typeof this.evaluatorHandle.terminate === 'function')\n {\n try\n {\n this.evaluatorHandle.terminate(immediate);\n }\n catch (e)\n {\n this.error(`Error terminating sandbox ${this.id}:`, e);\n }\n finally\n {\n this.evaluatorHandle = null;\n }\n }\n\n if (reject)\n this.emit('reject', new Error(`Sandbox ${this.identifier} was terminated.`));\n\n this.sandboxEmit('end');\n }\n\n // _Idx\n //\n // updateTime, resetSliceReport, sandboxEmit, error, warning\n //\n\n /**\n * ringNPostMessage can send a `measurement` request and update these\n * totals.\n */\n updateTime (measurementEvent)\n {\n ['total', 'CPU', 'webGL', 'webGPU'].forEach((key) => {\n if (measurementEvent[key])\n this.sliceTimeReport[key] += measurementEvent[key];\n });\n }\n\n /**\n * Start over sandbox work timers.\n */\n resetSliceReport ()\n {\n this.sliceTimeReport = {\n total: 0,\n CPU: 0,\n webGL: 0,\n webGPU: 0,\n };\n this.sliceOutDataSize = 0;\n }\n\n /**\n * Safe event emitter on sandboxHandle.\n * @param {string} event\n * @param {...any} args\n */\n sandboxEmit(event, ...args)\n {\n this.supervisor.safeEmit(this.sandboxHandle, event, ...args);\n }\n\n /**\n * Error feedback to user.\n * @param {string} message\n * @param {Array<Error>|Error|string} [coreError]\n * @param {string} [additionalInfo]\n * @param {boolean} [supressStack=false]\n */\n error (message, coreError, additionalInfo, supressStack = false)\n {\n this.supervisor.error(message, coreError, additionalInfo, supressStack);\n }\n\n /**\n * Warning feedback to user.\n * @param {string[]} messages\n */\n warning (...messages)\n {\n this.supervisor.warning(...messages);\n }\n}\n\nSandbox.idCounter = 1;\nSandbox.debugWork = false;\nSandbox.debugState = false;\nSandbox.debugEvents = false;\n\nexports.Sandbox = Sandbox;\nexports.SandboxError = SandboxError;\nexports.NoProgressError = NoProgressError;\nexports.SliceTooSlowError = SliceTooSlowError;\nexports.UncaughtExceptionError = UncaughtExceptionError;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/sandbox2.js?");
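Editor's note: the bundled sandbox2.js module above repeats one request/response idiom in applyRequirements, resetState and work: post a request to the evaluator, resolve when the matching '...Done' event arrives, reject if a timeout fires first, and unref() the timer so workers and localExec can still exit. The following is a minimal, self-contained sketch of that idiom only; it is not part of dcp-client 4.3.0-0 or 4.3.0-2, and the names requestWithTimeout and FakeEvaluator are hypothetical, chosen just for illustration.

// Illustrative sketch only -- not shipped in dcp-client.
'use strict';
const { EventEmitter } = require('events');

/*
 * Post a request to `target`, resolve on the matching done-event,
 * reject if `timeoutMs` elapses first. Whichever side settles first
 * detaches the other (clearTimeout vs. removeListener).
 */
function requestWithTimeout(target, request, doneEvent, timeoutMs)
{
  return new Promise(function (resolve, reject) {
    let timer;

    function onDone(payload) {
      clearTimeout(timer);
      resolve(payload);
    }

    target.once(doneEvent, onDone);

    timer = setTimeout(function () {
      target.removeListener(doneEvent, onDone);
      reject(new Error(`${request} never received '${doneEvent}' response`));
    }, timeoutMs);
    timer.unref && timer.unref(); /* let the process exit, as the worker timers do */

    target.postMessageToEvaluator({ request });
  });
}

/* Tiny stand-in for the evaluator side so the sketch runs on its own. */
class FakeEvaluator extends EventEmitter
{
  postMessageToEvaluator(message) {
    setImmediate(() => this.emit(message.request + 'Done', { ok: true }));
  }
}

requestWithTimeout(new FakeEvaluator(), 'resetState', 'resetStateDone', 1000)
  .then((result) => console.log('done', result))
  .catch((error) => console.error(error));

The `requirementTimeout !== false` / `resetStateTimeout !== false` sentinels in the shipped code serve the same purpose as the clearTimeout/removeListener pairing here: they guarantee that only one of the success or timeout paths can settle the promise.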
4802
4802
 
4803
4803
  /***/ }),
4804
4804