dcp-client 4.2.13 → 4.2.15

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -3841,7 +3841,7 @@ eval("// Copyright Joyent, Inc. and other Node contributors.\n//\n// Permission
  /***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
 
  "use strict";
- eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ Modal)\n/* harmony export */ });\n/**\n * A Small Modal Class\n * @module Modal\n */\n/* globals Event dcpConfig */\nclass Modal {\n constructor (title, message, callback = false, exitHandler = false, {\n continueLabel = 'Continue',\n cancelLabel = 'Cancel',\n cancelVisible = true\n } = {}) {\n const modal = document.createElement('div')\n modal.className = 'dcp-modal-container-old day'\n modal.innerHTML = `\n <dialog class=\"dcp-modal-content\">\n <div class=\"dcp-modal-header\">\n <h2>${title}<button type=\"button\" class=\"close\">&times;</button></h2>\n ${message ? '<p>' + message + '</p>' : ''}\n </div>\n <div class=\"dcp-modal-loading hidden\">\n <div class='loading'></div>\n </div>\n <form onsubmit='return false' method=\"dialog\">\n <div class=\"dcp-modal-body\"></div>\n <div class=\"dcp-modal-footer ${cancelVisible ? '' : 'centered'}\">\n <button type=\"submit\" class=\"continue green-modal-button\">${continueLabel}</button>\n <button type=\"button\" class=\"cancel green-modal-button\">${cancelLabel}</button>\n </div>\n </form>\n </dialog>`\n\n // To give a reference to do developer who wants to override the form submit.\n // May occur if they want to validate the information in the backend\n // without closing the modal prematurely.\n this.form = modal.querySelector('.dcp-modal-content form')\n this.continueButton = modal.querySelector('.dcp-modal-footer button.continue')\n this.cancelButton = modal.querySelector('.dcp-modal-footer button.cancel')\n this.closeButton = modal.querySelector('.dcp-modal-header .close')\n if (!cancelVisible) {\n this.cancelButton.style.display = 'none'\n }\n\n // To remove the event listener, the reference to the original function\n // added is required.\n this.formSubmitHandler = this.continue.bind(this)\n\n modal.addEventListener('keydown', function (event) {\n event.stopPropagation()\n // 27 is the keycode for the escape key.\n if (event.keyCode === 27) this.close()\n }.bind(this))\n\n this.container = modal\n this.callback = callback\n this.exitHandler = exitHandler\n document.body.appendChild(modal)\n }\n\n changeFormSubmitHandler (newFormSubmitHandler) {\n this.formSubmitHandler = newFormSubmitHandler\n }\n\n /**\n * Validates the form values in the modal and calls the modal's callback\n */\n async continue (event) {\n // To further prevent form submission from trying to redirect from the\n // current page.\n if (event instanceof Event) {\n event.preventDefault()\n }\n let fieldsAreValid = true\n let formElements = this.container.querySelectorAll('.dcp-modal-body select, .dcp-modal-body input, .dcp-modal-body textarea')\n\n const formValues = []\n if (typeof formElements.length === 'undefined') formElements = [formElements]\n // Separate into two loops to enable input validation requiring formValues\n // that come after it. e.g. 
Two password fields matching.\n for (let i = 0; i < formElements.length; i++) {\n switch (formElements[i].type) {\n case 'file':\n formValues.push(formElements[i])\n break\n case 'checkbox':\n formValues.push(formElements[i].checked)\n break\n default:\n formValues.push(formElements[i].value)\n break\n }\n }\n for (let i = 0; i < formElements.length; i++) {\n if (formElements[i].validation) {\n // Optional fields are allowed to be empty but still can't be wrong if not empty.\n if (!(formElements[i].value === '' && !formElements[i].required)) {\n if (typeof formElements[i].validation === 'function') {\n if (!formElements[i].validation(formValues)) {\n fieldsAreValid = false\n formElements[i].classList.add('is-invalid')\n }\n } else if (!formElements[i].validation.test(formElements[i].value)) {\n fieldsAreValid = false\n formElements[i].classList.add('is-invalid')\n }\n }\n }\n }\n\n if (!fieldsAreValid) return\n\n this.loading()\n if (typeof this.callback === 'function') {\n try {\n return this.callback(formValues)\n } catch (error) {\n console.error('Unexpected error in modal.continue:', error);\n return this.close(false)\n }\n }\n this.close(true)\n }\n\n loading () {\n this.container.querySelector('.dcp-modal-loading').classList.remove('hidden')\n this.container.querySelector('.dcp-modal-body').classList.add('hidden')\n this.container.querySelector('.dcp-modal-footer').classList.add('hidden')\n }\n\n open () {\n this.form.addEventListener('submit', async (event) => {\n const success = await this.formSubmitHandler(event)\n if (success === false) {\n return\n }\n this.close(true)\n })\n // When the user clicks on <span> (x), close the modal\n this.closeButton.addEventListener('click', this.close.bind(this))\n this.cancelButton.addEventListener('click', this.close.bind(this))\n\n // Prevent lingering outlines after clicking some form elements.\n this.container.querySelectorAll('.dcp-modal-body button, .dcp-modal-body input[type=\"checkbox\"]').forEach(element => {\n element.addEventListener('click', () => {\n element.blur()\n })\n })\n\n // Show the modal.\n this.container.style.display = 'block'\n\n const formElements = this.container.querySelectorAll('.dcp-modal-body select, .dcp-modal-body input')\n if (formElements.length) {\n formElements[0].focus()\n if (formElements[0].type === 'text') {\n formElements[0].select()\n }\n for (const el of formElements) {\n if (el.realType) {\n el.type = el.realType\n }\n }\n } else {\n // With no form elements to allow for form submission on enter, focus the\n // continue button.\n this.container.querySelector('.dcp-modal-footer button.continue').focus()\n }\n } // TODO: This should return a promise with the action resolving it\n\n /**\n * Shows the modal and returns a promise of the result of the modal (e.g. 
was\n * it closed, did its action succeed?)\n */\n showModal () {\n return new Promise((resolve, reject) => {\n this.form.addEventListener('submit', handleContinue.bind(this))\n this.cancelButton.addEventListener('click', handleCancel.bind(this))\n this.closeButton.addEventListener('click', handleCancel.bind(this))\n\n // Prevent lingering outlines after clicking some form elements.\n this.container.querySelectorAll('.dcp-modal-body button, .dcp-modal-body input[type=\"checkbox\"]').forEach(element => {\n element.addEventListener('click', () => {\n element.blur()\n })\n })\n\n // Show the modal.\n this.container.style.display = 'block'\n\n const formElements = this.container.querySelectorAll('.dcp-modal-body select, .dcp-modal-body input')\n if (formElements.length) {\n formElements[0].focus()\n if (formElements[0].type === 'text') {\n formElements[0].select()\n }\n for (const el of formElements) {\n if (el.realType) {\n el.type = el.realType\n }\n }\n } else {\n // With no form elements to allow for form submission on enter, focus the\n // continue button.\n this.continueButton.focus()\n }\n\n async function handleContinue (event) {\n let result\n try {\n result = await this.formSubmitHandler(event)\n } catch (error) {\n reject(error)\n }\n this.close(true)\n resolve(result)\n }\n\n async function handleCancel () {\n let result\n try {\n result = await this.close()\n } catch (error) {\n reject(error)\n }\n resolve(result)\n }\n })\n }\n\n close (success = false) {\n this.container.style.display = 'none'\n if (this.container.parentNode) {\n this.container.parentNode.removeChild(this.container)\n }\n\n // @todo this needs to remove eventlisteners to prevent memory leaks\n\n if ((success !== true) && typeof this.exitHandler === 'function') {\n return this.exitHandler(this)\n }\n }\n\n /**\n * Adds different form elements to the modal depending on the case.\n *\n * @param {*} elements - The properties of the form elements to add.\n * @returns {HTMLElement} The input form elements.\n */\n addFormElement (...elements) {\n const body = this.container.querySelector('.dcp-modal-body')\n const inputElements = []\n let label\n for (let i = 0; i < elements.length; i++) {\n let row = document.createElement('div')\n row.className = 'row'\n\n let col, input\n switch (elements[i].type) {\n case 'button':\n col = document.createElement('div')\n col.className = 'col-md-12'\n\n input = document.createElement('button')\n input.innerHTML = elements[i].label\n input.type = 'button'\n input.classList.add('green-modal-button')\n if (!elements[i].onclick) {\n throw new Error('A button in the modal body should have an on click event handler.')\n }\n input.addEventListener('click', elements[i].onclick)\n\n col.appendChild(input)\n row.appendChild(col)\n break\n case 'textarea':\n col = document.createElement('div')\n col.className = 'col-md-12'\n\n input = document.createElement('textarea')\n input.className = 'text-input-field form-control'\n if (elements[i].placeholder) input.placeholder = elements[i].placeholder\n\n col.appendChild(input)\n row.appendChild(col)\n break\n case 'text':\n case 'email':\n case 'number':\n case 'password': {\n const inputCol = document.createElement('div')\n\n input = document.createElement('input')\n input.type = elements[i].type\n input.validation = elements[i].validation\n input.autocomplete = elements[i].autocomplete || (elements[i].type === 'password' ? 
'off' : 'on')\n input.className = 'text-input-field form-control'\n\n // Adding bootstraps custom feedback styles.\n let invalidFeedback = null\n if (elements[i].invalidFeedback) {\n invalidFeedback = document.createElement('div')\n invalidFeedback.className = 'invalid-feedback'\n invalidFeedback.innerText = elements[i].invalidFeedback\n }\n\n if (elements[i].type === 'password') {\n elements[i].realType = 'password'\n }\n\n if (elements[i].label) {\n const labelCol = document.createElement('div')\n label = document.createElement('label')\n label.innerText = elements[i].label\n const inputId = 'dcp-modal-input-' + this.container.querySelectorAll('input[type=\"text\"], input[type=\"email\"], input[type=\"number\"], input[type=\"password\"]').length\n label.setAttribute('for', inputId)\n input.id = inputId\n labelCol.classList.add('col-md-6', 'label-column')\n labelCol.appendChild(label)\n row.appendChild(labelCol)\n inputCol.className = 'col-md-6'\n } else {\n inputCol.className = 'col-md-12'\n }\n\n inputCol.appendChild(input)\n if (invalidFeedback !== null) {\n inputCol.appendChild(invalidFeedback)\n }\n row.appendChild(inputCol)\n break\n }\n case 'select':\n col = document.createElement('div')\n col.className = 'col-md-4'\n\n label = document.createElement('span')\n label.innerText = elements[i].label\n\n col.appendChild(label)\n row.appendChild(col)\n\n col = document.createElement('div')\n col.className = 'col-md-8'\n\n input = document.createElement('select')\n\n col.appendChild(input)\n row.appendChild(col)\n break\n case 'checkbox': {\n row.classList.add('checkbox-row')\n const checkboxLabelCol = document.createElement('div')\n checkboxLabelCol.classList.add('label-column', 'checkbox-label-column')\n\n label = document.createElement('label')\n label.innerText = elements[i].label\n label.for = 'dcp-checkbox-input-' + this.container.querySelectorAll('input[type=\"checkbox\"]').length\n label.setAttribute('for', label.for)\n label.className = 'checkbox-label'\n\n checkboxLabelCol.appendChild(label)\n\n const checkboxCol = document.createElement('div')\n checkboxCol.classList.add('checkbox-column')\n\n input = document.createElement('input')\n input.type = 'checkbox'\n input.id = label.for\n if (elements[i].checked) {\n input.checked = true\n }\n\n checkboxCol.appendChild(input)\n\n if (elements[i].labelToTheRightOfCheckbox) {\n checkboxCol.classList.add('col-md-5')\n row.appendChild(checkboxCol)\n checkboxLabelCol.classList.add('col-md-7')\n row.appendChild(checkboxLabelCol)\n } else {\n checkboxLabelCol.classList.add('col-md-6')\n checkboxCol.classList.add('col-md-6')\n row.appendChild(checkboxLabelCol)\n row.appendChild(checkboxCol)\n }\n break\n }\n case 'file':\n [input, row] = this.addFileInput(elements[i], input, row)\n break\n case 'label':\n row.classList.add('label-row')\n label = document.createElement('label')\n label.innerText = elements[i].label\n row.appendChild(label)\n break\n }\n\n // Copy other possibly specified element properties:\n const inputPropertyNames = ['title', 'inputmode', 'value', 'minLength', 'maxLength', 'size', 'required', 'pattern', 'min', 'max', 'step', 'placeholder', 'accept', 'multiple', 'id', 'onkeypress', 'oninput', 'for', 'readonly', 'autocomplete']\n for (const propertyName of inputPropertyNames) {\n if (Object.prototype.hasOwnProperty.call(elements[i], propertyName)) {\n if (propertyName === 'for' && !label.hasAttribute(propertyName)) {\n label.setAttribute(propertyName, elements[i][propertyName])\n }\n if (propertyName.startsWith('on')) {\n 
input.addEventListener(propertyName.slice(2), elements[i][propertyName])\n } else {\n input.setAttribute(propertyName, elements[i][propertyName])\n }\n }\n }\n\n inputElements.push(input)\n body.appendChild(row)\n }\n\n if (inputElements.length === 1) return inputElements[0]\n else return inputElements\n }\n\n /**\n * Adds a drag and drop file form element to the modal.\n *\n * @param {*} fileInputProperties - An object specifying some of the\n * properties of the file input element.\n * @param {*} fileInput - Placeholders to help create the file\n * input.\n * @param {HTMLDivElement} row - Placeholders to help create the file\n * input.\n */\n addFileInput (fileInputProperties, fileInput, row) {\n // Adding the upload label.\n const uploadLabel = document.createElement('label')\n uploadLabel.innerText = fileInputProperties.label\n row.appendChild(uploadLabel)\n const body = this.container.querySelector('.dcp-modal-body')\n body.appendChild(row)\n const fileSelectionRow = document.createElement('div')\n fileSelectionRow.id = 'file-selection-row'\n\n // Adding the drag and drop file upload input.\n const dropContainer = document.createElement('div')\n dropContainer.id = 'drop-container'\n\n // Adding an image of a wallet\n const imageContainer = document.createElement('div')\n imageContainer.id = 'image-container'\n const walletImage = document.createElement('span')\n walletImage.classList.add('fas', 'fa-wallet')\n imageContainer.appendChild(walletImage)\n\n // Adding some text prompts\n const dropMessage = document.createElement('span')\n dropMessage.innerText = 'Drop a keystore file here'\n const orMessage = document.createElement('span')\n orMessage.innerText = 'or'\n\n // Adding the manual file input element (hiding the default one)\n const fileInputContainer = document.createElement('div')\n const fileInputLabel = document.createElement('label')\n // Linking the label to the file input so that clicking on the label\n // activates the file input.\n fileInputLabel.setAttribute('for', 'file-input')\n fileInputLabel.innerText = 'Browse'\n fileInput = document.createElement('input')\n fileInput.type = fileInputProperties.type\n fileInput.id = 'file-input'\n // To remove the lingering outline after selecting the file.\n fileInput.addEventListener('click', () => {\n fileInput.blur()\n })\n fileInputContainer.append(fileInput, fileInputLabel)\n\n // Creating the final row element to append to the modal body.\n dropContainer.append(imageContainer, dropMessage, orMessage, fileInputContainer)\n fileSelectionRow.appendChild(dropContainer)\n\n // Adding functionality to the drag and drop file input.\n dropContainer.addEventListener('drop', selectDroppedFile.bind(this))\n dropContainer.addEventListener('drop', unhighlightDropArea)\n // Prevent file from being opened by the browser.\n dropContainer.ondragover = highlightDropArea\n dropContainer.ondragenter = highlightDropArea\n dropContainer.ondragleave = unhighlightDropArea\n\n fileInput.addEventListener('change', handleFileChange)\n\n const fileNamePlaceholder = document.createElement('center')\n fileNamePlaceholder.id = 'file-name-placeholder'\n fileNamePlaceholder.className = 'row'\n fileNamePlaceholder.innerText = ''\n fileSelectionRow.appendChild(fileNamePlaceholder)\n fileNamePlaceholder.classList.add('hidden')\n\n // Check if the continue button is invalid on the keystore upload modal and\n // click it if it should no longer be invalid.\n this.continueButton.addEventListener('invalid', () => {\n const fileFormElements = 
this.container.querySelectorAll('.dcp-modal-body input[type=\"file\"], .dcp-modal-body input[type=\"text\"]')\n const filledInFileFormElements = Array.from(fileFormElements).filter(fileFormElement => fileFormElement.value !== '')\n if (fileFormElements.length !== 0 && filledInFileFormElements.length !== 0) {\n this.continueButton.setCustomValidity('')\n // Clicking instead of dispatching a submit event to ensure other form validation is used before submitting the form.\n this.continueButton.click()\n }\n })\n\n return [fileInput, fileSelectionRow]\n\n /**\n * Checks that the dropped items contain only a single keystore file.\n * If valid, sets the file input's value to the dropped file.\n * @param {DragEvent} event - Contains the files dropped.\n */\n function selectDroppedFile (event) {\n // Prevent file from being opened.\n event.preventDefault()\n\n // Check if only one file was dropped.\n const wasOneFileDropped = event.dataTransfer.items.length === 1 ||\n event.dataTransfer.files.length === 1\n updateFileSelectionStatus(wasOneFileDropped)\n if (!wasOneFileDropped) {\n fileInput.setCustomValidity('Only one file can be uploaded.')\n fileInput.reportValidity()\n return\n } else {\n fileInput.setCustomValidity('')\n }\n\n // Now to use the DataTransfer interface to access the file(s), setting\n // the value of the file input.\n const file = event.dataTransfer.files[0]\n\n if (checkFileExtension(file)) {\n fileInput.files = event.dataTransfer.files\n fileInput.dispatchEvent(new Event('change'))\n }\n }\n\n function handleFileChange () {\n if (checkFileExtension(this.files[0]) && this.files.length === 1) {\n fileNamePlaceholder.innerText = `Selected File: ${this.files[0].name}`\n updateFileSelectionStatus(true)\n // Invoke a callback if additional functionality is required.\n if (typeof fileInputProperties.callback === 'function') {\n fileInputProperties.callback(this.files[0])\n }\n }\n }\n\n /**\n * Checks if the file extension on the inputted file is correct.\n * @param {File} file - The file to check\n * @returns {boolean} True if the file extension is valid, false otherwise.\n */\n function checkFileExtension (file) {\n // If there's no restriction, return true.\n if (!fileInputProperties.extension) {\n return true\n }\n const fileExtension = file.name.split('.').pop()\n const isValidExtension = fileExtension === fileInputProperties.extension\n updateFileSelectionStatus(isValidExtension)\n if (!isValidExtension) {\n fileInput.setCustomValidity(`Only a .${fileInputProperties.extension} file can be uploaded.`)\n fileInput.reportValidity()\n fileNamePlaceholder.classList.add('hidden')\n } else {\n fileInput.setCustomValidity('')\n }\n return isValidExtension\n }\n\n /**\n * Updates the file input to reflect the validity of the current file\n * selection.\n * @param {boolean} isValidFileSelection - True if a single .keystore file\n * was selected. False otherwise.\n */\n function updateFileSelectionStatus (isValidFileSelection) {\n imageContainer.innerHTML = ''\n const statusImage = document.createElement('span')\n statusImage.classList.add('fas', isValidFileSelection ? 'fa-check' : 'fa-times')\n statusImage.style.color = isValidFileSelection ? 
'green' : 'red'\n imageContainer.appendChild(statusImage)\n\n if (!isValidFileSelection) {\n fileInput.value = null\n fileNamePlaceholder.classList.add('hidden')\n } else {\n fileNamePlaceholder.classList.remove('hidden')\n }\n\n // If the modal contains a password field for a keystore file, change its\n // visibility.\n const walletPasswordInputContainer = document.querySelector('.dcp-modal-body input[type=\"password\"]').parentElement.parentElement\n if (walletPasswordInputContainer) {\n if (isValidFileSelection) {\n walletPasswordInputContainer.classList.remove('hidden')\n const walletPasswordInput = document.querySelector('.dcp-modal-body input[type=\"password\"]')\n walletPasswordInput.focus()\n } else {\n walletPasswordInputContainer.classList.add('hidden')\n }\n }\n }\n\n function highlightDropArea (event) {\n event.preventDefault()\n this.classList.add('highlight')\n }\n\n function unhighlightDropArea (event) {\n event.preventDefault()\n this.classList.remove('highlight')\n }\n }\n\n /**\n * Sets up a custom tooltip to pop up when the passwords do not match, but are\n * valid otherwise.\n */\n addFormValidationForPasswordConfirmation () {\n const [newPassword, confirmPassword] = document.querySelectorAll('.dcp-modal-body input[type=\"password\"]')\n if (!newPassword || !confirmPassword) {\n throw Error('New Password field and Confirm Password fields not present.')\n }\n\n newPassword.addEventListener('input', checkMatchingPasswords)\n confirmPassword.addEventListener('input', checkMatchingPasswords)\n\n function checkMatchingPasswords () {\n if (newPassword.value !== confirmPassword.value &&\n newPassword.validity.valid &&\n confirmPassword.validity.valid) {\n newPassword.setCustomValidity('Both passwords must match.')\n } else if (newPassword.value === confirmPassword.value ||\n newPassword.validity.tooShort ||\n newPassword.validity.patternMismatch ||\n newPassword.validity.valueMissing ||\n confirmPassword.validity.tooShort ||\n confirmPassword.validity.patternMismatch ||\n confirmPassword.validity.valueMissing) {\n // If the passwords fields match or have become invalidated some other\n // way again, reset the custom message.\n newPassword.setCustomValidity('')\n }\n }\n }\n\n updateInvalidEmailMessage() {\n const email = document.querySelector('.dcp-modal-body input[id=\"email\"')\n if (!email){\n throw Error(\"Email field not present\")\n }\n email.addEventListener('input', checkValidEmail);\n function checkValidEmail() {\n if (!email.validity.patternMismatch &&\n !email.validity.valueMissing) {\n email.setCustomValidity('')\n } else {\n email.setCustomValidity(\"Enter a valid email address.\")\n }\n\n }\n }\n\n /**\n * Adds message(s) to the modal's body.\n * @param {string} messages - The message(s) to add to the modal's body.\n * @returns Paragraph element(s) containing the message(s) added to the\n * modal's body.\n */\n addMessage (...messages) {\n const elements = []\n const body = this.container.querySelector('.dcp-modal-body')\n for (let i = 0; i < messages.length; i++) {\n const row = document.createElement('div')\n row.className = 'row'\n\n const paragraph = document.createElement('p')\n paragraph.innerHTML = messages[i]\n paragraph.classList.add('message')\n row.appendChild(paragraph)\n body.appendChild(row)\n\n elements.push(paragraph)\n }\n\n if (elements.length === 1) return elements[0]\n else return elements\n }\n\n addHorizontalRule () {\n const body = this.container.querySelector('.dcp-modal-body')\n body.appendChild(document.createElement('hr'))\n }\n\n // 
Does what it says. Still ill advised to use unless you have to.\n addCustomHTML (htmlStr, browseCallback) {\n const elements = []\n const body = this.container.querySelector('.dcp-modal-body')\n body.innerHTML += htmlStr\n body.querySelector('#browse-button').addEventListener('click', browseCallback.bind(this, this))\n\n if (elements.length === 1) return elements[0]\n else return elements\n }\n\n addButton (...buttons) {\n const elements = []\n const body = this.container.querySelector('.dcp-modal-body')\n for (let i = 0; i < buttons.length; i++) {\n const row = document.createElement('div')\n row.className = 'row'\n\n let col = document.createElement('div')\n col.className = 'col-md-4'\n\n const description = document.createElement('span')\n description.innerText = buttons[i].description\n\n col.appendChild(description)\n row.appendChild(col)\n\n col = document.createElement('div')\n col.className = 'col-md-8'\n\n const button = document.createElement('button')\n button.innerText = buttons[i].label\n button.addEventListener('click', buttons[i].callback.bind(this, this))\n\n elements.push(button)\n\n col.appendChild(button)\n row.appendChild(col)\n\n body.appendChild(row)\n }\n\n if (elements.length === 1) return elements[0]\n else return elements\n }\n}\n\n\n// Inject our special stylesheet from dcp-client only if we're on the portal webpage.\nif (typeof window !== 'undefined' && typeof document !== 'undefined' && dcpConfig.portal.location.hostname === window.location.hostname) {\n // <link rel='stylesheet' href='/css/dashboard.css'>\n const stylesheet = document.createElement('link')\n stylesheet.rel = 'stylesheet'\n // Needed for the duplicate check done later.\n stylesheet.id = 'dcp-modal-styles'\n\n const dcpClientBundle = document.getElementById('_dcp_client_bundle')\n let src\n if (dcpClientBundle) {\n src = dcpClientBundle.src.replace('dcp-client-bundle.js', 'dcp-modal-style.css')\n } else {\n src = dcpConfig.portal.location.href + 'dcp-client/dist/dcp-modal-style.css'\n }\n\n stylesheet.href = src\n // If the style was injected before, don't inject it again.\n // Could occur when loading a file that imports Modal.js and loading\n // comput.min.js in the same HTML file.\n if (document.getElementById(stylesheet.id) === null) {\n document.getElementsByTagName('head')[0].appendChild(stylesheet)\n }\n\n if (typeof {\"version\":\"8f44464faf259aae5ef214f8752f7ce8728dd5f0\",\"branch\":\"release\",\"dcpClient\":{\"version\":\"4.2.12\",\"from\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#prod-20220907\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#85f2b5d11fef51c90410286f12fa800492957c39\"},\"built\":\"Mon Sep 12 2022 15:21:54 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Mon 12 Sep 2022 03:21:51 PM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.70.0\",\"node\":\"v14.20.0\"} !== 'undefined' && typeof window.Modal === 'undefined') {\n window.Modal = Modal\n }\n}\n\n\n//# sourceURL=webpack://dcp/./portal/www/js/modal.js?");
+ eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ Modal)\n/* harmony export */ });\n/**\n * A Small Modal Class\n * @module Modal\n */\n/* globals Event dcpConfig */\nclass Modal {\n constructor (title, message, callback = false, exitHandler = false, {\n continueLabel = 'Continue',\n cancelLabel = 'Cancel',\n cancelVisible = true\n } = {}) {\n const modal = document.createElement('div')\n modal.className = 'dcp-modal-container-old day'\n modal.innerHTML = `\n <dialog class=\"dcp-modal-content\">\n <div class=\"dcp-modal-header\">\n <h2>${title}<button type=\"button\" class=\"close\">&times;</button></h2>\n ${message ? '<p>' + message + '</p>' : ''}\n </div>\n <div class=\"dcp-modal-loading hidden\">\n <div class='loading'></div>\n </div>\n <form onsubmit='return false' method=\"dialog\">\n <div class=\"dcp-modal-body\"></div>\n <div class=\"dcp-modal-footer ${cancelVisible ? '' : 'centered'}\">\n <button type=\"submit\" class=\"continue green-modal-button\">${continueLabel}</button>\n <button type=\"button\" class=\"cancel green-modal-button\">${cancelLabel}</button>\n </div>\n </form>\n </dialog>`\n\n // To give a reference to do developer who wants to override the form submit.\n // May occur if they want to validate the information in the backend\n // without closing the modal prematurely.\n this.form = modal.querySelector('.dcp-modal-content form')\n this.continueButton = modal.querySelector('.dcp-modal-footer button.continue')\n this.cancelButton = modal.querySelector('.dcp-modal-footer button.cancel')\n this.closeButton = modal.querySelector('.dcp-modal-header .close')\n if (!cancelVisible) {\n this.cancelButton.style.display = 'none'\n }\n\n // To remove the event listener, the reference to the original function\n // added is required.\n this.formSubmitHandler = this.continue.bind(this)\n\n modal.addEventListener('keydown', function (event) {\n event.stopPropagation()\n // 27 is the keycode for the escape key.\n if (event.keyCode === 27) this.close()\n }.bind(this))\n\n this.container = modal\n this.callback = callback\n this.exitHandler = exitHandler\n document.body.appendChild(modal)\n }\n\n changeFormSubmitHandler (newFormSubmitHandler) {\n this.formSubmitHandler = newFormSubmitHandler\n }\n\n /**\n * Validates the form values in the modal and calls the modal's callback\n */\n async continue (event) {\n // To further prevent form submission from trying to redirect from the\n // current page.\n if (event instanceof Event) {\n event.preventDefault()\n }\n let fieldsAreValid = true\n let formElements = this.container.querySelectorAll('.dcp-modal-body select, .dcp-modal-body input, .dcp-modal-body textarea')\n\n const formValues = []\n if (typeof formElements.length === 'undefined') formElements = [formElements]\n // Separate into two loops to enable input validation requiring formValues\n // that come after it. e.g. 
Two password fields matching.\n for (let i = 0; i < formElements.length; i++) {\n switch (formElements[i].type) {\n case 'file':\n formValues.push(formElements[i])\n break\n case 'checkbox':\n formValues.push(formElements[i].checked)\n break\n default:\n formValues.push(formElements[i].value)\n break\n }\n }\n for (let i = 0; i < formElements.length; i++) {\n if (formElements[i].validation) {\n // Optional fields are allowed to be empty but still can't be wrong if not empty.\n if (!(formElements[i].value === '' && !formElements[i].required)) {\n if (typeof formElements[i].validation === 'function') {\n if (!formElements[i].validation(formValues)) {\n fieldsAreValid = false\n formElements[i].classList.add('is-invalid')\n }\n } else if (!formElements[i].validation.test(formElements[i].value)) {\n fieldsAreValid = false\n formElements[i].classList.add('is-invalid')\n }\n }\n }\n }\n\n if (!fieldsAreValid) return\n\n this.loading()\n if (typeof this.callback === 'function') {\n try {\n return this.callback(formValues)\n } catch (error) {\n console.error('Unexpected error in modal.continue:', error);\n return this.close(false)\n }\n }\n this.close(true)\n }\n\n loading () {\n this.container.querySelector('.dcp-modal-loading').classList.remove('hidden')\n this.container.querySelector('.dcp-modal-body').classList.add('hidden')\n this.container.querySelector('.dcp-modal-footer').classList.add('hidden')\n }\n\n open () {\n this.form.addEventListener('submit', async (event) => {\n const success = await this.formSubmitHandler(event)\n if (success === false) {\n return\n }\n this.close(true)\n })\n // When the user clicks on <span> (x), close the modal\n this.closeButton.addEventListener('click', this.close.bind(this))\n this.cancelButton.addEventListener('click', this.close.bind(this))\n\n // Prevent lingering outlines after clicking some form elements.\n this.container.querySelectorAll('.dcp-modal-body button, .dcp-modal-body input[type=\"checkbox\"]').forEach(element => {\n element.addEventListener('click', () => {\n element.blur()\n })\n })\n\n // Show the modal.\n this.container.style.display = 'block'\n\n const formElements = this.container.querySelectorAll('.dcp-modal-body select, .dcp-modal-body input')\n if (formElements.length) {\n formElements[0].focus()\n if (formElements[0].type === 'text') {\n formElements[0].select()\n }\n for (const el of formElements) {\n if (el.realType) {\n el.type = el.realType\n }\n }\n } else {\n // With no form elements to allow for form submission on enter, focus the\n // continue button.\n this.container.querySelector('.dcp-modal-footer button.continue').focus()\n }\n } // TODO: This should return a promise with the action resolving it\n\n /**\n * Shows the modal and returns a promise of the result of the modal (e.g. 
was\n * it closed, did its action succeed?)\n */\n showModal () {\n return new Promise((resolve, reject) => {\n this.form.addEventListener('submit', handleContinue.bind(this))\n this.cancelButton.addEventListener('click', handleCancel.bind(this))\n this.closeButton.addEventListener('click', handleCancel.bind(this))\n\n // Prevent lingering outlines after clicking some form elements.\n this.container.querySelectorAll('.dcp-modal-body button, .dcp-modal-body input[type=\"checkbox\"]').forEach(element => {\n element.addEventListener('click', () => {\n element.blur()\n })\n })\n\n // Show the modal.\n this.container.style.display = 'block'\n\n const formElements = this.container.querySelectorAll('.dcp-modal-body select, .dcp-modal-body input')\n if (formElements.length) {\n formElements[0].focus()\n if (formElements[0].type === 'text') {\n formElements[0].select()\n }\n for (const el of formElements) {\n if (el.realType) {\n el.type = el.realType\n }\n }\n } else {\n // With no form elements to allow for form submission on enter, focus the\n // continue button.\n this.continueButton.focus()\n }\n\n async function handleContinue (event) {\n let result\n try {\n result = await this.formSubmitHandler(event)\n } catch (error) {\n reject(error)\n }\n this.close(true)\n resolve(result)\n }\n\n async function handleCancel () {\n let result\n try {\n result = await this.close()\n } catch (error) {\n reject(error)\n }\n resolve(result)\n }\n })\n }\n\n close (success = false) {\n this.container.style.display = 'none'\n if (this.container.parentNode) {\n this.container.parentNode.removeChild(this.container)\n }\n\n // @todo this needs to remove eventlisteners to prevent memory leaks\n\n if ((success !== true) && typeof this.exitHandler === 'function') {\n return this.exitHandler(this)\n }\n }\n\n /**\n * Adds different form elements to the modal depending on the case.\n *\n * @param {*} elements - The properties of the form elements to add.\n * @returns {HTMLElement} The input form elements.\n */\n addFormElement (...elements) {\n const body = this.container.querySelector('.dcp-modal-body')\n const inputElements = []\n let label\n for (let i = 0; i < elements.length; i++) {\n let row = document.createElement('div')\n row.className = 'row'\n\n let col, input\n switch (elements[i].type) {\n case 'button':\n col = document.createElement('div')\n col.className = 'col-md-12'\n\n input = document.createElement('button')\n input.innerHTML = elements[i].label\n input.type = 'button'\n input.classList.add('green-modal-button')\n if (!elements[i].onclick) {\n throw new Error('A button in the modal body should have an on click event handler.')\n }\n input.addEventListener('click', elements[i].onclick)\n\n col.appendChild(input)\n row.appendChild(col)\n break\n case 'textarea':\n col = document.createElement('div')\n col.className = 'col-md-12'\n\n input = document.createElement('textarea')\n input.className = 'text-input-field form-control'\n if (elements[i].placeholder) input.placeholder = elements[i].placeholder\n\n col.appendChild(input)\n row.appendChild(col)\n break\n case 'text':\n case 'email':\n case 'number':\n case 'password': {\n const inputCol = document.createElement('div')\n\n input = document.createElement('input')\n input.type = elements[i].type\n input.validation = elements[i].validation\n input.autocomplete = elements[i].autocomplete || (elements[i].type === 'password' ? 
'off' : 'on')\n input.className = 'text-input-field form-control'\n\n // Adding bootstraps custom feedback styles.\n let invalidFeedback = null\n if (elements[i].invalidFeedback) {\n invalidFeedback = document.createElement('div')\n invalidFeedback.className = 'invalid-feedback'\n invalidFeedback.innerText = elements[i].invalidFeedback\n }\n\n if (elements[i].type === 'password') {\n elements[i].realType = 'password'\n }\n\n if (elements[i].label) {\n const labelCol = document.createElement('div')\n label = document.createElement('label')\n label.innerText = elements[i].label\n const inputId = 'dcp-modal-input-' + this.container.querySelectorAll('input[type=\"text\"], input[type=\"email\"], input[type=\"number\"], input[type=\"password\"]').length\n label.setAttribute('for', inputId)\n input.id = inputId\n labelCol.classList.add('col-md-6', 'label-column')\n labelCol.appendChild(label)\n row.appendChild(labelCol)\n inputCol.className = 'col-md-6'\n } else {\n inputCol.className = 'col-md-12'\n }\n\n inputCol.appendChild(input)\n if (invalidFeedback !== null) {\n inputCol.appendChild(invalidFeedback)\n }\n row.appendChild(inputCol)\n break\n }\n case 'select':\n col = document.createElement('div')\n col.className = 'col-md-4'\n\n label = document.createElement('span')\n label.innerText = elements[i].label\n\n col.appendChild(label)\n row.appendChild(col)\n\n col = document.createElement('div')\n col.className = 'col-md-8'\n\n input = document.createElement('select')\n\n col.appendChild(input)\n row.appendChild(col)\n break\n case 'checkbox': {\n row.classList.add('checkbox-row')\n const checkboxLabelCol = document.createElement('div')\n checkboxLabelCol.classList.add('label-column', 'checkbox-label-column')\n\n label = document.createElement('label')\n label.innerText = elements[i].label\n label.for = 'dcp-checkbox-input-' + this.container.querySelectorAll('input[type=\"checkbox\"]').length\n label.setAttribute('for', label.for)\n label.className = 'checkbox-label'\n\n checkboxLabelCol.appendChild(label)\n\n const checkboxCol = document.createElement('div')\n checkboxCol.classList.add('checkbox-column')\n\n input = document.createElement('input')\n input.type = 'checkbox'\n input.id = label.for\n if (elements[i].checked) {\n input.checked = true\n }\n\n checkboxCol.appendChild(input)\n\n if (elements[i].labelToTheRightOfCheckbox) {\n checkboxCol.classList.add('col-md-5')\n row.appendChild(checkboxCol)\n checkboxLabelCol.classList.add('col-md-7')\n row.appendChild(checkboxLabelCol)\n } else {\n checkboxLabelCol.classList.add('col-md-6')\n checkboxCol.classList.add('col-md-6')\n row.appendChild(checkboxLabelCol)\n row.appendChild(checkboxCol)\n }\n break\n }\n case 'file':\n [input, row] = this.addFileInput(elements[i], input, row)\n break\n case 'label':\n row.classList.add('label-row')\n label = document.createElement('label')\n label.innerText = elements[i].label\n row.appendChild(label)\n break\n }\n\n // Copy other possibly specified element properties:\n const inputPropertyNames = ['title', 'inputmode', 'value', 'minLength', 'maxLength', 'size', 'required', 'pattern', 'min', 'max', 'step', 'placeholder', 'accept', 'multiple', 'id', 'onkeypress', 'oninput', 'for', 'readonly', 'autocomplete']\n for (const propertyName of inputPropertyNames) {\n if (Object.prototype.hasOwnProperty.call(elements[i], propertyName)) {\n if (propertyName === 'for' && !label.hasAttribute(propertyName)) {\n label.setAttribute(propertyName, elements[i][propertyName])\n }\n if (propertyName.startsWith('on')) {\n 
input.addEventListener(propertyName.slice(2), elements[i][propertyName])\n } else {\n input.setAttribute(propertyName, elements[i][propertyName])\n }\n }\n }\n\n inputElements.push(input)\n body.appendChild(row)\n }\n\n if (inputElements.length === 1) return inputElements[0]\n else return inputElements\n }\n\n /**\n * Adds a drag and drop file form element to the modal.\n *\n * @param {*} fileInputProperties - An object specifying some of the\n * properties of the file input element.\n * @param {*} fileInput - Placeholders to help create the file\n * input.\n * @param {HTMLDivElement} row - Placeholders to help create the file\n * input.\n */\n addFileInput (fileInputProperties, fileInput, row) {\n // Adding the upload label.\n const uploadLabel = document.createElement('label')\n uploadLabel.innerText = fileInputProperties.label\n row.appendChild(uploadLabel)\n const body = this.container.querySelector('.dcp-modal-body')\n body.appendChild(row)\n const fileSelectionRow = document.createElement('div')\n fileSelectionRow.id = 'file-selection-row'\n\n // Adding the drag and drop file upload input.\n const dropContainer = document.createElement('div')\n dropContainer.id = 'drop-container'\n\n // Adding an image of a wallet\n const imageContainer = document.createElement('div')\n imageContainer.id = 'image-container'\n const walletImage = document.createElement('span')\n walletImage.classList.add('fas', 'fa-wallet')\n imageContainer.appendChild(walletImage)\n\n // Adding some text prompts\n const dropMessage = document.createElement('span')\n dropMessage.innerText = 'Drop a keystore file here'\n const orMessage = document.createElement('span')\n orMessage.innerText = 'or'\n\n // Adding the manual file input element (hiding the default one)\n const fileInputContainer = document.createElement('div')\n const fileInputLabel = document.createElement('label')\n // Linking the label to the file input so that clicking on the label\n // activates the file input.\n fileInputLabel.setAttribute('for', 'file-input')\n fileInputLabel.innerText = 'Browse'\n fileInput = document.createElement('input')\n fileInput.type = fileInputProperties.type\n fileInput.id = 'file-input'\n // To remove the lingering outline after selecting the file.\n fileInput.addEventListener('click', () => {\n fileInput.blur()\n })\n fileInputContainer.append(fileInput, fileInputLabel)\n\n // Creating the final row element to append to the modal body.\n dropContainer.append(imageContainer, dropMessage, orMessage, fileInputContainer)\n fileSelectionRow.appendChild(dropContainer)\n\n // Adding functionality to the drag and drop file input.\n dropContainer.addEventListener('drop', selectDroppedFile.bind(this))\n dropContainer.addEventListener('drop', unhighlightDropArea)\n // Prevent file from being opened by the browser.\n dropContainer.ondragover = highlightDropArea\n dropContainer.ondragenter = highlightDropArea\n dropContainer.ondragleave = unhighlightDropArea\n\n fileInput.addEventListener('change', handleFileChange)\n\n const fileNamePlaceholder = document.createElement('center')\n fileNamePlaceholder.id = 'file-name-placeholder'\n fileNamePlaceholder.className = 'row'\n fileNamePlaceholder.innerText = ''\n fileSelectionRow.appendChild(fileNamePlaceholder)\n fileNamePlaceholder.classList.add('hidden')\n\n // Check if the continue button is invalid on the keystore upload modal and\n // click it if it should no longer be invalid.\n this.continueButton.addEventListener('invalid', () => {\n const fileFormElements = 
this.container.querySelectorAll('.dcp-modal-body input[type=\"file\"], .dcp-modal-body input[type=\"text\"]')\n const filledInFileFormElements = Array.from(fileFormElements).filter(fileFormElement => fileFormElement.value !== '')\n if (fileFormElements.length !== 0 && filledInFileFormElements.length !== 0) {\n this.continueButton.setCustomValidity('')\n // Clicking instead of dispatching a submit event to ensure other form validation is used before submitting the form.\n this.continueButton.click()\n }\n })\n\n return [fileInput, fileSelectionRow]\n\n /**\n * Checks that the dropped items contain only a single keystore file.\n * If valid, sets the file input's value to the dropped file.\n * @param {DragEvent} event - Contains the files dropped.\n */\n function selectDroppedFile (event) {\n // Prevent file from being opened.\n event.preventDefault()\n\n // Check if only one file was dropped.\n const wasOneFileDropped = event.dataTransfer.items.length === 1 ||\n event.dataTransfer.files.length === 1\n updateFileSelectionStatus(wasOneFileDropped)\n if (!wasOneFileDropped) {\n fileInput.setCustomValidity('Only one file can be uploaded.')\n fileInput.reportValidity()\n return\n } else {\n fileInput.setCustomValidity('')\n }\n\n // Now to use the DataTransfer interface to access the file(s), setting\n // the value of the file input.\n const file = event.dataTransfer.files[0]\n\n if (checkFileExtension(file)) {\n fileInput.files = event.dataTransfer.files\n fileInput.dispatchEvent(new Event('change'))\n }\n }\n\n function handleFileChange () {\n if (checkFileExtension(this.files[0]) && this.files.length === 1) {\n fileNamePlaceholder.innerText = `Selected File: ${this.files[0].name}`\n updateFileSelectionStatus(true)\n // Invoke a callback if additional functionality is required.\n if (typeof fileInputProperties.callback === 'function') {\n fileInputProperties.callback(this.files[0])\n }\n }\n }\n\n /**\n * Checks if the file extension on the inputted file is correct.\n * @param {File} file - The file to check\n * @returns {boolean} True if the file extension is valid, false otherwise.\n */\n function checkFileExtension (file) {\n // If there's no restriction, return true.\n if (!fileInputProperties.extension) {\n return true\n }\n const fileExtension = file.name.split('.').pop()\n const isValidExtension = fileExtension === fileInputProperties.extension\n updateFileSelectionStatus(isValidExtension)\n if (!isValidExtension) {\n fileInput.setCustomValidity(`Only a .${fileInputProperties.extension} file can be uploaded.`)\n fileInput.reportValidity()\n fileNamePlaceholder.classList.add('hidden')\n } else {\n fileInput.setCustomValidity('')\n }\n return isValidExtension\n }\n\n /**\n * Updates the file input to reflect the validity of the current file\n * selection.\n * @param {boolean} isValidFileSelection - True if a single .keystore file\n * was selected. False otherwise.\n */\n function updateFileSelectionStatus (isValidFileSelection) {\n imageContainer.innerHTML = ''\n const statusImage = document.createElement('span')\n statusImage.classList.add('fas', isValidFileSelection ? 'fa-check' : 'fa-times')\n statusImage.style.color = isValidFileSelection ? 
'green' : 'red'\n imageContainer.appendChild(statusImage)\n\n if (!isValidFileSelection) {\n fileInput.value = null\n fileNamePlaceholder.classList.add('hidden')\n } else {\n fileNamePlaceholder.classList.remove('hidden')\n }\n\n // If the modal contains a password field for a keystore file, change its\n // visibility.\n const walletPasswordInputContainer = document.querySelector('.dcp-modal-body input[type=\"password\"]').parentElement.parentElement\n if (walletPasswordInputContainer) {\n if (isValidFileSelection) {\n walletPasswordInputContainer.classList.remove('hidden')\n const walletPasswordInput = document.querySelector('.dcp-modal-body input[type=\"password\"]')\n walletPasswordInput.focus()\n } else {\n walletPasswordInputContainer.classList.add('hidden')\n }\n }\n }\n\n function highlightDropArea (event) {\n event.preventDefault()\n this.classList.add('highlight')\n }\n\n function unhighlightDropArea (event) {\n event.preventDefault()\n this.classList.remove('highlight')\n }\n }\n\n /**\n * Sets up a custom tooltip to pop up when the passwords do not match, but are\n * valid otherwise.\n */\n addFormValidationForPasswordConfirmation () {\n const [newPassword, confirmPassword] = document.querySelectorAll('.dcp-modal-body input[type=\"password\"]')\n if (!newPassword || !confirmPassword) {\n throw Error('New Password field and Confirm Password fields not present.')\n }\n\n newPassword.addEventListener('input', checkMatchingPasswords)\n confirmPassword.addEventListener('input', checkMatchingPasswords)\n\n function checkMatchingPasswords () {\n if (newPassword.value !== confirmPassword.value &&\n newPassword.validity.valid &&\n confirmPassword.validity.valid) {\n newPassword.setCustomValidity('Both passwords must match.')\n } else if (newPassword.value === confirmPassword.value ||\n newPassword.validity.tooShort ||\n newPassword.validity.patternMismatch ||\n newPassword.validity.valueMissing ||\n confirmPassword.validity.tooShort ||\n confirmPassword.validity.patternMismatch ||\n confirmPassword.validity.valueMissing) {\n // If the passwords fields match or have become invalidated some other\n // way again, reset the custom message.\n newPassword.setCustomValidity('')\n }\n }\n }\n\n updateInvalidEmailMessage() {\n const email = document.querySelector('.dcp-modal-body input[id=\"email\"')\n if (!email){\n throw Error(\"Email field not present\")\n }\n email.addEventListener('input', checkValidEmail);\n function checkValidEmail() {\n if (!email.validity.patternMismatch &&\n !email.validity.valueMissing) {\n email.setCustomValidity('')\n } else {\n email.setCustomValidity(\"Enter a valid email address.\")\n }\n\n }\n }\n\n /**\n * Adds message(s) to the modal's body.\n * @param {string} messages - The message(s) to add to the modal's body.\n * @returns Paragraph element(s) containing the message(s) added to the\n * modal's body.\n */\n addMessage (...messages) {\n const elements = []\n const body = this.container.querySelector('.dcp-modal-body')\n for (let i = 0; i < messages.length; i++) {\n const row = document.createElement('div')\n row.className = 'row'\n\n const paragraph = document.createElement('p')\n paragraph.innerHTML = messages[i]\n paragraph.classList.add('message')\n row.appendChild(paragraph)\n body.appendChild(row)\n\n elements.push(paragraph)\n }\n\n if (elements.length === 1) return elements[0]\n else return elements\n }\n\n addHorizontalRule () {\n const body = this.container.querySelector('.dcp-modal-body')\n body.appendChild(document.createElement('hr'))\n }\n\n // 
Does what it says. Still ill advised to use unless you have to.\n addCustomHTML (htmlStr, browseCallback) {\n const elements = []\n const body = this.container.querySelector('.dcp-modal-body')\n body.innerHTML += htmlStr\n body.querySelector('#browse-button').addEventListener('click', browseCallback.bind(this, this))\n\n if (elements.length === 1) return elements[0]\n else return elements\n }\n\n addButton (...buttons) {\n const elements = []\n const body = this.container.querySelector('.dcp-modal-body')\n for (let i = 0; i < buttons.length; i++) {\n const row = document.createElement('div')\n row.className = 'row'\n\n let col = document.createElement('div')\n col.className = 'col-md-4'\n\n const description = document.createElement('span')\n description.innerText = buttons[i].description\n\n col.appendChild(description)\n row.appendChild(col)\n\n col = document.createElement('div')\n col.className = 'col-md-8'\n\n const button = document.createElement('button')\n button.innerText = buttons[i].label\n button.addEventListener('click', buttons[i].callback.bind(this, this))\n\n elements.push(button)\n\n col.appendChild(button)\n row.appendChild(col)\n\n body.appendChild(row)\n }\n\n if (elements.length === 1) return elements[0]\n else return elements\n }\n}\n\n\n// Inject our special stylesheet from dcp-client only if we're on the portal webpage.\nif (typeof window !== 'undefined' && typeof document !== 'undefined' && dcpConfig.portal.location.hostname === window.location.hostname) {\n // <link rel='stylesheet' href='/css/dashboard.css'>\n const stylesheet = document.createElement('link')\n stylesheet.rel = 'stylesheet'\n // Needed for the duplicate check done later.\n stylesheet.id = 'dcp-modal-styles'\n\n const dcpClientBundle = document.getElementById('_dcp_client_bundle')\n let src\n if (dcpClientBundle) {\n src = dcpClientBundle.src.replace('dcp-client-bundle.js', 'dcp-modal-style.css')\n } else {\n src = dcpConfig.portal.location.href + 'dcp-client/dist/dcp-modal-style.css'\n }\n\n stylesheet.href = src\n // If the style was injected before, don't inject it again.\n // Could occur when loading a file that imports Modal.js and loading\n // comput.min.js in the same HTML file.\n if (document.getElementById(stylesheet.id) === null) {\n document.getElementsByTagName('head')[0].appendChild(stylesheet)\n }\n\n if (typeof {\"version\":\"c96b8086bdb343ed36ff35e133755d3f21613609\",\"branch\":\"release\",\"dcpClient\":{\"version\":\"4.2.15\",\"from\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#prod-20220919\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#20c6d0b3df95fdc029b43e01e1e6ac78c9d5b372\"},\"built\":\"Fri Sep 23 2022 16:37:39 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Fri 23 Sep 2022 04:37:37 PM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.70.0\",\"node\":\"v14.20.0\"} !== 'undefined' && typeof window.Modal === 'undefined') {\n window.Modal = Modal\n }\n}\n\n\n//# sourceURL=webpack://dcp/./portal/www/js/modal.js?");
 
  /***/ }),
 
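The hunk above covers the bundled portal/www/js/modal.js module, which defines and exports the Modal class used by the portal. For reference, a minimal usage sketch based on the constructor and methods visible in the bundle; it assumes a browser page that has already loaded the dcp-client bundle (so window.Modal is defined), and the labels, file extension, and callbacks are illustrative only.

    // Minimal sketch: construct a modal, add a drag-and-drop file input, and open it.
    // Assumes a browser environment with the dcp-client bundle loaded (window.Modal defined).
    const modal = new Modal(
      'Upload keystore',                          // title rendered in the modal header
      'Select a keystore file to continue.',      // optional message paragraph
      (formValues) => console.log(formValues),    // callback invoked by continue() with form values
      () => console.log('modal dismissed'),       // exitHandler called when closed without success
      { continueLabel: 'Upload', cancelLabel: 'Back', cancelVisible: true }
    )
    modal.addFormElement({ type: 'file', label: 'Keystore file', extension: 'keystore' })
    modal.open()
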
@@ -3883,7 +3883,7 @@ eval("/** \n * Factory function which creates instances of the future function t
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
 
  "use strict";
- eval("/**\n * @file dcp-assert.js\n * Simple assertion module for DCP. Assertions are only\n * evaluated for debug builds, except for the security-named\n * assertions.\n *\n * @author Wes Garland, wes@kingsds.network\n * @date Dec 2019\n */\n\n\nvar isDebugBuild = undefined;\n\nexports.assertTriggersDebugger = !!(__webpack_require__(/*! process */ \"./node_modules/process/browser.js\").env.DCP_ASSERT_TRIGGERS_DEBUGGER);\nexports.assertTestTriggersDebugger = !!(__webpack_require__(/*! process */ \"./node_modules/process/browser.js\").env.DCP_ASSERT_TEST_TRIGGERS_DEBUGGER);\n\n/** \n * Sets whether this assertion module is running in a debug mode or not. If not called\n * before the first assertion, then we figure this out by checking the module that DCP\n * was configured in with ./configure.sh. The difference is that non-security assertions\n * are ignored during production builds.\n *\n * @note this code test to run very early in the process due to eager module initialization,\n * especially while loading the webpack bundle, which might not have actually set\n * the correct dcpConfig.build yet, so it's stuck at \"bootstrap\", which we treat mostly\n * like a debug build.\n * \n * @param idb {boolean|undefined} falsey if this is release build; truey if this is a debug build; undefined to detect\n */\nexports.setDebugBuild = function dcpAssert$$setDebugBuild(idb)\n{\n if (typeof idb === 'undefined')\n idb = dcpConfig.build !== 'release';\n \n /* In release mode, rewrite the non-security assertions as fast dummy functions */\n if (!idb)\n {\n let dummy = function dcpAssert$dummy(){return};\n for (let assertion of assertionList) {\n if (typeof exports[assertion] === 'function')\n exports[assertion] = dummy;\n }\n }\n else\n {\n for (let assertion of assertionList)\n exports[assertion] = exports.always[assertion];\n }\n\n if (dcpConfig.build !== 'bootstrap')\n isDebugBuild = Boolean(idb); \n}\n \n/** Generic assertion mechanism. Throws if any argument is not true. 
*/\nlet assert = exports.assert = function dcpAssert$$assert() {\n let e;\n\n if (typeof isDebugBuild === 'undefined')\n exports.setDebugBuild();\n \n if (exports.assertTestTriggersDebugger)\n debugger; // allow-debugger\n \n for (let value of arguments) {\n if (!value) {\n if (exports.assertTriggersDebugger)\n debugger; // allow-debugger\n\n try { /* this throws in ES5 strict mode and maybe future-ES */\n let loc = 2;\n if (Object.keys(exports).map((key) => exports[key]).includes(arguments.callee.caller))\n loc++;\n e = new Error('Assertion failure ' + new Error().stack.toString().split('\\n')[loc].trim());\n } catch(error) {\n e = new Error('Assertion failure');\n }\n e.code = 'EASSERT';\n throw e;\n }\n }\n}\n\n/** Evaluate an expression; assert if the result is not true */\nexports.assertEval = function dcpAssert$$assertEval(expr) {\n assert(eval(expr));\n}\n\n/** Assert to that two values are == equivalent */\nexports.assertEq2 = function dcpAssert$$assertEq2(lValue, rValue) {\n assert(lValue == rValue)\n}\n\n/**\n * Asserts that two values are the same in terms of strict equality (===).\n * Can pass an optional message describing the assertion being made.\n *\n * @param {any} expected The expected value to test for equality\n * @param {any} actual The actual value to compare teh expected value against\n * @param {string} [message=''] An message appended to the assertion error\n */\nexports.assertEq3 = function dcpAssert$$assertEq3(\n expected,\n actual,\n message = '',\n) {\n try {\n assert(expected === actual);\n } catch (e) {\n if (message) {\n e.message += `: ${message}`;\n }\n\n e.message += ` (${expected} !== ${actual})`;\n throw e;\n }\n};\n\n/** Assert to that two values are not == equivalent */\nexports.assertNeq2 = function dcpAssert$$assertNeq2(lValue, rValue) {\n assert(lValue != rValue);\n}\n\n/** Assert to that two values are not the same */\nexports.assertNeq3 = function dcpAssert$$assertNeq3(lValue, rValue) {\n assert(lValue !== rValue);\n}\n\n/**\n * Assertion that ensures a given statement will throw a given exception.\n * @param statement {function} function to invoke which is expected to throw\n * @param statement {string} source code of statement which is evaluated with direct-eval\n * and expected to throw\n * @param code [optional] {string} expected value of the exception's code property\n * @param ctor [optional] {function} function which is expected on the exceptions prototype chain\n * @returns true if expectations were met\n */\nexports.assertThrows = function dcpAssert$$assertThrows(statement, code, ctor) {\n var threw;\n \n if (typeof statement === 'string') {\n statement = function shouldThrow_statement() { eval(arguments[0]) };\n }\n if (arguments.length === 2 && typeof code === 'function') {\n ctor = code;\n code = undefined;\n }\n\n try {\n let result = statement();\n threw = false;\n } catch(e) {\n threw = true;\n if (code)\n assert(e.code === code);\n if (ctor)\n assert(e instanceof ctor);\n }\n\n assert(threw === true);\n}\n\n/**\n * Assertion that ensures a given collection contains a given element.\n *\n * @param {any} haystack The collection to search; must be a Set, Map, Array or Array-like object.\n * @param {any} needle The element to search for\n */\nexports.assertHas = function dcpAssert$$assertHas(haystack, needle) {\n if (Array.isArray(haystack))\n assert(haystack.indexOf(needle) !== -1);\n else if (needle instanceof Set || needle instanceof Map)\n assert(haystack.has(needle));\n else\n assert(Array.from(haystack).indexOf(needle) !== 
-1);\n}\n\n/**\n * Assertion that ensures a given value is of a given type.\n */\nexports.assertIsA = function dcpAssert$$assertIsA(value, type) {\n assert(typeof value === type);\n}\n\n/* *** All assertions must be defined above here *** */\nconst assertionList = Object.keys(exports);\n\n/** Add the security assertions (not disabled by debug build) */\nfor (let assertion of assertionList) {\n let securityAssertion = 'security' + assertion[0].toUpperCase() + assertion.slice(1);\n exports[securityAssertion] = exports[assertion];\n}\n\n/** Add the 'always' assertions (also not disabled by debug build) */\nexports.always = {};\nfor (let assertion of assertionList) {\n exports.always[assertion] = exports[assertion];\n}\n\n\n//# sourceURL=webpack://dcp/./src/common/dcp-assert.js?");
3886
+ eval("/**\n * @file dcp-assert.js\n * Simple assertion module for DCP. Assertions are only\n * evaluated for debug builds, except for the security-named\n * assertions.\n *\n * @author Wes Garland, wes@kingsds.network\n * @date Dec 2019\n */\n\n\nvar isDebugBuild = undefined;\n\nexports.assertTriggersDebugger = !!(__webpack_require__(/*! process */ \"./node_modules/process/browser.js\").env.DCP_ASSERT_TRIGGERS_DEBUGGER);\nexports.assertTestTriggersDebugger = !!(__webpack_require__(/*! process */ \"./node_modules/process/browser.js\").env.DCP_ASSERT_TEST_TRIGGERS_DEBUGGER);\n\n/** \n * Sets whether this assertion module is running in a debug mode or not. If not called\n * before the first assertion, then we figure this out by checking the module that DCP\n * was configured in with ./configure.sh. The difference is that non-security assertions\n * are ignored during production builds.\n *\n * @note this code test to run very early in the process due to eager module initialization,\n * especially while loading the webpack bundle, which might not have actually set\n * the correct dcpConfig.build yet, so it's stuck at \"bootstrap\", which we treat mostly\n * like a debug build.\n * \n * @param idb {boolean|undefined} falsey if this is release build; truey if this is a debug build; undefined to detect\n */\nexports.setDebugBuild = function dcpAssert$$setDebugBuild(idb)\n{\n if (typeof idb === 'undefined')\n idb = (typeof dcpConfig === 'object') && (dcpConfig.build !== 'release');\n \n /* In release mode, rewrite the non-security assertions as fast dummy functions */\n if (!idb)\n {\n let dummy = function dcpAssert$dummy(){return};\n for (let assertion of assertionList) {\n if (typeof exports[assertion] === 'function')\n exports[assertion] = dummy;\n }\n }\n else\n {\n for (let assertion of assertionList)\n exports[assertion] = exports.always[assertion];\n }\n\n if (typeof dcpConfig === 'undefined' || dcpConfig.build !== 'bootstrap')\n isDebugBuild = Boolean(idb); \n}\n \n/** Generic assertion mechanism. Throws if any argument is not true. 
*/\nlet assert = exports.assert = function dcpAssert$$assert() {\n let e;\n\n if (typeof isDebugBuild === 'undefined')\n exports.setDebugBuild();\n \n if (exports.assertTestTriggersDebugger)\n debugger; // allow-debugger\n \n for (let value of arguments) {\n if (!value) {\n if (exports.assertTriggersDebugger)\n debugger; // allow-debugger\n\n try { /* this throws in ES5 strict mode and maybe future-ES */\n let loc = 2;\n if (Object.keys(exports).map((key) => exports[key]).includes(arguments.callee.caller))\n loc++;\n e = new Error('Assertion failure ' + new Error().stack.toString().split('\\n')[loc].trim());\n } catch(error) {\n e = new Error('Assertion failure');\n }\n e.code = 'EASSERT';\n throw e;\n }\n }\n}\n\n/** Evaluate an expression; assert if the result is not true */\nexports.assertEval = function dcpAssert$$assertEval(expr) {\n assert(eval(expr));\n}\n\n/** Assert to that two values are == equivalent */\nexports.assertEq2 = function dcpAssert$$assertEq2(lValue, rValue) {\n assert(lValue == rValue)\n}\n\n/**\n * Asserts that two values are the same in terms of strict equality (===).\n * Can pass an optional message describing the assertion being made.\n *\n * @param {any} expected The expected value to test for equality\n * @param {any} actual The actual value to compare teh expected value against\n * @param {string} [message=''] An message appended to the assertion error\n */\nexports.assertEq3 = function dcpAssert$$assertEq3(\n expected,\n actual,\n message = '',\n) {\n try {\n assert(expected === actual);\n } catch (e) {\n if (message) {\n e.message += `: ${message}`;\n }\n\n e.message += ` (${expected} !== ${actual})`;\n throw e;\n }\n};\n\n/** Assert to that two values are not == equivalent */\nexports.assertNeq2 = function dcpAssert$$assertNeq2(lValue, rValue) {\n assert(lValue != rValue);\n}\n\n/** Assert to that two values are not the same */\nexports.assertNeq3 = function dcpAssert$$assertNeq3(lValue, rValue) {\n assert(lValue !== rValue);\n}\n\n/**\n * Assertion that ensures a given statement will throw a given exception.\n * @param statement {function} function to invoke which is expected to throw\n * @param statement {string} source code of statement which is evaluated with direct-eval\n * and expected to throw\n * @param code [optional] {string} expected value of the exception's code property\n * @param ctor [optional] {function} function which is expected on the exceptions prototype chain\n * @returns true if expectations were met\n */\nexports.assertThrows = function dcpAssert$$assertThrows(statement, code, ctor) {\n var threw;\n \n if (typeof statement === 'string') {\n statement = function shouldThrow_statement() { eval(arguments[0]) };\n }\n if (arguments.length === 2 && typeof code === 'function') {\n ctor = code;\n code = undefined;\n }\n\n try {\n let result = statement();\n threw = false;\n } catch(e) {\n threw = true;\n if (code)\n assert(e.code === code);\n if (ctor)\n assert(e instanceof ctor);\n }\n\n assert(threw === true);\n}\n\n/**\n * Assertion that ensures a given collection contains a given element.\n *\n * @param {any} haystack The collection to search; must be a Set, Map, Array or Array-like object.\n * @param {any} needle The element to search for\n */\nexports.assertHas = function dcpAssert$$assertHas(haystack, needle) {\n if (Array.isArray(haystack))\n assert(haystack.indexOf(needle) !== -1);\n else if (needle instanceof Set || needle instanceof Map)\n assert(haystack.has(needle));\n else\n assert(Array.from(haystack).indexOf(needle) !== 
-1);\n}\n\n/**\n * Assertion that ensures a given value is of a given type.\n */\nexports.assertIsA = function dcpAssert$$assertIsA(value, type) {\n assert(typeof value === type);\n}\n\n/* *** All assertions must be defined above here *** */\nconst assertionList = Object.keys(exports);\n\n/** Add the security assertions (not disabled by debug build) */\nfor (let assertion of assertionList) {\n let securityAssertion = 'security' + assertion[0].toUpperCase() + assertion.slice(1);\n exports[securityAssertion] = exports[assertion];\n}\n\n/** Add the 'always' assertions (also not disabled by debug build) */\nexports.always = {};\nfor (let assertion of assertionList) {\n exports.always[assertion] = exports[assertion];\n}\n\n\n//# sourceURL=webpack://dcp/./src/common/dcp-assert.js?");
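The functional change in this hunk is that setDebugBuild() in dcp-assert.js now tolerates a missing dcpConfig global: build detection checks typeof dcpConfig === 'object' before reading dcpConfig.build, and isDebugBuild is only latched once dcpConfig is absent or no longer reports a 'bootstrap' build. A minimal sketch of that guarded detection, assuming dcpConfig is an optional global (detectDebugBuild is an illustrative helper, not part of the package's exports):

  // Sketch only: mirrors the guarded global-config check added in 4.2.15.
  // dcpConfig is assumed to be an optional global supplied by the host.
  function detectDebugBuild(explicit) {
    if (typeof explicit !== 'undefined')
      return Boolean(explicit);

    // 4.2.13 read dcpConfig.build unconditionally, which throws a
    // ReferenceError when no dcpConfig global exists; 4.2.15 checks for the
    // object first and falls back to treating the build as a release.
    return (typeof dcpConfig === 'object') && dcpConfig.build !== 'release';
  }

  console.log(detectDebugBuild());      // false when dcpConfig is undefined
  console.log(detectDebugBuild(true));  // true: an explicit override wins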
3887
3887
 
3888
3888
  /***/ }),
3889
3889
 
@@ -3903,7 +3903,7 @@ eval("/**\n * @file dcp-build.js Return an object describing the curre
3903
3903
  \***********************************/
3904
3904
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
3905
3905
 
3906
- eval("/**\n * @file node/dcp-dot-dir.js\n * @author Ryan Rossiter, ryan@kingsds.network\n * @date March 2020\n *\n * This module is a utility for resolving the dotDcpDir,\n * the user's configuration directory, which is usually\n * located at ~/.dcp\n *\n * To programmatically determine the correct place to read the .dcp directory,\n * we should look at exports.dotDcpDir, whose initial value can be given by\n * dcpConfig.global.dcpDcpDir.\n */\n\nconst DCP_ENV = __webpack_require__(/*! ./dcp-env */ \"./src/common/dcp-env.js\");\n\nif (DCP_ENV.platform === 'nodejs') {\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n const path = requireNative('path');\n const os = requireNative('os');\n const process = requireNative('process');\n let _homedir;\n \n /** Override DCP's idea of the home directory, expanding tildes and adjusting exports.dotDcpDir as necessary */\n exports.setHomeDir = function dcpDotDir$$setHomeDir(homedir) {\n _homedir = (__webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\").expandPath)(homedir);\n exports.dotDcpDir = path.resolve(_homedir, '.dcp');\n }\n\n exports.setHomeDir(process.env.DCP_HOMEDIR ||\n (dcpConfig.global && dcpConfig.global.dotDcpDir) ||\n os.homedir());\n\n /** Returns where DCP thinks the homedir is */\n exports.getHomeDir = () => _homedir;\n}\n\n\n//# sourceURL=webpack://dcp/./src/common/dcp-dot-dir.js?");
3906
+ eval("/**\n * @file node/dcp-dot-dir.js\n * @author Ryan Rossiter, ryan@kingsds.network\n * @date March 2020\n *\n * This module is a utility for resolving the dotDcpDir,\n * the user's configuration directory, which is usually\n * located at ~/.dcp\n *\n * To programmatically determine the correct place to read the .dcp directory,\n * we should look at exports.dotDcpDir, whose initial value can be affected\n * via the DCP_HOMEDIR environment variable. It can be changed by changing\n * the homedir.\n *\n * Note: caching this module's dcpDcpDir is discouraged, as setHomeDir() will\n * not be able to affect your cache!\n */\nconst DCP_ENV = __webpack_require__(/*! ./dcp-env */ \"./src/common/dcp-env.js\");\n\nif (DCP_ENV.platform === 'nodejs') {\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n const path = requireNative('path');\n const os = requireNative('os');\n const process = requireNative('process');\n let _homedir;\n \n /** Override DCP's idea of the home directory, expanding tildes and adjusting exports.dotDcpDir as necessary */\n exports.setHomeDir = function dcpDotDir$$setHomeDir(homedir) {\n _homedir = (__webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\").expandPath)(homedir);\n exports.dotDcpDir = path.resolve(_homedir, '.dcp');\n }\n\n exports.setHomeDir(process.env.DCP_HOMEDIR ||\n os.homedir());\n\n /** Returns where DCP thinks the homedir is */\n exports.getHomeDir = () => _homedir;\n}\n\n\n//# sourceURL=webpack://dcp/./src/common/dcp-dot-dir.js?");
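This hunk drops the dcpConfig.global fallback from dcp-dot-dir.js, so the initial home directory is now taken from the DCP_HOMEDIR environment variable or os.homedir() alone, and the revised file comment warns against caching dotDcpDir because setHomeDir() can move it later. A rough, self-contained sketch of that resolution order, assuming expandTilde as a stand-in for the bundle's dcp/utils expandPath helper:

  // Sketch of the ~/.dcp resolution order in 4.2.15; not the module itself.
  const path = require('path');
  const os = require('os');

  // Assumed stand-in for expandPath: only handles a leading "~".
  function expandTilde(p) {
    return p.startsWith('~') ? path.join(os.homedir(), p.slice(1)) : p;
  }

  let homedir;
  let dotDcpDir;

  function setHomeDir(dir) {
    homedir = expandTilde(dir);
    dotDcpDir = path.resolve(homedir, '.dcp');
  }

  // DCP_HOMEDIR wins; otherwise fall back to the OS home directory.
  // (4.2.13 also consulted dcpConfig.global.dotDcpDir between these two.)
  setHomeDir(process.env.DCP_HOMEDIR || os.homedir());

  console.log(dotDcpDir); // e.g. /home/alice/.dcp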
3907
3907
 
3908
3908
  /***/ }),
3909
3909
 
@@ -4047,7 +4047,7 @@ eval("exports.DEFAULT_REQUIREMENTS = {\n engine: {\n es7: null,\n spiderm
4047
4047
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4048
4048
 
4049
4049
  "use strict";
4050
- eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file scheduler-constants.js\n * Contants and constant-like precomputed values for use with/by DCPv4.\n * All values in this module are completely deterministic and will change\n * only if the source code changes.\n * @author Wes Garland, wes@kingsds.network\n * @date Nov 2020\n */\n\n\n/** Pre-defined, hard-coded compute groups */\nexports.computeGroups = {\n public: {\n opaqueId: 'WHhetL7mj1w1mw1XV6dxyC', \n id: 1,\n name: 'Public Compute Group (open access)',\n joinKey: 'public',\n joinSecret: '',\n },\n};\n\n/** Currently used bit masks for flags column of jobs table. There is capacity for 31 bit masks. */\nexports.jobFlags = {\n localExec: 1 << 0, /* local exec job; prevent from joining compute groups */\n open: 1 << 1, /* job still open, i.e. able to add more slices */\n workerConsole: 1 << 2, /* job is allowed to log to worker's console - if worker permits also */\n greedyEstimation: 1 << 3, /* Allows job in estimation to have requestTask return multiple estimation slices. */\n isCI: 1 << 4, /* CI job: prevent their slices to be distributed and sheepdog cancel the job*/\n force100pctCPUDensity: 1 << 29, /* Temporary flag that considers the wall clock vs cpu time */\n};\n\nexports.workerIdLength = 22;\n\n/** \n * Constant Groups have a load-time side effect of needing dcpConfig already loaded, in order to\n * determine how they are constructed; proxies are relatively heavy and we don't want them running\n * on production builds if possible.\n */\nfunction initConstantGroups(build)\n{\n clearTimeout(initConstantGroups.timerHnd);\n var argv = Array.from(arguments);\n var dcpConfig = globalThis.hasOwnProperty('dcpConfig') ? globalThis.dcpConfig : {};\n\n /**\n * The list of all possible job status in the status column of the jobs table.\n */\n exports.jobStatus = new SchedulerConstantGroup(\n 'cancelled',\n 'corrupted',\n 'estimation',\n 'finished',\n 'running',\n 'paused',\n 'new',\n );\n\n exports.jobValueKind = new SchedulerConstantGroup(\n 'results',\n 'sliceData',\n 'jobArguments',\n );\n\n /**\n * The list of all possible slice status in the status column of the\n * activeSlices table.\n */\n exports.sliceStatus = new SchedulerConstantGroup(\n 'overdue',\n 'tiebreaker',\n 'scheduled',\n 'working',\n 'paused',\n 'returned',\n 'new',\n );\n\n function SchedulerConstantGroup()\n {\n var argv = Array.from(arguments);\n \n for (let el of argv)\n this[el] = el;\n\n if (build === 'debug')\n {\n let px = new Proxy(this, {\n get: (target, prop) => {\n if (!target.hasOwnProperty(prop))\n throw new Error(`no such constant: '${prop}'`);\n return target[prop];\n },\n set: (target, prop) => {\n throw new Error('constants are immutable!');\n },\n delete: (prop) => {\n throw new Error('constant groups are immutable!');\n },\n });\n\n return px;\n }\n }\n\n /** Temporary arrays for backwards compatibility - do not use for new code */\n exports.sliceStatuses = Object.keys(exports.sliceStatus);\n exports.jobStatuses = Object.keys(exports.jobStatus);\n}\n\ninitConstantGroups('release');\n\nsetImmediate(function initConstantGroupsInDebugMode() {\n try\n {\n /* Init the consts groups in debug mode only once dcpConfig is loaded. 
We can use \"node-only code\" \n * here, because we know that the browser initializes dcpConfig before the bundle is evaluated.\n *\n * We run one tick down the event loop because of dcp-client init semantics.\n */\n if (typeof dcpConfig !== 'undefined' && dcpConfig.build)\n initConstantGroups(dcpConfig.build);\n else\n {\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n requireNative('dcp/node-libs/config').on('loaded', () => initConstantGroups(dcpConfig.build));\n\n initConstantGroups.timerHnd = setTimeout(() => console.warn('scheduler-constants not fully initialized; please load dcpConfig!'), 10000);\n initConstantGroups.timerHnd.unref();\n }\n }\n catch(error)\n {\n if ( false\n || (process && process.env && process.env.DCP_DEBUG)\n || (dcpConfig && dcpConfig.build === 'debug'))\n console.error('Cannot put scheduler constants into debug mode:', error);\n }\n});\n\n\n//# sourceURL=webpack://dcp/./src/common/scheduler-constants.js?");
4050
+ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file scheduler-constants.js\n * Contants and constant-like precomputed values for use with/by DCPv4.\n * All values in this module are completely deterministic and will change\n * only if the source code changes.\n * @author Wes Garland, wes@kingsds.network\n * @date Nov 2020\n */\n\n\n/** Pre-defined, hard-coded compute groups */\nexports.computeGroups = {\n public: {\n opaqueId: 'WHhetL7mj1w1mw1XV6dxyC', \n id: 1,\n name: 'Public Compute Group (open access)',\n joinKey: 'public',\n joinSecret: '',\n },\n};\n\n/** Currently used bit masks for flags column of jobs table. There is capacity for 31 bit masks. */\nexports.jobFlags = {\n localExec: 1 << 0, /* local exec job; prevent from joining compute groups */\n open: 1 << 1, /* job still open, i.e. able to add more slices */\n workerConsole: 1 << 2, /* job is allowed to log to worker's console - if worker permits also */\n greedyEstimation: 1 << 3, /* Allows job in estimation to have requestTask return multiple estimation slices. */\n isCI: 1 << 4, /* CI job: prevent their slices to be distributed and sheepdog cancel the job*/\n force100pctCPUDensity: 1 << 29, /* Temporary flag that considers the wall clock vs cpu time */\n};\n\nexports.workerIdLength = 22;\n\n/** \n * Constant Groups have a load-time side effect of needing dcpConfig already loaded, in order to\n * determine how they are constructed; proxies are relatively heavy and we don't want them running\n * on production builds if possible.\n */\nfunction initConstantGroups(build)\n{\n clearTimeout(initConstantGroups.timerHnd);\n\n /**\n * The list of all possible job status in the status column of the jobs table.\n */\n exports.jobStatus = new SchedulerConstantGroup(\n 'cancelled',\n 'corrupted',\n 'estimation',\n 'finished',\n 'running',\n 'paused',\n 'new',\n );\n\n exports.jobValueKind = new SchedulerConstantGroup(\n 'results',\n 'sliceData',\n 'jobArguments',\n );\n\n /**\n * The list of all possible slice status in the status column of the\n * activeSlices table.\n */\n exports.sliceStatus = new SchedulerConstantGroup(\n 'overdue',\n 'tiebreaker',\n 'scheduled',\n 'working',\n 'paused',\n 'returned',\n 'new',\n );\n\n function SchedulerConstantGroup()\n {\n var argv = Array.from(arguments);\n \n for (let el of argv)\n this[el] = el;\n\n if (build === 'debug')\n {\n let px = new Proxy(this, {\n get: (target, prop) => {\n if (!target.hasOwnProperty(prop))\n throw new Error(`no such constant: '${prop}'`);\n return target[prop];\n },\n set: (target, prop) => {\n throw new Error('constants are immutable!');\n },\n deleteProperty: (_prop) => {\n throw new Error('constant groups are immutable!');\n },\n });\n\n return px;\n }\n }\n\n /** Temporary arrays for backwards compatibility - do not use for new code */\n exports.sliceStatuses = Object.keys(exports.sliceStatus);\n exports.jobStatuses = Object.keys(exports.jobStatus);\n}\n\ninitConstantGroups('release');\n\nsetImmediate(function initConstantGroupsInDebugMode() {\n try\n {\n /* Init the consts groups in debug mode only once dcpConfig is loaded. 
We can use \"node-only code\" \n * here, because we know that the browser initializes dcpConfig before the bundle is evaluated.\n *\n * We run one tick down the event loop because of dcp-client init semantics.\n */\n if (typeof dcpConfig !== 'undefined' && dcpConfig.build)\n initConstantGroups(dcpConfig.build);\n else\n {\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n requireNative('dcp/node-libs/config').on('loaded', () => initConstantGroups(dcpConfig.build));\n\n initConstantGroups.timerHnd = setTimeout(() => console.warn('scheduler-constants not fully initialized; please load dcpConfig!'), 10000);\n initConstantGroups.timerHnd.unref();\n }\n }\n catch(error)\n {\n if ( false\n || (process && process.env && process.env.DCP_DEBUG)\n || (dcpConfig && dcpConfig.build === 'debug'))\n console.error('Cannot put scheduler constants into debug mode:', error);\n }\n});\n\n\n//# sourceURL=webpack://dcp/./src/common/scheduler-constants.js?");
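Besides dropping an unused dcpConfig/argv lookup, this hunk fixes the immutability proxy in scheduler-constants.js: Proxy handlers have no trap named delete, so the 4.2.13 handler key was ignored and deletions passed through silently; the recognized trap name is deleteProperty, which 4.2.15 uses. A small sketch of a constant group guarded that way, assuming a simplified makeConstantGroup stand-in rather than the bundled SchedulerConstantGroup class:

  // Simplified stand-in for the constant-group proxy used in debug builds.
  function makeConstantGroup(...names) {
    const target = {};
    for (const name of names)
      target[name] = name;

    return new Proxy(target, {
      get(t, prop) {
        if (!Object.prototype.hasOwnProperty.call(t, prop))
          throw new Error(`no such constant: '${String(prop)}'`);
        return t[prop];
      },
      set() {
        throw new Error('constants are immutable!');
      },
      // deleteProperty is the valid trap; a handler key named 'delete'
      // (as in 4.2.13) is ignored, so deletions went unchecked.
      deleteProperty() {
        throw new Error('constant groups are immutable!');
      },
    });
  }

  const sliceStatus = makeConstantGroup('new', 'working', 'returned');
  console.log(sliceStatus.working);        // "working"
  try { delete sliceStatus.working; }
  catch (e) { console.log(e.message); }    // "constant groups are immutable!"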
4051
4051
 
4052
4052
  /***/ }),
4053
4053
 
@@ -4147,7 +4147,7 @@ eval("/**\n * @file password.js\n * Modal providing a way to
4147
4147
  \**********************************************/
4148
4148
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4149
4149
 
4150
- eval("/**\n * @file client-modal/utils.js\n * @author KC Erb\n * @date Mar 2020\n * \n * All shared functions among the modals.\n */\nconst { fetchRelative } = __webpack_require__(/*! ./fetch-relative */ \"./src/dcp-client/client-modal/fetch-relative.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nexports.OnCloseErrorCode = 'DCP_CM:CANCELX';\n\nif (DCP_ENV.isBrowserPlatform) {\n // Provide as export for the convenience of `utils.MicroModal` instead of a separate require.\n exports.MicroModal = __webpack_require__(/*! micromodal */ \"./node_modules/micromodal/dist/micromodal.es.js\")[\"default\"];\n}\n\n/**\n * Return a unique string, formatted as a GET parameter, that changes often enough to\n * always force the browser to fetch the latest version of our resource.\n *\n * @note Currently always returns the Date-based poison due to webpack. \n */\nfunction cachePoison() {\n if (true)\n return '?ucp=8f44464faf259aae5ef214f8752f7ce8728dd5f0'; /* installer token */\n return '?ucp=' + Date.now();\n}\n \n/* Detect load type - on webpack, load dynamic content relative to webpack bundle;\n * otherwise load relative to the current scheduler's configured portal.\n */\nexports.myScript = (typeof document !== 'undefined') && document.currentScript;\nexports.corsProxyHref = undefined;\nif (exports.myScript && exports.myScript === (__webpack_require__(/*! ./fetch-relative */ \"./src/dcp-client/client-modal/fetch-relative.js\").myScript)) {\n let url = new ((__webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\").DcpURL))(exports.myScript.src);\n exports.corsProxyHref = url.resolve('../cors-proxy.html');\n}\n\n/**\n * Look for modal id and required ids on page based on config, if not found, provide from dcp-client.\n * The first id in the required array must be the id of the modal's form element.\n * @param {Object} modalConfig Modal configuration object\n * @param {string} modalConfig.id Id of parent modal element\n * @param {string[]} modalConfig.required Array of required ids in parent modal element\n * @param {string[]} [modalConfig.optional] Array of optional ids in parent modal element\n * @param {string} modalConfig.path Relative path to modal html in dcp-client\n * @returns {DOMElement[]} Array of modal elements on page [config.id, ...config.required]\n */\nexports.initModal = async function (modalConfig, onClose) {\n exports.corsProxyHref = exports.corsProxyHref || dcpConfig.portal.location.resolve('dcp-client/cors-proxy.html');\n\n // Call ensure modal on any eager-loaded modals.\n if (modalConfig.eagerLoad) {\n Promise.all(\n modalConfig.eagerLoad.map(config => ensureModal(config))\n )\n };\n\n const [elements, optionalElements] = await ensureModal(modalConfig);\n\n // Wire up form to prevent default, resolve on submission, reject+reset when closed (or call onClose when closed)\n const [modal, form] = elements;\n form.reset(); // ensure that form is fresh\n let formResolve, formReject;\n let formPromise = new Promise( function(res, rej) {\n formResolve = res;\n formReject = rej;\n });\n form.onsubmit = function (submitEvent) {\n submitEvent.preventDefault();\n modal.setAttribute(\"data-state\", \"submitted\");\n formResolve(submitEvent);\n }\n\n exports.MicroModal.show(modalConfig.id, { \n disableFocus: true, \n onClose: onClose || getDefaultOnClose(formReject)\n });\n return [elements, formPromise, 
optionalElements];\n};\n\n// Ensure all required modal elements are on page according to modalConfig\nasync function ensureModal(modalConfig) {\n let allRequiredIds = [modalConfig.id, ...modalConfig.required];\n let missing = allRequiredIds.filter( id => !document.getElementById(id) );\n if (missing.length > 0) {\n if (missing.length !== allRequiredIds.length)\n console.warn(`Some of the ids needed to replace the default DCP-modal were found, but not all. So the default DCP-Modal will be used. Missing ids are: [${missing}].`);\n let contents = await fetchRelative(exports.corsProxyHref, modalConfig.path + cachePoison());\n const container = document.createElement('div');\n container.innerHTML = contents;\n document.body.appendChild(container);\n }\n\n const elements = allRequiredIds.map(id => document.getElementById(id));\n const optionalElements = (modalConfig.optional || []).map(id => document.getElementById(id));\n return [elements, optionalElements];\n};\n\n// This onClose is called by MicroModal and thus has the modal passed to it.\nfunction getDefaultOnClose (formReject) {\n return (modal) => {\n modal.offsetLeft; // forces style recalc\n const origState = modal.dataset.state;\n // reset form including data-state\n modal.setAttribute(\"data-state\", \"new\");\n // reject if closed without submitting form.\n if (origState !== \"submitted\") {\n const err = new DCPError(\"Modal was closed but modal's form was not submitted.\", exports.OnCloseErrorCode);\n formReject(err);\n }\n }\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/client-modal/utils.js?");
4150
+ eval("/**\n * @file client-modal/utils.js\n * @author KC Erb\n * @date Mar 2020\n * \n * All shared functions among the modals.\n */\nconst { fetchRelative } = __webpack_require__(/*! ./fetch-relative */ \"./src/dcp-client/client-modal/fetch-relative.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nexports.OnCloseErrorCode = 'DCP_CM:CANCELX';\n\nif (DCP_ENV.isBrowserPlatform) {\n // Provide as export for the convenience of `utils.MicroModal` instead of a separate require.\n exports.MicroModal = __webpack_require__(/*! micromodal */ \"./node_modules/micromodal/dist/micromodal.es.js\")[\"default\"];\n}\n\n/**\n * Return a unique string, formatted as a GET parameter, that changes often enough to\n * always force the browser to fetch the latest version of our resource.\n *\n * @note Currently always returns the Date-based poison due to webpack. \n */\nfunction cachePoison() {\n if (true)\n return '?ucp=c96b8086bdb343ed36ff35e133755d3f21613609'; /* installer token */\n return '?ucp=' + Date.now();\n}\n \n/* Detect load type - on webpack, load dynamic content relative to webpack bundle;\n * otherwise load relative to the current scheduler's configured portal.\n */\nexports.myScript = (typeof document !== 'undefined') && document.currentScript;\nexports.corsProxyHref = undefined;\nif (exports.myScript && exports.myScript === (__webpack_require__(/*! ./fetch-relative */ \"./src/dcp-client/client-modal/fetch-relative.js\").myScript)) {\n let url = new ((__webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\").DcpURL))(exports.myScript.src);\n exports.corsProxyHref = url.resolve('../cors-proxy.html');\n}\n\n/**\n * Look for modal id and required ids on page based on config, if not found, provide from dcp-client.\n * The first id in the required array must be the id of the modal's form element.\n * @param {Object} modalConfig Modal configuration object\n * @param {string} modalConfig.id Id of parent modal element\n * @param {string[]} modalConfig.required Array of required ids in parent modal element\n * @param {string[]} [modalConfig.optional] Array of optional ids in parent modal element\n * @param {string} modalConfig.path Relative path to modal html in dcp-client\n * @returns {DOMElement[]} Array of modal elements on page [config.id, ...config.required]\n */\nexports.initModal = async function (modalConfig, onClose) {\n exports.corsProxyHref = exports.corsProxyHref || dcpConfig.portal.location.resolve('dcp-client/cors-proxy.html');\n\n // Call ensure modal on any eager-loaded modals.\n if (modalConfig.eagerLoad) {\n Promise.all(\n modalConfig.eagerLoad.map(config => ensureModal(config))\n )\n };\n\n const [elements, optionalElements] = await ensureModal(modalConfig);\n\n // Wire up form to prevent default, resolve on submission, reject+reset when closed (or call onClose when closed)\n const [modal, form] = elements;\n form.reset(); // ensure that form is fresh\n let formResolve, formReject;\n let formPromise = new Promise( function(res, rej) {\n formResolve = res;\n formReject = rej;\n });\n form.onsubmit = function (submitEvent) {\n submitEvent.preventDefault();\n modal.setAttribute(\"data-state\", \"submitted\");\n formResolve(submitEvent);\n }\n\n exports.MicroModal.show(modalConfig.id, { \n disableFocus: true, \n onClose: onClose || getDefaultOnClose(formReject)\n });\n return [elements, formPromise, 
optionalElements];\n};\n\n// Ensure all required modal elements are on page according to modalConfig\nasync function ensureModal(modalConfig) {\n let allRequiredIds = [modalConfig.id, ...modalConfig.required];\n let missing = allRequiredIds.filter( id => !document.getElementById(id) );\n if (missing.length > 0) {\n if (missing.length !== allRequiredIds.length)\n console.warn(`Some of the ids needed to replace the default DCP-modal were found, but not all. So the default DCP-Modal will be used. Missing ids are: [${missing}].`);\n let contents = await fetchRelative(exports.corsProxyHref, modalConfig.path + cachePoison());\n const container = document.createElement('div');\n container.innerHTML = contents;\n document.body.appendChild(container);\n }\n\n const elements = allRequiredIds.map(id => document.getElementById(id));\n const optionalElements = (modalConfig.optional || []).map(id => document.getElementById(id));\n return [elements, optionalElements];\n};\n\n// This onClose is called by MicroModal and thus has the modal passed to it.\nfunction getDefaultOnClose (formReject) {\n return (modal) => {\n modal.offsetLeft; // forces style recalc\n const origState = modal.dataset.state;\n // reset form including data-state\n modal.setAttribute(\"data-state\", \"new\");\n // reject if closed without submitting form.\n if (origState !== \"submitted\") {\n const err = new DCPError(\"Modal was closed but modal's form was not submitted.\", exports.OnCloseErrorCode);\n formReject(err);\n }\n }\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/client-modal/utils.js?");
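The only change in this client-modal/utils.js hunk is the hard-coded installer token inside cachePoison(), the query-string "poison" appended to modal resource URLs so the browser fetches the latest copy of the bundled HTML instead of a stale cached one. A brief sketch of the same cache-busting idea, assuming a hypothetical buildToken value and modal path (neither is the real token or file name):

  // Cache-busting query parameter; a per-release token is baked in at build time.
  const buildToken = 'example-4.2.15-token'; // hypothetical placeholder value
  function cachePoison(useBuildToken = true) {
    if (useBuildToken)
      return '?ucp=' + buildToken; // webpack builds always take this branch
    return '?ucp=' + Date.now();   // fallback kept in the source for non-bundled use
  }

  console.log('login-modal.html' + cachePoison()); // e.g. login-modal.html?ucp=example-4.2.15-token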
4151
4151
 
4152
4152
  /***/ }),
4153
4153
 
@@ -4158,7 +4158,7 @@ eval("/**\n * @file client-modal/utils.js\n * @author KC Erb\n * @date Mar 2020\
4158
4158
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4159
4159
 
4160
4160
  "use strict";
4161
- eval("/**\n * @file Client facing module that implements Compute Groups API\n * @module dcp/compute-groups\n * @access public\n * @author Kayra E-A <kayra@kingsds.network>\n * Wes Garland <wes@kingsds.network>\n * Paul <paul@kingsds.network>\n * @date Sept 2020\n * February 2022\n * May 2022\n */\n\n\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst hash = __webpack_require__(/*! ../../common/hash */ \"./src/common/hash.js\");\nconst { DCPError } = __webpack_require__(/*! ../../common/dcp-error */ \"./src/common/dcp-error.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('scheduler');\nconst { Address } = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { clientError, reconstructServiceError } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n\n/** @typedef {import('dcp/utils').apiServiceType} apiServiceType */\n/** @typedef {import('dcp/utils').apiClientType} apiClientType */\n/** @typedef {string} opaqueId */\n\n/**\n * @typedef {object} cgAccessType\n * @property {opaqueId} [id]\n * @property {string} [joinKey]\n */\n\n/**\n * @typedef {object} cgClientJoinType\n * @property {opaqueId} [id]\n * @property {Address} [joinAddress]\n * @property {string} [joinKey]\n * @property {string} [joinSecret]\n * @property {string} [joinHash]\n */\n\n/**\n * @typedef {object} cgServiceJoinType\n * @property {opaqueId} [id]\n * @property {Address} [joinAddress]\n * @property {string} [joinKey]\n * @property {string} [joinHashHash]\n */\n\n/**\n * Establishes the client connection to the computeGroups microservice if it does not exist already from the default config.\n * \n * @returns {protocolV4.Connection}\n * @access public\n * @example\n * const result = await exports.serviceConnection.send('createGroup', {\n name: name,\n description: description,\n });\n */\n\nexports.serviceConnection = null;\n\n//\n// Reference counting pattern:\n// For every time addRef is called,\n// closeServiceConnection must eventually be called.\n// Reference counting allows multiple execs in a Promise.all .\n//\nvar refCount = 0;\nexports.addRef = function addRef() {\n refCount++;\n}\n\nconst openAndConnectServiceConn = async function openAndConnectServiceConn()\n{\n exports.serviceConnection = new protocolV4.Connection(dcpConfig.scheduler.services.computeGroups);\n exports.serviceConnection.on('close', openAndConnectServiceConn);\n await exports.serviceConnection.connect();\n refCount = 0; // Help with sanity.\n}\n\n/**\n * Resets the client connection to the computeGroups microservice.\n */\nexports.closeServiceConnection = async function closeServiceConnection() {\n if (refCount > 0) refCount--;\n if (exports.serviceConnection && refCount < 1)\n {\n exports.serviceConnection.off('close', openAndConnectServiceConn);\n exports.serviceConnection.close(null, true);\n refCount = 0; // Help with sanity.\n exports.serviceConnection = null;\n }\n};\n\n/**\n * (Used in jobs/index.js)\n * KeepAlive for the service connection to compute groups.\n */\nexports.keepAlive = async function keepAlive() {\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n exports.serviceConnection.keepalive().catch(err => console.error('Warning: 
keepalive failed for compute groups service', err));\n}\n\n/**\n * Checks whether descriptor corresponds to the public compute group from the scheduler constants.\n */\nexports.isPublicComputeGroup = function isPublicComputeGroup(descriptor) {\n return descriptor.id === constants.computeGroups.public.id\n && descriptor.opaqueId === constants.computeGroups.public.opaqueId;\n};\n\n/**\n * Returns a compute group identification snippet for diagnostic messages,\n * @param {object} descriptor - Must have one of the properties joinKey, id (id:=opaqueId). Specifically\n * descriptor = { joinKey: 'dcpDemo' } or descriptor = { id: 'bYcYGQ3NOpFnP4FKs6IBQd' },\n * where the corresponding row in table computeGroups have attributes\n * joinKey:='dcpDemo' or opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd' .\n * @returns {string}\n */\nfunction cgId(descriptor) {\n return (descriptor.joinKey) ? `joinKey ${descriptor.joinKey}` : `id ${descriptor.id}`;\n}\n\n/**\n * Verify sufficient information in descriptor to access a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgAccessType} descriptor \n * @param {string} methodName \n */\nfunction validateCGDescriptor(descriptor, methodName) {\n for (const prop in descriptor) {\n if ([ 'id', 'joinKey' ].includes(prop)) continue;\n if ([ 'joinAddress', 'joinHash', 'joinSecret' ].includes(prop))\n console.warn(`It is not necessary to specify '${prop}' in the descriptor ${JSON.stringify(descriptor)} when calling ${methodName}`);\n else\n console.error(`Do not specify '${prop}' in the descriptor ${JSON.stringify(descriptor)} when calling ${methodName}`);\n }\n}\n\n/**\n * Verify sufficient information in descriptor to authorize a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgClientJoinType} joinDescriptor \n * @param {string} methodName \n */\nfunction validateCGJoinDescriptor(joinDescriptor, methodName) {\n for (const prop in joinDescriptor) {\n if ([ 'id', 'joinKey', 'joinSecret', 'joinHash', 'joinAddress' ].includes(prop)) continue;\n console.error(`Do not specify '${prop}' in the descriptor ${JSON.stringify(joinDescriptor)} when calling ${methodName}`);\n }\n}\n\n/**\n * Build message to go across the wire.\n * Verify sufficient information in descriptor to access a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgAccessType} descriptor\n * @param {string} methodName\n * @returns {cgAccessType}\n */\nfunction buildCGMessage(descriptor, methodName)\n{\n if (exports.isPublicComputeGroup(descriptor)) return descriptor;\n\n const message = {};\n // Construct message.joinKey xor message.id .\n if (descriptor.joinKey) message.joinKey = descriptor.joinKey;\n else if (descriptor.id) message.id = descriptor.id; // id:=opaqueId\n\n debugging('computeGroups') && console.debug(`${methodName}:buildCGMessage: descriptor`, descriptor, 'message', message);\n\n validateCGDescriptor(descriptor, methodName);\n\n return message;\n}\n\n/**\n * Build message so that joinHash, joinSecret, opaqueId do not go across the wire.\n * Verify sufficient information in descriptor to authorize a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgClientJoinType} descriptor\n * @param {string} methodName\n * @returns {cgServiceJoinType}\n */\nfunction buildCGJoinMessage(descriptor, methodName)\n{\n if (exports.isPublicComputeGroup(descriptor)) return descriptor;\n\n const message = {};\n // Construct message.joinKey xor message.id .\n if (descriptor.joinKey) message.joinKey = 
descriptor.joinKey;\n else if (descriptor.id) message.id = descriptor.id; // id:=opaqueId\n // Construct message.joinAddress .\n if (descriptor.joinAddress) message.joinAddress = descriptor.joinAddress;\n\n debugging('computeGroups') && console.debug(`${methodName}:buildCGJoinMessage: descriptor`, descriptor, 'message', message);\n\n validateCGJoinDescriptor(descriptor, methodName);\n\n // Construct message.joinHashHash .\n if (descriptor.joinSecret) message.joinHashHash = hash.calculate(hash.eh1, exports.calculateJoinHash(descriptor), exports.serviceConnection.dcpsid);\n if (descriptor.joinHash) message.joinHashHash = hash.calculate(hash.eh1, descriptor.joinHash, exports.serviceConnection.dcpsid);\n\n return message;\n}\n\nfunction hasSufficientJoinInfo(joinDescriptor) {\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n return (joinDescriptor.joinKey && (joinDescriptor.joinSecret || joinDescriptor.joinHash))\n || (joinDescriptor.id && joinDescriptor.joinAddress)\n || exports.isPublicComputeGroup(joinDescriptor);\n}\n\nconst newCGPrototype = { type: 'object',\n parameters: {\n // name: { type: 'string', default: undefined }, /* name of group (length <= 255) */\n // description: { type: 'string', default: undefined }, /* description of group (length <= 255) */\n // id: { type: 'string', default: undefined }, /* opaqueId, the unique identifier of the compute group; nanoid (length === 22) */\n // joinKey: { type: 'string', default: undefined }, /* basically the login (length <= 255) */\n // joinSecret: { type: 'string', default: undefined }, /* basically the password (length <= 255) */\n // joinHash: { type: 'string', default: undefined }, /* basically the password, the joinSecret seeded & hashed */\n // joinAddress: { type: Address, default: undefined }, /* signature gives alternative to login/password */\n\n commissionRate: { type: 'BigNumber', default: undefined }, /* commission, see DCP-1889 */\n deployFee: { type: 'BigNumber', default: undefined }, /* number of DCC to take for every deployment */\n deployAccess: { type: 'string', default: undefined }, /* can be \"owner\"|\"join\" (dcp-1910) */\n addJobFee: { type: 'BigNumber', default: undefined }, /* fee required each time a job joins a compute group */\n maxTotalPayment: { type: 'BigNumber', default: undefined }, /* limit on maximum job payment, NULL => Infinity */\n\n /* Administrative limits on group. NULL => Infinity: Should all be integers or undefined. 
*/\n maxConcurrentJobs: { type: 'number', default: undefined },\n maxConcurrentWorkers: { type: 'number', default: undefined },\n maxConcurrentSandboxes: { type: 'number', default: undefined },\n maxConcurrentCPUs: { type: 'number', default: undefined },\n maxConcurrentGPUs: { type: 'number', default: undefined },\n maxConcurrentEscrow: { type: 'BigNumber', default: undefined },\n },\n};\n\n/**\n * Async function that creates a new Compute Group.\n *\n * The joinDescriptor is of the form { joinKey, joinSecret }, { joinKey, joinHash } or { id, joinAddress }.\n * where id will correspond to the attribute opaqueId in the new row in the computeGroups table.\n *\n * This function can only be called with ADMIN permission.\n * Properties not appearing in newCGPrototype.parameters are not allowed in otherProperties.\n *\n * @param {cgClientJoinType} joinDescriptor - Must have properly defined { joinKey, joinSecret }, { joinKey, joinHash }\n * or { id, joinAddress }, where id will correspond to the attribute opaqueId\n * in the new row in the computeGroups table.\n * @param {string} [name] - The name of the compute group.\n * @param {string} [description] - The description of the compute group.\n * @param {object} [otherProperties] - The 5 attributes of table computeGroup related to commissions and fees.\n * commissionRate: notNull(zFinNum),// commission, see DCP-1889\n * deployFee: notNull(zFinNum),// number of DCC to take for every deployment\n * deployAccess: string, // can be \"owner\"|\"join\" (dcp-1910)\n * addJobFee: notNull(zFinNum),// fee required each time a job joins a compute group\n * maxTotalPayment: finNum, // limit on maximum job payment, NULL => Infinity\n * And the 6 attributes of table computeGroup related to limits.\n * maxConcurrentJobs: integer,\n * maxConcurrentWorkers: integer,\n * maxConcurrentSandboxes: integer,\n * maxConcurrentCPUs: integer,\n * maxConcurrentGPUs: integer,\n * maxConcurrentEscrow: finNum,\n * @returns {Promise<apiClientType>} - { success, payload: computeGroup.id }\n * @access public\n * @example\n * await computeGroup.createGroup({ joinKey: 'dcpDemo', joinSecret: 'theSecret' }, 'myCGName', 'myCGDescription', { deployFee: 0.00015 });\n * await computeGroup.createGroup({ joinKey: 'dcpDemo2', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' });\n * await computeGroup.createGroup({ id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: joinKey:='dcpDemo2', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row3: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.createGroup = async function createGroup(joinDescriptor, name, description, otherProperties)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n validateCGJoinDescriptor(joinDescriptor, 'createGroup');\n\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n return clientError(`createGroup: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Validate the properties in otherProperties.\n for (const methodName in otherProperties) {\n if (!Object.keys(newCGPrototype.parameters).includes(methodName))\n return clientError(`createGroup: Property ${methodName} cannot be 
speicfied in otherProperties. Can only specify ${JSON.stringify(Object.keys(newCGPrototype.parameters))}`);\n }\n\n // Translate joinSecret to joinHash.\n if (joinDescriptor.joinSecret) {\n joinDescriptor.joinHash = exports.calculateJoinHash(joinDescriptor);\n delete joinDescriptor.joinSecret;\n }\n\n if (otherProperties && (otherProperties.commissionRate < 0 || otherProperties.commissionRate >= 1))\n return clientError(`client-createGroup: commissionRate ${otherProperties.commissionRate} must be between 0 and 1 (0 <= commissionRate < 1).`);\n\n debugging('computeGroups') && console.debug('client-createGroup: input:', joinDescriptor, name, description, otherProperties);\n\n const { success, payload } = await exports.serviceConnection.send('createGroup', { joinDescriptor, name, description, otherProperties });\n\n if (!success) return clientError(`Cannot create new compute group, with ${cgId(joinDescriptor)}.`);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n debugging('computeGroups') && console.debug('client-createGroup: payload', payload);\n\n return payload;\n};\n\nconst changeCGPrototype = { type: 'object',\n parameters: {\n name: { type: 'string', default: undefined }, /* name of group (length <= 255) */\n description: { type: 'string', default: undefined }, /* description of group (length <= 255) */\n joinHash: { type: 'string', default: undefined }, /* basically the password, seeded & hashed (length <= 255) */\n joinAddress: { type: Address, default: undefined }, /* signature gives alternative to login/password */\n\n commissionRate: { type: 'BigNumber', default: undefined }, /* commission, see DCP-1889 */\n deployFee: { type: 'BigNumber', default: undefined }, /* number of DCC to take for every deployment */\n deployAccess: { type: 'string', default: undefined }, /* can be \"owner\"|\"join\" (dcp-1910) */\n addJobFee: { type: 'BigNumber', default: undefined }, /* fee required each time a job joins a compute group */\n maxTotalPayment: { type: 'BigNumber', default: undefined }, /* limit on maximum job payment, NULL => Infinity */\n\n /* Administrative limits on group. NULL => Infinity: Should all be integers or undefined. 
*/\n maxConcurrentJobs: { type: 'number', default: undefined },\n maxConcurrentWorkers: { type: 'number', default: undefined },\n maxConcurrentSandboxes: { type: 'number', default: undefined },\n maxConcurrentCPUs: { type: 'number', default: undefined },\n maxConcurrentGPUs: { type: 'number', default: undefined },\n maxConcurrentEscrow: { type: 'BigNumber', default: undefined },\n },\n};\n\n/**\n * Async function that changes a new Compute Group.\n * \n * The parameter newDescriptor contains the new property values,\n * and the properties that are allowed to be changed appear in changeCGPrototype.parameters.\n * \n * The descriptor must have joinKey or id, where id:=opaqueId.\n * Must own the compute group or be ADMIN to use changeGroup.\n * \n * @param {cgAccessType} descriptor - Must have joinkey or id, where id:=opaqueId.\n * @param {object} newDescriptor - Properties not appearing in changeCGPrototype.parameters are not allowed.\n * @returns {Promise<apiClientType>}\n * await computeGroup.changeGroup({ joinKey: 'dcpDemo' }, { joinSecret: 'myNewPasswrd' });\n * await computeGroup.changeGroup({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' }, { name: 'myNewName', deployFee: 0.0001 });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.changeGroup = async function changeGroup(descriptor, newDescriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`changeGroup: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n // Validate the properties in newDescriptor.\n for (const methodName in newDescriptor) {\n if (!Object.keys(changeCGPrototype.parameters).includes(methodName))\n return clientError(`changeGroup: Not allowed to change property ${methodName}. 
Can only change ${JSON.stringify(Object.keys(changeCGPrototype.parameters))}`);\n }\n\n // Translate joinSecret to joinHash.\n if (newDescriptor.joinSecret) {\n newDescriptor.joinHash = exports.calculateJoinHash(newDescriptor);\n delete newDescriptor.joinSecret;\n }\n\n descriptor = buildCGMessage(descriptor, 'changeGroup');\n debugging('computeGroups') && console.debug('change compute group client:', descriptor, newDescriptor);\n const { success, payload } = await exports.serviceConnection.send('changeGroup', { descriptor, newDescriptor });\n\n if (!success) throw new DCPError(`Cannot change compute group with ${cgId(descriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that deletes a compute group.\n * \n * The descriptor must have joinkey or id, where id:=opaqueId.\n * \n * Must either own the group or be ADMIN.\n * If not ADMIN, then the following config must be true:\n * dcpConfig.scheduler.services.computeGroups.usersCanDeleteGroups\n * \n * @param {cgAccessType} descriptor - Must contain joinKey or id (id:=opaqueId) \n * @returns {Promise<apiClientType>}\n * await computeGroup.deleteGroup({ joinKey: 'dcpDemo' });\n * await computeGroup.deleteGroup({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.deleteGroup = async function deleteGroup(descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`deleteGroup: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'deleteGroup');\n debugging('computeGroups') && console.debug('delete compute group client:', descriptor);\n const { success, payload } = await exports.serviceConnection.send('deleteGroup', { descriptor });\n\n if (!success) throw new DCPError(`Cannot delete compute group with ${cgId(descriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that adds a job to a specified compute group. \n * \n * Must be the owner of the job.\n *\n * Useful feedback is provided from this function, as it\n * will make its way back to the application developer, *after* they have made the\n * deployment fee micropayment.\n *\n * On the client side the access model in place is that if you know the (user/password)\n * joinKey+joinSecret/joinKey+joinHash/joinKey+joinHashHash/id+joinAddress,\n * you can add the job to the compute groups, where id:=opaqueId from table computeGroups.\n * On the service side the corresponding access model is\n * joinKey+joinHashHash/id+joinAddress .\n * Access is also allowed if the compute group owner is the connection peerAddress.\n * \n * Unless the compute group owner is the connection peerAddress, element of the descriptor array must contain\n * { joinKey, joinSecret }, { joinKey, joinHash } or { id, joinAddress }\n * where the value of id in { id, joinAddress } is the opaqueId attribute of the row in table computeGroups.\n *\n * @param {Address} job The address of the Job that will be added to the Compute Group.\n * @param {Array} computeGroups Array of descriptor objects for the compute groups. 
This descriptor\n * needs to contain enough information to authorize access to the\n * compute group. Properties may include:\n * - id (id:=opaqueId)\n * - joinKey\n * - joinSecret\n * - joinHash\n * - joinAddress\n * \n * Additional, either the joinKey or id MUST be specified so\n * that we can identify the compute group in question.\n *\n * All compute groups can have jobs submitted to them, provided either the joinKey\n * or the id are specified, and the message contains valid join permission and the \n * job is owned by the caller of addJobToGroups.\n *\n * FUTURE - after DCP-1910\n * keystore A keystore used to grant access to job deployment within this compute group.\n * This can be either the ownerKeystore or the joinAddress keystore when the\n * compute group is in deployAccessType='join' mode.\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.addJobToGroups('P+Y4IApeFQLrYS2W7MkVg7', \n * [ { joinKey: 'dcpDemo', joinSecret: 'theSecret' },\n * { joinKey: 'dcpDemo2', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' }, \n * { id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' } ]);\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: joinKey:='dcpDemo2', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row3: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.addJobToGroups = async function addJobToGroups(job, computeGroups)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n const cgArray = [];\n for (const joinDescriptor of computeGroups)\n {\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n console.error(`addJobToGroups: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Translate so that neither joinHash nor joinSecret goes across the wire.\n const message = buildCGJoinMessage(joinDescriptor, 'addJobToGroups');\n debugging('computeGroups') && console.debug(`addJobToGroups client: job ${job}, message`, message);\n\n cgArray.push(message);\n }\n\n const { success, payload } = await exports.serviceConnection.send('addJobToGroups', { job, cgArray });\n\n debugging('computeGroups') && console.debug('addJobToGroups payload', payload);\n\n if (!success) throw new DCPError(`Cannot add job ${job} to compute groups.`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n // If the server reported success but did not return a list of CGs (eg. v.4.2.5 server),\n // assume (and inform the client) we added all groups successfully\n return payload || computeGroups;\n};\n\n/**\n * Async function that lists all the Jobs in a Compute Group.\n * \n * The descriptor must have one of the properties joinkey, id (id:=opaqueId).\n * Must be the owner of the Compute Group to list jobs from it.\n * The job does not need to be owned.\n * \n * The descriptor is of the form { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }.\n * where 'bYcYGQ3NOpFnP4FKs6IBQd' is the opaqueId of the Compute Group.\n *\n * @param {cgAccessType} descriptor - Must have one of the properties joinKey, id (id:=opaqueId). 
Specifically\n * descriptor = { joinKey: 'dcpDemo' } or descriptor = { id: opaqueId }\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * let listOfJobs1 = await computeGroup.listJobs({ joinKey: 'dcpDemo' });\n * let listOfJobs2 = await computeGroup.listJobs({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.listJobs = async function listJobs(descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`listJobs: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'listJobs');\n debugging('computeGroups') && console.debug('listJob client: descriptor', descriptor);\n const { success, payload } = await exports.serviceConnection.send('listJobs', { descriptor });\n\n if (!success) throw new DCPError(`Cannot list jobs for compute group with ${cgId(descriptor)}`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that removes a job from a Compute Group.\n * \n * The descriptor must have one of the properties joinkey, id (id:=opaqueId).\n * Must be the owner of the Compute Group to remove a job from it.\n * The job does not need to be owned.\n * \n * The descriptor is of the form { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }.\n * where 'bYcYGQ3NOpFnP4FKs6IBQd' is the opaqueId of the Compute Group.\n *\n * @param {Address} job - The address of the Job that will be added to the Compute Group.\n * @param {cgAccessType} descriptor - { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.removeJob( 'P+Y4IApeFQLrYS2W7MkVg7', { joinKey: 'dcpDemo' });\n * await computeGroup.removeJob( 'P+Y4IApeFQLrYS2W7MkVg7', { id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.removeJob = async function removeJob(job, descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`removeJob: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'removeJob');\n debugging('computeGroups') && console.debug(`removeJob client: job ${job}, descriptor`, descriptor);\n const { success, payload } = await exports.serviceConnection.send('removeJob', { job, descriptor });\n\n if (!success) throw new DCPError(`Cannot remove job ${job} from compute group with ${cgId(descriptor)}`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that removes all jobs from a Compute Group.\n * \n * The descriptor must have one of the properties joinkey, id (id:=opaqueId).\n * Must be the owner of the Compute Group to remove jobs from it.\n * \n * The descriptor is of the form { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }.\n * where 
'bYcYGQ3NOpFnP4FKs6IBQd' is the opaqueId of the Compute Group.\n *\n * @param {cgAccessType} descriptor - { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.removeAllJobs({ joinKey: 'dcpDemo' });\n * await computeGroup.removeAllJobs({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.removeAllJobs = async function removeAllJobs(descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`removeAllJobs: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'removeAllJobs');\n debugging('computeGroups') && console.debug('removeAllJobs client: descriptor', descriptor);\n const { success, payload } = await exports.serviceConnection.send('removeAllJobs', { descriptor });\n\n if (!success) throw new DCPError(`Cannot remove all jobs from compute group with ${cgId(descriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that cancels the specified owned job.\n * \n * Must be the owner of the job.\n *\n * On the client side the access model in place is that if you know the (user/password)\n * joinKey+joinSecret/joinKey+joinHash/joinKey+joinHashHash/id+joinAddress,\n * you can cancel the job in the compute group, where id:=opaqueId from table computeGroups.\n * On the service side the corresponding access model is\n * joinKey+joinHashHash/id+joinAddress .\n * Access is also allowed if the compute group owner is the connection peerAddress.\n * \n * Unless the compute group owner is the connection peerAddress, the descriptor must contain\n * { joinKey, joinHashHash } or { id, joinAddress }\n * where the value of id in { id, joinAddress } is the opaqueId attribute of the row in table computeGroups.\n * \n * @param {Address} job - The address of the Job that will be added to the Compute Group.\n * @param {cgClientJoinType} joinDescriptor - Array of descriptor objects for the compute groups. This descriptor\n * needs to contain enough information to authorize access to the\n * compute group. 
Properties may include:\n * - id (id:=opaqueId)\n * - joinKey\n * - joinSecret\n * - joinHash\n * - joinAddress\n *\n * Additional, either the joinKey or id MUST be specified so\n * that we can identify the compute group in question.\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.cancelJob( 'P+Y4IApeFQLrYS2W7MkVg7', { joinKey: 'dcpDemo', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' } );\n * await computeGroup.cancelJob( 'P+Y4IApeFQLrYS2W7MkVg7', { id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.cancelJob = async function cancelJob(job, joinDescriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n return clientError(`cancelJob: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Translate so that neither joinHash nor joinSecret goes across the wire.\n joinDescriptor = buildCGJoinMessage(joinDescriptor, 'cancelJob');\n debugging('computeGroups') && console.debug(`cancelJob client: job ${job}, descriptor`, joinDescriptor);\n const { success, payload } = await exports.serviceConnection.send('cancelJob', { job, joinDescriptor });\n\n if (!success) throw new DCPError(`Cannot cancel job ${job} for compute group with ${cgId(joinDescriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that cancels the owned job in the Compute Group.\n * \n * On the client side the access model in place is that if you know the (user/password)\n * joinKey+joinSecret/joinKey+joinHash/joinKey+joinHashHash/id+joinAddress,\n * you can cancel the jobs in the compute group, where id:=opaqueId from table computeGroups.\n * On the service side the corresponding access model is\n * joinKey+joinHashHash/id+joinAddress .\n * Access is also allowed if the compute group owner is the connection peerAddress.\n * \n * Unless the compute group owner is the connection peerAddress, the descriptor must contain\n * { joinKey, joinHashHash } or { id, joinAddress }\n * where the value of id in { id, joinAddress } is the opaqueId attribute of the row in table computeGroups.\n * \n * @param {cgClientJoinType} joinDescriptor - Array of descriptor objects for the compute groups. This descriptor\n * needs to contain enough information to authorize access to the\n * compute group. 
Properties may include:\n * - id (id:=opaqueId)\n * - joinKey\n * - joinSecret\n * - joinHash\n * - joinAddress\n * \n * Additional, either the joinKey or id MUST be specified so\n * that we can identify the compute group in question.\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.cancelAllJobs( { joinKey: 'dcpDemo', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' } );\n * await computeGroup.cancelAllJobs( { id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.cancelAllJobs = async function cancelAllJobs(joinDescriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n return clientError(`cancelAllJobs: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Translate so that neither joinHash nor joinSecret goes across the wire.\n joinDescriptor = buildCGJoinMessage(joinDescriptor, 'cancelAllJobs');\n debugging('computeGroups') && console.debug('cancelAllJobs client: descriptor', joinDescriptor);\n const { success, payload } = await exports.serviceConnection.send('cancelAllJobs', { joinDescriptor });\n\n if (!success) throw new DCPError(`Cannot cancel owned jobs for compute group with ${cgId(joinDescriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Calculate a joinHash for a compute group. This is an eh1- hash of the cg salt and \n * joinSecret components of a compute group description.\n *\n * @param {object} details an object containing the cg salt, which is\n * the joinKey if the compute group uses one;\n * otherwise it is the joinAddress. This object\n * may also contain the joinSecret.\n * @param {string} [joinSecret] the join secret -- plain text -- that is\n * the \"password\" for the compute group. If not\n * specified, we use details.joinSecret.\n */\nexports.calculateJoinHash = function computeGroups$calculateJoinHash(details, joinSecret)\n{\n if (typeof joinSecret === 'undefined')\n joinSecret = details.joinSecret;\n\n return hash.calculate(hash.eh1, `${details.joinKey || details.joinAddress} ${joinSecret}`);\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/compute-groups/index.js?");
4161
+ eval("/**\n * @file Client facing module that implements Compute Groups API\n * @module dcp/compute-groups\n * @access public\n * @author Kayra E-A <kayra@kingsds.network>\n * Wes Garland <wes@kingsds.network>\n * Paul <paul@kingsds.network>\n * @date Sept 2020\n * February 2022\n * May 2022\n */\n\n\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst hash = __webpack_require__(/*! ../../common/hash */ \"./src/common/hash.js\");\nconst { DCPError } = __webpack_require__(/*! ../../common/dcp-error */ \"./src/common/dcp-error.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('scheduler');\nconst { Address } = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { clientError, reconstructServiceError } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n\n/** @typedef {import('dcp/utils').apiServiceType} apiServiceType */\n/** @typedef {import('dcp/utils').apiClientType} apiClientType */\n/** @typedef {string} opaqueId */\n\n/**\n * @typedef {object} cgAccessType\n * @property {opaqueId} [id]\n * @property {string} [joinKey]\n */\n\n/**\n * @typedef {object} cgClientJoinType\n * @property {opaqueId} [id]\n * @property {Address} [joinAddress]\n * @property {string} [joinKey]\n * @property {string} [joinSecret]\n * @property {string} [joinHash]\n */\n\n/**\n * @typedef {object} cgServiceJoinType\n * @property {opaqueId} [id]\n * @property {Address} [joinAddress]\n * @property {string} [joinKey]\n * @property {string} [joinHashHash]\n */\n\n/**\n * Establishes the client connection to the computeGroups microservice if it does not exist already from the default config.\n * \n * @returns {protocolV4.Connection}\n * @access public\n * @example\n * const result = await exports.serviceConnection.send('createGroup', {\n name: name,\n description: description,\n });\n */\n\nexports.serviceConnection = null;\n\n//\n// Reference counting pattern:\n// For every time addRef is called,\n// closeServiceConnection must eventually be called.\n// Reference counting allows multiple execs in a Promise.all .\n//\nvar refCount = 0;\nexports.addRef = function addRef() {\n refCount++;\n}\n\nconst openAndConnectServiceConn = async function openAndConnectServiceConn()\n{\n exports.serviceConnection = new protocolV4.Connection(dcpConfig.scheduler.services.computeGroups);\n exports.serviceConnection.on('close', openAndConnectServiceConn);\n await exports.serviceConnection.connect();\n refCount = 0; // Help with sanity.\n}\n\n/**\n * Resets the client connection to the computeGroups microservice.\n */\nexports.closeServiceConnection = async function closeServiceConnection() {\n if (refCount > 0) refCount--;\n if (exports.serviceConnection && refCount < 1)\n {\n exports.serviceConnection.off('close', openAndConnectServiceConn);\n exports.serviceConnection.close(null, true);\n refCount = 0; // Help with sanity.\n exports.serviceConnection = null;\n }\n};\n\n/**\n * (Used in jobs/index.js)\n * KeepAlive for the service connection to compute groups.\n */\nexports.keepAlive = async function keepAlive() {\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n exports.serviceConnection.keepalive().catch(err => console.error('Warning: 
keepalive failed for compute groups service', err));\n}\n\n/**\n * Checks whether descriptor corresponds to the public compute group from the scheduler constants.\n */\nexports.isPublicComputeGroup = function isPublicComputeGroup(descriptor) {\n return descriptor.id === constants.computeGroups.public.id\n && descriptor.opaqueId === constants.computeGroups.public.opaqueId;\n};\n\n/**\n * Returns a compute group identification snippet for diagnostic messages,\n * @param {object} descriptor - Must have one of the properties joinKey, id (id:=opaqueId). Specifically\n * descriptor = { joinKey: 'dcpDemo' } or descriptor = { id: 'bYcYGQ3NOpFnP4FKs6IBQd' },\n * where the corresponding row in table computeGroups have attributes\n * joinKey:='dcpDemo' or opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd' .\n * @returns {string}\n */\nfunction cgId(descriptor) {\n return (descriptor.joinKey) ? `joinKey ${descriptor.joinKey}` : `id ${descriptor.id}`;\n}\n\n/**\n * Verify sufficient information in descriptor to access a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgAccessType} descriptor \n * @param {string} methodName \n */\nfunction validateCGDescriptor(descriptor, methodName) {\n for (const prop in descriptor) {\n if ([ 'id', 'joinKey' ].includes(prop)) continue;\n if ([ 'joinAddress', 'joinHash', 'joinSecret' ].includes(prop))\n console.warn(`It is not necessary to specify '${prop}' in the descriptor ${JSON.stringify(descriptor)} when calling ${methodName}`);\n else\n console.error(`Do not specify '${prop}' in the descriptor ${JSON.stringify(descriptor)} when calling ${methodName}`);\n }\n}\n\n/**\n * Verify sufficient information in descriptor to authorize a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgClientJoinType} joinDescriptor \n * @param {string} methodName \n */\nfunction validateCGJoinDescriptor(joinDescriptor, methodName) {\n for (const prop in joinDescriptor) {\n if ([ 'id', 'joinKey', 'joinSecret', 'joinHash', 'joinAddress' ].includes(prop)) continue;\n console.error(`Do not specify '${prop}' in the descriptor ${JSON.stringify(joinDescriptor)} when calling ${methodName}`);\n }\n}\n\n/**\n * Build message to go across the wire.\n * Verify sufficient information in descriptor to access a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgAccessType} descriptor\n * @param {string} methodName\n * @returns {cgAccessType}\n */\nfunction buildCGMessage(descriptor, methodName)\n{\n if (exports.isPublicComputeGroup(descriptor)) return descriptor;\n\n const message = {};\n // Construct message.joinKey xor message.id .\n if (descriptor.joinKey) message.joinKey = descriptor.joinKey;\n else if (descriptor.id) message.id = descriptor.id; // id:=opaqueId\n\n debugging('computeGroups') && console.debug(`${methodName}:buildCGMessage: descriptor`, descriptor, 'message', message);\n\n validateCGDescriptor(descriptor, methodName);\n\n return message;\n}\n\n/**\n * Build message so that joinHash, joinSecret, opaqueId do not go across the wire.\n * Verify sufficient information in descriptor to authorize a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgClientJoinType} descriptor\n * @param {string} methodName\n * @returns {cgServiceJoinType}\n */\nfunction buildCGJoinMessage(descriptor, methodName)\n{\n if (exports.isPublicComputeGroup(descriptor)) return descriptor;\n\n const message = {};\n // Construct message.joinKey xor message.id .\n if (descriptor.joinKey) message.joinKey = 
descriptor.joinKey;\n else if (descriptor.id) message.id = descriptor.id; // id:=opaqueId\n // Construct message.joinAddress .\n if (descriptor.joinAddress) message.joinAddress = descriptor.joinAddress;\n\n debugging('computeGroups') && console.debug(`${methodName}:buildCGJoinMessage: descriptor`, descriptor, 'message', message);\n\n validateCGJoinDescriptor(descriptor, methodName);\n\n // Construct message.joinHashHash .\n if (descriptor.joinSecret) message.joinHashHash = hash.calculate(hash.eh1, exports.calculateJoinHash(descriptor), exports.serviceConnection.dcpsid);\n if (descriptor.joinHash) message.joinHashHash = hash.calculate(hash.eh1, descriptor.joinHash, exports.serviceConnection.dcpsid);\n\n return message;\n}\n\nfunction hasSufficientJoinInfo(joinDescriptor) {\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n return (joinDescriptor.joinKey && (joinDescriptor.joinSecret || joinDescriptor.joinHash))\n || (joinDescriptor.id && joinDescriptor.joinAddress)\n || exports.isPublicComputeGroup(joinDescriptor);\n}\n\nconst newCGPrototype = { type: 'object',\n parameters: {\n // name: { type: 'string', default: undefined }, /* name of group (length <= 255) */\n // description: { type: 'string', default: undefined }, /* description of group (length <= 255) */\n // id: { type: 'string', default: undefined }, /* opaqueId, the unique identifier of the compute group; nanoid (length === 22) */\n // joinKey: { type: 'string', default: undefined }, /* basically the login (length <= 255) */\n // joinSecret: { type: 'string', default: undefined }, /* basically the password (length <= 255) */\n // joinHash: { type: 'string', default: undefined }, /* basically the password, the joinSecret seeded & hashed */\n // joinAddress: { type: Address, default: undefined }, /* signature gives alternative to login/password */\n\n commissionRate: { type: 'BigNumber', default: undefined }, /* commission, see DCP-1889 */\n deployFee: { type: 'BigNumber', default: undefined }, /* number of DCC to take for every deployment */\n deployAccess: { type: 'string', default: undefined }, /* can be \"owner\"|\"join\" (dcp-1910) */\n addJobFee: { type: 'BigNumber', default: undefined }, /* fee required each time a job joins a compute group */\n maxTotalPayment: { type: 'BigNumber', default: undefined }, /* limit on maximum job payment, NULL => Infinity */\n\n /* Administrative limits on group. NULL => Infinity: Should all be integers or undefined. 
*/\n maxConcurrentJobs: { type: 'number', default: undefined },\n maxConcurrentWorkers: { type: 'number', default: undefined },\n maxConcurrentSandboxes: { type: 'number', default: undefined },\n maxConcurrentCPUs: { type: 'number', default: undefined },\n maxConcurrentGPUs: { type: 'number', default: undefined },\n maxConcurrentEscrow: { type: 'BigNumber', default: undefined },\n },\n};\n\n/**\n * Async function that creates a new Compute Group.\n *\n * The joinDescriptor is of the form { joinKey, joinSecret }, { joinKey, joinHash } or { id, joinAddress }.\n * where id will correspond to the attribute opaqueId in the new row in the computeGroups table.\n *\n * This function can only be called with ADMIN permission.\n * Properties not appearing in newCGPrototype.parameters are not allowed in otherProperties.\n *\n * @param {cgClientJoinType} joinDescriptor - Must have properly defined { joinKey, joinSecret }, { joinKey, joinHash }\n * or { id, joinAddress }, where id will correspond to the attribute opaqueId\n * in the new row in the computeGroups table.\n * @param {string} [name] - The name of the compute group.\n * @param {string} [description] - The description of the compute group.\n * @param {object} [otherProperties] - The 5 attributes of table computeGroup related to commissions and fees.\n * commissionRate: notNull(zFinNum),// commission, see DCP-1889\n * deployFee: notNull(zFinNum),// number of DCC to take for every deployment\n * deployAccess: string, // can be \"owner\"|\"join\" (dcp-1910)\n * addJobFee: notNull(zFinNum),// fee required each time a job joins a compute group\n * maxTotalPayment: finNum, // limit on maximum job payment, NULL => Infinity\n * And the 6 attributes of table computeGroup related to limits.\n * maxConcurrentJobs: integer,\n * maxConcurrentWorkers: integer,\n * maxConcurrentSandboxes: integer,\n * maxConcurrentCPUs: integer,\n * maxConcurrentGPUs: integer,\n * maxConcurrentEscrow: finNum,\n * @returns {Promise<apiClientType>} - { success, payload: computeGroup.id }\n * @access public\n * @example\n * await computeGroup.createGroup({ joinKey: 'dcpDemo', joinSecret: 'theSecret' }, 'myCGName', 'myCGDescription', { deployFee: 0.00015 });\n * await computeGroup.createGroup({ joinKey: 'dcpDemo2', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' });\n * await computeGroup.createGroup({ id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: joinKey:='dcpDemo2', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row3: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.createGroup = async function createGroup(joinDescriptor, name, description, otherProperties)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n validateCGJoinDescriptor(joinDescriptor, 'createGroup');\n\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n return clientError(`createGroup: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Validate the properties in otherProperties.\n for (const methodName in otherProperties) {\n if (!Object.keys(newCGPrototype.parameters).includes(methodName))\n return clientError(`createGroup: Property ${methodName} cannot be 
speicfied in otherProperties. Can only specify ${JSON.stringify(Object.keys(newCGPrototype.parameters))}`);\n }\n\n // Translate joinSecret to joinHash.\n if (joinDescriptor.joinSecret) {\n joinDescriptor.joinHash = exports.calculateJoinHash(joinDescriptor);\n delete joinDescriptor.joinSecret;\n }\n\n if (otherProperties && (otherProperties.commissionRate < 0 || otherProperties.commissionRate >= 1))\n return clientError(`client-createGroup: commissionRate ${otherProperties.commissionRate} must be between 0 and 1 (0 <= commissionRate < 1).`);\n\n debugging('computeGroups') && console.debug('client-createGroup: input:', joinDescriptor, name, description, otherProperties);\n\n const { success, payload } = await exports.serviceConnection.send('createGroup', { joinDescriptor, name, description, otherProperties });\n\n if (!success) return clientError(`Cannot create new compute group, with ${cgId(joinDescriptor)}.`);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n debugging('computeGroups') && console.debug('client-createGroup: payload', payload);\n\n return payload;\n};\n\nconst changeCGPrototype = { type: 'object',\n parameters: {\n name: { type: 'string', default: undefined }, /* name of group (length <= 255) */\n description: { type: 'string', default: undefined }, /* description of group (length <= 255) */\n joinHash: { type: 'string', default: undefined }, /* basically the password, seeded & hashed (length <= 255) */\n joinAddress: { type: Address, default: undefined }, /* signature gives alternative to login/password */\n\n commissionRate: { type: 'BigNumber', default: undefined }, /* commission, see DCP-1889 */\n deployFee: { type: 'BigNumber', default: undefined }, /* number of DCC to take for every deployment */\n deployAccess: { type: 'string', default: undefined }, /* can be \"owner\"|\"join\" (dcp-1910) */\n addJobFee: { type: 'BigNumber', default: undefined }, /* fee required each time a job joins a compute group */\n maxTotalPayment: { type: 'BigNumber', default: undefined }, /* limit on maximum job payment, NULL => Infinity */\n\n /* Administrative limits on group. NULL => Infinity: Should all be integers or undefined. 
*/\n maxConcurrentJobs: { type: 'number', default: undefined },\n maxConcurrentWorkers: { type: 'number', default: undefined },\n maxConcurrentSandboxes: { type: 'number', default: undefined },\n maxConcurrentCPUs: { type: 'number', default: undefined },\n maxConcurrentGPUs: { type: 'number', default: undefined },\n maxConcurrentEscrow: { type: 'BigNumber', default: undefined },\n },\n};\n\n/**\n * Async function that changes a new Compute Group.\n * \n * The parameter newDescriptor contains the new property values,\n * and the properties that are allowed to be changed appear in changeCGPrototype.parameters.\n * \n * The descriptor must have joinKey or id, where id:=opaqueId.\n * Must own the compute group or be ADMIN to use changeGroup.\n * \n * @param {cgAccessType} descriptor - Must have joinkey or id, where id:=opaqueId.\n * @param {object} newDescriptor - Properties not appearing in changeCGPrototype.parameters are not allowed.\n * @returns {Promise<apiClientType>}\n * await computeGroup.changeGroup({ joinKey: 'dcpDemo' }, { joinSecret: 'myNewPasswrd' });\n * await computeGroup.changeGroup({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' }, { name: 'myNewName', deployFee: 0.0001 });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.changeGroup = async function changeGroup(descriptor, newDescriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`changeGroup: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n // Validate the properties in newDescriptor.\n for (const methodName in newDescriptor) {\n if (!Object.keys(changeCGPrototype.parameters).includes(methodName))\n return clientError(`changeGroup: Not allowed to change property ${methodName}. 
Can only change ${JSON.stringify(Object.keys(changeCGPrototype.parameters))}`);\n }\n\n // Translate joinSecret to joinHash.\n if (newDescriptor.joinSecret) {\n newDescriptor.joinHash = exports.calculateJoinHash(newDescriptor);\n delete newDescriptor.joinSecret;\n }\n\n descriptor = buildCGMessage(descriptor, 'changeGroup');\n debugging('computeGroups') && console.debug('change compute group client:', descriptor, newDescriptor);\n const { success, payload } = await exports.serviceConnection.send('changeGroup', { descriptor, newDescriptor });\n\n if (!success) throw new DCPError(`Cannot change compute group with ${cgId(descriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that deletes a compute group.\n * \n * The descriptor must have joinkey or id, where id:=opaqueId.\n * \n * Must either own the group or be ADMIN.\n * If not ADMIN, then the following config must be true:\n * dcpConfig.scheduler.services.computeGroups.usersCanDeleteGroups\n * \n * @param {cgAccessType} descriptor - Must contain joinKey or id (id:=opaqueId) \n * @returns {Promise<apiClientType>}\n * await computeGroup.deleteGroup({ joinKey: 'dcpDemo' });\n * await computeGroup.deleteGroup({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.deleteGroup = async function deleteGroup(descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`deleteGroup: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'deleteGroup');\n debugging('computeGroups') && console.debug('delete compute group client:', descriptor);\n const { success, payload } = await exports.serviceConnection.send('deleteGroup', { descriptor });\n\n if (!success) throw new DCPError(`Cannot delete compute group with ${cgId(descriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that adds a job to a specified compute group. \n * \n * Must be the owner of the job.\n *\n * Useful feedback is provided from this function, as it\n * will make its way back to the application developer, *after* they have made the\n * deployment fee micropayment.\n *\n * On the client side the access model in place is that if you know the (user/password)\n * joinKey+joinSecret/joinKey+joinHash/joinKey+joinHashHash/id+joinAddress,\n * you can add the job to the compute groups, where id:=opaqueId from table computeGroups.\n * On the service side the corresponding access model is\n * joinKey+joinHashHash/id+joinAddress .\n * Access is also allowed if the compute group owner is the connection peerAddress.\n * \n * Unless the compute group owner is the connection peerAddress, element of the descriptor array must contain\n * { joinKey, joinSecret }, { joinKey, joinHash } or { id, joinAddress }\n * where the value of id in { id, joinAddress } is the opaqueId attribute of the row in table computeGroups.\n *\n * @param {Address} job The address of the Job that will be added to the Compute Group.\n * @param {Array} computeGroups Array of descriptor objects for the compute groups. 
This descriptor\n * needs to contain enough information to authorize access to the\n * compute group. Properties may include:\n * - id (id:=opaqueId)\n * - joinKey\n * - joinSecret\n * - joinHash\n * - joinAddress\n * \n * Additional, either the joinKey or id MUST be specified so\n * that we can identify the compute group in question.\n *\n * All compute groups can have jobs submitted to them, provided either the joinKey\n * or the id are specified, and the message contains valid join permission and the \n * job is owned by the caller of addJobToGroups.\n *\n * FUTURE - after DCP-1910\n * keystore A keystore used to grant access to job deployment within this compute group.\n * This can be either the ownerKeystore or the joinAddress keystore when the\n * compute group is in deployAccessType='join' mode.\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.addJobToGroups('P+Y4IApeFQLrYS2W7MkVg7', \n * [ { joinKey: 'dcpDemo', joinSecret: 'theSecret' },\n * { joinKey: 'dcpDemo2', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' }, \n * { id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' } ]);\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: joinKey:='dcpDemo2', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row3: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.addJobToGroups = async function addJobToGroups(job, computeGroups)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n const cgArray = [];\n for (const joinDescriptor of computeGroups)\n {\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n {\n console.error(`addJobToGroups: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n continue;\n }\n\n // Translate so that neither joinHash nor joinSecret goes across the wire.\n const message = buildCGJoinMessage(joinDescriptor, 'addJobToGroups');\n debugging('computeGroups') && console.debug(`addJobToGroups client: job ${job}, message`, message);\n\n cgArray.push(message);\n }\n\n const { success, payload } = await exports.serviceConnection.send('addJobToGroups', { job, cgArray });\n\n debugging('computeGroups') && console.debug('addJobToGroups payload', payload);\n\n if (!success) throw new DCPError(`Cannot add job ${job} to compute groups.`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n // If the server reported success but did not return a list of CGs (eg. v.4.2.5 server),\n // assume (and inform the client) we added all groups successfully\n return payload || computeGroups;\n};\n\n/**\n * Async function that lists all the Jobs in a Compute Group.\n * \n * The descriptor must have one of the properties joinkey, id (id:=opaqueId).\n * Must be the owner of the Compute Group to list jobs from it.\n * The job does not need to be owned.\n * \n * The descriptor is of the form { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }.\n * where 'bYcYGQ3NOpFnP4FKs6IBQd' is the opaqueId of the Compute Group.\n *\n * @param {cgAccessType} descriptor - Must have one of the properties joinKey, id (id:=opaqueId). 
Specifically\n * descriptor = { joinKey: 'dcpDemo' } or descriptor = { id: opaqueId }\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * let listOfJobs1 = await computeGroup.listJobs({ joinKey: 'dcpDemo' });\n * let listOfJobs2 = await computeGroup.listJobs({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.listJobs = async function listJobs(descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`listJobs: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'listJobs');\n debugging('computeGroups') && console.debug('listJob client: descriptor', descriptor);\n const { success, payload } = await exports.serviceConnection.send('listJobs', { descriptor });\n\n if (!success) throw new DCPError(`Cannot list jobs for compute group with ${cgId(descriptor)}`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that removes a job from a Compute Group.\n * \n * The descriptor must have one of the properties joinkey, id (id:=opaqueId).\n * Must be the owner of the Compute Group to remove a job from it.\n * The job does not need to be owned.\n * \n * The descriptor is of the form { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }.\n * where 'bYcYGQ3NOpFnP4FKs6IBQd' is the opaqueId of the Compute Group.\n *\n * @param {Address} job - The address of the Job that will be added to the Compute Group.\n * @param {cgAccessType} descriptor - { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.removeJob( 'P+Y4IApeFQLrYS2W7MkVg7', { joinKey: 'dcpDemo' });\n * await computeGroup.removeJob( 'P+Y4IApeFQLrYS2W7MkVg7', { id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.removeJob = async function removeJob(job, descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`removeJob: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'removeJob');\n debugging('computeGroups') && console.debug(`removeJob client: job ${job}, descriptor`, descriptor);\n const { success, payload } = await exports.serviceConnection.send('removeJob', { job, descriptor });\n\n if (!success) throw new DCPError(`Cannot remove job ${job} from compute group with ${cgId(descriptor)}`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that removes all jobs from a Compute Group.\n * \n * The descriptor must have one of the properties joinkey, id (id:=opaqueId).\n * Must be the owner of the Compute Group to remove jobs from it.\n * \n * The descriptor is of the form { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }.\n * where 
'bYcYGQ3NOpFnP4FKs6IBQd' is the opaqueId of the Compute Group.\n *\n * @param {cgAccessType} descriptor - { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.removeAllJobs({ joinKey: 'dcpDemo' });\n * await computeGroup.removeAllJobs({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.removeAllJobs = async function removeAllJobs(descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`removeAllJobs: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'removeAllJobs');\n debugging('computeGroups') && console.debug('removeAllJobs client: descriptor', descriptor);\n const { success, payload } = await exports.serviceConnection.send('removeAllJobs', { descriptor });\n\n if (!success) throw new DCPError(`Cannot remove all jobs from compute group with ${cgId(descriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that cancels the specified owned job.\n * \n * Must be the owner of the job.\n *\n * On the client side the access model in place is that if you know the (user/password)\n * joinKey+joinSecret/joinKey+joinHash/joinKey+joinHashHash/id+joinAddress,\n * you can cancel the job in the compute group, where id:=opaqueId from table computeGroups.\n * On the service side the corresponding access model is\n * joinKey+joinHashHash/id+joinAddress .\n * Access is also allowed if the compute group owner is the connection peerAddress.\n * \n * Unless the compute group owner is the connection peerAddress, the descriptor must contain\n * { joinKey, joinHashHash } or { id, joinAddress }\n * where the value of id in { id, joinAddress } is the opaqueId attribute of the row in table computeGroups.\n * \n * @param {Address} job - The address of the Job that will be added to the Compute Group.\n * @param {cgClientJoinType} joinDescriptor - Array of descriptor objects for the compute groups. This descriptor\n * needs to contain enough information to authorize access to the\n * compute group. 
Properties may include:\n * - id (id:=opaqueId)\n * - joinKey\n * - joinSecret\n * - joinHash\n * - joinAddress\n *\n * Additional, either the joinKey or id MUST be specified so\n * that we can identify the compute group in question.\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.cancelJob( 'P+Y4IApeFQLrYS2W7MkVg7', { joinKey: 'dcpDemo', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' } );\n * await computeGroup.cancelJob( 'P+Y4IApeFQLrYS2W7MkVg7', { id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.cancelJob = async function cancelJob(job, joinDescriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n return clientError(`cancelJob: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Translate so that neither joinHash nor joinSecret goes across the wire.\n joinDescriptor = buildCGJoinMessage(joinDescriptor, 'cancelJob');\n debugging('computeGroups') && console.debug(`cancelJob client: job ${job}, descriptor`, joinDescriptor);\n const { success, payload } = await exports.serviceConnection.send('cancelJob', { job, joinDescriptor });\n\n if (!success) throw new DCPError(`Cannot cancel job ${job} for compute group with ${cgId(joinDescriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that cancels the owned job in the Compute Group.\n * \n * On the client side the access model in place is that if you know the (user/password)\n * joinKey+joinSecret/joinKey+joinHash/joinKey+joinHashHash/id+joinAddress,\n * you can cancel the jobs in the compute group, where id:=opaqueId from table computeGroups.\n * On the service side the corresponding access model is\n * joinKey+joinHashHash/id+joinAddress .\n * Access is also allowed if the compute group owner is the connection peerAddress.\n * \n * Unless the compute group owner is the connection peerAddress, the descriptor must contain\n * { joinKey, joinHashHash } or { id, joinAddress }\n * where the value of id in { id, joinAddress } is the opaqueId attribute of the row in table computeGroups.\n * \n * @param {cgClientJoinType} joinDescriptor - Array of descriptor objects for the compute groups. This descriptor\n * needs to contain enough information to authorize access to the\n * compute group. 
Properties may include:\n * - id (id:=opaqueId)\n * - joinKey\n * - joinSecret\n * - joinHash\n * - joinAddress\n * \n * Additional, either the joinKey or id MUST be specified so\n * that we can identify the compute group in question.\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.cancelAllJobs( { joinKey: 'dcpDemo', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' } );\n * await computeGroup.cancelAllJobs( { id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.cancelAllJobs = async function cancelAllJobs(joinDescriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n return clientError(`cancelAllJobs: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Translate so that neither joinHash nor joinSecret goes across the wire.\n joinDescriptor = buildCGJoinMessage(joinDescriptor, 'cancelAllJobs');\n debugging('computeGroups') && console.debug('cancelAllJobs client: descriptor', joinDescriptor);\n const { success, payload } = await exports.serviceConnection.send('cancelAllJobs', { joinDescriptor });\n\n if (!success) throw new DCPError(`Cannot cancel owned jobs for compute group with ${cgId(joinDescriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Calculate a joinHash for a compute group. This is an eh1- hash of the cg salt and \n * joinSecret components of a compute group description.\n *\n * @param {object} details an object containing the cg salt, which is\n * the joinKey if the compute group uses one;\n * otherwise it is the joinAddress. This object\n * may also contain the joinSecret.\n * @param {string} [joinSecret] the join secret -- plain text -- that is\n * the \"password\" for the compute group. If not\n * specified, we use details.joinSecret.\n */\nexports.calculateJoinHash = function computeGroups$calculateJoinHash(details, joinSecret)\n{\n if (typeof joinSecret === 'undefined')\n joinSecret = details.joinSecret;\n\n return hash.calculate(hash.eh1, `${details.joinKey || details.joinAddress} ${joinSecret}`);\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/compute-groups/index.js?");
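buildCGJoinMessage in the module above is the piece that keeps credentials off the wire: the plain-text joinSecret is first folded into a joinHash (salted with the joinKey, or with the joinAddress when the group has no joinKey), and only a joinHashHash, which also binds the current session id (dcpsid), is transmitted. Below is a minimal sketch of that two-step derivation; the eh1 hash is replaced by a sha256-based stand-in purely so the shape of the computation is runnable outside the bundle.

// Illustrative only: the real client uses hash.calculate(hash.eh1, ...); sha256 here
// is a stand-in so the derivation can be exercised without the dcp hash module.
const crypto = require('crypto');
const fakeEh1 = (...parts) =>
  'eh1-' + crypto.createHash('sha256').update(parts.join(' ')).digest('hex');

// Step 1: joinHash = eh1(salt + ' ' + joinSecret); the salt is the joinKey, or the
// joinAddress for groups without one (mirrors calculateJoinHash).
function calculateJoinHash (details, joinSecret = details.joinSecret) {
  return fakeEh1(`${details.joinKey || details.joinAddress} ${joinSecret}`);
}

// Step 2: joinHashHash = eh1(joinHash, dcpsid). Only this value is sent, so neither the
// joinSecret nor the stored joinHash crosses the wire (mirrors buildCGJoinMessage).
function buildJoinHashHash (descriptor, dcpsid) {
  const joinHash = descriptor.joinHash || calculateJoinHash(descriptor);
  return fakeEh1(joinHash, dcpsid);
}

const message = {
  joinKey: 'dcpDemo',
  joinHashHash: buildJoinHashHash({ joinKey: 'dcpDemo', joinSecret: 'theSecret' },
                                  'example-dcpsid'),
};
console.log(message);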
4162
4162
 
4163
4163
  /***/ }),
4164
4164
 
@@ -4178,7 +4178,7 @@ eval("/**\n * @file Module that implements Compute API\n * @module dcp/comput
4178
4178
  \*********************************/
4179
4179
  /***/ ((module, exports, __webpack_require__) => {
4180
4180
 
4181
- eval("/* module decorator */ module = __webpack_require__.nmd(module);\n/**\n * @file dcp-client-bundle-src.js\n * Top-level file which gets webpacked into the bundle consumed by dcp-client 2.5\n * @author Wes Garland, wes@kingsds.network\n * @date July 2019\n */\n\n{\n let thisScript = typeof document !== 'undefined' ? (typeof document.currentScript !== 'undefined' && document.currentScript) || document.getElementById('_dcp_client_bundle') : {}\n let realModuleDeclare\n\n if ( false || typeof module.declare === 'undefined') {\n realModuleDeclare = ( true) ? module.declare : 0\n if (false) {}\n module.declare = function moduleUnWrapper (deps, factory) {\n factory(null, module.exports, module)\n return module.exports\n }\n }\n\n let _debugging = () => false\n dcpConfig.future = (__webpack_require__(/*! ../common/config-future.js */ \"./src/common/config-future.js\").futureFactory)(_debugging, dcpConfig);\n\n /* These modules are official API and must be part of DCP Client */\n let officialApi = {\n 'protocol': __webpack_require__(/*! ../protocol-v4 */ \"./src/protocol-v4/index.js\"),\n 'compute': (__webpack_require__(/*! ./compute */ \"./src/dcp-client/compute.js\").compute),\n 'worker': __webpack_require__(/*! ./worker */ \"./src/dcp-client/worker/index.js\"),\n 'wallet': __webpack_require__(/*! ./wallet */ \"./src/dcp-client/wallet/index.js\"),\n };\n\n /* Allow client programs to use modules which happen to be in the bundle anyhow */\n let conveniencePeers = {\n 'ethereumjs-wallet': (__webpack_require__(/*! ./wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\")._internalEth.wallet),\n 'ethereumjs-util': (__webpack_require__(/*! ./wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\")._internalEth.util),\n 'socket.io-client': __webpack_require__(/*! socket.io-client */ \"./node_modules/socket.io-client/build/cjs/index.js\"),\n 'bignumber.js': __webpack_require__(/*! bignumber.js */ \"./node_modules/bignumber.js/bignumber.js\"),\n 'semver': __webpack_require__(/*! semver */ \"./node_modules/semver/semver.js\"),\n };\n\n /* Some of these modules are API-track. Some of them need to be published to be\n * available for top-level resolution by DCP internals. Those (mostly) should have\n * been written using relative module paths.....\n */\n let modules = Object.assign({\n 'dcp-build': {\"version\":\"8f44464faf259aae5ef214f8752f7ce8728dd5f0\",\"branch\":\"release\",\"dcpClient\":{\"version\":\"4.2.12\",\"from\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#prod-20220907\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#85f2b5d11fef51c90410286f12fa800492957c39\"},\"built\":\"Mon Sep 12 2022 15:21:54 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Mon 12 Sep 2022 03:21:51 PM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.70.0\",\"node\":\"v14.20.0\"},\n 'dcp-xhr': __webpack_require__(/*! ../common/dcp-xhr */ \"./src/common/dcp-xhr.js\"),\n 'dcp-env': __webpack_require__(/*! ../common/dcp-env */ \"./src/common/dcp-env.js\"),\n 'dcp-url': __webpack_require__(/*! ../common/dcp-url */ \"./src/common/dcp-url.js\"),\n 'cli': __webpack_require__(/*! ../common/cli */ \"./src/common/cli.js\"),\n 'dcp-timers': __webpack_require__(/*! ../common/dcp-timers */ \"./src/common/dcp-timers.js\"),\n 'dcp-dot-dir': __webpack_require__(/*! ../common/dcp-dot-dir */ \"./src/common/dcp-dot-dir.js\"),\n 'dcp-assert': __webpack_require__(/*! 
../common/dcp-assert */ \"./src/common/dcp-assert.js\"),\n 'dcp-events': __webpack_require__(/*! ../common/dcp-events */ \"./src/common/dcp-events/index.js\"),\n 'utils': __webpack_require__(/*! ../utils */ \"./src/utils/index.js\"),\n 'debugging': __webpack_require__(/*! ../debugging */ \"./src/debugging.js\"),\n 'publish': __webpack_require__(/*! ../common/dcp-publish */ \"./src/common/dcp-publish.js\"),\n 'compute-groups': {\n ...__webpack_require__(/*! ./compute-groups */ \"./src/dcp-client/compute-groups/index.js\"),\n publicGroupOpaqueId: (__webpack_require__(/*! ../common/scheduler-constants */ \"./src/common/scheduler-constants.js\").computeGroups[\"public\"].opaqueId),\n },\n 'bank-util': __webpack_require__(/*! ./bank-util */ \"./src/dcp-client/bank-util.js\"),\n 'protocol-v4': __webpack_require__(/*! ../protocol-v4 */ \"./src/protocol-v4/index.js\"), /* deprecated */\n 'client-modal': __webpack_require__(/*! ./client-modal */ \"./src/dcp-client/client-modal/index.js\"),\n 'legacy-modal': (__webpack_require__(/*! ../../portal/www/js/modal */ \"./portal/www/js/modal.js\").Modal),\n 'eth': __webpack_require__(/*! ./wallet/eth */ \"./src/dcp-client/wallet/eth.js\"),\n 'serialize': __webpack_require__(/*! ../utils/serialize */ \"./src/utils/serialize.js\"),\n 'job': __webpack_require__(/*! ./job */ \"./src/dcp-client/job/index.js\"),\n 'range-object': __webpack_require__(/*! ./range-object */ \"./src/dcp-client/range-object.js\"),\n 'stats-ranges': __webpack_require__(/*! ./stats-ranges */ \"./src/dcp-client/stats-ranges.js\"),\n 'job-values': __webpack_require__(/*! ./job-values */ \"./src/dcp-client/job-values.js\"),\n 'standard-objects': {}\n }, conveniencePeers, officialApi);\n\n /* Export the JS Standard Classes (etc) from the global object of the bundle evaluation context,\n * in case we have code somewhere that needs to use these for instanceof checks.\n */\n ;[ Object, Function, Boolean, Symbol,\n Error, EvalError, RangeError, ReferenceError, SyntaxError, TypeError, URIError,\n Number, Math, Date,\n String, RegExp,\n Array, Int8Array, Uint8Array, Uint8ClampedArray, Int16Array, Uint16Array, Int32Array, Uint32Array, Float32Array, Float64Array,\n Map, Set, WeakMap, WeakSet,\n ArrayBuffer, DataView, JSON,\n Promise, \n Reflect, Proxy, Intl, WebAssembly, __webpack_require__\n ].forEach(function (obj) {\n if (obj.name && (typeof obj === 'function' || typeof obj === 'object'))\n modules['standard-objects'][obj.name] = obj\n })\n\n if (typeof BigInt !== 'undefined')\n modules['standard-objects']['BigInt'] === BigInt;\n if (typeof BigInt64Array !== 'undefined')\n modules['standard-objects']['BigInt64Array'] === BigInt64Array;\n if (typeof BigInt64Array !== 'undefined')\n modules['standard-objects']['BigUint64Array'] === BigUint64Array;\n\n module.declare([], function(require, exports, module) {\n Object.assign(exports, modules)\n exports['dcp-config'] = dcpConfig\n })\n if (realModuleDeclare)\n module.declare = realModuleDeclare\n\n bundleExports = thisScript.exports = exports; /* must be last expression evaluated! */\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/index.js?");
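The bundle entry point above only works as written when module.declare exists; when it is evaluated in a plain CommonJS environment it first installs the moduleUnWrapper shim, publishes its module map through it, and then restores whatever declare was there before. A minimal sketch of that fallback, assuming an ordinary Node/CommonJS module object:

// Sketch of the module.declare fallback (moduleUnWrapper) used when the bundle runs
// outside a module.declare (CommonJS 2.0 / BravoJS) environment.
let realModuleDeclare;

if (typeof module.declare === 'undefined') {
  realModuleDeclare = module.declare; // undefined here; kept only so it can be restored below
  module.declare = function moduleUnWrapper (deps, factory) {
    // Ignore the dependency list and run the factory immediately against this module's
    // own exports, so module.declare([], factory) behaves like a plain CommonJS body.
    factory(null, module.exports, module);
    return module.exports;
  };
}

// The bundle then publishes its module map through the (possibly shimmed) declare:
module.declare([], function (require, exports) {
  exports.example = 'exported through moduleUnWrapper';
});

if (realModuleDeclare)
  module.declare = realModuleDeclare; // put a pre-existing declare back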
4181
+ eval("/* module decorator */ module = __webpack_require__.nmd(module);\n/**\n * @file dcp-client-bundle-src.js\n * Top-level file which gets webpacked into the bundle consumed by dcp-client 2.5\n * @author Wes Garland, wes@kingsds.network\n * @date July 2019\n */\n\n{\n let thisScript = typeof document !== 'undefined' ? (typeof document.currentScript !== 'undefined' && document.currentScript) || document.getElementById('_dcp_client_bundle') : {}\n let realModuleDeclare\n\n if ( false || typeof module.declare === 'undefined') {\n realModuleDeclare = ( true) ? module.declare : 0\n if (false) {}\n module.declare = function moduleUnWrapper (deps, factory) {\n factory(null, module.exports, module)\n return module.exports\n }\n }\n\n let _debugging = () => false\n dcpConfig.future = (__webpack_require__(/*! ../common/config-future.js */ \"./src/common/config-future.js\").futureFactory)(_debugging, dcpConfig);\n\n /* These modules are official API and must be part of DCP Client */\n let officialApi = {\n 'protocol': __webpack_require__(/*! ../protocol-v4 */ \"./src/protocol-v4/index.js\"),\n 'compute': (__webpack_require__(/*! ./compute */ \"./src/dcp-client/compute.js\").compute),\n 'worker': __webpack_require__(/*! ./worker */ \"./src/dcp-client/worker/index.js\"),\n 'wallet': __webpack_require__(/*! ./wallet */ \"./src/dcp-client/wallet/index.js\"),\n };\n\n /* Allow client programs to use modules which happen to be in the bundle anyhow */\n let conveniencePeers = {\n 'ethereumjs-wallet': (__webpack_require__(/*! ./wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\")._internalEth.wallet),\n 'ethereumjs-util': (__webpack_require__(/*! ./wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\")._internalEth.util),\n 'socket.io-client': __webpack_require__(/*! socket.io-client */ \"./node_modules/socket.io-client/build/cjs/index.js\"),\n 'bignumber.js': __webpack_require__(/*! bignumber.js */ \"./node_modules/bignumber.js/bignumber.js\"),\n 'semver': __webpack_require__(/*! semver */ \"./node_modules/semver/semver.js\"),\n };\n\n /* Some of these modules are API-track. Some of them need to be published to be\n * available for top-level resolution by DCP internals. Those (mostly) should have\n * been written using relative module paths.....\n */\n let modules = Object.assign({\n 'dcp-build': {\"version\":\"c96b8086bdb343ed36ff35e133755d3f21613609\",\"branch\":\"release\",\"dcpClient\":{\"version\":\"4.2.15\",\"from\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#prod-20220919\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#20c6d0b3df95fdc029b43e01e1e6ac78c9d5b372\"},\"built\":\"Fri Sep 23 2022 16:37:39 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Fri 23 Sep 2022 04:37:37 PM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.70.0\",\"node\":\"v14.20.0\"},\n 'dcp-xhr': __webpack_require__(/*! ../common/dcp-xhr */ \"./src/common/dcp-xhr.js\"),\n 'dcp-env': __webpack_require__(/*! ../common/dcp-env */ \"./src/common/dcp-env.js\"),\n 'dcp-url': __webpack_require__(/*! ../common/dcp-url */ \"./src/common/dcp-url.js\"),\n 'cli': __webpack_require__(/*! ../common/cli */ \"./src/common/cli.js\"),\n 'dcp-timers': __webpack_require__(/*! ../common/dcp-timers */ \"./src/common/dcp-timers.js\"),\n 'dcp-dot-dir': __webpack_require__(/*! ../common/dcp-dot-dir */ \"./src/common/dcp-dot-dir.js\"),\n 'dcp-assert': __webpack_require__(/*! 
../common/dcp-assert */ \"./src/common/dcp-assert.js\"),\n 'dcp-events': __webpack_require__(/*! ../common/dcp-events */ \"./src/common/dcp-events/index.js\"),\n 'utils': __webpack_require__(/*! ../utils */ \"./src/utils/index.js\"),\n 'debugging': __webpack_require__(/*! ../debugging */ \"./src/debugging.js\"),\n 'publish': __webpack_require__(/*! ../common/dcp-publish */ \"./src/common/dcp-publish.js\"),\n 'compute-groups': {\n ...__webpack_require__(/*! ./compute-groups */ \"./src/dcp-client/compute-groups/index.js\"),\n publicGroupOpaqueId: (__webpack_require__(/*! ../common/scheduler-constants */ \"./src/common/scheduler-constants.js\").computeGroups[\"public\"].opaqueId),\n },\n 'bank-util': __webpack_require__(/*! ./bank-util */ \"./src/dcp-client/bank-util.js\"),\n 'protocol-v4': __webpack_require__(/*! ../protocol-v4 */ \"./src/protocol-v4/index.js\"), /* deprecated */\n 'client-modal': __webpack_require__(/*! ./client-modal */ \"./src/dcp-client/client-modal/index.js\"),\n 'legacy-modal': (__webpack_require__(/*! ../../portal/www/js/modal */ \"./portal/www/js/modal.js\").Modal),\n 'eth': __webpack_require__(/*! ./wallet/eth */ \"./src/dcp-client/wallet/eth.js\"),\n 'serialize': __webpack_require__(/*! ../utils/serialize */ \"./src/utils/serialize.js\"),\n 'job': __webpack_require__(/*! ./job */ \"./src/dcp-client/job/index.js\"),\n 'range-object': __webpack_require__(/*! ./range-object */ \"./src/dcp-client/range-object.js\"),\n 'stats-ranges': __webpack_require__(/*! ./stats-ranges */ \"./src/dcp-client/stats-ranges.js\"),\n 'job-values': __webpack_require__(/*! ./job-values */ \"./src/dcp-client/job-values.js\"),\n 'standard-objects': {}\n }, conveniencePeers, officialApi);\n\n /* Export the JS Standard Classes (etc) from the global object of the bundle evaluation context,\n * in case we have code somewhere that needs to use these for instanceof checks.\n */\n ;[ Object, Function, Boolean, Symbol,\n Error, EvalError, RangeError, ReferenceError, SyntaxError, TypeError, URIError,\n Number, Math, Date,\n String, RegExp,\n Array, Int8Array, Uint8Array, Uint8ClampedArray, Int16Array, Uint16Array, Int32Array, Uint32Array, Float32Array, Float64Array,\n Map, Set, WeakMap, WeakSet,\n ArrayBuffer, DataView, JSON,\n Promise, \n Reflect, Proxy, Intl, WebAssembly, __webpack_require__\n ].forEach(function (obj) {\n if (obj.name && (typeof obj === 'function' || typeof obj === 'object'))\n modules['standard-objects'][obj.name] = obj\n })\n\n if (typeof BigInt !== 'undefined')\n modules['standard-objects']['BigInt'] === BigInt;\n if (typeof BigInt64Array !== 'undefined')\n modules['standard-objects']['BigInt64Array'] === BigInt64Array;\n if (typeof BigInt64Array !== 'undefined')\n modules['standard-objects']['BigUint64Array'] === BigUint64Array;\n\n module.declare([], function(require, exports, module) {\n Object.assign(exports, modules)\n exports['dcp-config'] = dcpConfig\n })\n if (realModuleDeclare)\n module.declare = realModuleDeclare\n\n bundleExports = thisScript.exports = exports; /* must be last expression evaluated! */\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/index.js?");
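The standard-objects loop near the end of the entry point exists because the bundle's code is evaluated in its own context: objects constructed there are not instanceof the host page's constructors of the same name, so the bundle exports its own realm's classes for such checks. A small demonstration of the underlying realm issue, using Node's vm module only to create a second realm (vm is not part of dcp-client):

// Why cross-realm class exports matter: an object created in another realm is not an
// instance of this realm's constructor of the same name.
const vm = require('vm');

const otherRealm = vm.createContext({});
const foreignError = vm.runInContext('new Error("boom")', otherRealm);
const ForeignError = vm.runInContext('Error', otherRealm);

console.log(foreignError instanceof Error);        // false: host realm's Error
console.log(foreignError instanceof ForeignError); // true: exporting realm's Error

As an aside, the three BigInt/BigInt64Array/BigUint64Array lines in both the old and new versions use === rather than =, so as written they appear to compare and discard rather than add those constructors to standard-objects.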
4182
4182
 
4183
4183
  /***/ }),
4184
4184
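The same bundle entry point also copies the JavaScript standard constructors (Object, Array, the TypedArrays, Promise, and so on) out of its own evaluation context into the 'standard-objects' export, "in case we have code somewhere that needs to use these for instanceof checks." The sketch below illustrates the cross-realm pitfall that motivates this; the resolvable id 'dcp/standard-objects' is assumed from the map above.

// Illustrative only: constructors from one evaluation context are distinct
// objects from the host realm's constructors, so instanceof checks on values
// that cross the bundle boundary should use the exported ones.
const std = require('dcp/standard-objects');   // assumed id, per the map above

function isArrayFromBundle (value) {
  // A value constructed inside the bundle satisfies this even when the host
  // realm's own `value instanceof Array` could be false (cross-realm pitfall).
  return std.Array ? value instanceof std.Array : Array.isArray(value);
}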
 
@@ -4199,7 +4199,7 @@ eval("/**\n * @file job-values.js\n * Utility code related t
4199
4199
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4200
4200
 
4201
4201
  "use strict";
4202
- eval("/**\n * @file job/index.js\n * @author Eddie Roosenmaallen, eddie@kingsds.network\n * Matthew Palma, mpalma@kingsds.network\n * Wes Garland, wes@kingsds.network\n * Paul, paul@kingsds.network\n * Ryan Saweczko, ryansaweczko@kingsds.network\n * @date November 2018\n * November 2018\n * February 2022\n * May 2022\n * Jun 2022\n *\n * This module implements the Compute API's Job Handle\n *\n */\n/** @typedef {import('dcp/dcp-client/wallet/keystore').Keystore} Keystore */\n\n\nconst { BigNumber } = __webpack_require__(/*! bignumber.js */ \"./node_modules/bignumber.js/bignumber.js\");\nconst { v4: uuidv4 } = __webpack_require__(/*! uuid */ \"./node_modules/uuid/dist/esm-browser/index.js\");\nconst { EventEmitter, PropagatingEventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { RangeObject, MultiRangeObject, DistributionRange, SuperRangeObject, SparseRangeObject } = __webpack_require__(/*! dcp/dcp-client/range-object */ \"./src/dcp-client/range-object.js\");\nconst { fetchURI, encodeDataURI, createTempFile } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { encodeJobValueUri, RemoteValue } = __webpack_require__(/*! dcp/dcp-client/job-values */ \"./src/dcp-client/job-values.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst ClientModal = __webpack_require__(/*! dcp/dcp-client/client-modal */ \"./src/dcp-client/client-modal/index.js\");\nconst { Worker } = __webpack_require__(/*! dcp/dcp-client/worker */ \"./src/dcp-client/worker/index.js\");\nconst { RemoteDataSet } = __webpack_require__(/*! dcp/dcp-client/remote-data-set */ \"./src/dcp-client/remote-data-set.js\");\nconst { RemoteDataPattern } = __webpack_require__(/*! dcp/dcp-client/remote-data-pattern */ \"./src/dcp-client/remote-data-pattern.js\");\nconst { ResultHandle } = __webpack_require__(/*! ./result-handle */ \"./src/dcp-client/job/result-handle.js\");\nconst { SlicePaymentOffer } = __webpack_require__(/*! ./slice-payment-offer */ \"./src/dcp-client/job/slice-payment-offer.js\");\nconst { addSlices } = __webpack_require__(/*! ./upload-slices */ \"./src/dcp-client/job/upload-slices.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst computeGroups = __webpack_require__(/*! dcp/dcp-client/compute-groups */ \"./src/dcp-client/compute-groups/index.js\");\nconst schedulerConstants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { DEFAULT_REQUIREMENTS, removeBadRequirements } = __webpack_require__(/*! dcp/common/job-requirements-defaults */ \"./src/common/job-requirements-defaults.js\");\nconst { sliceStatus, jobValueKind } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { jobStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst bankUtil = __webpack_require__(/*! dcp/dcp-client/bank-util */ \"./src/dcp-client/bank-util.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst debugging = (__webpack_require__(/*! 
dcp/debugging */ \"./src/debugging.js\").scope)('dcp-client');\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\nlet tunedKvin;\n\nconst log = (...args) => {\n if (debugging('job')) {\n console.debug('dcp-client:job', ...args);\n }\n};\n\nconst ON_BROWSER = DCP_ENV.isBrowserPlatform;\nconst sideloaderModuleIdentifier = 'sideloader-v1';\n\n\n/** @typedef {import('../range-object').RangeLike} RangeLike */\n\n/**\n * Ensure input data is an appropriate format\n * @param {RangeObject | DistributionRange | RemoteDataSet | Array | Iterable}\n * inputData - A URI-shaped string, a [Multi]RangeObject-constructing value, or\n * an array of slice data\n * @return {RangeObject | RangeLike | DistributionRange | RemoteDataSet | Array}\n * The coerced input in an appropriate format ([Multi]RangeObject,\n * DistributionRange, RemoteDataSet, or array)\n */\n const wrangleData = (inputData) => {\n\n if (RangeObject.isRangelike(inputData)) { return inputData }\n if (RangeObject.isRangeObject(inputData)) { return inputData }\n if (DistributionRange.isDistribution(inputData)) { return inputData }\n if (inputData instanceof SparseRangeObject) { return inputData }\n if (inputData instanceof MultiRangeObject) { return inputData }\n if (MultiRangeObject.isProtoMultiRangelike(inputData)) { return new MultiRangeObject(inputData) }\n if (RangeObject.isProtoRangelike(inputData)) { return new RangeObject(inputData) }\n if (DistributionRange.isProtoDistribution(inputData)) { return new DistributionRange(inputData) }\n if (RemoteDataSet.isRemoteDataSet(inputData)) { return inputData }\n if (RemoteDataPattern.isRemoteDataPattern(inputData)) { return inputData }\n\n return Array.isArray(inputData) ? inputData : [inputData];\n};\n\n/**\n * @classdesc The Compute API's Job Handle (see {@link https://docs.dcp.dev/specs/compute-api.html#job-handles|Compute API spec})\n * Job handles are objects which correspond to jobs. 
\n * They are created by some exports of the compute module, such as {@link module:dcp/compute.do|compute.do} and {@link module:dcp/compute.for|compute.for}.\n * @extends module:dcp/dcp-events.PropagatingEventEmitter\n * @hideconstructor\n * @access public\n */\nclass Job extends PropagatingEventEmitter\n{\n /**\n * Fired when the job is accepted by the scheduler on deploy.\n * \n * @event Job#accepted\n * @access public\n * @type {object}\n *//**\n * Fired when the job is cancelled.\n * \n * @event Job#cancel\n * @access public\n *//**\n * Fired when a result is returned.\n * \n * @event Job#result\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {string} task ID of the task (slice) the result came from\n * @property {number} sort The index of the slice\n * @property {object} result\n * @property {string} result.request\n * @property {*} result.result The value returned from the work function\n *//**\n * Fired when the result handle is modified, either when a new `result` event is fired or when the results are populated with `results.fetch()`\n * \n * @event Job#resultsUpdated\n * @access public\n *//**\n * Fired when the job has been completed.\n * \n * @event Job#complete\n * @access public\n * @type {ResultHandle}\n *//**\n * Fired when the job's status changes.\n * \n * @event Job#status\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} total Total number of slices in the job\n * @property {number} distributed Number of slices that have been distributed\n * @property {number} computed Number of slices that have completed execution (returned a result)\n * @property {string} runStatus Current runStatus of the job\n *//**\n * Fired when a slice throws an error.\n * \n * @event Job#error\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} sliceIndex Index of the slice that threw the error\n * @property {string} message The error message\n * @property {string} stack The error stacktrace\n * @property {string} name The error type name\n *//**\n * Fired when a slice uses one of the console log functions.\n * \n * @event Job#console\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} sliceIndex The index of the slice that produced this event\n * @property {string} level The log level, one of `debug`, `info`, `log`, `warn`, or `error`\n * @property {string} message The console log message\n *//**\n * Fired when a slice is stopped for not calling progress. 
Contains information about how long the slice ran for, and about the last reported progress calls.\n * \n * @event Job#noProgress\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} sliceIndex The index of the slice that failed due to no progress\n * @property {number} timestamp How long the slice ran before failing\n * @property {object} progressReports\n * @property {object} progressReports.last The last progress report received from the worker\n * @property {number} progressReports.last.timestamp Time since the start of the slice\n * @property {number} progressReports.last.progress Progress value reported\n * @property {*} progressReports.last.value The last value that was passed to the progress function\n * @property {number} progressReports.last.throttledReports Number of calls to progress that were throttled since the last report\n * @property {object} progressReports.lastUpdate The last determinate (update to the progress param) progress report received from the worker\n * @property {number} progressReports.lastUpdate.timestamp\n * @property {number} progressReports.lastUpdate.progress\n * @property {*} progressReports.lastUpdate.value\n * @property {number} progressReports.lastUpdate.throttledReports\n *//**\n @todo: is this in the spec? is there a no progress data? should there be?\n * Identical to `noProgress`, except that it also contains the data that the slice was executed with.\n * \n * @event Job#noProgressData\n * @access public\n * @type {object}\n * @property {*} data The data that the slice was executed with\n *//**\n * Fired when the job is paused due to running out of funds. The job can be resumed by escrowing more funds then resuming the job.\n * @todo: is this a thing, should it be a thing (the payload)\n * Event payload is the estimated funds required to complete the job\n * \n * @event Job#nofunds\n * @access public\n * @type {BigNumber}\n *//**\n * Fired when the job cannot be deployed due to no bank account / not enough balance to deploy the job\n * \n * @event Job#ENOFUNDS\n * @access public\n *//**\n * Fired when the job is cancelled due to the work function not calling the `progress` method frequently enough.\n * \n * @event Job#ENOPROGRESS\n * @access public\n *//**\n * The job was cancelled because scheduler has determined that individual tasks in this job exceed the maximum allowable execution time.\n * \n * @event Job#ESLICETOOSLOW\n * @access public\n *//**\n * Fired when the job is cancelled because too many work functions are terminating with uncaught exceptions.\n * \n * @event Job#ETOOMANYERRORS\n * @access public\n */\n\n /**\n * @form1 new Job('application_worker_address'[, data[, arguments]])\n * @form2a new Job('worker source'[, data[, arguments]])\n * @form2b new Job(worker_function[, data[, arguments]])\n */\n constructor ()\n {\n super('Job');\n if (typeof arguments[0] === 'function')\n arguments[0] = arguments[0].toString();\n\n if (typeof arguments[0] === 'string')\n {\n const { encodeDataURI } = __webpack_require__(/*! 
dcp/utils */ \"./src/utils/index.js\");\n this.workFunctionURI = encodeDataURI(arguments[0], 'application/javascript');\n } \n else if (DcpURL.isURL(arguments[0]))\n this.workFunctionURI = arguments[0].href;\n\n this.jobInputData = wrangleData(arguments[1] || []);\n this.jobArguments = wrangleData(arguments[2] || []);\n \n log('num wrangledInputData:', this.jobInputData.length);\n log('num wrangledArguments:', this.jobArguments.length);\n\n this.initEventSystems();\n\n /**\n * An object describing the cost the user believes each the average slice will incur, in terms of CPU/GPU and I/O.\n * If defined, this object is used to provide initial scheduling hints and to calculate escrow amounts.\n *\n * @type {object}\n * @access public\n */\n this.initialSliceProfile = undefined;\n \n // The max value that the client is willing to spend to deploy\n // (list on the scheduler, doesn't include compute payment)\n // maxDeployPayment is the max the user is willing to pay to DCP (as a\n // Hold), in addition to the per-slice offer and associated scrape.\n // Currently calculated as `deployCost = costPerKB *\n // (JSON.stringify(job).length / 1024) // 1e-9 per kb`\n // @todo: figure this out / er nov 2018\n this.maxDeployPayment = 1;\n\n /**\n * An object describing the requirements that workers must have to be eligible for this job. The default values are set in the job-submitter, and only the client specified\n * requirements are sent over the wire. See {@link https://docs.dcp.dev/specs/compute-api.html#requirements-objects|Requirements Objects}.\n *\n * @type {object}\n * @access public\n */\n this.requirements = {};\n\n /**\n * A place to store public-facing attributes of the job. Anything stored on this object will be available inside the work \n * function (see {@link module:dcp/compute~sandboxEnv.work}). The properties documented here may be used by workers to display what jobs are currently being \n * worked on.\n * @access public\n * @property {string} name Public-facing name of this job.\n * @property {string} description Public-facing description for this job.\n * @property {string} link Public-facing link to external resource about this job.\n */\n this.public = {\n name: null,\n description: null,\n link: null,\n };\n\n /**\n * A number (can be null, undefined, or infinity) describing the estimationSlicesRemaining in the jpd (dcp-2593)\n * @type {number}\n * @access public\n */\n this.estimationSlices = undefined;\n /**\n * When true, allows a job in estimation to have requestTask return multiple estimation slices.\n * This flag applies independent of infinite estimation, viz., this.estimationSlices === null .\n * @type {boolean}\n * @access public\n */\n this.greedyEstimation = false;\n /**\n * tunable parameters per job\n * @access public\n * @param {object} tuning \n * @param {string} tuning.kvin Encode the TypedArray into a string, trying multiple methods to determine optimum \n * size/performance. 
The this.tune variable affects the behavior of this code this:\n * @param {boolean} speed If true, only do naive encoding: floats get represented as byte-per-digit strings\n * @param {boolean} size If true, try the naive, ab8, and ab16 encodings; pick the smallest\n * If both are false try the naive encoding if under typedArrayPackThreshold and use if smaller\n * than ab8; otherwise, use ab8\n */\n this.tuning = {\n kvin: {\n size: false,\n speed: false,\n },\n }\n /* For API interface to end-users only */\n Object.defineProperty(this, 'id', {\n get: () => this.address,\n set: (id) => { this.address = id }\n });\n \n this.uuid = uuidv4(); /** @see {@link https://kingsds.atlassian.net/browse/DCP-1475?atlOrigin=eyJpIjoiNzg3NmEzOWE0OWI4NGZkNmI5NjU0MWNmZGY2OTYzZDUiLCJwIjoiaiJ9|Jira Issue} */\n this.dependencies = []; /* dependencies of the work function */\n this.requirePath = []; /* require path for dependencies */\n this.connected = false; /* true when exec or resume called */\n this.results = new ResultHandle(this); /* result handle */\n this.collateResults = true; /* option to receive results as they are computed & ensure all are received on finish */\n this.force100pctCPUDensity = false; /* tell scheduler to assume this job uses 100% cpu density */\n this.workerConsole = false; /* tell workers to log more information about failures in the evaluator this job causes */\n this.address = null; /* job address, created by scheduler during exec call. */\n this.paymentAccountKeystore = null; /* keystore for payment for job to come from */\n this.status = { /* job status details */\n runStatus: null,\n total: null,\n distributed: null,\n computed: null\n };\n // Compute groups. Add to public compute group by default\n this.computeGroups = [ Object.assign({}, schedulerConstants.computeGroups.public) ];\n \n // Update the ready state as we go through job deployment\n this.readyState = sliceStatus.new;\n const that = this;\n this.readyStateChange = function job$$readyStateChange (readyState)\n {\n that.readyState = readyState;\n that.emit('readyStateChange', that.readyState);\n }\n }\n \n /**\n * Initialize the various event systems the job handle requires. These include:\n * - an internal event emitter (this.ee)\n * - an event emitter for any events emitted on `work.emit` within work functions (this.work)\n * - an event subscriber to subscribe (to receive) events from the scheduler (this.eventSubscriber)\n */\n initEventSystems ()\n {\n // Handle the various event-related things required in the constructor\n\n // Internal event emitter for events within job handle\n this.ee = new EventEmitter('Job Internal');\n\n /**\n * An EventEmitter for custom events dispatched by the work function.\n * @type {module:dcp/dcp-events.EventEmitter}\n * @access public\n * @example\n * // in work function\n * work.emit('myEventName', 1, [2], \"three\");\n * // client-side\n * job.work.on('myEventName', (num, arr, string) => { });\n */\n this.work = new EventEmitter('job.work');\n this.listenForCustomEvents = false;\n\n // Initialize the eventSubscriber so each job has unique eventSubscriber\n this.eventSubscriber = new ((__webpack_require__(/*! dcp/events/event-subscriber */ \"./src/events/event-subscriber.js\").EventSubscriber))(this);\n\n // Some events from the event subscriber can't be emitted immediately upon receipt without having \n // weird/wrong output due to things like serialization. 
We allow interceptors in the event subscriber\n // to handle this.\n const that = this\n var lastConsoleEv;\n var sameCounter = 1;\n const parseConsole = function deserializeConsoleMessage(ev) {\n if (tunedKvin)\n ev.message = tunedKvin.unmarshal(ev.message);\n else \n ev.message = kvin.unmarshal(ev.message);\n \n if (lastConsoleEv && ev.message[0] === lastConsoleEv.message[0] && ev.sliceNumber === lastConsoleEv.sliceNumber && ev.level === lastConsoleEv.level)\n ev.same = ++sameCounter;\n else\n sameCounter = 1;\n lastConsoleEv = ev;\n \n /* if we have the same message being logged (same sliceNumber, message, log level), the console event object will have the sole property same, nothing else */\n if (ev.same > 1)\n that.emit('console', { same: ev.same });\n else\n {\n delete ev.same;\n that.emit('console', ev);\n }\n }\n\n this.eventIntercepts = {\n result: (ev) => this.handleResult(ev),\n status: (ev) => this.handleStatus(ev),\n cancel: (ev) => this.ee.emit('stopped', ev),\n custom: (ev) => this.work.emit(ev.customEvent, ev),\n console: parseConsole,\n };\n \n this.eventTypes = (__webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\").eventTypes);\n\n this.work.on('newListener', (evt) => {\n this.listenForCustomEvents = true;\n });\n this.desiredEvents = []\n this.on('newListener', (evt) => {\n if (!this.connected && evt !== 'newListener')\n this.desiredEvents.push(evt);\n if (evt === 'cancel')\n this.listeningForCancel = true;\n });\n }\n \n /** \n * Cancel the job\n * @access public\n * @param {string} reason If provided, will be sent to client\n */\n async cancel (reason = undefined)\n {\n const response = await this.useDeployConnection('cancelJob', {\n job: this.address,\n owner: this.paymentAccountKeystore.address,\n reason,\n }, this.paymentAccountKeystore);\n\n return response.payload;\n }\n\n /** \n * Resume this job\n * @access public\n */\n async resume ()\n {\n const response = await this.schedulerConnection.send('resumeJob', {\n job: this.address,\n owner: this.paymentAccountKeystore.address,\n }, this.paymentAccountKeystore);\n\n return response.payload;\n }\n\n /**\n * Helper function for retrieving info about the job. The job must have already been deployed.\n * An alias for {@link module:dcp/compute.getJobInfo}.\n * @access public\n */\n getJobInfo ()\n {\n return (__webpack_require__(/*! ../compute */ \"./src/dcp-client/compute.js\").compute.getJobInfo)(this.address);\n }\n \n /**\n * Helper function for retrieving info about the job's slices. The job must have already been deployed.\n * An alias for {@link module:dcp/compute.getSliceInfo}.\n * @access public\n */\n getSliceInfo ()\n {\n return (__webpack_require__(/*! ../compute */ \"./src/dcp-client/compute.js\").compute.getSliceInfo)(this.address);\n }\n \n /** Escrow additional funds for this job\n * @access public\n * @param {number|BigNumber} fundsRequired - A number or BigNumber instance representing the funds to escrow for this job\n */\n async escrow (fundsRequired)\n {\n if ((typeof fundsRequired !== 'number' && !BigNumber.isBigNumber(fundsRequired))\n || fundsRequired <= 0 || !Number.isFinite(fundsRequired) || Number.isNaN(fundsRequired))\n throw new Error(`Job.escrow: fundsRequired must be a number greater than zero. (not ${fundsRequired})`);\n\n const bankConnection = new protocolV4.Connection(dcpConfig.bank.services.bankTeller);\n\n /*\n * escrow has been broken for an unknown amount of time. 
`feeStructureId` is not defined anywhere in the job class, and hasn't\n * for a period of time. When fixed, `this[INTERNAL_SYMBOL].payloadDetails.feeStructureId` will likely become just `this.feeStructureId`,\n * but it's being left alone until someone spends the time to fix escrow. / rs Jul 2022\n */\n const response = await bankConnection.send('embiggenFeeStructure', {\n feeStructureAddress: this[INTERNAL_SYMBOL].payloadDetails.feeStructureId,\n additionalEscrow: new BigNumber(fundsRequired),\n fromAddress: this.paymentAccountKeystore.address,\n }, this.paymentAccountKeystore);\n\n bankConnection.close();\n const receipt = response.payload;\n return receipt;\n }\n\n /**\n * create bundles for local dependencies\n */\n _pack ()\n {\n var retval = (__webpack_require__(/*! ./node-modules */ \"./src/dcp-client/job/node-modules.js\").createModuleBundle)(this.dependencies);\n return retval;\n }\n\n /** \n * Collect all of the dependencies together, throw them into a BravoJS\n * module which sideloads them as a side effect of declaration, and transmit\n * them to the package manager. Then we return the package descriptor object,\n * which is guaranteed to have only one file in it.\n *\n * @returns {object} with properties name and files[0]\n */\n async _publishLocalModules()\n {\n const dcpPublish = __webpack_require__(/*! dcp/common/dcp-publish */ \"./src/common/dcp-publish.js\");\n \n const { tempFile, hash, unresolved } = await this._pack();\n\n if (!tempFile) {\n return { unresolved };\n }\n\n const sideloaderFilename = tempFile.filename;\n const pkg = {\n name: `dcp-pkg-v1-localhost-${hash.toString('hex')}`,\n version: '1.0.0',\n files: {\n [sideloaderFilename]: `${sideloaderModuleIdentifier}.js`,\n },\n }\n\n await dcpPublish.publish(pkg);\n tempFile.remove();\n\n return { pkg, unresolved };\n }\n \n /**\n * This function specifies a module dependency (when the argument is a string)\n * or a list of dependencies (when the argument is an array) of the work\n * function. This function can be invoked multiple times before deployment.\n * @param {string | string[]} modulePaths - A string or array describing one\n * or more dependencies of the job.\n * @access public\n */\n requires (modulePaths)\n {\n if (typeof modulePaths !== 'string' && (!Array.isArray(modulePaths) || modulePaths.some((modulePath) => typeof modulePath !== 'string')))\n throw new TypeError('The argument to dependencies is not a string or an array of strings');\n else if (modulePaths.length === 0)\n throw new RangeError('The argument to dependencies cannot be an empty string or array');\n else if (Array.isArray(modulePaths) && modulePaths.some((modulePath) => modulePath.length === 0))\n throw new RangeError('The argument to dependencies cannot be an array containing an empty string');\n\n if (!Array.isArray(modulePaths))\n modulePaths = [modulePaths];\n\n for (const modulePath of modulePaths)\n {\n if (modulePath[0] !== '.' 
&& modulePath.indexOf('/') !== -1)\n {\n const modulePrefixRegEx = /^(.*)\\/.*?$/;\n const [, modulePrefix] = modulePath.match(modulePrefixRegEx);\n if (modulePrefix && this.requirePath.indexOf(modulePrefix) === -1)\n this.requirePath.push(modulePrefix);\n }\n this.dependencies.push(modulePath);\n }\n }\n \n /** Set the account upon which funds will be drawn to pay for the job.\n * @param {module:dcp/wallet.AuthKeystore} keystore A keystore that representa a bank account.\n * @access public\n */\n setPaymentAccountKeystore (keystore)\n {\n if (this.address)\n {\n if (!keystore.address.eq(this.paymentAccountKeystore))\n {\n let message = 'Cannot change payment account after job has been deployed';\n this.emit('EPERM', message);\n throw new Error(`EPERM: ${message}`);\n }\n }\n \n if (!(keystore instanceof wallet.Keystore))\n throw new Error('Not an instance of Keystore: ' + keystore.toString());\n this.paymentAccountKeystore = keystore;\n }\n \n /** Set the slice payment offer. This is equivalent to the first argument to exec.\n * @param {number} slicePaymentOffer - The number of DCC the user is willing to pay to compute one slice of this job\n */\n setSlicePaymentOffer (slicePaymentOffer)\n {\n this.slicePaymentOffer = new SlicePaymentOffer(slicePaymentOffer);\n }\n \n \n /**\n * @param {URL|DcpURL} locationUrl - A URL object\n * @param {object} postParams - An object with any parameters that a user would like to be passed to a \n * remote result location. This object is capable of carry API keys for S3, \n * DropBox, etc. These parameters are passed as parameters in an \n * application/x-www-form-urlencoded request.\n */\n setResultStorage (locationUrl, postParams)\n {\n if (locationUrl instanceof URL || locationUrl instanceof DcpURL)\n this.resultStorageDetails = locationUrl;\n else\n throw new Error('Not an instance of a DCP URL: ' + locationUrl);\n \n\n // resultStorageParams contains any post params required for off-prem storage\n if (typeof postParams !== 'undefined' && typeof postParams === 'object' )\n this.resultStorageParams = postParams;\n else\n throw new Error('Not an instance of a object: ' + postParams);\n\n // Some type of object here\n this.resultStorageType = 'pattern';\n }\n \n /**\n * This function is identical to exec, except that the job is executed locally\n * in the client.\n * @async\n * @param {number} cores - the number of local cores in which to execute the job.\n * @param {...any} args - The remaining arguments are identical to the arguments of exec\n * @return {Promise<ResultHandle>} - resolves with the results of the job, rejects on an error\n * @access public\n */\n async localExec (cores = 1, ...args)\n {\n this.inLocalExec = true;\n this.estimationSlices = 0;\n this.greedyEstimation = false;\n this.isCI = false;\n\n let worker;\n this.on('accepted', () => {\n // Start a worker for this job\n worker = new Worker({\n localExec: true,\n jobAddresses: [this.address],\n allowedOrigins: this.localExecAllowedFiles,\n paymentAddress: this.paymentAccountKeystore.address,\n identity: this.identityKeystore,\n maxWorkingSandboxes: cores,\n sandboxOptions: {\n ignoreNoProgress: true,\n SandboxConstructor: (DCP_ENV.platform === 'nodejs'\n && (__webpack_require__(/*! 
../worker/evaluators */ \"./src/dcp-client/worker/evaluators/index.js\").nodeEvaluatorFactory)())\n },\n });\n\n worker.start().catch((e) => {\n console.error('Failed to start worker for localExec:');\n console.error(e.message);\n });\n });\n \n if (DCP_ENV.platform === 'nodejs')\n {\n this.localExecAllowedFiles =\n {\n any: [],\n fetchData: [],\n fetchWorkFunctions: [],\n fetchArguments: [],\n sendResults: [],\n };\n \n // Determine type of input data\n const { dataRange, dataValues, dataPattern, sliceCount } = marshalInputData(this.jobInputData);\n \n const inputSetFiles = [];\n \n let inputSetURIs = [];\n let dataSet;\n \n if (dataValues)\n {\n for (let i = 0; i < dataValues.length; i++)\n {\n if (!(dataValues[i] instanceof URL))\n {\n let marshaledInputValue = kvinMarshal(dataValues[i]);\n let inputDataFile = createTempFile('dcp-localExec-sliceData-XXXXXXXXX', 'kvin');\n inputDataFile.writeSync(JSON.stringify(marshaledInputValue));\n inputSetFiles.push(inputDataFile);\n inputSetURIs.push(new URL('file://' + inputDataFile.filename));\n }\n else\n {\n inputSetURIs.push(dataValues[i]);\n this.localExecAllowedFiles['fetchData'].push(dataValues[i].origin);\n }\n }\n dataSet = new RemoteDataSet(inputSetURIs);\n if (dataSet.length > 0)\n this.marshaledDataValues = dataSet;\n }\n if (dataRange)\n {\n inputSetFiles.push(createTempFile('dcp-localExec-sliceData-XXXXXXXXX', 'json'));\n let marshaledInputSet = JSON.stringify(dataRange);\n inputSetFiles[0].writeSync(marshaledInputSet)\n inputSetURIs.push(new URL('file://' + inputSetFiles[0].filename));\n dataSet = new RemoteDataSet(inputSetURIs);\n this.marshaledDataRange = dataSet;\n this.rangeLength = dataRange.length;\n }\n \n if (dataPattern)\n {\n let uri = dataPattern;\n for (let i = 0; i < sliceCount; i++)\n {\n let sliceNum = i+1;\n let newURI = new URL(uri.replace('{slice}', sliceNum.toString()));\n this.localExecAllowedFiles['fetchData'].push(newURI.origin);\n }\n }\n \n // For allowed origins of the localexec worker. 
Only allow the origins (files in this case) in this list.\n for (let i = 0; i < inputSetFiles.length; i++)\n this.localExecAllowedFiles['fetchData'].push(inputSetFiles[i].filename);\n \n // Save work function to disk if work function starts with data (ie not remote)\n if (this.workFunctionURI.startsWith('data:'))\n {\n const workFunctionFile = createTempFile('dcp-localExec-workFunction-XXXXXXXXX', 'js');\n const workFunction = await fetchURI(this.workFunctionURI);\n workFunctionFile.writeSync(workFunction);\n \n const workFunctionFileURL = new URL('file://' + workFunctionFile);\n this.workFunctionURI = workFunctionFileURL.href;\n this.localExecAllowedFiles['fetchWorkFunctions'].push(workFunctionFile.filename);\n }\n else\n this.localExecAllowedFiles['fetchWorkFunctions'].push(new URL(this.workFunctionURI).origin);\n \n this.marshaledArguments = [];\n if (this.jobArguments)\n {\n if (this.jobArguments instanceof RemoteDataPattern) /* Not supported */\n throw new DCPError('Cannot use RemoteDataPattern as work function arguments', 'EBADARG')\n if (this.jobArguments instanceof RemoteDataSet) /* Entire set is RemoteDataSet */\n {\n this.jobArguments.forEach((e) =>\n {\n this.localExecAllowedFiles['fetchArguments'].push(new URL(e).origin)\n this.marshaledArguments.push(new URL(e));\n });\n }\n else\n {\n for (let i = 0; i < this.jobArguments.length; i++)\n {\n if (this.jobArguments[i] instanceof URL)\n {\n this.localExecAllowedFiles['fetchArguments'].push(this.jobArguments[i].origin);\n this.marshaledArguments.push(this.jobArguments[i]);\n }\n else\n {\n if (this.jobArguments[i] instanceof RemoteDataSet) /* Member of set is RemoteDataSet */\n {\n this.jobArguments[i].forEach((e) =>\n {\n this.localExecAllowedFiles['fetchArguments'].push(new URL(e).origin);\n this.marshaledArguments.push(new URL(e));\n });\n }\n else /* Actual Value */\n {\n const localArgFile = createTempFile(`dcp-localExec-argument-${i}-XXXXXXXXX`, 'kvin');\n localArgFile.writeSync(JSON.stringify(kvinMarshal(this.jobArguments[i])));\n this.marshaledArguments.push(new URL('file://' + localArgFile.filename));\n this.localExecAllowedFiles['fetchArguments'].push(localArgFile.filename);\n }\n }\n } \n }\n }\n this.marshaledArguments = kvinMarshal(this.marshaledArguments);\n }\n \n return this.exec(...args).finally(() => {\n if (worker) {\n setTimeout(() => {\n // stop the worker\n worker.stop(true);\n }, 3000);\n }\n });\n }\n\n /**\n * Deploys the job to the scheduler.\n * @param {number | object} [slicePaymentOffer=compute.marketValue] - Amount\n * in DCC that the user is willing to pay per slice.\n * @param {Keystore} [paymentAccountKeystore=wallet.get] - An instance of the\n * Wallet API Keystore that's used as the payment account when executing the\n * job.\n * @param {object} [initialSliceProfile] - An object describing the cost the\n * user believes the average slice will incur.\n * @access public\n * @emits Job#accepted\n */\n async exec (slicePaymentOffer = (__webpack_require__(/*! 
../compute */ \"./src/dcp-client/compute.js\").compute.marketValue), paymentAccountKeystore, initialSliceProfile)\n {\n if (this.connected)\n throw new Error('Exec called twice on the same job handle.');\n \n if (this.estimationSlices === Infinity)\n this.estimationSlices = null;\n else if (this.estimationSlices < 0)\n throw new Error('Incorrect value for estimationSlices; it can be an integer or Infinity!');\n \n if (this.tuning.kvin.speed || this.tuning.kvin.size)\n {\n tunedKvin = new kvin.KVIN();\n tunedKvin.tune = 'size';\n if(this.tuning.kvin.speed)\n tunedKvin.tune = 'speed';\n // If both size and speed are true, kvin will optimize based on speed\n if(this.tuning.kvin.speed && this.tuning.kvin.size)\n console.log('Slices and arguments are being uploaded with speed optimization.');\n }\n \n /* slight optimization to ensure we don't send requirements that will be ignored in the job submitter. Make a copy of the client specified requirements for this so that we dont magically override something they manually set */\n const _DEFAULT_REQUIREMENTS = JSON.parse(JSON.stringify(DEFAULT_REQUIREMENTS));\n removeBadRequirements(this.requirements, _DEFAULT_REQUIREMENTS);\n \n this.readyStateChange('exec');\n if ((typeof slicePaymentOffer === 'number') || (typeof slicePaymentOffer === 'object')\n || ((this.slicePaymentOffer === null || this.slicePaymentOffer === undefined) && typeof slicePaymentOffer === 'function'))\n this.setSlicePaymentOffer(slicePaymentOffer);\n if (typeof initialSliceProfile !== 'undefined')\n this.initialSliceProfile = initialSliceProfile;\n \n if (typeof paymentAccountKeystore !== 'undefined')\n {\n /** XXX @todo deprecate use of ethereum wallet objects */\n if (typeof paymentAccountKeystore === 'object' && paymentAccountKeystore.hasOwnProperty('_privKey'))\n {\n console.warn('* deprecated API * - job.exec invoked with ethereum wallet object as paymentAccountKeystore') /* /wg oct 2019 */\n paymentAccountKeystore = paymentAccountKeystore._privKey\n }\n /** XXX @todo deprecate use of private keys */\n if (wallet.isPrivateKey(paymentAccountKeystore))\n {\n console.warn('* deprecated API * - job.exec invoked with private key as paymentAccountKeystore') /* /wg dec 2019 */\n paymentAccountKeystore = await new wallet.Keystore(paymentAccountKeystore, '');\n }\n\n this.setPaymentAccountKeystore(paymentAccountKeystore)\n }\n \n if (this.paymentAccountKeystore)\n // Throws if they fail to unlock, we allow this since the keystore was set programmatically. \n await this.paymentAccountKeystore.unlock(undefined, parseFloat(dcpConfig.job.maxDeployTime));\n else\n {\n // If not set programmatically, we keep trying to get an unlocked keystore ... forever.\n let locked = true;\n let safety = 0; // no while loop shall go unguarded\n let ks;\n do\n {\n ks = null;\n // custom message for the browser modal to denote the purpose of keystore submission\n let msg = `This application is requesting a keystore file to execute ${this.public.description || this.public.name || 'this job'}. Please upload the corresponding keystore file. 
If you upload a keystore file which has been encrypted with a passphrase, the application will not be able to use it until it prompts for a passphrase and you enter it.`;\n try\n {\n ks = await wallet.get({ contextId: this.contextId, jobName: this.public.name, msg});\n }\n catch (e)\n {\n if (e.code !== ClientModal.CancelErrorCode) throw e;\n };\n if (ks)\n {\n try\n {\n await ks.unlock(undefined, parseFloat(dcpConfig.job.maxDeployTime));\n locked = false;\n }\n catch (e)\n {\n // prompt user again if user enters password incorrectly, exit modal otherwise\n if (e.code !== wallet.unlockFailErrorCode) throw e;\n }\n }\n if (safety++ > 1000) throw new Error('EINFINITY: job.exec tried wallet.get more than 1000 times.')\n } while (locked);\n this.setPaymentAccountKeystore(ks)\n }\n \n // We either have a valid keystore + password or we have rejected by this point.\n if (!this.slicePaymentOffer)\n throw new Error('A payment profile must be assigned before executing the job');\n else\n this.feeStructure = this.slicePaymentOffer.toFeeStructure(this.jobInputData.length);\n\n if (!this.address)\n {\n try\n {\n this.readyStateChange('init');\n await this.deployJob();\n const listenersPromise = this.addInitialEvents();\n const computeGroupsPromise = this.joinComputeGroups();\n let uploadSlicePromise;\n // if job data is by value then upload data to the scheduler in a staggered fashion\n if (Array.isArray(this.dataValues) && !this.marshaledDataValues)\n {\n this.readyStateChange('uploading');\n uploadSlicePromise = addSlices(this.dataValues, this.address, tunedKvin).then(() => this.close());\n }\n \n // await all promises for operations that can be done after the job is deployed\n await Promise.all([listenersPromise, computeGroupsPromise, uploadSlicePromise]);\n \n this.readyStateChange('deployed');\n this.emit('accepted', { job: this });\n }\n catch (error)\n {\n if (ON_BROWSER)\n await ClientModal.alert(error, { title: 'Failed to deploy job!' });\n throw error;\n }\n }\n else\n {\n // reconnecting to an old job\n await this.addInitialEvents();\n this.readyStateChange('reconnected');\n }\n\n this.connected = true;\n\n return new Promise((resolve, reject) => {\n const onComplete = () => resolve(this.results);\n const onCancel = (event) => {\n /**\n * FIXME(DCP-1150): Remove this since normal cancel event is noisy\n * enough to not need stopped event too.\n */\n if (ON_BROWSER && !this.listeningForCancel)\n ClientModal.alert('More details in console...', { title: 'Job Canceled' });\n this.emit('cancel', event);\n\n let errorMsg = event.reason;\n if (event.error && event.error !== 'undefined')\n errorMsg = errorMsg +`\\n Recent error message: ${event.error.message}`\n \n reject(new DCPError(errorMsg, event.code));\n };\n\n this.ee.once('stopped', async (stopEvent) => {\n // There is a chance the result submitter will emit finished > 1 time. 
Only handle it once.\n if (this.receivedStop)\n return;\n this.receivedStop = true;\n this.emit('stopped', stopEvent.runStatus);\n switch (stopEvent.runStatus) {\n case jobStatus.finished:\n if (this.collateResults)\n {\n let report = await this.getJobInfo();\n let allSliceNumbers = Array.from(Array(report.totalSlices)).map((e,i)=>i+1);\n let remainSliceNumbers = allSliceNumbers.filter((e) => !this.results.isAvailable(e));\n\n if (remainSliceNumbers.length)\n {\n const promises = remainSliceNumbers.map(sliceNumber => this.results.fetch([sliceNumber], true));\n await Promise.all(promises);\n }\n }\n\n this.emit('complete', this.results);\n onComplete();\n break;\n case jobStatus.cancelled:\n onCancel(stopEvent);\n break;\n default:\n /**\n * Asserting that we should never be able to reach here. The only\n * scheduler events that should trigger the Job's 'stopped' event\n * are jobStatus.cancelled, jobStatus.finished, and sliceStatus.paused.\n */\n reject(new Error(`Unknown event \"${stopEvent.runStatus}\" caused the job to be stopped.`));\n break;\n }\n });\n\n }).finally(() => {\n const handleErr = (e) => {\n console.error('Error while closing job connection:');\n console.error(e);\n }\n\n // Create an async IIFE to not block the promise chain\n (async () => {\n // delay to let last few events to be received\n await new Promise((resolve) => setTimeout(resolve, 1000));\n \n // close all of the connections so that we don't cause node processes to hang.\n this.closeDeployConnection();\n await this.eventSubscriber.close().catch(handleErr);\n await computeGroups.closeServiceConnection().catch((err) => {\n console.error('Warning: could not close compute groups service connection', err);\n });\n })();\n });\n }\n \n /**\n * job.addListeners(): Private function used to set up event listeners to the scheduler\n * before deploying the job.\n */\n async addInitialEvents ()\n {\n this.readyStateChange('listeners');\n\n // This is important: We need to flush the task queue before adding listeners\n // because we queue pending listeners by listening to the newListener event (in the constructor).\n // If we don't flush here, then the newListener events may fire after this function has run,\n // and the events won't be properly set up.\n await new Promise(resolve => setTimeout(resolve, 0));\n\n // @todo: Listen for an estimated cost, probably emit an \"estimated\" event when it comes in?\n // also @todo: Do the estimation task(s) on the scheduler and send an \"estimated\" event\n\n // Always listen to the stop event. 
It will resolve the work function promise, so is always needed.\n this.on('stop', (ev) => {this.ee.emit('stopped', ev)});\n\n // Connect listeners that were set up before exec\n if (this.desiredEvents.includes('result'))\n this.listeningForResults = true;\n await this.subscribeNewEvents(this.desiredEvents);\n\n // Connect listeners that are set up after exec\n this.on('newListener', (evt) => {\n if (evt === 'newListener' || this.desiredEvents.includes(evt))\n return;\n this.subscribeNewEvents([evt]);\n });\n \n // automatically add a listener for results if collateResults is on\n if (this.collateResults && !this.listeningForResults)\n this.on('result', () => {});\n\n debugging('dcp-client') && console.debug('subscribedEvents', this.desiredEvents);\n\n // If we have listeners for job.work, subscribe to custom events\n if (this.listenForCustomEvents)\n await this.subscribeCustomEvents();\n // Connect work event listeners that are set up after exec\n else\n this.work.on('newListener', () => this.subscribeCustomEvents());\n }\n \n /**\n * Subscribes to either reliable events or optional events. It is assumed that\n * any call to this function will include only new events.\n * @param {string[]} events \n */\n async subscribeNewEvents (events)\n {\n const reliableEvents = [];\n const optionalEvents = [];\n for (let eventName of events)\n {\n eventName = eventName.toLowerCase();\n if (this.eventTypes[eventName] && this.eventTypes[eventName].reliable)\n reliableEvents.push(eventName);\n else if (this.eventTypes[eventName] && !this.eventTypes[eventName].reliable)\n optionalEvents.push(eventName);\n else\n debugging('dcp-client') && console.debug(`Job handler has listener ${eventName} which isn't an event-router event.`);\n }\n if (debugging('dcp-client'))\n {\n console.debug('reliableEvents', reliableEvents);\n console.debug('optionalEvents', optionalEvents);\n }\n await this.eventSubscriber.subscribeManyEvents(reliableEvents, optionalEvents, { filter: { job: this.address } });\n }\n \n /**\n * Establishes listeners for worker events when requested by the client\n */\n async subscribeCustomEvents ()\n {\n if (!this.listeningForCustomEvents)\n await this.eventSubscriber.subscribeManyEvents([], ['custom'], { filter: { job: this.address } });\n this.listeningForCustomEvents = true\n }\n \n async joinComputeGroups ()\n {\n // localExec jobs are not entered in any compute group.\n if (!this.inLocalExec && this.computeGroups)\n {\n this.readyStateChange('compute-groups');\n computeGroups.addRef(); // Just in case we're doing a Promise.all on multiple execs.\n\n // Add this job to its currently-defined compute groups (as well as public group, if included)\n let success;\n \n if (!Array.isArray(this.computeGroups)) \n throw new DCPError('Compute groups must be wrapped in an Array', 'DCPL-1101');\n\n for (let i = 0; i < this.computeGroups.length; i++)\n {\n let value = this.computeGroups[i];\n \n if (typeof value !== 'object')\n throw new DCPError(`This compute group: ${value[i]} must be an object`, 'DCPL-1102');\n \n if (value.joinKey && typeof value.joinKey !== 'string' && !(value.joinKey instanceof String))\n throw new DCPError(`This join key: ${value.joinKey} must be a string or a string literal`, 'DCPL-1103');\n else if (value.joinKeystore && !(value.joinKeystore instanceof wallet.Keystore))\n throw new DCPError(`This join Keystore: ${value.joinKeystore} must be an instance of wallet.Keystore`, 'DCPL-1104');\n else if (!value.joinKey && !value.joinKeystore)\n throw new DCPError('Compute group must 
contain a joinKey or a joinKeystore', 'DCPL-1105');\n }\n \n try\n {\n const cgPayload = await computeGroups.addJobToGroups(this.address, this.computeGroups);\n success = true; // To support older version of CG service where addJobToGroups had void/undefined return.\n if (cgPayload) success = cgPayload.success;\n debugging('dcp-client') && console.debug('job/index: addJobToGroups cgPayload:', cgPayload ? cgPayload : 'cgPayload is not defined; probably from legacy CG service.');\n }\n catch (e)\n {\n debugging('dcp-client') && console.debug('job/index: addJobToGroups threw exception:', e);\n success = false;\n }\n\n computeGroups.closeServiceConnection().catch((err) => {\n console.error('Warning: could not close compute groups service connection', err)\n });\n\n /* Could not put the job in any compute group, even though the user wanted it to run. Cancel the job. */\n if (!success)\n {\n await this.cancel('compute-groups::Unable to join any compute groups');\n throw new DCPError(`Access Denied::Failed to add job ${this.address} to any of the desired compute groups`, 'DCPL-1100');\n }\n }\n }\n \n /**\n * Takes result events as input, stores the result and fires off\n * events on the job handle as required. (result, duplicate-result)\n *\n * @param {object} ev - the event recieved from protocol.listen('/results/0xThisGenAdr')\n */\n async handleResult (ev)\n {\n if (this.results === null)\n // This should never happen - the onResult event should only be established/called\n // in addListeners which should also initialize the internal results array\n throw new Error('Job.onResult was invoked before initializing internal results');\n \n const { result: _result, time } = ev.result;\n debugging('dcp-client') && console.debug('handleResult', _result);\n let result = await fetchURI(_result);\n\n if (this.results.isAvailable(ev.sliceNumber))\n {\n const changed = JSON.stringify(this.results[ev.sliceNumber]) !== JSON.stringify(result);\n this.emit('duplicate-result', { sliceNumber: ev.sliceNumber, changed });\n }\n\n this.results.newResult(result, ev.sliceNumber);\n }\n \n /**\n * Receives status events from the scheduler, updates the local status object\n * and emits a 'status' event\n *\n * @param {object} ev - the status event received from\n * protocol.listen('/status/0xThisGenAdr')\n * @param {boolean} emitStatus - value indicating whether or not the status\n * event should be emitted\n */\n handleStatus ({ runStatus, total, distributed, computed }, emitStatus = true)\n {\n Object.assign(this.status, {\n runStatus,\n total,\n distributed,\n computed,\n });\n\n if (emitStatus)\n this.emit('status', { ...this.status, job: this.address });\n }\n \n /**\n * Sends a request to the scheduler to deploy the job.\n */\n async deployJob ()\n {\n var moduleDependencies; \n \n /* Send sideloader bundle to the package server */\n if (DCP_ENV.platform === 'nodejs' && this.dependencies.length)\n {\n try\n {\n let { pkg, unresolved } = await this._publishLocalModules();\n\n moduleDependencies = unresolved;\n if (pkg)\n moduleDependencies.push(pkg.name + '/' + sideloaderModuleIdentifier); \n }\n catch(error)\n {\n throw new DCPError(`Error trying to communicate with package manager server: ${error}`);\n }\n }\n else\n moduleDependencies = this.dependencies;\n \n this.readyStateChange('preauth');\n\n const adhocId = this.uuid.slice(this.uuid.length - 6, this.uuid.length);\n const schedId = await dcpConfig.scheduler.identity;\n // The following check is needed for when using dcp-rtlink and loading the config 
through source, instead of using the dcp-client bundle\n let schedIdAddress = schedId;\n if(schedId.address)\n schedIdAddress = schedId.address;\n this.identityKeystore = await wallet.getId();\n const preauthToken = await bankUtil.preAuthorizePayment(schedIdAddress, this.maxDeployPayment, this.paymentAccountKeystore);\n const { dataRange, dataValues, dataPattern, sliceCount } = marshalInputData(this.jobInputData);\n if(dataValues)\n this.dataValues = dataValues;\n\n this.readyStateChange('deploying');\n\n /* Payload format is documented in scheduler-v4/libexec/job-submit/operations/submit.js */\n const submitPayload = {\n owner: this.identityKeystore.address,\n paymentAccount: this.paymentAccountKeystore.address,\n priority: 0, // @nyi\n\n workFunctionURI: this.workFunctionURI,\n uuid: this.uuid,\n mvMultSlicePayment: Number(this.feeStructure.marketValue) || 0, // @todo: improve feeStructure internals to better reflect v4\n absoluteSlicePayment: Number(this.feeStructure.maxPerRequest) || 0,\n requirePath: this.requirePath,\n dependencies: moduleDependencies,\n requirements: this.requirements, /* capex */\n localExec: this.inLocalExec,\n force100pctCPUDensity: this.force100pctCPUDensity,\n estimationSlices: this.estimationSlices,\n greedyEstimation: this.greedyEstimation,\n workerConsole: this.workerConsole,\n isCI: this.isCI,\n\n description: this.public.description || 'Discreetly making the world smarter',\n name: this.public.name || 'Ad-Hoc Job' + adhocId,\n link: this.public.link || '',\n\n preauthToken, // XXXwg/er @todo: validate this after fleshing out the stub(s)\n\n resultStorageType: this.resultStorageType, // @todo: implement other result types\n resultStorageDetails: this.resultStorageDetails, // Content depends on resultStorageType\n resultStorageParams: this.resultStorageParams, // Post params for off-prem storage\n dataRange,\n dataPattern,\n sliceCount,\n marshaledDataValues: this.marshaledDataValues,\n rangeLength: this.rangeLength\n };\n \n // Check if dataRange or dataPattern input is already marshaled\n if (this.marshaledDataRange)\n submitPayload.dataRange = this.marshaledDataRange;\n \n /* Determine composition of argument set and build payload */\n if (this.jobArguments && !this.marshaledArguments)\n {\n submitPayload.marshaledArguments = [];\n if (this.jobArguments instanceof RemoteDataPattern) /* Not supported */\n throw new DCPError('Cannot use RemoteDataPattern as work function arguments', 'EBADARG')\n if (this.jobArguments instanceof RemoteDataSet) /* Entire set is RemoteDataSet */\n submitPayload.marshaledArguments = kvinMarshal(this.jobArguments.map(e => new URL(e)))\n else\n {\n for (let i = 0; i < this.jobArguments.length; i++)\n {\n if (this.jobArguments[i] instanceof URL)\n submitPayload.marshaledArguments.push(this.jobArguments[i])\n else\n {\n if (this.jobArguments[i] instanceof RemoteDataSet) /* Member of set is RemoteDataSet */\n this.jobArguments[i].forEach((e) => { submitPayload.marshaledArguments.push(new URL(e)) });\n else /* Actual Value */\n submitPayload.marshaledArguments.push(this.jobArguments[i]);\n }\n }\n submitPayload.marshaledArguments = kvinMarshal(submitPayload.marshaledArguments);\n }\n }\n else\n submitPayload.marshaledArguments = this.marshaledArguments;\n\n // XXXpfr Excellent tracing.\n if (debugging('dcp-client'))\n {\n const { dumpObject } = __webpack_require__(/*! 
dcp/utils */ \"./src/utils/index.js\");\n dumpObject(submitPayload, 'Submit: Job Index: submitPayload', 256);\n console.debug('Before Deploy', this.identityKeystore);\n }\n\n // Deploy the job! If we get an error, try again a few times until threshold of errors is reached, then actually throw it\n let deployed\n let deployAttempts = 0;\n while (deployAttempts++ < (dcpConfig.job.deployAttempts || 10))\n {\n try\n {\n deployed = await this.useDeployConnection('submit', submitPayload, this.identityKeystore);\n break;\n }\n catch (e)\n {\n if (deployAttempts < 10)\n debugging('dcp-client') && console.debug('Error when trying to deploy job, trying again', e);\n else\n throw e;\n }\n }\n\n if (!deployed.success)\n {\n // close all of the connections so that we don't cause node processes to hang.\n const handleErr = (e) => {\n console.error('Error while closing job connection:');\n console.error(e);\n };\n \n this.closeDeployConnection();\n this.eventSubscriber.close().catch(handleErr);\n computeGroups.closeServiceConnection().catch(handleErr);\n \n // Yes, it is possible for deployed or deployed.payload to be undefined.\n if (deployed.payload)\n {\n if (deployed.payload.code === 'ENOTFOUND')\n throw new DCPError(`Failed to submit job to scheduler. Account: ${submitPayload.paymentAccount} was not found or does not have sufficient balance (${deployed.payload.info.deployCost} DCCs needed to deploy this job)`, deployed.payload); \n throw new DCPError('Failed to submit job to scheduler', deployed.payload);\n }\n throw new DCPError('Failed to submit job to scheduler (no payload)', deployed ? deployed : '');\n }\n\n debugging('dcp-client') && console.debug('After Deploy', JSON.stringify(deployed));\n\n this.address = deployed.payload.job;\n this.deployCost = deployed.payload.deployCost;\n\n if (!this.status)\n this.status = {\n runStatus: null,\n total: 0,\n computed: 0,\n distributed: 0,\n };\n \n this.status.runStatus = deployed.payload.status;\n this.status.total = deployed.payload.lastSliceNumber;\n this.running = true;\n }\n \n /** close an open job to indicate we are done adding data so it is okay to finish\n * the job at the appropriate time\n */\n close ()\n {\n return this.useDeployConnection('closeJob', {\n job: this.id,\n });\n }\n \n /** Use the connection to job submit service. 
Will open a new connection if one does not exist,\n * and close the connection if it is idle for more than 10 seconds (tuneable).\n */\n useDeployConnection(...args)\n {\n if (!this.useDeployConnection.uses)\n this.useDeployConnection.uses = 0;\n this.useDeployConnection.uses++;\n if (!this.deployConnection)\n {\n this.deployConnection = new protocolV4.Connection(dcpConfig.scheduler.services.jobSubmit); \n this.deployConnection.on('close', () => { this.deployConnection = null; });\n }\n if (this.deployConnectionTimeout)\n clearTimeout(this.deployConnectionTimeout);\n \n const deployPromise = this.deployConnection.send(...args);\n \n deployPromise.finally(() => {\n if (this.useDeployConnection.uses === 1)\n {\n this.deployConnectionTimeout = setTimeout(() => {\n this.useDeployConnection.uses === 0 && this.deployConnection && this.deployConnection.close()\n }, (dcpConfig.job.deployCloseTimeout || 10 * 1000));\n if (!ON_BROWSER)\n this.deployConnectionTimeout.unref();\n }\n this.useDeployConnection.uses--;\n }); \n \n return deployPromise;\n }\n \n /**\n * Close the connection to the job submit (if it exists), and clear the close timeout (if needed).\n */\n closeDeployConnection()\n {\n if (this.deployConnection)\n this.deployConnection.close();\n if (this.deployConnectionTimeout)\n clearTimeout(this.deployConnectionTimeout);\n }\n}\n\n/** \n * Encode a value list for transmission to the job-submit daemon. This could be either job arguments\n * or the input set, if the input set was an Array-like object.\n *\n * @param {ArrayLike} valueList the list of values to encode\n * @returns Array of URIString\n */\nfunction encodeJobValueList(valueList, valueKind)\n{\n var list = [];\n \n /*\n * We need to handle several different styles of datasets, and create the output array accordingly.\n *\n * 1. instance of RemoteDataSet => arguments is a list of URI strings; fetch URIs before handing to work fn\n * 2. 
an Array-like objects => arguments handed directly to work fn - except instances of RemoteDatum\n * All values sent to the scheduler in payload are sent in their database representation (always as some kind of URI)\n */\n \n if (typeof valueList === 'undefined' || (typeof valueList === 'object' && valueList.length === 0))\n return list; /* empty set */\n\n if (typeof valueList !== 'object' || !valueList.hasOwnProperty('length'))\n throw new Error('value list must be an Array-like object');\n \n for (let i = 0; i < valueList.length; i++) /* Set is composed of values from potentially varying sources */\n {\n let value = valueList[i];\n if (value instanceof RemoteDataSet)\n value.forEach((el) => list.push(new URL(el)));\n else if (value instanceof RemoteDataPattern)\n {\n if (valueKind === jobValueKind.jobArguments)\n throw new DCPError('Cannot use RemoteDataPattern as work function arguments', 'EBADARG');\n else\n {\n let uri = valueList['pattern'];\n for (let sliceNum = 1; sliceNum <= valueList['sliceCount']; sliceNum++)\n list.push(new URL(uri.replace('{slice}', sliceNum)))\n }\n }\n else if (value instanceof RemoteValue)\n list.push(value.href);\n else\n list.push(value);\n } \n \n const encodedList = list.map(encodeJobValueUri)\n return encodedList;\n}\n\n/**\n * Depending on the shape of the job's data, resolve it into a RangeObject, a\n * Pattern, or a values array, and return it in the appropriate property.\n *\n * @param {any} data Job's input data\n * @return {MarshaledInputData} An object with one of the following properties set:\n * - dataValues: job input is an array of arbitrary values \n * - dataPattern: job input is a URI pattern \n * - dataRange: job input is a RangeObject (and/or friends)\n */\nfunction marshalInputData (data)\n{\n if (!(data instanceof Object || data instanceof SuperRangeObject))\n throw new TypeError(`Invalid job data type: ${typeof data}`);\n\n /**\n * @type MarshaledInputData\n */\n const marshalledInputData = {};\n\n // TODO(wesgarland): Make this more robust.\n if (data instanceof SuperRangeObject ||\n (data.hasOwnProperty('ranges') && data.ranges instanceof MultiRangeObject) ||\n (data.hasOwnProperty('start') && data.hasOwnProperty('end')))\n marshalledInputData.dataRange = data;\n else if (Array.isArray(data))\n marshalledInputData.dataValues = data;\n else if (data instanceof URL || data instanceof DcpURL)\n marshalledInputData.dataPattern = String(data);\n else if(data instanceof RemoteDataSet)\n marshalledInputData.dataValues = data.map(e => new URL(e));\n else if(data instanceof RemoteDataPattern)\n {\n marshalledInputData.dataPattern = data['pattern'];\n marshalledInputData.sliceCount = data['sliceCount'];\n }\n\n debugging('job') && console.debug('marshalledInputData:', marshalledInputData);\n return marshalledInputData;\n}\n\n/**\n * marshal the value using kvin or instance of the kvin (tunedKvin)\n * tunedKvin is defined if job.tuning.kvin is specified.\n *\n * @param {any} value \n * @return {object} A marshaled object\n * \n */\nfunction kvinMarshal (value) {\n if (tunedKvin)\n return tunedKvin.marshal(value);\n\n return kvin.marshal(value);\n}\n\n\n\nexports.Job = Job;\nexports.SlicePaymentOffer = SlicePaymentOffer;\nexports.ResultHandle = ResultHandle;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/job/index.js?");
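The module removed above (and re-added in 4.2.15 below) is the Compute API's Job Handle; its JSDoc documents the public surface: compute.do/compute.for produce a Job, requires() declares work-function dependencies, exec() deploys to the scheduler and resolves with the ResultHandle, localExec() runs the same job in a local worker, and the handle emits accepted, result, status, console, error, noProgress, and complete events. The following sketch shows typical usage based on that JSDoc; compute.for()'s exact signature is assumed, dcp-client is assumed to be initialized already, and the dependency name is purely illustrative.

// Sketch of Job-handle usage per the JSDoc above (assumptions noted inline).
const compute = require('dcp/compute');

async function run () {
  const job = compute.for([1, 2, 3, 4], function work (n) {
    progress();   // work functions must report progress or the slice is stopped (noProgress / ENOPROGRESS)
    return n * n;
  });

  job.public.name = 'square numbers';     // public-facing attributes, per this.public
  job.requires('aistensorflow/tfjs');     // declare a work-function dependency (illustrative name)

  job.on('accepted', ()   => console.log('deployed as', job.address));
  job.on('result',   (ev) => console.log('result event:', ev));   // payload shape documented in Job#result above
  job.on('error',    (ev) => console.error('slice', ev.sliceIndex, 'failed:', ev.message));

  // exec() deploys the job; the returned promise resolves with the ResultHandle
  // once the scheduler reports the job finished (the 'complete' event).
  const results = await job.exec(compute.marketValue);
  console.log('job complete; results handle:', results);
}

run().catch(console.error);

The same handle also supports job.localExec(cores, ...) for running slices in a local worker, job.escrow() and job.cancel() for funding and lifecycle control, and job.work.on(...) for custom events emitted by the work function, all documented in the block above.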
4202
+ eval("/**\n * @file job/index.js\n * @author Eddie Roosenmaallen, eddie@kingsds.network\n * Matthew Palma, mpalma@kingsds.network\n * Wes Garland, wes@kingsds.network\n * Paul, paul@kingsds.network\n * Ryan Saweczko, ryansaweczko@kingsds.network\n * @date November 2018\n * November 2018\n * February 2022\n * May 2022\n * Jun 2022\n *\n * This module implements the Compute API's Job Handle\n *\n */\n\n\nconst { BigNumber } = __webpack_require__(/*! bignumber.js */ \"./node_modules/bignumber.js/bignumber.js\");\nconst { v4: uuidv4 } = __webpack_require__(/*! uuid */ \"./node_modules/uuid/dist/esm-browser/index.js\");\nconst { EventEmitter, PropagatingEventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { RangeObject, MultiRangeObject, DistributionRange, SuperRangeObject, SparseRangeObject } = __webpack_require__(/*! dcp/dcp-client/range-object */ \"./src/dcp-client/range-object.js\");\nconst { fetchURI, encodeDataURI, createTempFile } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { encodeJobValueUri, RemoteValue } = __webpack_require__(/*! dcp/dcp-client/job-values */ \"./src/dcp-client/job-values.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst ClientModal = __webpack_require__(/*! dcp/dcp-client/client-modal */ \"./src/dcp-client/client-modal/index.js\");\nconst { Worker } = __webpack_require__(/*! dcp/dcp-client/worker */ \"./src/dcp-client/worker/index.js\");\nconst { RemoteDataSet } = __webpack_require__(/*! dcp/dcp-client/remote-data-set */ \"./src/dcp-client/remote-data-set.js\");\nconst { RemoteDataPattern } = __webpack_require__(/*! dcp/dcp-client/remote-data-pattern */ \"./src/dcp-client/remote-data-pattern.js\");\nconst { ResultHandle } = __webpack_require__(/*! ./result-handle */ \"./src/dcp-client/job/result-handle.js\");\nconst { SlicePaymentOffer } = __webpack_require__(/*! ./slice-payment-offer */ \"./src/dcp-client/job/slice-payment-offer.js\");\nconst { addSlices } = __webpack_require__(/*! ./upload-slices */ \"./src/dcp-client/job/upload-slices.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst computeGroups = __webpack_require__(/*! dcp/dcp-client/compute-groups */ \"./src/dcp-client/compute-groups/index.js\");\nconst schedulerConstants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { DEFAULT_REQUIREMENTS, removeBadRequirements } = __webpack_require__(/*! dcp/common/job-requirements-defaults */ \"./src/common/job-requirements-defaults.js\");\nconst { sliceStatus, jobValueKind } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { jobStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst bankUtil = __webpack_require__(/*! dcp/dcp-client/bank-util */ \"./src/dcp-client/bank-util.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp-client');\nconst kvin = __webpack_require__(/*! 
kvin */ \"./node_modules/kvin/kvin.js\");\nlet tunedKvin;\n\nconst log = (...args) => {\n if (debugging('job')) {\n console.debug('dcp-client:job', ...args);\n }\n};\n\nconst ON_BROWSER = DCP_ENV.isBrowserPlatform;\nconst sideloaderModuleIdentifier = 'sideloader-v1';\n\n\n/** @typedef {import('dcp/dcp-client/wallet/keystore').Keystore} Keystore */\n/** @typedef {import('dcp/dcp-client/range-object').RangeLike} RangeLike */\n/** @typedef {import('scheduler-v4/libexec/job-submit/operations/submit').MarshaledInputData} MarshaledInputData */\n\n/**\n * Ensure input data is an appropriate format\n * @param {RangeObject | DistributionRange | RemoteDataSet | Array | Iterable}\n * inputData - A URI-shaped string, a [Multi]RangeObject-constructing value, or\n * an array of slice data\n * @return {RangeObject | RangeLike | DistributionRange | RemoteDataSet | Array}\n * The coerced input in an appropriate format ([Multi]RangeObject,\n * DistributionRange, RemoteDataSet, or array)\n */\n const wrangleData = (inputData) => {\n\n if (RangeObject.isRangelike(inputData)) { return inputData }\n if (RangeObject.isRangeObject(inputData)) { return inputData }\n if (DistributionRange.isDistribution(inputData)) { return inputData }\n if (inputData instanceof SparseRangeObject) { return inputData }\n if (inputData instanceof MultiRangeObject) { return inputData }\n if (MultiRangeObject.isProtoMultiRangelike(inputData)) { return new MultiRangeObject(inputData) }\n if (RangeObject.isProtoRangelike(inputData)) { return new RangeObject(inputData) }\n if (DistributionRange.isProtoDistribution(inputData)) { return new DistributionRange(inputData) }\n if (RemoteDataSet.isRemoteDataSet(inputData)) { return inputData }\n if (RemoteDataPattern.isRemoteDataPattern(inputData)) { return inputData }\n\n return Array.isArray(inputData) ? inputData : [inputData];\n};\n\n/**\n * @classdesc The Compute API's Job Handle (see {@link https://docs.dcp.dev/specs/compute-api.html#job-handles|Compute API spec})\n * Job handles are objects which correspond to jobs. 
\n * They are created by some exports of the compute module, such as {@link module:dcp/compute.do|compute.do} and {@link module:dcp/compute.for|compute.for}.\n * @extends module:dcp/dcp-events.PropagatingEventEmitter\n * @hideconstructor\n * @access public\n */\nclass Job extends PropagatingEventEmitter\n{\n /**\n * Fired when the job is accepted by the scheduler on deploy.\n * \n * @event Job#accepted\n * @access public\n * @type {object}\n *//**\n * Fired when the job is cancelled.\n * \n * @event Job#cancel\n * @access public\n *//**\n * Fired when a result is returned.\n * \n * @event Job#result\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {string} task ID of the task (slice) the result came from\n * @property {number} sort The index of the slice\n * @property {object} result\n * @property {string} result.request\n * @property {*} result.result The value returned from the work function\n *//**\n * Fired when the result handle is modified, either when a new `result` event is fired or when the results are populated with `results.fetch()`\n * \n * @event Job#resultsUpdated\n * @access public\n *//**\n * Fired when the job has been completed.\n * \n * @event Job#complete\n * @access public\n * @type {ResultHandle}\n *//**\n * Fired when the job's status changes.\n * \n * @event Job#status\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} total Total number of slices in the job\n * @property {number} distributed Number of slices that have been distributed\n * @property {number} computed Number of slices that have completed execution (returned a result)\n * @property {string} runStatus Current runStatus of the job\n *//**\n * Fired when a slice throws an error.\n * \n * @event Job#error\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} sliceIndex Index of the slice that threw the error\n * @property {string} message The error message\n * @property {string} stack The error stacktrace\n * @property {string} name The error type name\n *//**\n * Fired when a slice uses one of the console log functions.\n * \n * @event Job#console\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} sliceIndex The index of the slice that produced this event\n * @property {string} level The log level, one of `debug`, `info`, `log`, `warn`, or `error`\n * @property {string} message The console log message\n *//**\n * Fired when a slice is stopped for not calling progress. 
Contains information about how long the slice ran for, and about the last reported progress calls.\n * \n * @event Job#noProgress\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} sliceIndex The index of the slice that failed due to no progress\n * @property {number} timestamp How long the slice ran before failing\n * @property {object} progressReports\n * @property {object} progressReports.last The last progress report received from the worker\n * @property {number} progressReports.last.timestamp Time since the start of the slice\n * @property {number} progressReports.last.progress Progress value reported\n * @property {*} progressReports.last.value The last value that was passed to the progress function\n * @property {number} progressReports.last.throttledReports Number of calls to progress that were throttled since the last report\n * @property {object} progressReports.lastUpdate The last determinate (update to the progress param) progress report received from the worker\n * @property {number} progressReports.lastUpdate.timestamp\n * @property {number} progressReports.lastUpdate.progress\n * @property {*} progressReports.lastUpdate.value\n * @property {number} progressReports.lastUpdate.throttledReports\n *//**\n @todo: is this in the spec? is there a no progress data? should there be?\n * Identical to `noProgress`, except that it also contains the data that the slice was executed with.\n * \n * @event Job#noProgressData\n * @access public\n * @type {object}\n * @property {*} data The data that the slice was executed with\n *//**\n * Fired when the job is paused due to running out of funds. The job can be resumed by escrowing more funds then resuming the job.\n * @todo: is this a thing, should it be a thing (the payload)\n * Event payload is the estimated funds required to complete the job\n * \n * @event Job#nofunds\n * @access public\n * @type {BigNumber}\n *//**\n * Fired when the job cannot be deployed due to no bank account / not enough balance to deploy the job\n * \n * @event Job#ENOFUNDS\n * @access public\n *//**\n * Fired when the job is cancelled due to the work function not calling the `progress` method frequently enough.\n * \n * @event Job#ENOPROGRESS\n * @access public\n *//**\n * The job was cancelled because scheduler has determined that individual tasks in this job exceed the maximum allowable execution time.\n * \n * @event Job#ESLICETOOSLOW\n * @access public\n *//**\n * Fired when the job is cancelled because too many work functions are terminating with uncaught exceptions.\n * \n * @event Job#ETOOMANYERRORS\n * @access public\n */\n\n /**\n * @form1 new Job('application_worker_address'[, data[, arguments]])\n * @form2a new Job('worker source'[, data[, arguments]])\n * @form2b new Job(worker_function[, data[, arguments]])\n */\n constructor ()\n {\n super('Job');\n if (typeof arguments[0] === 'function')\n arguments[0] = arguments[0].toString();\n\n if (typeof arguments[0] === 'string')\n {\n const { encodeDataURI } = __webpack_require__(/*! 
dcp/utils */ \"./src/utils/index.js\");\n this.workFunctionURI = encodeDataURI(arguments[0], 'application/javascript');\n } \n else if (DcpURL.isURL(arguments[0]))\n this.workFunctionURI = arguments[0].href;\n\n this.jobInputData = wrangleData(arguments[1] || []);\n this.jobArguments = wrangleData(arguments[2] || []);\n \n log('num wrangledInputData:', this.jobInputData.length);\n log('num wrangledArguments:', this.jobArguments.length);\n\n this.initEventSystems();\n\n /**\n * An object describing the cost the user believes each the average slice will incur, in terms of CPU/GPU and I/O.\n * If defined, this object is used to provide initial scheduling hints and to calculate escrow amounts.\n *\n * @type {object}\n * @access public\n */\n this.initialSliceProfile = undefined;\n \n // The max value that the client is willing to spend to deploy\n // (list on the scheduler, doesn't include compute payment)\n // maxDeployPayment is the max the user is willing to pay to DCP (as a\n // Hold), in addition to the per-slice offer and associated scrape.\n // Currently calculated as `deployCost = costPerKB *\n // (JSON.stringify(job).length / 1024) // 1e-9 per kb`\n // @todo: figure this out / er nov 2018\n this.maxDeployPayment = 1;\n\n /**\n * An object describing the requirements that workers must have to be eligible for this job. The default values are set in the job-submitter, and only the client specified\n * requirements are sent over the wire. See {@link https://docs.dcp.dev/specs/compute-api.html#requirements-objects|Requirements Objects}.\n *\n * @type {object}\n * @access public\n */\n this.requirements = {};\n\n /**\n * A place to store public-facing attributes of the job. Anything stored on this object will be available inside the work \n * function (see {@link module:dcp/compute~sandboxEnv.work}). The properties documented here may be used by workers to display what jobs are currently being \n * worked on.\n * @access public\n * @property {string} name Public-facing name of this job.\n * @property {string} description Public-facing description for this job.\n * @property {string} link Public-facing link to external resource about this job.\n */\n this.public = {\n name: null,\n description: null,\n link: null,\n };\n \n /**\n * A cryptographic receipt indicating deployment of the job on the scheduler\n * @type {object}\n * @access public\n */\n this.receipt = null;\n \n /**\n * a SliceProfile object which contains the average costs for the slices which have been computed to date.\n * Until the first result is returned, this property is undefined.\n * @type {object}\n * @access public\n */\n this.meanSliceProfile = null;\n \n /**\n * A number (can be null, undefined, or infinity) describing the estimationSlicesRemaining in the jpd (dcp-2593)\n * @type {number}\n * @access public\n */\n this.estimationSlices = undefined;\n /**\n * When true, allows a job in estimation to have requestTask return multiple estimation slices.\n * This flag applies independent of infinite estimation, viz., this.estimationSlices === null .\n * @type {boolean}\n * @access public\n */\n this.greedyEstimation = false;\n /**\n * tunable parameters per job\n * @access public\n * @param {object} tuning \n * @param {string} tuning.kvin Encode the TypedArray into a string, trying multiple methods to determine optimum \n * size/performance. 
The this.tune variable affects the behavior of this code this:\n * @param {boolean} speed If true, only do naive encoding: floats get represented as byte-per-digit strings\n * @param {boolean} size If true, try the naive, ab8, and ab16 encodings; pick the smallest\n * If both are false try the naive encoding if under typedArrayPackThreshold and use if smaller\n * than ab8; otherwise, use ab8\n */\n this.tuning = {\n kvin: {\n size: false,\n speed: false,\n },\n }\n /* For API interface to end-users only */\n Object.defineProperty(this, 'id', {\n get: () => this.address,\n set: (id) => { this.address = id }\n });\n \n this.uuid = uuidv4(); /** @see {@link https://kingsds.atlassian.net/browse/DCP-1475?atlOrigin=eyJpIjoiNzg3NmEzOWE0OWI4NGZkNmI5NjU0MWNmZGY2OTYzZDUiLCJwIjoiaiJ9|Jira Issue} */\n this.dependencies = []; /* dependencies of the work function */\n this.requirePath = []; /* require path for dependencies */\n this.modulePath = []; /* path to module that invoked .exec() for job */\n this.connected = false; /* true when exec or resume called */\n this.results = new ResultHandle(this); /* result handle */\n this.collateResults = true; /* option to receive results as they are computed & ensure all are received on finish */\n this.contextId = null; /* optional string which is used to indicate to caching mechanisms different keystores with same name */ \n this.force100pctCPUDensity = false; /* tell scheduler to assume this job uses 100% cpu density */\n this.workerConsole = false; /* tell workers to log more information about failures in the evaluator this job causes */\n this.address = null; /* job address, created by scheduler during exec call. */\n this.paymentAccountKeystore = null; /* keystore for payment for job to come from */\n this.status = { /* job status details */\n runStatus: null,\n total: null,\n distributed: null,\n computed: null\n };\n \n // service locations\n this.scheduler = dcpConfig.scheduler.services.jobSubmit.location;\n this.bank = dcpConfig.bank.services.bankTeller.location;\n \n // Compute groups. Add to public compute group by default\n this.computeGroups = [ Object.assign({}, schedulerConstants.computeGroups.public) ];\n \n // Update the ready state as we go through job deployment\n this.readyState = sliceStatus.new;\n const that = this;\n this.readyStateChange = function job$$readyStateChange (readyState)\n {\n that.readyState = readyState;\n that.emit('readyStateChange', that.readyState);\n }\n }\n \n /**\n * Initialize the various event systems the job handle requires. These include:\n * - an internal event emitter (this.ee)\n * - an event emitter for any events emitted on `work.emit` within work functions (this.work)\n * - an event subscriber to subscribe (to receive) events from the scheduler (this.eventSubscriber)\n */\n initEventSystems ()\n {\n // Handle the various event-related things required in the constructor\n\n // Internal event emitter for events within job handle\n this.ee = new EventEmitter('Job Internal');\n\n /**\n * An EventEmitter for custom events dispatched by the work function.\n * @type {module:dcp/dcp-events.EventEmitter}\n * @access public\n * @example\n * // in work function\n * work.emit('myEventName', 1, [2], \"three\");\n * // client-side\n * job.work.on('myEventName', (num, arr, string) => { });\n */\n this.work = new EventEmitter('job.work');\n this.listenForCustomEvents = false;\n\n // Initialize the eventSubscriber so each job has unique eventSubscriber\n this.eventSubscriber = new ((__webpack_require__(/*! 
dcp/events/event-subscriber */ \"./src/events/event-subscriber.js\").EventSubscriber))(this);\n\n // Some events from the event subscriber can't be emitted immediately upon receipt without having \n // weird/wrong output due to things like serialization. We allow interceptors in the event subscriber\n // to handle this.\n const that = this\n var lastConsoleEv;\n var sameCounter = 1;\n const parseConsole = function deserializeConsoleMessage(ev) {\n if (tunedKvin)\n ev.message = tunedKvin.unmarshal(ev.message);\n else \n ev.message = kvin.unmarshal(ev.message);\n \n if (lastConsoleEv && ev.message[0] === lastConsoleEv.message[0] && ev.sliceNumber === lastConsoleEv.sliceNumber && ev.level === lastConsoleEv.level)\n ev.same = ++sameCounter;\n else\n sameCounter = 1;\n lastConsoleEv = ev;\n \n /* if we have the same message being logged (same sliceNumber, message, log level), the console event object will have the sole property same, nothing else */\n if (ev.same > 1)\n that.emit('console', { same: ev.same });\n else\n {\n delete ev.same;\n that.emit('console', ev);\n }\n }\n\n this.eventIntercepts = {\n result: (ev) => this.handleResult(ev),\n status: (ev) => this.handleStatus(ev),\n cancel: (ev) => this.ee.emit('stopped', ev),\n custom: (ev) => this.work.emit(ev.customEvent, ev),\n console: parseConsole,\n };\n \n this.eventTypes = (__webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\").eventTypes);\n\n this.work.on('newListener', (evt) => {\n this.listenForCustomEvents = true;\n });\n this.desiredEvents = []\n this.on('newListener', (evt) => {\n if (!this.connected && evt !== 'newListener')\n this.desiredEvents.push(evt);\n if (evt === 'cancel')\n this.listeningForCancel = true;\n });\n }\n \n /** \n * Cancel the job\n * @access public\n * @param {string} reason If provided, will be sent to client\n */\n async cancel (reason = undefined)\n {\n const response = await this.useDeployConnection('cancelJob', {\n job: this.address,\n owner: this.paymentAccountKeystore.address,\n reason,\n }, this.paymentAccountKeystore);\n\n return response.payload;\n }\n\n /** \n * Resume this job\n * @access public\n */\n async resume ()\n {\n const response = await this.schedulerConnection.send('resumeJob', {\n job: this.address,\n owner: this.paymentAccountKeystore.address,\n }, this.paymentAccountKeystore);\n\n return response.payload;\n }\n\n /**\n * Helper function for retrieving info about the job. The job must have already been deployed.\n * An alias for {@link module:dcp/compute.getJobInfo}.\n * @access public\n */\n getJobInfo ()\n {\n return (__webpack_require__(/*! ../compute */ \"./src/dcp-client/compute.js\").compute.getJobInfo)(this.address);\n }\n \n /**\n * Helper function for retrieving info about the job's slices. The job must have already been deployed.\n * An alias for {@link module:dcp/compute.getSliceInfo}.\n * @access public\n */\n getSliceInfo ()\n {\n return (__webpack_require__(/*! ../compute */ \"./src/dcp-client/compute.js\").compute.getSliceInfo)(this.address);\n }\n \n /** Escrow additional funds for this job\n * @access public\n * @param {number|BigNumber} fundsRequired - A number or BigNumber instance representing the funds to escrow for this job\n */\n async escrow (fundsRequired)\n {\n if ((typeof fundsRequired !== 'number' && !BigNumber.isBigNumber(fundsRequired))\n || fundsRequired <= 0 || !Number.isFinite(fundsRequired) || Number.isNaN(fundsRequired))\n throw new Error(`Job.escrow: fundsRequired must be a number greater than zero. 
(not ${fundsRequired})`);\n\n const bankConnection = new protocolV4.Connection(dcpConfig.bank.services.bankTeller);\n\n /*\n * escrow has been broken for an unknown amount of time. `feeStructureId` is not defined anywhere in the job class, and hasn't\n * for a period of time. When fixed, `this[INTERNAL_SYMBOL].payloadDetails.feeStructureId` will likely become just `this.feeStructureId`,\n * but it's being left alone until someone spends the time to fix escrow. / rs Jul 2022\n */\n const response = await bankConnection.send('embiggenFeeStructure', {\n feeStructureAddress: this[INTERNAL_SYMBOL].payloadDetails.feeStructureId,\n additionalEscrow: new BigNumber(fundsRequired),\n fromAddress: this.paymentAccountKeystore.address,\n }, this.paymentAccountKeystore);\n\n bankConnection.close();\n const escrowReceipt = response.payload;\n return escrowReceipt;\n }\n\n /**\n * create bundles for local dependencies\n */\n _pack ()\n {\n var retval = (__webpack_require__(/*! ./node-modules */ \"./src/dcp-client/job/node-modules.js\").createModuleBundle)(this.dependencies);\n return retval;\n }\n\n /** \n * Collect all of the dependencies together, throw them into a BravoJS\n * module which sideloads them as a side effect of declaration, and transmit\n * them to the package manager. Then we return the package descriptor object,\n * which is guaranteed to have only one file in it.\n *\n * @returns {object} with properties name and files[0]\n */\n async _publishLocalModules()\n {\n const dcpPublish = __webpack_require__(/*! dcp/common/dcp-publish */ \"./src/common/dcp-publish.js\");\n \n const { tempFile, hash, unresolved } = await this._pack();\n\n if (!tempFile) {\n return { unresolved };\n }\n\n const sideloaderFilename = tempFile.filename;\n const pkg = {\n name: `dcp-pkg-v1-localhost-${hash.toString('hex')}`,\n version: '1.0.0',\n files: {\n [sideloaderFilename]: `${sideloaderModuleIdentifier}.js`,\n },\n }\n\n await dcpPublish.publish(pkg);\n tempFile.remove();\n\n return { pkg, unresolved };\n }\n \n /**\n * This function specifies a module dependency (when the argument is a string)\n * or a list of dependencies (when the argument is an array) of the work\n * function. This function can be invoked multiple times before deployment.\n * @param {string | string[]} modulePaths - A string or array describing one\n * or more dependencies of the job.\n * @access public\n */\n requires (modulePaths)\n {\n if (typeof modulePaths !== 'string' && (!Array.isArray(modulePaths) || modulePaths.some((modulePath) => typeof modulePath !== 'string')))\n throw new TypeError('The argument to dependencies is not a string or an array of strings');\n else if (modulePaths.length === 0)\n throw new RangeError('The argument to dependencies cannot be an empty string or array');\n else if (Array.isArray(modulePaths) && modulePaths.some((modulePath) => modulePath.length === 0))\n throw new RangeError('The argument to dependencies cannot be an array containing an empty string');\n\n if (!Array.isArray(modulePaths))\n modulePaths = [modulePaths];\n\n for (const modulePath of modulePaths)\n {\n if (modulePath[0] !== '.' 
&& modulePath.indexOf('/') !== -1)\n {\n const modulePrefixRegEx = /^(.*)\\/.*?$/;\n const [, modulePrefix] = modulePath.match(modulePrefixRegEx);\n if (modulePrefix && this.requirePath.indexOf(modulePrefix) === -1)\n this.requirePath.push(modulePrefix);\n }\n this.dependencies.push(modulePath);\n }\n }\n \n /** Set the account upon which funds will be drawn to pay for the job.\n * @param {module:dcp/wallet.AuthKeystore} keystore A keystore that representa a bank account.\n * @access public\n */\n setPaymentAccountKeystore (keystore)\n {\n if (this.address)\n {\n if (!keystore.address.eq(this.paymentAccountKeystore))\n {\n let message = 'Cannot change payment account after job has been deployed';\n this.emit('EPERM', message);\n throw new Error(`EPERM: ${message}`);\n }\n }\n \n if (!(keystore instanceof wallet.Keystore))\n throw new Error('Not an instance of Keystore: ' + keystore.toString());\n this.paymentAccountKeystore = keystore;\n }\n \n /** Set the slice payment offer. This is equivalent to the first argument to exec.\n * @param {number} slicePaymentOffer - The number of DCC the user is willing to pay to compute one slice of this job\n */\n setSlicePaymentOffer (slicePaymentOffer)\n {\n this.slicePaymentOffer = new SlicePaymentOffer(slicePaymentOffer);\n }\n \n \n /**\n * @param {URL|DcpURL} locationUrl - A URL object\n * @param {object} postParams - An object with any parameters that a user would like to be passed to a \n * remote result location. This object is capable of carry API keys for S3, \n * DropBox, etc. These parameters are passed as parameters in an \n * application/x-www-form-urlencoded request.\n */\n setResultStorage (locationUrl, postParams)\n {\n if (locationUrl instanceof URL || locationUrl instanceof DcpURL)\n this.resultStorageDetails = locationUrl;\n else\n throw new Error('Not an instance of a DCP URL: ' + locationUrl);\n \n\n // resultStorageParams contains any post params required for off-prem storage\n if (typeof postParams !== 'undefined' && typeof postParams === 'object' )\n this.resultStorageParams = postParams;\n else\n throw new Error('Not an instance of a object: ' + postParams);\n\n // Some type of object here\n this.resultStorageType = 'pattern';\n }\n \n /**\n * This function is identical to exec, except that the job is executed locally\n * in the client.\n * @async\n * @param {number} cores - the number of local cores in which to execute the job.\n * @param {...any} args - The remaining arguments are identical to the arguments of exec\n * @return {Promise<ResultHandle>} - resolves with the results of the job, rejects on an error\n * @access public\n */\n async localExec (cores = 1, ...args)\n {\n this.inLocalExec = true;\n this.estimationSlices = 0;\n this.greedyEstimation = false;\n this.isCI = false;\n\n let worker;\n this.on('accepted', () => {\n // Start a worker for this job\n worker = new Worker({\n localExec: true,\n jobAddresses: [this.address],\n allowedOrigins: this.localExecAllowedFiles,\n paymentAddress: this.paymentAccountKeystore.address,\n identity: this.identityKeystore,\n maxWorkingSandboxes: cores,\n sandboxOptions: {\n ignoreNoProgress: true,\n SandboxConstructor: (DCP_ENV.platform === 'nodejs'\n && (__webpack_require__(/*! 
../worker/evaluators */ \"./src/dcp-client/worker/evaluators/index.js\").nodeEvaluatorFactory)())\n },\n });\n \n worker.on('error', (error) => {\n console.error('Worker Error:', error);\n });\n \n worker.on('warning', (warning) => {\n console.warn('Worker Warning:', warning);\n });\n\n worker.start().catch((e) => {\n console.error('Failed to start worker for localExec:');\n console.error(e.message);\n });\n });\n \n if (DCP_ENV.platform === 'nodejs')\n {\n this.localExecAllowedFiles =\n {\n any: [],\n fetchData: [],\n fetchWorkFunctions: [],\n fetchArguments: [],\n sendResults: [],\n };\n \n // Determine type of input data\n const { dataRange, dataValues, dataPattern, sliceCount } = marshalInputData(this.jobInputData);\n \n const inputSetFiles = [];\n \n let inputSetURIs = [];\n let dataSet;\n \n if (dataValues)\n {\n for (let i = 0; i < dataValues.length; i++)\n {\n if (!(dataValues[i] instanceof URL))\n {\n let marshaledInputValue = kvinMarshal(dataValues[i]);\n let inputDataFile = createTempFile('dcp-localExec-sliceData-XXXXXXXXX', 'kvin');\n inputDataFile.writeSync(JSON.stringify(marshaledInputValue));\n inputSetFiles.push(inputDataFile);\n inputSetURIs.push(new URL('file://' + inputDataFile.filename));\n }\n else\n {\n inputSetURIs.push(dataValues[i]);\n this.localExecAllowedFiles['fetchData'].push(dataValues[i].origin);\n }\n }\n dataSet = new RemoteDataSet(inputSetURIs);\n if (dataSet.length > 0)\n this.marshaledDataValues = dataSet;\n }\n if (dataRange)\n {\n inputSetFiles.push(createTempFile('dcp-localExec-sliceData-XXXXXXXXX', 'json'));\n let marshaledInputSet = JSON.stringify(dataRange);\n inputSetFiles[0].writeSync(marshaledInputSet)\n inputSetURIs.push(new URL('file://' + inputSetFiles[0].filename));\n dataSet = new RemoteDataSet(inputSetURIs);\n this.marshaledDataRange = dataSet;\n this.rangeLength = dataRange.length;\n }\n \n if (dataPattern)\n {\n let uri = dataPattern;\n for (let i = 0; i < sliceCount; i++)\n {\n let sliceNum = i+1;\n let newURI = new URL(uri.replace('{slice}', sliceNum.toString()));\n this.localExecAllowedFiles['fetchData'].push(newURI.origin);\n }\n }\n \n // For allowed origins of the localexec worker. 
Only allow the origins (files in this case) in this list.\n for (let i = 0; i < inputSetFiles.length; i++)\n this.localExecAllowedFiles['fetchData'].push(inputSetFiles[i].filename);\n \n // Save work function to disk if work function starts with data (ie not remote)\n if (this.workFunctionURI.startsWith('data:'))\n {\n const workFunctionFile = createTempFile('dcp-localExec-workFunction-XXXXXXXXX', 'js');\n const workFunction = await fetchURI(this.workFunctionURI);\n workFunctionFile.writeSync(workFunction);\n \n const workFunctionFileURL = new URL('file://' + workFunctionFile);\n this.workFunctionURI = workFunctionFileURL.href;\n this.localExecAllowedFiles['fetchWorkFunctions'].push(workFunctionFile.filename);\n }\n else\n this.localExecAllowedFiles['fetchWorkFunctions'].push(new URL(this.workFunctionURI).origin);\n \n let encodedJobArgumentUris = [];\n if (this.jobArguments)\n {\n if (this.jobArguments instanceof RemoteDataPattern) /* Not supported */\n throw new DCPError('Cannot use RemoteDataPattern as work function arguments', 'EBADARG')\n if (this.jobArguments instanceof RemoteDataSet) /* Entire set is RemoteDataSet */\n {\n this.jobArguments.forEach((e) =>\n {\n this.localExecAllowedFiles['fetchArguments'].push(new URL(e).origin);\n encodedJobArgumentUris.push(encodeJobValueUri(new URL(e)));\n });\n }\n else\n {\n for (let i = 0; i < this.jobArguments.length; i++)\n {\n if (this.jobArguments[i] instanceof URL)\n {\n this.localExecAllowedFiles['fetchArguments'].push(this.jobArguments[i].origin);\n encodedJobArgumentUris.push(encodeJobValueUri(this.jobArguments[i]));\n }\n else\n {\n if (this.jobArguments[i] instanceof RemoteDataSet) /* Member of set is RemoteDataSet */\n {\n this.jobArguments[i].forEach((e) =>\n {\n this.localExecAllowedFiles['fetchArguments'].push(new URL(e).origin);\n encodedJobArgumentUris.push(encodeJobValueUri(new URL(e)));\n });\n }\n else /* Actual Value */\n {\n const localArgFile = createTempFile(`dcp-localExec-argument-${i}-XXXXXXXXX`, 'kvin');\n localArgFile.writeSync(JSON.stringify(kvinMarshal(this.jobArguments[i])));\n this.localExecAllowedFiles['fetchArguments'].push(localArgFile.filename);\n encodedJobArgumentUris.push(encodeJobValueUri(new URL('file://' + localArgFile.filename)));\n }\n }\n } \n }\n }\n this.marshaledArguments = kvinMarshal(encodedJobArgumentUris);\n }\n \n return this.exec(...args).finally(() => {\n if (worker) {\n setTimeout(() => {\n // stop the worker\n worker.stop(true);\n }, 3000);\n }\n });\n }\n\n /**\n * Deploys the job to the scheduler.\n * @param {number | object} [slicePaymentOffer=compute.marketValue] - Amount\n * in DCC that the user is willing to pay per slice.\n * @param {Keystore} [paymentAccountKeystore=wallet.get] - An instance of the\n * Wallet API Keystore that's used as the payment account when executing the\n * job.\n * @param {object} [initialSliceProfile] - An object describing the cost the\n * user believes the average slice will incur.\n * @access public\n * @emits Job#accepted\n */\n async exec (slicePaymentOffer = (__webpack_require__(/*! 
../compute */ \"./src/dcp-client/compute.js\").compute.marketValue), paymentAccountKeystore, initialSliceProfile)\n {\n if (this.connected)\n throw new Error('Exec called twice on the same job handle.');\n \n if (this.estimationSlices === Infinity)\n this.estimationSlices = null;\n else if (this.estimationSlices < 0)\n throw new Error('Incorrect value for estimationSlices; it can be an integer or Infinity!');\n \n if (this.tuning.kvin.speed || this.tuning.kvin.size)\n {\n tunedKvin = new kvin.KVIN();\n tunedKvin.tune = 'size';\n if(this.tuning.kvin.speed)\n tunedKvin.tune = 'speed';\n // If both size and speed are true, kvin will optimize based on speed\n if(this.tuning.kvin.speed && this.tuning.kvin.size)\n console.log('Slices and arguments are being uploaded with speed optimization.');\n }\n \n /* slight optimization to ensure we don't send requirements that will be ignored in the job submitter. Make a copy of the client specified requirements for this so that we dont magically override something they manually set */\n const _DEFAULT_REQUIREMENTS = JSON.parse(JSON.stringify(DEFAULT_REQUIREMENTS));\n removeBadRequirements(this.requirements, _DEFAULT_REQUIREMENTS);\n \n this.readyStateChange('exec');\n if ((typeof slicePaymentOffer === 'number') || (typeof slicePaymentOffer === 'object')\n || ((this.slicePaymentOffer === null || this.slicePaymentOffer === undefined) && typeof slicePaymentOffer === 'function'))\n this.setSlicePaymentOffer(slicePaymentOffer);\n if (typeof initialSliceProfile !== 'undefined')\n this.initialSliceProfile = initialSliceProfile;\n \n if (typeof paymentAccountKeystore !== 'undefined')\n {\n /** XXX @todo deprecate use of ethereum wallet objects */\n if (typeof paymentAccountKeystore === 'object' && paymentAccountKeystore.hasOwnProperty('_privKey'))\n {\n console.warn('* deprecated API * - job.exec invoked with ethereum wallet object as paymentAccountKeystore') /* /wg oct 2019 */\n paymentAccountKeystore = paymentAccountKeystore._privKey\n }\n /** XXX @todo deprecate use of private keys */\n if (wallet.isPrivateKey(paymentAccountKeystore))\n {\n console.warn('* deprecated API * - job.exec invoked with private key as paymentAccountKeystore') /* /wg dec 2019 */\n paymentAccountKeystore = await new wallet.Keystore(paymentAccountKeystore, '');\n }\n\n this.setPaymentAccountKeystore(paymentAccountKeystore)\n }\n \n if (this.paymentAccountKeystore)\n // Throws if they fail to unlock, we allow this since the keystore was set programmatically. \n await this.paymentAccountKeystore.unlock(undefined, parseFloat(dcpConfig.job.maxDeployTime));\n else\n {\n // If not set programmatically, we keep trying to get an unlocked keystore ... forever.\n let locked = true;\n let safety = 0; // no while loop shall go unguarded\n let ks;\n do\n {\n ks = null;\n // custom message for the browser modal to denote the purpose of keystore submission\n let msg = `This application is requesting a keystore file to execute ${this.public.description || this.public.name || 'this job'}. Please upload the corresponding keystore file. 
If you upload a keystore file which has been encrypted with a passphrase, the application will not be able to use it until it prompts for a passphrase and you enter it.`;\n try\n {\n ks = await wallet.get({ contextId: this.contextId, jobName: this.public.name, msg});\n }\n catch (e)\n {\n if (e.code !== ClientModal.CancelErrorCode) throw e;\n };\n if (ks)\n {\n try\n {\n await ks.unlock(undefined, parseFloat(dcpConfig.job.maxDeployTime));\n locked = false;\n }\n catch (e)\n {\n // prompt user again if user enters password incorrectly, exit modal otherwise\n if (e.code !== wallet.unlockFailErrorCode) throw e;\n }\n }\n if (safety++ > 1000) throw new Error('EINFINITY: job.exec tried wallet.get more than 1000 times.')\n } while (locked);\n this.setPaymentAccountKeystore(ks)\n }\n \n // We either have a valid keystore + password or we have rejected by this point.\n if (!this.slicePaymentOffer)\n throw new Error('A payment profile must be assigned before executing the job');\n else\n this.feeStructure = this.slicePaymentOffer.toFeeStructure(this.jobInputData.length);\n\n if (!this.address)\n {\n try\n {\n this.readyStateChange('init');\n await this.deployJob();\n const listenersPromise = this.addInitialEvents();\n const computeGroupsPromise = this.joinComputeGroups();\n let uploadSlicePromise;\n // if job data is by value then upload data to the scheduler in a staggered fashion\n if (Array.isArray(this.dataValues) && !this.marshaledDataValues)\n {\n this.readyStateChange('uploading');\n uploadSlicePromise = addSlices(this.dataValues, this.address, tunedKvin)\n .then(() => {\n debugging('slice-upload') && console.info(`970: slice data uploaded, closing job...`);\n return this.close();\n });\n }\n \n // await all promises for operations that can be done after the job is deployed\n await Promise.all([listenersPromise, computeGroupsPromise, uploadSlicePromise]);\n \n this.readyStateChange('deployed');\n this.emit('accepted', { job: this });\n }\n catch (error)\n {\n if (ON_BROWSER)\n await ClientModal.alert(error, { title: 'Failed to deploy job!' });\n throw error;\n }\n }\n else\n {\n // reconnecting to an old job\n await this.addInitialEvents();\n this.readyStateChange('reconnected');\n }\n\n this.connected = true;\n\n return new Promise((resolve, reject) => {\n const onComplete = () => resolve(this.results);\n const onCancel = (event) => {\n /**\n * FIXME(DCP-1150): Remove this since normal cancel event is noisy\n * enough to not need stopped event too.\n */\n if (ON_BROWSER && !this.listeningForCancel)\n ClientModal.alert('More details in console...', { title: 'Job Canceled' });\n this.emit('cancel', event);\n\n let errorMsg = event.reason;\n if (event.error && event.error !== 'undefined')\n errorMsg = errorMsg +`\\n Recent error message: ${event.error.message}`\n \n reject(new DCPError(errorMsg, event.code));\n };\n\n this.ee.once('stopped', async (stopEvent) => {\n // There is a chance the result submitter will emit finished > 1 time. 
Only handle it once.\n if (this.receivedStop)\n return;\n this.receivedStop = true;\n this.emit('stopped', stopEvent.runStatus);\n switch (stopEvent.runStatus) {\n case jobStatus.finished:\n if (this.collateResults)\n {\n let report = await this.getJobInfo();\n let allSliceNumbers = Array.from(Array(report.totalSlices)).map((e,i)=>i+1);\n let remainSliceNumbers = allSliceNumbers.filter((e) => !this.results.isAvailable(e));\n\n if (remainSliceNumbers.length)\n {\n const promises = remainSliceNumbers.map(sliceNumber => this.results.fetch([sliceNumber], true));\n await Promise.all(promises);\n }\n }\n\n this.emit('complete', this.results);\n onComplete();\n break;\n case jobStatus.cancelled:\n onCancel(stopEvent);\n break;\n default:\n /**\n * Asserting that we should never be able to reach here. The only\n * scheduler events that should trigger the Job's 'stopped' event\n * are jobStatus.cancelled, jobStatus.finished, and sliceStatus.paused.\n */\n reject(new Error(`Unknown event \"${stopEvent.runStatus}\" caused the job to be stopped.`));\n break;\n }\n });\n\n }).finally(() => {\n const handleErr = (e) => {\n console.error('Error while closing job connection:');\n console.error(e);\n }\n\n // Create an async IIFE to not block the promise chain\n (async () => {\n // delay to let last few events to be received\n await new Promise((resolve) => setTimeout(resolve, 1000));\n \n // close all of the connections so that we don't cause node processes to hang.\n this.closeDeployConnection();\n await this.eventSubscriber.close().catch(handleErr);\n await computeGroups.closeServiceConnection().catch((err) => {\n console.error('Warning: could not close compute groups service connection', err);\n });\n })();\n });\n }\n \n /**\n * job.addListeners(): Private function used to set up event listeners to the scheduler\n * before deploying the job.\n */\n async addInitialEvents ()\n {\n this.readyStateChange('listeners');\n\n // This is important: We need to flush the task queue before adding listeners\n // because we queue pending listeners by listening to the newListener event (in the constructor).\n // If we don't flush here, then the newListener events may fire after this function has run,\n // and the events won't be properly set up.\n await new Promise(resolve => setTimeout(resolve, 0));\n\n // @todo: Listen for an estimated cost, probably emit an \"estimated\" event when it comes in?\n // also @todo: Do the estimation task(s) on the scheduler and send an \"estimated\" event\n\n // Always listen to the stop event. 
It will resolve the work function promise, so is always needed.\n this.on('stop', (ev) => {this.ee.emit('stopped', ev)});\n\n // Connect listeners that were set up before exec\n if (this.desiredEvents.includes('result'))\n this.listeningForResults = true;\n await this.subscribeNewEvents(this.desiredEvents);\n\n // Connect listeners that are set up after exec\n this.on('newListener', (evt) => {\n if (evt === 'newListener' || this.desiredEvents.includes(evt))\n return;\n this.subscribeNewEvents([evt]);\n });\n \n // automatically add a listener for results if collateResults is on\n if (this.collateResults && !this.listeningForResults)\n this.on('result', () => {});\n\n debugging('dcp-client') && console.debug('subscribedEvents', this.desiredEvents);\n\n // If we have listeners for job.work, subscribe to custom events\n if (this.listenForCustomEvents)\n await this.subscribeCustomEvents();\n // Connect work event listeners that are set up after exec\n else\n this.work.on('newListener', () => this.subscribeCustomEvents());\n }\n \n /**\n * Subscribes to either reliable events or optional events. It is assumed that\n * any call to this function will include only new events.\n * @param {string[]} events \n */\n async subscribeNewEvents (events)\n {\n const reliableEvents = [];\n const optionalEvents = [];\n for (let eventName of events)\n {\n eventName = eventName.toLowerCase();\n if (this.eventTypes[eventName] && this.eventTypes[eventName].reliable)\n reliableEvents.push(eventName);\n else if (this.eventTypes[eventName] && !this.eventTypes[eventName].reliable)\n optionalEvents.push(eventName);\n else\n debugging('dcp-client') && console.debug(`Job handler has listener ${eventName} which isn't an event-router event.`);\n }\n if (debugging('dcp-client'))\n {\n console.debug('reliableEvents', reliableEvents);\n console.debug('optionalEvents', optionalEvents);\n }\n await this.eventSubscriber.subscribeManyEvents(reliableEvents, optionalEvents, { filter: { job: this.address } });\n }\n \n /**\n * Establishes listeners for worker events when requested by the client\n */\n async subscribeCustomEvents ()\n {\n if (!this.listeningForCustomEvents)\n await this.eventSubscriber.subscribeManyEvents([], ['custom'], { filter: { job: this.address } });\n this.listeningForCustomEvents = true\n }\n \n async joinComputeGroups ()\n {\n // localExec jobs are not entered in any compute group.\n if (!this.inLocalExec && this.computeGroups)\n {\n this.readyStateChange('compute-groups');\n computeGroups.addRef(); // Just in case we're doing a Promise.all on multiple execs.\n\n // Add this job to its currently-defined compute groups (as well as public group, if included)\n let success;\n \n if (!Array.isArray(this.computeGroups)) \n throw new DCPError('Compute groups must be wrapped in an Array', 'DCPL-1101');\n\n for (let i = 0; i < this.computeGroups.length; i++)\n {\n let value = this.computeGroups[i];\n \n if (typeof value !== 'object')\n throw new DCPError(`This compute group: ${value[i]} must be an object`, 'DCPL-1102');\n \n if (value.joinKey && typeof value.joinKey !== 'string' && !(value.joinKey instanceof String))\n throw new DCPError(`This join key: ${value.joinKey} must be a string or a string literal`, 'DCPL-1103');\n else if (value.joinKeystore && !(value.joinKeystore instanceof wallet.Keystore))\n throw new DCPError(`This join Keystore: ${value.joinKeystore} must be an instance of wallet.Keystore`, 'DCPL-1104');\n else if (!value.joinKey && !value.joinKeystore)\n throw new DCPError('Compute group must 
contain a joinKey or a joinKeystore', 'DCPL-1105');\n }\n \n try\n {\n const cgPayload = await computeGroups.addJobToGroups(this.address, this.computeGroups);\n success = true; // To support older version of CG service where addJobToGroups had void/undefined return.\n if (cgPayload) success = cgPayload.success;\n debugging('dcp-client') && console.debug('job/index: addJobToGroups cgPayload:', cgPayload ? cgPayload : 'cgPayload is not defined; probably from legacy CG service.');\n }\n catch (e)\n {\n debugging('dcp-client') && console.debug('job/index: addJobToGroups threw exception:', e);\n success = false;\n }\n\n computeGroups.closeServiceConnection().catch((err) => {\n console.error('Warning: could not close compute groups service connection', err)\n });\n\n /* Could not put the job in any compute group, even though the user wanted it to run. Cancel the job. */\n if (!success)\n {\n await this.cancel('compute-groups::Unable to join any compute groups');\n throw new DCPError(`Access Denied::Failed to add job ${this.address} to any of the desired compute groups`, 'DCPL-1100');\n }\n }\n }\n \n /**\n * Takes result events as input, stores the result and fires off\n * events on the job handle as required. (result, duplicate-result)\n *\n * @param {object} ev - the event recieved from protocol.listen('/results/0xThisGenAdr')\n */\n async handleResult (ev)\n {\n if (this.results === null)\n // This should never happen - the onResult event should only be established/called\n // in addListeners which should also initialize the internal results array\n throw new Error('Job.onResult was invoked before initializing internal results');\n \n const { result: _result, time } = ev.result;\n debugging('dcp-client') && console.debug('handleResult', _result);\n let result = await fetchURI(_result);\n\n if (this.results.isAvailable(ev.sliceNumber))\n {\n const changed = JSON.stringify(this.results[ev.sliceNumber]) !== JSON.stringify(result);\n this.emit('duplicate-result', { sliceNumber: ev.sliceNumber, changed });\n }\n\n this.results.newResult(result, ev.sliceNumber);\n }\n \n /**\n * Receives status events from the scheduler, updates the local status object\n * and emits a 'status' event\n *\n * @param {object} ev - the status event received from\n * protocol.listen('/status/0xThisGenAdr')\n * @param {boolean} emitStatus - value indicating whether or not the status\n * event should be emitted\n */\n handleStatus ({ runStatus, total, distributed, computed }, emitStatus = true)\n {\n Object.assign(this.status, {\n runStatus,\n total,\n distributed,\n computed,\n });\n\n if (emitStatus)\n this.emit('status', { ...this.status, job: this.address });\n }\n \n /**\n * Sends a request to the scheduler to deploy the job.\n */\n async deployJob ()\n {\n var moduleDependencies; \n \n /* Send sideloader bundle to the package server */\n if (DCP_ENV.platform === 'nodejs' && this.dependencies.length)\n {\n try\n {\n let { pkg, unresolved } = await this._publishLocalModules();\n\n moduleDependencies = unresolved;\n if (pkg)\n moduleDependencies.push(pkg.name + '/' + sideloaderModuleIdentifier); \n }\n catch(error)\n {\n throw new DCPError(`Error trying to communicate with package manager server: ${error}`);\n }\n }\n else\n moduleDependencies = this.dependencies;\n \n this.readyStateChange('preauth');\n\n const adhocId = this.uuid.slice(this.uuid.length - 6, this.uuid.length);\n const schedId = await dcpConfig.scheduler.identity;\n // The following check is needed for when using dcp-rtlink and loading the config 
through source, instead of using the dcp-client bundle\n let schedIdAddress = schedId;\n if(schedId.address)\n schedIdAddress = schedId.address;\n this.identityKeystore = await wallet.getId();\n const preauthToken = await bankUtil.preAuthorizePayment(schedIdAddress, this.maxDeployPayment, this.paymentAccountKeystore);\n const { dataRange, dataValues, dataPattern, sliceCount } = marshalInputData(this.jobInputData);\n if(dataValues)\n this.dataValues = dataValues;\n\n this.readyStateChange('deploying');\n\n /* Payload format is documented in scheduler-v4/libexec/job-submit/operations/submit.js */\n const submitPayload = {\n owner: this.identityKeystore.address,\n paymentAccount: this.paymentAccountKeystore.address,\n priority: 0, // @nyi\n\n workFunctionURI: this.workFunctionURI,\n uuid: this.uuid,\n mvMultSlicePayment: Number(this.feeStructure.marketValue) || 0, // @todo: improve feeStructure internals to better reflect v4\n absoluteSlicePayment: Number(this.feeStructure.maxPerRequest) || 0,\n requirePath: this.requirePath,\n dependencies: moduleDependencies,\n requirements: this.requirements, /* capex */\n localExec: this.inLocalExec,\n force100pctCPUDensity: this.force100pctCPUDensity,\n estimationSlices: this.estimationSlices,\n greedyEstimation: this.greedyEstimation,\n workerConsole: this.workerConsole,\n isCI: this.isCI,\n\n description: this.public.description || 'Discreetly making the world smarter',\n name: this.public.name || 'Ad-Hoc Job' + adhocId,\n link: this.public.link || '',\n\n preauthToken, // XXXwg/er @todo: validate this after fleshing out the stub(s)\n\n resultStorageType: this.resultStorageType, // @todo: implement other result types\n resultStorageDetails: this.resultStorageDetails, // Content depends on resultStorageType\n resultStorageParams: this.resultStorageParams, // Post params for off-prem storage\n dataRange,\n dataPattern,\n sliceCount,\n marshaledDataValues: this.marshaledDataValues,\n rangeLength: this.rangeLength\n };\n \n // Check if dataRange or dataPattern input is already marshaled\n if (this.marshaledDataRange)\n submitPayload.dataRange = this.marshaledDataRange;\n\n /* Determine composition of argument set and build payload */\n if (this.jobArguments && !this.marshaledArguments)\n submitPayload.marshaledArguments = kvin.marshal(encodeJobValueList(this.jobArguments, 'jobArguments'));\n else\n submitPayload.marshaledArguments = this.marshaledArguments;\n \n // XXXpfr Excellent tracing.\n if (debugging('dcp-client'))\n {\n const { dumpObject } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n dumpObject(submitPayload, 'Submit: Job Index: submitPayload', 256);\n console.debug('Before Deploy', this.identityKeystore);\n }\n\n // Deploy the job! 
If we get an error, try again a few times until threshold of errors is reached, then actually throw it\n let deployed\n let deployAttempts = 0;\n while (deployAttempts++ < (dcpConfig.job.deployAttempts || 10))\n {\n try\n {\n deployed = await this.useDeployConnection('submit', submitPayload, this.identityKeystore);\n break;\n }\n catch (e)\n {\n if (deployAttempts < 10)\n debugging('dcp-client') && console.debug('Error when trying to deploy job, trying again', e);\n else\n throw e;\n }\n }\n\n if (!deployed.success)\n {\n // close all of the connections so that we don't cause node processes to hang.\n const handleErr = (e) => {\n console.error('Error while closing job connection:');\n console.error(e);\n };\n \n this.closeDeployConnection();\n this.eventSubscriber.close().catch(handleErr);\n computeGroups.closeServiceConnection().catch(handleErr);\n \n // Yes, it is possible for deployed or deployed.payload to be undefined.\n if (deployed.payload)\n {\n if (deployed.payload.code === 'ENOTFOUND')\n throw new DCPError(`Failed to submit job to scheduler. Account: ${submitPayload.paymentAccount} was not found or does not have sufficient balance (${deployed.payload.info.deployCost} DCCs needed to deploy this job)`, deployed.payload); \n throw new DCPError('Failed to submit job to scheduler', deployed.payload);\n }\n throw new DCPError('Failed to submit job to scheduler (no payload)', deployed ? deployed : '');\n }\n\n debugging('dcp-client') && console.debug('After Deploy', JSON.stringify(deployed));\n\n this.address = deployed.payload.job;\n this.deployCost = deployed.payload.deployCost;\n\n if (!this.status)\n this.status = {\n runStatus: null,\n total: 0,\n computed: 0,\n distributed: 0,\n };\n \n this.status.runStatus = deployed.payload.status;\n this.status.total = deployed.payload.lastSliceNumber;\n this.running = true;\n }\n \n /** close an open job to indicate we are done adding data so it is okay to finish\n * the job at the appropriate time\n */\n close ()\n {\n return this.useDeployConnection('closeJob', {\n job: this.id,\n });\n }\n \n /** Use the connection to job submit service. 
Will open a new connection if one does not exist,\n * and close the connection if it is idle for more than 10 seconds (tuneable).\n */\n useDeployConnection(...args)\n {\n if (!this.useDeployConnection.uses)\n this.useDeployConnection.uses = 0;\n this.useDeployConnection.uses++;\n if (!this.deployConnection)\n {\n debugging('deploy-connection') && console.info(`1453: making a new deployConnection...`)\n this.deployConnection = new protocolV4.Connection(dcpConfig.scheduler.services.jobSubmit); \n this.deployConnection.on('close', () => { this.deployConnection = null; });\n }\n if (this.deployConnectionTimeout)\n clearTimeout(this.deployConnectionTimeout);\n\n debugging('deploy-connection') && console.info(`1460: sending ${args[0]} request...`);\n const deployPromise = this.deployConnection.send(...args);\n \n deployPromise.finally(() => {\n this.useDeployConnection.uses--;\n\n debugging('deploy-connection') && console.info(`1462: deployConnection done ${args[0]} request, connection uses is ${this.useDeployConnection.uses}`)\n\n this.deployConnectionTimeout = setTimeout(() => {\n if (this.useDeployConnection.uses === 0 && this.deployConnection)\n {\n debugging('deploy-connection') && console.info(`1469: done with deployConn, closing...`);\n // if we're done w/ the connection, then remove its cleanup\n // function, close it, and clean up manually to make room for a\n // new conn when needed (ie, don't leave the closing conn where\n // someone could accidentally pick it up)\n this.deployConnection.removeAllListeners('close');\n this.deployConnection.close();\n this.deployConnection = null;\n }\n }, (dcpConfig.job.deployCloseTimeout || 10 * 1000));\n if (!ON_BROWSER)\n this.deployConnectionTimeout.unref();\n }); \n \n return deployPromise;\n }\n \n /**\n * Close the connection to the job submit (if it exists), and clear the close timeout (if needed).\n */\n closeDeployConnection()\n {\n if (this.deployConnection)\n this.deployConnection.close();\n if (this.deployConnectionTimeout)\n clearTimeout(this.deployConnectionTimeout);\n }\n}\n\n/** \n * Encode a value list for transmission to the job-submit daemon. This could be either job arguments\n * or the input set, if the input set was an Array-like object.\n *\n * @param {ArrayLike} valueList the list of values to encode\n * @returns Array of URIString\n */\nfunction encodeJobValueList(valueList, valueKind)\n{\n var list = [];\n \n /*\n * We need to handle several different styles of datasets, and create the output array accordingly.\n *\n * 1. instance of RemoteDataSet => arguments is a list of URI strings; fetch URIs before handing to work fn\n * 2. 
an Array-like objects => arguments handed directly to work fn - except instances of RemoteDatum\n * All values sent to the scheduler in payload are sent in their database representation (always as some kind of URI)\n */\n \n if (typeof valueList === 'undefined' || (typeof valueList === 'object' && valueList.length === 0))\n return list; /* empty set */\n\n if (typeof valueList !== 'object' || !valueList.hasOwnProperty('length'))\n throw new Error('value list must be an Array-like object');\n \n for (let i = 0; i < valueList.length; i++) /* Set is composed of values from potentially varying sources */\n {\n let value = valueList[i];\n if (value instanceof RemoteDataSet)\n value.forEach((el) => list.push(new URL(el)));\n else if (value instanceof RemoteDataPattern)\n {\n if (valueKind === jobValueKind.jobArguments)\n throw new DCPError('Cannot use RemoteDataPattern as work function arguments', 'EBADARG');\n else\n {\n let uri = valueList['pattern'];\n for (let sliceNum = 1; sliceNum <= valueList['sliceCount']; sliceNum++)\n list.push(new URL(uri.replace('{slice}', sliceNum)))\n }\n }\n else if (value instanceof RemoteValue)\n list.push(value.href);\n else\n list.push(value);\n } \n\n const encodedList = list.map(encodeJobValueUri)\n return encodedList;\n} \n\n/**\n * Depending on the shape of the job's data, resolve it into a RangeObject, a\n * Pattern, or a values array, and return it in the appropriate property.\n *\n * @param {any} data Job's input data\n * @return {MarshaledInputData} An object with one of the following properties set:\n * - dataValues: job input is an array of arbitrary values \n * - dataPattern: job input is a URI pattern \n * - dataRange: job input is a RangeObject (and/or friends)\n */\nfunction marshalInputData (data)\n{\n if (!(data instanceof Object || data instanceof SuperRangeObject))\n throw new TypeError(`Invalid job data type: ${typeof data}`);\n\n /**\n * @type {MarshaledInputData}\n */\n const marshalledInputData = {};\n\n // TODO(wesgarland): Make this more robust.\n if (data instanceof SuperRangeObject ||\n (data.hasOwnProperty('ranges') && data.ranges instanceof MultiRangeObject) ||\n (data.hasOwnProperty('start') && data.hasOwnProperty('end')))\n marshalledInputData.dataRange = data;\n else if (Array.isArray(data))\n marshalledInputData.dataValues = data;\n else if (data instanceof URL || data instanceof DcpURL)\n marshalledInputData.dataPattern = String(data);\n else if(data instanceof RemoteDataSet)\n marshalledInputData.dataValues = data.map(e => new URL(e));\n else if(data instanceof RemoteDataPattern)\n {\n marshalledInputData.dataPattern = data['pattern'];\n marshalledInputData.sliceCount = data['sliceCount'];\n }\n\n debugging('job') && console.debug('marshalledInputData:', marshalledInputData);\n return marshalledInputData;\n}\n\n/**\n * marshal the value using kvin or instance of the kvin (tunedKvin)\n * tunedKvin is defined if job.tuning.kvin is specified.\n *\n * @param {any} value \n * @return {object} A marshaled object\n * \n */\nfunction kvinMarshal (value) {\n if (tunedKvin)\n return tunedKvin.marshal(value);\n\n return kvin.marshal(value);\n}\n\n\n\nexports.Job = Job;\nexports.SlicePaymentOffer = SlicePaymentOffer;\nexports.ResultHandle = ResultHandle;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/job/index.js?");
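The functional change between the two versions of this module is confined to useDeployConnection: the added version decrements the use counter before scheduling the idle-close timer, emits deploy-connection debug logging, and removes the connection's close listener before closing it so a half-closed connection can never be picked up again. A minimal standalone sketch of the same reference-counted, idle-timeout reuse pattern, assuming a hypothetical makeConnection factory and a fixed 10-second idle window:

// Sketch only: mirrors the pattern in Job#useDeployConnection, not the
// package's implementation. makeConnection, IdleConnectionPool, and IDLE_MS
// are hypothetical names introduced for this example.
const IDLE_MS = 10 * 1000; // analogous to dcpConfig.job.deployCloseTimeout

class IdleConnectionPool {
  constructor (makeConnection) {
    this.makeConnection = makeConnection; // factory returning { send(), close() }
    this.connection = null;
    this.uses = 0;                        // requests currently in flight
    this.idleTimer = null;
  }

  async use (...args) {
    this.uses++;
    if (!this.connection)
      this.connection = this.makeConnection();
    if (this.idleTimer)                   // new request: cancel any pending idle close
      clearTimeout(this.idleTimer);

    try {
      return await this.connection.send(...args);
    } finally {
      this.uses--;                        // settle first, then schedule the idle close
      this.idleTimer = setTimeout(() => {
        if (this.uses === 0 && this.connection) {
          this.connection.close();        // idle with no in-flight requests: close it
          this.connection = null;         // make room for a fresh connection later
        }
      }, IDLE_MS);
      if (typeof this.idleTimer.unref === 'function')
        this.idleTimer.unref();           // never keep a Node process alive for this timer
    }
  }
}

Clearing the pending timer on every new use and re-arming it only after the request settles is what lets bursts of submit, closeJob, and cancelJob calls share one connection while still releasing it shortly after the job handle goes quiet.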
4203
4203
 
4204
4204
  /***/ }),
4205
4205
 
@@ -4240,7 +4240,7 @@ eval("/**\n * @file job/slice-payment-offer.js\n * @author Ryan Ross
4240
4240
  \*********************************************/
4241
4241
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4242
4242
 
4243
- eval("/**\n * @file job/upload-slices.js\n * @author Ryan Saweczko, ryansaweczko@kingsds.network\n * Danny Akbarzadeh, danny@kingsds.network\n * \n * @date Jun 2022\n *\n * Implement functions to upload slices to the scheduler after a job has been deployed.\n * This area will have it's own connection to the job submit service which it is responsible\n * for handling.\n */\n\nconst { Connection } = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp-client');\n\nlet uploadConnection = null;\nlet uploadRefs = 0;\nlet tunedKvin;\n\nfunction createNewConnection()\n{\n uploadConnection = new Connection(dcpConfig.scheduler.services.jobSubmit);\n uploadConnection.on('close', () => { uploadConnection = null; });\n return uploadConnection.connect();\n}\n\n/*\nHelper function: reprepare a message if a connection breaks before error tolerance\n*/\nfunction reprepareMessage(details)\n{\n return uploadConnection.prepare('addSliceData', {\n job: details.job,\n dataValues: kvinMarshal(details.pile),\n });\n}\n\n/**\n * Helper function that tries to upload slicePile to scheduler for the job with the given address\n * If the connection throws, we will continue trying to upload until it has thrown errorTolerance times\n * However, if the upload is unsuccessful, we throw immediately.\n * @param {object} pileMessage \n * @param {object} originalMessageDetails - used if the connection breaks and we need to reprepare the message.\n * @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job \n */\nasync function safeSliceUpload(pileMessage, originalMessageDetails)\n{\n let payload = undefined;\n let errorTolerance = dcpConfig.job.sliceUploadErrorTolerance; // copy number of times we will tolerate non-success when uploading slices directly from config\n\n while (true) // eslint-disable-line no-constant-condition\n {\n try\n {\n const start = Date.now();\n debugging('slice-upload') && console.log('x-dbg-uploadStart', pileMessage.signedMessage.length);\n // If the connection closes after we prepare the message but before our error tolerance is reached, re-prepare and continue trying to send message.\n if (!uploadConnection)\n {\n debugging('slice-upload') && console.log('x-dbg-openNewConnection');\n await createNewConnection();\n pileMessage = await reprepareMessage(originalMessageDetails); /* Assuming preparing a identical message will result in the same length */\n }\n\n payload = await uploadConnection.sendPreparedMessage(pileMessage);\n if (!payload.success)\n {\n debugging('slice-upload') && console.log('x-dbg-uploadBackoff', {length:pileMessage.signedMessage.length});\n throw payload;\n }\n else\n {\n debugging('slice-upload') && console.log('x-dbg-uploadProgress', Date.now() - start);\n break;\n }\n }\n catch (error)\n {\n if (--errorTolerance <= 0)\n {\n debugging('slice-upload') && console.log('x-dbg-uploadError', error);\n if (uploadConnection)\n uploadConnection.close(); // ensure we clean up the connection\n throw error;\n }\n }\n }\n return payload;\n}\n\n/**\n* This function contains the actual logic behind staggered slice uploads\n* to the scheduler which makes quicker deployment possible.\n* \n* Note that we pass in mostToTake so 
that the uploadLogic function can update \n* it to the new value it needs to be, and then pass it back to the wrapper \n* function (addSlices) which actually does the work of picking up slices \n* and thus uses this value\n* @param {Array} pile the actual array of slices being uploaded to scheduler\n* @param {Number} mostToTake number of slices that should be taken by the wrapper function (addSlices) \n* which actually does the work of picking up slices and thus uses this value.\n* We pass in mostToTake so that the uploadLogic function can update it to the \n* new value it needs to be, and then pass it back to the wrapper\n* @param {*} jobAddress Address of job to upload the slices to \n* @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job\n*/\nasync function sliceUploadLogic(pile, mostToTake, jobAddress)\n{\n if (!uploadConnection)\n await createNewConnection();\n\n const slicesTaken = pile.length;\n let pileMessage = await uploadConnection.prepare('addSliceData', {\n job: jobAddress,\n dataValues: kvinMarshal(pile),\n });\n \n let pileSize = pileMessage.signedMessage.length;\n \n let newMostToTake;\n let uploadedSlices;\n\n // if the pile is larger than the ceiling but we only took one slice, there's no smaller pile we can make\n // so we upload it anyway but we don't try taking more next time cause we were over the ceiling (which \n // is a hard limit on upload sizes)\n if ((pileSize > dcpConfig.job.uploadSlicesCeiling) && (slicesTaken === 1))\n {\n uploadedSlices = await safeSliceUpload(pileMessage, { job: jobAddress, pile });\n newMostToTake = 1;\n }\n \n // if the pile is larger than the target but we only took one slice, there's no smaller pile we can make\n // so we upload it anyway and still try taking more\n else if ((pileSize > dcpConfig.job.uploadSlicesTarget) && (slicesTaken === 1))\n {\n uploadedSlices = await safeSliceUpload(pileMessage, { job: jobAddress, pile });\n newMostToTake = mostToTake * dcpConfig.job.uploadIncreaseFactor;\n }\n \n // otherwise, if the pile is smaller than the soft ceiling, send up the pile anyway (since piles are expensive to make) \n // but remember to include incrementFactor times as many slices in the next pile\n else if (pileSize <= dcpConfig.job.uploadSlicesTarget)\n {\n uploadedSlices = await safeSliceUpload(pileMessage, { job: jobAddress, pile });\n newMostToTake = mostToTake * dcpConfig.job.uploadIncreaseFactor;\n }\n \n // if the pile is over the ceiling then we do not upload and begin reassembling our piles from scratch\n else if (pileSize > dcpConfig.job.uploadSlicesCeiling)\n {\n newMostToTake = -1;\n }\n \n // if the pile is over the target (but implicitly under the ceiling), then upload the pile to scheduler but lower mostToTake\n // by a smaller factor than incrementFactor to allow us to begin \"centering\" sizes of piles around the target\n else if (pileSize > dcpConfig.job.uploadSlicesTarget)\n {\n uploadedSlices = await safeSliceUpload(pileMessage, { job: jobAddress, pile });\n newMostToTake = Math.ceil(mostToTake / ((2 / 3) * dcpConfig.job.uploadIncreaseFactor));\n }\n else\n throw new Error('hopefully impossible code in slice upload logic');\n\n let payload = uploadedSlices ? 
uploadedSlices.payload : undefined;\n return { payload, newMostToTake }; // in case the user needs lastSliceNumber's value\n}\n\n/**\n* Uploads slices to the scheduler in a staggered fashion\n* @param {Array} dataValues actual array of slices being uploaded to scheduler\n* @param {*} jobAddress Address of job these slices are for\n* @param {*} newTunedKvin undefined, or new version of kvin tuned for speed or size specifically. Use if defined.\n* @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job\n*/\nasync function addSlices(dataValues, jobAddress, newTunedKvin)\n{\n if (newTunedKvin)\n tunedKvin = newTunedKvin;\n\n if (!Array.isArray(dataValues))\n throw new TypeError('Only data-by-value jobs may dynamically add slices');\n\n let mostToTake = dcpConfig.job.uploadInitialNumberOfSlices; // maximum number of slices we could take in per pile\n let payload = undefined; // used in return value\n let slicesTaken = 0; // number of slices in the pile already\n let pile = [];\n uploadRefs++;\n\n for (let slice of dataValues)\n {\n pile.push(slice);\n slicesTaken++;\n if (slicesTaken === mostToTake)\n {\n let total = await sliceUploadLogic(pile, mostToTake, jobAddress);\n payload = total.payload;\n \n if (total.newMostToTake < 0)\n {\n /* if total.newMostToTake == -1 (only non-positive value returned), then the pile was not successfully\n * uploaded because it was over the ceiling and we need to upload the pile *itself* again, recursively\n */\n payload = await addSlices(pile, jobAddress);\n /* and next time, the number of slices we take is the number from this time *divided* by the incrementFactor\n * since we know invariably that number of slices was under the ceiling AND target\n * if you're curious why that's an invariant, this is because mostToTake only ever *increases* by being multiplied by \n * a factor of incrementFactor within sliceUploadLogic, and this only occurs when the pile being uploaded that time\n * was under the target\n */\n mostToTake = mostToTake / dcpConfig.job.uploadIncreaseFactor;\n }\n else\n {\n /* in all other cases (other than the pile size being over the ceiling) the sliceUploadLogic helper \n * determines the number of slices we should pick up next time, so we just use the value it spits out\n */\n mostToTake = total.newMostToTake;\n }\n \n // reset slicesTaken and pile since at this point we know for sure the pile has been uploaded\n pile = [];\n slicesTaken = 0;\n }\n }\n // upload the pile one last time in case we continued off the last slice with a non-empty pile\n if (pile.length !== 0)\n {\n let finalObj = await sliceUploadLogic(pile, mostToTake, jobAddress);\n payload = finalObj.payload;\n mostToTake = finalObj.newMostToTake;\n \n if (mostToTake < 0)\n {\n // if you need documentation on the next two lines, look inside the if (total.newMostToTake < 0) just above\n payload = await addSlices(pile, jobAddress);\n mostToTake = mostToTake / dcpConfig.job.uploadIncreaseFactor;\n }\n }\n\n // and finally assign whatever mostToTake was at the end of this run of the function to be returned \n // as part of the payload in case addSlices was called recursively\n payload.mostToTake = mostToTake;\n\n // cleanup connection when done with this function\n if (uploadConnection && uploadRefs === 1) {\n debugging('slice-upload') && console.debug(`250: closing uploadConnection`, uploadConnection?.id);\n await uploadConnection.close();\n debugging('slice-upload') && console.debug(`252: closed 
uploadConnection`, uploadConnection);\n }\n uploadRefs--;\n\n /* contains the job's lastSliceNumber (the only externally-meaningful value returned from \n * the uploading of slices to the scheduler) in case the calling function needs it \n */\n return payload;\n}\n\n/**\n * marshal the value using kvin or instance of the kvin (tunedKvin)\n * tunedKvin is defined if job.tuning.kvin is specified.\n *\n * @param {any} value \n * @return {object} A marshaled object\n * \n */\nfunction kvinMarshal (value) {\n if (tunedKvin)\n return tunedKvin.marshal(value);\n\n return kvin.marshal(value);\n}\n\n\nexports.addSlices = addSlices;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/job/upload-slices.js?");
4243
+ eval("/**\n * @file job/upload-slices.js\n * @author Ryan Saweczko, ryansaweczko@kingsds.network\n * Danny Akbarzadeh, danny@kingsds.network\n * \n * @date Jun 2022\n *\n * Implement functions to upload slices to the scheduler after a job has been deployed.\n * This area will have it's own connection to the job submit service which it is responsible\n * for handling.\n */\n\nconst { Connection } = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp-client');\n\nlet uploadConnection = null;\nlet uploadRefs = 0;\nlet tunedKvin;\n\nfunction createNewConnection()\n{\n uploadConnection = new Connection(dcpConfig.scheduler.services.jobSubmit);\n uploadConnection.on('close', () => { uploadConnection = null; });\n return uploadConnection.connect();\n}\n\n/*\nHelper function: reprepare a message if a connection breaks before error tolerance\n*/\nfunction reprepareMessage(details)\n{\n return uploadConnection.prepare('addSliceData', {\n job: details.job,\n dataValues: kvinMarshal(details.pile),\n });\n}\n\n/**\n * Helper function that tries to upload slicePile to scheduler for the job with the given address\n * If the connection throws, we will continue trying to upload until it has thrown errorTolerance times\n * However, if the upload is unsuccessful, we throw immediately.\n * @param {object} pileMessage \n * @param {object} originalMessageDetails - used if the connection breaks and we need to reprepare the message.\n * @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job \n */\nasync function safeSliceUpload(pileMessage, originalMessageDetails)\n{\n let payload = undefined;\n let errorTolerance = dcpConfig.job.sliceUploadErrorTolerance; // copy number of times we will tolerate non-success when uploading slices directly from config\n\n while (true) // eslint-disable-line no-constant-condition\n {\n try\n {\n const start = Date.now();\n debugging('slice-upload') && console.log('x-dbg-uploadStart', pileMessage.signedMessage.length);\n // If the connection closes after we prepare the message but before our error tolerance is reached, re-prepare and continue trying to send message.\n if (!uploadConnection)\n {\n debugging('slice-upload') && console.log('x-dbg-openNewConnection');\n await createNewConnection();\n pileMessage = await reprepareMessage(originalMessageDetails); /* Assuming preparing a identical message will result in the same length */\n }\n\n payload = await uploadConnection.sendPreparedMessage(pileMessage);\n if (!payload.success)\n {\n debugging('slice-upload') && console.log('x-dbg-uploadBackoff', {length:pileMessage.signedMessage.length});\n throw payload;\n }\n else\n {\n debugging('slice-upload') && console.log('x-dbg-uploadProgress', Date.now() - start);\n break;\n }\n }\n catch (error)\n {\n if (--errorTolerance <= 0)\n {\n debugging('slice-upload') && console.log('x-dbg-uploadError', error);\n if (uploadConnection)\n uploadConnection.close(); // ensure we clean up the connection\n throw error;\n }\n }\n }\n return payload;\n}\n\n/**\n* This function contains the actual logic behind staggered slice uploads\n* to the scheduler which makes quicker deployment possible.\n* \n* Note that we pass in mostToTake so 
that the uploadLogic function can update \n* it to the new value it needs to be, and then pass it back to the wrapper \n* function (addSlices) which actually does the work of picking up slices \n* and thus uses this value\n* @param {Array} pile the actual array of slices being uploaded to scheduler\n* @param {Number} mostToTake number of slices that should be taken by the wrapper function (addSlices) \n* which actually does the work of picking up slices and thus uses this value.\n* We pass in mostToTake so that the uploadLogic function can update it to the \n* new value it needs to be, and then pass it back to the wrapper\n* @param {*} jobAddress Address of job to upload the slices to \n* @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job\n*/\nasync function sliceUploadLogic(pile, mostToTake, jobAddress)\n{\n if (!uploadConnection)\n await createNewConnection();\n\n const slicesTaken = pile.length;\n let pileMessage = await uploadConnection.prepare('addSliceData', {\n job: jobAddress,\n dataValues: kvinMarshal(pile),\n });\n \n let pileSize = pileMessage.signedMessage.length;\n \n let newMostToTake;\n let uploadedSlices;\n\n // if the pile is larger than the ceiling but we only took one slice, there's no smaller pile we can make\n // so we upload it anyway but we don't try taking more next time cause we were over the ceiling (which \n // is a hard limit on upload sizes)\n if ((pileSize > dcpConfig.job.uploadSlicesCeiling) && (slicesTaken === 1))\n {\n uploadedSlices = await safeSliceUpload(pileMessage, { job: jobAddress, pile });\n newMostToTake = 1;\n }\n \n // if the pile is larger than the target but we only took one slice, there's no smaller pile we can make\n // so we upload it anyway and still try taking more\n else if ((pileSize > dcpConfig.job.uploadSlicesTarget) && (slicesTaken === 1))\n {\n uploadedSlices = await safeSliceUpload(pileMessage, { job: jobAddress, pile });\n newMostToTake = mostToTake * dcpConfig.job.uploadIncreaseFactor;\n }\n \n // otherwise, if the pile is smaller than the soft ceiling, send up the pile anyway (since piles are expensive to make) \n // but remember to include incrementFactor times as many slices in the next pile\n else if (pileSize <= dcpConfig.job.uploadSlicesTarget)\n {\n uploadedSlices = await safeSliceUpload(pileMessage, { job: jobAddress, pile });\n newMostToTake = mostToTake * dcpConfig.job.uploadIncreaseFactor;\n }\n \n // if the pile is over the ceiling then we do not upload and begin reassembling our piles from scratch\n else if (pileSize > dcpConfig.job.uploadSlicesCeiling)\n {\n newMostToTake = -1;\n }\n \n // if the pile is over the target (but implicitly under the ceiling), then upload the pile to scheduler but lower mostToTake\n // by a smaller factor than incrementFactor to allow us to begin \"centering\" sizes of piles around the target\n else if (pileSize > dcpConfig.job.uploadSlicesTarget)\n {\n uploadedSlices = await safeSliceUpload(pileMessage, { job: jobAddress, pile });\n newMostToTake = Math.ceil(mostToTake / ((2 / 3) * dcpConfig.job.uploadIncreaseFactor));\n }\n else\n throw new Error('hopefully impossible code in slice upload logic');\n\n let payload = uploadedSlices ? 
uploadedSlices.payload : undefined;\n return { payload, newMostToTake }; // in case the user needs lastSliceNumber's value\n}\n\n/**\n* Uploads slices to the scheduler in a staggered fashion\n* @param {Array} dataValues actual array of slices being uploaded to scheduler\n* @param {*} jobAddress Address of job these slices are for\n* @param {*} newTunedKvin undefined, or new version of kvin tuned for speed or size specifically. Use if defined.\n* @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job\n*/\nasync function addSlices(dataValues, jobAddress, newTunedKvin)\n{\n if (newTunedKvin)\n tunedKvin = newTunedKvin;\n\n if (!Array.isArray(dataValues))\n throw new TypeError('Only data-by-value jobs may dynamically add slices');\n\n let mostToTake = dcpConfig.job.uploadInitialNumberOfSlices; // maximum number of slices we could take in per pile\n let payload = undefined; // used in return value\n let slicesTaken = 0; // number of slices in the pile already\n let pile = [];\n uploadRefs++;\n\n for (let slice of dataValues)\n {\n pile.push(slice);\n slicesTaken++;\n if (slicesTaken === mostToTake)\n {\n let total = await sliceUploadLogic(pile, mostToTake, jobAddress);\n payload = total.payload;\n \n if (total.newMostToTake < 0)\n {\n /* if total.newMostToTake == -1 (only non-positive value returned), then the pile was not successfully\n * uploaded because it was over the ceiling and we need to upload the pile *itself* again, recursively\n */\n payload = await addSlices(pile, jobAddress);\n /* and next time, the number of slices we take is the number from this time *divided* by the incrementFactor\n * since we know invariably that number of slices was under the ceiling AND target\n * if you're curious why that's an invariant, this is because mostToTake only ever *increases* by being multiplied by \n * a factor of incrementFactor within sliceUploadLogic, and this only occurs when the pile being uploaded that time\n * was under the target\n */\n mostToTake = mostToTake / dcpConfig.job.uploadIncreaseFactor;\n }\n else\n {\n /* in all other cases (other than the pile size being over the ceiling) the sliceUploadLogic helper \n * determines the number of slices we should pick up next time, so we just use the value it spits out\n */\n mostToTake = total.newMostToTake;\n }\n \n // reset slicesTaken and pile since at this point we know for sure the pile has been uploaded\n pile = [];\n slicesTaken = 0;\n }\n }\n // upload the pile one last time in case we continued off the last slice with a non-empty pile\n if (pile.length !== 0)\n {\n let finalObj = await sliceUploadLogic(pile, mostToTake, jobAddress);\n payload = finalObj.payload;\n mostToTake = finalObj.newMostToTake;\n \n if (mostToTake < 0)\n {\n // if you need documentation on the next two lines, look inside the if (total.newMostToTake < 0) just above\n payload = await addSlices(pile, jobAddress);\n mostToTake = mostToTake / dcpConfig.job.uploadIncreaseFactor;\n }\n }\n\n // and finally assign whatever mostToTake was at the end of this run of the function to be returned \n // as part of the payload in case addSlices was called recursively\n payload.mostToTake = mostToTake;\n\n // cleanup connection when done with this function\n uploadRefs--;\n if (uploadConnection && uploadRefs === 0) {\n debugging('slice-upload') && console.debug(`250: closing uploadConnection`, uploadConnection?.id);\n uploadConnection.removeAllListeners('close');\n uploadConnection.close();\n 
uploadConnection = null;\n debugging('slice-upload') && console.debug(`252: closed uploadConnection`, uploadConnection);\n }\n\n /* contains the job's lastSliceNumber (the only externally-meaningful value returned from \n * the uploading of slices to the scheduler) in case the calling function needs it \n */\n return payload;\n}\n\n/**\n * marshal the value using kvin or instance of the kvin (tunedKvin)\n * tunedKvin is defined if job.tuning.kvin is specified.\n *\n * @param {any} value \n * @return {object} A marshaled object\n * \n */\nfunction kvinMarshal (value) {\n if (tunedKvin)\n return tunedKvin.marshal(value);\n\n return kvin.marshal(value);\n}\n\n\nexports.addSlices = addSlices;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/job/upload-slices.js?");
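Note: sliceUploadLogic() and addSlices() above implement staggered uploads: slices accumulate into a pile, the pile's serialized size is compared against dcpConfig.job.uploadSlicesTarget (soft limit) and dcpConfig.job.uploadSlicesCeiling (hard limit), and the next pile may grow by uploadIncreaseFactor while it stays under the target, shrinks toward the target when it lands between target and ceiling, and is split and re-uploaded when it exceeds the ceiling. Below is a simplified sketch of just that sizing idea; the bundled code additionally special-cases single-slice piles, retries within sliceUploadErrorTolerance, and re-prepares messages on broken connections. serializedSize() and upload() are hypothetical stand-ins for prepare()/sendPreparedMessage().

// Sketch only -- the adaptive batch-sizing idea, not the dcp-client implementation.
async function uploadInBatches(slices, { target, ceiling, growFactor, initialBatch }) {
  let batchLimit = initialBatch;   // plays the role of mostToTake
  let batch = [];                  // plays the role of pile

  async function flush() {
    const size = serializedSize(batch);             // like pileMessage.signedMessage.length
    if (size <= target || batch.length === 1) {
      await upload(batch);                          // under target (or indivisible): send and grow
      batchLimit = Math.ceil(batchLimit * growFactor);
    } else if (size <= ceiling) {
      await upload(batch);                          // over target but under the hard limit: send and shrink
      batchLimit = Math.ceil(batchLimit / ((2 / 3) * growFactor));
    } else {
      const smaller = Math.max(1, Math.floor(batchLimit / growFactor));
      await uploadInBatches(batch, { target, ceiling, growFactor, initialBatch: smaller });
      batchLimit = smaller;                         // over the ceiling: split the pile and retry it
    }
    batch = [];
  }

  for (const slice of slices) {
    batch.push(slice);
    if (batch.length >= batchLimit)
      await flush();
  }
  if (batch.length)
    await flush();                                  // flush the remainder through the same sizing logic
}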
4244
4244
 
4245
4245
  /***/ }),
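Note: in the replacement upload-slices.js above, the shared upload connection is torn down with a reference count: each addSlices() call (including the recursive re-pile calls) takes a reference on entry, drops it on the way out, and only the last caller out detaches the 'close' listener, closes the connection, and clears the module-level variable. A minimal sketch of that teardown discipline follows; openSharedConnection() is a hypothetical stand-in for the protocol-v4 Connection factory.

// Sketch only -- reference-counted teardown of a shared connection.
let shared = null;   // stands in for uploadConnection
let refs = 0;        // stands in for uploadRefs

async function withSharedConnection(work) {
  if (!shared) {
    shared = openSharedConnection();               // hypothetical factory; compare createNewConnection()
    shared.on('close', () => { shared = null; });  // self-clearing if the peer closes it
  }
  refs++;
  try {
    return await work(shared);
  } finally {
    refs--;                                        // drop this caller's reference first...
    if (shared && refs === 0) {                    // ...and only the last one out closes
      shared.removeAllListeners('close');          // detach the handler so the reference is nulled here
      shared.close();
      shared = null;
    }
  }
}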
4246
4246
 
@@ -4250,7 +4250,7 @@ eval("/**\n * @file job/upload-slices.js\n * @author Ryan Saweczko,
4250
4250
  \****************************************/
4251
4251
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4252
4252
 
4253
- eval("/**\n * @file range-object.js\n * @author Eddie Roosenmaallen, eddie@kingsds.network\n * Matthew Palma, mpalma@kingsds.network\n * Robert Mirandola, robert@kingsds.network\n * @date October 2018\n * July 2022\n * This module exports classes and methods for working with RangeObjects\n *\n * RangeObject:\n * new RangeObject(start, end[, step[, group]])\n * new RangeObject({ start, end[, step[, group]] })\n * new RangeObject('[object RangeObject {start},{end},{step},{group}]')\n *\n * In the first two forms, step and group are optional, defaulting to 1. In the string form, all paramteres are required\n *\n *\n * Methods:\n * toString() Returns a string descriptor, suitable to pass to a new constructor\n * toObject() Returns a vanilla ES object with the four parameters as properties\n * toArray() Resolve all values from the RangeObject, grouping if group > 1, as an array\n * nthValue(n) Look up a single value/group\n * slice([start[, end]]) Resolve a selection of values from start to end (exclusive).\n * Same semantics as Array.prototype.slice, including negative indexes\n * materialize(now) Resolve all values and store as an array (eg. for random distributions)\n *\n * Properties:\n * length Return the total number of values (or groups, if group > 1) generasted by the RangeObject\n * dist Name of the distributor function\n * values If present, the Distribution has been materialized\n * materializeOnScheduler If present and truthy, the distribution will be materialized on the scheduler before\n * instantiating tasks\n *\n * Examples:\n * bob = new RangeObject(1, 10)\n * bob.toArray() // [1,2,3,4,5,6,7,8,9,10]\n * bob.slice(-2) // [9,10]\n * fred = new RangeObject({ start:1, end: 1000, step: 3})\n * fred.toArray() // [1,4,7 (...) 994, 997, 1000]\n * fred.toObject() // { start: 1, end: 1000, step: 3, group: 1}\n * bill = new RO.RangeObject(0, 999, 3, 3)\n * rangeobject.js?1541083657962:42 Uncaught RangeError: Range must be divisible evenly into groups\n * at new RangeObject (rangeobject.js?1541083657962:42)\n * at <anonymous>:1:8\n * bill = new RO.RangeObject(0, 998, 3, 3)\n * bill.toArray() // [[0, 3, 6] (...), [990, 993, 996]]\n *\n * MultiRangeObject:\n * new MultiRangeObject(rol1, rol2, ... roln)\n * new MultiRangeObject([rol1, rol2, ... roln])\n * Each argument is either a RangeObject, an Array-like object (, or a Distribution object? NYI ~ER). 
The\n * values generated by the MRO will be a multi-dimentional array, where each element of the array is a vector\n * across all input ranges.\n */\n\n\n// Some utility functions to make validating the parameters easier\nconst assert = (premise, msg) => {\n if (!premise) throw new Error(msg);\n}\n\nconst assertNumeric = (val, msg) => {\n assert(typeof val === 'number', msg);\n assert(!Number.isNaN(val), msg);\n assert(Number.isFinite(val), msg);\n}\n\nconst RANGEOBJ_REGEX = /^\\[object RangeObject (-?\\d+(?:\\.\\d+)?),(-?\\d+(?:\\.\\d+)?),(-?\\d+(?:\\.\\d+)?),(\\d+|undefined)\\]$/;\n\n/** \n * Defines a consistent interface for each of the range object types\n * to inherit from, provides some array methods.\n * @access public\n */\nclass SuperRangeObject {\n constructor() {\n return new Proxy(this, {\n get: (target, name) => {\n if ((typeof name === 'string' || typeof name === 'number') && Number.isInteger(parseFloat(name))) {\n return target.nthValue(parseFloat(name));\n } else {\n return target[name];\n }\n }\n });\n }\n\n [Symbol.iterator]() {\n let index = 0;\n\n return {\n next: () => ({ value: this.nthValue(index++), done: index > this.length })\n };\n }\n\n get length() {\n return 0;\n }\n\n nthValue(n) {\n throw new Error(\"nthValue not overridden\");\n }\n\n toArray() {\n return this.slice()\n }\n\n /**\n * Generate array from range starting at value `start` and ending at value `end` via `nthValue`.\n * @param {number} [start=0] index to start slice\n * @param {number} [end] index to end slice, return rest of array if not provided.\n * @access public\n */\n slice(start, end) {\n if (typeof start === 'undefined') { start = 0 } else if (start < 0) { start += this.length }\n\n if (typeof end === 'undefined') { end = this.length } else if (end < 0) { end += (this.length) }\n\n if (end > this.length) { end = this.length }\n\n const arr = []\n for (let i = start; i < end; i++) { arr.push(this.nthValue(i)) }\n\n return arr\n }\n\n /**\n * Converts range to an Array and then calls `filter(...args)` on it.\n * @param {...any} args Same args as you would pass to Array#filter\n * @access public\n */\n filter(...args) {\n return this.toArray().filter(...args);\n }\n}\n\n/**\n * Range objects are vanilla ES objects used to describe value range sets for use by `compute.for()`.\n * The range must be increasing, i.e. `start` must be less than `end`.\n * Calculations made to derive the set of numbers in a range are carried out with `BigNumber`, \n * eg. arbitrary-precision, support. 
The numbers `Infinity` and `-Infinity` are not supported, and \n * the API does not differentiate between `+0` and `-0`.\n */\n/**\n * An object which represents a range of values.\n * @param {number|object} startOrObject Beginning of range, or object literal with `start` and `end` properties.\n * @param {number} end End of range\n * @param {number} [step=1] Step size in range\n * @param {number|undefined} [group] Groups in range\n * @access public\n * @extends SuperRangeObject\n * @example <caption>With implicit step size = 1.</caption>\n * let rangeObject = new RangeObject(0.5,3);\n * rangeObject.toArray();\n * // [ 0.5, 1.5, 2.5 ]\n * \n * @example <caption>With explicit step size.</caption>\n * let rangeObject = new RangeObject(1,9,3);\n * rangeObject.toArray();\n * // [ 1, 4, 7 ]\n * \n * @example <caption>With grouping.</caption>\n * let rangeObject = new RangeObject(1,9,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7 ] ]\n */\nclass RangeObject extends SuperRangeObject {\n \n constructor (start, end, step, group) {\n super();\n if (typeof start === 'string' && start.match(RANGEOBJ_REGEX)) {\n const parts = start.match(RANGEOBJ_REGEX)\n start = {\n start: parseFloat(parts[1]),\n end: parseFloat(parts[2]),\n step: parseFloat(parts[3]),\n group: (parts[4] === 'undefined'? undefined : parseFloat(parts[4]))\n }\n }\n\n if (typeof start === 'object') {\n this.start = start.start;\n this.end = start.end;\n this.step = start.step;\n this.group = start.group;\n } else {\n this.start = start;\n this.end = end;\n this.step = step;\n this.group = group;\n }\n\n assertNumeric(this.start, `Invalid start parameter \"${this.start}\", must be numeric and finite.`);\n assertNumeric(this.end, `Invalid end parameter \"${this.end}\", must be numeric and finite.`);\n\n // Ensure step moves in the correct direction for start-end (ie, negative if end < start)\n if (typeof this.step === 'undefined') {\n this.step = Math.sign(this.end - this.start);\n } else {\n assertNumeric(this.step, `Invalid step parameter \"${this.step}\", must be numeric and finite.`);\n if ((this.step === 0 && this.start !== this.end) || Math.sign(this.step) !== Math.sign(this.end - this.start)) {\n throw new Error('RangeObject step must approach end from start.');\n }\n }\n\n if (typeof this.group !== 'undefined') {\n // group must remain undefined if not provided because no grouping should occur if not provided.\n // As per spec, even if group is 1 it should group into arrays of 1 element\n assertNumeric(this.group, `Invalid group parameter \"${this.group}\", must be numeric and finite.`);\n assert(Number.isInteger(this.group), `Invalid group parameter \"${this.group}\", must be an integer.`);\n assert(this.group > 0, `Invalid group parameter \"${this.group}\", must be greater than zero.`);\n }\n }\n\n /**\n * @typedef {object} RangeLike\n * @property {number} start\n * @property {number} end\n * @property {number} nthValue\n */\n\n /**\n * @returns {boolean}\n */\n static isRangelike (r) {\n if (r instanceof RangeObject) { return true }\n if (typeof r === 'object' &&\n typeof r.start === 'number' &&\n typeof r.end === 'number' &&\n typeof r.nthValue === 'function') { return true }\n\n return false\n }\n\n /**\n * @returns {boolean}\n */\n static isRangeObject(r) {\n if (r instanceof RangeObject) { return true }\n\n return false\n }\n\n /**\n * Test whether a value can be passed to the RangeObject constructor\n * @param r Value to test\n * @param strict Optional. 
Truthy to disallow objects which already look Rangelike\n */\n static isProtoRangelike (r, strict = true) {\n if (typeof r === 'object' &&\n typeof r.start === 'number' &&\n typeof r.end === 'number') { return true }\n\n if (typeof r === 'string' &&\n r.match(RANGEOBJ_REGEX)) { return true }\n\n if (!strict && RangeObject.isRangelike(r)) { return true }\n\n return false\n }\n\n /**\n * Create string representation of range: [object RangeObject start,end,step,group]\n * @access public\n */\n toString () {\n return `[object RangeObject ${this.start},${this.end},${this.step},${this.group}]`\n }\n\n /**\n * Create object literal for range with properties: start, end, step, and group.\n * @access public\n */\n toObject () {\n return {\n start: this.start,\n end: this.end,\n step: this.step,\n group: this.group\n }\n }\n\n /**\n * Return nth value in range\n * @param {number} n\n * @access public\n * @example\n * let rangeObject = new RangeObject(1,10,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7, 10 ] ]\n * rangeObject.nthValue(1);\n * // [ 7, 10 ]\n */\n nthValue(n) {\n /**\n * `>=` since the value at index 7 in an array that's of length 7 is outside\n * its range\n */\n if (n < 0 || n >= this.length) {\n return undefined;\n }\n\n if (typeof this.group !== 'undefined') {\n const start = (this.group * this.step * n) + this.start\n const arr = []\n\n for (let i = 0; i < this.group && i + this.group * n < this.stepCount; i++) {\n arr.push(start + (i * this.step))\n }\n\n return arr;\n }\n\n return this.start + (n * this.step);\n }\n\n /**\n * Return max value in range\n * @access public\n * @returns {number}\n * @example\n * let rangeObject = new RangeObject(1,10,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7, 10 ] ]\n * rangeObject.max;\n * // 10\n */\n get max () {\n if (typeof this.group === 'undefined' && this.step === 1) { return this.end }\n\n let tail\n if (typeof this.group === 'undefined') {\n [ tail ] = this.slice(-1)\n } else {\n [ tail ] = this.slice(-1)[0].slice(-1)\n }\n return tail\n }\n\n /**\n * Boolean indicating whether all groups are filled.\n * Only defined for RangeObjects that group results.\n * @access public\n * @returns {boolean}\n * @example <caption>With remainder</caption>\n * let rangeObject = new RangeObject(1,9,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7 ] ]\n * rangeObject.hasRemainder;\n * // true\n * @example <caption>Without remainder</caption>\n * let rangeObject = new RangeObject(1,10,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7, 10 ] ]\n * rangeObject.hasRemainder;\n * // true\n */\n get hasRemainder () {\n if (typeof this.group === 'undefined') { return false }\n const groups = this.stepCount / this.group;\n\n return (groups !== Math.floor(groups));\n }\n\n /**\n * Number of elements in range, or number of groups if grouped.\n * @access public\n * @returns {number}\n * @example <caption>Without grouping</caption>\n * let rangeObject = new RangeObject(1,10,3);\n * rangeObject.toArray();\n * // [ 1, 4, 7, 10 ]\n * rangeObject.length;\n * // 4\n * @example <caption>With grouping</caption>\n * let rangeObject = new RangeObject(1,9,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7 ] ]\n * rangeObject.length;\n * // 2\n */\n get length () {\n return Math.ceil(this.stepCount / (this.group || 1));\n }\n\n /**\n * Number of steps in range (sort of like number of elements, except grouping is no longer relevant).\n * @access public\n * @returns {number}\n * @example <caption>Without grouping</caption>\n * let rangeObject 
= new RangeObject(1,10,3);\n * rangeObject.toArray();\n * // [ 1, 4, 7, 10 ]\n * rangeObject.stepCount;\n * // 4\n * @example <caption>With grouping</caption>\n * let rangeObject = new RangeObject(1,9,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7 ] ]\n * rangeObject.length;\n * // 3\n */\n get stepCount () {\n if (this.step === 0) return 1;\n return Math.floor(Math.abs((this.end - this.start) / this.step)) + 1;\n }\n}\n\n/**\n * A sparse range object contains many {@link RangeObject}s. The range objects are stored as arrays and then are\n * concatenated into one array in the order that they were supplied to the constructor.\n * @param {RangeObject|RangeObject[]|object} arg - First range object, or array of range objects, or object with `sparse` key containing an array of range objects.\n * @param {RangeObject} rangeObject - If first argument is a RangeObject, subsquent arguments are range objects too.\n * @access public\n * @extends SuperRangeObject\n * @example\n * r0 = new RangeObject(1,2)\n * r1 = new RangeObject(1,3)\n * sro = new SparseRangeObject(r0, r1)\n * // [ 1, 2, 1, 2, 3]\n */\nclass SparseRangeObject extends SuperRangeObject {\n constructor() {\n super();\n let sparse = [];\n\n if (arguments.length === 1 && arguments[0].ranges) \n sparse = [...arguments[0].ranges];\n else \n sparse = [...arguments];\n \n if (sparse instanceof SparseRangeObject)\n throw new Error('Argument is of type sparse range object');\n \n // If sparse key is passed, make sure the arguments are only the range objects (compute.for() implementation) \n if (sparse[0].sparse)\n sparse = sparse[0].sparse;\n \n sparse.map( r =>\n {\n if (!RangeObject.isProtoRangelike(r))\n throw new Error('Argument is not of type RangeObject');\n })\n \n if (sparse[0].group)\n {\n for (let i = 0; i < sparse.length; i++)\n {\n if (sparse[i].group !== sparse[0].group)\n throw new Error('Range Object has different dimension than other range objects');\n }\n } \n \n this.sparse = true;\n this.ranges = sparse.map(r => \n { \n return new RangeObject(r);\n })\n \n }\n \n /**\n * Test whether a value can be passed to the SparseRangeObject constructor\n * @param r Value to test\n * @param strict Optional. Truthy to disallow objects which already look Rangelike\n */\n static isProtoSparseRangelike (r, strict = true) \n {\n if (typeof r === 'object' && r.sparse) { return true; }\n return false;\n }\n \n /**\n * Return nth value in range\n * @param {number} n\n * @access public\n * @example\n * let sparseRangeObject = new SparseRangeObject(1,3,1);\n * rangeObject.toArray();\n * // [ 1, 2, 3]\n * rangeObject.nthValue(1);\n * // 2\n */\n nthValue(n) \n {\n if (n < 0 || n >= this.length) { return undefined }\n \n let count = 0;\n let rangeCount = 0;\n while (count !== n)\n {\n if (count <= n)\n {\n for (let i = 0; i < this.ranges[rangeCount].length; i++)\n {\n if (count === n)\n return this.ranges[rangeCount][i];\n else\n count++;\n }\n }\n rangeCount++;\n }\n return this.ranges[rangeCount][0];\n }\n \n /**\n * Create object literal with `sparse` property and `range` property containing array of range objects.\n */\n toObject () {\n \n const obj = {\n sparse: true,\n ranges: this.ranges\n }\n return obj;\n }\n \n get length()\n {\n let len = 0;\n this.ranges.forEach((r) => len += r.length);\n\n return len;\n }\n \n}\n\n/**\n * Range objects are vanilla ES objects used to describe value range sets for use by `compute.for()`. \n * Calculations made to derive the set of numbers in a range are carried out with `BigNumber`, \n * eg. 
arbitrary-precision, support. The numbers `Infinity` and `-Infinity` are not supported, and \n * the API does not differentiate between `+0` and `-0`.\n */\nclass MultiRangeObject extends SuperRangeObject {\n /**\n * A multi-range object contains many {@link RangeObject}s. They are iterated over \n * with the fastest moving index going over the right-most range object in array order. Each element\n * of a multi range is a tuple of values from constituent ranges.\n * @param {RangeObject|RangeObject[]|object} arg - First range object, or array of range objects, or object with `ranges` key containing an array of range objects.\n * @param {RangeObject} rangeObject - If first argument is a RangeObject, subsquent arguments are range objects too.\n * @access public\n * @extends SuperRangeObject\n * @example\n * r0 = new RangeObject(1,2)\n * r1 = new RangeObject(1,3)\n * mro = new MultiRangeObject(r0, r1)\n * mro.toArray()\n * // [ [ 1, 1 ], [ 1, 2 ], [ 1, 3 ], [ 2, 1 ], [ 2, 2 ], [ 2, 3 ] ]\n */\n constructor () {\n super();\n var ranges = []\n\n if (arguments.length === 1 && typeof arguments[0] === 'string') {\n const inputs = JSON.parse(arguments[0])\n if (Array.isArray(inputs)) {\n ranges = inputs\n } else if (inputs.ranges) {\n ranges = inputs.ranges\n } else {\n ranges = [inputs]\n }\n } else if (arguments.length === 1 && Array.isArray(arguments[0])) {\n ranges = [...arguments[0]]\n } else if (arguments.length === 1 && !!arguments[0].ranges) {\n ranges = [...arguments[0].ranges]\n } else {\n ranges = [...arguments]\n }\n\n this.ranges = ranges.map(r => {\n if (RangeObject.isRangelike(r)) { return r }\n if (RangeObject.isRangeObject(r)) { return r }\n if (DistributionRange.isDistribution(r)) { return r }\n if (RangeObject.isProtoRangelike(r)) { return new RangeObject(r) }\n if (DistributionRange.isProtoDistribution(r)) { return new DistributionRange(r) }\n\n return Array.isArray(r) ? r : [r]\n })\n }\n\n /**\n * Test whether a value can be passed to the MultiRangeObject constructor\n * @param r Value to test\n * @param strict Optional. Truthy to disallow objects which already look Rangelike\n */\n static isProtoMultiRangelike (r, strict = true) {\n if ((typeof r === 'object') &&\n Array.isArray(r.ranges) &&\n !r.sparse)\n {\n return true;\n }\n\n return false\n }\n\n /**\n * Create string representation of this MultiRangeObject\n * @access public\n * @example\n * \"[object MultiRangeObject ' + this.ranges.length + ']\"\n */\n toString () {\n return '[object MultiRangeObject ' + this.ranges.length + ']'\n }\n\n /**\n * Create object literal with `ranges` property containing array of range objects.\n */\n toObject () {\n return { ranges: this.ranges }\n }\n\n /**\n * Returns a tuple of values from the ranges given by this multi range object.\n * @param {number} n index of multi-range tuple to return\n * @access public\n * @example\n * r0 = new RangeObject(1,2)\n * r1 = new RangeObject(1,3)\n * mro = new MultiRangeObject(r0, r1)\n * mro.toArray()\n * // [ [ 1, 1 ], [ 1, 2 ], [ 1, 3 ], [ 2, 1 ], [ 2, 2 ], [ 2, 3 ] ]\n * mro.nthValue(2)\n * // [ 1, 3 ]\n */\n nthValue (n) {\n if (n < 0 || n >= this.length) { return undefined }\n\n const indexes = []\n\n for (let r = (this.ranges.length - 1); r >= 0; r--) {\n const idx = n % this.ranges[r].length\n\n indexes.unshift(idx)\n\n n -= idx\n n /= this.ranges[r].length\n }\n\n const values = []\n\n for (let i = 0; i < indexes.length; i++) {\n values[i] = Array.isArray(this.ranges[i]) ? 
this.ranges[i][indexes[i]] : this.ranges[i].nthValue(indexes[i])\n }\n\n return values\n }\n\n /**\n * Boolean indicating whether any of the ranges in this multi-range object has a remainder. See {@link RangeObject#hasRemainder}.\n * @access public\n * @returns {boolean}\n */\n get hasRemainder () {\n for (let r of this.ranges) {\n if (r.hasRemainder) { return true }\n }\n\n return false\n }\n\n get length () {\n let len = 1\n\n this.ranges.forEach((r) => len *= r.length)\n\n return len\n }\n}\n\n// DistributionRange object wraps a distributing function into a RangeObject-like API\n// which can be dropped directly into a MultiRangeObject to generate input slices\nclass DistributionRange extends SuperRangeObject {\n constructor (n, dist, ...params) {\n super();\n\n this.distributor = (__webpack_require__(/*! ./stats-ranges */ \"./src/dcp-client/stats-ranges.js\").distributor);\n\n // If argv[0] is a string formatted as DistributionRange.toString(), then unpack it\n if (typeof n === 'string' && n.match(/^\\[object DistributionRange (\\w+?)\\((\\d+?)(?:,(.+?))?\\)\\]$/)) {\n const parts = n.match(/^\\[object DistributionRange (\\w+?)\\((\\d+?)(?:,(.+?))?\\)\\]$/)\n dist = parts[1]\n n = parseInt(parts[2])\n params = (parts[3] || '').split(',').map(e => parseFloat(e))\n }\n\n // If argv[0] is a string describing a DistributionRange, then unpack it\n if (typeof n === 'string' && n.match(/^(\\w+?)\\((\\d+?)(?:,(.+?))?\\)$/)) {\n const parts = n.match(/^(\\w+?)\\((\\d+?)(?:,(.+?))?\\)$/)\n dist = parts[1]\n n = parseInt(parts[2])\n params = (parts[3] || '').split(',').map(e => parseFloat(e))\n }\n\n // If argv[0] is a object of the right shape, then unpack it\n if (typeof n === 'object' &&\n typeof n.length === 'number' &&\n typeof n.dist === 'string' &&\n Array.isArray(n.params)) {\n // console.log('Unpacking proto-object', n)\n dist = n.dist\n params = n.params\n n = n.length\n if (Array.isArray(n.values)) { this.values = n.values }\n if (typeof n.materializeOnScheduler === 'boolean') { this.materializeOnScheduler = n.materializeOnScheduler }\n }\n\n Object.defineProperty(this, 'length', {\n value: n,\n enumerable: true\n })\n Object.defineProperty(this, 'dist', {\n value: dist,\n enumerable: true\n })\n Object.defineProperty(this, 'params', {\n value: params || [],\n enumerable: true\n })\n\n if (typeof this.distributor[dist] !== 'function') {\n // console.log({n,dist,params})\n throw new TypeError('dist param must point to an exported distributing function')\n }\n }\n\n /**\n * @returns {boolean}\n */\n static isDistribution (d) {\n return d instanceof DistributionRange\n }\n\n static isDistributionLike (d) {\n if (DistributionRange.isDistribution(d)) { return true }\n if (typeof d === 'object' &&\n typeof d.nthValue === 'function' &&\n typeof d.slice === 'function') { return true }\n\n return false\n }\n\n static isProtoDistribution (d) {\n if (typeof d === 'string' && d.match(/^\\[object DistributionRange (\\w+?)\\((\\d+?)(?:,(.+?))?\\)\\]$/)) { return true }\n if (typeof d === 'string' && d.match(/^(\\w+?)\\((\\d+?)(?:,(.+?))?\\)$/)) { return true }\n if (typeof d === 'object' &&\n typeof d.length === 'number' &&\n typeof d.dist === 'string' &&\n Array.isArray(d.params)) { return true }\n\n return false\n }\n\n toString () {\n return `[object DistributionRange ${this.dist}(${[this.length, ...this.params].join()})]`\n }\n\n toObject () {\n this.materialize();\n return {\n length: this.length,\n dist: this.dist,\n params: this.params,\n materializeOnScheduler: this.materializeOnScheduler || 
undefined,\n values: this.values || undefined\n }\n }\n\n nthValue (n) {\n if (n < 0 || n >= this.length) { return undefined }\n\n if (this.values) { return this.values[n] }\n\n const fn = this.distributor[this.dist]\n\n if (typeof fn === 'function') { return fn.apply(fn, [n, this.length, ...this.params]) }\n\n return undefined\n }\n\n /** Resolve the distribution to a static array\n * @param now If false, then set a flag to materialize on the scheduler. Default: materialize now\n */\n materialize (now = true) {\n if (now === false) { return this.materializeOnScheduler = true }\n\n this.values = this.toArray()\n }\n}\n\n/** Rehydrate an input range from a vanilla ES5 object to an appropriate rangelike object\n * @param obj Serialized job.data object (or JSON string)\n * @return as appropriate, a RangeObject, DistributionRange, MultiRangeObject, or array\n */\nfunction rehydrateRange (obj) {\n const { RemoteDataPattern } = __webpack_require__(/*! dcp/dcp-client/remote-data-pattern */ \"./src/dcp-client/remote-data-pattern.js\");\n const { RemoteDataSet } = __webpack_require__(/*! dcp/dcp-client/remote-data-set */ \"./src/dcp-client/remote-data-set.js\");\n\n if (typeof obj === 'string') {\n obj = JSON.parse(obj)\n }\n\n if (typeof obj === 'number') {\n return obj\n }\n\n if (obj instanceof RangeObject ||\n obj instanceof SparseRangeObject ||\n obj instanceof MultiRangeObject ||\n obj instanceof RemoteDataSet ||\n obj instanceof RemoteDataPattern ||\n obj instanceof DistributionRange) {\n return obj;\n }\n\n // If obj looks like a RemoteDataSet, make one of those\n if (RemoteDataSet.isProtoRemoteDataSetLike(obj)) {\n return new RemoteDataSet(obj)\n }\n \n // If obj looks like a RemoteDataPattern, make one of those\n if (RemoteDataPattern.isProtoRemoteDataPatternLike(obj)) {\n return new RemoteDataPattern(obj.pattern, obj.sliceCount)\n }\n\n // If obj is an iterable, coerce it to an array\n if (Symbol.iterator in Object(obj)) {\n return Array.from(obj)\n }\n \n // If obj looks like a SparseRangeObject, make one of those\n if (SparseRangeObject.isProtoSparseRangelike(obj))\n return new SparseRangeObject(obj);\n\n // If obj looks like a MultiRangeObject, make one of those\n if (MultiRangeObject.isProtoMultiRangelike(obj)) {\n return new MultiRangeObject(obj)\n }\n\n // If obj looks rangelike, make a RangeObject\n if (RangeObject.isProtoRangelike(obj)) {\n return new RangeObject(obj)\n }\n\n // If obj looks like a proto-distribution, make a DistributionRange\n if (DistributionRange.isProtoDistribution(obj)) {\n return new DistributionRange(obj)\n }\n\n throw new TypeError(`obj cannot be cast to any supported Rangelike object: ${JSON.stringify(obj)}`)\n}\n\nexports.SuperRangeObject = SuperRangeObject;\nexports.RangeObject = RangeObject;\nexports.MultiRangeObject = MultiRangeObject;\nexports.DistributionRange = DistributionRange;\nexports.SparseRangeObject = SparseRangeObject;\nexports.rehydrateRange = rehydrateRange;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/range-object.js?");
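Note: MultiRangeObject.nthValue() in the module above converts a flat slice index into one index per constituent range using a mixed-radix decomposition, with the right-most range varying fastest. A small sketch of just that index arithmetic, separated from the range classes:

// Sketch only -- the mixed-radix index decomposition used by MultiRangeObject.nthValue().
// lengths[i] is the length of the i-th range; returns one index per range.
function decomposeIndex(n, lengths) {
  const indexes = [];
  for (let r = lengths.length - 1; r >= 0; r--) {
    const idx = n % lengths[r];    // right-most range varies fastest
    indexes.unshift(idx);
    n = (n - idx) / lengths[r];
  }
  return indexes;
}

// e.g. for ranges of length 2 and 3, as in the MultiRangeObject(1..2, 1..3) example:
// decomposeIndex(2, [2, 3]) yields [0, 2], which maps to the tuple [1, 3].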
4253
+ eval("/**\n * @file range-object.js\n * @author Eddie Roosenmaallen, eddie@kingsds.network\n * Matthew Palma, mpalma@kingsds.network\n * Robert Mirandola, robert@kingsds.network\n * @date October 2018\n * July 2022\n * This module exports classes and methods for working with RangeObjects\n *\n * RangeObject:\n * new RangeObject(start, end[, step[, group]])\n * new RangeObject({ start, end[, step[, group]] })\n * new RangeObject('[object RangeObject {start},{end},{step},{group}]')\n *\n * In the first two forms, step and group are optional, defaulting to 1. In the string form, all paramteres are required\n *\n *\n * Methods:\n * toString() Returns a string descriptor, suitable to pass to a new constructor\n * toObject() Returns a vanilla ES object with the four parameters as properties\n * toArray() Resolve all values from the RangeObject, grouping if group > 1, as an array\n * nthValue(n) Look up a single value/group\n * slice([start[, end]]) Resolve a selection of values from start to end (exclusive).\n * Same semantics as Array.prototype.slice, including negative indexes\n * materialize(now) Resolve all values and store as an array (eg. for random distributions)\n *\n * Properties:\n * length Return the total number of values (or groups, if group > 1) generasted by the RangeObject\n * dist Name of the distributor function\n * values If present, the Distribution has been materialized\n * materializeOnScheduler If present and truthy, the distribution will be materialized on the scheduler before\n * instantiating tasks\n *\n * Examples:\n * bob = new RangeObject(1, 10)\n * bob.toArray() // [1,2,3,4,5,6,7,8,9,10]\n * bob.slice(-2) // [9,10]\n * fred = new RangeObject({ start:1, end: 1000, step: 3})\n * fred.toArray() // [1,4,7 (...) 994, 997, 1000]\n * fred.toObject() // { start: 1, end: 1000, step: 3, group: 1}\n * bill = new RO.RangeObject(0, 999, 3, 3)\n * rangeobject.js?1541083657962:42 Uncaught RangeError: Range must be divisible evenly into groups\n * at new RangeObject (rangeobject.js?1541083657962:42)\n * at <anonymous>:1:8\n * bill = new RO.RangeObject(0, 998, 3, 3)\n * bill.toArray() // [[0, 3, 6] (...), [990, 993, 996]]\n *\n * MultiRangeObject:\n * new MultiRangeObject(rol1, rol2, ... roln)\n * new MultiRangeObject([rol1, rol2, ... roln])\n * Each argument is either a RangeObject, an Array-like object (, or a Distribution object? NYI ~ER). 
The\n * values generated by the MRO will be a multi-dimentional array, where each element of the array is a vector\n * across all input ranges.\n */\n\n\n/**\n * @typedef {object} RangeLike\n * @property {number} start\n * @property {number} end\n * @property {number} nthValue\n */\n\n// Some utility functions to make validating the parameters easier\nconst assert = (premise, msg) => {\n if (!premise) throw new Error(msg);\n}\n\nconst assertNumeric = (val, msg) => {\n assert(typeof val === 'number', msg);\n assert(!Number.isNaN(val), msg);\n assert(Number.isFinite(val), msg);\n}\n\nconst RANGEOBJ_REGEX = /^\\[object RangeObject (-?\\d+(?:\\.\\d+)?),(-?\\d+(?:\\.\\d+)?),(-?\\d+(?:\\.\\d+)?),(\\d+|undefined)\\]$/;\n\n/** \n * Defines a consistent interface for each of the range object types\n * to inherit from, provides some array methods.\n * @access public\n */\nclass SuperRangeObject {\n constructor() {\n return new Proxy(this, {\n get: (target, name) => {\n if ((typeof name === 'string' || typeof name === 'number') && Number.isInteger(parseFloat(name))) {\n return target.nthValue(parseFloat(name));\n } else {\n return target[name];\n }\n }\n });\n }\n\n [Symbol.iterator]() {\n let index = 0;\n\n return {\n next: () => ({ value: this.nthValue(index++), done: index > this.length })\n };\n }\n\n get length() {\n return 0;\n }\n\n nthValue(n) {\n throw new Error(\"nthValue not overridden\");\n }\n\n toArray() {\n return this.slice()\n }\n\n /**\n * Generate array from range starting at value `start` and ending at value `end` via `nthValue`.\n * @param {number} [start=0] index to start slice\n * @param {number} [end] index to end slice, return rest of array if not provided.\n * @access public\n */\n slice(start, end) {\n if (typeof start === 'undefined') { start = 0 } else if (start < 0) { start += this.length }\n\n if (typeof end === 'undefined') { end = this.length } else if (end < 0) { end += (this.length) }\n\n if (end > this.length) { end = this.length }\n\n const arr = []\n for (let i = start; i < end; i++) { arr.push(this.nthValue(i)) }\n\n return arr\n }\n\n /**\n * Converts range to an Array and then calls `filter(...args)` on it.\n * @param {...any} args Same args as you would pass to Array#filter\n * @access public\n */\n filter(...args) {\n return this.toArray().filter(...args);\n }\n}\n\n/**\n * Range objects are vanilla ES objects used to describe value range sets for use by `compute.for()`.\n * The range must be increasing, i.e. `start` must be less than `end`.\n * Calculations made to derive the set of numbers in a range are carried out with `BigNumber`, \n * eg. arbitrary-precision, support. 
The numbers `Infinity` and `-Infinity` are not supported, and \n * the API does not differentiate between `+0` and `-0`.\n */\n/**\n * An object which represents a range of values.\n * @param {number|object} startOrObject Beginning of range, or object literal with `start` and `end` properties.\n * @param {number} end End of range\n * @param {number} [step=1] Step size in range\n * @param {number|undefined} [group] Groups in range\n * @access public\n * @extends SuperRangeObject\n * @example <caption>With implicit step size = 1.</caption>\n * let rangeObject = new RangeObject(0.5,3);\n * rangeObject.toArray();\n * // [ 0.5, 1.5, 2.5 ]\n * \n * @example <caption>With explicit step size.</caption>\n * let rangeObject = new RangeObject(1,9,3);\n * rangeObject.toArray();\n * // [ 1, 4, 7 ]\n * \n * @example <caption>With grouping.</caption>\n * let rangeObject = new RangeObject(1,9,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7 ] ]\n */\nclass RangeObject extends SuperRangeObject {\n \n constructor (start, end, step, group) {\n super();\n if (typeof start === 'string' && start.match(RANGEOBJ_REGEX)) {\n const parts = start.match(RANGEOBJ_REGEX)\n start = {\n start: parseFloat(parts[1]),\n end: parseFloat(parts[2]),\n step: parseFloat(parts[3]),\n group: (parts[4] === 'undefined'? undefined : parseFloat(parts[4]))\n }\n }\n\n if (typeof start === 'object') {\n this.start = start.start;\n this.end = start.end;\n this.step = start.step;\n this.group = start.group;\n } else {\n this.start = start;\n this.end = end;\n this.step = step;\n this.group = group;\n }\n\n assertNumeric(this.start, `Invalid start parameter \"${this.start}\", must be numeric and finite.`);\n assertNumeric(this.end, `Invalid end parameter \"${this.end}\", must be numeric and finite.`);\n\n // Ensure step moves in the correct direction for start-end (ie, negative if end < start)\n if (typeof this.step === 'undefined') {\n this.step = Math.sign(this.end - this.start);\n } else {\n assertNumeric(this.step, `Invalid step parameter \"${this.step}\", must be numeric and finite.`);\n if ((this.step === 0 && this.start !== this.end) || Math.sign(this.step) !== Math.sign(this.end - this.start)) {\n throw new Error('RangeObject step must approach end from start.');\n }\n }\n\n if (typeof this.group !== 'undefined') {\n // group must remain undefined if not provided because no grouping should occur if not provided.\n // As per spec, even if group is 1 it should group into arrays of 1 element\n assertNumeric(this.group, `Invalid group parameter \"${this.group}\", must be numeric and finite.`);\n assert(Number.isInteger(this.group), `Invalid group parameter \"${this.group}\", must be an integer.`);\n assert(this.group > 0, `Invalid group parameter \"${this.group}\", must be greater than zero.`);\n }\n }\n\n /**\n * @returns {boolean}\n */\n static isRangelike (r) {\n if (r instanceof RangeObject) { return true }\n if (typeof r === 'object' &&\n typeof r.start === 'number' &&\n typeof r.end === 'number' &&\n typeof r.nthValue === 'function') { return true }\n\n return false\n }\n\n /**\n * @returns {boolean}\n */\n static isRangeObject(r) {\n if (r instanceof RangeObject) { return true }\n\n return false\n }\n\n /**\n * Test whether a value can be passed to the RangeObject constructor\n * @param r Value to test\n * @param strict Optional. 
Truthy to disallow objects which already look Rangelike\n */\n static isProtoRangelike (r, strict = true) {\n if (typeof r === 'object' &&\n typeof r.start === 'number' &&\n typeof r.end === 'number') { return true }\n\n if (typeof r === 'string' &&\n r.match(RANGEOBJ_REGEX)) { return true }\n\n if (!strict && RangeObject.isRangelike(r)) { return true }\n\n return false\n }\n\n /**\n * Create string representation of range: [object RangeObject start,end,step,group]\n * @access public\n */\n toString () {\n return `[object RangeObject ${this.start},${this.end},${this.step},${this.group}]`\n }\n\n /**\n * Create object literal for range with properties: start, end, step, and group.\n * @access public\n */\n toObject () {\n return {\n start: this.start,\n end: this.end,\n step: this.step,\n group: this.group\n }\n }\n\n /**\n * Return nth value in range\n * @param {number} n\n * @access public\n * @example\n * let rangeObject = new RangeObject(1,10,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7, 10 ] ]\n * rangeObject.nthValue(1);\n * // [ 7, 10 ]\n */\n nthValue(n) {\n /**\n * `>=` since the value at index 7 in an array that's of length 7 is outside\n * its range\n */\n if (n < 0 || n >= this.length) {\n return undefined;\n }\n\n if (typeof this.group !== 'undefined') {\n const start = (this.group * this.step * n) + this.start\n const arr = []\n\n for (let i = 0; i < this.group && i + this.group * n < this.stepCount; i++) {\n arr.push(start + (i * this.step))\n }\n\n return arr;\n }\n\n return this.start + (n * this.step);\n }\n\n /**\n * Return max value in range\n * @access public\n * @returns {number}\n * @example\n * let rangeObject = new RangeObject(1,10,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7, 10 ] ]\n * rangeObject.max;\n * // 10\n */\n get max () {\n if (typeof this.group === 'undefined' && this.step === 1) { return this.end }\n\n let tail\n if (typeof this.group === 'undefined') {\n [ tail ] = this.slice(-1)\n } else {\n [ tail ] = this.slice(-1)[0].slice(-1)\n }\n return tail\n }\n\n /**\n * Boolean indicating whether all groups are filled.\n * Only defined for RangeObjects that group results.\n * @access public\n * @returns {boolean}\n * @example <caption>With remainder</caption>\n * let rangeObject = new RangeObject(1,9,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7 ] ]\n * rangeObject.hasRemainder;\n * // true\n * @example <caption>Without remainder</caption>\n * let rangeObject = new RangeObject(1,10,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7, 10 ] ]\n * rangeObject.hasRemainder;\n * // true\n */\n get hasRemainder () {\n if (typeof this.group === 'undefined') { return false }\n const groups = this.stepCount / this.group;\n\n return (groups !== Math.floor(groups));\n }\n\n /**\n * Number of elements in range, or number of groups if grouped.\n * @access public\n * @returns {number}\n * @example <caption>Without grouping</caption>\n * let rangeObject = new RangeObject(1,10,3);\n * rangeObject.toArray();\n * // [ 1, 4, 7, 10 ]\n * rangeObject.length;\n * // 4\n * @example <caption>With grouping</caption>\n * let rangeObject = new RangeObject(1,9,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7 ] ]\n * rangeObject.length;\n * // 2\n */\n get length () {\n return Math.ceil(this.stepCount / (this.group || 1));\n }\n\n /**\n * Number of steps in range (sort of like number of elements, except grouping is no longer relevant).\n * @access public\n * @returns {number}\n * @example <caption>Without grouping</caption>\n * let rangeObject 
= new RangeObject(1,10,3);\n * rangeObject.toArray();\n * // [ 1, 4, 7, 10 ]\n * rangeObject.stepCount;\n * // 4\n * @example <caption>With grouping</caption>\n * let rangeObject = new RangeObject(1,9,3,2);\n * rangeObject.toArray();\n * // [ [ 1, 4 ], [ 7 ] ]\n * rangeObject.length;\n * // 3\n */\n get stepCount () {\n if (this.step === 0) return 1;\n return Math.floor(Math.abs((this.end - this.start) / this.step)) + 1;\n }\n}\n\n/**\n * A sparse range object contains many {@link RangeObject}s. The range objects are stored as arrays and then are\n * concatenated into one array in the order that they were supplied to the constructor.\n * @param {RangeObject|RangeObject[]|object} arg - First range object, or array of range objects, or object with `sparse` key containing an array of range objects.\n * @param {RangeObject} rangeObject - If first argument is a RangeObject, subsquent arguments are range objects too.\n * @access public\n * @extends SuperRangeObject\n * @example\n * r0 = new RangeObject(1,2)\n * r1 = new RangeObject(1,3)\n * sro = new SparseRangeObject(r0, r1)\n * // [ 1, 2, 1, 2, 3]\n */\nclass SparseRangeObject extends SuperRangeObject {\n constructor() {\n super();\n let sparse = [];\n\n if (arguments.length === 1 && arguments[0].ranges) \n sparse = [...arguments[0].ranges];\n else \n sparse = [...arguments];\n \n if (sparse instanceof SparseRangeObject)\n throw new Error('Argument is of type sparse range object');\n \n // If sparse key is passed, make sure the arguments are only the range objects (compute.for() implementation) \n if (sparse[0].sparse)\n sparse = sparse[0].sparse;\n \n sparse.map( r =>\n {\n if (!RangeObject.isProtoRangelike(r))\n throw new Error('Argument is not of type RangeObject');\n })\n \n if (sparse[0].group)\n {\n for (let i = 0; i < sparse.length; i++)\n {\n if (sparse[i].group !== sparse[0].group)\n throw new Error('Range Object has different dimension than other range objects');\n }\n } \n \n this.sparse = true;\n this.ranges = sparse.map(r => \n { \n return new RangeObject(r);\n })\n \n }\n \n /**\n * Test whether a value can be passed to the SparseRangeObject constructor\n * @param r Value to test\n * @param strict Optional. Truthy to disallow objects which already look Rangelike\n */\n static isProtoSparseRangelike (r, strict = true) \n {\n if (typeof r === 'object' && r.sparse) { return true; }\n return false;\n }\n \n /**\n * Return nth value in range\n * @param {number} n\n * @access public\n * @example\n * let sparseRangeObject = new SparseRangeObject(1,3,1);\n * rangeObject.toArray();\n * // [ 1, 2, 3]\n * rangeObject.nthValue(1);\n * // 2\n */\n nthValue(n) \n {\n if (n < 0 || n >= this.length) { return undefined }\n \n let count = 0;\n let rangeCount = 0;\n while (count !== n)\n {\n if (count <= n)\n {\n for (let i = 0; i < this.ranges[rangeCount].length; i++)\n {\n if (count === n)\n return this.ranges[rangeCount][i];\n else\n count++;\n }\n }\n rangeCount++;\n }\n return this.ranges[rangeCount][0];\n }\n \n /**\n * Create object literal with `sparse` property and `range` property containing array of range objects.\n */\n toObject () {\n \n const obj = {\n sparse: true,\n ranges: this.ranges\n }\n return obj;\n }\n \n get length()\n {\n let len = 0;\n this.ranges.forEach((r) => len += r.length);\n\n return len;\n }\n \n}\n\n/**\n * Range objects are vanilla ES objects used to describe value range sets for use by `compute.for()`. \n * Calculations made to derive the set of numbers in a range are carried out with `BigNumber`, \n * eg. 
arbitrary-precision, support. The numbers `Infinity` and `-Infinity` are not supported, and \n * the API does not differentiate between `+0` and `-0`.\n */\nclass MultiRangeObject extends SuperRangeObject {\n /**\n * A multi-range object contains many {@link RangeObject}s. They are iterated over \n * with the fastest moving index going over the right-most range object in array order. Each element\n * of a multi range is a tuple of values from constituent ranges.\n * @param {RangeObject|RangeObject[]|object} arg - First range object, or array of range objects, or object with `ranges` key containing an array of range objects.\n * @param {RangeObject} rangeObject - If first argument is a RangeObject, subsquent arguments are range objects too.\n * @access public\n * @extends SuperRangeObject\n * @example\n * r0 = new RangeObject(1,2)\n * r1 = new RangeObject(1,3)\n * mro = new MultiRangeObject(r0, r1)\n * mro.toArray()\n * // [ [ 1, 1 ], [ 1, 2 ], [ 1, 3 ], [ 2, 1 ], [ 2, 2 ], [ 2, 3 ] ]\n */\n constructor () {\n super();\n var ranges = []\n\n if (arguments.length === 1 && typeof arguments[0] === 'string') {\n const inputs = JSON.parse(arguments[0])\n if (Array.isArray(inputs)) {\n ranges = inputs\n } else if (inputs.ranges) {\n ranges = inputs.ranges\n } else {\n ranges = [inputs]\n }\n } else if (arguments.length === 1 && Array.isArray(arguments[0])) {\n ranges = [...arguments[0]]\n } else if (arguments.length === 1 && !!arguments[0].ranges) {\n ranges = [...arguments[0].ranges]\n } else {\n ranges = [...arguments]\n }\n\n this.ranges = ranges.map(r => {\n if (RangeObject.isRangelike(r)) { return r }\n if (RangeObject.isRangeObject(r)) { return r }\n if (DistributionRange.isDistribution(r)) { return r }\n if (RangeObject.isProtoRangelike(r)) { return new RangeObject(r) }\n if (DistributionRange.isProtoDistribution(r)) { return new DistributionRange(r) }\n\n return Array.isArray(r) ? r : [r]\n })\n }\n\n /**\n * Test whether a value can be passed to the MultiRangeObject constructor\n * @param r Value to test\n * @param strict Optional. Truthy to disallow objects which already look Rangelike\n */\n static isProtoMultiRangelike (r, strict = true) {\n if ((typeof r === 'object') &&\n Array.isArray(r.ranges) &&\n !r.sparse)\n {\n return true;\n }\n\n return false\n }\n\n /**\n * Create string representation of this MultiRangeObject\n * @access public\n * @example\n * \"[object MultiRangeObject ' + this.ranges.length + ']\"\n */\n toString () {\n return '[object MultiRangeObject ' + this.ranges.length + ']'\n }\n\n /**\n * Create object literal with `ranges` property containing array of range objects.\n */\n toObject () {\n return { ranges: this.ranges }\n }\n\n /**\n * Returns a tuple of values from the ranges given by this multi range object.\n * @param {number} n index of multi-range tuple to return\n * @access public\n * @example\n * r0 = new RangeObject(1,2)\n * r1 = new RangeObject(1,3)\n * mro = new MultiRangeObject(r0, r1)\n * mro.toArray()\n * // [ [ 1, 1 ], [ 1, 2 ], [ 1, 3 ], [ 2, 1 ], [ 2, 2 ], [ 2, 3 ] ]\n * mro.nthValue(2)\n * // [ 1, 3 ]\n */\n nthValue (n) {\n if (n < 0 || n >= this.length) { return undefined }\n\n const indexes = []\n\n for (let r = (this.ranges.length - 1); r >= 0; r--) {\n const idx = n % this.ranges[r].length\n\n indexes.unshift(idx)\n\n n -= idx\n n /= this.ranges[r].length\n }\n\n const values = []\n\n for (let i = 0; i < indexes.length; i++) {\n values[i] = Array.isArray(this.ranges[i]) ? 
this.ranges[i][indexes[i]] : this.ranges[i].nthValue(indexes[i])\n }\n\n return values\n }\n\n /**\n * Boolean indicating whether any of the ranges in this multi-range object has a remainder. See {@link RangeObject#hasRemainder}.\n * @access public\n * @returns {boolean}\n */\n get hasRemainder () {\n for (let r of this.ranges) {\n if (r.hasRemainder) { return true }\n }\n\n return false\n }\n\n get length () {\n let len = 1\n\n this.ranges.forEach((r) => len *= r.length)\n\n return len\n }\n}\n\n// DistributionRange object wraps a distributing function into a RangeObject-like API\n// which can be dropped directly into a MultiRangeObject to generate input slices\nclass DistributionRange extends SuperRangeObject {\n constructor (n, dist, ...params) {\n super();\n\n this.distributor = (__webpack_require__(/*! ./stats-ranges */ \"./src/dcp-client/stats-ranges.js\").distributor);\n\n // If argv[0] is a string formatted as DistributionRange.toString(), then unpack it\n if (typeof n === 'string' && n.match(/^\\[object DistributionRange (\\w+?)\\((\\d+?)(?:,(.+?))?\\)\\]$/)) {\n const parts = n.match(/^\\[object DistributionRange (\\w+?)\\((\\d+?)(?:,(.+?))?\\)\\]$/)\n dist = parts[1]\n n = parseInt(parts[2])\n params = (parts[3] || '').split(',').map(e => parseFloat(e))\n }\n\n // If argv[0] is a string describing a DistributionRange, then unpack it\n if (typeof n === 'string' && n.match(/^(\\w+?)\\((\\d+?)(?:,(.+?))?\\)$/)) {\n const parts = n.match(/^(\\w+?)\\((\\d+?)(?:,(.+?))?\\)$/)\n dist = parts[1]\n n = parseInt(parts[2])\n params = (parts[3] || '').split(',').map(e => parseFloat(e))\n }\n\n // If argv[0] is a object of the right shape, then unpack it\n if (typeof n === 'object' &&\n typeof n.length === 'number' &&\n typeof n.dist === 'string' &&\n Array.isArray(n.params)) {\n // console.log('Unpacking proto-object', n)\n dist = n.dist\n params = n.params\n n = n.length\n if (Array.isArray(n.values)) { this.values = n.values }\n if (typeof n.materializeOnScheduler === 'boolean') { this.materializeOnScheduler = n.materializeOnScheduler }\n }\n\n Object.defineProperty(this, 'length', {\n value: n,\n enumerable: true\n })\n Object.defineProperty(this, 'dist', {\n value: dist,\n enumerable: true\n })\n Object.defineProperty(this, 'params', {\n value: params || [],\n enumerable: true\n })\n\n if (typeof this.distributor[dist] !== 'function') {\n // console.log({n,dist,params})\n throw new TypeError('dist param must point to an exported distributing function')\n }\n }\n\n /**\n * @returns {boolean}\n */\n static isDistribution (d) {\n return d instanceof DistributionRange\n }\n\n static isDistributionLike (d) {\n if (DistributionRange.isDistribution(d)) { return true }\n if (typeof d === 'object' &&\n typeof d.nthValue === 'function' &&\n typeof d.slice === 'function') { return true }\n\n return false\n }\n\n static isProtoDistribution (d) {\n if (typeof d === 'string' && d.match(/^\\[object DistributionRange (\\w+?)\\((\\d+?)(?:,(.+?))?\\)\\]$/)) { return true }\n if (typeof d === 'string' && d.match(/^(\\w+?)\\((\\d+?)(?:,(.+?))?\\)$/)) { return true }\n if (typeof d === 'object' &&\n typeof d.length === 'number' &&\n typeof d.dist === 'string' &&\n Array.isArray(d.params)) { return true }\n\n return false\n }\n\n toString () {\n return `[object DistributionRange ${this.dist}(${[this.length, ...this.params].join()})]`\n }\n\n toObject () {\n this.materialize();\n return {\n length: this.length,\n dist: this.dist,\n params: this.params,\n materializeOnScheduler: this.materializeOnScheduler || 
undefined,\n values: this.values || undefined\n }\n }\n\n nthValue (n) {\n if (n < 0 || n >= this.length) { return undefined }\n\n if (this.values) { return this.values[n] }\n\n const fn = this.distributor[this.dist]\n\n if (typeof fn === 'function') { return fn.apply(fn, [n, this.length, ...this.params]) }\n\n return undefined\n }\n\n /** Resolve the distribution to a static array\n * @param now If false, then set a flag to materialize on the scheduler. Default: materialize now\n */\n materialize (now = true) {\n if (now === false) { return this.materializeOnScheduler = true }\n\n this.values = this.toArray()\n }\n}\n\n/** Rehydrate an input range from a vanilla ES5 object to an appropriate rangelike object\n * @param obj Serialized job.data object (or JSON string)\n * @return as appropriate, a RangeObject, DistributionRange, MultiRangeObject, or array\n */\nfunction rehydrateRange (obj) {\n const { RemoteDataPattern } = __webpack_require__(/*! dcp/dcp-client/remote-data-pattern */ \"./src/dcp-client/remote-data-pattern.js\");\n const { RemoteDataSet } = __webpack_require__(/*! dcp/dcp-client/remote-data-set */ \"./src/dcp-client/remote-data-set.js\");\n\n if (typeof obj === 'string') {\n obj = JSON.parse(obj)\n }\n\n if (typeof obj === 'number') {\n return obj\n }\n\n if (obj instanceof RangeObject ||\n obj instanceof SparseRangeObject ||\n obj instanceof MultiRangeObject ||\n obj instanceof RemoteDataSet ||\n obj instanceof RemoteDataPattern ||\n obj instanceof DistributionRange) {\n return obj;\n }\n\n // If obj looks like a RemoteDataSet, make one of those\n if (RemoteDataSet.isProtoRemoteDataSetLike(obj)) {\n return new RemoteDataSet(obj)\n }\n \n // If obj looks like a RemoteDataPattern, make one of those\n if (RemoteDataPattern.isProtoRemoteDataPatternLike(obj)) {\n return new RemoteDataPattern(obj.pattern, obj.sliceCount)\n }\n\n // If obj is an iterable, coerce it to an array\n if (Symbol.iterator in Object(obj)) {\n return Array.from(obj)\n }\n \n // If obj looks like a SparseRangeObject, make one of those\n if (SparseRangeObject.isProtoSparseRangelike(obj))\n return new SparseRangeObject(obj);\n\n // If obj looks like a MultiRangeObject, make one of those\n if (MultiRangeObject.isProtoMultiRangelike(obj)) {\n return new MultiRangeObject(obj)\n }\n\n // If obj looks rangelike, make a RangeObject\n if (RangeObject.isProtoRangelike(obj)) {\n return new RangeObject(obj)\n }\n\n // If obj looks like a proto-distribution, make a DistributionRange\n if (DistributionRange.isProtoDistribution(obj)) {\n return new DistributionRange(obj)\n }\n\n throw new TypeError(`obj cannot be cast to any supported Rangelike object: ${JSON.stringify(obj)}`)\n}\n\nexports.SuperRangeObject = SuperRangeObject;\nexports.RangeObject = RangeObject;\nexports.MultiRangeObject = MultiRangeObject;\nexports.DistributionRange = DistributionRange;\nexports.SparseRangeObject = SparseRangeObject;\nexports.rehydrateRange = rehydrateRange;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/range-object.js?");
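The range-object module above is diff context (unchanged between 4.2.13 and 4.2.15), but it documents most of its behaviour through JSDoc @example blocks. The sketch below collects those examples into a runnable form; the require path is an assumption (inside the bundle the module is ./src/dcp-client/range-object.js), while the constructor arguments and the nthValue()/length/stepCount results are taken directly from the JSDoc above.

// Assumed import path; inside the bundle this module is ./src/dcp-client/range-object.js
const { RangeObject, MultiRangeObject } = require('dcp-client/range-object');

// RangeObject(start, end, step, group): 1..10 in steps of 3, grouped in pairs.
const ro = new RangeObject(1, 10, 3, 2);
console.log(ro.toArray());    // [ [ 1, 4 ], [ 7, 10 ] ]
console.log(ro.nthValue(1));  // [ 7, 10 ]
console.log(ro.length);       // 2  (number of groups)
console.log(ro.stepCount);    // 4  (number of steps, grouping ignored)

// MultiRangeObject iterates its constituent ranges with the right-most range
// moving fastest; each element is a tuple of one value per range.
const mro = new MultiRangeObject(new RangeObject(1, 2), new RangeObject(1, 3));
console.log(mro.length);      // 6
console.log(mro.nthValue(2)); // [ 1, 3 ]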
4254
4254
 
4255
4255
  /***/ }),
4256
4256
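rehydrateRange() is the module's entry point for turning a serialized job.data value (vanilla object or JSON string) back into the appropriate rangelike object. Below is a minimal sketch of that dispatch, assuming the same import path as above and assuming the inputs do not match the RemoteDataSet/RemoteDataPattern shapes (those checks run first but are defined in modules not shown here).

// Assumed import path; inside the bundle this module is ./src/dcp-client/range-object.js
const { rehydrateRange, RangeObject, MultiRangeObject } = require('dcp-client/range-object');

// A plain { start, end } object satisfies RangeObject.isProtoRangelike() and is
// rehydrated as a RangeObject.
const r = rehydrateRange({ start: 1, end: 10, step: 3 });
console.log(r instanceof RangeObject);       // true

// An object with a `ranges` array and no `sparse` flag satisfies
// MultiRangeObject.isProtoMultiRangelike() and becomes a MultiRangeObject.
const m = rehydrateRange({ ranges: [{ start: 1, end: 2 }, { start: 1, end: 3 }] });
console.log(m instanceof MultiRangeObject);  // true
console.log(m.toString());                   // [object MultiRangeObject 2]

// JSON strings are parsed first; any other iterable is coerced with Array.from().
console.log(rehydrateRange('[10, 20, 30]')); // [ 10, 20, 30 ]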
 
@@ -4300,7 +4300,7 @@ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_mod
4300
4300
  \*************************************************/
4301
4301
  /***/ ((module, __unused_webpack_exports, __webpack_require__) => {
4302
4302
 
4303
- eval("/**\n * @file /src/schedmsg/schedmsg-web.js\n * @author Ryan Rossiter, ryan@kingsds.network\n * @date March 2020\n *\n * This is the SchedMsg implementation for commands that are browser-specific\n * or have browser-specific behaviour.\n */\n\nconst { SchedMsg } = __webpack_require__(/*! ./schedmsg */ \"./src/dcp-client/schedmsg/schedmsg.js\");\n\nclass SchedMsgWeb extends SchedMsg {\n constructor(worker) {\n super(worker);\n this.modal = null;\n\n this.registerHandler('announce', this.onAnnouncement.bind(this));\n this.registerHandler('openPopup', this.onOpenPopup.bind(this));\n this.registerHandler('reload', this.onReload.bind(this));\n }\n\n onAnnouncement({ message }) {\n if (this.modal) {\n this.modal.close();\n }\n\n this.modal = window.userInterface.alert('Announcement', '' /* subtitle */, message,\n /* onClose */ () => this.modal = null);\n }\n\n onOpenPopup({ href }) {\n window.open(href);\n }\n\n onReload() {\n const hash = window.location.hash;\n\n let newUrl = window.location.href.replace(/#.*/, '');\n newUrl += (newUrl.indexOf('?') === -1 ? '?' : '&');\n newUrl += 'dcp=8f44464faf259aae5ef214f8752f7ce8728dd5f0,' + Date.now() + hash;\n\n window.location.replace(newUrl);\n }\n}\n\nObject.assign(module.exports, {\n SchedMsgWeb\n});\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/schedmsg/schedmsg-web.js?");
4303
+ eval("/**\n * @file /src/schedmsg/schedmsg-web.js\n * @author Ryan Rossiter, ryan@kingsds.network\n * @date March 2020\n *\n * This is the SchedMsg implementation for commands that are browser-specific\n * or have browser-specific behaviour.\n */\n\nconst { SchedMsg } = __webpack_require__(/*! ./schedmsg */ \"./src/dcp-client/schedmsg/schedmsg.js\");\n\nclass SchedMsgWeb extends SchedMsg {\n constructor(worker) {\n super(worker);\n this.modal = null;\n\n this.registerHandler('announce', this.onAnnouncement.bind(this));\n this.registerHandler('openPopup', this.onOpenPopup.bind(this));\n this.registerHandler('reload', this.onReload.bind(this));\n }\n\n onAnnouncement({ message }) {\n if (this.modal) {\n this.modal.close();\n }\n\n this.modal = window.userInterface.alert('Announcement', '' /* subtitle */, message,\n /* onClose */ () => this.modal = null);\n }\n\n onOpenPopup({ href }) {\n window.open(href);\n }\n\n onReload() {\n const hash = window.location.hash;\n\n let newUrl = window.location.href.replace(/#.*/, '');\n newUrl += (newUrl.indexOf('?') === -1 ? '?' : '&');\n newUrl += 'dcp=c96b8086bdb343ed36ff35e133755d3f21613609,' + Date.now() + hash;\n\n window.location.replace(newUrl);\n }\n}\n\nObject.assign(module.exports, {\n SchedMsgWeb\n});\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/schedmsg/schedmsg-web.js?");
4304
4304
 
4305
4305
  /***/ }),
4306
4306
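The only change to schedmsg-web.js in this release is the bundle revision embedded in the `dcp=` cache-buster that SchedMsgWeb.onReload() appends before reloading the page (8f44464f... in 4.2.13 becomes c96b8086... in 4.2.15). The helper below is illustrative, not part of the module, but it mirrors the URL construction in onReload(); `rev` stands in for the build-time revision string.

// Illustrative helper mirroring SchedMsgWeb.onReload(); not exported by the bundle.
function buildReloadUrl(href, hash, rev) {
  let newUrl = href.replace(/#.*/, '');                // drop any #fragment
  newUrl += (newUrl.indexOf('?') === -1 ? '?' : '&');  // start or extend the query string
  newUrl += 'dcp=' + rev + ',' + Date.now() + hash;    // cache-buster plus original fragment
  return newUrl;
}

// e.g. "https://example.com/worker/?dcp=c96b8086...,1700000000000#status"
console.log(buildReloadUrl('https://example.com/worker/#status', '#status', 'c96b8086...'));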
 
@@ -4445,7 +4445,7 @@ eval("/**\n * @file node-localExec.js Node-specific support for cre
4445
4445
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4446
4446
 
4447
4447
  "use strict";
4448
- eval("/**\n * @file This module implements the Worker API, used to create workers for earning DCCs.\n * @author Ryan Rossiter <ryan@kingsds.network>\n * Paul <paul@kingsds.network>\n * @date May 2020\n * June, July 2022\n * \n * @module dcp/worker\n * @access public\n */\n// @ts-check\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { SchedMsg } = __webpack_require__(/*! dcp/dcp-client/schedmsg */ \"./src/dcp-client/schedmsg/index.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { localStorage } = __webpack_require__(/*! dcp/common/dcp-localstorage */ \"./src/common/dcp-localstorage.js\");\nconst { confirmPrompt } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { Keystore, Address } = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\n\n\n// To use Supervisor2 set the environment variable `USE_SUPERVISOR2`.\nconst USE_SUPERVISOR2 = Boolean((__webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\").getenv)('USE_SUPERVISOR2'));\nconst { Supervisor } = USE_SUPERVISOR2 ? __webpack_require__(/*! ./supervisor2 */ \"./src/dcp-client/worker/supervisor2/index.js\") : __webpack_require__(/*! ./supervisor */ \"./src/dcp-client/worker/supervisor.js\");\n\nconst DISABLE_WORKER_CACHE_KEY = 'disable_worker';\n\n/** @typedef {import('./sandbox').SandboxOptions} SandboxOptions */\n/** @typedef {import('../wallet/keystore').Keystore} Keystore */\n\n/**\n * @access public\n * @typedef {object} SupervisorOptions\n * @property {Address} paymentAddress - Address to deposit earned funds into\n * @property {Keystore} identity - Keystore to use as the supervisor's identity\n * @property {string[]} [jobAddresses=[]] - If set, the supervisor will only fetch work for the provided jobIDs\n * @property {boolean} [localExec=false] - If true, fetched work will not be filtered by compute groups.\n * @property {boolean} [priorityOnly=false] - Whether to only work on priority jobs, i.e. 
become idle if `jobAddresses` is empty.\n * @property {SandboxOptions} [sandboxOptions] - Options that will be passed to the Sandbox constructor\n * @property {number} [watchdogInterval] - Number of ms between watchdog cycles, defaults to dcpConfig tuning param\n * @property {object[]} [computeGroups] - The compute group descriptors the worker will accept jobs from (+ optionally the default compute group)\n * @property {object} [minimumWage] - The minimum payout per slice the worker will accept from a job.\n * @property {string[]} [allowedOrigins] - origins for fetching data URIs that are allowed\n * @property {boolean} [leavePublicGroup=false] - Don't fetch slices from public compute group.\n * @property {object} [schedulerConfig] - Overrides for dcpConfig.scheduler.\n * @property {number} [maxWorkingSandboxes] - Max number of concurrently working sandboxes\n * @property {{cpu: number, gpu: number}} [cores] - The number of CPU vCores and GPU devices available for compute.\n * @property {{cpu: number, gpu: number}} [targetLoad] - The proportion of the cores.cpu and cores.gpu to load.\n */\n\nfunction disableWorker() {\n localStorage.setItem(DISABLE_WORKER_CACHE_KEY, true);\n}\n\n/**\n * Fired when the worker begins fetching slices from the scheduler.\n * @access public\n * @event Worker#fetchStart\n */\nclass Worker extends EventEmitter {\n /**\n * Returns a new Worker instance.\n * @access public\n * @param {module:dcp/worker~SupervisorOptions} supervisorOptions \n */\n constructor(supervisorOptions) {\n super('Worker');\n /**\n * @type {boolean}\n * @access public\n */\n this.working = false;\n /**\n * @type {SchedMsg}\n * @access public\n */\n this.schedMsg = new SchedMsg(this);\n /**\n * @type {Supervisor}\n * @access public\n */\n this.supervisor = new Supervisor(this, supervisorOptions);\n \n debugging() && console.debug('Worker supervisorOptions:', supervisorOptions);\n\n this.supervisor.on('fetchingTask', () => this.emit('fetchStart'));\n this.supervisor.on('fetchedTask', (fetchedSlicesCount) => this.emit('fetch', fetchedSlicesCount));\n this.supervisor.on('fetchedTask', (fetchedSlicesCount) => this.emit('fetchEnd', fetchedSlicesCount));\n this.supervisor.on('fetchTaskFailed', (error) => this.emit('fetchEnd', error));\n this.supervisor.on('fetchTaskFailed', (error) => this.emit('fetchError', error));\n\n this.supervisor.on('submittingResults', () => this.emit('submitStart'));\n this.supervisor.on('submittedResult', () => this.emit('submitEnd'));\n this.supervisor.on('submitResultsFailed', (error) => this.emit('submitEnd', error));\n this.supervisor.on('submitResultsFailed', (error) => this.emit('submitError', error));\n this.supervisor.on('submittedResult', () => this.emit('submit'));\n \n this.supervisor.on('dccCredit', (event) => this.emit('payment', event));\n this.supervisor.on('dccNoCredit', (event) => this.emit('payment', event));\n\n this.supervisor.on('sandboxReady', (sandbox) => this.emit('sandbox', sandbox));\n }\n\n /**\n * Disables worker instances from being started. 
The user will need to manually intervene to re-enable workers.\n * \n * @access public\n */\n static disableWorker() {\n disableWorker();\n }\n\n /**\n * Starts the worker.\n * \n * @access public\n */\n async start() {\n if (this.working) throw new Error('Cannot start worker: Already working.');\n\n if (localStorage.getItem(DISABLE_WORKER_CACHE_KEY)) {\n await confirmPrompt(`Worker has been disabled by the DCP Security Team; check the @DC_Protocol Twitter feed for more information before continuing.`)\n if (await confirmPrompt('Are you sure you would like to restart the worker?')) {\n localStorage.removeItem(DISABLE_WORKER_CACHE_KEY);\n console.log(\"Starting worker...\");\n } else {\n return;\n }\n }\n\n this.working = true;\n await this.supervisor.work();\n await this.schedMsg.start();\n this.emit('start');\n }\n\n /**\n * Stops the worker.\n * \n * @access public\n * @param {boolean} [immediate=false] Whether the worker should stop imediately or allow the current slices to finish.\n */\n async stop(immediate=false) {\n if (!this.working) throw new Error('Cannot stop worker: Already stopped.');\n \n this.working = false;\n await this.schedMsg.stop();\n await this.supervisor.stopWork(immediate);\n this.emit('stop');\n }\n \n /**\n * Set payment address\n * @param {Address} addr - new address to be used\n */\n setPaymentAddress(addr)\n {\n assert(addr instanceof Address);\n this.supervisor.paymentAddress = addr;\n this.emit('paymentAddressChange', addr);\n }\n \n /**\n * Get payment address\n * @returns {Address} - current payment address.\n */\n getPaymentAddress()\n {\n return this.supervisor.paymentAddress;\n }\n \n /**\n * Set identity keystore.\n * Note: connections to the scheduler will only use the new identity if they are closed and recreated.\n * @param {Keystore} ks - new identity to be used\n */\n setIdentity(ks)\n {\n assert(ks instanceof Keystore);\n \n /* compatibility for supervisor 1 */\n if (this.supervisor.setDefaultIdentityKeystore)\n this.supervisor.setDefaultIdentityKeystore(ks);\n else\n this.supervisor.identity = ks;\n this.emit('identityChange', ks);\n }\n \n /**\n * Get identity keystore\n * @returns {Keystore} - the current identity keystore\n */\n getIdentity()\n {\n /* compatibiliy for supervisor 1 */\n if (this.supervisor._identityKeystore)\n return this.supervisor._identityKeystore;\n else\n return this.supervisor.identity;\n }\n \n /**\n * Set max working sandboxes\n * @param {number} max - new max working sandboxes\n */\n setMaxWorkingSandboxes(max)\n {\n this.supervisor.maxWorkingSandboxes = max;\n this.emit('maxSandboxesChange', max);\n }\n \n /**\n * Get max working sandboxes\n * @returns {number} - current max working sandboxes\n */\n getMaxWorkingSandboxes()\n {\n return this.supervisor.maxWorkingSandboxes;\n }\n \n /**\n * Check if there are any working sandboxes within the worker\n * @returns {Boolean} - true if there are working sandboxes.\n */\n hasWorkingSandboxes()\n {\n /* compatibility for supervisor 1 */\n if (this.supervisor.workingSandboxes)\n return this.supervisor.workingSandboxes.length > 0\n else\n return this.supervisor.workingSandboxCount() > 0;\n }\n}\n\nexports.Worker = Worker;\nexports.Supervisor = Supervisor;\nexports.disableWorker = disableWorker;\n\nexports.version = {\n api: '1.0.0',\n provides: '1.0.0' /* dcpConfig.scheduler.compatibility.operations.work */\n};\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/index.js?");
4448
+ eval("/**\n * @file This module implements the Worker API, used to create workers for earning DCCs.\n * @author Ryan Rossiter <ryan@kingsds.network>\n * Paul <paul@kingsds.network>\n * @date May 2020\n * June, July 2022\n * \n * @module dcp/worker\n * @access public\n */\n// @ts-check\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { SchedMsg } = __webpack_require__(/*! dcp/dcp-client/schedmsg */ \"./src/dcp-client/schedmsg/index.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { localStorage } = __webpack_require__(/*! dcp/common/dcp-localstorage */ \"./src/common/dcp-localstorage.js\");\nconst { confirmPrompt } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { Keystore, Address } = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\n\n\n// To use Supervisor2 set the environment variable `USE_SUPERVISOR2`.\nconst USE_SUPERVISOR2 = Boolean((__webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\").getenv)('USE_SUPERVISOR2'));\nconst { Supervisor } = USE_SUPERVISOR2 ? __webpack_require__(/*! ./supervisor2 */ \"./src/dcp-client/worker/supervisor2/index.js\") : __webpack_require__(/*! ./supervisor */ \"./src/dcp-client/worker/supervisor.js\");\n\nconst DISABLE_WORKER_CACHE_KEY = 'disable_worker';\n\n/** @typedef {import('./sandbox').SandboxOptions} SandboxOptions */\n/** @typedef {import('../wallet/keystore').Keystore} Keystore */\n\n/**\n * @access public\n * @typedef {object} SupervisorOptions\n * @property {Address} paymentAddress - Address to deposit earned funds into\n * @property {Keystore} identity - Keystore to use as the supervisor's identity\n * @property {string[]} [jobAddresses=[]] - If set, the supervisor will only fetch work for the provided jobIDs\n * @property {boolean} [localExec=false] - If true, fetched work will not be filtered by compute groups.\n * @property {boolean} [priorityOnly=false] - Whether to only work on priority jobs, i.e. 
become idle if `jobAddresses` is empty.\n * @property {SandboxOptions} [sandboxOptions] - Options that will be passed to the Sandbox constructor\n * @property {number} [watchdogInterval] - Number of ms between watchdog cycles, defaults to dcpConfig tuning param\n * @property {object[]} [computeGroups] - The compute group descriptors the worker will accept jobs from (+ optionally the default compute group)\n * @property {object} [minimumWage] - The minimum payout per slice the worker will accept from a job.\n * @property {string[]} [allowedOrigins] - origins for fetching data URIs that are allowed\n * @property {boolean} [leavePublicGroup=false] - Don't fetch slices from public compute group.\n * @property {object} [schedulerConfig] - Overrides for dcpConfig.scheduler.\n * @property {number} [maxWorkingSandboxes] - Max number of concurrently working sandboxes\n * @property {{cpu: number, gpu: number}} [cores] - The number of CPU vCores and GPU devices available for compute.\n * @property {{cpu: number, gpu: number}} [targetLoad] - The proportion of the cores.cpu and cores.gpu to load.\n */\n\nfunction disableWorker() {\n localStorage.setItem(DISABLE_WORKER_CACHE_KEY, true);\n}\n\n/**\n * Fired when the worker begins fetching slices from the scheduler.\n * @access public\n * @event Worker#fetchStart\n */\nclass Worker extends EventEmitter {\n /**\n * Returns a new Worker instance.\n * @access public\n * @param {module:dcp/worker~SupervisorOptions} supervisorOptions \n */\n constructor(supervisorOptions) {\n super('Worker');\n /**\n * @type {boolean}\n * @access public\n */\n this.working = false;\n /**\n * @type {SchedMsg}\n * @access public\n */\n this.schedMsg = new SchedMsg(this);\n /**\n * @type {Supervisor}\n * @access public\n */\n this.supervisor = new Supervisor(this, supervisorOptions);\n \n debugging() && console.debug('Worker supervisorOptions:', supervisorOptions);\n\n this.supervisor.on('fetchingTask', () => this.emit('fetchStart'));\n //this.supervisor.on('fetchedTask', (fetchedSlicesCount) => this.emit('fetch', fetchedSlicesCount)); // AFAICT UNUSED -- XXXpfr\n this.supervisor.on('fetchedTask', (fetchedSlicesCount) => this.emit('fetchEnd', fetchedSlicesCount));\n this.supervisor.on('fetchTaskFailed', (error) => this.emit('fetchEnd', error));\n this.supervisor.on('fetchTaskFailed', (error) => this.emit('fetchError', error));\n\n this.supervisor.on('submittingResults', () => this.emit('submitStart'));\n this.supervisor.on('submittedResult', () => this.emit('submitEnd'));\n this.supervisor.on('submitResultsFailed', (error) => this.emit('submitEnd', error));\n this.supervisor.on('submitResultsFailed', (error) => this.emit('submitError', error));\n this.supervisor.on('submittedResult', () => this.emit('submit'));\n \n this.supervisor.on('dccCredit', (event) => this.emit('payment', event));\n this.supervisor.on('dccNoCredit', (event) => this.emit('payment', event));\n\n this.supervisor.on('sandboxReady', (sandbox) => this.emit('sandbox', sandbox));\n \n this.supervisor.on('error', (error) => this.emit('error', error));\n this.supervisor.on('warning', (warning) => this.emit('warning', warning));\n }\n\n /**\n * Disables worker instances from being started. 
The user will need to manually intervene to re-enable workers.\n * \n * @access public\n */\n static disableWorker() {\n disableWorker();\n }\n\n /**\n * Starts the worker.\n * \n * @access public\n */\n async start() {\n if (this.working) throw new Error('Cannot start worker: Already working.');\n\n if (localStorage.getItem(DISABLE_WORKER_CACHE_KEY)) {\n await confirmPrompt(`Worker has been disabled by the DCP Security Team; check the @DC_Protocol Twitter feed for more information before continuing.`)\n if (await confirmPrompt('Are you sure you would like to restart the worker?')) {\n localStorage.removeItem(DISABLE_WORKER_CACHE_KEY);\n console.log(\"Starting worker...\");\n } else {\n return;\n }\n }\n\n this.working = true;\n await this.supervisor.work();\n await this.schedMsg.start();\n this.emit('start');\n }\n\n /**\n * Stops the worker.\n * \n * @access public\n * @param {boolean} [immediate=false] Whether the worker should stop imediately or allow the current slices to finish.\n */\n async stop(immediate=false) {\n if (!this.working) throw new Error('Cannot stop worker: Already stopped.');\n \n this.working = false;\n await this.schedMsg.stop();\n await this.supervisor.stopWork(immediate);\n this.emit('stop');\n }\n \n /**\n * Set payment address\n * @param {Address} addr - new address to be used\n */\n setPaymentAddress(addr)\n {\n assert(addr instanceof Address);\n this.supervisor.paymentAddress = addr;\n this.emit('paymentAddressChange', addr);\n }\n \n /**\n * Get payment address\n * @returns {Address} - current payment address.\n */\n getPaymentAddress()\n {\n return this.supervisor.paymentAddress;\n }\n \n /**\n * Set identity keystore.\n * Note: connections to the scheduler will only use the new identity if they are closed and recreated.\n * @param {Keystore} ks - new identity to be used\n */\n setIdentity(ks)\n {\n assert(ks instanceof Keystore);\n \n /* compatibility for supervisor 1 */\n if (this.supervisor.setDefaultIdentityKeystore)\n this.supervisor.setDefaultIdentityKeystore(ks);\n else\n this.supervisor.identity = ks;\n this.emit('identityChange', ks);\n }\n \n /**\n * Get identity keystore\n * @returns {Keystore} - the current identity keystore\n */\n getIdentity()\n {\n /* compatibiliy for supervisor 1 */\n if (this.supervisor._identityKeystore)\n return this.supervisor._identityKeystore;\n else\n return this.supervisor.identity;\n }\n \n /**\n * Set max working sandboxes\n * @param {number} max - new max working sandboxes\n */\n setMaxWorkingSandboxes(max)\n {\n this.supervisor.maxWorkingSandboxes = max;\n this.emit('maxSandboxesChange', max);\n }\n \n /**\n * Get max working sandboxes\n * @returns {number} - current max working sandboxes\n */\n getMaxWorkingSandboxes()\n {\n return this.supervisor.maxWorkingSandboxes;\n }\n \n /**\n * Check if there are any working sandboxes within the worker\n * @returns {Boolean} - true if there are working sandboxes.\n */\n hasWorkingSandboxes()\n {\n /* compatibility for supervisor 1 */\n if (this.supervisor.workingSandboxes)\n return this.supervisor.workingSandboxes.length > 0\n else\n return this.supervisor.workingSandboxCount() > 0;\n }\n}\n\nexports.Worker = Worker;\nexports.Supervisor = Supervisor;\nexports.disableWorker = disableWorker;\n\nexports.version = {\n api: '1.0.0',\n provides: '1.0.0' /* dcpConfig.scheduler.compatibility.operations.work */\n};\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/index.js?");
4449
4449
 
4450
4450
  /***/ }),
4451
4451
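worker/index.js defines the public Worker API: a Worker wraps a Supervisor, re-emits its events, and exposes start()/stop() plus setters for payment address, identity, and max working sandboxes. The 4.2.15 build comments out the apparently unused 'fetch' re-emit (marked XXXpfr) and newly forwards the supervisor's 'error' and 'warning' events. The sketch below shows how a consumer might drive this API; the require paths, wallet calls, and option values are assumptions (dcp-client's normal init/wallet setup is omitted), while the constructor option names, method names, and event names come from the module above.

// Assumed entry points; inside the bundle these are ./src/dcp-client/worker/index.js
// and ./src/dcp-client/wallet/index.js. dcp-client initialization is assumed done.
const { Worker } = require('dcp/worker');
const wallet = require('dcp/wallet');

async function runWorkerBriefly() {
  // SupervisorOptions (see the typedef above); the values here are illustrative.
  const worker = new Worker({
    paymentAddress: new wallet.Address('0x1234...'),  // placeholder address for earned DCCs
    maxWorkingSandboxes: 2,                           // cap on concurrently working sandboxes
    leavePublicGroup: false,                          // keep fetching from the public compute group
  });

  // 'error' and 'warning' are newly forwarded from the Supervisor in 4.2.15.
  worker.on('error',   (error)   => console.error('worker error:', error));
  worker.on('warning', (warning) => console.warn('worker warning:', warning));
  worker.on('payment', (event)   => console.log('payment:', event));

  await worker.start();                               // throws if already working
  setTimeout(() => worker.stop(false), 60 * 1000);    // false => let in-flight slices finish
}

runWorkerBriefly().catch(console.error);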
 
@@ -4488,7 +4488,7 @@ eval("/**\n * @file worker/supervisor-cache.js\n *\n * A cache for the superviso
4488
4488
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4489
4489
 
4490
4490
  "use strict";
4491
- eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file worker/supervisor.js\n *\n * The component that controls each of the sandboxes\n * and distributes work to them. Also communicates with the\n * scheduler to fetch said work.\n *\n * The supervisor readies sandboxes before/while fetching slices.\n * This means sometimes there are extra instantiated WebWorkers\n * that are idle (in this.readiedSandboxes). Readied sandboxes can\n * be used for any slice. After a readied sandbox is given a slice\n * it becomes assigned to slice's job and can only do work\n * for that job.\n *\n * After a sandbox completes its work, the sandbox becomes cached\n * and can be reused if another slice with a matching job is fetched.\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * @date May 2019\n */\n\n/* global dcpConfig */\n// @ts-check\n\n\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst hash = __webpack_require__(/*! dcp/common/hash */ \"./src/common/hash.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { Sandbox, SandboxError } = __webpack_require__(/*! ./sandbox */ \"./src/dcp-client/worker/sandbox.js\");\nconst { Slice, SLICE_STATUS_UNASSIGNED, SLICE_STATUS_FAILED } = __webpack_require__(/*! ./slice */ \"./src/dcp-client/worker/slice.js\");\nconst { SupervisorCache } = __webpack_require__(/*! ./supervisor-cache */ \"./src/dcp-client/worker/supervisor-cache.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst { localStorage } = __webpack_require__(/*! dcp/common/dcp-localstorage */ \"./src/common/dcp-localstorage.js\");\nconst { booley, encodeDataURI, makeValueURI, leafMerge, a$sleepMs, justFetch, compressJobMap, toJobMap,\n compressSandboxes, compressSlices, truncateAddress, dumpSandboxesIfNotUnique, dumpSlicesIfNotUnique, \n generateOpaqueId } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { sliceStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { calculateJoinHash } = __webpack_require__(/*! dcp/dcp-client/compute-groups */ \"./src/dcp-client/compute-groups/index.js\");\nconst RingBuffer = __webpack_require__(/*! dcp/utils/ringBuffer */ \"./src/utils/ringBuffer.js\");\nconst supervisorTuning = dcpConfig.future('worker.tuning');\nconst tuning = {\n watchdogInterval: 7, /**< seconds - time between fetches when ENOTASK(? 
/wg nov 2019) */\n minSandboxStartDelay: 0.1, /**< seconds - minimum time between WebWorker starts */\n maxSandboxStartDelay: 0.7, /**< seconds - maximum delay time between WebWorker starts */\n ...supervisorTuning\n};\n\n/** Make timers 10x slower when running in niim */\nlet timeDilation = 1;\nif (DCP_ENV.platform === 'nodejs') {\n /** Make timers 10x slower when running in niim */\n timeDilation = (requireNative('module')._cache.niim instanceof requireNative('module').Module) ? 10 : 1;\n}\n\ndcpConfig.future('worker.sandbox', { progressReportInterval: (5 * 60 * 1000) });\nconst sandboxTuning = dcpConfig.worker.sandbox;\n\n/**\n * @typedef {*} address\n * @typedef {*} opaqueId\n */\n\n/**\n * @typedef {object} SandboxSlice\n * @property {Sandbox} sandbox\n * @property {Slice} slice\n */\n\n/**\n * @typedef {object} Signature\n * @property {Uint8Array} r\n * @property {Uint8Array} s\n * @property {Uint8Array} v\n */\n\n/**\n * @typedef {object} SignedAuthorizationMessageObject\n * @property {object} auth\n * @property {Signature} signature\n * @property {module:dcp/wallet.Address} owner\n */\n\n/** @typedef {import('.').Worker} Worker */\n/** @typedef {import('.').SupervisorOptions} SupervisorOptions */\n\nclass Supervisor extends EventEmitter {\n /**\n * @constructor\n * @param {Worker} worker\n * @param {SupervisorOptions} options\n */\n constructor (worker, options={}) {\n super('Supervisor');\n\n /** @type {Worker} */\n this.worker = worker;\n\n /** @type {Sandbox[]} */\n this.sandboxes = [];\n\n /** @type {Sandbox[]} */\n this.readiedSandboxes = [];\n\n /** @type {Sandbox[]} */\n this.assignedSandboxes = [];\n\n /** @type {Slice[]} */\n this.slices = [];\n\n /** @type {Slice[]} */\n this.queuedSlices = [];\n\n /** @type {Slice[]} */\n this.lostSlices = [];\n\n /** @type {boolean} */\n this.matching = false;\n\n /** @type {boolean} */\n this.isFetchingNewWork = false;\n\n /** @type {number} */\n this.numberOfCoresReserved = 0;\n\n /** @type {number} */\n this.addressTruncationLength = 20; // Set to -1 for no truncation.\n\n /** @type {Object[]} */\n this.rejectedJobs = [];\n this.rejectedJobReasons = [];\n\n if (!options) {\n console.error('Supervisor Options', options, new Error().stack);\n options = {};\n }\n\n /** @type {object} */\n this.options = {\n jobAddresses: options.jobAddresses || [/* all jobs unless priorityOnly */],\n ...options,\n };\n\n const { paymentAddress, identity } = options;\n if (paymentAddress) {\n if (paymentAddress instanceof wallet.Keystore) {\n this.paymentAddress = paymentAddress.address;\n } else {\n this.paymentAddress = new wallet.Address(paymentAddress);\n }\n } else {\n this.paymentAddress = null;\n }\n\n this._identityKeystore = identity;\n\n this.extraAllowOrigins = {\n any: [],\n fetchData: [],\n fetchWorkFunctions: [],\n fetchArguments: [],\n sendResults: [],\n };\n \n if (typeof options.allowedOrigins !== 'undefined')\n {\n console.warn('Warning: using deprecated interface options.allowedOrigins; callstack=', new Error().stack.split('\\n').slice(1));\n if (!Array.isArray(options.allowedOrigins))\n {\n for (let kind in this.extraAllowOrigins)\n {\n if (options.allowedOrigins[kind])\n this.extraAllowOrigins[kind].push(...options.allowedOrigins[kind]); \n }\n }\n else\n this.extraAllowOrigins['any'].push(...options.allowedOrigins)\n \n delete options.allowedOrigins;\n }\n /* Assume that an array of dcpConfig.worker.allowOrigins means they can be used for anything */\n if (Array.isArray(dcpConfig.worker.allowOrigins))\n dcpConfig.worker.allowOrigins 
= { any: dcpConfig.worker.allowOrigins };\n \n /**\n * Maximum sandboxes allowed to work at a given time.\n * @type {number}\n */\n this.maxWorkingSandboxes = options.maxWorkingSandboxes || 1;\n\n /** @type {number} */\n this.defaultMaxGPUs = 1;\n // this.GPUsAssigned = 0;\n \n // Object.defineProperty(this, 'GPUsAssigned', {\n // get: () => this.allocatedSandboxes.filter(sb => !!sb.requiresGPU).length,\n // enumerable: true,\n // configurable: false,\n // });\n\n /**\n * TODO: Remove this when the supervisor sends all of the sandbox\n * capabilities to the scheduler when fetching work.\n * @type {object}\n */\n this.capabilities = null;\n\n /** @type {number} */\n this.lastProgressReport = 0;\n\n /** \n * An N-slot ring buffer of job addresses. Stores all jobs that have had no more than 1 slice run in the ring buffer.\n * Required for the implementation of discrete jobs \n * @type {RingBuffer} \n */\n this.ringBufferofJobs = new RingBuffer(200); // N = 200 should be more than enough.\n \n // @hack - dcp-env.isBrowserPlatform is not set unless the platform is _explicitly_ set,\n // using the default detected platform doesn't set it.\n // Fixing that causes an error in the wallet module's startup on web platform, which I\n // probably can't fix in a reasonable time this morning.\n // ~ER2020-02-20\n\n if (!options.maxWorkingSandboxes\n && DCP_ENV.browserPlatformList.includes(DCP_ENV.platform)\n && navigator.hardwareConcurrency > 1) {\n this.maxWorkingSandboxes = navigator.hardwareConcurrency - 1;\n if (typeof navigator.userAgent === 'string') {\n if (/(Android).*(Chrome|Chromium)/.exec(navigator.userAgent)) {\n this.maxWorkingSandboxes = 1;\n console.log('Doing work with Chromimum browsers on Android is currently limited to one sandbox');\n }\n }\n }\n\n /** @type {SupervisorCache} */\n this.cache = new SupervisorCache(this);\n /** @type {object} */\n this._connections = {}; /* active DCPv4 connections */\n // Call the watchdog every 7 seconds.\n this.watchdogInterval = setInterval(() => this.watchdog(), tuning.watchdogInterval * 1000);\n if (DCP_ENV.platform === 'nodejs' && this.options.localExec) /* interval helps keep normal worker alive forever, which we don't want in localexec */\n this.watchdogInterval.unref();\n\n const ceci = this;\n\n // Initialize to null so these properties are recognized for the Supervisor class\n this.taskDistributorConnection = null;\n this.eventRouterConnection = null;\n this.resultSubmitterConnection = null;\n this.packageManagerConnection = null;\n this.openTaskDistributorConn = function openTaskDistributorConn()\n {\n let config = dcpConfig.scheduler.services.taskDistributor;\n ceci.taskDistributorConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'taskDistributor'));\n ceci.taskDistributorConnection.on('close', ceci.openTaskDistributorConn);\n }\n\n this.openEventRouterConn = function openEventRouterConn()\n {\n let config = dcpConfig.scheduler.services.eventRouter;\n ceci.eventRouterConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'eventRouter'));\n ceci.eventRouterConnection.on('close', ceci.openEventRouterConn);\n if (ceci.eventRouterMessageQueue.length)\n ceci.resendRejectedMessages(ceci.eventRouterConnection, ceci.eventRouterMessageQueue);\n }\n this.eventRouterMessageQueue = [];\n \n this.openResultSubmitterConn = function openResultSubmitterConn()\n {\n let config = dcpConfig.scheduler.services.resultSubmitter;\n 
ceci.resultSubmitterConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'resultSubmitter'));\n ceci.resultSubmitterConnection.on('close', ceci.openResultSubmitterConn);\n if (ceci.resultSubmitterMessageQueue.length)\n ceci.resendRejectedMessages(ceci.resultSubmitterConnection, ceci.resultSubmitterMessageQueue);\n }\n this.resultSubmitterMessageQueue = [];\n\n this.openPackageManagerConn = function openPackageManagerConn()\n {\n let config = dcpConfig.packageManager;\n ceci.packageManagerConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'packageManager'));\n ceci.packageManagerConnection.on('close', ceci.openPackageManagerConn);\n if (ceci.packageManagerMessageQueue.length)\n ceci.resendRejectedMessages(ceci.packageManagerConnection, ceci.packageManagerMessageQueue);\n }\n this.packageManagerMessageQueue = [];\n }\n\n /**\n * Return worker opaqueId.\n * @type {opaqueId}\n */\n get workerOpaqueId() {\n if (!this._workerOpaqueId)\n this._workerOpaqueId = localStorage.getItem('workerOpaqueId');\n\n if (!this._workerOpaqueId || this._workerOpaqueId.length !== constants.workerIdLength) {\n this._workerOpaqueId = generateOpaqueId();\n localStorage.setItem('workerOpaqueId', this._workerOpaqueId);\n }\n\n return this._workerOpaqueId;\n }\n\n /**\n * This getter is the absolute source-of-truth for what the\n * identity keystore is for this instance of the Supervisor.\n */\n get identityKeystore() {\n assert(this.defaultIdentityKeystore);\n\n return this._identityKeystore || this.defaultIdentityKeystore;\n }\n \n \n /** \n * Factory function which generates a list of origins which are safe to communicate \n * with for this purpose. Currently-valid purposes (more will be added):\n * - any\n * - fetchData\n * - fetchWork\n * - fetchWorkArguments\n * - sendResults\n */\n makeSafeOriginList(purpose)\n {\n var list = [];\n\n assert(Array.isArray(this.extraAllowOrigins[purpose]));\n \n if (this.extraAllowOrigins[purpose])\n list = list.concat(this.extraAllowOrigins[purpose]);\n if (dcpConfig.worker.allowOrigins[purpose])\n list = list.concat(dcpConfig.worker.allowOrigins[purpose])\n\n // In localExec, do not allow work function or arguments to come from the 'any' origins\n if (purpose !== 'any' && (!this.options.localExec || (this.options.localExec && purpose === 'sendResults')))\n {\n if (this.extraAllowOrigins.any)\n list = list.concat(this.extraAllowOrigins.any);\n if (dcpConfig.worker.allowOrigins.any)\n list = list.concat(dcpConfig.worker.allowOrigins.any);\n }\n return list;\n }\n\n /**\n * Open all connections. 
Used when supervisor is instantiated or stopped/started\n * to initially open connections.\n */\n instantiateAllConnections() {\n if (!this.taskDistributorConnection)\n this.openTaskDistributorConn();\n \n if (!this.eventRouterConnection)\n this.openEventRouterConn();\n \n if (!this.resultSubmitterConnection)\n this.openResultSubmitterConn();\n\n if (!this.packageManagerConnection)\n this.openPackageManagerConn();\n }\n \n /**\n * Asynchronously send a result to the result submitter that was previously rejected.\n * Different from resendRejectedMessages below in the sense that the function only resolves\n * once we've delivered the result, or gone past our max number of attempts.\n * @param {object} result \n * @returns the response payload from the result operation\n */\n async resendResult(result) {\n var protocolError = false;\n if (!result.sendRetries)\n result.sendRetries = 1;\n else\n result.sendRetries++;\n \n if (result.sendRetries > dcpConfig.worker.maxResultSubmissionRetries)\n throw new DCPError(`Could not submit result after ${dcpConfig.worker.maxResultSubmissionRetries} attempts. Aborting.`) \n \n debugging() && console.debug(`supervisor - failed to submit result ${result.sendRetries} time(s), trying again `)\n let res = await this.resultSubmitterConnection.send('result', result).catch(async (e) => {\n debugging('supervisor') && console.error(`Failed to submit result to scheduler for slice ${result.slice} of job ${result.job}:\\n ${e} \\nWill try again on new connection.`);\n this.resultSubmitterConnection.close();\n await a$sleepMs(10); /* let connection recycle */\n protocolError = true;\n });\n if ((!res.success && res.payload.code === 'DCPS-01002') || protocolError)\n return this.resendResult(result)\n else\n return res;\n }\n \n /**\n * Try sending messages that were rejected on an old instance of the given connection.\n * These are messages that a) were rejected due to a protocol error and b) don't care when exactly\n * they're sent in the grand scheme of things.\n */\n resendRejectedMessages(connection, messageQueue) {\n if (connection.resendingMessages) /* if the passed connection is already in the loop, exit */\n return;\n \n var message = messageQueue.shift();\n\n do {\n \n connection.resendingMessages = true;\n var quitLoop = false;\n \n connection.send(message.operation, message.data)\n .catch((e) =>\n {\n /* Protocol Error; Close connection (this will trigger the opening of a new connection that will try sending again) */\n debugging('supervisor') && console.error(`Failed to send message ${message.operation} to scheduler: ${e}. Will try again on a new \n connection.`);\n messageQueue.unshift(message);\n connection.close();\n quitLoop = true;\n });\n \n message = messageQueue.shift();\n \n } while (message && !quitLoop)\n\n connection.resendingMessages = false;\n }\n\n /** Set the default identity keystore -- needs to happen before anything that talks\n * to the scheduler for work gets called. This is a wart and should be removed by\n * refactoring.\n *\n * The default identity keystore will be used if the Supervisor was not provided\n * with an alternate. This keystore will be located via the Wallet API, and \n * if not found, a randomized default identity will be generated. 
\n *\n * @param {object} ks An instance of wallet::Keystore -- if undefined, we pick the best default we can.\n * @returns {Promise<void>}\n */\n async setDefaultIdentityKeystore(ks) {\n try {\n if (ks) {\n this.defaultIdentityKeystore = ks;\n return;\n }\n\n if (this.defaultIdentityKeystore)\n return;\n\n try {\n this.defaultIdentityKeystore = await wallet.getId();\n } catch(e) {\n debugging('supervisor') && console.debug('Error generating default identity, try to do it another way.');\n this.defaultIdentityKeystore = await new wallet.IdKeystore(null, '');\n }\n } finally {\n if (this.defaultIdentityKeystore)\n debugging('supervisor') && console.debug('Set default identity =', this.defaultIdentityKeystore.address);\n else\n debugging('supervisor') && console.debug('Failed to set default identity, worker cannot work.');\n }\n }\n\n //\n // What follows is a bunch of utility properties and functions for creating filtered views\n // of the slices and sandboxes array.\n //\n /** XXXpfr @todo Write sort w/o using promises so we can get rid of async on all the compress functions. */\n\n /**\n * @deprecated -- Please do not use this.workingSandboxes; use this.allocatedSandboxes instead.\n * Sandboxes that are in WORKING state.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Sandbox[]}\n */\n get workingSandboxes() {\n return this.sandboxes.filter(sandbox => sandbox.isWorking);\n }\n\n /**\n * Use instead of this.workingSandboxes.\n *\n * When a sandbox is paired with a slice, execution is pending and sandbox.allocated=true and\n * sandbox.slice=slice and sandbox.jobAddress=slice.jobAddress. This is what 'allocated' means.\n * Immediately upon the exit of sandbox.work, sandbox.allocated=false is set and if an exception\n * wasn't thrown the sandbox is placed in this.assignedSandboxes.\n * Thus from the pov of supervisor, this.allocatedSandboxes is deterministic and this.workingSandboxes is not.\n * Please try to not use this.workingSandboxes. 
It is deprecated.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Sandbox[]}\n */\n get allocatedSandboxes() {\n return this.sandboxes.filter(sandbox => sandbox.allocated);\n }\n\n /**\n * Slices that are allocated.\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Slice[]}\n */\n get allocatedSlices() {\n return this.slices.filter(slice => slice.allocated);\n }\n\n /**\n * This property is used as the target number of sandboxes to be associated with slices and start working.\n *\n * It is used in this.watchdog as to prevent a call to this.work when unallocatedSpace <= 0.\n * It is also used in this.distributeQueuedSlices where it is passed as an argument to this.matchSlicesWithSandboxes to indicate how many sandboxes\n * to associate with slices and start working.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {number}\n */\n get unallocatedSpace() {\n return this.maxWorkingSandboxes - this.allocatedSandboxes.length - this.numberOfCoresReserved;\n }\n \n /**\n * Call acquire(numberOfCoresToReserve) to reserve numberOfCoresToReserve unallocated sandboxes as measured by unallocatedSpace.\n * Call release() to undo the previous acquire.\n * This pseudo-mutex technique helps prevent races in scheduling slices in Supervisor.\n * @param {number} numberOfCoresToReserve\n */\n acquire(numberOfCoresToReserve) { \n this.numberOfCoresReserved = numberOfCoresToReserve; \n }\n release() { \n this.numberOfCoresReserved = 0; \n }\n\n /**\n * Remove from this.slices.\n * @param {Slice} slice\n */\n removeSlice(slice) {\n this.removeElement(this.slices, slice);\n if (Supervisor.debugBuild) {\n if (this.queuedSlices.indexOf(slice) !== -1)\n throw new Error(`removeSlice: slice ${slice.identifier} is in queuedSlices; inconsistent state.`);\n if (this.lostSlices.length > 0) {\n console.warn(`removeSlice: slice ${slice.identifier}, found lostSlices ${this.lostSlices.map(s => s.identifier)}`);\n if (this.lostSlices.indexOf(slice) !== -1)\n throw new Error(`removeSlice: slice ${slice.identifier} is in lostSlices; inconsistent state.`);\n }\n }\n }\n\n /**\n * Remove from this.slices.\n * @param {Slice[]} slices\n */\n removeSlices(slices) {\n this.slices = this.slices.filter(slice => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove from this.queuedSlices.\n * @param {Slice[]} slices\n */\n removeQueuedSlices(slices) {\n this.queuedSlices = this.queuedSlices.filter(slice => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove from this.sandboxes, this.assignedSandboxes and this.readiedSandboxes.\n * @param {Sandbox} sandbox\n */\n removeSandbox(sandbox) {\n debugging('scheduler') && console.log(`removeSandbox ${sandbox.identifier}`);\n this.removeElement(this.sandboxes, sandbox);\n this.removeElement(this.assignedSandboxes, sandbox);\n\n // XXXpfr: April 13, 2022\n // I'm trying to understand and control when sandboxes get removed.\n // A sandbox in this.readiedSandboxes should never have returnSandbox/removeSandbox called on it except in stopWork.\n // Because of races and random worker crashes, it is hard to get this right, but I want to try.\n // If I don't fix this is the next 30 days or I forget, please delete this exception.\n if (false)\n {}\n\n this.removeElement(this.readiedSandboxes, sandbox);\n }\n\n /**\n * Remove from this.sandboxes and this.assignedSandboxes .\n * @param {Sandbox[]} 
sandboxes\n */\n async removeSandboxes(sandboxes) {\n debugging('scheduler') && console.log(`removeSandboxes: Remove ${sandboxes.length} sandboxes ${this.dumpSandboxes(sandboxes)}`);\n this.sandboxes = this.sandboxes.filter(sandbox => sandboxes.indexOf(sandbox) === -1);\n this.assignedSandboxes = this.assignedSandboxes.filter(sandbox => sandboxes.indexOf(sandbox) === -1);\n\n if (Supervisor.debugBuild) {\n const readied = this.readiedSandboxes.filter(sandbox => sandboxes.indexOf(sandbox) !== -1);\n if (readied.length > 0)\n throw new Error(`removeSandboxes: sandboxes ${readied.map(s => s.identifier)} are in readiedSandboxes; inconsistent state.`);\n }\n }\n\n /**\n * Remove element from theArray.\n * @param {Array<*>} theArray\n * @param {object|number} element\n * @param {boolean} [assertExists = true]\n */\n removeElement(theArray, element, assertExists = false) {\n let index = theArray.indexOf(element);\n assert(index !== -1 || !assertExists);\n if (index !== -1) theArray.splice(index, 1);\n }\n\n /**\n * Log sliceArray.\n * @param {Slice[]} sliceArray\n * @param {string} [header]\n * @returns {string}\n */\n dumpSlices(sliceArray, header) {\n if (header) console.log(`\\n${header}`);\n return compressSlices(sliceArray, this.addressTruncationLength);\n }\n\n /**\n * Log sandboxArray.\n * @param {Sandbox[]} sandboxArray\n * @param {string} [header]\n * @returns {string}\n */\n dumpSandboxes(sandboxArray, header) {\n if (header) console.log(`\\n${header}`);\n return compressSandboxes(sandboxArray, this.addressTruncationLength);\n }\n\n /**\n * If the elements of sandboxSliceArray are not unique, log the duplicates and dump the array.\n * @param {SandboxSlice[]} sandboxSliceArray\n * @param {string} header\n */\n dumpSandboxSlicesIfNotUnique(sandboxSliceArray, header) {\n if (!this.isUniqueSandboxSlices(sandboxSliceArray, header))\n console.log(this.dumpSandboxSlices(sandboxSliceArray));\n }\n\n /**\n * Log { sandbox, slice }.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @returns {string}\n */\n dumpSandboxAndSlice(sandbox, slice) {\n return `${sandbox.id}~${slice.sliceNumber}.${this.dumpJobAddress(slice.jobAddress)}`;\n }\n\n /**\n * Log { sandbox, slice } with state/status.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @returns {string}\n */\n dumpStatefulSandboxAndSlice(sandbox, slice) {\n return `${sandbox.id}.${sandbox.state}~${slice.sliceNumber}.${this.dumpJobAddress(slice.jobAddress)}.${slice.status}`;\n }\n\n /**\n * Truncates jobAddress.toString() to this.addressTruncationLength digits.\n * @param {address} jobAddress\n * @returns {string}\n */\n dumpJobAddress(jobAddress) {\n return truncateAddress(jobAddress, this.addressTruncationLength /* digits*/);\n }\n\n /**\n * Dump sandboxSliceArray.\n * @param {SandboxSlice[]} sandboxSliceArray - input array of { sandbox, slice }\n * @param {string} [header] - optional header\n * @param {boolean} [stateFul] - when true, also includes slice.status and sandbox.state.\n * @returns {string}\n */\n dumpSandboxSlices(sandboxSliceArray, header, stateFul=false) {\n if (header) console.log(`\\n${header}`);\n const jobMap = {};\n sandboxSliceArray.forEach(ss => {\n const sss = stateFul ? 
`${ss.sandbox.id}.${ss.sandbox.state}~${ss.slice.sliceNumber}.${ss.slice.status}` : `${ss.sandbox.id}~${ss.slice.sliceNumber}`;\n if (!jobMap[ss.slice.jobAddress]) jobMap[ss.slice.jobAddress] = sss;\n else jobMap[ss.slice.jobAddress] += `,${sss}`;\n });\n let output = '';\n for (const [jobAddress, sss] of Object.entries(jobMap))\n output += `${this.dumpJobAddress(jobAddress)}:[${sss}]:`;\n return output;\n }\n\n /**\n * Check sandboxSliceArray for duplicates.\n * @param {SandboxSlice[]} sandboxSliceArray\n * @param {string} [header]\n * @param {function} [log]\n * @returns {boolean}\n */\n isUniqueSandboxSlices(sandboxSliceArray, header, log) {\n const result = [], slices = [], sandboxes = [];\n let once = true;\n sandboxSliceArray.forEach(x => {\n const sliceIndex = slices.indexOf(x.slice);\n const sandboxIndex = sandboxes.indexOf(x.sandbox);\n\n if (sandboxIndex >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x.sandbox) : console.log(`\\tWarning: Found duplicate sandbox ${x.sandbox.identifier}.`);\n } else sandboxes.push(x.sandbox);\n\n if (sliceIndex >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x.slice) : console.log(`\\tWarning: Found duplicate slice ${x.slice.identifier}.`);\n } else {\n slices.push(x.slice);\n if (sandboxIndex < 0) result.push(x);\n }\n });\n return sandboxSliceArray.length === result.length;\n }\n\n /**\n * Attempts to create and start a given number of sandboxes.\n * The sandboxes that are created can then be assigned for a\n * specific job at a later time. All created sandboxes\n * get put into the @this.readiedSandboxes array when allocateLocalSandboxes is false.\n *\n * @param {number} numSandboxes - the number of sandboxes to create\n * @param {boolean} [allocateLocalSandboxes=false] - when true, do not place in this.readiedSandboxes\n * @returns {Promise<Sandbox[]>} - resolves with array of created sandboxes, rejects otherwise\n * @throws when given a numSandboxes is not a number or if numSandboxes is Infinity\n */\n async readySandboxes (numSandboxes, allocateLocalSandboxes = false) {\n debugging('supervisor') && console.debug(`readySandboxes: Readying ${numSandboxes} sandboxes, total sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n \n if (typeof numSandboxes !== 'number' || Number.isNaN(numSandboxes) || numSandboxes === Infinity) {\n throw new Error(`${numSandboxes} is not a number of sandboxes that can be readied.`);\n }\n if (numSandboxes <= 0) {\n return [];\n }\n\n const sandboxStartPromises = [];\n const sandboxes = [];\n const errors = [];\n for (let i = 0; i < numSandboxes; i++) {\n const sandbox = new Sandbox(this, {\n ...this.options.sandboxOptions,\n });\n sandbox.addListener('ready', () => this.emit('sandboxReady', sandbox));\n sandbox.addListener('start', () => {\n this.emit('sandboxStart', sandbox);\n\n // When sliceNumber == 0, result-submitter status skips the slice,\n // so don't send it in the first place.\n // The 'start' event is fired when a worker starts up, hence there's no way\n // to determine whether sandbox has a valid slice without checking.\n if (sandbox.slice) {\n const jobAddress = sandbox.jobAddress;\n const sliceNumber = sandbox.slice.sliceNumber;\n // !authorizationMessage <==> sliceNumber === 0.\n const authorizationMessage = sandbox.slice.getAuthorizationMessage();\n\n if (authorizationMessage) {\n let statusPayload = {\n worker: this.workerOpaqueId,\n slices: [{\n job: jobAddress,\n 
sliceNumber: sliceNumber,\n status: 'begin',\n authorizationMessage,\n }],\n }\n \n try /* resultSubmitterConnection can be null if worker is stopped */\n {\n this.resultSubmitterConnection.send('status', statusPayload).catch((error) => {\n debugging('supervisor') && console.error(`Error sending 'status' for slice ${sliceNumber} of job ${jobAddress}:\\n ${error}\\nWill try again on a new connection`);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: statusPayload });\n this.resultSubmitterConnection.close();\n });\n } catch (error)\n {\n debugging('supervisor') && console.error(`Failed to send 'status' for slice ${sliceNumber} of job ${jobAddress}:, no connection to result submitter:`, error);\n }\n }\n }\n });\n sandbox.addListener('workEmit', ({ eventName, payload }) => {\n // Need to check if the sandbox hasn't been assigned a slice yet.\n if (!sandbox.slice) {\n if (Supervisor.debugBuild) {\n console.error(\n `Sandbox not assigned a slice before sending workEmit message to scheduler. 'workEmit' event originates from \"${eventName}\" event`, \n payload,\n );\n }\n }\n else\n {\n const jobAddress = sandbox.slice.jobAddress;\n const sliceNumber = sandbox.slice.sliceNumber;\n // sliceNumber can be zero if it came from a problem with loading modules.\n assert(jobAddress && (sliceNumber || sliceNumber === 0));\n // Send a work emit message from the sandbox to the event router\n // !authorizationMessage <==> sliceNumber === 0.\n let authorizationMessage;\n try {\n // Sometimes a sliceNumber===0 workEmit comes in before the client bundle is properly loaded.\n // Also happens with minor dcp-client version mismatches.\n authorizationMessage = sandbox.slice.getAuthorizationMessage();\n } catch(e) {\n authorizationMessage = null;\n }\n\n if (!authorizationMessage)\n {\n console.warn(`workEmit: missing authorization message for job ${jobAddress}, slice: ${sliceNumber}`);\n return Promise.resolve();\n }\n \n let workEmitPayload = {\n eventName,\n payload,\n job: jobAddress,\n slice: sliceNumber,\n worker: this.workerOpaqueId,\n authorizationMessage,\n }\n \n const workEmitPromise = this.eventRouterConnection.send('workEmit', workEmitPayload).catch(error => {\n debugging('supervisor') && console.warn(`workEmit: unable to send ${eventName} for slice ${sliceNumber} of job ${jobAddress}: ${error.message}.\\nTrying again on a new connection.`);\n this.eventRouterMessageQueue.push({ operation: 'workEmit', data: workEmitPayload })\n this.eventRouterConnection.close();\n if (Supervisor.debugBuild)\n console.error('workEmit error:', error);\n });\n\n if (Supervisor.debugBuild) {\n workEmitPromise.then(result => {\n if (!result || !result.success)\n console.warn('workEmit: event router did not accept event', result);\n });\n }\n }\n });\n\n // When any sbx completes, \n sandbox.addListener('complete', () => {\n this.watchdog();\n });\n\n sandbox.on('sandboxError', (error) => handleSandboxError(this, sandbox, error));\n \n sandbox.on('rejectedWorkMetrics', (data) =>{\n function updateRejectedMetrics(report) {\n ['total', 'CPU', 'webGL'].forEach((key) => {\n if (report[key]) sandbox.slice.rejectedTimeReport[key] += report[key];\n })\n }\n \n // If the slice already has rejected metrics, add this data to it. If not, assign this data to slices rejected metrics property\n if (sandbox.slice) {\n (sandbox.slice.rejectedTimeReport) ? 
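// The 'status' and 'workEmit' sends above follow a send-or-queue pattern: when the
// connection rejects, the payload is parked on a retry queue and the connection is
// closed so that the next watchdog cycle can reopen it and drain the queue.
// A stripped-down sketch of that idea (names are illustrative):
async function sendOrQueue (connection, retryQueue, operation, data) {
  try {
    await connection.send(operation, data);
  } catch (error) {
    retryQueue.push({ operation, data });   // retried on the next connection
    connection.close();
  }
}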
updateRejectedMetrics(data.timeReport) : sandbox.slice.rejectedTimeReport = data.timeReport;\n }\n })\n \n // If the sandbox terminated and we are not shutting down, then should return all work which is currently\n // not being computed if all sandboxes are dead and the attempt to create a new one fails.\n sandbox.on('terminated',async () => {\n if (this.sandboxes.length > 0) {\n let terminatedSandboxes = this.sandboxes.filter(sbx => sbx.isTerminated);\n if (terminatedSandboxes.length === this.sandboxes.length) {\n debugging('supervisor') && console.debug(`readySandboxes: Create 1 sandbox in the sandbox-terminated-handler, total sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n await this.readySandboxes(1);\n \n // If we cannot create a new sandbox, that probably means we're on a screensaver worker\n // and the screensaver is down. So return the slices to the scheduler.\n if (this.sandboxes.length !== terminatedSandboxes.length + 1) {\n this.returnSlices(this.queuedSlices).then(() => {\n this.queuedSlices.length = 0;\n });\n }\n }\n }\n })\n\n const delayMs =\n 1000 *\n (tuning.minSandboxStartDelay +\n Math.random() *\n (tuning.maxSandboxStartDelay - tuning.minSandboxStartDelay));\n \n sandboxStartPromises.push(\n sandbox\n .start(delayMs)\n .then(() => {\n if (!allocateLocalSandboxes) this.readiedSandboxes.push(sandbox);\n this.sandboxes.push(sandbox);\n sandboxes.push(sandbox);\n }).catch((err) => {\n errors.push(err);\n this.returnSandbox(sandbox);\n if (err.code === 'ENOWORKER') {\n throw new DCPError(\"Cannot use localExec without dcp-worker installed. Use the command 'npm install dcp-worker' to install the neccessary modules.\", 'ENOWORKER');\n }\n }));\n }\n \n await Promise.all(sandboxStartPromises);\n\n if (errors.length) {\n console.warn(`Failed to ready ${errors.length} of ${numSandboxes} sandboxes.`, errors);\n throw new Error('Failed to ready sandboxes.');\n }\n\n debugging('supervisor') && console.log(`readySandboxes: Readied ${sandboxes.length} sandboxes ${JSON.stringify(sandboxes.map(sandbox => sandbox.id))}`);\n \n return sandboxes;\n }\n\n /**\n * Accepts a sandbox after it has finished working or encounters an error.\n * If the sandbox was terminated or if \"!slice || slice.failed\" then\n * the sandbox will be removed from the sandboxes array and terminated if necessary.\n * Otherwise it will try to distribute a slice to the sandbox immediately.\n *\n * @param {Sandbox} sandbox - the sandbox to return\n * @param {Slice} [slice] - the slice just worked on; !slice => terminate\n * @param {boolean} [verifySandboxIsNotTerminated=true] - if true, check sandbox is not already terminated\n */\n returnSandbox (sandbox, slice, verifySandboxIsNotTerminated=true) {\n if (!slice || slice.failed || sandbox.isTerminated) {\n \n this.removeSandbox(sandbox);\n \n if (!sandbox.isTerminated) {\n debugging('supervisor') && console.log(`Supervisor.returnSandbox: Terminating ${sandbox.identifier}${slice ? `~${slice.identifier}` : ''}, # of sandboxes ${this.sandboxes.length}`);\n sandbox.terminate(false);\n } else {\n debugging('supervisor') && console.log(`Supervisor.returnSandbox: Already terminated ${sandbox.identifier}${slice ? 
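// Sandbox starts are jittered between tuning.minSandboxStartDelay and
// tuning.maxSandboxStartDelay (seconds) so a burst of new sandboxes does not hit
// the evaluator at the same instant. The same computation in isolation
// (the tuning values passed in are illustrative):
function jitteredDelayMs (minSeconds, maxSeconds) {
  return 1000 * (minSeconds + Math.random() * (maxSeconds - minSeconds));
}
// e.g. jitteredDelayMs(0.5, 3) -> somewhere between 500 ms and 3000 ms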
`~${slice.identifier}` : ''}, # of sandboxes ${this.sandboxes.length}`);\n // XXXpfr: April 13, 2022\n // I'm trying to understand and control when sandboxes get terminated.\n // Because of races and random worker crashes, it is impossible to not try to terminate a sandbox more than once.\n // But at some places where returnSandbox is we shouldn't see this behavior, hence this exception.\n // If I don't fix this is the next 30 days or I forget, please delete this exception.\n if (false)\n {}\n }\n }\n }\n\n /**\n * Terminates sandboxes, in order of creation, when the total started sandboxes exceeds the total allowed sandboxes.\n *\n * @returns {Promise<void>}\n */\n pruneSandboxes () {\n let numOver = this.sandboxes.length - (dcpConfig.worker.maxAllowedSandboxes + this.maxWorkingSandboxes);\n if (numOver <= 0) return;\n \n // Don't kill readied sandboxes while creating readied sandboxes.\n for (let index = 0; index < this.readiedSandboxes.length; ) {\n const sandbox = this.readiedSandboxes[index];\n // If the sandbox is allocated, advance to the next one in the list.\n if (sandbox.allocated) {\n index++;\n continue;\n }\n // Otherwise, remove this sandbox but look at the same array index in the next loop.\n debugging('supervisor') && console.log(`pruneSandboxes: Terminating readied sandbox ${sandbox.identifier}`);\n this.readiedSandboxes.splice(index, 1);\n this.returnSandbox(sandbox);\n\n if (--numOver <= 0) break;\n }\n\n if (numOver <= 0) return;\n for (let index = 0; index < this.assignedSandboxes.length; ) {\n const sandbox = this.assignedSandboxes[index];\n // If the sandbox is allocated, advance to the next one in the list.\n if (sandbox.allocated) {\n index++;\n continue;\n }\n // Otherwise, remove this sandbox but look at the same array index in the next loop.\n debugging('supervisor') && console.log(`pruneSandboxes: Terminating assigned sandbox ${sandbox.identifier}`);\n this.assignedSandboxes.splice(index, 1);\n this.returnSandbox(sandbox);\n\n if (--numOver <= 0) break;\n }\n }\n \n /**\n * Basic watch dog to check if there are idle sandboxes and\n * attempts to nudge the supervisor to feed them work.\n *\n * Run in an interval created in @constructor .\n * @returns {Promise<void>}\n */\n async watchdog () {\n if (!this.watchdogState)\n this.watchdogState = {};\n\n // Every 5 minutes, report progress of all working slices to the scheduler\n if (Date.now() > ((this.lastProgressReport || 0) + sandboxTuning.progressReportInterval)) {\n // console.log('454: Assembling progress update...');\n this.lastProgressReport = Date.now();\n\n //\n // Note: this.slices is the disjoint union of:\n // this.allocatedSlices, \n // this.queuedSlices, \n // this.slices.filter(slice => !slice.isUnassigned) .\n // When a slice is not in these 3 arrays, the slice is lost.\n //\n \n const currentLostSlices = this.slices.filter(slice => slice.isUnassigned \n && this.queuedSlices.indexOf(slice) === -1\n && this.allocatedSlices.indexOf(slice) === -1);\n\n if (currentLostSlices.length > 0) {\n this.lostSlices.push(...currentLostSlices);\n // Try to recover.\n // Needs more work and testing.\n // Test when we can come up with a decent lost slice repro case.\n // --> this.queuedSlices.push(...currentLostSlices);\n }\n\n if (this.lostSlices.length > 0) {\n if (true) { // Keep this on for awhile, until we know lost slices aren't happening.\n console.warn('Supervisor.watchdog: Found lost slices!');\n for (const slice of this.lostSlices)\n console.warn('\\t', slice.identifier);\n }\n this.lostSlices = 
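// pruneSandboxes walks an array while removing from it, so the index is only
// advanced when the current element is kept. The same traversal pattern in
// isolation (function and parameter names are illustrative):
function pruneWhere (arr, shouldRemove, onRemove, budget) {
  for (let i = 0; i < arr.length && budget > 0; ) {
    if (!shouldRemove(arr[i])) { i++; continue; }
    const [removed] = arr.splice(i, 1);   // next element slides into index i
    onRemove(removed);
    budget--;
  }
  return budget;                          // how much of the removal budget is left
}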
this.lostSlices.filter(slice => slice.isUnassigned);\n }\n\n const slices = [];\n this.queuedSlices.forEach(slice => {\n assert(slice && slice.sliceNumber > 0);\n addToSlicePayload(slices, slice, sliceStatus.scheduled);\n });\n\n this.allocatedSlices.forEach(slice => {\n assert(slice && slice.sliceNumber > 0);\n addToSlicePayload(slices, slice, 'progress'); // Beacon.\n });\n\n if (slices.length) {\n // console.log('471: sending progress update...');\n const progressReportPayload = {\n worker: this.workerOpaqueId,\n slices,\n };\n\n this.resultSubmitterConnection.send('status', progressReportPayload)\n .catch(error => {\n debugging('supervisor') && console.error('479: Failed to send status update:', error/*.message*/);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: progressReportPayload })\n this.resultSubmitterConnection.close();\n });\n }\n }\n\n if (this.worker.working) {\n if (this.unallocatedSpace > 0) {\n await this.work().catch(err => {\n if (!this.watchdogState[err.code || '0'])\n this.watchdogState[err.code || '0'] = 0;\n if (Date.now() - this.watchdogState[err.code || '0'] > ((dcpConfig.worker.watchdogLogInterval * timeDilation || 120) * 1000))\n console.error('301: Failed to start work:', err);\n this.watchdogState[err.code || '0'] = Date.now();\n });\n }\n\n this.pruneSandboxes();\n }\n }\n\n /**\n * Gets the logical and physical number of cores and also\n * the total number of sandboxes the worker is allowed to run\n *\n */\n getStatisticsCPU() {\n if (DCP_ENV.isBrowserPlatform) {\n return {\n worker: this.workerOpaqueId,\n lCores: window.navigator.hardwareConcurrency,\n pCores: dcpConfig.worker.pCores || window.navigator.hardwareConcurrency,\n sandbox: this.maxWorkingSandboxes\n }\n }\n\n return {\n worker: this.workerOpaqueId,\n lCores: requireNative('os').cpus().length,\n pCores: requireNative('physical-cpu-count'),\n sandbox: this.maxWorkingSandboxes\n }\n }\n\n /**\n * Returns the number of unallocated sandbox slots to send to fetchTask.\n *\n * @returns {number}\n */\n numberOfAvailableSandboxSlots() {\n let numCores;\n if (this.options.priorityOnly && this.options.jobAddresses.length === 0) {\n numCores = 0;\n } else if (this.queuedSlices.length > 1) {\n // We have slices queued, no need to fetch\n numCores = 0;\n } else {\n // The queue is almost empty (there may be 0 or 1 element), fetch a full task.\n // The task is full, in the sense that it will contain slices whose\n // aggregate execution time is this.maxWorkingSandboxes * 5-minutes.\n // However, there can only be this.unallocatedSpace # of long slices.\n // Thus we need to know whether the last slice in this.queuedSlices is long or not.\n // (A long slice has estimated execution time >= 5-minutes.)\n const longSliceCount = (this.queuedSlices.length > 0 && this.queuedSlices[0].isLongSlice) ? 
1 : 0;\n numCores = this.unallocatedSpace - longSliceCount;\n }\n return numCores;\n }\n\n /**\n * Call to start doing work on the network.\n * This is the one place where requests to fetch new slices are made.\n * After the initial slices are fetched it calls this.distributeQueuedSlices.\n *\n * @returns {Promise<void>}, unallocatedSpace ${this.unallocatedSpace}\n */\n async work()\n {\n // When inside matchSlicesWithSandboxes, don't reenter Supervisor.work to fetch new work or create new sandboxes.\n if (this.matching) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.work: Do not interleave work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n return Promise.resolve();\n }\n\n await this.setDefaultIdentityKeystore();\n\n // Instantiate connections that don't exist.\n this.instantiateAllConnections();\n\n const numCores = this.numberOfAvailableSandboxSlots();\n\n debugging() && console.log(`Supervisor.work: Try to get ${numCores} slices in working sandboxes, unallocatedSpace ${this.unallocatedSpace}, queued slices ${this.queuedSlices.length}, # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching: ${this.isFetchingNewWork}`);\n \n // Fetch a new task if we have no more slices queued, then start workers\n try {\n if (numCores > 0 && !this.isFetchingNewWork) {\n this.isFetchingNewWork = true;\n\n /**\n * This will only ready sandboxes up to a total count of\n * maxWorkingSandboxes (in any state). It is not possible to know the\n * actual number of sandboxes required until we have the slices because we\n * may have sandboxes assigned for the slice's job already.\n *\n * If the evaluator cannot start (ie. if the evalServer is not running),\n * then the while loop will keep retrying until the evalServer comes online\n */\n if (this.maxWorkingSandboxes > this.sandboxes.length) {\n // Note: The old technique had \n // while (this.maxWorkingSandboxes > this.sandboxes.length) {....\n // and sometimes we'd get far too many sandboxes, because it would keep looping while waiting for\n // this.readySandboxes(this.maxWorkingSandboxes - this.sandboxes.length);\n // to construct the rest of the sandboxes. 
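// numberOfAvailableSandboxSlots only asks for a full task when the queue is
// (almost) empty, and discounts one slot when the remaining queued slice is
// "long" (estimated execution time of five minutes or more). A stand-alone
// restatement of that decision (illustrative sketch):
function availableSlots (queuedSlices, unallocatedSpace, priorityOnlyWithNoJobs) {
  if (priorityOnlyWithNoJobs) return 0;
  if (queuedSlices.length > 1) return 0;   // still plenty queued, no fetch needed
  const longSliceCount =
    queuedSlices.length > 0 && queuedSlices[0].isLongSlice ? 1 : 0;
  return unallocatedSpace - longSliceCount;
}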
The fix is to only loop when the 1st \n // await this.readySandboxes(1) \n // is failing.\n let needFirstSandbox = true;\n while (needFirstSandbox) {\n debugging('supervisor') && console.log(`Supervisor.work: ready 1 sandbox, # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n await this.readySandboxes(1)\n .then(() => {\n debugging('supervisor') && console.log(`Supervisor.work: ready ${this.maxWorkingSandboxes - this.sandboxes.length} sandbox(es), # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n this.readySandboxes(this.maxWorkingSandboxes - this.sandboxes.length);\n needFirstSandbox = false;\n }).catch(error => {\n console.warn('906: failed to ready sandboxes; will retry', error.code, error.message);\n });\n }\n }\n\n /**\n * Temporary change: Assign the capabilities of one of readied sandboxes\n * before fetching slices from the scheduler.\n *\n * TODO: Remove this once fetchTask uses the capabilities of every\n * sandbox to fetch slices.\n */\n if (!this.capabilities) {\n this.capabilities = this.sandboxes[0].capabilities;\n this.emit('capabilitiesCalculated', this.capabilities);\n }\n\n if (DCP_ENV.isBrowserPlatform && this.capabilities.browser)\n this.capabilities.browser.chrome = DCP_ENV.isBrowserChrome;\n\n const fetchTimeout = setTimeout(() => {\n console.warn(`679: Fetch exceeded timeout, will reconnect at next watchdog interval`);\n \n this.taskDistributorConnection.close('Fetch timed out', Math.random() > 0.5).catch(error => {\n console.error(`931: Failed to close task-distributor connection`, error);\n });\n this.resultSubmitterConnection.close('Fetch timed out', Math.random() > 0.5).catch(error => {\n console.error(`920: Failed to close result-submitter connection`, error);\n });\n this.isFetchingNewWork = false;\n this.instantiateAllConnections();\n }, 3 * 60 * 1000); // max out at 3 minutes to fetch\n\n // ensure result submitter and task distributor connections before fetching tasks\n try\n {\n await this.resultSubmitterConnection.keepalive();\n await this.taskDistributorConnection.keepalive();\n }\n catch (e)\n {\n console.error('Failed to connect to result submitter, refusing to fetch slices. Will try again at next fetch cycle.')\n debugging('supervisor') && console.log(`Error: ${e}`);\n this.isFetchingNewWork = false; // <-- done in the `finally` block, below\n clearTimeout(fetchTimeout);\n this.taskDistributorConnection.close('Failed to connect to result-submitter', true).catch(error => {\n console.error(`939: Failed to close task-distributor connection`, error);\n });\n this.resultSubmitterConnection.close('Failed to connect to result-submitter', true).catch(error => {\n console.error(`942: Failed to close result-submitter connection`, error);\n });\n return Promise.resolve();\n }\n await this.fetchTask(numCores).finally(() => {\n clearTimeout(fetchTimeout);\n this.isFetchingNewWork = false;\n });\n }\n\n this.distributeQueuedSlices().then(() => debugging('supervisor') && 'supervisor: finished distributeQueuedSlices()').catch((e) => {\n // We should never get here, because distributeQueuedSlices was changed\n // to try to catch everything and return slices and sandboxes.\n // If we do catch here it may mean a slice was lost. 
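// work() arms a three-minute timer before fetching; if requestTask has not settled
// by then, the connections are closed and fetching is re-enabled for the next
// watchdog pass. The shape of that guard in isolation (illustrative sketch):
async function withFetchTimeout (doFetch, onTimeout, ms = 3 * 60 * 1000) {
  const timer = setTimeout(onTimeout, ms);  // fires only if doFetch is still pending
  try {
    return await doFetch();
  } finally {
    clearTimeout(timer);
  }
}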
\n console.error('Supervisor.work catch handler for distributeQueuedSlices.', e);\n });\n // No catch(), because it will bubble outward to the caller\n } finally {\n }\n }\n\n /**\n * Generate the workerComputeGroups property of the requestTask message. \n * \n * Concatenate the compute groups object from dcpConfig with the list of compute groups\n * from the supervisor, and remove the public group if accidentally present. Finally,\n * we transform joinSecrets/joinHash into joinHashHash for secure transmission.\n *\n * @note computeGroup objects with joinSecrets are mutated to record their hashes. This\n * affects the supervisor options and dcpConfig. Re-adding a joinSecret property\n * to one of these will cause the hash to be recomputed.\n */\n generateWorkerComputeGroups()\n {\n var computeGroups = Object.values(dcpConfig.worker.computeGroups || {});\n if (this.options.computeGroups)\n computeGroups = computeGroups.concat(this.options.computeGroups);\n computeGroups = computeGroups.filter(group => group.id !== constants.computeGroups.public.id);\n const hashedComputeGroups = [];\n for (const group of computeGroups)\n {\n const groupCopy = Object.assign({}, group);\n if ((group.joinSecret || group.joinHash) && (!group.joinHashHash || this.lastDcpsid !== this.taskDistributorConnection.dcpsid))\n {\n let joinHash;\n if (group.joinHash) {\n joinHash = group.joinHash.replace(/\\s+/g, ''); // strip whitespace\n } else {\n joinHash = calculateJoinHash(groupCopy);\n } \n\n groupCopy.joinHashHash = hash.calculate(hash.eh1, joinHash, this.taskDistributorConnection.dcpsid);\n delete groupCopy.joinSecret;\n delete groupCopy.joinHash;\n debugging('computeGroups') && console.debug(`Calculated joinHash=${joinHash} for`, groupCopy);\n }\n hashedComputeGroups.push(groupCopy);\n }\n this.lastDcpsid = this.taskDistributorConnection.dcpsid;\n debugging('computeGroups') && console.debug('Requesting ', computeGroups.length, 'non-public groups for session', this.lastDcpsid);\n return hashedComputeGroups;\n }\n\n /**\n * Remove all unreferenced jobs in `this.cache`.\n *\n * @param {any[]} newJobs - Jobs that should not be removed from\n * `this.cache`.\n */\n cleanJobCache(newJobs = []) {\n /* Delete all jobs in the supervisorCache that are not represented in this newJobs,\n * or in this.queuedSlices, or there is no sandbox assigned to these jobs.\n * Note: There can easily be 200+ places to check; using a lookup structure to maintain O(n).\n */\n if (this.cache.jobs.length > 0) {\n const jobAddressMap = {};\n Object.keys(newJobs).forEach(jobAddress => { jobAddressMap[jobAddress] = 1; });\n this.slices.forEach(slice => { if (!jobAddressMap[slice.jobAddress]) jobAddressMap[slice.jobAddress] = 1; });\n this.cache.jobs.forEach(jobAddress => {\n if (!jobAddressMap[jobAddress]) {\n this.cache.remove('job', jobAddress);\n // Remove and return the corresponding sandboxes from this.sandboxes.\n const deadSandboxes = this.sandboxes.filter(sb => sb.jobAddress === jobAddress);\n if (deadSandboxes.length > 0) {\n deadSandboxes.forEach(sandbox => { this.returnSandbox(sandbox); });\n debugging('supervisor') && console.log(`Supervisor.fetchTask: Deleting job ${jobAddress} from cache and assigned sandboxes ${deadSandboxes.map(s => s.id)}, # of sandboxes ${this.sandboxes.length}.`);\n }\n }\n });\n }\n }\n\n /**\n * Fetches a task, which contains job information and slices for sandboxes and\n * manages events related to fetching tasks so the UI can more clearly display\n * to user what is actually happening.\n * @param 
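// generateWorkerComputeGroups never transmits joinSecret or joinHash directly:
// the joinHash is re-hashed together with the current task-distributor session id
// (dcpsid), so the joinHashHash sent on the wire is only valid for that session.
// A rough analogue using Node's crypto module -- the real code uses the package's
// own hash.calculate(hash.eh1, ...), so this is an assumption-laden sketch:
const crypto = require('crypto');
function sessionBoundHash (joinHash, dcpsid) {
  return crypto.createHash('sha256')
    .update(String(joinHash))
    .update(String(dcpsid))
    .digest('hex');
}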
{number} numCores\n * @returns {Promise<void>} The requestTask request, resolve on success, rejects otherwise.\n * @emits Supervisor#fetchingTask\n * @emits Supervisor#fetchedTask\n */\n async fetchTask(numCores) {\n\n // Don't reenter\n if (this.matching || numCores <= 0) {\n // Interesting and noisy.\n debugging('supervisor') && console.log(`Supervisor.fetchTask: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return Promise.resolve();\n }\n\n //\n // Oversubscription mitigation.\n // Update when there are less available sandbox slots than numCores.\n const checkNumCores = this.numberOfAvailableSandboxSlots();\n if (numCores > checkNumCores) numCores = checkNumCores;\n if (numCores <= 0) return Promise.resolve();\n\n this.emit('fetchingTask');\n debugging('supervisor') && console.debug('supervisor: fetching task');\n const requestPayload = {\n numCores,\n coreStats: this.getStatisticsCPU(),\n numGPUs: this.defaultMaxGPUs,\n capabilities: this.capabilities,\n paymentAddress: this.paymentAddress,\n jobAddresses: this.options.jobAddresses || [], // force array; when set, only fetches slices for these jobs\n localExec: this.options.localExec,\n workerComputeGroups: this.generateWorkerComputeGroups(),\n minimumWage: dcpConfig.worker.minimumWage || this.options.minimumWage,\n readyJobs: [ /* list of jobs addresses XXXwg */ ],\n previouslyWorkedJobs: this.ringBufferofJobs.buf, //Only discrete jobs\n rejectedJobs: this.rejectedJobs,\n };\n // workers should be part of the public compute group by default\n if (!booley(dcpConfig.worker.leavePublicGroup) && !booley(this.options.leavePublicGroup) && (!requestPayload.localExec))\n requestPayload.workerComputeGroups.push(constants.computeGroups.public);\n debugging('computeGroups') && console.log(`Fetching work for ${requestPayload.workerComputeGroups.length} ComputeGroups: `, requestPayload.workerComputeGroups);\n debugging('supervisor') && console.log(`fetchTask wants ${numCores} slice(s), unallocatedSpace ${this.unallocatedSpace}, queuedSlices ${this.queuedSlices.length}`);\n try {\n debugging('requestTask') && console.debug('fetchTask: requestPayload', requestPayload);\n\n let result = await this.taskDistributorConnection.send('requestTask', requestPayload).catch((error) => {\n debugging('supervisor') && console.error(`Unable to request task from scheduler: ${error}. Will try again on a new connection.`);\n this.taskDistributorConnection.close(error, true);\n throw error; /* caught below */\n });\n let responsePayload = result.payload; \n\n if (!result.success) {\n debugging() && console.log('Task fetch failure; request=', requestPayload);\n debugging() && console.log('Task fetch failure; response=', result.payload);\n throw new DCPError('Unable to fetch task for worker', responsePayload);\n }\n\n const sliceCount = responsePayload.body.task.length || 0;\n\n /**\n * The fetchedTask event fires when the supervisor has finished trying to\n * fetch work from the scheduler (task-manager). 
The data emitted is the\n * number of new slices to work on in the fetched task.\n *\n * @event Supervisor#fetchedTask\n * @type {number}\n */\n this.emit('fetchedTask', sliceCount);\n\n if (sliceCount < 1) {\n return Promise.resolve();\n }\n\n /**\n * DCP-1698 Send auth msg with tasks to worker, then validate authority of worker to send slice info back to scheduler.\n * payload structure: { owner: this.address, signature: signature, auth: messageLightWeight, body: messageBody };\n * messageLightWeight: { workerId: worker, jobSlices, schedulerId, jobCommissions }\n * messageBody: { newJobs: await getNewJobsForTask(dbScheduler, task, request), task }\n */\n const { body, ...authorizationMessage } = responsePayload;\n const { newJobs, task } = body;\n assert(newJobs); // It should not be possible to have !newJobs -- we throw on !success.\n \n /*\n * Ensure all jobs received from the scheduler are:\n * 1. If we have specified specific jobs the worker may work on, the received jobs are in the specified job list\n * 2. If we are in localExec, at most 1 unique job type was received (since localExec workers are designated for only\n * one job)\n * If the received jobs are not within these parameters, stop the worker since the scheduler cannot be trusted at that point.\n */\n if ((this.options.jobAddresses.length && !Object.keys(newJobs).every((ele) => this.options.jobAddresses.includes(ele)))\n || (this.options.localExec && Object.keys(newJobs).length > 1))\n {\n console.error(\"Worker received slices it shouldn't have. Rejecting the work and stopping.\");\n process.exit(1);\n }\n\n debugging() && console.log(`Supervisor.fetchTask: task: ${task.length}/${numCores}, jobs: ${Object.keys(newJobs).length}, authSlices: ${compressJobMap(authorizationMessage.auth.authSlices, true /* skipFirst*/, this.addressTruncationLength /* digits*/)}`);\n // Delete all jobs in the supervisorCache that are not represented in this task,\n // or in this.queuedSlices, or there is no sandbox assigned to these jobs.\n this.cleanJobCache(newJobs);\n\n for (const jobAddress of Object.keys(newJobs))\n if (!this.cache.cache.job[jobAddress])\n this.cache.store('job', jobAddress, newJobs[jobAddress]);\n\n // Memoize authMessage onto the Slice object, this should\n // follow it for its entire life in the worker.\n const tmpQueuedSlices = task.map(taskElement => new Slice(taskElement, authorizationMessage));\n\n // Make sure old stuff is up front.\n // matchSlicesWithSandboxes dequeues this.queuedSlices as follows:\n // slicesToMatch = this.queuedSlices.slice(0, numCores);\n this.slices.push(...tmpQueuedSlices);\n this.queuedSlices.push(...tmpQueuedSlices);\n \n // Populating the ring buffer based on job's discrete property \n Object.values(newJobs).forEach(job => {\n if(job.requirements.discrete && this.ringBufferofJobs.find(element => element === job.address) === undefined) {\n this.ringBufferofJobs.push(job.address);\n }\n });\n \n } catch (error) {\n this.emit('fetchTaskFailed', error);\n debugging('supervisor') && console.debug(`Supervisor.fetchTask failed!: error: ${error}`);\n }\n }\n\n /**\n * For each slice in this.queuedSlices, match with a sandbox in the following order:\n * 1. Try to find an already assigned sandbox in this.assignedSandboxes for the slice's job.\n * 2. Find a ready sandbox in this.readiedSandboxes that is unassigned.\n * 3. 
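// fetchTask refuses work the scheduler should not have sent: every received job
// must be in the configured allow-list (when one exists), and a localExec worker
// may only receive a single job. The same check in isolation (illustrative sketch):
function receivedJobsAreAcceptable (newJobs, jobAddresses, localExec) {
  const received = Object.keys(newJobs);
  if (jobAddresses.length && !received.every(addr => jobAddresses.includes(addr)))
    return false;
  if (localExec && received.length > 1)
    return false;
  return true;
}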
Ready a new sandbox and use that.\n *\n * Take great care in assuring sandboxes and slices are uniquely associated, viz.,\n * a given slice cannot be associated with multiple sandboxes and a given sandbox cannot be associated with multiple slices.\n * The lack of such uniqueness has been the root cause of several difficult bugs.\n *\n * Note: When a sandbox is paired with a slice, execution is pending and sandbox.allocated=true and\n * sandbox.slice=slice and sandbox.jobAddress=slice.jobAddress. This is what 'allocated' means.\n * Immediately upon the exit of sandbox.work, sandbox.allocated=false is set and if an exception\n * wasn't thrown, the paired slice is placed in this.assignedSandboxes.\n * Thus from the pov of supervisor, this.allocatedSandboxes is deterministic and this.workingSandboxes is not.\n * Please try to not use this.workingSandboxes. It is deprecated.\n *\n * The input is numCores, this,queuedSlices, this.assignedSandboxes and this.readiedSandboxes.\n * If there are not enough sandboxes, new readied sandboxes will be created using\n * await this.readySandboxes(...)\n * And it is this await boundary that has caused many bugs.\n * We try not to make assumptions about non-local state across the await boundary.\n *\n * @param {number} numCores - The number of available sandbox slots.\n * @param {boolean} [throwExceptions=true] - Whether to throw exceptions when checking for sanity.\n * @returns {Promise<SandboxSlice[]>} Returns SandboxSlice[], may have length zero.\n */\n async matchSlicesWithSandboxes (numCores, throwExceptions = true) {\n\n const sandboxSlices = [];\n if (this.queuedSlices.length === 0 || this.matching || numCores <= 0) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.matchSlicesWithSandboxes: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return sandboxSlices;\n }\n\n //\n // Oversubscription mitigation.\n // Update when there are less available sandbox slots than numCores.\n // We cannot use this.unallocatedSpace here because its value is artificially low or zero, because in\n // this.distributedQueuedSlices we use the pseudo-mutex trick: this.acquire(howManySandboxSlotsToReserve)/this.release().\n // Note: Do not use this.numberOfCoresReserved outside of a function locked with this.acquire(howManySandboxSlotsToReserve) .\n const checkNumCores = this.numberOfCoresReserved; // # of locked sandbox slots.\n if (numCores > checkNumCores) numCores = checkNumCores;\n if (numCores <= 0) return sandboxSlices;\n\n // Don't ask for more than we have.\n if (numCores > this.queuedSlices.length)\n numCores = this.queuedSlices.length;\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: numCores ${numCores}, queued slices ${this.queuedSlices.length}: assigned ${this.assignedSandboxes.length}, readied ${this.readiedSandboxes.length}, unallocated ${this.unallocatedSpace}, # of sandboxes: ${this.sandboxes.length}`);\n\n if (debugging('supervisor')) {\n dumpSlicesIfNotUnique(this.queuedSlices, 'Warning: this.queuedSlices slices are not unique -- this is ok when slice is rescheduled.');\n dumpSandboxesIfNotUnique(this.readiedSandboxes, 'Warning: this.readiedSandboxes sandboxes are not unique!');\n dumpSandboxesIfNotUnique(this.assignedSandboxes, 'Warning: this.assignedSandboxes sandboxes are not unique!');\n }\n\n // Three functions to 
validate slice and sandbox.\n function checkSlice(slice, checkAllocated=true) {\n if (!slice.isUnassigned) throw new DCPError(`Slice must be unassigned: ${slice.identifier}`);\n if (checkAllocated && slice.allocated) throw new DCPError(`Slice must not already be allocated: ${slice.identifier}`);\n }\n function checkSandbox(sandbox, isAssigned) {\n if (sandbox.allocated) throw new DCPError(`Assigned sandbox must not be already allocated: ${sandbox.identifier}`);\n if (isAssigned && !sandbox.isAssigned) throw new DCPError(`Assigned sandbox is not marked as assigned: ${sandbox.identifier}`);\n if (!isAssigned && !sandbox.isReadyForAssign) throw new DCPError(`Readied sandbox is not marked as ready for assign: ${sandbox.identifier}`);\n }\n\n // Sanity checks.\n if (throwExceptions) {\n this.assignedSandboxes.forEach(sandbox => { checkSandbox(sandbox, true /* isAssigned*/); });\n this.readiedSandboxes.forEach(sandbox => { checkSandbox(sandbox, false /* isAssigned*/); });\n this.queuedSlices.forEach(slice => { checkSlice(slice); });\n } else {\n this.assignedSandboxes = this.assignedSandboxes.filter(sandbox => !sandbox.allocated && sandbox.isAssigned);\n this.readiedSandboxes = this.readiedSandboxes.filter(sandbox => !sandbox.allocated && sandbox.isReadyForAssign);\n this.queuedSlices = this.queuedSlices.filter(slice => !slice.allocated && slice.isUnassigned);\n }\n\n const sandboxKind = {\n assigned: 0,\n ready: 1,\n new: 2,\n };\n\n const ceci = this;\n /**\n * Auxiliary function to pair a sandbox with a slice and mark the sandbox as allocated.\n * An allocated sandbox is reserved and will not be released until the slice completes execution on the sandbox.\n *\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @param {number} kind\n */\n function pair(sandbox, slice, kind) {\n checkSandbox(sandbox, kind === sandboxKind.assigned);\n checkSlice(slice, kind === sandboxKind.assigned);\n slice.allocated = true;\n sandbox.allocated = true;\n sandbox.jobAddress = slice.jobAddress; // So we can know which jobs to not delete from this.cache .\n sandbox.slice = slice;\n sandboxSlices.push({ sandbox, slice });\n if (Supervisor.sliceTiming) slice['pairingDelta'] = Date.now();\n if (debugging('supervisor')) {\n let fragment = 'New readied';\n if (kind === sandboxKind.assigned) fragment = 'Assigned';\n else if (kind === sandboxKind.ready) fragment = 'Readied';\n console.log(`matchSlicesWithSandboxes.pair: ${fragment} sandbox matched ${ceci.dumpSandboxAndSlice(sandbox, slice)}`);\n }\n }\n\n // These three arrays are used to track/store slices and sandboxes,\n // so that when an exception occurs, the following arrays are restored:\n // this.queuedSlices, this.assignedSandboxes, this.realizedSandboxes.\n let slicesToMatch = [];\n let trackAssignedSandboxes = [];\n let trackReadiedSandboxes = [];\n try\n {\n this.matching = true;\n\n let assignedCounter = 0; // How many assigned sandboxes are being used.\n let readyCounter = 0; // How many sandboxes used from the existing this.readiedSandboxes.\n let newCounter = 0; // How many sandboxes that needed to be newly created.\n\n //\n // The Ideas:\n // 1) We match each slice with a sandbox. First we match with assigned sandboxes in the order\n // that they appear in this.queuedSlices. 
Then we match in-order with existing this.readiedSandboxes\n // Then we match in-order with new new readied sandboxes created through\n // await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n // This allows us to try different orderings of execution of slices. E.g. Wes suggested\n // trying to execute slices from different jobs with maximal job diversity -- specifically\n // if there are 3 jobs j1,j2,j3, with slices s11, s12 from j1, s21, s22, s23 from j2 and\n // s31, s32 from j3, then we try to schedule, in order s11, s21, s31, s12, s22, s32, s23.\n //\n // 2) Before matching slices with sandboxes, we allocate available assigned and readied sandboxes\n // and if more are needed then we create and allocate new ones.\n //\n // 3) Finally we match slices with sandboxes and return an array of sandboxSlice pairs.\n //\n // Note: The ordering of sandboxSlices only partially corresponds to the order of this.queuedSlices.\n // It's easy to do. When pairing with assigned sandboxes, any slice in this.queuedSlices which doesn't\n // have an assigned sandbox, will add null to the sandboxSlices array. Then when pairing with readied sandboxes,\n // we fill-in the null entries in the sandboxSlices array.\n //\n /** XXXpfr @todo When it is needed, fix the ordering as described above. */\n\n // Get the slices that are being matched.\n slicesToMatch = this.queuedSlices.slice(0, numCores);\n this.queuedSlices = this.queuedSlices.slice(numCores);\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: slicesToMatch ${this.dumpSlices(slicesToMatch)}`);\n\n // Create object map: jobAddress -> sandboxes with sandboxes.jobAddress === jobAddress .\n const jobSandboxMap = toJobMap(this.assignedSandboxes, sandbox => sandbox);\n \n // Create array to hold slices which do not have assigned sandboxes.\n // These slices will need to be paired with existing and possibly new readied sandboxes.\n // Specifically, the sandboxes from existing this.readiedSandboxes and new sandboxes\n // created through await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n const slicesThatNeedSandboxes = [];\n\n // Pair assigned sandboxes with slices.\n for (const slice of slicesToMatch) {\n const assigned = jobSandboxMap[slice.jobAddress];\n if (assigned && assigned.length > 0) {\n // Pair.\n const sandbox = assigned.pop();\n pair(sandbox, slice, sandboxKind.assigned);\n this.removeElement(this.assignedSandboxes, sandbox);\n // Track.\n trackAssignedSandboxes.push(sandbox);\n assignedCounter++;\n } else {\n // Don't lose track of these slices.\n slice.allocated = true;\n slicesThatNeedSandboxes.push(slice);\n }\n }\n\n // Pair readied sandboxes with slices.\n readyCounter = Math.min(slicesThatNeedSandboxes.length, this.readiedSandboxes.length);\n newCounter = slicesThatNeedSandboxes.length - readyCounter;\n // Track.\n trackReadiedSandboxes = this.readiedSandboxes.slice(0, readyCounter);\n this.readiedSandboxes = this.readiedSandboxes.slice(readyCounter);\n for (const sandbox of trackReadiedSandboxes) {\n // Pair.\n const slice = slicesThatNeedSandboxes.pop();\n pair(sandbox, slice, sandboxKind.ready);\n }\n \n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: assignedCounter ${assignedCounter}, readyCounter ${readyCounter}, newCounter ${newCounter}, numCores ${numCores}`)\n\n // Validate algorithm consistency.\n if (Supervisor.debugBuild && assignedCounter + readyCounter + newCounter !== numCores) {\n // Structured assert.\n throw new 
DCPError(`matchSlicesWithSandboxes: Algorithm is corrupt ${assignedCounter} + ${readyCounter} + ${newCounter} !== ${numCores}`);\n }\n\n // Here is an await boundary.\n // Accessing non-local data across an await boundary may result in the unexpected.\n\n // Create new readied sandboxes to associate with slicesThatNeedSandboxes.\n if (newCounter > 0) {\n // When allocateLocalSandboxes is true, this.readySandboxes does not place the new sandboxes\n // on this.readiedSandboxes. Hence the new sandboxes are private and nobody else can see them.\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: creating ${newCounter} new sandboxes, # of sandboxes ${this.sandboxes.length}`);\n const readied = await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n // Track.\n trackReadiedSandboxes.push(...readied);\n\n for (const sandbox of readied) {\n assert(slicesThatNeedSandboxes.length > 0);\n // Pair\n const slice = slicesThatNeedSandboxes.pop();\n pair(sandbox, slice, sandboxKind.new);\n }\n \n // Put back any extras. There should not be any unless readySandboxes returned less than asked for.\n if (slicesThatNeedSandboxes.length > 0) {\n slicesThatNeedSandboxes.forEach(slice => {\n slice.allocated = false;\n this.queuedSlices.push(slice);\n });\n }\n }\n\n if ( false || debugging()) {\n console.log(`matchSlicesWithSandboxes: Matches: ${ this.dumpSandboxSlices(sandboxSlices) }`);\n this.dumpSandboxSlicesIfNotUnique(sandboxSlices, 'Warning: sandboxSlices; { sandbox, slice } pairs are not unique!');\n }\n } catch (e) {\n // Clear allocations.\n slicesToMatch.forEach(slice => { slice.allocated = false; });\n trackAssignedSandboxes.forEach(sandbox => { sandbox.allocated = false; sandbox.slice = null; });\n trackReadiedSandboxes.forEach(sandbox => { sandbox.allocated = false; sandbox.slice = null; sandbox.jobAddress = null; });\n \n // Filter out redundancies -- there shouldn't be any...\n slicesToMatch = slicesToMatch.filter(slice => this.queuedSlices.indexOf(slice) === -1);\n trackAssignedSandboxes = trackAssignedSandboxes.filter(sb => this.assignedSandboxes.indexOf(sb) === -1);\n trackReadiedSandboxes = trackReadiedSandboxes.filter(sb => this.readiedSandboxes.indexOf(sb) === -1);\n\n // Sanity checks.\n slicesToMatch.forEach(slice => { checkSlice(slice) });\n trackAssignedSandboxes.forEach(sandbox => { checkSandbox(sandbox, true /* isAssigned*/); });\n trackReadiedSandboxes.forEach(sandbox => { checkSandbox(sandbox, false /* isAssigned*/); });\n\n // Restore arrays.\n this.queuedSlices.push(...slicesToMatch);\n this.assignedSandboxes.push(...trackAssignedSandboxes);\n this.readiedSandboxes.push(...trackReadiedSandboxes);\n \n console.error('Error in matchSlicesWithSandboxes: Attempting to recover slices and sandboxes.', e);\n return [];\n } finally {\n this.matching = false;\n }\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: allocated ${sandboxSlices.length} sandboxes, queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, # of sandboxes: ${this.sandboxes.length}.`);\n\n return sandboxSlices;\n }\n\n disassociateSandboxAndSlice(sandbox, slice) {\n this.returnSandbox(sandbox);\n sandbox.slice = null;\n this.returnSlice(slice, 'EUNCAUGHT');\n }\n\n /**\n * This method will call this.startSandboxWork(sandbox, slice) for each element { sandbox, slice }\n * of the array returned by this.matchSlicesWithSandboxes(availableSandboxes) until all allocated sandboxes\n 
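// matchSlicesWithSandboxes pairs in three phases: (1) a sandbox already assigned to
// the slice's job, (2) an existing readied sandbox, (3) a freshly created sandbox.
// A compressed, synchronous sketch of that phase ordering (illustrative only --
// the real method also tracks enough state to roll back on error):
function matchPhases (slices, assignedByJob, readied, createSandbox) {
  const pairs = [];
  const needSandbox = [];
  for (const slice of slices) {
    const pool = assignedByJob[slice.jobAddress];      // phase 1: same-job sandboxes
    if (pool && pool.length) pairs.push({ sandbox: pool.pop(), slice });
    else needSandbox.push(slice);
  }
  while (needSandbox.length && readied.length)          // phase 2: existing readied
    pairs.push({ sandbox: readied.shift(), slice: needSandbox.pop() });
  while (needSandbox.length)                            // phase 3: brand new sandboxes
    pairs.push({ sandbox: createSandbox(), slice: needSandbox.pop() });
  return pairs;
}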
* are working. It is possible for a sandbox to interleave with calling distributeQueuedSlices and leave a sandbox\n * that is not working. Moreover, this.queuedSlices may be exhausted before all sandboxes are working.\n * @returns {Promise<void>}\n */\n async distributeQueuedSlices () {\n const numCores = this.unallocatedSpace;\n\n // If there's nothing there, or we're reentering, bail out.\n if (this.queuedSlices.length === 0 || numCores <= 0 || this.matching) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.distributeQueuedSlices: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return Promise.resolve();\n }\n\n //\n // Use the pseudo-mutex to prevent uncontrolled interleaving with fetchTask,\n // matchSlicesWithSandboxes and distributeQueuedSlices\n let sandboxSlices;\n this.acquire(numCores);\n try {\n sandboxSlices = await this.matchSlicesWithSandboxes(numCores);\n } finally {\n this.release();\n }\n\n debugging('supervisor') && console.log(`distributeQueuedSlices: ${sandboxSlices.length} sandboxSlices ${this.dumpSandboxSlices(sandboxSlices)}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n\n for (let sandboxSlice of sandboxSlices) {\n\n const { sandbox, slice } = sandboxSlice;\n try {\n if (sandbox.isReadyForAssign) {\n try {\n let timeoutMs = Math.floor(Math.min(+Supervisor.lastAssignFailTimerMs || 0, 10 * 60 * 1000 /* 10m */));\n await a$sleepMs(timeoutMs);\n await this.assignJobToSandbox(sandbox, slice.jobAddress);\n } catch (e) {\n console.error(`Supervisor.distributeQueuedSlices: Could not assign slice ${slice.identifier} to sandbox ${sandbox.identifier}.`);\n if (Supervisor.debugBuild) console.error(`...exception`, e);\n Supervisor.lastAssignFailTimerMs = Supervisor.lastAssignFailTimerMs ? +Supervisor.lastAssignFailTimerMs * 1.25 : Math.random() * 200;\n this.disassociateSandboxAndSlice(sandbox, slice);\n continue;\n }\n }\n\n if (!Supervisor.lastAssignFailTimerMs)\n Supervisor.lastAssignFailTimerMs = Math.random() * 200;\n this.startSandboxWork(sandbox, slice);\n Supervisor.lastAssignFailTimerMs = false;\n\n } catch (e) {\n // We should never get here.\n console.error(`Supervisor.distributeQueuedSlices: Failed to execute slice ${slice.identifier} in sandbox ${sandbox.identifier}.`);\n if (Supervisor.debugBuild) console.error('...exception', e);\n this.disassociateSandboxAndSlice(sandbox, slice);\n }\n }\n }\n\n /**\n *\n * @param {Sandbox} sandbox\n * @param {opaqueId} jobAddress\n * @returns {Promise<void>}\n */\n assignJobToSandbox(sandbox, jobAddress) {\n // Any error will be caught in distributeQueuedSlices.\n return sandbox.assign(jobAddress);\n }\n\n /**\n * Handles reassigning or returning a slice that was rejected by a sandbox.\n * \n * The sandbox will be terminated by this.returnSandbox in finalizeSandboxAndSlice. In this case,\n * if the slice does not have a rejected property already, reassign the slice to a new sandbox\n * and add a rejected property to the slice to indicate it has already rejected once, then set slice = null\n * in the return SandboxSlice so that finalizeSandboxAndSlice won't return slice to scheduler.\n * \n * If the slice rejects with a reason, or has a rejected time stamp (ie. 
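// distributeQueuedSlices backs off after a failed assign: the shared delay starts
// at a random value under 200 ms, grows by roughly 25% per consecutive failure,
// is capped at 10 minutes when applied, and resets once an assign succeeds.
// The same policy in isolation (illustrative sketch):
const assignBackoff = {
  delayMs: 0,
  onFailure () {
    this.delayMs = this.delayMs
      ? Math.min(this.delayMs * 1.25, 10 * 60 * 1000)
      : Math.random() * 200;
  },
  onSuccess () { this.delayMs = 0; },
};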
has been rejected once already)\n * then return the slice and all slices from the job to the scheduler and\n * terminate all sandboxes with that jobAddress.\n * @param {Sandbox} sandbox \n * @param {Slice} slice\n * @returns {Promise<SandboxSlice>}\n */\n async handleWorkReject(sandbox, slice, rejectReason) {\n if (!this.rejectedJobReasons[slice.jobAddress])\n this.rejectedJobReasons[slice.jobAddress] = [];\n\n this.rejectedJobReasons[slice.jobAddress].push(rejectReason); // memoize reasons\n\n // First time rejecting without a reason. Try assigning slice to a new sandbox.\n if (rejectReason === 'false' && !slice.rejected) {\n // Set rejected.\n slice.rejected = Date.now();\n // Schedule the slice for execution.\n this.scheduleSlice(slice, true /* placeInTheFrontOfTheQueue*/, false /* noDuplicateExecution*/);\n \n // Null out slice so this.returnSlice will not be called in finalizeSandboxAndSlice.\n // But we still want this.returnSandbox to terminate the sandbox.\n slice = null;\n } else { // Slice has a reason OR rejected without a reason already and got stamped.\n \n // Purge all slices and sandboxes associated with slice.jobAddress .\n this.purgeAllWork(slice.jobAddress);\n // Clear jobAddress from this.cache .\n this.cleanJobCache();\n\n // Add to array of rejected jobs.\n let rejectedJob = {\n address: slice.jobAddress,\n reasons: this.rejectedJobReasons[slice.jobAddress],\n }\n this.rejectedJobs.push(rejectedJob);\n\n // Tell everyone all about it, when allowed.\n if (dcpConfig.worker.allowConsoleAccess || Supervisor.debugBuild)\n {\n if (slice.rejected)\n console.warn(`Supervisor.handleWorkReject: The slice ${slice.identifier} was rejected twice.`);\n else\n console.warn(`Supervisor.handleWorkReject: The slice ${slice.identifier} was rejected with reason ${rejectReason}.`);\n console.warn(' All slices with the same jobAddress returned to the scheduler.');\n console.warn(' All sandboxes with the same jobAddress are terminated.');\n }\n //\n // this.purgeAllWork(jobAddress) terminates all sandboxes with jobAddress,\n // and it also returns to scheduler all slices with jobAddress.\n // Therefore null out slice and sandbox so finalizeSandboxAndSlice doesn't do anything.\n // \n sandbox = null;\n slice = null;\n }\n return { sandbox, slice };\n }\n\n /**\n * Schedule the slice to be executed.\n * If slice is already executing and noDuplicateExecution is true, return the slice with reason.\n * @param {Slice} slice\n * @param {boolean} [placeInTheFrontOfTheQueue=false]\n * @param {boolean} [noDuplicateExecution=true]\n * @param {string} [reason]\n */\n scheduleSlice(slice, placeInTheFrontOfTheQueue = false, noDuplicateExecution = true, reason) {\n // When noDuplicateExecution, if slice is already executing, do nothing.\n let workingSlices = [];\n if (noDuplicateExecution)\n workingSlices = this.allocatedSlices;\n\n if (!workingSlices.indexOf(slice)) {\n // Reset slice state to allow execution.\n slice.status = SLICE_STATUS_UNASSIGNED;\n // Enqueue in the to-be-executed queue.\n if (placeInTheFrontOfTheQueue) this.queuedSlices.unshift(slice);\n else this.queuedSlices.push(slice);\n }\n }\n\n /**\n * Purge all slices and sandboxes with this jobAddress.\n * @param {address} jobAddress\n * @param {boolean} [onlyPurgeQueuedAndAllocated=false]\n */\n purgeAllWork(jobAddress, onlyPurgeQueuedAndAllocated = false) {\n // Purge all slices and sandboxes associated with jobAddress .\n const deadSandboxes = this.sandboxes.filter(sandbox => sandbox.jobAddress === jobAddress);\n\n if 
(deadSandboxes.length > 0) {\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): sandboxes purged ${deadSandboxes.map(s => s.id)}, # of sandboxes ${this.sandboxes.length}`);\n deadSandboxes.forEach(sandbox => this.returnSandbox(sandbox));\n }\n\n let deadSlices;\n if (onlyPurgeQueuedAndAllocated) {\n deadSlices = this.queuedSlices.filter(slice => slice.jobAddress === jobAddress);\n if (deadSlices.length > 0 || this.allocatedSlices.length > 0)\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): dead queuedSlices ${deadSlices.map(s => s.sliceNumber)}, dead allocatedSlices ${this.allocatedSlices.map(s => s.sliceNumber)}`);\n deadSlices.push(...this.allocatedSlices);\n } else {\n deadSlices = this.slices.filter(slice => slice.jobAddress === jobAddress);\n }\n\n if (deadSlices.length > 0) {\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): slices purged ${deadSlices.map(s => s.sliceNumber)}, # of sandboxes ${this.sandboxes.length}`);\n this.returnSlices(deadSlices);\n this.removeQueuedSlices(deadSlices);\n }\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): Finished: slices ${this.slices.length}, queuedSlices ${this.queuedSlices.length}, assigned ${this.assignedSandboxes.length}, readied ${this.readiedSandboxes.length}, # of sandboxes ${this.sandboxes.length}`);\n }\n\n /**\n * Gives a slice to a sandbox which begins working. Handles collecting\n * the slice result (complete/fail) from the sandbox and submitting the result to the scheduler.\n * It will also return the sandbox to @this.returnSandbox when completed so the sandbox can be re-assigned.\n *\n * @param {Sandbox} sandbox - the sandbox to give the slice\n * @param {Slice} slice - the slice to distribute\n * @returns {Promise<void>} Promise returned from sandbox.run\n */\n async startSandboxWork (sandbox, slice) {\n var startDelayMs, reason = 'unknown';\n\n try {\n slice.markAsWorking();\n } catch (e) {\n // This will occur when the same slice is distributed twice.\n // It is normal because two sandboxes could finish at the same time and be assigned the\n // same slice before the slice is marked as working.\n debugging() && console.debug('startSandboxWork: slice.markAsWorking exception:', e);\n return Promise.resolve();\n }\n\n // sandbox.requiresGPU = slice.requiresGPU;\n // if (sandbox.requiresGPU) {\n // this.GPUsAssigned++;\n // }\n\n if (Supervisor.startSandboxWork_beenCalled)\n startDelayMs = 1000 * (tuning.minSandboxStartDelay + (Math.random() * (tuning.maxSandboxStartDelay - tuning.minSandboxStartDelay)));\n else {\n startDelayMs = 1000 * tuning.minSandboxStartDelay;\n Supervisor.startSandboxWork_beenCalled = true;\n }\n\n try {\n debugging() && console.log(`startSandboxWork: Started ${this.dumpStatefulSandboxAndSlice(sandbox, slice)}, total sandbox count: ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n if (Supervisor.sliceTiming) {\n slice['pairingDelta'] = Date.now() - slice['pairingDelta'];\n slice['executionDelta'] = Date.now();\n }\n let result;\n try {\n result = await sandbox.work(slice, startDelayMs);\n } finally {\n sandbox.allocated = false;\n slice.allocated = false;\n }\n if (Supervisor.sliceTiming) {\n slice['executionDelta'] = Date.now() - slice['executionDelta'];\n slice['resultDelta'] = Date.now();\n }\n slice.collectResult(result, true);\n // In watchdog, all sandboxes in working state, have their slice 
status sent to result submitter.\n // However, this can happen after the sandbox/slice has already sent results\n // to result submitter, in which case, the activeSlices table has already removed the row\n // corresponding to slice and hence is incapable of updating status.\n sandbox.changeWorkingToAssigned();\n this.assignedSandboxes.push(sandbox);\n debugging() && console.log(`startSandboxWork: Finished ${this.dumpStatefulSandboxAndSlice(sandbox, slice)}, total sandbox count: ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n } catch(error) {\n let logLevel;\n\n if (error instanceof SandboxError) {\n logLevel = 'warn';\n // The message and stack properties of error objects are not enumerable,\n // so they have to be copied into a plain object this way\n const errorResult = Object.getOwnPropertyNames(error).reduce((o, p) => {\n o[p] = error[p]; return o;\n }, { message: 'Unexpected worker error' });\n slice.collectResult(errorResult, false);\n } else {\n logLevel = 'error';\n // This error was unrelated to the work being done, so just return the slice in the finally block.\n // For extra safety the sandbox is terminated.\n slice.result = null;\n slice.status = SLICE_STATUS_FAILED; /** XXXpfr @todo terminating sandbox? */\n }\n\n let errorString;\n switch (error.errorCode) {\n case 'ENOPROGRESS':\n reason = 'ENOPROGRESS';\n errorString = 'Supervisor.startSandboxWork - No progress error in sandbox.\\n';\n break;\n case 'ESLICETOOSLOW':\n reason = 'ESLICETOOSLOW';\n errorString = 'Supervisor.startSandboxWork - Slice too slow error in sandbox.\\n';\n break;\n case 'EUNCAUGHT':\n reason = 'EUNCAUGHT';\n errorString = `Supervisor.startSandboxWork - Uncaught error in sandbox ${error.message}.\\n`;\n break;\n case 'EFETCH':\n // reason = 'EFETCH'; The status.js processing cannot handle 'EFETCH'\n reason = 'unknown';\n errorString = `Supervisor.startSandboxWork - Could not fetch data: ${error.message}.\\n`;\n break;\n }\n \n const { getenv } = __webpack_require__(/*! ../../common/dcp-env */ \"./src/common/dcp-env.js\");\n // Always display max info under debug builds, otherwise maximal error\n // messages are displayed to the worker, only if both worker and client agree.\n let workerConsole = sandbox.supervisorCache.cache.job[slice.jobAddress].workerConsole;\n const displayMaxInfo = Boolean(getenv('DCP_SUPERVISOR_DEBUG_DISPLAY_MAX_INFO')) || (workerConsole && dcpConfig.worker.allowConsoleAccess);\n\n const errorObject = {\n jobAddress: slice.jobAddress.substr(0,10),\n sliceNumber: slice.sliceNumber,\n sandbox: sandbox.id,\n jobName: sandbox.public ? 
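// The message and stack properties of Error objects are non-enumerable, which is why
// the error result above is built from Object.getOwnPropertyNames rather than a
// spread. The same conversion in isolation (illustrative sketch):
function errorToPlainObject (error, fallbackMessage = 'Unexpected worker error') {
  return Object.getOwnPropertyNames(error).reduce(
    (obj, prop) => { obj[prop] = error[prop]; return obj; },
    { message: fallbackMessage }
  );
}
// JSON.stringify(new Error('boom')) yields '{}', whereas
// JSON.stringify(errorToPlainObject(new Error('boom'))) keeps message and stack.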
sandbox.public.name : 'unnamed',\n };\n \n if (error.name === 'EWORKREJECT') {\n error.stack = 'Sandbox was terminated by work.reject()';\n const ss = await this.handleWorkReject(sandbox, slice, error.message);\n sandbox = ss.sandbox; slice = ss.slice;\n }\n\n if (!displayMaxInfo && error.errorCode === 'EUNCAUGHTERROR') {\n console[logLevel](`Supervisor.startSandboxWork - Uncaught error in sandbox, could not compute.\\n`, errorObject);\n } else if (!displayMaxInfo && error.errorCode === 'EFETCH_BAD_ORIGIN') {\n console[logLevel](`Supervisor.startSandboxWork - Could not fetch data: ${error.message}`);\n } else if (!displayMaxInfo && errorString) {\n console[logLevel](errorString, errorObject);\n } else if (!displayMaxInfo && error.name === 'EWORKREJECT') {\n console[logLevel](`Supervisor.startSandboxWork - Sandbox rejected work: ${error.message}`)\n } else {\n if (displayMaxInfo)\n errorObject.stack += '\\n --------------------\\n' + (error.stack.split('\\n').slice(1).join('\\n'));\n console[logLevel](`Supervisor.startSandboxWork - Sandbox failed: ${error.message}\\n`, errorObject);\n }\n } finally {\n await this.finalizeSandboxAndSlice(sandbox, slice, reason);\n }\n }\n\n /**\n * If slice && slice.result, then call await this.recordResult(slice) and this.returnSandbox(sandbox, slice) will have no effect.\n * If slice && !slice.result, then call this.returnSlice(slice, reason) and then this.returnSandbox(sandbox, slice) which terminates sandbox.\n * If !slice && sandbox, then terminate the sandbox with this.returnSandbox(sandbox, slice) .\n * If !slice && !sandbox, then do nothing.\n * @param {Sandbox} [sandbox]\n * @param {Slice} [slice]\n * @param {string} [reason]\n */\n async finalizeSandboxAndSlice(sandbox, slice, reason) {\n debugging('supervisor') && console.log(`finalizeSandboxAndSlice: sandbox ${sandbox ? sandbox.identifier : 'nade'}, slice ${slice ? slice.identifier : 'nade'}`);\n if (slice) {\n if (slice.result) await this.recordResult(slice);\n else this.returnSlice(slice, reason);\n }\n // It is possible that sandbox is already terminated\n // Because sandbox.allocated=false as soon as sandbox.work(...) 
completes.\n // But the await at or in finalizeSandboxAndSlice may allow pruneSandboxes to slither in.\n if (sandbox) this.returnSandbox(sandbox, slice, false /* verifySandboxIsNotTerminated*/);\n }\n\n /**\n * Terminates sandboxes and returns slices.\n * Sets the working flag to false, call @this.work to start working again.\n * \n * If forceTerminate is true: Terminates all sandboxes and returns all slices.\n * If forceTerminate is false: Terminates non-allocated sandboxes and returns queued slices.\n *\n * @param {boolean} [forceTerminate = true] - true if you want to stop the sandboxes from completing their current slice.\n * @returns {Promise<void>}\n */\n async stopWork (forceTerminate = true) {\n debugging('supervisor') && console.log('stopWork(${forceTerminate}): terminating sandboxes and returning slices to scheduler.');\n if (forceTerminate) {\n while (this.sandboxes.length) {\n this.returnSandbox(this.sandboxes[0], null, false);\n }\n\n await this.returnSlices(this.slices).then(() => {\n this.queuedSlices.length = 0;\n });\n } else {\n // Only terminate idle sandboxes and return only queued slices\n let idleSandboxes = this.sandboxes.filter(w => !w.allocated);\n for (const sandbox of idleSandboxes) {\n this.returnSandbox(sandbox, null, false /* verifySandboxIsNotTerminated*/);\n }\n\n await this.returnSlices(this.queuedSlices).then(() => {\n this.queuedSlices.length = 0;\n });\n\n await new Promise((resolve, reject) => {\n let sandboxesRemaining = this.allocatedSandboxes.length;\n if (sandboxesRemaining === 0)\n {\n resolve();\n }\n // Resolve and finish work once all sandboxes have finished submitting their results.\n this.on('submitFinished', () => {\n sandboxesRemaining--;\n if (sandboxesRemaining === 0)\n {\n console.log('All sandboxes empty, stopping worker and closing all connections');\n resolve();\n }\n });\n });\n }\n\n if (this.resultSubmitterConnection) {\n this.resultSubmitterConnection.off('close', this.openResultSubmitterConn);\n this.resultSubmitterConnection.close();\n this.resultSubmitterConnection = null;\n }\n\n if (this.taskDistributorConnection) {\n this.taskDistributorConnection.off('close', this.openTaskDistributorConn);\n this.taskDistributorConnection.close();\n this.taskDistributorConnection = null;\n }\n\n if (this.packageManagerConnection) {\n this.packageManagerConnection.off('close', this.openPackageManagerConn);\n this.packageManagerConnection.close();\n this.packageManagerConnection = null;\n }\n\n if (this.eventRouterConnection) {\n this.eventRouterConnection.off('close', this.openEventRouterConn);\n this.eventRouterConnection.close();\n this.eventRouterConnection = null;\n }\n\n this.emit('stop');\n }\n\n /**\n * Takes a slice and returns it to the scheduler to be redistributed.\n * Usually called when an exception is thrown by sandbox.work(slice, startDelayMs) .\n * Or when the supervisor tells it to forcibly stop working.\n *\n * @param {Slice} slice - The slice to return to the scheduler.\n * @param {string} [reason] - Optional reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @returns {Promise<*>} - Response from the scheduler.\n */\n returnSlice (slice, reason) {\n // When sliceNumber === 0 don't send a status message.\n if (slice.sliceNumber === 0) return Promise.resolve();\n \n debugging() && console.log(`Supervisor.returnSlice: Returning slice ${slice.identifier} with reason ${reason}.`);\n \n const payload = slice.getReturnMessagePayload(this.workerOpaqueId, reason);\n try\n {\n return 
this.resultSubmitterConnection.send('status', payload) /* resultSubmitterConnection can be null if worker is stopped */\n .then(response => {\n return response;\n }).catch(error => {\n debugging('supervisor') && console.error('Failed to return slice', {\n sliceNumber: slice.sliceNumber,\n jobAddress: slice.jobAddress,\n status: slice.status,\n error,\n }, 'Will try again on a new connection.');\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: payload });\n this.resultSubmitterConnection.close();\n });\n }\n catch (error)\n {\n debugging('supervisor') && console.error(`Failed to return slice ${slice.identifier}, no connection to result submitter:`, error);\n }\n }\n\n /**\n * Bulk-return multiple slices, possibly for assorted jobs.\n * Returns slices to the scheduler to be redistributed.\n * Called in the sandbox terminate handler and purgeAllWork(jobAddress)\n * and stopWork(forceTerminate).\n *\n * @param {Slice[]} slices - The slices to return to the scheduler.\n * @returns {Promise<void>} - Response from the scheduler.\n */\n async returnSlices(slices) {\n if (!slices || !slices.length) return Promise.resolve();\n \n const slicePayload = [];\n slices.forEach(slice => { addToReturnSlicePayload(slicePayload, slice); });\n this.removeSlices(slices);\n\n debugging('supervisor') && console.log(`Supervisor.returnSlices: Returning slices ${this.dumpSlices(slices)}.`);\n\n return this.resultSubmitterConnection.send('status', {\n worker: this.workerOpaqueId,\n slices: slicePayload,\n }).then(response => {\n return response;\n }).catch(error => {\n const errorInfo = slices.map(slice => slice.identifier);\n debugging('supervisor') && console.error('Failed to return slice(s)', { errorInfo, error }, 'Will try again on new connection.');\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: { worker: this.workerOpaqueId, slices: slicePayload } })\n this.resultSubmitterConnection.close();\n // Just in case the caller is expecing a DCP response\n return { success: false, payload: {} };\n });\n }\n\n /**\n * Submits the slice results to the scheduler, either to the\n * work submit or fail endpoints based on the slice status.\n * Then remove the slice from the @this.slices cache.\n *\n * @param {Slice} slice - The slice to submit.\n * @returns {Promise<void>}\n */\n async recordResult (slice) {\n // It is possible for slice.result to be undefined when there are upstream errors.\n if ( !(slice && slice.result))\n throw new Error(`recordResult: slice.result is undefined for slice ${slice.identifier}. 
This is ok when there are upstream errors.`);\n\n debugging('supervisor') && console.log(`supervisor: recording result for slice ${slice.identifier}.`);\n\n const jobAddress = slice.jobAddress;\n const sliceNumber = slice.sliceNumber;\n const authorizationMessage = slice.getAuthorizationMessage();\n\n /* @see result-submitter::result for full message details */\n const metrics = { GPUTime: 0, CPUTime: 0, CPUDensity: 0, GPUDensity: 0, total: 0 };\n const payloadData = {\n slice: sliceNumber,\n job: jobAddress,\n worker: this.workerOpaqueId,\n paymentAddress: this.paymentAddress,\n metrics,\n authorizationMessage,\n }\n\n const timeReport = slice.timeReport;\n if (timeReport && timeReport.total > 0) {\n metrics.GPUTime = timeReport.webGL;\n metrics.CPUTime = timeReport.CPU;\n metrics.CPUDensity = metrics.CPUTime / timeReport.total;\n metrics.GPUDensity = metrics.GPUTime / timeReport.total;\n metrics.total = timeReport.total;\n metrics.CPUTime = 1 + Math.floor(metrics.CPUTime);\n if (metrics.GPUTime > 0)\n metrics.GPUTime = 1 + Math.floor(metrics.GPUTime);\n }\n \n this.emit('submittingResult');\n\n if (!slice.isFinished)\n throw new Error('Cannot record result for slice that is not finished');\n\n if (slice.resultStorageType === 'pattern') { /* This is a remote-storage slice. */\n const remoteResult = await this.sendResultToRemote(slice);\n payloadData.result = encodeDataURI(JSON.stringify(remoteResult));\n } else {\n payloadData.result = encodeDataURI(slice.result.result); /* XXXwg - result.result is awful */\n }\n debugging('supervisor') && console.log('Supervisor.recordResult: payloadData.result', payloadData.result.slice(0, 512));\n\n try {\n if (slice.completed) {\n\n /* work function returned a result */\n let resp = await this.resultSubmitterConnection.send(\n 'result',\n payloadData,\n )\n \n if (!resp.success) {\n if (resp.payload.code === 'DCPS-01002') { /* result submitter cannot connect to event router; try again */\n try {\n resp = await this.resendResult(payloadData)\n } catch (error) {\n debugging('supervisor') && console.error(`supervisor - failed to submit result for job ${jobAddress} after ${payloadData.sendRetries} attempts`)\n throw error;\n }\n }\n else\n throw resp.payload;\n }\n\n if (false) {}\n\n const receipt = {\n accepted: true,\n payment: resp.payload.slicePaymentAmount,\n };\n this.emit('submittedResult', resp.payload);\n this.emit('dccCredit', receipt);\n } else {\n /* slice did not complete for some reason */\n \n // If the slice from a job never completes and the job address exists in the ringBufferofJobs, \n // then we remove it to allow for another slice (from the same job) to be obtained by fetchTask\n this.ringBufferofJobs.buf = this.ringBufferofJobs.filter(element => element !== jobAddress);\n \n await this.returnSlice(slice);\n }\n } catch(error) {\n console.info(`1014: Failed to submit results for slice ${payloadData.slice} of job ${payloadData.job}`, error);\n this.emit('submitSliceFailed', error);\n } finally {\n this.emit('submitFinished');\n // Remove the slice from the slices array.\n this.removeSlice(slice);\n if (Supervisor.sliceTiming) {\n slice['resultDelta'] = Date.now() - slice['resultDelta'];\n console.log(`recordResult(${slice['pairingDelta']}, ${slice['executionDelta']}, ${slice['resultDelta']}): Completed slice ${slice.identifier}.`);\n } else\n debugging('supervisor') && console.log(`recordResult: Completed slice ${slice.identifier}.`);\n }\n }\n\n /**\n * Send a work function's result to a server that speaks our DCP Remote Data Server 
protocol.\n * The data server dcp-rds is been implemented in https://gitlab.com/Distributed-Compute-Protocol/dcp-rds .\n *\n * @param {Slice} slice - Slice object whose result we are sending.\n * @returns {Promise<object>} - Object of the form { success: true, href: 'http://127.0.0.1:3521/methods/download/jobs/34/result/10' } .\n * @throws When HTTP status not in the 2xx range.\n */\n async sendResultToRemote(slice) {\n const postParams = {\n ...slice.resultStorageParams\n };\n \n const patternParams = {\n slice: slice.sliceNUmber,\n job: slice.jobAddress\n };\n \n /**\n * @todo Future Work: we need to pass the pattern parameters with the job details into the supervisor. \n * We do not have all the information (necessarily) to calculate them from here. /wg jan 2022\n */\n const sliceResultUri = makeValueURI('pattern', slice.resultStorageDetails, patternParams)\n\n debugging() && console.log('sendResultToRemote sliceResultUri: ', sliceResultUri);\n \n if (this.makeSafeOriginList('sendResults').indexOf(sliceResultUri.origin) === -1)\n throw new Error(`Invalid origin for remote result storage: '${sliceResultUri.origin}'`);\n\n postParams.element = slice.sliceNumber;\n postParams.contentType = 'application/json'; // Currently data will be outputed as a JSON object, @todo: Support file upload.\n\n debugging() && console.log('sendResultToRemote: postParams: ', postParams);\n\n let result = slice.result.result;\n if (result) {\n postParams.content = JSON.stringify(result);\n } else {\n postParams.error = JSON.stringify(slice.error);\n }\n\n debugging('supervisor') && console.log('sendResultToRemote: content: ', (result ? postParams.content : postParams.error).slice(0, 512));\n\n //\n // Notes:\n // 1) In recordResults the response from justFetch is JSON serialized and encodeDataURI is called.\n // payloadData.result = await this.sendResultToRemote(slice);\n // payloadData.result = encodeDataURI(JSON.stringify(payloadData.result));\n // 2) We do further processing after the call to sendResultToRemote in recordResult, because\n // if we did it here there would be a perf hit. When the return value is a promise, it gets\n // folded into sendResultToRemote's main promise. If justFetch's promise wasn't a return value then\n // justFetch would be separately added to the micro-task-queue.\n return await justFetch(sliceResultUri, 'JSON', 'POST', false, postParams);\n }\n}\n\n/**\n * Sandbox has had an error which is not from the work function: kill it\n * and try to redo the slice.\n */\nfunction handleSandboxError(supervisor, sandbox, error) {\n const slice = sandbox.slice;\n\n slice.sandboxErrorCount = (slice.sandboxErrorCount || 0) + 1;\n sandbox.slice = null;\n supervisor.returnSandbox(sandbox); /* terminate the sandbox */\n slice.status = SLICE_STATUS_UNASSIGNED; /* ToT */\n console.warn(`Supervisor.handleSandboxError: Sandbox ${sandbox.identifier}...(${sandbox.public.name}/${slice.sandboxErrorCount}) with slice ${slice.identifier} had error.`, error);\n\n if (slice.sandboxErrorCount < dcpConfig.worker.maxSandboxErrorsPerSlice)\n supervisor.queuedSlices.push(slice);\n else {\n slice.error = error;\n supervisor.returnSlice(slice);\n }\n}\n\n/**\n * Add a slice to the slice payload being built. If a sliceList already exists for the\n * job-status-authMessage tuple, then the slice will be added to that, otherwise a new\n * sliceList will be added to the payload.\n *\n * @param {Object[]} slicePayload - Slice payload being built. 
Will be mutated in place.\n * @param {Slice} slice - The slice.\n * @param {String} status - Status update, eg. progress or scheduled.\n *\n * @returns {Object[]} mutated slicePayload array\n */\nfunction addToSlicePayload(slicePayload, slice, status) {\n // getAuthorizationMessage helps enforces the equivalence\n // !authorizationMessage <==> sliceNumber === 0\n const authorizationMessage = slice.getAuthorizationMessage();\n if (!authorizationMessage) return;\n\n // Try to find a sliceList in the payload which matches the job, status, and auth message\n let sliceList = slicePayload.find(desc => {\n return desc.job === slice.jobAddress\n && desc.status === status\n && desc.authorizationMessage === authorizationMessage;\n });\n\n // If we didn't find a sliceList, start a new one and add it to the payload\n if (!sliceList) {\n sliceList = {\n job: slice.jobAddress,\n sliceNumbers: [],\n status,\n authorizationMessage,\n };\n slicePayload.push(sliceList);\n }\n\n sliceList.sliceNumbers.push(slice.sliceNumber);\n\n return slicePayload;\n}\n\n/**\n * Add a slice to the returnSlice payload being built. If a sliceList already exists for the\n * job-isEstimation-authMessage-reason tuple, then the slice will be added to that, otherwise a new\n * sliceList will be added to the payload.\n *\n * @param {Object[]} slicePayload - Slice payload being built. Will be mutated in place.\n * @param {Slice} slice - The slice.\n * @param {String} [reason] - Optional reason to further characterize status; e.g. 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n *\n * @returns {Object[]} mutated slicePayload array\n */\nfunction addToReturnSlicePayload(slicePayload, slice, reason) {\n // getAuthorizationMessage helps enforces the equivalence\n // !authorizationMessage <==> sliceNumber === 0\n const authorizationMessage = slice.getAuthorizationMessage();\n if (!authorizationMessage) return;\n\n if (!reason) reason = slice.error ? 'EUNCAUGHT' : 'unknown';\n\n // Try to find a sliceList in the payload which matches the job, status, and auth message\n let sliceList = slicePayload.find(desc => {\n return desc.job === slice.jobAddress\n && desc.isEstimationSlice === slice.isEstimationSlice\n && desc.authorizationMessage === authorizationMessage\n && desc.reason === reason;\n });\n\n // If we didn't find a sliceList, start a new one and add it to the payload\n if (!sliceList) {\n sliceList = {\n job: slice.jobAddress,\n sliceNumbers: [],\n status: 'return',\n isEstimationSlice: slice.isEstimationSlice,\n authorizationMessage,\n reason,\n };\n slicePayload.push(sliceList);\n }\n\n sliceList.sliceNumbers.push(slice.sliceNumber);\n\n return slicePayload;\n}\n\n/**\n * Return DCPv4-specific connection options, composed of type-specific, URL-specific, \n * and worker-specific options, any/all of which can override the dcpConfig.dcp.connectOptions.\n * The order of precedence is the order of specificity.\n */\nfunction connectionOptions(url, label) {\n return leafMerge(/* ordered from most to least specific */\n dcpConfig.worker.dcp.connectionOptions.default,\n dcpConfig.worker.dcp.connectionOptions[label],\n dcpConfig.worker.dcp.connectionOptions[url.href]);\n}\n\n/** @type {number | boolean} */\nSupervisor.lastAssignFailTimerMs = false;\n/** @type {boolean} */\nSupervisor.startSandboxWork_beenCalled = false;\n/** @type {boolean} */\nSupervisor.debugBuild = ((__webpack_require__(/*! 
dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug');\n/**\n * When Supervisor.sliceTiming is set to be true, it displays the timings of a every slice\n * slice['pairingDelta'] = timespan of when slice is paired with sandbox until execution starts\n * slice['executionDelta'] = timespan of execution in sandbox\n * slice['resultDelta'] = timespan of when sandbox finishes executing until recordResult completes.\n * @type {boolean}\n */\nSupervisor.sliceTiming = false;\n\nexports.Supervisor = Supervisor;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor.js?");
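Editor's note: a minimal, standalone sketch of the grouping idea behind addToReturnSlicePayload in the module above. This is not the package source and is not part of either released version; the Slice object here is a hypothetical stand-in that exposes authorizationMessage as a plain property rather than via getAuthorizationMessage(). Slices being returned to the scheduler are bucketed by (job, isEstimationSlice, authorizationMessage, reason) so that many slices from the same job collapse into a single sliceList entry in the status payload.

// Standalone sketch; field names mirror the code above, but the Slice shape is assumed.
function addToReturnSlicePayloadSketch(slicePayload, slice, reason = slice.error ? 'EUNCAUGHT' : 'unknown')
{
  // Slices with sliceNumber === 0 carry no authorization message and are skipped.
  if (!slice.authorizationMessage)
    return slicePayload;

  // Look for an existing bucket matching the job / estimation flag / auth message / reason tuple.
  let sliceList = slicePayload.find(desc =>
    desc.job === slice.jobAddress
    && desc.isEstimationSlice === slice.isEstimationSlice
    && desc.authorizationMessage === slice.authorizationMessage
    && desc.reason === reason);

  // No matching bucket yet: start a new sliceList and add it to the payload.
  if (!sliceList)
  {
    sliceList = {
      job: slice.jobAddress,
      sliceNumbers: [],
      status: 'return',
      isEstimationSlice: slice.isEstimationSlice,
      authorizationMessage: slice.authorizationMessage,
      reason,
    };
    slicePayload.push(sliceList);
  }

  sliceList.sliceNumbers.push(slice.sliceNumber);
  return slicePayload;
}

// Usage: three slices from the same (hypothetical) job and reason collapse into one sliceList.
const payload = [];
[10, 11, 12].forEach(n => addToReturnSlicePayloadSketch(payload, {
  jobAddress: '0xabc', sliceNumber: n, isEstimationSlice: false, authorizationMessage: 'auth-1',
}, 'ENOPROGRESS'));
console.log(JSON.stringify(payload, null, 2)); // one entry with sliceNumbers: [10, 11, 12]

Grouping on this tuple keeps the 'status' message compact when a whole batch of slices for one job is returned at once (e.g. from purgeAllWork or stopWork), while still separating slices whose reason or authorization differs.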
4491
+ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file worker/supervisor.js\n *\n * The component that controls each of the sandboxes\n * and distributes work to them. Also communicates with the\n * scheduler to fetch said work.\n *\n * The supervisor readies sandboxes before/while fetching slices.\n * This means sometimes there are extra instantiated WebWorkers\n * that are idle (in this.readiedSandboxes). Readied sandboxes can\n * be used for any slice. After a readied sandbox is given a slice\n * it becomes assigned to slice's job and can only do work\n * for that job.\n *\n * After a sandbox completes its work, the sandbox becomes cached\n * and can be reused if another slice with a matching job is fetched.\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * @date May 2019\n */\n\n/* global dcpConfig */\n// @ts-check\n\n\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst hash = __webpack_require__(/*! dcp/common/hash */ \"./src/common/hash.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { Sandbox, SandboxError } = __webpack_require__(/*! ./sandbox */ \"./src/dcp-client/worker/sandbox.js\");\nconst { Slice, SLICE_STATUS_UNASSIGNED, SLICE_STATUS_FAILED } = __webpack_require__(/*! ./slice */ \"./src/dcp-client/worker/slice.js\");\nconst { SupervisorCache } = __webpack_require__(/*! ./supervisor-cache */ \"./src/dcp-client/worker/supervisor-cache.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst { localStorage } = __webpack_require__(/*! dcp/common/dcp-localstorage */ \"./src/common/dcp-localstorage.js\");\nconst { booley, encodeDataURI, makeValueURI, leafMerge, a$sleepMs, justFetch, compressJobMap, toJobMap,\n compressSandboxes, compressSlices, truncateAddress, dumpSandboxesIfNotUnique, dumpSlicesIfNotUnique, \n generateOpaqueId } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { sliceStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { calculateJoinHash } = __webpack_require__(/*! dcp/dcp-client/compute-groups */ \"./src/dcp-client/compute-groups/index.js\");\nconst RingBuffer = __webpack_require__(/*! dcp/utils/ringBuffer */ \"./src/utils/ringBuffer.js\");\nconst supervisorTuning = dcpConfig.future('worker.tuning');\nconst tuning = {\n watchdogInterval: 7, /**< seconds - time between fetches when ENOTASK(? 
/wg nov 2019) */\n minSandboxStartDelay: 0.1, /**< seconds - minimum time between WebWorker starts */\n maxSandboxStartDelay: 0.7, /**< seconds - maximum delay time between WebWorker starts */\n ...supervisorTuning\n};\n\n/** Make timers 10x slower when running in niim */\nlet timeDilation = 1;\nif (DCP_ENV.platform === 'nodejs') {\n /** Make timers 10x slower when running in niim */\n timeDilation = (requireNative('module')._cache.niim instanceof requireNative('module').Module) ? 10 : 1;\n}\n\ndcpConfig.future('worker.sandbox', { progressReportInterval: (5 * 60 * 1000) });\nconst sandboxTuning = dcpConfig.worker.sandbox;\n\n/**\n * @typedef {*} address\n * @typedef {*} opaqueId\n */\n\n/**\n * @typedef {object} SandboxSlice\n * @property {Sandbox} sandbox\n * @property {Slice} slice\n */\n\n/**\n * @typedef {object} Signature\n * @property {Uint8Array} r\n * @property {Uint8Array} s\n * @property {Uint8Array} v\n */\n\n/**\n * @typedef {object} SignedAuthorizationMessageObject\n * @property {object} auth\n * @property {Signature} signature\n * @property {module:dcp/wallet.Address} owner\n */\n\n/** @typedef {import('.').Worker} Worker */\n/** @typedef {import('.').SupervisorOptions} SupervisorOptions */\n\nclass Supervisor extends EventEmitter {\n /**\n * @constructor\n * @param {Worker} worker\n * @param {SupervisorOptions} options\n */\n constructor (worker, options={}) {\n super('Supervisor');\n\n /** @type {Worker} */\n this.worker = worker;\n\n /** @type {Sandbox[]} */\n this.sandboxes = [];\n\n /** @type {Sandbox[]} */\n this.readiedSandboxes = [];\n\n /** @type {Sandbox[]} */\n this.assignedSandboxes = [];\n\n /** @type {Slice[]} */\n this.slices = [];\n\n /** @type {Slice[]} */\n this.queuedSlices = [];\n\n /** @type {Slice[]} */\n this.lostSlices = [];\n\n /** @type {boolean} */\n this.matching = false;\n\n /** @type {boolean} */\n this.isFetchingNewWork = false;\n\n /** @type {number} */\n this.numberOfCoresReserved = 0;\n\n /** @type {number} */\n this.addressTruncationLength = 20; // Set to -1 for no truncation.\n\n /** @type {Object[]} */\n this.rejectedJobs = [];\n this.rejectedJobReasons = [];\n\n if (!options) {\n console.error('Supervisor Options', options, new Error().stack);\n options = {};\n }\n\n /** @type {object} */\n this.options = {\n jobAddresses: options.jobAddresses || [/* all jobs unless priorityOnly */],\n ...options,\n };\n\n const { paymentAddress, identity } = options;\n if (paymentAddress) {\n if (paymentAddress instanceof wallet.Keystore) {\n this.paymentAddress = paymentAddress.address;\n } else {\n this.paymentAddress = new wallet.Address(paymentAddress);\n }\n } else {\n this.paymentAddress = null;\n }\n\n this._identityKeystore = identity;\n\n this.extraAllowOrigins = {\n any: [],\n fetchData: [],\n fetchWorkFunctions: [],\n fetchArguments: [],\n sendResults: [],\n };\n \n if (typeof options.allowedOrigins !== 'undefined')\n {\n console.warn('Warning: using deprecated interface options.allowedOrigins; callstack=', new Error().stack.split('\\n').slice(1));\n if (!Array.isArray(options.allowedOrigins))\n {\n for (let kind in this.extraAllowOrigins)\n {\n if (options.allowedOrigins[kind])\n this.extraAllowOrigins[kind].push(...options.allowedOrigins[kind]); \n }\n }\n else\n this.extraAllowOrigins['any'].push(...options.allowedOrigins)\n \n delete options.allowedOrigins;\n }\n /* Assume that an array of dcpConfig.worker.allowOrigins means they can be used for anything */\n if (Array.isArray(dcpConfig.worker.allowOrigins))\n dcpConfig.worker.allowOrigins 
= { any: dcpConfig.worker.allowOrigins };\n \n /**\n * Maximum sandboxes allowed to work at a given time.\n * @type {number}\n */\n this.maxWorkingSandboxes = options.maxWorkingSandboxes || 1;\n\n /** @type {number} */\n this.defaultMaxGPUs = 1;\n // this.GPUsAssigned = 0;\n \n // Object.defineProperty(this, 'GPUsAssigned', {\n // get: () => this.allocatedSandboxes.filter(sb => !!sb.requiresGPU).length,\n // enumerable: true,\n // configurable: false,\n // });\n\n /**\n * TODO: Remove this when the supervisor sends all of the sandbox\n * capabilities to the scheduler when fetching work.\n * @type {object}\n */\n this.capabilities = null;\n\n /** @type {number} */\n this.lastProgressReport = 0;\n\n /** \n * An N-slot ring buffer of job addresses. Stores all jobs that have had no more than 1 slice run in the ring buffer.\n * Required for the implementation of discrete jobs \n * @type {RingBuffer} \n */\n this.ringBufferofJobs = new RingBuffer(200); // N = 200 should be more than enough.\n \n // @hack - dcp-env.isBrowserPlatform is not set unless the platform is _explicitly_ set,\n // using the default detected platform doesn't set it.\n // Fixing that causes an error in the wallet module's startup on web platform, which I\n // probably can't fix in a reasonable time this morning.\n // ~ER2020-02-20\n\n if (!options.maxWorkingSandboxes\n && DCP_ENV.browserPlatformList.includes(DCP_ENV.platform)\n && navigator.hardwareConcurrency > 1) {\n this.maxWorkingSandboxes = navigator.hardwareConcurrency - 1;\n if (typeof navigator.userAgent === 'string') {\n if (/(Android).*(Chrome|Chromium)/.exec(navigator.userAgent)) {\n this.maxWorkingSandboxes = 1;\n console.log('Doing work with Chromimum browsers on Android is currently limited to one sandbox');\n }\n }\n }\n\n /** @type {SupervisorCache} */\n this.cache = new SupervisorCache(this);\n /** @type {object} */\n this._connections = {}; /* active DCPv4 connections */\n // Call the watchdog every 7 seconds.\n this.watchdogInterval = setInterval(() => this.watchdog(), tuning.watchdogInterval * 1000);\n if (DCP_ENV.platform === 'nodejs' && this.options.localExec) /* interval helps keep normal worker alive forever, which we don't want in localexec */\n this.watchdogInterval.unref();\n\n const ceci = this;\n\n // Initialize to null so these properties are recognized for the Supervisor class\n this.taskDistributorConnection = null;\n this.eventRouterConnection = null;\n this.resultSubmitterConnection = null;\n this.packageManagerConnection = null;\n this.openTaskDistributorConn = function openTaskDistributorConn()\n {\n let config = dcpConfig.scheduler.services.taskDistributor;\n ceci.taskDistributorConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'taskDistributor'));\n ceci.taskDistributorConnection.on('close', ceci.openTaskDistributorConn);\n }\n\n this.openEventRouterConn = function openEventRouterConn()\n {\n let config = dcpConfig.scheduler.services.eventRouter;\n ceci.eventRouterConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'eventRouter'));\n ceci.eventRouterConnection.on('close', ceci.openEventRouterConn);\n if (ceci.eventRouterMessageQueue.length)\n ceci.resendRejectedMessages(ceci.eventRouterConnection, ceci.eventRouterMessageQueue);\n }\n this.eventRouterMessageQueue = [];\n \n this.openResultSubmitterConn = function openResultSubmitterConn()\n {\n let config = dcpConfig.scheduler.services.resultSubmitter;\n 
ceci.resultSubmitterConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'resultSubmitter'));\n ceci.resultSubmitterConnection.on('close', ceci.openResultSubmitterConn);\n if (ceci.resultSubmitterMessageQueue.length)\n ceci.resendRejectedMessages(ceci.resultSubmitterConnection, ceci.resultSubmitterMessageQueue);\n }\n this.resultSubmitterMessageQueue = [];\n\n this.openPackageManagerConn = function openPackageManagerConn()\n {\n let config = dcpConfig.packageManager;\n ceci.packageManagerConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'packageManager'));\n ceci.packageManagerConnection.on('close', ceci.openPackageManagerConn);\n if (ceci.packageManagerMessageQueue.length)\n ceci.resendRejectedMessages(ceci.packageManagerConnection, ceci.packageManagerMessageQueue);\n }\n this.packageManagerMessageQueue = [];\n }\n\n /**\n * Return worker opaqueId.\n * @type {opaqueId}\n */\n get workerOpaqueId() {\n if (!this._workerOpaqueId)\n this._workerOpaqueId = localStorage.getItem('workerOpaqueId');\n\n if (!this._workerOpaqueId || this._workerOpaqueId.length !== constants.workerIdLength) {\n this._workerOpaqueId = generateOpaqueId();\n localStorage.setItem('workerOpaqueId', this._workerOpaqueId);\n }\n\n return this._workerOpaqueId;\n }\n\n /**\n * This getter is the absolute source-of-truth for what the\n * identity keystore is for this instance of the Supervisor.\n */\n get identityKeystore() {\n assert(this.defaultIdentityKeystore);\n\n return this._identityKeystore || this.defaultIdentityKeystore;\n }\n \n \n /** \n * Factory function which generates a list of origins which are safe to communicate \n * with for this purpose. Currently-valid purposes (more will be added):\n * - any\n * - fetchData\n * - fetchWork\n * - fetchWorkArguments\n * - sendResults\n */\n makeSafeOriginList(purpose)\n {\n var list = [];\n\n assert(Array.isArray(this.extraAllowOrigins[purpose]));\n \n if (this.extraAllowOrigins[purpose])\n list = list.concat(this.extraAllowOrigins[purpose]);\n if (dcpConfig.worker.allowOrigins[purpose])\n list = list.concat(dcpConfig.worker.allowOrigins[purpose])\n\n // In localExec, do not allow work function or arguments to come from the 'any' origins\n if (purpose !== 'any' && (!this.options.localExec || (this.options.localExec && purpose === 'sendResults')))\n {\n if (this.extraAllowOrigins.any)\n list = list.concat(this.extraAllowOrigins.any);\n if (dcpConfig.worker.allowOrigins.any)\n list = list.concat(dcpConfig.worker.allowOrigins.any);\n }\n return list;\n }\n\n /**\n * Open all connections. 
Used when supervisor is instantiated or stopped/started\n * to initially open connections.\n */\n instantiateAllConnections() {\n if (!this.taskDistributorConnection)\n this.openTaskDistributorConn();\n \n if (!this.eventRouterConnection)\n this.openEventRouterConn();\n \n if (!this.resultSubmitterConnection)\n this.openResultSubmitterConn();\n\n if (!this.packageManagerConnection)\n this.openPackageManagerConn();\n }\n \n /**\n * Asynchronously send a result to the result submitter that was previously rejected.\n * Different from resendRejectedMessages below in the sense that the function only resolves\n * once we've delivered the result, or gone past our max number of attempts.\n * @param {object} result \n * @returns the response payload from the result operation\n */\n async resendResult(result) {\n var protocolError = false;\n if (!result.sendRetries)\n result.sendRetries = 1;\n else\n result.sendRetries++;\n \n if (result.sendRetries > dcpConfig.worker.maxResultSubmissionRetries)\n throw new DCPError(`Could not submit result after ${dcpConfig.worker.maxResultSubmissionRetries} attempts. Aborting.`) \n \n debugging() && console.debug(`supervisor - failed to submit result ${result.sendRetries} time(s), trying again `)\n let res = await this.resultSubmitterConnection.send('result', result).catch(async (e) => {\n debugging('supervisor') && console.error(`Failed to submit result to scheduler for slice ${result.slice} of job ${result.job}:\\n ${e} \\nWill try again on new connection.`);\n this.resultSubmitterConnection.close();\n await a$sleepMs(10); /* let connection recycle */\n protocolError = true;\n });\n if ((!res.success && res.payload && res.payload.code === 'DCPS-01002') || protocolError)\n return this.resendResult(result)\n else\n return res;\n }\n \n /**\n * Try sending messages that were rejected on an old instance of the given connection.\n * These are messages that a) were rejected due to a protocol error and b) don't care when exactly\n * they're sent in the grand scheme of things.\n */\n resendRejectedMessages(connection, messageQueue) {\n if (connection.resendingMessages) /* if the passed connection is already in the loop, exit */\n return;\n \n var message = messageQueue.shift();\n\n do {\n \n connection.resendingMessages = true;\n var quitLoop = false;\n \n connection.send(message.operation, message.data)\n .catch((e) =>\n {\n /* Protocol Error; Close connection (this will trigger the opening of a new connection that will try sending again) */\n debugging('supervisor') && console.error(`Failed to send message ${message.operation} to scheduler: ${e}. Will try again on a new \n connection.`);\n messageQueue.unshift(message);\n connection.close();\n quitLoop = true;\n });\n \n message = messageQueue.shift();\n \n } while (message && !quitLoop)\n\n connection.resendingMessages = false;\n }\n\n /** Set the default identity keystore -- needs to happen before anything that talks\n * to the scheduler for work gets called. This is a wart and should be removed by\n * refactoring.\n *\n * The default identity keystore will be used if the Supervisor was not provided\n * with an alternate. This keystore will be located via the Wallet API, and \n * if not found, a randomized default identity will be generated. 
\n *\n * @param {object} ks An instance of wallet::Keystore -- if undefined, we pick the best default we can.\n * @returns {Promise<void>}\n */\n async setDefaultIdentityKeystore(ks) {\n try {\n if (ks) {\n this.defaultIdentityKeystore = ks;\n return;\n }\n\n if (this.defaultIdentityKeystore)\n return;\n\n try {\n this.defaultIdentityKeystore = await wallet.getId();\n } catch(e) {\n debugging('supervisor') && console.debug('Error generating default identity, try to do it another way.');\n this.defaultIdentityKeystore = await new wallet.IdKeystore(null, '');\n }\n } finally {\n if (this.defaultIdentityKeystore)\n debugging('supervisor') && console.debug('Set default identity =', this.defaultIdentityKeystore.address);\n else\n debugging('supervisor') && console.debug('Failed to set default identity, worker cannot work.');\n }\n }\n\n //\n // What follows is a bunch of utility properties and functions for creating filtered views\n // of the slices and sandboxes array.\n //\n /** XXXpfr @todo Write sort w/o using promises so we can get rid of async on all the compress functions. */\n\n /**\n * @deprecated -- Please do not use this.workingSandboxes; use this.allocatedSandboxes instead.\n * Sandboxes that are in WORKING state.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Sandbox[]}\n */\n get workingSandboxes() {\n return this.sandboxes.filter(sandbox => sandbox.isWorking);\n }\n\n /**\n * Use instead of this.workingSandboxes.\n *\n * When a sandbox is paired with a slice, execution is pending and sandbox.allocated=true and\n * sandbox.slice=slice and sandbox.jobAddress=slice.jobAddress. This is what 'allocated' means.\n * Immediately upon the exit of sandbox.work, sandbox.allocated=false is set and if an exception\n * wasn't thrown the sandbox is placed in this.assignedSandboxes.\n * Thus from the pov of supervisor, this.allocatedSandboxes is deterministic and this.workingSandboxes is not.\n * Please try to not use this.workingSandboxes. 
It is deprecated.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Sandbox[]}\n */\n get allocatedSandboxes() {\n return this.sandboxes.filter(sandbox => sandbox.allocated);\n }\n\n /**\n * Slices that are allocated.\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Slice[]}\n */\n get allocatedSlices() {\n return this.slices.filter(slice => slice.allocated);\n }\n\n /**\n * This property is used as the target number of sandboxes to be associated with slices and start working.\n *\n * It is used in this.watchdog as to prevent a call to this.work when unallocatedSpace <= 0.\n * It is also used in this.distributeQueuedSlices where it is passed as an argument to this.matchSlicesWithSandboxes to indicate how many sandboxes\n * to associate with slices and start working.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {number}\n */\n get unallocatedSpace() {\n return this.maxWorkingSandboxes - this.allocatedSandboxes.length - this.numberOfCoresReserved;\n }\n \n /**\n * Call acquire(numberOfCoresToReserve) to reserve numberOfCoresToReserve unallocated sandboxes as measured by unallocatedSpace.\n * Call release() to undo the previous acquire.\n * This pseudo-mutex technique helps prevent races in scheduling slices in Supervisor.\n * @param {number} numberOfCoresToReserve\n */\n acquire(numberOfCoresToReserve) { \n this.numberOfCoresReserved = numberOfCoresToReserve; \n }\n release() { \n this.numberOfCoresReserved = 0; \n }\n\n /**\n * Remove from this.slices.\n * @param {Slice} slice\n */\n removeSlice(slice) {\n this.removeElement(this.slices, slice);\n if (Supervisor.debugBuild) {\n if (this.queuedSlices.indexOf(slice) !== -1)\n throw new Error(`removeSlice: slice ${slice.identifier} is in queuedSlices; inconsistent state.`);\n if (this.lostSlices.length > 0) {\n console.warn(`removeSlice: slice ${slice.identifier}, found lostSlices ${this.lostSlices.map(s => s.identifier)}`);\n if (this.lostSlices.indexOf(slice) !== -1)\n throw new Error(`removeSlice: slice ${slice.identifier} is in lostSlices; inconsistent state.`);\n }\n }\n }\n\n /**\n * Remove from this.slices.\n * @param {Slice[]} slices\n */\n removeSlices(slices) {\n this.slices = this.slices.filter(slice => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove from this.queuedSlices.\n * @param {Slice[]} slices\n */\n removeQueuedSlices(slices) {\n this.queuedSlices = this.queuedSlices.filter(slice => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove from this.sandboxes, this.assignedSandboxes and this.readiedSandboxes.\n * @param {Sandbox} sandbox\n */\n removeSandbox(sandbox) {\n debugging('scheduler') && console.log(`removeSandbox ${sandbox.identifier}`);\n this.removeElement(this.sandboxes, sandbox);\n this.removeElement(this.assignedSandboxes, sandbox);\n\n // XXXpfr: April 13, 2022\n // I'm trying to understand and control when sandboxes get removed.\n // A sandbox in this.readiedSandboxes should never have returnSandbox/removeSandbox called on it except in stopWork.\n // Because of races and random worker crashes, it is hard to get this right, but I want to try.\n // If I don't fix this is the next 30 days or I forget, please delete this exception.\n if (false)\n {}\n\n this.removeElement(this.readiedSandboxes, sandbox);\n }\n\n /**\n * Remove from this.sandboxes and this.assignedSandboxes .\n * @param {Sandbox[]} 
sandboxes\n */\n async removeSandboxes(sandboxes) {\n debugging('scheduler') && console.log(`removeSandboxes: Remove ${sandboxes.length} sandboxes ${this.dumpSandboxes(sandboxes)}`);\n this.sandboxes = this.sandboxes.filter(sandbox => sandboxes.indexOf(sandbox) === -1);\n this.assignedSandboxes = this.assignedSandboxes.filter(sandbox => sandboxes.indexOf(sandbox) === -1);\n\n if (Supervisor.debugBuild) {\n const readied = this.readiedSandboxes.filter(sandbox => sandboxes.indexOf(sandbox) !== -1);\n if (readied.length > 0)\n throw new Error(`removeSandboxes: sandboxes ${readied.map(s => s.identifier)} are in readiedSandboxes; inconsistent state.`);\n }\n }\n\n /**\n * Remove element from theArray.\n * @param {Array<*>} theArray\n * @param {object|number} element\n * @param {boolean} [assertExists = true]\n */\n removeElement(theArray, element, assertExists = false) {\n let index = theArray.indexOf(element);\n assert(index !== -1 || !assertExists);\n if (index !== -1) theArray.splice(index, 1);\n }\n\n /**\n * Log sliceArray.\n * @param {Slice[]} sliceArray\n * @param {string} [header]\n * @returns {string}\n */\n dumpSlices(sliceArray, header) {\n if (header) console.log(`\\n${header}`);\n return compressSlices(sliceArray, this.addressTruncationLength);\n }\n\n /**\n * Log sandboxArray.\n * @param {Sandbox[]} sandboxArray\n * @param {string} [header]\n * @returns {string}\n */\n dumpSandboxes(sandboxArray, header) {\n if (header) console.log(`\\n${header}`);\n return compressSandboxes(sandboxArray, this.addressTruncationLength);\n }\n\n /**\n * If the elements of sandboxSliceArray are not unique, log the duplicates and dump the array.\n * @param {SandboxSlice[]} sandboxSliceArray\n * @param {string} header\n */\n dumpSandboxSlicesIfNotUnique(sandboxSliceArray, header) {\n if (!this.isUniqueSandboxSlices(sandboxSliceArray, header))\n console.log(this.dumpSandboxSlices(sandboxSliceArray));\n }\n\n /**\n * Log { sandbox, slice }.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @returns {string}\n */\n dumpSandboxAndSlice(sandbox, slice) {\n return `${sandbox.id}~${slice.sliceNumber}.${this.dumpJobAddress(slice.jobAddress)}`;\n }\n\n /**\n * Log { sandbox, slice } with state/status.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @returns {string}\n */\n dumpStatefulSandboxAndSlice(sandbox, slice) {\n return `${sandbox.id}.${sandbox.state}~${slice.sliceNumber}.${this.dumpJobAddress(slice.jobAddress)}.${slice.status}`;\n }\n\n /**\n * Truncates jobAddress.toString() to this.addressTruncationLength digits.\n * @param {address} jobAddress\n * @returns {string}\n */\n dumpJobAddress(jobAddress) {\n return truncateAddress(jobAddress, this.addressTruncationLength /* digits*/);\n }\n\n /**\n * Dump sandboxSliceArray.\n * @param {SandboxSlice[]} sandboxSliceArray - input array of { sandbox, slice }\n * @param {string} [header] - optional header\n * @param {boolean} [stateFul] - when true, also includes slice.status and sandbox.state.\n * @returns {string}\n */\n dumpSandboxSlices(sandboxSliceArray, header, stateFul=false) {\n if (header) console.log(`\\n${header}`);\n const jobMap = {};\n sandboxSliceArray.forEach(ss => {\n const sss = stateFul ? 
`${ss.sandbox.id}.${ss.sandbox.state}~${ss.slice.sliceNumber}.${ss.slice.status}` : `${ss.sandbox.id}~${ss.slice.sliceNumber}`;\n if (!jobMap[ss.slice.jobAddress]) jobMap[ss.slice.jobAddress] = sss;\n else jobMap[ss.slice.jobAddress] += `,${sss}`;\n });\n let output = '';\n for (const [jobAddress, sss] of Object.entries(jobMap))\n output += `${this.dumpJobAddress(jobAddress)}:[${sss}]:`;\n return output;\n }\n\n /**\n * Check sandboxSliceArray for duplicates.\n * @param {SandboxSlice[]} sandboxSliceArray\n * @param {string} [header]\n * @param {function} [log]\n * @returns {boolean}\n */\n isUniqueSandboxSlices(sandboxSliceArray, header, log) {\n const result = [], slices = [], sandboxes = [];\n let once = true;\n sandboxSliceArray.forEach(x => {\n const sliceIndex = slices.indexOf(x.slice);\n const sandboxIndex = sandboxes.indexOf(x.sandbox);\n\n if (sandboxIndex >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x.sandbox) : console.log(`\\tWarning: Found duplicate sandbox ${x.sandbox.identifier}.`);\n } else sandboxes.push(x.sandbox);\n\n if (sliceIndex >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x.slice) : console.log(`\\tWarning: Found duplicate slice ${x.slice.identifier}.`);\n } else {\n slices.push(x.slice);\n if (sandboxIndex < 0) result.push(x);\n }\n });\n return sandboxSliceArray.length === result.length;\n }\n\n /**\n * Attempts to create and start a given number of sandboxes.\n * The sandboxes that are created can then be assigned for a\n * specific job at a later time. All created sandboxes\n * get put into the @this.readiedSandboxes array when allocateLocalSandboxes is false.\n *\n * @param {number} numSandboxes - the number of sandboxes to create\n * @param {boolean} [allocateLocalSandboxes=false] - when true, do not place in this.readiedSandboxes\n * @returns {Promise<Sandbox[]>} - resolves with array of created sandboxes, rejects otherwise\n * @throws when given a numSandboxes is not a number or if numSandboxes is Infinity\n */\n async readySandboxes (numSandboxes, allocateLocalSandboxes = false) {\n debugging('supervisor') && console.debug(`readySandboxes: Readying ${numSandboxes} sandboxes, total sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n \n if (typeof numSandboxes !== 'number' || Number.isNaN(numSandboxes) || numSandboxes === Infinity) {\n throw new Error(`${numSandboxes} is not a number of sandboxes that can be readied.`);\n }\n if (numSandboxes <= 0) {\n return [];\n }\n\n const sandboxStartPromises = [];\n const sandboxes = [];\n const errors = [];\n for (let i = 0; i < numSandboxes; i++) {\n const sandbox = new Sandbox(this, {\n ...this.options.sandboxOptions,\n });\n sandbox.addListener('ready', () => this.emit('sandboxReady', sandbox));\n sandbox.addListener('start', () => {\n this.emit('sandboxStart', sandbox);\n\n // When sliceNumber == 0, result-submitter status skips the slice,\n // so don't send it in the first place.\n // The 'start' event is fired when a worker starts up, hence there's no way\n // to determine whether sandbox has a valid slice without checking.\n if (sandbox.slice) {\n const jobAddress = sandbox.jobAddress;\n const sliceNumber = sandbox.slice.sliceNumber;\n // !authorizationMessage <==> sliceNumber === 0.\n const authorizationMessage = sandbox.slice.getAuthorizationMessage();\n\n if (authorizationMessage) {\n let statusPayload = {\n worker: this.workerOpaqueId,\n slices: [{\n job: jobAddress,\n 
sliceNumber: sliceNumber,\n status: 'begin',\n authorizationMessage,\n }],\n }\n \n try /* resultSubmitterConnection can be null if worker is stopped */\n {\n this.resultSubmitterConnection.send('status', statusPayload).catch((error) => {\n debugging('supervisor') && console.error(`Error sending 'status' for slice ${sliceNumber} of job ${jobAddress}:\\n ${error}\\nWill try again on a new connection`);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: statusPayload });\n this.resultSubmitterConnection.close();\n });\n } catch (error)\n {\n debugging('supervisor') && console.error(`Failed to send 'status' for slice ${sliceNumber} of job ${jobAddress}:, no connection to result submitter:`, error);\n }\n }\n }\n });\n sandbox.addListener('workEmit', ({ eventName, payload }) => {\n // Need to check if the sandbox hasn't been assigned a slice yet.\n if (!sandbox.slice) {\n if (Supervisor.debugBuild) {\n console.error(\n `Sandbox not assigned a slice before sending workEmit message to scheduler. 'workEmit' event originates from \"${eventName}\" event`, \n payload,\n );\n }\n }\n else\n {\n const jobAddress = sandbox.slice.jobAddress;\n const sliceNumber = sandbox.slice.sliceNumber;\n // sliceNumber can be zero if it came from a problem with loading modules.\n assert(jobAddress && (sliceNumber || sliceNumber === 0));\n // Send a work emit message from the sandbox to the event router\n // !authorizationMessage <==> sliceNumber === 0.\n let authorizationMessage;\n try {\n // Sometimes a sliceNumber===0 workEmit comes in before the client bundle is properly loaded.\n // Also happens with minor dcp-client version mismatches.\n authorizationMessage = sandbox.slice.getAuthorizationMessage();\n } catch(e) {\n authorizationMessage = null;\n }\n\n if (!authorizationMessage)\n {\n console.warn(`workEmit: missing authorization message for job ${jobAddress}, slice: ${sliceNumber}`);\n return Promise.resolve();\n }\n \n let workEmitPayload = {\n eventName,\n payload,\n job: jobAddress,\n slice: sliceNumber,\n worker: this.workerOpaqueId,\n authorizationMessage,\n }\n \n const workEmitPromise = this.eventRouterConnection.send('workEmit', workEmitPayload).catch(error => {\n debugging('supervisor') && console.warn(`workEmit: unable to send ${eventName} for slice ${sliceNumber} of job ${jobAddress}: ${error.message}.\\nTrying again on a new connection.`);\n this.eventRouterMessageQueue.push({ operation: 'workEmit', data: workEmitPayload })\n this.eventRouterConnection.close();\n if (Supervisor.debugBuild)\n console.error('workEmit error:', error);\n });\n\n if (Supervisor.debugBuild) {\n workEmitPromise.then(result => {\n if (!result || !result.success)\n console.warn('workEmit: event router did not accept event', result);\n });\n }\n }\n });\n\n // When any sbx completes, \n sandbox.addListener('complete', () => {\n this.watchdog();\n });\n\n sandbox.on('sandboxError', (error) => handleSandboxError(this, sandbox, error));\n \n sandbox.on('rejectedWorkMetrics', (data) =>{\n function updateRejectedMetrics(report) {\n ['total', 'CPU', 'webGL'].forEach((key) => {\n if (report[key]) sandbox.slice.rejectedTimeReport[key] += report[key];\n })\n }\n \n // If the slice already has rejected metrics, add this data to it. If not, assign this data to slices rejected metrics property\n if (sandbox.slice) {\n (sandbox.slice.rejectedTimeReport) ? 
updateRejectedMetrics(data.timeReport) : sandbox.slice.rejectedTimeReport = data.timeReport;\n }\n })\n \n // If the sandbox terminated and we are not shutting down, then should return all work which is currently\n // not being computed if all sandboxes are dead and the attempt to create a new one fails.\n sandbox.on('terminated',async () => {\n if (this.sandboxes.length > 0) {\n let terminatedSandboxes = this.sandboxes.filter(sbx => sbx.isTerminated);\n if (terminatedSandboxes.length === this.sandboxes.length) {\n debugging('supervisor') && console.debug(`readySandboxes: Create 1 sandbox in the sandbox-terminated-handler, total sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n await this.readySandboxes(1);\n \n // If we cannot create a new sandbox, that probably means we're on a screensaver worker\n // and the screensaver is down. So return the slices to the scheduler.\n if (this.sandboxes.length !== terminatedSandboxes.length + 1) {\n this.returnSlices(this.queuedSlices).then(() => {\n this.queuedSlices.length = 0;\n });\n }\n }\n }\n })\n\n const delayMs =\n 1000 *\n (tuning.minSandboxStartDelay +\n Math.random() *\n (tuning.maxSandboxStartDelay - tuning.minSandboxStartDelay));\n \n sandboxStartPromises.push(\n sandbox\n .start(delayMs)\n .then(() => {\n if (!allocateLocalSandboxes) this.readiedSandboxes.push(sandbox);\n this.sandboxes.push(sandbox);\n sandboxes.push(sandbox);\n }).catch((err) => {\n errors.push(err);\n this.returnSandbox(sandbox);\n if (err.code === 'ENOWORKER') {\n throw new DCPError(\"Cannot use localExec without dcp-worker installed. Use the command 'npm install dcp-worker' to install the neccessary modules.\", 'ENOWORKER');\n }\n }));\n }\n \n await Promise.all(sandboxStartPromises);\n\n if (errors.length) {\n console.warn(`Failed to ready ${errors.length} of ${numSandboxes} sandboxes.`, errors);\n throw new Error('Failed to ready sandboxes.');\n }\n\n debugging('supervisor') && console.log(`readySandboxes: Readied ${sandboxes.length} sandboxes ${JSON.stringify(sandboxes.map(sandbox => sandbox.id))}`);\n \n return sandboxes;\n }\n\n /**\n * Accepts a sandbox after it has finished working or encounters an error.\n * If the sandbox was terminated or if \"!slice || slice.failed\" then\n * the sandbox will be removed from the sandboxes array and terminated if necessary.\n * Otherwise it will try to distribute a slice to the sandbox immediately.\n *\n * @param {Sandbox} sandbox - the sandbox to return\n * @param {Slice} [slice] - the slice just worked on; !slice => terminate\n * @param {boolean} [verifySandboxIsNotTerminated=true] - if true, check sandbox is not already terminated\n */\n returnSandbox (sandbox, slice, verifySandboxIsNotTerminated=true) {\n if (!slice || slice.failed || sandbox.isTerminated) {\n \n this.removeSandbox(sandbox);\n \n if (!sandbox.isTerminated) {\n debugging('supervisor') && console.log(`Supervisor.returnSandbox: Terminating ${sandbox.identifier}${slice ? `~${slice.identifier}` : ''}, # of sandboxes ${this.sandboxes.length}`);\n sandbox.terminate(false);\n } else {\n debugging('supervisor') && console.log(`Supervisor.returnSandbox: Already terminated ${sandbox.identifier}${slice ? 
`~${slice.identifier}` : ''}, # of sandboxes ${this.sandboxes.length}`);\n // XXXpfr: April 13, 2022\n // I'm trying to understand and control when sandboxes get terminated.\n // Because of races and random worker crashes, it is impossible to not try to terminate a sandbox more than once.\n // But at some places where returnSandbox is we shouldn't see this behavior, hence this exception.\n // If I don't fix this is the next 30 days or I forget, please delete this exception.\n if (false)\n {}\n }\n }\n }\n\n /**\n * Terminates sandboxes, in order of creation, when the total started sandboxes exceeds the total allowed sandboxes.\n *\n * @returns {Promise<void>}\n */\n pruneSandboxes () {\n let numOver = this.sandboxes.length - (dcpConfig.worker.maxAllowedSandboxes + this.maxWorkingSandboxes);\n if (numOver <= 0) return;\n \n // Don't kill readied sandboxes while creating readied sandboxes.\n for (let index = 0; index < this.readiedSandboxes.length; ) {\n const sandbox = this.readiedSandboxes[index];\n // If the sandbox is allocated, advance to the next one in the list.\n if (sandbox.allocated) {\n index++;\n continue;\n }\n // Otherwise, remove this sandbox but look at the same array index in the next loop.\n debugging('supervisor') && console.log(`pruneSandboxes: Terminating readied sandbox ${sandbox.identifier}`);\n this.readiedSandboxes.splice(index, 1);\n this.returnSandbox(sandbox);\n\n if (--numOver <= 0) break;\n }\n\n if (numOver <= 0) return;\n for (let index = 0; index < this.assignedSandboxes.length; ) {\n const sandbox = this.assignedSandboxes[index];\n // If the sandbox is allocated, advance to the next one in the list.\n if (sandbox.allocated) {\n index++;\n continue;\n }\n // Otherwise, remove this sandbox but look at the same array index in the next loop.\n debugging('supervisor') && console.log(`pruneSandboxes: Terminating assigned sandbox ${sandbox.identifier}`);\n this.assignedSandboxes.splice(index, 1);\n this.returnSandbox(sandbox);\n\n if (--numOver <= 0) break;\n }\n }\n \n /**\n * Basic watch dog to check if there are idle sandboxes and\n * attempts to nudge the supervisor to feed them work.\n *\n * Run in an interval created in @constructor .\n * @returns {Promise<void>}\n */\n async watchdog () {\n if (!this.watchdogState)\n this.watchdogState = {};\n\n // Every 5 minutes, report progress of all working slices to the scheduler\n if (Date.now() > ((this.lastProgressReport || 0) + sandboxTuning.progressReportInterval)) {\n // console.log('454: Assembling progress update...');\n this.lastProgressReport = Date.now();\n\n //\n // Note: this.slices is the disjoint union of:\n // this.allocatedSlices, \n // this.queuedSlices, \n // this.slices.filter(slice => !slice.isUnassigned) .\n // When a slice is not in these 3 arrays, the slice is lost.\n //\n \n const currentLostSlices = this.slices.filter(slice => slice.isUnassigned \n && this.queuedSlices.indexOf(slice) === -1\n && this.allocatedSlices.indexOf(slice) === -1);\n\n if (currentLostSlices.length > 0) {\n this.lostSlices.push(...currentLostSlices);\n // Try to recover.\n // Needs more work and testing.\n // Test when we can come up with a decent lost slice repro case.\n // --> this.queuedSlices.push(...currentLostSlices);\n }\n\n if (this.lostSlices.length > 0) {\n if (true) { // Keep this on for awhile, until we know lost slices aren't happening.\n console.warn('Supervisor.watchdog: Found lost slices!');\n for (const slice of this.lostSlices)\n console.warn('\\t', slice.identifier);\n }\n this.lostSlices = 
this.lostSlices.filter(slice => slice.isUnassigned);\n }\n\n const slices = [];\n this.queuedSlices.forEach(slice => {\n assert(slice && slice.sliceNumber > 0);\n addToSlicePayload(slices, slice, sliceStatus.scheduled);\n });\n\n this.allocatedSlices.forEach(slice => {\n assert(slice && slice.sliceNumber > 0);\n addToSlicePayload(slices, slice, 'progress'); // Beacon.\n });\n\n if (slices.length) {\n // console.log('471: sending progress update...');\n const progressReportPayload = {\n worker: this.workerOpaqueId,\n slices,\n };\n\n this.resultSubmitterConnection.send('status', progressReportPayload)\n .catch(error => {\n debugging('supervisor') && console.error('479: Failed to send status update:', error/*.message*/);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: progressReportPayload })\n this.resultSubmitterConnection.close();\n });\n }\n }\n\n if (this.worker.working) {\n if (this.unallocatedSpace > 0) {\n await this.work().catch(err => {\n if (!this.watchdogState[err.code || '0'])\n this.watchdogState[err.code || '0'] = 0;\n if (Date.now() - this.watchdogState[err.code || '0'] > ((dcpConfig.worker.watchdogLogInterval * timeDilation || 120) * 1000))\n console.error('301: Failed to start work:', err);\n this.watchdogState[err.code || '0'] = Date.now();\n });\n }\n\n this.pruneSandboxes();\n }\n }\n\n /**\n * Gets the logical and physical number of cores and also\n * the total number of sandboxes the worker is allowed to run\n *\n */\n getStatisticsCPU() {\n if (DCP_ENV.isBrowserPlatform) {\n return {\n worker: this.workerOpaqueId,\n lCores: window.navigator.hardwareConcurrency,\n pCores: dcpConfig.worker.pCores || window.navigator.hardwareConcurrency,\n sandbox: this.maxWorkingSandboxes\n }\n }\n\n return {\n worker: this.workerOpaqueId,\n lCores: requireNative('os').cpus().length,\n pCores: requireNative('physical-cpu-count'),\n sandbox: this.maxWorkingSandboxes\n }\n }\n\n /**\n * Returns the number of unallocated sandbox slots to send to fetchTask.\n *\n * @returns {number}\n */\n numberOfAvailableSandboxSlots() {\n let numCores;\n if (this.options.priorityOnly && this.options.jobAddresses.length === 0) {\n numCores = 0;\n } else if (this.queuedSlices.length > 1) {\n // We have slices queued, no need to fetch\n numCores = 0;\n } else {\n // The queue is almost empty (there may be 0 or 1 element), fetch a full task.\n // The task is full, in the sense that it will contain slices whose\n // aggregate execution time is this.maxWorkingSandboxes * 5-minutes.\n // However, there can only be this.unallocatedSpace # of long slices.\n // Thus we need to know whether the last slice in this.queuedSlices is long or not.\n // (A long slice has estimated execution time >= 5-minutes.)\n const longSliceCount = (this.queuedSlices.length > 0 && this.queuedSlices[0].isLongSlice) ? 
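/*
 * A minimal sketch of the "lost slice" invariant described in the watchdog comment
 * above: this.slices should be covered by the queued, allocated, and assigned
 * slices, so anything unassigned that sits in neither queue is treated as lost.
 * The names (isUnassigned, the two queues) are assumed from the surrounding code;
 * this is illustrative, not the bundle's implementation.
 */
function findLostSlices(allSlices, queuedSlices, allocatedSlices) {
  const queued = new Set(queuedSlices);
  const allocated = new Set(allocatedSlices);
  // A slice is lost when it is unassigned but tracked by neither queue.
  return allSlices.filter(
    (slice) => slice.isUnassigned && !queued.has(slice) && !allocated.has(slice)
  );
}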
1 : 0;\n numCores = this.unallocatedSpace - longSliceCount;\n }\n return numCores;\n }\n\n /**\n * Call to start doing work on the network.\n * This is the one place where requests to fetch new slices are made.\n * After the initial slices are fetched it calls this.distributeQueuedSlices.\n *\n * @returns {Promise<void>}, unallocatedSpace ${this.unallocatedSpace}\n */\n async work()\n {\n // When inside matchSlicesWithSandboxes, don't reenter Supervisor.work to fetch new work or create new sandboxes.\n if (this.matching) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.work: Do not interleave work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n return Promise.resolve();\n }\n\n await this.setDefaultIdentityKeystore();\n\n // Instantiate connections that don't exist.\n this.instantiateAllConnections();\n\n const numCores = this.numberOfAvailableSandboxSlots();\n\n debugging() && console.log(`Supervisor.work: Try to get ${numCores} slices in working sandboxes, unallocatedSpace ${this.unallocatedSpace}, queued slices ${this.queuedSlices.length}, # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching: ${this.isFetchingNewWork}`);\n \n // Fetch a new task if we have no more slices queued, then start workers\n try {\n if (numCores > 0 && !this.isFetchingNewWork) {\n this.isFetchingNewWork = true;\n\n /**\n * This will only ready sandboxes up to a total count of\n * maxWorkingSandboxes (in any state). It is not possible to know the\n * actual number of sandboxes required until we have the slices because we\n * may have sandboxes assigned for the slice's job already.\n *\n * If the evaluator cannot start (ie. if the evalServer is not running),\n * then the while loop will keep retrying until the evalServer comes online\n */\n if (this.maxWorkingSandboxes > this.sandboxes.length) {\n // Note: The old technique had \n // while (this.maxWorkingSandboxes > this.sandboxes.length) {....\n // and sometimes we'd get far too many sandboxes, because it would keep looping while waiting for\n // this.readySandboxes(this.maxWorkingSandboxes - this.sandboxes.length);\n // to construct the rest of the sandboxes. 
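/*
 * The slot arithmetic from numberOfAvailableSandboxSlots above, condensed into a
 * pure function over the values it reads (argument names are assumed for the
 * sketch): fetch nothing for a priority-only worker with no target jobs or when
 * slices are already queued, and otherwise leave room for a queued long slice.
 */
function availableSandboxSlots({ priorityOnly, jobAddresses, queuedSlices, unallocatedSpace }) {
  if (priorityOnly && jobAddresses.length === 0) return 0; // nothing targeted
  if (queuedSlices.length > 1) return 0;                   // enough work queued already
  const longSliceCount =
    queuedSlices.length > 0 && queuedSlices[0].isLongSlice ? 1 : 0;
  return unallocatedSpace - longSliceCount;                // reserve a slot for the long slice
}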
The fix is to only loop when the 1st \n // await this.readySandboxes(1) \n // is failing.\n let needFirstSandbox = true;\n while (needFirstSandbox) {\n debugging('supervisor') && console.log(`Supervisor.work: ready 1 sandbox, # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n await this.readySandboxes(1)\n .then(() => {\n debugging('supervisor') && console.log(`Supervisor.work: ready ${this.maxWorkingSandboxes - this.sandboxes.length} sandbox(es), # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n this.readySandboxes(this.maxWorkingSandboxes - this.sandboxes.length);\n needFirstSandbox = false;\n }).catch(error => {\n console.warn('906: failed to ready sandboxes; will retry', error.code, error.message);\n });\n }\n }\n\n /**\n * Temporary change: Assign the capabilities of one of readied sandboxes\n * before fetching slices from the scheduler.\n *\n * TODO: Remove this once fetchTask uses the capabilities of every\n * sandbox to fetch slices.\n */\n if (!this.capabilities) {\n this.capabilities = this.sandboxes[0].capabilities;\n this.emit('capabilitiesCalculated', this.capabilities);\n }\n\n if (DCP_ENV.isBrowserPlatform && this.capabilities.browser)\n this.capabilities.browser.chrome = DCP_ENV.isBrowserChrome;\n\n const fetchTimeout = setTimeout(() => {\n console.warn(`679: Fetch exceeded timeout, will reconnect at next watchdog interval`);\n \n this.taskDistributorConnection.close('Fetch timed out', Math.random() > 0.5).catch(error => {\n console.error(`931: Failed to close task-distributor connection`, error);\n });\n this.resultSubmitterConnection.close('Fetch timed out', Math.random() > 0.5).catch(error => {\n console.error(`920: Failed to close result-submitter connection`, error);\n });\n this.isFetchingNewWork = false;\n this.instantiateAllConnections();\n }, 3 * 60 * 1000); // max out at 3 minutes to fetch\n\n // ensure result submitter and task distributor connections before fetching tasks\n try\n {\n await this.resultSubmitterConnection.keepalive();\n await this.taskDistributorConnection.keepalive();\n }\n catch (e)\n {\n console.error('Failed to connect to result submitter, refusing to fetch slices. Will try again at next fetch cycle.')\n debugging('supervisor') && console.log(`Error: ${e}`);\n this.isFetchingNewWork = false; // <-- done in the `finally` block, below\n clearTimeout(fetchTimeout);\n this.taskDistributorConnection.close('Failed to connect to result-submitter', true).catch(error => {\n console.error(`939: Failed to close task-distributor connection`, error);\n });\n this.resultSubmitterConnection.close('Failed to connect to result-submitter', true).catch(error => {\n console.error(`942: Failed to close result-submitter connection`, error);\n });\n return Promise.resolve();\n }\n await this.fetchTask(numCores).finally(() => {\n clearTimeout(fetchTimeout);\n this.isFetchingNewWork = false;\n });\n }\n\n this.distributeQueuedSlices().then(() => debugging('supervisor') && 'supervisor: finished distributeQueuedSlices()').catch((e) => {\n // We should never get here, because distributeQueuedSlices was changed\n // to try to catch everything and return slices and sandboxes.\n // If we do catch here it may mean a slice was lost. 
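/*
 * The fetch guard used in work() above follows a common shape: arm a timer before
 * the slow operation, tear the connections down if it fires, and clear it as soon
 * as the operation settles.  A generic sketch of that shape (onTimeout stands in
 * for the connection-reset logic; this is not the bundle's code):
 */
function withFetchTimeout(fetchPromise, onTimeout, ms = 3 * 60 * 1000) {
  const timer = setTimeout(onTimeout, ms);                 // fires only if the fetch overruns
  return fetchPromise.finally(() => clearTimeout(timer));  // disarm once the fetch settles
}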
\n console.error('Supervisor.work catch handler for distributeQueuedSlices.', e);\n });\n // No catch(), because it will bubble outward to the caller\n } finally {\n }\n }\n\n /**\n * Generate the workerComputeGroups property of the requestTask message. \n * \n * Concatenate the compute groups object from dcpConfig with the list of compute groups\n * from the supervisor, and remove the public group if accidentally present. Finally,\n * we transform joinSecrets/joinHash into joinHashHash for secure transmission.\n *\n * @note computeGroup objects with joinSecrets are mutated to record their hashes. This\n * affects the supervisor options and dcpConfig. Re-adding a joinSecret property\n * to one of these will cause the hash to be recomputed.\n */\n generateWorkerComputeGroups()\n {\n var computeGroups = Object.values(dcpConfig.worker.computeGroups || {});\n if (this.options.computeGroups)\n computeGroups = computeGroups.concat(this.options.computeGroups);\n computeGroups = computeGroups.filter(group => group.id !== constants.computeGroups.public.id);\n const hashedComputeGroups = [];\n for (const group of computeGroups)\n {\n const groupCopy = Object.assign({}, group);\n if ((group.joinSecret || group.joinHash) && (!group.joinHashHash || this.lastDcpsid !== this.taskDistributorConnection.dcpsid))\n {\n let joinHash;\n if (group.joinHash) {\n joinHash = group.joinHash.replace(/\\s+/g, ''); // strip whitespace\n } else {\n joinHash = calculateJoinHash(groupCopy);\n } \n\n groupCopy.joinHashHash = hash.calculate(hash.eh1, joinHash, this.taskDistributorConnection.dcpsid);\n delete groupCopy.joinSecret;\n delete groupCopy.joinHash;\n debugging('computeGroups') && console.debug(`Calculated joinHash=${joinHash} for`, groupCopy);\n }\n hashedComputeGroups.push(groupCopy);\n }\n this.lastDcpsid = this.taskDistributorConnection.dcpsid;\n debugging('computeGroups') && console.debug('Requesting ', computeGroups.length, 'non-public groups for session', this.lastDcpsid);\n return hashedComputeGroups;\n }\n\n /**\n * Remove all unreferenced jobs in `this.cache`.\n *\n * @param {any[]} newJobs - Jobs that should not be removed from\n * `this.cache`.\n */\n cleanJobCache(newJobs = []) {\n /* Delete all jobs in the supervisorCache that are not represented in this newJobs,\n * or in this.queuedSlices, or there is no sandbox assigned to these jobs.\n * Note: There can easily be 200+ places to check; using a lookup structure to maintain O(n).\n */\n if (this.cache.jobs.length > 0) {\n const jobAddressMap = {};\n Object.keys(newJobs).forEach(jobAddress => { jobAddressMap[jobAddress] = 1; });\n this.slices.forEach(slice => { if (!jobAddressMap[slice.jobAddress]) jobAddressMap[slice.jobAddress] = 1; });\n this.cache.jobs.forEach(jobAddress => {\n if (!jobAddressMap[jobAddress]) {\n this.cache.remove('job', jobAddress);\n // Remove and return the corresponding sandboxes from this.sandboxes.\n const deadSandboxes = this.sandboxes.filter(sb => sb.jobAddress === jobAddress);\n if (deadSandboxes.length > 0) {\n deadSandboxes.forEach(sandbox => { this.returnSandbox(sandbox); });\n debugging('supervisor') && console.log(`Supervisor.fetchTask: Deleting job ${jobAddress} from cache and assigned sandboxes ${deadSandboxes.map(s => s.id)}, # of sandboxes ${this.sandboxes.length}.`);\n }\n }\n });\n }\n }\n\n /**\n * Fetches a task, which contains job information and slices for sandboxes and\n * manages events related to fetching tasks so the UI can more clearly display\n * to user what is actually happening.\n * @param 
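/*
 * generateWorkerComputeGroups never transmits joinSecret or joinHash; it sends a
 * joinHashHash bound to the current scheduler session (dcpsid), so a captured value
 * cannot be replayed on another session.  The bundle uses DCP's own
 * hash.calculate(hash.eh1, ...) and calculateJoinHash helpers; the sketch below uses
 * Node's crypto module with SHA-256 purely to illustrate the shape of the idea, not
 * the actual algorithm.
 */
const crypto = require('crypto');

function sketchJoinHashHash(joinHash, dcpsid) {
  return crypto
    .createHash('sha256')
    .update(joinHash.replace(/\s+/g, '')) // whitespace stripped, as above
    .update(String(dcpsid))               // binds the credential to this session
    .digest('hex');
}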
{number} numCores\n * @returns {Promise<void>} The requestTask request, resolve on success, rejects otherwise.\n * @emits Supervisor#fetchingTask\n * @emits Supervisor#fetchedTask\n */\n async fetchTask(numCores) {\n\n // Don't reenter\n if (this.matching || numCores <= 0) {\n // Interesting and noisy.\n debugging('supervisor') && console.log(`Supervisor.fetchTask: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return Promise.resolve();\n }\n\n //\n // Oversubscription mitigation.\n // Update when there are less available sandbox slots than numCores.\n const checkNumCores = this.numberOfAvailableSandboxSlots();\n if (numCores > checkNumCores) numCores = checkNumCores;\n if (numCores <= 0) return Promise.resolve();\n\n this.emit('fetchingTask');\n debugging('supervisor') && console.debug('supervisor: fetching task');\n const requestPayload = {\n numCores,\n coreStats: this.getStatisticsCPU(),\n numGPUs: this.defaultMaxGPUs,\n capabilities: this.capabilities,\n paymentAddress: this.paymentAddress,\n jobAddresses: this.options.jobAddresses || [], // force array; when set, only fetches slices for these jobs\n localExec: this.options.localExec,\n workerComputeGroups: this.generateWorkerComputeGroups(),\n minimumWage: dcpConfig.worker.minimumWage || this.options.minimumWage,\n readyJobs: [ /* list of jobs addresses XXXwg */ ],\n previouslyWorkedJobs: this.ringBufferofJobs.buf, //Only discrete jobs\n rejectedJobs: this.rejectedJobs,\n };\n // workers should be part of the public compute group by default\n if (!booley(dcpConfig.worker.leavePublicGroup) && !booley(this.options.leavePublicGroup) && (!requestPayload.localExec))\n requestPayload.workerComputeGroups.push(constants.computeGroups.public);\n debugging('computeGroups') && console.log(`Fetching work for ${requestPayload.workerComputeGroups.length} ComputeGroups: `, requestPayload.workerComputeGroups);\n debugging('supervisor') && console.log(`fetchTask wants ${numCores} slice(s), unallocatedSpace ${this.unallocatedSpace}, queuedSlices ${this.queuedSlices.length}`);\n try {\n debugging('requestTask') && console.debug('fetchTask: requestPayload', requestPayload);\n\n let result = await this.taskDistributorConnection.send('requestTask', requestPayload).catch((error) => {\n debugging('supervisor') && console.error(`Unable to request task from scheduler: ${error}. Will try again on a new connection.`);\n this.taskDistributorConnection.close(error, true);\n throw error; /* caught below */\n });\n let responsePayload = result.payload; \n\n if (!result.success) {\n debugging() && console.log('Task fetch failure; request=', requestPayload);\n debugging() && console.log('Task fetch failure; response=', result.payload);\n throw new DCPError('Unable to fetch task for worker', responsePayload);\n }\n\n const sliceCount = responsePayload.body.task.length || 0;\n\n /**\n * The fetchedTask event fires when the supervisor has finished trying to\n * fetch work from the scheduler (task-manager). 
The data emitted is the\n * number of new slices to work on in the fetched task.\n *\n * @event Supervisor#fetchedTask\n * @type {number}\n */\n this.emit('fetchedTask', sliceCount);\n\n if (sliceCount < 1) {\n return Promise.resolve();\n }\n\n /**\n * DCP-1698 Send auth msg with tasks to worker, then validate authority of worker to send slice info back to scheduler.\n * payload structure: { owner: this.address, signature: signature, auth: messageLightWeight, body: messageBody };\n * messageLightWeight: { workerId: worker, jobSlices, schedulerId, jobCommissions }\n * messageBody: { newJobs: await getNewJobsForTask(dbScheduler, task, request), task }\n */\n const { body, ...authorizationMessage } = responsePayload;\n const { newJobs, task } = body;\n assert(newJobs); // It should not be possible to have !newJobs -- we throw on !success.\n \n /*\n * Ensure all jobs received from the scheduler are:\n * 1. If we have specified specific jobs the worker may work on, the received jobs are in the specified job list\n * 2. If we are in localExec, at most 1 unique job type was received (since localExec workers are designated for only\n * one job)\n * If the received jobs are not within these parameters, stop the worker since the scheduler cannot be trusted at that point.\n */\n if ((this.options.jobAddresses.length && !Object.keys(newJobs).every((ele) => this.options.jobAddresses.includes(ele)))\n || (this.options.localExec && Object.keys(newJobs).length > 1))\n {\n console.error(\"Worker received slices it shouldn't have. Rejecting the work and stopping.\");\n process.exit(1);\n }\n\n debugging() && console.log(`Supervisor.fetchTask: task: ${task.length}/${numCores}, jobs: ${Object.keys(newJobs).length}, authSlices: ${compressJobMap(authorizationMessage.auth.authSlices, true /* skipFirst*/, this.addressTruncationLength /* digits*/)}`);\n // Delete all jobs in the supervisorCache that are not represented in this task,\n // or in this.queuedSlices, or there is no sandbox assigned to these jobs.\n this.cleanJobCache(newJobs);\n\n for (const jobAddress of Object.keys(newJobs))\n if (!this.cache.cache.job[jobAddress])\n this.cache.store('job', jobAddress, newJobs[jobAddress]);\n\n // Memoize authMessage onto the Slice object, this should\n // follow it for its entire life in the worker.\n const tmpQueuedSlices = task.map(taskElement => new Slice(taskElement, authorizationMessage));\n\n // Make sure old stuff is up front.\n // matchSlicesWithSandboxes dequeues this.queuedSlices as follows:\n // slicesToMatch = this.queuedSlices.slice(0, numCores);\n this.slices.push(...tmpQueuedSlices);\n this.queuedSlices.push(...tmpQueuedSlices);\n \n // Populating the ring buffer based on job's discrete property \n Object.values(newJobs).forEach(job => {\n if(job.requirements.discrete && this.ringBufferofJobs.find(element => element === job.address) === undefined) {\n this.ringBufferofJobs.push(job.address);\n }\n });\n \n } catch (error) {\n this.emit('fetchTaskFailed', error);\n debugging('supervisor') && console.debug(`Supervisor.fetchTask failed!: error: ${error}`);\n }\n }\n\n /**\n * For each slice in this.queuedSlices, match with a sandbox in the following order:\n * 1. Try to find an already assigned sandbox in this.assignedSandboxes for the slice's job.\n * 2. Find a ready sandbox in this.readiedSandboxes that is unassigned.\n * 3. 
Ready a new sandbox and use that.\n *\n * Take great care in assuring sandboxes and slices are uniquely associated, viz.,\n * a given slice cannot be associated with multiple sandboxes and a given sandbox cannot be associated with multiple slices.\n * The lack of such uniqueness has been the root cause of several difficult bugs.\n *\n * Note: When a sandbox is paired with a slice, execution is pending and sandbox.allocated=true and\n * sandbox.slice=slice and sandbox.jobAddress=slice.jobAddress. This is what 'allocated' means.\n * Immediately upon the exit of sandbox.work, sandbox.allocated=false is set and if an exception\n * wasn't thrown, the paired slice is placed in this.assignedSandboxes.\n * Thus from the pov of supervisor, this.allocatedSandboxes is deterministic and this.workingSandboxes is not.\n * Please try to not use this.workingSandboxes. It is deprecated.\n *\n * The input is numCores, this,queuedSlices, this.assignedSandboxes and this.readiedSandboxes.\n * If there are not enough sandboxes, new readied sandboxes will be created using\n * await this.readySandboxes(...)\n * And it is this await boundary that has caused many bugs.\n * We try not to make assumptions about non-local state across the await boundary.\n *\n * @param {number} numCores - The number of available sandbox slots.\n * @param {boolean} [throwExceptions=true] - Whether to throw exceptions when checking for sanity.\n * @returns {Promise<SandboxSlice[]>} Returns SandboxSlice[], may have length zero.\n */\n async matchSlicesWithSandboxes (numCores, throwExceptions = true) {\n\n const sandboxSlices = [];\n if (this.queuedSlices.length === 0 || this.matching || numCores <= 0) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.matchSlicesWithSandboxes: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return sandboxSlices;\n }\n\n //\n // Oversubscription mitigation.\n // Update when there are less available sandbox slots than numCores.\n // We cannot use this.unallocatedSpace here because its value is artificially low or zero, because in\n // this.distributedQueuedSlices we use the pseudo-mutex trick: this.acquire(howManySandboxSlotsToReserve)/this.release().\n // Note: Do not use this.numberOfCoresReserved outside of a function locked with this.acquire(howManySandboxSlotsToReserve) .\n const checkNumCores = this.numberOfCoresReserved; // # of locked sandbox slots.\n if (numCores > checkNumCores) numCores = checkNumCores;\n if (numCores <= 0) return sandboxSlices;\n\n // Don't ask for more than we have.\n if (numCores > this.queuedSlices.length)\n numCores = this.queuedSlices.length;\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: numCores ${numCores}, queued slices ${this.queuedSlices.length}: assigned ${this.assignedSandboxes.length}, readied ${this.readiedSandboxes.length}, unallocated ${this.unallocatedSpace}, # of sandboxes: ${this.sandboxes.length}`);\n\n if (debugging('supervisor')) {\n dumpSlicesIfNotUnique(this.queuedSlices, 'Warning: this.queuedSlices slices are not unique -- this is ok when slice is rescheduled.');\n dumpSandboxesIfNotUnique(this.readiedSandboxes, 'Warning: this.readiedSandboxes sandboxes are not unique!');\n dumpSandboxesIfNotUnique(this.assignedSandboxes, 'Warning: this.assignedSandboxes sandboxes are not unique!');\n }\n\n // Three functions to 
validate slice and sandbox.\n function checkSlice(slice, checkAllocated=true) {\n if (!slice.isUnassigned) throw new DCPError(`Slice must be unassigned: ${slice.identifier}`);\n if (checkAllocated && slice.allocated) throw new DCPError(`Slice must not already be allocated: ${slice.identifier}`);\n }\n function checkSandbox(sandbox, isAssigned) {\n if (sandbox.allocated) throw new DCPError(`Assigned sandbox must not be already allocated: ${sandbox.identifier}`);\n if (isAssigned && !sandbox.isAssigned) throw new DCPError(`Assigned sandbox is not marked as assigned: ${sandbox.identifier}`);\n if (!isAssigned && !sandbox.isReadyForAssign) throw new DCPError(`Readied sandbox is not marked as ready for assign: ${sandbox.identifier}`);\n }\n\n // Sanity checks.\n if (throwExceptions) {\n this.assignedSandboxes.forEach(sandbox => { checkSandbox(sandbox, true /* isAssigned*/); });\n this.readiedSandboxes.forEach(sandbox => { checkSandbox(sandbox, false /* isAssigned*/); });\n this.queuedSlices.forEach(slice => { checkSlice(slice); });\n } else {\n this.assignedSandboxes = this.assignedSandboxes.filter(sandbox => !sandbox.allocated && sandbox.isAssigned);\n this.readiedSandboxes = this.readiedSandboxes.filter(sandbox => !sandbox.allocated && sandbox.isReadyForAssign);\n this.queuedSlices = this.queuedSlices.filter(slice => !slice.allocated && slice.isUnassigned);\n }\n\n const sandboxKind = {\n assigned: 0,\n ready: 1,\n new: 2,\n };\n\n const ceci = this;\n /**\n * Auxiliary function to pair a sandbox with a slice and mark the sandbox as allocated.\n * An allocated sandbox is reserved and will not be released until the slice completes execution on the sandbox.\n *\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @param {number} kind\n */\n function pair(sandbox, slice, kind) {\n checkSandbox(sandbox, kind === sandboxKind.assigned);\n checkSlice(slice, kind === sandboxKind.assigned);\n slice.allocated = true;\n sandbox.allocated = true;\n sandbox.jobAddress = slice.jobAddress; // So we can know which jobs to not delete from this.cache .\n sandbox.slice = slice;\n sandboxSlices.push({ sandbox, slice });\n if (Supervisor.sliceTiming) slice['pairingDelta'] = Date.now();\n if (debugging('supervisor')) {\n let fragment = 'New readied';\n if (kind === sandboxKind.assigned) fragment = 'Assigned';\n else if (kind === sandboxKind.ready) fragment = 'Readied';\n console.log(`matchSlicesWithSandboxes.pair: ${fragment} sandbox matched ${ceci.dumpSandboxAndSlice(sandbox, slice)}`);\n }\n }\n\n // These three arrays are used to track/store slices and sandboxes,\n // so that when an exception occurs, the following arrays are restored:\n // this.queuedSlices, this.assignedSandboxes, this.realizedSandboxes.\n let slicesToMatch = [];\n let trackAssignedSandboxes = [];\n let trackReadiedSandboxes = [];\n try\n {\n this.matching = true;\n\n let assignedCounter = 0; // How many assigned sandboxes are being used.\n let readyCounter = 0; // How many sandboxes used from the existing this.readiedSandboxes.\n let newCounter = 0; // How many sandboxes that needed to be newly created.\n\n //\n // The Ideas:\n // 1) We match each slice with a sandbox. First we match with assigned sandboxes in the order\n // that they appear in this.queuedSlices. 
Then we match in-order with existing this.readiedSandboxes\n // Then we match in-order with new new readied sandboxes created through\n // await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n // This allows us to try different orderings of execution of slices. E.g. Wes suggested\n // trying to execute slices from different jobs with maximal job diversity -- specifically\n // if there are 3 jobs j1,j2,j3, with slices s11, s12 from j1, s21, s22, s23 from j2 and\n // s31, s32 from j3, then we try to schedule, in order s11, s21, s31, s12, s22, s32, s23.\n //\n // 2) Before matching slices with sandboxes, we allocate available assigned and readied sandboxes\n // and if more are needed then we create and allocate new ones.\n //\n // 3) Finally we match slices with sandboxes and return an array of sandboxSlice pairs.\n //\n // Note: The ordering of sandboxSlices only partially corresponds to the order of this.queuedSlices.\n // It's easy to do. When pairing with assigned sandboxes, any slice in this.queuedSlices which doesn't\n // have an assigned sandbox, will add null to the sandboxSlices array. Then when pairing with readied sandboxes,\n // we fill-in the null entries in the sandboxSlices array.\n //\n /** XXXpfr @todo When it is needed, fix the ordering as described above. */\n\n // Get the slices that are being matched.\n slicesToMatch = this.queuedSlices.slice(0, numCores);\n this.queuedSlices = this.queuedSlices.slice(numCores);\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: slicesToMatch ${this.dumpSlices(slicesToMatch)}`);\n\n // Create object map: jobAddress -> sandboxes with sandboxes.jobAddress === jobAddress .\n const jobSandboxMap = toJobMap(this.assignedSandboxes, sandbox => sandbox);\n \n // Create array to hold slices which do not have assigned sandboxes.\n // These slices will need to be paired with existing and possibly new readied sandboxes.\n // Specifically, the sandboxes from existing this.readiedSandboxes and new sandboxes\n // created through await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n const slicesThatNeedSandboxes = [];\n\n // Pair assigned sandboxes with slices.\n for (const slice of slicesToMatch) {\n const assigned = jobSandboxMap[slice.jobAddress];\n if (assigned && assigned.length > 0) {\n // Pair.\n const sandbox = assigned.pop();\n pair(sandbox, slice, sandboxKind.assigned);\n this.removeElement(this.assignedSandboxes, sandbox);\n // Track.\n trackAssignedSandboxes.push(sandbox);\n assignedCounter++;\n } else {\n // Don't lose track of these slices.\n slice.allocated = true;\n slicesThatNeedSandboxes.push(slice);\n }\n }\n\n // Pair readied sandboxes with slices.\n readyCounter = Math.min(slicesThatNeedSandboxes.length, this.readiedSandboxes.length);\n newCounter = slicesThatNeedSandboxes.length - readyCounter;\n // Track.\n trackReadiedSandboxes = this.readiedSandboxes.slice(0, readyCounter);\n this.readiedSandboxes = this.readiedSandboxes.slice(readyCounter);\n for (const sandbox of trackReadiedSandboxes) {\n // Pair.\n const slice = slicesThatNeedSandboxes.pop();\n pair(sandbox, slice, sandboxKind.ready);\n }\n \n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: assignedCounter ${assignedCounter}, readyCounter ${readyCounter}, newCounter ${newCounter}, numCores ${numCores}`)\n\n // Validate algorithm consistency.\n if (Supervisor.debugBuild && assignedCounter + readyCounter + newCounter !== numCores) {\n // Structured assert.\n throw new 
DCPError(`matchSlicesWithSandboxes: Algorithm is corrupt ${assignedCounter} + ${readyCounter} + ${newCounter} !== ${numCores}`);\n }\n\n // Here is an await boundary.\n // Accessing non-local data across an await boundary may result in the unexpected.\n\n // Create new readied sandboxes to associate with slicesThatNeedSandboxes.\n if (newCounter > 0) {\n // When allocateLocalSandboxes is true, this.readySandboxes does not place the new sandboxes\n // on this.readiedSandboxes. Hence the new sandboxes are private and nobody else can see them.\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: creating ${newCounter} new sandboxes, # of sandboxes ${this.sandboxes.length}`);\n const readied = await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n // Track.\n trackReadiedSandboxes.push(...readied);\n\n for (const sandbox of readied) {\n assert(slicesThatNeedSandboxes.length > 0);\n // Pair\n const slice = slicesThatNeedSandboxes.pop();\n pair(sandbox, slice, sandboxKind.new);\n }\n \n // Put back any extras. There should not be any unless readySandboxes returned less than asked for.\n if (slicesThatNeedSandboxes.length > 0) {\n slicesThatNeedSandboxes.forEach(slice => {\n slice.allocated = false;\n this.queuedSlices.push(slice);\n });\n }\n }\n\n if ( false || debugging()) {\n console.log(`matchSlicesWithSandboxes: Matches: ${ this.dumpSandboxSlices(sandboxSlices) }`);\n this.dumpSandboxSlicesIfNotUnique(sandboxSlices, 'Warning: sandboxSlices; { sandbox, slice } pairs are not unique!');\n }\n } catch (e) {\n // Clear allocations.\n slicesToMatch.forEach(slice => { slice.allocated = false; });\n trackAssignedSandboxes.forEach(sandbox => { sandbox.allocated = false; sandbox.slice = null; });\n trackReadiedSandboxes.forEach(sandbox => { sandbox.allocated = false; sandbox.slice = null; sandbox.jobAddress = null; });\n \n // Filter out redundancies -- there shouldn't be any...\n slicesToMatch = slicesToMatch.filter(slice => this.queuedSlices.indexOf(slice) === -1);\n trackAssignedSandboxes = trackAssignedSandboxes.filter(sb => this.assignedSandboxes.indexOf(sb) === -1);\n trackReadiedSandboxes = trackReadiedSandboxes.filter(sb => this.readiedSandboxes.indexOf(sb) === -1);\n\n // Sanity checks.\n slicesToMatch.forEach(slice => { checkSlice(slice) });\n trackAssignedSandboxes.forEach(sandbox => { checkSandbox(sandbox, true /* isAssigned*/); });\n trackReadiedSandboxes.forEach(sandbox => { checkSandbox(sandbox, false /* isAssigned*/); });\n\n // Restore arrays.\n this.queuedSlices.push(...slicesToMatch);\n this.assignedSandboxes.push(...trackAssignedSandboxes);\n this.readiedSandboxes.push(...trackReadiedSandboxes);\n \n console.error('Error in matchSlicesWithSandboxes: Attempting to recover slices and sandboxes.', e);\n return [];\n } finally {\n this.matching = false;\n }\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: allocated ${sandboxSlices.length} sandboxes, queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, # of sandboxes: ${this.sandboxes.length}.`);\n\n return sandboxSlices;\n }\n\n disassociateSandboxAndSlice(sandbox, slice) {\n this.returnSandbox(sandbox);\n sandbox.slice = null;\n this.returnSlice(slice, 'EUNCAUGHT');\n }\n\n /**\n * This method will call this.startSandboxWork(sandbox, slice) for each element { sandbox, slice }\n * of the array returned by this.matchSlicesWithSandboxes(availableSandboxes) until all allocated sandboxes\n 
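/*
 * The pairing order implemented in matchSlicesWithSandboxes above, reduced to a
 * synchronous triage with no allocation flags or error recovery (names assumed):
 * a slice first takes a sandbox already assigned to its job, then any readied
 * sandbox, and otherwise waits for a newly created one.
 */
function triageSlices(slices, assignedByJob, readiedSandboxes) {
  const pairs = [];
  const needNewSandbox = [];
  for (const slice of slices) {
    const assigned = assignedByJob[slice.jobAddress];
    if (assigned && assigned.length > 0) {
      pairs.push({ sandbox: assigned.pop(), slice });        // tier 1: same-job sandbox
    } else if (readiedSandboxes.length > 0) {
      pairs.push({ sandbox: readiedSandboxes.pop(), slice }); // tier 2: readied sandbox
    } else {
      needNewSandbox.push(slice);                             // tier 3: create a sandbox
    }
  }
  return { pairs, needNewSandbox };
}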
* are working. It is possible for a sandbox to interleave with calling distributeQueuedSlices and leave a sandbox\n * that is not working. Moreover, this.queuedSlices may be exhausted before all sandboxes are working.\n * @returns {Promise<void>}\n */\n async distributeQueuedSlices () {\n const numCores = this.unallocatedSpace;\n\n // If there's nothing there, or we're reentering, bail out.\n if (this.queuedSlices.length === 0 || numCores <= 0 || this.matching) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.distributeQueuedSlices: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return Promise.resolve();\n }\n\n //\n // Use the pseudo-mutex to prevent uncontrolled interleaving with fetchTask,\n // matchSlicesWithSandboxes and distributeQueuedSlices\n let sandboxSlices;\n this.acquire(numCores);\n try {\n sandboxSlices = await this.matchSlicesWithSandboxes(numCores);\n } finally {\n this.release();\n }\n\n debugging('supervisor') && console.log(`distributeQueuedSlices: ${sandboxSlices.length} sandboxSlices ${this.dumpSandboxSlices(sandboxSlices)}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n\n for (let sandboxSlice of sandboxSlices) {\n\n const { sandbox, slice } = sandboxSlice;\n try {\n if (sandbox.isReadyForAssign) {\n try {\n let timeoutMs = Math.floor(Math.min(+Supervisor.lastAssignFailTimerMs || 0, 10 * 60 * 1000 /* 10m */));\n await a$sleepMs(timeoutMs);\n await this.assignJobToSandbox(sandbox, slice.jobAddress);\n } catch (e) {\n console.error(`Supervisor.distributeQueuedSlices: Could not assign slice ${slice.identifier} to sandbox ${sandbox.identifier}.`);\n if (Supervisor.debugBuild) console.error(`...exception`, e);\n Supervisor.lastAssignFailTimerMs = Supervisor.lastAssignFailTimerMs ? +Supervisor.lastAssignFailTimerMs * 1.25 : Math.random() * 200;\n this.disassociateSandboxAndSlice(sandbox, slice);\n continue;\n }\n }\n\n if (!Supervisor.lastAssignFailTimerMs)\n Supervisor.lastAssignFailTimerMs = Math.random() * 200;\n this.startSandboxWork(sandbox, slice);\n Supervisor.lastAssignFailTimerMs = false;\n\n } catch (e) {\n // We should never get here.\n console.error(`Supervisor.distributeQueuedSlices: Failed to execute slice ${slice.identifier} in sandbox ${sandbox.identifier}.`);\n if (Supervisor.debugBuild) console.error('...exception', e);\n this.disassociateSandboxAndSlice(sandbox, slice);\n }\n }\n }\n\n /**\n *\n * @param {Sandbox} sandbox\n * @param {opaqueId} jobAddress\n * @returns {Promise<void>}\n */\n assignJobToSandbox(sandbox, jobAddress) {\n // Any error will be caught in distributeQueuedSlices.\n return sandbox.assign(jobAddress);\n }\n\n /**\n * Handles reassigning or returning a slice that was rejected by a sandbox.\n * \n * The sandbox will be terminated by this.returnSandbox in finalizeSandboxAndSlice. In this case,\n * if the slice does not have a rejected property already, reassign the slice to a new sandbox\n * and add a rejected property to the slice to indicate it has already rejected once, then set slice = null\n * in the return SandboxSlice so that finalizeSandboxAndSlice won't return slice to scheduler.\n * \n * If the slice rejects with a reason, or has a rejected time stamp (ie. 
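/*
 * distributeQueuedSlices brackets the matching step with this.acquire(numCores) and
 * this.release(), the "pseudo-mutex" the comments refer to: it reserves sandbox
 * slots so a concurrent fetch/match pass cannot double-count unallocated space.
 * A minimal reservation counter with that shape (assumed; the bundle's own
 * acquire/release are not shown in this diff):
 */
class SlotReservation {
  constructor() { this.numberOfCoresReserved = 0; }
  acquire(n) { this.numberOfCoresReserved += n; } // reserve n slots for this pass
  release() { this.numberOfCoresReserved = 0; }   // drop the reservation afterwards
}

// Usage mirrors the pattern above: reserve, match, always release.
//   reservation.acquire(numCores);
//   try { /* match slices with sandboxes */ } finally { reservation.release(); }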
has been rejected once already)\n * then return the slice and all slices from the job to the scheduler and\n * terminate all sandboxes with that jobAddress.\n * @param {Sandbox} sandbox \n * @param {Slice} slice\n * @returns {Promise<SandboxSlice>}\n */\n async handleWorkReject(sandbox, slice, rejectReason) {\n if (!this.rejectedJobReasons[slice.jobAddress])\n this.rejectedJobReasons[slice.jobAddress] = [];\n\n this.rejectedJobReasons[slice.jobAddress].push(rejectReason); // memoize reasons\n\n // First time rejecting without a reason. Try assigning slice to a new sandbox.\n if (rejectReason === 'false' && !slice.rejected) {\n // Set rejected.\n slice.rejected = Date.now();\n // Schedule the slice for execution.\n this.scheduleSlice(slice, true /* placeInTheFrontOfTheQueue*/, false /* noDuplicateExecution*/);\n \n // Null out slice so this.returnSlice will not be called in finalizeSandboxAndSlice.\n // But we still want this.returnSandbox to terminate the sandbox.\n slice = null;\n } else { // Slice has a reason OR rejected without a reason already and got stamped.\n \n // Purge all slices and sandboxes associated with slice.jobAddress .\n this.purgeAllWork(slice.jobAddress);\n // Clear jobAddress from this.cache .\n this.cleanJobCache();\n\n // Add to array of rejected jobs.\n let rejectedJob = {\n address: slice.jobAddress,\n reasons: this.rejectedJobReasons[slice.jobAddress],\n }\n this.rejectedJobs.push(rejectedJob);\n\n // Tell everyone all about it, when allowed.\n if (dcpConfig.worker.allowConsoleAccess || Supervisor.debugBuild)\n {\n if (slice.rejected)\n console.warn(`Supervisor.handleWorkReject: The slice ${slice.identifier} was rejected twice.`);\n else\n console.warn(`Supervisor.handleWorkReject: The slice ${slice.identifier} was rejected with reason ${rejectReason}.`);\n console.warn(' All slices with the same jobAddress returned to the scheduler.');\n console.warn(' All sandboxes with the same jobAddress are terminated.');\n }\n //\n // this.purgeAllWork(jobAddress) terminates all sandboxes with jobAddress,\n // and it also returns to scheduler all slices with jobAddress.\n // Therefore null out slice and sandbox so finalizeSandboxAndSlice doesn't do anything.\n // \n sandbox = null;\n slice = null;\n }\n return { sandbox, slice };\n }\n\n /**\n * Schedule the slice to be executed.\n * If slice is already executing and noDuplicateExecution is true, return the slice with reason.\n * @param {Slice} slice\n * @param {boolean} [placeInTheFrontOfTheQueue=false]\n * @param {boolean} [noDuplicateExecution=true]\n * @param {string} [reason]\n */\n scheduleSlice(slice, placeInTheFrontOfTheQueue = false, noDuplicateExecution = true, reason) {\n // When noDuplicateExecution, if slice is already executing, do nothing.\n let workingSlices = [];\n if (noDuplicateExecution)\n workingSlices = this.allocatedSlices;\n\n if (!workingSlices.indexOf(slice)) {\n // Reset slice state to allow execution.\n slice.status = SLICE_STATUS_UNASSIGNED;\n slice.allocated = false;\n // Enqueue in the to-be-executed queue.\n if (placeInTheFrontOfTheQueue) this.queuedSlices.unshift(slice);\n else this.queuedSlices.push(slice);\n }\n }\n\n /**\n * Purge all slices and sandboxes with this jobAddress.\n * @param {address} jobAddress\n * @param {boolean} [onlyPurgeQueuedAndAllocated=false]\n */\n purgeAllWork(jobAddress, onlyPurgeQueuedAndAllocated = false) {\n // Purge all slices and sandboxes associated with jobAddress .\n const deadSandboxes = this.sandboxes.filter(sandbox => sandbox.jobAddress === 
jobAddress);\n\n if (deadSandboxes.length > 0) {\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): sandboxes purged ${deadSandboxes.map(s => s.id)}, # of sandboxes ${this.sandboxes.length}`);\n deadSandboxes.forEach(sandbox => this.returnSandbox(sandbox));\n }\n\n let deadSlices;\n if (onlyPurgeQueuedAndAllocated) {\n deadSlices = this.queuedSlices.filter(slice => slice.jobAddress === jobAddress);\n if (deadSlices.length > 0 || this.allocatedSlices.length > 0)\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): dead queuedSlices ${deadSlices.map(s => s.sliceNumber)}, dead allocatedSlices ${this.allocatedSlices.map(s => s.sliceNumber)}`);\n deadSlices.push(...this.allocatedSlices);\n } else {\n deadSlices = this.slices.filter(slice => slice.jobAddress === jobAddress);\n }\n\n if (deadSlices.length > 0) {\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): slices purged ${deadSlices.map(s => s.sliceNumber)}, # of sandboxes ${this.sandboxes.length}`);\n this.returnSlices(deadSlices);\n this.removeQueuedSlices(deadSlices);\n }\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): Finished: slices ${this.slices.length}, queuedSlices ${this.queuedSlices.length}, assigned ${this.assignedSandboxes.length}, readied ${this.readiedSandboxes.length}, # of sandboxes ${this.sandboxes.length}`);\n }\n\n /**\n * Gives a slice to a sandbox which begins working. Handles collecting\n * the slice result (complete/fail) from the sandbox and submitting the result to the scheduler.\n * It will also return the sandbox to @this.returnSandbox when completed so the sandbox can be re-assigned.\n *\n * @param {Sandbox} sandbox - the sandbox to give the slice\n * @param {Slice} slice - the slice to distribute\n * @returns {Promise<void>} Promise returned from sandbox.run\n */\n async startSandboxWork (sandbox, slice) {\n var startDelayMs, reason = 'unknown';\n\n try {\n slice.markAsWorking();\n } catch (e) {\n // This will occur when the same slice is distributed twice.\n // It is normal because two sandboxes could finish at the same time and be assigned the\n // same slice before the slice is marked as working.\n debugging() && console.debug('startSandboxWork: slice.markAsWorking exception:', e);\n return Promise.resolve();\n }\n\n // sandbox.requiresGPU = slice.requiresGPU;\n // if (sandbox.requiresGPU) {\n // this.GPUsAssigned++;\n // }\n\n if (Supervisor.startSandboxWork_beenCalled)\n startDelayMs = 1000 * (tuning.minSandboxStartDelay + (Math.random() * (tuning.maxSandboxStartDelay - tuning.minSandboxStartDelay)));\n else {\n startDelayMs = 1000 * tuning.minSandboxStartDelay;\n Supervisor.startSandboxWork_beenCalled = true;\n }\n\n try {\n debugging() && console.log(`startSandboxWork: Started ${this.dumpStatefulSandboxAndSlice(sandbox, slice)}, total sandbox count: ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n if (Supervisor.sliceTiming) {\n slice['pairingDelta'] = Date.now() - slice['pairingDelta'];\n slice['executionDelta'] = Date.now();\n }\n let result;\n try {\n result = await sandbox.work(slice, startDelayMs);\n } finally {\n sandbox.allocated = false;\n slice.allocated = false;\n }\n if (Supervisor.sliceTiming) {\n slice['executionDelta'] = Date.now() - slice['executionDelta'];\n slice['resultDelta'] = Date.now();\n }\n slice.collectResult(result, true);\n // In watchdog, all sandboxes in working state, 
have their slice status sent to result submitter.\n // However, this can happen after the sandbox/slice has already sent results\n // to result submitter, in which case, the activeSlices table has already removed the row\n // corresponding to slice and hence is incapable of updating status.\n sandbox.changeWorkingToAssigned();\n this.assignedSandboxes.push(sandbox);\n debugging() && console.log(`startSandboxWork: Finished ${this.dumpStatefulSandboxAndSlice(sandbox, slice)}, total sandbox count: ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n } catch(error) {\n let logLevel;\n\n if (error instanceof SandboxError) {\n logLevel = 'warn';\n // The message and stack properties of error objects are not enumerable,\n // so they have to be copied into a plain object this way\n const errorResult = Object.getOwnPropertyNames(error).reduce((o, p) => {\n o[p] = error[p]; return o;\n }, { message: 'Unexpected worker error' });\n slice.collectResult(errorResult, false);\n } else {\n logLevel = 'error';\n // This error was unrelated to the work being done, so just return the slice in the finally block.\n // For extra safety the sandbox is terminated.\n slice.result = null;\n slice.status = SLICE_STATUS_FAILED; /** XXXpfr @todo terminating sandbox? */\n }\n\n let errorString;\n switch (error.errorCode) {\n case 'ENOPROGRESS':\n reason = 'ENOPROGRESS';\n errorString = 'Supervisor.startSandboxWork - No progress error in sandbox.\\n';\n break;\n case 'ESLICETOOSLOW':\n reason = 'ESLICETOOSLOW';\n errorString = 'Supervisor.startSandboxWork - Slice too slow error in sandbox.\\n';\n break;\n case 'EUNCAUGHT':\n reason = 'EUNCAUGHT';\n errorString = `Supervisor.startSandboxWork - Uncaught error in sandbox ${error.message}.\\n`;\n break;\n case 'EFETCH':\n // reason = 'EFETCH'; The status.js processing cannot handle 'EFETCH'\n reason = 'unknown';\n errorString = `Supervisor.startSandboxWork - Could not fetch data: ${error.message}.\\n`;\n break;\n }\n \n const { getenv } = __webpack_require__(/*! ../../common/dcp-env */ \"./src/common/dcp-env.js\");\n // Always display max info under debug builds, otherwise maximal error\n // messages are displayed to the worker, only if both worker and client agree.\n let workerConsole = sandbox.supervisorCache.cache.job[slice.jobAddress].workerConsole;\n const displayMaxInfo = Boolean(getenv('DCP_SUPERVISOR_DEBUG_DISPLAY_MAX_INFO')) || (workerConsole && dcpConfig.worker.allowConsoleAccess);\n\n const errorObject = {\n jobAddress: slice.jobAddress.substr(0,10),\n sliceNumber: slice.sliceNumber,\n sandbox: sandbox.id,\n jobName: sandbox.public ? 
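/*
 * The SandboxError handling above relies on a small but easy-to-miss detail:
 * message and stack are non-enumerable on Error objects, so a spread or
 * JSON.stringify would silently drop them.  Copying every own property name keeps
 * them.  Minimal form of that conversion:
 */
function errorToPlainObject(error) {
  return Object.getOwnPropertyNames(error).reduce((obj, prop) => {
    obj[prop] = error[prop];
    return obj;
  }, {});
}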
sandbox.public.name : 'unnamed',\n };\n \n if (error.name === 'EWORKREJECT') {\n error.stack = 'Sandbox was terminated by work.reject()';\n const ss = await this.handleWorkReject(sandbox, slice, error.message);\n sandbox = ss.sandbox; slice = ss.slice;\n }\n\n if (!displayMaxInfo && error.errorCode === 'EUNCAUGHTERROR') {\n console[logLevel](`Supervisor.startSandboxWork - Uncaught error in sandbox, could not compute.\\n`, errorObject);\n } else if (!displayMaxInfo && error.errorCode === 'EFETCH_BAD_ORIGIN') {\n console[logLevel](`Supervisor.startSandboxWork - Could not fetch data: ${error.message}`);\n } else if (!displayMaxInfo && errorString) {\n console[logLevel](errorString, errorObject);\n } else if (!displayMaxInfo && error.name === 'EWORKREJECT') {\n console[logLevel](`Supervisor.startSandboxWork - Sandbox rejected work: ${error.message}`)\n } else {\n if (displayMaxInfo)\n errorObject.stack += '\\n --------------------\\n' + (error.stack.split('\\n').slice(1).join('\\n'));\n console[logLevel](`Supervisor.startSandboxWork - Sandbox failed: ${error.message}\\n`, errorObject);\n }\n } finally {\n await this.finalizeSandboxAndSlice(sandbox, slice, reason);\n }\n }\n\n /**\n * If slice && slice.result, then call await this.recordResult(slice) and this.returnSandbox(sandbox, slice) will have no effect.\n * If slice && !slice.result, then call this.returnSlice(slice, reason) and then this.returnSandbox(sandbox, slice) which terminates sandbox.\n * If !slice && sandbox, then terminate the sandbox with this.returnSandbox(sandbox, slice) .\n * If !slice && !sandbox, then do nothing.\n * @param {Sandbox} [sandbox]\n * @param {Slice} [slice]\n * @param {string} [reason]\n */\n async finalizeSandboxAndSlice(sandbox, slice, reason) {\n debugging('supervisor') && console.log(`finalizeSandboxAndSlice: sandbox ${sandbox ? sandbox.identifier : 'nade'}, slice ${slice ? slice.identifier : 'nade'}`);\n if (slice) {\n if (slice.result) await this.recordResult(slice);\n else this.returnSlice(slice, reason);\n }\n // It is possible that sandbox is already terminated\n // Because sandbox.allocated=false as soon as sandbox.work(...) 
completes.\n // But the await at or in finalizeSandboxAndSlice may allow pruneSandboxes to slither in.\n if (sandbox) this.returnSandbox(sandbox, slice, false /* verifySandboxIsNotTerminated*/);\n }\n\n /**\n * Terminates sandboxes and returns slices.\n * Sets the working flag to false, call @this.work to start working again.\n * \n * If forceTerminate is true: Terminates all sandboxes and returns all slices.\n * If forceTerminate is false: Terminates non-allocated sandboxes and returns queued slices.\n *\n * @param {boolean} [forceTerminate = true] - true if you want to stop the sandboxes from completing their current slice.\n * @returns {Promise<void>}\n */\n async stopWork (forceTerminate = true) {\n debugging('supervisor') && console.log('stopWork(${forceTerminate}): terminating sandboxes and returning slices to scheduler.');\n if (forceTerminate) {\n while (this.sandboxes.length) {\n this.returnSandbox(this.sandboxes[0], null, false);\n }\n\n await this.returnSlices(this.slices).then(() => {\n this.queuedSlices.length = 0;\n });\n } else {\n // Only terminate idle sandboxes and return only queued slices\n let idleSandboxes = this.sandboxes.filter(w => !w.allocated);\n for (const sandbox of idleSandboxes) {\n this.returnSandbox(sandbox, null, false /* verifySandboxIsNotTerminated*/);\n }\n\n await this.returnSlices(this.queuedSlices).then(() => {\n this.queuedSlices.length = 0;\n });\n\n await new Promise((resolve, reject) => {\n let sandboxesRemaining = this.allocatedSandboxes.length;\n if (sandboxesRemaining === 0)\n {\n resolve();\n }\n // Resolve and finish work once all sandboxes have finished submitting their results.\n this.on('submitFinished', () => {\n sandboxesRemaining--;\n if (sandboxesRemaining === 0)\n {\n console.log('All sandboxes empty, stopping worker and closing all connections');\n resolve();\n }\n });\n });\n }\n\n if (this.resultSubmitterConnection) {\n this.resultSubmitterConnection.off('close', this.openResultSubmitterConn);\n this.resultSubmitterConnection.close();\n this.resultSubmitterConnection = null;\n }\n\n if (this.taskDistributorConnection) {\n this.taskDistributorConnection.off('close', this.openTaskDistributorConn);\n this.taskDistributorConnection.close();\n this.taskDistributorConnection = null;\n }\n\n if (this.packageManagerConnection) {\n this.packageManagerConnection.off('close', this.openPackageManagerConn);\n this.packageManagerConnection.close();\n this.packageManagerConnection = null;\n }\n\n if (this.eventRouterConnection) {\n this.eventRouterConnection.off('close', this.openEventRouterConn);\n this.eventRouterConnection.close();\n this.eventRouterConnection = null;\n }\n\n this.emit('stop');\n }\n\n /**\n * Takes a slice and returns it to the scheduler to be redistributed.\n * Usually called when an exception is thrown by sandbox.work(slice, startDelayMs) .\n * Or when the supervisor tells it to forcibly stop working.\n *\n * @param {Slice} slice - The slice to return to the scheduler.\n * @param {string} [reason] - Optional reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @returns {Promise<*>} - Response from the scheduler.\n */\n returnSlice (slice, reason) {\n // When sliceNumber === 0 don't send a status message.\n if (slice.sliceNumber === 0) return Promise.resolve();\n \n debugging() && console.log(`Supervisor.returnSlice: Returning slice ${slice.identifier} with reason ${reason}.`);\n \n const payload = slice.getReturnMessagePayload(this.workerOpaqueId, reason);\n try\n {\n return 
this.resultSubmitterConnection.send('status', payload) /* resultSubmitterConnection can be null if worker is stopped */\n .then(response => {\n return response;\n }).catch(error => {\n debugging('supervisor') && console.error('Failed to return slice', {\n sliceNumber: slice.sliceNumber,\n jobAddress: slice.jobAddress,\n status: slice.status,\n error,\n }, 'Will try again on a new connection.');\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: payload });\n this.resultSubmitterConnection.close();\n });\n }\n catch (error)\n {\n debugging('supervisor') && console.error(`Failed to return slice ${slice.identifier}, no connection to result submitter:`, error);\n }\n }\n\n /**\n * Bulk-return multiple slices, possibly for assorted jobs.\n * Returns slices to the scheduler to be redistributed.\n * Called in the sandbox terminate handler and purgeAllWork(jobAddress)\n * and stopWork(forceTerminate).\n *\n * @param {Slice[]} slices - The slices to return to the scheduler.\n * @returns {Promise<void>} - Response from the scheduler.\n */\n async returnSlices(slices) {\n if (!slices || !slices.length) return Promise.resolve();\n \n const slicePayload = [];\n slices.forEach(slice => { addToReturnSlicePayload(slicePayload, slice); });\n this.removeSlices(slices);\n\n debugging('supervisor') && console.log(`Supervisor.returnSlices: Returning slices ${this.dumpSlices(slices)}.`);\n\n return this.resultSubmitterConnection.send('status', {\n worker: this.workerOpaqueId,\n slices: slicePayload,\n }).then(response => {\n return response;\n }).catch(error => {\n const errorInfo = slices.map(slice => slice.identifier);\n debugging('supervisor') && console.error('Failed to return slice(s)', { errorInfo, error }, 'Will try again on new connection.');\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: { worker: this.workerOpaqueId, slices: slicePayload } })\n this.resultSubmitterConnection.close();\n // Just in case the caller is expecing a DCP response\n return { success: false, payload: {} };\n });\n }\n\n /**\n * Submits the slice results to the scheduler, either to the\n * work submit or fail endpoints based on the slice status.\n * Then remove the slice from the @this.slices cache.\n *\n * @param {Slice} slice - The slice to submit.\n * @returns {Promise<void>}\n */\n async recordResult (slice) {\n // It is possible for slice.result to be undefined when there are upstream errors.\n if ( !(slice && slice.result))\n throw new Error(`recordResult: slice.result is undefined for slice ${slice.identifier}. 
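/*
 * returnSlice, returnSlices, and the watchdog progress report all share one
 * fallback: if the 'status' send fails, queue the payload for replay on the next
 * connection and close the current one.  A generic send-or-queue sketch of that
 * pattern (connection.send/close and the retry queue are assumed interfaces
 * mirroring the code above):
 */
async function sendOrQueue(connection, retryQueue, operation, data) {
  try {
    return await connection.send(operation, data);
  } catch (error) {
    retryQueue.push({ operation, data });   // replayed once a fresh connection is up
    connection.close();
    return { success: false, payload: {} }; // the failure shape callers expect
  }
}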
This is ok when there are upstream errors.`);\n\n debugging('supervisor') && console.log(`supervisor: recording result for slice ${slice.identifier}.`);\n\n const jobAddress = slice.jobAddress;\n const sliceNumber = slice.sliceNumber;\n const authorizationMessage = slice.getAuthorizationMessage();\n\n /* @see result-submitter::result for full message details */\n const metrics = { GPUTime: 0, CPUTime: 0, CPUDensity: 0, GPUDensity: 0, total: 0 };\n const payloadData = {\n slice: sliceNumber,\n job: jobAddress,\n worker: this.workerOpaqueId,\n paymentAddress: this.paymentAddress,\n metrics,\n authorizationMessage,\n }\n\n const timeReport = slice.timeReport;\n if (timeReport && timeReport.total > 0) {\n metrics.GPUTime = timeReport.webGL;\n metrics.CPUTime = timeReport.CPU;\n metrics.CPUDensity = metrics.CPUTime / timeReport.total;\n metrics.GPUDensity = metrics.GPUTime / timeReport.total;\n metrics.total = timeReport.total;\n metrics.CPUTime = 1 + Math.floor(metrics.CPUTime);\n if (metrics.GPUTime > 0)\n metrics.GPUTime = 1 + Math.floor(metrics.GPUTime);\n }\n \n this.emit('submittingResult');\n\n if (!slice.isFinished)\n throw new Error('Cannot record result for slice that is not finished');\n\n if (slice.resultStorageType === 'pattern') { /* This is a remote-storage slice. */\n const remoteResult = await this.sendResultToRemote(slice);\n payloadData.result = encodeDataURI(JSON.stringify(remoteResult));\n } else {\n payloadData.result = encodeDataURI(slice.result.result); /* XXXwg - result.result is awful */\n }\n debugging('supervisor') && console.log('Supervisor.recordResult: payloadData.result', payloadData.result.slice(0, 512));\n\n try {\n if (slice.completed) {\n\n /* work function returned a result */\n let resp = await this.resultSubmitterConnection.send(\n 'result',\n payloadData,\n )\n \n if (!resp.success) {\n if (resp.payload && resp.payload.code === 'DCPS-01002') { /* result submitter cannot connect to event router; try again */\n try {\n resp = await this.resendResult(payloadData)\n } catch (error) {\n debugging('supervisor') && console.error(`supervisor - failed to submit result for job ${jobAddress} after ${payloadData.sendRetries} attempts`)\n throw error;\n }\n }\n else\n throw new Error(`failed to submit result for slice ${slice.sliceNumber} of job ${jobAddress}`);\n }\n\n if (false) {}\n\n const receipt = {\n accepted: true,\n payment: resp.payload.slicePaymentAmount,\n };\n this.emit('submittedResult', resp.payload);\n this.emit('dccCredit', receipt);\n } else {\n /* slice did not complete for some reason */\n \n // If the slice from a job never completes and the job address exists in the ringBufferofJobs, \n // then we remove it to allow for another slice (from the same job) to be obtained by fetchTask\n this.ringBufferofJobs.buf = this.ringBufferofJobs.filter(element => element !== jobAddress);\n \n await this.returnSlice(slice);\n }\n } catch(error) {\n console.info(`1014: Failed to submit results for slice ${payloadData.slice} of job ${payloadData.job}`, error);\n this.emit('submitSliceFailed', error);\n } finally {\n this.emit('submitFinished');\n // Remove the slice from the slices array.\n this.removeSlice(slice);\n if (Supervisor.sliceTiming) {\n slice['resultDelta'] = Date.now() - slice['resultDelta'];\n console.log(`recordResult(${slice['pairingDelta']}, ${slice['executionDelta']}, ${slice['resultDelta']}): Completed slice ${slice.identifier}.`);\n } else\n debugging('supervisor') && console.log(`recordResult: Completed slice ${slice.identifier}.`);\n }\n 
}\n\n /**\n * Send a work function's result to a server that speaks our DCP Remote Data Server protocol.\n * The data server dcp-rds is been implemented in https://gitlab.com/Distributed-Compute-Protocol/dcp-rds .\n *\n * @param {Slice} slice - Slice object whose result we are sending.\n * @returns {Promise<object>} - Object of the form { success: true, href: 'http://127.0.0.1:3521/methods/download/jobs/34/result/10' } .\n * @throws When HTTP status not in the 2xx range.\n */\n async sendResultToRemote(slice) {\n const postParams = {\n ...slice.resultStorageParams\n };\n \n const patternParams = {\n slice: slice.sliceNUmber,\n job: slice.jobAddress\n };\n \n /**\n * @todo Future Work: we need to pass the pattern parameters with the job details into the supervisor. \n * We do not have all the information (necessarily) to calculate them from here. /wg jan 2022\n */\n const sliceResultUri = makeValueURI('pattern', slice.resultStorageDetails, patternParams)\n\n debugging() && console.log('sendResultToRemote sliceResultUri: ', sliceResultUri);\n \n if (this.makeSafeOriginList('sendResults').indexOf(sliceResultUri.origin) === -1)\n throw new Error(`Invalid origin for remote result storage: '${sliceResultUri.origin}'`);\n\n postParams.element = slice.sliceNumber;\n postParams.contentType = 'application/json'; // Currently data will be outputed as a JSON object, @todo: Support file upload.\n\n debugging() && console.log('sendResultToRemote: postParams: ', postParams);\n\n let result = slice.result.result;\n if (result) {\n postParams.content = JSON.stringify(result);\n } else {\n postParams.error = JSON.stringify(slice.error);\n }\n\n debugging('supervisor') && console.log('sendResultToRemote: content: ', (result ? postParams.content : postParams.error).slice(0, 512));\n\n //\n // Notes:\n // 1) In recordResults the response from justFetch is JSON serialized and encodeDataURI is called.\n // payloadData.result = await this.sendResultToRemote(slice);\n // payloadData.result = encodeDataURI(JSON.stringify(payloadData.result));\n // 2) We do further processing after the call to sendResultToRemote in recordResult, because\n // if we did it here there would be a perf hit. When the return value is a promise, it gets\n // folded into sendResultToRemote's main promise. If justFetch's promise wasn't a return value then\n // justFetch would be separately added to the micro-task-queue.\n return await justFetch(sliceResultUri, 'JSON', 'POST', false, postParams);\n }\n}\n\n/**\n * Sandbox has had an error which is not from the work function: kill it\n * and try to redo the slice.\n */\nfunction handleSandboxError(supervisor, sandbox, error) {\n const slice = sandbox.slice;\n\n slice.sandboxErrorCount = (slice.sandboxErrorCount || 0) + 1;\n sandbox.slice = null;\n supervisor.returnSandbox(sandbox); /* terminate the sandbox */\n slice.status = SLICE_STATUS_UNASSIGNED; /* ToT */\n slice.allocated = false;\n console.warn(`Supervisor.handleSandboxError: Sandbox ${sandbox.identifier}...(${sandbox.public.name}/${slice.sandboxErrorCount}) with slice ${slice.identifier} had error.`, error);\n\n if (slice.sandboxErrorCount < dcpConfig.worker.maxSandboxErrorsPerSlice)\n supervisor.queuedSlices.push(slice);\n else {\n slice.error = error;\n supervisor.returnSlice(slice);\n }\n}\n\n/**\n * Add a slice to the slice payload being built. 
If a sliceList already exists for the\n * job-status-authMessage tuple, then the slice will be added to that, otherwise a new\n * sliceList will be added to the payload.\n *\n * @param {Object[]} slicePayload - Slice payload being built. Will be mutated in place.\n * @param {Slice} slice - The slice.\n * @param {String} status - Status update, eg. progress or scheduled.\n *\n * @returns {Object[]} mutated slicePayload array\n */\nfunction addToSlicePayload(slicePayload, slice, status) {\n // getAuthorizationMessage helps enforces the equivalence\n // !authorizationMessage <==> sliceNumber === 0\n const authorizationMessage = slice.getAuthorizationMessage();\n if (!authorizationMessage) return;\n\n // Try to find a sliceList in the payload which matches the job, status, and auth message\n let sliceList = slicePayload.find(desc => {\n return desc.job === slice.jobAddress\n && desc.status === status\n && desc.authorizationMessage === authorizationMessage;\n });\n\n // If we didn't find a sliceList, start a new one and add it to the payload\n if (!sliceList) {\n sliceList = {\n job: slice.jobAddress,\n sliceNumbers: [],\n status,\n authorizationMessage,\n };\n slicePayload.push(sliceList);\n }\n\n sliceList.sliceNumbers.push(slice.sliceNumber);\n\n return slicePayload;\n}\n\n/**\n * Add a slice to the returnSlice payload being built. If a sliceList already exists for the\n * job-isEstimation-authMessage-reason tuple, then the slice will be added to that, otherwise a new\n * sliceList will be added to the payload.\n *\n * @param {Object[]} slicePayload - Slice payload being built. Will be mutated in place.\n * @param {Slice} slice - The slice.\n * @param {String} [reason] - Optional reason to further characterize status; e.g. 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n *\n * @returns {Object[]} mutated slicePayload array\n */\nfunction addToReturnSlicePayload(slicePayload, slice, reason) {\n // getAuthorizationMessage helps enforces the equivalence\n // !authorizationMessage <==> sliceNumber === 0\n const authorizationMessage = slice.getAuthorizationMessage();\n if (!authorizationMessage) return;\n\n if (!reason) reason = slice.error ? 
'EUNCAUGHT' : 'unknown';\n\n // Try to find a sliceList in the payload which matches the job, status, and auth message\n let sliceList = slicePayload.find(desc => {\n return desc.job === slice.jobAddress\n && desc.isEstimationSlice === slice.isEstimationSlice\n && desc.authorizationMessage === authorizationMessage\n && desc.reason === reason;\n });\n\n // If we didn't find a sliceList, start a new one and add it to the payload\n if (!sliceList) {\n sliceList = {\n job: slice.jobAddress,\n sliceNumbers: [],\n status: 'return',\n isEstimationSlice: slice.isEstimationSlice,\n authorizationMessage,\n reason,\n };\n slicePayload.push(sliceList);\n }\n\n sliceList.sliceNumbers.push(slice.sliceNumber);\n\n return slicePayload;\n}\n\n/**\n * Return DCPv4-specific connection options, composed of type-specific, URL-specific, \n * and worker-specific options, any/all of which can override the dcpConfig.dcp.connectOptions.\n * The order of precedence is the order of specificity.\n */\nfunction connectionOptions(url, label) {\n return leafMerge(/* ordered from most to least specific */\n dcpConfig.worker.dcp.connectionOptions.default,\n dcpConfig.worker.dcp.connectionOptions[label],\n dcpConfig.worker.dcp.connectionOptions[url.href]);\n}\n\n/** @type {number | boolean} */\nSupervisor.lastAssignFailTimerMs = false;\n/** @type {boolean} */\nSupervisor.startSandboxWork_beenCalled = false;\n/** @type {boolean} */\nSupervisor.debugBuild = ((__webpack_require__(/*! dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug');\n/**\n * When Supervisor.sliceTiming is set to be true, it displays the timings of a every slice\n * slice['pairingDelta'] = timespan of when slice is paired with sandbox until execution starts\n * slice['executionDelta'] = timespan of execution in sandbox\n * slice['resultDelta'] = timespan of when sandbox finishes executing until recordResult completes.\n * @type {boolean}\n */\nSupervisor.sliceTiming = false;\n\nexports.Supervisor = Supervisor;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor.js?");
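A minimal sketch, grounded only in the JSDoc and function bodies visible in the removed supervisor.js above, of how addToReturnSlicePayload buckets slices before the worker sends a single 'status' message: one sliceList is kept per (job, isEstimationSlice, authorizationMessage, reason) tuple, so many slices can be returned in one request. The helper name buildReturnPayload and the sample slice objects are illustrative only, not part of the package.

// Illustrative only -- mirrors addToReturnSlicePayload in the module above.
function buildReturnPayload(slices) {
  const payload = [];
  for (const slice of slices) {
    // Slice 0 carries no authorization message and is skipped (see the JSDoc above).
    if (!slice.authorizationMessage)
      continue;
    const reason = slice.error ? 'EUNCAUGHT' : 'unknown';
    // Reuse an existing sliceList when job, estimation flag, auth message, and reason all match.
    let sliceList = payload.find(desc =>
      desc.job === slice.jobAddress &&
      desc.isEstimationSlice === slice.isEstimationSlice &&
      desc.authorizationMessage === slice.authorizationMessage &&
      desc.reason === reason);
    if (!sliceList) {
      sliceList = {
        job: slice.jobAddress,
        sliceNumbers: [],
        status: 'return',
        isEstimationSlice: slice.isEstimationSlice,
        authorizationMessage: slice.authorizationMessage,
        reason,
      };
      payload.push(sliceList);
    }
    sliceList.sliceNumbers.push(slice.sliceNumber);
  }
  return payload;
}

// Hypothetical data: two slices of the same job, auth message, and reason collapse into one sliceList.
// buildReturnPayload([
//   { jobAddress: '0xabc', sliceNumber: 1, isEstimationSlice: false, authorizationMessage: 'auth' },
//   { jobAddress: '0xabc', sliceNumber: 2, isEstimationSlice: false, authorizationMessage: 'auth' },
// ])
// => [ { job: '0xabc', sliceNumbers: [1, 2], status: 'return', isEstimationSlice: false,
//        authorizationMessage: 'auth', reason: 'unknown' } ]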
4492
4492
 
4493
4493
  /***/ }),
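A similar sketch, again not part of the package, of the metrics object that Supervisor.recordResult in the module above derives from a slice's timeReport before submitting a result: the CPU and GPU densities are the fractions of the slice's total time spent on each resource, and the raw times are then rounded up to whole units. The helper name buildMetrics is illustrative; the time units are whatever the sandbox's timeReport uses.

// Illustrative only -- mirrors the metrics block of Supervisor.recordResult above.
function buildMetrics(timeReport) { // timeReport: { webGL, CPU, total } as produced by the sandbox
  const metrics = { GPUTime: 0, CPUTime: 0, CPUDensity: 0, GPUDensity: 0, total: 0 };
  if (timeReport && timeReport.total > 0) {
    metrics.GPUTime = timeReport.webGL;
    metrics.CPUTime = timeReport.CPU;
    metrics.CPUDensity = metrics.CPUTime / timeReport.total; // fraction of total time spent on CPU
    metrics.GPUDensity = metrics.GPUTime / timeReport.total; // fraction of total time spent on GPU
    metrics.total = timeReport.total;
    metrics.CPUTime = 1 + Math.floor(metrics.CPUTime);       // round up to at least one whole unit
    if (metrics.GPUTime > 0)
      metrics.GPUTime = 1 + Math.floor(metrics.GPUTime);
  }
  return metrics;
}

// e.g. buildMetrics({ webGL: 0, CPU: 2.4, total: 3.0 })
// => { GPUTime: 0, CPUTime: 3, CPUDensity: 0.8, GPUDensity: 0, total: 3 }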
4494
4494
 
@@ -4499,7 +4499,7 @@ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_mod
4499
4499
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4500
4500
 
4501
4501
  "use strict";
4502
- eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file dcp-client/worker/supervisor2/index.js\n * Code managing sandboxes, tasks, jobs, and slices within in a DCP Worker.\n * @author Wes Garland, wes@kingsds.network\n * Paul, paul@kingsds.network\n * @date Dec 2020\n * June 2022\n * @module supervisor\n */\n\n/* global dcpConfig */ // eslint-disable-line no-redeclare\n// @ts-check\n\n\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst dcp4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { setImmediate } = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { Keystore, Address } = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { localStorage } = __webpack_require__(/*! dcp/common/dcp-localstorage */ \"./src/common/dcp-localstorage.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst RingBuffer = __webpack_require__(/*! dcp/utils/ringBuffer */ \"./src/utils/ringBuffer.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst { JobManager } = __webpack_require__(/*! ./job-manager */ \"./src/dcp-client/worker/supervisor2/job-manager.js\");\nconst { Load } = __webpack_require__(/*! ./load */ \"./src/dcp-client/worker/supervisor2/load.js\");\nconst { Sandbox, SandboxError } = __webpack_require__(/*! ./sandbox2 */ \"./src/dcp-client/worker/supervisor2/sandbox2.js\");\nconst { sliceStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst hash = __webpack_require__(/*! dcp/common/hash */ \"./src/common/hash.js\");\nconst { calculateJoinHash } = __webpack_require__(/*! dcp/dcp-client/compute-groups */ \"./src/dcp-client/compute-groups/index.js\");\nconst { ModuleCache } = __webpack_require__(/*! ./module-cache */ \"./src/dcp-client/worker/supervisor2/module-cache.js\");\nconst { Inventory, leafMerge, a$sleepMs, ms, pct, generateOpaqueId, booley, compressJobMap, \n toJobMap, truncateAddress, encodeDataURI, makeDataURI, justFetch, stringify } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n//const { CodeFlow } = require('client-oauth2');\n//const { text } = require('./lang').getLocaleStrings('en_CA'); /** XXXpfr @todo Don't know what to do with localization? 
*/\n\n/** @typedef {import('dcp/dcp-client/wallet/eth').Address} Address */\n/** @typedef {import('dcp/protocol-v4/connection/connection').Connection} Connection */\n/** @typedef {string} opaqueId */ // 22 character base64 string\n/** @typedef {import('..').Worker} Worker */\n/** @typedef {import('..').SupervisorOptions} SupervisorOptions */\n/** @typedef {import('./slice2').Slice} Slice */\n/** @typedef {import('dcp/utils').SliceMessage} SliceMessage */\n\nconst addressTruncationLength = 20;\nconst AWAIT_ALL = false;\n\n//\n// Configs are moving around in dcpConfig and local worker configs, so set up some defaults.\nlet workerTuning = dcpConfig.worker;\nif (!workerTuning) workerTuning = dcpConfig.Supervisor;\nif (!workerTuning || !workerTuning.dcp || !workerTuning.dcp.tuning || !workerTuning.dcp.tuning.watchdogInterval\n || !workerTuning.sandbox || !workerTuning.allowOrigins || !workerTuning.minimumWage || !workerTuning.computeGroups)\n workerTuning = {\n dcp: {\n tuning: { watchdogInterval: 7, minSandboxStartDelay: 0.1, maxSandboxStartDelay: 0.7 },\n connectionOptions: { default: { identityUnlockTimeout: 15 * 60 /* seconds */ } },\n },\n sandbox: { progressReportInterval: 2 * 60 * 100 },\n allowOrigins: { fetchWorkFunctions: [], fetchArguments: [], fetchData: [], sendResults: [], any: [] },\n minimumWage: { CPU: 0, GPU: 0, 'in': 0, out: 0 },\n leavePublicGroup: false,\n pCores: 0,\n computeGroups: {},\n // The following configs are not in dcpConfig or worker configs (yet), but may be specified in local worker configs to override the defaults.\n pruneFrequency: 15 * 1000, // Maxiumum time interval where we check to prune used sandboxes.\n workerSandboxThreshold: 7, // When maxWorkingSandboxes >= workerSandboxThreshold, we allow an extra 25% of assigned sandboxes that won't be pruned.\n cachedJobsThreshold: 12, // Prune the unused job managers >= cachedJobsThreshold.\n};\n\n//\n// Flags for tracing.\n//\nconst selectiveEnable = false;\nconst displayWarnError = false || selectiveEnable;\nconst selectiveDebugging = selectiveEnable || debugging();\nconst debuggingError = false || selectiveDebugging || displayWarnError;\nconst debuggingWarn = false || selectiveDebugging || displayWarnError;\nconst selectiveDebugging2 = selectiveEnable && false || debugging('supervisor');\n\n/** \n * Adjust delay times when debugging.\n * The adjustment for niim is automatic, other debuggers must manually change this value.\n */\nlet timeDilation = 1;\nif (DCP_ENV.platform === 'nodejs')\n{\n /** Make timers 10x slower when running in niim */\n timeDilation = (requireNative('module')._cache.niim instanceof requireNative('module').Module) ? 
10 : 1;\n}\n\n//\n// Index to functionality -- search for '_Idx' to toggle through the index.\n//\n// 1) Ctor: Supervisor constructor.\n// 2) Important property-like functions.\n// 3) Dtors: screenSaverDestroy, stopWork, purgeJob.\n// 4) Connection code.\n// 5) Work: Distribute slice to sandboxes.\n// 6) Return slices and sent progress reports to result-submitter-results.\n// 7) Task Distributor (TD): requestTask (Rq) support -- communication with TD.\n// 8) Aggregators from the job managers.\n// 9) Sandbox creation and management.\n// 10) Result-submitter-result support functions.\n// 11) Work reject.\n// 12) Unused functions that we need to review.\n//\n\n// _Idx\n//\n// Ctor: Supervisor constructor.\n//\n\n/** \n * Supervisor constructor\n * \n * A supervisor manages the communication with the scheduler, manages sandboxes, and\n * decides which workload should be sent to which sandboxes when.\n *\n * Start state:\n * - initial\n *\n * Intermediate states:\n * - ready\n * - stopping\n *\n * Terminal states:\n * - stopped\n *\n * Valid transitions:\n * - initial -> ready where that happens \n * - ready -> stopping\n * - stopping -> stopped\n *\n * @param {Worker} worker - The worker that created this instance.\n * @param {SupervisorOptions} options - Options for specifying custom behaviour and tuning,\n */\nfunction Supervisor(worker, options)\n{\n assert(options.identity instanceof Keystore);\n assert(options.paymentAddress instanceof Address);\n\n /**\n * Flag to indicate a debug build.\n * Used when we want to display extra information and do extra checks for developers only.\n * @type {boolean}\n */\n this.debugBuild = ((__webpack_require__(/*! dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug');\n /**\n * When Supervisor.sliceTiming is set to be true, it displays the timings of a every slice\n * slice['queueingDelta'] = timespan of when slice is passed to jobManager.runQueuedSlice until sandbox.work\n * slice['executionDelta'] = timespan of execution in sandbox\n * slice['resultDelta'] = timespan of when sandbox finishes executing until recordResult completes.\n * @type {boolean}\n */\n this.sliceTiming = false;\n /** Used for analyzing the completed results in Supervisor.recordResult. 
*/\n this.resultMap = {};\n\n /** @type {ModuleCache} */\n this.cache = new ModuleCache(this);\n\n this.worker = worker;\n this.identity = options.identity;\n this.paymentAddress = options.paymentAddress;\n this.options = options;\n this.maxWorkingSandboxes = options.maxWorkingSandboxes || 1;\n this.maxTotalSandboxes = this.maxWorkingSandboxes;\n\n // We're making the assumption that if a worker has at least 7 sandboxes, \n // then the worker has sufficient resources to handle 25% more sandboxes in memory.\n // This assumption may be overridden by changing workerSandboxThreshold.\n if (this.maxWorkingSandboxes >= this.workerSandboxThreshold)\n this.maxTotalSandboxes = Math.ceil(1.25 * this.maxWorkingSandboxes);\n // When # of sandboxes reaches this level, we more aggressively prune.\n this.mustPruneSandboxLevel = Math.ceil(1.5 * this.maxTotalSandboxes);\n // Last prune time stamp.\n this.lastPrune = 0;\n\n // Startup perf timer -- SAVE\n //this.lastTime = Date.now();\n\n // Supervisor may get created by Worker where options.cores or options.targetLoad is not defined.\n this.numCPU = this.maxWorkingSandboxes;\n this.numGPU = 1;\n this.portionToUseCPU = pct(100);\n this.portionToUseGPU = pct(100);\n\n if (options.cores)\n {\n this.numCPU = options.cores.cpu || this.numCPU;\n this.numGPU = options.cores.gpu || this.numGPU;\n }\n if (options.targetLoad)\n {\n this.portionToUseCPU = options.targetLoad.cpu || pct(100);\n this.portionToUseGPU = options.targetLoad.gpu || pct(100);\n }\n\n this.tuning = {\n maxCPUAlloc: this.portionToUseCPU, /**< Maximum amount of CPU time to attempt to use */\n maxGPUAlloc: this.portionToUseGPU, /**< Maximum amount of GPU time to attempt to use */\n watchdogInterval: 7, /**< How frequently to kick off an unsolicited requestTask */\n prefetchInterval: 20, /**< How many seconds into the future are looking to project capacity during work fetch */\n minSandboxStartDelay: 0.1, /**< seconds - base minimum of this.delayMs, scaled by this.delayScaler */\n maxSandboxStartDelay: 0.7, /**< seconds - base maximum random component of this.delayMs, scaled by this.delayScaler */\n };\n this.tuning = leafMerge(this.tuning, workerTuning.dcp.tuning);\n if (options.watchdogInterval > 0)\n this.tuning.watchdogInterval = options.watchdogInterval; // Override.\n //this.tuning.watchdogInterval = 0.25;\n\n /**\n * Fine tune this.delayMs.\n * Note: Please discuss any change with Paul, paul@kingsds.network.\n * XXXpfr @todo Finalize the delay tuning.\n */\n this.delayScaler = 0.5;\n\n debugging('supervisor') && console.debug('Supervisor.tuning', this.tuning);\n\n /**\n * Note: targetLoad is not properly implemented yet.\n * XXXpfr @todo Collaborate with Wes to get it right.\n * @type {Load}\n */\n this.targetLoad = new Load({\n cpu: Math.min(this.maxWorkingSandboxes, this.numCPU),\n gpu: Math.min(this.maxWorkingSandboxes, this.numGPU)\n }).scale(this.tuning.maxCPUAlloc, this.tuning.maxGPUAlloc);\n\n /** @type {string[]} */\n this.allowedOrigins = workerTuning.allowOrigins.any;\n /** @type {string[]} */\n this.fetchWorkFunctions = workerTuning.allowOrigins.fetchWorkFunctions;\n /** @type {string[]} */\n this.fetchArguments = workerTuning.allowOrigins.fetchArguments;\n /** @type {string[]} */\n this.fetchData = workerTuning.allowOrigins.fetchData;\n /** @type {string[]} */\n this.sendResults = workerTuning.allowOrigins.sendResults;\n\n // In localExec, do not allow work function or arguments to come from the 'any' origins\n if (this.options.localExec)\n {\n this.allowedOrigins = 
this.allowedOrigins.concat(options.allowedOrigins.any);\n this.fetchWorkFunctions = this.fetchWorkFunctions.concat(options.allowedOrigins.fetchWorkFunctions);\n this.fetchArguments = this.fetchArguments.concat(options.allowedOrigins.fetchArguments);\n this.fetchData = this.fetchData.concat(options.allowedOrigins.fetchData);\n this.sendResults = this.sendResults.concat(options.allowedOrigins.sendResults)\n }\n\n if (options.allowedOrigins && options.allowedOrigins.length > 0)\n this.allowedOrigins = options.allowedOrigins.concat(this.allowedOrigins);\n \n //\n // The following 3 configs are not in dcpConfig or worker configs (yet), but may be specified in local worker configs to override the defaults.\n //\n /** @type {number} - Maxiumum time interval where we check to prune used sandboxes. */\n this.pruneFrequency = workerTuning.pruneFrequency || 15 * 1000;\n /** @type {number} - When maxWorkingSandboxes >= workerSandboxThreshold, we allow an extra 25% of assigned sandboxes that won't be pruned. */\n this.workerSandboxThreshold = workerTuning.workerSandboxThreshold || 7;\n /** @type {number} - Prune the unused job managers >= cachedJobsThreshold. */\n this.cachedJobsThreshold = workerTuning.cachedJobsThreshold || 12;\n\n /** @type {Object.<Address, JobManager>} */\n this.jobMap = {}; \n /** @type {Sandbox[]} - All sandboxes that are being used by the job managers. Makes sure we don't lose sandboxes. */\n this.sandboxInventory = [];\n /** @type {Sandbox[]} - Started sandboxes that are not in sandboxInventory yet. */\n this.readiedSandboxes = [];\n /** @type {JobManager[]} */\n this.jobManagerInventory = new Inventory('jobManagers');\n /** @type {Synchronizer} */\n this.state = new Synchronizer('initial', [ 'initial', 'ready', 'reconnecting', 'stopping', 'stopped', 'broken']);\n\n /** @type {string} */\n this.lastDcpsid = undefined;\n /** @type {Connection} */\n this.taskDistributor = null;\n /** @type {Connection} */\n this.resultSubmitter = null;\n /** @type {Connection} */\n this.eventRouter = null;\n /** @type {Connection} */\n this.packageManager = null;\n /** @type {Array<object>} */\n this.resultSubmitterMessageQueue = [];\n /** @type {Array<object>} */\n this.eventRouterMessageQueue = [];\n /** @type {Array<object>} */\n this.packageManagerMessageQueue = [];\n\n /** @type {object} */\n this.schedulerConfig = leafMerge(dcpConfig.scheduler, options.schedulerConfig);\n\n /** @type {opaqueId} */\n this.workerId = localStorage.getItem('workerId');\n if (!this.workerId || this.workerId.length !== constants.workerIdLength)\n {\n this.workerId = generateOpaqueId();\n localStorage.setItem('workerId', this.workerId);\n }\n /** @type {object[]} */\n this.rejectedJobs = [];\n /** \n * An N-slot ring buffer of job addresses. Stores all jobs that have had no more than 1 slice run in the ring buffer.\n * Required for the implementation of discrete jobs \n * @type {RingBuffer} \n */\n this.ringBufferofJobs = new RingBuffer(100); // N = 100 should be more than enough. \n /** @type {boolean} - pseudo-mutex guarding requestTask. 
*/\n this.isFetchingNewWork = false;\n\n // Start up the connections.\n this.instantiateAllConnections();\n\n /**\n * Note: DCP-3241 asks to test Android to see if we need this restriction any longer.\n * XXXpfr @todo Hopefully we can delete this @hack.\n */\n // @hack - dcp-env.isBrowserPlatform is not set unless the platform is _explicitly_ set,\n // using the default detected platform doesn't set it.\n // Fixing that causes an error in the wallet module's startup on web platform, which I\n // probably can't fix in a reasonable time this morning.\n // ~ER2020-02-20\n if (!options.maxWorkingSandboxes\n && DCP_ENV.browserPlatformList.includes(DCP_ENV.platform)\n && navigator.hardwareConcurrency > 1) {\n this.maxWorkingSandboxes = navigator.hardwareConcurrency - 1;\n if (typeof navigator.userAgent === 'string') {\n if (/(Android).*(Chrome|Chromium)/.exec(navigator.userAgent)) {\n this.maxWorkingSandboxes = 1;\n console.log('Doing work with Chromimum browsers on Android is currently limited to one sandbox');\n }\n }\n }\n}\nexports.Supervisor = Supervisor;\nSupervisor.prototype = Object.getPrototypeOf(new EventEmitter('Supervisor')); // Fake out VSCode -- get's rid of a billion red-squigglies.\nSupervisor.prototype = new EventEmitter('Supervisor');\n/**\n * Preserve the constructor property.\n * @constructor\n */\nSupervisor.prototype.constructor = Supervisor;\n\n/**\n * Set up sandboxes and interval timers, then start to search for work.\n * @param {boolean} [searchForWork=true] - When true, start searching for work.\n **/\nSupervisor.prototype.startWork = function Supervisor$startWork (searchForWork = true)\n{\n /* Provide opportunity for calling code to hook ready/error events. */\n setImmediate(async () => {\n try\n {\n if (this.state.isNot('initial'))\n {\n if (this.state.setIf('stopped', 'initial')) {}\n else if (this.state.setIf('reconnecting', 'initial')) {}\n else if (this.state.setIf('broken', 'initial')) {}\n else if (this.state.is('ready')) return\n else throw new Error(`Supervisor startWork is in unexpected state ${this.state}, aborting...`);\n }\n this.instantiateAllConnections();\n\n await this.createAndSaveSandboxes(this.maxWorkingSandboxes)\n .then(() => this.checkCapabilities());\n\n //console.log('startWork:1', Date.now() - this.lastTime); // SAVE\n\n // Beacon interval timer.\n this.progressReportTimer = setInterval(() => this.emitProgressReport(), (workerTuning.sandbox.progressReportInterval || 2 * 60 * 100));\n // Watchdog: requestTask-driven interval timer.\n this.watchdogTimer = setInterval(() => this.requestTask() , ms(this.tuning.watchdogInterval));\n if (DCP_ENV.platform === 'nodejs' && this.options.localExec)\n {\n /* Interval timer helps keep worker alive forever, which we don't want in localExec. */\n this.progressReportTimer.unref();\n this.watchdogTimer.unref();\n }\n\n this.state.set('initial', 'ready');\n\n //console.log('startWork:2', Date.now() - this.lastTime); // SAVE\n\n if (searchForWork)\n setImmediate(() => this.requestTask()); // Don't wait for watchdog.\n }\n catch(error)\n {\n this.state.set('initial', 'broken');\n this.emit('error', error);\n }\n });\n}\n\n/** Construct capabilities when necessary. 
*/\nSupervisor.prototype.checkCapabilities = function Supervisor$checkCapabilities ()\n{\n if (!this.capabilities)\n {\n /**\n * Assign the capabilities of one the sandboxes before fetching slices from the scheduler.\n * @todo Remove this once fetchTask uses the capabilities of every sandbox to fetch slices.\n */\n const sandbox = this.readiedSandboxes.length > 0 ? this.readiedSandboxes[0] : this.sandboxInventory[0];\n if (sandbox)\n {\n this.capabilities = sandbox.capabilities;\n this.emit('capabilitiesCalculated', this.capabilities);\n }\n }\n\n if (DCP_ENV.isBrowserPlatform && this.capabilities.browser)\n this.capabilities.browser.chrome = DCP_ENV.isBrowserChrome;\n}\n\n// _Idx\n//\n// Important property-like functions.\n//\n\n/**\n * Universal delay milliseconds..\n * @returns {number}\n */\nSupervisor.prototype.delayMs = function Supervisor$delayMs (max = this.tuning.maxSandboxStartDelay, min = this.tuning.minSandboxStartDelay)\n{\n // Note: Please discuss any change with Paul, paul@kingsds.network.\n return 1000 * timeDilation * this.delayScaler * (min + Math.random() * (max - min));\n}\n\n/**\n * Indicates whether supervisor is ready for business.\n * @returns {boolean} - When true, the party is on...\n */\nSupervisor.prototype.isReady = function Supervisor$isReady()\n{\n return this.worker.working && this.state.is('ready');\n}\n\n/**\n * Safe access to Connection.close(...).\n * @param {string} name\n * @param {Connection} connection\n * @param {string} [reason='requested']\n * @param {boolean} [immediate=false]\n * @returns {string}\n */\nfunction safeCloseEx(name, connection, reason = 'requested', immediate = false)\n{\n if (connection)\n {\n let kind;\n if (connection.state.is('closed')) kind = 'closed';\n else if (connection.state.is('closing')) kind = 'closing';\n else if (connection.state.is('close-wait')) kind = 'close-wait';\n if (kind)\n {\n const msg = `${name} is ${kind}`;\n debuggingWarn && console.warn(`${msg}, do not try to close again.`);\n return msg;\n }\n else\n {\n debuggingWarn && console.warn(`${name} is being closed now.`);\n connection.close(reason, immediate);\n return null;\n }\n }\n return `${name} is null`;\n}\n\n/**\n * Safe access to Connection.close(...).\n * @param {Connection} connection\n * @param {string} [reason='requested']\n * @param {boolean} [immediate=false]\n * @returns {string}\n */\nfunction safeClose(connection, reason = 'requested', immediate = false)\n{\n return safeCloseEx('Connection', connection, reason, immediate);\n}\n\n/**\n * When true, the sandbox complete handler will look for another slice in the same job,\n * and if not found, then proceed to Supervisor.requestTask.\n * @returns {boolean}\n */\nSupervisor.prototype.runSliceFromSameJob = function Supervisor$runSliceFromSameJob()\n{\n //\n // Experimental, should be off by default.\n // Cf. 
sandbox complete handler in JobManager.hookUpSandboxListeners.\n //\n const disable = true;\n const tooManyJobs = this.activeJobCount() > this.maxWorkingSandboxes;\n return !disable && !tooManyJobs && this.unusedSandboxSlots() < 2;\n}\n\n/**\n * This function is used as the target number of sandboxes to be associated with slices and start working.\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @returns {number}\n */\nSupervisor.prototype.unusedSandboxSlots = function Supervisor$unusedSandboxSlots ()\n{\n return this.maxWorkingSandboxes - this.workingSliceCount();\n}\n\n/**\n * Create errorObj with error.code if it exists.\n * @param {Error} error\n * @returns {object}\n */\nSupervisor.prototype.checkCode = function Supervisor$checkCode (error)\n{\n if (!error) return '';\n const errorObj = { message: error.message };\n if (error['errorCode']) errorObj['errorCode'] = error['errorCode'];\n if (error['code']) errorObj['code'] = error['code'];\n return errorObj;\n}\n\n/**\n * Remove stack trace from error.\n * May not work when error is a string with no new-lines.\n * @param {string|Error} error\n * @returns {string|Error}\n */\nSupervisor.prototype.removeStackTrace = function Supervisor$removeStackTrace (error)\n{\n if (typeof error === 'string')\n {\n const errorLines = error.split('\\n');\n return (errorLines && errorLines.length > 0) ? errorLines[0] : error;\n }\n if (error instanceof Error)\n return error.message;\n return error;\n}\n\n// _Idx\n//\n// Dtors: screenSaverDestroy, stopWork, purgeJob.\n//\n\n/**\n * If we cannot create a new sandbox, that probably means we're on a screensaver worker\n * and the screensaver is down. So return the slices to the scheduler.\n */\nSupervisor.prototype.screenSaverDestroy = function Supervisor$screenSaverDestroy()\n{\n debugging('supervisor') && console.debug(`Supervisor.screenSaverDestroy: destroying all job managers and terminating all sandboxes.`);\n this.jobManagerInventory.forEach(jm => jm.destroy());\n this.jobManagerInventory = new Inventory('jobManagers');\n\n this.readiedSandboxes.forEach(sandbox => {\n if (!sandbox.isTerminated) sandbox.terminate(false);\n });\n this.readiedSandboxes = [];\n\n this.sandboxInventory.forEach(sandbox => {\n if (!sandbox.isTerminated) sandbox.terminate(false);\n });\n this.sandboxInventory = [];\n}\n\n/**\n * Terminates sandboxes and returns slices.\n * Sets the working flag to false, call @this.work to start working again.\n * \n * If forceTerminate is true: Terminates all sandboxes and returns all slices.\n * If forceTerminate is false: Terminates non-working sandboxes and returns initial and ready slices.\n *\n * @param {boolean} [forceTerminate = true] - true if you want to stop the sandboxes from completing their current slice.\n * @returns {Promise<void>}\n */\nSupervisor.prototype.stopWork = async function Supervisor$stopWork (forceTerminate = true)\n{\n selectiveDebugging && console.debug(`Supervisor.stopWork(${forceTerminate},${this.state}): terminating sandboxes and returning slices to scheduler.`);\n\n // Do a hard flush of the microtask queue and finish the current event loop.\n await new Promise((resolve) => setImmediate(() => setTimeout(resolve, 0)));\n\n if (this.state.setIf('ready', 'stopping')) {}\n else if (this.state.setIf('reconnecting', 'stopping')) {}\n else if (this.state.setIf('broken', 'stopping')) {}\n else if (this.state.is('stopped')) return\n else throw new Error(`Supervisor stopWork is in unexpected state ${this.state}, 
aborting...`);\n\n this.instantiateAllConnections();\n\n this.readiedSandboxes.forEach((sandbox) => {\n if (!sandbox.isTerminated) sandbox.terminate(false);\n });\n this.readiedSandboxes = [];\n\n if (forceTerminate)\n {\n for (const jm of this.jobManagerInventory)\n jm.destroy();\n\n this.sandboxInventory.forEach((sandbox) => {\n if (!sandbox.isTerminated) sandbox.terminate(false);\n });\n }\n else\n {\n let activeSliceCount = 0;\n const slicesToReturn = [];\n for (const jm of this.jobManagerInventory)\n {\n //jm.dumpSlices(`stopWork1:${jm.address}`);\n const queuedSlices = jm.queuedSlices;\n slicesToReturn.push(...queuedSlices);\n jm.removeSlices(queuedSlices);\n activeSliceCount += jm.activeSlices.length;\n jm.nonWorkingSandboxes.forEach((sandbox) => jm.returnSandbox(sandbox));\n }\n\n const reason = `Supervisor stopWork(${forceTerminate}): returning all non-finished slices that are not working.`;\n this.returnSlices(slicesToReturn, reason, false /*removeSlices*/);\n\n await new Promise((resolve) => {\n if (activeSliceCount === 0)\n resolve();\n // Resolve and finish work once all sandboxes have finished submitting their results.\n this.on('submitFinished', () => {\n if (--activeSliceCount === 0)\n {\n debuggingWarn && console.warn('All sandboxes empty, stopping worker and closing all connections');\n resolve();\n }\n });\n });\n\n for (const jm of this.jobManagerInventory)\n {\n //jm.dumpSlices(`stopWork2:${jm.address}`);\n jm.safeSandboxes.forEach((sandbox) => sandbox.terminate(false));\n jm._sandboxInventory = [];\n }\n }\n this.sandboxInventory = [];\n this.jobManagerInventory = new Inventory('jobManagers');\n\n this.closeConnections();\n\n this.emit('stop');\n this.state.set('stopping', 'stopped');\n}\n\n/**\n* Purge all traces of the job.\n* @param {JobManager} jobManager \n*/\nSupervisor.prototype.purgeJob = function Supervisor$purgeJob (jobManager)\n{\n selectiveDebugging && console.debug(`Supervisor.purgeJob ${jobManager.identifier}.`);\n this.jobManagerInventory.delete(jobManager);\n this.cache.removeJob(jobManager.address);\n jobManager.destroy();\n}\n\n// _Idx\n//\n// Connection code.\n//\n\n/** \n * Connect the supervisor to a given scheduler sub-service. Reconnection \"DDoS\" from workers\n * mitigated via an exponential backoff algorithm.\n *\n * DCPv4 connections are lazily-initiated. Successful connection establishment detected by\n * observing the payload event, which normally triggers during versioning.\n */\nSupervisor.prototype.connectTo = function Supervisor$connectTo(which)\n{\n const config = (which === 'packageManager') ? dcpConfig.packageManager : this.schedulerConfig.services[which];\n const retryMinSleepMs = 100;\n const payloadResetCount = 3; /* How many payloadCount before we reset retryNextSleepMs. 
*/\n \n var retryNextSleepMs = retryMinSleepMs;\n var payloadCount = 0;\n var options = leafMerge(/* ordered from most to least specific */\n workerTuning.dcp.connectionOptions.default,\n workerTuning.dcp.connectionOptions[which],\n workerTuning.dcp.connectionOptions[config.location.href],\n );\n\n /**\n * The payload event handler is an interesting way to handle exponential backoff\n * for the delay in this.reconnect.\n * XXXpfr @todo Revisit exponential backoff for this.reconnect in Sup2 Part II.\n */\n\n const handlePayloadEventFn = (ev) =>\n {\n if (++payloadCount === payloadResetCount)\n {\n this[which].removeEventListener('payload', handlePayloadEventFn);\n retryNextSleepMs = retryMinSleepMs; \n }\n }\n\n this[which] = new dcp4.Connection(config, this.identity, options);\n\n debugging() && console.debug(`Opening new connection ${this[which].id} to ${which}.`)\n\n const messageQueue = this[which + 'MessageQueue'];\n if (messageQueue && messageQueue.length > 0)\n resendRejectedMessages(this[which], messageQueue);\n\n this[which].on('payload', handlePayloadEventFn);\n\n this['reconnect' + which] = this.reconnect.bind(this, which);\n this[which].on('close', () => { this['reconnect' + which](); });\n}\n\n/**\n * Reconnect logic.\n * @param {string} which -- Name of the connection \n */\nSupervisor.prototype.reconnect = async function Supervisor$reconnect(which)\n{\n debugging('supervisor') && console.debug(`Supervisor.reconnect: ${which}:`, this.state.valueOf(), this[which].state.valueOf());\n const stateChange = this.state.testAndSet('ready', 'reconnecting');\n if (stateChange || this.state.is('reconnecting'))\n {\n await a$sleepMs(100); // Sleep for 100ms , maybe exp-backoff later if we need it.\n this.connectTo(which);\n if (stateChange) this.state.set('reconnecting', 'ready');\n debugging() && console.debug(`Supervisor.reconnect: Trying to reconnect ${which}`, this.state.valueOf(), this[which].state.valueOf());\n }\n}\n\n/**\n * Close a connection properly.\n * @param {string} which -- Name of the connection to close.\n */\nSupervisor.prototype.closeConnection = function Supervisor$closeConnection(which)\n{\n if (this[which])\n {\n this[which].off('close', this['reconnect' + which]);\n this[which].close();\n this[which] = null;\n }\n}\n\n/**\n * Close all connections.\n */\nSupervisor.prototype.closeConnections = function Supervisor$closeConnections()\n{\n /** XXXpfr @todo Remove when bug DCP-3094 is fixed. */\n a$sleepMs(500);\n\n this.closeConnection('taskDistributor');\n this.closeConnection('resultSubmitter');\n this.closeConnection('eventRouter');\n this.closeConnection('packageManager');\n}\n\n/**\n * Broadcast keepalive to all connections and optionally start up sandboxes.\n * @param {boolean} [createSandbox=false] - When true, creates sandboxes for unused sandbox slots.\n */\nSupervisor.prototype.keepalive = function Supervisor$keepalive(createSandbox = false)\n{\n return Promise.all([\n this.taskDistributor.keepalive(),\n this.resultSubmitter.keepalive(),\n this.eventRouter .keepalive(),\n this.packageManager .keepalive(),\n (createSandbox ? this.createAndSaveSandboxes(this.maxWorkingSandboxes) : Promise.resolve()),\n ]);\n}\n\n/**\n * Open all connections. 
Used when supervisor is instantiated or stopped/started to initially open connections.\n */\nSupervisor.prototype.instantiateAllConnections = function Supervisor$instantiateAllConnections ()\n{\n if (!this.taskDistributor)\n this.connectTo('taskDistributor');\n if (!this.eventRouter)\n this.connectTo('eventRouter');\n if (!this.resultSubmitter)\n this.connectTo('resultSubmitter');\n if (!this.packageManager)\n this.connectTo('packageManager');\n}\n\n/**\n * Try sending messages that were rejected on an old instance of the given connection.\n * @param {Connection} connection\n * @param {Array<object>} messageQueue\n */\nasync function resendRejectedMessages (connection, messageQueue)\n{\n var message = messageQueue.shift();\n do \n {\n try\n {\n await connection.send(message.operation, message.data);\n }\n catch (error)\n {\n debuggingError && console.error(`Failed to resend message ${message.operation} to scheduler: ${error}. Will try again on a new connection.`);\n messageQueue.unshift(message);\n safeClose(connection);\n break;\n }\n message = messageQueue.shift();\n } while (message);\n}\n \n// _Idx\n//\n// Work: Distribute slice to sandboxes.\n//\n\n/** \n * UNUSED\n * @deprecated\n * Round-robin through the job managers, picking 1 slice to run each time.\n * Try to have the same number of working sandboxes for each job.\n * Try to run a slice on every available sandbox.\n */\nSupervisor.prototype.workOnCurrentTask = function Supervisor$workOnCurrentTask ()\n{\n return this.roundRobinSlices();\n}\n\n/**\n * This is for compatibility with Supervisor1 in sa worker.\n * When we get rid of Supervisor1 we can delete the ref in sa worker and then get rid of this function.\n */\nSupervisor.prototype.setDefaultIdentityKeystore = function Supervisor$setDefaultIdentityKeystore ()\n{\n}\n\n/**\n * Called in Worker.start().\n * Initial entry point after Worker constructor.\n * We need to start searching for work here to allow starting and stopping a worker.\n */\nSupervisor.prototype.work = function Supervisor$work ()\n{\n // Set up sandboxes and interval timers, then start to search for work.\n this.startWork();\n}\n\n/** \n * Round-robin through the job managers, picking 1 slice to run each time.\n * Try to have the same number of working sandboxes for each job.\n * Try to run a slice on every available sandbox.\n */\nSupervisor.prototype.roundRobinSlices2 = function Supervisor$roundRobinSlices2 ()\n{\n try\n {\n let first = true;\n const cursor = this.makeJobSelectionCursor(this.jobManagerInventory);\n while (true)\n {\n const slice = cursor.next();\n if (!slice) break; /* No more work can fit. */\n debugging('supervisor') && console.debug('roundRobinSlices: Executing slice', slice.identifier);\n slice.markAsReserved();\n slice.jobManager.runSlice(slice, first ? 0 : this.delayMs());\n first = false;\n }\n }\n finally\n {\n this.isFetchingNewWork = false;\n }\n}\n\n/**\n * We try to balance so that each job has the same number of working slices.\n *\n * NOTES:\n * 1) If count is such that it cannot be distributed evenly, we do not yet randomly assign the extras.\n * E.g. 
3 jobs, j1, j2, j3: count = 5 -- so 2 jobs get extras -- the extras need to be assigned randomly.\n * @todo Assign the extras randomly.\n * 2) this.roundRobinSlices is not going to be what we use when sup2 is in final form.\n * We want to this.makeJobSelectionCursor and something like this.roundRobinSlices2\n *\n * In the outer loop,\n * when numworkingSandboxes=1, try to get a slice running for each job with 0 working sandboxes.\n * when numworkingSandboxes=2, try to get a slice running for each job with 1 working sandboxes.\n * when numworkingSandboxes=3, try to get a slice running for each job with 2 working sandboxes. Etc.\n * The idea is to balance the number of slices working on each job.\n * @param {number} [count=0] - The number of new slices to try to get running in sandboxes.\n */\nSupervisor.prototype.roundRobinSlices = function Supervisor$roundRobinSlices (count = 0)\n{\n try\n {\n if (!count) count = this.unusedSandboxSlots();\n if (!count || this.readySliceCount() < 1)\n return Promise.resolve();\n\n const slices = [];\n let numScheduled = 0\n let readyJobManagers = this.jobManagerInventory.filter(jm => jm.ready);\n let first = true;\n selectiveDebugging && console.debug('roundRobinSlices: START count', count, 'numJobMgrs', readyJobManagers.length, 'totalWorking(w/r/wo/wsbx/sbx)', this.workingSliceCount(), this.reservedSliceCount(), this.workingSliceOnlyCount(), this.workingSandboxCount(), this.sandboxCount());\n\n for (let numWorkingSandboxes = 1; numWorkingSandboxes <= this.maxWorkingSandboxes; numWorkingSandboxes++)\n {\n let sliceCount = 0;\n const beginNumScheduled = numScheduled;\n for (const jobMan of readyJobManagers)\n {\n const _readySlices = jobMan.readySlices;\n sliceCount += _readySlices.length\n const skip = numWorkingSandboxes <= jobMan.workingSlices.length;\n\n if (skip || _readySlices.length < 1)\n {\n // Noisy log message turned off by default.\n //debugging('supervisor') && console.debug('RRS0(numS, beginNumS, count, sliceCount, skip, _ready, numWorkingS(loop), workingSlices):', numScheduled, beginNumScheduled, count, sliceCount, skip, _readySlices.length, numWorkingSandboxes, jobMan.workingSlices.length);\n continue;\n }\n\n const slice = _readySlices[0];\n slices.push(slice);\n\n slice.markAsReserved();\n jobMan.runSlice(slice, first ? 
0 : this.delayMs());\n\n first = false;\n if (++numScheduled >= count)\n break;\n }\n if (numScheduled >= count)\n {\n debugging('supervisor') && console.debug('RRS1(numS, beginNumS, count, sliceCount):', numScheduled, beginNumScheduled, count, sliceCount);\n break;\n }\n if (beginNumScheduled === numScheduled && sliceCount < 1)\n {\n debugging('supervisor') && console.debug('RRS2(numS, beginNumS, count, sliceCount):', numScheduled, beginNumScheduled, count, sliceCount);\n break;\n }\n }\n if (selectiveDebugging)\n {\n console.debug(`roundRobinSlices(working:(w/r/wo/wsbx/sbx)${this.workingSliceCount()},${this.reservedSliceCount()},${this.workingSliceOnlyCount()},${this.workingSandboxCount()},${this.sandboxCount()}): Started ${slices.length}/${numScheduled}/${count} scheduled slices`, compressSlices(slices));\n const sliceGrouper = {};\n slices.forEach((slice) => {\n if (!sliceGrouper[slice.jobAddress]) sliceGrouper[slice.jobAddress] = { cnt: 1, working: slice.jobManager.workingSlices.length, queued: slice.jobManager.queuedSlices.length, assigned: slice.jobManager.assignedSandboxes.length, estimation: slice.jobManager.isEstimation };\n else sliceGrouper[slice.jobAddress].cnt++;\n });\n console.debug(sliceGrouper);\n }\n if (selectiveDebugging2)\n {\n const jobGrouper = {};\n this.jobManagerInventory.forEach((jm) => {\n jobGrouper[jm.address] = { working: jm.workingSlices.length, queued: jm.queuedSlices.length, assigned: jm.assignedSandboxes.length, estimation: jm.isEstimation };\n });\n console.debug(jobGrouper);\n }\n }\n finally\n {\n this.isFetchingNewWork = false;\n }\n}\n\n/**\n * @callback cbNext\n * @returns {Slice}\n */\n\n/** \n * Factory function which instantiates a JobSelectionCursor. A JobSelectionCursor\n * steps the order that job slices should be selected for execution in the supervisor,\n * given the current state of the supervisor and the availability of jobs when the\n * inventory was snapshot. The entire slice scheduling algorithm is represented by\n * this cursor.\n *\n * The basic idea behind the scheduling of slices in this implementation is to keep as\n * many slices from different jobs running as possible, so as to reduce the likelihood\n * of resource contention between sandboxes.\n *\n * Slices are scheduled in here based on the following ruleset:\n * - pick a slice from the longest job that isn't running\n * - choose a slice from the remaining jobs, in order for shortest to longest slice time\n * - if there are any jobs which are nearly finished, every other slice comes from one\n * of these jobs, selected randomly????? <-- NYI. XXXpfr @todo Think about implementing...\n * - jobs which have slicePriority set by the task-distributor may be chosen in place\n * of slices in the above algorith. Jobs with a slicePriority closer to 1 are more likely\n * to exhibit this behaviour.\n * @param {JobManager[]} jobManagerInventory\n * @returns { { next: cbNext } }\n */\nSupervisor.prototype.makeJobSelectionCursor = function Supervisor$JobSelectionCursor (jobManagerInventory)\n{\n /* Variables in this scope function as state information for next() */\n var jobManIdx = 0;\n /** @type {JobManager[]} - All job managers that are ready that have at least one ready slice. */\n var jobManList;\n /** @type {JobManager[]} - All jobManList elements that correspond to preferred jobs. */\n var prefJobManList;\n /* Number of working sandboxes allowed for a given job. 
*/\n var concurrency = 1;\n const that = this;\n \n function seed (_concurrency)\n {\n function countSandboxes(jobAddress)\n {\n const jobManager = that.jobMap[jobAddress];\n if (!jobManager) throw new Error(`Lost track of job manager for address '${jobAddress}'`);\n return jobManager.workingSlices.length;\n }\n \n // Reset.\n jobManIdx = 0;\n\n /* Populate jobManList with jobs which are ready and have at least one slice which is ready. */\n jobManList = jobManagerInventory.filter((jobMan) => jobMan.ready && jobMan.readySlices.length > 0);\n\n /* Populate jobManList with jobManagers whose # of working slices is less than _concurrency. */\n jobManList = jobManList.filter((jobMan) => countSandboxes(jobMan.address) < _concurrency);\n\n /* Increasing sort. */\n jobManList.sort((a,b) => a.estimateWallMs - b.estimateWallMs);\n\n /* Populate prefJobManList with jobs from jobManList which also have a slicePreference set. */\n prefJobManList = jobManList.filter(jobMan => jobMan.hasOwnProperty('slicePreference') );\n }\n\n /**\n * Takes slices off the ready list, marks it reserved and increments workingSandboxCoun,\n * because the slice will soon be working.\n * @param {JobManager} jobMan \n * @returns {Slice}\n */\n function reserveSlice (jobMan)\n {\n const _readySlices = jobMan.readySlices;\n if (_readySlices.length > 0)\n {\n const slice = _readySlices[0];\n slice.markAsReserved();\n return slice;\n }\n return null;\n }\n\n /**\n * Each invocation of next() identifies one slice to run, or returns false if none can run.\n * @returns {Slice}\n */\n function next ()\n {\n /* Adjust order to schedule the heaviest job's first slice asap. */\n jobManList.unshift(jobManList.pop());\n\n let workingSliceCount;\n do\n {\n seed(concurrency);\n\n /* Maybe schedule a prefered job slice based on random chance .*/\n if (prefJobManList.length > 0)\n {\n let prioRan = Math.random();\n let list = prefJobManList.filter(jm => jm['slicePreference'] >= prioRan);\n\n if (list.length > 0)\n {\n const jobMan = list[list.length * Math.random()];\n const slice = reserveSlice(jobMan);\n if (slice)\n return slice;\n }\n }\n\n /* Schedule a slice from next job; jobs are in increasing order of estimated run time. */\n while (jobManIdx < jobManList.length)\n {\n const jobMan = jobManList[jobManIdx];\n jobManIdx++;\n const slice = reserveSlice(jobMan);\n if (slice)\n return slice;\n }\n\n /* If this is reached, we did not schedule a slice with current seed. We need\n * to re-seed to look for newly-available work and sandboxes, ratcheting up the\n * concurrency (max # of each job running) until we find something we can do.\n */\n workingSliceCount = that.workingSliceCount();\n debugging() && console.debug(`job selection - no slice found from ${jobManList.length} jobs for concurrency=${concurrency} and ${workingSliceCount} working sandboxes`);\n } while (jobManList.length > 0 && workingSliceCount < that.maxWorkingSandboxes && concurrency++ < that.maxWorkingSandboxes);\n\n return false; /* Did not find any more work that fits. */\n }\n \n return { next };\n}\n\n/**\n * Handle sandbox.work(slice) errors.\n * @param {Sandbox} sandbox \n * @param {Slice} slice \n * @param {Error} error \n * @return {Promise<string>}\n */\nSupervisor.prototype.handleSandboxWorkError = async function Supervisor$handleSandboxWorkError (sandbox, slice, error)\n{\n if (!slice.isWorking) // Sanity. 
Exception should never fire.\n throw new Error(`handleSandboxWorkError: slice ${slice.identifier} must be WORKING.`);\n\n let logLevel, reason;\n \n if (error instanceof SandboxError)\n {\n logLevel = 'warn';\n reason = error.errorCode;\n // The message and stack properties of error objects are not enumerable,\n // so they have to be copied into a plain object this way\n // @ts-ignore\n error = Object.getOwnPropertyNames(error).reduce((o, p) => {\n o[p] = error[p]; return o;\n }, { message: 'Unexpected worker error' });\n }\n else\n {\n logLevel = 'error';\n if (error)\n reason = `Slice ${slice.sliceNumber} in state ${slice.state} of job ${slice.jobAddress} failed to complete execution with error ${this.checkCode(error)}.`;\n else\n {\n reason = `Slice ${slice.sliceNumber} in state ${slice.state} of job ${slice.jobAddress} failed to complete execution.`;\n error = new Error(reason);\n }\n // This error was unrelated to the work being done, so just return the slice\n // in the promise.catch in JobManager.runSliceOnSandbox .\n assert(slice.result === null);\n }\n \n this.handleFailedSlice(slice, error);\n\n let errorString;\n switch (reason)\n {\n case 'ENOPROGRESS':\n errorString = 'No progress error in sandbox.\\n';\n break;\n case 'ESLICETOOSLOW':\n errorString = 'Slice too slow error in sandbox.\\n';\n break;\n case 'EUNCAUGHT':\n errorString = `Uncaught error in sandbox ${error.message}.\\n`;\n break;\n case 'EFETCH':\n // The status.js processing does not have a case for 'EFETCH' yet.\n errorString = `Could not fetch data: ${error.message}.\\n`;\n break;\n }\n\n // Always display max info under debug builds, otherwise maximal error.\n // messages are displayed to the worker, only if both worker and client agree.\n const displayMaxInfo = slice.jobManager.displayMaxDiagInfo;\n\n const errorObject = {\n jobAddress: truncateAddress(slice.jobAddress, addressTruncationLength),\n sliceNumber: slice.sliceNumber,\n sandbox: sandbox.id,\n jobName: sandbox.public ? 
sandbox.public.name : 'unnamed',\n };\n if (displayMaxInfo)\n errorObject.stack += '\\n --------------------\\n' + (error.stack.split('\\n').slice(1).join('\\n'));\n\n if (error.name === 'EWORKREJECT')\n {\n reason = 'EWORKREJECT'; // The status.js processing does not have a case for 'EWORKREJECT' yet.\n error.stack = 'Sandbox was terminated by work.reject()';\n await this.handleWorkReject(sandbox, slice, error.message);\n }\n else\n {\n this.returnSlice(slice, reason);\n slice.jobManager.returnSandbox(sandbox);\n }\n\n if (errorString)\n console[logLevel](errorString, errorObject);\n else if (error.name === 'EWORKREJECT')\n console[logLevel](`Slice rejected work: ${error.message}`)\n else\n console[logLevel](`Slice failed: ${error.message}\\n`, errorObject);\n\n return reason;\n}\n\n/**\n * Slice has thrown error during execution:\n * Mark slice as failed, compensate when job is dicrete, emit events.\n * @param {Slice} slice\n * @param {Error} error\n */\nSupervisor.prototype.handleFailedSlice = function Supervisor$handleFailedSlice (slice, error)\n{\n assert(error, 'error must be valid');\n slice.collectResult(error, false);\n\n // If the slice from a job never completes and the job address exists in the ringBufferofJobs,\n // then we remove it to allow for another slice (from the same job) to be obtained by fetchTask\n this.ringBufferofJobs.buf = this.ringBufferofJobs.filter(element => element !== slice.jobAddress);\n\n this.emit('submitSliceFailed', error);\n this.emit('submitFinished');\n}\n\n// _Idx\n//\n// Return slices and sent progress reports to result-submitter-results.\n// Return slices to result-submitter-status which marks the\n// corresponding row in activeSlices to be rescheduled on usually another worker.\n//\n\n/**\n * Bulk-return multiple slices, possibly for assorted jobs.\n * Returns slices to the scheduler to be redistributed.\n * Called in the sandbox terminate handler and purgeAllWork(jobAddress)\n * and stopWork(forceTerminate).\n *\n * @param {Slice[]} slices - The slices to return to the scheduler.\n * @param {string} [reason='unknown'] - Optional reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @param {boolean} [removeSlices=true] - When true, removes slices from this.sliceInventory .\n * @returns {Promise<*>} - Response from the scheduler.\n */\nSupervisor.prototype.returnSlices = function Supervisor$$returnSlices (slices, reason = 'unknown', removeSlices = true)\n{\n if (!slices || !slices.length) return Promise.resolve();\n debugging('supervisor') && console.debug('Supervisor.returnSlices: Returning slices', slices.map(slice => slice.identifier));\n\n const slicePayload = constructReturnSliceBuckets(slices, reason);\n if (removeSlices) slices.forEach((slice) => slice.jobManager.removeSlice(slice));\n\n try\n {\n return this.resultSubmitter.send('status', {\n worker: this.workerId,\n slices: slicePayload,\n }).catch(error => {\n const errorInfo = slices.map(slice => slice.identifier).sort();\n debuggingError && console.error('Failed to return slice(s)', { errorInfo, error }, 'Will try again on new connection.');\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: { worker: this.workerId, slices: slicePayload } })\n safeClose(this.resultSubmitter);\n });\n }\n catch (error)\n {\n /* resultSubmitter can be null if worker is stopped */\n debuggingError && console.error(`Failed to return slices ${compressSlices(slices)}, no connection to result submitter:`, error);\n }\n}\n\n/** XXXpfr @todo TEMP -- Remove when 
sup2 replaces sup1 */\nfunction compressSlices(sliceArray)\n{\n const jobSliceMap = toJobMap(sliceArray, slice => slice.sliceNumber);\n return compressJobMap(jobSliceMap, false /* skipFirst*/, addressTruncationLength);\n}\n\n/**\n * Takes a slice and returns it to the scheduler to be redistributed.\n * Usually called when an exception is thrown by sandbox.work(slice, startDelayMs) .\n * Or when the supervisor tells it to forcibly stop working.\n *\n * @param {Slice} slice - The slice to return to the scheduler.\n * @param {string} [reason] - Optional reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @returns {Promise<*>} - Response from the scheduler.\n */\nSupervisor.prototype.returnSlice = function Supervisor$$returnSlice (slice, reason)\n{\n assert(slice.sliceNumber > 0 && slice.jobManager);\n debugging() && console.debug(`Supervisor.returnSlice: Returning slice ${slice.identifier} with reason ${reason}.`);\n\n if (!this.resultSubmitter)\n this.connectTo('resultSubmitter');\n\n try\n {\n slice.jobManager.removeSlice(slice);\n const payload = slice.getReturnMessagePayload(this.workerId, reason);\n return this.resultSubmitter.send('status', payload)\n .catch(error => {\n debuggingError && console.error('Failed to return slice', {\n sliceNumber: slice.sliceNumber,\n jobAddress: slice.jobAddress,\n status: slice.state.valueOf(),\n error,\n }, 'Will try again on a new connection.');\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: payload });\n safeClose(this.resultSubmitter);\n });\n }\n catch (error)\n {\n /* resultSubmitter can be null if worker is stopped */\n debuggingError && console.error(`Failed to return slice ${slice.identifier}, no connection to result submitter:`, error);\n }\n}\n\n/**\n * Send beacon to status.js for 'progress' and sliceStatus.scheduled.\n *\n * Run in an interval created in @constructor .\n * @returns {Promise<void|Response>}\n */\nSupervisor.prototype.emitProgressReport = function emitProgressReport () \n{\n const slices = constructSliceBuckets( this.readySlices(), sliceStatus.scheduled );\n constructSliceBuckets( this.workingSlices(), 'progress', slices );\n\n debugging('supervisor') && console.debug('emitProgressReport:', stringify(slices));\n\n if (slices.length)\n {\n const progressReportPayload = {\n worker: this.workerId,\n slices,\n };\n\n try\n {\n return this.resultSubmitter.send('status', progressReportPayload)\n .catch(error => {\n debuggingError && console.error('479: Failed to send status beacon update:', error/*.message*/);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: progressReportPayload })\n safeClose(this.resultSubmitter);\n });\n }\n catch (error) \n {\n /* resultSubmitter can be null if worker is stopped */\n debuggingError && console.error(`Failed to emit progress report, no connection to result submitter:`, error);\n }\n }\n}\n\n/**\n * Add a slice to the slice buckets being built. If a sliceBucket already exists for the\n * job-status-authMessage tuple, then the slice will be added to that, otherwise a new\n * sliceBucket will be added to the buckets.\n *\n * @param {Slice[]} slices - The slices.\n * @param {String} status - Status update, eg. progress or scheduled.\n * @param {Object[]} [sliceBuckets] - Slice buckets being built. 
Will be mutated in place.\n * @returns {Object[]} - mutated sliceBuckets array\n */\nfunction constructSliceBuckets (slices, status, sliceBuckets)\n{\n const jobMap = {};\n for (const slice of slices)\n {\n assert(slice.sliceNumber > 0 );\n if (!jobMap[slice.jobAddress]) jobMap[slice.jobAddress] = [];\n\n // Try to find a sliceBucket in the buckets which matches the job, status, and auth message.\n let sliceBucket = jobMap[slice.jobAddress].find(desc => {\n return desc.status === status\n && desc.authorizationMessage === slice.authorizationMessage;\n });\n\n if (!sliceBucket) jobMap[slice.jobAddress].push(slice.getMessage(status));\n else sliceBucket.sliceNumbers.push(slice.sliceNumber);\n }\n\n if (!sliceBuckets) return Object.values(jobMap);\n sliceBuckets.push(...Object.values(jobMap));\n return sliceBuckets;\n}\n \n/**\n * Add a slice to the returnSlice bucket being built. If a sliceBucket already exists for the\n * job-isEstimation-authMessage-reason tuple, then the slice will be added to that, otherwise a new\n * sliceBucket will be added to the buckets.\n *\n * @param {Slice[]} slices - The slices.\n * @param {String} [reason] - Optional reason to further characterize status; e.g. 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @param {Object[]} [sliceBuckets] - Optional slice buckets being built. Will be mutated in place.\n * @returns {Object[]} - mutated sliceBuckets array\n */\nfunction constructReturnSliceBuckets (slices, reason, sliceBuckets)\n{\n const jobMap = {};\n for (const slice of slices)\n {\n assert(slice.sliceNumber > 0 );\n if (!jobMap[slice.jobAddress]) jobMap[slice.jobAddress] = [];\n \n // Try to find a sliceBucket in the buckets which matches the job, estimation status, reason, and auth message.\n let sliceBucket = jobMap[slice.jobAddress].find(desc => {\n return desc.isEstimationSlice === slice.isEstimation\n && desc.authorizationMessage === slice.authorizationMessage\n && desc.reason === reason;\n });\n\n if (!sliceBucket) \n jobMap[slice.jobAddress].push(slice.getMessage('return', { isEstimationSlice: slice.isEstimation, reason }));\n else sliceBucket.sliceNumbers.push(slice.sliceNumber);\n }\n\n if (!sliceBuckets) return Object.values(jobMap);\n sliceBuckets.push(...Object.values(jobMap));\n return sliceBuckets;\n}\n \n// _Idx\n//\n// Task Distributor (TD): requestTask (Rq) support -- communication with TD.\n//\n\n/**\n * XXXpfr @todo Needs Work\n * For a given job, the scheduler stores an EMA approximation of average slice completion time in\n * jobPerfData.sliceCPUTime (and jobPerfData.sliceGPUTime, but we don't do the GPU analysis yet.)\n * However, each worker also tracks the same information and the ratio of local-info to scheduler-info\n * is returned by this.conversionQuantum so we can tell the task distributor how much work to return\n * from requestTask so that the work actually takes 5 minutes to complete when using all the worker sandboxes.\n * Note: \n * We average the completion times over the current jobs.\n * Define completion time in terms of sliceC(G)PUTime and sliceC(G)PUDensity\n * completion-time = (sliceCGPUTime + sliceCGPUTime) / ( sliceCPUDensity + sliceGPUDensity);\n * The local completion time is an EMA approximation of local completion-time as computed by Supervisor.recordResult.\n * The scheduler completion-time is computed directly from the corresponding row in jobPerfData.\n */\nSupervisor.prototype.conversionQuantum = function Supervisor$conversionQuantum()\n{\n let globalSpeed = 0, localSpeed = 0;\n for (const jobMan 
of this.jobManagerInventory)\n {\n const _globalTime = jobMan.globalTime;\n const _localTime = jobMan.statistics.ema;\n if (_globalTime > 0 && _localTime > 0)\n {\n globalSpeed += _globalTime;\n localSpeed += _localTime;\n }\n }\n const conversion = globalSpeed > 0 ? localSpeed / globalSpeed : 1;\n return Math.min(Math.max(conversion, 0.2), 5.0); // Truncate if conversion is too bizarre.\n}\n\n/**\n * Remove all unreferenced jobs in this.jobManagerInventory and this.cache.\n * Since job-managers are inserted into this.jobManagerInventory with a push, the job managers at the beginning are oldest.\n * Only delete #deleteCount of the oldest job-managers:\n * let deleteCount = this.jobManagerInventory.length - cachedJobsThreshold;\n * Edit cachedJobsThreshold to adjust the cache cleanup threshold.\n * @param {object[]} [newJobKeys=[]] - Jobs that should not be removed from this.jobManagerInventory and this.cache.\n */\nSupervisor.prototype.clearUnusedJobManagersAndModuleCache = function Supervisor$clearUnusedJobManagersAndModuleCache(newJobKeys=[])\n{\n let deleteCount = this.jobManagerInventory.length - this.cachedJobsThreshold;\n if (deleteCount > 0)\n {\n selectiveDebugging && console.debug(`Supervisor.clearUnusedJobManagersAndModuleCache START: deleteCount ${deleteCount}/${this.jobManagerInventory.length}/${this.cachedJobsThreshold}.`);\n const jobMap = {};\n newJobKeys.forEach(jobAddress => { jobMap[jobAddress] = 1; });\n for (const jobManager of this.jobManagerInventory)\n {\n if (!jobMap[jobManager.address])\n {\n const sliceInventory = jobManager.sliceInventory.filter(slice => slice.isActive || slice.isQueued);\n if (sliceInventory.length < 1)\n {\n this.purgeJob(jobManager);\n if (--deleteCount < 1)\n break;\n }\n }\n }\n selectiveDebugging && console.debug(`Supervisor.clearUnusedJobManagersAndModuleCache FINISH: deleteCount ${deleteCount}/${this.jobManagerInventory.length}/${this.cachedJobsThreshold}.`);\n }\n}\n\n/**\n * Ask the scheduler (task distributor) for work.\n * @param {number} [unusedSandboxSlots]\n * @param {object[]} [jobs=[]] \n */\nSupervisor.prototype.requestTask = function Supervisor$requestTask (unusedSandboxSlots, jobs = [])\n{\n if (!this.isReady() || this.isFetchingNewWork)\n return Promise.resolve();\n\n if(!unusedSandboxSlots) unusedSandboxSlots = this.unusedSandboxSlots();\n if (unusedSandboxSlots < 1)\n {\n debugging('supervisor') && console.debug('requestTask: There are no unused sandbox slots.');\n return Promise.resolve();\n }\n\n // Refresh connections.\n this.instantiateAllConnections();\n\n // We prune for over this.maxTotalSandboxes about every 15 seconds, or when must prune level is reached.\n if (this.sandboxCount() > this.mustPruneSandboxLevel || Date.now() > this.lastPrune + this.pruneFrequency)\n {\n this.lastPrune = Date.now();\n this.pruneSandboxes();\n }\n\n try\n {\n this.isFetchingNewWork = true;\n const numCPUSlotToFill = this.numberOfAvailableSandboxSlots(unusedSandboxSlots);\n if (numCPUSlotToFill < 1)\n {\n //debugging() && console.debug('Predicted workload too high; not fetching additional work yet'); <-- Save Wes' msg...\n debugging() && console.debug('Supervisor.requestTask: We have enough, so start executing some slices.');\n return this.roundRobinSlices(); // roundRobinSlices guarantees this.isFetchingNewWork === false\n }\n\n /** XXXpfr @todo Get together with Wes to figure this out. 
*/\n //let predictedLoad = this.predictLoad(Date.now() + ms(this.tuning.prefetchInterval)).load;\n\n const request = {\n numCores: numCPUSlotToFill,\n coreStats: this.getStatisticsCPU(),\n numGPUs: this.numGPU,\n //targetLoad: this.targetLoad.subtract(predictedLoad), /** XXXpfr @todo Get together with Wes to figure this out. */\n conversionQuantum: this.conversionQuantum(),\n capabilities: this.capabilities,\n paymentAddress: this.paymentAddress,\n jobAddresses: jobs.concat(this.options.jobAddresses || []), // When set, only fetches slices for these jobs.\n localExec: this.options.localExec,\n workerComputeGroups: this.generateWorkerComputeGroups(),\n minimumWage: workerTuning.minimumWage || this.options.minimumWage,\n loadedJobs: this.jobManagerInventory.map(jobMan => jobMan.address),\n readyJobs: this.jobManagerInventory.filter(jobMan => jobMan.ready).map(jobMan => jobMan.address),\n previouslyWorkedJobs: this.ringBufferofJobs.buf, // Only discrete jobs.\n rejectedJobs: this.rejectedJobs,\n };\n // Workers should be part of the public compute group by default.\n if (!booley(workerTuning.leavePublicGroup) && !booley(this.options.leavePublicGroup))\n request.workerComputeGroups.push(constants.computeGroups.public);\n\n // Call Task Distributor and handle response with this.addTaskToWorkload.\n this.fetchTask(request, (response) => this.addTaskToWorkload(request, response));\n }\n catch (error)\n {\n // Paranoid double-checking we don't accidently leave a live this.isFetchingNewWork.\n this.isFetchingNewWork = false;\n throw error;\n }\n}\n\n/** Gets the logical and physical number of cores and also the total number of sandboxes the worker is allowed to run. */\nSupervisor.prototype.getStatisticsCPU = function Supervisor$getStatisticsCPU ()\n{\n if (DCP_ENV.isBrowserPlatform)\n {\n return {\n worker: this.workerId,\n lCores: window.navigator.hardwareConcurrency,\n pCores: workerTuning.pCores || window.navigator.hardwareConcurrency,\n sandbox: this.maxWorkingSandboxes,\n }\n }\n\n return {\n worker: this.workerId,\n lCores: requireNative('os').cpus().length,\n pCores: requireNative('physical-cpu-count'),\n sandbox: this.maxWorkingSandboxes,\n }\n}\n\n/**\n * Callback for fetchTask.\n * @param {object} request \n * @param {object} response\n */\nSupervisor.prototype.addTaskToWorkload = function Supervisor$addTaskToWorkload (request, response)\n{\n try\n {\n const payload = response.payload;\n if (!response.success)\n {\n debugging() && console.debug('Task fetch failure; request=', request);\n debugging() && console.debug('Task fetch failure; response=', payload);\n // Only report errors when in 'ready' state.\n if (this.isReady()) throw new DCPError('Unable to fetch task for worker', payload);\n else return;\n }\n\n const sliceCount = payload.body.task.length || 0;\n if (sliceCount < 1)\n {\n // Display completed results so far.\n if (selectiveDebugging && this.queuedSliceCount() < 1)\n {\n const values = Object.values(this.resultMap);\n if (values.length > 0)\n {\n values.forEach((descriptor) => descriptor.slices.sort((x,y) => x-y))\n console.debug(`Recorded results: job managers ${values.length}:`, this.resultMap);\n }\n }\n this.emit('fetchedTask', { jobs: 0, slices: sliceCount });\n // There may be an extra slice to process.\n // roundRobinSlices guarantees this.isFetchingNewWork === false;\n return this.roundRobinSlices();\n }\n\n //console.log('addTaskToWorkload:3', Date.now() - this.lastTime); // SAVE\n\n /**\n * payload structure: { owner: this.address, signature: signature, auth: 
messageLightWeight, body: messageBody };\n * messageLightWeight: { workerId: worker, jobSlices, schedulerId, jobCommissions }\n * messageBody: { newJobs: await-getNewJobsForTask(dbScheduler, task, request), task }\n */\n const { body, ...authorizationMessage } = payload;\n /** @type {{ newJobs: object, task: SliceMessage[] }} */\n const { newJobs, task } = body;\n assert(newJobs); // It should not be possible to have !newJobs -- we throw on !success.\n const newJobKeys = Object.keys(newJobs);\n const jobCount = newJobKeys.length;\n\n /*\n * Ensure all jobs received from the scheduler (task distributor) are:\n * 1. If we have specified specific jobs the worker may work on, the received jobs are in the specified job list\n * 2. If we are in localExec, at most 1 unique job type was received (since localExec workers are designated for only one job)\n * If the received jobs are not within these parameters, stop the worker since the scheduler cannot be trusted at that point.\n */\n if (request.jobAddresses.length > 0 && !newJobKeys.every((ele) => request.jobAddresses.includes(ele))\n || request.localExec && jobCount > 1)\n {\n // Sup1 did not guard this diag.\n console.error(\"Worker received slices it shouldn't have. Rejecting the work and stopping.\");\n process.exit(1);\n }\n\n selectiveDebugging && console.debug(`Supervisor.addTaskToWorkload: task: ${task.length}/${this.maxWorkingSandboxes}, conversion: ${request.conversionQuantum}, jobs: ${jobCount}, authSlices: ${compressJobMap(authorizationMessage.auth.authSlices, true /* skipFirst*/, addressTruncationLength /* digits*/)}`);\n\n //console.log('addTaskToWorkload:4', Date.now() - this.lastTime); // SAVE\n\n // Clear out job managers w/o any queued slices,\n // and remove corresponding job references from module cache.\n // When a cached module no longer has any job references it is removed from the cache.\n this.clearUnusedJobManagersAndModuleCache(newJobKeys);\n\n // this.jobMap: job.address --> jobManager\n /** @type {Object.<Address, JobManager>} */\n this.jobMap = {};\n this.jobManagerInventory.forEach(jobManager => {\n this.jobMap[jobManager.address] = jobManager;\n });\n\n /** @type {Object.<Address, SliceMessage[]>} */\n const jobSliceMap = {};\n task.forEach((element) => {\n const address = String(element.jobAddress);\n if (!jobSliceMap[address]) jobSliceMap[address] = [element];\n else jobSliceMap[address].push(element);\n });\n\n debugging('supervisor') && console.debug('requestTask: slices, newJobs and jobMap', task.length, Object.keys(newJobs), Object.keys(this.jobMap));\n\n // Populate the job managers with slices, creating new job managers when necessary.\n // Set up discrete job ring buffer.\n for (const [jobAddress, jobEl] of Object.entries(newJobs))\n {\n if (this.jobMap.hasOwnProperty(jobAddress))\n this.jobMap[jobAddress].update(jobEl, jobSliceMap[jobAddress], authorizationMessage);\n else\n {\n // Add the slice messages to the job manager ctor, so that slice construction is after job manager is ready.\n const jobManager = new JobManager(this, jobEl, jobSliceMap[jobAddress], authorizationMessage);\n this.jobMap[jobAddress] = jobManager;\n this.jobManagerInventory.push(jobManager);\n\n // Populate the ring buffer based on job's discrete property.\n assert(jobEl.requirements);\n if (jobEl.requirements.discrete && this.ringBufferofJobs.find(address => address === jobEl.address) === undefined)\n this.ringBufferofJobs.push(jobEl.address);\n }\n }\n\n /**\n * The requestTask event fires when the supervisor has finished trying 
to\n * fetch work from the scheduler (task distributor). The data emitted is the\n * number of jobs and new slices to work on in the fetched task.\n *\n * @event Supervisor#requestTask\n * @type {object}\n */\n this.emit('fetchedTask', { jobs: jobCount, slices: sliceCount });\n\n //console.log('addTaskToWorkload:5', Date.now() - this.lastTime); // SAVE\n\n // Start working on the slices.\n setImmediate(() => this.roundRobinSlices());\n }\n catch (error)\n {\n this.isFetchingNewWork = false; // Paranoid double checking that we don't leave this.isFetchingNewWork live.\n this.emit('fetchTaskFailed', error);\n debuggingError && console.error('Supervisor.requestTask failed!', error);\n }\n}\n\n/**\n * Returns the number of unused sandbox slots to fill -- sent to requestTask.\n * @returns {number}\n */\nSupervisor.prototype.numberOfAvailableSandboxSlots = function Supervisor$numberOfAvailableSandboxSlots(unusedSandboxSlots)\n{\n const _readySlices = this.readySlices();\n let numCores;\n if (this.options.priorityOnly && this.options.jobAddresses.length === 0)\n numCores = 0;\n else if (_readySlices.length > 1) // We have slices ready, no need to fetch.\n numCores = 0;\n else\n {\n // There are almost no ready slices (there may be 0 or 1), fetch a full task.\n // The task is full, in the sense that it will contain slices whose\n // aggregate execution time is this.maxWorkingSandboxes * 5-minutes.\n // However, there can only be unusedSandboxSlots # of long slices.\n // Thus we need to know whether the last slice in this.readySlices() is long or not.\n // (A long slice has estimated execution time >= 5-minutes on an average worker.)\n const longSliceCount = (_readySlices.length > 0 && _readySlices[0].isLong) ? 1 : 0;\n numCores = unusedSandboxSlots - longSliceCount;\n }\n return numCores;\n}\n\n/**\n * @callback cbAddTaskToWorkload\n * @param {Response} response\n * @returns {Promise<void>}\n */\n\n/**\n * Call to fetch new slices from task distributor.\n * @param {*} request\n * @param {cbAddTaskToWorkload} addTaskToWorkload\n * @returns {Promise<void>}\n */\nSupervisor.prototype.fetchTask = async function Supervisor$fetchTask (request, addTaskToWorkload)\n{\n // Fetch a new task if we have insufficient slices queued, then start workers\n assert(this.isFetchingNewWork);\n\n this.instantiateAllConnections();\n\n // Top up sandboxes when necessary.\n const moreSandboxes = this.maxWorkingSandboxes - this.sandboxCount();\n if (moreSandboxes > 0)\n {\n // If the evaluator cannot start (e.g. 
if the evalServer is not running),\n // then the while loop will keep retrying until the evalServer comes online.\n while (true)\n {\n debugging('supervisor') && console.debug(`Supervisor.work: creating ${moreSandboxes} sandboxes; currently have ${this.maxWorkingSandboxes-moreSandboxes}`);\n try\n {\n await this.createNewSandbox(); // Place new sandbox in this.readiedSandboxes.\n if (moreSandboxes > 1)\n this.createAndSaveSandboxes(moreSandboxes - 1);\n break;\n }\n catch (error)\n {\n // Sup1 did not guard this diag.\n console.warn('Failed to ready sandboxes; will retry.', this.checkCode(error));\n await a$sleepMs(1000);\n }\n }\n\n this.checkCapabilities();\n }\n\n const fetchTimeout = setTimeout(() => {\n this.isFetchingNewWork = false;\n // Sup1 did not guard this diag.\n console.warn(`Fetch exceeded timeout, will reconnect at next watchdog interval`);\n safeClose(this.taskDistributor, 'Fetch timed out', Math.random() > 0.5).catch(error => {\n console.error(`Failed to close task-distributor connection`, error); // Sup1 did not guard this diag.\n });\n safeClose(this.resultSubmitter, 'Fetch timed out', Math.random() > 0.5).catch(error => {\n console.error(`Failed to close result-submitter connection`, error); // Sup1 did not guard this diag.\n });\n this.instantiateAllConnections();\n }, 3 * 60 * 1000); // Max out at 3 minutes to fetch.\n\n // Ensure result submitter and task distributor connections before fetching tasks.\n try\n {\n await this.resultSubmitter.keepalive();\n await this.taskDistributor.keepalive();\n }\n catch (e)\n {\n this.isFetchingNewWork = false;\n console.error('Failed to connect to result submitter, refusing to fetch slices. Will try again at next fetch cycle.'); // Sup1 did not guard this diag.\n debugging('supervisor') && console.debug('Error:', e);\n clearTimeout(fetchTimeout);\n safeClose(this.taskDistributor, 'Failed to connect to result-submitter', true).catch(error => {\n console.error(`Failed to close task-distributor connection`, error); // Sup1 did not guard this diag.\n });\n safeClose(this.resultSubmitter, 'Failed to connect to result-submitter', true).catch(error => {\n console.error(`Failed to close result-submitter connection`, error); // Sup1 did not guard this diag.\n });\n return Promise.resolve();\n }\n\n this.emit('fetchingTask');\n\n if (!this.taskDistributor) return\n return this.taskDistributor.send('requestTask', request)\n .then((response) => {\n addTaskToWorkload(response);\n })\n .catch((error) => {\n this.isFetchingNewWork = false; // Redundant.\n this.emit('fetchTaskFailed', error);\n console.error('Unable to request task from scheduler. Will try again on a new connection.', error); // Sup1 did not guard this diag.\n safeClose(this.taskDistributor, error, true);\n })\n .finally(() => {\n this.isFetchingNewWork = false;\n clearTimeout(fetchTimeout);\n });\n}\n\n/**\n * Generate the workerComputeGroups property of the requestTask message. \n * \n * Concatenate the compute groups object from dcpConfig with the list of compute groups\n * from the supervisor, and remove the public group if accidentally present. Finally,\n * we transform joinSecrets/joinHash into joinHashHash for secure transmission.\n *\n * @note computeGroup objects with joinSecrets are mutated to record their hashes. This\n * affects the supervisor options and dcpConfig. 
Re-adding a joinSecret property\n * to one of these will cause the hash to be recomputed.\n */\nSupervisor.prototype.generateWorkerComputeGroups = function Supervisor$generateWorkerComputeGroups ()\n{\n let computeGroups = Object.values(workerTuning.computeGroups || {});\n if (this.options.computeGroups)\n computeGroups = computeGroups.concat(this.options.computeGroups);\n computeGroups = computeGroups.filter(group => group.id !== constants.computeGroups.public.id);\n const hashedComputeGroups = [];\n for (const group of computeGroups)\n {\n const groupCopy = Object.assign({}, group);\n if ((group.joinSecret || group.joinHash) && (!group.joinHashHash || this.lastDcpsid !== this.taskDistributor.dcpsid))\n {\n let joinHash;\n if (group.joinHash)\n joinHash = group.joinHash.replace(/\\s+/g, ''); // strip whitespace\n else\n joinHash = calculateJoinHash(groupCopy);\n\n groupCopy.joinHashHash = hash.calculate(hash.eh1, joinHash, this.taskDistributor.dcpsid);\n delete groupCopy.joinSecret;\n delete groupCopy.joinHash;\n debugging('computeGroups') && console.debug(`Calculated joinHash=${joinHash} for`, groupCopy);\n }\n hashedComputeGroups.push(groupCopy);\n }\n this.lastDcpsid = this.taskDistributor.dcpsid;\n debugging('computeGroups') && console.debug('Requesting ', computeGroups.length, 'non-public groups for session', this.lastDcpsid);\n return hashedComputeGroups;\n}\n\n// _Idx\n//\n// Aggregators from the job managers.\n// Note: Not all functions are used yet.\n//\n/** XXXpfr @todo Figure out which aggregators to keep. */\n\n/**\n * Gather the count of job managers with queuedSlices.\n * @returns {number}\n */\nSupervisor.prototype.activeJobCount = function Supervisor$activeJobCount ()\n{\n let count = 0;\n this.jobManagerInventory.forEach((jobManager) => {\n if (jobManager.queuedSlices.length > 0) count++;\n });\n return count;\n}\n\n/**\n * Gather the ready slices from the job managers.\n * @returns {Slice[]}\n */\nSupervisor.prototype.readySlices = function Supervisor$readySlices ()\n{\n const readySlices = [];\n this.jobManagerInventory.forEach((jobManager) => {\n readySlices.push(...jobManager.readySlices);\n });\n return readySlices;\n}\n\n/**\n * Gather the working slices in the job managers.\n * @returns {Slice[]}\n */\nSupervisor.prototype.workingSlices = function Supervisor$workingSlices ()\n{\n const workingSlices = [];\n this.jobManagerInventory.forEach((jobManager) => {\n workingSlices.push(...jobManager.workingSlices);\n });\n return workingSlices;\n}\n\n/**\n * Gather the count of various kinds of slices over all the job managers.\n * @param {string} predicate - 'all;, 'ready', 'queued', 'reserved', 'working', 'workingOnly'.\n * @returns {number}\n */\nSupervisor.prototype.predicateSliceCount = function Supervisor$predicateSliceCount (predicate)\n{\n let count = 0;\n switch (predicate)\n {\n case 'all':\n this.jobManagerInventory.forEach((jobManager) => {\n count += jobManager.sliceInventory.length;\n });\n break\n case 'ready':\n this.jobManagerInventory.forEach((jobManager) => {\n count += jobManager.readySlices.length;\n });\n break;\n case 'queued':\n this.jobManagerInventory.forEach((jobManager) => {\n count += jobManager.queuedSlices.length;\n });\n break;\n case 'reserved':\n this.jobManagerInventory.forEach((jobManager) => {\n count += jobManager.reservedSlices.length;\n });\n break;\n case 'working': // both working and reserved (viz., soon-to-be-working)\n this.jobManagerInventory.forEach((jobManager) => {\n count += jobManager.workingSlices.length;\n });\n break;\n 
case 'workingOnly':\n this.jobManagerInventory.forEach((jobManager) => {\n count += jobManager.workingSlicesOnly.length;\n });\n break;\n }\n return count;\n}\n/** @returns {number} */\nSupervisor.prototype.sliceCount = function Supervisor$sliceCount () { return this.predicateSliceCount('all'); }\n/** @returns {number} */\nSupervisor.prototype.readySliceCount = function Supervisor$readySliceCount () { return this.predicateSliceCount('ready'); }\n/** @returns {number} */\nSupervisor.prototype.queuedSliceCount = function Supervisor$queuedSliceCount () { return this.predicateSliceCount('queued'); }\n/** @returns {number} */\nSupervisor.prototype.reservedSliceCount = function Supervisor$reservedSliceCount () { return this.predicateSliceCount('reserved'); }\n/** @returns {number} */\nSupervisor.prototype.workingSliceCount = function Supervisor$workingSliceCount () { return this.predicateSliceCount('working'); }\n/** @returns {number} */\nSupervisor.prototype.workingSliceOnlyCount = function Supervisor$workingSliceOnlyCount () { return this.predicateSliceCount('workingOnly'); }\n\n/**\n * Gather the count of working sandboxes over all the job managers.\n * @returns {number}\n */\nSupervisor.prototype.sandboxCount = function Supervisor$sandboxCount ()\n{\n return this.readiedSandboxes.length + this.sandboxInventory.filter((sandbox) => !sandbox.isTerminated).length;\n}\n\n/**\n * Gather the count of working sandboxes over all the job managers.\n * @returns {number}\n */\nSupervisor.prototype.workingSandboxCount = function Supervisor$workingSandboxCount ()\n{\n return this.sandboxInventory.filter((sandbox) => !sandbox.isTerminated && sandbox.isWorking).length;\n}\n\n// _Idx\n//\n// Sandbox creation and management.\n// \n\n/**\n * Create and start a Sandbox.\n * When this.readiedSandboxes.length > 0, use one of those sandboxes, instead of creating a new one.\n * @param {number} [delayMs=0] - The delay ms when calling sandbox.start(delayMs) .\n * @returns {Promise<Sandbox>}\n */\nSupervisor.prototype.createSandbox = function Supervisor$createSandbox (delayMs = 0)\n{\n if (this.readiedSandboxes.length > 0)\n {\n const sandbox = this.readiedSandboxes.pop();\n this.sandboxInventory.push(sandbox);\n return Promise.resolve(sandbox);\n }\n // Do not place in this.readiedSandboxes, we'll directly use the return value of createSandbox.\n return this.createNewSandbox(delayMs, true/*putInInventory*/);\n}\n \n/**\n * Create and start a Sandbox.\n * Store it in this.readiedSandboxes.\n * @param {boolean} [putInInventory=false]\n * @param {number} [delayStartMs=0] - Millisecond delay when calling sandbox.start(delayStartMs), otherwise return it and use it.\n * @returns {Promise<Sandbox>}\n */\nSupervisor.prototype.createNewSandbox = function Supervisor$createNewSandbox (delayStartMs = 0, putInInventory = false)\n{\n const rawSandbox = new Sandbox(this.cache, { ...this.options.sandboxOptions }, this.allowedOrigins);\n this.hookUpSandboxListeners(rawSandbox);\n return rawSandbox.start(delayStartMs)\n .then((sandbox) => {\n if (putInInventory) this.sandboxInventory.push(sandbox);\n else this.readiedSandboxes.push(sandbox);\n return sandbox;\n })\n .catch((error) => {\n if (!error) error = new Error('Unknown error creating sandbox.');\n debuggingWarn && console.warn(`Supervisor.createNewSandbox: Failed to start sandbox ${rawSandbox.identifier}.`, error);\n rawSandbox.terminate(false);\n if (error.code === 'ENOWORKER')\n throw new DCPError(\"Cannot use localExec without dcp-worker installed. 
Use the command 'npm install dcp-worker' to install the neccessary modules.\", 'ENOWORKER');\n throw error;\n });\n}\n\n/**\n * Bulk: create and start Sandboxes and save in this.readiedSandboxes.\n * @param {number} numSandboxes - The number of sandboxes to create.\n * @returns {Promise<void>}\n */\nSupervisor.prototype.createAndSaveSandboxes = async function Supervisor$createAndSaveSandboxes (numSandboxes)\n{\n const promises = [], errors = [];\n let first = true;\n for (let k = 0; k < numSandboxes; k++)\n {\n const promise = this.createNewSandbox(first ? 0: this.delayMs())\n .catch((error) => errors.push(this.checkCode(error)));\n promises.push(promise);\n first = false;\n }\n\n await Promise.all(promises);\n\n if (errors.length) // Sup1 did not guard this diag.\n console.warn(`Failed to ready ${errors.length} of ${numSandboxes} sandboxes.`, errors);\n\n // Sort so that pop() will return sandboxes in increasing order.\n this.readiedSandboxes.sort((x,y) => y.id - x.id);\n\n debugging('supervisor') && console.debug(`createAndSaveSandboxes: Created ${numSandboxes-errors.length} sandboxes.`, this.readiedSandboxes.map(s => s.id));\n}\n\n/**\n * For a given sandbox, hook up all the Sandbox listeners.\n * @param {Sandbox} sandbox \n */\nSupervisor.prototype.hookUpSandboxListeners = function hookUpSandboxListeners (sandbox) \n{\n sandbox.addListener('ready', () => this.emit('sandboxReady', sandbox));\n\n sandbox.addListener('start', () => {\n this.emit('sandboxStart', sandbox);\n\n if (sandbox.slice)\n {\n try\n {\n const statusPayload = sandbox.slice.getMessagePayload(this.workerId, 'begin');\n this.resultSubmitter.send('status', statusPayload).catch((error) => {\n debuggingError && console.error(`Error sending 'status' for slice ${sandbox.slice.identifier}:\\n\\t${error}\\n\\tWill try again on a new connection`);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: statusPayload });\n safeClose(this.resultSubmitter);\n });\n } \n catch (error)\n {\n /* resultSubmitterConnection can be null if worker is stopped */\n debuggingError && console.error(`Failed to send 'begin' status for slice ${sandbox.slice.identifier}, no connection to result submitter`, error);\n }\n }\n });\n\n sandbox.addListener('workEmit', ({ eventName, payload }) => {\n // Need to check if the sandbox hasn't been assigned a slice yet.\n if (!sandbox.slice)\n {\n // Sup1 did not guard this diag.\n console.error(`Sandbox not assigned a slice before sending workEmit message to scheduler.\\n\\t'workEmit' event originates from '${eventName}' event`, payload);\n }\n else\n {\n const slice = sandbox.slice;\n // Sometimes a sliceNumber===0 workEmit comes in before the client bundle is properly loaded.\n // Also happens with minor dcp-client version mismatches.\n // sliceNumber===0 <==> authorizationMessage undefined...\n if (!slice.authorizationMessage) // Sup1 did not guard this diag.\n console.warn(`workEmit: missing authorization message for slice ${slice.identifier}`);\n else if (this.eventRouter) // No reason to emit if event router is closed.\n {\n const workEmitPayload = {\n eventName,\n payload,\n job: slice.jobAddress,\n slice: slice.sliceNumber,\n worker: this.workerId,\n authorizationMessage : slice.authorizationMessage,\n };\n\n const workEmitPromise = this.eventRouter.send('workEmit', workEmitPayload).catch(error => {\n debuggingWarn && console.warn(`workEmit: Unable to send ${eventName} for slice ${slice.identifier}: ${error.message}.\\n\\tTrying again on a new connection.`);\n 
this.eventRouterMessageQueue.push({ operation: 'workEmit', data: workEmitPayload })\n safeClose(this.eventRouter); // stopWork could slip-in during eventRouter.send\n if (this.debugBuild) console.error('workEmit error:', error); // Sup1 did not guard this diag.\n });\n\n if (this.debugBuild)\n {\n workEmitPromise.then(result => { // Sup1 did not guard this diag.\n if (!result || !result.success) /*debuggingWarn &&*/ console.warn('workEmit: Event router did not accept event', result);\n });\n }\n }\n }\n });\n\n sandbox.on('rejectedWorkMetrics', (data) => {\n // If the slice already has rejectedTimeReport, add this data to it. If not, assign this data to slices rejectedTimeReport property\n if (sandbox.slice) \n {\n if (!sandbox.slice.rejectedTimeReport) sandbox.slice.rejectedTimeReport = data.timeReport;\n else \n {\n ['total', 'CPU', 'webGL'].forEach((key) => {\n if (data.timeReport[key]) sandbox.slice.rejectedTimeReport[key] += data.timeReport[key];\n })\n }\n }\n });\n\n // If the sandbox terminated and we are not shutting down, then we should return all work which is\n // currently not being computed if all sandboxes are dead and the attempt to create a new one fails.\n sandbox.on('terminated',async () => {\n let nonTerminatedSandboxes = this.sandboxInventory.filter(sbx => !sbx.isTerminated);\n if (nonTerminatedSandboxes.length === 0 && this.worker.working)\n {\n debugging('supervisor') && console.debug(`hookUpSandboxListeners: Try to create 1 sandbox in the sandbox-terminated-handler.`);\n let _sandbox;\n try\n {\n _sandbox = await this.createNewSandbox(); // Place in this.readiedSandboxes.\n } catch (e) {}\n\n // If we cannot create a new sandbox, that probably means we're on a screensaver worker\n // and the screensaver is down. So return the slices to the scheduler.\n if (!_sandbox) this.screenSaverDestroy();\n }\n });\n}\n\n/**\n * Terminate extra sandboxes over the limit: this.maxTotalSandboxes.\n * First terminate assigned sandboxes which are unlikely to be used with the current ready slices.\n * Then terminate the unassigned sandboxes: this.readiedSandboxes.\n * (There should be no readied sandboxes at this point.)\n * Then round-robin prune 1 assigned sandbox from each jobmanager.\n * XXXpfr @todo Prioritize sandboxes that we wish to keep.\n * E.g. 
When a sandbox is especially expensive to assign.\n */\nSupervisor.prototype.pruneSandboxes = function Supervisor$pruneSandboxes () \n{\n let pruneCount = this.sandboxCount() - this.maxTotalSandboxes;\n if (pruneCount <= 0) return;\n selectiveDebugging && console.debug(`Supervisor.pruneSandboxes START: pruneCount ${pruneCount}/${this.sandboxCount()}/${this.maxTotalSandboxes}.`);\n // Only prune the extras: jm.assignedSandboxes.length > jm.queuedSlices.length .\n // Round-robin prune 1 extra assigned sandbox from each jobmanager.\n const readyJobManagers = this.jobManagerInventory.filter(jm => jm.ready);\n while (true)\n {\n const _pruneCount = pruneCount;\n for (const jm of readyJobManagers)\n {\n if (jm.pruneExtraAssignedSandbox())\n {\n if (--pruneCount < 1)\n {\n selectiveDebugging && console.debug(`Supervisor.pruneSandboxes FINISH: unpruned ${pruneCount}/${this.sandboxCount()}/${this.maxTotalSandboxes}.`);\n return;\n }\n }\n }\n if (pruneCount === _pruneCount)\n break;\n }\n assert(pruneCount > 0);\n // Prune the excess non-assigned sandboxes -- we should never hit this.\n if (this.readiedSandboxes.length > 0)\n {\n const toPrune = this.readiedSandboxes.slice(0, pruneCount);\n this.readiedSandboxes = this.readiedSandboxes.slice(pruneCount);\n toPrune.forEach(sandbox => sandbox.terminate(false));\n pruneCount -= toPrune.length;\n if (pruneCount < 1)\n {\n selectiveDebugging && console.debug(`Supervisor.pruneSandboxes FINISH: unpruned ${pruneCount}/${this.sandboxCount()}/${this.maxTotalSandboxes}.`);\n return;\n }\n }\n // Round-robin prune 1 assigned sandbox from each jobmanager.\n while (true)\n {\n const _pruneCount = pruneCount;\n for (const jm of readyJobManagers)\n {\n if (jm.pruneAssignedSandbox())\n {\n if (--pruneCount < 1)\n {\n selectiveDebugging && console.debug(`Supervisor.pruneSandboxes FINISH: unpruned ${pruneCount}/${this.sandboxCount()}/${this.maxTotalSandboxes}.`);\n return;\n }\n }\n }\n if (pruneCount === _pruneCount)\n break;\n }\n this.sandboxInventory = this.sandboxInventory.filter((sandbox) => !sandbox.isTerminated);\n selectiveDebugging && console.debug(`Supervisor.pruneSandboxes FINISH: unpruned ${pruneCount}/${this.sandboxCount()}/${this.maxTotalSandboxes}.`);\n}\n\n// _Idx\n//\n// Result-submitter-result support functions.\n// Send in the results!!!\n//\n\n/**\n * Submits the slice results to the result-submitter service.\n * Then remove the slice from the its job manager.\n *\n * @param {Slice} slice - The slice to submit.\n * @returns {Promise<void>}\n */\nSupervisor.prototype.recordResult = function Supervisor$recordResult (slice)\n{\n // It is possible for slice.result to be undefined when there are upstream errors.\n if ( !(slice && slice.result))\n throw new Error(`recordResult: slice.result is undefined for slice ${slice.identifier}. 
This is ok when there are upstream errors.`); \n if (!slice.isComplete)\n throw new Error('Cannot record result for slice that has not completed execution successfully.');\n\n debugging('supervisor') && console.debug(`supervisor: recording result for slice ${slice.identifier}.`);\n\n /* @see result-submitter::result for full message details */\n const metrics = { GPUTime: 0, CPUTime: 0, CPUDensity: 0, GPUDensity: 0, total: 0 };\n const payloadData = {\n slice: slice.sliceNumber,\n job: slice.jobAddress,\n worker: this.workerId,\n paymentAddress: this.paymentAddress,\n metrics,\n authorizationMessage: slice.authorizationMessage,\n }\n\n const timeReport = slice.timeReport;\n if (timeReport)\n {\n debugging('supervisor') && console.debug('recordResult timeReport', timeReport);\n // If slice takes less than 1ms to execute, CPUTime will be 0, so compensate.\n if (timeReport.CPU < 1)\n {\n timeReport.CPU++;\n timeReport.total++;\n }\n if (timeReport.total < timeReport.CPU + timeReport.webGL)\n {\n // Compensate or throw? For now we compensate.\n debuggingWarn && console.warn(`Supervisor.recordResult:: Inconsistent time report -- total < CPU + webGL -- ${stringify(timeReport)}`)\n //throw new Error(`recordResult: Inconsistent time report -- total < CPU + webGL -- ${stringify(timeReport)}`)\n timeReport.total = timeReport.CPU + timeReport.webGL;\n }\n if (timeReport.total > 0)\n {\n slice.jobManager.updateStatistics(timeReport);\n metrics.total = timeReport.total;\n metrics.CPUTime = timeReport.CPU;\n metrics.GPUTime = timeReport.webGL;\n metrics.CPUDensity = metrics.CPUTime / timeReport.total;\n metrics.GPUDensity = metrics.GPUTime / timeReport.total;\n }\n }\n\n this.emit('submittingResult');\n\n if (!this.resultSubmitter)\n this.connectTo('resultSubmitter');\n\n if (slice.resultStorageType === 'pattern')\n return this.sendResultToRemote(slice)\n .then((response) => {\n payloadData.result = response;\n this.sendToResultSubmitter(slice, payloadData);\n });\n\n payloadData.result = encodeDataURI(slice.result.result);\n return this.sendToResultSubmitter(slice, payloadData);\n}\n\n/**\n * @param {Slice} slice\n * @param {*} payloadData\n * @returns {Promise<void>}\n */\nSupervisor.prototype.sendToResultSubmitter = async function Supervisor$sendToResultSubmitter (slice, payloadData)\n{\n try\n {\n debugging('supervisor') && console.debug('Supervisor.recordResult: payloadData', payloadData.result.slice(0, 256), slice.identifier);\n\n return await this.resultSubmitter.send('result', payloadData)\n .then((resp) => {\n if (!resp.success)\n throw resp.payload;\n\n debugging('supervisor') && console.debug('recordResult: SUCCESS', slice.identifier);\n\n const receipt = {\n accepted: true,\n payment: resp.payload.slicePaymentAmount,\n };\n this.emit('submittedResult', resp.payload);\n this.emit('dccCredit', receipt);\n });\n }\n catch (error)\n {\n // Sup1 did not guard this diag.\n console.error(`Failed to submit results to scheduler for slice ${payloadData.slice} of job ${payloadData.job}`, error?error:'');\n //slice.jobManager.dumpSlices('recordResult');\n this.resultSubmitterMessageQueue.push({ operation: 'result', data: payloadData });\n const msg = safeCloseEx('resultSubmitter', this.resultSubmitter);\n if (!error && msg) error = new Error(msg); // eslint-disable-line no-ex-assign\n this.emit('submitSliceFailed', error);\n }\n finally\n {\n slice.markAsFinished();\n this.emit('submitFinished');\n // Remove the slice from the job manager.\n slice.jobManager.removeSlice(slice);\n if (this.sliceTiming)\n 
{\n slice['resultDelta'] = Date.now() - slice['resultDelta'];\n console.debug(`recordResult(${slice['queueingDelta']}, ${slice['executionDelta']}, ${slice['resultDelta']}): Completed slice ${slice.identifier}.`);\n }\n if (selectiveDebugging)\n {\n if (!this.resultMap[slice.jobAddress]) this.resultMap[slice.jobAddress] = { slices: [], totalTimes: [] };\n this.resultMap[slice.jobAddress].slices.push(slice.sliceNumber);\n this.resultMap[slice.jobAddress].totalTimes.push(payloadData.metrics.total);\n }\n }\n}\n\n/**\n * Send a work function's result to a server that speaks our DCP Remote Data Server protocol.\n * @param {Slice} slice - Slice object whose result we are sending.\n * @returns {Promise<string>}\n * @throws When HTTP status not in the 2xx range.\n */\nSupervisor.prototype.sendResultToRemote = function Supervisor$sendResultToRemote (slice)\n{ \n // Construct postParams.\n const postParams = { ...slice.resultStorageParams };\n postParams.element = slice.sliceNumber;\n /** Currently data will be outputed as a JSON object, XXXpfr @todo: Support file upload and other contentTypes. */\n postParams.contentType = 'application/json';\n\n const result = slice.result.result;\n if (result) postParams.content = JSON.stringify(result);\n else postParams.error = JSON.stringify(slice.error);\n debugging('supervisor') && console.debug('sendResultToRemote: content: ', (result ? postParams.content : postParams.error).slice(0, 512));\n\n // Construct url.\n const sliceResultUri = makeDataURI('pattern', slice.resultStorageDetails, {\n slice: slice.sliceNumber,\n job: slice.jobAddress,\n });\n debugging() && console.debug('sendResultToRemote sliceResultUri: ', sliceResultUri);\n const url = new DcpURL(sliceResultUri);\n\n // Check allowed origins.\n if (this.allowedOrigins.indexOf(url.origin) === -1 && this.sendResults.indexOf(url.origin) === -1)\n throw new Error(`Invalid origin for remote result storage: '${url.origin}'`);\n\n return justFetch(url, 'JSON', 'POST', false, postParams)\n .then((response) => encodeDataURI(JSON.stringify(response)));\n}\n\n// _Idx\n//\n// Reject.\n//\n\n/**\n * Handles reassigning or returning a slice that was rejected by a sandbox.\n *\n * If the slice does not have a rejected property already, reassign the\n * slice to a new sandbox and add a rejected property to the slice to\n * indicate it has already rejected once.\n *\n * If the slice rejects with a reason, or has a rejected time stamp\n * (ie. has been rejected once already) then return all slices from the\n * job to the scheduler and terminate all sandboxes with that jobAddress.\n *\n * The sandbox will be terminated.\n *\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n */\nSupervisor.prototype.handleWorkReject = async function Supervisor$handleWorkReject (sandbox, slice, rejectReason)\n{\n debugging() && console.debug('handleWorkReject', rejectReason, slice.rejectedTimeStamp, slice.identifier);\n\n // Do a hard flush of the microtask queue and finish the current event loop.\n await new Promise((resolve) => setImmediate(() => setTimeout(resolve, 0)));\n\n const jobManager = slice.jobManager;\n jobManager.rejectedJobReasons.push(rejectReason); // memoize reasons\n\n // First time rejecting without a reason. 
Try assigning slice to a new sandbox.\n if (rejectReason === 'false' && !slice.rejectedTimeStamp)\n {\n // Set rejected time stamp.\n slice.rejectedTimeStamp = Date.now();\n // Schedule the slice for execution.\n jobManager.scheduleSlice(slice, true /* placeInTheFrontOfTheQueue*/);\n // Slice has been rescheduled, but we still need to terminate the sandbox.\n jobManager.returnSandbox(sandbox);\n }\n else\n { \n // Slice has a reason OR rejected without a reason already and got stamped.\n // Add to array of rejected jobs.\n let rejectedJob = {\n address: slice.jobAddress,\n reasons: jobManager.rejectedJobReasons,\n }\n this.rejectedJobs.push(rejectedJob);\n\n // Purge the job.\n this.purgeJob(jobManager);\n\n // Tell everyone all about it, when allowed.\n if (jobManager.displayMaxDiagInfo)\n {\n const suffixMsg = '\\n\\tAll slices with the same jobAddress returned to the scheduler.\\n\\tAll sandboxes with the same jobAddress are terminated.';\n if (slice.rejectedTimeStamp)\n console.warn(`work.reject: The slice ${slice.identifier} was rejected twice.${suffixMsg}`);\n else\n console.warn(`work.reject: The slice ${slice.identifier} was rejected with reason ${rejectReason}.${suffixMsg}`);\n }\n }\n}\n\n// _Idx\n//\n// Unused functions that we need to review.\n// 1) destroy, shutdown, halt -- possibly need to incorporate these ideas in stopWork\n// 2) predictLoad -- XXXpfr: I really feel bad about not being able to figure out how to incorporate\n// this into the design of sup2. This was a central part of Wes' design of sup2.\n// I need to collaborate with Wes to resolve my ignorance.\n//\n\n/**\n * UNUSED\n * @deprecated -- may use later\n **/\nSupervisor.prototype.destroy = function Supervisor$destory()\n{\n selectiveDebugging && console.debug(`Supervisor.screenSaverDestroy: destroying Supervisor and everything else.`);\n this.stopWork(true /*forceTerminate*/);\n if (this.state) this.state.destroy();\n if (this.progressReportTimer) clearInterval(this.progressReportTimer);\n if (this.watchdogTimer) clearInterval(this.watchdogTimer);\n this.state = null;\n this.progressReportTimer = null;\n this.watchdogTimer = null;\n this.jobManagerInventory = null;\n this.sandboxInventory = [];\n this.readiedSandboxes = [];\n this.closeConnections();\n}\n\n/**\n * UNUSED\n * @deprecated -- may use later \n * Halt the Supervisor as quickly as possible.\n **/\nSupervisor.prototype.halt = function Supervisor$halt()\n{\n this.state.setIf('ready', 'stopping');\n if (!this.state.is('stopping'))\n throw new Error(`Supervisor has an invalid state ${this.state} for halt`);\n clearInterval(this.watchdogTimer);\n\n for (let jobMan of this.jobManagerInventory)\n {\n jobMan.state.setIf('ready', 'stop');\n for (const sandbox of jobMan.workingSandboxes)\n {\n sandbox.stop(); // NYI -- will terminate.\n }\n }\n}\n \n/**\n * UNUSED\n * @deprecated -- may use later \n * Shutdown the supervisor; attempts to return work which will not be finished before timeout expires.\n * The shutdown is complete once this supervisor emits the stopped state change.\n */\nSupervisor.prototype.shutdown = function Supervisor$shutdown(timeoutMs)\n{\n var ps = [], returnSliceInventory = [];\n var timer;\n\n this.state.setIf('ready', 'stopping');\n if (!this.state.is('stopping'))\n throw new Error(`Supervisor has an invalid state ${this.state} for shutdown`);\n clearInterval(this.watchdogTimer);\n\n for (let jobMan of this.jobManagerInventory)\n {\n jobMan.state.setIf('ready', 'stop');\n\n for (let slice of jobMan.sliceInventory)\n {\n if 
(slice.state.is('initial') || slice.state.is('ready'))\n {\n returnSliceInventory.push(slice);\n }\n else if (slice.state.is(sliceStatus.working))\n {\n ps.push(new Promise((resolve, reject) => {\n slice.state.on('change', (status) => {\n if (status === 'done')\n resolve();\n });\n }));\n }\n }\n }\n\n const reason = 'Supervisor.shutdown';\n this.returnSlices(returnSliceInventory, reason);\n timer = setTimeout(this.halt.bind(this), timeoutMs);\n Promise.all(ps)\n .then(() => {\n clearTimeout(timer);\n this.state.set('stopping', 'stopped');\n })\n .catch((e) => {\n if (e.code !== 'DCP_SUPERVISOR_ESYNC')\n throw e; /* becomes unhandled rejection */\n });\n}\n\n/** \n * Factory function which generates a list of origins which are safe to communicate \n * with for this purpose. Currently-valid purposes (more will be added):\n * - any\n * - fetchData\n * - fetchWorkFunctions\n * - fetchArguments\n * - sendResults\n */\nSupervisor.prototype.makeSafeOriginList = function Supervisor$$makeSafeOriginList(purpose)\n{\n var list = [];\n \n if (this[purpose])\n list = list.concat(this[purpose]);\n \n /* Add 'any' origin(s) to list iff not in localExec, or in localExec and purpose is sendResults */\n if (!this.options.localExec || (this.options.localExec && purpose === 'sendResults'))\n list = list.concat(this.allowedOrigins)\n \n return list;\n}\n \n /**\n * UNUSED -- DOES NOT WORK YET.\n * NEED TO WORK WITH WES TO FIGURE OUT BEST WAY TO GET PREDICTLOAD TO WORK.\n * Predict the load on this supervisor based on the local job measurement data.\n * Works by looking at current conditions and available slices, and tries to guess\n * in what order they will be finished, working, etc. \n *\n * The simulation is very naive, but is expected to be accurate several seconds\n * into the future, particularly as we approach the end of a task.\n *\n * @param {number} whenMs \n * \n * @returns {Object<load, jobManagerInventory>} where load is and instance of Load and the predicted \n * load at the prediction time, and jobManagerInventory \n * is a counterfeit which holds the predicted state of \n * the jobManagerInventory at that time.\n */\nSupervisor.prototype.predictLoad = function Supervisor$predictLoad (whenMs)\n{\n /** @type {JobManager[]} */\n var jmi = new Inventory(); /* Inventory of counterfeit JobManagers. */\n var load = new Load(0,0); /* This \"current\" load throughout the prediction. */\n /** @type {Slice} */\n var next; /* The next slice to \"finish\". */\n\n /* Initialize data structures for prediction from current activity. 
*/\n for (let jobMan of this.jobManagerInventory.filter(jm => jm.state.is('ready') && jm.sliceInventory.length))\n {\n jobMan = jobMan.counterfeit();\n jmi.push(jobMan);\n jobMan.sliceInventory.forEach((s) => s.state.setIf('initial', 'ready'));\n }\n next = findNextSlice();\n \n /**\n * Routine that finds the slice that will end next (soonest.)\n * @returns {Slice}\n */\n function findNextSlice()\n {\n /** @type {Slice} */\n var _next;\n for (let jobMan of jmi)\n {\n const _workingSlices = jobMan.workingSlices;\n for (let slice of _workingSlices)\n {\n //\n // slice.etaMs is the estimated time interval until slice execution completes.\n //\n // If the slice hasn't started,\n // slice.etaMs = slice.jobManager.estimateWallMs,\n // else if the slice has completed execution:\n // slice.etaMs = 0.\n // else if the slice has started:\n // slice.jobManager.estimateWallMs - (Date.now() - slice.startTime).\n //\n if (_next && (_next.etaMs <= slice.etaMs))\n continue;\n\n _next = slice;\n }\n }\n load.add(_next.jobManager.metrics);\n \n return _next;\n }\n\n /* At this point, jmi is an Inventory of counterfeit job managers that are \"ready\" for\n * work, next.etaMs is the time interval until the next slice will finish, and we have\n * a reasonably accurate picture of our current load.\n *\n * Next, we \"end\" this slice, try to fill all cores, and push the timeline forward to\n * the next predicted end of slice.\n */\n for (next = findNextSlice();\n next && (next.etaMs < whenMs);\n next = findNextSlice())\n {\n let ended = next;\n let cursor = this.makeJobSelectionCursor(jmi);\n\n /* \"end\" this slice */\n load.subtract(ended.jobManager.metrics);\n /* Fake out collecting result to transition state to FINISHED. */\n ended.collectResult(null);\n\n /* \"start\" as many slices as we can - given our CPU/GPU constraints, slice data in memory, etc */\n while (this.targetLoad.fits(load))\n {\n let slice = cursor.next();\n if (!slice)\n break; /* Running out of work that fits. */\n\n if (!load.fits(this.targetLoad, slice.jobManager.metrics))\n continue;\n\n /* Pick a ready slice from this job and add its anticipated load to our current load if it will fit */\n slice = slice.jobManager.readySlices.shift();\n slice.markAsWorking(); // ?? Not sure this is correct.\n //slice.etaMs = ended.etaMs + slice.jobManager.estimateWallMs; wtf?!?! <--- LOOK HERE\n\n load.add(slice.jobManager.metrics);\n }\n }\n\n return { load, jobManagerInventory: jmi };\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/index.js?");
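Editorial illustration (not part of the published bundle): the minimal sketch below approximates how the constructReturnSliceBuckets helper shown above groups returned slices into one bucket per job / isEstimation / authorizationMessage / reason tuple before the payload is handed to the result submitter. The plain bucket object used here is a hypothetical simplification of what slice.getMessage('return', ...) produces in the real code.

function sketchReturnSliceBuckets (slices, reason)
{
  const jobMap = {};
  for (const slice of slices)
  {
    if (!jobMap[slice.jobAddress]) jobMap[slice.jobAddress] = [];

    // Reuse an existing bucket only when estimation status, auth message, and reason all match.
    const bucket = jobMap[slice.jobAddress].find(desc =>
      desc.isEstimationSlice === slice.isEstimation
      && desc.authorizationMessage === slice.authorizationMessage
      && desc.reason === reason);

    if (bucket)
      bucket.sliceNumbers.push(slice.sliceNumber);
    else
      jobMap[slice.jobAddress].push({ // hypothetical bucket shape, for illustration only
        job: slice.jobAddress,
        sliceNumbers: [slice.sliceNumber],
        isEstimationSlice: slice.isEstimation,
        authorizationMessage: slice.authorizationMessage,
        reason,
      });
  }
  // As in the bundled code, one array of buckets per job address is returned unflattened.
  return Object.values(jobMap);
}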
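A second editorial sketch, this time of the clamp performed by Supervisor.prototype.conversionQuantum above: the worker reports the ratio of its locally measured slice completion time to the scheduler's estimate so the task distributor can size the next task. The { globalTime, localTime } job shape below is a hypothetical stand-in for jobMan.globalTime and jobMan.statistics.ema.

function sketchConversionQuantum (jobManagers)
{
  let globalTime = 0, localTime = 0;
  for (const jm of jobManagers)
  {
    // Only jobs with both a scheduler-side and a local measurement contribute to the ratio.
    if (jm.globalTime > 0 && jm.localTime > 0)
    {
      globalTime += jm.globalTime;
      localTime += jm.localTime;
    }
  }
  const conversion = globalTime > 0 ? localTime / globalTime : 1;
  // Truncate to [0.2, 5.0], as in the bundled code, so one bizarre measurement cannot skew the request.
  return Math.min(Math.max(conversion, 0.2), 5.0);
}

// e.g. sketchConversionQuantum([{ globalTime: 300, localTime: 150 }]) returns 0.5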
4502
+ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file dcp-client/worker/supervisor2/index.js\n * Code managing sandboxes, tasks, jobs, and slices within in a DCP Worker.\n * @author Wes Garland, wes@kingsds.network\n * Paul, paul@kingsds.network\n * @date Dec 2020\n * June 2022\n * @module supervisor\n */\n\n/* global dcpConfig */ // eslint-disable-line no-redeclare\n// @ts-check\n\n\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst dcp4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { setImmediate } = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { Keystore, Address } = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { localStorage } = __webpack_require__(/*! dcp/common/dcp-localstorage */ \"./src/common/dcp-localstorage.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst RingBuffer = __webpack_require__(/*! dcp/utils/ringBuffer */ \"./src/utils/ringBuffer.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst { JobManager } = __webpack_require__(/*! ./job-manager */ \"./src/dcp-client/worker/supervisor2/job-manager.js\");\nconst { Load } = __webpack_require__(/*! ./load */ \"./src/dcp-client/worker/supervisor2/load.js\");\nconst { Sandbox, SandboxError } = __webpack_require__(/*! ./sandbox2 */ \"./src/dcp-client/worker/supervisor2/sandbox2.js\");\nconst { sliceStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst hash = __webpack_require__(/*! dcp/common/hash */ \"./src/common/hash.js\");\nconst { calculateJoinHash } = __webpack_require__(/*! dcp/dcp-client/compute-groups */ \"./src/dcp-client/compute-groups/index.js\");\nconst { ModuleCache } = __webpack_require__(/*! ./module-cache */ \"./src/dcp-client/worker/supervisor2/module-cache.js\");\nconst { Inventory, leafMerge, a$sleepMs, ms, pct, generateOpaqueId, booley, compressJobMap, \n toJobMap, truncateAddress, encodeDataURI, makeValueURI, justFetch, stringify } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n//const { CodeFlow } = require('client-oauth2');\n//const { text } = require('./lang').getLocaleStrings('en_CA'); /** XXXpfr @todo Don't know what to do with localization? 
*/\n\n/** @typedef {import('dcp/dcp-client/wallet/eth').Address} Address */\n/** @typedef {import('dcp/protocol-v4/connection/connection').Connection} Connection */\n/** @typedef {string} opaqueId */ // 22 character base64 string\n/** @typedef {import('..').Worker} Worker */\n/** @typedef {import('..').SupervisorOptions} SupervisorOptions */\n/** @typedef {import('./slice2').Slice} Slice */\n/** @typedef {import('dcp/utils').SliceMessage} SliceMessage */\n\nconst addressTruncationLength = 20;\n\n//\n// Configs are moving around in dcpConfig and local worker configs, so set up some defaults.\nlet workerTuning = dcpConfig.worker;\nif (!workerTuning) workerTuning = dcpConfig.Supervisor;\nif (!workerTuning || !workerTuning.dcp || !workerTuning.dcp.tuning || !workerTuning.dcp.tuning.watchdogInterval\n || !workerTuning.sandbox || !workerTuning.allowOrigins || !workerTuning.minimumWage || !workerTuning.computeGroups)\n workerTuning = {\n dcp: {\n tuning: { watchdogInterval: 7, minSandboxStartDelay: 0.1, maxSandboxStartDelay: 0.7 },\n connectionOptions: { default: { identityUnlockTimeout: 15 * 60 /* seconds */ } },\n },\n sandbox: { progressReportInterval: 2 * 60 * 100 },\n allowOrigins: { fetchWorkFunctions: [], fetchArguments: [], fetchData: [], sendResults: [], any: [] },\n minimumWage: { CPU: 0, GPU: 0, 'in': 0, out: 0 },\n leavePublicGroup: false,\n pCores: 0,\n computeGroups: {},\n // The following configs are not in dcpConfig or worker configs (yet), but may be specified in local worker configs to override the defaults.\n pruneFrequency: 15 * 1000, // Maxiumum time interval where we check to prune used sandboxes.\n workerSandboxThreshold: 7, // When maxWorkingSandboxes >= workerSandboxThreshold, we allow an extra 25% of assigned sandboxes that won't be pruned.\n cachedJobsThreshold: 12, // Prune the unused job managers >= cachedJobsThreshold.\n};\n\n//\n// Flags for tracing.\n//\nconst selectiveEnable = false;\nconst displayWarnError = false || selectiveEnable;\nconst selectiveDebugging = selectiveEnable || debugging();\nconst debuggingError = false || selectiveDebugging || displayWarnError;\nconst debuggingWarn = false || selectiveDebugging || displayWarnError;\nconst selectiveDebugging2 = selectiveEnable && false || debugging('supervisor');\nconst displaySliceState = true;\nconst displayCompletedResults = false;\n\n/** \n * Adjust delay times when debugging.\n * The adjustment for niim is automatic, other debuggers must manually change this value.\n */\nlet timeDilation = 1;\nif (DCP_ENV.platform === 'nodejs')\n{\n /** Make timers 10x slower when running in niim */\n timeDilation = (requireNative('module')._cache.niim instanceof requireNative('module').Module) ? 
10 : 1;\n}\n\n//\n// Index to functionality -- search for '_Idx' to toggle through the index.\n//\n// 1) Ctor: Supervisor constructor.\n// 2) Important property-like functions.\n// 3) Dtors: screenSaverDestroy, stopWork, purgeJob.\n// 4) Connection code.\n// 5) Work: Distribute slice to sandboxes.\n// 6) Return slices and sent progress reports to result-submitter-results.\n// 7) Task Distributor (TD): requestTask (Rq) support -- communication with TD.\n// 8) Aggregators from the job managers.\n// 9) Sandbox creation and management.\n// 10) Result-submitter-result support functions.\n// 11) Work reject.\n// 12) Unused functions that we need to review.\n//\n\n// _Idx\n//\n// Ctor: Supervisor constructor.\n//\n\n/** \n * Supervisor constructor\n * \n * A supervisor manages the communication with the scheduler, manages sandboxes, and\n * decides which workload should be sent to which sandboxes when.\n *\n * Start state:\n * - initial\n *\n * Intermediate states:\n * - ready\n * - stopping\n *\n * Terminal states:\n * - stopped\n *\n * Valid transitions:\n * - initial -> ready where that happens \n * - ready -> stopping\n * - stopping -> stopped\n *\n * @param {Worker} worker - The worker that created this instance.\n * @param {SupervisorOptions} options - Options for specifying custom behaviour and tuning,\n */\nfunction Supervisor(worker, options)\n{\n assert(options.identity instanceof Keystore);\n assert(options.paymentAddress instanceof Address);\n\n /**\n * Flag to indicate a debug build.\n * Used when we want to display extra information and do extra checks for developers only.\n * @type {boolean}\n */\n this.debugBuild = ((__webpack_require__(/*! dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug');\n /**\n * When Supervisor.sliceTiming is set to be true, it displays the timings of a every slice\n * slice['queueingDelta'] = timespan of when slice is passed to jobManager.runQueuedSlice until sandbox.work\n * slice['executionDelta'] = timespan of execution in sandbox\n * slice['resultDelta'] = timespan of when sandbox finishes executing until recordResult completes.\n * @type {boolean}\n */\n this.sliceTiming = false;\n /** Used for analyzing the completed results in Supervisor.recordResult. 
*/\n this.resultMap = {};\n\n /** @type {ModuleCache} */\n this.moduleCache = new ModuleCache(this);\n\n this.worker = worker;\n this.identity = options.identity;\n this.paymentAddress = options.paymentAddress;\n this.options = options;\n this.maxWorkingSandboxes = options.maxWorkingSandboxes || 1;\n this.maxTotalSandboxes = this.maxWorkingSandboxes;\n\n // We're making the assumption that if a worker has at least 7 sandboxes, \n // then the worker has sufficient resources to handle 25% more sandboxes in memory.\n // This assumption may be overridden by changing workerSandboxThreshold.\n if (this.maxWorkingSandboxes >= this.workerSandboxThreshold)\n this.maxTotalSandboxes = Math.ceil(1.25 * this.maxWorkingSandboxes);\n // When # of sandboxes reaches this level, we more aggressively prune.\n this.mustPruneSandboxLevel = Math.ceil(1.5 * this.maxTotalSandboxes);\n // Last prune time stamp.\n this.lastPrune = 0;\n // General time stamp.\n this.lastTime = 0;\n\n // Supervisor may get created by Worker where options.cores or options.targetLoad is not defined.\n this.numCPU = this.maxWorkingSandboxes;\n this.numGPU = 1;\n this.portionToUseCPU = pct(100);\n this.portionToUseGPU = pct(100);\n\n if (options.cores)\n {\n this.numCPU = options.cores.cpu || this.numCPU;\n this.numGPU = options.cores.gpu || this.numGPU;\n }\n if (options.targetLoad)\n {\n this.portionToUseCPU = options.targetLoad.cpu || pct(100);\n this.portionToUseGPU = options.targetLoad.gpu || pct(100);\n }\n\n this.tuning = {\n maxCPUAlloc: this.portionToUseCPU, /**< Maximum proportion of CPU time to attempt to use. */\n maxGPUAlloc: this.portionToUseGPU, /**< Maximum proportion of GPU time to attempt to use. */\n watchdogInterval: 7, /**< (seconds) How frequently to kick off an unsolicited requestTask. */\n prefetchInterval: 20, /**< (seconds) How many seconds into the future are looking to project capacity during work fetch. */\n minSandboxStartDelay: 0.1, /**< (seconds) Base minimum of this.delayMs, scaled by this.delayScaler. */\n maxSandboxStartDelay: 0.7, /**< (seconds) Base maximum random component of this.delayMs, scaled by this.delayScaler. 
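// Illustrative sketch (not part of the shipped module): the sandbox-sizing rules
// described in the constructor comments above, assuming the default
// workerSandboxThreshold of 7. The real values come from the Supervisor
// constructor and local worker config.
function sketchSandboxLimits(maxWorkingSandboxes, workerSandboxThreshold = 7)
{
  // Workers with at least workerSandboxThreshold sandboxes are assumed to have
  // enough memory to keep 25% extra (non-working) sandboxes instead of pruning them.
  let maxTotalSandboxes = maxWorkingSandboxes;
  if (maxWorkingSandboxes >= workerSandboxThreshold)
    maxTotalSandboxes = Math.ceil(1.25 * maxWorkingSandboxes);
  // Past 1.5x the total, pruning becomes aggressive regardless of the prune timer.
  const mustPruneSandboxLevel = Math.ceil(1.5 * maxTotalSandboxes);
  return { maxTotalSandboxes, mustPruneSandboxLevel };
}
// e.g. sketchSandboxLimits(8) yields { maxTotalSandboxes: 10, mustPruneSandboxLevel: 15 }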
*/\n };\n this.tuning = leafMerge(this.tuning, workerTuning.dcp.tuning);\n if (options.watchdogInterval > 0)\n this.tuning.watchdogInterval = options.watchdogInterval; // Override.\n //this.tuning.watchdogInterval = 0.25;\n\n /**\n * Fine tune this.delayMs.\n * Note: Please discuss any change with Paul, paul@kingsds.network.\n * XXXpfr @todo Finalize the delay tuning.\n */\n this.delayScaler = 0.5;\n\n debugging('supervisor') && console.debug('Supervisor.tuning', this.tuning);\n\n /**\n * Note: targetLoad is not properly implemented yet.\n * XXXpfr @todo Collaborate with Wes to get it right.\n * @type {Load}\n */\n this.targetLoad = new Load({\n cpu: Math.min(this.maxWorkingSandboxes, this.numCPU),\n gpu: Math.min(this.maxWorkingSandboxes, this.numGPU)\n }).scale(this.tuning.maxCPUAlloc, this.tuning.maxGPUAlloc);\n\n /** @type {string[]} */\n this.allowedOrigins = workerTuning.allowOrigins.any;\n /** @type {string[]} */\n this.fetchWorkFunctions = workerTuning.allowOrigins.fetchWorkFunctions;\n /** @type {string[]} */\n this.fetchArguments = workerTuning.allowOrigins.fetchArguments;\n /** @type {string[]} */\n this.fetchData = workerTuning.allowOrigins.fetchData;\n /** @type {string[]} */\n this.sendResults = workerTuning.allowOrigins.sendResults;\n\n // In localExec, do not allow work function or arguments to come from the 'any' origins\n if (this.options.localExec)\n {\n this.allowedOrigins = this.allowedOrigins.concat(options.allowedOrigins.any);\n this.fetchWorkFunctions = this.fetchWorkFunctions.concat(options.allowedOrigins.fetchWorkFunctions);\n this.fetchArguments = this.fetchArguments.concat(options.allowedOrigins.fetchArguments);\n this.fetchData = this.fetchData.concat(options.allowedOrigins.fetchData);\n this.sendResults = this.sendResults.concat(options.allowedOrigins.sendResults)\n }\n\n if (options.allowedOrigins && options.allowedOrigins.length > 0)\n this.allowedOrigins = options.allowedOrigins.concat(this.allowedOrigins);\n \n //\n // The following 3 configs are not in dcpConfig or worker configs (yet), but may be specified in local worker configs to override the defaults.\n //\n /** @type {number} - Maxiumum time interval where we check to prune used sandboxes. */\n this.pruneFrequency = workerTuning.pruneFrequency || 15 * 1000;\n /** @type {number} - When maxWorkingSandboxes >= workerSandboxThreshold, we allow an extra 25% of assigned sandboxes that won't be pruned. */\n this.workerSandboxThreshold = workerTuning.workerSandboxThreshold || 7;\n /** @type {number} - Prune the unused job managers >= cachedJobsThreshold. */\n this.cachedJobsThreshold = workerTuning.cachedJobsThreshold || 12;\n\n /** @type {Object.<Address, JobManager>} */\n this.jobMap = {}; \n /** @type {Sandbox[]} - All sandboxes that are being used by the job managers. Makes sure we don't lose sandboxes. */\n this.sandboxInventory = [];\n /** @type {Sandbox[]} - Started sandboxes that are not in sandboxInventory yet. 
*/\n this.readiedSandboxes = [];\n /** @type {JobManager[]} */\n this.jobManagerInventory = new Inventory('jobManagers');\n /** @type {Synchronizer} */\n this.state = new Synchronizer('initial', [ 'initial', 'ready', 'reconnecting', 'stopping', 'stopped', 'broken']);\n\n /** @type {string} */\n this.lastDcpsid = undefined;\n /** @type {Connection} */\n this.taskDistributor = null;\n /** @type {Connection} */\n this.resultSubmitter = null;\n /** @type {Connection} */\n this.eventRouter = null;\n /** @type {Connection} */\n this.packageManager = null;\n /** @type {Array<object>} */\n this.resultSubmitterMessageQueue = [];\n /** @type {Array<object>} */\n this.eventRouterMessageQueue = [];\n\n /** @type {object} */\n this.schedulerConfig = leafMerge(dcpConfig.scheduler, options.schedulerConfig);\n\n /** @type {opaqueId} */\n this.workerId = localStorage.getItem('workerId');\n if (!this.workerId || this.workerId.length !== constants.workerIdLength)\n {\n this.workerId = generateOpaqueId();\n localStorage.setItem('workerId', this.workerId);\n }\n /** @type {object[]} */\n this.rejectedJobs = [];\n /** \n * An N-slot ring buffer of job addresses. Stores all jobs that have had no more than 1 slice run in the ring buffer.\n * Required for the implementation of discrete jobs \n * @type {RingBuffer} \n */\n this.ringBufferofJobs = new RingBuffer(100); // N = 100 should be more than enough. \n /** @type {boolean} - pseudo-mutex guarding requestTask. */\n this.isFetchingNewWork = false;\n\n // Start up the connections.\n this.instantiateAllConnections();\n\n /**\n * Note: DCP-3241 asks to test Android to see if we need this restriction any longer.\n * XXXpfr @todo Hopefully we can delete this @hack.\n */\n // @hack - dcp-env.isBrowserPlatform is not set unless the platform is _explicitly_ set,\n // using the default detected platform doesn't set it.\n // Fixing that causes an error in the wallet module's startup on web platform, which I\n // probably can't fix in a reasonable time this morning.\n // ~ER2020-02-20\n if (!options.maxWorkingSandboxes\n && DCP_ENV.browserPlatformList.includes(DCP_ENV.platform)\n && navigator.hardwareConcurrency > 1) {\n this.maxWorkingSandboxes = navigator.hardwareConcurrency - 1;\n if (typeof navigator.userAgent === 'string') {\n if (/(Android).*(Chrome|Chromium)/.exec(navigator.userAgent)) {\n this.maxWorkingSandboxes = 1;\n this.emit('warning', 'Doing work with Chromimum browsers on Android is currently limited to one sandbox');\n }\n }\n }\n}\nexports.Supervisor = Supervisor;\nSupervisor.prototype = Object.getPrototypeOf(new EventEmitter('Supervisor')); // Fake out VSCode -- get's rid of a billion red-squigglies.\nSupervisor.prototype = new EventEmitter('Supervisor');\n/**\n * Preserve the constructor property.\n * @constructor\n */\nSupervisor.prototype.constructor = Supervisor;\n\n/**\n * Set up sandboxes and interval timers, then start to search for work.\n **/\nSupervisor.prototype.startWork = function Supervisor$startWork ()\n{\n /* Provide opportunity for calling code to hook ready/error events. 
*/\n setImmediate(async () => {\n try\n {\n if (this.state.isNot('initial'))\n {\n if (this.state.setIf('stopped', 'initial')) {}\n else if (this.state.setIf('reconnecting', 'initial')) {}\n else if (this.state.setIf('broken', 'initial')) {}\n else if (this.state.is('ready')) return\n else throw new Error(`Supervisor startWork is in unexpected state ${this.state}, aborting...`);\n }\n this.instantiateAllConnections();\n\n await this.createSandboxes(this.maxWorkingSandboxes)\n .then(() => this.checkCapabilities());\n\n // Beacon interval timer.\n this.progressReportTimer = setInterval(() => this.emitProgressReport(), (workerTuning.sandbox.progressReportInterval || 2 * 60 * 100));\n // Watchdog: requestTask-driven interval timer.\n this.watchdogTimer = setInterval(() => this.requestTask() , ms(this.tuning.watchdogInterval));\n if (DCP_ENV.platform === 'nodejs' && this.options.localExec)\n {\n /* Interval timer helps keep worker alive forever, which we don't want in localExec. */\n this.progressReportTimer.unref();\n this.watchdogTimer.unref();\n }\n\n this.state.set('initial', 'ready');\n\n setImmediate(() => this.requestTask()); // Don't wait for watchdog.\n }\n catch(error)\n {\n this.state.set('initial', 'broken');\n this.emit('error', error);\n }\n });\n}\n\n/** Construct capabilities when necessary. */\nSupervisor.prototype.checkCapabilities = function Supervisor$checkCapabilities ()\n{\n if (!this.capabilities)\n {\n /**\n * Assign the capabilities of one the sandboxes before fetching slices from the scheduler.\n * @todo Remove this once fetchTask uses the capabilities of every sandbox to fetch slices.\n */\n const sandbox = this.readiedSandboxes.length > 0 ? this.readiedSandboxes[0] : this.sandboxInventory[0];\n if (sandbox)\n {\n this.capabilities = sandbox.capabilities;\n this.emit('capabilitiesCalculated', this.capabilities);\n }\n }\n\n if (DCP_ENV.isBrowserPlatform && this.capabilities.browser)\n this.capabilities.browser.chrome = DCP_ENV.isBrowserChrome;\n}\n\n// _Idx\n//\n// Important property-like functions.\n//\n\n/**\n * Universal delay milliseconds..\n * @returns {number}\n */\nSupervisor.prototype.delayMs = function Supervisor$delayMs (max = this.tuning.maxSandboxStartDelay, min = this.tuning.minSandboxStartDelay)\n{\n // Note: Please discuss any change with Paul, paul@kingsds.network.\n return 1000 * timeDilation * this.delayScaler * (min + Math.random() * (max - min));\n}\n\n/**\n * Indicates whether supervisor is ready for business.\n * @returns {boolean} - When true, the party is on...\n */\nSupervisor.prototype.isReady = function Supervisor$isReady()\n{\n return this.worker.working && this.state.is('ready');\n}\n\n/**\n * Safe access to Connection.close(...).\n * @param {Connection} connection\n * @param {string} [reason='requested']\n * @param {boolean} [immediate=false]\n * @returns {Promise<string>}\n */\nfunction safeClose(connection, reason = 'requested', immediate = false)\n{\n if (connection)\n {\n let msg;\n if (connection.state.is('closed')) msg = 'closed';\n else if (connection.state.is('closing')) msg = 'closing';\n else if (connection.state.is('close-wait')) msg = 'close-wait';\n if (msg)\n {\n debuggingWarn && console.warn(`${msg}, do not try to close again.`);\n return Promise.resolve(msg);\n }\n return connection.close(reason, immediate)\n .then(() => {\n return Promise.resolve(null);\n });\n }\n return Promise.resolve('already closed');\n}\n\n/**\n *\n * @param {string} operation\n * @param {*} data\n * @returns {Promise<string>}\n 
*/\nSupervisor.prototype.saveForResubmitToRS = function Supervisor$saveForResubmitToRS(operation, data)\n{\n this.resultSubmitterMessageQueue.push({ operation, data });\n return safeClose(this.resultSubmitter);\n}\n\n/**\n * Error feedback to user.\n * @param {string} message\n * @param {*} extra\n */\nSupervisor.prototype.error = function Supervisor$error(message, extra)\n{\n const dcpError = new DCPError(message, extra);\n this.emit('error', dcpError);\n}\n\n/**\n * When true, the sandbox complete handler will look for another slice in the same job,\n * and if not found, then proceed to Supervisor.requestTask.\n * @returns {boolean}\n */\nSupervisor.prototype.runSliceFromSameJob = function Supervisor$runSliceFromSameJob()\n{\n //\n // Experimental, should be off by default.\n // Cf. sandbox complete handler in JobManager.hookUpSandboxListeners.\n //\n const disable = true;\n const tooManyJobs = this.activeJobCount() > this.maxWorkingSandboxes;\n return !disable && !tooManyJobs && this.unusedSandboxSlots() < 2;\n}\n\n/**\n * This function is used as the target number of sandboxes to be associated with slices and start working.\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @returns {number}\n */\nSupervisor.prototype.unusedSandboxSlots = function Supervisor$unusedSandboxSlots ()\n{\n return this.maxWorkingSandboxes - this.workingSliceCount();\n}\n\n/**\n * Create errorObj with error.code if it exists.\n * @param {Error} error\n * @returns {object}\n */\nSupervisor.prototype.checkCode = function Supervisor$checkCode (error)\n{\n if (!error) return '';\n const errorObj = { message: error.message };\n if (error['errorCode']) errorObj['errorCode'] = error['errorCode'];\n if (error['code']) errorObj['code'] = error['code'];\n return errorObj;\n}\n\n/**\n * Remove stack trace from error.\n * May not work when error is a string with no new-lines.\n * @param {string|Error} error\n * @returns {string|Error}\n */\nSupervisor.prototype.removeStackTrace = function Supervisor$removeStackTrace (error)\n{\n if (typeof error === 'string')\n {\n const errorLines = error.split('\\n');\n return (errorLines && errorLines.length > 0) ? errorLines[0] : error;\n }\n if (error instanceof Error)\n return error.message;\n return error;\n}\n\n// _Idx\n//\n// Dtors: screenSaverDestroy, stopWork, purgeJob.\n//\n\n/**\n * If we cannot create a new sandbox, that probably means we're on a screensaver worker\n * and the screensaver is down. 
So return the slices to the scheduler.\n */\nSupervisor.prototype.screenSaverDestroy = function Supervisor$screenSaverDestroy()\n{\n debugging('supervisor') && console.debug(`Supervisor.screenSaverDestroy: destroying all job managers and terminating all sandboxes.`);\n this.jobManagerInventory.forEach(jm => jm.destroy());\n this.jobManagerInventory = new Inventory('jobManagers');\n\n this.readiedSandboxes.forEach(sandbox => {\n if (!sandbox.isTerminated) sandbox.terminate(false);\n });\n this.readiedSandboxes = [];\n\n this.sandboxInventory.forEach(sandbox => {\n if (!sandbox.isTerminated) sandbox.terminate(false);\n });\n this.sandboxInventory = [];\n}\n\n/**\n * Terminates sandboxes and returns slices.\n * Sets the working flag to false, call @this.work to start working again.\n * \n * If forceTerminate is true: Terminates all sandboxes and returns all slices.\n * If forceTerminate is false: Terminates non-working sandboxes and returns initial and ready slices.\n *\n * @param {boolean} [forceTerminate = true] - true if you want to stop the sandboxes from completing their current slice.\n * @returns {Promise<void>}\n */\nSupervisor.prototype.stopWork = async function Supervisor$stopWork (forceTerminate = true)\n{\n selectiveDebugging && console.debug(`Supervisor.stopWork(${forceTerminate},${this.state}): terminating sandboxes and returning slices to scheduler.`);\n\n // Do a hard flush of the microtask queue and finish the current event loop.\n await new Promise((resolve) => setImmediate(() => setTimeout(resolve, 0)));\n\n if (this.state.setIf('ready', 'stopping')) {}\n else if (this.state.setIf('reconnecting', 'stopping')) {}\n else if (this.state.setIf('broken', 'stopping')) {}\n else if (this.state.is('stopped')) return\n else throw new Error(`Supervisor stopWork is in unexpected state ${this.state}, aborting...`);\n\n this.instantiateAllConnections();\n\n this.readiedSandboxes.forEach((sandbox) => {\n if (!sandbox.isTerminated) sandbox.terminate(false);\n });\n this.readiedSandboxes = [];\n\n if (forceTerminate)\n {\n for (const jm of this.jobManagerInventory)\n jm.destroy();\n\n this.sandboxInventory.forEach((sandbox) => {\n if (!sandbox.isTerminated) sandbox.terminate(false);\n });\n }\n else\n {\n let activeSliceCount = 0;\n const slicesToReturn = [];\n for (const jm of this.jobManagerInventory)\n {\n //jm.dumpSlices(`stopWork1:${jm.address}`);\n const queuedSlices = jm.queuedSlices;\n slicesToReturn.push(...queuedSlices);\n jm.removeSlices(queuedSlices);\n activeSliceCount += jm.activeSlices.length;\n jm.nonWorkingSandboxes.forEach((sandbox) => jm.returnSandbox(sandbox));\n }\n\n const reason = `Supervisor stopWork(${forceTerminate}): returning all non-finished slices that are not working.`;\n this.returnSlices(slicesToReturn, reason, false /*removeSlices*/);\n\n await new Promise((resolve) => {\n if (activeSliceCount === 0)\n resolve();\n // Resolve and finish work once all sandboxes have finished submitting their results.\n this.on('submitFinished', () => {\n if (--activeSliceCount === 0)\n {\n debuggingWarn && console.warn('All sandboxes empty, stopping worker and closing all connections');\n resolve();\n }\n });\n });\n\n for (const jm of this.jobManagerInventory)\n {\n //jm.dumpSlices(`stopWork2:${jm.address}`);\n jm.liveSandboxes.forEach((sandbox) => sandbox.terminate(false));\n jm._sandboxInventory = [];\n }\n }\n this.sandboxInventory = [];\n this.jobManagerInventory = new Inventory('jobManagers');\n\n this.closeConnections();\n\n this.emit('stop');\n 
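// Illustrative usage sketch (hypothetical caller, not part of the shipped module):
// an embedder can stop the supervisor gracefully (queued slices are returned and
// working slices are allowed to finish submitting), or forcibly, and later call
// work() to resume; see the state transitions documented on the constructor.
async function sketchStopAndRestart(supervisor)
{
  await supervisor.stopWork(false); // graceful: return queued slices, wait for 'submitFinished'
  // ... later ...
  supervisor.work();                // sets up sandboxes and timers again, resumes fetching tasks
}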
this.state.set('stopping', 'stopped');\n}\n\n/**\n* Purge all traces of the job.\n* @param {JobManager} jobManager \n*/\nSupervisor.prototype.purgeJob = function Supervisor$purgeJob (jobManager)\n{\n selectiveDebugging && console.debug(`Supervisor.purgeJob ${jobManager.identifier}.`);\n this.jobManagerInventory.delete(jobManager);\n this.moduleCache.removeJob(jobManager.address);\n jobManager.destroy();\n}\n\n// _Idx\n//\n// Connection code.\n//\n\n/** \n * Connect the supervisor to a given scheduler sub-service. Reconnection \"DDoS\" from workers\n * mitigated via an exponential backoff algorithm.\n *\n * DCPv4 connections are lazily-initiated. Successful connection establishment detected by\n * observing the payload event, which normally triggers during versioning.\n */\nSupervisor.prototype.connectTo = function Supervisor$connectTo(which)\n{\n const config = (which === 'packageManager') ? dcpConfig.packageManager : this.schedulerConfig.services[which];\n const retryMinSleepMs = 100;\n const payloadResetCount = 3; /* How many payloadCount before we reset retryNextSleepMs. */\n \n var retryNextSleepMs = retryMinSleepMs;\n var payloadCount = 0;\n var options = leafMerge(/* ordered from most to least specific */\n workerTuning.dcp.connectionOptions.default,\n workerTuning.dcp.connectionOptions[which],\n workerTuning.dcp.connectionOptions[config.location.href],\n );\n\n /**\n * The payload event handler is an interesting way to handle exponential backoff\n * for the delay in this.reconnect.\n * XXXpfr @todo Revisit exponential backoff for this.reconnect in Sup2 Part II.\n */\n\n const handlePayloadEventFn = (ev) =>\n {\n if (++payloadCount === payloadResetCount)\n {\n this[which].removeEventListener('payload', handlePayloadEventFn);\n retryNextSleepMs = retryMinSleepMs; \n }\n }\n\n this[which] = new dcp4.Connection(config, this.identity, options);\n\n debugging() && console.debug(`Opening new connection ${this[which].id} to ${which}.`)\n\n const messageQueue = this[which + 'MessageQueue'];\n if (messageQueue && messageQueue.length > 0)\n resendRejectedMessages(this[which], messageQueue);\n\n this[which].on('payload', handlePayloadEventFn);\n\n this['reconnect' + which] = this.reconnect.bind(this, which);\n this[which].on('close', () => { this['reconnect' + which](); });\n}\n\n/**\n * Reconnect logic.\n * @param {string} which -- Name of the connection \n */\nSupervisor.prototype.reconnect = async function Supervisor$reconnect(which)\n{\n debugging('supervisor') && console.debug(`Supervisor.reconnect: ${which}:`, this.state.valueOf(), this[which].state.valueOf());\n const stateChange = this.state.testAndSet('ready', 'reconnecting');\n if (stateChange || this.state.is('reconnecting'))\n {\n await a$sleepMs(100); // Sleep for 100ms, maybe exp-backoff later if we need it.\n this.connectTo(which);\n if (stateChange) this.state.set('reconnecting', 'ready');\n debugging() && console.debug(`Supervisor.reconnect: Trying to reconnect ${which}`, this.state.valueOf(), this[which].state.valueOf());\n }\n}\n\n/**\n * Close a connection properly.\n * @param {string} which -- Name of the connection to close.\n */\nSupervisor.prototype.closeConnection = function Supervisor$closeConnection(which)\n{\n if (this[which])\n {\n this[which].off('close', this['reconnect' + which]);\n this[which].close();\n this[which] = null;\n }\n}\n\n/**\n * Close all connections.\n */\nSupervisor.prototype.closeConnections = function Supervisor$closeConnections()\n{\n /** XXXpfr @todo Remove when bug DCP-3094 is fixed. 
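// Illustrative sketch (not part of the shipped module): one way the exponential
// backoff mentioned in the connectTo() comment could look. The shipped reconnect()
// above currently sleeps a fixed 100 ms and marks the backoff as a TODO; this
// sketch assumes a hypothetical per-connection attempt counter whose delay doubles
// each retry and is reset after a few successful 'payload' events.
async function sketchBackoffDelay(attempt, retryMinSleepMs = 100, retryMaxSleepMs = 60 * 1000)
{
  const delay = Math.min(retryMinSleepMs * 2 ** attempt, retryMaxSleepMs);
  await new Promise((resolve) => setTimeout(resolve, delay));
  return delay; // attempts 0..4 wait 100, 200, 400, 800, 1600 ms
}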
*/\n a$sleepMs(500);\n\n this.closeConnection('taskDistributor');\n this.closeConnection('resultSubmitter');\n this.closeConnection('eventRouter');\n this.closeConnection('packageManager');\n}\n\n/**\n * Broadcast keepalive to all connections and optionally start up sandboxes.\n * @param {boolean} [createSandbox=false] - When true, creates sandboxes for unused sandbox slots.\n */\nSupervisor.prototype.keepalive = function Supervisor$keepalive(createSandbox = false)\n{\n return Promise.all([\n this.taskDistributor.keepalive(),\n this.resultSubmitter.keepalive(),\n this.eventRouter .keepalive(),\n this.packageManager .keepalive(),\n (createSandbox ? this.createSandboxes(this.maxWorkingSandboxes) : Promise.resolve()),\n ]);\n}\n\n/**\n * Open all connections. Used when supervisor is instantiated or stopped/started to initially open connections.\n */\nSupervisor.prototype.instantiateAllConnections = function Supervisor$instantiateAllConnections ()\n{\n if (!this.taskDistributor)\n this.connectTo('taskDistributor');\n if (!this.eventRouter)\n this.connectTo('eventRouter');\n if (!this.resultSubmitter)\n this.connectTo('resultSubmitter');\n if (!this.packageManager)\n this.connectTo('packageManager');\n}\n\n/**\n * Try sending messages that were rejected on an old instance of the given connection.\n * @param {Connection} connection\n * @param {Array<object>} messageQueue\n */\nasync function resendRejectedMessages (connection, messageQueue)\n{\n var message = messageQueue.shift();\n do \n {\n try\n {\n await connection.send(message.operation, message.data);\n }\n catch (error)\n {\n debuggingError && console.error(`Failed to resend message ${message.operation} to scheduler: ${error}. Will try again on a new connection.`);\n messageQueue.unshift(message);\n safeClose(connection);\n break;\n }\n message = messageQueue.shift();\n } while (message);\n}\n \n// _Idx\n//\n// Work: Distribute slice to sandboxes.\n//\n\n/** \n * UNUSED\n * @deprecated\n * Round-robin through the job managers, picking 1 slice to run each time.\n * Try to have the same number of working sandboxes for each job.\n * Try to run a slice on every available sandbox.\n */\nSupervisor.prototype.workOnCurrentTask = function Supervisor$workOnCurrentTask ()\n{\n return this.roundRobinSlices();\n}\n\n/**\n * This is for compatibility with Supervisor1 in sa worker.\n * When we get rid of Supervisor1 we can delete the ref in sa worker and then get rid of this function.\n */\nSupervisor.prototype.setDefaultIdentityKeystore = function Supervisor$setDefaultIdentityKeystore ()\n{\n}\n\n/**\n * Called in Worker.start().\n * Initial entry point after Worker constructor.\n * We need to start searching for work here to allow starting and stopping a worker.\n */\nSupervisor.prototype.work = function Supervisor$work ()\n{\n // Set up sandboxes and interval timers, then start to search for work.\n this.startWork();\n}\n\n/** \n * Round-robin through the job managers, picking 1 slice to run each time.\n * Try to have the same number of working sandboxes for each job.\n * Try to run a slice on every available sandbox.\n */\nSupervisor.prototype.roundRobinSlices2 = function Supervisor$roundRobinSlices2 ()\n{\n try\n {\n let first = true;\n const cursor = this.makeJobSelectionCursor(this.jobManagerInventory);\n while (true)\n {\n const slice = cursor.next();\n if (!slice) break; /* No more work can fit. 
*/\n debugging('supervisor') && console.debug('roundRobinSlices: Executing slice', slice.identifier);\n slice.markAsReserved();\n slice.jobManager.runSlice(slice, first ? 0 : this.delayMs());\n first = false;\n }\n }\n finally\n {\n this.isFetchingNewWork = false;\n }\n}\n\n/**\n * We try to balance so that each job has the same number of working slices.\n *\n * NOTES:\n * 1) If count is such that it cannot be distributed evenly, we do not yet randomly assign the extras.\n * E.g. 3 jobs, j1, j2, j3: count = 5 -- so 2 jobs get extras -- the extras need to be assigned randomly.\n * @todo Assign the extras randomly.\n * 2) this.roundRobinSlices is not going to be what we use when sup2 is in final form.\n * We want to this.makeJobSelectionCursor and something like this.roundRobinSlices2\n *\n * In the outer loop,\n * when numworkingSandboxes=1, try to get a slice running for each job with 0 working sandboxes.\n * when numworkingSandboxes=2, try to get a slice running for each job with 1 working sandboxes.\n * when numworkingSandboxes=3, try to get a slice running for each job with 2 working sandboxes. Etc.\n * The idea is to balance the number of slices working on each job.\n * @param {number} [count=0] - The number of new slices to try to get running in sandboxes.\n */\nSupervisor.prototype.roundRobinSlices = function Supervisor$roundRobinSlices (count = 0)\n{\n try\n {\n if (!count) count = this.unusedSandboxSlots();\n if (!count || this.readySliceCount() < 1)\n return Promise.resolve();\n\n const slices = [];\n let numScheduled = 0\n let readyJobManagers = this.jobManagerInventory.filter(jm => jm.ready);\n let first = true;\n selectiveDebugging && console.debug('roundRobinSlices: START count', count, 'numJobMgrs', readyJobManagers.length, 'totalWorking(w/r/wo/wsbx/sbx)', this.workingSliceCount(), this.reservedSliceCount(), this.workingSliceOnlyCount(), this.workingSandboxCount(), this.sandboxCount());\n\n for (let numWorkingSandboxes = 1; numWorkingSandboxes <= this.maxWorkingSandboxes; numWorkingSandboxes++)\n {\n let sliceCount = 0;\n const beginNumScheduled = numScheduled;\n for (const jobMan of readyJobManagers)\n {\n const _readySlices = jobMan.readySlices;\n sliceCount += _readySlices.length\n const skip = numWorkingSandboxes <= jobMan.workingSlices.length;\n\n if (skip || _readySlices.length < 1)\n {\n // Noisy log message turned off by default.\n //debugging('supervisor') && console.debug('RRS0(numS, beginNumS, count, sliceCount, skip, _ready, numWorkingS(loop), workingSlices):', numScheduled, beginNumScheduled, count, sliceCount, skip, _readySlices.length, numWorkingSandboxes, jobMan.workingSlices.length);\n continue;\n }\n\n const slice = _readySlices[0];\n slices.push(slice);\n\n slice.markAsReserved();\n jobMan.runSlice(slice, first ? 
0 : this.delayMs());\n\n first = false;\n if (++numScheduled >= count)\n break;\n }\n if (numScheduled >= count)\n {\n debugging('supervisor') && console.debug('RRS1(numS, beginNumS, count, sliceCount):', numScheduled, beginNumScheduled, count, sliceCount);\n break;\n }\n if (beginNumScheduled === numScheduled && sliceCount < 1)\n {\n debugging('supervisor') && console.debug('RRS2(numS, beginNumS, count, sliceCount):', numScheduled, beginNumScheduled, count, sliceCount);\n break;\n }\n }\n if (selectiveDebugging)\n {\n console.debug(`roundRobinSlices(working:(w/r/wo/wsbx/sbx)${this.workingSliceCount()},${this.reservedSliceCount()},${this.workingSliceOnlyCount()},${this.workingSandboxCount()},${this.sandboxCount()}): Started ${slices.length}/${numScheduled}/${count} scheduled slices`, compressSlices(slices));\n const sliceGrouper = {};\n slices.forEach((slice) => {\n const jm = slice.jobManager;\n if (!sliceGrouper[slice.jobAddress]) sliceGrouper[slice.jobAddress] = { cnt: 1, working: jm.workingSlices.length, queued: jm.queuedSlices.length, assigned: jm.assignedSandboxes.length, estimation: jm.isEstimation };\n else sliceGrouper[slice.jobAddress].cnt++;\n });\n console.debug(sliceGrouper);\n }\n if (selectiveDebugging2)\n {\n const jobGrouper = {};\n this.jobManagerInventory.forEach((jm) => {\n jobGrouper[jm.address] = { working: jm.workingSlices.length, queued: jm.queuedSlices.length, assigned: jm.assignedSandboxes.length, estimation: jm.isEstimation };\n });\n console.debug(jobGrouper);\n }\n }\n finally\n {\n this.isFetchingNewWork = false;\n }\n}\n\n/**\n * @callback cbNext\n * @returns {Slice}\n */\n\n/** \n * Factory function which instantiates a JobSelectionCursor. A JobSelectionCursor\n * steps the order that job slices should be selected for execution in the supervisor,\n * given the current state of the supervisor and the availability of jobs when the\n * inventory was snapshot. The entire slice scheduling algorithm is represented by\n * this cursor.\n *\n * The basic idea behind the scheduling of slices in this implementation is to keep as\n * many slices from different jobs running as possible, so as to reduce the likelihood\n * of resource contention between sandboxes.\n *\n * Slices are scheduled in here based on the following ruleset:\n * - pick a slice from the longest job that isn't running\n * - choose a slice from the remaining jobs, in order for shortest to longest slice time\n * - if there are any jobs which are nearly finished, every other slice comes from one\n * of these jobs, selected randomly????? <-- NYI. XXXpfr @todo Think about implementing...\n * - jobs which have slicePriority set by the task-distributor may be chosen in place\n * of slices in the above algorith. Jobs with a slicePriority closer to 1 are more likely\n * to exhibit this behaviour.\n * @param {JobManager[]} jobManagerInventory\n * @returns { { next: cbNext } }\n */\nSupervisor.prototype.makeJobSelectionCursor = function Supervisor$JobSelectionCursor (jobManagerInventory)\n{\n /* Variables in this scope function as state information for next() */\n var jobManIdx = 0;\n /** @type {JobManager[]} - All job managers that are ready that have at least one ready slice. */\n var jobManList;\n /** @type {JobManager[]} - All jobManList elements that correspond to preferred jobs. */\n var prefJobManList;\n /* Number of working sandboxes allowed for a given job. 
*/\n var concurrency = 1;\n const that = this;\n \n function seed (_concurrency)\n {\n function countSandboxes(jobAddress)\n {\n const jobManager = that.jobMap[jobAddress];\n if (!jobManager) throw new Error(`Lost track of job manager for address '${jobAddress}'`);\n return jobManager.workingSlices.length;\n }\n \n // Reset.\n jobManIdx = 0;\n\n /* Populate jobManList with jobs which are ready and have at least one slice which is ready. */\n jobManList = jobManagerInventory.filter((jobMan) => jobMan.ready && jobMan.readySlices.length > 0);\n\n /* Populate jobManList with jobManagers whose # of working slices is less than _concurrency. */\n jobManList = jobManList.filter((jobMan) => countSandboxes(jobMan.address) < _concurrency);\n\n /* Increasing sort. */\n jobManList.sort((a,b) => a.estimateWallMs - b.estimateWallMs);\n\n /* Populate prefJobManList with jobs from jobManList which also have a slicePreference set. */\n prefJobManList = jobManList.filter(jobMan => jobMan.hasOwnProperty('slicePreference') );\n }\n\n /**\n * Takes slices off the ready list, marks it reserved and increments workingSandboxCoun,\n * because the slice will soon be working.\n * @param {JobManager} jobMan \n * @returns {Slice}\n */\n function reserveSlice (jobMan)\n {\n const _readySlices = jobMan.readySlices;\n if (_readySlices.length > 0)\n {\n const slice = _readySlices[0];\n slice.markAsReserved();\n return slice;\n }\n return null;\n }\n\n /**\n * Each invocation of next() identifies one slice to run, or returns false if none can run.\n * @returns {Slice}\n */\n function next ()\n {\n /* Adjust order to schedule the heaviest job's first slice asap. */\n jobManList.unshift(jobManList.pop());\n\n let workingSliceCount;\n do\n {\n seed(concurrency);\n\n /* Maybe schedule a prefered job slice based on random chance .*/\n if (prefJobManList.length > 0)\n {\n let prioRan = Math.random();\n let list = prefJobManList.filter(jm => jm['slicePreference'] >= prioRan);\n\n if (list.length > 0)\n {\n const jobMan = list[list.length * Math.random()];\n const slice = reserveSlice(jobMan);\n if (slice)\n return slice;\n }\n }\n\n /* Schedule a slice from next job; jobs are in increasing order of estimated run time. */\n while (jobManIdx < jobManList.length)\n {\n const jobMan = jobManList[jobManIdx];\n jobManIdx++;\n const slice = reserveSlice(jobMan);\n if (slice)\n return slice;\n }\n\n /* If this is reached, we did not schedule a slice with current seed. We need\n * to re-seed to look for newly-available work and sandboxes, ratcheting up the\n * concurrency (max # of each job running) until we find something we can do.\n */\n workingSliceCount = that.workingSliceCount();\n debugging() && console.debug(`job selection - no slice found from ${jobManList.length} jobs for concurrency=${concurrency} and ${workingSliceCount} working sandboxes`);\n } while (jobManList.length > 0 && workingSliceCount < that.maxWorkingSandboxes && concurrency++ < that.maxWorkingSandboxes);\n\n return false; /* Did not find any more work that fits. */\n }\n \n return { next };\n}\n\n/**\n * Handle sandbox.work(...) errors.\n * @param {Sandbox} sandbox \n * @param {Slice} slice \n * @param {Error} error \n * @return {Promise<string>}\n */\nSupervisor.prototype.handleSandboxWorkError = async function Supervisor$handleSandboxWorkError (sandbox, slice, error)\n{\n if (!slice.isWorking) // Sanity. 
Exception should never fire.\n throw new Error(`handleSandboxWorkError: slice ${slice.identifier} must be WORKING.`);\n\n let logLevel, reason;\n \n if (error instanceof SandboxError)\n {\n logLevel = 'warn';\n reason = error.errorCode;\n // The message and stack properties of error objects are not enumerable,\n // so they have to be copied into a plain object this way\n // @ts-ignore\n error = Object.getOwnPropertyNames(error).reduce((o, p) => {\n o[p] = error[p]; return o;\n }, { message: 'Unexpected worker error' });\n }\n else\n {\n logLevel = 'error';\n if (error)\n reason = `Slice ${slice.sliceNumber} in state ${slice.state} of job ${slice.jobAddress} failed to complete execution with error ${this.checkCode(error)}.`;\n else\n {\n reason = `Slice ${slice.sliceNumber} in state ${slice.state} of job ${slice.jobAddress} failed to complete execution.`;\n error = new Error(reason);\n }\n // This error was unrelated to the work being done, so just return the slice\n // in the promise.catch in JobManager.runSliceOnSandbox .\n assert(slice.result === null);\n }\n \n this.handleFailedSlice(slice, error);\n\n let errorString;\n switch (reason)\n {\n case 'ENOPROGRESS':\n errorString = 'No progress error in sandbox.\\n';\n break;\n case 'ESLICETOOSLOW':\n errorString = 'Slice too slow error in sandbox.\\n';\n break;\n case 'EUNCAUGHT':\n errorString = `Uncaught error in sandbox ${error.message}.\\n`;\n break;\n case 'EFETCH':\n // The status.js processing does not have a case for 'EFETCH' yet.\n errorString = `Could not fetch data: ${error.message}.\\n`;\n break;\n }\n\n // Always display max info under debug builds, otherwise maximal error.\n // messages are displayed to the worker, only if both worker and client agree.\n const displayMaxInfo = slice.jobManager.displayMaxDiagInfo;\n\n const errorObject = {\n jobAddress: truncateAddress(slice.jobAddress, addressTruncationLength),\n sliceNumber: slice.sliceNumber,\n sandbox: sandbox.id,\n jobName: sandbox.public ? 
sandbox.public.name : 'unnamed',\n };\n if (displayMaxInfo)\n errorObject.stack += '\\n --------------------\\n' + (error.stack.split('\\n').slice(1).join('\\n'));\n\n if (error.name === 'EWORKREJECT')\n {\n reason = 'EWORKREJECT'; // The status.js processing does not have a case for 'EWORKREJECT' yet.\n error.stack = 'Sandbox was terminated by work.reject()';\n await this.handleWorkReject(sandbox, slice, error.message);\n }\n else\n {\n this.returnSlice(slice, reason);\n slice.jobManager.returnSandbox(sandbox);\n }\n\n if (errorString)\n console[logLevel](errorString, errorObject);\n else if (error.name === 'EWORKREJECT')\n console[logLevel](`Slice rejected work: ${error.message}`)\n else\n console[logLevel](`Slice failed: ${error.message}\\n`, errorObject);\n\n return reason;\n}\n\n/**\n * Slice has thrown error during execution:\n * Mark slice as failed, compensate when job is dicrete, emit events.\n * @param {Slice} slice\n * @param {Error} error\n */\nSupervisor.prototype.handleFailedSlice = function Supervisor$handleFailedSlice (slice, error)\n{\n assert(error, 'error must be valid');\n slice.collectResult(error, false);\n\n // If the slice from a job never completes and the job address exists in the ringBufferofJobs,\n // then we remove it to allow for another slice (from the same job) to be obtained by fetchTask\n this.ringBufferofJobs.buf = this.ringBufferofJobs.filter(element => element !== slice.jobAddress);\n\n this.emit('submitSliceFailed', error);\n this.emit('submitFinished');\n}\n\n// _Idx\n//\n// Return slices and sent progress reports to result-submitter-results.\n// Return slices to result-submitter-status which marks the\n// corresponding row in activeSlices to be rescheduled on usually another worker.\n//\n\n/**\n * Bulk-return multiple slices, possibly for assorted jobs.\n * Returns slices to the scheduler to be redistributed.\n * Called in the sandbox terminate handler and purgeAllWork(jobAddress)\n * and stopWork(forceTerminate).\n *\n * @param {Slice[]} slices - The slices to return to the scheduler.\n * @param {string} [reason='unknown'] - Optional reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @param {boolean} [removeSlices=true] - When true, removes slices from this.sliceInventory .\n * @returns {Promise<*>} - Response from the scheduler.\n */\nSupervisor.prototype.returnSlices = function Supervisor$$returnSlices (slices, reason = 'unknown', removeSlices = true)\n{\n if (!slices || !slices.length) return Promise.resolve();\n debugging('supervisor') && console.debug('Supervisor.returnSlices: Returning slices', slices.map(slice => slice.identifier));\n\n const slicePayload = constructReturnSliceBuckets(slices, reason);\n if (removeSlices) slices.forEach((slice) => slice.jobManager.removeSlice(slice));\n\n try\n {\n return this.resultSubmitter.send('status', {\n worker: this.workerId,\n slices: slicePayload,\n }).catch(error => {\n const errorInfo = slices.map(slice => slice.identifier).sort();\n debuggingError && console.error('Failed to return slice(s)', { errorInfo, error }, 'Will try again on new connection.');\n return this.saveForResubmitToRS('status', { worker: this.workerId, slices: slicePayload });\n });\n }\n catch (error)\n {\n /* resultSubmitter can be null if worker is stopped */\n debuggingError && console.error(`Failed to return slices ${compressSlices(slices)}, no connection to result submitter:`, error);\n }\n}\n\n/** XXXpfr @todo TEMP -- Remove when sup2 replaces sup1 */\nfunction compressSlices(sliceArray)\n{\n 
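// Illustrative sketch (not part of the shipped module): the grouping idea behind
// the slice buckets that returnSlices() above sends to the result submitter.
// Slices sharing a job address, estimation flag, authorization message and reason
// collapse into one bucket carrying a list of slice numbers. The real bucket shape
// comes from slice.getMessage() and may carry additional fields; this shows only
// the grouping behaviour.
function sketchGroupSlicesForReturn(slices, reason)
{
  const buckets = [];
  for (const slice of slices)
  {
    let bucket = buckets.find((b) =>
      b.jobAddress === slice.jobAddress
      && b.isEstimationSlice === slice.isEstimation
      && b.authorizationMessage === slice.authorizationMessage
      && b.reason === reason);
    if (!bucket)
      buckets.push(bucket = {
        jobAddress: slice.jobAddress,
        isEstimationSlice: slice.isEstimation,
        authorizationMessage: slice.authorizationMessage,
        reason,
        sliceNumbers: [],
      });
    bucket.sliceNumbers.push(slice.sliceNumber);
  }
  return buckets;
}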
const jobSliceMap = toJobMap(sliceArray, slice => slice.sliceNumber);\n return compressJobMap(jobSliceMap, false /* skipFirst*/, addressTruncationLength);\n}\n\n/**\n * Takes a slice and returns it to the scheduler to be redistributed.\n * Usually called when an exception is thrown by sandbox.work(...) .\n * Or when the supervisor tells it to forcibly stop working.\n *\n * @param {Slice} slice - The slice to return to the scheduler.\n * @param {string} [reason] - Optional reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @returns {Promise<*>} - Response from the scheduler.\n */\nSupervisor.prototype.returnSlice = function Supervisor$$returnSlice (slice, reason)\n{\n assert(slice.sliceNumber > 0 && slice.jobManager);\n debugging() && console.debug(`Supervisor.returnSlice: Returning slice ${slice.identifier} with reason ${reason}.`);\n\n if (!this.resultSubmitter)\n this.connectTo('resultSubmitter');\n\n try\n {\n slice.jobManager.removeSlice(slice);\n const payload = slice.getReturnMessagePayload(this.workerId, reason);\n return this.resultSubmitter.send('status', payload)\n .catch(error => {\n debuggingError && console.error('Failed to return slice', {\n sliceNumber: slice.sliceNumber,\n jobAddress: slice.jobAddress,\n status: slice.state.valueOf(),\n error,\n }, 'Will try again on a new connection.');\n return this.saveForResubmitToRS('status', payload);\n });\n }\n catch (error)\n {\n /* resultSubmitter can be null if worker is stopped */\n debuggingError && console.error(`Failed to return slice ${slice.identifier}, no connection to result submitter:`, error);\n }\n}\n\n/**\n * Send beacon to status.js for 'progress' and sliceStatus.scheduled.\n *\n * Run in an interval created in @constructor .\n * @returns {Promise<*>}\n */\nSupervisor.prototype.emitProgressReport = function emitProgressReport () \n{\n const slices = constructSliceBuckets( this.readySlices(), sliceStatus.scheduled );\n constructSliceBuckets( this.workingSlices(), 'progress', slices );\n\n debugging('supervisor') && console.debug('emitProgressReport:', stringify(slices));\n\n if (slices.length)\n {\n const progressReportPayload = {\n worker: this.workerId,\n slices,\n };\n\n try\n {\n return this.resultSubmitter.send('status', progressReportPayload)\n .catch(error => {\n debuggingError && console.error('479: Failed to send status beacon update:', error/*.message*/);\n return this.saveForResubmitToRS('status', progressReportPayload);\n });\n }\n catch (error) \n {\n /* resultSubmitter can be null if worker is stopped */\n debuggingError && console.error(`Failed to emit progress report, no connection to result submitter:`, error);\n }\n }\n}\n\n/**\n * Add a slice to the slice buckets being built. If a sliceBucket already exists for the\n * job-status-authMessage tuple, then the slice will be added to that, otherwise a new\n * sliceBucket will be added to the buckets.\n *\n * @param {Slice[]} slices - The slices.\n * @param {String} status - Status update, eg. progress or scheduled.\n * @param {Object[]} [sliceBuckets] - Slice buckets being built. 
Will be mutated in place.\n * @returns {Object[]} - mutated sliceBuckets array\n */\nfunction constructSliceBuckets (slices, status, sliceBuckets)\n{\n const jobMap = {};\n for (const slice of slices)\n {\n assert(slice.sliceNumber > 0 );\n if (!jobMap[slice.jobAddress]) jobMap[slice.jobAddress] = [];\n\n // Try to find a sliceBucket in the buckets which matches the job, status, and auth message.\n let sliceBucket = jobMap[slice.jobAddress].find(desc => {\n return desc.status === status\n && desc.authorizationMessage === slice.authorizationMessage;\n });\n\n if (!sliceBucket) jobMap[slice.jobAddress].push(slice.getMessage(status));\n else sliceBucket.sliceNumbers.push(slice.sliceNumber);\n }\n\n if (!sliceBuckets) return Object.values(jobMap);\n sliceBuckets.push(...Object.values(jobMap));\n return sliceBuckets;\n}\n \n/**\n * Add a slice to the returnSlice bucket being built. If a sliceBucket already exists for the\n * job-isEstimation-authMessage-reason tuple, then the slice will be added to that, otherwise a new\n * sliceBucket will be added to the buckets.\n *\n * @param {Slice[]} slices - The slices.\n * @param {String} [reason] - Optional reason to further characterize status; e.g. 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @param {Object[]} [sliceBuckets] - Optional slice buckets being built. Will be mutated in place.\n * @returns {Object[]} - mutated sliceBuckets array\n */\nfunction constructReturnSliceBuckets (slices, reason, sliceBuckets)\n{\n const jobMap = {};\n for (const slice of slices)\n {\n assert(slice.sliceNumber > 0 );\n if (!jobMap[slice.jobAddress]) jobMap[slice.jobAddress] = [];\n \n // Try to find a sliceBucket in the buckets which matches the job, estimation status, reason, and auth message.\n let sliceBucket = jobMap[slice.jobAddress].find(desc => {\n return desc.isEstimationSlice === slice.isEstimation\n && desc.authorizationMessage === slice.authorizationMessage\n && desc.reason === reason;\n });\n\n if (!sliceBucket) \n jobMap[slice.jobAddress].push(slice.getMessage('return', { isEstimationSlice: slice.isEstimation, reason }));\n else sliceBucket.sliceNumbers.push(slice.sliceNumber);\n }\n\n if (!sliceBuckets) return Object.values(jobMap);\n sliceBuckets.push(...Object.values(jobMap));\n return sliceBuckets;\n}\n \n// _Idx\n//\n// Task Distributor (TD): requestTask (Rq) support -- communication with TD.\n//\n\n/**\n * XXXpfr @todo Needs Work\n * For a given job, the scheduler stores an EMA approximation of average slice completion time in\n * jobPerfData.sliceCPUTime (and jobPerfData.sliceGPUTime, but we don't do the GPU analysis yet.)\n * However, each worker also tracks the same information and the ratio of local-info to scheduler-info\n * is returned by this.conversionQuantum so we can tell the task distributor how much work to return\n * from requestTask so that the work actually takes 5 minutes to complete when using all the worker sandboxes.\n * Note: \n * We average the completion times over the current jobs.\n * Define completion time in terms of sliceC(G)PUTime and sliceC(G)PUDensity\n * completion-time = (sliceCGPUTime + sliceCGPUTime) / ( sliceCPUDensity + sliceGPUDensity);\n * The local completion time is an EMA approximation of local completion-time as computed by Supervisor.recordResult.\n * The scheduler completion-time is computed directly from the corresponding row in jobPerfData.\n */\nSupervisor.prototype.conversionQuantum = function Supervisor$conversionQuantum()\n{\n let globalSpeed = 0, localSpeed = 0;\n for (const jobMan 
of this.jobManagerInventory)\n {\n const _globalTime = jobMan.globalTime;\n const _localTime = jobMan.statistics.ema;\n if (_globalTime > 0 && _localTime > 0)\n {\n //console.debug('conversionQuantum: local', _localTime, 'global', _globalTime);\n globalSpeed += _globalTime;\n localSpeed += _localTime;\n }\n }\n const conversion = globalSpeed > 0 ? localSpeed / globalSpeed : 1;\n return Math.min(Math.max(conversion, 0.2), 5.0); // Truncate if conversion is too bizarre.\n}\n\n/**\n * Remove all unreferenced jobs in this.jobManagerInventory and this.moduleCache.\n * Since job-managers are inserted into this.jobManagerInventory with a push, the job managers at the beginning are oldest.\n * Only delete #deleteCount of the oldest job-managers:\n * let deleteCount = this.jobManagerInventory.length - cachedJobsThreshold;\n * Edit cachedJobsThreshold to adjust the cache cleanup threshold.\n * @param {object[]} [newJobKeys=[]] - Jobs that should not be removed from this.jobManagerInventory and this.moduleCache.\n */\nSupervisor.prototype.clearUnusedJobManagersAndModuleCache = function Supervisor$clearUnusedJobManagersAndModuleCache(newJobKeys=[])\n{\n let deleteCount = this.jobManagerInventory.length - this.cachedJobsThreshold;\n if (deleteCount > 0)\n {\n selectiveDebugging && console.debug(`Supervisor.clearUnusedJobManagersAndModuleCache START: deleteCount ${deleteCount}/${this.jobManagerInventory.length}/${this.cachedJobsThreshold}.`);\n const jobMap = {};\n newJobKeys.forEach(jobAddress => { jobMap[jobAddress] = 1; });\n for (const jobManager of this.jobManagerInventory)\n {\n if (!jobMap[jobManager.address])\n {\n const sliceInventory = jobManager.sliceInventory.filter(slice => slice.isActive || slice.isQueued);\n if (sliceInventory.length < 1)\n {\n this.purgeJob(jobManager);\n if (--deleteCount < 1)\n break;\n }\n }\n }\n selectiveDebugging && console.debug(`Supervisor.clearUnusedJobManagersAndModuleCache FINISH: deleteCount ${deleteCount}/${this.jobManagerInventory.length}/${this.cachedJobsThreshold}.`);\n }\n}\n\n/**\n * Ask the scheduler (task distributor) for work.\n * @param {number} [unusedSandboxSlots]\n * @param {object[]} [jobs=[]] \n */\nSupervisor.prototype.requestTask = function Supervisor$requestTask (unusedSandboxSlots, jobs = [])\n{\n if (!this.isReady() || this.isFetchingNewWork)\n return Promise.resolve();\n\n if(!unusedSandboxSlots) unusedSandboxSlots = this.unusedSandboxSlots();\n if (unusedSandboxSlots < 1)\n {\n debugging('supervisor') && console.debug('requestTask: There are no unused sandbox slots.');\n return Promise.resolve();\n }\n\n // Refresh connections.\n this.instantiateAllConnections();\n\n // We prune for over this.maxTotalSandboxes about every 15 seconds, or when must prune level is reached.\n if (this.sandboxCount() > this.mustPruneSandboxLevel || Date.now() > this.lastPrune + this.pruneFrequency)\n {\n this.lastPrune = Date.now();\n this.pruneSandboxes();\n }\n\n try\n {\n this.isFetchingNewWork = true;\n const numCPUSlotToFill = this.numberOfAvailableSandboxSlots(unusedSandboxSlots);\n if (numCPUSlotToFill < 1)\n {\n //debugging() && console.debug('Predicted workload too high; not fetching additional work yet'); <-- Save Wes' msg...\n debugging() && console.debug('Supervisor.requestTask: We have enough, so start executing some slices.');\n return this.roundRobinSlices(); // roundRobinSlices guarantees this.isFetchingNewWork === false\n }\n\n /** XXXpfr @todo Get together with Wes to figure this out. 
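// Illustrative sketch (not part of the shipped module): the clamp applied by
// conversionQuantum() above. localSpeed and globalSpeed are sums of per-job EMA
// completion times (worker-local vs. scheduler-reported); the ratio is clamped to
// [0.2, 5.0] so a wildly skewed estimate cannot distort how much work is requested.
function sketchConversionQuantum(localSpeed, globalSpeed)
{
  const conversion = globalSpeed > 0 ? localSpeed / globalSpeed : 1;
  return Math.min(Math.max(conversion, 0.2), 5.0);
}
// e.g. sketchConversionQuantum(300, 1200) === 0.25; sketchConversionQuantum(1200, 100) === 5.0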
*/\n //let predictedLoad = this.predictLoad(Date.now() + ms(this.tuning.prefetchInterval)).load;\n\n const request = {\n numCores: numCPUSlotToFill,\n coreStats: this.getStatisticsCPU(),\n numGPUs: this.numGPU,\n //targetLoad: this.targetLoad.subtract(predictedLoad), /** XXXpfr @todo Get together with Wes to figure this out. */\n conversionQuantum: this.conversionQuantum(),\n capabilities: this.capabilities,\n paymentAddress: this.paymentAddress,\n jobAddresses: jobs.concat(this.options.jobAddresses || []), // When set, only fetches slices for these jobs.\n localExec: this.options.localExec,\n workerComputeGroups: this.generateWorkerComputeGroups(),\n minimumWage: workerTuning.minimumWage || this.options.minimumWage,\n loadedJobs: this.jobManagerInventory.map(jobMan => jobMan.address),\n readyJobs: this.jobManagerInventory.filter(jobMan => jobMan.ready).map(jobMan => jobMan.address),\n previouslyWorkedJobs: this.ringBufferofJobs.buf, // Only discrete jobs.\n rejectedJobs: this.rejectedJobs,\n };\n // Workers should be part of the public compute group by default.\n if (!booley(workerTuning.leavePublicGroup) && !booley(this.options.leavePublicGroup))\n request.workerComputeGroups.push(constants.computeGroups.public);\n\n // Call Task Distributor and handle response with this.addTaskToWorkload.\n this.fetchTask(request, (response) => this.addTaskToWorkload(request, response));\n }\n catch (error)\n {\n // Paranoid double-checking we don't accidently leave a live this.isFetchingNewWork.\n this.isFetchingNewWork = false;\n throw error;\n }\n}\n\n/** Gets the logical and physical number of cores and also the total number of sandboxes the worker is allowed to run. */\nSupervisor.prototype.getStatisticsCPU = function Supervisor$getStatisticsCPU ()\n{\n if (DCP_ENV.isBrowserPlatform)\n {\n return {\n worker: this.workerId,\n lCores: window.navigator.hardwareConcurrency,\n pCores: workerTuning.pCores || window.navigator.hardwareConcurrency,\n sandbox: this.maxWorkingSandboxes,\n }\n }\n\n return {\n worker: this.workerId,\n lCores: requireNative('os').cpus().length,\n pCores: requireNative('physical-cpu-count'),\n sandbox: this.maxWorkingSandboxes,\n }\n}\n\n/**\n * Callback for fetchTask.\n * @param {object} request \n * @param {object} response\n */\nSupervisor.prototype.addTaskToWorkload = function Supervisor$addTaskToWorkload (request, response)\n{\n try\n {\n const payload = response.payload;\n if (!response.success)\n {\n debugging() && console.debug('Task fetch failure; request=', request);\n debugging() && console.debug('Task fetch failure; response=', payload);\n // Only report errors when in 'ready' state.\n if (this.isReady()) throw new DCPError('Unable to fetch task for worker', payload);\n else return;\n }\n\n const sliceCount = payload.body.task.length || 0;\n if (sliceCount < 1)\n {\n if (selectiveDebugging2 && (this.lastTime + 7000 < Date.now()))\n {\n this.lastTime = Date.now();\n // Display the state of every slice.\n if (displaySliceState)\n {\n /** @type {JobManager} */\n const jm = this.jobManagerInventory.top();\n jm.dumpSlices(false /*details*/);\n }\n // Display completed results so far.\n if (displayCompletedResults && this.queuedSliceCount() < 1)\n {\n const values = Object.values(this.resultMap);\n if (values.length > 0)\n {\n values.forEach((descriptor) => descriptor.slices.sort((x,y) => x-y))\n console.debug(`Recorded results: job managers ${values.length}:`, this.resultMap);\n }\n }\n }\n this.emit('fetchedTask', { jobs: 0, slices: sliceCount });\n // There may be an 
extra slice to process.\n // roundRobinSlices guarantees this.isFetchingNewWork === false;\n return this.roundRobinSlices();\n }\n\n /**\n * payload structure: { owner: this.address, signature: signature, auth: messageLightWeight, body: messageBody };\n * messageLightWeight: { workerId: worker, jobSlices, schedulerId, jobCommissions }\n * messageBody: { newJobs: await-getNewJobsForTask(dbScheduler, task, request), task }\n */\n const { body, ...authorizationMessage } = payload;\n /** @type {{ newJobs: object, task: SliceMessage[] }} */\n const { newJobs, task } = body;\n assert(newJobs); // It should not be possible to have !newJobs -- we throw on !success.\n const newJobKeys = Object.keys(newJobs);\n const jobCount = newJobKeys.length;\n\n /*\n * Ensure all jobs received from the scheduler (task distributor) are:\n * 1. If we have specified specific jobs the worker may work on, the received jobs are in the specified job list\n * 2. If we are in localExec, at most 1 unique job type was received (since localExec workers are designated for only one job)\n * If the received jobs are not within these parameters, stop the worker since the scheduler cannot be trusted at that point.\n */\n if (request.jobAddresses.length > 0 && !newJobKeys.every((ele) => request.jobAddresses.includes(ele))\n || request.localExec && jobCount > 1)\n {\n this.error(\"Worker received slices it shouldn't have. Rejecting the work and stopping.\");\n process.exit(1);\n }\n\n selectiveDebugging && console.debug(`Supervisor.addTaskToWorkload: task: ${task.length}/${request.numCores}/${this.maxWorkingSandboxes}, conversion: ${request.conversionQuantum}, jobs: ${jobCount}, authSlices: ${compressJobMap(authorizationMessage.auth.authSlices, true /* skipFirst*/, addressTruncationLength /* digits*/)}`);\n\n // Clear out job managers w/o any queued slices,\n // and remove corresponding job references from module cache.\n // When a cached module no longer has any job references it is removed from the cache.\n this.clearUnusedJobManagersAndModuleCache(newJobKeys);\n\n // this.jobMap: job.address --> jobManager\n /** @type {Object.<Address, JobManager>} */\n this.jobMap = {};\n this.jobManagerInventory.forEach(jobManager => {\n this.jobMap[jobManager.address] = jobManager;\n });\n\n /** @type {Object.<Address, SliceMessage[]>} */\n const jobSliceMap = {};\n task.forEach((element) => {\n const address = String(element.jobAddress);\n if (!jobSliceMap[address]) jobSliceMap[address] = [element];\n else jobSliceMap[address].push(element);\n });\n\n debugging('supervisor') && console.debug('requestTask: slices, newJobs and jobMap', task.length, Object.keys(newJobs), Object.keys(this.jobMap));\n\n // Populate the job managers with slices, creating new job managers when necessary.\n // Set up discrete job ring buffer.\n for (const [jobAddress, jobEl] of Object.entries(newJobs))\n {\n if (this.jobMap.hasOwnProperty(jobAddress))\n {\n /** @type {JobManager} */\n const jm = this.jobMap[jobAddress];\n jm.update(jobEl, jobSliceMap[jobAddress], authorizationMessage);\n }\n else\n {\n // Add the slice messages to the job manager ctor, so that slice construction is after job manager is ready.\n const jobManager = new JobManager(this, jobEl, jobSliceMap[jobAddress], authorizationMessage);\n this.jobMap[jobAddress] = jobManager;\n this.jobManagerInventory.push(jobManager);\n\n // Populate the ring buffer based on job's discrete property.\n assert(jobEl.requirements);\n if (jobEl.requirements.discrete && this.ringBufferofJobs.find(address => address 
=== jobEl.address) === undefined)\n this.ringBufferofJobs.push(jobEl.address);\n }\n }\n\n /**\n * The requestTask event fires when the supervisor has finished trying to\n * fetch work from the scheduler (task distributor). The data emitted is the\n * number of jobs and new slices to work on in the fetched task.\n *\n * @event Supervisor#requestTask\n * @type {object}\n */\n this.emit('fetchedTask', { jobs: jobCount, slices: sliceCount });\n\n // Start working on the slices.\n setImmediate(() => this.roundRobinSlices());\n }\n catch (error)\n {\n this.isFetchingNewWork = false; // Paranoid double checking that we don't leave this.isFetchingNewWork live.\n this.emit('fetchTaskFailed', error);\n debuggingError && console.error('Supervisor.requestTask failed!', error);\n }\n}\n\n/**\n * Returns the number of unused sandbox slots to fill -- sent to requestTask.\n * @returns {number}\n */\nSupervisor.prototype.numberOfAvailableSandboxSlots = function Supervisor$numberOfAvailableSandboxSlots(unusedSandboxSlots)\n{\n const _readySlices = this.readySlices();\n let numCores;\n if (this.options.priorityOnly && this.options.jobAddresses.length === 0)\n numCores = 0;\n else if (_readySlices.length > 1) // We have slices ready, no need to fetch.\n numCores = 0;\n else\n {\n // There are almost no ready slices (there may be 0 or 1), fetch a full task.\n // The task is full, in the sense that it will contain slices whose\n // aggregate execution time is this.maxWorkingSandboxes * 5-minutes.\n // However, there can only be unusedSandboxSlots # of long slices.\n // Thus we need to know whether the last slice in this.readySlices() is long or not.\n // (A long slice has estimated execution time >= 5-minutes on an average worker.)\n const longSliceCount = (_readySlices.length > 0 && _readySlices[0].isLong) ? 1 : 0;\n numCores = unusedSandboxSlots - longSliceCount;\n debugging('supervisor') && console.debug('numberOfAvailableSandboxSlots', numCores, unusedSandboxSlots, longSliceCount);\n }\n return numCores;\n}\n\n/**\n * @callback cbAddTaskToWorkload\n * @param {Response} response\n * @returns {Promise<void>}\n */\n\n/**\n * Call to fetch new slices from task distributor.\n * @param {*} request\n * @param {cbAddTaskToWorkload} addTaskToWorkload\n * @returns {Promise<void>}\n */\nSupervisor.prototype.fetchTask = async function Supervisor$fetchTask (request, addTaskToWorkload)\n{\n // Fetch a new task if we have insufficient slices queued, then start workers\n assert(this.isFetchingNewWork);\n\n this.instantiateAllConnections();\n\n // Top up sandboxes when necessary.\n const moreSandboxes = this.maxWorkingSandboxes - this.sandboxCount();\n if (moreSandboxes > 0)\n {\n await this.carefullyCreateSandboxes(moreSandboxes)\n .then(() => this.checkCapabilities()); /** XXXpfr @todo Do we need to check capabilities again? 
*/\n }\n\n const fetchTimeout = setTimeout(() => {\n this.isFetchingNewWork = false;\n this.emit('warning', 'Fetch exceeded timeout, will reconnect at next watchdog interval');\n safeClose(this.taskDistributor, 'Fetch timed out', Math.random() > 0.5).catch(error => {\n this.error('Failed to close task-distributor connection', error);\n });\n safeClose(this.resultSubmitter, 'Fetch timed out', Math.random() > 0.5).catch(error => {\n this.error('Failed to close result-submitter connection', error);\n });\n this.instantiateAllConnections();\n }, 3 * 60 * 1000); // Max out at 3 minutes to fetch.\n\n // Ensure result submitter and task distributor connections before fetching tasks.\n try\n {\n await this.taskDistributor.keepalive();\n await this.resultSubmitter.keepalive();\n await this.taskDistributor.keepalive();\n }\n catch (e)\n {\n this.isFetchingNewWork = false;\n this.error('Failed to connect to result submitter, refusing to fetch slices. Will try again at next fetch cycle.', e);\n clearTimeout(fetchTimeout);\n safeClose(this.taskDistributor, 'Failed to connect to task-distributor', true).catch(error => {\n this.error('Failed to close task-distributor connection', error);\n });\n safeClose(this.resultSubmitter, 'Failed to connect to result-submitter', true).catch(error => {\n this.error('Failed to close result-submitter connection', error);\n });\n return Promise.resolve();\n }\n\n this.emit('fetchingTask');\n\n if (!this.taskDistributor) return\n return this.taskDistributor.send('requestTask', request)\n .then((response) => {\n addTaskToWorkload(response);\n })\n .catch((error) => {\n this.isFetchingNewWork = false; // Redundant.\n this.emit('fetchTaskFailed', error);\n this.error('Unable to request task from scheduler. Will try again on a new connection.', error);\n safeClose(this.taskDistributor, 'Failed to request task.', true);\n })\n .finally(() => {\n this.isFetchingNewWork = false;\n clearTimeout(fetchTimeout);\n });\n}\n\n/**\n * Generate the workerComputeGroups property of the requestTask message. \n * \n * Concatenate the compute groups object from dcpConfig with the list of compute groups\n * from the supervisor, and remove the public group if accidentally present. Finally,\n * we transform joinSecrets/joinHash into joinHashHash for secure transmission.\n *\n * @note computeGroup objects with joinSecrets are mutated to record their hashes. This\n * affects the supervisor options and dcpConfig. 
Re-adding a joinSecret property\n * to one of these will cause the hash to be recomputed.\n */\nSupervisor.prototype.generateWorkerComputeGroups = function Supervisor$generateWorkerComputeGroups ()\n{\n let computeGroups = Object.values(workerTuning.computeGroups || {});\n if (this.options.computeGroups)\n computeGroups = computeGroups.concat(this.options.computeGroups);\n computeGroups = computeGroups.filter(group => group.id !== constants.computeGroups.public.id);\n const hashedComputeGroups = [];\n for (const group of computeGroups)\n {\n const groupCopy = Object.assign({}, group);\n if ((group.joinSecret || group.joinHash) && (!group.joinHashHash || this.lastDcpsid !== this.taskDistributor.dcpsid))\n {\n let joinHash;\n if (group.joinHash)\n joinHash = group.joinHash.replace(/\\s+/g, ''); // strip whitespace\n else\n joinHash = calculateJoinHash(groupCopy);\n\n groupCopy.joinHashHash = hash.calculate(hash.eh1, joinHash, this.taskDistributor.dcpsid);\n delete groupCopy.joinSecret;\n delete groupCopy.joinHash;\n debugging('computeGroups') && console.debug(`Calculated joinHash=${joinHash} for`, groupCopy);\n }\n hashedComputeGroups.push(groupCopy);\n }\n this.lastDcpsid = this.taskDistributor.dcpsid;\n debugging('computeGroups') && console.debug('Requesting ', computeGroups.length, 'non-public groups for session', this.lastDcpsid);\n return hashedComputeGroups;\n}\n\n// _Idx\n//\n// Aggregators from the job managers.\n// Note: Not all functions are used yet.\n//\n/** XXXpfr @todo Figure out which aggregators to keep. */\n\n/**\n * Gather the count of job managers with queuedSlices.\n * @returns {number}\n */\nSupervisor.prototype.activeJobCount = function Supervisor$activeJobCount ()\n{\n let count = 0;\n this.jobManagerInventory.forEach((jobManager) => {\n if (jobManager.queuedSlices.length > 0) count++;\n });\n return count;\n}\n\n/**\n * Gather the ready slices from the job managers.\n * @returns {Slice[]}\n */\nSupervisor.prototype.readySlices = function Supervisor$readySlices ()\n{\n const readySlices = [];\n this.jobManagerInventory.forEach((jobManager) => {\n readySlices.push(...jobManager.readySlices);\n });\n return readySlices;\n}\n\n/**\n * Gather the working slices in the job managers.\n * @returns {Slice[]}\n */\nSupervisor.prototype.workingSlices = function Supervisor$workingSlices ()\n{\n const workingSlices = [];\n this.jobManagerInventory.forEach((jobManager) => {\n workingSlices.push(...jobManager.workingSlices);\n });\n return workingSlices;\n}\n\n/**\n * Gather the count of various kinds of slices over all the job managers.\n * @param {string} predicate - 'all;, 'ready', 'queued', 'reserved', 'working', 'workingOnly'.\n * @returns {number}\n */\nSupervisor.prototype.predicateSliceCount = function Supervisor$predicateSliceCount (predicate)\n{\n let count = 0;\n switch (predicate)\n {\n case 'all':\n this.jobManagerInventory.forEach((jobManager) => {\n count += jobManager.sliceInventory.length;\n });\n break\n case 'ready':\n this.jobManagerInventory.forEach((jobManager) => {\n count += jobManager.readySlices.length;\n });\n break;\n case 'queued':\n this.jobManagerInventory.forEach((jobManager) => {\n count += jobManager.queuedSlices.length;\n });\n break;\n case 'reserved':\n this.jobManagerInventory.forEach((jobManager) => {\n count += jobManager.reservedSlices.length;\n });\n break;\n case 'working': // both working and reserved (viz., soon-to-be-working)\n this.jobManagerInventory.forEach((jobManager) => {\n count += jobManager.workingSlices.length;\n });\n break;\n 
case 'workingOnly':\n this.jobManagerInventory.forEach((jobManager) => {\n count += jobManager.workingSlicesOnly.length;\n });\n break;\n }\n return count;\n}\n/** @returns {number} */\nSupervisor.prototype.sliceCount = function Supervisor$sliceCount () { return this.predicateSliceCount('all'); }\n/** @returns {number} */\nSupervisor.prototype.readySliceCount = function Supervisor$readySliceCount () { return this.predicateSliceCount('ready'); }\n/** @returns {number} */\nSupervisor.prototype.queuedSliceCount = function Supervisor$queuedSliceCount () { return this.predicateSliceCount('queued'); }\n/** @returns {number} */\nSupervisor.prototype.reservedSliceCount = function Supervisor$reservedSliceCount () { return this.predicateSliceCount('reserved'); }\n/** @returns {number} */\nSupervisor.prototype.workingSliceCount = function Supervisor$workingSliceCount () { return this.predicateSliceCount('working'); }\n/** @returns {number} */\nSupervisor.prototype.workingSliceOnlyCount = function Supervisor$workingSliceOnlyCount () { return this.predicateSliceCount('workingOnly'); }\n\n/**\n * Gather the count of working sandboxes over all the job managers.\n * @returns {number}\n */\nSupervisor.prototype.sandboxCount = function Supervisor$sandboxCount ()\n{\n return this.readiedSandboxes.length + this.sandboxInventory.filter((sandbox) => !sandbox.isTerminated).length;\n}\n\n/**\n * Gather the count of working sandboxes over all the job managers.\n * @returns {number}\n */\nSupervisor.prototype.workingSandboxCount = function Supervisor$workingSandboxCount ()\n{\n return this.sandboxInventory.filter((sandbox) => !sandbox.isTerminated && sandbox.isWorking).length;\n}\n\n// _Idx\n//\n// Sandbox creation and management.\n// \n\n/**\n * Create and start a Sandbox.\n * When this.readiedSandboxes.length > 0, use one of those sandboxes, instead of creating a new one.\n * @param {number} [delayMs=0] - The delay ms when calling sandbox.start(delayMs) .\n * @returns {Promise<Sandbox>}\n */\nSupervisor.prototype.createSandbox = function Supervisor$createSandbox (delayMs = 0)\n{\n const that = this;\n function getReadiedSandbox()\n {\n const sandbox = that.readiedSandboxes.pop();\n that.sandboxInventory.push(sandbox);\n return Promise.resolve(sandbox);\n }\n\n if (this.readiedSandboxes.length > 0)\n return getReadiedSandbox();\n\n // Do not place in this.readiedSandboxes, we'll directly use the return value of createSandbox.\n return this.createNewSandbox(delayMs, true/*putInInventory*/)\n .catch(() => {\n return this.carefullyCreateSandboxes(1)\n .then(() => {\n return getReadiedSandbox();\n });\n });\n}\n \n/**\n * Create and start a Sandbox.\n * Store it in this.readiedSandboxes or this.sandboxInventory according to putInInventory.\n * @param {number} [delayMs=0] - Millisecond delay when calling sandbox.start(delayMs), otherwise return it and use it.\n * @param {boolean} [putInInventory=false]\n * @returns {Promise<Sandbox>}\n */\nSupervisor.prototype.createNewSandbox = function Supervisor$createNewSandbox (delayMs = 0, putInInventory = false)\n{\n const rawSandbox = new Sandbox(this, { ...this.options.sandboxOptions });\n this.hookUpSandboxListeners(rawSandbox);\n return rawSandbox.start(delayMs)\n .then((sandbox) => {\n if (putInInventory) this.sandboxInventory.push(sandbox);\n else this.readiedSandboxes.push(sandbox);\n return sandbox;\n })\n .catch((error) => {\n if (!error) error = new Error('Unknown error creating sandbox.');\n debuggingWarn && console.warn(`Supervisor.createNewSandbox: Failed to start 
sandbox ${rawSandbox.identifier}.`, error);\n rawSandbox.terminate(false);\n if (error.code === 'ENOWORKER')\n throw new DCPError(\"Cannot use localExec without dcp-worker installed. Use the command 'npm install dcp-worker' to install the neccessary modules.\", 'ENOWORKER');\n throw error;\n });\n}\n\n/**\n * Bulk: create and start sandboxes and save in this.readiedSandboxes.\n * Call this function when there's a chance the evaluator is down.\n * @param {number} count - The number of sandboxes to create.\n * @returns {Promise<void>}\n */\nSupervisor.prototype.carefullyCreateSandboxes = async function Supervisor$carefullyCreateSandboxes (count)\n{\n if (count < 1) return;\n // If the evaluator cannot start (e.g. if the evalServer is not running),\n // then the while loop will keep retrying until the evalServer comes online.\n let retry = 0;\n while (true)\n {\n try\n {\n await this.createNewSandbox();\n if (count > 1)\n this.createSandboxes(count - 1);\n }\n catch (error)\n {\n if (error.code === 'ENOWORKER') throw error;\n // Now assume the evaluator is down and keep retrying.\n /** XXXpfr @todo Need better indicator that evaluator is down. */\n if ((retry++ % 6) === 0)\n this.error(`Failed to ready sandboxes; will keep retrying: ${this.checkCode(error)}`);\n await a$sleepMs(1000 * Math.max(5, retry));\n }\n }\n}\n\n/**\n * Bulk: create and start sandboxes and save in this.readiedSandboxes.\n * @param {number} count - The number of sandboxes to create.\n * @returns {Promise<void>}\n */\nSupervisor.prototype.createSandboxes = async function Supervisor$createSandboxes (count)\n{\n assert(count > 0);\n const promises = [], errors = [];\n for (let k = 0; k < count; k++)\n {\n promises.push(\n this.createNewSandbox(k === 0 ? 0: this.delayMs())\n .catch((error) => errors.push(this.checkCode(error))));\n }\n\n await Promise.all(promises);\n\n if (errors.length)\n this.emit('warning', `Failed to ready ${errors.length} of ${count} sandboxes: ${errors.map(err => err.message)}`);\n\n // Sort so that pop() will return sandboxes in increasing order.\n this.readiedSandboxes.sort((x,y) => y.id - x.id);\n\n debugging('supervisor') && console.debug(`createSandboxes: Created ${count-errors.length} sandboxes.`, this.readiedSandboxes.map(s => s.id));\n}\n\n/**\n * For a given sandbox, hook up all the Sandbox listeners.\n * @param {Sandbox} sandbox \n */\nSupervisor.prototype.hookUpSandboxListeners = function hookUpSandboxListeners (sandbox) \n{\n sandbox.addListener('ready', () => this.emit('sandboxReady', sandbox));\n\n sandbox.addListener('start', () => {\n this.emit('sandboxStart', sandbox);\n\n if (sandbox.slice)\n {\n try\n {\n const statusPayload = sandbox.slice.getMessagePayload(this.workerId, 'begin');\n return this.resultSubmitter.send('status', statusPayload).catch((error) => {\n debuggingError && console.error(`Error sending 'status' for slice ${sandbox.slice.identifier}:\\n\\t${error}\\n\\tWill try again on a new connection`);\n return this.saveForResubmitToRS('status', statusPayload);\n });\n } \n catch (error)\n {\n /* resultSubmitterConnection can be null if worker is stopped */\n debuggingError && console.error(`Failed to send 'begin' status for slice ${sandbox.slice.identifier}, no connection to result submitter`, error);\n }\n }\n });\n\n sandbox.addListener('workEmit', ({ eventName, payload }) => {\n // Need to check if the sandbox hasn't been assigned a slice yet.\n if (!sandbox.slice)\n this.error(`Sandbox not assigned a slice before sending workEmit message to scheduler.\\n\\t'workEmit' 
event originates from '${eventName}' event`);\n else\n {\n const slice = sandbox.slice;\n // Sometimes a sliceNumber===0 workEmit comes in before the client bundle is properly loaded.\n // Also happens with minor dcp-client version mismatches.\n // sliceNumber===0 <==> authorizationMessage undefined...\n if (!slice.authorizationMessage)\n this.emit('warning', `workEmit: missing authorization message for slice ${slice.identifier}`);\n else if (this.eventRouter) // No reason to emit if event router is closed.\n {\n const workEmitPayload = {\n eventName,\n payload,\n job: slice.jobAddress,\n slice: slice.sliceNumber,\n worker: this.workerId,\n authorizationMessage : slice.authorizationMessage,\n };\n\n const workEmitPromise = this.eventRouter.send('workEmit', workEmitPayload).catch(error => {\n debuggingWarn && console.warn(`workEmit: Unable to send ${eventName} for slice ${slice.identifier}: ${error.message}.\\n\\tTrying again on a new connection.`);\n this.eventRouterMessageQueue.push({ operation: 'workEmit', data: workEmitPayload })\n safeClose(this.eventRouter); // stopWork could slip-in during eventRouter.send\n if (this.debugBuild) this.error('workEmit error:', error);\n });\n\n if (this.debugBuild)\n {\n workEmitPromise.then(result => {\n if (!result) this.emit('warning', `workEmit: Event router did not accept event ${eventName}`);\n });\n }\n }\n }\n });\n\n sandbox.on('rejectedWorkMetrics', (data) => {\n // If the slice already has rejectedTimeReport, add this data to it. If not, assign this data to slices rejectedTimeReport property\n if (sandbox.slice) \n {\n if (!sandbox.slice.rejectedTimeReport) sandbox.slice.rejectedTimeReport = data.timeReport;\n else \n {\n ['total', 'CPU', 'webGL'].forEach((key) => {\n if (data.timeReport[key]) sandbox.slice.rejectedTimeReport[key] += data.timeReport[key];\n })\n }\n }\n });\n\n // If the sandbox terminated and we are not shutting down, then we should return all work which is\n // currently not being computed if all sandboxes are dead and the attempt to create a new one fails.\n sandbox.on('terminated', async () => {\n let nonTerminatedSandboxes = this.sandboxInventory.filter(sbx => !sbx.isTerminated);\n if (nonTerminatedSandboxes.length === 0 && this.worker.working)\n {\n debugging('supervisor') && console.debug(`hookUpSandboxListeners: Try to create 1 sandbox in the sandbox-terminated-handler.`);\n const _sandbox = await this.createNewSandbox()\n .catch((error) => {\n debugging('supervisor') && console.warn('Failed to replace terminated sandbox; evalserver may be gone.', error.message);\n error.message = 'Failed to replace terminated sandbox: ' + error.message;\n this.emit('warning', error);\n });\n\n // If we cannot create a new sandbox, that probably means we're on a screensaver worker\n // and the screensaver is down. So return the slices to the scheduler.\n if (!_sandbox) this.screenSaverDestroy();\n }\n });\n\n sandbox.on('error', (error) => this.emit('error', error));\n sandbox.on('warning', (warning) => this.emit('warning', warning));\n}\n\n/**\n * Terminate extra sandboxes over the limit: this.maxTotalSandboxes.\n * First terminate assigned sandboxes which are unlikely to be used with the current ready slices.\n * Then terminate the unassigned sandboxes: this.readiedSandboxes.\n * (There should be no readied sandboxes at this point.)\n * Then round-robin prune 1 assigned sandbox from each jobmanager.\n * XXXpfr @todo Prioritize sandboxes that we wish to keep.\n * E.g. 
When a sandbox is especially expensive to assign.\n */\nSupervisor.prototype.pruneSandboxes = function Supervisor$pruneSandboxes () \n{\n let pruneCount = this.sandboxCount() - this.maxTotalSandboxes;\n if (pruneCount <= 0) return;\n selectiveDebugging && console.debug(`Supervisor.pruneSandboxes START: pruneCount ${pruneCount}/${this.sandboxCount()}/${this.maxTotalSandboxes}.`);\n // Only prune the extras: jm.assignedSandboxes.length > jm.queuedSlices.length .\n // Round-robin prune 1 extra assigned sandbox from each jobmanager.\n const readyJobManagers = this.jobManagerInventory.filter(jm => jm.ready);\n while (true)\n {\n const _pruneCount = pruneCount;\n for (const jm of readyJobManagers)\n {\n if (jm.pruneExtraAssignedSandbox())\n {\n if (--pruneCount < 1)\n {\n selectiveDebugging && console.debug(`Supervisor.pruneSandboxes FINISH: unpruned ${pruneCount}/${this.sandboxCount()}/${this.maxTotalSandboxes}.`);\n return;\n }\n }\n }\n if (pruneCount === _pruneCount)\n break;\n }\n assert(pruneCount > 0);\n // Prune the excess non-assigned sandboxes -- we should never hit this.\n if (this.readiedSandboxes.length > 0)\n {\n const toPrune = this.readiedSandboxes.slice(0, pruneCount);\n this.readiedSandboxes = this.readiedSandboxes.slice(pruneCount);\n toPrune.forEach(sandbox => sandbox.terminate(false));\n pruneCount -= toPrune.length;\n if (pruneCount < 1)\n {\n selectiveDebugging && console.debug(`Supervisor.pruneSandboxes FINISH: unpruned ${pruneCount}/${this.sandboxCount()}/${this.maxTotalSandboxes}.`);\n return;\n }\n }\n // Round-robin prune 1 assigned sandbox from each jobmanager.\n while (true)\n {\n const _pruneCount = pruneCount;\n for (const jm of readyJobManagers)\n {\n if (jm.pruneAssignedSandbox())\n {\n if (--pruneCount < 1)\n {\n selectiveDebugging && console.debug(`Supervisor.pruneSandboxes FINISH: unpruned ${pruneCount}/${this.sandboxCount()}/${this.maxTotalSandboxes}.`);\n return;\n }\n }\n }\n if (pruneCount === _pruneCount)\n break;\n }\n this.sandboxInventory = this.sandboxInventory.filter((sandbox) => !sandbox.isTerminated);\n selectiveDebugging && console.debug(`Supervisor.pruneSandboxes FINISH: unpruned ${pruneCount}/${this.sandboxCount()}/${this.maxTotalSandboxes}.`);\n}\n\n// _Idx\n//\n// Result-submitter-result support functions.\n// Send in the results!!!\n//\n\n/**\n * Submits the slice results to the result-submitter service.\n * Then remove the slice from the its job manager.\n *\n * @param {Slice} slice - The slice to submit.\n * @returns {Promise<void>}\n */\nSupervisor.prototype.recordResult = function Supervisor$recordResult (slice)\n{\n // It is possible for slice.result to be undefined when there are upstream errors.\n if ( !(slice && slice.result))\n throw new Error(`recordResult: slice.result is undefined for slice ${slice.identifier}. 
This is ok when there are upstream errors.`); \n if (!slice.isComplete)\n throw new Error('Cannot record result for slice that has not completed execution successfully.');\n\n debugging('supervisor') && console.debug(`supervisor: recording result for slice ${slice.identifier}.`);\n\n /* @see result-submitter::result for full message details */\n const metrics = { GPUTime: 0, CPUTime: 0, CPUDensity: 0, GPUDensity: 0, total: 0 };\n const payloadData = {\n slice: slice.sliceNumber,\n job: slice.jobAddress,\n worker: this.workerId,\n paymentAddress: this.paymentAddress,\n metrics,\n authorizationMessage: slice.authorizationMessage,\n }\n\n const timeReport = slice.timeReport;\n if (timeReport)\n {\n debugging('supervisor') && console.debug('recordResult timeReport', timeReport);\n // If slice takes less than 1ms to execute, CPUTime will be 0, so compensate.\n if (timeReport.CPU < 1)\n {\n timeReport.CPU++;\n timeReport.total++;\n }\n if (timeReport.total < timeReport.CPU + timeReport.webGL)\n {\n // Compensate or throw? For now we compensate.\n debuggingWarn && console.warn(`Supervisor.recordResult:: Inconsistent time report -- total < CPU + webGL -- ${stringify(timeReport)}`)\n //throw new Error(`recordResult: Inconsistent time report -- total < CPU + webGL -- ${stringify(timeReport)}`)\n timeReport.total = timeReport.CPU + timeReport.webGL;\n }\n if (timeReport.total > 0)\n {\n slice.jobManager.updateStatistics(timeReport);\n metrics.total = timeReport.total;\n metrics.CPUTime = timeReport.CPU;\n metrics.GPUTime = timeReport.webGL;\n metrics.CPUDensity = metrics.CPUTime / timeReport.total;\n metrics.GPUDensity = metrics.GPUTime / timeReport.total;\n }\n }\n\n this.emit('submittingResult');\n\n if (!this.resultSubmitter)\n this.connectTo('resultSubmitter');\n\n if (slice.resultStorageType === 'pattern')\n return this.sendResultToRemote(slice)\n .then((response) => {\n payloadData.result = response;\n this.sendToResultSubmitter(slice, payloadData);\n });\n\n payloadData.result = encodeDataURI(slice.result.result);\n return this.sendToResultSubmitter(slice, payloadData);\n}\n\n/**\n * @param {Slice} slice\n * @param {*} payloadData\n * @returns {Promise<void>}\n */\nSupervisor.prototype.sendToResultSubmitter = function Supervisor$sendToResultSubmitter (slice, payloadData)\n{\n const that = this;\n function handleRSError (error, payloadData)\n {\n that.error(`Failed to submit results to scheduler for slice ${payloadData.slice} of job ${payloadData.job}`, error);\n //slice.jobManager.dumpSlices('recordResult');\n that.saveForResubmitToRS('result', payloadData)\n .then((msg) => {\n if (!error && msg) error = new Error(`resultSubmitter is ${msg}`);\n that.emit('submitSliceFailed', error);\n throw error;\n });\n }\n\n try\n {\n debugging('supervisor') && console.debug('Supervisor.recordResult: payloadData', payloadData.result.slice(0, 256), slice.identifier);\n\n return this.resultSubmitter.send('result', payloadData)\n .then((resp) => {\n if (!resp.success)\n throw resp.payload;\n\n debugging('supervisor') && console.debug('recordResult: SUCCESS', slice.identifier);\n\n const receipt = {\n accepted: true,\n payment: resp.payload.slicePaymentAmount,\n };\n this.emit('submittedResult', resp.payload);\n this.emit('dccCredit', receipt);\n })\n .catch ((error) => {\n handleRSError (error, payloadData);\n });\n }\n catch (error)\n {\n handleRSError (error, payloadData);\n }\n finally\n {\n slice.markAsFinished();\n this.emit('submitFinished');\n // Remove the slice from the job manager.\n 
slice.jobManager.removeSlice(slice);\n if (this.sliceTiming)\n {\n slice['resultDelta'] = Date.now() - slice['resultDelta'];\n console.debug(`recordResult(${slice['queueingDelta']}, ${slice['executionDelta']}, ${slice['resultDelta']}): Completed slice ${slice.identifier}.`);\n }\n if (selectiveDebugging)\n {\n if (!this.resultMap[slice.jobAddress]) this.resultMap[slice.jobAddress] = { slices: [], totalTimes: [] };\n this.resultMap[slice.jobAddress].slices.push(slice.sliceNumber);\n this.resultMap[slice.jobAddress].totalTimes.push(payloadData.metrics.total);\n }\n }\n}\n\n/**\n * Send a work function's result to a server that speaks our DCP Remote Data Server protocol.\n * @param {Slice} slice - Slice object whose result we are sending.\n * @returns {Promise<string>}\n * @throws When HTTP status not in the 2xx range.\n */\nSupervisor.prototype.sendResultToRemote = function Supervisor$sendResultToRemote (slice)\n{ \n // Construct postParams.\n const postParams = { ...slice.resultStorageParams };\n postParams.element = slice.sliceNumber;\n /** Currently data will be outputed as a JSON object, XXXpfr @todo: Support file upload and other contentTypes. */\n postParams.contentType = 'application/json';\n\n const result = slice.result.result;\n if (result) postParams.content = JSON.stringify(result);\n else postParams.error = JSON.stringify(slice.error);\n debugging('supervisor') && console.debug('sendResultToRemote: content: ', (result ? postParams.content : postParams.error).slice(0, 512));\n\n // Construct url.\n const sliceResultUri = makeValueURI('pattern', slice.resultStorageDetails, {\n slice: slice.sliceNumber,\n job: slice.jobAddress,\n });\n debugging() && console.debug('sendResultToRemote sliceResultUri: ', sliceResultUri);\n const url = new DcpURL(sliceResultUri);\n\n // Check allowed origins.\n if (this.allowedOrigins.indexOf(url.origin) === -1 && this.sendResults.indexOf(url.origin) === -1)\n throw new Error(`Invalid origin for remote result storage: '${url.origin}'`);\n\n return justFetch(url, 'JSON', 'POST', false, postParams)\n .then((response) => encodeDataURI(JSON.stringify(response)));\n}\n\n// _Idx\n//\n// Reject.\n//\n\n/**\n * Handles reassigning or returning a slice that was rejected by a sandbox.\n *\n * If the slice does not have a rejected property already, reassign the\n * slice to a new sandbox and add a rejected property to the slice to\n * indicate it has already rejected once.\n *\n * If the slice rejects with a reason, or has a rejected time stamp\n * (ie. has been rejected once already) then return all slices from the\n * job to the scheduler and terminate all sandboxes with that jobAddress.\n *\n * The sandbox will be terminated.\n *\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n */\nSupervisor.prototype.handleWorkReject = async function Supervisor$handleWorkReject (sandbox, slice, rejectReason)\n{\n debugging() && console.debug('handleWorkReject', rejectReason, slice.rejectedTimeStamp, slice.identifier);\n\n // Do a hard flush of the microtask queue and finish the current event loop.\n await new Promise((resolve) => setImmediate(() => setTimeout(resolve, 0)));\n\n const jobManager = slice.jobManager;\n jobManager.rejectedJobReasons.push(rejectReason); // memoize reasons\n\n // First time rejecting without a reason. 
Try assigning slice to a new sandbox.\n if (rejectReason === 'false' && !slice.rejectedTimeStamp)\n {\n // Set rejected time stamp.\n slice.rejectedTimeStamp = Date.now();\n // Schedule the slice for execution.\n jobManager.scheduleSlice(slice, true /* placeInTheFrontOfTheQueue*/);\n // Slice has been rescheduled, but we still need to terminate the sandbox.\n jobManager.returnSandbox(sandbox);\n }\n else\n { \n // Slice has a reason OR rejected without a reason already and got stamped.\n // Add to array of rejected jobs.\n let rejectedJob = {\n address: slice.jobAddress,\n reasons: jobManager.rejectedJobReasons,\n }\n this.rejectedJobs.push(rejectedJob);\n\n // Purge the job.\n this.purgeJob(jobManager);\n\n // Tell everyone all about it, when allowed.\n if (jobManager.displayMaxDiagInfo)\n {\n const suffixMsg = '\\n\\tAll slices with the same jobAddress returned to the scheduler.\\n\\tAll sandboxes with the same jobAddress are terminated.';\n if (slice.rejectedTimeStamp)\n this.emit('warning', `work.reject: The slice ${slice.identifier} was rejected twice.${suffixMsg}`);\n else\n this.emit('warning', `work.reject: The slice ${slice.identifier} was rejected with reason ${rejectReason}.${suffixMsg}`);\n }\n }\n}\n\n// _Idx\n//\n// Unused functions that we need to review.\n// 1) destroy, shutdown, halt -- possibly need to incorporate these ideas in stopWork\n// 2) predictLoad -- XXXpfr: I really feel bad about not being able to figure out how to incorporate\n// this into the design of sup2. This was a central part of Wes' design of sup2.\n// I need to collaborate with Wes to resolve my ignorance.\n//\n\n/**\n * UNUSED\n * @deprecated -- may use later\n **/\nSupervisor.prototype.destroy = function Supervisor$destory()\n{\n selectiveDebugging && console.debug(`Supervisor.screenSaverDestroy: destroying Supervisor and everything else.`);\n this.stopWork(true /*forceTerminate*/);\n if (this.state) this.state.destroy();\n if (this.progressReportTimer) clearInterval(this.progressReportTimer);\n if (this.watchdogTimer) clearInterval(this.watchdogTimer);\n this.state = null;\n this.progressReportTimer = null;\n this.watchdogTimer = null;\n this.jobManagerInventory = null;\n this.sandboxInventory = [];\n this.readiedSandboxes = [];\n this.closeConnections();\n}\n\n/**\n * UNUSED\n * @deprecated -- may use later \n * Halt the Supervisor as quickly as possible.\n **/\nSupervisor.prototype.halt = function Supervisor$halt()\n{\n this.state.setIf('ready', 'stopping');\n if (!this.state.is('stopping'))\n throw new Error(`Supervisor has an invalid state ${this.state} for halt`);\n clearInterval(this.watchdogTimer);\n\n for (let jobMan of this.jobManagerInventory)\n {\n jobMan.state.setIf('ready', 'stop');\n for (const sandbox of jobMan.workingSandboxes)\n {\n sandbox.stop(); // NYI -- will terminate.\n }\n }\n}\n \n/**\n * UNUSED\n * @deprecated -- may use later \n * Shutdown the supervisor; attempts to return work which will not be finished before timeout expires.\n * The shutdown is complete once this supervisor emits the stopped state change.\n */\nSupervisor.prototype.shutdown = function Supervisor$shutdown(timeoutMs)\n{\n var ps = [], returnSliceInventory = [];\n var timer;\n\n this.state.setIf('ready', 'stopping');\n if (!this.state.is('stopping'))\n throw new Error(`Supervisor has an invalid state ${this.state} for shutdown`);\n clearInterval(this.watchdogTimer);\n\n for (let jobMan of this.jobManagerInventory)\n {\n jobMan.state.setIf('ready', 'stop');\n\n for (let slice of jobMan.sliceInventory)\n 
{\n if (slice.state.is('initial') || slice.state.is('ready'))\n {\n returnSliceInventory.push(slice);\n }\n else if (slice.state.is(sliceStatus.working))\n {\n ps.push(new Promise((resolve, reject) => {\n slice.state.on('change', (status) => {\n if (status === 'done')\n resolve();\n });\n }));\n }\n }\n }\n\n const reason = 'Supervisor.shutdown';\n this.returnSlices(returnSliceInventory, reason);\n timer = setTimeout(this.halt.bind(this), timeoutMs);\n Promise.all(ps)\n .then(() => {\n clearTimeout(timer);\n this.state.set('stopping', 'stopped');\n })\n .catch((e) => {\n if (e.code !== 'DCP_SUPERVISOR_ESYNC')\n throw e; /* becomes unhandled rejection */\n });\n}\n\n/** \n * Factory function which generates a list of origins which are safe to communicate \n * with for this purpose. Currently-valid purposes (more will be added):\n * - any\n * - fetchData\n * - fetchWorkFunctions\n * - fetchArguments\n * - sendResults\n */\nSupervisor.prototype.makeSafeOriginList = function Supervisor$$makeSafeOriginList(purpose)\n{\n var list = [];\n \n if (this[purpose])\n list = list.concat(this[purpose]);\n \n /* Add 'any' origin(s) to list iff not in localExec, or in localExec and purpose is sendResults */\n if (!this.options.localExec || (this.options.localExec && purpose === 'sendResults'))\n list = list.concat(this.allowedOrigins)\n \n return list;\n}\n \n /**\n * UNUSED -- DOES NOT WORK YET.\n * NEED TO WORK WITH WES TO FIGURE OUT BEST WAY TO GET PREDICTLOAD TO WORK.\n * Predict the load on this supervisor based on the local job measurement data.\n * Works by looking at current conditions and available slices, and tries to guess\n * in what order they will be finished, working, etc. \n *\n * The simulation is very naive, but is expected to be accurate several seconds\n * into the future, particularly as we approach the end of a task.\n *\n * @param {number} whenMs \n * \n * @returns {Object<load, jobManagerInventory>} where load is and instance of Load and the predicted \n * load at the prediction time, and jobManagerInventory \n * is a counterfeit which holds the predicted state of \n * the jobManagerInventory at that time.\n */\nSupervisor.prototype.predictLoad = function Supervisor$predictLoad (whenMs)\n{\n /** @type {JobManager[]} */\n var jmi = new Inventory(); /* Inventory of counterfeit JobManagers. */\n var load = new Load(0,0); /* This \"current\" load throughout the prediction. */\n /** @type {Slice} */\n var next; /* The next slice to \"finish\". */\n\n /* Initialize data structures for prediction from current activity. 
*/\n for (let jobMan of this.jobManagerInventory.filter(jm => jm.state.is('ready') && jm.sliceInventory.length))\n {\n jobMan = jobMan.counterfeit();\n jmi.push(jobMan);\n jobMan.sliceInventory.forEach((s) => s.state.setIf('initial', 'ready'));\n }\n next = findNextSlice();\n \n /**\n * Routine that finds the slice that will end next (soonest.)\n * @returns {Slice}\n */\n function findNextSlice()\n {\n /** @type {Slice} */\n var _next;\n for (let jobMan of jmi)\n {\n const _workingSlices = jobMan.workingSlices;\n for (let slice of _workingSlices)\n {\n //\n // slice.etaMs is the estimated time interval until slice execution completes.\n //\n // If the slice hasn't started,\n // slice.etaMs = slice.jobManager.estimateWallMs,\n // else if the slice has completed execution:\n // slice.etaMs = 0.\n // else if the slice has started:\n // slice.jobManager.estimateWallMs - (Date.now() - slice.startTime).\n //\n if (_next && (_next.etaMs <= slice.etaMs))\n continue;\n\n _next = slice;\n }\n }\n load.add(_next.jobManager.metrics);\n \n return _next;\n }\n\n /* At this point, jmi is an Inventory of counterfeit job managers that are \"ready\" for\n * work, next.etaMs is the time interval until the next slice will finish, and we have\n * a reasonably accurate picture of our current load.\n *\n * Next, we \"end\" this slice, try to fill all cores, and push the timeline forward to\n * the next predicted end of slice.\n */\n for (next = findNextSlice();\n next && (next.etaMs < whenMs);\n next = findNextSlice())\n {\n let ended = next;\n let cursor = this.makeJobSelectionCursor(jmi);\n\n /* \"end\" this slice */\n load.subtract(ended.jobManager.metrics);\n /* Fake out collecting result to transition state to FINISHED. */\n ended.collectResult(null);\n\n /* \"start\" as many slices as we can - given our CPU/GPU constraints, slice data in memory, etc */\n while (this.targetLoad.fits(load))\n {\n let slice = cursor.next();\n if (!slice)\n break; /* Running out of work that fits. */\n\n if (!load.fits(this.targetLoad, slice.jobManager.metrics))\n continue;\n\n /* Pick a ready slice from this job and add its anticipated load to our current load if it will fit */\n slice = slice.jobManager.readySlices.shift();\n slice.markAsWorking(); // ?? Not sure this is correct.\n //slice.etaMs = ended.etaMs + slice.jobManager.estimateWallMs; wtf?!?! <--- LOOK HERE\n\n load.add(slice.jobManager.metrics);\n }\n }\n\n return { load, jobManagerInventory: jmi };\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/index.js?");
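A minimal sketch of the slot arithmetic implemented by Supervisor$numberOfAvailableSandboxSlots in the bundle above, shown with hypothetical inputs; the standalone function name and its arguments below are illustrative only and are not part of either published dcp-client version.

// Illustrative sketch (assumption: inputs mirror the supervisor's state fields).
// Returns how many sandbox slots a new task fetch should try to fill.
function availableSlots(unusedSandboxSlots, readySlices, priorityOnly, jobAddresses)
{
  if (priorityOnly && jobAddresses.length === 0)
    return 0;                      // priority-only worker with no target jobs: fetch nothing
  if (readySlices.length > 1)
    return 0;                      // enough slices already queued: no fetch needed
  // 0 or 1 ready slices remain: fetch a full task, reserving one slot if the
  // single remaining ready slice is "long" (estimated >= 5 minutes of execution).
  const longSliceCount = (readySlices.length > 0 && readySlices[0].isLong) ? 1 : 0;
  return unusedSandboxSlots - longSliceCount;
}

// e.g. availableSlots(4, [{ isLong: true }], false, []) === 3

Reserving a slot for a still-pending long slice keeps the fetched task, whose slices are budgeted against roughly maxWorkingSandboxes * 5 minutes of aggregate execution time, from overcommitting a core that is already spoken for.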
4503
4503
 
4504
4504
  /***/ }),
4505
4505
 
@@ -4510,7 +4510,7 @@ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_mod
4510
4510
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4511
4511
 
4512
4512
  "use strict";
4513
- eval("/**\n * @file dcp-client/worker/supervisor2/job-manager.js\n *\n * A support class for Supervisor2.\n * It is a wrapper for the job object returned from the requestTask,\n * along with tracking slices and sandboxes associated with the job. \n *\n * @author Wes Garland, wes@kingsds.network,\n * Paul, paul@kingsds.network,\n * @date Dec 2020,\n * June 2022,\n * @module JobManager\n */\n\n/* global dcpConfig */ // eslint-disable-line no-redeclare\n// @ts-check\n\n\n/** @typedef {import('./').Supervisor} Supervisor */\n/** @typedef {import('./sandbox2').Sandbox} Sandbox */\n/** @typedef {import('dcp/common/dcp-url').DcpURL} DcpURL */\n/** @typedef {import('dcp/utils').SliceMessage} SliceMessage */\n/** @typedef {string} opaqueId */ // 22 character base64 string \n/** @typedef {string} address */ // String(Address)\n\nconst inspect = Symbol.for('nodejs.util.inspect.custom');\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { Inventory } = __webpack_require__(/*! dcp/utils/inventory */ \"./src/utils/inventory.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst { Slice } = __webpack_require__(/*! ./slice2 */ \"./src/dcp-client/worker/supervisor2/slice2.js\");\nconst { Load } = __webpack_require__(/*! ./load */ \"./src/dcp-client/worker/supervisor2/load.js\");\nconst { Statistics } = __webpack_require__(/*! ./rolling-statistics */ \"./src/dcp-client/worker/supervisor2/rolling-statistics.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { rehydrateRange } = __webpack_require__(/*! dcp/dcp-client/range-object */ \"./src/dcp-client/range-object.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\nconst scopedKvin = new kvin.KVIN({Object: ({}).constructor,\n Array: ([]).constructor, \n Function: (()=>{}).constructor});\nconst { fetchURI, dumpObject, truncateAddress, stringify } = __webpack_require__(/*! 
dcp/utils */ \"./src/utils/index.js\");\n\nconst addressTruncationLength = 20;\nlet workerTuning = dcpConfig.worker;\nif (!workerTuning) workerTuning = dcpConfig.Supervisor;\nif (!workerTuning || !workerTuning.maxSandboxErrorsPerSlice || !workerTuning.allowConsoleAccess)\n workerTuning = { maxSandboxErrorsPerSlice: 2, allowConsoleAccess: false };\n\n// This is for debugging and performance testing.\n// Several functions chain and return non-awaited promises.\n// When AWAIT_ALL is true, we await the promises, which makes easier debugging.\nconst AWAIT_ALL = false;\n\n// Debug tracing helpers.\nconst selectiveEnable = false;\nconst displayWarnError = false || selectiveEnable;\nconst selectiveDebugging = selectiveEnable || debugging();\nconst debuggingError = false || selectiveDebugging || displayWarnError;\nconst debuggingWarn = false || selectiveDebugging || displayWarnError;\nconst selectiveDebugging2 = selectiveEnable && false || debugging('jobmanager');\n\nconst INITIAL = 'initial';\nconst READY = 'ready';\nconst STOP = 'stop';\nconst REFUSE = 'refuse';\nconst BROKEN = 'broken';\n\n//\n// Index to functionality -- search for '_Idx' to toggle through the index.\n//\n// 1) Ctor: JobManager class definition and various properties.\n// 2) Statistics: updateStatistics, update.\n// 3) Dtors: destroy, stopWork.\n// 4) Assign sandboxes and execute slices.\n// 5) Remove from array and return sandbox.\n// 6) Fetch: WorkFn, slices, arguments.\n// 7) Miscellaneous.\n// 8) Unused functions that we need to review.\n//\n\n// _Idx\n//\n// Ctor: JobManager class definition and various properties.\n// A JobManager handles all scheduling of slices for a given job.\n// It's also responsible for fetching slice data, work functions and arguments.\n// And it collects statistics about slice completion times and resource usage.\n// All functionality across jobs is handled by Supervisor.\n//\n\n/**\n * JobManager Constructor. 
An instance of JobManager knows everything about a given\n * job within the supervisor, including:\n * - work function code\n * - work function arguments\n * - all the SliceManager instances that go with this job\n * - how long a slice of this job is expected to run\n *\n * Instances of JobManager emit the following events:\n * - addSlice(sliceHandle)\n * - deleteSlice(sliceHandle)\n * - statusChange(new state, old state);\n *\n * JobManager States\n *\n * Start state:\n * initial\n *\n * Intermediate states:\n * ready\n *\n * Terminal states:\n * broken - job could not be initialized\n * refuse - for some reason, we have decided that we don't want work for this job\n * stop - job manager has been stopped\n *\n * Valid transitions:\n * initial -> broken\n * initial -> ready\n * ready -> stop\n * \n * NOTE: If you ever use a property with a leading underscore you are probably making a mistake.\n * But if you must, please ask paul, yarn, bryan or eddie for a CR.\n */\nclass JobManager extends EventEmitter\n{\n /**\n * @constructor\n * @param {Supervisor} parent - Owning Supervisor.\n * @param {object} jobMessage - Job Descriptor from getNewJobsForTask.\n * @param {SliceMessage[]} sliceMessages - Messages from task distributor describing slices.\n * @param {object} authorizationMessage - The signature that shipped with the task authorizing this worker.\n */\n constructor(parent, jobMessage, sliceMessages, authorizationMessage)\n {\n super('JobManager');\n /** @type {Supervisor} */\n this._supervisor = parent;\n /** @type {Slice[]} */\n this._sliceInventory = []; // All slices for this.address.\n /** @type {Sandbox[]} */\n this._sandboxInventory = []; // All sandboxes for this.address.\n /** @type {Synchronizer} */\n this._state = new Synchronizer(INITIAL, [ INITIAL, READY, STOP, REFUSE, BROKEN ]);\n this.state.on('change', (neu, old) => this.emit('statusChange', neu, old));\n\n /** XXXpfr @todo Is there any reason to do a deeper clone here. */\n this.jobMessage = { ...jobMessage };\n\n /** @type {string} */\n this._address = String(this.jobMessage.address);\n /** @type {Load} */\n this._load = null;\n /** @type {Statistics} */\n this._statistics = new Statistics(0.25);\n /** @type {number} */\n this._emaSliceTime = 0;\n /** @type {string[]} */\n this.rejectedJobReasons = [];\n /** @type {boolean} */\n this.isEstimation = true;\n\n const that = this;\n /** \n * Start loading dependencies in the background. 
Once these are loaded, this.state will \n * transition to 'ready' and the job will be ready for work.\n */\n (async function supervisor$$JobManager$$loadDependencies() {\n await that.fetchJob(that.jobMessage);\n })(/* iife */)\n .then (() => {\n debugging('jobmanager') && console.debug('JobManager is transitioning to READY', this.identifier);\n this.state.set(INITIAL, READY);\n this.addSlices(sliceMessages, authorizationMessage);\n })\n .catch((error) => {\n debuggingError && console.error('fetchJob has failed', error);\n this.state.set(INITIAL, BROKEN);\n })\n .finally(() => {\n selectiveDebugging && console.debug('JobManager.loadDependencies completed.', this.identifier);\n });\n }\n\n /** @type {string} */\n get address () { return this._address; }\n /** @type {string} */\n get uuid () { return this.jobMessage.uuid; }\n /** @type {boolean} */\n get workerConsole () { return this.jobMessage.workerConsole; }\n /** @type {object} */\n get requirements () { return this.jobMessage.requirements; }\n\n // These 3 properties have type object.\n get mro () { return this.jobMessage.mro; }\n get arguments () { return this.jobMessage.arguments; }\n get workFunction () { return this.jobMessage.workFunction; }\n\n /** @type {{computeGroups: Array<{opaqueId: string, name: string, description: string}>, name: string, description: string, link: string}} */\n get public () { return this.jobMessage.public; }\n /** @type {{sliceCPUTime: number, sliceGPUTime: number, sliceCPUDensity: number, sliceGPUDensity: number}} */\n get metrics () { return this.jobMessage.metrics; }\n /** @type {Supervisor} */\n get supervisor () { return this._supervisor; }\n /** @type {Synchronizer} */\n get state () { return this._state; }\n\n /** @type {boolean} */\n get initial () { return this.state.is(INITIAL); }\n /** @type {boolean} */\n get ready () { return this.state.is(READY); }\n /** @type {boolean} */\n get stopped () { return this.state.is(STOP); }\n /** @type {boolean} */\n get refuse () { return this.state.is(REFUSE); }\n /** @type {boolean} */\n get broken () { return this.state.is(BROKEN); }\n\n /** @type {Sandbox[]} */\n get sandboxInventory () { return this._sandboxInventory; }\n /** @type {Sandbox[]} */\n get safeSandboxes () { return this.sandboxInventory.filter((sandbox) => !sandbox.isTerminated); }\n /** @type {Sandbox[]} */\n get assignedSandboxes () { return this.sandboxInventory.filter((sandbox) => sandbox.isAssigned); }\n /** @type {Sandbox[]} */\n get workingSandboxes () { return this.sandboxInventory.filter((sandbox) => sandbox.isWorking); }\n /** @type {Sandbox[]} */\n get nonWorkingSandboxes () { return this.sandboxInventory.filter((sandbox) => !sandbox.isWorking && !sandbox.isTerminated); }\n /** @type {Slice[]} */\n get sliceInventory () { return this._sliceInventory; }\n /** @type {Slice[]} */\n get readySlices () { return this.sliceInventory.filter((slice) => slice.isReady); }\n /** @type {Slice[]} - ready and soon-to-be-ready */\n get queuedSlices () { return this.sliceInventory.filter((slice) => slice.isQueued); }\n /** @type {Slice[]} */\n get reservedSlices () { return this.sliceInventory.filter((slice) => slice.isReserved); }\n /** @type {Slice[]} - unassigned, ready and reserved */\n get activeSlices () { return this.sliceInventory.filter((slice) => slice.isActive); }\n /** @type {Slice[]} - working and soon-to-be-working */\n get workingSlices () { return this.sliceInventory.filter((slice) => slice.isWorking || slice.isReserved); }\n /** @type {Slice[]} - working only */\n get 
workingSlicesOnly () { return this.sliceInventory.filter((slice) => slice.isWorking); }\n\n /** @type {Load} */\n get load () { return this._load; }\n /** @type {number} */\n get delayMs () { return this.supervisor.delayMs(); }\n /** @type {Statistics} */\n get statistics () { return this._statistics; }\n /** @type {number} */\n get emaSliceTime () { return this._emaSliceTime; }\n /** @type {number} */\n get globalTime () { \n const denominator = this.metrics.sliceCPUDensity + this.metrics.sliceGPUDensity;\n return denominator > 0 ? (this.metrics.sliceCPUTime + this.metrics.sliceGPUTime) / denominator : 300 * 1000;\n }\n /** @type {string} */\n get identifier () { return `${truncateAddress(this.address, addressTruncationLength)}.${this.state}`; }\n /** @type {string} */\n get [inspect] () { return `[Object JobManager <${this.public.name}::${this.address}::${this.state}>]`; }\n /** \n * Estimate the wall time (i.e. actual elapsed time on a wall clock) for a slice of this job.\n * We pick the ema time (alpha=0.25) + 0.7 * stddev to incorporate uncertainty.\n * About 76% of slice completion times are <= ema + 0.7 * stddev.\n * This estimate is very primitive and assumes that CPU and GPU code do not run at the same\n * time.\n *\n * @type {number} - estimated time period in milliseconds\n */ \n get estimateWallMs () { return this.emaSliceTime + 0.7 * this.statistics.stddev; } /** XXXpfr @todo make this.statistics.stddev reflect this.globalTime too. */\n /** @type {boolean} */\n get debugBuild () { return this.supervisor.debugBuild; }\n /**\n * Always display max info under debug builds, otherwise maximal error\n * messages are displayed to the worker, only if both worker and client agree.\n * @type {boolean} - When true, display stack trace and other enhanced diag info.\n **/\n get displayMaxDiagInfo () { return this.workerConsole && workerTuning.allowConsoleAccess || this.debugBuild; }\n\n // _Idx\n //\n // Statistics: updateStatistics, update.\n //\n\n /** @param {{ total, CPU, webGL }} timeReport */\n updateStatistics (timeReport)\n {\n this.statistics.add(timeReport.total);\n this._emaSliceTime = 0.5 * (this._emaSliceTime + this.statistics.ema); /** XXXpfr @todo double smoothing, need to test and compare against this.statistics.ema . 
*/\n debugging('jobmanager') && console.debug('JobManager.updateStatistics: mean', this.statistics.mean, 'stddev', this.statistics.stddev, 'ema', this.statistics.ema, 'ma', this.statistics.ma, 'x', this.statistics.x);\n }\n\n /**\n * Update jobMessage, add some slices to inventory and possibly update the initial seed of the statistics.\n * @param {object} jobMessage - Job Descriptor from getNewJobsForTask.\n * @param {SliceMessage[]} sliceMessages - Messages from task distributor describing slices.\n * @param {object} authorizationMessage - The signature that shipped with the task authorizing this worker.\n */\n update (jobMessage, sliceMessages, authorizationMessage)\n {\n this.jobMessage = Object.assign(this.jobMessage, { ...jobMessage });\n this.addSlices(sliceMessages, authorizationMessage);\n if (!this._emaSliceTime || this.statistics.count < 1)\n this._emaSliceTime = this.globalTime;\n // this._load = new Load(this.metrics.sliceCPUTime, this.metrics.sliceGPUTime); // SAVE for Sup2 Part II\n }\n\n // _Idx\n //\n // Dtors: destroy, stopWork.\n //\n\n /**\n * Destructor.\n */\n destroy()\n {\n selectiveDebugging && console.debug(`JobManager.destroy: terminating sandboxes and returning slices to scheduler for job manager ${this.identifier}.`);\n \n this.sandboxInventory.forEach((sandbox) => {\n if (!sandbox.isTerminated) sandbox.terminate(false);\n });\n this._sandboxInventory = [];\n\n const slicesToReturn = this.sliceInventory.filter((slice) => !slice.isFinished);\n const reason = `JobManager destroy: ${this.identifier}`;\n this.supervisor.returnSlices(slicesToReturn, reason, false /*removeSlices*/);\n this._sliceInventory = [];\n }\n\n /**\n * Terminates sandboxes and returns slices.\n * Sets the working flag to false, call @this.work to start working again.\n * \n * If forceTerminate is true: Terminates all sandboxes and returns all slices.\n * If forceTerminate is false: Terminates non-working sandboxes and returns queued slices.\n *\n * @param {boolean} [forceTerminate = true] - true if you want to stop the sandboxes from completing their current slice.\n */\n stopWork (forceTerminate = true)\n {\n selectiveDebugging && console.debug(`JobManager.stopWork(${forceTerminate}): terminating sandboxes and returning slices to scheduler.`);\n if (forceTerminate)\n {\n this.destroy();\n }\n else\n {\n // Return all non-finished slices which are not working.\n const reason = `JobManager stopWork(false): ${this.identifier}`;\n this.supervisor.returnSlices(this.queuedSlices, reason);\n this._sliceInventory = [];\n\n // Terminate all sandboxes that are not working.\n let idleSandboxes = this.sandboxInventory.filter(w => !w.isWorking);\n for (const sandbox of idleSandboxes)\n this.returnSandbox(sandbox);\n this._sandboxInventory = [];\n }\n }\n \n /**\n * Terminate 1 assigned sandbox when there are more than threshold assigned sandboxes.\n * @param {number} [threshold=0] - integer >= 0\n * @returns {boolean}\n */\n pruneAssignedSandbox(threshold = 0)\n {\n const _assignedSandboxes = this.assignedSandboxes;\n if (_assignedSandboxes.length > threshold)\n {\n debugging('supervisor') && console.debug(`JobManager.pruneAssignedSandbox terminate ${_assignedSandboxes[0].identifier}, threshold ${threshold}`);\n _assignedSandboxes[0].markAsUnready(); // Remove from this.assignedSandboxes.\n this.returnSandbox(_assignedSandboxes[0]);\n return true;\n }\n return false;\n }\n\n /**\n * Terminate 1 assigned sandbox when there are more than this.queuedSlices.length assigned sandboxes.\n * @returns {boolean}\n 
*/\n pruneExtraAssignedSandbox()\n {\n return this.pruneAssignedSandbox(this.queuedSlices.length);\n }\n\n // _Idx\n //\n // Work: Assign sandboxes and execute slices.\n //\n // hookUpSandboxListeners handles the sandbox complete event and tries to schedule another slice\n // from the same job on the sandbox. If there aren't any slices from the same job, it tries slices\n // from other job managers, and if there are no other ready slices, it calls Supervisor.requestTask\n // to get more work from TD .\n //\n // Several functions chain and return non-awaited promises.\n // When AWAIT_ALL is true, we await the promises, which makes easier debugging.\n //\n\n /**\n * Hook up JobManager specific Sandbox event listenters.\n * @param {Sandbox} sandbox\n */\n hookUpSandboxListeners(sandbox)\n {\n // Sandbox error handler.\n sandbox.on('sandboxError', (error) => JobManager.handleSandboxError(this, sandbox, error));\n // Sandbox complete handler.\n // When any sandbox completes, try to run another slice in the same jobManager.\n // Otherwise, go through the Supervisor.requestTask protocol.\n sandbox.addListener('complete', () => {\n if (this.supervisor.isReady())\n {\n const _readySlices = this.readySlices;\n if (this.supervisor.runSliceFromSameJob() && _readySlices.length > 0)\n {\n const slice = _readySlices[0];\n slice.markAsReserved();\n this.runSlice(slice, this.delayMs);\n }\n else if (!this.supervisor.isFetchingNewWork)\n {\n const unusedSandboxSlots = this.supervisor.unusedSandboxSlots();\n if (unusedSandboxSlots > 0)\n this.supervisor.requestTask(unusedSandboxSlots);\n }\n }\n });\n }\n\n /**\n * Create a Sandbox.\n * Start it. Assign it. Add it to jobManager inventory.\n * @param {number} delayStartMs - The delay ms to pass to sandbox.start .\n * @param {number} delayAssignMs - The delay ms to pass to sandbox.assign .\n * @returns {Promise<Sandbox>}\n */\n async readySandbox (delayStartMs, delayAssignMs)\n {\n const sandbox = await this.supervisor.createSandbox(delayStartMs);\n this.hookUpSandboxListeners(sandbox);\n return sandbox.assign(this, delayAssignMs)\n .catch((error) => { // error could be a string coming from evaluator\n const errorObj = { jobAddress: truncateAddress(this.address, addressTruncationLength), error: this.checkStackTrace(error) };\n console.error('Failed to assign job to sandbox.', errorObj);\n this.returnSandbox(sandbox);\n throw error;\n });\n }\n\n /**\n * Mark both sandbox and slice as working.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n */\n markAsWorking(sandbox, slice)\n {\n assert(sandbox);\n assert(slice);\n slice.markAsWorking();\n sandbox.markAsWorking(slice);\n }\n\n /**\n * Create or reuse a sandbox and run a slice on it.\n * @param {Slice} slice - The slice to execute on a sandbox.\n * @param {number} [delayMs=0] - The delay ms to pass to sandbox.start .\n * @returns {Promise<void>}\n */\n async runSlice(slice, delayMs)\n {\n assert(slice);\n if (this.supervisor.sliceTiming) slice['queueingDelta'] = Date.now();\n\n const _assignedSandboxes = this.assignedSandboxes;\n if (AWAIT_ALL)\n {\n if (_assignedSandboxes.length > 0 && this.supervisor.readiedSandboxes.length < 1)\n {\n const sandbox = _assignedSandboxes[0];\n this.markAsWorking(sandbox, slice);\n return await this.runSliceOnSandbox(slice, sandbox, this.delayMs); \n }\n try\n {\n const sandbox = await this.readySandbox(delayMs/*delayStartMs*/, 0.5 * this.delayMs/*delayAssignMs*/);\n this.sandboxInventory.push(sandbox);\n this.markAsWorking(sandbox, slice);\n return await 
this.runSliceOnSandbox(slice, sandbox, this.delayMs);\n }\n catch (error)\n {\n // Any exception from readySandbox will have already been emitted.\n debugging('jobmanager') && console.error(`JobManager.runSlice: Failure trying to run slice ${slice.identifier} on a sandbox.`, error);\n this.supervisor.returnSlice(slice, 'EUNCAUGHT');\n }\n }\n else\n {\n if (_assignedSandboxes.length > 0 && this.supervisor.readiedSandboxes.length < 1)\n {\n const sandbox = _assignedSandboxes[0];\n this.markAsWorking(sandbox, slice);\n return this.runSliceOnSandbox(slice, sandbox, this.delayMs); \n }\n return this.readySandbox(delayMs/*delayStartMs*/, this.delayMs/*delayAssignMs*/)\n .then((sandbox) => {\n this.sandboxInventory.push(sandbox);\n this.markAsWorking(sandbox, slice);\n return this.runSliceOnSandbox(slice, sandbox, this.delayMs);\n })\n .catch((error) => {\n // Any exception from readySandbox will have already been emitted.\n debugging('jobmanager') && console.error(`JobManager.runSlice: Failure trying to run slice ${slice.identifier} on a sandbox.`, error);\n this.supervisor.returnSlice(slice, 'EUNCAUGHT');\n })\n }\n }\n\n /**\n * Execute slice on sandbox, collect results or handle errors, and clean up.\n * @param {Slice} slice - The slice to execute on the sandbox.\n * @param {Sandbox} sandbox - The sandbox on which to execute the slice.\n * @param {number} [delayMs=0] - The delay ms to pass to sandbox.work .\n * @returns {Promise<void>}\n */\n async runSliceOnSandbox(slice, sandbox, delayMs = 0)\n {\n selectiveDebugging2 && console.debug(`runSliceOnSandbox ${sandbox.identifier}~${slice.identifier} #readySlices ${this.readySlices.length}/${this.supervisor.readySliceCount()}, #SBs, ${this.sandboxInventory.length}/${this.supervisor.sandboxCount()}, #working(w/r/wo/wsbx), ${this.workingSlices.length}/${this.reservedSlices.length}/${this.workingSlicesOnly.length}/${this.workingSandboxes.length}, #sup-working(w/r/wo/wsbx), ${this.supervisor.workingSliceCount()}/${this.supervisor.reservedSliceCount()}/${this.supervisor.workingSliceOnlyCount()}/${this.supervisor.workingSandboxCount()}, #assigned, ${this.assignedSandboxes.length}, #readiedSBs, ${this.supervisor.readiedSandboxes.length}, #localSBs, ${this.sandboxInventory.map(s => Number(s.id)).sort((x,y)=>x-y)}`);\n //console.log('runSliceOnSandbox', Date.now() - this.supervisor.lastTime, slice.identifier); // SAVE temporarily\n assert(slice && sandbox);\n slice.verifyWorking();\n sandbox.verifyWorking();\n\n if (this.supervisor.sliceTiming)\n {\n slice['queueingDelta'] = Date.now() - slice['queueingDelta'];\n slice['executionDelta'] = Date.now();\n }\n slice.startTime = Date.now();\n\n if (AWAIT_ALL)\n {\n try\n {\n const result = await sandbox.work(slice, delayMs);\n debugging('jobmanager') && console.debug(`runSliceOnSandbox - success: ${sandbox.id}~${slice.identifier}`, result);\n slice.collectResult(result, true);\n sandbox.changeWorkingToAssigned();\n this.supervisor.recordResult(slice);\n }\n catch (error)\n {\n const reason = await this.supervisor.handleSandboxWorkError(sandbox, slice, error);\n debuggingError && console.error(`runSliceOnSandbox - failure: ${sandbox.id}~${slice.identifier}`, reason, error);\n }\n finally\n {\n slice.startTime = null;\n if (this.supervisor.sliceTiming)\n {\n slice['executionDelta'] = Date.now() - slice['executionDelta'];\n slice['resultDelta'] = Date.now();\n }\n }\n }\n else\n {\n return sandbox.work(slice, delayMs)\n .then((result) => {\n debugging('jobmanager') && console.debug(`runSliceOnSandbox - success: 
${sandbox.id}~${slice.identifier}`, result);\n slice.collectResult(result, true);\n sandbox.changeWorkingToAssigned();\n this.supervisor.recordResult(slice);\n })\n .catch(async (error) => {\n const reason = await this.supervisor.handleSandboxWorkError(sandbox, slice, error);\n debuggingError && console.error(`runSliceOnSandbox - failure: ${sandbox.id}~${slice.identifier}`, reason, error);\n })\n .finally(() => {\n slice.startTime = null;\n if (this.supervisor.sliceTiming)\n {\n slice['executionDelta'] = Date.now() - slice['executionDelta'];\n slice['resultDelta'] = Date.now();\n }\n });\n }\n }\n\n /**\n * Sandbox has had an error which is not from the work function: terminate it\n * and try to redo the slice.\n * @param {JobManager} jobManager \n * @param {Sandbox} sandbox \n * @param {Error} error \n */\n static handleSandboxError (jobManager, sandbox, error)\n {\n const slice = sandbox.slice;\n if (slice)\n {\n if (!slice.isWorking) // Sanity. Exception should never fire.\n throw new Error(`handleSandboxError: slice ${slice.identifier} must be WORKING.`);\n\n slice['sandboxErrorCount'] = (slice['sandboxErrorCount'] || 0) + 1;\n sandbox.slice = null;\n if (slice['sandboxErrorCount'] < (workerTuning.maxSandboxErrorsPerSlice || 2))\n slice.resetState();\n else\n {\n slice.collectResult(error, false /*success*/);\n const reason = `sandboxError event handler, too many errors ${slice['sandboxErrorCount']}: ${sandbox.id}~${jobManager.identifier}`;\n jobManager.supervisor.returnSlice(slice, reason);\n }\n //debuggingWarn && -- Sup1 does not guard this\n console.warn(`JobManager.handleSandboxError: Sandbox ${sandbox.identifier}...(${sandbox.public.name}/${slice['sandboxErrorCount']}) with slice ${slice.identifier} had error.`, error);\n }\n else\n //debuggingError && -- Sup1 does not guard this\n console.error(`JobManager.handleSandboxError: Sandbox ${sandbox.identifier} has no slice...(${sandbox.public.name} had error.`, error);\n\n jobManager.returnSandbox(sandbox); /* terminate the sandbox */\n }\n\n // _Idx\n //\n // Remove from array and return sandbox.\n //\n\n /**\n * Remove sandbox from this.sandboxInventory and terminate is not already terminated.\n * @param {Sandbox} sandbox - The sandbox to remove from this.sandboxInventory.\n * @param {boolean} [assertExists=true] - When true (and type=array) then assert that sandbox exists in this.sandboxInventory.\n */\n returnSandbox(sandbox, assertExists = true)\n {\n debugging('supervisor') && console.debug(`JobManager.returnSandbox: ${sandbox.identifier}, assertExists ${assertExists}`);\n if (this.sandboxInventory && this.sandboxInventory.length > 0)\n JobManager.removeElement(this.sandboxInventory, sandbox, assertExists);\n // The cleaning up of terminated sandboxes in this.supervisor.sandboxInventory happens in this.supervisor.purgeSandboxes.\n if (!sandbox.isTerminated)\n sandbox.terminate(false);\n }\n /**\n * Remove slice from this.sliceInventory.\n * @param {Slice} slice - The slice to remove from this.sliceInventory.\n * @param {boolean} [assertExists=true] - When true (and type=array) then assert that slice exists in this.sliceInventory.\n */\n removeSlice(slice, assertExists = true)\n {\n assert(this.sliceInventory instanceof Array);\n if (this.sliceInventory && this.sliceInventory.length > 0)\n JobManager.removeElement(this.sliceInventory, slice, assertExists);\n }\n\n /**\n * Remove slices from this.sliceInventory.\n * @param {Slice[]} slices - The slices to remove from this.sliceInventory.\n */\n removeSlices(slices)\n {\n 
assert(this.sliceInventory instanceof Array);\n if (this.sliceInventory && this.sliceInventory.length > 0)\n this._sliceInventory = this.sliceInventory.filter((slice) => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove element from arrayLike.\n * @param {Array<*>} array\n * @param {object|number} element\n * @param {boolean} [assertExists=true]\n */\n static removeElement(array, element, assertExists = true)\n {\n let index = array.indexOf(element);\n if (assertExists)\n {\n assert(index !== -1);\n array.splice(index, 1);\n } else if (index !== -1) array.splice(index, 1);\n }\n\n // _Idx\n //\n // Fetch: WorkFn, slices, arguments.\n //\n\n /**\n * Fetch work function, work function arguments and possibly the range object describing the jobs slices..\n * @param {object} mpe - messagePayloadElement: job object returned by task-jobs.js . \n */\n async fetchJob(mpe)\n {\n if (debugging('worker'))\n dumpObject(mpe, 'JobManager.fetchJob: mpe', 512);\n\n if (AWAIT_ALL)\n {\n if (!mpe.workFunction)\n {\n mpe.workFunction = await fetchURI(mpe.codeLocation, this.supervisor.makeSafeOriginList('fetchWorkFunctions'));\n if(mpe.requirements.useStrict)\n mpe.useStrict = true;\n delete mpe.codeLocation;\n }\n if (!mpe.arguments)\n {\n let promises = [];\n let uris = mpe.argumentsLocation;\n if (uris)\n for (let i = 0; i < uris.length; i++)\n promises.push(fetchURI(uris[i].value, this.supervisor.makeSafeOriginList('fetchArguments')));\n\n mpe.arguments = await Promise.all(promises);\n\n // node localExec jobs read arguments from a file, so need to ensure they are properly parsed after being read.\n if (this.supervisor.options.localExec && !DCP_ENV.isBrowserPlatform)\n mpe.arguments = scopedKvin.parse(mpe.arguments[0]);\n\n delete mpe.argumentsLocation;\n }\n // if job input data is range object, we send the range object URI to the worker\n if (!mpe.mro && mpe.MROLocation)\n {\n mpe.mro = await fetchURI(mpe.MROLocation, this.supervisor.makeSafeOriginList('fetchData'));\n delete mpe.MROLocation;\n }\n }\n else\n {\n let promises = [];\n\n // Get workFn.\n if (!mpe.workFunction)\n {\n const workFunctionPromise = fetchURI(mpe.codeLocation, this.supervisor.makeSafeOriginList('fetchWorkFunctions'))\n .then((workFunction) => {\n mpe.workFunction = workFunction;\n if (mpe.requirements.useStrict)\n mpe.useStrict = true;\n delete mpe.codeLocation;\n });\n promises.push(workFunctionPromise);\n }\n\n // if job input data is range object, we send the range object URI to the worker\n if (!mpe.mro && mpe.MROLocation)\n {\n const mroPromise = fetchURI(mpe.MROLocation, this.supervisor.makeSafeOriginList('fetchData'))\n .then((mro) => {\n mpe.mro = mro;\n delete mpe.MROLocation;\n });\n promises.push(mroPromise)\n }\n\n // Get workFn args.\n if (!mpe.arguments && mpe.argumentsLocation)\n {\n mpe.arguments = new Array(mpe.argumentsLocation.length);\n for (let k = 0; k < mpe.argumentsLocation.length; k++)\n promises.push(fetchURI(mpe.argumentsLocation[k].value, this.supervisor.makeSafeOriginList('fetchArguments'))\n .then((arg) => (mpe.arguments[k] = arg) ));\n }\n\n await Promise.all(promises);\n\n if (mpe.argumentsLocation) delete mpe.argumentsLocation;\n }\n\n return mpe;\n }\n\n /**\n * Look up slice.datumUri or use the range object this.mro, constructed by fetchJob.\n * @param {Slice} slice \n * @returns {Promise<{ inputDatum, dataError }>}\n */\n async fetchSliceData (datumUri, slice) // eslint-disable-line require-await\n {\n const that = this;\n function catchHandler(error)\n {\n const dataError = error;\n 
dataError['errorCode'] = (error.code === 'EFETCH') ? 'EFETCH' : 'EUNCAUGHTERROR';\n that.emit('workEmit', {\n eventName: 'error',\n payload: {\n message: dataError.message,\n stack: dataError.stack,\n name: that.public.name\n }\n });\n return dataError;\n }\n\n if (!datumUri && !this.state.is(READY))\n throw new Error(`When !datumUri, JobManager '${this.identifier}' must be in READY state.`);\n\n try\n {\n if (!datumUri)\n {\n if (!this.mro) throw new Error('Must complete call to JobManager.fetchJob before calling JobManager.fetchSliceData.');\n const ro = rehydrateRange(this.mro);\n // Slice numbers start at 1.\n const inputDatum = ro[slice.sliceNumber - 1];\n debugging('jobmanager') && console.debug(`Fetched datum: ${stringify(inputDatum, 512)}`);\n return { inputDatum, dataError: null };\n }\n\n return fetchURI(datumUri, this.supervisor.makeSafeOriginList('fetchData'))\n .then((inputDatum) => {\n debugging('jobmanager') && console.debug(`Fetched datum: ${stringify(inputDatum, 512)}`);\n return { inputDatum, dataError: null };\n })\n .catch((error) => {\n const dataError = catchHandler(error);\n return { inputDatum: null, dataError };\n });\n }\n catch (error)\n {\n const dataError = catchHandler(error);\n return { inputDatum: null, dataError };\n }\n }\n\n // _Idx\n //\n // Miscellaneous.\n //\n\n /**\n * Debugging helper.\n * @param {string} tag\n */\n dumpSlices(tag='')\n {\n if (this.sliceInventory.length < 1) return;\n const queued = [], working = [], finished = [];\n for (const slice of this.sliceInventory)\n {\n if (slice.isUnassigned || slice.isReady)\n queued.push(slice);\n else if (slice.isReserved || slice.isWorking)\n working.push(slice);\n else\n finished.push(slice);\n }\n console.log(`${tag}:--JobManager.dumpSlices--${this.identifier} slices ${this.sliceInventory.length}-------`);\n console.log(`-----queued(${queued.length})-----------------------------------------------------------------`);\n for (const slice of queued)\n console.log(slice.identifier);\n console.log(`-----working(${working.length})----------------------------------------------------------------`);\n for (const slice of working)\n console.log(slice.identifier);\n console.log(`-----finished(${finished.length})---------------------------------------------------------------`);\n for (const slice of finished)\n console.log(slice.identifier);\n console.log('-----------------------------------------------------------------------------------');\n }\n\n /**\n * Calls removeStackTrace when !this.displayMaxDiagInfo .\n * @param {string|Error} error\n * @returns {string|Error}\n */\n checkStackTrace(error)\n {\n return this.displayMaxDiagInfo ? 
error : this.supervisor.removeStackTrace(error);\n }\n\n /**\n * Add slices to the job manager's inventory.\n *\n * @param {SliceMessage[]} sliceMessages - Messages from task distributor describing slices.\n * @param {object} authorizationMessage - The signature that shipped with the task authorizing this worker.\n */\n addSlices (sliceMessages, authorizationMessage)\n {\n sliceMessages.forEach((sliceMessage) => {\n const slice = new Slice(this, sliceMessage, authorizationMessage);\n if (!slice.isEstimation) this.isEstimation = false;\n this.sliceInventory.push(slice);\n });\n }\n\n /**\n * Schedule the slice to be executed.\n * @param {Slice} slice\n * @param {boolean} [placeInTheFrontOfTheQueue=false]\n */\n scheduleSlice(slice, placeInTheFrontOfTheQueue = false)\n {\n // Reset slice state to allow execution.\n slice.resetState();\n // Enqueue in the to-be-executed queue.\n if (placeInTheFrontOfTheQueue) this.sliceInventory.unshift(slice);\n else this.sliceInventory.push(slice);\n }\n\n /**\n * XXXpfr @todo UNUSED but may be used soon\n * Returns a counterfeit JobManager - a generic object with most of the same\n * ownProperties as a real exports.JobManager. Any inventories and synchronizer are\n * duplicated (equivalent state, no events copied, ~ shallow clone)\n * @returns {JobManager}\n */\n counterfeit()\n {\n /** @type {JobManager} */\n const fake = {};\n \n for (let prop in Object.keys(this))\n {\n if (this[prop] instanceof Inventory)\n fake[prop] = this[prop].duplicate();\n else if (this[prop] instanceof Synchronizer)\n fake[prop] = this[prop].valueOf();\n else\n fake[prop] = this[prop];\n }\n\n return fake;\n }\n\n}\nexports.JobManager = JobManager;\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/job-manager.js?");
4513
+ eval("/**\n * @file dcp-client/worker/supervisor2/job-manager.js\n *\n * A support class for Supervisor2.\n * It is a wrapper for the job object returned from the requestTask,\n * along with tracking slices and sandboxes associated with the job. \n *\n * @author Wes Garland, wes@kingsds.network,\n * Paul, paul@kingsds.network,\n * @date Dec 2020,\n * June 2022,\n * @module JobManager\n */\n\n/* global dcpConfig */ // eslint-disable-line no-redeclare\n// @ts-check\n\n\n/** @typedef {import('./').Supervisor} Supervisor */\n/** @typedef {import('./sandbox2').Sandbox} Sandbox */\n/** @typedef {import('dcp/common/dcp-url').DcpURL} DcpURL */\n/** @typedef {import('dcp/utils').SliceMessage} SliceMessage */\n/** @typedef {string} opaqueId */ // 22 character base64 string \n/** @typedef {string} address */ // String(Address)\n\nconst inspect = Symbol.for('nodejs.util.inspect.custom');\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { Inventory } = __webpack_require__(/*! dcp/utils/inventory */ \"./src/utils/inventory.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst { Slice } = __webpack_require__(/*! ./slice2 */ \"./src/dcp-client/worker/supervisor2/slice2.js\");\nconst { Load } = __webpack_require__(/*! ./load */ \"./src/dcp-client/worker/supervisor2/load.js\");\nconst { Statistics } = __webpack_require__(/*! ./rolling-statistics */ \"./src/dcp-client/worker/supervisor2/rolling-statistics.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { rehydrateRange } = __webpack_require__(/*! dcp/dcp-client/range-object */ \"./src/dcp-client/range-object.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\nconst scopedKvin = new kvin.KVIN({Object: ({}).constructor,\n Array: ([]).constructor, \n Function: (()=>{}).constructor});\nconst { fetchURI, dumpObject, truncateAddress, stringify } = __webpack_require__(/*! 
dcp/utils */ \"./src/utils/index.js\");\n\nconst addressTruncationLength = 20;\nlet workerTuning = dcpConfig.worker;\nif (!workerTuning) workerTuning = dcpConfig.Supervisor;\nif (!workerTuning || !workerTuning.maxSandboxErrorsPerSlice || !workerTuning.allowConsoleAccess)\n workerTuning = { maxSandboxErrorsPerSlice: 2, allowConsoleAccess: false };\n\n// This is for debugging and performance testing.\n// Several functions chain and return non-awaited promises.\n// When AWAIT_ALL is true, we await the promises, which makes easier debugging.\nconst AWAIT_ALL = false;\n\n// Debug tracing helpers.\nconst selectiveEnable = false;\nconst displayWarnError = false || selectiveEnable;\nconst selectiveDebugging = selectiveEnable || debugging();\nconst debuggingError = false || selectiveDebugging || displayWarnError;\nconst debuggingWarn = false || selectiveDebugging || displayWarnError;\nconst selectiveDebugging2 = selectiveEnable && false || debugging('jobmanager');\n\nconst INITIAL = 'initial';\nconst READY = 'ready';\nconst STOP = 'stop';\nconst REFUSE = 'refuse';\nconst BROKEN = 'broken';\n\n//\n// Index to functionality -- search for '_Idx' to toggle through the index.\n//\n// 1) Ctor: JobManager class definition and various properties.\n// 2) Statistics: updateStatistics, update.\n// 3) Dtors: destroy, stopWork.\n// 4) Assign sandboxes and execute slices.\n// 5) Remove from array and return sandbox.\n// 6) Fetch: WorkFn, slices, arguments.\n// 7) Miscellaneous.\n// 8) Unused functions that we need to review.\n//\n\n// _Idx\n//\n// Ctor: JobManager class definition and various properties.\n// A JobManager handles all scheduling of slices for a given job.\n// It's also responsible for fetching slice data, work functions and arguments.\n// And it collects statistics about slice completion times and resource usage.\n// All functionality across jobs is handled by Supervisor.\n//\n\n/**\n * JobManager Constructor. 
An instance of JobManager knows everything about a given\n * job within the supervisor, including:\n * - work function code\n * - work function arguments\n * - all the SliceManager instances that go with this job\n * - how long a slice of this job is expected to run\n *\n * Instances of JobManager emit the following events:\n * - addSlice(sliceHandle)\n * - deleteSlice(sliceHandle)\n * - statusChange(new state, old state);\n *\n * JobManager States\n *\n * Start state:\n * initial\n *\n * Intermediate states:\n * ready\n *\n * Terminal states:\n * broken - job could not be initialized\n * refuse - for some reason, we have decided that we don't want work for this job\n * stop - job manager has been stopped\n *\n * Valid transitions:\n * initial -> broken\n * initial -> ready\n * ready -> stop\n * \n * NOTE: If you ever use a property with a leading underscore you are probably making a mistake.\n * But if you must, please ask paul, yarn, bryan or eddie for a CR.\n */\nclass JobManager extends EventEmitter\n{\n /**\n * @constructor\n * @param {Supervisor} parent - Owning Supervisor.\n * @param {object} jobMessage - Job Descriptor from getNewJobsForTask.\n * @param {SliceMessage[]} sliceMessages - Messages from task distributor describing slices.\n * @param {object} authorizationMessage - The signature that shipped with the task authorizing this worker.\n */\n constructor(parent, jobMessage, sliceMessages, authorizationMessage)\n {\n super('JobManager');\n /** @type {Supervisor} */\n this._supervisor = parent;\n /** @type {Slice[]} */\n this._sliceInventory = []; // All slices for this.address.\n /** @type {Sandbox[]} */\n this._sandboxInventory = []; // All sandboxes for this.address.\n /** @type {Synchronizer} */\n this._state = new Synchronizer(INITIAL, [ INITIAL, READY, STOP, REFUSE, BROKEN ]);\n this.state.on('change', (neu, old) => this.emit('statusChange', neu, old));\n\n /** XXXpfr @todo Is there any reason to do a deeper clone here. */\n this.jobMessage = { ...jobMessage };\n\n /** @type {string} */\n this._address = String(this.jobMessage.address);\n /** @type {Load} */\n this._load = null;\n /** @type {Statistics} */\n this._statistics = new Statistics(0.25);\n /** @type {number} */\n this._emaSliceTime = 0;\n /** @type {string[]} */\n this.rejectedJobReasons = [];\n /** @type {boolean} */\n this.isEstimation = true;\n\n const that = this;\n /** \n * Start loading dependencies in the background. 
Once these are loaded, this.state will \n * transition to 'ready' and the job will be ready for work.\n */\n (async function supervisor$$JobManager$$loadDependencies() {\n await that.fetchJob(that.jobMessage);\n })(/* iife */)\n .then (() => {\n debugging('jobmanager') && console.debug('JobManager is transitioning to READY', this.identifier);\n this.state.set(INITIAL, READY);\n this.addSlices(sliceMessages, authorizationMessage);\n })\n .catch((error) => {\n debuggingError && console.error('fetchJob has failed', error);\n this.state.set(INITIAL, BROKEN);\n })\n .finally(() => {\n selectiveDebugging && console.debug('JobManager.loadDependencies completed.', this.identifier);\n });\n }\n\n /** @type {string} */\n get address () { return this._address; }\n /** @type {string} */\n get uuid () { return this.jobMessage.uuid; }\n /** @type {boolean} */\n get workerConsole () { return this.jobMessage.workerConsole; }\n /** @type {object} */\n get requirements () { return this.jobMessage.requirements; }\n\n // These 3 properties have type object.\n get mro () { return this.jobMessage.mro; }\n get arguments () { return this.jobMessage.arguments; }\n get workFunction () { return this.jobMessage.workFunction; }\n\n /** @type {{computeGroups: Array<{opaqueId: string, name: string, description: string}>, name: string, description: string, link: string}} */\n get public () { return this.jobMessage.public; }\n /** @type {{sliceCPUTime: number, sliceGPUTime: number, sliceCPUDensity: number, sliceGPUDensity: number}} */\n get metrics () { return this.jobMessage.metrics; }\n /** @type {Supervisor} */\n get supervisor () { return this._supervisor; }\n /** @type {Synchronizer} */\n get state () { return this._state; }\n\n /** @type {boolean} */\n get initial () { return this.state.is(INITIAL); }\n /** @type {boolean} */\n get ready () { return this.state.is(READY); }\n /** @type {boolean} */\n get stopped () { return this.state.is(STOP); }\n /** @type {boolean} */\n get refuse () { return this.state.is(REFUSE); }\n /** @type {boolean} */\n get broken () { return this.state.is(BROKEN); }\n\n /** @type {Sandbox[]} */\n get sandboxInventory () { return this._sandboxInventory; }\n /** @type {Sandbox[]} */\n get liveSandboxes () { return this.sandboxInventory.filter((sandbox) => !sandbox.isTerminated); }\n /** @type {Sandbox[]} */\n get assignedSandboxes () { return this.sandboxInventory.filter((sandbox) => sandbox.isAssigned); }\n /** @type {Sandbox[]} */\n get workingSandboxes () { return this.sandboxInventory.filter((sandbox) => sandbox.isWorking); }\n /** @type {Sandbox[]} */\n get nonWorkingSandboxes () { return this.sandboxInventory.filter((sandbox) => !sandbox.isWorking && !sandbox.isTerminated); }\n /** @type {Slice[]} */\n get sliceInventory () { return this._sliceInventory; }\n /** @type {Slice[]} */\n get readySlices () { return this.sliceInventory.filter((slice) => slice.isReady); }\n /** @type {Slice[]} - ready and soon-to-be-ready */\n get queuedSlices () { return this.sliceInventory.filter((slice) => slice.isQueued); }\n /** @type {Slice[]} */\n get reservedSlices () { return this.sliceInventory.filter((slice) => slice.isReserved); }\n /** @type {Slice[]} - unassigned, ready and reserved */\n get activeSlices () { return this.sliceInventory.filter((slice) => slice.isActive); }\n /** @type {Slice[]} - working and soon-to-be-working */\n get workingSlices () { return this.sliceInventory.filter((slice) => slice.isWorking || slice.isReserved); }\n /** @type {Slice[]} - working only */\n get 
workingSlicesOnly () { return this.sliceInventory.filter((slice) => slice.isWorking); }\n\n /** @type {Load} */\n get load () { return this._load; }\n /** @type {number} */\n get delayMs () { return this.supervisor.delayMs(); }\n /** @type {Statistics} */\n get statistics () { return this._statistics; }\n /** @type {number} */\n get emaSliceTime () { return this._emaSliceTime; }\n /** @type {number} */\n get globalTime () { \n const denominator = this.metrics.sliceCPUDensity + this.metrics.sliceGPUDensity;\n return denominator > 0 ? (this.metrics.sliceCPUTime + this.metrics.sliceGPUTime) / denominator : 0;\n }\n /** @type {string} */\n get identifier () { return `${truncateAddress(this.address, addressTruncationLength)}.${this.state}`; }\n /** @type {string} */\n get [inspect] () { return `[Object JobManager <${this.public.name}::${this.address}::${this.state}>]`; }\n /** \n * Estimate the wall time (i.e. actual elapsed time on a wall clock) for a slice of this job.\n * We pick the ema time (alpha=0.25) + 0.7 * stddev to incorporate uncertainty.\n * About 76% of slice completion times are <= ema + 0.7 * stddev.\n * This estimate is very primitive and assumes that CPU and GPU code do not run at the same\n * time.\n *\n * @type {number} - estimated time period in milliseconds\n */ \n get estimateWallMs () { return this.emaSliceTime + 0.7 * this.statistics.stddev; } /** XXXpfr @todo make this.statistics.stddev reflect this.globalTime too. */\n /** @type {boolean} */\n get debugBuild () { return this.supervisor.debugBuild; }\n /**\n * Always display max info under debug builds, otherwise maximal error\n * messages are displayed to the worker, only if both worker and client agree.\n * @type {boolean} - When true, display stack trace and other enhanced diag info.\n **/\n get displayMaxDiagInfo () { return this.workerConsole && workerTuning.allowConsoleAccess || this.debugBuild; }\n\n // _Idx\n //\n // Statistics: updateStatistics, update.\n //\n\n /** @param {{ total, CPU, webGL }} timeReport */\n updateStatistics (timeReport)\n {\n this.statistics.add(timeReport.total);\n this._emaSliceTime = 0.5 * (this._emaSliceTime + this.statistics.ema); /** XXXpfr @todo double smoothing, need to test and compare against this.statistics.ema . 
*/\n debugging('jobmanager') && console.debug('JobManager.updateStatistics: mean', this.statistics.mean, 'stddev', this.statistics.stddev, 'ema', this.statistics.ema, 'ma', this.statistics.ma, 'x', this.statistics.x);\n }\n\n /**\n * Update jobMessage, add some slices to inventory and possibly update the initial seed of the statistics.\n * @param {object} jobMessage - Job Descriptor from getNewJobsForTask.\n * @param {SliceMessage[]} sliceMessages - Messages from task distributor describing slices.\n * @param {object} authorizationMessage - The signature that shipped with the task authorizing this worker.\n */\n update (jobMessage, sliceMessages, authorizationMessage)\n {\n this.jobMessage = Object.assign(this.jobMessage, { ...jobMessage });\n this.addSlices(sliceMessages, authorizationMessage);\n if (!this._emaSliceTime || this.statistics.count < 1)\n this._emaSliceTime = this.globalTime;\n // this._load = new Load(this.metrics.sliceCPUTime, this.metrics.sliceGPUTime); // SAVE for Sup2 Part II\n }\n\n // _Idx\n //\n // Dtors: destroy, stopWork.\n //\n\n /**\n * Destructor.\n */\n destroy()\n {\n selectiveDebugging && console.debug(`JobManager.destroy: terminating sandboxes and returning slices to scheduler for job manager ${this.identifier}.`);\n \n this.sandboxInventory.forEach((sandbox) => {\n if (!sandbox.isTerminated) sandbox.terminate(false);\n });\n this._sandboxInventory = [];\n\n const slicesToReturn = this.sliceInventory.filter((slice) => !slice.isFinished);\n const reason = `JobManager destroy: ${this.identifier}`;\n this.supervisor.returnSlices(slicesToReturn, reason, false /*removeSlices*/);\n this._sliceInventory = [];\n }\n\n /**\n * Terminates sandboxes and returns slices.\n * Sets the working flag to false, call @this.work to start working again.\n * \n * If forceTerminate is true: Terminates all sandboxes and returns all slices.\n * If forceTerminate is false: Terminates non-working sandboxes and returns queued slices.\n *\n * @param {boolean} [forceTerminate = true] - true if you want to stop the sandboxes from completing their current slice.\n */\n stopWork (forceTerminate = true)\n {\n selectiveDebugging && console.debug(`JobManager.stopWork(${forceTerminate}): terminating sandboxes and returning slices to scheduler.`);\n if (forceTerminate)\n {\n this.destroy();\n }\n else\n {\n // Return all non-finished slices which are not working.\n const reason = `JobManager stopWork(false): ${this.identifier}`;\n this.supervisor.returnSlices(this.queuedSlices, reason);\n this._sliceInventory = [];\n\n // Terminate all sandboxes that are not working.\n let idleSandboxes = this.sandboxInventory.filter(w => !w.isWorking);\n for (const sandbox of idleSandboxes)\n this.returnSandbox(sandbox);\n this._sandboxInventory = [];\n }\n }\n \n /**\n * Terminate 1 assigned sandbox when there are more than threshold assigned sandboxes.\n * @param {number} [threshold=0] - integer >= 0\n * @returns {boolean}\n */\n pruneAssignedSandbox(threshold = 0)\n {\n const _assignedSandboxes = this.assignedSandboxes;\n if (_assignedSandboxes.length > threshold)\n {\n debugging('supervisor') && console.debug(`JobManager.pruneAssignedSandbox terminate ${_assignedSandboxes[0].identifier}, threshold ${threshold}`);\n _assignedSandboxes[0].markAsUnready(); // Remove from this.assignedSandboxes.\n this.returnSandbox(_assignedSandboxes[0]);\n return true;\n }\n return false;\n }\n\n /**\n * Terminate 1 assigned sandbox when there are more than this.queuedSlices.length assigned sandboxes.\n * @returns {boolean}\n 
*/\n pruneExtraAssignedSandbox()\n {\n return this.pruneAssignedSandbox(this.queuedSlices.length);\n }\n\n // _Idx\n //\n // Work: Assign sandboxes and execute slices.\n //\n // hookUpSandboxListeners handles the sandbox complete event and tries to schedule another slice\n // from the same job on the sandbox. If there aren't any slices from the same job, it tries slices\n // from other job managers, and if there are no other ready slices, it calls Supervisor.requestTask\n // to get more work from TD .\n //\n // Several functions chain and return non-awaited promises.\n // When AWAIT_ALL is true, we await the promises, which makes easier debugging.\n //\n\n /**\n * Hook up JobManager specific Sandbox event listenters.\n * @param {Sandbox} sandbox\n */\n hookUpSandboxListeners(sandbox)\n {\n // Sandbox error handler.\n sandbox.on('sandboxError', (error) => JobManager.handleSandboxError(this, sandbox, error));\n // Sandbox complete handler.\n // When any sandbox completes, try to run another slice in the same jobManager.\n // Otherwise, go through the Supervisor.requestTask protocol.\n sandbox.addListener('complete', () => {\n if (this.supervisor.isReady())\n {\n const _readySlices = this.readySlices;\n if (this.supervisor.runSliceFromSameJob() && _readySlices.length > 0)\n {\n const slice = _readySlices[0];\n slice.markAsReserved();\n this.runSlice(slice, this.delayMs);\n }\n else if (!this.supervisor.isFetchingNewWork)\n {\n const unusedSandboxSlots = this.supervisor.unusedSandboxSlots();\n if (unusedSandboxSlots > 0)\n this.supervisor.requestTask(unusedSandboxSlots);\n }\n }\n });\n }\n\n /**\n * Create a Sandbox.\n * Start it. Assign it. Add it to jobManager inventory.\n * @param {number} delayStartMs - The delay ms to pass to sandbox.start .\n * @param {number} delayAssignMs - The delay ms to pass to sandbox.assign .\n * @returns {Promise<Sandbox>}\n */\n async readySandbox (delayStartMs, delayAssignMs)\n {\n const sandbox = await this.supervisor.createSandbox(delayStartMs);\n this.hookUpSandboxListeners(sandbox);\n return sandbox.assign(this, delayAssignMs)\n .catch((error) => { // error could be a string coming from evaluator\n const errorObj = { jobAddress: truncateAddress(this.address, addressTruncationLength), error: this.checkStackTrace(error) };\n this.supervisor.error(`Failed to assign job to sandbox: ${errorObj}`);\n this.returnSandbox(sandbox);\n throw error;\n });\n }\n\n /**\n * Mark both sandbox and slice as working.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n */\n markAsWorking(sandbox, slice)\n {\n assert(sandbox);\n assert(slice);\n slice.markAsWorking();\n sandbox.markAsWorking(slice);\n }\n\n /**\n * Create or reuse a sandbox and run a slice on it.\n * @param {Slice} slice - The slice to execute on a sandbox.\n * @param {number} [delayMs=0] - The delay ms to pass to sandbox.start .\n * @returns {Promise<void>}\n */\n async runSlice(slice, delayMs)\n {\n assert(slice);\n if (this.supervisor.sliceTiming) slice['queueingDelta'] = Date.now();\n\n const _assignedSandboxes = this.assignedSandboxes;\n if (AWAIT_ALL)\n {\n if (_assignedSandboxes.length > 0 && this.supervisor.readiedSandboxes.length < 1)\n {\n const sandbox = _assignedSandboxes[0];\n this.markAsWorking(sandbox, slice);\n return await this.runSliceOnSandbox(slice, sandbox, this.delayMs); \n }\n try\n {\n const sandbox = await this.readySandbox(delayMs/*delayStartMs*/, 0.5 * this.delayMs/*delayAssignMs*/);\n this.sandboxInventory.push(sandbox);\n this.markAsWorking(sandbox, slice);\n return await 
this.runSliceOnSandbox(slice, sandbox, this.delayMs);\n }\n catch (error)\n {\n // Any exception from readySandbox will have already been emitted.\n debugging('jobmanager') && console.error(`JobManager.runSlice: Failure trying to run slice ${slice.identifier} on a sandbox.`, error);\n this.supervisor.returnSlice(slice, 'EUNCAUGHT');\n }\n }\n else\n {\n if (_assignedSandboxes.length > 0 && this.supervisor.readiedSandboxes.length < 1)\n {\n const sandbox = _assignedSandboxes[0];\n this.markAsWorking(sandbox, slice);\n return this.runSliceOnSandbox(slice, sandbox, this.delayMs); \n }\n return this.readySandbox(delayMs/*delayStartMs*/, this.delayMs/*delayAssignMs*/)\n .then((sandbox) => {\n this.sandboxInventory.push(sandbox);\n this.markAsWorking(sandbox, slice);\n return this.runSliceOnSandbox(slice, sandbox, this.delayMs);\n })\n .catch((error) => {\n // Any exception from readySandbox will have already been emitted.\n debugging('jobmanager') && console.error(`JobManager.runSlice: Failure trying to run slice ${slice.identifier} on a sandbox.`, error);\n this.supervisor.returnSlice(slice, 'EUNCAUGHT');\n })\n }\n }\n\n /**\n * Execute slice on sandbox, collect results or handle errors, and clean up.\n * @param {Slice} slice - The slice to execute on the sandbox.\n * @param {Sandbox} sandbox - The sandbox on which to execute the slice.\n * @param {number} [delayMs=0] - The delay ms to pass to sandbox.work .\n * @returns {Promise<void>}\n */\n async runSliceOnSandbox(slice, sandbox, delayMs = 0)\n {\n selectiveDebugging2 && console.debug(`runSliceOnSandbox ${sandbox.identifier}~${slice.identifier} #readySlices ${this.readySlices.length}/${this.supervisor.readySliceCount()}, #SBs, ${this.sandboxInventory.length}/${this.supervisor.sandboxCount()}, #working(w/r/wo/wsbx), ${this.workingSlices.length}/${this.reservedSlices.length}/${this.workingSlicesOnly.length}/${this.workingSandboxes.length}, #sup-working(w/r/wo/wsbx), ${this.supervisor.workingSliceCount()}/${this.supervisor.reservedSliceCount()}/${this.supervisor.workingSliceOnlyCount()}/${this.supervisor.workingSandboxCount()}, #assigned, ${this.assignedSandboxes.length}, #readiedSBs, ${this.supervisor.readiedSandboxes.length}, #localSBs, ${this.sandboxInventory.map(s => Number(s.id)).sort((x,y)=>x-y)}`);\n //console.log('runSliceOnSandbox', Date.now() - this.supervisor.lastTime, slice.identifier); // SAVE temporarily\n assert(slice && sandbox);\n slice.verifyWorking();\n sandbox.verifyWorking();\n\n if (this.supervisor.sliceTiming)\n {\n slice['queueingDelta'] = Date.now() - slice['queueingDelta'];\n slice['executionDelta'] = Date.now();\n }\n slice.startTime = Date.now();\n\n if (AWAIT_ALL)\n {\n try\n {\n const result = await sandbox.work(delayMs);\n debugging('jobmanager') && console.debug(`runSliceOnSandbox - success: ${sandbox.id}~${slice.identifier}`, result);\n slice.collectResult(result, true);\n sandbox.changeWorkingToAssigned();\n this.supervisor.recordResult(slice);\n }\n catch (error)\n {\n const reason = await this.supervisor.handleSandboxWorkError(sandbox, slice, error);\n debuggingError && console.error(`runSliceOnSandbox - failure: ${sandbox.id}~${slice.identifier}`, reason, error);\n }\n finally\n {\n slice.startTime = null;\n if (this.supervisor.sliceTiming)\n {\n slice['executionDelta'] = Date.now() - slice['executionDelta'];\n slice['resultDelta'] = Date.now();\n }\n }\n }\n else\n {\n return sandbox.work(delayMs)\n .then((result) => {\n debugging('jobmanager') && console.debug(`runSliceOnSandbox - success: 
${sandbox.id}~${slice.identifier}`, result);\n slice.collectResult(result, true);\n sandbox.changeWorkingToAssigned();\n this.supervisor.recordResult(slice);\n })\n .catch(async (error) => {\n const reason = await this.supervisor.handleSandboxWorkError(sandbox, slice, error);\n debuggingError && console.error(`runSliceOnSandbox - failure: ${sandbox.id}~${slice.identifier}`, reason, error);\n })\n .finally(() => {\n slice.startTime = null;\n if (this.supervisor.sliceTiming)\n {\n slice['executionDelta'] = Date.now() - slice['executionDelta'];\n slice['resultDelta'] = Date.now();\n }\n });\n }\n }\n\n /**\n * Sandbox has had an error which is not from the work function: terminate it\n * and try to redo the slice.\n * @param {JobManager} jobManager \n * @param {Sandbox} sandbox \n * @param {Error} error \n */\n static handleSandboxError (jobManager, sandbox, error)\n {\n const slice = sandbox.slice;\n if (slice)\n {\n if (!slice.isWorking) // Sanity. Exception should never fire.\n throw new Error(`handleSandboxError: slice ${slice.identifier} must be WORKING.`);\n\n slice['sandboxErrorCount'] = (slice['sandboxErrorCount'] || 0) + 1;\n sandbox.slice = null;\n if (slice['sandboxErrorCount'] < (workerTuning.maxSandboxErrorsPerSlice || 2))\n slice.resetState();\n else\n {\n slice.collectResult(error, false /*success*/);\n const reason = `sandboxError event handler, too many errors ${slice['sandboxErrorCount']}: ${sandbox.id}~${jobManager.identifier}`;\n jobManager.supervisor.returnSlice(slice, reason);\n }\n jobManager.supervisor.error(`JobManager.handleSandboxError: Sandbox ${sandbox.identifier}...(${sandbox.public.name}/${slice['sandboxErrorCount']}) with slice ${slice.identifier} had error.`, error);\n }\n else\n jobManager.supervisor.error(`JobManager.handleSandboxError: Sandbox ${sandbox.identifier} has no slice...(${sandbox.public.name} had error.`, error);\n\n jobManager.returnSandbox(sandbox); /* terminate the sandbox */\n }\n\n // _Idx\n //\n // Remove from array and return sandbox.\n //\n\n /**\n * Remove sandbox from this.sandboxInventory and terminate is not already terminated.\n * @param {Sandbox} sandbox - The sandbox to remove from this.sandboxInventory.\n * @param {boolean} [assertExists=true] - When true (and type=array) then assert that sandbox exists in this.sandboxInventory.\n */\n returnSandbox(sandbox, assertExists = true)\n {\n debugging('supervisor') && console.debug(`JobManager.returnSandbox: ${sandbox.identifier}, assertExists ${assertExists}`);\n if (this.sandboxInventory && this.sandboxInventory.length > 0)\n JobManager.removeElement(this.sandboxInventory, sandbox, assertExists);\n // The cleaning up of terminated sandboxes in this.supervisor.sandboxInventory happens in this.supervisor.purgeSandboxes.\n if (!sandbox.isTerminated)\n sandbox.terminate(false);\n }\n /**\n * Remove slice from this.sliceInventory.\n * @param {Slice} slice - The slice to remove from this.sliceInventory.\n * @param {boolean} [assertExists=true] - When true (and type=array) then assert that slice exists in this.sliceInventory.\n */\n removeSlice(slice, assertExists = true)\n {\n assert(this.sliceInventory instanceof Array);\n if (this.sliceInventory && this.sliceInventory.length > 0)\n JobManager.removeElement(this.sliceInventory, slice, assertExists);\n }\n\n /**\n * Remove slices from this.sliceInventory.\n * @param {Slice[]} slices - The slices to remove from this.sliceInventory.\n */\n removeSlices(slices)\n {\n assert(this.sliceInventory instanceof Array);\n if (this.sliceInventory && 
this.sliceInventory.length > 0)\n this._sliceInventory = this.sliceInventory.filter((slice) => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove element from arrayLike.\n * @param {Array<*>} array\n * @param {object|number} element\n * @param {boolean} [assertExists=true]\n */\n static removeElement(array, element, assertExists = true)\n {\n let index = array.indexOf(element);\n if (assertExists)\n {\n assert(index !== -1);\n array.splice(index, 1);\n } else if (index !== -1) array.splice(index, 1);\n }\n\n // _Idx\n //\n // Fetch: WorkFn, slices, arguments.\n //\n\n /**\n * Fetch work function, work function arguments and possibly the range object describing the jobs slices..\n * @param {object} mpe - messagePayloadElement: job object returned by task-jobs.js . \n */\n async fetchJob(mpe)\n {\n if (debugging('worker'))\n dumpObject(mpe, 'JobManager.fetchJob: mpe', 512);\n\n if (AWAIT_ALL)\n {\n if (!mpe.workFunction)\n {\n mpe.workFunction = await fetchURI(mpe.codeLocation, this.supervisor.makeSafeOriginList('fetchWorkFunctions'));\n if(mpe.requirements.useStrict)\n mpe.useStrict = true;\n delete mpe.codeLocation;\n }\n if (!mpe.arguments)\n {\n let promises = [];\n let uris = mpe.argumentsLocation;\n if (uris)\n for (let i = 0; i < uris.length; i++)\n promises.push(fetchURI(uris[i].value, this.supervisor.makeSafeOriginList('fetchArguments')));\n\n mpe.arguments = await Promise.all(promises);\n\n // node localExec jobs read arguments from a file, so need to ensure they are properly parsed after being read.\n if (this.supervisor.options.localExec && !DCP_ENV.isBrowserPlatform)\n mpe.arguments = scopedKvin.parse(mpe.arguments[0]);\n\n delete mpe.argumentsLocation;\n }\n // if job input data is range object, we send the range object URI to the worker\n if (!mpe.mro && mpe.MROLocation)\n {\n mpe.mro = await fetchURI(mpe.MROLocation, this.supervisor.makeSafeOriginList('fetchData'));\n delete mpe.MROLocation;\n }\n }\n else\n {\n let promises = [];\n\n // Get workFn.\n if (!mpe.workFunction)\n {\n const workFunctionPromise = fetchURI(mpe.codeLocation, this.supervisor.makeSafeOriginList('fetchWorkFunctions'))\n .then((workFunction) => {\n mpe.workFunction = workFunction;\n if (mpe.requirements.useStrict)\n mpe.useStrict = true;\n delete mpe.codeLocation;\n });\n promises.push(workFunctionPromise);\n }\n\n // if job input data is range object, we send the range object URI to the worker\n if (!mpe.mro && mpe.MROLocation)\n {\n // Do we need this? 
SAVE.\n // if (this.supervisor.options.localExec && !DCP_ENV.isBrowserPlatform)\n // mpe.MROLocation = await fetchURI(mpe.MROLocation, this.supervisor.allowedOrigins, this.supervisor.fetchData);\n\n const mroPromise = fetchURI(mpe.MROLocation, this.supervisor.makeSafeOriginList('fetchData'))\n .then((mro) => {\n mpe.mro = mro;\n delete mpe.MROLocation;\n });\n promises.push(mroPromise)\n }\n\n // Get workFn args.\n if (!mpe.arguments && mpe.argumentsLocation)\n {\n mpe.arguments = new Array(mpe.argumentsLocation.length);\n for (let k = 0; k < mpe.argumentsLocation.length; k++)\n promises.push(fetchURI(mpe.argumentsLocation[k].value, this.supervisor.makeSafeOriginList('fetchArguments'))\n .then((arg) => (mpe.arguments[k] = arg) ));\n }\n\n await Promise.all(promises);\n\n if (mpe.argumentsLocation) delete mpe.argumentsLocation;\n }\n\n return mpe;\n }\n\n /**\n * Look up slice.datumUri or use the range object this.mro, constructed by fetchJob.\n * @param {Slice} slice \n * @returns {Promise<{ inputDatum, dataError }>}\n */\n async fetchSliceData (datumUri, slice) // eslint-disable-line require-await\n {\n function catchHandler(error)\n {\n const dataError = error;\n dataError['errorCode'] = (error.code === 'EFETCH') ? 'EFETCH' : 'EUNCAUGHTERROR';\n return dataError;\n }\n\n if (!datumUri && !this.state.is(READY))\n throw new Error(`When !datumUri, JobManager '${this.identifier}' must be in READY state.`);\n\n try\n {\n if (!datumUri)\n {\n if (!this.mro) throw new Error('Must complete call to JobManager.fetchJob before calling JobManager.fetchSliceData.');\n const ro = rehydrateRange(this.mro);\n // Slice numbers start at 1.\n const inputDatum = ro[slice.sliceNumber - 1];\n debugging('jobmanager') && console.debug(`Fetched mro datum: ${stringify(inputDatum, 512)}`);\n return { inputDatum, dataError: null };\n }\n\n return fetchURI(datumUri, this.supervisor.makeSafeOriginList('fetchData'))\n .then((inputDatum) => {\n debugging('jobmanager') && console.debug(`Fetched datum: ${stringify(inputDatum, 512)}`);\n return { inputDatum, dataError: null };\n })\n .catch((error) => {\n const dataError = catchHandler(error);\n return { inputDatum: null, dataError };\n });\n }\n catch (error)\n {\n const dataError = catchHandler(error);\n return { inputDatum: null, dataError };\n }\n }\n\n // _Idx\n //\n // Miscellaneous.\n //\n\n /**\n * Debugging helper.\n * @param {boolean} [details=true]\n */\n dumpSlices(details = true)\n {\n if (this.sliceInventory.length < 1) return;\n const queued = [], working = [], complete = [], failed = [], finished = [], extras = [];\n for (const slice of this.sliceInventory)\n {\n if (slice.isUnassigned || slice.isReady)\n queued.push(slice);\n else if (slice.isReserved || slice.isWorking)\n working.push(slice);\n else if (slice.isComplete)\n complete.push(slice);\n else if (slice.hasFailed)\n failed.push(slice);\n else if (slice.isFinished)\n finished.push(slice);\n else\n extras.push(slice);\n }\n console.log(`:--JobManager.dumpSlices--${this.identifier} slices ${this.sliceInventory.length}-------`);\n console.log(`-----queued(${queued.length})-----------------------------------------------------------------`);\n if (details)\n {\n for (const slice of queued)\n console.log(slice.identifier);\n console.log(`-----working(${working.length})----------------------------------------------------------------`);\n for (const slice of working)\n console.log(slice.identifier);\n 
console.log(`-----complete(${complete.length})----------------------------------------------------------------`);\n for (const slice of complete)\n console.log(slice.identifier);\n console.log(`-----failed(${failed.length})----------------------------------------------------------------`);\n for (const slice of failed)\n console.log(slice.identifier);\n console.log(`-----finished(${finished.length})---------------------------------------------------------------`);\n for (const slice of finished)\n console.log(slice.identifier);\n console.log(`-----extras(${extras.length})---------------------------------------------------------------`);\n for (const slice of extras)\n console.log(slice.identifier);\n }\n else\n {\n console.log(`-----working(${working.length})----------------------------------------------------------------`);\n console.log(`-----complete(${complete.length})----------------------------------------------------------------`);\n console.log(`-----failed(${failed.length})----------------------------------------------------------------`);\n console.log(`-----finished(${finished.length})---------------------------------------------------------------`);\n console.log(`-----extras(${extras.length})---------------------------------------------------------------`);\n }\n console.log('-----------------------------------------------------------------------------------');\n }\n\n /**\n * Calls removeStackTrace when !this.displayMaxDiagInfo .\n * @param {string|Error} error\n * @returns {string|Error}\n */\n checkStackTrace(error)\n {\n return this.displayMaxDiagInfo ? error : this.supervisor.removeStackTrace(error);\n }\n\n /**\n * Add slices to the job manager's inventory.\n *\n * @param {SliceMessage[]} sliceMessages - Messages from task distributor describing slices.\n * @param {object} authorizationMessage - The signature that shipped with the task authorizing this worker.\n */\n addSlices (sliceMessages, authorizationMessage)\n {\n sliceMessages.forEach((sliceMessage) => {\n const slice = new Slice(this, sliceMessage, authorizationMessage);\n if (!slice.isEstimation) this.isEstimation = false;\n this.sliceInventory.push(slice);\n });\n }\n\n /**\n * Schedule the slice to be executed.\n * @param {Slice} slice\n * @param {boolean} [placeInTheFrontOfTheQueue=false]\n */\n scheduleSlice(slice, placeInTheFrontOfTheQueue = false)\n {\n // Reset slice state to allow execution.\n slice.resetState();\n // Enqueue in the to-be-executed queue.\n if (placeInTheFrontOfTheQueue) this.sliceInventory.unshift(slice);\n else this.sliceInventory.push(slice);\n }\n\n /**\n * XXXpfr @todo UNUSED but may be used soon\n * Returns a counterfeit JobManager - a generic object with most of the same\n * ownProperties as a real exports.JobManager. Any inventories and synchronizer are\n * duplicated (equivalent state, no events copied, ~ shallow clone)\n * @returns {JobManager}\n */\n counterfeit()\n {\n /** @type {JobManager} */\n const fake = {};\n \n for (let prop in Object.keys(this))\n {\n if (this[prop] instanceof Inventory)\n fake[prop] = this[prop].duplicate();\n else if (this[prop] instanceof Synchronizer)\n fake[prop] = this[prop].valueOf();\n else\n fake[prop] = this[prop];\n }\n\n return fake;\n }\n\n}\nexports.JobManager = JobManager;\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/job-manager.js?");
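For readers skimming the bundled source above: the updated job-manager.js estimates how long a slice of a job will take by keeping an exponential moving average of observed slice times (alpha 0.25), double-smoothing it in updateStatistics, and padding the result with 0.7 standard deviations in the estimateWallMs getter; update() seeds the estimate from the metrics-derived globalTime before any results arrive. The following is a minimal, self-contained sketch of that arithmetic only, not code from the package; RollingStats is a hypothetical stand-in for the bundled rolling-statistics module, whose real implementation is not shown in this hunk.

// Sketch of the JobManager slice wall-time estimate (assumptions noted above).
class RollingStats {
  constructor (alpha) { this.alpha = alpha; this.ema = 0; this.count = 0; this.mean = 0; this.m2 = 0; }
  add (x) {
    // Exponential moving average plus Welford-style running variance.
    this.count += 1;
    this.ema = this.count === 1 ? x : this.alpha * x + (1 - this.alpha) * this.ema;
    const delta = x - this.mean;
    this.mean += delta / this.count;
    this.m2 += delta * (x - this.mean);
  }
  get stddev () { return this.count > 1 ? Math.sqrt(this.m2 / (this.count - 1)) : 0; }
}

const stats = new RollingStats(0.25);              // same alpha the JobManager constructor passes to Statistics
let emaSliceTime = 0;                              // update() seeds this from globalTime when count < 1

function updateStatistics (totalMs) {              // mirrors JobManager.updateStatistics(timeReport.total)
  stats.add(totalMs);
  emaSliceTime = 0.5 * (emaSliceTime + stats.ema); // the double smoothing flagged in the XXXpfr todo
}

function estimateWallMs () {                       // mirrors the estimateWallMs getter
  return emaSliceTime + 0.7 * stats.stddev;        // ~76% of slice completion times fall at or below this
}

[900, 1100, 1050, 1300].forEach(updateStatistics);
console.log('estimated wall ms:', Math.round(estimateWallMs()));

Seeding emaSliceTime from globalTime (sliceCPUTime + sliceGPUTime divided by the density sum) before any slices have completed keeps the first estimates from collapsing to zero, which is why update() checks statistics.count before overwriting it.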
4514
4514
 
4515
4515
  /***/ }),
4516
4516
 
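Both versions of job-manager.js in the hunk above note that several functions either chain promises or await them depending on the AWAIT_ALL build flag, which is false in the released bundle. The sketch below is not part of the package; readySandboxStub, runOnSandboxStub, and returnSliceStub are invented stand-ins for readySandbox, runSliceOnSandbox, and supervisor.returnSlice. It only illustrates the two shapes of the same runSlice-style flow and why the awaited form is easier to step through: failures surface at the try/catch rather than inside a detached .catch handler.

// Sketch of the AWAIT_ALL pattern described in the job-manager comments (stand-ins noted above).
const AWAIT_ALL = false;                                         // the released bundle ships with AWAIT_ALL = false

async function readySandboxStub ()          { return { id: 1 }; }
async function runOnSandboxStub (sb, slice) { return `ran ${slice} on sandbox #${sb.id}`; }
function returnSliceStub (slice, reason)    { console.error('returning', slice, reason); }

async function runSliceSketch (slice) {
  if (AWAIT_ALL) {
    try {                                                        // awaited form: errors land here with a full async stack
      const sandbox = await readySandboxStub();
      return await runOnSandboxStub(sandbox, slice);
    } catch (error) {
      console.error('runSliceSketch failed:', error);
      return returnSliceStub(slice, 'EUNCAUGHT');
    }
  }
  return readySandboxStub()                                      // chained form: the promise is handed back unawaited
    .then((sandbox) => runOnSandboxStub(sandbox, slice))
    .catch((error) => {
      console.error('runSliceSketch failed:', error);
      return returnSliceStub(slice, 'EUNCAUGHT');
    });
}

runSliceSketch('slice-42').then((outcome) => console.log(outcome));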
@@ -4551,7 +4551,7 @@ eval("/**\n * @file worker/supervisor2/rolling-statistics.js\n *\n * A support c
4551
4551
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4552
4552
 
4553
4553
  "use strict";
4554
- eval("/* eslint-disable require-await */\n// NOTE - need timeout/postmessage function\n/**\n * @file dcp-client/worker/supervisor2/sandbox.js\n *\n * A sandbox that when constructed and assigned can do work for\n * a distributed slice. A sandbox runs for a single slice at a time.\n *\n * Usage (simplified...):\n * const sandbox = new Sandbox(this.cache, { ...this.options.sandboxOptions }, this.allowedOrigins);\n * await sandbox.start(delayStartMs);\n * await sandbox.assign(this, delayAssignMs);\n * return sandbox.work(slice, delayWorkMs)\n * .then((result) => { \n * slice.collectResult(result, true);\n * sandbox.changeWorkingToAssigned();\n * this.supervisor.recordResult(slice)\n * })\n * .catch((error) => {\n * slice.collectResult(error, false);\n * const reason = this.supervisor.handleSandboxWorkError(sandbox, slice, error);\n * this.supervisor.returnSlice(slice, reason);\n * this.returnSandbox(sandbox);\n * });\n *\n * Debug flags:\n * Sandbox.debugWork = true // - turns off 30 second timeout to let user debug sandbox innards more easily\n * Sandbox.debugState = true // - logs all state transitions for this sandbox\n * Sandbox.debugEvents = true // - logs all events received from the sandbox\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * Wes Garland, wes@kingsds.network\n * Paul, paul@kingsds.network\n * @date May 2019\n * May 2019\n * Decemeber 2020\n * June 2022\n * @module sandbox\n */\n\n/* global dcpConfig */ // eslint-disable-line no-redeclare\n// @ts-check\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { assert, assertEq3 } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst nanoid = (__webpack_require__(/*! nanoid */ \"./node_modules/nanoid/index.browser.js\").nanoid);\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { truncateAddress } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\nconst scopedKvin = new kvin.KVIN({Object: ({}).constructor,\n Array: ([]).constructor, \n Function: (()=>{}).constructor});\n\nlet timeDilation = 1;\nif (DCP_ENV.platform === 'nodejs') {\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n /** Make timers 10x slower when running in niim */\n timeDilation = (requireNative('module')._cache.niim instanceof requireNative('module').Module) ? 
10 : 1;\n}\nconst addressTruncationLength = 20;\nconst workerTuning = dcpConfig.worker;\ntimeDilation = 1;\nlet sbCnter = 0; // Global counter of terminated sandboxes guarded by debugging().\n\n/**\n * Wraps console.debug to emulate debug module prefixing messages on npm.\n * @param {...any} args\n */\nconst debug = (...args) => {\n if (debugging()) {\n console.debug('Sandbox:', ...args);\n }\n};\n\n// Sandbox states\nconst UNREADY = 'UNREADY' // No Sandbox (web worker, saworker, etc) has been constructed yet\nconst READYING = 'READYING' // Sandbox is being constructed and environment (bravojs, env) is being set up\nconst READY_FOR_ASSIGN = 'READY_FOR_ASSIGN' // Sandbox is ready to be assigned\nconst ASSIGNED = 'ASSIGNED' // Sandbox is assigned but not working\nconst ASSIGNING = 'ASSIGNING' // Sandbox is in the process of being ASSIGNED\nconst WORKING = 'WORKING' // Sandbox is working\nconst BROKEN = 'BROKEN' // Sandbox is broken and should be terminated.\nconst EVAL_RESULT_PREFIX = 'evalResult::';\n\nclass SandboxError extends Error { constructor(errorCode, ...args) { super(...args); this.errorCode = errorCode; }}\nclass NoProgressError extends SandboxError { constructor(...args) { super('ENOPROGRESS', ...args); } }\nclass SliceTooSlowError extends SandboxError { constructor(...args) { super('ESLICETOOSLOW', ...args); } }\nclass UncaughtExceptionError extends SandboxError { constructor(...args) { super('EUNCAUGHT', ...args); } }\nclass RemoteFetchError extends SandboxError { constructor(...args) { super('EFETCH', ...args); } }\n\nfunction fillInError(errorCtor, errorIn) {\n const errorOut = new errorCtor(errorIn.message);\n errorOut.name = errorIn.name;\n errorOut.fileName = errorIn.fileName;\n errorOut.lineNumber = errorIn.lineNumber;\n errorOut.stack = errorIn.stack;\n return errorOut;\n}\n\n/** @typedef {import('dcp/common/dcp-events').EventEmitter} EventEmitter */\n/** @typedef {import('./slice2').Slice} Slice */\n/** @typedef {import('./job-manager').JobManager} JobManager */\n/** @typedef {import('./module-cache').ModuleCache} ModuleCache */\n\n/**\n * @access public\n * @typedef {object} SandboxOptions\n * @constructor {function} [SandboxConstructor]\n * @property {boolean} [ignoreNoProgress] - When true, the sandbox will not be stopped for not calling progress\n */\n\nclass Sandbox extends EventEmitter {\n /**\n * A Sandbox (i.e. a worker sandbox) which executes distributed slices.\n *\n * @constructor\n * @param {ModuleCache} moduleCache\n * @param {SandboxOptions} options\n * @param {string[]} origins\n */\n constructor (moduleCache, options, origins) {\n super('Sandbox');\n /** @type {ModuleCache} */\n this.moduleCache = moduleCache;\n /** @type {SandboxOptions} */\n this.options = {\n ignoreNoProgress: false,\n ...options,\n SandboxConstructor: options.SandboxConstructor || (__webpack_require__(/*! 
../evaluators */ \"./src/dcp-client/worker/evaluators/index.js\").BrowserEvaluator),\n }\n /** @type {string[]} */\n this.allowedOrigins = origins;\n /** @type {Synchronizer} */\n this.state = new Synchronizer(UNREADY, [ UNREADY, READYING, READY_FOR_ASSIGN, ASSIGNING, ASSIGNED, WORKING, BROKEN ]);\n\n /** @type {string} */\n this.jobAddress = null;\n /** @type {object} */\n this.evaluatorHandle = null;\n /** @type {object} */\n this.capabilities = null;\n /** @type {EventEmitter} */\n this.ee = new EventEmitter('SandboxInternal');\n\n /** @type {boolean} */\n this.terminated = false;\n /** @type {number?} */\n this.progress = 100;\n /** @type {object} */\n this.progressReports = null;\n /** @type {object} */\n this.progressTimeout = null;\n /** @type {object} */\n this.sliceTimeout = null;\n /** @type {object} */\n this.rejectionData = null;\n /** @type {Slice} */\n this.slice = null;\n /** @type {number?} */\n this.sliceStartTime = null;\n /** @type {number} */\n this.id = Sandbox.getNewId();\n\n this.ringMessageHandlers = [\n this.handleRing0Message,\n this.handleRing1Message,\n this.handleRing2Message,\n this.handleRing3Message,\n ];\n\n this.resetSliceTimeReport();\n }\n\n /** @type {string} - debug string that characterizes sandbox. */\n get identifier() {\n if (!this.jobAddress) return `${this.id}`;\n return `${this.id}.${truncateAddress(this.jobAddress, addressTruncationLength)}.${this.state?this.state:'<null>'}`;\n }\n\n static getNewId() {\n return Sandbox.idCounter++;\n }\n\n /** @type {boolean} */\n get isReadyForAssign () {\n return this.state.is(READY_FOR_ASSIGN);\n }\n /** @type {boolean} */\n get isAssigned () {\n return this.state.is(ASSIGNED);\n }\n /** @type {boolean} */\n get isWorking () {\n return this.state.is(WORKING);\n }\n /** @type {boolean} */\n get isBroken () {\n return this.state.is(BROKEN);\n }\n /** @type {boolean} */\n get isTerminated () {\n return this.terminated;\n }\n\n /**\n * Mark WORKING sandbox as ASSIGNED in preparation for possible reuse,\n */\n changeWorkingToAssigned () {\n this.state.testAndSet(WORKING, ASSIGNED);\n }\n \n /**\n * Remove from collection of ASSIGNED sandboxes in preparation for termination,\n */\n markAsUnready() {\n this.state.testAndSet(ASSIGNED, UNREADY);\n }\n \n /**\n * Transitions: ASSIGNED --> WORKING and assigns the slice.\n * @param {Slice} slice \n */\n markAsWorking (slice) {\n if (!this.isAssigned)\n throw new Error(`Sandbox ${this.identifier} is not ready to work`);\n this.state.set(ASSIGNED, WORKING);\n this.slice = slice;\n }\n \n /**\n * Fancy assert.\n */\n verifyWorking () {\n if (!this.isWorking) {\n throw new Error(`Sandbox ${this.identifier} is not working`);\n }\n }\n\n /**\n * Readies the sandbox. 
This will result in the sandbox being ready and not assigned,\n * it will need to be assigned with a job before it is able to do work.\n *\n * @todo maybe preload specific modules or let the cache pass in what modules to load?\n * @param {number} [delay=0]\n * @returns {Promise<Sandbox>}\n * @throws on failure to ready\n */\n async start(delay = 0) {\n this.state.set(UNREADY, READYING);\n\n if (delay > 0) await new Promise((resolve) => setTimeout(resolve, delay * timeDilation));\n\n try {\n // RING 0\n this.evaluatorHandle = new this.options.SandboxConstructor({\n name: `DCP Sandbox #${this.id}`,\n });\n this.evaluatorHandle.onerror = this.onerror.bind(this);\n\n const messageHandler = this.onmessage.bind(this);\n this.evaluatorHandle.onmessage = function onmessage(event)\n {\n let data;\n if (event.data.serialized)\n data = kvin.parse(event.data.message);\n else\n data = kvin.unmarshal(event.data);\n messageHandler({ data });\n }\n\n const evaluatorPostMessage = this.evaluatorHandle.postMessage.bind(this.evaluatorHandle);\n this.evaluatorHandle.postMessage = function postMessage(message)\n {\n evaluatorPostMessage(scopedKvin.marshal(message));\n }\n\n const that = this;\n this.evaluatorHandle.addEventListener('end', () => that.terminate(true));\n\n // Now in RING 1\n\n // Now in RING 2\n await this.describe();\n this.state.set(READYING, READY_FOR_ASSIGN);\n this.emit('ready', this);\n } catch (error) {\n console.warn('Failed to start the sandbox -', error.message);\n this.state.set(READYING, BROKEN);\n this.terminate(false);\n throw error;\n }\n \n return this;\n }\n\n /**\n * This will assign the sandbox with a job, loading its sandbox code\n * into the sandbox.\n *\n * @param {JobManager} jobManager - The job manager that will be the owner of this sandbox.\n * @param {number} delay\n * @returns {Promise<Sandbox>}\n * @throws on initialization failure\n */\n async assign (jobManager, delay = 0) {\n \n if (delay > 0) await new Promise((resolve) => setTimeout(resolve, delay * timeDilation));\n \n this.state.set(READY_FOR_ASSIGN, ASSIGNING);\n this.jobAddress = jobManager.address;\n this.job = jobManager.jobMessage;\n \n assertEq3(this.job.address, this.jobAddress);\n assert(typeof this.job === 'object');\n assert(typeof this.job.requirements === 'object');\n assert(Array.isArray(this.job.dependencies));\n assert(Array.isArray(this.job.requirePath));\n\n // Extract public data from job, with defaults\n this.public = Object.assign({\n name: `Anonymous Job ${this.job.address.slice(0, 6)}`,\n description: 'Discreetly helping make the world smarter.',\n link: 'https://distributed.computer/about',\n }, this.job.public);\n\n // Future: We may want other filename tags for appliances // RR Nov 2019\n\n // Important: The order of applying requirements before loading the sandbox code\n // is important for modules and sandbox code to set globals over the whitelist.\n await this.applySandboxRequirements(this.job.requirements);\n await this.assignEvaluator();\n \n return this;\n }\n \n async assignEvaluator() {\n debug('Begin assigning job to evaluator');\n const that = this;\n\n return new Promise(function sandbox$$assignEvaluatorPromise(resolve, reject) {\n const message = {\n request: 'assign',\n job: that.job,\n sandboxConfig: workerTuning.sandbox,\n };\n\n /* note - onFailListener used for removal. This is necessary due to a bug in ee.once. 
/wg Feb 2022 */\n \n const onSuccess = (event) => {\n // eslint-disable-next-line no-use-before-define\n that.ee.removeListener('reject', onFailListener);\n that.emit('assigned', event.jobAddress);\n debug('Job assigned to evaluator');\n resolve();\n };\n\n const onFail = (error) => {\n // eslint-disable-next-line no-use-before-define\n that.ee.removeListener('assigned', onSuccessListener);\n reject(error);\n };\n\n const onSuccessListener = that.ee.once('assigned', onSuccess);\n const onFailListener = that.ee.once('reject', onFail);\n that.evaluatorHandle.postMessage(message);\n }).then((event) => {\n that.state.set(ASSIGNING, ASSIGNED);\n return event;\n }).catch(error => {\n that.state.set(ASSIGNING, BROKEN);\n debug('Failure in assigning job to evaluator', error);\n throw error;\n });\n }\n\n /**\n * Evaluates a string inside the sandbox.\n *\n * @param {string} code - the code to evaluate in the sandbox\n * @param {string} filename - the name of the 'file' to help with debugging,\n * no longer working though?\n * @returns {Promise} - resolves with eval result on success, rejects\n * otherwise\n */\n eval(code, filename) {\n const that = this;\n \n return new Promise(function sandbox$$eval$Promise(resolve, reject) {\n let msgId = nanoid();\n let msg = {\n request: 'eval',\n data: code,\n filename,\n msgId, \n };\n\n const eventId = EVAL_RESULT_PREFIX + msgId;\n\n const onSuccess = (event) => {\n // eslint-disable-next-line no-use-before-define\n that.ee.removeListener('reject', onFailListener);\n resolve(event);\n }\n\n const onFail = (error) => {\n // eslint-disable-next-line no-use-before-define\n that.ee.removeListener(eventId, onSuccessListener);\n reject(error);\n }\n\n const onSuccessListener = that.ee.once(eventId, onSuccess);\n const onFailListener = that.ee.once('reject', onFail);\n\n that.evaluatorHandle.postMessage(msg);\n })\n }\n\n /**\n * Resets the state of the bootstrap, without resetting the sandbox function if assigned.\n * Mostly used to reset the progress status before reusing a sandbox on another slice.\n * Must be called after @start.\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n resetSandboxState () {\n const that = this;\n\n return new Promise(function sandbox$resetSandboxStatePromise(resolve, reject) {\n let successCb, failTimeout;\n let msg = {\n request: 'resetState',\n };\n\n successCb = that.ee.once('resetStateDone', function sandbox$resetSandboxState$success () {\n if (failTimeout === false)\n return; /* already rejected */\n clearTimeout(failTimeout);\n failTimeout = false;\n resolve();\n });\n\n failTimeout = setTimeout(function sandbox$resetSandboxState$fail() {\n if (failTimeout === false)\n return; /* already resolved */\n \n that.ee.removeListener('resetStateDone', successCb);\n that.terminate(false);\n failTimeout = false;\n\n reject(new Error('resetState never received resetStateDone event from sandbox'));\n }, 3000 * timeDilation); /* XXXwg need tuneable */\n\n assert(that.evaluatorHandle); // It is possible that that.terminate nulls out evaluatorHandle before getting here.\n that.evaluatorHandle.postMessage(msg);\n });\n }\n\n /**\n * Clear all timers that are set inside the sandbox (evaluator) environment.\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n clearSandboxTimers() {\n const that = this;\n \n return new Promise(function sandbox$clearSandboxTimersPromise(resolve, reject) {\n let successCb, failTimeout;\n let msg = {\n request: 'clearTimers',\n };\n\n 
successCb = that.ee.once('clearTimersDone', function sandbox$clearSandboxTimers$success() {\n if (failTimeout === false)\n return; /* already rejected */\n clearTimeout(failTimeout);\n failTimeout = false;\n resolve();\n });\n\n failTimeout = setTimeout(function sanbox$clearSandboxTimers$fail() {\n if (failTimeout === false)\n return; /* already resolved */\n \n that.ee.removeListener('clearTimersDone', successCb);\n that.terminate(false);\n failTimeout = false;\n \n reject(new Error('clearTimers never received clearTimersDone event from sandbox'));\n }, 3000 * timeDilation); /* XXXwg need tuneable */\n\n if (that.evaluatorHandle) // Sometimes that.terminate nulls out evaluatorHandle before getting here.\n that.evaluatorHandle.postMessage(msg);\n });\n }\n\n /**\n * Sends a post message to describe its capabilities.\n *\n * Side effect: Sets the capabilities property of the current sandbox.\n *\n * @returns {Promise} Resolves with the sandbox's capabilities. Rejects with\n * an error saying a response was not received.\n * @memberof Sandbox\n */\n describe() {\n debug('Beginning to describe evaluator');\n const that = this;\n \n return new Promise(function sandbox$describePromise(resolve, reject) {\n if (that.evaluatorHandle === null) {\n return reject(new Error('Evaluator has not been initialized.'));\n }\n\n /**\n * Opted to create a flag for the describe response being received so that\n * we don't have to *hoist* the timeout's id to clear it in the response\n * handler.\n */\n let didReceiveDescribeResponse = false;\n const describeResponseHandler = that.ee.once('describe', (data) => {\n didReceiveDescribeResponse = true;\n const { capabilities } = data;\n if (typeof capabilities === 'undefined') {\n reject(new Error('Did not receive capabilities from describe response.'));\n }\n that.capabilities = capabilities;\n\n // Currently only used in tests. May use the event in the future.\n that.emit('described', capabilities);\n debug('Evaluator has been described');\n resolve(capabilities);\n });\n const describeResponseFailedHandler = () => {\n if (!didReceiveDescribeResponse) {\n that.ee.removeListener('describe', describeResponseHandler);\n that.terminate(false);\n reject(new Error( 'Describe message timed-out. No describe response was received from the describe command.'));\n }\n };\n\n const message = {\n request: 'describe',\n };\n\n // Arbitrarily set the waiting time.\n setTimeout(describeResponseFailedHandler, 6000 * timeDilation); /* XXXwg need tuneable */\n assert(that.evaluatorHandle); // It is possible that that.terminate nulls out evaluatorHandle before getting here.\n that.evaluatorHandle.postMessage(message);\n });\n }\n\n /**\n * Passes the job's requirements object into the sandbox so that the global\n * access lists can be updated accordingly.\n *\n * e.g. 
disallow access to OffscreenCanvas without\n * environment.offscreenCanvas=true present.\n *\n * Must be called after @start.\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n applySandboxRequirements(requirements) {\n const that = this;\n \n return new Promise(function sandbox$applySandboxRequirementsPromise(resolve, reject) {\n const message = {\n requirements,\n request: 'applyRequirements',\n };\n let wereRequirementsApplied = false;\n\n const successCb = that.ee.once(\n 'applyRequirementsDone',\n function sandbox$applyRequirements$success() {\n wereRequirementsApplied = true;\n resolve();\n },\n );\n\n assert(typeof message.requirements === 'object');\n that.evaluatorHandle.postMessage(message);\n\n setTimeout(function sandbox$finishApplySandboxRequirements() {\n if (!wereRequirementsApplied) {\n that.ee.removeListener('applyRequirementsDone', successCb);\n that.terminate(false);\n reject(new Error('applyRequirements never received applyRequirementsDone response from sandbox'));\n }\n }, 3000 * timeDilation); /* XXXwg needs tunable */\n });\n }\n\n /**\n * Executes a slice received from the supervisor.\n * Must be called after @start.\n *\n * @param {Slice} slice - bare minimum data required for the job/job code to be executed on\n * @param {number} [delay = 0] the delay that this method should wait before beginning work, used to avoid starting all sandboxes at once\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n\n async work (slice, delay = 0) {\n const that = this;\n\n assert(slice);\n\n // cf. DCP-1720\n this.resetSliceTimeReport();\n \n // Now wait for the delay if provided, prevents many sandboxes starting at once from crashing the supervisor\n if (delay > 0) await new Promise(resolve => setTimeout(resolve, (delay + 1) * timeDilation));\n if (!this.isWorking) return; // sandbox.terminate could have been called during the delay timeout\n\n // Prepare the sandbox to begin work\n // will be replaced by `assign` message that should be called before emitting a `work` message\n if (this.jobAddress !== slice.jobAddress) {\n throw new Error(`Sandbox.run: Sandbox is already assigned and jobAddress doesn't match previous (${this.jobAddress} !== ${slice.jobAddress})`);\n }\n\n let sliceHnd = { job: this.public, sandbox: this };\n await this.resetSandboxState();\n if (!this.slice) {\n console.error(`Slice for job ${this.jobAddress} vanished during work initialization - aborting`);\n return;\n }\n\n const { datum: inputDatum, error: dataError } = slice;\n\n this.resetProgressTimeout();\n this.resetSliceTimeout();\n\n return new Promise(function sandbox$$workPromise(resolve, reject) {\n let onSuccess, onFail\n\n onSuccess = that.ee.once('resolve', function sandbox$$work$success (event) {\n that.ee.removeListener('reject', onFail)\n resolve(event)\n }.bind(that));\n\n onFail = that.ee.once('reject', function sandbox$$work$fail (err) {\n that.ee.removeListener('resolve', onSuccess)\n reject(err)\n }.bind(that))\n\n that.sliceStartTime = Date.now();\n that.progress = null;\n that.progressReports = {\n last: undefined,\n lastDeterministic: undefined,\n };\n\n that.resetProgressTimeout();\n that.resetSliceTimeout();\n that.emit('start', sliceHnd);\n \n if(dataError){\n that.ee.removeListener('resolve', onSuccess);\n that.ee.removeListener('reject', onFail);\n setTimeout(() => reject(dataError), 0)\n\n } else {\n that.evaluatorHandle.postMessage({\n request: 'main',\n data: inputDatum,\n })\n }\n })\n .then(async function 
sandbox$$work$then(event) {\n // prevent any hanging timers from being fired\n await that.clearSandboxTimers();\n\n // TODO: Should sliceHnd just be replaced with that.public?\n that.emit('slice', sliceHnd); /** @todo: decide which event is right */\n that.emit('sliceFinish', event);\n that.emit('complete', that.jobAddress);\n\n that.state.set(WORKING, ASSIGNED);\n that.slice = false;\n\n return event;\n })\n .catch((err) => { \n if (err.name === 'EWORKREJECT') {\n that.rejectionData = err;\n that.evaluatorHandle.postMessage({ request: 'resetAndGetCPUTime' })\n } else { // sandbox termination for rejected work happens in Supervisor.handleRejectedWork\n // Ceci is the reject callback for when the slice throws an error\n that.terminate(false);\n }\n\n that.emit('error', err, 'slice');\n\n if (err instanceof NoProgressError) {\n that.emit('workEmit', {\n eventName: 'noProgress',\n payload: {\n timestamp: Date.now() - that.sliceStartTime,\n data: that.slice.datumUri,\n progressReports: that.progressReports,\n }\n });\n }\n throw err;\n })\n .finally(function sandbox$$work$finally() {\n that.emit('end', sliceHnd);\n });\n }\n\n resetProgressTimeout() {\n if (this.progressTimeout) {\n clearTimeout(this.progressTimeout);\n this.progressTimeout = null;\n }\n\n this.progressTimeout = setTimeout(() => {\n if (this.options.ignoreNoProgress) {\n return console.warn(\"ENOPROGRESS silenced by localExec: In a remote worker, this slice would be stopped for not calling progress frequently enough.\");\n }\n\n this.ee.emit('reject', new NoProgressError(`No progress event was received in the last ${dcpConfig.worker.sandbox.progressTimeout / 1000} seconds.`));\n }, +dcpConfig.worker.sandbox.progressTimeout * timeDilation);\n }\n\n resetSliceTimeout() {\n if (this.sliceTimeout) clearTimeout(this.sliceTimeout);\n\n this.sliceTimeout = setTimeout(() => {\n if (Sandbox.debugWork) return console.warn(\"Sandbox.debugWork: Ignoring slice timeout\");\n\n this.ee.emit('reject', new SliceTooSlowError(`Slice took longer than ${dcpConfig.worker.sandbox.sliceTimeout / 1000} seconds.`));\n }, +dcpConfig.worker.sandbox.sliceTimeout * timeDilation);\n }\n \n async handleRing0Message(data) {\n debugging('event:ring-0') && debug('event:ring-0', data);\n //handling a true ring 0 message\n switch (data.request) {\n case 'sandboxLoaded':\n // emit externally\n this.emit('sandboxLoaded', this)\n break;\n\n case 'scriptLoaded':\n // emit externally\n this.emit('scriptLoaded', data);\n if(data.result !== \"success\") {\n this.onerror(data);\n }\n break;\n \n case 'clearTimersDone':\n this.ee.emit(data.request, data);\n break;\n case 'totalCPUTime':\n this.updateTime(data);\n if (this.ee.listenerCount('resolve') > 0) {\n this.completeData.timeReport = this.sliceTimeReport;\n this.ee.emit('resolve', this.completeData);\n delete this.completeData;\n } else {\n this.rejectionData.timeReport = this.sliceTimeReport\n this.emit('rejectedWorkMetrics', this.rejectionData) // If there is no internal listener for 'resolve', the slice was rejected and\n delete this.rejectionData; // we need to send the rejected metrics to the supervisor\n } \n break;\n case 'error': {\n // Warning: rejecting here with just event.data.error causes issues\n // where the reject handlers modify the object so it interferes with the\n // workEmit event payload, wrapping in an Error instance copies the values\n const wrappedError = fillInError(UncaughtExceptionError, data.error);\n\n if (this.ee.listenerCount('reject') > 0) {\n this.ee.emit('reject', wrappedError);\n } 
else {\n // This will happen if the error is thrown during initialization\n throw e;\n }\n break;\n }\n default: {\n const errorMsg = new Error('Received unhandled request from sandbox: ' + data.request + '\\n\\t' + JSON.stringify(data));\n console.error(errorMsg);\n break;\n } \n }\n }\n\n async handleRing1Message(data) {\n switch (data.request) {\n case 'applyRequirementsDone':\n // emit internally\n this.ee.emit(data.request, data)\n break;\n default: {\n const errorMsg = new Error('Received unhandled request from sandbox ring 1: ' + data.request + '\\n\\t' + JSON.stringify(data));\n console.error(errorMsg)\n break; \n }\n }\n }\n\n async handleRing2Message(data) {\n debugging('event:ring-2') && debug('event:ring-2', data);\n switch (data.request) {\n case 'dependency': {\n let moduleData;\n try {\n moduleData = await this.moduleCache.fetchModule(data.data, this.jobAddress);\n } catch (error) {\n /*\n * In the event of an error here, we want to let the client know there was a problem in\n * loading their module. However, there hasn't yet been an actual slice assigned to the sandbox.\n * Therefore, we assign 'slice 0' to the sandbox, a slice that will never exist, and is used\n * purely for this purpose. \n */\n this.slice = {\n jobAddress: this.jobAddress,\n sliceNumber: 0,\n };\n\n const payload = {\n name: error.name,\n timestamp: error.timestamp,\n message: error.message,\n };\n\n const wrappedError = fillInError(RemoteFetchError, error);\n\n this.emit('workEmit', {\n eventName: 'error',\n payload,\n });\n this.ee.emit('reject', wrappedError);\n break;\n }\n this.evaluatorHandle.postMessage({\n request: 'moduleGroup',\n data: moduleData,\n id: data.id,\n });\n break;\n }\n case 'error':\n /*\n * Ring 2 error messages will only fire for problems inside of the worker that are separate from\n * the work function. In most cases there are other handlers for situations where 'error' may be emitted\n * such as timeouts if the expected message isn't recieved. Thus, we will output the error, but nothing else.\n */\n console.error(data.error);\n break;\n case 'describe':\n case 'evalResult':\n case 'resetStateDone':\n case 'assigned':\n // emit internally\n this.ee.emit(data.request, data);\n break;\n case 'reject':\n // emit internally\n this.ee.emit(data.request, data.error);\n break;\n default: {\n const error = new Error(`Received unhandled request from sandbox ring 2. Data: ${JSON.stringify(data, null, 2)}`);\n console.error(error);\n break;\n }\n }\n }\n\n async handleRing3Message(data) {\n switch (data.request) {\n case 'complete':\n clearTimeout(this.progressTimeout);\n clearTimeout(this.sliceTimeout);\n this.progressTimeout = this.sliceTimeout = null;\n\n if (this.progress === null) {\n if (this.options.ignoreNoProgress) {\n console.warn(\"ENOPROGRESS silenced by localExec: Progress was not called during this slice's execution, in a remote sandbox this would cause the slice to be failed\");\n } else {\n // If a progress update was never received (progress === null) then reject\n this.ee.emit('reject', new NoProgressError('Sandbox never emitted a progress event.'));\n break;\n }\n }\n this.evaluatorHandle.postMessage({ request: 'resetAndGetCPUTime' })\n this.progress = 100;\n this.completeData = data;\n // The timing report and resolve will be emitted when the CPU time is received. 
\n break;\n case 'progress': {\n let { progress, indeterminate, throttledReports, value } = data;\n this.progress = progress;\n const progressReport = {\n timestamp: Date.now() - this.sliceStartTime,\n progress,\n value,\n throttledReports,\n }\n this.progressReports.last = progressReport;\n if (!indeterminate) {\n this.progressReports.lastDeterministic = progressReport;\n }\n\n this.resetProgressTimeout();\n this.emit('sliceProgress', data);\n break;\n }\n case 'noProgress':\n this.ee.emit('reject', new NoProgressError(data.message));\n break;\n case 'console':\n data.payload.message = scopedKvin.marshal(data.payload.message);\n this.emit('workEmit', {\n eventName: 'console',\n payload: data.payload,\n });\n break;\n case 'emitEvent':/* ad-hoc event from the sandbox (work.emit) */\n this.emit('workEmit', {\n eventName: 'custom',\n payload: data.payload\n });\n break;\n case 'measurement':\n this.updateTime(data);\n break;\n case 'sandboxError': /* the sandbox itself has an error condition */\n this.emit('sandboxError', data.error);\n break;\n case 'workError': { /* the work function threw/rejected */\n this.emit('workEmit', {\n eventName: 'error',\n payload: data.error,\n });\n\n // Warning: rejecting here with just .data.error causes issues\n // where the reject handlers modify the object so it interferes with the\n // workEmit payload, wrapping in an Error instance copies the values\n const wrappedError = fillInError(UncaughtExceptionError, data.error);\n\n if (this.ee.listenerCount('reject') > 0) {\n this.ee.emit('reject', wrappedError);\n } else {\n // This will happen if the error is thrown during initialization\n throw wrappedError;\n }\n break;\n }\n default: {\n const errorMsg = new Error('Received unhandled request from sandbox ring 3: ' + data.request + '\\n\\t' + JSON.stringify(data));\n console.error(errorMsg)\n break; \n }\n }\n }\n\n /**\n * Handles progress and completion events from sandbox.\n * Unless explicitly returned out of this function will re-emit the event\n * on @this.ee where the name of the event is event.data.request.\n *\n * @param {object} event - event received from the sandbox\n */\n async onmessage(event) {\n debugging('event') && debug('event', event);\n if (Sandbox.debugEvents) {\n console.debug('sandbox - eventDebug:', {\n id: this.id,\n state: this.state ? this.state.valueOf() : '<undefined>',\n event: JSON.stringify(event)\n })\n }\n\n const { data } = event;\n const ringLevel = data.ringSource\n\n // Give the data to a handler depending on ring level\n if (ringLevel === -1) {\n console.error('Message sent directly from raw postMessage. 
Terminating worker...');\n console.debug(event);\n return this.terminate(true);\n } else {\n const handler = this.ringMessageHandlers[ringLevel];\n if (handler) {\n handler.call(this, data.value);\n } else {\n console.warn(`No handler defined for message from ring ${ringLevel}`);\n console.debug(event);\n }\n }\n }\n\n /**\n * Error handler for the internal sandbox.\n * Currently just logs the errors that the sandbox spits out.\n */\n onerror(event) {\n console.error('Sandbox emitted an error:', event);\n this.terminate(true, true);\n }\n\n /**\n * Clears the timeout and terminates the sandbox and sometimes emits a reject event.\n *\n * @param {boolean} [reject = true] - if true emit reject event\n * @param {boolean} [immediate = false] - passed to terminate, used by standaloneWorker to immediately close the connection\n */\n terminate (reject = true, immediate = false) {\n debugging() && console.debug(`Sandbox.terminate ${this.identifier}, count: ${++sbCnter}`);\n\n if (this.isTerminated) return;\n this.terminated = true;\n\n clearTimeout(this.progressTimeout);\n clearTimeout(this.sliceTimeout);\n this.progressTimeout = this.sliceTimeout = null;\n \n if (this.evaluatorHandle && typeof this.evaluatorHandle.terminate === 'function') {\n try {\n this.evaluatorHandle.terminate(immediate);\n this.evaluatorHandle = null;\n } catch (e) {\n console.error(`Error terminating sandbox ${this.id}:`, e);\n } finally {\n this.emit('terminate', this);\n }\n }\n\n if (reject) {\n this.ee.emit('reject', new Error(`Sandbox ${this.id} was terminated.`));\n }\n\n this.emit('terminated');\n }\n\n /**\n * Attempts to stop the sandbox from doing completing its current\n * set of work without terminating the working. \n * \n *** Until stop is implemented properly, use terminate(false).\n *\n * @todo Properly implement stop\n */\n stop () {\n //throw new Error('Sandbox.stop is not yet implemented.')\n this.terminate(false);\n }\n\n /**\n * ringNPostMessage can send a `measurement` request and update these\n * totals.\n */\n updateTime (measurementEvent) {\n ['total', 'CPU', 'webGL'].forEach((key) => {\n if (measurementEvent[key]) this.sliceTimeReport[key] += measurementEvent[key];\n })\n }\n\n resetSliceTimeReport () {\n this.sliceTimeReport = {\n total: 0,\n CPU: 0,\n webGL: 0,\n }\n }\n}\n\nSandbox.idCounter = 1;\nSandbox.debugWork = false;\nSandbox.debugState = false;\nSandbox.debugEvents = false;\n\nexports.Sandbox = Sandbox;\nexports.SandboxError = SandboxError;\nexports.NoProgressError = NoProgressError;\nexports.SliceTooSlowError = SliceTooSlowError;\nexports.UncaughtExceptionError = UncaughtExceptionError;\nexports.RemoteFetchError = RemoteFetchError;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/sandbox2.js?");
4554
+ eval("/* eslint-disable require-await */\n// NOTE - need timeout/postmessage function\n/**\n * @file dcp-client/worker/supervisor2/sandbox.js\n *\n * A sandbox that when constructed and assigned can do work for\n * a distributed slice. A sandbox runs for a single slice at a time.\n *\n * Usage (simplified...):\n * const sandbox = new Sandbox(this, { ...this.options.sandboxOptions });\n * await sandbox.start(delayStartMs);\n * await sandbox.assign(this, delayAssignMs);\n * return sandbox.work(delayWorkMs)\n * .then((result) => { \n * slice.collectResult(result, true);\n * sandbox.changeWorkingToAssigned();\n * this.supervisor.recordResult(slice)\n * })\n * .catch((error) => {\n * slice.collectResult(error, false);\n * const reason = this.supervisor.handleSandboxWorkError(sandbox, slice, error);\n * this.supervisor.returnSlice(slice, reason);\n * this.returnSandbox(sandbox);\n * });\n *\n * Debug flags:\n * Sandbox.debugWork = true // - turns off 30 second timeout to let user debug sandbox innards more easily\n * Sandbox.debugState = true // - logs all state transitions for this sandbox\n * Sandbox.debugEvents = true // - logs all events received from the sandbox\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * Wes Garland, wes@kingsds.network\n * Paul, paul@kingsds.network\n * @date May 2019\n * May 2019\n * Decemeber 2020\n * June 2022\n * @module sandbox\n */\n\n/* global dcpConfig */ // eslint-disable-line no-redeclare\n// @ts-check\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { assert, assertEq3 } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst nanoid = (__webpack_require__(/*! nanoid */ \"./node_modules/nanoid/index.browser.js\").nanoid);\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { truncateAddress } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\nconst scopedKvin = new kvin.KVIN({Object: ({}).constructor,\n Array: ([]).constructor, \n Function: (()=>{}).constructor});\n\nlet timeDilation = 1;\nif (DCP_ENV.platform === 'nodejs') {\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n /** Make timers 10x slower when running in niim */\n timeDilation = (requireNative('module')._cache.niim instanceof requireNative('module').Module) ? 
10 : 1;\n}\nconst addressTruncationLength = 20;\nconst workerTuning = dcpConfig.worker;\ntimeDilation = 1;\nlet sbCnter = 0; // Global counter of terminated sandboxes guarded by debugging().\n\n/**\n * Wraps console.debug to emulate debug module prefixing messages on npm.\n * @param {...any} args\n */\nconst debug = (...args) => {\n if (debugging()) {\n console.debug('Sandbox:', ...args);\n }\n};\n\n// Sandbox states\nconst UNREADY = 'UNREADY' // No Sandbox (web worker, saworker, etc) has been constructed yet\nconst READYING = 'READYING' // Sandbox is being constructed and environment (bravojs, env) is being set up\nconst READY_FOR_ASSIGN = 'READY_FOR_ASSIGN' // Sandbox is ready to be assigned\nconst ASSIGNED = 'ASSIGNED' // Sandbox is assigned but not working\nconst ASSIGNING = 'ASSIGNING' // Sandbox is in the process of being ASSIGNED\nconst WORKING = 'WORKING' // Sandbox is working\nconst BROKEN = 'BROKEN' // Sandbox is broken and should be terminated.\nconst EVAL_RESULT_PREFIX = 'evalResult::';\n\nclass SandboxError extends Error { constructor(errorCode, ...args) { super(...args); this.errorCode = errorCode; }}\nclass NoProgressError extends SandboxError { constructor(...args) { super('ENOPROGRESS', ...args); } }\nclass SliceTooSlowError extends SandboxError { constructor(...args) { super('ESLICETOOSLOW', ...args); } }\nclass UncaughtExceptionError extends SandboxError { constructor(...args) { super('EUNCAUGHT', ...args); } }\nclass RemoteFetchError extends SandboxError { constructor(...args) { super('EFETCH', ...args); } }\n\nfunction fillInError(errorCtor, errorIn) {\n const errorOut = new errorCtor(errorIn.message);\n errorOut.name = errorIn.name;\n errorOut.fileName = errorIn.fileName;\n errorOut.lineNumber = errorIn.lineNumber;\n errorOut.stack = errorIn.stack;\n return errorOut;\n}\n\n/** @typedef {import('dcp/common/dcp-events').EventEmitter} EventEmitter */\n/** @typedef {import('./index').Supervisor} Supervisor */\n/** @typedef {import('./slice2').Slice} Slice */\n/** @typedef {import('./job-manager').JobManager} JobManager */\n/** @typedef {import('./module-cache').ModuleCache} ModuleCache */\n\n/**\n * @access public\n * @typedef {object} SandboxOptions\n * @constructor {function} [SandboxConstructor]\n * @property {boolean} [ignoreNoProgress] - When true, the sandbox will not be stopped for not calling progress\n */\n\nclass Sandbox extends EventEmitter {\n /**\n * A Sandbox (i.e. a worker sandbox) which executes distributed slices.\n *\n * @constructor\n * @param {Supervisor} supervisor\n * @param {SandboxOptions} options\n */\n constructor (supervisor, options) {\n super('Sandbox');\n /** @type {Supervisor} */\n this.supervisor = supervisor; /** XXXpfr @todo Temporary for sandbox1 compat. Remove later. Directly pass in moduleCache. */\n /** @type {ModuleCache} */\n this.moduleCache = supervisor.moduleCache;\n /** @type {SandboxOptions} */\n this.options = {\n ignoreNoProgress: false,\n ...options,\n SandboxConstructor: options.SandboxConstructor || (__webpack_require__(/*! 
../evaluators */ \"./src/dcp-client/worker/evaluators/index.js\").BrowserEvaluator),\n }\n /** @type {Synchronizer} */\n this.state = new Synchronizer(UNREADY, [ UNREADY, READYING, READY_FOR_ASSIGN, ASSIGNING, ASSIGNED, WORKING, BROKEN ]);\n\n /** @type {string} */\n this.jobAddress = null;\n /** @type {object} */\n this.evaluatorHandle = null;\n /** @type {object} */\n this.capabilities = null;\n /** @type {EventEmitter} */\n this.ee = new EventEmitter('SandboxInternal');\n\n /** @type {boolean} */\n this.terminated = false;\n /** @type {number?} */\n this.progress = 100;\n /** @type {object} */\n this.progressReports = null;\n /** @type {object} */\n this.progressTimeout = null;\n /** @type {object} */\n this.sliceTimeout = null;\n /** @type {object} */\n this.rejectionData = null;\n /** @type {Slice} */\n this.slice = null;\n /** @type {number?} */\n this.sliceStartTime = null;\n /** @type {number} */\n this.id = Sandbox.getNewId();\n\n this.ringMessageHandlers = [\n this.handleRing0Message,\n this.handleRing1Message,\n this.handleRing2Message,\n this.handleRing3Message,\n ];\n\n this.resetSliceTimeReport();\n }\n\n /** @type {string} - debug string that characterizes sandbox. */\n get identifier() {\n if (!this.jobAddress) return `${this.id}.${this.state}`;\n return `${this.id}.${truncateAddress(this.jobAddress, addressTruncationLength)}.${this.state}`;\n }\n\n static getNewId() {\n return Sandbox.idCounter++;\n }\n\n /** @type {boolean} */\n get isReadyForAssign () {\n return this.state.is(READY_FOR_ASSIGN);\n }\n /** @type {boolean} */\n get isAssigned () {\n return this.state.is(ASSIGNED);\n }\n /** @type {boolean} */\n get isWorking () {\n return this.state.is(WORKING);\n }\n /** @type {boolean} */\n get isBroken () {\n return this.state.is(BROKEN);\n }\n /** @type {boolean} */\n get isTerminated () {\n return this.terminated;\n }\n\n /**\n * Mark WORKING sandbox as ASSIGNED in preparation for possible reuse,\n */\n changeWorkingToAssigned () {\n this.state.testAndSet(WORKING, ASSIGNED);\n }\n \n /**\n * Remove from collection of ASSIGNED sandboxes in preparation for termination,\n */\n markAsUnready() {\n this.state.testAndSet(ASSIGNED, UNREADY);\n }\n \n /**\n * Transitions: ASSIGNED --> WORKING and assigns the slice.\n * @param {Slice} slice \n */\n markAsWorking (slice) {\n if (!this.isAssigned)\n throw new Error(`Sandbox ${this.identifier} is not ready to work`);\n this.state.set(ASSIGNED, WORKING);\n this.slice = slice;\n }\n \n /**\n * Fancy assert.\n */\n verifyWorking () {\n if (!this.isWorking) {\n throw new Error(`Sandbox ${this.identifier} is not working`);\n }\n }\n\n /**\n * Error feedback to user.\n * @param {string} message\n * @param {*} extra\n */\n error(message, extra)\n {\n const dcpError = new DCPError(message, extra);\n this.emit('error', dcpError);\n }\n\n /**\n * Readies the sandbox. 
This will result in the sandbox being ready and not assigned,\n * it will need to be assigned with a job before it is able to do work.\n *\n * @todo maybe preload specific modules or let the cache pass in what modules to load?\n * @param {number} [delay=0]\n * @returns {Promise<Sandbox>}\n * @throws on failure to ready\n */\n async start(delay = 0) {\n this.state.set(UNREADY, READYING);\n\n if (delay > 0) await new Promise((resolve) => setTimeout(resolve, delay * timeDilation));\n\n try {\n // RING 0\n this.evaluatorHandle = new this.options.SandboxConstructor({\n name: `DCP Sandbox #${this.id}`,\n });\n this.evaluatorHandle.onerror = this.onerror.bind(this);\n\n const messageHandler = this.onmessage.bind(this);\n this.evaluatorHandle.onmessage = function onmessage(event)\n {\n let data;\n if (event.data.serialized)\n data = kvin.parse(event.data.message);\n else\n data = kvin.unmarshal(event.data);\n messageHandler({ data });\n }\n\n const evaluatorPostMessage = this.evaluatorHandle.postMessage.bind(this.evaluatorHandle);\n this.evaluatorHandle.postMessage = function postMessage(message)\n {\n evaluatorPostMessage(scopedKvin.marshal(message));\n }\n\n const that = this;\n this.evaluatorHandle.addEventListener('end', () => that.terminate(true));\n\n // Now in RING 1\n\n // Now in RING 2\n await this.describe();\n this.state.set(READYING, READY_FOR_ASSIGN);\n this.emit('ready', this);\n } catch (error) {\n this.error('Failed to start the sandbox.', error);\n this.state.set(READYING, BROKEN);\n this.terminate(false);\n throw error;\n }\n \n return this;\n }\n\n /**\n * This will assign the sandbox with a job, loading its sandbox code\n * into the sandbox.\n *\n * @param {JobManager} jobManager - The job manager that will be the owner of this sandbox.\n * @param {number} [delay=0]\n * @returns {Promise<Sandbox>}\n * @throws on initialization failure\n */\n async assign (jobManager, delay = 0) {\n \n if (delay > 0) await new Promise((resolve) => setTimeout(resolve, delay * timeDilation));\n \n this.state.set(READY_FOR_ASSIGN, ASSIGNING);\n this.jobAddress = jobManager.address;\n this.job = jobManager.jobMessage;\n \n assertEq3(this.job.address, this.jobAddress);\n assert(typeof this.job === 'object');\n assert(typeof this.job.requirements === 'object');\n assert(Array.isArray(this.job.dependencies));\n assert(Array.isArray(this.job.requirePath));\n\n // Extract public data from job, with defaults\n this.public = Object.assign({\n name: `Anonymous Job ${truncateAddress(this.jobAddress, addressTruncationLength)}`,\n description: 'Discreetly helping make the world smarter.',\n link: 'https://distributed.computer/about',\n }, this.job.public);\n\n // Future: We may want other filename tags for appliances // RR Nov 2019\n\n // Important: The order of applying requirements before loading the sandbox code\n // is important for modules and sandbox code to set globals over the whitelist.\n await this.applySandboxRequirements(this.job.requirements);\n await this.assignEvaluator();\n \n return this;\n }\n \n async assignEvaluator() {\n debug('Begin assigning job to evaluator');\n const that = this;\n\n return new Promise(function sandbox$$assignEvaluatorPromise(resolve, reject) {\n const message = {\n request: 'assign',\n job: that.job,\n sandboxConfig: workerTuning.sandbox,\n };\n\n /* note - onFailListener used for removal. This is necessary due to a bug in ee.once. 
/wg Feb 2022 */\n \n const onSuccess = (event) => {\n // eslint-disable-next-line no-use-before-define\n that.ee.removeListener('reject', onFailListener);\n that.emit('assigned', event.jobAddress); /** XXXpfr @todo Who is listening to event 'assigned' ? */\n debug('Job assigned to evaluator');\n resolve();\n };\n\n const onFail = (error) => {\n // eslint-disable-next-line no-use-before-define\n that.ee.removeListener('assigned', onSuccessListener);\n reject(error);\n };\n\n const onSuccessListener = that.ee.once('assigned', onSuccess); // Emitted by handleRing2Message.\n const onFailListener = that.ee.once('reject', onFail);\n that.evaluatorHandle.postMessage(message);\n }).then((event) => {\n that.state.set(ASSIGNING, ASSIGNED);\n return event;\n }).catch(error => {\n that.state.set(ASSIGNING, BROKEN);\n debug('Failure in assigning job to evaluator', error);\n throw error;\n });\n }\n\n /**\n * Evaluates a string inside the sandbox.\n *\n * @param {string} code - the code to evaluate in the sandbox\n * @param {string} filename - the name of the 'file' to help with debugging,\n * no longer working though?\n * @returns {Promise} - resolves with eval result on success, rejects\n * otherwise\n */\n eval(code, filename) {\n const that = this;\n \n return new Promise(function sandbox$$eval$Promise(resolve, reject) {\n let msgId = nanoid();\n let msg = {\n request: 'eval',\n data: code,\n filename,\n msgId, \n };\n\n const eventId = EVAL_RESULT_PREFIX + msgId;\n\n const onSuccess = (event) => {\n // eslint-disable-next-line no-use-before-define\n that.ee.removeListener('reject', onFailListener);\n resolve(event);\n }\n\n const onFail = (error) => {\n // eslint-disable-next-line no-use-before-define\n that.ee.removeListener(eventId, onSuccessListener);\n reject(error);\n }\n\n const onSuccessListener = that.ee.once(eventId, onSuccess);\n const onFailListener = that.ee.once('reject', onFail);\n\n that.evaluatorHandle.postMessage(msg);\n })\n }\n\n /**\n * Resets the state of the bootstrap, without resetting the sandbox function if assigned.\n * Mostly used to reset the progress status before reusing a sandbox on another slice.\n * Must be called after @start.\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n resetSandboxState () {\n const that = this;\n\n return new Promise(function sandbox$resetSandboxStatePromise(resolve, reject) {\n let successCb, failTimeout;\n let msg = {\n request: 'resetState',\n };\n\n successCb = that.ee.once('resetStateDone', function sandbox$resetSandboxState$success () {\n if (failTimeout === false)\n return; /* already rejected */\n clearTimeout(failTimeout);\n failTimeout = false;\n resolve();\n });\n\n failTimeout = setTimeout(function sandbox$resetSandboxState$fail() {\n if (failTimeout === false)\n return; /* already resolved */\n \n that.ee.removeListener('resetStateDone', successCb);\n that.terminate(false);\n failTimeout = false;\n\n reject(new Error('resetState never received resetStateDone event from sandbox'));\n }, 3000 * timeDilation); /* XXXwg need tuneable */\n\n assert(that.evaluatorHandle); // It is possible that that.terminate nulls out evaluatorHandle before getting here.\n that.evaluatorHandle.postMessage(msg);\n });\n }\n\n /**\n * Clear all timers that are set inside the sandbox (evaluator) environment.\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n clearSandboxTimers() {\n const that = this;\n \n return new Promise(function sandbox$clearSandboxTimersPromise(resolve, 
reject) {\n let successCb, failTimeout;\n let msg = {\n request: 'clearTimers',\n };\n\n successCb = that.ee.once('clearTimersDone', function sandbox$clearSandboxTimers$success() {\n if (failTimeout === false)\n return; /* already rejected */\n clearTimeout(failTimeout);\n failTimeout = false;\n resolve();\n });\n\n failTimeout = setTimeout(function sanbox$clearSandboxTimers$fail() {\n if (failTimeout === false)\n return; /* already resolved */\n \n that.ee.removeListener('clearTimersDone', successCb);\n that.terminate(false);\n failTimeout = false;\n \n reject(new Error('clearTimers never received clearTimersDone event from sandbox'));\n }, 3000 * timeDilation); /* XXXwg need tuneable */\n\n if (that.evaluatorHandle) // Sometimes that.terminate nulls out evaluatorHandle before getting here.\n that.evaluatorHandle.postMessage(msg);\n });\n }\n\n /**\n * Sends a post message to describe its capabilities.\n *\n * Side effect: Sets the capabilities property of the current sandbox.\n *\n * @returns {Promise} Resolves with the sandbox's capabilities. Rejects with\n * an error saying a response was not received.\n * @memberof Sandbox\n */\n describe() {\n debug('Beginning to describe evaluator');\n const that = this;\n \n return new Promise(function sandbox$describePromise(resolve, reject) {\n if (that.evaluatorHandle === null) {\n return reject(new Error('Evaluator has not been initialized.'));\n }\n\n /**\n * Opted to create a flag for the describe response being received so that\n * we don't have to *hoist* the timeout's id to clear it in the response\n * handler.\n */\n let didReceiveDescribeResponse = false;\n const describeResponseHandler = that.ee.once('describe', (data) => { // Emitted by handleRing2Message.\n didReceiveDescribeResponse = true;\n const { capabilities } = data;\n if (typeof capabilities === 'undefined') {\n reject(new Error('Did not receive capabilities from describe response.'));\n }\n that.capabilities = capabilities;\n\n // Currently only used in tests. May use the event in the future.\n that.emit('described', capabilities); /** XXXpfr @todo Who is listening to event 'described' ? */\n debug('Evaluator has been described');\n resolve(capabilities);\n });\n const describeResponseFailedHandler = () => {\n if (!didReceiveDescribeResponse) {\n that.ee.removeListener('describe', describeResponseHandler);\n that.terminate(false);\n reject(new Error( 'Describe message timed-out. No describe response was received from the describe command.'));\n }\n };\n\n const message = {\n request: 'describe',\n };\n\n // Arbitrarily set the waiting time.\n setTimeout(describeResponseFailedHandler, 6000 * timeDilation); /* XXXwg need tuneable */\n assert(that.evaluatorHandle); // It is possible that that.terminate nulls out evaluatorHandle before getting here.\n that.evaluatorHandle.postMessage(message);\n });\n }\n\n /**\n * Passes the job's requirements object into the sandbox so that the global\n * access lists can be updated accordingly.\n *\n * e.g. 
disallow access to OffscreenCanvas without\n * environment.offscreenCanvas=true present.\n *\n * Must be called after @start.\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n applySandboxRequirements(requirements) {\n const that = this;\n \n return new Promise(function sandbox$applySandboxRequirementsPromise(resolve, reject) {\n const message = {\n requirements,\n request: 'applyRequirements',\n };\n let wereRequirementsApplied = false;\n\n const successCb = that.ee.once(\n 'applyRequirementsDone',\n function sandbox$applyRequirements$success() {\n wereRequirementsApplied = true;\n resolve();\n },\n );\n\n assert(typeof message.requirements === 'object');\n that.evaluatorHandle.postMessage(message);\n\n setTimeout(function sandbox$finishApplySandboxRequirements() {\n if (!wereRequirementsApplied) {\n that.ee.removeListener('applyRequirementsDone', successCb);\n that.terminate(false);\n reject(new Error('applyRequirements never received applyRequirementsDone response from sandbox'));\n }\n }, 3000 * timeDilation); /* XXXwg needs tunable */\n });\n }\n\n /**\n * Executes a slice received from the supervisor.\n * Must be called after this.start, this.assign and this.markAsWorking .\n *\n * @param {number} [delay = 0] the delay that this method should wait before beginning work, used to avoid starting all sandboxes at once\n *\n * @returns {Promise} - resolves with result on success, rejects otherwise\n */\n\n async work (delay = 0) {\n const that = this;\n\n if (!this.slice)\n throw new Error('The slice in Sandbox.work has not been set; be sure to call markAsWorking before.');\n\n // cf. DCP-1720\n this.resetSliceTimeReport();\n \n // Now wait for the delay if provided, prevents many sandboxes starting at once from crashing the supervisor\n if (delay > 0) await new Promise(resolve => setTimeout(resolve, (delay + 1) * timeDilation));\n if (!this.isWorking) return; // sandbox.terminate could have been called during the delay timeout\n\n // Prepare the sandbox to begin work\n // will be replaced by `assign` message that should be called before emitting a `work` message\n if (this.jobAddress !== this.slice.jobAddress) {\n throw new Error(`Sandbox.run: Sandbox is already assigned and jobAddress doesn't match previous (${this.jobAddress} !== ${this.slice.jobAddress})`);\n }\n\n let sliceHnd = { job: this.public, sandbox: this };\n await this.resetSandboxState();\n if (!this.slice) {\n this.error(`Slice for job ${this.jobAddress} vanished during work initialization - aborting`);\n return;\n }\n\n const { datum: inputDatum, error: dataError } = this.slice;\n\n this.resetProgressTimeout();\n this.resetSliceTimeout();\n\n return new Promise(function sandbox$$workPromise(resolve, reject) {\n let onSuccess, onFail\n\n onSuccess = that.ee.once('resolve', function sandbox$$work$success (event) {\n that.ee.removeListener('reject', onFail)\n resolve(event)\n }.bind(that));\n\n onFail = that.ee.once('reject', function sandbox$$work$fail (err) {\n that.ee.removeListener('resolve', onSuccess)\n reject(err)\n }.bind(that))\n\n that.sliceStartTime = Date.now();\n that.progress = null;\n that.progressReports = {\n last: undefined,\n lastDeterministic: undefined,\n };\n\n that.resetProgressTimeout();\n that.resetSliceTimeout();\n that.emit('start', sliceHnd);\n \n if (dataError) {\n that.ee.removeListener('resolve', onSuccess);\n that.ee.removeListener('reject', onFail);\n that.emit('workEmit', {\n eventName: 'error',\n payload: {\n message: dataError.message,\n stack: 
dataError.stack,\n name: that.public.name\n }\n });\n setTimeout(() => reject(dataError), 0)\n\n } else {\n that.evaluatorHandle.postMessage({\n request: 'main',\n data: inputDatum,\n })\n }\n })\n .then(async function sandbox$$work$then(event) {\n // prevent any hanging timers from being fired\n await that.clearSandboxTimers();\n\n /** @todo Should sliceHnd just be replaced with that.public? */\n /** @todo Decide which event is right. */\n //that.emit('slice', sliceHnd); // Unused -- XXXpfr\n that.emit('sliceFinish', event);\n that.emit('complete', that.jobAddress);\n\n that.state.set(WORKING, ASSIGNED);\n that.slice = false;\n\n return event;\n })\n .catch((err) => { \n if (err.name === 'EWORKREJECT') {\n that.rejectionData = err;\n that.evaluatorHandle.postMessage({ request: 'resetAndGetCPUTime' })\n } else { // sandbox termination for rejected work happens in Supervisor.handleRejectedWork\n // Ceci is the reject callback for when the slice throws an error\n that.terminate(false);\n }\n\n that.emit('error', err, 'slice');\n\n if (err instanceof NoProgressError) {\n that.emit('workEmit', {\n eventName: 'noProgress',\n payload: {\n timestamp: Date.now() - that.sliceStartTime,\n data: that.slice.datumUri,\n progressReports: that.progressReports,\n }\n });\n }\n throw err;\n })\n .finally(function sandbox$$work$finally() {\n that.emit('end', sliceHnd); /** XXXpfr @todo Who is listening to event 'end' ? */\n });\n }\n\n resetProgressTimeout() {\n if (this.progressTimeout) {\n clearTimeout(this.progressTimeout);\n this.progressTimeout = null;\n }\n\n this.progressTimeout = setTimeout(() => {\n if (this.options.ignoreNoProgress)\n return this.emit('warning', \"ENOPROGRESS silenced by localExec: In a remote worker, this slice would be stopped for not calling progress frequently enough.\");\n\n this.ee.emit('reject', new NoProgressError(`No progress event was received in the last ${workerTuning.sandbox.progressTimeout / 1000} seconds.`));\n }, +workerTuning.sandbox.progressTimeout * timeDilation);\n }\n\n resetSliceTimeout() {\n if (this.sliceTimeout) clearTimeout(this.sliceTimeout);\n\n this.sliceTimeout = setTimeout(() => {\n if (Sandbox.debugWork) return this.emit('warning', 'Sandbox.debugWork: Ignoring slice timeout');\n\n this.ee.emit('reject', new SliceTooSlowError(`Slice took longer than ${workerTuning.sandbox.sliceTimeout / 1000} seconds.`));\n }, +workerTuning.sandbox.sliceTimeout * timeDilation);\n }\n \n async handleRing0Message(data) {\n debugging('event:ring-0') && debug('event:ring-0', data);\n //handling a true ring 0 message\n switch (data.request) {\n case 'sandboxLoaded':\n // emit externally\n this.emit('sandboxLoaded', this)\n break;\n\n case 'scriptLoaded':\n // emit externally\n this.emit('scriptLoaded', data);\n if(data.result !== \"success\") {\n this.onerror(data);\n }\n break;\n \n case 'clearTimersDone':\n this.ee.emit(data.request, data);\n break;\n case 'totalCPUTime':\n this.updateTime(data);\n if (this.ee.listenerCount('resolve') > 0) {\n this.completeData.timeReport = this.sliceTimeReport;\n this.ee.emit('resolve', this.completeData);\n delete this.completeData;\n } else {\n this.rejectionData.timeReport = this.sliceTimeReport\n this.emit('rejectedWorkMetrics', this.rejectionData) // If there is no internal listener for 'resolve', the slice was rejected and\n delete this.rejectionData; // we need to send the rejected metrics to the supervisor\n } \n break;\n case 'error': {\n // Warning: rejecting here with just event.data.error causes issues\n // where the reject 
handlers modify the object so it interferes with the\n // workEmit event payload, wrapping in an Error instance copies the values\n const wrappedError = fillInError(UncaughtExceptionError, data.error);\n\n if (this.ee.listenerCount('reject') > 0) {\n this.ee.emit('reject', wrappedError);\n } else {\n // This will happen if the error is thrown during initialization\n throw wrappedError;\n }\n break;\n }\n default: {\n this.error('Received unhandled request from sandbox: ' + data.request + '\\n\\t' + JSON.stringify(data));\n break;\n } \n }\n }\n\n async handleRing1Message(data) {\n switch (data.request) {\n case 'applyRequirementsDone':\n // emit internally\n this.ee.emit(data.request, data)\n break;\n default: {\n this.error('Received unhandled request from sandbox ring 1: ' + data.request + '\\n\\t' + JSON.stringify(data));\n break; \n }\n }\n }\n\n async handleRing2Message(data) {\n debugging('event:ring-2') && debug('event:ring-2', data);\n switch (data.request) {\n case 'dependency': {\n let moduleData;\n try {\n moduleData = await this.moduleCache.fetchModule(data.data, this.jobAddress);\n } catch (error) {\n /*\n * In the event of an error here, we want to let the client know there was a problem in\n * loading their module. However, there hasn't yet been an actual slice assigned to the sandbox.\n * Therefore, we assign 'slice 0' to the sandbox, a slice that will never exist, and is used\n * purely for this purpose. \n */\n this.slice = {\n jobAddress: this.jobAddress,\n sliceNumber: 0,\n };\n\n const payload = {\n name: error.name,\n timestamp: error.timestamp,\n message: error.message,\n };\n\n const wrappedError = fillInError(RemoteFetchError, error);\n\n this.emit('workEmit', {\n eventName: 'error',\n payload,\n });\n this.ee.emit('reject', wrappedError);\n break;\n }\n this.evaluatorHandle.postMessage({\n request: 'moduleGroup',\n data: moduleData,\n id: data.id,\n });\n break;\n }\n case 'error':\n /*\n * Ring 2 error messages will only fire for problems inside of the worker that are separate from\n * the work function. In most cases there are other handlers for situations where 'error' may be emitted\n * such as timeouts if the expected message isn't recieved. Thus, we will output the error, but nothing else.\n */\n this.error('event:ring-2: some error happened.', data.error);\n break;\n case 'describe':\n case 'evalResult':\n case 'resetStateDone':\n case 'assigned':\n // emit internally\n this.ee.emit(data.request, data);\n break;\n case 'reject':\n // emit internally\n this.ee.emit(data.request, data.error);\n break;\n default: {\n this.error(`Received unhandled request from sandbox ring 2. 
Data: ${JSON.stringify(data, null, 2)}`);\n break;\n }\n }\n }\n\n async handleRing3Message(data) {\n switch (data.request) {\n case 'complete':\n clearTimeout(this.progressTimeout);\n clearTimeout(this.sliceTimeout);\n this.progressTimeout = this.sliceTimeout = null;\n\n if (this.progress === null) {\n if (this.options.ignoreNoProgress) {\n this.emit('warning', \"ENOPROGRESS silenced by localExec: Progress was not called during this slice's execution, in a remote sandbox this would cause the slice to be failed\");\n } else {\n // If a progress update was never received (progress === null) then reject\n this.ee.emit('reject', new NoProgressError('Sandbox never emitted a progress event.'));\n break;\n }\n }\n this.evaluatorHandle.postMessage({ request: 'resetAndGetCPUTime' })\n this.progress = 100;\n this.completeData = data;\n // The timing report and resolve will be emitted when the CPU time is received. \n break;\n case 'progress': {\n let { progress, indeterminate, throttledReports, value } = data;\n this.progress = progress;\n const progressReport = {\n timestamp: Date.now() - this.sliceStartTime,\n progress,\n value,\n throttledReports,\n }\n this.progressReports.last = progressReport;\n if (!indeterminate) {\n this.progressReports.lastDeterministic = progressReport;\n }\n\n this.resetProgressTimeout();\n this.emit('sliceProgress', data);\n break;\n }\n case 'noProgress':\n this.ee.emit('reject', new NoProgressError(data.message));\n break;\n case 'console':\n data.payload.message = scopedKvin.marshal(data.payload.message);\n this.emit('workEmit', {\n eventName: 'console',\n payload: data.payload,\n });\n break;\n case 'emitEvent':/* ad-hoc event from the sandbox (work.emit) */\n this.emit('workEmit', {\n eventName: 'custom',\n payload: data.payload\n });\n break;\n case 'measurement':\n this.updateTime(data);\n break;\n case 'sandboxError': /* the sandbox itself has an error condition */\n this.emit('sandboxError', data.error);\n break;\n case 'workError': { /* the work function threw/rejected */\n this.emit('workEmit', {\n eventName: 'error',\n payload: data.error,\n });\n\n // Warning: rejecting here with just .data.error causes issues\n // where the reject handlers modify the object so it interferes with the\n // workEmit payload, wrapping in an Error instance copies the values\n const wrappedError = fillInError(UncaughtExceptionError, data.error);\n\n if (this.ee.listenerCount('reject') > 0) {\n this.ee.emit('reject', wrappedError);\n } else {\n // This will happen if the error is thrown during initialization\n throw wrappedError;\n }\n break;\n }\n default: {\n this.error('Received unhandled request from sandbox ring 3: ' + data.request + '\\n\\t' + JSON.stringify(data));\n break; \n }\n }\n }\n\n /**\n * Handles progress and completion events from sandbox.\n * Unless explicitly returned out of this function will re-emit the event\n * on @this.ee where the name of the event is event.data.request.\n *\n * @param {object} event - event received from the sandbox\n */\n async onmessage(event) {\n debugging('event') && debug('event', event);\n if (Sandbox.debugEvents) {\n console.debug('sandbox - eventDebug:', {\n id: this.id,\n state: this.state.valueOf(),\n event: JSON.stringify(event)\n })\n }\n\n const { data } = event;\n const ringLevel = data.ringSource\n\n // Give the data to a handler depending on ring level\n if (ringLevel === -1) {\n this.error('Message sent directly from raw postMessage. 
Terminating worker...');\n console.debug(event);\n return this.terminate(true);\n } else {\n const handler = this.ringMessageHandlers[ringLevel];\n if (handler) {\n handler.call(this, data.value);\n } else {\n this.emit('warning', `No handler defined for message from ring ${ringLevel}`);\n console.debug(event);\n }\n }\n }\n\n /**\n * Error handler for the internal sandbox.\n * Emits error event that gets handled up in the Worker class.\n */\n onerror(event) {\n if (event instanceof Error) this.error('Sandbox emitted an error:', event);\n else this.error(`Sandbox emitted an error: ${event}`);\n this.terminate(true, true);\n }\n\n /**\n * Clears the timeout and terminates the sandbox and sometimes emits a reject event.\n *\n * @param {boolean} [reject = true] - if true emit reject event\n * @param {boolean} [immediate = false] - passed to terminate, used by standaloneWorker to immediately close the connection\n */\n terminate (reject = true, immediate = false) {\n debugging() && console.debug(`Sandbox.terminate ${this.identifier}, count: ${++sbCnter}`);\n\n if (this.isTerminated) return;\n this.terminated = true;\n\n clearTimeout(this.progressTimeout);\n clearTimeout(this.sliceTimeout);\n this.progressTimeout = this.sliceTimeout = null;\n \n if (this.evaluatorHandle && typeof this.evaluatorHandle.terminate === 'function') {\n try {\n this.evaluatorHandle.terminate(immediate);\n this.evaluatorHandle = null;\n } catch (e) {\n this.error(`Error terminating sandbox ${this.id}:`, e);\n } finally {\n this.emit('terminate', this);\n }\n }\n\n if (reject) {\n this.ee.emit('reject', new Error(`Sandbox ${this.id} was terminated.`));\n }\n\n this.emit('terminated');\n }\n\n /**\n * Attempts to stop the sandbox from doing completing its current\n * set of work without terminating the working. \n * \n *** Until stop is implemented properly, use terminate(false).\n *\n * @todo Properly implement stop\n */\n stop () {\n //throw new Error('Sandbox.stop is not yet implemented.')\n this.terminate(false);\n }\n\n /**\n * ringNPostMessage can send a `measurement` request and update these\n * totals.\n */\n updateTime (measurementEvent) {\n ['total', 'CPU', 'webGL'].forEach((key) => {\n if (measurementEvent[key]) this.sliceTimeReport[key] += measurementEvent[key];\n })\n }\n\n resetSliceTimeReport () {\n this.sliceTimeReport = {\n total: 0,\n CPU: 0,\n webGL: 0,\n }\n }\n}\n\nSandbox.idCounter = 1;\nSandbox.debugWork = false;\nSandbox.debugState = false;\nSandbox.debugEvents = false;\n\nexports.Sandbox = Sandbox;\nexports.SandboxError = SandboxError;\nexports.NoProgressError = NoProgressError;\nexports.SliceTooSlowError = SliceTooSlowError;\nexports.UncaughtExceptionError = UncaughtExceptionError;\nexports.RemoteFetchError = RemoteFetchError;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/sandbox2.js?");
4555
4555
 
4556
4556
  /***/ }),
4557
4557
 
@@ -4562,7 +4562,7 @@ eval("/* eslint-disable require-await */\n// NOTE - need timeout/postmessage fun
4562
4562
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4563
4563
 
4564
4564
  "use strict";
4565
- eval("/**\n * @file dcp-client/worker/supervisor2/slice2.js\n *\n * A wrapper for the slice object returned from the scheduler's task distributor.\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * Wes Garland, wes@kingsds.network\n * Paul, paul@kingsds.network\n * @date May 2019\n * May 2019\n * Decemeber 2020\n * June 2022\n * @module slice\n */\n\n// @ts-check\n\n\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { truncateAddress } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n\nconst addressTruncationLength = 20;\n\nconst INITIAL = 'INITIAL';\nconst READY = 'READY';\nconst RESERVED = 'RESERVED';\nconst WORKING = 'WORKING';\nconst COMPLETE = 'COMPLETE';\nconst FAILED = 'FAILED';\nconst BROKEN = 'BROKEN';\nconst FINISHED = 'FINISHED';\n\n/** @typedef {import('./job-manager').JobManager} JobManager */\n/** @typedef {import('dcp/utils').SliceMessage} SliceMessage */\n/** @typedef {string} opaqueId */ // 22 character base64 string \n\n/** \n * Object use to represent a given slice inside the Supervisor. This object's shape \n * current inherits heavily from the message payload originating from the scheduler,\n * but should not /wg dec 2020.\n *\n * Caveat lector: this documentation was created without a 100% understanding of the\n * code. Please improve as possible.\n * \n * The read-only properties of this object are as follows:\n * - state INITIAL | READY | RESERVED | WORKING | COMPLETE | FAILED | BROKEN\n * - sliceNumber the number of the slice within this job\n * - jobAddress the address of the job that this slice belongs to\n * - isEstimation true when slice is used in estimation\n * - isLong true, when slice is estimated to take more than 300 seconds to execute\n * - sandbox the sandbox the slice will-be/is/has-been running on.\n * \n * - datum input set element for this slice this slice of the job; could be a data: URI or it could\n * be a URL we need to fetch; note that fetches are limited to worker's allow list\n * - result\n * - result.request 'complete',...\n * - result.result return value from work function\n * - result.timeReport { total, idle } ms \n * - error error info when slice FAILED\n * - jobManager wrapper for the job that owns this slice\n * - resultStorageType 'values' => we are storing individual values,\n * which could be data: URIs or URLs\n * at the scheduler\n * 'pattern' => user-specified pattern for result URLs.\n * Data will be uploaded via POST to the\n * URL matching the pattern, provided the\n * worker is allowed to access it.\n * - resultStorageParams user-supplied POST parameters sent to the result \n * storage server when using resultStorageType = pattern.\n * - resultStorageDetails the pattern when using resultStorageType = pattern.\n * - authorizationMessage authorization from task distributor, sent to result submitter, etc...\n * \n * - finished true, when in state COMPLETED, FAILED, BROKEN\n * - failed true, when in state FAILED, BROKEN\n * - completed true, when in state COMPLETED\n * - isUnassigned true, when in state INITIAL\n * - isReady true, when in state READY\n * - isWorking true, when in state WORKING\n * - identifier string 'sliceNumber.jobAddress.state'\n * - timeReport accessor for 
this.result.timeReport that updates from this.rejectedTimeReport when appropriate\n *\n * The r/w properties of this object are as follows:\n * - etaMs estimate of slice completion time span\n * - rejected slice has been rejected\n * - rejectedTimeReport rejected timeReport\n * \n * NOTE: If you ever use a property with a leading underscore you are probably making a mistake.\n * But if you must, please ask paul, yarn, bryan or eddie for a CR.\n */\nclass Slice\n{\n /**\n * @param {JobManager} jobManager\n * @param {SliceMessage} sliceMessage\n * @param {object} authorizationMessage\n */\n constructor (jobManager, sliceMessage, authorizationMessage)\n {\n /** @type {Synchronizer} */\n this._state = new Synchronizer(INITIAL, [ INITIAL, READY, RESERVED, WORKING, COMPLETE, FAILED, BROKEN, FINISHED ]);\n /** @type {JobManager} */\n this._jobManager = jobManager;\n /** @type {SliceMessage} */\n this._sliceMessage = { ...sliceMessage };\n this._authorizationMessage = authorizationMessage;\n this._datum = null;\n this._result = null;\n this._error = null;\n /** @type {number} */\n this.startTime = 0;\n /** @type {number} */\n this.rejectedTimeStamp = null;\n /** @type {{ total: number, CPU: number, webGL: number }} */\n this.rejectedTimeReport = null;\n \n assert(this.jobAddress === String(this._sliceMessage.jobAddress));\n if (!this.authorizationMessage && this.sliceNumber > 0)\n throw new Error(`Undefined authorization for slice ${this.identifier}.`);\n \n const that = this;\n \n /** \n * Start loading dependencies in the background. Once these are loaded, this.state will \n * transition to READY and the job will be ready to transition to WORKING.\n */\n (async function supervisor$$slice$$loadDatum() {\n ({ inputDatum: that._datum, dataError: that._error } = await that.jobManager.fetchSliceData(that.datumUri, that));\n }) (/* iife */)\n .then (() => {\n debugging('slice') && console.debug('Slice is transitioning to READY');\n this.state.set(INITIAL, READY);\n })\n .catch((error) => {\n debugging('slice') && console.debug('jobManager.fetchSliceData failed', error);\n this.state.set(INITIAL, BROKEN);\n })\n .finally(() => {\n debugging('slice') && console.debug('Slice.loadDatum completed.', this.identifier);\n });\n }\n\n /** @type {Synchronizer} */\n get state () { return this._state; }\n /** @type {number} */\n get sliceNumber () { return this._sliceMessage.sliceNumber; }\n /** @type {string} */\n get jobAddress () { return this._jobManager.address; }\n /** @type {boolean} */\n get isEstimation () { return this._sliceMessage.isEstimationSlice; }\n /** @type {boolean} */\n get isLong () { return this._sliceMessage.isLongSlice; }\n /** @type {string} */\n get datumUri () { return this._sliceMessage.datumUri; }\n /** @type {JobManager} */\n get jobManager () { return this._jobManager; }\n /** @type {string} */\n get resultStorageType () { return this._sliceMessage.resultStorageType; }\n /** @type {string} */\n get resultStorageDetails () { return this._sliceMessage.resultStorageDetails; }\n\n /** Read-only properties of type object. 
*/\n get datum () { return this._datum; }\n get result () { return this._result; }\n get error () { return this._error; }\n get resultStorageParams () { return this._sliceMessage.resultStorageParams; }\n get authorizationMessage () { return this._authorizationMessage; }\n\n /** @type {boolean} */\n get isQueued () { return this.isUnassigned || this.isReady; }\n /** @type {boolean} */\n get isActive () { return this.isReserved || this.isWorking || this.isComplete; }\n /** @type {boolean} */\n get isFinished () { return this.state.is(FINISHED); }\n /** @type {boolean} */\n get hasFailed () { return this.state.is(FAILED) || this.state.is(BROKEN); }\n /** @type {boolean} */\n get isComplete () { return this.state.is(COMPLETE); }\n /** @type {boolean} */\n get isUnassigned () { return this.state.is(INITIAL); }\n /** @type {boolean} */\n get isReady () { return this.state.is(READY); }\n /** @type {boolean} */\n get isWorking () { return this.state.is(WORKING); }\n /** @type {boolean} */\n get isReturnable () { return this.state.is(WORKING); }\n /**\n * Mark a slice as RESERVED to remove it from the ready list, yet still able to transition to WORKING.\n * @type {boolean}\n **/\n get isReserved () { return this.state.is(RESERVED); }\n\n // TEMPORARY: THIS IS ONLY USED FOR COMPATIBILITY WITH SUPERVISOR1.\n // WHEN SUPERVISOR1 IS PUT TO BED, REMOVE THESE TWO FUNCTIONS.\n /** DO NOT USE! @deprecated @type {boolean} */\n get failed () { return this.hasFailed; }\n /** DO NOT USE! @deprecated @type {boolean} */\n get completed () { return this.isComplete; }\n \n /** @type {string} */\n get identifier () { return `${this.sliceNumber}.${truncateAddress(this.jobAddress, addressTruncationLength)}.${this.state}`; }\n /** \n * timeReport accessor that optionally updates from this.rejectedTimeReport. \n * @type { { total, CPU, webGL } }\n **/\n get timeReport ()\n {\n if (this.result.timeReport && this.rejectedTimeReport && this.rejectedTimeReport.total > 0)\n {\n // Data collected from sandboxes that rejected this slice.\n ['total', 'CPU', 'webGL'].forEach((key) => {\n if (this.rejectedTimeReport[key]) this.result.timeReport[key] += this.rejectedTimeReport[key];\n });\n this.rejectedTimeReport = null;\n }\n return this.result.timeReport;\n }\n /**\n * Return the time interval to estimated slice completion time.\n * @type {number}\n **/\n get etaMs ()\n {\n if (this.startTime === null) return 0;\n let etaMs = this.jobManager.estimateWallMs;\n if (this.startTime) etaMs -= (Date.now() - this.startTime);\n return etaMs;\n }\n\n /** Start slice over, regardless of what state it is in. */\n resetState() { this._state = new Synchronizer(READY, [ INITIAL, READY, RESERVED, WORKING, COMPLETE, FAILED, BROKEN ]); }\n\n /** \n * Sets the slice status to RESERVED, called to remove slice from the ready list,\n * yet still able to transition to WORKING.\n **/\n markAsReserved() { this.state.set(READY, RESERVED); }\n\n /** Sets the slice status to WORKING, called when the slice is getting ready to be handed to a worker. */\n markAsWorking() { this.state.set(RESERVED, WORKING); }\n\n /** Sets the slice status to FINISHED, called when the slice has completed and submitted results. 
*/\n markAsFinished() { this.state.set(COMPLETE, FINISHED); }\n\n /**\n * Verifies slice status is working and assign sandbox, called when the slice is handed to a worker.\n */\n verifyWorking()\n {\n if (!this.isWorking) {\n throw new Error(`Slice ${this.identifier} is not working.`);\n }\n }\n\n /**\n * Receives a result from the scheduler.\n * It will then put the result in the appropriate place.\n * It could also eventually determine if the slice should be\n * retried before determining that it has failed.\n *\n * @param {object|Error} result - The result that came back from the worker sandbox.\n * @param {boolean} [success=true] - True if result is considered successful, false if error occurred.\n */\n collectResult(result, success = true)\n {\n if (this.result)\n throw new Error(`Slice ${this.identifier} received more than one result.`);\n \n if (success)\n {\n this.state.set(WORKING, COMPLETE);\n this._result = result;\n }\n else\n {\n this.state.set(WORKING, FAILED);\n this._error = result;\n }\n debugging('slice') && console.debug('collectResult', this.identifier);\n }\n\n /**\n * Create basic message object as part of the payload to send back to the result-submitter's status operation.\n * @param {string} status - The kind of status operation\n * @param {object} [extraProperties={}] - Extra properties for the paylaod object.\n * @returns {object}\n */\n getMessage(status, extraProperties = {})\n {\n return {\n sliceNumbers: [this.sliceNumber],\n job: this.jobAddress,\n authorizationMessage: this.authorizationMessage,\n status,\n ...extraProperties,\n }; \n }\n\n /**\n * Create basic payload object to send back to the result-submitter's status operation.\n * @param {opaqueId} worker - The current worker's opaqueId\n * @param {string} status - The kind of status operation\n * @param {object} [extraProperties={}] - Extra properties for the paylaod object.\n * @returns {object}\n */\n getMessagePayload(worker, status, extraProperties = {})\n {\n return {\n worker,\n slices: [ this.getMessage(status, extraProperties) ],\n }; \n }\n\n /**\n * Create slice-return payload object to send to the result-submitter's status operation.\n * @param {opaqueId} worker - The current worker's opaqueId\n * @param {string} [reason] - Optional reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @return {object}\n */\n getReturnMessagePayload(worker, reason)\n {\n delete this._result;\n\n if (!reason) reason = this.error ? 'EUNCAUGHT' : 'unknown';\n const extraProperties = {\n isEstimationSlice: this.isEstimation,\n error: this.error,\n reason,\n };\n\n return this.getMessagePayload(worker, 'return', extraProperties);\n }\n\n /**\n * @deprecated -- DO NOT USE!\n * TEMPORARY: THIS IS ONLY USED FOR COMPATIBILITY WITH SUPERVISOR1.\n * WHEN SUPERVISOR1 IS PUT TO BED, REMOVE THIS FUNCTION.\n * This function helps enforce the equivalence:\n * !this.authorizationMessage <==> sliceNumber === 0 .\n * @returns {object} this.authorizationMessage\n */\n getAuthorizationMessage () {\n if (!this.authorizationMessage && this.sliceNumber > 0)\n throw new Error(`Undefined authorization for slice ${this.identifier}.`);\n return this.authorizationMessage;\n }\n}\nexports.Slice = Slice;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/slice2.js?");
4565
+ eval("/**\n * @file dcp-client/worker/supervisor2/slice2.js\n *\n * A wrapper for the slice object returned from the scheduler's task distributor.\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * Wes Garland, wes@kingsds.network\n * Paul, paul@kingsds.network\n * @date May 2019\n * May 2019\n * Decemeber 2020\n * June 2022\n * @module slice\n */\n\n// @ts-check\n\n\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { truncateAddress } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n\nconst addressTruncationLength = 20;\n\nconst INITIAL = 'INITIAL';\nconst READY = 'READY';\nconst RESERVED = 'RESERVED';\nconst WORKING = 'WORKING';\nconst COMPLETE = 'COMPLETE';\nconst FAILED = 'FAILED';\nconst BROKEN = 'BROKEN';\nconst FINISHED = 'FINISHED';\n\n/** @typedef {import('./job-manager').JobManager} JobManager */\n/** @typedef {import('dcp/utils').SliceMessage} SliceMessage */\n/** @typedef {string} opaqueId */ // 22 character base64 string \n\n/** \n * Object use to represent a given slice inside the Supervisor. This object's shape \n * current inherits heavily from the message payload originating from the scheduler,\n * but should not /wg dec 2020.\n *\n * Caveat lector: this documentation was created without a 100% understanding of the code.\n * Please improve when possible.\n * \n * The read-only properties of this object are as follows:\n * - state INITIAL | READY | RESERVED | WORKING | COMPLETE | FAILED | BROKEN | FINISHED\n * - sliceNumber the number of the slice within this job\n * - jobAddress the address of the job that this slice belongs to\n * - isEstimation true when slice is used in estimation\n * - isLong true, when slice is estimated to take more than 300 seconds to execute\n * - datum input set element for this slice this slice of the job; could be a data: URI or it could\n * be a URL we need to fetch; note that fetches are limited to worker's allow list\n * - result\n * - result.request 'complete',...\n * - result.result return value from work function\n * - result.timeReport { total, idle } ms \n * - error error info when slice FAILED\n * - jobManager wrapper for the job that owns this slice\n * - resultStorageType 'values' => we are storing individual values,\n * which could be data: URIs or URLs\n * at the scheduler\n * 'pattern' => user-specified pattern for result URLs.\n * Data will be uploaded via POST to the\n * URL matching the pattern, provided the\n * worker is allowed to access it.\n * - resultStorageParams user-supplied POST parameters sent to the result \n * storage server when using resultStorageType = pattern.\n * - resultStorageDetails the pattern when using resultStorageType = pattern.\n * - authorizationMessage authorization from task distributor, sent to result submitter, etc...\n * \n * - isFinished true, when in state FINISHED <-- Indicates result submission has succeeded.\n * - hasFailed true, when in state FAILED, BROKEN\n * - isComplete true, when in state COMPLETED\n * - isReserved true, when in state RESERVED\n * - isActive true, when in state RESERVED, WORKING, COMPLETE\n * - isUnassigned true, when in state INITIAL\n * - isReady true, when in state READY\n * - isWorking true, when in state WORKING\n * - identifier 
string 'sliceNumber.jobAddress.state'\n * - timeReport accessor for this.result.timeReport that updates from this.rejectedTimeReport when appropriate\n *\n * The r/w properties of this object are as follows:\n * - startTime time when slice execution started\n * - rejectedTimeStamp slice has been rejected\n * - rejectedTimeReport rejected timeReport\n * \n * NOTE: If you ever use a property with a leading underscore you are probably making a mistake.\n * But if you must, please ask paul, yarn, bryan or eddie for a CR.\n */\nclass Slice\n{\n /**\n * @param {JobManager} jobManager\n * @param {SliceMessage} sliceMessage\n * @param {object} authorizationMessage\n */\n constructor (jobManager, sliceMessage, authorizationMessage)\n {\n /** @type {Synchronizer} */\n this._state = new Synchronizer(INITIAL, [ INITIAL, READY, RESERVED, WORKING, COMPLETE, FAILED, BROKEN, FINISHED ]);\n /** @type {JobManager} */\n this._jobManager = jobManager;\n /** @type {SliceMessage} */\n this._sliceMessage = { ...sliceMessage };\n this._authorizationMessage = authorizationMessage;\n this._datum = null;\n this._result = null;\n this._error = null;\n /** @type {number} */\n this.startTime = 0;\n /** @type {number} */\n this.rejectedTimeStamp = null;\n /** @type {{ total: number, CPU: number, webGL: number }} */\n this.rejectedTimeReport = null;\n \n assert(this.jobAddress === String(this._sliceMessage.jobAddress));\n if (!this.authorizationMessage && this.sliceNumber > 0)\n throw new Error(`Undefined authorization for slice ${this.identifier}.`);\n \n const that = this;\n \n /** \n * Start loading dependencies in the background. Once these are loaded, this.state will \n * transition to READY and the job will be ready to transition to WORKING.\n */\n (async function supervisor$$slice$$loadDatum() {\n ({ inputDatum: that._datum, dataError: that._error } = await that.jobManager.fetchSliceData(that.datumUri, that));\n }) (/* iife */)\n .then (() => {\n debugging('slice') && console.debug('Slice is transitioning to READY');\n this.state.set(INITIAL, READY);\n })\n .catch((error) => {\n debugging('slice') && console.debug('jobManager.fetchSliceData failed', error);\n this.state.set(INITIAL, BROKEN);\n })\n .finally(() => {\n debugging('slice') && console.debug('Slice.loadDatum completed.', this.identifier);\n });\n }\n\n /** @type {Synchronizer} */\n get state () { return this._state; }\n /** @type {number} */\n get sliceNumber () { return this._sliceMessage.sliceNumber; }\n /** @type {string} */\n get jobAddress () { return this._jobManager.address; }\n /** @type {boolean} */\n get isEstimation () { return this._sliceMessage.isEstimationSlice; }\n /** @type {boolean} */\n get isLong () { return this._sliceMessage.isLongSlice; }\n /** @type {string} */\n get datumUri () { return this._sliceMessage.datumUri; }\n /** @type {JobManager} */\n get jobManager () { return this._jobManager; }\n /** @type {string} */\n get resultStorageType () { return this._sliceMessage.resultStorageType; }\n /** @type {string} */\n get resultStorageDetails () { return this._sliceMessage.resultStorageDetails; }\n\n /** Read-only properties of type object. 
*/\n get datum () { return this._datum; }\n get result () { return this._result; }\n get error () { return this._error; }\n get resultStorageParams () { return this._sliceMessage.resultStorageParams; }\n get authorizationMessage () { return this._authorizationMessage; }\n\n /** @type {boolean} */\n get isQueued () { return this.isUnassigned || this.isReady; }\n /** @type {boolean} */\n get isActive () { return this.isReserved || this.isWorking || this.isComplete; }\n /** @type {boolean} */\n get isFinished () { return this.state.is(FINISHED); }\n /** @type {boolean} */\n get hasFailed () { return this.state.is(FAILED) || this.state.is(BROKEN); }\n /** @type {boolean} */\n get isComplete () { return this.state.is(COMPLETE); }\n /** @type {boolean} */\n get isUnassigned () { return this.state.is(INITIAL); }\n /** @type {boolean} */\n get isReady () { return this.state.is(READY); }\n /** @type {boolean} */\n get isWorking () { return this.state.is(WORKING); }\n /**\n * Mark a slice as RESERVED to remove it from the ready list, yet still able to transition to WORKING.\n * @type {boolean}\n **/\n get isReserved () { return this.state.is(RESERVED); }\n\n // TEMPORARY: THIS IS ONLY USED FOR COMPATIBILITY WITH SUPERVISOR1.\n // WHEN SUPERVISOR1 IS PUT TO BED, REMOVE THESE TWO FUNCTIONS.\n /** DO NOT USE! @deprecated @type {boolean} */\n get failed () { return this.hasFailed; }\n /** DO NOT USE! @deprecated @type {boolean} */\n get completed () { return this.isComplete; }\n \n /** @type {string} */\n get identifier () { return `${this.sliceNumber}.${truncateAddress(this.jobAddress, addressTruncationLength)}.${this.state}`; }\n /** \n * timeReport accessor that optionally updates from this.rejectedTimeReport. \n * @type { { total, CPU, webGL } }\n **/\n get timeReport ()\n {\n if (this.result.timeReport && this.rejectedTimeReport && this.rejectedTimeReport.total > 0)\n {\n // Data collected from sandboxes that rejected this slice.\n ['total', 'CPU', 'webGL'].forEach((key) => {\n if (this.rejectedTimeReport[key]) this.result.timeReport[key] += this.rejectedTimeReport[key];\n });\n this.rejectedTimeReport = null;\n }\n return this.result.timeReport;\n }\n /**\n * Return the time interval to estimated slice completion time.\n * @type {number}\n **/\n get etaMs ()\n {\n if (this.startTime === null) return 0;\n let etaMs = this.jobManager.estimateWallMs;\n if (this.startTime) etaMs -= (Date.now() - this.startTime);\n return etaMs;\n }\n\n /** Start slice over, regardless of what state it is in. */\n resetState()\n {\n if (this.isReady || this.isUnassigned) return;\n this._state = new Synchronizer(READY, [ INITIAL, READY, RESERVED, WORKING, COMPLETE, FAILED, BROKEN, FINISHED ]);\n }\n\n /** \n * Sets the slice status to RESERVED, called to remove slice from the ready list,\n * yet still able to transition to WORKING.\n **/\n markAsReserved() { this.state.set(READY, RESERVED); }\n\n /** Sets the slice status to WORKING, called when the slice is getting ready to be handed to a worker. */\n markAsWorking() { this.state.set(RESERVED, WORKING); }\n\n /** Sets the slice status to FINISHED, called when the slice has completed and submitted results. 
*/\n markAsFinished() { this.state.set(COMPLETE, FINISHED); }\n\n /**\n * Verifies slice status is working and assign sandbox, called when the slice is handed to a worker.\n */\n verifyWorking()\n {\n if (!this.isWorking) {\n throw new Error(`Slice ${this.identifier} is not working.`);\n }\n }\n\n /**\n * Receives a result from the scheduler.\n * It will then put the result in the appropriate place.\n * It could also eventually determine if the slice should be\n * retried before determining that it has failed.\n *\n * @param {object|Error} result - The result that came back from the worker sandbox.\n * @param {boolean} [success=true] - True if result is considered successful, false if error occurred.\n */\n collectResult(result, success = true)\n {\n if (this.result)\n throw new Error(`Slice ${this.identifier} received more than one result.`);\n \n if (success)\n {\n this.state.set(WORKING, COMPLETE);\n this._result = result;\n }\n else\n {\n this.state.set(WORKING, FAILED);\n this._error = result;\n }\n debugging('slice') && console.debug('collectResult', this.identifier);\n }\n\n /**\n * Create basic message object as part of the payload to send back to the result-submitter's status operation.\n * @param {string} status - The kind of status operation\n * @param {object} [extraProperties={}] - Extra properties for the paylaod object.\n * @returns {object}\n */\n getMessage(status, extraProperties = {})\n {\n return {\n sliceNumbers: [this.sliceNumber],\n job: this.jobAddress,\n authorizationMessage: this.authorizationMessage,\n status,\n ...extraProperties,\n }; \n }\n\n /**\n * Create basic payload object to send back to the result-submitter's status operation.\n * @param {opaqueId} worker - The current worker's opaqueId\n * @param {string} status - The kind of status operation\n * @param {object} [extraProperties={}] - Extra properties for the paylaod object.\n * @returns {object}\n */\n getMessagePayload(worker, status, extraProperties = {})\n {\n return {\n worker,\n slices: [ this.getMessage(status, extraProperties) ],\n }; \n }\n\n /**\n * Create slice-return payload object to send to the result-submitter's status operation.\n * @param {opaqueId} worker - The current worker's opaqueId\n * @param {string} [reason] - Optional reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @return {object}\n */\n getReturnMessagePayload(worker, reason)\n {\n delete this._result;\n\n if (!reason) reason = this.error ? 'EUNCAUGHT' : 'unknown';\n const extraProperties = {\n isEstimationSlice: this.isEstimation,\n error: this.error,\n reason,\n };\n\n return this.getMessagePayload(worker, 'return', extraProperties);\n }\n\n /**\n * @deprecated -- DO NOT USE!\n * TEMPORARY: THIS IS ONLY USED FOR COMPATIBILITY WITH SUPERVISOR1.\n * WHEN SUPERVISOR1 IS PUT TO BED, REMOVE THIS FUNCTION.\n * This function helps enforce the equivalence:\n * !this.authorizationMessage <==> sliceNumber === 0 .\n * @returns {object} this.authorizationMessage\n */\n getAuthorizationMessage () {\n if (!this.authorizationMessage && this.sliceNumber > 0)\n throw new Error(`Undefined authorization for slice ${this.identifier}.`);\n return this.authorizationMessage;\n }\n}\nexports.Slice = Slice;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor2/slice2.js?");
4566
4566
 
4567
4567
  /***/ }),
4568
4568
 
@@ -4647,7 +4647,7 @@ eval("/**\n * @file protocol/connection/message.js\n * @author Ryan
4647
4647
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4648
4648
 
4649
4649
  "use strict";
4650
- eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file protocol/connection/connection.js\n * @author Ryan Rossiter\n * @author KC Erb\n * @author Wes Garland\n * @date January 2020, Feb 2021, Mar 2022\n *\n * A Connection object represents a connection to another DCP entity. \n * A DCP connection may 'live' longer than the underlying protocol's connection,\n * and the underlying protocol connection (or, indeed, protocol) may change\n * throughout the life of the DCP connection.\n * \n * DCP connections are uniquely identified by the DCP Session ID, specified by\n * the dcpsid property, present in every message body. This session id negotiated during connection,\n * with the initiator and target each providing half of the string.\n *\n * Connection instance events:\n * - session: dcpsid new session established\n * - connect: url UI hint: internet available\n * - disconnect: url UI hint: internet not available\n * - readyStateChange: *** DO NOT USE **\n * - error: error emitted when errors happen that would otherwise go uncaught\n * - close: connection instance is closing\n * - end: Connection instance is closed\n * - send: msgObj when a message is sent to the peer; does not wait for ack; may re-trigger on reconnect\n * - ready: when the connection is ready for traffic (constructor promises resolved)\n *\n * State Transition Diagram for Connection.state:\n *\n * initial connecting established disconnected close-wait closing closed\n * ===========================================================================================================================\n * |-- i:connect ---->\n * |-- t:accept ----->\n * |-- t:establishTarget -->\n * |-- i:connect ---------->\n * |-- transportDisconnectHandler -->\n * <-- i:_reconnect ----------------------------------------|\n * |-i:useNewTransport-->\n * <-- t:useNewTransport --------|\n * |-- closeWait ----------------------------------------------------------->\n * |-- closeWait ----------------------------------->\n * |-- closeWait -->\n * |-- doClose --------------->\n * |-- close ------------------------------------------------------------------------------------------------------------> \n * |-- close ---------------------------------------------------------------------------->\n * |-- close ---------------------------------------------------->\n * |-- close ------------------->\n * |-- doClose -->\n *\n *\n * Not until the established state can we count on things like a dcpsid, \n * peerAddress, identityPromise resolution and so on.\n * \n * Error Codes relevant to DCP Connections:\n * DCPC-1001 - CONNECTION CANNOT SEND WHEN IN CLOSING, CLOSE-WAIT OR CLOSED\n * EINVAL - MESSAGE OWNER IS INVALID (formerly DCPC-1002)\n * MESSAGE SIGNATURE INVALID (formerly DCPC-1003)\n * MESSAGE BODY IS INVALID (formerly DCPC-1004)\n * DCPC-1005 - TRYING TO ESTABLISH TARGET AFTER TARGET ALREADY ESTABLISHED\n * DCPC-1006 - CONNECTION COULD NOT BE ESTABLISHED WITHIN 30 SECONDS\n * DCPC-1007 - RECEIVED MESSAGE PAYLOAD BEFORE CONNECT OPERATION\n * DCPC-1008 - TARGET RESPONDED WITH INVALID DCPSID\n * DCPC-1009 - MESSAGE IS OF UNKNOWN TYPE\n * DCPC-1010 - DUPLICATE TRANSMISSION RECEIPT\n * DCPC-1011 - DEFAULT ERROR CODE WHEN PEER SENDS CLOSE MESSAGE\n * DCPC-1012 - TRIED TO INITIATE CONNECTION AFTER SESSION ALREADY ESTABLISHED\n * DCPC-1013 - DEFAULT ERROR CODE WHEN CLOSING WITH REASON THATS NOT INSTANCE OF DCPERROR\n * DCPC-1014 - NO TRANSPORTS AVAILABLE\n * 
DCPC-1015 - CANNOT CONNECT WHEN CONNECTION ALREADY CLOSED\n * DCPC-1016 - ERROR CONNECTING VIA AVAILABLE TRANSPORTS\n * DCPC-1017 - FIRST PROTOCOL MESSAGE WAS DID NOT INVOLVE INITIAL CONNECT REQUEST\n * DCPC-1018 - INVALID ARGUMENT PROVIDED IN PLACE OF IDKEYSTORE\n * ENODCPSID - CONNECTION INSTANCE TRIED TO RE-CONNECT TO A TARGET WHICH DOES NOT HAVE A RECORD OF THAT SESSION\n */\n\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp');\nconst dcpEnv = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { leafMerge, a$sleepMs } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\n\nconst { Transport } = __webpack_require__(/*! ../transport */ \"./src/protocol-v4/transport/index.js\");\nconst { Sender } = __webpack_require__(/*! ./sender */ \"./src/protocol-v4/connection/sender.js\");\nconst { Receiver } = __webpack_require__(/*! ./receiver */ \"./src/protocol-v4/connection/receiver.js\");\nconst { MessageLedger } = __webpack_require__(/*! ./message-ledger */ \"./src/protocol-v4/connection/message-ledger.js\");\nconst { getGlobalIdentityCache } = __webpack_require__(/*! ./identity-cache */ \"./src/protocol-v4/connection/identity-cache.js\");\nconst { makeEBOIterator, setImmediateN } = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\n\nconst { ConnectionMessage } = __webpack_require__(/*! ./connection-message */ \"./src/protocol-v4/connection/connection-message.js\");\nconst { ConnectionRequest } = __webpack_require__(/*! ./request */ \"./src/protocol-v4/connection/request.js\");\nconst { ConnectionResponse } = __webpack_require__(/*! ./response */ \"./src/protocol-v4/connection/response.js\");\nconst { ConnectionBatch } = __webpack_require__(/*! ./batch */ \"./src/protocol-v4/connection/batch.js\");\nconst { ConnectionAck } = __webpack_require__(/*! ./ack */ \"./src/protocol-v4/connection/ack.js\");\nconst { ErrorPayloadCtorFactory } = __webpack_require__(/*! ./error-payload */ \"./src/protocol-v4/connection/error-payload.js\");\nconst { role } = __webpack_require__(/*! ./connection-constants */ \"./src/protocol-v4/connection/connection-constants.js\");\n\nconst isDebugBuild = (__webpack_require__(/*! dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug';\nlet nanoid;\nif (dcpEnv.platform === 'nodejs') {\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n nanoid = requireNative('nanoid').nanoid;\n} else {\n nanoid = (__webpack_require__(/*! 
nanoid */ \"./node_modules/nanoid/index.browser.js\").nanoid);\n}\n\n\nlet globalConnectionId = 0;\nvar _msgId = 0;\n\nconst CONNECTION_STATES = [\n 'initial',\n 'connecting', /* initiator: establish first transport instance connection; target: listening */\n 'established',\n 'disconnected', /* connection is still valid, but underlying transport is no longer connected */\n 'close-wait', /* Target of close message is in this state until response is acknowledged */\n 'closing',\n 'closed',\n]\n\nclass Connection extends EventEmitter {\n static get VERSION() {\n return '5.1.0'; // Semver format\n }\n\n static get VERSION_COMPATIBILITY() {\n return '^5.0.0'; // Semver format, can be a range\n }\n\n /**\n * Connection form 2:\n * @constructor\n * @param {object} [target]\n * @param {Promise} idKsPromise A promise which resolves to the identity keystore described\n * in form 1\n * @param {object} [options]\n * @see form 1\n */\n /**\n * Connection form 1\n * Create a DCP Connection object. This object could represent either the initiator or \n * target end of a connection, until it is specialized by either invoke the connect()\n * or accept() methods. Note that send() invokes connect() internally if not in an established\n * state.\n * @constructor\n * @note Connection objects exist for the lifetime of a given DCP connection \n * (session), whether or not the underlying transport (eg internet protocol) is connected or not. Once \n * the DCP session has ended, this object has no purpose and is not reusable.\n * \n * @param {object} target Object (usually a dcpConfig fragment) describing the target.\n * This object may contain the following properties; 'location' is\n * mandatory:\n * - location: a URL or DcpURL that is valid from the Internet\n * - friendLocation: a DcpURL that is valid from an intranet; if\n * both location and friendLocation specified, the best one will\n * be chosen by examining IP addresses (assuming an IP bearer).\n * - identity: an object with an address property which is a promise\n * that resolves to an instance of wallet.Address which represents\n * to the target's identity; this overrides the initiator's \n * identity cache unless options.strict is truey.\n * \n * @param {Keystore} [idKeystore] The keystore used to sign messages; used for non-repudiation.\n * If not specified, a dynamically-generated keystore will be used.\n * \n * @param {object} [options] Extra connection options that aren't defined via dcpConfig.dcp.connectionOptions.\n * These options include:\n * - identityUnlockTimeout: Number of (floating-point) seconds to leave the identity \n * keystore unlocked between invocations of Connection.send\n *\n * @returns instance of Connection that is specific to a target URL but not a role\n */\n constructor(target, idKeystore, options = {})\n {\n super('Connection');\n this.id = ++globalConnectionId;\n this.debugLabel = `connection(g:${this.id}):`;\n\n /* polymorphism strategy: rewrite to (configFragment, idksPromise, options) */\n if (target instanceof DcpURL)\n target = { location: target };\n else if (DcpURL.isURL(target))\n target = { location: new DcpURL(target) };\n else if (target instanceof String || typeof target === 'string')\n target = { location: new DcpURL(target) };\n assert(typeof target === 'object', target.location);\n\n /* idKeystore is always resolved by the time a session is established. 
*/\n if (!idKeystore)\n this.identityPromise = wallet.getId();\n else if (idKeystore instanceof Promise)\n this.identityPromise = idKeystore;\n else if (idKeystore instanceof wallet.Keystore)\n this.identityPromise = Promise.resolve(idKeystore); \n else if (idKeystore instanceof wallet.Address)\n this.identityPromise = Promise.resolve(new wallet.Keystore(idKeystore, '')); \n else\n throw new DCPError('Invalid argument provided for IdKeystore', 'DCPC-1018');\n\n this.identityPromise.then((keystore) => {\n /* This always happens by the time a role is assumed */\n delete this.identityPromise;\n this.identity = keystore;\n this.emit('ready');\n debugging('connection') && console.debug(this.debugLabel, 'identity is', this.identity.address);\n });\n\n this.target = target;\n this.url = this.target.location;\n \n // Init internal state / vars\n this.state = new Synchronizer(CONNECTION_STATES[0], CONNECTION_STATES);\n // DO NOT USE this.state.on('change', (s) => this.emit('readyStateChange', s) );\n\n this.dcpsid = null;\n this.peerAddress = null;\n this.transport = null;\n this.messageLedger = new MessageLedger(this);\n this.authorizedSender = null;\n this.preDisconnectState = null;\n \n this.Message = ConnectionMessage(this);\n this.Request = ConnectionRequest(this.Message);\n this.Response = ConnectionResponse(this.Message);\n this.Batch = ConnectionBatch(this.Message);\n this.Ack = ConnectionAck(this.Message);\n this.ErrorPayload = ErrorPayloadCtorFactory(this);\n this.connectTime = Date.now();\n\n this.receiver = new Receiver(this);\n this.sender = new Sender(this);\n \n debugging('connection') && console.debug(this.debugLabel, `new connection#${this.id}; ${this.url}`);\n\n /* Create a connection config as this.options which takes into\n * account system defaults and overrides for specific urls, origins, etc.\n */\n this.options = leafMerge(\n ({ /* hardcoded defaults insulate us from missing web config */\n 'connectTimeout': 90,\n 'lingerTimeout': 1800,\n 'allowBatch': true,\n 'maxMessagesPerBatch': 100,\n 'identityUnlockTimeout': 300,\n 'ttl': {\n 'min': 15,\n 'max': 600,\n 'default': 120\n },\n 'transports': [ 'socketio' ],\n }),\n dcpConfig.dcp.connectionOptions.default,\n this.url && dcpConfig.dcp.connectionOptions[this.url.hostname],\n this.url && dcpConfig.dcp.connectionOptions[this.url.origin],\n options\n );\n\n /* draw out errors quickly in dev */\n if ((process.env.DCP_NETWORK_CONFIG_BUILD || dcpConfig.build) === 'debug')\n {\n this.options.maxMessagesPerBatch /= 10;\n \n /* short timeouts and debuggers don't get along well */\n if (dcpEnv.platform === 'nodejs' && !(requireNative('module')._cache.niim instanceof requireNative('module').Module))\n {\n this.options.connectTimeout /= 10;\n this.options.lingerTimeout /= 20;\n this.options.identityUnlockTimeout /= 10;\n }\n }\n\n assert(this.options.identityUnlockTimeout > 0);\n assert(this.options.connectTimeout > 0);\n assert(this.options.lingerTimeout > 0);\n assert(typeof this.options.ttl.min === 'number');\n assert(typeof this.options.ttl.max === 'number');\n assert(typeof this.options.ttl.default === 'number');\n \n this.backoffTimeIterator = makeEBOIterator(500, dcpConfig.build === 'debug' ? 5000 : 60000); /** XXXwg make this configurable */\n\n this.secureLocation = determineIfSecureLocation(this);\n this.loggableDest = '<generic>';\n }\n \n generateMessageId()\n {\n return `${this.id}-${_msgId++}-${Date.now()}-${nanoid()}`;\n }\n\n /**\n * Specialize an instance of Connection for either initiator or target behaviour. 
Once specialized,\n * the role cannot be changed. This happens based on methods invoked; connect() or accept() cause\n * the change.\n *\n * This specialization also implies that the connection is fully ready for use, including resolution\n * of the identity promise if necessary. This is perhaps not the best place to ensure that, but it\n * provides a reliable - and already async - waypoint to observe that event.\n */\n async a$assumeRole(myRole)\n {\n assert(myRole === role.initiator || myRole === role.target);\n\n if (this.role === myRole)\n return;\n this.role = myRole;\n \n if (this.role === role.target)\n {\n this.debugLabel = `connection(t:${this.id}):`;\n this.sender.debugLabel = `sender(t#${this.id}):`;\n this.messageLedger.debugLabel = `message-ledger(t#${this.id}):`;\n this.loggableDest = '<target>';\n this.hasNtp = true;\n }\n else\n {\n this.debugLabel = `connection(i:${this.id}):`;\n this.sender.debugLabel = `sender(i#${this.id}):`;\n this.messageLedger.debugLabel = `message-ledger(i#${this.id}):`;\n this.loggableDest = this.url.href;\n this.hasNtp = false;\n }\n\n debugging('connection') && console.debug(this.debugLabel, `connection #${this.id} is ${this.role} for ${this.url}`);\n if (!this.identity)\n {\n assert(this.identityPromise);\n debugging('connection') && console.debug(this.debugLabel, `waiting for identity resolution`);\n await this.identityPromise;\n }\n }\n\n /**\n * API to establish a DCP connection. Implied by send().\n *\n * When invoked by the initiator, this method establishes the connection by connecting\n * to the target url provided to the constructor.\n */\n async connect() // eslint-disable-line require-await\n {\n if (this.role == role.target)\n return;\n \n if (!this.role)\n await this.a$assumeRole(role.initiator);\n \n if (this.state.is('initial'))\n {\n if (!this.connectPromise)\n this.connectPromise = Promise.race([this.connectTimer(), this.a$_connect()]).then(() => { clearTimeout(this.connectTimeoutPromise); delete this.connectPromise });\n return this.connectPromise;\n }\n\n if (this.state.is('disconnected'))\n {\n if (!this.connectPromise)\n this.connectPromise = this.a$_reconnect().then(() => delete this.connectPromise);\n return this.connectPromise;\n }\n \n if (this.state.is('connecting'))\n {\n assert(this.connectPromise);\n return this.connectPromise;\n }\n\n if (this.state.is('established'))\n return;\n \n if (this.state.in(['closed', 'close-wait', 'closing']))\n throw new DCPError('Connection already closed', 'DCPC-1015');\n\n throw new Error('impossible');\n }\n\n /**\n * Performs a reconnection for connections which are in the disconnected state, and\n * tries to send any in-flight or enqueued messages as soon as that happens.\n */\n async a$_reconnect()\n {\n var transport;\n\n this.state.testAndSet('disconnected', 'connecting');\n try\n {\n do\n {\n transport = await this.a$connectToTargetTransport();\n } while (!transport && (this.transport && !this.transport.ready()) && !this.state.in(['closed', 'close-wait', 'closing']));\n\n debugging('connection') && console.debug(this.debugLabel, `reconnected via transport ${transport.socket.id}`);\n \n this.useNewTransport(transport);\n }\n catch (error)\n {\n if (error.code !== 'DCPC-1016' && error.code !== 'DCPC-1015')\n {\n /* Unreached unless there are bugs. 
*/\n throw error;\n } \n this.close(error, true);\n }\n }\n\n connectTimer()\n {\n return new Promise((_resolve, reject) =>\n {\n this.connectTimeoutPromise = setTimeout(() =>\n { \n reject(new Error('Failed to establish connection within 30 seconds'));\n }, 30000);\n if (typeof this.connectTimeoutPromise.unref === 'function')\n this.connectTimeoutPromise.unref();\n });\n }\n \n async a$_connect()\n { \n var presharedPeerAddress, establishResults;\n var targetIdentity = await this.target.identity;\n var transport;\n \n assert(this.role === role.initiator);\n\n this.state.set('initial', 'connecting');\n do\n {\n transport = await this.a$connectToTargetTransport().catch((error) =>\n {\n debugging('connection') && console.debug(`${this.debugLabel} error connecting to target on transport layer:`, error);\n return { ready: () => {return false} };\n });\n } while(!transport.ready());\n this.adopt(transport);\n\n establishResults = await this.sender.establish().catch(error => {\n debugging('connection') && console.debug(this.debugLabel, `Could not establish DCP session ${this.transport ? 'over' + this.transport.name : '. Transport establishment was not complete'}:`, error);\n this.close(error, true);\n throw error;\n });\n const peerAddress = new wallet.Address(establishResults.peerAddress);\n const dcpsid = establishResults.dcpsid;\n debugging('connection') && console.debug(this.debugLabel, 'dcpsid is', dcpsid);\n \n if (!this.options.strict && targetIdentity && determineIfSecureConfig())\n {\n if ( false\n || typeof targetIdentity !== 'object'\n || typeof targetIdentity.address !== 'object'\n || !(targetIdentity.address instanceof wallet.Address))\n targetIdentity = { address: new wallet.Address(targetIdentity) }; /* map strings and Addresses to ks ducks */\n\n presharedPeerAddress = targetIdentity.address;\n debugging('connection') && console.debug(this.debugLabel, 'Using preshared peer address', presharedPeerAddress);\n }\n this.ensureIdentity(peerAddress, presharedPeerAddress);\n\n /* At this point, new session is valid & security checks out - make Connection instance usable */\n this.peerAddress = peerAddress;\n if (this.dcpsid)\n throw new DCPError(`Reached impossible state in connection.js; dcpsid already specified ${this.dcpsid} (${this.url})`, 'DCPC-1012');\n\n this.state.set('connecting', 'established'); /* established => dcpsid has been set */\n this.emit('session', (this.dcpsid = dcpsid));\n this.emit('connect', this.url);\n this.sender.notifyTransportReady();\n return Promise.resolve();\n }\n\n /**\n * unreference any objects entrained by this connection so that it does not prevent\n * the node program from exiting naturally.\n */\n unref()\n {\n if (this.connectAbortTimer && this.connectAbortTimer.unref && dcpEnv.platform === 'nodejs')\n this.connectAbortTimer.unref();\n }\n\n /**\n * Method is invoked when the transport disconnects. Transport instance is responsible for its own\n * finalization; Connection instance is responsible for finding a new transport, resuming the\n * connection, and retransmitting any in-flight message.\n *\n * @param {object} transport the transport instance that triggered this handler. In some cases, it\n * is possible that this event is not serviced until after the connection\n * has already acquired a new transport instance, e.g. in a Target where\n * the initiator switched networks. 
This implies that it is possible for\n * more 'connect' events to be emitted than 'disconnect' events.\n */\n transportDisconnectHandler(transport)\n {\n try\n { \n if (this.state.in(['disconnected', 'closing', 'close-wait', 'closed'])) /* transports may fire this more than once */\n return;\n\n if (transport !== this.transport) /* event no longer relevant */\n return;\n\n if (this.transport)\n {\n transport.close();\n delete this.transport;\n }\n \n if (this.state.is('established'))\n {\n this.state.set('established', 'disconnected');\n this.emit('disconnect', this.url); /* UI hint: \"internet unavailable\" */\n debugging('connection') && console.debug(this.debugLabel, `Transport disconnected from ${this.url}; ${this.sender.inFlight ? 'have' : 'no'} in-flight message`);\n \n if (!this.dcpsid) /* hopefully impossible? */\n {\n debugging('connection') && console.debug(this.debugLabel, 'Not reconnecting - no session');\n return;\n }\n }\n \n if (this.role === role.target)\n {\n /* targets generally can't reconnect due to NAT */\n debugging('connection') && console.debug(this.debugLabel, `Waiting for initiator to reconnect for ${this.dcpsid}`);\n return;\n }\n \n if (this.dcpsid && !this.sender.inFlight && this.options.onDemand)\n {\n debugging('connection') && console.debug(this.debugLabel, `Not reconnecting ${this.dcpsid} until next message`);\n return;\n }\n \n if (this.state.is('connecting') && (!this.dcpsid || !this.peerAddress))\n {\n debugging('connection') && console.debug(this.debugLabel, `Disconnected while connecting, establishing transport and re-sending connect request.`);\n this.a$_reconnect();\n return;\n }\n\n /* At this point, we initiate a (re)connect attempt because either\n * - we haven't connected yet,\n * - we have something to send, or\n * - we are not an on-demand connection\n */\n if (!this.state.is('connecting'))\n this.connect();\n }\n catch(error)\n {\n debugging('connection') && console.debug(error);\n this.close(error, true);\n \n if (error.code !== 'DCPC-1016' && error.code !== 'DCPC-1015')\n {\n /* Unreached unless there are bugs. */\n throw error;\n }\n }\n }\n \n /**\n * Initiators only\n *\n * Connect to a target at the transport level.\n * - Rejects when we give up on all transports.\n * - Resolves with a transport instance when we connect to one.\n *\n * The connection attempt will keep a node program \"alive\" while it is happening.\n * The `autoUnref` connectionOption and unref() methods offer ways to make this not\n * happen.\n */\n async a$connectToTargetTransport()\n {\n const that = this;\n const availableTransports = [].concat(this.options.transports);\n var quitMsg = false; /* not falsey => reject asap, value is error message */\n var quitCode = undefined;\n var boSleepIntr; /* if not falsey, a function that interrupts the backoff sleep */\n var transportConnectIntr; /* if not falsey, a function that interrupts the current connection attempt */\n\n // Already trying to connect to target, don't try multiple times until we've aborted one attempt\n if (this.connectAbortTimer)\n return;\n \n /* This timer has the lifetime of the entire connection attempt. 
When we time out,\n * we set the quitMsg to get the retry loop to quit, then we interrupt the timer so\n * that we don't have to wait for the current backoff to expire before we notice, and\n * we expire the current attempt to connect right away as well.\n */\n this.connectAbortTimer = setTimeout(() => {\n quitMsg = 'connection timeout';\n if (boSleepIntr) boSleepIntr();\n if (transportConnectIntr) transportConnectIntr();\n }, this.options.connectTimeout * 1000);\n\n if (this.options.autoUnref)\n this.unref();\n\n /* cleanup code called on return/throw */\n function cleanup_ctt()\n {\n clearTimeout(that.connectAbortTimer);\n delete that.connectAbortTimer;\n }\n\n /* Connect to target with a specific transport. */\n /* Resolves with { bool success, obj transport } or rejects with { error } if the transport cannot connect*/\n function a$connectWithTransport(transportName)\n { \n transportConnectIntr = false;\n\n return new Promise((connectWithTransport_resolve, connectWithTransport_reject) => { \n const TransportClass = Transport.require(transportName);\n const transport = new TransportClass(that.target, that.options[transportName]);\n var ret = { transport };\n\n function cleanup_cwt()\n {\n for (let eventName of transport.eventNames())\n for (let listener of transport.listeners(eventName))\n transport.off(eventName, listener);\n }\n \n /* In the case where we have a race condition in the transport implementation, arrange things\n * so that we resolve with whatever fired last if we have a double-fire on the same pass of \n * the event loop.\n */\n transport.on('connect', () => { cleanup_cwt(); ret.success=true; connectWithTransport_resolve(ret) });\n transport.on('error', (error) => { cleanup_cwt(); connectWithTransport_reject(error) });\n transport.on('connect-failed', (error) => {\n cleanup_cwt();\n ret.success = false;\n ret.error = error;\n debugging() && console.debug(`Error connecting to ${that.url};`, error);\n connectWithTransport_resolve(ret);\n });\n \n /* let the connectAbortTimer interrupt this connect attempt */\n transportConnectIntr = () => { transport.close() };\n });\n }\n \n if (availableTransports.length === 0)\n {\n cleanup_ctt();\n return Promise.reject(new DCPError('no transports defined', 'DCPC-1014'));\n }\n \n /* Loop while trying each available transport in turn. 
Sleep with exponential backoff between runs */\n while (!quitMsg)\n {\n for (let transportName of availableTransports)\n {\n try\n {\n const { success, error, transport } = await a$connectWithTransport(transportName);\n\n /* Have connected to the remote at the transport level - OUT */\n if (success === true)\n {\n transportConnectIntr = false;\n cleanup_ctt();\n \n return transport;\n }\n\n /* Fast-fail: certain - but few - HTTP status codes let us know that this (or any) transport\n * will never work, so don't try those again.\n */\n if (error && error.httpStatus)\n {\n switch(error.httpStatus)\n {\n case 301: case 302: case 303: case 307: case 308:\n debugging('connection') && console.debug(this.debugLabel, `HTTP status ${error.httpStatus}; won't try again with ${transportName}`);\n availableTransports.splice(availableTransports.indexOf(transportName), 1);\n break;\n case 400: case 403: case 404:\n debugging('connection') && console.debug(this.debugLabel, `HTTP status ${error.httpStatus}; won't try again.`);\n quitMsg = error.message;\n quitCode = 'HTTP_' + error.httpStatus || 0;\n break;\n default:\n debugging('connection') && console.debug(this.debugLabel, `HTTP status ${error.httpStatus}; will try again with ${transportName}`);\n break;\n }\n }\n }\n catch (impossibleError)\n {\n /* transport connection attempts should never throw. */\n debugging('connection') && console.debug(this.debugLabel, `Error connecting to ${this.url} with ${transportName}; won't try again:`, impossibleError);\n availableTransports.splice(availableTransports.indexOf(transportName), 1);\n }\n }\n \n if (availableTransports.length === 0)\n {\n quitMsg = 'all transports exhausted';\n break;\n }\n \n /* Go to (interruptible) sleep for a while before trying again */\n const backoffTimeMs = this.backoffTimeIterator.next().value;\n debugging('connection') && console.debug(this.debugLabel, 'trying again in', Number(backoffTimeMs / 1000).toFixed(2), 'seconds');\n const boSleepPromise = a$sleepMs(backoffTimeMs);\n boSleepIntr = boSleepPromise.intr;\n await boSleepPromise;\n boSleepIntr = false;\n }\n\n /* The only way we get here is for us to discover that the connection is unconnectable - eg \n * reject timer has expired or similar.\n */\n cleanup_ctt();\n throw new DCPError(quitMsg, 'DCPC-1016', quitCode);\n }\n\n /**\n * Method which must be invoked whenever a new transport needs to be assigned to the connection. \n * If we have previously adopted a transport, we close it first, which will prevent \n * [by transport definition] any message handlers from firing, even if the old transport instance\n * has buffered traffic.\n *\n * @param {object} transport transport instance\n */\n adopt(transport)\n {\n if (this.transport)\n this.transport.close();\n\n transport.on('message', (m) => this.handleMessage(m));\n transport.on('end', () => this.transportDisconnectHandler(transport));\n transport.on('close', () => this.transportDisconnectHandler(transport));\n \n this.transport = transport;\n }\n \n \n /**\n * Method that gets invoked when there is a new transport available for adoption.\n * This will adjust the state of the connection, adopt the transport then tell\n * the sender to pump the message queue.\n * @param {object} transport transport instance \n */\n useNewTransport(transport)\n {\n if (this.state.in(['closing', 'close-wait', 'closed']))\n {\n debugging('connection') && console.debug(`${this.debugLabel} got a new transport during closing. 
closing the new transport and not completing transport adoption.`)\n transport.close();\n return;\n }\n \n var preDisconnectState = (!this.dcpsid || !this.peerAddress) ? 'connecting' : 'established';\n if (this.state.isNot(preDisconnectState))\n this.state.set(['connecting', 'disconnected'], preDisconnectState);\n this.adopt(transport);\n this.emit('connect', this.url); // UI hint: \"internet available\" \n this.sender.notifyTransportReady();\n }\n \n /**\n * Method that must be invoked by the target to \"accept\" a new DCP Connection request\n * at the transport layer.\n * @param {object} transport \n */\n async accept(transport)\n {\n assert(!this.role);\n await this.a$assumeRole(role.target);\n this.state.set('initial', 'connecting');\n this.adopt(transport);\n }\n \n /**\n * This method is invoked by the target when it has handled the initial connect request from\n * the initiator, which contains the peerAddress and the first half of the dcpsid (the second half is\n * populated by receiver.handleFirstRequest before being passed here). It transitions the connection \n * into an established state at the protocol level.\n * Note - this is really not the right design for this, but it is invoked from handleFirstRequest\n * in ./receiver.js\n *\n * @param {string} dcpsid dcpsid\n * @param {wallet.Address} peerAddress Address of peer\n */\n establishTarget(dcpsid, peerAddress) {\n assert(this.role === role.target);\n \n this.connectResponseId = Symbol('dummy'); // un-register ConnectResponse\n this.peerAddress = peerAddress;\n if (this.dcpsid)\n throw new DCPError(`Reached impossible state in connection.js; dcpsid already specified ${this.dcpsid}!=${dcpsid} (${this.url})`, 'DCPC-1005');\n this.emit('session', (this.dcpsid = dcpsid));\n debugging() && console.debug(this.debugLabel, 'dcpsid is', dcpsid);\n\n this.loggableDest = this.role === role.initiator ? this.url : peerAddress;\n this.state.set('connecting', 'established'); /* established => dcpsid has been set */\n\n debugging('connection') && console.debug(this.debugLabel, `Established session ${this.dcpsid} with ${this.peerAddress} for ${this.url}`);\n }\n\n /**\n * Check to see if the peer address conflicts with what we have in the global identity cache;\n * it does, throw an exception.\n */\n ensureIdentity (peerAddress, presharedPeerAddress)\n {\n let idc = getGlobalIdentityCache();\n let noConflict = idc.learnIdentity(this.url, peerAddress, presharedPeerAddress);\n\n if (!noConflict)\n throw new DCPError(`**** Security Error: Identity address ${peerAddress} does not match the saved key for ${this.url}`, 'DCPC-EADDRCHANGE');\n }\n \n \n /**\n * This method uses the first request (if we're target) or ack to the first request \n * (if we're initiator) to memoize the address of the peer authorized to send to us. \n * The first message must only be a request (since it comes from sender.specialFirstSend)\n * that has a connect operation, or an acknowledgement of the first request. 
\n * All future messages' owners will be validated against this authorized sender.\n * @param {Object} message \n */\n setAuthorizedSender(message)\n {\n if (message.body.type !== 'request' && message.body.type !== 'ack')\n throw new DCPError('First protocol message was not a request or ack', 'DCPC-1017');\n \n if (message.body.type === 'request' && message.body.payload.operation !== 'connect')\n throw new DCPError('First protocol message did not contain the correct payload', 'DCPC-1017');\n \n if (message.body.type === 'ack' && this.sender.inFlight.message.payload.operation !== 'connect')\n throw new DCPError('First protocol acknowledgement was not for connect request', 'DCPC-1017');\n \n this.authorizedSender = message.owner;\n }\n \n /**\n * Emits an error event with the relevant error and closes the connection immediately.\n * @param {string} errorMessage \n * @param {string} errorCode \n */\n \n handleValidationError(errorMessage, errorCode)\n {\n var messageError;\n \n debugging('connection') && console.debug(this.debugLabel, 'Message failed validation -', errorMessage);\n this.emit('error', (messageError = new DCPError(`message failed validation: ${errorMessage}`, errorCode)))\n this.close(messageError, true);\n }\n\n /**\n * This method validates the message owner, signature and body before passing it onto\n * either the receiver (for a request, response or batch) or the messageLedger (for an ack).\n * If it's a request, response or batch, this method also provokes the connection to \n * send an acknowledgement (ack) to the peer to let them know we got their message.\n * XXXwg this code needs an audit re error handling: what message error should we be emitting?\n * why do we keep working after we find an error?\n * XXXsc did some auditing. we happy now?\n * @param {string} JSON-encoded unvalidated message object\n */\n async handleMessage (messageJSON) {\n var validation;\n var message;\n\n if (this.state.is('closed')) {\n debugging('connection') && console.debug(this.debugLabel, 'handleMessage was called on a closed connection.');\n return;\n }\n\n try\n {\n message = typeof messageJSON === 'object' ? 
messageJSON : JSON.parse(messageJSON);\n debugging('wire') && console.debug(this.debugLabel, `handleMessage: ${String(message && message.body && message.body.type).padEnd(10, ' ')} <- ${this.loggableDest}`);\n }\n catch(error)\n {\n console.error('connection::handleMessage received unparseable message from peer:', error);\n this.emit('error', error);\n return;\n }\n \n /**\n * We always ack a duplicate transmission.\n * This must happen before validation since during startup we may lack a\n * nonce or dcpsid (depending on whether initiator or target + race).\n */\n if (this.isDuplicateTransmission(message)) {\n debugging('connection') && console.debug(this.debugLabel, 'duplicate message:', message.body);\n debugging('wire') && console.debug(this.debugLabel, `dup message ack: ${String(message.body.type).padEnd(10, ' ')} -> ${this.loggableDest}`);\n\n this.sendAck(this.lastAckSigned) \n return;\n }\n\n debugging('connection') && console.debug(this.debugLabel, `received message ${message.body.type} ${message.body.id}; nonce=`, message.body.nonce);\n \n validation = this.validateMessageDCPSID(message);\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, 'ENODCPSID');\n return;\n }\n\n validation = this.validateMessageOwner(message)\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, 'EINVAL');\n return;\n }\n \n validation = this.validateMessageSignature(message);\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, 'EINVAL');\n return;\n }\n\n validation = this.validateMessageBody(message);\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, validation.errorCode || 'EINVAL'); /* messages of type 'unhandled-message' may contain more information about the failure */\n return;\n }\n \n if (message.body.type === \"ack\") {\n const ack = new this.Ack(message.body);\n this.messageLedger.handleAck(ack);\n return;\n } else if (message.body.type !== 'unhandled-message') {\n this.lastMessage = message;\n await this.ackMessage(message);\n }\n \n this.receiver.handleMessage(message);\n }\n\n \n /**\n * This method takes either a Request, Response or Batch, creates an ack for it\n * and sends it to the peer. 
This ack contains the nonce we expect on the next\n * message from peer.\n * @param {Connection.Message} message \n */\n async ackMessage(message) {\n debugging('connection') && console.debug(this.debugLabel, 'acking message of type: ', message.body.type);\n const ack = new this.Ack(message);\n const signedMessage = await ack.sign(this.identity);\n\n debugging('wire') && console.debug(this.debugLabel, `ackMessage: ${String(message.body.type).padEnd(10, ' ')} -> ${this.loggableDest}`);\n\n this.sendAck(signedMessage);\n this.lastAck = ack;\n this.lastAckSigned = signedMessage;\n }\n\n /**\n * Checks if the batch we just received has the same nonce\n * as the most-recently received batch.\n * @param {object} messageJSON\n */\n isDuplicateTransmission(messageJSON) {\n return this.lastMessage && this.lastMessage.body.nonce && this.lastMessage.body.nonce === messageJSON.body.nonce;\n }\n\n /**\n * Validate that the message came from the appropriate sender.\n * @param {Object} message the message to validate\n * @returns {Object} returns an object `ret` with either ret.success = true, \n * or ret.success = false accompanied by another property ret.errorMessage\n */\n validateMessageOwner(message)\n {\n if (!this.authorizedSender)\n {\n /* Capture the initial identity of the remote end during the connect operation */\n this.setAuthorizedSender(message);\n return { success: true }\n }\n else if (message.owner !== this.authorizedSender)\n {\n return { success: false, errorMessage: \"message came from unauthorized sender\" }\n }\n return { success: true }\n }\n \n /**\n * Validate that the signature was generated from this message body\n * @param {Object} message\n * @returns {Object} with properties 'success' and 'errorMessage'. When the message is valid on its \n * face, the success property is true, otherwise it is is false. When it is false,\n * the errorMessage property will be a string explaining why.\n */\n validateMessageSignature(message)\n {\n if (!message.signature) {\n debugging('connection') && console.warn(\"Message does not have signature, aborting connection\");\n return { success: false, errorMessage: \"message is missing signature\" };\n }\n \n const owner = new wallet.Address(message.owner);\n const signatureValid = owner.verifySignature(message.body, message.signature);\n\n if (!signatureValid)\n {\n debugging('connection') && console.warn(\"Message has an invalid signature, aborting connection\");\n return { success: false, errorMessage: \"invalid message signature\" };\n }\n\n return { success: true };\n }\n \n validateMessageDCPSID(message)\n {\n if (this.dcpsid !== null && message.dcpsid)\n {\n if (message.dcpsid !== this.dcpsid)\n {\n debugging('connection') && console.warn('Message has an invalid dcpsid, aborting connection');\n return { success: false, errorMessage: 'message has an invalid dcpsid' };\n }\n }\n \n return { success: true };\n }\n /**\n * This method is used to perform validation on all types of messages.\n * It validates the DCPSID, nonce, and the peerAddress.\n * @param {Object} message\n * @returns {Object} with properties 'success' and 'errorMessage'. When the message is valid on its \n * face, the success property is true, otherwise it is is false. When it is false,\n * the errorMessage property will be a string explaining why.\n *\n */\n validateMessageBody(message)\n {\n try\n {\n if (message.body.type === 'unhandled-message')\n {\n /* This special message type may not have a dcpsid, peerAddress, etc., so it might not\n * validate. 
It is also not a \"real\" message and only used to report ConnectionManager routing \n * errors, so we just report here, drop it, and close the connection.\n *\n * Note also that this is probably the wrong way to handle this case - restarting daemons - but\n * that is a problem for another day. /wg nov 2021\n */\n debugging('connection') && console.warn(this.debugLabel, \"Target Error - target could not process message.\", JSON.stringify(message.body),\n \"Aborting connection.\");\n return { success: false, errorMessage: `target could not process message (${message.body.payload && message.body.payload.message || 'unknown error'})`, errorCode: message.body.payload && message.body.payload.code}\n }\n if (this.peerAddress && !this.peerAddress.eq(message.owner))\n {\n debugging('connection') && console.warn(this.debugLabel,\n \"Received message's owner address does not match peer address, aborting connection\\n\",\n \"(owner addr)\", message.owner, '\\n',\n \"(peer addr)\", this.peerAddress);\n return { success: false, errorMessage: \"received message owner does not match peer address\" };\n }\n\n if (this.state.in(['established', 'closing', 'close-wait']) && message.body.type !== 'unhandled-message')\n {\n const body = message.body;\n\n assert(this.peerAddress); /* should be set in connect */\n /**\n * Security note:\n * We don't require the dcpsid to match on an ack because the connect response\n * ack doesn't have a dcpsid until after it is processed. Also ack's are protected\n * by ack tokens and signatures, so this doesn't leave a hole, just an inconsistency.\n */\n if (body.type !== 'ack' && body.dcpsid !== this.dcpsid)\n {\n debugging('connection') && console.warn(this.debugLabel,\n \"Received message's DCPSID does not match, aborting connection\\n\",\n \"Message owner:\", message.owner, '\\n',\n \"(ours)\", this.dcpsid, (Date.now() - this.connectTime)/1000, \"seconds after connecting - state:\", this.state._, \"\\n\", \n \"(theirs)\", body.dcpsid);\n if(body.dcpsid.substring(0, body.dcpsid.length/2) !== this.dcpsid.substring(0, this.dcpsid.length/2)){\n debugging('connection') && console.warn(\" Left half of both DCPSID is different\");\n }\n if(body.dcpsid.substring(body.dcpsid.length/2 + 1, body.dcpsid.length) !== this.dcpsid.substring(this.dcpsid.length/2 + 1, body.dcpsid.length)){\n debugging('connection') && console.warn(\" Right half of both DCPSID is different\");\n }\n return { success: false, errorMessage: \"DCPSID do not match\" };\n }\n /* can get close in middle of connecting, which will have no nonce.*/\n if (body.type !== 'ack' && this.lastAck.nonce !== body.nonce && (body.payload && body.payload.operation !== 'close'))\n {\n /* When Target sends back ConnectionLessErrorResponse, it uses the nonce of the message that caused an error. */\n if (this.sender.inFlight && this.sender.inFlight.message.nonce === body.nonce)\n {\n debugging('connection') && console.debug(`${this.debugLabel} Received messages nonce matches nonce of our current inFlight message.`,\n \"There was a problem sending this message. Aborting connection. 
Reason:\\n\", body.payload);\n return { success: false, errorMessage: \"current inflight message returned an error\" }\n }\n debugging('connection') && console.warn(this.debugLabel,\"Received message's nonce does not match expected nonce, aborting connection\\n\");\n debugging('connection') && console.debug(this.debugLabel, this.lastAck.nonce, body.nonce);\n return { success: false, errorMessage: \"received message's nonce does not match expected nonce\" };\n }\n if (body.type === 'request') \n {\n if (body.payload.validity.time === undefined)\n return { success: false, errorMessage: 'received message does not have a valid time in its payload' };\n }\n }\n\n return { success: true };\n }\n catch(error)\n {\n console.error('message validator failure:', error);\n return { success: false, errorMessage: 'validator exception ' + error.message };\n }\n\n return { success: false, errorMessage: 'impossible code reached' }; // eslint-disable-line no-unreachable\n }\n\n /**\n * Targets Only.\n * The receiver creates a special connect response and the connection\n * needs to know about it to get ready for the ack. See `isWaitingForAck`.\n * @param {Message} message message we are sending out and waiting to\n * ack'd, probably a batch containing the response.\n */\n registerConnectResponse(message) {\n this.connectResponseId = message.id;\n }\n\n /**\n * Targets only\n * During the connection process a target sends a connect\n * response to an initiator and the initiator will ack it. Since transports\n * are not tightly coupled, we have no authoritative way to route the ack back\n * to the right connection. So a connection briefly registers the ack it\n * is looking for in this case. It will formally validate the ack after routing.\n * @param {string} messageId id of the message this ack is acknowledging.\n */\n isWaitingForAck(messageId) {\n return messageId === this.connectResponseId;\n }\n\n /**\n * Put connection into close-wait state so that a call to `close`\n * in this state will *not* trigger sending a `close` message to the peer.\n * Then call close.\n *\n * @note: This function is called when the remote end of the transport sends\n * a close command, from receiver::handleOperation. This impllies that\n * that we must be in established or later state.\n */\n closeWait (errorCode = null)\n {\n var preCloseState, reason;\n \n debugging('connection') && console.debug(this.debugLabel, `responding to close. state=closeWait dcpsid=${this.dcpsid}`);\n\n if (this.state.is('closed'))\n {\n debugging('connection') && console.debug(this.debugLabel, `remote asked us to close a closed connection; dcpsid=${this.dcpsid}`);\n return;\n }\n\n // continue with close in either case\n reason = `Received close from peer with Error Code ${errorCode}`;\n if (this.role === role.target)\n reason += ` (${this.url})`;\n else\n reason += ` (${this.debugLabel}${this.peerAddress.address})`;\n\n reason = new DCPError(reason, errorCode || 'DCPC-1011');\n\n // If we're already closing, wait for it to complete then resolve\n // WARNING: any place we transition to closing or close-wait, we MUST guarantedd\n // that 'end' will be emitted, or this code will hang forever!\n if (this.state.in(['close-wait', 'closing'])) {\n return new Promise((resolve) => {\n this.once('end', resolve) /* eventually fired by doClose elsewhere */\n });\n }\n\n /* XXXwg - this should only be established->close-wait. Why more? 
*/\n this.state.set(['disconnected', 'connecting', 'established'], 'close-wait');\n \n /* Set preCloseState to close-wait so doClose doesn't send a close message back */\n preCloseState = this.state.valueOf();\n return this.doClose(preCloseState, reason, true);\n }\n\n /**\n * This method will begin closing the protocol connection. It transitions\n * the protocol into the correct state, and then begins the work of closing.\n * \n * @param {string|Error} [reason] Either an Error or a message to use in the Error that will reject pending sends.\n * @param {boolean} [immediate] When true, the connection will not deliver any pending messages and instead\n * immediately send the peer a 'close' request. \n *\n * @return a Promise which resolves when the connection has been confirmed closed and the end event has been fired.\n */\n close (reason='requested', immediate=false)\n {\n if (this.state.is('initial'))\n {\n this.state.set('initial', 'closed');\n this.emit('close'); /* Don't emit dcpsid */\n }\n if (this.state.is('closed')) return Promise.resolve();\n\n const preCloseState = this.state.valueOf();\n debugging('connection') && \n console.debug(this.debugLabel, \n `close; dcpsid=${this.dcpsid} state=${preCloseState} immediate=${immediate} reason:`, reason);\n\n // If we're already closing, wait for it to complete then resolve\n if (this.state.in(['close-wait', 'closing'])) {\n return new Promise((resolve) => {\n this.once('end', resolve)\n });\n }\n\n this.state.set(['connecting', 'established', 'disconnected'], 'closing');\n\n // Perform actual work of closing\n return this.doClose(preCloseState, reason, immediate);\n }\n\n /**\n * Sends close message to peer after sending all pending messages.\n * Note that close messages are sent without the expectation of a response.\n * @param {DCPError|string} reason reason for closing\n */\n async sendCloseGracefully(reason) \n {\n debugging('connection') && console.debug(`${this.debugLabel} gracefully sending close message to peer with reason ${reason}`)\n let errorCode = reason instanceof DCPError ? reason.code : 'DCPC-1011';\n \n /* This only resolves when close is next message in queue */\n const closeMessage = await this.prepare('close', { errorCode: errorCode });\n this.sendPreparedMessage(closeMessage);\n this.messageLedger.fulfillMessagePromise(closeMessage.message.id, {});\n }\n \n /**\n * Sends close message to peer immediately. Pending messages will not be sent.\n * Note that close messages are sent without expectation of response.\n * @param {DCPError|string} reason reason for closing\n */\n async sendCloseImmediately(reason)\n {\n debugging('connection') && console.debug(`${this.debugLabel} immediately sending close message to peer with reason ${reason}`);\n let errorCode = reason instanceof DCPError ? reason.code : 'DCPC-1011';\n \n /* Last param being `true` means that prepareMessage will return unsigned message. Does not queue message. */\n const closeMessage = await this.prepare('close', { errorCode: errorCode }, true);\n \n if (this.sender.inFlight)\n closeMessage.nonce = this.sender.inFlight.message.nonce;\n else\n closeMessage.nonce = this.sender.nonce;\n \n let signedCloseMessage = await closeMessage.sign();\n \n /* Overwrite the in-flight message because we don't care to deliver pending messages */\n this.sender.inFlight = { message: closeMessage, signedMessage: signedCloseMessage };\n this.sender.sendInFlightMessage();\n }\n \n /**\n * This method performs the core close functionality. 
It appropriately sends the close message\n * to the peer, fails any pending transmissions, shuts down our sender and underlying transport\n * and puts us into the 'closed' state, indicating this connection object is now useless.\n * When called from closeWait, it does not send a close message.\n * @param {string} preCloseState the state that the connection was in at the start of the\n * invocation of close() or closeWait()\n *\n * @note: this function is not reentrant due to closeGracefully\n */\n async doClose(preCloseState, reason, immediate) {\n const dcpsid = this.dcpsid;\n var rejectErr;\n\n try\n {\n // Emit the close event the moment we know we are going to close, \n // so we can catch the close event and reopen the connection\n //\n // This implies that no API functions which call doClose may await between\n // their invocation and their call to doClose!\n this.emit('close', dcpsid /* should be undefined in initial state */);\n\n assert(this.state.in(['closing', 'close-wait']));\n if (preCloseState === 'established' && this.transport) {\n try {\n if (immediate) {\n await this.sendCloseImmediately(reason);\n } else {\n await this.sendCloseGracefully(reason);\n }\n } catch(e) {\n debugging() && console.warn(`Warning: could not send close message to peer. connectionid=${this.id}, dcpsid=,${this.dcpsid}, url=${this.url ? this.url.href : 'unknown url'} - (${e.message})`);\n }\n }\n\n // can delete these now that we've sent the close message\n this.dcpsid = null;\n this.peerAddress = null;\n\n if (reason instanceof DCPError)\n rejectErr = reason;\n else\n {\n let errorMessage = reason instanceof Error ? reason : `Connection to ${this.loggableDest} closed (${reason})`;\n rejectErr = new DCPError(errorMessage, 'DCPC-1013');\n }\n \n // Reject any pending transmissions in the message ledger\n this.messageLedger.failAllTransmissions(rejectErr);\n \n if (this.transport)\n {\n try { this.sender.shutdown(); }\n catch(e) { debugging() && console.warn(this.debugLabel, `Warning: could not shutdown sender; dcpsid=,${dcpsid}`, e); }\n \n try { this.transport.close(); delete this.transport; }\n catch(e) { debugging() && console.warn(this.debugLabel, `Warning: could not close transport; dcpsid=,${dcpsid}`, e); }\n }\n } catch(error) {\n debugging() && console.warn(this.debugLabel, `could not close connection; dcpsid=${dcpsid}, url=${this.url ? this.url.href : 'unknown url'}:`, error);\n }\n finally\n {\n this.state.set(['closing', 'close-wait'], 'closed');\n this.emit('end'); /* end event resolves promises on other threads for closeWait and close (ugh) */\n }\n }\n/**\n * Prepares a non-batchable message that can be sent directly over the wire. Returns when\n * the message has been signed and is ready to be sent. The connection will not be able to send \n * any messages until the prepared message here is either sent or discarded. If 'canBatch = true',\n * will return the unsigned message instead. In this case, enqueuing is handled by\n * `async Connection.send()`, allowing the message to be put in a batch before being signed.\n * @param {...any} messageData Data to build message with. 
Format is:\n * `operation {string}, \n * data {Object} (optional),\n * identity {wallet.Keystore} (optional),\n * canBatch {boolean} (optional)`\n * @returns {Promise<Object>} a promise which resolves to { message, signedMessage }\n */\n\n async prepare(...messageData)\n {\n if (this.state.isNot('established'))\n {\n await this.connect().catch((e) => {\n if (e.code !== 'DCPC-1015') /* If we're closed already, then swallow the error */\n { \n this.close(e, true);\n throw e;\n }\n });\n }\n \n \n let signedMessage, message = messageData[0];\n let canBatch = false;\n \n if (typeof messageData[messageData.length - 1] === 'boolean')\n canBatch = messageData.pop();\n \n if (!message.id)\n {\n message = this.Request.buildMessage(...messageData);\n }\n \n debugging('connection') && console.debug(`${this.debugLabel} Created message ${message.id}.`);\n \n message.ackToken = this.sender.makeAckToken();\n message.batchable = canBatch;\n \n if (canBatch)\n return Promise.resolve(message);\n \n debugging('connection') && console.debug(`${this.debugLabel} Preparing message ${message.id} for sending...`); \n const messageWithNonce = await new Promise((resolve) =>\n {\n // This event is fired in the sender by serviceQueue() when the message is at the top of the queue\n // and has a nonce it can sign with. At this point, we may return the prepared message.\n this.once(`${message.id} ready`, (message) => resolve(message))\n \n this.sender.queue.push(message)\n this.sender.requestQueueService()\n })\n \n signedMessage = await messageWithNonce.sign();\n \n debugging('connection') && console.debug(`${this.debugLabel} Finished preparing message. ${message.id} is ready to be sent.`);\n \n return { message: messageWithNonce, signedMessage: signedMessage };\n }\n\n /**\n * Sends a message to the connected peer. If the connection has not yet been established,\n * this routine will first invoke this.connect(). If the first argument has a 'signedMessage'\n * property, the message is assumed to be prepared and is sent immediately. If not, and the first\n * argument does not have an 'id' property, it will be sent to `async prepare()`, and then put\n * in the message queue.\n * \n * @param {...any} args 3 forms:\n * [operation]\n * [operation, data]\n * [operation, data, identity]\n * @returns {Promise<Response>} a promise which resolves to a response.\n */\n async send(...args)\n {\n if (!this.state.is('established'))\n await this.connect().catch((e) =>\n {\n if (e.code !== 'DCPC-1015') /* If we're closed already, then swallow the error */\n { \n this.close(e, true);\n throw e;\n }\n });\n\n let message = args[0];\n // ie. already prepared\n if (message.signedMessage)\n return this.sendPreparedMessage(message);\n \n // ie. message not hyrdated or is a response, which needs ack token\n if (!message.id || message.type === 'response')\n message = await this.prepare(...args, true);\n\n if (this.state.in(['closing', 'close-wait', 'closed']))\n throw new DCPError(`Connection (${this.id}) is ${this.state}; cannot send. 
(${this.loggableDest})`, 'DCPC-1001');\n \n return this.sender.enqueue(message);\n }\n \n /**\n * Set the sender's flight deck with the given message and send it.\n * Can only be passed a prepared message, which is a promise that only\n * resolves to a message when it is signed with the nonce, so it must\n * be the next message to be sent (or discarded).\n * @param {Object} messageObject\n * @returns {Promise<Response>} \n */\n sendPreparedMessage(messageObject)\n {\n if (!messageObject.signedMessage) return;\n \n const { message, signedMessage } = messageObject;\n assert(!this.sender.inFlight);\n this.sender.inFlight = { message: message, signedMessage: signedMessage };\n const messageSentPromise = this.messageLedger.addMessage(message);\n this.sender.sendInFlightMessage();\n \n return messageSentPromise;\n }\n \n /**\n * Send a signed ack directly over the wire. If we get a SocketIO.Send: Not Connected error, \n * wait until we're connected and then resend the ack.\n * @param {String} ack \n */\n sendAck(ack)\n {\n try\n {\n this.transport.send(ack)\n }\n catch(error)\n {\n // Transport was lost\n if (error.code === 'DCPC-1105')\n this.once('connect', () => this.sendAck(ack));\n else\n console.error(`${this.debugLabel} Error acking message to ${this.loggableDest}: ${error}`);\n }\n }\n \n /**\n * Discard a prepared message by removing it from the queue.\n * Returns nonce to sender and provokes queue service.\n * @param {Object} messageObject { message, signedMessage } message to discard \n */\n discardMessage(messageObject)\n {\n let { message } = messageObject;\n this.sender.nonce = message.nonce;\n delete message.nonce;\n message.type = 'unhandled-message';\n this.sender.requestQueueService();\n }\n\n /**\n * This routine returns the current time for the purposes of\n * populating the Request message payload.validity.time property.\n * \n * @returns {Number} the integer number of seconds which have elapsed since the epoch\n */\n currentTime() {\n let msSinceEpoch;\n if (this.hasNtp) {\n msSinceEpoch = Date.now();\n } else {\n const msSinceLastReceipt = performance.now() - this.receiver.lastResponseTiming.receivedMs;\n msSinceEpoch = this.receiver.lastResponseTiming.time * 1000 + msSinceLastReceipt;\n }\n return Math.floor(msSinceEpoch / 1000);\n }\n\n /**\n * This method sends a keepalive to the peer, and resolves when the response has been received.\n */\n keepalive() {\n return this.send('keepalive');\n }\n}\n\n/** \n * Determine if we got the scheduler config from a secure source, eg https or local disk.\n * We assume tha all https transactions have PKI-CA verified.\n *\n * @note protocol::getSchedulerConfigLocation() is populated via node-libs/config.js or dcp-client/index.js\n *\n * @returns true or falsey\n */\nfunction determineIfSecureConfig()\n{\n var schedulerConfigLocation = (__webpack_require__(/*! 
dcp/protocol-v4 */ \"./src/protocol-v4/index.js\").getSchedulerConfigLocation)();\n var schedulerConfigSecure;\n\n if (schedulerConfigLocation && (schedulerConfigLocation.protocol === 'https:' || schedulerConfigLocation.protocol === 'file:'))\n {\n debugging('strict-mode') && console.debug(`scheduler config location ${schedulerConfigLocation} is secure`); /* from casual eavesdropping */\n schedulerConfigSecure = true;\n }\n\n if (isDebugBuild)\n {\n debugging('strict-mode') && console.debug('scheduler config location is always secure for debug builds');\n schedulerConfigSecure = 'debug';\n }\n\n debugging('strict-mode') && console.debug(`Config Location ${schedulerConfigLocation} is ${!schedulerConfigSecure ? 'not secure' : 'secure-' + schedulerConfigSecure}`);\n return schedulerConfigSecure;\n}\n\n/**\n * Determine if a URL is secure by examinining the protocol, connection, and information about the \n * process; in particular, we try to determine if the dcp config was securely provided, because if \n * it wasn't, then we can't have a secure location, since the origin could be compromised.\n * \n * \"Secure\" in this case means \"secure against casual eavesdropping\", and this information should only\n * be used to refuse to send secrets over the transport or similar.\n *\n * @returns true or falsey\n */\nfunction determineIfSecureLocation(conn)\n{\n var isSecureConfig = determineIfSecureConfig();\n var secureLocation;\n\n if (!isSecureConfig) /* can't have a secure location without a secure configuration */\n return null;\n \n if (isDebugBuild || conn.url.protocol === 'https:' || conn.url.protocol === 'tcps:')\n secureLocation = true;\n else if (conn.role === role.initiator && conn.target.hasOwnProperty('friendLocation') && conn.url === conn.target.friendLocation)\n secureLocation = true;\n else if (conn.options.allowUnencryptedSecrets)\n secureLocation = 'override';\n else\n secureLocation = false;\n\n debugging('strict-mode') && console.debug(`Location ${conn.url} is ${!secureLocation ? 'not secure' : 'secure-' + secureLocation}`);\n \n return secureLocation;\n}\nexports.Connection = Connection;\n\n\n//# sourceURL=webpack://dcp/./src/protocol-v4/connection/connection.js?");
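For orientation, the file above documents the initiator-side lifecycle in its JSDoc: a Connection is constructed against a target URL, connect() (implied by send()) establishes a session, send() resolves with the peer's response, and close() resolves once the 'end' event fires, with 'session', 'connect', 'disconnect', and 'error' events emitted along the way. The sketch below is a minimal, hypothetical usage example assembled from those comments; the require path and scheduler URL are assumptions that do not appear in this bundle, and the on() listener interface is assumed from the EventEmitter base class.

// Hypothetical usage sketch; the require path and URL are illustrative assumptions.
const { Connection } = require('dcp/protocol-v4/connection/connection');

async function pingScheduler()
{
  const conn = new Connection('https://scheduler.example.com/'); /* no idKeystore => ephemeral identity */

  conn.on('session',    (dcpsid) => console.log('session established:', dcpsid));
  conn.on('disconnect', (url)    => console.log('transport lost, reconnecting:', url));
  conn.on('error',      (error)  => console.error('connection error:', error));

  try
  {
    await conn.connect();                          /* optional; send() connects on demand */
    const response = await conn.send('keepalive'); /* same operation keepalive() wraps */
    console.log('keepalive response:', response);
  }
  finally
  {
    await conn.close('done');                      /* graceful close; resolves after 'end' */
  }
}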
4650
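Both copies of a$connectToTargetTransport describe the same retry strategy: try each configured transport in turn, fast-fail on a handful of HTTP statuses, and otherwise sleep with interruptible exponential backoff before the next pass so that the overall connect-timeout never has to wait out the current sleep. The sketch below re-implements that pattern under stated assumptions: makeBackoffIterator, interruptibleSleep, and tryTransport are illustrative stand-ins, not the bundle's makeEBOIterator, a$sleepMs, or a$connectWithTransport, and the overall timeout and fast-fail bookkeeping are omitted for brevity.

// Illustrative re-implementation (assumption) of the exponential-backoff iterator.
function* makeBackoffIterator(initialMs, maxMs)
{
  let delayMs = initialMs;
  for (;;)
  {
    yield delayMs;
    delayMs = Math.min(delayMs * 2, maxMs);  /* double the delay up to the cap */
  }
}

// Sleep that can be cut short via the .intr hook on the returned promise.
function interruptibleSleep(ms)
{
  let timer, wake;
  const promise = new Promise((resolve) => { wake = resolve; timer = setTimeout(resolve, ms); });
  promise.intr = () => { clearTimeout(timer); wake(); };
  return promise;
}

// tryTransport(name) is a hypothetical stand-in resolving to { success, transport }.
async function connectWithBackoff(transportNames, tryTransport)
{
  const backoff = makeBackoffIterator(500, 60000);

  for (;;)
  {
    for (const name of transportNames)
    {
      const { success, transport } = await tryTransport(name);
      if (success)
        return transport;                    /* connected at the transport level */
    }
    await interruptibleSleep(backoff.next().value); /* back off, then try them all again */
  }
}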
+ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file protocol/connection/connection.js\n * @author Ryan Rossiter\n * @author KC Erb\n * @author Wes Garland\n * @date January 2020, Feb 2021, Mar 2022\n *\n * A Connection object represents a connection to another DCP entity. \n * A DCP connection may 'live' longer than the underlying protocol's connection,\n * and the underlying protocol connection (or, indeed, protocol) may change\n * throughout the life of the DCP connection.\n * \n * DCP connections are uniquely identified by the DCP Session ID, specified by\n * the dcpsid property, present in every message body. This session id negotiated during connection,\n * with the initiator and target each providing half of the string.\n *\n * Connection instance events:\n * - session: dcpsid new session established\n * - connect: url UI hint: internet available\n * - disconnect: url UI hint: internet not available\n * - readyStateChange: *** DO NOT USE **\n * - error: error emitted when errors happen that would otherwise go uncaught\n * - close: connection instance is closing\n * - end: Connection instance is closed\n * - send: msgObj when a message is sent to the peer; does not wait for ack; may re-trigger on reconnect\n * - ready: when the connection is ready for traffic (constructor promises resolved)\n *\n * State Transition Diagram for Connection.state:\n *\n * initial connecting established disconnected close-wait closing closed\n * ===========================================================================================================================\n * |-- i:connect ---->\n * |-- t:accept ----->\n * |-- t:establishTarget -->\n * |-- i:connect ---------->\n * |-- transportDisconnectHandler -->\n * <-- i:_reconnect ----------------------------------------|\n * |-i:useNewTransport-->\n * <-- t:useNewTransport --------|\n * |-- closeWait ----------------------------------------------------------->\n * |-- closeWait ----------------------------------->\n * |-- closeWait -->\n * |-- doClose --------------->\n * |-- close ------------------------------------------------------------------------------------------------------------> \n * |-- close ---------------------------------------------------------------------------->\n * |-- close ---------------------------------------------------->\n * |-- close ------------------->\n * |-- doClose -->\n *\n *\n * Not until the established state can we count on things like a dcpsid, \n * peerAddress, identityPromise resolution and so on.\n * \n * Error Codes relevant to DCP Connections:\n * DCPC-1001 - CONNECTION CANNOT SEND WHEN IN CLOSING, CLOSE-WAIT OR CLOSED\n * EINVAL - MESSAGE OWNER IS INVALID (formerly DCPC-1002)\n * MESSAGE SIGNATURE INVALID (formerly DCPC-1003)\n * MESSAGE BODY IS INVALID (formerly DCPC-1004)\n * DCPC-1005 - TRYING TO ESTABLISH TARGET AFTER TARGET ALREADY ESTABLISHED\n * DCPC-1006 - CONNECTION COULD NOT BE ESTABLISHED WITHIN 30 SECONDS\n * DCPC-1007 - RECEIVED MESSAGE PAYLOAD BEFORE CONNECT OPERATION\n * DCPC-1008 - TARGET RESPONDED WITH INVALID DCPSID\n * DCPC-1009 - MESSAGE IS OF UNKNOWN TYPE\n * DCPC-1010 - DUPLICATE TRANSMISSION RECEIPT\n * DCPC-1011 - DEFAULT ERROR CODE WHEN PEER SENDS CLOSE MESSAGE\n * DCPC-1012 - TRIED TO INITIATE CONNECTION AFTER SESSION ALREADY ESTABLISHED\n * DCPC-1013 - DEFAULT ERROR CODE WHEN CLOSING WITH REASON THATS NOT INSTANCE OF DCPERROR\n * DCPC-1014 - NO TRANSPORTS AVAILABLE\n * 
DCPC-1015 - CANNOT CONNECT WHEN CONNECTION ALREADY CLOSED\n * DCPC-1016 - ERROR CONNECTING VIA AVAILABLE TRANSPORTS\n * DCPC-1017 - FIRST PROTOCOL MESSAGE WAS DID NOT INVOLVE INITIAL CONNECT REQUEST\n * DCPC-1018 - INVALID ARGUMENT PROVIDED IN PLACE OF IDKEYSTORE\n * ENODCPSID - CONNECTION INSTANCE TRIED TO RE-CONNECT TO A TARGET WHICH DOES NOT HAVE A RECORD OF THAT SESSION\n */\n\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp');\nconst dcpEnv = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { leafMerge, a$sleepMs } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\n\nconst { Transport } = __webpack_require__(/*! ../transport */ \"./src/protocol-v4/transport/index.js\");\nconst { Sender } = __webpack_require__(/*! ./sender */ \"./src/protocol-v4/connection/sender.js\");\nconst { Receiver } = __webpack_require__(/*! ./receiver */ \"./src/protocol-v4/connection/receiver.js\");\nconst { MessageLedger } = __webpack_require__(/*! ./message-ledger */ \"./src/protocol-v4/connection/message-ledger.js\");\nconst { getGlobalIdentityCache } = __webpack_require__(/*! ./identity-cache */ \"./src/protocol-v4/connection/identity-cache.js\");\nconst { makeEBOIterator, setImmediateN, setImmediate } = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\n\nconst { ConnectionMessage } = __webpack_require__(/*! ./connection-message */ \"./src/protocol-v4/connection/connection-message.js\");\nconst { ConnectionRequest } = __webpack_require__(/*! ./request */ \"./src/protocol-v4/connection/request.js\");\nconst { ConnectionResponse } = __webpack_require__(/*! ./response */ \"./src/protocol-v4/connection/response.js\");\nconst { ConnectionBatch } = __webpack_require__(/*! ./batch */ \"./src/protocol-v4/connection/batch.js\");\nconst { ConnectionAck } = __webpack_require__(/*! ./ack */ \"./src/protocol-v4/connection/ack.js\");\nconst { ErrorPayloadCtorFactory } = __webpack_require__(/*! ./error-payload */ \"./src/protocol-v4/connection/error-payload.js\");\nconst { role } = __webpack_require__(/*! ./connection-constants */ \"./src/protocol-v4/connection/connection-constants.js\");\n\nconst isDebugBuild = (__webpack_require__(/*! dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug';\nlet nanoid;\nif (dcpEnv.platform === 'nodejs') {\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n nanoid = requireNative('nanoid').nanoid;\n} else {\n nanoid = (__webpack_require__(/*! 
nanoid */ \"./node_modules/nanoid/index.browser.js\").nanoid);\n}\n\n\nlet globalConnectionId = 0;\nvar _msgId = 0;\n\nconst CONNECTION_STATES = [\n 'initial',\n 'connecting', /* initiator: establish first transport instance connection; target: listening */\n 'established',\n 'disconnected', /* connection is still valid, but underlying transport is no longer connected */\n 'close-wait', /* Target of close message is in this state until response is acknowledged */\n 'closing',\n 'closed',\n]\n\nclass Connection extends EventEmitter {\n static get VERSION() {\n return '5.1.0'; // Semver format\n }\n\n static get VERSION_COMPATIBILITY() {\n return '^5.0.0'; // Semver format, can be a range\n }\n\n /**\n * Connection form 2:\n * @constructor\n * @param {object} [target]\n * @param {Promise} idKsPromise A promise which resolves to the identity keystore described\n * in form 1\n * @param {object} [options]\n * @see form 1\n */\n /**\n * Connection form 1\n * Create a DCP Connection object. This object could represent either the initiator or \n * target end of a connection, until it is specialized by either invoke the connect()\n * or accept() methods. Note that send() invokes connect() internally if not in an established\n * state.\n * @constructor\n * @note Connection objects exist for the lifetime of a given DCP connection \n * (session), whether or not the underlying transport (eg internet protocol) is connected or not. Once \n * the DCP session has ended, this object has no purpose and is not reusable.\n * \n * @param {object} target Object (usually a dcpConfig fragment) describing the target.\n * This object may contain the following properties; 'location' is\n * mandatory:\n * - location: a URL or DcpURL that is valid from the Internet\n * - friendLocation: a DcpURL that is valid from an intranet; if\n * both location and friendLocation specified, the best one will\n * be chosen by examining IP addresses (assuming an IP bearer).\n * - identity: an object with an address property which is a promise\n * that resolves to an instance of wallet.Address which represents\n * to the target's identity; this overrides the initiator's \n * identity cache unless options.strict is truey.\n * \n * @param {Keystore} [idKeystore] The keystore used to sign messages; used for non-repudiation.\n * If not specified, a dynamically-generated keystore will be used.\n * \n * @param {object} [options] Extra connection options that aren't defined via dcpConfig.dcp.connectionOptions.\n * These options include:\n * - identityUnlockTimeout: Number of (floating-point) seconds to leave the identity \n * keystore unlocked between invocations of Connection.send\n *\n * @returns instance of Connection that is specific to a target URL but not a role\n */\n constructor(target, idKeystore, options = {})\n {\n super('Connection');\n this.id = ++globalConnectionId;\n this.debugLabel = `connection(g:${this.id}):`;\n\n /* polymorphism strategy: rewrite to (configFragment, idksPromise, options) */\n if (target instanceof DcpURL)\n target = { location: target };\n else if (DcpURL.isURL(target))\n target = { location: new DcpURL(target) };\n else if (target instanceof String || typeof target === 'string')\n target = { location: new DcpURL(target) };\n assert(typeof target === 'object', target.location);\n\n /* idKeystore is always resolved by the time a session is established. 
*/\n if (!idKeystore)\n this.identityPromise = wallet.getId();\n else if (idKeystore instanceof Promise)\n this.identityPromise = idKeystore;\n else if (idKeystore instanceof wallet.Keystore)\n this.identityPromise = Promise.resolve(idKeystore); \n else if (idKeystore instanceof wallet.Address)\n this.identityPromise = Promise.resolve(new wallet.Keystore(idKeystore, '')); \n else\n throw new DCPError('Invalid argument provided for IdKeystore', 'DCPC-1018');\n\n this.identityPromise.then((keystore) => {\n /* This always happens by the time a role is assumed */\n delete this.identityPromise;\n this.identity = keystore;\n this.emit('ready');\n debugging('connection') && console.debug(this.debugLabel, 'identity is', this.identity.address);\n });\n\n this.target = target;\n this.url = this.target.location;\n \n // Init internal state / vars\n this.state = new Synchronizer(CONNECTION_STATES[0], CONNECTION_STATES);\n // DO NOT USE this.state.on('change', (s) => this.emit('readyStateChange', s) );\n\n this.dcpsid = null;\n this.peerAddress = null;\n this.transport = null;\n this.messageLedger = new MessageLedger(this);\n this.authorizedSender = null;\n this.preDisconnectState = null;\n \n this.Message = ConnectionMessage(this);\n this.Request = ConnectionRequest(this.Message);\n this.Response = ConnectionResponse(this.Message);\n this.Batch = ConnectionBatch(this.Message);\n this.Ack = ConnectionAck(this.Message);\n this.ErrorPayload = ErrorPayloadCtorFactory(this);\n this.connectTime = Date.now();\n\n this.receiver = new Receiver(this);\n this.sender = new Sender(this);\n \n debugging('connection') && console.debug(this.debugLabel, `new connection#${this.id}; ${this.url}`);\n\n /* Create a connection config as this.options which takes into\n * account system defaults and overrides for specific urls, origins, etc.\n */\n this.options = leafMerge(\n ({ /* hardcoded defaults insulate us from missing web config */\n 'connectTimeout': 90,\n 'lingerTimeout': 1800,\n 'allowBatch': true,\n 'maxMessagesPerBatch': 100,\n 'identityUnlockTimeout': 300,\n 'ttl': {\n 'min': 15,\n 'max': 600,\n 'default': 120\n },\n 'transports': [ 'socketio' ],\n }),\n dcpConfig.dcp.connectionOptions.default,\n this.url && dcpConfig.dcp.connectionOptions[this.url.hostname],\n this.url && dcpConfig.dcp.connectionOptions[this.url.origin],\n options\n );\n\n /* draw out errors quickly in dev */\n if ((process.env.DCP_NETWORK_CONFIG_BUILD || dcpConfig.build) === 'debug')\n {\n this.options.maxMessagesPerBatch /= 10;\n \n /* short timeouts and debuggers don't get along well */\n if (dcpEnv.platform === 'nodejs' && !(requireNative('module')._cache.niim instanceof requireNative('module').Module))\n {\n this.options.connectTimeout /= 10;\n this.options.lingerTimeout /= 20;\n this.options.identityUnlockTimeout /= 10;\n }\n }\n\n assert(this.options.identityUnlockTimeout > 0);\n assert(this.options.connectTimeout > 0);\n assert(this.options.lingerTimeout > 0);\n assert(typeof this.options.ttl.min === 'number');\n assert(typeof this.options.ttl.max === 'number');\n assert(typeof this.options.ttl.default === 'number');\n \n this.backoffTimeIterator = makeEBOIterator(500, dcpConfig.build === 'debug' ? 5000 : 60000); /** XXXwg make this configurable */\n\n this.secureLocation = determineIfSecureLocation(this);\n this.loggableDest = '<generic>';\n }\n \n generateMessageId()\n {\n return `${this.id}-${_msgId++}-${Date.now()}-${nanoid()}`;\n }\n\n /**\n * Specialize an instance of Connection for either initiator or target behaviour. 
Once specialized,\n * the role cannot be changed. This happens based on methods invoked; connect() or accept() cause\n * the change.\n *\n * This specialization also implies that the connection is fully ready for use, including resolution\n * of the identity promise if necessary. This is perhaps not the best place to ensure that, but it\n * provides a reliable - and already async - waypoint to observe that event.\n */\n async a$assumeRole(myRole)\n {\n assert(myRole === role.initiator || myRole === role.target);\n\n if (this.role === myRole)\n return;\n this.role = myRole;\n \n if (this.role === role.target)\n {\n this.debugLabel = `connection(t:${this.id}):`;\n this.sender.debugLabel = `sender(t#${this.id}):`;\n this.messageLedger.debugLabel = `message-ledger(t#${this.id}):`;\n this.loggableDest = '<target>';\n this.hasNtp = true;\n }\n else\n {\n this.debugLabel = `connection(i:${this.id}):`;\n this.sender.debugLabel = `sender(i#${this.id}):`;\n this.messageLedger.debugLabel = `message-ledger(i#${this.id}):`;\n this.loggableDest = this.url.href;\n this.hasNtp = false;\n }\n\n debugging('connection') && console.debug(this.debugLabel, `connection #${this.id} is ${this.role} for ${this.url}`);\n if (!this.identity)\n {\n assert(this.identityPromise);\n debugging('connection') && console.debug(this.debugLabel, `waiting for identity resolution`);\n await this.identityPromise;\n }\n }\n\n /**\n * API to establish a DCP connection. Implied by send().\n *\n * When invoked by the initiator, this method establishes the connection by connecting\n * to the target url provided to the constructor.\n */\n async connect() // eslint-disable-line require-await\n {\n if (this.role == role.target)\n return;\n \n if (!this.role)\n await this.a$assumeRole(role.initiator);\n \n if (this.state.is('initial'))\n {\n if (!this.connectPromise)\n this.connectPromise = Promise.race([this.connectTimer(), this.a$_connect()]).then(() => { clearTimeout(this.connectTimeoutPromise); delete this.connectPromise });\n return this.connectPromise;\n }\n\n if (this.state.is('disconnected'))\n {\n if (!this.connectPromise)\n this.connectPromise = this.a$_reconnect().then(() => delete this.connectPromise);\n return this.connectPromise;\n }\n \n if (this.state.is('connecting'))\n {\n assert(this.connectPromise);\n return this.connectPromise;\n }\n\n if (this.state.is('established'))\n return;\n \n if (this.state.in(['closed', 'close-wait', 'closing']))\n throw new DCPError('Connection already closed', 'DCPC-1015');\n\n throw new Error('impossible');\n }\n\n /**\n * Performs a reconnection for connections which are in the disconnected state, and\n * tries to send any in-flight or enqueued messages as soon as that happens.\n */\n async a$_reconnect()\n {\n var transport;\n\n this.state.testAndSet('disconnected', 'connecting');\n try\n {\n do\n {\n transport = await this.a$connectToTargetTransport();\n } while (!transport && (this.transport && !this.transport.ready()) && !this.state.in(['closed', 'close-wait', 'closing']));\n\n debugging('connection') && console.debug(this.debugLabel, `reconnected via transport ${transport.socket.id}`);\n \n this.useNewTransport(transport);\n }\n catch (error)\n {\n if (error.code !== 'DCPC-1016' && error.code !== 'DCPC-1015')\n {\n /* Unreached unless there are bugs. 
*/\n throw error;\n } \n this.close(error, true);\n }\n }\n\n connectTimer()\n {\n return new Promise((_resolve, reject) =>\n {\n this.connectTimeoutPromise = setTimeout(() =>\n { \n reject(new Error('Failed to establish connection within 30 seconds'));\n }, 30000);\n if (typeof this.connectTimeoutPromise.unref === 'function')\n this.connectTimeoutPromise.unref();\n });\n }\n \n async a$_connect()\n { \n var presharedPeerAddress, establishResults;\n var targetIdentity = await this.target.identity;\n var transport;\n \n assert(this.role === role.initiator);\n\n this.state.set('initial', 'connecting');\n do\n {\n transport = await this.a$connectToTargetTransport().catch((error) =>\n {\n debugging('connection') && console.debug(`${this.debugLabel} error connecting to target on transport layer:`, error);\n return { ready: () => {return false} };\n });\n } while(!transport.ready());\n this.adopt(transport);\n\n establishResults = await this.sender.establish().catch(error => {\n debugging('connection') && console.debug(this.debugLabel, `Could not establish DCP session ${this.transport ? 'over' + this.transport.name : '. Transport establishment was not complete'}:`, error);\n this.close(error, true);\n throw error;\n });\n const peerAddress = new wallet.Address(establishResults.peerAddress);\n const dcpsid = establishResults.dcpsid;\n debugging('connection') && console.debug(this.debugLabel, 'dcpsid is', dcpsid);\n \n if (!this.options.strict && targetIdentity && determineIfSecureConfig())\n {\n if ( false\n || typeof targetIdentity !== 'object'\n || typeof targetIdentity.address !== 'object'\n || !(targetIdentity.address instanceof wallet.Address))\n targetIdentity = { address: new wallet.Address(targetIdentity) }; /* map strings and Addresses to ks ducks */\n\n presharedPeerAddress = targetIdentity.address;\n debugging('connection') && console.debug(this.debugLabel, 'Using preshared peer address', presharedPeerAddress);\n }\n this.ensureIdentity(peerAddress, presharedPeerAddress);\n\n /* At this point, new session is valid & security checks out - make Connection instance usable */\n this.peerAddress = peerAddress;\n if (this.dcpsid)\n throw new DCPError(`Reached impossible state in connection.js; dcpsid already specified ${this.dcpsid} (${this.url})`, 'DCPC-1012');\n\n this.state.set('connecting', 'established'); /* established => dcpsid has been set */\n this.emit('session', (this.dcpsid = dcpsid));\n this.emit('connect', this.url);\n this.sender.notifyTransportReady();\n return Promise.resolve();\n }\n\n /**\n * unreference any objects entrained by this connection so that it does not prevent\n * the node program from exiting naturally.\n */\n unref()\n {\n if (this.connectAbortTimer && this.connectAbortTimer.unref && dcpEnv.platform === 'nodejs')\n this.connectAbortTimer.unref();\n }\n\n /**\n * Method is invoked when the transport disconnects. Transport instance is responsible for its own\n * finalization; Connection instance is responsible for finding a new transport, resuming the\n * connection, and retransmitting any in-flight message.\n *\n * @param {object} transport the transport instance that triggered this handler. In some cases, it\n * is possible that this event is not serviced until after the connection\n * has already acquired a new transport instance, e.g. in a Target where\n * the initiator switched networks. 
This implies that it is possible for\n * more 'connect' events to be emitted than 'disconnect' events.\n */\n transportDisconnectHandler(transport)\n {\n try\n { \n if (this.state.in(['disconnected', 'closing', 'close-wait', 'closed'])) /* transports may fire this more than once */\n return;\n\n if (transport !== this.transport) /* event no longer relevant */\n return;\n\n if (this.transport)\n {\n transport.close();\n delete this.transport;\n }\n \n if (this.state.is('established'))\n {\n this.state.set('established', 'disconnected');\n this.emit('disconnect', this.url); /* UI hint: \"internet unavailable\" */\n debugging('connection') && console.debug(this.debugLabel, `Transport disconnected from ${this.url}; ${this.sender.inFlight ? 'have' : 'no'} in-flight message`);\n \n if (!this.dcpsid) /* hopefully impossible? */\n {\n debugging('connection') && console.debug(this.debugLabel, 'Not reconnecting - no session');\n return;\n }\n }\n \n if (this.role === role.target)\n {\n /* targets generally can't reconnect due to NAT */\n debugging('connection') && console.debug(this.debugLabel, `Waiting for initiator to reconnect for ${this.dcpsid}`);\n return;\n }\n \n if (this.dcpsid && !this.sender.inFlight && this.options.onDemand)\n {\n debugging('connection') && console.debug(this.debugLabel, `Not reconnecting ${this.dcpsid} until next message`);\n return;\n }\n \n if (this.state.is('connecting') && (!this.dcpsid || !this.peerAddress))\n {\n debugging('connection') && console.debug(this.debugLabel, `Disconnected while connecting, establishing transport and re-sending connect request.`);\n this.a$_reconnect();\n return;\n }\n\n /* At this point, we initiate a (re)connect attempt because either\n * - we haven't connected yet,\n * - we have something to send, or\n * - we are not an on-demand connection\n */\n if (!this.state.is('connecting'))\n this.connect();\n }\n catch(error)\n {\n debugging('connection') && console.debug(error);\n this.close(error, true);\n \n if (error.code !== 'DCPC-1016' && error.code !== 'DCPC-1015')\n {\n /* Unreached unless there are bugs. */\n throw error;\n }\n }\n }\n \n /**\n * Initiators only\n *\n * Connect to a target at the transport level.\n * - Rejects when we give up on all transports.\n * - Resolves with a transport instance when we connect to one.\n *\n * The connection attempt will keep a node program \"alive\" while it is happening.\n * The `autoUnref` connectionOption and unref() methods offer ways to make this not\n * happen.\n */\n async a$connectToTargetTransport()\n {\n const that = this;\n const availableTransports = [].concat(this.options.transports);\n var quitMsg = false; /* not falsey => reject asap, value is error message */\n var quitCode = undefined;\n var boSleepIntr; /* if not falsey, a function that interrupts the backoff sleep */\n var transportConnectIntr; /* if not falsey, a function that interrupts the current connection attempt */\n\n // Already trying to connect to target, don't try multiple times until we've aborted one attempt\n if (this.connectAbortTimer)\n return;\n \n /* This timer has the lifetime of the entire connection attempt. 
When we time out,\n * we set the quitMsg to get the retry loop to quit, then we interrupt the timer so\n * that we don't have to wait for the current backoff to expire before we notice, and\n * we expire the current attempt to connect right away as well.\n */\n this.connectAbortTimer = setTimeout(() => {\n quitMsg = 'connection timeout';\n if (boSleepIntr) boSleepIntr();\n if (transportConnectIntr) transportConnectIntr();\n }, this.options.connectTimeout * 1000);\n\n if (this.options.autoUnref)\n this.unref();\n\n /* cleanup code called on return/throw */\n function cleanup_ctt()\n {\n clearTimeout(that.connectAbortTimer);\n delete that.connectAbortTimer;\n }\n\n /* Connect to target with a specific transport. */\n /* Resolves with { bool success, obj transport } or rejects with { error } if the transport cannot connect*/\n function a$connectWithTransport(transportName)\n { \n transportConnectIntr = false;\n\n return new Promise((connectWithTransport_resolve, connectWithTransport_reject) => { \n const TransportClass = Transport.require(transportName);\n const transport = new TransportClass(that.target, that.options[transportName]);\n var ret = { transport };\n\n function cleanup_cwt()\n {\n for (let eventName of transport.eventNames())\n for (let listener of transport.listeners(eventName))\n transport.off(eventName, listener);\n }\n \n /* In the case where we have a race condition in the transport implementation, arrange things\n * so that we resolve with whatever fired last if we have a double-fire on the same pass of \n * the event loop.\n */\n transport.on('connect', () => { cleanup_cwt(); ret.success=true; connectWithTransport_resolve(ret) });\n transport.on('error', (error) => { cleanup_cwt(); connectWithTransport_reject(error) });\n transport.on('connect-failed', (error) => {\n cleanup_cwt();\n ret.success = false;\n ret.error = error;\n debugging() && console.debug(`Error connecting to ${that.url};`, error);\n connectWithTransport_resolve(ret);\n });\n \n /* let the connectAbortTimer interrupt this connect attempt */\n transportConnectIntr = () => { transport.close() };\n });\n }\n \n if (availableTransports.length === 0)\n {\n cleanup_ctt();\n return Promise.reject(new DCPError('no transports defined', 'DCPC-1014'));\n }\n \n /* Loop while trying each available transport in turn. 
Sleep with exponential backoff between runs */\n while (!quitMsg)\n {\n for (let transportName of availableTransports)\n {\n try\n {\n const { success, error, transport } = await a$connectWithTransport(transportName);\n\n /* Have connected to the remote at the transport level - OUT */\n if (success === true)\n {\n transportConnectIntr = false;\n cleanup_ctt();\n \n return transport;\n }\n\n /* Fast-fail: certain - but few - HTTP status codes let us know that this (or any) transport\n * will never work, so don't try those again.\n */\n if (error && error.httpStatus)\n {\n switch(error.httpStatus)\n {\n case 301: case 302: case 303: case 307: case 308:\n debugging('connection') && console.debug(this.debugLabel, `HTTP status ${error.httpStatus}; won't try again with ${transportName}`);\n availableTransports.splice(availableTransports.indexOf(transportName), 1);\n break;\n case 400: case 403: case 404:\n debugging('connection') && console.debug(this.debugLabel, `HTTP status ${error.httpStatus}; won't try again.`);\n quitMsg = error.message;\n quitCode = 'HTTP_' + error.httpStatus || 0;\n break;\n default:\n debugging('connection') && console.debug(this.debugLabel, `HTTP status ${error.httpStatus}; will try again with ${transportName}`);\n break;\n }\n }\n }\n catch (impossibleError)\n {\n /* transport connection attempts should never throw. */\n debugging('connection') && console.debug(this.debugLabel, `Error connecting to ${this.url} with ${transportName}; won't try again:`, impossibleError);\n availableTransports.splice(availableTransports.indexOf(transportName), 1);\n }\n }\n \n if (availableTransports.length === 0)\n {\n quitMsg = 'all transports exhausted';\n break;\n }\n \n /* Go to (interruptible) sleep for a while before trying again */\n const backoffTimeMs = this.backoffTimeIterator.next().value;\n debugging('connection') && console.debug(this.debugLabel, 'trying again in', Number(backoffTimeMs / 1000).toFixed(2), 'seconds');\n const boSleepPromise = a$sleepMs(backoffTimeMs);\n boSleepIntr = boSleepPromise.intr;\n await boSleepPromise;\n boSleepIntr = false;\n }\n\n /* The only way we get here is for us to discover that the connection is unconnectable - eg \n * reject timer has expired or similar.\n */\n cleanup_ctt();\n throw new DCPError(quitMsg, 'DCPC-1016', quitCode);\n }\n\n /**\n * Method which must be invoked whenever a new transport needs to be assigned to the connection. \n * If we have previously adopted a transport, we close it first, which will prevent \n * [by transport definition] any message handlers from firing, even if the old transport instance\n * has buffered traffic.\n *\n * @param {object} transport transport instance\n */\n adopt(transport)\n {\n if (this.transport)\n this.transport.close();\n\n transport.on('message', (m) => this.handleMessage(m));\n transport.on('end', () => this.transportDisconnectHandler(transport));\n transport.on('close', () => this.transportDisconnectHandler(transport));\n \n this.transport = transport;\n }\n \n \n /**\n * Method that gets invoked when there is a new transport available for adoption.\n * This will adjust the state of the connection, adopt the transport then tell\n * the sender to pump the message queue.\n * @param {object} transport transport instance \n */\n useNewTransport(transport)\n {\n if (this.state.in(['closing', 'close-wait', 'closed']))\n {\n debugging('connection') && console.debug(`${this.debugLabel} got a new transport during closing. 
closing the new transport and not completing transport adoption.`)\n transport.close();\n return;\n }\n \n var preDisconnectState = (!this.dcpsid || !this.peerAddress) ? 'connecting' : 'established';\n if (this.state.isNot(preDisconnectState))\n this.state.set(['connecting', 'disconnected'], preDisconnectState);\n this.adopt(transport);\n this.emit('connect', this.url); // UI hint: \"internet available\" \n this.sender.notifyTransportReady();\n }\n \n /**\n * Method that must be invoked by the target to \"accept\" a new DCP Connection request\n * at the transport layer.\n * @param {object} transport \n */\n async accept(transport)\n {\n assert(!this.role);\n await this.a$assumeRole(role.target);\n this.state.set('initial', 'connecting');\n this.adopt(transport);\n }\n \n /**\n * This method is invoked by the target when it has handled the initial connect request from\n * the initiator, which contains the peerAddress and the first half of the dcpsid (the second half is\n * populated by receiver.handleFirstRequest before being passed here). It transitions the connection \n * into an established state at the protocol level.\n * Note - this is really not the right design for this, but it is invoked from handleFirstRequest\n * in ./receiver.js\n *\n * @param {string} dcpsid dcpsid\n * @param {wallet.Address} peerAddress Address of peer\n */\n establishTarget(dcpsid, peerAddress) {\n assert(this.role === role.target);\n \n this.connectResponseId = Symbol('dummy'); // un-register ConnectResponse\n this.peerAddress = peerAddress;\n if (this.dcpsid)\n throw new DCPError(`Reached impossible state in connection.js; dcpsid already specified ${this.dcpsid}!=${dcpsid} (${this.url})`, 'DCPC-1005');\n this.emit('session', (this.dcpsid = dcpsid));\n debugging() && console.debug(this.debugLabel, 'dcpsid is', dcpsid);\n\n this.loggableDest = this.role === role.initiator ? this.url : peerAddress;\n this.state.set('connecting', 'established'); /* established => dcpsid has been set */\n\n debugging('connection') && console.debug(this.debugLabel, `Established session ${this.dcpsid} with ${this.peerAddress} for ${this.url}`);\n }\n\n /**\n * Check to see if the peer address conflicts with what we have in the global identity cache;\n * it does, throw an exception.\n */\n ensureIdentity (peerAddress, presharedPeerAddress)\n {\n let idc = getGlobalIdentityCache();\n let noConflict = idc.learnIdentity(this.url, peerAddress, presharedPeerAddress);\n\n if (!noConflict)\n throw new DCPError(`**** Security Error: Identity address ${peerAddress} does not match the saved key for ${this.url}`, 'DCPC-EADDRCHANGE');\n }\n \n \n /**\n * This method uses the first request (if we're target) or ack to the first request \n * (if we're initiator) to memoize the address of the peer authorized to send to us. \n * The first message must only be a request (since it comes from sender.specialFirstSend)\n * that has a connect operation, or an acknowledgement of the first request. 
\n * All future messages' owners will be validated against this authorized sender.\n * @param {Object} message \n */\n setAuthorizedSender(message)\n {\n if (message.body.type !== 'request' && message.body.type !== 'ack')\n throw new DCPError('First protocol message was not a request or ack', 'DCPC-1017');\n \n if (message.body.type === 'request' && message.body.payload.operation !== 'connect')\n throw new DCPError('First protocol message did not contain the correct payload', 'DCPC-1017');\n \n if (message.body.type === 'ack' && this.sender.inFlight.message.payload.operation !== 'connect')\n throw new DCPError('First protocol acknowledgement was not for connect request', 'DCPC-1017');\n \n this.authorizedSender = message.owner;\n }\n \n /**\n * Emits an error event with the relevant error and closes the connection immediately.\n * @param {string} errorMessage \n * @param {string} errorCode \n */\n \n handleValidationError(errorMessage, errorCode)\n {\n var messageError;\n \n debugging('connection') && console.debug(this.debugLabel, 'Message failed validation -', errorMessage);\n this.emit('error', (messageError = new DCPError(`message failed validation: ${errorMessage}`, errorCode)))\n this.close(messageError, true);\n }\n\n /**\n * This method validates the message owner, signature and body before passing it onto\n * either the receiver (for a request, response or batch) or the messageLedger (for an ack).\n * If it's a request, response or batch, this method also provokes the connection to \n * send an acknowledgement (ack) to the peer to let them know we got their message.\n * XXXwg this code needs an audit re error handling: what message error should we be emitting?\n * why do we keep working after we find an error?\n * XXXsc did some auditing. we happy now?\n * @param {string} JSON-encoded unvalidated message object\n */\n async handleMessage (messageJSON) {\n var validation;\n var message;\n\n if (this.state.is('closed')) {\n debugging('connection') && console.debug(this.debugLabel, 'handleMessage was called on a closed connection.');\n return;\n }\n\n try\n {\n message = typeof messageJSON === 'object' ? 
messageJSON : JSON.parse(messageJSON);\n debugging('wire') && console.debug(this.debugLabel, `handleMessage: ${String(message && message.body && message.body.type).padEnd(10, ' ')} <- ${this.loggableDest}`);\n }\n catch(error)\n {\n console.error('connection::handleMessage received unparseable message from peer:', error);\n this.emit('error', error);\n return;\n }\n \n /**\n * We always ack a duplicate transmission.\n * This must happen before validation since during startup we may lack a\n * nonce or dcpsid (depending on whether initiator or target + race).\n */\n if (this.isDuplicateTransmission(message)) {\n debugging('connection') && console.debug(this.debugLabel, 'duplicate message:', message.body);\n debugging('wire') && console.debug(this.debugLabel, `dup message ack: ${String(message.body.type).padEnd(10, ' ')} -> ${this.loggableDest}`);\n\n this.sendAck(this.lastAckSigned) \n return;\n }\n\n debugging('connection') && console.debug(this.debugLabel, `received message ${message.body.type} ${message.body.id}; nonce=`, message.body.nonce);\n \n validation = this.validateMessageDCPSID(message);\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, 'ENODCPSID');\n return;\n }\n\n validation = this.validateMessageOwner(message)\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, 'EINVAL');\n return;\n }\n \n validation = this.validateMessageSignature(message);\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, 'EINVAL');\n return;\n }\n\n validation = this.validateMessageBody(message);\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, validation.errorCode || 'EINVAL'); /* messages of type 'unhandled-message' may contain more information about the failure */\n return;\n }\n \n if (message.body.type === \"ack\") {\n const ack = new this.Ack(message.body);\n this.messageLedger.handleAck(ack);\n return;\n } else if (message.body.type !== 'unhandled-message') {\n this.lastMessage = message;\n await this.ackMessage(message);\n }\n \n this.receiver.handleMessage(message);\n }\n\n \n /**\n * This method takes either a Request, Response or Batch, creates an ack for it\n * and sends it to the peer. 
This ack contains the nonce we expect on the next\n * message from peer.\n * @param {Connection.Message} message \n */\n async ackMessage(message) {\n debugging('connection') && console.debug(this.debugLabel, 'acking message of type: ', message.body.type);\n const ack = new this.Ack(message);\n const signedMessage = await ack.sign(this.identity);\n\n debugging('wire') && console.debug(this.debugLabel, `ackMessage: ${String(message.body.type).padEnd(10, ' ')} -> ${this.loggableDest}`);\n\n this.sendAck(signedMessage);\n this.lastAck = ack;\n this.lastAckSigned = signedMessage;\n }\n\n /**\n * Checks if the batch we just received has the same nonce\n * as the most-recently received batch.\n * @param {object} messageJSON\n */\n isDuplicateTransmission(messageJSON) {\n return this.lastMessage && this.lastMessage.body.nonce && this.lastMessage.body.nonce === messageJSON.body.nonce;\n }\n\n /**\n * Validate that the message came from the appropriate sender.\n * @param {Object} message the message to validate\n * @returns {Object} returns an object `ret` with either ret.success = true, \n * or ret.success = false accompanied by another property ret.errorMessage\n */\n validateMessageOwner(message)\n {\n if (!this.authorizedSender)\n {\n /* Capture the initial identity of the remote end during the connect operation */\n this.setAuthorizedSender(message);\n return { success: true }\n }\n else if (message.owner !== this.authorizedSender)\n {\n return { success: false, errorMessage: \"message came from unauthorized sender\" }\n }\n return { success: true }\n }\n \n /**\n * Validate that the signature was generated from this message body\n * @param {Object} message\n * @returns {Object} with properties 'success' and 'errorMessage'. When the message is valid on its \n * face, the success property is true, otherwise it is is false. When it is false,\n * the errorMessage property will be a string explaining why.\n */\n validateMessageSignature(message)\n {\n if (!message.signature) {\n debugging('connection') && console.warn(\"Message does not have signature, aborting connection\");\n return { success: false, errorMessage: \"message is missing signature\" };\n }\n \n const owner = new wallet.Address(message.owner);\n const signatureValid = owner.verifySignature(message.body, message.signature);\n\n if (!signatureValid)\n {\n debugging('connection') && console.warn(\"Message has an invalid signature, aborting connection\");\n return { success: false, errorMessage: \"invalid message signature\" };\n }\n\n return { success: true };\n }\n \n validateMessageDCPSID(message)\n {\n if (this.dcpsid !== null && message.dcpsid)\n {\n if (message.dcpsid !== this.dcpsid)\n {\n debugging('connection') && console.warn('Message has an invalid dcpsid, aborting connection');\n return { success: false, errorMessage: 'message has an invalid dcpsid' };\n }\n }\n \n return { success: true };\n }\n /**\n * This method is used to perform validation on all types of messages.\n * It validates the DCPSID, nonce, and the peerAddress.\n * @param {Object} message\n * @returns {Object} with properties 'success' and 'errorMessage'. When the message is valid on its \n * face, the success property is true, otherwise it is is false. When it is false,\n * the errorMessage property will be a string explaining why.\n *\n */\n validateMessageBody(message)\n {\n try\n {\n if (message.body.type === 'unhandled-message')\n {\n /* This special message type may not have a dcpsid, peerAddress, etc., so it might not\n * validate. 
It is also not a \"real\" message and only used to report ConnectionManager routing \n * errors, so we just report here, drop it, and close the connection.\n *\n * Note also that this is probably the wrong way to handle this case - restarting daemons - but\n * that is a problem for another day. /wg nov 2021\n */\n debugging('connection') && console.warn(this.debugLabel, \"Target Error - target could not process message.\", JSON.stringify(message.body),\n \"Aborting connection.\");\n return { success: false, errorMessage: `target could not process message (${message.body.payload && message.body.payload.message || 'unknown error'})`, errorCode: message.body.payload && message.body.payload.code}\n }\n if (this.peerAddress && !this.peerAddress.eq(message.owner))\n {\n debugging('connection') && console.warn(this.debugLabel,\n \"Received message's owner address does not match peer address, aborting connection\\n\",\n \"(owner addr)\", message.owner, '\\n',\n \"(peer addr)\", this.peerAddress);\n return { success: false, errorMessage: \"received message owner does not match peer address\" };\n }\n\n if (this.state.in(['established', 'closing', 'close-wait']) && message.body.type !== 'unhandled-message')\n {\n const body = message.body;\n\n assert(this.peerAddress); /* should be set in connect */\n /**\n * Security note:\n * We don't require the dcpsid to match on an ack because the connect response\n * ack doesn't have a dcpsid until after it is processed. Also ack's are protected\n * by ack tokens and signatures, so this doesn't leave a hole, just an inconsistency.\n */\n if (body.type !== 'ack' && body.dcpsid !== this.dcpsid)\n {\n debugging('connection') && console.warn(this.debugLabel,\n \"Received message's DCPSID does not match, aborting connection\\n\",\n \"Message owner:\", message.owner, '\\n',\n \"(ours)\", this.dcpsid, (Date.now() - this.connectTime)/1000, \"seconds after connecting - state:\", this.state._, \"\\n\", \n \"(theirs)\", body.dcpsid);\n if(body.dcpsid.substring(0, body.dcpsid.length/2) !== this.dcpsid.substring(0, this.dcpsid.length/2)){\n debugging('connection') && console.warn(\" Left half of both DCPSID is different\");\n }\n if(body.dcpsid.substring(body.dcpsid.length/2 + 1, body.dcpsid.length) !== this.dcpsid.substring(this.dcpsid.length/2 + 1, body.dcpsid.length)){\n debugging('connection') && console.warn(\" Right half of both DCPSID is different\");\n }\n return { success: false, errorMessage: \"DCPSID do not match\" };\n }\n /* can get close in middle of connecting, which will have no nonce.*/\n if (body.type !== 'ack' && this.lastAck.nonce !== body.nonce && (body.payload && body.payload.operation !== 'close'))\n {\n /* When Target sends back ConnectionLessErrorResponse, it uses the nonce of the message that caused an error. */\n if (this.sender.inFlight && this.sender.inFlight.message.nonce === body.nonce)\n {\n debugging('connection') && console.debug(`${this.debugLabel} Received messages nonce matches nonce of our current inFlight message.`,\n \"There was a problem sending this message. Aborting connection. 
Reason:\\n\", body.payload);\n return { success: false, errorMessage: \"current inflight message returned an error\" }\n }\n debugging('connection') && console.warn(this.debugLabel,\"Received message's nonce does not match expected nonce, aborting connection\\n\");\n debugging('connection') && console.debug(this.debugLabel, this.lastAck.nonce, body.nonce);\n return { success: false, errorMessage: \"received message's nonce does not match expected nonce\" };\n }\n if (body.type === 'request') \n {\n if (body.payload.validity.time === undefined)\n return { success: false, errorMessage: 'received message does not have a valid time in its payload' };\n }\n }\n\n return { success: true };\n }\n catch(error)\n {\n console.error('message validator failure:', error);\n return { success: false, errorMessage: 'validator exception ' + error.message };\n }\n\n return { success: false, errorMessage: 'impossible code reached' }; // eslint-disable-line no-unreachable\n }\n\n /**\n * Targets Only.\n * The receiver creates a special connect response and the connection\n * needs to know about it to get ready for the ack. See `isWaitingForAck`.\n * @param {Message} message message we are sending out and waiting to\n * ack'd, probably a batch containing the response.\n */\n registerConnectResponse(message) {\n this.connectResponseId = message.id;\n }\n\n /**\n * Targets only\n * During the connection process a target sends a connect\n * response to an initiator and the initiator will ack it. Since transports\n * are not tightly coupled, we have no authoritative way to route the ack back\n * to the right connection. So a connection briefly registers the ack it\n * is looking for in this case. It will formally validate the ack after routing.\n * @param {string} messageId id of the message this ack is acknowledging.\n */\n isWaitingForAck(messageId) {\n return messageId === this.connectResponseId;\n }\n\n /**\n * Put connection into close-wait state so that a call to `close`\n * in this state will *not* trigger sending a `close` message to the peer.\n * Then call close.\n *\n * @note: This function is called when the remote end of the transport sends\n * a close command, from receiver::handleOperation. This impllies that\n * that we must be in established or later state.\n */\n closeWait (errorCode = null)\n {\n var preCloseState, reason;\n \n debugging('connection') && console.debug(this.debugLabel, `responding to close. state=closeWait dcpsid=${this.dcpsid}`);\n\n if (this.state.is('closed'))\n {\n debugging('connection') && console.debug(this.debugLabel, `remote asked us to close a closed connection; dcpsid=${this.dcpsid}`);\n return;\n }\n\n // continue with close in either case\n reason = `Received close from peer with Error Code ${errorCode}`;\n if (this.role === role.target)\n reason += ` (${this.url})`;\n else\n reason += ` (${this.debugLabel}${this.peerAddress.address})`;\n\n reason = new DCPError(reason, errorCode || 'DCPC-1011');\n\n // If we're already closing, wait for it to complete then resolve\n // WARNING: any place we transition to closing or close-wait, we MUST guarantedd\n // that 'end' will be emitted, or this code will hang forever!\n if (this.state.in(['close-wait', 'closing'])) {\n return new Promise((resolve) => {\n this.once('end', resolve) /* eventually fired by doClose elsewhere */\n });\n }\n\n /* XXXwg - this should only be established->close-wait. Why more? 
*/\n this.state.set(['disconnected', 'connecting', 'established'], 'close-wait');\n \n /* Set preCloseState to close-wait so doClose doesn't send a close message back */\n preCloseState = this.state.valueOf();\n return this.doClose(preCloseState, reason, true);\n }\n\n /**\n * This method will begin closing the protocol connection. It transitions\n * the protocol into the correct state, and then begins the work of closing.\n * \n * @param {string|Error} [reason] Either an Error or a message to use in the Error that will reject pending sends.\n * @param {boolean} [immediate] When true, the connection will not deliver any pending messages and instead\n * immediately send the peer a 'close' request. \n *\n * @return a Promise which resolves when the connection has been confirmed closed and the end event has been fired.\n */\n close (reason='requested', immediate=false)\n {\n if (this.state.is('initial'))\n {\n this.state.set('initial', 'closed');\n this.emit('close'); /* Don't emit dcpsid */\n }\n if (this.state.is('closed')) return Promise.resolve();\n\n const preCloseState = this.state.valueOf();\n debugging('connection') && \n console.debug(this.debugLabel, \n `close; dcpsid=${this.dcpsid} state=${preCloseState} immediate=${immediate} reason:`, reason);\n\n // If we're already closing, wait for it to complete then resolve\n if (this.state.in(['close-wait', 'closing'])) {\n return new Promise((resolve) => {\n this.once('end', resolve)\n });\n }\n\n this.state.set(['connecting', 'established', 'disconnected'], 'closing');\n\n // Perform actual work of closing\n return this.doClose(preCloseState, reason, immediate);\n }\n\n /**\n * Sends close message to peer after sending all pending messages.\n * Note that close messages are sent without the expectation of a response.\n * @param {DCPError|string} reason reason for closing\n */\n async sendCloseGracefully(reason) \n {\n debugging('connection') && console.debug(`${this.debugLabel} gracefully sending close message to peer with reason ${reason}`)\n let errorCode = reason instanceof DCPError ? reason.code : 'DCPC-1011';\n \n /* This only resolves when close is next message in queue */\n const closeMessage = await this.prepare('close', { errorCode: errorCode });\n this.sendPreparedMessage(closeMessage);\n this.messageLedger.fulfillMessagePromise(closeMessage.message.id, {});\n }\n \n /**\n * Sends close message to peer immediately. Pending messages will not be sent.\n * Note that close messages are sent without expectation of response.\n * @param {DCPError|string} reason reason for closing\n */\n async sendCloseImmediately(reason)\n {\n debugging('connection') && console.debug(`${this.debugLabel} immediately sending close message to peer with reason ${reason}`);\n let errorCode = reason instanceof DCPError ? reason.code : 'DCPC-1011';\n \n /* Last param being `true` means that prepareMessage will return unsigned message. Does not queue message. */\n const closeMessage = await this.prepare('close', { errorCode: errorCode }, true);\n \n if (this.sender.inFlight)\n closeMessage.nonce = this.sender.inFlight.message.nonce;\n else\n closeMessage.nonce = this.sender.nonce;\n \n let signedCloseMessage = await closeMessage.sign();\n \n /* Overwrite the in-flight message because we don't care to deliver pending messages */\n this.sender.inFlight = { message: closeMessage, signedMessage: signedCloseMessage };\n this.sender.sendInFlightMessage();\n }\n \n /**\n * This method performs the core close functionality. 
It appropriately sends the close message\n * to the peer, fails any pending transmissions, shuts down our sender and underlying transport\n * and puts us into the 'closed' state, indicating this connection object is now useless.\n * When called from closeWait, it does not send a close message.\n * @param {string} preCloseState the state that the connection was in at the start of the\n * invocation of close() or closeWait()\n *\n * @note: this function is not reentrant due to closeGracefully\n */\n async doClose(preCloseState, reason, immediate) {\n const dcpsid = this.dcpsid;\n var rejectErr;\n\n try\n {\n // Emit the close event the moment we know we are going to close, \n // so we can catch the close event and reopen the connection\n //\n // This implies that no API functions which call doClose may await between\n // their invocation and their call to doClose!\n this.emit('close', dcpsid /* should be undefined in initial state */);\n\n assert(this.state.in(['closing', 'close-wait']));\n if (preCloseState === 'established' && this.transport) {\n try {\n if (immediate) {\n await this.sendCloseImmediately(reason);\n } else {\n await this.sendCloseGracefully(reason);\n }\n } catch(e) {\n debugging() && console.warn(`Warning: could not send close message to peer. connectionid=${this.id}, dcpsid=,${this.dcpsid}, url=${this.url ? this.url.href : 'unknown url'} - (${e.message})`);\n }\n }\n\n // can delete these now that we've sent the close message\n this.dcpsid = null;\n this.peerAddress = null;\n\n if (reason instanceof DCPError)\n rejectErr = reason;\n else\n {\n let errorMessage = reason instanceof Error ? reason : `Connection to ${this.loggableDest} closed (${reason})`;\n rejectErr = new DCPError(errorMessage, 'DCPC-1013');\n }\n \n // Reject any pending transmissions in the message ledger\n this.messageLedger.failAllTransmissions(rejectErr);\n \n if (this.transport)\n {\n try { this.sender.shutdown(); }\n catch(e) { debugging() && console.warn(this.debugLabel, `Warning: could not shutdown sender; dcpsid=,${dcpsid}`, e); }\n \n try { this.transport.close(); delete this.transport; }\n catch(e) { debugging() && console.warn(this.debugLabel, `Warning: could not close transport; dcpsid=,${dcpsid}`, e); }\n }\n } catch(error) {\n debugging() && console.warn(this.debugLabel, `could not close connection; dcpsid=${dcpsid}, url=${this.url ? this.url.href : 'unknown url'}:`, error);\n }\n finally\n {\n this.state.set(['closing', 'close-wait'], 'closed');\n this.emit('end'); /* end event resolves promises on other threads for closeWait and close (ugh) */\n }\n }\n/**\n * Prepares a non-batchable message that can be sent directly over the wire. Returns when\n * the message has been signed and is ready to be sent. The connection will not be able to send \n * any messages until the prepared message here is either sent or discarded. If 'canBatch = true',\n * will return the unsigned message instead. In this case, enqueuing is handled by\n * `async Connection.send()`, allowing the message to be put in a batch before being signed.\n * @param {...any} messageData Data to build message with. 
Format is:\n * `operation {string}, \n * data {Object} (optional),\n * identity {wallet.Keystore} (optional),\n * canBatch {boolean} (optional)`\n * @returns {Promise<Object>} a promise which resolves to { message, signedMessage }\n */\n\n async prepare(...messageData)\n {\n if (this.state.isNot('established'))\n {\n await this.connect().catch((e) => {\n if (e.code !== 'DCPC-1015') /* If we're closed already, then swallow the error */\n { \n this.close(e, true);\n throw e;\n }\n });\n }\n \n \n let signedMessage, message = messageData[0];\n let canBatch = false;\n \n if (typeof messageData[messageData.length - 1] === 'boolean')\n canBatch = messageData.pop();\n \n if (!message.id)\n {\n message = this.Request.buildMessage(...messageData);\n }\n \n debugging('connection') && console.debug(`${this.debugLabel} Created message ${message.id}.`);\n \n message.ackToken = this.sender.makeAckToken();\n message.batchable = canBatch;\n \n if (canBatch)\n return Promise.resolve(message);\n \n debugging('connection') && console.debug(`${this.debugLabel} Preparing message ${message.id} for sending...`); \n const messageWithNonce = await new Promise((resolve) =>\n {\n // This event is fired in the sender by serviceQueue() when the message is at the top of the queue\n // and has a nonce it can sign with. At this point, we may return the prepared message.\n this.once(`${message.id} ready`, (message) => resolve(message))\n \n // if we're enqueing a graceful close message, delay until one pass of the event loop so we can handle pending messages \n if (message.payload.operation === 'close')\n setImmediate(() => { this.sender.queue.push(message); }); \n else\n this.sender.queue.push(message)\n \n this.sender.requestQueueService();\n })\n \n signedMessage = await messageWithNonce.sign();\n \n debugging('connection') && console.debug(`${this.debugLabel} Finished preparing message. ${message.id} is ready to be sent.`);\n \n return { message: messageWithNonce, signedMessage: signedMessage };\n }\n\n /**\n * Sends a message to the connected peer. If the connection has not yet been established,\n * this routine will first invoke this.connect(). If the first argument has a 'signedMessage'\n * property, the message is assumed to be prepared and is sent immediately. If not, and the first\n * argument does not have an 'id' property, it will be sent to `async prepare()`, and then put\n * in the message queue.\n * \n * @param {...any} args 3 forms:\n * [operation]\n * [operation, data]\n * [operation, data, identity]\n * @returns {Promise<Response>} a promise which resolves to a response.\n */\n async send(...args)\n {\n if (!this.state.is('established'))\n await this.connect().catch((e) =>\n {\n if (e.code !== 'DCPC-1015') /* If we're closed already, then swallow the error */\n { \n this.close(e, true);\n throw e;\n }\n });\n\n let message = args[0];\n // ie. already prepared\n if (message.signedMessage)\n return this.sendPreparedMessage(message);\n \n // ie. message not hyrdated or is a response, which needs ack token\n if (!message.id || message.type === 'response')\n message = await this.prepare(...args, true);\n\n if (this.state.in(['closed']))\n throw new DCPError(`Connection (${this.id}) is ${this.state}; cannot send. 
(${this.loggableDest})`, 'DCPC-1001');\n \n return this.sender.enqueue(message);\n }\n \n /**\n * Set the sender's flight deck with the given message and send it.\n * Can only be passed a prepared message, which is a promise that only\n * resolves to a message when it is signed with the nonce, so it must\n * be the next message to be sent (or discarded).\n * @param {Object} messageObject\n * @returns {Promise<Response>} \n */\n sendPreparedMessage(messageObject)\n {\n if (!messageObject.signedMessage) return;\n \n const { message, signedMessage } = messageObject;\n assert(!this.sender.inFlight);\n this.sender.inFlight = { message: message, signedMessage: signedMessage };\n const messageSentPromise = this.messageLedger.addMessage(message);\n this.sender.sendInFlightMessage();\n \n return messageSentPromise;\n }\n \n /**\n * Send a signed ack directly over the wire. If we get a SocketIO.Send: Not Connected error, \n * wait until we're connected and then resend the ack.\n * @param {String} ack \n */\n sendAck(ack)\n {\n try\n {\n this.transport.send(ack)\n }\n catch(error)\n {\n // Transport was lost\n if (error.code === 'DCPC-1105')\n this.once('connect', () => this.sendAck(ack));\n else\n console.error(`${this.debugLabel} Error acking message to ${this.loggableDest}: ${error}`);\n }\n }\n \n /**\n * Discard a prepared message by removing it from the queue.\n * Returns nonce to sender and provokes queue service.\n * @param {Object} messageObject { message, signedMessage } message to discard \n */\n discardMessage(messageObject)\n {\n let { message } = messageObject;\n this.sender.nonce = message.nonce;\n delete message.nonce;\n message.type = 'unhandled-message';\n this.sender.requestQueueService();\n }\n\n /**\n * This routine returns the current time for the purposes of\n * populating the Request message payload.validity.time property.\n * \n * @returns {Number} the integer number of seconds which have elapsed since the epoch\n */\n currentTime() {\n let msSinceEpoch;\n if (this.hasNtp) {\n msSinceEpoch = Date.now();\n } else {\n const msSinceLastReceipt = performance.now() - this.receiver.lastResponseTiming.receivedMs;\n msSinceEpoch = this.receiver.lastResponseTiming.time * 1000 + msSinceLastReceipt;\n }\n return Math.floor(msSinceEpoch / 1000);\n }\n\n /**\n * This method sends a keepalive to the peer, and resolves when the response has been received.\n */\n keepalive() {\n return this.send('keepalive');\n }\n}\n\n/** \n * Determine if we got the scheduler config from a secure source, eg https or local disk.\n * We assume tha all https transactions have PKI-CA verified.\n *\n * @note protocol::getSchedulerConfigLocation() is populated via node-libs/config.js or dcp-client/index.js\n *\n * @returns true or falsey\n */\nfunction determineIfSecureConfig()\n{\n var schedulerConfigLocation = (__webpack_require__(/*! 
dcp/protocol-v4 */ \"./src/protocol-v4/index.js\").getSchedulerConfigLocation)();\n var schedulerConfigSecure;\n\n if (schedulerConfigLocation && (schedulerConfigLocation.protocol === 'https:' || schedulerConfigLocation.protocol === 'file:'))\n {\n debugging('strict-mode') && console.debug(`scheduler config location ${schedulerConfigLocation} is secure`); /* from casual eavesdropping */\n schedulerConfigSecure = true;\n }\n\n if (isDebugBuild)\n {\n debugging('strict-mode') && console.debug('scheduler config location is always secure for debug builds');\n schedulerConfigSecure = 'debug';\n }\n\n debugging('strict-mode') && console.debug(`Config Location ${schedulerConfigLocation} is ${!schedulerConfigSecure ? 'not secure' : 'secure-' + schedulerConfigSecure}`);\n return schedulerConfigSecure;\n}\n\n/**\n * Determine if a URL is secure by examinining the protocol, connection, and information about the \n * process; in particular, we try to determine if the dcp config was securely provided, because if \n * it wasn't, then we can't have a secure location, since the origin could be compromised.\n * \n * \"Secure\" in this case means \"secure against casual eavesdropping\", and this information should only\n * be used to refuse to send secrets over the transport or similar.\n *\n * @returns true or falsey\n */\nfunction determineIfSecureLocation(conn)\n{\n var isSecureConfig = determineIfSecureConfig();\n var secureLocation;\n\n if (!isSecureConfig) /* can't have a secure location without a secure configuration */\n return null;\n \n if (isDebugBuild || conn.url.protocol === 'https:' || conn.url.protocol === 'tcps:')\n secureLocation = true;\n else if (conn.role === role.initiator && conn.target.hasOwnProperty('friendLocation') && conn.url === conn.target.friendLocation)\n secureLocation = true;\n else if (conn.options.allowUnencryptedSecrets)\n secureLocation = 'override';\n else\n secureLocation = false;\n\n debugging('strict-mode') && console.debug(`Location ${conn.url} is ${!secureLocation ? 'not secure' : 'secure-' + secureLocation}`);\n \n return secureLocation;\n}\nexports.Connection = Connection;\n\n\n//# sourceURL=webpack://dcp/./src/protocol-v4/connection/connection.js?");
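The bundled connection.js above exposes the initiator-side lifecycle: connect(), send(), close(), plus the 'session', 'connect', 'disconnect', 'close', 'end' and 'error' events. The sketch below is editorial and not part of the package diff; it shows how an initiator might drive that API, assuming the module resolves as dcp/protocol-v4/connection and that the constructor takes (url, identity, options) as it does when target.js builds Connections. The option names (transports, connectTimeout) and the transport name 'socketio' appear in the source; the values and the demo flow are illustrative only.

const { Connection } = require('dcp/protocol-v4/connection'); /* assumed module path */

async function initiatorDemo(targetUrl, identityKeystore)
{
  /* Constructor arguments mirror how target.js instantiates Connections: (url, identity, options).
   * The options shown are names referenced by connection.js; the values here are illustrative.
   */
  const conn = new Connection(targetUrl, identityKeystore, { transports: ['socketio'], connectTimeout: 30 });

  conn.on('session',    (dcpsid) => console.log('session established:', dcpsid));
  conn.on('disconnect', ()       => console.log('transport lost; initiator will try to reconnect'));
  conn.on('error',      (error)  => console.error('connection error:', error));

  await conn.connect();                            /* optional - send() implies connect() */
  const response = await conn.send('keepalive');   /* send(operation [, data [, identity]]) resolves with the peer's response */
  console.log('keepalive response:', response);

  await conn.close('demo finished');               /* resolves once the connection reaches the closed state */
}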
4651
4651
 
4652
4652
  /***/ }),
4653
4653
 
@@ -4777,7 +4777,7 @@ eval("/**\n * @file protocol/message.js\n * @author Ryan Rossiter, r
4777
4777
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4778
4778
 
4779
4779
  "use strict";
4780
- eval("/**\n * @file connection/target.js\n * @author Wes Garland, wes@kingsds.network\n * @date March 2022\n */\n\n\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\n\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst { PseudoExpress } = __webpack_require__(/*! ../node-libs/pseudo-express */ \"./src/node-libs/pseudo-express.js\");\nconst { Connection } = __webpack_require__(/*! ./connection */ \"./src/protocol-v4/connection/index.js\");\nconst { Message } = __webpack_require__(/*! ./message */ \"./src/protocol-v4/message.js\");\nconst { Listener } = __webpack_require__(/*! ./transport/listener */ \"./src/protocol-v4/transport/listener.js\");\nconst bearer = __webpack_require__(/*! ./transport/http-bearer */ \"./src/protocol-v4/transport/http-bearer.js\");\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp');\nvar seqNum = 0;\n\n/**\n * @constructor\n * Instanciate a new Target. Targets listen for connections at the transport layer, and turns them into DCP Connections.\n *\n * @emits bearer-listening when we are listening on a given bearer; argument is underlying bearer Server instance\n * @emits listening when we are listening on at least one transport\n * @emits error when bad things happen\n *\n * @param {string} transportNames An array of names of transports, [\"socketio\", \"webrtc\"] to use to create\n * this Target.\n * @param {object} config must have .identity and .location\n * @param {function} listeningHandler [optional] function to invoke once the transport is listening. 
Takes name of the\n * transport and server instance associated with the event.\n * @param {object} options [optional] A DCP config options variable (like dcpConfig.dcp.listenOptions)\n */\nexports.Target = function Target(transportNames, config, listeningHandler, options)\n{\n const that = this;\n var initialTransportInstanceQueue = [];\n var idKs_unlocked = false; /* unlocked ks needed for daemon operation */\n var listening = false;\n var maintenanceIntervalMs = options && options.connectionSweepIntervalMs || 600000;\n\n assert(config, config.identity, config.location);\n\n if (config.identity instanceof Promise)\n config.identity.then(memoizeUnlockedIdentityKS);\n else\n memoizeUnlockedIdentityKS(config.identity);\n\n this.debugLabel = `target(${transportNames.join(' ')}:${++seqNum})`;\n this.connectionMemos = [];\n this.url = config.location;\n this.options = options;\n\n /* Wire up bearers - shared amongst compatible transport listeners */\n this.httpServer = bearer.httpServerFactory(config, (server) => { this.emit('bearer-listening', server )});\n Object.assign(this, new PseudoExpress());\n \n /* Wire up transport listeners */\n this.listeners = [];\n for (let transportName of transportNames)\n {\n const listener = new Listener(transportName, config, this, options);\n listener.on('connection', startupTransportConnectionHandler);\n listener.on('listening', (server) => startupListeningHandler(transportName, server));\n this.listeners.push(listener);\n }\n\n this.bindServer(this.httpServer);\n\n /* Wire up /status listeners - must happen after transport listeners hook httpServer */\n const statusUrl = new DcpURL(config.location).resolveUrl('./status');\n this.get(statusUrl.pathname, peShowStatus);\n this.get('/status', peShowStatus); /* outer server - not usually available via proxy */\n \n /* store an unlocked version of the idKs in instance-private memory for use by the Connection */\n async function memoizeUnlockedIdentityKS(idKs)\n {\n debugging('target') && console.debug(that.debugLabel, 'memoizeUnlockedIdentityKs: identity keystore unlocked. 
listening:', listening);\n\n if (!(idKs instanceof wallet.Keystore))\n throw new Error('Identity Keystore is not an instance of wallet.Keystore');\n\n \n const pk = await idKs.getPrivateKey();\n const ks = await new wallet.Keystore(pk, '');\n\n idKs_unlocked = ks;\n\n if (listening)\n rockAndRoll();\n }\n\n /* queue up transport connnections until a listening event handler has fired and idKs promise resolved */\n function startupTransportConnectionHandler(transport)\n {\n debugging('target') && console.debug(that.debugLabel, 'startupTransportConnectionHandler; idKs unlocked:', Boolean(idKs_unlocked) + '; listening:', listening);\n\n if (!idKs_unlocked || !listening)\n initialTransportInstanceQueue.push(transport);\n else\n {\n rockAndRoll();\n that.handleNewTransportInstance.apply(transport);\n }\n }\n\n /* handle listening event for each transport as it comes up */\n function startupListeningHandler(transportName, listeningServer)\n {\n debugging('target') && console.debug(that.debugLabel, 'startupListeningHandler; idKs unlocked:', Boolean(idKs_unlocked));\n \n if (listeningHandler)\n listeningHandler(transportName, listeningServer);\n \n listening = true;\n if (idKs_unlocked)\n rockAndRoll();\n }\n\n /**\n * Listening event has fired for at least one transport, identity has resolved; target is fully up, \n * event order has been synchronized with possible blast of incoming connections, get rid of the\n * intermediate events and get to work.\n */\n function rockAndRoll()\n {\n var queuedTransport;\n\n if (!rockAndRoll.ran)\n {\n for (let listener of that.listeners)\n {\n listener.off('connection', startupTransportConnectionHandler);\n listener.on ('connection', (transport) => that.handleNewTransportInstance(transport));\n }\n rockAndRoll.ran = true;\n }\n\n that.identity = idKs_unlocked;\n debugging('target') && console.debug(that.debugLabel, 'identity is', that.identity.address);\n\n // eslint-disable-next-line no-unmodified-loop-condition\n while (initialTransportInstanceQueue && (queuedTransport = initialTransportInstanceQueue.shift()))\n that.handleNewTransportInstance(queuedTransport);\n initialTransportInstanceQueue = null; /* detect push races, free RAM */\n that.emit('listening', that);\n }\n\n /* If it takes more than 5s to start up, we have a very serious problem */\n setTimeout(() => {\n if (rockAndRoll.ran)\n return;\n const listeningError = new Error('unable to start listener');\n this.emit('error', listeningError);\n debugging('target') && console.debug(that.debugLabel, listeningError);\n }, 5000).unref();\n\n /* Occasionally sweep the connection list and clean up garbage. 
*/\n this.maintenanceIntervalHnd = setInterval(() => this.doConnectionMaintenance(), maintenanceIntervalMs);\n this.maintenanceIntervalHnd.unref();\n}\nexports.Target.prototype = new EventEmitter('Target');\nObject.assign(exports.Target.prototype, PseudoExpress.prototype);\n\n/**\n * API to shutdown the target and its currently-active connections.\n */\nexports.Target.prototype.close = function Target$close()\n{\n clearInterval(this.maintenanceIntervalHnd);\n let numConnectionsToClose = 0, numConnectionsClosed = 0;\n \n debugging('target') && console.debug(this.debugLabel, `closing ${this.getSessions('established').length} established and ${this.getSessions('disconnected').length} sessions`);\n for (const dcpsid in this.connectionMemos)\n {\n let conn = this.connectionMemos[dcpsid].connection;\n numConnectionsToClose++;\n conn.close('server shutting down', true);\n conn.on('end', () => { numConnectionsClosed++; tryShutdown(); })\n }\n \n const that = this;\n function tryShutdown()\n {\n // Not done closing connections\n if (numConnectionsClosed !== numConnectionsToClose)\n return;\n\n for (let listener of that.listeners)\n listener.close();\n \n that.listeners.length = 0;\n that.unref();\n }\n}\n\n/**\n * Internal callback which is invoked whenever a new transport instance is created\n */\nexports.Target.prototype.handleNewTransportInstance = function handleNewTransportInstance(transport)\n{\n const that = this;\n \n debugging('target') && console.debug(this.debugLabel, 'adding new transport instance');\n\n /* new transport instances need their messages intercepted at this layer so that \n * we can figure out if they are new DCP connections or simply reconnections.\n */\n function messageHandler(message)\n {\n void that.a$interceptTransportMessage(message, transport, messageHandler);\n }\n\n transport.on('message', messageHandler);\n \n /* Transport may have pending messages it didn't emit as there was no listener. Try here. 
*/\n transport.emitReadyMessages();\n}\n\n/**\n * Internal method that memoizes a session for later use by the reconnect code.\n */\nexports.Target.prototype.memoizeSession = function memoizeSession(connection)\n{\n assert(connection.dcpsid);\n assert(!this.connectionMemos.hasOwnProperty(connection.dcpsid));\n\n this.connectionMemos[connection.dcpsid] = { connection, lastMessageStampMs: performance.now() };\n debugging('target') && console.debug(this.debugLabel, 'target registered new session', connection.dcpsid);\n}\n\n/**\n * Internal method invoked when a connection connects\n */\nexports.Target.prototype.noteConnectEvent = function noteConnectEvent(connection)\n{\n const cm = this.connectionMemos[connection.dcpsid];\n assert(connection.dcpsid);\n \n if (!cm)\n {\n debugging('target') && console.debug(this.debugLabel, `target connected after session ${connection.dcpsid} culled`);\n connection.close();\n return;\n }\n\n cm.disconnectTimeStamp = false;\n}\n\n/**\n * Internal method invoked when a connection closes\n */\nexports.Target.prototype.noteCloseEvent = function noteCloseEvent(connection)\n{\n delete this.connectionMemos[connection.dcpsid];\n}\n \n/**\n * Internal method invoked when a connection disconnects\n */\nexports.Target.prototype.noteDisconnectEvent = function noteDisconnectEvent(connection)\n{\n const cm = this.connectionMemos[connection.dcpsid];\n assert(connection.dcpsid);\n\n if (!cm)\n {\n debugging('target') && console.debug(this.debugLabel, `target disconnected after session ${connection.dcpsid} culled`);\n connection.close();\n return;\n }\n\n cm.disconnectTimeStamp = performance.now();\n}\n\n/**\n * Handle messages coming from the transport by associating them with an instance of Connection\n * and then having the Connection handle them. It's possible that we might multi-trigger on\n * messages arriving in a flurry just as we connect; this is due to an architecture flaw where\n * we don't really have a way to know what connection a given message is for until it has sent\n * a DCP-level message; either a connect message or a message with an existing dcpsid. 
Multi-trigger\n * should just get picked up by the dup detection.\n *\n * @param {string} rawMessage JSON-encoded DCP message\n * @param {object} transport a transport instance which is a child of this.transportListener\n * @param {function} messageHandler the function which is the transport message event handler that\n * called this function.\n */\nexports.Target.prototype.a$interceptTransportMessage = async function Target$interceptTransportMessage(rawMessage, transport, messageHandler)\n{\n var message;\n\n /* keep a memo on the transport instance so we don't accidentally try to create multiple connections for it */\n if (transport._targetInfo)\n transport._targetInfo.interceptCount++;\n else\n {\n transport._targetInfo = { interceptCount: 1 };\n debugging('target') && console.debug(this.debugLabel,`target intercepted ${rawMessage.length}-byte message #${transport._targetInfo.interceptCount}`);\n\n transport.off('message', messageHandler);\n try\n {\n message = JSON.parse(rawMessage);\n }\n catch (error)\n {\n console.error(`interceptTransportMessage: message #${transport._targetInfo.interceptCount} is not valid JSON (${String(rawMessage).slice(0,15)}...):`, error);\n transport.close();\n return;\n }\n\n if (typeof message !== 'object' || !message.hasOwnProperty('body'))\n {\n console.error(`interceptTransportMessage: no body in message #${transport._targetInfo.interceptCount} (${String(rawMessage).slice(0,15)}...)`);\n transport.close(true);\n return;\n }\n\n /* Sometimes, this transport has already been associated with a session, but messages were dispatched\n * before the intercept event handler was disabled. We pass them along to the connection either way,\n * but only try to do transport<>connection association for the first message. That is the *ONLY* \n * reason this code should ever look at _targetInfo.connection.\n */\n if (!transport._targetInfo.connection)\n {\n if (message.body.dcpsid)\n transport._targetInfo.connection = this.locateExistingConnection(transport, message);\n else\n transport._targetInfo.connection = await this.initializeNewConnection(transport, message); // eslint-disable-line require-atomic-updates\n }\n\n if (!transport._targetInfo.connection)\n {\n /* function return false has sent a ConnectionlessErrorResponse back at this point. */\n debugging('target') && console.error(`${this.debugLabel} interceptTransportMessage: invalid message #${transport._targetInfo.interceptCount} (${String(rawMessage).slice(0,15)}...)`);\n return;\n }\n }\n\n /* Message was intercepted, now we should have a Connection ready to handle it - handle\n * it immediately, so that it does not accidentally get handled out of order. 
The await\n * in the previous block will have allowed the 'connection' event to fire and the \n * identityPromise to resolve by now for new connections.\n */\n transport._targetInfo.connection.handleMessage(message);\n}\n\n/**\n * Initializes a Connection object for use by a target, based on a message from the transport layer.\n * This should be the first message from the transport connection (socket), and it must be a message\n * which contains a DCP 'connect' request.\n *\n * If the message is unsuitable for bootstrapping a Connection, a response message will be written \n * back on the transport instance and the transport will be closed.\n *\n * @param {object} transport the instance of Transport which originated the message\n * @param {object} transportMessage a DCP message, from the transport layer, which has been parsed\n * from JSON into an object but not otherwise modified.\n *\n * @returns - Connection when the message is suitable for establishing a Connection, or \n * - false when it is not; in that case the transport will be closed eventually\n * once an error response is sent, or it takes too long to send.\n */\nexports.Target.prototype.initializeNewConnection = function Target$initializeConnection(transport, transportMessage)\n{\n var message;\n var connection;\n\n if (!transportMessage.body || transportMessage.body.type !== 'batch')\n message = transportMessage;\n else\n {\n const messages = transportMessage.body && transportMessage.body.payload;\n if (!messages || !messages.length)\n return this.sendErrorResponse(transport, 'invalid message - empty batch', 'ENOBODY', transportMessage);\n message = messages[0];\n }\n\n if (!message.body)\n return this.sendErrorResponse(transport, 'invalid message - no body', 'ENOBODY', transportMessage);\n \n if (message.body.payload && message.body.payload.operation !== 'connect')\n return this.sendErrorResponse(transport, 'no such connection', 'ENODCPSID', transportMessage);\n\n /* At this point, we have a transport message for a brand new DCP connection */\n connection = new Connection(this.url, this.identity, this.options);\n connection.on('session', () => this.memoizeSession (connection));\n connection.on('connect', () => this.noteConnectEvent (connection));\n connection.on('disconnect', () => this.noteDisconnectEvent(connection));\n connection.on('close', () => this.noteCloseEvent (connection));\n connection.accept(transport).then(() => this.emit('connection', connection));\n\n return connection;\n}\n\n/**\n * @param {object} transport a transport instance which is a child of this.transportListener\n * @param {string} dcpsid DCP session id sent in the first message from the peer\n *\n * @returns instance of Connection or false\n */\nexports.Target.prototype.locateExistingConnection = function Target$locateExistingConnection(transport, message)\n{\n const dcpsid = message.body.dcpsid;\n var cm;\n\n assert(transport, dcpsid);\n this.doConnectionMaintenance();\n cm = this.connectionMemos[dcpsid];\n \n if (!cm)\n {\n debugging('target') && console.debug(this.debugLabel, `target received message after session ${dcpsid} culled or target restarted`);\n return this.sendErrorResponse(transport, 'invalid session ' + dcpsid, 'ENODCPSID', message);\n }\n\n debugging('target') && console.debug(this.debugLabel, 'message is for session', dcpsid);\n if (cm.connection.transport)\n cm.connection.transport.close();\n cm.connection.useNewTransport(transport);\n\n return cm.connection;\n}\n\n/**\n * Retrieve a list of connections' dcpsids and their 
statuses.\n * @param {string} connectionStatus [optional] connection.status to filter on\n * @returns {Array} list of { dcpsid, status }\n */\nexports.Target.prototype.getSessions = function Target$$getSessions(connectionStatus)\n{\n var sessions = [];\n \n for (let dcpsid in this.connectionMemos)\n {\n if (!connectionStatus || this.connectionMemos[dcpsid].connection.state.is(connectionStatus))\n {\n const session = { dcpsid, status: this.connectionMemos[dcpsid].status };\n if (dcpConfig.build === 'debug')\n session.__connection = this.connectionMemos[dcpsid]; /* troubleshooting only! */\n sessions.push(session);\n }\n }\n\n return sessions;\n}\n\n/**\n * Remove all connections from the target's memo list that have not have been\n * disconnected for more than lingerTimeout seconds.\n */\nexports.Target.prototype.doConnectionMaintenance = function Target$doConnectionMaintenance()\n{\n const now = performance.now();\n\n debugging('target') && console.debug(this.debugLabel, 'performing connection maintenance');\n for (let dcpsid in this.connectionMemos)\n {\n const cm = this.connectionMemos[dcpsid];\n if (!cm.disconnectTimeStamp)\n continue;\n\n const timeoutMs = cm.connection.options.lingerTimeout * 1000;\n if (now - cm.disconnectTimeStamp > timeoutMs)\n {\n debugging('target') && console.debug(`${this.debugLabel} culling session ${dcpsid}`)\n cm.connection.close();\n delete this.connectionMemos[dcpsid];\n }\n }\n}\n\n/** Remove any event loop references used by this target */\nexports.Target.prototype.unref = function Target$unref()\n{\n for (let listener of this.listeners)\n {\n if (listener.unref)\n listener.unref();\n }\n if (this.httpServer)\n this.httpServer.unref();\n}\n\n/**\n * A class for DCP-like messages that do not have an associated session.\n */\nclass ConnectionlessErrorResponse extends Message\n{\n #idKs;\n \n constructor(identity, errorMessage, errorCode, relatedMessage)\n {\n super('Connectionless Error Response');\n \n if (relatedMessage && relatedMessage.body.id)\n {\n this.id = relatedMessage.body.id;\n this.nonce = relatedMessage.body.nonce;\n this.dcpsid = relatedMessage.body.dcpsid;\n }\n\n this.success = false;\n this.time = Date.now();\n this.owner = identity.address;\n this.#idKs = identity;\n this.payload = {\n message: errorMessage,\n code: errorCode,\n };\n }\n \n toJSON()\n {\n return {\n ...super.toJSON(),\n id: this.id,\n nonce: this.nonce,\n dcpsid: this.dcpsid,\n type: 'unhandled-message'\n };\n }\n\n sign() /* async */\n {\n return this.#idKs.makeSignedMessage(this.toJSON());\n }\n}\n\nexports.Target.prototype.sendErrorResponse = function sendErrorResponse(transport, errorMessage, code, relatedMessage)\n{\n debugging('target') && console.debug(`${this.debugLabel} got an invalid message, sending back ConnectionLessErrorResponse with error message: ${errorMessage}`)\n const timer = setTimeout(() => transport.close(), 5000); /* keep lingering connections from chewing up resources on super-slow networks */\n \n const response = new ConnectionlessErrorResponse(this.identity, errorMessage, code, relatedMessage);\n response.sign().then((signedResponse) => transport.send(signedResponse))\n transport.on('drain', () => { transport.close(); clearTimeout(timer) });\n \n return false;\n}\n\n/**\n * A handler for PseudoExpress which displays the status of the Target's process\n * @returns false\n */\nfunction peShowStatus(request, response)\n{\n const process = __webpack_require__(/*! 
process */ \"./node_modules/process/browser.js\");\n const os = __webpack_require__(/*! os */ \"./node_modules/os-browserify/browser.js\");\n\n function fancy(object)\n {\n const padLen = 4 + Object.keys(object).reduce((curMax, key) => key.length > curMax ? key.length : curMax, 0);\n return '\\n' + Object.entries(object).map(([key, value]) => (' ' + key + ': ').padEnd(padLen) + value).join('\\n');\n }\n\n response.status = 200;\n response.set('content-type', 'text/plain; charset=utf8');\n response.set('cache-control', 'no-cache');\n \n const output = `${new Date()}\\n\nrequest: ${request.method} ${request.url}\nprogram: ${process.argv[1]}\npid: ${process.pid}\ndebugPort: ${process.debugPort}\nhostname: ${request.hostname} aka ${os.hostname()}\nuptime: ${process.uptime()}\nloadavg: ${os.loadavg()}\nresources: ${fancy(process.resourceUsage())}\nmemory: ${fancy(process.memoryUsage())}\ntotalmem: ${os.totalmem()}\nheaders: ${fancy(request.headers)}\n\nbody: \\n${request.body || ''}\n`;\n\n response.send(output);\n}\n\n\n//# sourceURL=webpack://dcp/./src/protocol-v4/target.js?");
4780
+ eval("/**\n * @file connection/target.js\n * @author Wes Garland, wes@kingsds.network\n * @date March 2022\n */\n\n\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\n\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst { PseudoExpress } = __webpack_require__(/*! ../node-libs/pseudo-express */ \"./src/node-libs/pseudo-express.js\");\nconst { Connection } = __webpack_require__(/*! ./connection */ \"./src/protocol-v4/connection/index.js\");\nconst { Message } = __webpack_require__(/*! ./message */ \"./src/protocol-v4/message.js\");\nconst { Listener } = __webpack_require__(/*! ./transport/listener */ \"./src/protocol-v4/transport/listener.js\");\nconst bearer = __webpack_require__(/*! ./transport/http-bearer */ \"./src/protocol-v4/transport/http-bearer.js\");\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp');\nvar seqNum = 0;\n\n/**\n * @constructor\n * Instanciate a new Target. Targets listen for connections at the transport layer, and turns them into DCP Connections.\n *\n * @emits bearer-listening when we are listening on a given bearer; argument is underlying bearer Server instance\n * @emits listening when we are listening on at least one transport\n * @emits error when bad things happen\n *\n * @param {string} transportNames An array of names of transports, [\"socketio\", \"webrtc\"] to use to create\n * this Target.\n * @param {object} config must have .identity and .location\n * @param {function} listeningHandler [optional] function to invoke once the transport is listening. 
Takes name of the\n * transport and server instance associated with the event.\n * @param {object} options [optional] A DCP config options variable (like dcpConfig.dcp.listenOptions)\n */\nexports.Target = function Target(transportNames, config, listeningHandler, options)\n{\n const that = this;\n var initialTransportInstanceQueue = [];\n var idKs_unlocked = false; /* unlocked ks needed for daemon operation */\n var listening = false;\n var maintenanceIntervalMs = options && options.connectionSweepIntervalMs || 600000;\n\n assert(config, config.identity, config.location);\n\n if (config.identity instanceof Promise)\n config.identity.then(memoizeUnlockedIdentityKS);\n else\n memoizeUnlockedIdentityKS(config.identity);\n\n this.debugLabel = `target(${transportNames.join(' ')}:${++seqNum})`;\n this.connectionMemos = [];\n this.url = config.location;\n this.options = options;\n\n /* Wire up bearers - shared amongst compatible transport listeners */\n this.httpServer = bearer.httpServerFactory(config, (server) => { this.emit('bearer-listening', server )});\n Object.assign(this, new PseudoExpress());\n \n /* Wire up transport listeners */\n this.listeners = [];\n for (let transportName of transportNames)\n {\n const listener = new Listener(transportName, config, this, options);\n listener.on('connection', startupTransportConnectionHandler);\n listener.on('listening', (server) => startupListeningHandler(transportName, server));\n this.listeners.push(listener);\n }\n\n this.bindServer(this.httpServer);\n\n /* Wire up /status listeners - must happen after transport listeners hook httpServer */\n const statusUrl = new DcpURL(config.location).resolveUrl('./status');\n this.get(statusUrl.pathname, peShowStatus);\n this.get('/status', peShowStatus); /* outer server - not usually available via proxy */\n \n /* store an unlocked version of the idKs in instance-private memory for use by the Connection */\n async function memoizeUnlockedIdentityKS(idKs)\n {\n debugging('target') && console.debug(that.debugLabel, 'memoizeUnlockedIdentityKs: identity keystore unlocked. 
listening:', listening);\n\n if (!(idKs instanceof wallet.Keystore))\n throw new Error('Identity Keystore is not an instance of wallet.Keystore');\n\n \n const pk = await idKs.getPrivateKey();\n const ks = await new wallet.Keystore(pk, '');\n\n idKs_unlocked = ks;\n\n if (listening)\n rockAndRoll();\n }\n\n /* queue up transport connnections until a listening event handler has fired and idKs promise resolved */\n function startupTransportConnectionHandler(transport)\n {\n debugging('target') && console.debug(that.debugLabel, 'startupTransportConnectionHandler; idKs unlocked:', Boolean(idKs_unlocked) + '; listening:', listening);\n\n if (!idKs_unlocked || !listening)\n initialTransportInstanceQueue.push(transport);\n else\n {\n rockAndRoll();\n that.handleNewTransportInstance.apply(transport);\n }\n }\n\n /* handle listening event for each transport as it comes up */\n function startupListeningHandler(transportName, listeningServer)\n {\n debugging('target') && console.debug(that.debugLabel, 'startupListeningHandler; idKs unlocked:', Boolean(idKs_unlocked));\n \n if (listeningHandler)\n listeningHandler(transportName, listeningServer);\n \n listening = true;\n if (idKs_unlocked)\n rockAndRoll();\n }\n\n /**\n * Listening event has fired for at least one transport, identity has resolved; target is fully up, \n * event order has been synchronized with possible blast of incoming connections, get rid of the\n * intermediate events and get to work.\n */\n function rockAndRoll()\n {\n var queuedTransport;\n\n if (!rockAndRoll.ran)\n {\n for (let listener of that.listeners)\n {\n listener.off('connection', startupTransportConnectionHandler);\n listener.on ('connection', (transport) => that.handleNewTransportInstance(transport));\n }\n rockAndRoll.ran = true;\n }\n\n that.identity = idKs_unlocked;\n debugging('target') && console.debug(that.debugLabel, 'identity is', that.identity.address);\n\n // eslint-disable-next-line no-unmodified-loop-condition\n while (initialTransportInstanceQueue && (queuedTransport = initialTransportInstanceQueue.shift()))\n that.handleNewTransportInstance(queuedTransport);\n initialTransportInstanceQueue = null; /* detect push races, free RAM */\n that.emit('listening', that);\n }\n\n /* If it takes more than 5s to start up, we have a very serious problem */\n setTimeout(() => {\n if (rockAndRoll.ran)\n return;\n const listeningError = new Error('unable to start listener');\n this.emit('error', listeningError);\n debugging('target') && console.debug(that.debugLabel, listeningError);\n }, 5000).unref();\n\n /* Occasionally sweep the connection list and clean up garbage. 
*/\n this.maintenanceIntervalHnd = setInterval(() => this.doConnectionMaintenance(), maintenanceIntervalMs);\n this.maintenanceIntervalHnd.unref();\n}\nexports.Target.prototype = new EventEmitter('Target');\nObject.assign(exports.Target.prototype, PseudoExpress.prototype);\n\n/**\n * API to shutdown the target and its currently-active connections.\n */\nexports.Target.prototype.close = function Target$close()\n{\n clearInterval(this.maintenanceIntervalHnd);\n let numConnectionsToClose = 0, numConnectionsClosed = 0;\n \n debugging('target') && console.debug(this.debugLabel, `closing ${this.getSessions('established').length} established and ${this.getSessions('disconnected').length} sessions`);\n for (const dcpsid in this.connectionMemos)\n {\n let conn = this.connectionMemos[dcpsid].connection;\n numConnectionsToClose++;\n conn.close('server shutting down', true);\n conn.on('end', () => { numConnectionsClosed++; tryShutdown(); })\n }\n \n const that = this;\n function tryShutdown()\n {\n // Not done closing connections\n if (numConnectionsClosed !== numConnectionsToClose)\n return;\n\n for (let listener of that.listeners)\n listener.close();\n \n that.listeners.length = 0;\n that.unref();\n }\n}\n\n/**\n * Internal callback which is invoked whenever a new transport instance is created\n */\nexports.Target.prototype.handleNewTransportInstance = function handleNewTransportInstance(transport)\n{\n const that = this;\n \n debugging('target') && console.debug(this.debugLabel, 'adding new transport instance');\n\n /* new transport instances need their messages intercepted at this layer so that \n * we can figure out if they are new DCP connections or simply reconnections.\n */\n function messageHandler(message)\n {\n void that.a$interceptTransportMessage(message, transport, messageHandler);\n }\n\n transport.on('message', messageHandler);\n \n /* Transport may have pending messages it didn't emit as there was no listener. Try here. 
*/\n transport.emitReadyMessages();\n}\n\n/**\n * Internal method that memoizes a session for later use by the reconnect code.\n */\nexports.Target.prototype.memoizeSession = function memoizeSession(connection)\n{\n assert(connection.dcpsid);\n assert(!this.connectionMemos.hasOwnProperty(connection.dcpsid));\n\n this.connectionMemos[connection.dcpsid] = { connection, lastMessageStampMs: performance.now() };\n debugging('target') && console.debug(this.debugLabel, 'target registered new session', connection.dcpsid);\n}\n\n/**\n * Internal method invoked when a connection connects\n */\nexports.Target.prototype.noteConnectEvent = function noteConnectEvent(connection)\n{\n const cm = this.connectionMemos[connection.dcpsid];\n assert(connection.dcpsid);\n \n if (!cm)\n {\n debugging('target') && console.debug(this.debugLabel, `target connected after session ${connection.dcpsid} culled`);\n connection.close();\n return;\n }\n\n cm.disconnectTimeStamp = false;\n}\n\n/**\n * Internal method invoked when a connection closes\n */\nexports.Target.prototype.noteCloseEvent = function noteCloseEvent(connection)\n{\n delete this.connectionMemos[connection.dcpsid];\n}\n \n/**\n * Internal method invoked when a connection disconnects\n */\nexports.Target.prototype.noteDisconnectEvent = function noteDisconnectEvent(connection)\n{\n const cm = this.connectionMemos[connection.dcpsid];\n\n if (!cm)\n {\n debugging('target') && console.debug(this.debugLabel, `target disconnected after session ${connection.dcpsid} culled`);\n connection.close();\n return;\n }\n\n cm.disconnectTimeStamp = performance.now();\n}\n\n/**\n * Handle messages coming from the transport by associating them with an instance of Connection\n * and then having the Connection handle them. It's possible that we might multi-trigger on\n * messages arriving in a flurry just as we connect; this is due to an architecture flaw where\n * we don't really have a way to know what connection a given message is for until it has sent\n * a DCP-level message; either a connect message or a message with an existing dcpsid. 
Multi-trigger\n * should just get picked up by the dup detection.\n *\n * @param {string} rawMessage JSON-encoded DCP message\n * @param {object} transport a transport instance which is a child of this.transportListener\n * @param {function} messageHandler the function which is the transport message event handler that\n * called this function.\n */\nexports.Target.prototype.a$interceptTransportMessage = async function Target$interceptTransportMessage(rawMessage, transport, messageHandler)\n{\n var message;\n\n /* keep a memo on the transport instance so we don't accidentally try to create multiple connections for it */\n if (transport._targetInfo)\n transport._targetInfo.interceptCount++;\n else\n {\n transport._targetInfo = { interceptCount: 1 };\n debugging('target') && console.debug(this.debugLabel,`target intercepted ${rawMessage.length}-byte message #${transport._targetInfo.interceptCount}`);\n\n transport.off('message', messageHandler);\n try\n {\n message = JSON.parse(rawMessage);\n }\n catch (error)\n {\n console.error(`interceptTransportMessage: message #${transport._targetInfo.interceptCount} is not valid JSON (${String(rawMessage).slice(0,15)}...):`, error);\n transport.close();\n return;\n }\n\n if (typeof message !== 'object' || !message.hasOwnProperty('body'))\n {\n console.error(`interceptTransportMessage: no body in message #${transport._targetInfo.interceptCount} (${String(rawMessage).slice(0,15)}...)`);\n transport.close(true);\n return;\n }\n\n /* Sometimes, this transport has already been associated with a session, but messages were dispatched\n * before the intercept event handler was disabled. We pass them along to the connection either way,\n * but only try to do transport<>connection association for the first message. That is the *ONLY* \n * reason this code should ever look at _targetInfo.connection.\n */\n if (!transport._targetInfo.connection)\n {\n if (message.body.dcpsid)\n transport._targetInfo.connection = this.locateExistingConnection(transport, message);\n else\n transport._targetInfo.connection = await this.initializeNewConnection(transport, message); // eslint-disable-line require-atomic-updates\n }\n\n if (!transport._targetInfo.connection)\n {\n /* function return false has sent a ConnectionlessErrorResponse back at this point. */\n debugging('target') && console.error(`${this.debugLabel} interceptTransportMessage: invalid message #${transport._targetInfo.interceptCount} (${String(rawMessage).slice(0,15)}...)`);\n return;\n }\n }\n\n /* Message was intercepted, now we should have a Connection ready to handle it - handle\n * it immediately, so that it does not accidentally get handled out of order. 
The await\n * in the previous block will have allowed the 'connection' event to fire and the \n * identityPromise to resolve by now for new connections.\n */\n transport._targetInfo.connection.handleMessage(message);\n}\n\n/**\n * Initializes a Connection object for use by a target, based on a message from the transport layer.\n * This should be the first message from the transport connection (socket), and it must be a message\n * which contains a DCP 'connect' request.\n *\n * If the message is unsuitable for bootstrapping a Connection, a response message will be written \n * back on the transport instance and the transport will be closed.\n *\n * @param {object} transport the instance of Transport which originated the message\n * @param {object} transportMessage a DCP message, from the transport layer, which has been parsed\n * from JSON into an object but not otherwise modified.\n *\n * @returns - Connection when the message is suitable for establishing a Connection, or \n * - false when it is not; in that case the transport will be closed eventually\n * once an error response is sent, or it takes too long to send.\n */\nexports.Target.prototype.initializeNewConnection = function Target$initializeConnection(transport, transportMessage)\n{\n var message;\n var connection;\n\n if (!transportMessage.body || transportMessage.body.type !== 'batch')\n message = transportMessage;\n else\n {\n const messages = transportMessage.body && transportMessage.body.payload;\n if (!messages || !messages.length)\n return this.sendErrorResponse(transport, 'invalid message - empty batch', 'ENOBODY', transportMessage);\n message = messages[0];\n }\n\n if (!message.body)\n return this.sendErrorResponse(transport, 'invalid message - no body', 'ENOBODY', transportMessage);\n \n if (message.body.payload && message.body.payload.operation !== 'connect')\n return this.sendErrorResponse(transport, 'no such connection', 'ENODCPSID', transportMessage);\n\n /* At this point, we have a transport message for a brand new DCP connection */\n connection = new Connection(this.url, this.identity, this.options);\n connection.on('session', () => this.memoizeSession (connection));\n connection.on('connect', () => this.noteConnectEvent (connection));\n connection.on('disconnect', () => this.noteDisconnectEvent(connection));\n connection.on('close', () => this.noteCloseEvent (connection));\n connection.accept(transport).then(() => this.emit('connection', connection));\n\n return connection;\n}\n\n/**\n * @param {object} transport a transport instance which is a child of this.transportListener\n * @param {string} dcpsid DCP session id sent in the first message from the peer\n *\n * @returns instance of Connection or false\n */\nexports.Target.prototype.locateExistingConnection = function Target$locateExistingConnection(transport, message)\n{\n const dcpsid = message.body.dcpsid;\n var cm;\n\n assert(transport, dcpsid);\n this.doConnectionMaintenance();\n cm = this.connectionMemos[dcpsid];\n \n if (!cm)\n {\n debugging('target') && console.debug(this.debugLabel, `target received message after session ${dcpsid} culled or target restarted`);\n return this.sendErrorResponse(transport, 'invalid session ' + dcpsid, 'ENODCPSID', message);\n }\n\n debugging('target') && console.debug(this.debugLabel, 'message is for session', dcpsid);\n if (cm.connection.transport)\n cm.connection.transport.close();\n cm.connection.useNewTransport(transport);\n\n return cm.connection;\n}\n\n/**\n * Retrieve a list of connections' dcpsids and their 
statuses.\n * @param {string} connectionStatus [optional] connection.status to filter on\n * @returns {Array} list of { dcpsid, status }\n */\nexports.Target.prototype.getSessions = function Target$$getSessions(connectionStatus)\n{\n var sessions = [];\n \n for (let dcpsid in this.connectionMemos)\n {\n if (!connectionStatus || this.connectionMemos[dcpsid].connection.state.is(connectionStatus))\n {\n const session = { dcpsid, status: this.connectionMemos[dcpsid].status };\n if (dcpConfig.build === 'debug')\n session.__connection = this.connectionMemos[dcpsid]; /* troubleshooting only! */\n sessions.push(session);\n }\n }\n\n return sessions;\n}\n\n/**\n * Remove all connections from the target's memo list that have not have been\n * disconnected for more than lingerTimeout seconds.\n */\nexports.Target.prototype.doConnectionMaintenance = function Target$doConnectionMaintenance()\n{\n const now = performance.now();\n\n debugging('target') && console.debug(this.debugLabel, 'performing connection maintenance');\n for (let dcpsid in this.connectionMemos)\n {\n const cm = this.connectionMemos[dcpsid];\n if (!cm.disconnectTimeStamp)\n continue;\n\n const timeoutMs = cm.connection.options.lingerTimeout * 1000;\n if (now - cm.disconnectTimeStamp > timeoutMs)\n {\n debugging('target') && console.debug(`${this.debugLabel} culling session ${dcpsid}`)\n cm.connection.close();\n delete this.connectionMemos[dcpsid];\n }\n }\n}\n\n/** Remove any event loop references used by this target */\nexports.Target.prototype.unref = function Target$unref()\n{\n for (let listener of this.listeners)\n {\n if (listener.unref)\n listener.unref();\n }\n if (this.httpServer)\n this.httpServer.unref();\n}\n\n/**\n * A class for DCP-like messages that do not have an associated session.\n */\nclass ConnectionlessErrorResponse extends Message\n{\n #idKs;\n \n constructor(identity, errorMessage, errorCode, relatedMessage)\n {\n super('Connectionless Error Response');\n \n if (relatedMessage && relatedMessage.body.id)\n {\n this.id = relatedMessage.body.id;\n this.nonce = relatedMessage.body.nonce;\n this.dcpsid = relatedMessage.body.dcpsid;\n }\n\n this.success = false;\n this.time = Date.now();\n this.owner = identity.address;\n this.#idKs = identity;\n this.payload = {\n message: errorMessage,\n code: errorCode,\n };\n }\n \n toJSON()\n {\n return {\n ...super.toJSON(),\n id: this.id,\n nonce: this.nonce,\n dcpsid: this.dcpsid,\n type: 'unhandled-message'\n };\n }\n\n sign() /* async */\n {\n return this.#idKs.makeSignedMessage(this.toJSON());\n }\n}\n\nexports.Target.prototype.sendErrorResponse = function sendErrorResponse(transport, errorMessage, code, relatedMessage)\n{\n debugging('target') && console.debug(`${this.debugLabel} got an invalid message, sending back ConnectionLessErrorResponse with error message: ${errorMessage}`)\n const timer = setTimeout(() => transport.close(), 5000); /* keep lingering connections from chewing up resources on super-slow networks */\n \n const response = new ConnectionlessErrorResponse(this.identity, errorMessage, code, relatedMessage);\n response.sign().then((signedResponse) => transport.send(signedResponse))\n transport.on('drain', () => { transport.close(); clearTimeout(timer) });\n \n return false;\n}\n\n/**\n * A handler for PseudoExpress which displays the status of the Target's process\n * @returns false\n */\nfunction peShowStatus(request, response)\n{\n const process = __webpack_require__(/*! 
process */ \"./node_modules/process/browser.js\");\n const os = __webpack_require__(/*! os */ \"./node_modules/os-browserify/browser.js\");\n\n function fancy(object)\n {\n const padLen = 4 + Object.keys(object).reduce((curMax, key) => key.length > curMax ? key.length : curMax, 0);\n return '\\n' + Object.entries(object).map(([key, value]) => (' ' + key + ': ').padEnd(padLen) + value).join('\\n');\n }\n\n response.status = 200;\n response.set('content-type', 'text/plain; charset=utf8');\n response.set('cache-control', 'no-cache');\n \n const output = `${new Date()}\\n\nrequest: ${request.method} ${request.url}\nprogram: ${process.argv[1]}\npid: ${process.pid}\ndebugPort: ${process.debugPort}\nhostname: ${request.hostname} aka ${os.hostname()}\nuptime: ${process.uptime()}\nloadavg: ${os.loadavg()}\nresources: ${fancy(process.resourceUsage())}\nmemory: ${fancy(process.memoryUsage())}\ntotalmem: ${os.totalmem()}\nheaders: ${fancy(request.headers)}\n\nbody: \\n${request.body || ''}\n`;\n\n response.send(output);\n}\n\n\n//# sourceURL=webpack://dcp/./src/protocol-v4/target.js?");
4781
4781
 
4782
4782
  /***/ }),
4783
4783
 
@@ -4863,7 +4863,7 @@ eval("/**\n * @file encodings.js\n * Utilities for working w
4863
4863
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4864
4864
 
4865
4865
  "use strict";
4866
- eval("/** \n * @file encodeDataURI.js\n * @author Nazila Akhavan <nazila@kingsds.network>\n * @date Sep 2020\n * \n * Encode input and return the URI.\n */\n\n\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst btoa = __webpack_require__(/*! dcp/utils/content-encoding */ \"./src/utils/content-encoding.js\");\n/**\n * Encode input to data:[<MIME-type>][;base64][;charset=<encoding>],<data>\n * @param { string | object } input \n * @param { string } [MIMEType] \n */\nexports.encodeDataURI = function utils$$encodeDataURI (input, MIMEType) {\n let inputType = typeof input;\n if (inputType === 'string' && input.startsWith('data:')) /* Already encoded, avoid double encoding */\n return input;\n let temp, encoding;\n let textType, imageType;\n if(MIMEType) {\n temp = MIMEType.split(';');\n MIMEType = temp[0];\n if(temp[1]) encoding = temp[1];\n textType = MIMEType.match(/text\\/.*/);\n textType = textType? textType[0] : [];\n imageType = MIMEType.match(/image\\/.*/);\n imageType = imageType? imageType[0] : [];\n } else {\n switch (inputType) {\n case 'string':\n MIMEType = 'text/plain';\n break;\n\n case 'boolean':\n MIMEType = 'application/json';\n break;\n\n default:\n MIMEType = 'application/x-kvin';\n break;\n }\n }\n\n switch (MIMEType) {\n case 'application/javascript': \n case 'text/plain':\n if(encoding === 'base64') {\n return 'data:text/plain;base64,' + btoa(input);\n }\n return 'data:,' + encodeURI(input);\n\n case 'application/json':\n return 'data:application/json,' + (typeof input === 'string' ? encodeURI(input) : encodeURI(JSON.stringify(input)));\n\n case 'application/x-kvin':\n return 'data:application/x-kvin,' + (typeof input === 'string' ? encodeURI(input) : encodeURI(kvin.serialize(input)));\n\n case 'application/octet-stream':\n input = new Uint8Array(input);\n return 'data:application/octet-stream;base64,' + btoa(input); \n\n case textType:\n return `data:${textType},` + encodeURI(input);\n\n case imageType:\n if (inputType === 'string') return `data:${imageType};base64,` + btoa(input);\n input = new Uint8Array(input.buffer)\n return encodeURI(`data:${imageType};base64,` + btoa(input)); \n\n default:\n throw new Error(`The content type ${MIMEType} is not supported in encodeDataURI()!`)\n }\n}\n\n// https://stackoverflow.com/questions/11089732/display-image-from-blob-using-javascript-and-websockets\n// public method for encoding an Uint8Array to base64\n\nfunction encode(input) {\n var keyStr = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=\";\n var output = \"\";\n var chr1, chr2, chr3, enc1, enc2, enc3, enc4;\n var i = 0;\n\n while (i < input.length) {\n chr1 = input[i++];\n chr2 = i < input.length ? input[i++] : Number.NaN; // Not sure if the index \n chr3 = i < input.length ? input[i++] : Number.NaN; // checks are needed here\n\n enc1 = chr1 >> 2;\n enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);\n enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);\n enc4 = chr3 & 63;\n\n if (isNaN(chr2)) {\n enc3 = enc4 = 64;\n } else if (isNaN(chr3)) {\n enc4 = 64;\n }\n output += keyStr.charAt(enc1) + keyStr.charAt(enc2) +\n keyStr.charAt(enc3) + keyStr.charAt(enc4);\n }\n return output;\n}\n\n \n \n\n\n//# sourceURL=webpack://dcp/./src/utils/encodeDataURI.js?");
4866
+ eval("/** \n * @file encodeDataURI.js\n * @author Nazila Akhavan <nazila@kingsds.network>\n * @date Sep 2020\n * \n * Encode input and return the URI.\n */\n\n\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { btoa } = __webpack_require__(/*! ./content-encoding */ \"./src/utils/content-encoding.js\");\n/**\n * Encode input to data:[<MIME-type>][;base64][;charset=<encoding>],<data>\n * @param { string | object } input \n * @param { string } [MIMEType] \n */\nexports.encodeDataURI = function utils$$encodeDataURI (input, MIMEType) {\n let inputType = typeof input;\n if (inputType === 'string' && input.startsWith('data:')) /* Already encoded, avoid double encoding */\n return input;\n let temp, encoding;\n let textType, imageType;\n if(MIMEType) {\n temp = MIMEType.split(';');\n MIMEType = temp[0];\n if(temp[1]) encoding = temp[1];\n textType = MIMEType.match(/text\\/.*/);\n textType = textType? textType[0] : [];\n imageType = MIMEType.match(/image\\/.*/);\n imageType = imageType? imageType[0] : [];\n } else {\n switch (inputType) {\n case 'string':\n MIMEType = 'text/plain';\n break;\n\n case 'boolean':\n MIMEType = 'application/json';\n break;\n\n default:\n MIMEType = 'application/x-kvin';\n break;\n }\n }\n\n switch (MIMEType) {\n case 'application/javascript': \n case 'text/plain':\n if(encoding === 'base64') {\n return 'data:text/plain;base64,' + btoa(input);\n }\n return 'data:,' + encodeURI(input);\n\n case 'application/json':\n return 'data:application/json,' + (typeof input === 'string' ? encodeURI(input) : encodeURI(JSON.stringify(input)));\n\n case 'application/x-kvin':\n return 'data:application/x-kvin,' + (typeof input === 'string' ? encodeURI(input) : encodeURI(kvin.serialize(input)));\n\n case 'application/octet-stream':\n input = new Uint8Array(input);\n return 'data:application/octet-stream;base64,' + btoa(input); \n\n case textType:\n return `data:${textType},` + encodeURI(input);\n\n case imageType:\n if (inputType === 'string') return `data:${imageType};base64,` + btoa(input);\n input = new Uint8Array(input.buffer)\n return encodeURI(`data:${imageType};base64,` + btoa(input)); \n\n default:\n throw new Error(`The content type ${MIMEType} is not supported in encodeDataURI()!`)\n }\n}\n\n// https://stackoverflow.com/questions/11089732/display-image-from-blob-using-javascript-and-websockets\n// public method for encoding an Uint8Array to base64\n\nfunction encode(input) {\n var keyStr = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=\";\n var output = \"\";\n var chr1, chr2, chr3, enc1, enc2, enc3, enc4;\n var i = 0;\n\n while (i < input.length) {\n chr1 = input[i++];\n chr2 = i < input.length ? input[i++] : Number.NaN; // Not sure if the index \n chr3 = i < input.length ? input[i++] : Number.NaN; // checks are needed here\n\n enc1 = chr1 >> 2;\n enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);\n enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);\n enc4 = chr3 & 63;\n\n if (isNaN(chr2)) {\n enc3 = enc4 = 64;\n } else if (isNaN(chr3)) {\n enc4 = 64;\n }\n output += keyStr.charAt(enc1) + keyStr.charAt(enc2) +\n keyStr.charAt(enc3) + keyStr.charAt(enc4);\n }\n return output;\n}\n\n \n \n\n\n//# sourceURL=webpack://dcp/./src/utils/encodeDataURI.js?");
4867
4867
 
4868
4868
  /***/ }),
4869
4869
 
@@ -4895,7 +4895,7 @@ eval("/**\n * @file fetch-keystore.js\n * Utility code to fe
4895
4895
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4896
4896
 
4897
4897
  "use strict";
4898
- eval("/**\n * @file fetch-uri.js\n * @author Nazila Akhavan <nazila@kingsds.network>, Wes Garland <wes@kingsds.network>\n * @date Sep 2020, Nov 2020\n *\n * Fetch URLs/ Data that is stored in the database.\n * Bootstrap some our own needs via custom MIME Types in data URLs.\n */\n\n\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\nconst scopedKvin = new kvin.KVIN({Object: ({}).constructor,\n Array: ([]).constructor, \n Function: (()=>{}).constructor});\n\nconst { justFetch } = __webpack_require__(/*! ./just-fetch */ \"./src/utils/just-fetch.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { atob } = __webpack_require__(/*! dcp/utils/content-encoding */ \"./src/utils/content-encoding.js\");\n\n/** @typedef {import('dcp/common/dcp-url').DcpURL} DcpURL */\n\n/**\n * Fetch the data via HTTP GET at the given URI. data: URIs are decoded directly, \n * http: or https: URIs are fetched via GET queries. \n *\n * data: URIs will always be decoded.\n * Other URIs (eg https:) must have their origins listed in the allow origin list\n *\n * The return type is unpacked by justFetch - maybe not the best place for this - but this means\n * that we will return a promise that resolves to many possible JS types, dependening on the MIME\n * content-type of the response. The same observation holds for the content-type inside any data:\n * URIs.\n * \n * \n * @param {string[]} allowOriginList the allow origin list (optional, needed for non-data: URIs)\n * @param {URL | DcpURL | string} uri uri of the resource to be fetched\n * @returns If successful, promise which resolves to the data at the URI; see justFetch() for details.\n * Otherwise, promise which results to an instance of error. Errors which happen to due bad\n * origins have the code EFETCH_BAD_ORIGIN.\n */\nexports.fetchURI = async function fetchURI(uri, allowOriginList) {\n if ((typeof uri === 'object' && uri.protocol === 'data:') || uri.startsWith('data:'))\n return exports.parseDataURI(uri);\n \n const url = typeof uri === 'string' ? 
new URL(uri) : uri;\n \n if (url.protocol === 'file:')\n {\n if (allowOriginList.indexOf(url.pathname) === -1)\n throw new DCPError(`Not allowed to fetch from the file path '${url.pathname}'`, 'EFETCH_BAD_ORIGIN')\n \n return exports.parseFileURI(url);\n }\n \n if (allowOriginList.indexOf(url.origin) === -1)\n throw new DCPError(`Not allowed to fetch from the origin '${url.origin}'`, 'EFETCH_BAD_ORIGIN')\n \n return await justFetch(url, 'string', 'GET');\n}\n\n/**\n * @constructor\n * Instanciate an object based on a data: uri which has properties matching\n * the parameter attributes of the uri.\n * \n * @param {string} uriString The data: URI to decode\n *\n * Object Properties:\n * - contentType: boxed string which is the content type (eg 'image/png')\n * - contentType.major: string which is the major part of the content type (eg 'image')\n * - contentType.minor: string which is the minor part of the content type (eg 'png')\n * - length: the length of the media type section of the data: URI\n * - parameters: object which holds any parameters which were specified in\n * the URI; keys are lowercased attributes.\n *\n * @note The parameters.charset property is treated a little differently to make\n * it easier on API consumers; it is *always* defined, and lower case.\n * If the charset was not specified, it is false.\n */ \nexports.MediaType = function fetchUri$$MediaType(uriString) {\n var mtArr;\n var mtStr = /(^data:)([^,]*)/.exec(uriString)[2];\n\n assert(uriString.startsWith('data:'));\n this.length = mtStr.length;\n if (mtStr === '')\n mtStr = 'text/plain;charset=US-ASCII';\n\n mtArr = mtStr.split(';');\n this.contentType = new String(mtArr.shift());\n [ this.contentType.major, this.contentType.minor ] = this.contentType.split('/');\n \n this.parameters = {}\n for (let parameter of mtArr) {\n let [ attribute, value ] = parameter.split('=');\n if (!value)\n value = true;\n this.parameters[attribute.toLowerCase()] = value;\n }\n\n if (typeof this.parameters.charset === 'undefined')\n this.parameters.charset = false;\n else\n this.parameters.charset = this.parameters.charset.toLowerCase();\n \n if (this.contentType == 'application/json' || this.contentType == 'text/plain')\n {\n this.serializer = {\n method: 'json',\n parse: JSON.parse\n }\n }\n else if (this.contentType == 'application/x-kvin')\n {\n this.serializer = {\n method: 'kvin',\n parse: scopedKvin.deserialize.bind(scopedKvin)\n }\n }\n}\n\n/**\n * Estimate the number of raw bytes stored in a data URI. The estimate is pretty good for URIs\n * embedding base64. URIs which are made up of escaped characters are estimated by counting the number\n * of characters in the first part of the array, and then assuming that the encoding density is constant\n * through out. This is because the URI can be made up of bytes encoded with 1-or-3 bytes, but will \n * normally be all either almost all 1 or all 3. 
Mixed would be pretty rare in the wild.\n *\n * This does mean that it's possible to under-estimate by nearly 66%, so backing store should be ready\n * for this, but it's realllly unlikely to happen.\n */\nexports.estimateDataUriContentLength = function estimateDataUriContentLength(uriString, mediaType)\n{\n const headerLength = 5 + mediaType.length + 1; /* data: mediaType comma */\n\n if (!mediaType)\n mediaType = new exports.MediaType(uriString);\n if (mediaType.parameters.base64)\n return Math.ceil(((uriString.length - (mediaType.length + headerLength)) * 6) / 8);\n\n /* Assume that the distribution of escaped characters in the first bit is representative\n * of the URI as a whole. We don't want to take this apart just to estimate.\n */\n const leading = uriString.slice(mediaType.length, 1024);\n const remain = leading.replace(/%../g, '');\n const numEscs = (leading.length - remain.length);\n const bytes = remain.length + numEscs;\n const dilation = bytes / leading.length;\n \n return Math.ceil((uriString.length - headerLength) * dilation);\n}\n\n/**\n * Extract the raw data encoded a data URI.\n *\n * @param {string} uriString a valid data: URI\n * @param {exports.MediaType} mediaType optional instance of MediaType that describes this URI.\n *\n * @returns string or Buffer representing the RAW data. If the return type is a string, each character\n * in the string will represent 1 byte, 0-255, of the raw data.\n */\nexports.dataUri_extractBytes = function fetchUri$$dataUri_extractBytes(uriString, mediaType)\n{\n if (!mediaType)\n mediaType = new exports.MediaType(uriString);\n\n const payload = uriString.slice(5 + mediaType.length + 1); /* data: mediaType comma */\n const data = mediaType.parameters.base64 ? atob(payload) : unescape(payload);\n\n return data;\n}\n\n/**\n * Extract text encoded in a data URI.\n *\n * @param {string} uriString a valid data: URI\n * @param {exports.MediaType} mediaType optional instance of MediaType that describes this URI.\n *\n * @returns a JS-style Unicode string, converting from whatever charset is indicated in the\n * data URI's content-type charset attribute.\n */\nexports.dataUri_extractText = function fetchUri$$dataUri_extractText(uriString, mediaType)\n{\n if (!mediaType)\n mediaType = new exports.MediaType(uriString);\n\n switch (mediaType.parameters.charset)\n {\n default:\n throw new Error(`Character set ${mediaType.parameters.charset} not supported`);\n\n case undefined:\n case false:\n case 'iso-8859-1': case 'latin1': case 'latin-1':\n case 'us-ascii': case 'ascii':\n /* These character sets map directly onto unicode */\n return exports.dataUri_extractBytes(uriString, mediaType);\n case 'utf8': case 'utf-8':\n const payload = uriString.slice(5 + mediaType.length + 1); /* data: mediaType comma */\n return mediaType.parameters.base64 ? decodeURI(escape(atob(payload))) : decodeURI(payload);\n }\n\n throw 'unreached';\n}\n\nexports.parseFileURI = function fetchUri$$parseFileURI(uriString) {\n var fetchedResource = (__webpack_require__(/*! fs */ \"fs\").readFileSync)(uriString, 'utf-8');\n var fileExt = (__webpack_require__(/*! path */ \"./node_modules/path-browserify/index.js\").extname)(uriString.pathname);\n \n var parse;\n switch (fileExt) {\n case '.kvin':\n parse = scopedKvin.deserialize.bind(scopedKvin);\n break;\n case '.json':\n parse = JSON.parse\n break;\n }\n \n if (parse)\n fetchedResource = parse(fetchedResource);\n \n return fetchedResource;\n}\n\n/**\n * Parse a data: URI, returning the JavaScript value it encodes. 
The return type is selected\n * based on the content-type.\n *\n * <pre>\n * MIME Type Return Type\n * ------------------ ----------------------------------------------------------------------------------------------------\n * text/plain or none string primitive\n * text/* A boxed string with the contentType property set, and the charset property set if it was specified.\n * application/json whatever JSON.parse returns on the data when it is treated as a string\n * application/x-kvin whatever kvin.deserialze returns on the data when it is treated as a string\n * image/* A Uint8Array of the decoded contents with the contentType property set.\n * * A Uint8Array of the decoded contents with the contentType property set and the charset property \n * set if it was specified.\n * </pre>\n *\n * @param {string} uriString the URI\n */\nexports.parseDataURI = function(uriString, mediaType = null) {\n if (!mediaType)\n mediaType = new exports.MediaType(uriString);\n \n if (mediaType.contentType.major === 'text')\n {\n const unicodeString = exports.dataUri_extractText(uriString, mediaType);\n\n if (mediaType.contentType == 'text/plain')\n return unicodeString;\n\n const boxedUnicodeString = new String(unicodeString); \n if (mediaType.parameters.charset)\n boxedUnicodeString.charset = mediaType.parameters.charset;\n return boxedUnicodeString;\n }\n \n if (mediaType.serializer)\n {\n /* ALL supported serializers in DCP serialize to UTF-8 */\n const unicodeString = exports.dataUri_extractText(uriString, mediaType);\n return mediaType.serializer.parse(unicodeString);\n }\n \n let ui8;\n const rawData = exports.dataUri_extractBytes(uriString, mediaType);\n if (mediaType.contentType.major === 'image')\n {\n ui8 = Uint8Array.from(rawData, c => c.charCodeAt(0));\n ui8.contentType = mediaType.contentType;\n }\n else \n {\n ui8 = Uint8Array.from(rawData, c => c.charCodeAt(0));\n ui8.contentType = mediaType.contentType.toString();\n if (mediaType.parameters.charset)\n ui8.charset = charset;\n }\n\n return ui8;\n};\n\n\n//# sourceURL=webpack://dcp/./src/utils/fetch-uri.js?");
4898
+ eval("/**\n * @file fetch-uri.js\n * @author Nazila Akhavan <nazila@kingsds.network>, Wes Garland <wes@kingsds.network>\n * @date Sep 2020, Nov 2020\n *\n * Fetch URLs/ Data that is stored in the database.\n * Bootstrap some our own needs via custom MIME Types in data URLs.\n */\n\n\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\nconst scopedKvin = new kvin.KVIN({Object: ({}).constructor,\n Array: ([]).constructor, \n Function: (()=>{}).constructor});\n\nconst { justFetch } = __webpack_require__(/*! ./just-fetch */ \"./src/utils/just-fetch.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { atob } = __webpack_require__(/*! dcp/utils/content-encoding */ \"./src/utils/content-encoding.js\");\n\n/** @typedef {import('dcp/common/dcp-url').DcpURL} DcpURL */\n\n/**\n * Fetch the data via HTTP GET at the given URI. data: URIs are decoded directly, \n * http: or https: URIs are fetched via GET queries. \n *\n * data: URIs will always be decoded.\n * Other URIs (eg https:) must have their origins listed in the allow origin list\n *\n * The return type is unpacked by justFetch - maybe not the best place for this - but this means\n * that we will return a promise that resolves to many possible JS types, dependening on the MIME\n * content-type of the response. The same observation holds for the content-type inside any data:\n * URIs.\n * \n * \n * @param {string[]} allowOriginList the allow origin list (optional, needed for non-data: URIs)\n * @param {URL | DcpURL | string} uri uri of the resource to be fetched\n * @returns If successful, promise which resolves to the data at the URI; see justFetch() for details.\n * Otherwise, promise which results to an instance of error. Errors which happen to due bad\n * origins have the code EFETCH_BAD_ORIGIN.\n */\nexports.fetchURI = async function fetchURI(uri, allowOriginList) {\n if ((typeof uri === 'object' && uri.protocol === 'data:') || (typeof uri === 'string' && uri.startsWith('data:')))\n return exports.parseDataURI(uri);\n \n const url = typeof uri === 'string' ? 
new URL(uri) : uri;\n \n if (url.protocol === 'file:')\n {\n if (allowOriginList.indexOf(url.pathname) === -1)\n throw new DCPError(`Not allowed to fetch from the file path '${url.pathname}'`, 'EFETCH_BAD_ORIGIN')\n \n return exports.parseFileURI(url);\n }\n \n if (allowOriginList.indexOf(url.origin) === -1)\n throw new DCPError(`Not allowed to fetch from the origin '${url.origin}'`, 'EFETCH_BAD_ORIGIN')\n \n return await justFetch(url, 'string', 'GET');\n}\n\n/**\n * @constructor\n * Instanciate an object based on a data: uri which has properties matching\n * the parameter attributes of the uri.\n * \n * @param {string} uriString The data: URI to decode\n *\n * Object Properties:\n * - contentType: boxed string which is the content type (eg 'image/png')\n * - contentType.major: string which is the major part of the content type (eg 'image')\n * - contentType.minor: string which is the minor part of the content type (eg 'png')\n * - length: the length of the media type section of the data: URI\n * - parameters: object which holds any parameters which were specified in\n * the URI; keys are lowercased attributes.\n *\n * @note The parameters.charset property is treated a little differently to make\n * it easier on API consumers; it is *always* defined, and lower case.\n * If the charset was not specified, it is false.\n */ \nexports.MediaType = function fetchUri$$MediaType(uriString) {\n var mtArr;\n var mtStr = /(^data:)([^,]*)/.exec(uriString)[2];\n\n assert(uriString.startsWith('data:'));\n this.length = mtStr.length;\n if (mtStr === '')\n mtStr = 'text/plain;charset=US-ASCII';\n\n mtArr = mtStr.split(';');\n this.contentType = new String(mtArr.shift());\n [ this.contentType.major, this.contentType.minor ] = this.contentType.split('/');\n \n this.parameters = {}\n for (let parameter of mtArr) {\n let [ attribute, value ] = parameter.split('=');\n if (!value)\n value = true;\n this.parameters[attribute.toLowerCase()] = value;\n }\n\n if (typeof this.parameters.charset === 'undefined')\n this.parameters.charset = false;\n else\n this.parameters.charset = this.parameters.charset.toLowerCase();\n \n if (this.contentType == 'application/json' || this.contentType == 'text/plain')\n {\n this.serializer = {\n method: 'json',\n parse: JSON.parse\n }\n }\n else if (this.contentType == 'application/x-kvin')\n {\n this.serializer = {\n method: 'kvin',\n parse: scopedKvin.deserialize.bind(scopedKvin)\n }\n }\n}\n\n/**\n * Estimate the number of raw bytes stored in a data URI. The estimate is pretty good for URIs\n * embedding base64. URIs which are made up of escaped characters are estimated by counting the number\n * of characters in the first part of the array, and then assuming that the encoding density is constant\n * through out. This is because the URI can be made up of bytes encoded with 1-or-3 bytes, but will \n * normally be all either almost all 1 or all 3. 
Mixed would be pretty rare in the wild.\n *\n * This does mean that it's possible to under-estimate by nearly 66%, so backing store should be ready\n * for this, but it's realllly unlikely to happen.\n */\nexports.estimateDataUriContentLength = function estimateDataUriContentLength(uriString, mediaType)\n{\n const headerLength = 5 + mediaType.length + 1; /* data: mediaType comma */\n\n if (!mediaType)\n mediaType = new exports.MediaType(uriString);\n if (mediaType.parameters.base64)\n return Math.ceil(((uriString.length - (mediaType.length + headerLength)) * 6) / 8);\n\n /* Assume that the distribution of escaped characters in the first bit is representative\n * of the URI as a whole. We don't want to take this apart just to estimate.\n */\n const leading = uriString.slice(mediaType.length, 1024);\n const remain = leading.replace(/%../g, '');\n const numEscs = (leading.length - remain.length);\n const bytes = remain.length + numEscs;\n const dilation = bytes / leading.length;\n \n return Math.ceil((uriString.length - headerLength) * dilation);\n}\n\n/**\n * Extract the raw data encoded a data URI.\n *\n * @param {string} uriString a valid data: URI\n * @param {exports.MediaType} mediaType optional instance of MediaType that describes this URI.\n *\n * @returns string or Buffer representing the RAW data. If the return type is a string, each character\n * in the string will represent 1 byte, 0-255, of the raw data.\n */\nexports.dataUri_extractBytes = function fetchUri$$dataUri_extractBytes(uriString, mediaType)\n{\n if (!mediaType)\n mediaType = new exports.MediaType(uriString);\n\n const payload = uriString.slice(5 + mediaType.length + 1); /* data: mediaType comma */\n const data = mediaType.parameters.base64 ? atob(payload) : decodeURI(payload);\n\n return data;\n}\n\n/**\n * Extract text encoded in a data URI.\n *\n * @param {string} uriString a valid data: URI\n * @param {exports.MediaType} mediaType optional instance of MediaType that describes this URI.\n *\n * @returns a JS-style Unicode string, converting from whatever charset is indicated in the\n * data URI's content-type charset attribute.\n */\nexports.dataUri_extractText = function fetchUri$$dataUri_extractText(uriString, mediaType)\n{\n if (!mediaType)\n mediaType = new exports.MediaType(uriString);\n\n switch (mediaType.parameters.charset)\n {\n default:\n throw new Error(`Character set ${mediaType.parameters.charset} not supported`);\n\n case undefined:\n case false:\n case 'iso-8859-1': case 'latin1': case 'latin-1':\n case 'us-ascii': case 'ascii':\n /* These character sets map directly onto unicode */\n return exports.dataUri_extractBytes(uriString, mediaType);\n case 'utf8': case 'utf-8':\n const payload = uriString.slice(5 + mediaType.length + 1); /* data: mediaType comma */\n return mediaType.parameters.base64 ? decodeURI(escape(atob(payload))) : decodeURI(payload);\n }\n\n throw 'unreached';\n}\n\nexports.parseFileURI = function fetchUri$$parseFileURI(uriString) {\n var fetchedResource = (__webpack_require__(/*! fs */ \"fs\").readFileSync)(uriString, 'utf-8');\n var fileExt = (__webpack_require__(/*! path */ \"./node_modules/path-browserify/index.js\").extname)(uriString.pathname);\n \n var parse;\n switch (fileExt) {\n case '.kvin':\n parse = scopedKvin.deserialize.bind(scopedKvin);\n break;\n case '.json':\n parse = JSON.parse\n break;\n }\n \n if (parse)\n fetchedResource = parse(fetchedResource);\n \n return fetchedResource;\n}\n\n/**\n * Parse a data: URI, returning the JavaScript value it encodes. 
The return type is selected\n * based on the content-type.\n *\n * <pre>\n * MIME Type Return Type\n * ------------------ ----------------------------------------------------------------------------------------------------\n * text/plain or none string primitive\n * text/* A boxed string with the contentType property set, and the charset property set if it was specified.\n * application/json whatever JSON.parse returns on the data when it is treated as a string\n * application/x-kvin whatever kvin.deserialze returns on the data when it is treated as a string\n * image/* A Uint8Array of the decoded contents with the contentType property set.\n * * A Uint8Array of the decoded contents with the contentType property set and the charset property \n * set if it was specified.\n * </pre>\n *\n * @param {string} uriString the URI\n */\nexports.parseDataURI = function(uriString, mediaType = null) {\n if (!mediaType)\n mediaType = new exports.MediaType(uriString);\n \n if (mediaType.contentType.major === 'text')\n {\n const unicodeString = exports.dataUri_extractText(uriString, mediaType);\n\n if (mediaType.contentType == 'text/plain')\n return unicodeString;\n\n const boxedUnicodeString = new String(unicodeString); \n if (mediaType.parameters.charset)\n boxedUnicodeString.charset = mediaType.parameters.charset;\n return boxedUnicodeString;\n }\n \n if (mediaType.serializer)\n {\n /* ALL supported serializers in DCP serialize to UTF-8 */\n const unicodeString = exports.dataUri_extractText(uriString, mediaType);\n return mediaType.serializer.parse(unicodeString);\n }\n \n let ui8;\n const rawData = exports.dataUri_extractBytes(uriString, mediaType);\n if (mediaType.contentType.major === 'image')\n {\n ui8 = Uint8Array.from(rawData, c => c.charCodeAt(0));\n ui8.contentType = mediaType.contentType;\n }\n else \n {\n ui8 = Uint8Array.from(rawData, c => c.charCodeAt(0));\n ui8.contentType = mediaType.contentType.toString();\n if (mediaType.parameters.charset)\n ui8.charset = charset;\n }\n\n return ui8;\n};\n\n\n//# sourceURL=webpack://dcp/./src/utils/fetch-uri.js?");
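The fetchURI export above decodes data: URIs inline and fetches http(s) or file: URIs only when their origin (or path, for file:) appears in the allow list, otherwise rejecting with a DCPError carrying code EFETCH_BAD_ORIGIN; parseDataURI returns a string, JSON value, KVIN value, or Uint8Array depending on the declared content type. A brief usage sketch under the assumption that these exports were required directly; the require path and hostnames below are illustrative.

  // Illustrative only -- the require path is an assumption; fetchURI takes (uri, allowOriginList).
  const { fetchURI, parseDataURI } = require('dcp/utils/fetch-uri');

  parseDataURI('data:,hello%20world');                   // 'hello world' -- plain text decodes to a string primitive
  parseDataURI('data:application/json,%7B%22a%22:1%7D'); // { a: 1 } -- parsed with JSON.parse

  async function example()
  {
    // data: URIs never consult the allow list; other origins must be listed explicitly.
    const inline = await fetchURI('data:,inline%20payload');
    const remote = await fetchURI('https://scheduler.example.invalid/work.json',
                                  ['https://scheduler.example.invalid']);
    return { inline, remote };
  }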
4899
4899
 
4900
4900
  /***/ }),
4901
4901