dcp-client 4.2.7 → 4.2.8

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -3831,7 +3831,7 @@ eval("// Copyright Joyent, Inc. and other Node contributors.\n//\n// Permission
  /***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
 
  "use strict";
- eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ Modal)\n/* harmony export */ });\n/**\n * A Small Modal Class\n * @module Modal\n */\n/* globals Event dcpConfig */\nclass Modal {\n constructor (title, message, callback = false, exitHandler = false, {\n continueLabel = 'Continue',\n cancelLabel = 'Cancel',\n cancelVisible = true\n } = {}) {\n const modal = document.createElement('div')\n modal.className = 'dcp-modal-container-old day'\n modal.innerHTML = `\n <dialog class=\"dcp-modal-content\">\n <div class=\"dcp-modal-header\">\n <h2>${title}<button type=\"button\" class=\"close\">&times;</button></h2>\n ${message ? '<p>' + message + '</p>' : ''}\n </div>\n <div class=\"dcp-modal-loading hidden\">\n <div class='loading'></div>\n </div>\n <form onsubmit='return false' method=\"dialog\">\n <div class=\"dcp-modal-body\"></div>\n <div class=\"dcp-modal-footer ${cancelVisible ? '' : 'centered'}\">\n <button type=\"submit\" class=\"continue green-modal-button\">${continueLabel}</button>\n <button type=\"button\" class=\"cancel green-modal-button\">${cancelLabel}</button>\n </div>\n </form>\n </dialog>`\n\n // To give a reference to do developer who wants to override the form submit.\n // May occur if they want to validate the information in the backend\n // without closing the modal prematurely.\n this.form = modal.querySelector('.dcp-modal-content form')\n this.continueButton = modal.querySelector('.dcp-modal-footer button.continue')\n this.cancelButton = modal.querySelector('.dcp-modal-footer button.cancel')\n this.closeButton = modal.querySelector('.dcp-modal-header .close')\n if (!cancelVisible) {\n this.cancelButton.style.display = 'none'\n }\n\n // To remove the event listener, the reference to the original function\n // added is required.\n this.formSubmitHandler = this.continue.bind(this)\n\n modal.addEventListener('keydown', function (event) {\n event.stopPropagation()\n // 27 is the keycode for the escape key.\n if (event.keyCode === 27) this.close()\n }.bind(this))\n\n this.container = modal\n this.callback = callback\n this.exitHandler = exitHandler\n document.body.appendChild(modal)\n }\n\n changeFormSubmitHandler (newFormSubmitHandler) {\n this.formSubmitHandler = newFormSubmitHandler\n }\n\n /**\n * Validates the form values in the modal and calls the modal's callback\n */\n async continue (event) {\n // To further prevent form submission from trying to redirect from the\n // current page.\n if (event instanceof Event) {\n event.preventDefault()\n }\n let fieldsAreValid = true\n let formElements = this.container.querySelectorAll('.dcp-modal-body select, .dcp-modal-body input, .dcp-modal-body textarea')\n\n const formValues = []\n if (typeof formElements.length === 'undefined') formElements = [formElements]\n // Separate into two loops to enable input validation requiring formValues\n // that come after it. e.g. 
Two password fields matching.\n for (let i = 0; i < formElements.length; i++) {\n switch (formElements[i].type) {\n case 'file':\n formValues.push(formElements[i])\n break\n case 'checkbox':\n formValues.push(formElements[i].checked)\n break\n default:\n formValues.push(formElements[i].value)\n break\n }\n }\n for (let i = 0; i < formElements.length; i++) {\n if (formElements[i].validation) {\n // Optional fields are allowed to be empty but still can't be wrong if not empty.\n if (!(formElements[i].value === '' && !formElements[i].required)) {\n if (typeof formElements[i].validation === 'function') {\n if (!formElements[i].validation(formValues)) {\n fieldsAreValid = false\n formElements[i].classList.add('is-invalid')\n }\n } else if (!formElements[i].validation.test(formElements[i].value)) {\n fieldsAreValid = false\n formElements[i].classList.add('is-invalid')\n }\n }\n }\n }\n\n if (!fieldsAreValid) return\n\n this.loading()\n if (typeof this.callback === 'function') {\n try {\n return this.callback(formValues)\n } catch (error) {\n console.error('Unexpected error in modal.continue:', error);\n return this.close(false)\n }\n }\n this.close(true)\n }\n\n loading () {\n this.container.querySelector('.dcp-modal-loading').classList.remove('hidden')\n this.container.querySelector('.dcp-modal-body').classList.add('hidden')\n this.container.querySelector('.dcp-modal-footer').classList.add('hidden')\n }\n\n open () {\n this.form.addEventListener('submit', async (event) => {\n const success = await this.formSubmitHandler(event)\n if (success === false) {\n return\n }\n this.close(true)\n })\n // When the user clicks on <span> (x), close the modal\n this.closeButton.addEventListener('click', this.close.bind(this))\n this.cancelButton.addEventListener('click', this.close.bind(this))\n\n // Prevent lingering outlines after clicking some form elements.\n this.container.querySelectorAll('.dcp-modal-body button, .dcp-modal-body input[type=\"checkbox\"]').forEach(element => {\n element.addEventListener('click', () => {\n element.blur()\n })\n })\n\n // Show the modal.\n this.container.style.display = 'block'\n\n const formElements = this.container.querySelectorAll('.dcp-modal-body select, .dcp-modal-body input')\n if (formElements.length) {\n formElements[0].focus()\n if (formElements[0].type === 'text') {\n formElements[0].select()\n }\n for (const el of formElements) {\n if (el.realType) {\n el.type = el.realType\n }\n }\n } else {\n // With no form elements to allow for form submission on enter, focus the\n // continue button.\n this.container.querySelector('.dcp-modal-footer button.continue').focus()\n }\n } // TODO: This should return a promise with the action resolving it\n\n /**\n * Shows the modal and returns a promise of the result of the modal (e.g. 
was\n * it closed, did its action succeed?)\n */\n showModal () {\n return new Promise((resolve, reject) => {\n this.form.addEventListener('submit', handleContinue.bind(this))\n this.cancelButton.addEventListener('click', handleCancel.bind(this))\n this.closeButton.addEventListener('click', handleCancel.bind(this))\n\n // Prevent lingering outlines after clicking some form elements.\n this.container.querySelectorAll('.dcp-modal-body button, .dcp-modal-body input[type=\"checkbox\"]').forEach(element => {\n element.addEventListener('click', () => {\n element.blur()\n })\n })\n\n // Show the modal.\n this.container.style.display = 'block'\n\n const formElements = this.container.querySelectorAll('.dcp-modal-body select, .dcp-modal-body input')\n if (formElements.length) {\n formElements[0].focus()\n if (formElements[0].type === 'text') {\n formElements[0].select()\n }\n for (const el of formElements) {\n if (el.realType) {\n el.type = el.realType\n }\n }\n } else {\n // With no form elements to allow for form submission on enter, focus the\n // continue button.\n this.continueButton.focus()\n }\n\n async function handleContinue (event) {\n let result\n try {\n result = await this.formSubmitHandler(event)\n } catch (error) {\n reject(error)\n }\n this.close(true)\n resolve(result)\n }\n\n async function handleCancel () {\n let result\n try {\n result = await this.close()\n } catch (error) {\n reject(error)\n }\n resolve(result)\n }\n })\n }\n\n close (success = false) {\n this.container.style.display = 'none'\n if (this.container.parentNode) {\n this.container.parentNode.removeChild(this.container)\n }\n\n // @todo this needs to remove eventlisteners to prevent memory leaks\n\n if ((success !== true) && typeof this.exitHandler === 'function') {\n return this.exitHandler(this)\n }\n }\n\n /**\n * Adds different form elements to the modal depending on the case.\n *\n * @param {*} elements - The properties of the form elements to add.\n * @returns {HTMLElement} The input form elements.\n */\n addFormElement (...elements) {\n const body = this.container.querySelector('.dcp-modal-body')\n const inputElements = []\n let label\n for (let i = 0; i < elements.length; i++) {\n let row = document.createElement('div')\n row.className = 'row'\n\n let col, input\n switch (elements[i].type) {\n case 'button':\n col = document.createElement('div')\n col.className = 'col-md-12'\n\n input = document.createElement('button')\n input.innerHTML = elements[i].label\n input.type = 'button'\n input.classList.add('green-modal-button')\n if (!elements[i].onclick) {\n throw new Error('A button in the modal body should have an on click event handler.')\n }\n input.addEventListener('click', elements[i].onclick)\n\n col.appendChild(input)\n row.appendChild(col)\n break\n case 'textarea':\n col = document.createElement('div')\n col.className = 'col-md-12'\n\n input = document.createElement('textarea')\n input.className = 'text-input-field form-control'\n if (elements[i].placeholder) input.placeholder = elements[i].placeholder\n\n col.appendChild(input)\n row.appendChild(col)\n break\n case 'text':\n case 'email':\n case 'number':\n case 'password': {\n const inputCol = document.createElement('div')\n\n input = document.createElement('input')\n input.type = elements[i].type\n input.validation = elements[i].validation\n input.autocomplete = elements[i].autocomplete || (elements[i].type === 'password' ? 
'off' : 'on')\n input.className = 'text-input-field form-control'\n\n // Adding bootstraps custom feedback styles.\n let invalidFeedback = null\n if (elements[i].invalidFeedback) {\n invalidFeedback = document.createElement('div')\n invalidFeedback.className = 'invalid-feedback'\n invalidFeedback.innerText = elements[i].invalidFeedback\n }\n\n if (elements[i].type === 'password') {\n elements[i].realType = 'password'\n }\n\n if (elements[i].label) {\n const labelCol = document.createElement('div')\n label = document.createElement('label')\n label.innerText = elements[i].label\n const inputId = 'dcp-modal-input-' + this.container.querySelectorAll('input[type=\"text\"], input[type=\"email\"], input[type=\"number\"], input[type=\"password\"]').length\n label.setAttribute('for', inputId)\n input.id = inputId\n labelCol.classList.add('col-md-6', 'label-column')\n labelCol.appendChild(label)\n row.appendChild(labelCol)\n inputCol.className = 'col-md-6'\n } else {\n inputCol.className = 'col-md-12'\n }\n\n inputCol.appendChild(input)\n if (invalidFeedback !== null) {\n inputCol.appendChild(invalidFeedback)\n }\n row.appendChild(inputCol)\n break\n }\n case 'select':\n col = document.createElement('div')\n col.className = 'col-md-4'\n\n label = document.createElement('span')\n label.innerText = elements[i].label\n\n col.appendChild(label)\n row.appendChild(col)\n\n col = document.createElement('div')\n col.className = 'col-md-8'\n\n input = document.createElement('select')\n\n col.appendChild(input)\n row.appendChild(col)\n break\n case 'checkbox': {\n row.classList.add('checkbox-row')\n const checkboxLabelCol = document.createElement('div')\n checkboxLabelCol.classList.add('label-column', 'checkbox-label-column')\n\n label = document.createElement('label')\n label.innerText = elements[i].label\n label.for = 'dcp-checkbox-input-' + this.container.querySelectorAll('input[type=\"checkbox\"]').length\n label.setAttribute('for', label.for)\n label.className = 'checkbox-label'\n\n checkboxLabelCol.appendChild(label)\n\n const checkboxCol = document.createElement('div')\n checkboxCol.classList.add('checkbox-column')\n\n input = document.createElement('input')\n input.type = 'checkbox'\n input.id = label.for\n if (elements[i].checked) {\n input.checked = true\n }\n\n checkboxCol.appendChild(input)\n\n if (elements[i].labelToTheRightOfCheckbox) {\n checkboxCol.classList.add('col-md-5')\n row.appendChild(checkboxCol)\n checkboxLabelCol.classList.add('col-md-7')\n row.appendChild(checkboxLabelCol)\n } else {\n checkboxLabelCol.classList.add('col-md-6')\n checkboxCol.classList.add('col-md-6')\n row.appendChild(checkboxLabelCol)\n row.appendChild(checkboxCol)\n }\n break\n }\n case 'file':\n [input, row] = this.addFileInput(elements[i], input, row)\n break\n case 'label':\n row.classList.add('label-row')\n label = document.createElement('label')\n label.innerText = elements[i].label\n row.appendChild(label)\n break\n }\n\n // Copy other possibly specified element properties:\n const inputPropertyNames = ['title', 'inputmode', 'value', 'minLength', 'maxLength', 'size', 'required', 'pattern', 'min', 'max', 'step', 'placeholder', 'accept', 'multiple', 'id', 'onkeypress', 'oninput', 'for', 'readonly', 'autocomplete']\n for (const propertyName of inputPropertyNames) {\n if (Object.prototype.hasOwnProperty.call(elements[i], propertyName)) {\n if (propertyName === 'for' && !label.hasAttribute(propertyName)) {\n label.setAttribute(propertyName, elements[i][propertyName])\n }\n if (propertyName.startsWith('on')) {\n 
input.addEventListener(propertyName.slice(2), elements[i][propertyName])\n } else {\n input.setAttribute(propertyName, elements[i][propertyName])\n }\n }\n }\n\n inputElements.push(input)\n body.appendChild(row)\n }\n\n if (inputElements.length === 1) return inputElements[0]\n else return inputElements\n }\n\n /**\n * Adds a drag and drop file form element to the modal.\n *\n * @param {*} fileInputProperties - An object specifying some of the\n * properties of the file input element.\n * @param {*} fileInput - Placeholders to help create the file\n * input.\n * @param {HTMLDivElement} row - Placeholders to help create the file\n * input.\n */\n addFileInput (fileInputProperties, fileInput, row) {\n // Adding the upload label.\n const uploadLabel = document.createElement('label')\n uploadLabel.innerText = fileInputProperties.label\n row.appendChild(uploadLabel)\n const body = this.container.querySelector('.dcp-modal-body')\n body.appendChild(row)\n const fileSelectionRow = document.createElement('div')\n fileSelectionRow.id = 'file-selection-row'\n\n // Adding the drag and drop file upload input.\n const dropContainer = document.createElement('div')\n dropContainer.id = 'drop-container'\n\n // Adding an image of a wallet\n const imageContainer = document.createElement('div')\n imageContainer.id = 'image-container'\n const walletImage = document.createElement('span')\n walletImage.classList.add('fas', 'fa-wallet')\n imageContainer.appendChild(walletImage)\n\n // Adding some text prompts\n const dropMessage = document.createElement('span')\n dropMessage.innerText = 'Drop a keystore file here'\n const orMessage = document.createElement('span')\n orMessage.innerText = 'or'\n\n // Adding the manual file input element (hiding the default one)\n const fileInputContainer = document.createElement('div')\n const fileInputLabel = document.createElement('label')\n // Linking the label to the file input so that clicking on the label\n // activates the file input.\n fileInputLabel.setAttribute('for', 'file-input')\n fileInputLabel.innerText = 'Browse'\n fileInput = document.createElement('input')\n fileInput.type = fileInputProperties.type\n fileInput.id = 'file-input'\n // To remove the lingering outline after selecting the file.\n fileInput.addEventListener('click', () => {\n fileInput.blur()\n })\n fileInputContainer.append(fileInput, fileInputLabel)\n\n // Creating the final row element to append to the modal body.\n dropContainer.append(imageContainer, dropMessage, orMessage, fileInputContainer)\n fileSelectionRow.appendChild(dropContainer)\n\n // Adding functionality to the drag and drop file input.\n dropContainer.addEventListener('drop', selectDroppedFile.bind(this))\n dropContainer.addEventListener('drop', unhighlightDropArea)\n // Prevent file from being opened by the browser.\n dropContainer.ondragover = highlightDropArea\n dropContainer.ondragenter = highlightDropArea\n dropContainer.ondragleave = unhighlightDropArea\n\n fileInput.addEventListener('change', handleFileChange)\n\n const fileNamePlaceholder = document.createElement('center')\n fileNamePlaceholder.id = 'file-name-placeholder'\n fileNamePlaceholder.className = 'row'\n fileNamePlaceholder.innerText = ''\n fileSelectionRow.appendChild(fileNamePlaceholder)\n fileNamePlaceholder.classList.add('hidden')\n\n // Check if the continue button is invalid on the keystore upload modal and\n // click it if it should no longer be invalid.\n this.continueButton.addEventListener('invalid', () => {\n const fileFormElements = 
this.container.querySelectorAll('.dcp-modal-body input[type=\"file\"], .dcp-modal-body input[type=\"text\"]')\n const filledInFileFormElements = Array.from(fileFormElements).filter(fileFormElement => fileFormElement.value !== '')\n if (fileFormElements.length !== 0 && filledInFileFormElements.length !== 0) {\n this.continueButton.setCustomValidity('')\n // Clicking instead of dispatching a submit event to ensure other form validation is used before submitting the form.\n this.continueButton.click()\n }\n })\n\n return [fileInput, fileSelectionRow]\n\n /**\n * Checks that the dropped items contain only a single keystore file.\n * If valid, sets the file input's value to the dropped file.\n * @param {DragEvent} event - Contains the files dropped.\n */\n function selectDroppedFile (event) {\n // Prevent file from being opened.\n event.preventDefault()\n\n // Check if only one file was dropped.\n const wasOneFileDropped = event.dataTransfer.items.length === 1 ||\n event.dataTransfer.files.length === 1\n updateFileSelectionStatus(wasOneFileDropped)\n if (!wasOneFileDropped) {\n fileInput.setCustomValidity('Only one file can be uploaded.')\n fileInput.reportValidity()\n return\n } else {\n fileInput.setCustomValidity('')\n }\n\n // Now to use the DataTransfer interface to access the file(s), setting\n // the value of the file input.\n const file = event.dataTransfer.files[0]\n\n if (checkFileExtension(file)) {\n fileInput.files = event.dataTransfer.files\n fileInput.dispatchEvent(new Event('change'))\n }\n }\n\n function handleFileChange () {\n if (checkFileExtension(this.files[0]) && this.files.length === 1) {\n fileNamePlaceholder.innerText = `Selected File: ${this.files[0].name}`\n updateFileSelectionStatus(true)\n // Invoke a callback if additional functionality is required.\n if (typeof fileInputProperties.callback === 'function') {\n fileInputProperties.callback(this.files[0])\n }\n }\n }\n\n /**\n * Checks if the file extension on the inputted file is correct.\n * @param {File} file - The file to check\n * @returns {boolean} True if the file extension is valid, false otherwise.\n */\n function checkFileExtension (file) {\n // If there's no restriction, return true.\n if (!fileInputProperties.extension) {\n return true\n }\n const fileExtension = file.name.split('.').pop()\n const isValidExtension = fileExtension === fileInputProperties.extension\n updateFileSelectionStatus(isValidExtension)\n if (!isValidExtension) {\n fileInput.setCustomValidity(`Only a .${fileInputProperties.extension} file can be uploaded.`)\n fileInput.reportValidity()\n fileNamePlaceholder.classList.add('hidden')\n } else {\n fileInput.setCustomValidity('')\n }\n return isValidExtension\n }\n\n /**\n * Updates the file input to reflect the validity of the current file\n * selection.\n * @param {boolean} isValidFileSelection - True if a single .keystore file\n * was selected. False otherwise.\n */\n function updateFileSelectionStatus (isValidFileSelection) {\n imageContainer.innerHTML = ''\n const statusImage = document.createElement('span')\n statusImage.classList.add('fas', isValidFileSelection ? 'fa-check' : 'fa-times')\n statusImage.style.color = isValidFileSelection ? 
'green' : 'red'\n imageContainer.appendChild(statusImage)\n\n if (!isValidFileSelection) {\n fileInput.value = null\n fileNamePlaceholder.classList.add('hidden')\n } else {\n fileNamePlaceholder.classList.remove('hidden')\n }\n\n // If the modal contains a password field for a keystore file, change its\n // visibility.\n const walletPasswordInputContainer = document.querySelector('.dcp-modal-body input[type=\"password\"]').parentElement.parentElement\n if (walletPasswordInputContainer) {\n if (isValidFileSelection) {\n walletPasswordInputContainer.classList.remove('hidden')\n const walletPasswordInput = document.querySelector('.dcp-modal-body input[type=\"password\"]')\n walletPasswordInput.focus()\n } else {\n walletPasswordInputContainer.classList.add('hidden')\n }\n }\n }\n\n function highlightDropArea (event) {\n event.preventDefault()\n this.classList.add('highlight')\n }\n\n function unhighlightDropArea (event) {\n event.preventDefault()\n this.classList.remove('highlight')\n }\n }\n\n /**\n * Sets up a custom tooltip to pop up when the passwords do not match, but are\n * valid otherwise.\n */\n addFormValidationForPasswordConfirmation () {\n const [newPassword, confirmPassword] = document.querySelectorAll('.dcp-modal-body input[type=\"password\"]')\n if (!newPassword || !confirmPassword) {\n throw Error('New Password field and Confirm Password fields not present.')\n }\n\n newPassword.addEventListener('input', checkMatchingPasswords)\n confirmPassword.addEventListener('input', checkMatchingPasswords)\n\n function checkMatchingPasswords () {\n if (newPassword.value !== confirmPassword.value &&\n newPassword.validity.valid &&\n confirmPassword.validity.valid) {\n newPassword.setCustomValidity('Both passwords must match.')\n } else if (newPassword.value === confirmPassword.value ||\n newPassword.validity.tooShort ||\n newPassword.validity.patternMismatch ||\n newPassword.validity.valueMissing ||\n confirmPassword.validity.tooShort ||\n confirmPassword.validity.patternMismatch ||\n confirmPassword.validity.valueMissing) {\n // If the passwords fields match or have become invalidated some other\n // way again, reset the custom message.\n newPassword.setCustomValidity('')\n }\n }\n }\n\n updateInvalidEmailMessage() {\n const email = document.querySelector('.dcp-modal-body input[id=\"email\"')\n if (!email){\n throw Error(\"Email field not present\")\n }\n email.addEventListener('input', checkValidEmail);\n function checkValidEmail() {\n if (!email.validity.patternMismatch &&\n !email.validity.valueMissing) {\n email.setCustomValidity('')\n } else {\n email.setCustomValidity(\"Enter a valid email address.\")\n }\n\n }\n }\n\n /**\n * Adds message(s) to the modal's body.\n * @param {string} messages - The message(s) to add to the modal's body.\n * @returns Paragraph element(s) containing the message(s) added to the\n * modal's body.\n */\n addMessage (...messages) {\n const elements = []\n const body = this.container.querySelector('.dcp-modal-body')\n for (let i = 0; i < messages.length; i++) {\n const row = document.createElement('div')\n row.className = 'row'\n\n const paragraph = document.createElement('p')\n paragraph.innerHTML = messages[i]\n paragraph.classList.add('message')\n row.appendChild(paragraph)\n body.appendChild(row)\n\n elements.push(paragraph)\n }\n\n if (elements.length === 1) return elements[0]\n else return elements\n }\n\n addHorizontalRule () {\n const body = this.container.querySelector('.dcp-modal-body')\n body.appendChild(document.createElement('hr'))\n }\n\n // 
Does what it says. Still ill advised to use unless you have to.\n addCustomHTML (htmlStr, browseCallback) {\n const elements = []\n const body = this.container.querySelector('.dcp-modal-body')\n body.innerHTML += htmlStr\n body.querySelector('#browse-button').addEventListener('click', browseCallback.bind(this, this))\n\n if (elements.length === 1) return elements[0]\n else return elements\n }\n\n addButton (...buttons) {\n const elements = []\n const body = this.container.querySelector('.dcp-modal-body')\n for (let i = 0; i < buttons.length; i++) {\n const row = document.createElement('div')\n row.className = 'row'\n\n let col = document.createElement('div')\n col.className = 'col-md-4'\n\n const description = document.createElement('span')\n description.innerText = buttons[i].description\n\n col.appendChild(description)\n row.appendChild(col)\n\n col = document.createElement('div')\n col.className = 'col-md-8'\n\n const button = document.createElement('button')\n button.innerText = buttons[i].label\n button.addEventListener('click', buttons[i].callback.bind(this, this))\n\n elements.push(button)\n\n col.appendChild(button)\n row.appendChild(col)\n\n body.appendChild(row)\n }\n\n if (elements.length === 1) return elements[0]\n else return elements\n }\n}\n\n\n// Inject our special stylesheet from dcp-client only if we're on the portal webpage.\nif (typeof window !== 'undefined' && typeof document !== 'undefined' && dcpConfig.portal.location.hostname === window.location.hostname) {\n // <link rel='stylesheet' href='/css/dashboard.css'>\n const stylesheet = document.createElement('link')\n stylesheet.rel = 'stylesheet'\n // Needed for the duplicate check done later.\n stylesheet.id = 'dcp-modal-styles'\n\n const dcpClientBundle = document.getElementById('_dcp_client_bundle')\n let src\n if (dcpClientBundle) {\n src = dcpClientBundle.src.replace('dcp-client-bundle.js', 'dcp-modal-style.css')\n } else {\n src = dcpConfig.portal.location.href + 'dcp-client/dist/dcp-modal-style.css'\n }\n\n stylesheet.href = src\n // If the style was injected before, don't inject it again.\n // Could occur when loading a file that imports Modal.js and loading\n // comput.min.js in the same HTML file.\n if (document.getElementById(stylesheet.id) === null) {\n document.getElementsByTagName('head')[0].appendChild(stylesheet)\n }\n\n if (typeof {\"version\":\"a97d1d3338ffb45271c76e88ac5c541b96e81039\",\"branch\":\"release\",\"dcpClient\":{\"version\":\"4.2.7\",\"from\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#prod-20220531\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#d4fa748a061d970b2d829a01e37f075cb176458a\"},\"built\":\"Wed Jun 01 2022 11:06:50 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Wed 01 Jun 2022 11:06:48 AM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.70.0\",\"node\":\"v14.19.3\"} !== 'undefined' && typeof window.Modal === 'undefined') {\n window.Modal = Modal\n }\n}\n\n\n//# sourceURL=webpack://dcp/./portal/www/js/modal.js?");
+ eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ Modal)\n/* harmony export */ });\n/**\n * A Small Modal Class\n * @module Modal\n */\n/* globals Event dcpConfig */\nclass Modal {\n constructor (title, message, callback = false, exitHandler = false, {\n continueLabel = 'Continue',\n cancelLabel = 'Cancel',\n cancelVisible = true\n } = {}) {\n const modal = document.createElement('div')\n modal.className = 'dcp-modal-container-old day'\n modal.innerHTML = `\n <dialog class=\"dcp-modal-content\">\n <div class=\"dcp-modal-header\">\n <h2>${title}<button type=\"button\" class=\"close\">&times;</button></h2>\n ${message ? '<p>' + message + '</p>' : ''}\n </div>\n <div class=\"dcp-modal-loading hidden\">\n <div class='loading'></div>\n </div>\n <form onsubmit='return false' method=\"dialog\">\n <div class=\"dcp-modal-body\"></div>\n <div class=\"dcp-modal-footer ${cancelVisible ? '' : 'centered'}\">\n <button type=\"submit\" class=\"continue green-modal-button\">${continueLabel}</button>\n <button type=\"button\" class=\"cancel green-modal-button\">${cancelLabel}</button>\n </div>\n </form>\n </dialog>`\n\n // To give a reference to do developer who wants to override the form submit.\n // May occur if they want to validate the information in the backend\n // without closing the modal prematurely.\n this.form = modal.querySelector('.dcp-modal-content form')\n this.continueButton = modal.querySelector('.dcp-modal-footer button.continue')\n this.cancelButton = modal.querySelector('.dcp-modal-footer button.cancel')\n this.closeButton = modal.querySelector('.dcp-modal-header .close')\n if (!cancelVisible) {\n this.cancelButton.style.display = 'none'\n }\n\n // To remove the event listener, the reference to the original function\n // added is required.\n this.formSubmitHandler = this.continue.bind(this)\n\n modal.addEventListener('keydown', function (event) {\n event.stopPropagation()\n // 27 is the keycode for the escape key.\n if (event.keyCode === 27) this.close()\n }.bind(this))\n\n this.container = modal\n this.callback = callback\n this.exitHandler = exitHandler\n document.body.appendChild(modal)\n }\n\n changeFormSubmitHandler (newFormSubmitHandler) {\n this.formSubmitHandler = newFormSubmitHandler\n }\n\n /**\n * Validates the form values in the modal and calls the modal's callback\n */\n async continue (event) {\n // To further prevent form submission from trying to redirect from the\n // current page.\n if (event instanceof Event) {\n event.preventDefault()\n }\n let fieldsAreValid = true\n let formElements = this.container.querySelectorAll('.dcp-modal-body select, .dcp-modal-body input, .dcp-modal-body textarea')\n\n const formValues = []\n if (typeof formElements.length === 'undefined') formElements = [formElements]\n // Separate into two loops to enable input validation requiring formValues\n // that come after it. e.g. 
Two password fields matching.\n for (let i = 0; i < formElements.length; i++) {\n switch (formElements[i].type) {\n case 'file':\n formValues.push(formElements[i])\n break\n case 'checkbox':\n formValues.push(formElements[i].checked)\n break\n default:\n formValues.push(formElements[i].value)\n break\n }\n }\n for (let i = 0; i < formElements.length; i++) {\n if (formElements[i].validation) {\n // Optional fields are allowed to be empty but still can't be wrong if not empty.\n if (!(formElements[i].value === '' && !formElements[i].required)) {\n if (typeof formElements[i].validation === 'function') {\n if (!formElements[i].validation(formValues)) {\n fieldsAreValid = false\n formElements[i].classList.add('is-invalid')\n }\n } else if (!formElements[i].validation.test(formElements[i].value)) {\n fieldsAreValid = false\n formElements[i].classList.add('is-invalid')\n }\n }\n }\n }\n\n if (!fieldsAreValid) return\n\n this.loading()\n if (typeof this.callback === 'function') {\n try {\n return this.callback(formValues)\n } catch (error) {\n console.error('Unexpected error in modal.continue:', error);\n return this.close(false)\n }\n }\n this.close(true)\n }\n\n loading () {\n this.container.querySelector('.dcp-modal-loading').classList.remove('hidden')\n this.container.querySelector('.dcp-modal-body').classList.add('hidden')\n this.container.querySelector('.dcp-modal-footer').classList.add('hidden')\n }\n\n open () {\n this.form.addEventListener('submit', async (event) => {\n const success = await this.formSubmitHandler(event)\n if (success === false) {\n return\n }\n this.close(true)\n })\n // When the user clicks on <span> (x), close the modal\n this.closeButton.addEventListener('click', this.close.bind(this))\n this.cancelButton.addEventListener('click', this.close.bind(this))\n\n // Prevent lingering outlines after clicking some form elements.\n this.container.querySelectorAll('.dcp-modal-body button, .dcp-modal-body input[type=\"checkbox\"]').forEach(element => {\n element.addEventListener('click', () => {\n element.blur()\n })\n })\n\n // Show the modal.\n this.container.style.display = 'block'\n\n const formElements = this.container.querySelectorAll('.dcp-modal-body select, .dcp-modal-body input')\n if (formElements.length) {\n formElements[0].focus()\n if (formElements[0].type === 'text') {\n formElements[0].select()\n }\n for (const el of formElements) {\n if (el.realType) {\n el.type = el.realType\n }\n }\n } else {\n // With no form elements to allow for form submission on enter, focus the\n // continue button.\n this.container.querySelector('.dcp-modal-footer button.continue').focus()\n }\n } // TODO: This should return a promise with the action resolving it\n\n /**\n * Shows the modal and returns a promise of the result of the modal (e.g. 
was\n * it closed, did its action succeed?)\n */\n showModal () {\n return new Promise((resolve, reject) => {\n this.form.addEventListener('submit', handleContinue.bind(this))\n this.cancelButton.addEventListener('click', handleCancel.bind(this))\n this.closeButton.addEventListener('click', handleCancel.bind(this))\n\n // Prevent lingering outlines after clicking some form elements.\n this.container.querySelectorAll('.dcp-modal-body button, .dcp-modal-body input[type=\"checkbox\"]').forEach(element => {\n element.addEventListener('click', () => {\n element.blur()\n })\n })\n\n // Show the modal.\n this.container.style.display = 'block'\n\n const formElements = this.container.querySelectorAll('.dcp-modal-body select, .dcp-modal-body input')\n if (formElements.length) {\n formElements[0].focus()\n if (formElements[0].type === 'text') {\n formElements[0].select()\n }\n for (const el of formElements) {\n if (el.realType) {\n el.type = el.realType\n }\n }\n } else {\n // With no form elements to allow for form submission on enter, focus the\n // continue button.\n this.continueButton.focus()\n }\n\n async function handleContinue (event) {\n let result\n try {\n result = await this.formSubmitHandler(event)\n } catch (error) {\n reject(error)\n }\n this.close(true)\n resolve(result)\n }\n\n async function handleCancel () {\n let result\n try {\n result = await this.close()\n } catch (error) {\n reject(error)\n }\n resolve(result)\n }\n })\n }\n\n close (success = false) {\n this.container.style.display = 'none'\n if (this.container.parentNode) {\n this.container.parentNode.removeChild(this.container)\n }\n\n // @todo this needs to remove eventlisteners to prevent memory leaks\n\n if ((success !== true) && typeof this.exitHandler === 'function') {\n return this.exitHandler(this)\n }\n }\n\n /**\n * Adds different form elements to the modal depending on the case.\n *\n * @param {*} elements - The properties of the form elements to add.\n * @returns {HTMLElement} The input form elements.\n */\n addFormElement (...elements) {\n const body = this.container.querySelector('.dcp-modal-body')\n const inputElements = []\n let label\n for (let i = 0; i < elements.length; i++) {\n let row = document.createElement('div')\n row.className = 'row'\n\n let col, input\n switch (elements[i].type) {\n case 'button':\n col = document.createElement('div')\n col.className = 'col-md-12'\n\n input = document.createElement('button')\n input.innerHTML = elements[i].label\n input.type = 'button'\n input.classList.add('green-modal-button')\n if (!elements[i].onclick) {\n throw new Error('A button in the modal body should have an on click event handler.')\n }\n input.addEventListener('click', elements[i].onclick)\n\n col.appendChild(input)\n row.appendChild(col)\n break\n case 'textarea':\n col = document.createElement('div')\n col.className = 'col-md-12'\n\n input = document.createElement('textarea')\n input.className = 'text-input-field form-control'\n if (elements[i].placeholder) input.placeholder = elements[i].placeholder\n\n col.appendChild(input)\n row.appendChild(col)\n break\n case 'text':\n case 'email':\n case 'number':\n case 'password': {\n const inputCol = document.createElement('div')\n\n input = document.createElement('input')\n input.type = elements[i].type\n input.validation = elements[i].validation\n input.autocomplete = elements[i].autocomplete || (elements[i].type === 'password' ? 
'off' : 'on')\n input.className = 'text-input-field form-control'\n\n // Adding bootstraps custom feedback styles.\n let invalidFeedback = null\n if (elements[i].invalidFeedback) {\n invalidFeedback = document.createElement('div')\n invalidFeedback.className = 'invalid-feedback'\n invalidFeedback.innerText = elements[i].invalidFeedback\n }\n\n if (elements[i].type === 'password') {\n elements[i].realType = 'password'\n }\n\n if (elements[i].label) {\n const labelCol = document.createElement('div')\n label = document.createElement('label')\n label.innerText = elements[i].label\n const inputId = 'dcp-modal-input-' + this.container.querySelectorAll('input[type=\"text\"], input[type=\"email\"], input[type=\"number\"], input[type=\"password\"]').length\n label.setAttribute('for', inputId)\n input.id = inputId\n labelCol.classList.add('col-md-6', 'label-column')\n labelCol.appendChild(label)\n row.appendChild(labelCol)\n inputCol.className = 'col-md-6'\n } else {\n inputCol.className = 'col-md-12'\n }\n\n inputCol.appendChild(input)\n if (invalidFeedback !== null) {\n inputCol.appendChild(invalidFeedback)\n }\n row.appendChild(inputCol)\n break\n }\n case 'select':\n col = document.createElement('div')\n col.className = 'col-md-4'\n\n label = document.createElement('span')\n label.innerText = elements[i].label\n\n col.appendChild(label)\n row.appendChild(col)\n\n col = document.createElement('div')\n col.className = 'col-md-8'\n\n input = document.createElement('select')\n\n col.appendChild(input)\n row.appendChild(col)\n break\n case 'checkbox': {\n row.classList.add('checkbox-row')\n const checkboxLabelCol = document.createElement('div')\n checkboxLabelCol.classList.add('label-column', 'checkbox-label-column')\n\n label = document.createElement('label')\n label.innerText = elements[i].label\n label.for = 'dcp-checkbox-input-' + this.container.querySelectorAll('input[type=\"checkbox\"]').length\n label.setAttribute('for', label.for)\n label.className = 'checkbox-label'\n\n checkboxLabelCol.appendChild(label)\n\n const checkboxCol = document.createElement('div')\n checkboxCol.classList.add('checkbox-column')\n\n input = document.createElement('input')\n input.type = 'checkbox'\n input.id = label.for\n if (elements[i].checked) {\n input.checked = true\n }\n\n checkboxCol.appendChild(input)\n\n if (elements[i].labelToTheRightOfCheckbox) {\n checkboxCol.classList.add('col-md-5')\n row.appendChild(checkboxCol)\n checkboxLabelCol.classList.add('col-md-7')\n row.appendChild(checkboxLabelCol)\n } else {\n checkboxLabelCol.classList.add('col-md-6')\n checkboxCol.classList.add('col-md-6')\n row.appendChild(checkboxLabelCol)\n row.appendChild(checkboxCol)\n }\n break\n }\n case 'file':\n [input, row] = this.addFileInput(elements[i], input, row)\n break\n case 'label':\n row.classList.add('label-row')\n label = document.createElement('label')\n label.innerText = elements[i].label\n row.appendChild(label)\n break\n }\n\n // Copy other possibly specified element properties:\n const inputPropertyNames = ['title', 'inputmode', 'value', 'minLength', 'maxLength', 'size', 'required', 'pattern', 'min', 'max', 'step', 'placeholder', 'accept', 'multiple', 'id', 'onkeypress', 'oninput', 'for', 'readonly', 'autocomplete']\n for (const propertyName of inputPropertyNames) {\n if (Object.prototype.hasOwnProperty.call(elements[i], propertyName)) {\n if (propertyName === 'for' && !label.hasAttribute(propertyName)) {\n label.setAttribute(propertyName, elements[i][propertyName])\n }\n if (propertyName.startsWith('on')) {\n 
input.addEventListener(propertyName.slice(2), elements[i][propertyName])\n } else {\n input.setAttribute(propertyName, elements[i][propertyName])\n }\n }\n }\n\n inputElements.push(input)\n body.appendChild(row)\n }\n\n if (inputElements.length === 1) return inputElements[0]\n else return inputElements\n }\n\n /**\n * Adds a drag and drop file form element to the modal.\n *\n * @param {*} fileInputProperties - An object specifying some of the\n * properties of the file input element.\n * @param {*} fileInput - Placeholders to help create the file\n * input.\n * @param {HTMLDivElement} row - Placeholders to help create the file\n * input.\n */\n addFileInput (fileInputProperties, fileInput, row) {\n // Adding the upload label.\n const uploadLabel = document.createElement('label')\n uploadLabel.innerText = fileInputProperties.label\n row.appendChild(uploadLabel)\n const body = this.container.querySelector('.dcp-modal-body')\n body.appendChild(row)\n const fileSelectionRow = document.createElement('div')\n fileSelectionRow.id = 'file-selection-row'\n\n // Adding the drag and drop file upload input.\n const dropContainer = document.createElement('div')\n dropContainer.id = 'drop-container'\n\n // Adding an image of a wallet\n const imageContainer = document.createElement('div')\n imageContainer.id = 'image-container'\n const walletImage = document.createElement('span')\n walletImage.classList.add('fas', 'fa-wallet')\n imageContainer.appendChild(walletImage)\n\n // Adding some text prompts\n const dropMessage = document.createElement('span')\n dropMessage.innerText = 'Drop a keystore file here'\n const orMessage = document.createElement('span')\n orMessage.innerText = 'or'\n\n // Adding the manual file input element (hiding the default one)\n const fileInputContainer = document.createElement('div')\n const fileInputLabel = document.createElement('label')\n // Linking the label to the file input so that clicking on the label\n // activates the file input.\n fileInputLabel.setAttribute('for', 'file-input')\n fileInputLabel.innerText = 'Browse'\n fileInput = document.createElement('input')\n fileInput.type = fileInputProperties.type\n fileInput.id = 'file-input'\n // To remove the lingering outline after selecting the file.\n fileInput.addEventListener('click', () => {\n fileInput.blur()\n })\n fileInputContainer.append(fileInput, fileInputLabel)\n\n // Creating the final row element to append to the modal body.\n dropContainer.append(imageContainer, dropMessage, orMessage, fileInputContainer)\n fileSelectionRow.appendChild(dropContainer)\n\n // Adding functionality to the drag and drop file input.\n dropContainer.addEventListener('drop', selectDroppedFile.bind(this))\n dropContainer.addEventListener('drop', unhighlightDropArea)\n // Prevent file from being opened by the browser.\n dropContainer.ondragover = highlightDropArea\n dropContainer.ondragenter = highlightDropArea\n dropContainer.ondragleave = unhighlightDropArea\n\n fileInput.addEventListener('change', handleFileChange)\n\n const fileNamePlaceholder = document.createElement('center')\n fileNamePlaceholder.id = 'file-name-placeholder'\n fileNamePlaceholder.className = 'row'\n fileNamePlaceholder.innerText = ''\n fileSelectionRow.appendChild(fileNamePlaceholder)\n fileNamePlaceholder.classList.add('hidden')\n\n // Check if the continue button is invalid on the keystore upload modal and\n // click it if it should no longer be invalid.\n this.continueButton.addEventListener('invalid', () => {\n const fileFormElements = 
this.container.querySelectorAll('.dcp-modal-body input[type=\"file\"], .dcp-modal-body input[type=\"text\"]')\n const filledInFileFormElements = Array.from(fileFormElements).filter(fileFormElement => fileFormElement.value !== '')\n if (fileFormElements.length !== 0 && filledInFileFormElements.length !== 0) {\n this.continueButton.setCustomValidity('')\n // Clicking instead of dispatching a submit event to ensure other form validation is used before submitting the form.\n this.continueButton.click()\n }\n })\n\n return [fileInput, fileSelectionRow]\n\n /**\n * Checks that the dropped items contain only a single keystore file.\n * If valid, sets the file input's value to the dropped file.\n * @param {DragEvent} event - Contains the files dropped.\n */\n function selectDroppedFile (event) {\n // Prevent file from being opened.\n event.preventDefault()\n\n // Check if only one file was dropped.\n const wasOneFileDropped = event.dataTransfer.items.length === 1 ||\n event.dataTransfer.files.length === 1\n updateFileSelectionStatus(wasOneFileDropped)\n if (!wasOneFileDropped) {\n fileInput.setCustomValidity('Only one file can be uploaded.')\n fileInput.reportValidity()\n return\n } else {\n fileInput.setCustomValidity('')\n }\n\n // Now to use the DataTransfer interface to access the file(s), setting\n // the value of the file input.\n const file = event.dataTransfer.files[0]\n\n if (checkFileExtension(file)) {\n fileInput.files = event.dataTransfer.files\n fileInput.dispatchEvent(new Event('change'))\n }\n }\n\n function handleFileChange () {\n if (checkFileExtension(this.files[0]) && this.files.length === 1) {\n fileNamePlaceholder.innerText = `Selected File: ${this.files[0].name}`\n updateFileSelectionStatus(true)\n // Invoke a callback if additional functionality is required.\n if (typeof fileInputProperties.callback === 'function') {\n fileInputProperties.callback(this.files[0])\n }\n }\n }\n\n /**\n * Checks if the file extension on the inputted file is correct.\n * @param {File} file - The file to check\n * @returns {boolean} True if the file extension is valid, false otherwise.\n */\n function checkFileExtension (file) {\n // If there's no restriction, return true.\n if (!fileInputProperties.extension) {\n return true\n }\n const fileExtension = file.name.split('.').pop()\n const isValidExtension = fileExtension === fileInputProperties.extension\n updateFileSelectionStatus(isValidExtension)\n if (!isValidExtension) {\n fileInput.setCustomValidity(`Only a .${fileInputProperties.extension} file can be uploaded.`)\n fileInput.reportValidity()\n fileNamePlaceholder.classList.add('hidden')\n } else {\n fileInput.setCustomValidity('')\n }\n return isValidExtension\n }\n\n /**\n * Updates the file input to reflect the validity of the current file\n * selection.\n * @param {boolean} isValidFileSelection - True if a single .keystore file\n * was selected. False otherwise.\n */\n function updateFileSelectionStatus (isValidFileSelection) {\n imageContainer.innerHTML = ''\n const statusImage = document.createElement('span')\n statusImage.classList.add('fas', isValidFileSelection ? 'fa-check' : 'fa-times')\n statusImage.style.color = isValidFileSelection ? 
'green' : 'red'\n imageContainer.appendChild(statusImage)\n\n if (!isValidFileSelection) {\n fileInput.value = null\n fileNamePlaceholder.classList.add('hidden')\n } else {\n fileNamePlaceholder.classList.remove('hidden')\n }\n\n // If the modal contains a password field for a keystore file, change its\n // visibility.\n const walletPasswordInputContainer = document.querySelector('.dcp-modal-body input[type=\"password\"]').parentElement.parentElement\n if (walletPasswordInputContainer) {\n if (isValidFileSelection) {\n walletPasswordInputContainer.classList.remove('hidden')\n const walletPasswordInput = document.querySelector('.dcp-modal-body input[type=\"password\"]')\n walletPasswordInput.focus()\n } else {\n walletPasswordInputContainer.classList.add('hidden')\n }\n }\n }\n\n function highlightDropArea (event) {\n event.preventDefault()\n this.classList.add('highlight')\n }\n\n function unhighlightDropArea (event) {\n event.preventDefault()\n this.classList.remove('highlight')\n }\n }\n\n /**\n * Sets up a custom tooltip to pop up when the passwords do not match, but are\n * valid otherwise.\n */\n addFormValidationForPasswordConfirmation () {\n const [newPassword, confirmPassword] = document.querySelectorAll('.dcp-modal-body input[type=\"password\"]')\n if (!newPassword || !confirmPassword) {\n throw Error('New Password field and Confirm Password fields not present.')\n }\n\n newPassword.addEventListener('input', checkMatchingPasswords)\n confirmPassword.addEventListener('input', checkMatchingPasswords)\n\n function checkMatchingPasswords () {\n if (newPassword.value !== confirmPassword.value &&\n newPassword.validity.valid &&\n confirmPassword.validity.valid) {\n newPassword.setCustomValidity('Both passwords must match.')\n } else if (newPassword.value === confirmPassword.value ||\n newPassword.validity.tooShort ||\n newPassword.validity.patternMismatch ||\n newPassword.validity.valueMissing ||\n confirmPassword.validity.tooShort ||\n confirmPassword.validity.patternMismatch ||\n confirmPassword.validity.valueMissing) {\n // If the passwords fields match or have become invalidated some other\n // way again, reset the custom message.\n newPassword.setCustomValidity('')\n }\n }\n }\n\n updateInvalidEmailMessage() {\n const email = document.querySelector('.dcp-modal-body input[id=\"email\"')\n if (!email){\n throw Error(\"Email field not present\")\n }\n email.addEventListener('input', checkValidEmail);\n function checkValidEmail() {\n if (!email.validity.patternMismatch &&\n !email.validity.valueMissing) {\n email.setCustomValidity('')\n } else {\n email.setCustomValidity(\"Enter a valid email address.\")\n }\n\n }\n }\n\n /**\n * Adds message(s) to the modal's body.\n * @param {string} messages - The message(s) to add to the modal's body.\n * @returns Paragraph element(s) containing the message(s) added to the\n * modal's body.\n */\n addMessage (...messages) {\n const elements = []\n const body = this.container.querySelector('.dcp-modal-body')\n for (let i = 0; i < messages.length; i++) {\n const row = document.createElement('div')\n row.className = 'row'\n\n const paragraph = document.createElement('p')\n paragraph.innerHTML = messages[i]\n paragraph.classList.add('message')\n row.appendChild(paragraph)\n body.appendChild(row)\n\n elements.push(paragraph)\n }\n\n if (elements.length === 1) return elements[0]\n else return elements\n }\n\n addHorizontalRule () {\n const body = this.container.querySelector('.dcp-modal-body')\n body.appendChild(document.createElement('hr'))\n }\n\n // 
Does what it says. Still ill advised to use unless you have to.\n addCustomHTML (htmlStr, browseCallback) {\n const elements = []\n const body = this.container.querySelector('.dcp-modal-body')\n body.innerHTML += htmlStr\n body.querySelector('#browse-button').addEventListener('click', browseCallback.bind(this, this))\n\n if (elements.length === 1) return elements[0]\n else return elements\n }\n\n addButton (...buttons) {\n const elements = []\n const body = this.container.querySelector('.dcp-modal-body')\n for (let i = 0; i < buttons.length; i++) {\n const row = document.createElement('div')\n row.className = 'row'\n\n let col = document.createElement('div')\n col.className = 'col-md-4'\n\n const description = document.createElement('span')\n description.innerText = buttons[i].description\n\n col.appendChild(description)\n row.appendChild(col)\n\n col = document.createElement('div')\n col.className = 'col-md-8'\n\n const button = document.createElement('button')\n button.innerText = buttons[i].label\n button.addEventListener('click', buttons[i].callback.bind(this, this))\n\n elements.push(button)\n\n col.appendChild(button)\n row.appendChild(col)\n\n body.appendChild(row)\n }\n\n if (elements.length === 1) return elements[0]\n else return elements\n }\n}\n\n\n// Inject our special stylesheet from dcp-client only if we're on the portal webpage.\nif (typeof window !== 'undefined' && typeof document !== 'undefined' && dcpConfig.portal.location.hostname === window.location.hostname) {\n // <link rel='stylesheet' href='/css/dashboard.css'>\n const stylesheet = document.createElement('link')\n stylesheet.rel = 'stylesheet'\n // Needed for the duplicate check done later.\n stylesheet.id = 'dcp-modal-styles'\n\n const dcpClientBundle = document.getElementById('_dcp_client_bundle')\n let src\n if (dcpClientBundle) {\n src = dcpClientBundle.src.replace('dcp-client-bundle.js', 'dcp-modal-style.css')\n } else {\n src = dcpConfig.portal.location.href + 'dcp-client/dist/dcp-modal-style.css'\n }\n\n stylesheet.href = src\n // If the style was injected before, don't inject it again.\n // Could occur when loading a file that imports Modal.js and loading\n // comput.min.js in the same HTML file.\n if (document.getElementById(stylesheet.id) === null) {\n document.getElementsByTagName('head')[0].appendChild(stylesheet)\n }\n\n if (typeof {\"version\":\"fca68c239b7cda20a34513139c096b25d2576370\",\"branch\":\"release\",\"dcpClient\":{\"version\":\"4.2.8\",\"from\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#prod-20220620\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#e6f31a3a6da1574b3d3bf252bd80208e07937946\"},\"built\":\"Tue Jun 21 2022 13:58:11 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Tue 21 Jun 2022 01:58:09 PM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.70.0\",\"node\":\"v14.19.3\"} !== 'undefined' && typeof window.Modal === 'undefined') {\n window.Modal = Modal\n }\n}\n\n\n//# sourceURL=webpack://dcp/./portal/www/js/modal.js?");
 
  /***/ }),
 
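The hunk above carries portal/www/js/modal.js, the small Modal class that dcp-client bundles for the portal. Below is a minimal usage sketch assuming only the API visible in the source above; the title, message, field label, validation pattern, and callback body are illustrative values, not anything shipped by dcp-client.

// Modal is the module's default export (also exposed as window.Modal on the portal page).
const modal = new Modal(
  'Rename worker',                 // title rendered in the dialog header (illustrative)
  'Choose a new display name.',    // optional message paragraph (illustrative)
  ([displayName]) => {             // callback receives the collected form values as an array
    console.log('submitted:', displayName)
  },
  false,                           // exitHandler: nothing special to do on cancel/close
  { continueLabel: 'Save', cancelLabel: 'Cancel' }
)

// Field descriptor; 'validation' may be a RegExp or a function of all form values.
modal.addFormElement({
  type: 'text',
  label: 'Display name',
  required: true,
  validation: /^\S.{0,63}$/
})

// showModal() returns a promise that resolves with the submit handler's result
// once the form is submitted (call from an async context).
await modal.showModal()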
@@ -4126,7 +4126,7 @@ eval("/**\n * @file password.js\n * Modal providing a way to
  \**********************************************/
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
 
- eval("/**\n * @file client-modal/utils.js\n * @author KC Erb\n * @date Mar 2020\n * \n * All shared functions among the modals.\n */\nconst { fetchRelative } = __webpack_require__(/*! ./fetch-relative */ \"./src/dcp-client/client-modal/fetch-relative.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nexports.OnCloseErrorCode = 'DCP_CM:CANCELX';\n\nif (DCP_ENV.isBrowserPlatform) {\n // Provide as export for the convenience of `utils.MicroModal` instead of a separate require.\n exports.MicroModal = __webpack_require__(/*! micromodal */ \"./node_modules/micromodal/dist/micromodal.es.js\")[\"default\"];\n}\n\n/**\n * Return a unique string, formatted as a GET parameter, that changes often enough to\n * always force the browser to fetch the latest version of our resource.\n *\n * @note Currently always returns the Date-based poison due to webpack. \n */\nfunction cachePoison() {\n if (true)\n return '?ucp=a97d1d3338ffb45271c76e88ac5c541b96e81039'; /* installer token */\n return '?ucp=' + Date.now();\n}\n \n/* Detect load type - on webpack, load dynamic content relative to webpack bundle;\n * otherwise load relative to the current scheduler's configured portal.\n */\nexports.myScript = (typeof document !== 'undefined') && document.currentScript;\nexports.corsProxyHref = undefined;\nif (exports.myScript && exports.myScript === (__webpack_require__(/*! ./fetch-relative */ \"./src/dcp-client/client-modal/fetch-relative.js\").myScript)) {\n let url = new ((__webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\").DcpURL))(exports.myScript.src);\n exports.corsProxyHref = url.resolve('../cors-proxy.html');\n}\n\n/**\n * Look for modal id and required ids on page based on config, if not found, provide from dcp-client.\n * The first id in the required array must be the id of the modal's form element.\n * @param {Object} modalConfig Modal configuration object\n * @param {string} modalConfig.id Id of parent modal element\n * @param {string[]} modalConfig.required Array of required ids in parent modal element\n * @param {string[]} [modalConfig.optional] Array of optional ids in parent modal element\n * @param {string} modalConfig.path Relative path to modal html in dcp-client\n * @returns {DOMElement[]} Array of modal elements on page [config.id, ...config.required]\n */\nexports.initModal = async function (modalConfig, onClose) {\n exports.corsProxyHref = exports.corsProxyHref || dcpConfig.portal.location.resolve('dcp-client/cors-proxy.html');\n\n // Call ensure modal on any eager-loaded modals.\n if (modalConfig.eagerLoad) {\n Promise.all(\n modalConfig.eagerLoad.map(config => ensureModal(config))\n )\n };\n\n const [elements, optionalElements] = await ensureModal(modalConfig);\n\n // Wire up form to prevent default, resolve on submission, reject+reset when closed (or call onClose when closed)\n const [modal, form] = elements;\n form.reset(); // ensure that form is fresh\n let formResolve, formReject;\n let formPromise = new Promise( function(res, rej) {\n formResolve = res;\n formReject = rej;\n });\n form.onsubmit = function (submitEvent) {\n submitEvent.preventDefault();\n modal.setAttribute(\"data-state\", \"submitted\");\n formResolve(submitEvent);\n }\n\n exports.MicroModal.show(modalConfig.id, { \n disableFocus: true, \n onClose: onClose || getDefaultOnClose(formReject)\n });\n return [elements, formPromise, 
optionalElements];\n};\n\n// Ensure all required modal elements are on page according to modalConfig\nasync function ensureModal(modalConfig) {\n let allRequiredIds = [modalConfig.id, ...modalConfig.required];\n let missing = allRequiredIds.filter( id => !document.getElementById(id) );\n if (missing.length > 0) {\n if (missing.length !== allRequiredIds.length)\n console.warn(`Some of the ids needed to replace the default DCP-modal were found, but not all. So the default DCP-Modal will be used. Missing ids are: [${missing}].`);\n let contents = await fetchRelative(exports.corsProxyHref, modalConfig.path + cachePoison());\n const container = document.createElement('div');\n container.innerHTML = contents;\n document.body.appendChild(container);\n }\n\n const elements = allRequiredIds.map(id => document.getElementById(id));\n const optionalElements = (modalConfig.optional || []).map(id => document.getElementById(id));\n return [elements, optionalElements];\n};\n\n// This onClose is called by MicroModal and thus has the modal passed to it.\nfunction getDefaultOnClose (formReject) {\n return (modal) => {\n modal.offsetLeft; // forces style recalc\n const origState = modal.dataset.state;\n // reset form including data-state\n modal.setAttribute(\"data-state\", \"new\");\n // reject if closed without submitting form.\n if (origState !== \"submitted\") {\n const err = new DCPError(\"Modal was closed but modal's form was not submitted.\", exports.OnCloseErrorCode);\n formReject(err);\n }\n }\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/client-modal/utils.js?");
+ eval("/**\n * @file client-modal/utils.js\n * @author KC Erb\n * @date Mar 2020\n * \n * All shared functions among the modals.\n */\nconst { fetchRelative } = __webpack_require__(/*! ./fetch-relative */ \"./src/dcp-client/client-modal/fetch-relative.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nexports.OnCloseErrorCode = 'DCP_CM:CANCELX';\n\nif (DCP_ENV.isBrowserPlatform) {\n // Provide as export for the convenience of `utils.MicroModal` instead of a separate require.\n exports.MicroModal = __webpack_require__(/*! micromodal */ \"./node_modules/micromodal/dist/micromodal.es.js\")[\"default\"];\n}\n\n/**\n * Return a unique string, formatted as a GET parameter, that changes often enough to\n * always force the browser to fetch the latest version of our resource.\n *\n * @note Currently always returns the Date-based poison due to webpack. \n */\nfunction cachePoison() {\n if (true)\n return '?ucp=fca68c239b7cda20a34513139c096b25d2576370'; /* installer token */\n return '?ucp=' + Date.now();\n}\n \n/* Detect load type - on webpack, load dynamic content relative to webpack bundle;\n * otherwise load relative to the current scheduler's configured portal.\n */\nexports.myScript = (typeof document !== 'undefined') && document.currentScript;\nexports.corsProxyHref = undefined;\nif (exports.myScript && exports.myScript === (__webpack_require__(/*! ./fetch-relative */ \"./src/dcp-client/client-modal/fetch-relative.js\").myScript)) {\n let url = new ((__webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\").DcpURL))(exports.myScript.src);\n exports.corsProxyHref = url.resolve('../cors-proxy.html');\n}\n\n/**\n * Look for modal id and required ids on page based on config, if not found, provide from dcp-client.\n * The first id in the required array must be the id of the modal's form element.\n * @param {Object} modalConfig Modal configuration object\n * @param {string} modalConfig.id Id of parent modal element\n * @param {string[]} modalConfig.required Array of required ids in parent modal element\n * @param {string[]} [modalConfig.optional] Array of optional ids in parent modal element\n * @param {string} modalConfig.path Relative path to modal html in dcp-client\n * @returns {DOMElement[]} Array of modal elements on page [config.id, ...config.required]\n */\nexports.initModal = async function (modalConfig, onClose) {\n exports.corsProxyHref = exports.corsProxyHref || dcpConfig.portal.location.resolve('dcp-client/cors-proxy.html');\n\n // Call ensure modal on any eager-loaded modals.\n if (modalConfig.eagerLoad) {\n Promise.all(\n modalConfig.eagerLoad.map(config => ensureModal(config))\n )\n };\n\n const [elements, optionalElements] = await ensureModal(modalConfig);\n\n // Wire up form to prevent default, resolve on submission, reject+reset when closed (or call onClose when closed)\n const [modal, form] = elements;\n form.reset(); // ensure that form is fresh\n let formResolve, formReject;\n let formPromise = new Promise( function(res, rej) {\n formResolve = res;\n formReject = rej;\n });\n form.onsubmit = function (submitEvent) {\n submitEvent.preventDefault();\n modal.setAttribute(\"data-state\", \"submitted\");\n formResolve(submitEvent);\n }\n\n exports.MicroModal.show(modalConfig.id, { \n disableFocus: true, \n onClose: onClose || getDefaultOnClose(formReject)\n });\n return [elements, formPromise, 
optionalElements];\n};\n\n// Ensure all required modal elements are on page according to modalConfig\nasync function ensureModal(modalConfig) {\n let allRequiredIds = [modalConfig.id, ...modalConfig.required];\n let missing = allRequiredIds.filter( id => !document.getElementById(id) );\n if (missing.length > 0) {\n if (missing.length !== allRequiredIds.length)\n console.warn(`Some of the ids needed to replace the default DCP-modal were found, but not all. So the default DCP-Modal will be used. Missing ids are: [${missing}].`);\n let contents = await fetchRelative(exports.corsProxyHref, modalConfig.path + cachePoison());\n const container = document.createElement('div');\n container.innerHTML = contents;\n document.body.appendChild(container);\n }\n\n const elements = allRequiredIds.map(id => document.getElementById(id));\n const optionalElements = (modalConfig.optional || []).map(id => document.getElementById(id));\n return [elements, optionalElements];\n};\n\n// This onClose is called by MicroModal and thus has the modal passed to it.\nfunction getDefaultOnClose (formReject) {\n return (modal) => {\n modal.offsetLeft; // forces style recalc\n const origState = modal.dataset.state;\n // reset form including data-state\n modal.setAttribute(\"data-state\", \"new\");\n // reject if closed without submitting form.\n if (origState !== \"submitted\") {\n const err = new DCPError(\"Modal was closed but modal's form was not submitted.\", exports.OnCloseErrorCode);\n formReject(err);\n }\n }\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/client-modal/utils.js?");
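The only visible change to client-modal/utils.js in this release is the regenerated installer token embedded in cachePoison(): the '?ucp=' value moves from a97d1d33... to fca68c23.... Note that although the @note in the source says the Date-based value is always returned, the emitted bundle has the environment check constant-folded to if (true), so this build always returns the installer token and the Date.now() branch is dead code here. A minimal sketch of the cache-poison idea, with the token passed in as a hypothetical parameter rather than baked in at build time:

// Sketch only: installerToken stands in for the value webpack bakes into the bundle;
// it is not an argument of the real cachePoison().
function cachePoison(installerToken) {
  if (installerToken)
    return '?ucp=' + installerToken; // stable for a given release, so at most one refetch per install
  return '?ucp=' + Date.now();       // changes on every call, so every fetch bypasses the cache
}

// e.g. fetchRelative(corsProxyHref, modalConfig.path + cachePoison(token))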
 
  /***/ }),
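ensureModal() above encodes an all-or-nothing override contract: a host page can replace the default markup by providing elements carrying modalConfig.id and every id in modalConfig.required before the modal is initialized; if any of them is missing, the default fragment is fetched (through the CORS proxy, with the cache-poison suffix) and appended to document.body, and a partial override only earns a console.warn. A hedged sketch of that behaviour, with fetchHtml() as a hypothetical stand-in for the bundled fetchRelative(exports.corsProxyHref, modalConfig.path + cachePoison()):

// Sketch under the assumptions above; fetchHtml() is not a dcp-client export.
async function ensureModalSketch(modalConfig, fetchHtml) {
  const requiredIds = [modalConfig.id, ...modalConfig.required];
  const missing = requiredIds.filter(id => !document.getElementById(id));

  if (missing.length > 0) {
    if (missing.length !== requiredIds.length)
      console.warn('Partial override found; the default DCP modal will be used for: ' + missing.join(', '));
    const container = document.createElement('div');
    container.innerHTML = await fetchHtml();   // inject the default markup shipped with dcp-client
    document.body.appendChild(container);
  }

  return requiredIds.map(id => document.getElementById(id));
}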
 
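From the caller's side, initModal() resolves to [elements, formPromise, optionalElements], where formPromise resolves with the submit event once the form is submitted (the handler marks the dialog's data-state as submitted) and is rejected by the default onClose with a DCPError carrying OnCloseErrorCode ('DCP_CM:CANCELX') when the dialog is dismissed first. A hedged consumer sketch follows; the module path and element ids are illustrative, and err.code is an assumption about how DCPError exposes its code:

// Sketch only: 'my-modal' / 'my-modal-form' are hypothetical ids, and the require() path
// mirrors the bundled layout rather than a documented dcp-client entry point.
const modalUtils = require('./client-modal/utils');

async function promptUser() {
  const modalConfig = { id: 'my-modal', required: ['my-modal-form'], path: 'my-modal.html' };
  const [[, form], formPromise] = await modalUtils.initModal(modalConfig);

  try {
    await formPromise;                                          // resolves on form submission
    modalUtils.MicroModal.close(modalConfig.id);                // MicroModal's close-by-id API
    return new FormData(form);                                  // read what the user entered
  } catch (err) {
    if (err.code === modalUtils.OnCloseErrorCode) return null;  // dismissed without submitting
    throw err;
  }
}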
@@ -4137,7 +4137,7 @@ eval("/**\n * @file client-modal/utils.js\n * @author KC Erb\n * @date Mar 2020\
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
 
  "use strict";
- eval("/**\n * @file Client facing module that implements Compute Groups API\n * @module dcp/compute-groups\n * @access public\n * @author Kayra E-A <kayra@kingsds.network>\n * Wes Garland <wes@kingsds.network>\n * Paul <paul@kingsds.network>\n * @date Sept 2020\n * February 2022\n * May 2022\n */\n\n\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst hash = __webpack_require__(/*! ../../common/hash */ \"./src/common/hash.js\");\nconst { DCPError } = __webpack_require__(/*! ../../common/dcp-error */ \"./src/common/dcp-error.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('scheduler');\nconst { Address } = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { clientError, reconstructServiceError } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n\n/** @typedef {import('dcp/utils').apiServiceType} apiServiceType */\n/** @typedef {import('dcp/utils').apiClientType} apiClientType */\n/** @typedef {string} opaqueId */\n\n/**\n * @typedef {object} cgAccessType\n * @property {opaqueId} [id]\n * @property {string} [joinKey]\n */\n\n/**\n * @typedef {object} cgClientJoinType\n * @property {opaqueId} [id]\n * @property {Address} [joinAddress]\n * @property {string} [joinKey]\n * @property {string} [joinSecret]\n * @property {string} [joinHash]\n */\n\n/**\n * @typedef {object} cgServiceJoinType\n * @property {opaqueId} [id]\n * @property {Address} [joinAddress]\n * @property {string} [joinKey]\n * @property {string} [joinHashHash]\n */\n\n/**\n * Establishes the client connection to the computeGroups microservice if it does not exist already from the default config.\n * \n * @returns {protocolV4.Connection}\n * @access public\n * @example\n * const result = await exports.serviceConnection.send('createGroup', {\n name: name,\n description: description,\n });\n */\n\nexports.serviceConnection = null;\n\nconst openServiceConn = function openServiceConn()\n{\n exports.serviceConnection = new protocolV4.Connection(dcpConfig.scheduler.services.computeGroups);\n exports.serviceConnection.on('close', openServiceConn);\n}\n\nconst openAndConnectServiceConn = async function openAndConnectServiceConn()\n{\n openServiceConn();\n await exports.serviceConnection.connect();\n}\n\n/**\n * Resets the client connection to the computeGroups microservice.\n */\nexports.closeServiceConnection = async function closeServiceConnection() {\n if (exports.serviceConnection)\n {\n exports.serviceConnection.off('close', openServiceConn);\n exports.serviceConnection.close(null, true);\n }\n\n exports.serviceConnection = null;\n};\n\n/**\n * (Used in jobs/index.js)\n * KeepAlive for the service connection to compute groups.\n */\nexports.keepAlive = async function keepAlive() {\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n exports.serviceConnection.keepalive().catch(err => console.error('Warning: keepalive failed for compute groups service', err));\n}\n\n/**\n * Checks whether descriptor corresponds to the public compute group from the scheduler constants.\n */\nexports.isPublicComputeGroup = function isPublicComputeGroup(descriptor) {\n return descriptor.id === constants.computeGroups.public.id\n && descriptor.opaqueId 
=== constants.computeGroups.public.opaqueId;\n};\n\n/**\n * Returns a compute group identification snippet for diagnostic messages,\n * @param {object} descriptor - Must have one of the properties joinKey, id (id:=opaqueId). Specifically\n * descriptor = { joinKey: 'dcpDemo' } or descriptor = { id: 'bYcYGQ3NOpFnP4FKs6IBQd' },\n * where the corresponding row in table computeGroups have attributes\n * joinKey:='dcpDemo' or opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd' .\n * @returns {string}\n */\nfunction cgId(descriptor) {\n return (descriptor.joinKey) ? `joinKey ${descriptor.joinKey}` : `id ${descriptor.id}`;\n}\n\n/**\n * Verify sufficient information in descriptor to access a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgAccessType} descriptor \n * @param {string} methodName \n */\nfunction validateCGDescriptor(descriptor, methodName) {\n for (const prop in descriptor) {\n if ([ 'id', 'joinKey' ].includes(prop)) continue;\n if ([ 'joinAddress', 'joinHash', 'joinSecret' ].includes(prop))\n console.warn(`It is not necessary to specify '${prop}' in the descriptor ${JSON.stringify(descriptor)} when calling ${methodName}`);\n else\n console.error(`Do not specify '${prop}' in the descriptor ${JSON.stringify(descriptor)} when calling ${methodName}`);\n }\n}\n\n/**\n * Verify sufficient information in descriptor to authorize a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgClientJoinType} joinDescriptor \n * @param {string} methodName \n */\nfunction validateCGJoinDescriptor(joinDescriptor, methodName) {\n for (const prop in joinDescriptor) {\n if ([ 'id', 'joinKey', 'joinSecret', 'joinHash', 'joinAddress' ].includes(prop)) continue;\n console.error(`Do not specify '${prop}' in the descriptor ${JSON.stringify(joinDescriptor)} when calling ${methodName}`);\n }\n}\n\n/**\n * Build message to go across the wire.\n * Verify sufficient information in descriptor to access a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgAccessType} descriptor\n * @param {string} methodName\n * @returns {cgAccessType}\n */\nfunction buildCGMessage(descriptor, methodName)\n{\n if (exports.isPublicComputeGroup(descriptor)) return descriptor;\n\n const message = {};\n // Construct message.joinKey xor message.id .\n if (descriptor.joinKey) message.joinKey = descriptor.joinKey;\n else if (descriptor.id) message.id = descriptor.id; // id:=opaqueId\n\n debugging('computeGroups') && console.debug(`${methodName}:buildCGMessage: descriptor`, descriptor, 'message', message);\n\n validateCGDescriptor(descriptor, methodName);\n\n return message;\n}\n\n/**\n * Build message so that joinHash, joinSecret, opaqueId do not go across the wire.\n * Verify sufficient information in descriptor to authorize a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgClientJoinType} descriptor\n * @param {string} methodName\n * @returns {cgServiceJoinType}\n */\nfunction buildCGJoinMessage(descriptor, methodName)\n{\n if (exports.isPublicComputeGroup(descriptor)) return descriptor;\n\n const message = {};\n // Construct message.joinKey xor message.id .\n if (descriptor.joinKey) message.joinKey = descriptor.joinKey;\n else if (descriptor.id) message.id = descriptor.id; // id:=opaqueId\n // Construct message.joinAddress .\n if (descriptor.joinAddress) message.joinAddress = descriptor.joinAddress;\n\n debugging('computeGroups') && console.debug(`${methodName}:buildCGJoinMessage: descriptor`, descriptor, 'message', message);\n\n 
validateCGJoinDescriptor(descriptor, methodName);\n\n // Construct message.joinHashHash .\n if (descriptor.joinSecret) message.joinHashHash = hash.calculate(hash.eh1, exports.calculateJoinHash(descriptor), exports.serviceConnection.dcpsid);\n if (descriptor.joinHash) message.joinHashHash = hash.calculate(hash.eh1, descriptor.joinHash, exports.serviceConnection.dcpsid);\n\n return message;\n}\n\nfunction hasSufficientJoinInfo(joinDescriptor) {\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n return (joinDescriptor.joinKey && (joinDescriptor.joinSecret || joinDescriptor.joinHash))\n || (joinDescriptor.id && joinDescriptor.joinAddress)\n || exports.isPublicComputeGroup(joinDescriptor);\n}\n\nconst newCGPrototype = { type: 'object',\n parameters: {\n // name: { type: 'string', default: undefined }, /* name of group (length <= 255) */\n // description: { type: 'string', default: undefined }, /* description of group (length <= 255) */\n // id: { type: 'string', default: undefined }, /* opaqueId, the unique identifier of the compute group; nanoid (length === 22) */\n // joinKey: { type: 'string', default: undefined }, /* basically the login (length <= 255) */\n // joinSecret: { type: 'string', default: undefined }, /* basically the password (length <= 255) */\n // joinHash: { type: 'string', default: undefined }, /* basically the password, the joinSecret seeded & hashed */\n // joinAddress: { type: Address, default: undefined }, /* signature gives alternative to login/password */\n\n commissionRate: { type: 'BigNumber', default: undefined }, /* commission, see DCP-1889 */\n deployFee: { type: 'BigNumber', default: undefined }, /* number of DCC to take for every deployment */\n deployAccess: { type: 'string', default: undefined }, /* can be \"owner\"|\"join\" (dcp-1910) */\n addJobFee: { type: 'BigNumber', default: undefined }, /* fee required each time a job joins a compute group */\n maxTotalPayment: { type: 'BigNumber', default: undefined }, /* limit on maximum job payment, NULL => Infinity */\n\n /* Administrative limits on group. NULL => Infinity: Should all be integers or undefined. 
*/\n maxConcurrentJobs: { type: 'number', default: undefined },\n maxConcurrentWorkers: { type: 'number', default: undefined },\n maxConcurrentSandboxes: { type: 'number', default: undefined },\n maxConcurrentCPUs: { type: 'number', default: undefined },\n maxConcurrentGPUs: { type: 'number', default: undefined },\n maxConcurrentEscrow: { type: 'BigNumber', default: undefined },\n },\n};\n\n/**\n * Async function that creates a new Compute Group.\n *\n * The joinDescriptor is of the form { joinKey, joinSecret }, { joinKey, joinHash } or { id, joinAddress }.\n * where id will correspond to the attribute opaqueId in the new row in the computeGroups table.\n *\n * This function can only be called with ADMIN permission.\n * Properties not appearing in newCGPrototype.parameters are not allowed in otherProperties.\n *\n * @param {cgClientJoinType} joinDescriptor - Must have properly defined { joinKey, joinSecret }, { joinKey, joinHash }\n * or { id, joinAddress }, where id will correspond to the attribute opaqueId\n * in the new row in the computeGroups table.\n * @param {string} [name] - The name of the compute group.\n * @param {string} [description] - The description of the compute group.\n * @param {object} [otherProperties] - The 5 attributes of table computeGroup related to commissions and fees.\n * commissionRate: notNull(zDec18), // commission, see DCP-1889\n * deployFee: notNull(zDec18), // number of DCC to take for every deployment\n * deployAccess: string, // can be \"owner\"|\"join\" (dcp-1910)\n * addJobFee: notNull(zDec18), // fee required each time a job joins a compute group\n * maxTotalPayment: dec18, // limit on maximum job payment, NULL => Infinity\n * And the 6 attributes of table computeGroup related to limits.\n * maxConcurrentJobs: integer,\n * maxConcurrentWorkers: integer,\n * maxConcurrentSandboxes: integer,\n * maxConcurrentCPUs: integer,\n * maxConcurrentGPUs: integer,\n * maxConcurrentEscrow: dec18,\n * @returns {Promise<apiClientType>} - { success, payload: computeGroup.id }\n * @access public\n * @example\n * await computeGroup.createGroup({ joinKey: 'dcpDemo', joinSecret: 'theSecret' }, 'myCGName', 'myCGDescription', { deployFee: 0.00015 });\n * await computeGroup.createGroup({ joinKey: 'dcpDemo2', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' });\n * await computeGroup.createGroup({ id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: joinKey:='dcpDemo2', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row3: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.createGroup = async function createGroup(joinDescriptor, name, description, otherProperties)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n validateCGJoinDescriptor(joinDescriptor, 'createGroup');\n\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n return clientError(`createGroup: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Validate the properties in otherProperties.\n for (const methodName in otherProperties) {\n if (!Object.keys(newCGPrototype.parameters).includes(methodName))\n return clientError(`createGroup: Property ${methodName} cannot be speicfied 
in otherProperties. Can only specify ${JSON.stringify(Object.keys(newCGPrototype.parameters))}`);\n }\n\n // Translate joinSecret to joinHash.\n if (joinDescriptor.joinSecret) {\n joinDescriptor.joinHash = exports.calculateJoinHash(joinDescriptor);\n delete joinDescriptor.joinSecret;\n }\n\n if (otherProperties && (otherProperties.commissionRate < 0 || otherProperties.commissionRate >= 1))\n return clientError(`client-createGroup: commissionRate ${otherProperties.commissionRate} must be between 0 and 1 (0 <= commissionRate < 1).`);\n\n debugging('computeGroups') && console.debug('client-createGroup: input:', joinDescriptor, name, description, otherProperties);\n\n const { success, payload } = await exports.serviceConnection.send('createGroup', { joinDescriptor, name, description, otherProperties });\n\n if (!success) return clientError(`Cannot create new compute group, with ${cgId(joinDescriptor)}.`);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n debugging('computeGroups') && console.debug('client-createGroup: payload', payload);\n\n return payload;\n};\n\nconst changeCGPrototype = { type: 'object',\n parameters: {\n name: { type: 'string', default: undefined }, /* name of group (length <= 255) */\n description: { type: 'string', default: undefined }, /* description of group (length <= 255) */\n joinHash: { type: 'string', default: undefined }, /* basically the password, seeded & hashed (length <= 255) */\n joinAddress: { type: Address, default: undefined }, /* signature gives alternative to login/password */\n\n commissionRate: { type: 'BigNumber', default: undefined }, /* commission, see DCP-1889 */\n deployFee: { type: 'BigNumber', default: undefined }, /* number of DCC to take for every deployment */\n deployAccess: { type: 'string', default: undefined }, /* can be \"owner\"|\"join\" (dcp-1910) */\n addJobFee: { type: 'BigNumber', default: undefined }, /* fee required each time a job joins a compute group */\n maxTotalPayment: { type: 'BigNumber', default: undefined }, /* limit on maximum job payment, NULL => Infinity */\n\n /* Administrative limits on group. NULL => Infinity: Should all be integers or undefined. 
*/\n maxConcurrentJobs: { type: 'number', default: undefined },\n maxConcurrentWorkers: { type: 'number', default: undefined },\n maxConcurrentSandboxes: { type: 'number', default: undefined },\n maxConcurrentCPUs: { type: 'number', default: undefined },\n maxConcurrentGPUs: { type: 'number', default: undefined },\n maxConcurrentEscrow: { type: 'BigNumber', default: undefined },\n },\n};\n\n/**\n * Async function that changes a new Compute Group.\n * \n * The parameter newDescriptor contains the new property values,\n * and the properties that are allowed to be changed appear in changeCGPrototype.parameters.\n * \n * The descriptor must have joinKey or id, where id:=opaqueId.\n * Must own the compute group or be ADMIN to use changeGroup.\n * \n * @param {cgAccessType} descriptor - Must have joinkey or id, where id:=opaqueId.\n * @param {object} newDescriptor - Properties not appearing in changeCGPrototype.parameters are not allowed.\n * @returns {Promise<apiClientType>}\n * await computeGroup.changeGroup({ joinKey: 'dcpDemo' }, { joinSecret: 'myNewPasswrd' });\n * await computeGroup.changeGroup({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' }, { name: 'myNewName', deployFee: 0.0001 });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.changeGroup = async function changeGroup(descriptor, newDescriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`changeGroup: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n // Validate the properties in newDescriptor.\n for (const methodName in newDescriptor) {\n if (!Object.keys(changeCGPrototype.parameters).includes(methodName))\n return clientError(`changeGroup: Not allowed to change property ${methodName}. 
Can only change ${JSON.stringify(Object.keys(changeCGPrototype.parameters))}`);\n }\n\n // Translate joinSecret to joinHash.\n if (newDescriptor.joinSecret) {\n newDescriptor.joinHash = exports.calculateJoinHash(newDescriptor);\n delete newDescriptor.joinSecret;\n }\n\n descriptor = buildCGMessage(descriptor, 'changeGroup');\n debugging('computeGroups') && console.debug('change compute group client:', descriptor, newDescriptor);\n const { success, payload } = await exports.serviceConnection.send('changeGroup', { descriptor, newDescriptor });\n\n if (!success) throw new DCPError(`Cannot change compute group with ${cgId(descriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that deletes a compute group.\n * \n * The descriptor must have joinkey or id, where id:=opaqueId.\n * \n * Must either own the group or be ADMIN.\n * If not ADMIN, then the following config must be true:\n * dcpConfig.scheduler.services.computeGroups.usersCanDeleteGroups\n * \n * @param {cgAccessType} descriptor - Must contain joinKey or id (id:=opaqueId) \n * @returns {Promise<apiClientType>}\n * await computeGroup.deleteGroup({ joinKey: 'dcpDemo' });\n * await computeGroup.deleteGroup({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.deleteGroup = async function deleteGroup(descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`deleteGroup: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'deleteGroup');\n debugging('computeGroups') && console.debug('delete compute group client:', descriptor);\n const { success, payload } = await exports.serviceConnection.send('deleteGroup', { descriptor });\n\n if (!success) throw new DCPError(`Cannot delete compute group with ${cgId(descriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that adds a job to a specified compute group. \n * \n * Must be the owner of the job.\n *\n * Useful feedback is provided from this function, as it\n * will make its way back to the application developer, *after* they have made the\n * deployment fee micropayment.\n *\n * On the client side the access model in place is that if you know the (user/password)\n * joinKey+joinSecret/joinKey+joinHash/joinKey+joinHashHash/id+joinAddress,\n * you can add the job to the compute groups, where id:=opaqueId from table computeGroups.\n * On the service side the corresponding access model is\n * joinKey+joinHashHash/id+joinAddress .\n * Access is also allowed if the compute group owner is the connection peerAddress.\n * \n * Unless the compute group owner is the connection peerAddress, element of the descriptor array must contain\n * { joinKey, joinSecret }, { joinKey, joinHash } or { id, joinAddress }\n * where the value of id in { id, joinAddress } is the opaqueId attribute of the row in table computeGroups.\n *\n * @param {Address} job The address of the Job that will be added to the Compute Group.\n * @param {Array} computeGroups Array of descriptor objects for the compute groups. 
This descriptor\n * needs to contain enough information to authorize access to the\n * compute group. Properties may include:\n * - id (id:=opaqueId)\n * - joinKey\n * - joinSecret\n * - joinHash\n * - joinAddress\n * \n * Additional, either the joinKey or id MUST be specified so\n * that we can identify the compute group in question.\n *\n * All compute groups can have jobs submitted to them, provided either the joinKey\n * or the id are specified, and the message contains valid join permission and the \n * job is owned by the caller of addJobToGroups.\n *\n * FUTURE - after DCP-1910\n * keystore A keystore used to grant access to job deployment within this compute group.\n * This can be either the ownerKeystore or the joinAddress keystore when the\n * compute group is in deployAccessType='join' mode.\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.addJobToGroups('P+Y4IApeFQLrYS2W7MkVg7', \n * [ { joinKey: 'dcpDemo', joinSecret: 'theSecret' },\n * { joinKey: 'dcpDemo2', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' }, \n * { id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' } ]);\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: joinKey:='dcpDemo2', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row3: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.addJobToGroups = async function addJobToGroups(job, computeGroups)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n const cgArray = [];\n for (const joinDescriptor of computeGroups)\n {\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n console.error(`addJobToGroups: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Translate so that neither joinHash nor joinSecret goes across the wire.\n const message = buildCGJoinMessage(joinDescriptor, 'addJobToGroups');\n debugging('computeGroups') && console.debug(`addJobToGroups client: job ${job}, message`, message);\n\n cgArray.push(message);\n }\n\n const { success, payload } = await exports.serviceConnection.send('addJobToGroups', { job, cgArray });\n\n debugging('computeGroups') && console.debug('addJobToGroups payload', payload);\n\n if (!success) throw new DCPError(`Cannot add job ${job} to compute groups.`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n // If the server reported success but did not return a list of CGs (eg. v.4.2.5 server),\n // assume (and inform the client) we added all groups successfully\n return payload || computeGroups;\n};\n\n/**\n * Async function that lists all the Jobs in a Compute Group.\n * \n * The descriptor must have one of the properties joinkey, id (id:=opaqueId).\n * Must be the owner of the Compute Group to list jobs from it.\n * The job does not need to be owned.\n * \n * The descriptor is of the form { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }.\n * where 'bYcYGQ3NOpFnP4FKs6IBQd' is the opaqueId of the Compute Group.\n *\n * @param {cgAccessType} descriptor - Must have one of the properties joinKey, id (id:=opaqueId). 
Specifically\n * descriptor = { joinKey: 'dcpDemo' } or descriptor = { id: opaqueId }\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * let listOfJobs1 = await computeGroup.listJobs({ joinKey: 'dcpDemo' });\n * let listOfJobs2 = await computeGroup.listJobs({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.listJobs = async function listJobs(descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`listJobs: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'listJobs');\n debugging('computeGroups') && console.debug('listJob client: descriptor', descriptor);\n const { success, payload } = await exports.serviceConnection.send('listJobs', { descriptor });\n\n if (!success) throw new DCPError(`Cannot list jobs for compute group with ${cgId(descriptor)}`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that removes a job from a Compute Group.\n * \n * The descriptor must have one of the properties joinkey, id (id:=opaqueId).\n * Must be the owner of the Compute Group to remove a job from it.\n * The job does not need to be owned.\n * \n * The descriptor is of the form { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }.\n * where 'bYcYGQ3NOpFnP4FKs6IBQd' is the opaqueId of the Compute Group.\n *\n * @param {Address} job - The address of the Job that will be added to the Compute Group.\n * @param {cgAccessType} descriptor - { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.removeJob( 'P+Y4IApeFQLrYS2W7MkVg7', { joinKey: 'dcpDemo' });\n * await computeGroup.removeJob( 'P+Y4IApeFQLrYS2W7MkVg7', { id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.removeJob = async function removeJob(job, descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`removeJob: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'removeJob');\n debugging('computeGroups') && console.debug(`removeJob client: job ${job}, descriptor`, descriptor);\n const { success, payload } = await exports.serviceConnection.send('removeJob', { job, descriptor });\n\n if (!success) throw new DCPError(`Cannot remove job ${job} from compute group with ${cgId(descriptor)}`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that removes all jobs from a Compute Group.\n * \n * The descriptor must have one of the properties joinkey, id (id:=opaqueId).\n * Must be the owner of the Compute Group to remove jobs from it.\n * \n * The descriptor is of the form { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }.\n * where 
'bYcYGQ3NOpFnP4FKs6IBQd' is the opaqueId of the Compute Group.\n *\n * @param {cgAccessType} descriptor - { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.removeAllJobs({ joinKey: 'dcpDemo' });\n * await computeGroup.removeAllJobs({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.removeAllJobs = async function removeAllJobs(descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`removeAllJobs: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'removeAllJobs');\n debugging('computeGroups') && console.debug('removeAllJobs client: descriptor', descriptor);\n const { success, payload } = await exports.serviceConnection.send('removeAllJobs', { descriptor });\n\n if (!success) throw new DCPError(`Cannot remove all jobs from compute group with ${cgId(descriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that cancels the specified owned job.\n * \n * Must be the owner of the job.\n *\n * On the client side the access model in place is that if you know the (user/password)\n * joinKey+joinSecret/joinKey+joinHash/joinKey+joinHashHash/id+joinAddress,\n * you can cancel the job in the compute group, where id:=opaqueId from table computeGroups.\n * On the service side the corresponding access model is\n * joinKey+joinHashHash/id+joinAddress .\n * Access is also allowed if the compute group owner is the connection peerAddress.\n * \n * Unless the compute group owner is the connection peerAddress, the descriptor must contain\n * { joinKey, joinHashHash } or { id, joinAddress }\n * where the value of id in { id, joinAddress } is the opaqueId attribute of the row in table computeGroups.\n * \n * @param {Address} job - The address of the Job that will be added to the Compute Group.\n * @param {cgClientJoinType} joinDescriptor - Array of descriptor objects for the compute groups. This descriptor\n * needs to contain enough information to authorize access to the\n * compute group. 
Properties may include:\n * - id (id:=opaqueId)\n * - joinKey\n * - joinSecret\n * - joinHash\n * - joinAddress\n *\n * Additional, either the joinKey or id MUST be specified so\n * that we can identify the compute group in question.\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.cancelJob( 'P+Y4IApeFQLrYS2W7MkVg7', { joinKey: 'dcpDemo', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' } );\n * await computeGroup.cancelJob( 'P+Y4IApeFQLrYS2W7MkVg7', { id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.cancelJob = async function cancelJob(job, joinDescriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n return clientError(`cancelJob: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Translate so that neither joinHash nor joinSecret goes across the wire.\n joinDescriptor = buildCGJoinMessage(joinDescriptor, 'cancelJob');\n debugging('computeGroups') && console.debug(`cancelJob client: job ${job}, descriptor`, joinDescriptor);\n const { success, payload } = await exports.serviceConnection.send('cancelJob', { job, joinDescriptor });\n\n if (!success) throw new DCPError(`Cannot cancel job ${job} for compute group with ${cgId(joinDescriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that cancels the owned job in the Compute Group.\n * \n * On the client side the access model in place is that if you know the (user/password)\n * joinKey+joinSecret/joinKey+joinHash/joinKey+joinHashHash/id+joinAddress,\n * you can cancel the jobs in the compute group, where id:=opaqueId from table computeGroups.\n * On the service side the corresponding access model is\n * joinKey+joinHashHash/id+joinAddress .\n * Access is also allowed if the compute group owner is the connection peerAddress.\n * \n * Unless the compute group owner is the connection peerAddress, the descriptor must contain\n * { joinKey, joinHashHash } or { id, joinAddress }\n * where the value of id in { id, joinAddress } is the opaqueId attribute of the row in table computeGroups.\n * \n * @param {cgClientJoinType} joinDescriptor - Array of descriptor objects for the compute groups. This descriptor\n * needs to contain enough information to authorize access to the\n * compute group. 
Properties may include:\n * - id (id:=opaqueId)\n * - joinKey\n * - joinSecret\n * - joinHash\n * - joinAddress\n * \n * Additional, either the joinKey or id MUST be specified so\n * that we can identify the compute group in question.\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.cancelAllJobs( { joinKey: 'dcpDemo', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' } );\n * await computeGroup.cancelAllJobs( { id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.cancelAllJobs = async function cancelAllJobs(joinDescriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n return clientError(`cancelAllJobs: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Translate so that neither joinHash nor joinSecret goes across the wire.\n joinDescriptor = buildCGJoinMessage(joinDescriptor, 'cancelAllJobs');\n debugging('computeGroups') && console.debug('cancelAllJobs client: descriptor', joinDescriptor);\n const { success, payload } = await exports.serviceConnection.send('cancelAllJobs', { joinDescriptor });\n\n if (!success) throw new DCPError(`Cannot cancel owned jobs for compute group with ${cgId(joinDescriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Calculate a joinHash for a compute group. This is an eh1- hash of the cg salt and \n * joinSecret components of a compute group description.\n *\n * @param {object} details an object containing the cg salt, which is\n * the joinKey if the compute group uses one;\n * otherwise it is the joinAddress. This object\n * may also contain the joinSecret.\n * @param {string} [joinSecret] the join secret -- plain text -- that is\n * the \"password\" for the compute group. If not\n * specified, we use details.joinSecret.\n */\nexports.calculateJoinHash = function computeGroups$calculateJoinHash(details, joinSecret)\n{\n if (typeof joinSecret === 'undefined')\n joinSecret = details.joinSecret;\n\n return hash.calculate(hash.eh1, `${details.joinKey || details.joinAddress} ${joinSecret}`);\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/compute-groups/index.js?");
+ eval("/**\n * @file Client facing module that implements Compute Groups API\n * @module dcp/compute-groups\n * @access public\n * @author Kayra E-A <kayra@kingsds.network>\n * Wes Garland <wes@kingsds.network>\n * Paul <paul@kingsds.network>\n * @date Sept 2020\n * February 2022\n * May 2022\n */\n\n\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst hash = __webpack_require__(/*! ../../common/hash */ \"./src/common/hash.js\");\nconst { DCPError } = __webpack_require__(/*! ../../common/dcp-error */ \"./src/common/dcp-error.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('scheduler');\nconst { Address } = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { clientError, reconstructServiceError } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n\n/** @typedef {import('dcp/utils').apiServiceType} apiServiceType */\n/** @typedef {import('dcp/utils').apiClientType} apiClientType */\n/** @typedef {string} opaqueId */\n\n/**\n * @typedef {object} cgAccessType\n * @property {opaqueId} [id]\n * @property {string} [joinKey]\n */\n\n/**\n * @typedef {object} cgClientJoinType\n * @property {opaqueId} [id]\n * @property {Address} [joinAddress]\n * @property {string} [joinKey]\n * @property {string} [joinSecret]\n * @property {string} [joinHash]\n */\n\n/**\n * @typedef {object} cgServiceJoinType\n * @property {opaqueId} [id]\n * @property {Address} [joinAddress]\n * @property {string} [joinKey]\n * @property {string} [joinHashHash]\n */\n\n/**\n * Establishes the client connection to the computeGroups microservice if it does not exist already from the default config.\n * \n * @returns {protocolV4.Connection}\n * @access public\n * @example\n * const result = await exports.serviceConnection.send('createGroup', {\n name: name,\n description: description,\n });\n */\n\nexports.serviceConnection = null;\n\n//\n// Reference counting pattern:\n// For every time addRef is called,\n// closeServiceConnection must eventually be called.\n// Reference counting allows multiple execs in a Promise.all .\n//\nvar refCount = 0;\nexports.addRef = function addRef() {\n refCount++;\n}\n\nconst openAndConnectServiceConn = async function openAndConnectServiceConn()\n{\n exports.serviceConnection = new protocolV4.Connection(dcpConfig.scheduler.services.computeGroups);\n exports.serviceConnection.on('close', openAndConnectServiceConn);\n await exports.serviceConnection.connect();\n refCount = 0; // Help with sanity.\n}\n\n/**\n * Resets the client connection to the computeGroups microservice.\n */\nexports.closeServiceConnection = async function closeServiceConnection() {\n if (refCount > 0) refCount--;\n if (exports.serviceConnection && refCount < 1)\n {\n exports.serviceConnection.off('close', openAndConnectServiceConn);\n exports.serviceConnection.close(null, true);\n refCount = 0; // Help with sanity.\n }\n\n exports.serviceConnection = null;\n};\n\n/**\n * (Used in jobs/index.js)\n * KeepAlive for the service connection to compute groups.\n */\nexports.keepAlive = async function keepAlive() {\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n exports.serviceConnection.keepalive().catch(err => 
console.error('Warning: keepalive failed for compute groups service', err));\n}\n\n/**\n * Checks whether descriptor corresponds to the public compute group from the scheduler constants.\n */\nexports.isPublicComputeGroup = function isPublicComputeGroup(descriptor) {\n return descriptor.id === constants.computeGroups.public.id\n && descriptor.opaqueId === constants.computeGroups.public.opaqueId;\n};\n\n/**\n * Returns a compute group identification snippet for diagnostic messages,\n * @param {object} descriptor - Must have one of the properties joinKey, id (id:=opaqueId). Specifically\n * descriptor = { joinKey: 'dcpDemo' } or descriptor = { id: 'bYcYGQ3NOpFnP4FKs6IBQd' },\n * where the corresponding row in table computeGroups have attributes\n * joinKey:='dcpDemo' or opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd' .\n * @returns {string}\n */\nfunction cgId(descriptor) {\n return (descriptor.joinKey) ? `joinKey ${descriptor.joinKey}` : `id ${descriptor.id}`;\n}\n\n/**\n * Verify sufficient information in descriptor to access a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgAccessType} descriptor \n * @param {string} methodName \n */\nfunction validateCGDescriptor(descriptor, methodName) {\n for (const prop in descriptor) {\n if ([ 'id', 'joinKey' ].includes(prop)) continue;\n if ([ 'joinAddress', 'joinHash', 'joinSecret' ].includes(prop))\n console.warn(`It is not necessary to specify '${prop}' in the descriptor ${JSON.stringify(descriptor)} when calling ${methodName}`);\n else\n console.error(`Do not specify '${prop}' in the descriptor ${JSON.stringify(descriptor)} when calling ${methodName}`);\n }\n}\n\n/**\n * Verify sufficient information in descriptor to authorize a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgClientJoinType} joinDescriptor \n * @param {string} methodName \n */\nfunction validateCGJoinDescriptor(joinDescriptor, methodName) {\n for (const prop in joinDescriptor) {\n if ([ 'id', 'joinKey', 'joinSecret', 'joinHash', 'joinAddress' ].includes(prop)) continue;\n console.error(`Do not specify '${prop}' in the descriptor ${JSON.stringify(joinDescriptor)} when calling ${methodName}`);\n }\n}\n\n/**\n * Build message to go across the wire.\n * Verify sufficient information in descriptor to access a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgAccessType} descriptor\n * @param {string} methodName\n * @returns {cgAccessType}\n */\nfunction buildCGMessage(descriptor, methodName)\n{\n if (exports.isPublicComputeGroup(descriptor)) return descriptor;\n\n const message = {};\n // Construct message.joinKey xor message.id .\n if (descriptor.joinKey) message.joinKey = descriptor.joinKey;\n else if (descriptor.id) message.id = descriptor.id; // id:=opaqueId\n\n debugging('computeGroups') && console.debug(`${methodName}:buildCGMessage: descriptor`, descriptor, 'message', message);\n\n validateCGDescriptor(descriptor, methodName);\n\n return message;\n}\n\n/**\n * Build message so that joinHash, joinSecret, opaqueId do not go across the wire.\n * Verify sufficient information in descriptor to authorize a compute group.\n * Emit diagnostics about unnecessary information.\n * @param {cgClientJoinType} descriptor\n * @param {string} methodName\n * @returns {cgServiceJoinType}\n */\nfunction buildCGJoinMessage(descriptor, methodName)\n{\n if (exports.isPublicComputeGroup(descriptor)) return descriptor;\n\n const message = {};\n // Construct message.joinKey xor message.id .\n if (descriptor.joinKey) 
message.joinKey = descriptor.joinKey;\n else if (descriptor.id) message.id = descriptor.id; // id:=opaqueId\n // Construct message.joinAddress .\n if (descriptor.joinAddress) message.joinAddress = descriptor.joinAddress;\n\n debugging('computeGroups') && console.debug(`${methodName}:buildCGJoinMessage: descriptor`, descriptor, 'message', message);\n\n validateCGJoinDescriptor(descriptor, methodName);\n\n // Construct message.joinHashHash .\n if (descriptor.joinSecret) message.joinHashHash = hash.calculate(hash.eh1, exports.calculateJoinHash(descriptor), exports.serviceConnection.dcpsid);\n if (descriptor.joinHash) message.joinHashHash = hash.calculate(hash.eh1, descriptor.joinHash, exports.serviceConnection.dcpsid);\n\n return message;\n}\n\nfunction hasSufficientJoinInfo(joinDescriptor) {\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n return (joinDescriptor.joinKey && (joinDescriptor.joinSecret || joinDescriptor.joinHash))\n || (joinDescriptor.id && joinDescriptor.joinAddress)\n || exports.isPublicComputeGroup(joinDescriptor);\n}\n\nconst newCGPrototype = { type: 'object',\n parameters: {\n // name: { type: 'string', default: undefined }, /* name of group (length <= 255) */\n // description: { type: 'string', default: undefined }, /* description of group (length <= 255) */\n // id: { type: 'string', default: undefined }, /* opaqueId, the unique identifier of the compute group; nanoid (length === 22) */\n // joinKey: { type: 'string', default: undefined }, /* basically the login (length <= 255) */\n // joinSecret: { type: 'string', default: undefined }, /* basically the password (length <= 255) */\n // joinHash: { type: 'string', default: undefined }, /* basically the password, the joinSecret seeded & hashed */\n // joinAddress: { type: Address, default: undefined }, /* signature gives alternative to login/password */\n\n commissionRate: { type: 'BigNumber', default: undefined }, /* commission, see DCP-1889 */\n deployFee: { type: 'BigNumber', default: undefined }, /* number of DCC to take for every deployment */\n deployAccess: { type: 'string', default: undefined }, /* can be \"owner\"|\"join\" (dcp-1910) */\n addJobFee: { type: 'BigNumber', default: undefined }, /* fee required each time a job joins a compute group */\n maxTotalPayment: { type: 'BigNumber', default: undefined }, /* limit on maximum job payment, NULL => Infinity */\n\n /* Administrative limits on group. NULL => Infinity: Should all be integers or undefined. 
*/\n maxConcurrentJobs: { type: 'number', default: undefined },\n maxConcurrentWorkers: { type: 'number', default: undefined },\n maxConcurrentSandboxes: { type: 'number', default: undefined },\n maxConcurrentCPUs: { type: 'number', default: undefined },\n maxConcurrentGPUs: { type: 'number', default: undefined },\n maxConcurrentEscrow: { type: 'BigNumber', default: undefined },\n },\n};\n\n/**\n * Async function that creates a new Compute Group.\n *\n * The joinDescriptor is of the form { joinKey, joinSecret }, { joinKey, joinHash } or { id, joinAddress }.\n * where id will correspond to the attribute opaqueId in the new row in the computeGroups table.\n *\n * This function can only be called with ADMIN permission.\n * Properties not appearing in newCGPrototype.parameters are not allowed in otherProperties.\n *\n * @param {cgClientJoinType} joinDescriptor - Must have properly defined { joinKey, joinSecret }, { joinKey, joinHash }\n * or { id, joinAddress }, where id will correspond to the attribute opaqueId\n * in the new row in the computeGroups table.\n * @param {string} [name] - The name of the compute group.\n * @param {string} [description] - The description of the compute group.\n * @param {object} [otherProperties] - The 5 attributes of table computeGroup related to commissions and fees.\n * commissionRate: notNull(zDec18), // commission, see DCP-1889\n * deployFee: notNull(zDec18), // number of DCC to take for every deployment\n * deployAccess: string, // can be \"owner\"|\"join\" (dcp-1910)\n * addJobFee: notNull(zDec18), // fee required each time a job joins a compute group\n * maxTotalPayment: dec18, // limit on maximum job payment, NULL => Infinity\n * And the 6 attributes of table computeGroup related to limits.\n * maxConcurrentJobs: integer,\n * maxConcurrentWorkers: integer,\n * maxConcurrentSandboxes: integer,\n * maxConcurrentCPUs: integer,\n * maxConcurrentGPUs: integer,\n * maxConcurrentEscrow: dec18,\n * @returns {Promise<apiClientType>} - { success, payload: computeGroup.id }\n * @access public\n * @example\n * await computeGroup.createGroup({ joinKey: 'dcpDemo', joinSecret: 'theSecret' }, 'myCGName', 'myCGDescription', { deployFee: 0.00015 });\n * await computeGroup.createGroup({ joinKey: 'dcpDemo2', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' });\n * await computeGroup.createGroup({ id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: joinKey:='dcpDemo2', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row3: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.createGroup = async function createGroup(joinDescriptor, name, description, otherProperties)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n validateCGJoinDescriptor(joinDescriptor, 'createGroup');\n\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n return clientError(`createGroup: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Validate the properties in otherProperties.\n for (const methodName in otherProperties) {\n if (!Object.keys(newCGPrototype.parameters).includes(methodName))\n return clientError(`createGroup: Property ${methodName} cannot be speicfied 
in otherProperties. Can only specify ${JSON.stringify(Object.keys(newCGPrototype.parameters))}`);\n }\n\n // Translate joinSecret to joinHash.\n if (joinDescriptor.joinSecret) {\n joinDescriptor.joinHash = exports.calculateJoinHash(joinDescriptor);\n delete joinDescriptor.joinSecret;\n }\n\n if (otherProperties && (otherProperties.commissionRate < 0 || otherProperties.commissionRate >= 1))\n return clientError(`client-createGroup: commissionRate ${otherProperties.commissionRate} must be between 0 and 1 (0 <= commissionRate < 1).`);\n\n debugging('computeGroups') && console.debug('client-createGroup: input:', joinDescriptor, name, description, otherProperties);\n\n const { success, payload } = await exports.serviceConnection.send('createGroup', { joinDescriptor, name, description, otherProperties });\n\n if (!success) return clientError(`Cannot create new compute group, with ${cgId(joinDescriptor)}.`);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n debugging('computeGroups') && console.debug('client-createGroup: payload', payload);\n\n return payload;\n};\n\nconst changeCGPrototype = { type: 'object',\n parameters: {\n name: { type: 'string', default: undefined }, /* name of group (length <= 255) */\n description: { type: 'string', default: undefined }, /* description of group (length <= 255) */\n joinHash: { type: 'string', default: undefined }, /* basically the password, seeded & hashed (length <= 255) */\n joinAddress: { type: Address, default: undefined }, /* signature gives alternative to login/password */\n\n commissionRate: { type: 'BigNumber', default: undefined }, /* commission, see DCP-1889 */\n deployFee: { type: 'BigNumber', default: undefined }, /* number of DCC to take for every deployment */\n deployAccess: { type: 'string', default: undefined }, /* can be \"owner\"|\"join\" (dcp-1910) */\n addJobFee: { type: 'BigNumber', default: undefined }, /* fee required each time a job joins a compute group */\n maxTotalPayment: { type: 'BigNumber', default: undefined }, /* limit on maximum job payment, NULL => Infinity */\n\n /* Administrative limits on group. NULL => Infinity: Should all be integers or undefined. 
*/\n maxConcurrentJobs: { type: 'number', default: undefined },\n maxConcurrentWorkers: { type: 'number', default: undefined },\n maxConcurrentSandboxes: { type: 'number', default: undefined },\n maxConcurrentCPUs: { type: 'number', default: undefined },\n maxConcurrentGPUs: { type: 'number', default: undefined },\n maxConcurrentEscrow: { type: 'BigNumber', default: undefined },\n },\n};\n\n/**\n * Async function that changes a new Compute Group.\n * \n * The parameter newDescriptor contains the new property values,\n * and the properties that are allowed to be changed appear in changeCGPrototype.parameters.\n * \n * The descriptor must have joinKey or id, where id:=opaqueId.\n * Must own the compute group or be ADMIN to use changeGroup.\n * \n * @param {cgAccessType} descriptor - Must have joinkey or id, where id:=opaqueId.\n * @param {object} newDescriptor - Properties not appearing in changeCGPrototype.parameters are not allowed.\n * @returns {Promise<apiClientType>}\n * await computeGroup.changeGroup({ joinKey: 'dcpDemo' }, { joinSecret: 'myNewPasswrd' });\n * await computeGroup.changeGroup({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' }, { name: 'myNewName', deployFee: 0.0001 });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.changeGroup = async function changeGroup(descriptor, newDescriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`changeGroup: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n // Validate the properties in newDescriptor.\n for (const methodName in newDescriptor) {\n if (!Object.keys(changeCGPrototype.parameters).includes(methodName))\n return clientError(`changeGroup: Not allowed to change property ${methodName}. 
Can only change ${JSON.stringify(Object.keys(changeCGPrototype.parameters))}`);\n }\n\n // Translate joinSecret to joinHash.\n if (newDescriptor.joinSecret) {\n newDescriptor.joinHash = exports.calculateJoinHash(newDescriptor);\n delete newDescriptor.joinSecret;\n }\n\n descriptor = buildCGMessage(descriptor, 'changeGroup');\n debugging('computeGroups') && console.debug('change compute group client:', descriptor, newDescriptor);\n const { success, payload } = await exports.serviceConnection.send('changeGroup', { descriptor, newDescriptor });\n\n if (!success) throw new DCPError(`Cannot change compute group with ${cgId(descriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that deletes a compute group.\n * \n * The descriptor must have joinkey or id, where id:=opaqueId.\n * \n * Must either own the group or be ADMIN.\n * If not ADMIN, then the following config must be true:\n * dcpConfig.scheduler.services.computeGroups.usersCanDeleteGroups\n * \n * @param {cgAccessType} descriptor - Must contain joinKey or id (id:=opaqueId) \n * @returns {Promise<apiClientType>}\n * await computeGroup.deleteGroup({ joinKey: 'dcpDemo' });\n * await computeGroup.deleteGroup({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.deleteGroup = async function deleteGroup(descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`deleteGroup: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'deleteGroup');\n debugging('computeGroups') && console.debug('delete compute group client:', descriptor);\n const { success, payload } = await exports.serviceConnection.send('deleteGroup', { descriptor });\n\n if (!success) throw new DCPError(`Cannot delete compute group with ${cgId(descriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that adds a job to a specified compute group. \n * \n * Must be the owner of the job.\n *\n * Useful feedback is provided from this function, as it\n * will make its way back to the application developer, *after* they have made the\n * deployment fee micropayment.\n *\n * On the client side the access model in place is that if you know the (user/password)\n * joinKey+joinSecret/joinKey+joinHash/joinKey+joinHashHash/id+joinAddress,\n * you can add the job to the compute groups, where id:=opaqueId from table computeGroups.\n * On the service side the corresponding access model is\n * joinKey+joinHashHash/id+joinAddress .\n * Access is also allowed if the compute group owner is the connection peerAddress.\n * \n * Unless the compute group owner is the connection peerAddress, element of the descriptor array must contain\n * { joinKey, joinSecret }, { joinKey, joinHash } or { id, joinAddress }\n * where the value of id in { id, joinAddress } is the opaqueId attribute of the row in table computeGroups.\n *\n * @param {Address} job The address of the Job that will be added to the Compute Group.\n * @param {Array} computeGroups Array of descriptor objects for the compute groups. 
This descriptor\n * needs to contain enough information to authorize access to the\n * compute group. Properties may include:\n * - id (id:=opaqueId)\n * - joinKey\n * - joinSecret\n * - joinHash\n * - joinAddress\n * \n * Additional, either the joinKey or id MUST be specified so\n * that we can identify the compute group in question.\n *\n * All compute groups can have jobs submitted to them, provided either the joinKey\n * or the id are specified, and the message contains valid join permission and the \n * job is owned by the caller of addJobToGroups.\n *\n * FUTURE - after DCP-1910\n * keystore A keystore used to grant access to job deployment within this compute group.\n * This can be either the ownerKeystore or the joinAddress keystore when the\n * compute group is in deployAccessType='join' mode.\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.addJobToGroups('P+Y4IApeFQLrYS2W7MkVg7', \n * [ { joinKey: 'dcpDemo', joinSecret: 'theSecret' },\n * { joinKey: 'dcpDemo2', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' }, \n * { id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' } ]);\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo',\n * row2: joinKey:='dcpDemo2', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row3: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.addJobToGroups = async function addJobToGroups(job, computeGroups)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n const cgArray = [];\n for (const joinDescriptor of computeGroups)\n {\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n console.error(`addJobToGroups: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Translate so that neither joinHash nor joinSecret goes across the wire.\n const message = buildCGJoinMessage(joinDescriptor, 'addJobToGroups');\n debugging('computeGroups') && console.debug(`addJobToGroups client: job ${job}, message`, message);\n\n cgArray.push(message);\n }\n\n const { success, payload } = await exports.serviceConnection.send('addJobToGroups', { job, cgArray });\n\n debugging('computeGroups') && console.debug('addJobToGroups payload', payload);\n\n if (!success) throw new DCPError(`Cannot add job ${job} to compute groups.`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n // If the server reported success but did not return a list of CGs (eg. v.4.2.5 server),\n // assume (and inform the client) we added all groups successfully\n return payload || computeGroups;\n};\n\n/**\n * Async function that lists all the Jobs in a Compute Group.\n * \n * The descriptor must have one of the properties joinkey, id (id:=opaqueId).\n * Must be the owner of the Compute Group to list jobs from it.\n * The job does not need to be owned.\n * \n * The descriptor is of the form { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }.\n * where 'bYcYGQ3NOpFnP4FKs6IBQd' is the opaqueId of the Compute Group.\n *\n * @param {cgAccessType} descriptor - Must have one of the properties joinKey, id (id:=opaqueId). 
Specifically\n * descriptor = { joinKey: 'dcpDemo' } or descriptor = { id: opaqueId }\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * let listOfJobs1 = await computeGroup.listJobs({ joinKey: 'dcpDemo' });\n * let listOfJobs2 = await computeGroup.listJobs({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.listJobs = async function listJobs(descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`listJobs: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'listJobs');\n debugging('computeGroups') && console.debug('listJob client: descriptor', descriptor);\n const { success, payload } = await exports.serviceConnection.send('listJobs', { descriptor });\n\n if (!success) throw new DCPError(`Cannot list jobs for compute group with ${cgId(descriptor)}`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that removes a job from a Compute Group.\n * \n * The descriptor must have one of the properties joinkey, id (id:=opaqueId).\n * Must be the owner of the Compute Group to remove a job from it.\n * The job does not need to be owned.\n * \n * The descriptor is of the form { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }.\n * where 'bYcYGQ3NOpFnP4FKs6IBQd' is the opaqueId of the Compute Group.\n *\n * @param {Address} job - The address of the Job that will be added to the Compute Group.\n * @param {cgAccessType} descriptor - { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.removeJob( 'P+Y4IApeFQLrYS2W7MkVg7', { joinKey: 'dcpDemo' });\n * await computeGroup.removeJob( 'P+Y4IApeFQLrYS2W7MkVg7', { id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.removeJob = async function removeJob(job, descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`removeJob: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'removeJob');\n debugging('computeGroups') && console.debug(`removeJob client: job ${job}, descriptor`, descriptor);\n const { success, payload } = await exports.serviceConnection.send('removeJob', { job, descriptor });\n\n if (!success) throw new DCPError(`Cannot remove job ${job} from compute group with ${cgId(descriptor)}`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that removes all jobs from a Compute Group.\n * \n * The descriptor must have one of the properties joinkey, id (id:=opaqueId).\n * Must be the owner of the Compute Group to remove jobs from it.\n * \n * The descriptor is of the form { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }.\n * where 
'bYcYGQ3NOpFnP4FKs6IBQd' is the opaqueId of the Compute Group.\n *\n * @param {cgAccessType} descriptor - { joinKey: 'dcpDemo' } or { id: 'bYcYGQ3NOpFnP4FKs6IBQd' }\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.removeAllJobs({ joinKey: 'dcpDemo' });\n * await computeGroup.removeAllJobs({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd'\n */\nexports.removeAllJobs = async function removeAllJobs(descriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify descriptor has sufficient information to access a compute group (not guarenteed).\n if (!descriptor.joinKey && !descriptor.id)\n return clientError(`removeAllJobs: Insufficient information to identify compute group: ${JSON.stringify(descriptor)}.`);\n\n descriptor = buildCGMessage(descriptor, 'removeAllJobs');\n debugging('computeGroups') && console.debug('removeAllJobs client: descriptor', descriptor);\n const { success, payload } = await exports.serviceConnection.send('removeAllJobs', { descriptor });\n\n if (!success) throw new DCPError(`Cannot remove all jobs from compute group with ${cgId(descriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that cancels the specified owned job.\n * \n * Must be the owner of the job.\n *\n * On the client side the access model in place is that if you know the (user/password)\n * joinKey+joinSecret/joinKey+joinHash/joinKey+joinHashHash/id+joinAddress,\n * you can cancel the job in the compute group, where id:=opaqueId from table computeGroups.\n * On the service side the corresponding access model is\n * joinKey+joinHashHash/id+joinAddress .\n * Access is also allowed if the compute group owner is the connection peerAddress.\n * \n * Unless the compute group owner is the connection peerAddress, the descriptor must contain\n * { joinKey, joinHashHash } or { id, joinAddress }\n * where the value of id in { id, joinAddress } is the opaqueId attribute of the row in table computeGroups.\n * \n * @param {Address} job - The address of the Job that will be added to the Compute Group.\n * @param {cgClientJoinType} joinDescriptor - Array of descriptor objects for the compute groups. This descriptor\n * needs to contain enough information to authorize access to the\n * compute group. 
Properties may include:\n * - id (id:=opaqueId)\n * - joinKey\n * - joinSecret\n * - joinHash\n * - joinAddress\n *\n * Additional, either the joinKey or id MUST be specified so\n * that we can identify the compute group in question.\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.cancelJob( 'P+Y4IApeFQLrYS2W7MkVg7', { joinKey: 'dcpDemo', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' } );\n * await computeGroup.cancelJob( 'P+Y4IApeFQLrYS2W7MkVg7', { id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.cancelJob = async function cancelJob(job, joinDescriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n return clientError(`cancelJob: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Translate so that neither joinHash nor joinSecret goes across the wire.\n joinDescriptor = buildCGJoinMessage(joinDescriptor, 'cancelJob');\n debugging('computeGroups') && console.debug(`cancelJob client: job ${job}, descriptor`, joinDescriptor);\n const { success, payload } = await exports.serviceConnection.send('cancelJob', { job, joinDescriptor });\n\n if (!success) throw new DCPError(`Cannot cancel job ${job} for compute group with ${cgId(joinDescriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Async function that cancels the owned job in the Compute Group.\n * \n * On the client side the access model in place is that if you know the (user/password)\n * joinKey+joinSecret/joinKey+joinHash/joinKey+joinHashHash/id+joinAddress,\n * you can cancel the jobs in the compute group, where id:=opaqueId from table computeGroups.\n * On the service side the corresponding access model is\n * joinKey+joinHashHash/id+joinAddress .\n * Access is also allowed if the compute group owner is the connection peerAddress.\n * \n * Unless the compute group owner is the connection peerAddress, the descriptor must contain\n * { joinKey, joinHashHash } or { id, joinAddress }\n * where the value of id in { id, joinAddress } is the opaqueId attribute of the row in table computeGroups.\n * \n * @param {cgClientJoinType} joinDescriptor - Array of descriptor objects for the compute groups. This descriptor\n * needs to contain enough information to authorize access to the\n * compute group. 
Properties may include:\n * - id (id:=opaqueId)\n * - joinKey\n * - joinSecret\n * - joinHash\n * - joinAddress\n * \n * Additional, either the joinKey or id MUST be specified so\n * that we can identify the compute group in question.\n * @returns {Promise<apiClientType>}\n * @access public\n * @example\n * await computeGroup.cancelAllJobs( { joinKey: 'dcpDemo', joinHash: 'eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2' } );\n * await computeGroup.cancelAllJobs( { id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' });\n * where the corresponding rows in table computeGroups have attributes\n * row1: joinKey:='dcpDemo', joinHash:='eh1-e063976b20a92da97a27b9873465c6f2c9d6e4370befa86c8c1dd312c78befc2'\n * row2: opaqueId:='bYcYGQ3NOpFnP4FKs6IBQd', joinAddress:='c15053fc30d4bdf91e2e0bba79578f8b649e55ea' .\n */\nexports.cancelAllJobs = async function cancelAllJobs(joinDescriptor)\n{\n if (!exports.serviceConnection)\n await openAndConnectServiceConn();\n\n // Verify joinDescriptor has sufficient information to authorize a compute group (not guarenteed).\n if (!hasSufficientJoinInfo(joinDescriptor))\n return clientError(`cancelAllJobs: Insufficient information to authorize compute group: ${JSON.stringify(joinDescriptor)}.`);\n\n // Translate so that neither joinHash nor joinSecret goes across the wire.\n joinDescriptor = buildCGJoinMessage(joinDescriptor, 'cancelAllJobs');\n debugging('computeGroups') && console.debug('cancelAllJobs client: descriptor', joinDescriptor);\n const { success, payload } = await exports.serviceConnection.send('cancelAllJobs', { joinDescriptor });\n\n if (!success) throw new DCPError(`Cannot cancel owned jobs for compute group with ${cgId(joinDescriptor)}:`, payload);\n if (payload && !payload.success) return reconstructServiceError(payload);\n\n return payload;\n};\n\n/**\n * Calculate a joinHash for a compute group. This is an eh1- hash of the cg salt and \n * joinSecret components of a compute group description.\n *\n * @param {object} details an object containing the cg salt, which is\n * the joinKey if the compute group uses one;\n * otherwise it is the joinAddress. This object\n * may also contain the joinSecret.\n * @param {string} [joinSecret] the join secret -- plain text -- that is\n * the \"password\" for the compute group. If not\n * specified, we use details.joinSecret.\n */\nexports.calculateJoinHash = function computeGroups$calculateJoinHash(details, joinSecret)\n{\n if (typeof joinSecret === 'undefined')\n joinSecret = details.joinSecret;\n\n return hash.calculate(hash.eh1, `${details.joinKey || details.joinAddress} ${joinSecret}`);\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/compute-groups/index.js?");
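For orientation, the compute-groups client shown in the hunk above exposes changeGroup, deleteGroup, addJobToGroups, listJobs, removeJob, removeAllJobs, cancelJob, cancelAllJobs and calculateJoinHash; each call opens the service connection on demand, owner/ADMIN operations identify a group by joinKey or id (id := opaqueId), and the join-authorizing calls rewrite the descriptor so that neither joinSecret nor joinHash goes across the wire. The following is a minimal usage sketch, not part of the published bundle: it assumes dcp-client has already been initialized and that the module registered as 'compute-groups' is reachable as require('dcp/compute-groups'); the descriptor values are the placeholders used in the module's own @example blocks.

  // Hedged sketch -- assumed entry point, placeholder descriptors from the source's @example blocks.
  const computeGroup = require('dcp/compute-groups');

  async function demoGroupAdmin()
  {
    // Owner/ADMIN operation: only properties listed in changeCGPrototype.parameters may be changed.
    await computeGroup.changeGroup({ joinKey: 'dcpDemo' }, { name: 'myNewName', deployFee: 0.0001 });

    // Join-authorized operation: descriptors carry joinKey+joinSecret/joinHash or id+joinAddress;
    // the client translates them so neither joinSecret nor joinHash is sent over the wire.
    await computeGroup.addJobToGroups('P+Y4IApeFQLrYS2W7MkVg7', [
      { joinKey: 'dcpDemo', joinSecret: 'theSecret' },
      { id: 'bYcYGQ3NOpFnP4FKs6IBQd', joinAddress: 'c15053fc30d4bdf91e2e0bba79578f8b649e55ea' },
    ]);

    // Owner-only job management on a group, identified by joinKey or opaqueId.
    const jobs = await computeGroup.listJobs({ joinKey: 'dcpDemo' });
    await computeGroup.removeJob('P+Y4IApeFQLrYS2W7MkVg7', { joinKey: 'dcpDemo' });
    await computeGroup.removeAllJobs({ id: 'bYcYGQ3NOpFnP4FKs6IBQd' });

    // The joinHash is an eh1 hash of the group's salt (joinKey or joinAddress) and the joinSecret.
    const joinHash = computeGroup.calculateJoinHash({ joinKey: 'dcpDemo', joinSecret: 'theSecret' });
    return { jobs, joinHash };
  }

Each call resolves to the service payload; failures surface either as a thrown DCPError or as a reconstructed service error / client error object.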
4141
4141
 
4142
4142
  /***/ }),
4143
4143
 
@@ -4157,7 +4157,7 @@ eval("/**\n * @file Module that implements Compute API\n * @module dcp/comput
4157
4157
  \*********************************/
4158
4158
  /***/ ((module, exports, __webpack_require__) => {
4159
4159
 
4160
- eval("/* module decorator */ module = __webpack_require__.nmd(module);\n/**\n * @file dcp-client-bundle-src.js\n * Top-level file which gets webpacked into the bundle consumed by dcp-client 2.5\n * @author Wes Garland, wes@kingsds.network\n * @date July 2019\n */\n\n{\n let thisScript = typeof document !== 'undefined' ? (typeof document.currentScript !== 'undefined' && document.currentScript) || document.getElementById('_dcp_client_bundle') : {}\n let realModuleDeclare\n\n if ( false || typeof module.declare === 'undefined') {\n realModuleDeclare = ( true) ? module.declare : 0\n if (false) {}\n module.declare = function moduleUnWrapper (deps, factory) {\n factory(null, module.exports, module)\n return module.exports\n }\n }\n\n let _debugging = () => false\n dcpConfig.future = (__webpack_require__(/*! ../common/config-future.js */ \"./src/common/config-future.js\").futureFactory)(_debugging, dcpConfig);\n\n /* These modules are official API and must be part of DCP Client */\n let officialApi = {\n 'protocol': __webpack_require__(/*! ../protocol-v4 */ \"./src/protocol-v4/index.js\"),\n 'compute': (__webpack_require__(/*! ./compute */ \"./src/dcp-client/compute.js\").compute),\n 'worker': __webpack_require__(/*! ./worker */ \"./src/dcp-client/worker/index.js\"),\n 'wallet': __webpack_require__(/*! ./wallet */ \"./src/dcp-client/wallet/index.js\"),\n };\n\n /* Allow client programs to use modules which happen to be in the bundle anyhow */\n let conveniencePeers = {\n 'ethereumjs-wallet': (__webpack_require__(/*! ./wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\")._internalEth.wallet),\n 'ethereumjs-util': (__webpack_require__(/*! ./wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\")._internalEth.util),\n 'socket.io-client': __webpack_require__(/*! socket.io-client */ \"./node_modules/socket.io-client/build/cjs/index.js\"),\n 'bignumber.js': __webpack_require__(/*! bignumber.js */ \"./node_modules/bignumber.js/bignumber.js\"),\n 'semver': __webpack_require__(/*! semver */ \"./node_modules/semver/semver.js\"),\n };\n\n /* Some of these modules are API-track. Some of them need to be published to be\n * available for top-level resolution by DCP internals. Those (mostly) should have\n * been written using relative module paths.....\n */\n let modules = Object.assign({\n 'dcp-build': {\"version\":\"a97d1d3338ffb45271c76e88ac5c541b96e81039\",\"branch\":\"release\",\"dcpClient\":{\"version\":\"4.2.7\",\"from\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#prod-20220531\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#d4fa748a061d970b2d829a01e37f075cb176458a\"},\"built\":\"Wed Jun 01 2022 11:06:50 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Wed 01 Jun 2022 11:06:48 AM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.70.0\",\"node\":\"v14.19.3\"},\n 'dcp-xhr': __webpack_require__(/*! ../common/dcp-xhr */ \"./src/common/dcp-xhr.js\"),\n 'dcp-env': __webpack_require__(/*! ../common/dcp-env */ \"./src/common/dcp-env.js\"),\n 'dcp-url': __webpack_require__(/*! ../common/dcp-url */ \"./src/common/dcp-url.js\"),\n 'cli': __webpack_require__(/*! ../common/cli */ \"./src/common/cli.js\"),\n 'dcp-timers': __webpack_require__(/*! ../common/dcp-timers */ \"./src/common/dcp-timers.js\"),\n 'dcp-dot-dir': __webpack_require__(/*! ../common/dcp-dot-dir */ \"./src/common/dcp-dot-dir.js\"),\n 'dcp-assert': __webpack_require__(/*! 
../common/dcp-assert */ \"./src/common/dcp-assert.js\"),\n 'dcp-events': __webpack_require__(/*! ../common/dcp-events */ \"./src/common/dcp-events/index.js\"),\n 'utils': __webpack_require__(/*! ../utils */ \"./src/utils/index.js\"),\n 'debugging': __webpack_require__(/*! ../debugging */ \"./src/debugging.js\"),\n 'publish': __webpack_require__(/*! ../common/dcp-publish */ \"./src/common/dcp-publish.js\"),\n 'compute-groups': {\n ...__webpack_require__(/*! ./compute-groups */ \"./src/dcp-client/compute-groups/index.js\"),\n publicGroupOpaqueId: (__webpack_require__(/*! ../common/scheduler-constants */ \"./src/common/scheduler-constants.js\").computeGroups[\"public\"].opaqueId),\n },\n 'bank-util': __webpack_require__(/*! ./bank-util */ \"./src/dcp-client/bank-util.js\"),\n 'protocol-v4': __webpack_require__(/*! ../protocol-v4 */ \"./src/protocol-v4/index.js\"), /* deprecated */\n 'client-modal': __webpack_require__(/*! ./client-modal */ \"./src/dcp-client/client-modal/index.js\"),\n 'legacy-modal': (__webpack_require__(/*! ../../portal/www/js/modal */ \"./portal/www/js/modal.js\").Modal),\n 'eth': __webpack_require__(/*! ./wallet/eth */ \"./src/dcp-client/wallet/eth.js\"),\n 'serialize': __webpack_require__(/*! ../utils/serialize */ \"./src/utils/serialize.js\"),\n 'job': __webpack_require__(/*! ./job */ \"./src/dcp-client/job/index.js\"),\n 'range-object': __webpack_require__(/*! ./range-object */ \"./src/dcp-client/range-object.js\"),\n 'stats-ranges': __webpack_require__(/*! ./stats-ranges */ \"./src/dcp-client/stats-ranges.js\"),\n 'standard-objects': {}\n }, conveniencePeers, officialApi);\n\n /* Export the JS Standard Classes (etc) from the global object of the bundle evaluation context,\n * in case we have code somewhere that needs to use these for instanceof checks.\n */\n ;[ Object, Function, Boolean, Symbol,\n Error, EvalError, RangeError, ReferenceError, SyntaxError, TypeError, URIError,\n Number, Math, Date,\n String, RegExp,\n Array, Int8Array, Uint8Array, Uint8ClampedArray, Int16Array, Uint16Array, Int32Array, Uint32Array, Float32Array, Float64Array,\n Map, Set, WeakMap, WeakSet,\n ArrayBuffer, DataView, JSON,\n Promise, \n Reflect, Proxy, Intl, WebAssembly, __webpack_require__\n ].forEach(function (obj) {\n if (obj.name && (typeof obj === 'function' || typeof obj === 'object'))\n modules['standard-objects'][obj.name] = obj\n })\n\n if (typeof BigInt !== 'undefined')\n modules['standard-objects']['BigInt'] === BigInt;\n if (typeof BigInt64Array !== 'undefined')\n modules['standard-objects']['BigInt64Array'] === BigInt64Array;\n if (typeof BigInt64Array !== 'undefined')\n modules['standard-objects']['BigUint64Array'] === BigUint64Array;\n\n module.declare([], function(require, exports, module) {\n Object.assign(exports, modules)\n exports['dcp-config'] = dcpConfig\n })\n if (realModuleDeclare)\n module.declare = realModuleDeclare\n\n bundleExports = thisScript.exports = exports; /* must be last expression evaluated! */\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/index.js?");
4160
+ eval("/* module decorator */ module = __webpack_require__.nmd(module);\n/**\n * @file dcp-client-bundle-src.js\n * Top-level file which gets webpacked into the bundle consumed by dcp-client 2.5\n * @author Wes Garland, wes@kingsds.network\n * @date July 2019\n */\n\n{\n let thisScript = typeof document !== 'undefined' ? (typeof document.currentScript !== 'undefined' && document.currentScript) || document.getElementById('_dcp_client_bundle') : {}\n let realModuleDeclare\n\n if ( false || typeof module.declare === 'undefined') {\n realModuleDeclare = ( true) ? module.declare : 0\n if (false) {}\n module.declare = function moduleUnWrapper (deps, factory) {\n factory(null, module.exports, module)\n return module.exports\n }\n }\n\n let _debugging = () => false\n dcpConfig.future = (__webpack_require__(/*! ../common/config-future.js */ \"./src/common/config-future.js\").futureFactory)(_debugging, dcpConfig);\n\n /* These modules are official API and must be part of DCP Client */\n let officialApi = {\n 'protocol': __webpack_require__(/*! ../protocol-v4 */ \"./src/protocol-v4/index.js\"),\n 'compute': (__webpack_require__(/*! ./compute */ \"./src/dcp-client/compute.js\").compute),\n 'worker': __webpack_require__(/*! ./worker */ \"./src/dcp-client/worker/index.js\"),\n 'wallet': __webpack_require__(/*! ./wallet */ \"./src/dcp-client/wallet/index.js\"),\n };\n\n /* Allow client programs to use modules which happen to be in the bundle anyhow */\n let conveniencePeers = {\n 'ethereumjs-wallet': (__webpack_require__(/*! ./wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\")._internalEth.wallet),\n 'ethereumjs-util': (__webpack_require__(/*! ./wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\")._internalEth.util),\n 'socket.io-client': __webpack_require__(/*! socket.io-client */ \"./node_modules/socket.io-client/build/cjs/index.js\"),\n 'bignumber.js': __webpack_require__(/*! bignumber.js */ \"./node_modules/bignumber.js/bignumber.js\"),\n 'semver': __webpack_require__(/*! semver */ \"./node_modules/semver/semver.js\"),\n };\n\n /* Some of these modules are API-track. Some of them need to be published to be\n * available for top-level resolution by DCP internals. Those (mostly) should have\n * been written using relative module paths.....\n */\n let modules = Object.assign({\n 'dcp-build': {\"version\":\"fca68c239b7cda20a34513139c096b25d2576370\",\"branch\":\"release\",\"dcpClient\":{\"version\":\"4.2.8\",\"from\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#prod-20220620\",\"resolved\":\"git+ssh://git@gitlab.com/Distributed-Compute-Protocol/dcp-client.git#e6f31a3a6da1574b3d3bf252bd80208e07937946\"},\"built\":\"Tue Jun 21 2022 13:58:11 GMT-0400 (Eastern Daylight Saving Time)\",\"config\":{\"generated\":\"Tue 21 Jun 2022 01:58:09 PM EDT by erose on lorge\",\"build\":\"debug\"},\"webpack\":\"5.70.0\",\"node\":\"v14.19.3\"},\n 'dcp-xhr': __webpack_require__(/*! ../common/dcp-xhr */ \"./src/common/dcp-xhr.js\"),\n 'dcp-env': __webpack_require__(/*! ../common/dcp-env */ \"./src/common/dcp-env.js\"),\n 'dcp-url': __webpack_require__(/*! ../common/dcp-url */ \"./src/common/dcp-url.js\"),\n 'cli': __webpack_require__(/*! ../common/cli */ \"./src/common/cli.js\"),\n 'dcp-timers': __webpack_require__(/*! ../common/dcp-timers */ \"./src/common/dcp-timers.js\"),\n 'dcp-dot-dir': __webpack_require__(/*! ../common/dcp-dot-dir */ \"./src/common/dcp-dot-dir.js\"),\n 'dcp-assert': __webpack_require__(/*! 
../common/dcp-assert */ \"./src/common/dcp-assert.js\"),\n 'dcp-events': __webpack_require__(/*! ../common/dcp-events */ \"./src/common/dcp-events/index.js\"),\n 'utils': __webpack_require__(/*! ../utils */ \"./src/utils/index.js\"),\n 'debugging': __webpack_require__(/*! ../debugging */ \"./src/debugging.js\"),\n 'publish': __webpack_require__(/*! ../common/dcp-publish */ \"./src/common/dcp-publish.js\"),\n 'compute-groups': {\n ...__webpack_require__(/*! ./compute-groups */ \"./src/dcp-client/compute-groups/index.js\"),\n publicGroupOpaqueId: (__webpack_require__(/*! ../common/scheduler-constants */ \"./src/common/scheduler-constants.js\").computeGroups[\"public\"].opaqueId),\n },\n 'bank-util': __webpack_require__(/*! ./bank-util */ \"./src/dcp-client/bank-util.js\"),\n 'protocol-v4': __webpack_require__(/*! ../protocol-v4 */ \"./src/protocol-v4/index.js\"), /* deprecated */\n 'client-modal': __webpack_require__(/*! ./client-modal */ \"./src/dcp-client/client-modal/index.js\"),\n 'legacy-modal': (__webpack_require__(/*! ../../portal/www/js/modal */ \"./portal/www/js/modal.js\").Modal),\n 'eth': __webpack_require__(/*! ./wallet/eth */ \"./src/dcp-client/wallet/eth.js\"),\n 'serialize': __webpack_require__(/*! ../utils/serialize */ \"./src/utils/serialize.js\"),\n 'job': __webpack_require__(/*! ./job */ \"./src/dcp-client/job/index.js\"),\n 'range-object': __webpack_require__(/*! ./range-object */ \"./src/dcp-client/range-object.js\"),\n 'stats-ranges': __webpack_require__(/*! ./stats-ranges */ \"./src/dcp-client/stats-ranges.js\"),\n 'standard-objects': {}\n }, conveniencePeers, officialApi);\n\n /* Export the JS Standard Classes (etc) from the global object of the bundle evaluation context,\n * in case we have code somewhere that needs to use these for instanceof checks.\n */\n ;[ Object, Function, Boolean, Symbol,\n Error, EvalError, RangeError, ReferenceError, SyntaxError, TypeError, URIError,\n Number, Math, Date,\n String, RegExp,\n Array, Int8Array, Uint8Array, Uint8ClampedArray, Int16Array, Uint16Array, Int32Array, Uint32Array, Float32Array, Float64Array,\n Map, Set, WeakMap, WeakSet,\n ArrayBuffer, DataView, JSON,\n Promise, \n Reflect, Proxy, Intl, WebAssembly, __webpack_require__\n ].forEach(function (obj) {\n if (obj.name && (typeof obj === 'function' || typeof obj === 'object'))\n modules['standard-objects'][obj.name] = obj\n })\n\n if (typeof BigInt !== 'undefined')\n modules['standard-objects']['BigInt'] === BigInt;\n if (typeof BigInt64Array !== 'undefined')\n modules['standard-objects']['BigInt64Array'] === BigInt64Array;\n if (typeof BigInt64Array !== 'undefined')\n modules['standard-objects']['BigUint64Array'] === BigUint64Array;\n\n module.declare([], function(require, exports, module) {\n Object.assign(exports, modules)\n exports['dcp-config'] = dcpConfig\n })\n if (realModuleDeclare)\n module.declare = realModuleDeclare\n\n bundleExports = thisScript.exports = exports; /* must be last expression evaluated! */\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/index.js?");
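The only difference between the removed and added lines in this hunk is the inlined 'dcp-build' record: the dcpClient version moves from 4.2.7 to 4.2.8, with a new release tag (prod-20220531 to prod-20220620), a new resolved commit and build identifier, and a new build timestamp (Jun 01 to Jun 21 2022); the surrounding module map, convenience peers, and standard-object exports are otherwise unchanged. As a hedged illustration (not code from the bundle), the object that module.declare assembles carries this record alongside 'dcp-config', so a consumer holding the bundle's export object could distinguish the two releases like this:

  // Hedged sketch: reads the build record that changed in this release. Field names come from the
  // inlined 'dcp-build' object above; how bundleExports is obtained depends on the host environment
  // (the bundle's final expression assigns it: bundleExports = thisScript.exports = exports).
  function describeDcpClientBundle(bundleExports)
  {
    const build = bundleExports['dcp-build'];
    return {
      dcpClientVersion: build.dcpClient.version,                       // '4.2.7' before, '4.2.8' after
      releaseTag:       build.dcpClient.from,                          // '...#prod-20220531' -> '...#prod-20220620'
      buildId:          build.version,                                 // git hash identifying the build
      builtAt:          build.built,                                   // 'Wed Jun 01 2022 ...' -> 'Tue Jun 21 2022 ...'
      toolchain:        { webpack: build.webpack, node: build.node },  // unchanged: '5.70.0' / 'v14.19.3'
    };
  }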
4161
4161
 
4162
4162
  /***/ }),
4163
4163
 
@@ -4168,7 +4168,7 @@ eval("/* module decorator */ module = __webpack_require__.nmd(module);\n/**\n *
4168
4168
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4169
4169
 
4170
4170
  "use strict";
4171
- eval("/**\n * @file job/index.js\n * @author Eddie Roosenmaallen, eddie@kingsds.network\n * Matthew Palma, mpalma@kingsds.network\n * Wes Garland, wes@kingsds.network\n * Paul, paul@kingsds.network\n * @date November 2018\n * November 2018\n * February 2022\n * May 2022\n *\n * This module implements the Compute API's Job Handle\n *\n */\n/** @typedef {import('dcp/dcp-client/wallet/keystore').Keystore} Keystore */\n\n\nconst { BigNumber } = __webpack_require__(/*! bignumber.js */ \"./node_modules/bignumber.js/bignumber.js\");\nconst { v4: uuidv4 } = __webpack_require__(/*! uuid */ \"./node_modules/uuid/dist/esm-browser/index.js\");\nconst { EventEmitter, PropagatingEventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { RangeObject, MultiRangeObject, DistributionRange, SuperRangeObject } = __webpack_require__(/*! dcp/dcp-client/range-object */ \"./src/dcp-client/range-object.js\");\nconst { fetchURI, encodeDataURI, dumpObject } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { getTextEncoder, createTempFile } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst serialize = __webpack_require__(/*! dcp/utils/serialize */ \"./src/utils/serialize.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst { EventSubscriber } = __webpack_require__(/*! dcp/events/event-subscriber */ \"./src/events/event-subscriber.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst ClientModal = __webpack_require__(/*! dcp/dcp-client/client-modal */ \"./src/dcp-client/client-modal/index.js\");\nconst { Worker } = __webpack_require__(/*! dcp/dcp-client/worker */ \"./src/dcp-client/worker/index.js\");\nconst { RemoteDataSet } = __webpack_require__(/*! dcp/dcp-client/remote-data-set */ \"./src/dcp-client/remote-data-set.js\");\nconst { RemoteDataPattern } = __webpack_require__(/*! dcp/dcp-client/remote-data-pattern */ \"./src/dcp-client/remote-data-pattern.js\");\nconst { ResultHandle } = __webpack_require__(/*! ./result-handle */ \"./src/dcp-client/job/result-handle.js\");\nconst { SlicePaymentOffer } = __webpack_require__(/*! ./slice-payment-offer */ \"./src/dcp-client/job/slice-payment-offer.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst dcpPublish = __webpack_require__(/*! dcp/common/dcp-publish */ \"./src/common/dcp-publish.js\");\nconst computeGroups = __webpack_require__(/*! dcp/dcp-client/compute-groups */ \"./src/dcp-client/compute-groups/index.js\");\nconst schedulerConstants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { sliceStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { jobStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst bankUtil = __webpack_require__(/*! dcp/dcp-client/bank-util */ \"./src/dcp-client/bank-util.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp-client');\nconst kvin = __webpack_require__(/*! 
kvin */ \"./node_modules/kvin/kvin.js\");\nlet tunedKvin;\n\nconst TextEncoder = getTextEncoder();\n\nconst log = (...args) => {\n if (debugging('job')) {\n console.debug('dcp-client:job', ...args);\n }\n};\n\nconst ON_BROWSER = DCP_ENV.isBrowserPlatform;\nconst sideloaderModuleIdentifier = 'sideloader-v1';\n\n// Symbols used to hide private members and functions on the Job instance\nconst debugBuild = ((__webpack_require__(/*! dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug');\nconst INTERNAL_SYMBOL = debugBuild ? '_' : Symbol('Job Internals');\nconst SNAPSHOT = debugBuild ? '_snapshot' : Symbol('Job.snapshot');\nconst DEPLOY_JOB = debugBuild ? '_deploy' : Symbol('Job.deploy');\n\nconst ADD_LISTENERS = Symbol('Job.addListeners');\nconst LISTEN_TO_EVENTS = Symbol('Job.listenToEvents');\nconst LISTEN_TO_WORK_EVENTS = Symbol('Job.listenToWorkEvents');\nconst ON_RESULT = Symbol('Job.onResult');\nconst ON_STATUS = Symbol('Job.onStatus');\n\nexports.JOB_INTERNAL_SYMBOL = INTERNAL_SYMBOL; /* allow friends to access our guts, eg. job/result-handle */\n\nconst DEFAULT_REQUIREMENTS = {\n engine: {\n es7: null,\n spidermonkey: null\n },\n environment: {\n webgpu: null,\n offscreenCanvas: null,\n fdlibm: null\n },\n browser: {\n chrome: null\n },\n details: {\n offscreenCanvas: {\n bigTexture4096: null,\n bigTexture8192: null,\n bigTexture16384: null,\n bigTexture32768: null,\n }\n },\n discrete: null,\n useStrict: null,\n};\nconst ZERO_COST_HOLD_ADDRESS = '0x' + '0'.repeat(130);\n\n/** @typedef {import('../range-object').RangeLike} RangeLike */\n\n/**\n * Ensure input data is an appropriate format\n * @param {RangeObject | DistributionRange | RemoteDataSet | Array | Iterable}\n * inputData - A URI-shaped string, a [Multi]RangeObject-constructing value, or\n * an array of slice data\n * @return {RangeObject | RangeLike | DistributionRange | RemoteDataSet | Array}\n * The coerced input in an appropriate format ([Multi]RangeObject,\n * DistributionRange, RemoteDataSet, or array)\n */\nconst wrangleData = (inputData) => {\n if (typeof inputData === 'object' && !!inputData.ranges) { return new MultiRangeObject(inputData) }\n\n if (RangeObject.isRangelike(inputData)) { return inputData }\n if (RangeObject.isRangeObject(inputData)) { return inputData }\n if (DistributionRange.isDistribution(inputData)) { return inputData }\n if (RangeObject.isProtoRangelike(inputData)) { return new RangeObject(inputData) }\n if (DistributionRange.isProtoDistribution(inputData)) { return new DistributionRange(inputData) }\n if (RemoteDataSet.isRemoteDataSet(inputData)) { return inputData }\n if (RemoteDataPattern.isRemoteDataPattern(inputData)) { return inputData }\n\n return Array.isArray(inputData) ? 
inputData : [inputData];\n};\n\n// Used to validate the requirements object,\n// applies the default requirements schema\nconst applyObjectSchema = (obj, schema, errContext='', scope='') => {\n let checkedObjs = [];\n\n for (let p in schema) {\n let fullPropScope = scope.concat(p);\n if (!(p in obj)) {\n if (typeof schema[p] === 'object' && schema[p] !== null) {\n obj[p] = {};\n checkedObjs.push(fullPropScope);\n applyObjectSchema(obj[p], schema[p], errContext, fullPropScope.concat('.'));\n } else obj[p] = schema[p];\n } else if (typeof schema[p] === 'object' && schema[p] !== null && !checkedObjs.includes(fullPropScope)) {\n if (typeof obj[p] !== 'object') throw new Error(`${errContext}: Schema mismatch, property '${fullPropScope}' should be an object.`);\n else {\n checkedObjs.push(fullPropScope);\n applyObjectSchema(obj[p], schema[p], errContext, fullPropScope.concat('.'));\n }\n } else if ((typeof schema[p] !== 'object' || schema[p] === null)\n && typeof obj[p] !== 'boolean' && obj[p] !== null) {\n throw new Error(`${errContext}: Schema mismatch, object property '${fullPropScope}' should be a boolean.`);\n }\n }\n\n for (let p in obj) {\n let fullPropScope = scope.concat(p);\n if (!(p in schema)) throw new Error(`${errContext}: Schema mismatch, object has extra key '${fullPropScope}'.`);\n else if (typeof obj[p] === 'object' && obj[p] !== null && !checkedObjs.includes(fullPropScope)) {\n checkedObjs.push(fullPropScope);\n applyObjectSchema(obj[p], schema[p], errContext, fullPropScope.concat('.'));\n }\n }\n}\n\n/**\n * @classdesc The Compute API's Job Handle (see {@link https://docs.dcp.dev/specs/compute-api.html#job-handles|Compute API spec})\n * Job handles are objects which correspond to jobs. \n * They are created by some exports of the compute module, such as {@link module:dcp/compute.do|compute.do} and {@link module:dcp/compute.for|compute.for}.\n * @extends module:dcp/dcp-events.PropagatingEventEmitter\n * @hideconstructor\n * @access public\n */\nclass Job extends PropagatingEventEmitter {\n /**\n * This event is emitted when the job is accepted by the scheduler on deploy.\n * \n * @event Job#accepted\n * @access public\n * @type {object}\n * @property {object} job Original object that was delivered to the scheduler for deployment\n *//**\n * Fired when the job is cancelled.\n * \n * @event Job#cancel\n * @access public\n *//**\n * Fired when a result is returned.\n * \n * @event Job#result\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {string} task ID of the task (slice) the result came from\n * @property {number} sort The index of the slice\n * @property {object} result\n * @property {string} result.request\n * @property {*} result.result The value returned from the work function\n *//**\n * Fired when the result handle is modified, either when a new `result` event is fired or when the results are populated with `results.fetch()`\n * \n * @event Job#resultsUpdated\n * @access public\n *//**\n * Fired when the job has been completed.\n * \n * @event Job#complete\n * @access public\n * @type {ResultHandle}\n *//**\n * Fired when the job's status changes.\n * \n * @event Job#status\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} total Total number of slices in the job\n * @property {number} distributed Number of slices that have been distributed\n * @property {number} computed Number of slices that have completed execution (returned a result)\n * @property 
{string} runStatus Current runStatus of the job\n *//**\n * Fired when a slice throws an error.\n * \n * @event Job#error\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} sliceIndex Index of the slice that threw the error\n * @property {string} message The error message\n * @property {string} stack The error stacktrace\n * @property {string} name The error type name\n *//**\n * Fired when a slice uses one of the console log functions.\n * \n * @event Job#console\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} sliceIndex The index of the slice that produced this event\n * @property {string} level The log level, one of `debug`, `info`, `log`, `warn`, or `error`\n * @property {string} message The console log message\n *//**\n * Fired when a slice is stopped for not calling progress. Contains information about how long the slice ran for, and about the last reported progress calls.\n * \n * @event Job#noProgress\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} sliceIndex The index of the slice that failed due to no progress\n * @property {number} timestamp How long the slice ran before failing\n * @property {object} progressReports\n * @property {object} progressReports.last The last progress report received from the worker\n * @property {number} progressReports.last.timestamp Time since the start of the slice\n * @property {number} progressReports.last.progress Progress value reported\n * @property {*} progressReports.last.value The last value that was passed to the progress function\n * @property {number} progressReports.last.throttledReports Number of calls to progress that were throttled since the last report\n * @property {object} progressReports.lastUpdate The last determinate (update to the progress param) progress report received from the worker\n * @property {number} progressReports.lastUpdate.timestamp\n * @property {number} progressReports.lastUpdate.progress\n * @property {*} progressReports.lastUpdate.value\n * @property {number} progressReports.lastUpdate.throttledReports\n *//**\n * Identical to `noProgress`, except that it also contains the data that the slice was executed with.\n * \n * @event Job#noProgressData\n * @access public\n * @type {object}\n * @property {*} data The data that the slice was executed with\n *//**\n * Fired when the job is paused due to running out of funds. 
The job can be resumed by escrowing more funds then resuming the job.\n * \n * Event payload is the estimated funds required to complete the job\n * \n * @event Job#ENOFUNDS\n * @access public\n * @type {BigNumber}\n *//**\n * Fired when the job is cancelled due to the work function not calling the `progress` method frequently enough.\n * \n * @event Job#ENOPROGRESS\n * @access public\n *//**\n * The job was cancelled because scheduler has determined that individual tasks in this job exceed the maximum allowable execution time.\n * \n * @event Job#ESLICETOOSLOW\n * @access public\n *//**\n * Fired when the job is cancelled because too many work functions are terminating with uncaught exceptions.\n * \n * @event Job#ETOOMANYERRORS\n * @access public\n */\n\n /**\n * @form1 new Job(job_shaped_object)\n * @form2 new Job('application_worker_address'[, data[, arguments]])\n * @form3b new Job('worker source'[, data[, arguments]])\n * @form3b new Job(worker_function[, data[, arguments]])\n */\n\n constructor() {\n super('Job');\n\n this.readyStateChange = (readyState) => {\n this.readyState = readyState;\n this.emit('readyStateChange', this.readyState);\n };\n this.readyStateChange(sliceStatus.new);\n \n /*\n * Private members\n */\n this[INTERNAL_SYMBOL] = {\n events: new EventEmitter('Job Internal'),\n connected: false, // set to true after first call to exec\n /**\n * This object holds details for generating DCPv4 messages about this job.\n * It is updated everytime we call SNAPSHOT.\n */\n payloadDetails: {\n localExec: false,\n },\n\n /**\n * The slicePaymentOffer default value is set to compute.marketValue, in .exec() \n */\n slicePaymentOffer: null,\n paymentAccountKeystore: null,\n\n /**\n * These are private but getters are provided so they can be modified but\n * not replaced.\n */\n /**\n * List of module prefixes using in CommonJS module resolution.\n * @type {string[]}\n */\n requirePath: [],\n\n /**\n * List of modules the job needs.\n * @type {string[]}\n */\n\n dependencies: [],\n\n // This array contains the names of worker events that\n // had listeners registered before exec is called, once\n // the job has been deployed then the proper event handlers\n // will be generated from this list\n subscribedEvents: new Set(),\n subscribedWorkerEvents: new Set(),\n\n results: [],\n resultsAvailable: [],\n resultStorageType: 'values',\n resultStorageDetails: undefined,\n resultStorageParams: undefined, //Holds the POST params and URL for off-prem storage\n\n // Tracks job progress\n status: {\n runStatus: null,\n total: null,\n distributed: null,\n computed: null,\n },\n\n // Cancel is special. We need to fire an `alert` when the job is canceled. \n // If they are listening for the (reliable) event then they need to be able to\n // prevent it. If not, then it'll be handled by the `exec` rejection via the 'stopped'\n // event. The result is that we want only one of two ways the `alert` can be fired\n // to be active based on whether or not the user is listening for cancel. \n // Once DCP-1150 lands, we won't need to listen on stopped since more failures will fire a cancel event.\n listeningForCancel: false,\n // TODO - cancel events should have more info in them. 
DCP-1150\n cancelAlert: () => ClientModal.alert(\"More details in console...\", {title: 'Job Canceled'}),\n\n listeningForError: false,\n errorAlert: (err) => ClientModal.alert(err, {title: 'Unexpected Error'}),\n\n listeningForNoFunds: false,\n noFundsAlert: (event) => {\n let msg = `Job \"${event.name}\" is paused due to insufficient funds. ${event.fundsRequired} DCC is required to compute remaining ${event.remainingSlices} slices.\\njobId: ${event.job}\\nbankAccount: ${event.bankAccount}`; \n ClientModal.alert(msg, { title: 'Job paused' })\n },\n };\n\n /*\n * Public members\n */\n // Deep copy the default requirements. I know, I hate it too\n /**\n * An object describing the requirements that workers must have to be eligible for this job. See\n * {@link https://docs.dcp.dev/specs/compute-api.html#requirements-objects|Requirements Objects}.\n *\n * @type {object}\n * @access public\n */\n this.requirements = JSON.parse(JSON.stringify(DEFAULT_REQUIREMENTS));\n this.schedulerURL = undefined;\n this.bankURL = undefined;\n this.deployURL = '';\n this.collateResults = true;\n this.listeningForResults = false;\n /**\n * @see {@link https://kingsds.atlassian.net/browse/DCP-1475?atlOrigin=eyJpIjoiNzg3NmEzOWE0OWI4NGZkNmI5NjU0MWNmZGY2OTYzZDUiLCJwIjoiaiJ9|Jira Issue}\n */\n let uuid = uuidv4();\n\n /**\n * An object describing the cost the user believes each the average slice will incur, in terms of CPU/GPU and I/O.\n * If defined, this object is used to provide initial scheduling hints and to calculate escrow amounts.\n *\n * @type {object}\n * @access public\n */\n this.initialSliceProfile = undefined;\n\n /**\n * A place to store public-facing attributes of the job. Anything stored on this object will be available inside the work \n * function (see {@link module:dcp/compute~sandboxEnv.work}). The properties documented here may be used by workers to display what jobs are currently being \n * worked on.\n * @access public\n * @property {string} name Public-facing name of this job.\n * @property {string} description Public-facing description for this job.\n * @property {string} link Public-facing link to external resource about this job.\n */\n this.public = {\n name: null,\n description: null,\n link: null,\n };\n\n this.contextId = null;\n this.force100pctCPUDensity = false;\n this.workerConsole = false;\n this.isCI = false;\n\n // The following 3 public members are only populated after the job has been deployed\n this.address = null;\n this.receipt = null;\n this.meanSliceProfile = null;\n\n /**\n * A number (can be null, undefined, or infinity) describing the estimationSlicesRemaining in the jpd (dcp-2593)\n * @type {number}\n * @access public\n */\n this.estimationSlices = undefined;\n \n /**\n * tunable parameters per job\n * @access public\n * @param {object} tuning \n * @param {string} tuning.kvin Encode the TypedArray into a string, trying multiple methods to determine optimum \n * size/performance. 
The this.tune variable affects the behavior of this code this:\n * @param {boolean} speed If true, only do naive encoding: floats get represented as byte-per-digit strings\n * @param {boolean} size If true, try the naive, ab8, and ab16 encodings; pick the smallest\n * If both are false try the naive encoding if under typedArrayPackThreshold and use if smaller\n * than ab8; otherwise, use ab8\n */\n this.tuning = {\n kvin: {\n size: false,\n speed: false,\n },\n }\n\n /**\n * When true, allows a job in estimation to have requestTask return multiple estimation slices.\n * This flag applies independent of infinite estimation, viz., this.estimationSlices === null .\n * @type {boolean}\n * @access public\n */\n this.greedyEstimation = false;\n\n /* We avoid using job.id internally because it is easy to confuse with db::jobs.id, but is an API\n * interface that we present to end-user developers so we need to keep it.\n */\n Object.defineProperty(this, 'id', {\n get: () => this.address,\n set: (id) => this.address = id\n });\n\n // \n /**\n * An EventEmitter for custom events dispatched by the work function.\n * @type {module:dcp/dcp-events.EventEmitter}\n * @access public\n * @example\n * // in sandbox\n * work.emit('myEventName', 1, [2], \"three\");\n * // clientside\n * job.work.on('myEventName', (num, arr, string) => { });\n */\n this.work = new EventEmitter('job.work');\n\n //Initialize the eventSubscriber so each job has unique eventSubscriber\n this.eventSubscriber = new EventSubscriber(this);\n \n // Some events can't be emitted 'naturally' without having weird/wrong output.\n // An example of this is results. When results are returned from the scheduler,\n // They come in as a dataURI of kvin-ified results. We need to parse all that before\n // We actually send it to the client. For such events, we will intercept them, parse\n // them as needed, then emit the event with the 'fixed' data to the client.\n \n const ceci = this\n const parseConsole = function deserializeConsoleMessage(ev) {\n ceci.emit('console', ev);\n }\n \n this.eventIntercepts = {\n result: (ev) => this[ON_RESULT](ev),\n status: (ev) => this[ON_STATUS](ev),\n cancel: (ev) => this[INTERNAL_SYMBOL].events.emit('stopped', ev),\n console: parseConsole,\n }\n\n this.eventTypes = (__webpack_require__(/*! 
dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\").eventTypes);\n\n this.work.on('newListener', (evt) => {\n if (!this[INTERNAL_SYMBOL].connected && evt !== 'newListener') {\n this[INTERNAL_SYMBOL].subscribedWorkerEvents.add(evt);\n }\n });\n\n this.on('newListener', (evt) => {\n if (!this[INTERNAL_SYMBOL].connected && evt !== 'newListener') {\n this[INTERNAL_SYMBOL].subscribedEvents.add(evt);\n }\n });\n // Form1: If arguments[0] is an object that looks like a job, this is a 'copy constructor'\n // where we inherit as much as possible from the original instance.\n if (typeof arguments[0] === 'object' &&\n arguments[0].type &&\n arguments[0].data &&\n arguments[0].public) {\n \n let src = arguments[0];\n\n this[INTERNAL_SYMBOL].payloadDetails = {\n ...src,\n data: wrangleData(src.data), // rehydrate ranges\n };\n\n if (src.feeStructure) {\n this.setSlicePaymentOffer(src.feeStructure);\n }\n \n if (src.address)\n this.address = src.address;\n if (src.payloadData.status)\n this[ON_STATUS](this[INTERNAL_SYMBOL].payload.status, false);\n if (src.public)\n Object.assign(this.public, src.public);\n } else {\n /* Forms 2-4 */ \n if (typeof arguments[0] === 'function')\n arguments[0] = arguments[0].toString();\n\n if (typeof arguments[0] === 'string') {\n const { encodeDataURI } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n this[INTERNAL_SYMBOL].workFunctionURI = encodeDataURI(arguments[0], 'application/javascript');\n } else if (DcpURL.isURL(arguments[0])) {\n this[INTERNAL_SYMBOL].workFunctionURI = arguments[0].href;\n } \n\n const wrangledInputData = wrangleData(arguments[1] || []);\n const wrangledArguments = wrangleData(arguments[2] || []);\n \n log('wrangledInputData:', wrangledInputData);\n log('wrangledArguments:', wrangledArguments);\n \n Object.assign(this[INTERNAL_SYMBOL].payloadDetails, {\n request: 'main',\n data: wrangledInputData,\n arguments: wrangledArguments,\n });\n }\n\n // This should happen last, it depends on the this.[INTERNAL_SYMBOL].payloadDetails.data array\n /**\n * A Result Handle object used to query and manipulate the output set. 
\n * Present once job has been deployed.\n * @type {ResultHandle}\n * @access public\n */\n this.results = new ResultHandle(this);\n\n /**\n * Read-only copy of the job's uuid (generated or rehydrated via form1 constructor)\n */\n Object.defineProperty(this, 'uuid', {\n get: () => uuid,\n configurable: false,\n enumerable: true,\n });\n \n // each entry contains the computeGroupID, joinKey, joinSecret, joinKeystore\n // Add to public compute group by default\n this.computeGroups = [ Object.assign({}, schedulerConstants.computeGroups.public) ];\n\n\n // Initialize to null so these properties are recognized for the Job class\n this.bankConnection = null;\n this.deployConnection = null;\n this.openBankConn = function openBankConn()\n {\n ceci.bankConnection = new protocolV4.Connection(dcpConfig.bank.services.bankTeller);\n ceci.bankConnection.on('close', ceci.openBankConn);\n }\n\n this.openDeployConn = function openDeployConn()\n {\n ceci.deployConnection = new protocolV4.Connection(dcpConfig.scheduler.services.jobSubmit);\n ceci.deployConnection.on('close', ceci.openDeployConn);\n }\n\n this.openBankConn();\n this.openDeployConn();\n }\n\n /** \n * Cancel the job\n * @access public\n * @param {string} reason If provided, will be sent to client\n */\n async cancel (reason = undefined) {\n const response = await this.deployConnection.send('cancelJob', {\n job: this.address,\n owner: this.paymentAccountKeystore.address,\n reason,\n }, this.paymentAccountKeystore);\n\n return response.payload;\n }\n\n /** \n * Resume this job\n * @access public\n */\n async resume() {\n const response = await this.schedulerConnection.send('resumeJob', {\n job: this.address,\n owner: this.paymentAccountKeystore.address,\n }, this.paymentAccountKeystore);\n\n return response.payload;\n }\n\n /**\n * Helper function for retrieving info about the job. The job must have already been deployed.\n * An alias for {@link module:dcp/compute.getJobInfo}.\n * @access public\n */\n async getJobInfo() {\n return await (__webpack_require__(/*! ../compute */ \"./src/dcp-client/compute.js\").compute.getJobInfo)(this.address);\n }\n\n /**\n * Helper function for retrieving info about the job's slices. The job must have already been deployed.\n * An alias for {@link module:dcp/compute.getSliceInfo}.\n * @access public\n */\n async getSliceInfo() {\n return await (__webpack_require__(/*! 
../compute */ \"./src/dcp-client/compute.js\").compute.getSliceInfo)(this.address);\n }\n \n /**\n * Helper function that tries to upload slicePile to scheduler for the job with the given address\n * If the connection throws, we will continue trying to upload until it has thrown errorTolerance times\n * However, if the upload is unsuccessful, we throw immediately.\n * @param {object} pileMessage \n * @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job \n */\n async safeSliceUpload(pileMessage)\n {\n let payload = undefined;\n let errorTolerance = dcpConfig.job.sliceUploadErrorTolerance; // copy number of times we will tolerate non-success when uploading slices directly from config\n\n while (true) // eslint-disable-line no-constant-condition\n {\n try\n {\n const start = Date.now();\n this.emit('x-dbg-uploadStart', pileMessage.signedMessage.length);\n payload = await this.deployConnection.sendPreparedMessage(pileMessage);\n if (!payload.success) {\n this.emit('x-dbg-uploadBackoff', pileMessage.signedMessage.length);\n throw new DCPError('Cannot upload slice data to scheduler','EUPLOADSCHED');\n }\n else {\n this.emit('x-dbg-uploadProgress', Date.now() - start);\n break;\n }\n }\n catch (error)\n {\n if (--errorTolerance <= 0) {\n this.emit('x-dbg-uploadError', error);\n throw error;\n }\n }\n }\n return payload;\n }\n \n /**\n * This function contains the actual logic behind staggered slice uploads\n * to the scheduler which makes quicker deployment possible.\n * \n * Note that we pass in mostToTake so that the uploadLogic function can update \n * it to the new value it needs to be, and then pass it back to the wrapper \n * function (addSlices) which actually does the work of picking up slices \n * and thus uses this value\n * @param {Array} pile the actual array of slices being uploaded to scheduler\n * @param {Number} mostToTake number of slices that should be taken by the wrapper function (addSlices) \n * which actually does the work of picking up slices and thus uses this value.\n * We pass in mostToTake so that the uploadLogic function can update it to the \n * new value it needs to be, and then pass it back to the wrapper\n * @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job\n */\n async sliceUploadLogic(pile, mostToTake)\n {\n const slicesTaken = pile.length;\n await this.deployConnection.keepalive();\n let pileMessage = await this.deployConnection.prepare('addSliceData', {\n job: this.address,\n dataValues: kvinMarshal(pile),\n });\n \n let pileSize = pileMessage.signedMessage.length;\n \n let newMostToTake;\n let uploadedSlices;\n\n // if the pile is larger than the ceiling but we only took one slice, there's no smaller pile we can make\n // so we upload it anyway but we don't try taking more next time cause we were over the ceiling (which \n // is a hard limit on upload sizes)\n if ((pileSize > dcpConfig.job.uploadSlicesCeiling) && (slicesTaken === 1))\n {\n uploadedSlices = await this.safeSliceUpload(pileMessage);\n newMostToTake = 1;\n }\n \n // if the pile is larger than the target but we only took one slice, there's no smaller pile we can make\n // so we upload it anyway and still try taking more\n else if ((pileSize > dcpConfig.job.uploadSlicesTarget) && (slicesTaken === 1))\n {\n uploadedSlices = await this.safeSliceUpload(pileMessage);\n newMostToTake = mostToTake * dcpConfig.job.uploadIncreaseFactor;\n }\n \n // otherwise, 
if the pile is smaller than the soft ceiling, send up the pile anyway (since piles are expensive to make) \n // but remember to include incrementFactor times as many slices in the next pile\n else if (pileSize <= dcpConfig.job.uploadSlicesTarget)\n {\n uploadedSlices = await this.safeSliceUpload(pileMessage);\n newMostToTake = mostToTake * dcpConfig.job.uploadIncreaseFactor;\n }\n \n // if the pile is over the ceiling then we do not upload and begin reassembling our piles from scratch\n else if (pileSize > dcpConfig.job.uploadSlicesCeiling)\n {\n newMostToTake = -1;\n }\n \n // if the pile is over the target (but implicitly under the ceiling), then upload the pile to scheduler but lower mostToTake\n // by a smaller factor than incrementFactor to allow us to begin \"centering\" sizes of piles around the target\n else if (pileSize > dcpConfig.job.uploadSlicesTarget)\n {\n uploadedSlices = await this.safeSliceUpload(pileMessage);\n newMostToTake = Math.ceil(mostToTake / ((2 / 3) * dcpConfig.job.uploadIncreaseFactor));\n }\n\n if (uploadedSlices && uploadedSlices.success && typeof uploadedSlices.payload.lastSliceNumber !== 'undefined')\n // must check if uploadedSlices exists first since if pileSize > ceiling then there will be no uploadedSlices\n this.status.total = uploadedSlices.payload.lastSliceNumber;\n\n let payload = uploadedSlices ? uploadedSlices.payload : undefined;\n return { payload, newMostToTake }; // in case the user needs lastSliceNumber's value\n }\n \n /**\n * Uploads slices to the scheduler in a staggered fashion\n * @param {Array} dataValues actual array of slices being uploaded to scheduler\n * @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job\n */\n async addSlices(dataValues)\n {\n if (!Array.isArray(dataValues))\n throw new TypeError('Only data-by-value jobs may dynamically add slices');\n\n let mostToTake = 1; // maximum number of slices we could take in per pile\n let payload = undefined; // used in return value\n let slicesTaken = 0; // number of slices in the pile already\n let pile = [];\n \n for (let slice of dataValues)\n {\n pile.push(slice);\n slicesTaken++;\n if (slicesTaken === mostToTake)\n {\n let total = await this.sliceUploadLogic(pile, mostToTake);\n payload = total.payload;\n \n if (total.newMostToTake < 0)\n {\n /* if total.newMostToTake == -1 (only non-positive value returned), then the pile was not successfully\n * uploaded because it was over the ceiling and we need to upload the pile *itself* again, recursively\n */\n payload = await this.addSlices(pile);\n /* and next time, the number of slices we take is the number from this time *divided* by the incrementFactor\n * since we know invariably that number of slices was under the ceiling AND target\n * if you're curious why that's an invariant, this is because mostToTake only ever *increases* by being multiplied by \n * a factor of incrementFactor within sliceUploadLogic, and this only occurs when the pile being uploaded that time\n * was under the target\n */\n mostToTake = mostToTake / dcpConfig.job.uploadIncreaseFactor;\n }\n else\n {\n /* in all other cases (other than the pile size being over the ceiling) the sliceUploadLogic helper \n * determines the number of slices we should pick up next time, so we just use the value it spits out\n */\n mostToTake = total.newMostToTake;\n }\n \n // reset slicesTaken and pile since at this point we know for sure the pile has been uploaded\n pile = [];\n slicesTaken = 0;\n }\n else\n 
{\n continue;\n }\n }\n // upload the pile one last time in case we continued off the last slice with a non-empty pile\n if (pile.length !== 0)\n {\n let finalObj = await this.sliceUploadLogic(pile, mostToTake);\n payload = finalObj.payload;\n mostToTake = finalObj.newMostToTake;\n \n if (mostToTake < 0)\n {\n // if you need documentation on the next two lines, look inside the if (total.newMostToTake < 0) just above\n payload = await this.addSlices(pile);\n mostToTake = mostToTake / dcpConfig.job.uploadIncreaseFactor;\n }\n }\n\n // and finally assign whatever mostToTake was at the end of this run of the function to be returned \n // as part of the payload in case addSlices was called recursively\n payload.mostToTake = mostToTake;\n \n /* contains the job's lastSliceNumber (the only externally-meaningful value returned from \n * the uploading of slices to the scheduler) in case the calling function needs it \n */\n return payload;\n }\n\n /**\n * job.snapshot(): Private function used to populate the payloadDetails from private data,\n * inferred data, etc. Once this function has run, the payloadDetails are\n * considered authoritatively up to date until the calling function returns\n * or awaits.\n */\n [SNAPSHOT]() {\n const pd = this[INTERNAL_SYMBOL].payloadDetails;\n\n pd.type = 'ad-hoc'; /* @todo implement appliances */\n pd.uuid = this.uuid;\n pd.workFunctionURI = this[INTERNAL_SYMBOL].workFunctionURI;\n pd.dependencies = this[INTERNAL_SYMBOL].dependencies;\n pd.requirePath = this[INTERNAL_SYMBOL].requirePath;\n pd.modulePath = this[INTERNAL_SYMBOL].modulePath;\n pd.resultStorageType = this[INTERNAL_SYMBOL].resultStorageType;\n pd.resultStorageDetails = this[INTERNAL_SYMBOL].resultStorageDetails;\n pd.resultStorageParams = this[INTERNAL_SYMBOL].resultStorageParams;\n pd.force100pctCPUDensity = this[INTERNAL_SYMBOL].force100pctCPUDensity;\n\n pd.requirements = this.requirements;\n applyObjectSchema(pd.requirements, DEFAULT_REQUIREMENTS, 'Requirements Object');\n \n // @todo: 'figure this out' - wise words from eddie /mp jan 2019\n if (!pd.options) { pd.options = {}; }\n if (!pd.public) { pd.public = {}; } \n\n for (let p of ['name', 'description', 'link']) {\n if (typeof this.public[p] === 'string') {\n pd.public[p] = this.public[p];\n }\n }\n\n // The max value that the client is willing to spend to deploy\n // (list on the scheduler, doesn't include compute payment)\n // maxDeployPayment is the max the user is willing to pay to DCP (as a\n // Hold), in addition to the per-slice offer and associated scrape.\n // Currently calculated as `deployCost = costPerKB *\n // (JSON.stringify(job).length / 1024) // 1e-9 per kb`\n // @todo: figure this out / er nov 2018\n pd.maxDeployPayment = 1;\n \n // payloadDetails.timing can be provided as an initial estimate of slice time, to\n // give a more useful useful calculated heap value (heap.value is more or less\n // dcc-per-millisecond)\n pd.timing = pd.timing || 1; \n }\n\n /** Escrow additional funds for this job\n * @access public\n * @param {number|BigNumber} fundsRequired - A number or BigNumber instance representing the funds to escrow for this job\n */\n async escrow (fundsRequired) {\n if ((typeof fundsRequired !== 'number' && !BigNumber.isBigNumber(fundsRequired))\n || fundsRequired <= 0 || !Number.isFinite(fundsRequired) || Number.isNaN(fundsRequired)) {\n throw new Error(`Job.escrow: fundsRequired must be a number greater than zero. 
(not ${fundsRequired})`);\n }\n\n const response = await this.bankConnection.send('embiggenFeeStructure', {\n feeStructureAddress: this[INTERNAL_SYMBOL].payloadDetails.feeStructureId,\n additionalEscrow: new BigNumber(fundsRequired),\n fromAddress: this.paymentAccountKeystore.address,\n }, this.paymentAccountKeystore);\n\n this.receipt = response.payload;\n\n return this.receipt;\n }\n\n async _pack () {\n var retval = (__webpack_require__(/*! ./node-modules */ \"./src/dcp-client/job/node-modules.js\").createModuleBundle)(this[INTERNAL_SYMBOL].dependencies);\n return retval;\n }\n\n /** \n * Collect all of the dependencies together, throw them into a BravoJS\n * module which sideloads them as a side effect of declaration, and transmit\n * them to the package manager. Then we return the package descriptor object,\n * which is guaranteed to have only one file in it.\n *\n * @returns {object} with properties name and files[0]\n */\n async _publishLocalModules() {\n const { tempFile, hash, unresolved } = await this._pack();\n\n if (!tempFile) {\n return { unresolved };\n }\n\n const sideloaderFilename = tempFile.filename;\n const pkg = {\n name: `dcp-pkg-v1-localhost-${hash.toString('hex')}`,\n version: '1.0.0',\n files: {\n [sideloaderFilename]: `${sideloaderModuleIdentifier}.js`,\n },\n }\n\n await dcpPublish.publish(pkg);\n tempFile.remove();\n\n return { pkg, unresolved };\n }\n\n /**\n * Deploys the job to the scheduler.\n * @param {number | object} [slicePaymentOffer=compute.marketValue] - Amount\n * in DCC that the user is willing to pay per slice.\n * @param {Keystore} [paymentAccountKeystore=wallet.get] - An instance of the\n * Wallet API Keystore that's used as the payment account when executing the\n * job.\n * @param {object} [initialSliceProfile] - An object describing the cost the\n * user believes the average slice will incur.\n * @access public\n * @emits Job#accepted\n */\n async exec(slicePaymentOffer = (__webpack_require__(/*! 
../compute */ \"./src/dcp-client/compute.js\").compute.marketValue), paymentAccountKeystore, initialSliceProfile) {\n if (this[INTERNAL_SYMBOL].connected) {\n throw new Error('Exec called twice on the same job handle.');\n }\n\n if (this.estimationSlices === Infinity)\n this[INTERNAL_SYMBOL].payloadDetails.estimationSlices = null;\n else if (this.estimationSlices <= 0)\n throw new Error('Incorrect value for estimationSlices; it can be an integer or Infinity!');\n else\n this[INTERNAL_SYMBOL].payloadDetails.estimationSlices = this.estimationSlices;\n\n this[INTERNAL_SYMBOL].payloadDetails.greedyEstimation = this.greedyEstimation;\n\n if(this.tuning.kvin.speed || this.tuning.kvin.size)\n {\n tunedKvin = new kvin.KVIN();\n tunedKvin.tune = 'size';\n if(this.tuning.kvin.speed)\n tunedKvin.tune = 'speed';\n // If both size and speed are true, kvin will optimize based on speed\n if(this.tuning.kvin.speed && this.tuning.kvin.size)\n console.log('Slices and arguments are being uploaded with speed optimization.');\n }\n\n /* eagerly connect to depedent services for better performance */\n this.eventSubscriber.eventRouterConnection.keepalive();\n this.deployConnection.keepalive();\n\n this.readyStateChange('exec');\n\n if (typeof slicePaymentOffer !== 'undefined') this.setSlicePaymentOffer(slicePaymentOffer);\n if (typeof initialSliceProfile !== 'undefined') this.initialSliceProfile = initialSliceProfile;\n if (typeof paymentAccountKeystore !== 'undefined') {\n /** XXX @todo deprecate use of ethereum wallet objects */\n if (typeof paymentAccountKeystore === 'object' && paymentAccountKeystore.hasOwnProperty('_privKey')) {\n console.warn('* deprecated API * - job.exec invoked with ethereum wallet object as paymentAccountKeystore') /* /wg oct 2019 */\n paymentAccountKeystore = paymentAccountKeystore._privKey\n }\n /** XXX @todo deprecate use of private keys */\n if (wallet.isPrivateKey(paymentAccountKeystore)) {\n console.warn('* deprecated API * - job.exec invoked with private key as paymentAccountKeystore') /* /wg dec 2019 */\n paymentAccountKeystore = await new wallet.Keystore(paymentAccountKeystore, '');\n }\n\n this.setPaymentAccountKeystore(paymentAccountKeystore)\n }\n\n // Unlock\n if (this[INTERNAL_SYMBOL].paymentAccountKeystore) {\n // Throws if they fail to unlock, we allow this since the keystore was set programmatically. \n await this[INTERNAL_SYMBOL].paymentAccountKeystore.unlock(undefined, parseFloat(dcpConfig.job.maxDeployTime));\n } else {\n // If not set programmatically, we keep trying to get an unlocked keystore ... forever.\n let locked = true;\n let safety = 0; // no while loop shall go unguarded\n let ks;\n do {\n ks = null;\n // custom message for the browser modal to denote the purpose of keystore submission\n let msg = `This application is requesting a keystore file to execute ${this.public.description || this.public.name || 'this job'}. Please upload the corresponding keystore file. 
If you upload a keystore file which has been encrypted with a passphrase, the application will not be able to use it until it prompts for a passphrase and you enter it.`;\n try {\n ks = await wallet.get({ contextId: this.contextId, jobName: this.public.name, msg});\n } catch (e) {\n if (e.code !== ClientModal.CancelErrorCode) throw e;\n };\n if (ks) {\n try {\n await ks.unlock(undefined, parseFloat(dcpConfig.job.maxDeployTime));\n locked = false;\n } catch (e) {\n const expectedCodes = [wallet.unlockFailErrorCode, ClientModal.CancelErrorCode];\n if (!expectedCodes.includes(e.code)) throw e;\n }\n }\n if (safety++ > 1000) throw new Error('EINFINITY: job.exec tried wallet.get more than 1000 times.')\n } while (locked);\n this.setPaymentAccountKeystore(ks)\n }\n\n // We either have a valid keystore + password or we have rejected by this point.\n if (!this.slicePaymentOffer) {\n throw new Error('A payment profile must be assigned before executing the job');\n } else {\n let pd = this[INTERNAL_SYMBOL].payloadDetails;\n pd.feeStructure = this[INTERNAL_SYMBOL].slicePaymentOffer.toFeeStructure(pd.data.length);\n }\n\n if (!this.address) {\n try {\n this.readyStateChange('init');\n await this[DEPLOY_JOB]();\n this.readyStateChange('listeners');\n const listenersP = this[ADD_LISTENERS]();\n\n // localExec jobs are not entered in any compute group.\n if (!this[INTERNAL_SYMBOL].payloadDetails.localExec && this.computeGroups && this.computeGroups.length)\n {\n this.readyStateChange('compute-groups');\n\n // Add this job to its currently-defined compute groups (as well as public group, if included)\n let cgPayload;\n try {\n cgPayload = await computeGroups.addJobToGroups(this.address, this.computeGroups);\n debugging('dcp-client') && console.debug('job/index: addJobToGroups cgPayload', cgPayload);\n } catch (e) {\n debugging('dcp-client') && console.debug('job/index: addJobToGroups threw exception', e);\n cgPayload = false;\n }\n\n computeGroups.closeServiceConnection().catch((err) => {\n console.error('Warning: could not close compute groups service connection', err)\n });\n\n /* Could not put the job in any compute group, even though the user wanted it to run. Cancel the job. */\n if (cgPayload === false || cgPayload.length === 0)\n {\n await this.cancel('compute-groups::Unable to join any compute groups');\n throw new DCPError(`Access Denied::Failed to add job ${this.address} to any of the desired compute groups`, 'DCPL-1100');\n }\n }\n\n await listenersP;\n\n // Upload slice data after CGs, but before `deployed` readystate.\n // This way, work can begin on the first slices while continuing to\n // upload additional slice input data\n let data = this[INTERNAL_SYMBOL].dataValues;\n\n // if job data is by value then upload data to the scheduler in a staggered fashion\n if (Array.isArray(data)) {\n this.readyStateChange('uploading');\n\n await this.addSlices(data).then(() => {\n return this.close();\n });\n }\n\n this.readyStateChange('deployed');\n this.emit('accepted');\n } catch (error) {\n if (ON_BROWSER) {\n await ClientModal.alert(error, { title: 'Failed to deploy job!' 
});\n }\n\n throw error;\n }\n } else {\n await this[ADD_LISTENERS]();\n this.readyStateChange('reconnected');\n }\n \n this[ON_STATUS](this[INTERNAL_SYMBOL].payloadDetails.status);\n this[INTERNAL_SYMBOL].connected = true;\n\n return new Promise((resolve, reject) => {\n const onComplete = () => resolve(this.results);\n const onCancel = (event) => {\n /**\n * FIXME(DCP-1150): Remove this since normal cancel event is noisy\n * enough to not need stopped event too.\n */\n if (ON_BROWSER && !this[INTERNAL_SYMBOL].listeningForCancel)\n this[INTERNAL_SYMBOL].cancelAlert(event.reason);\n this.emit('cancel', event);\n\n let errorMsg = event.reason;\n if (event.error)\n errorMsg = errorMsg +`\\n Recent error message: ${event.error.message}`\n \n reject(new DCPError(errorMsg, event.code));\n };\n\n this[INTERNAL_SYMBOL].events.once('stopped', async (stopEvent) => {\n if (this.receivedStop)\n {\n // The result submitter will ensure the client receives the stop event through the event router\n // by repeatedly sending stop messages if it detects something might have gone wrong. Sometimes\n // this detection is 'overeager', causing multiple stop events to be sent by the result submitter.\n // If multiple are received, ignore all after the first one.\n return;\n }\n this.receivedStop = true;\n this.emit('stopped', stopEvent.runStatus);\n switch (stopEvent.runStatus) {\n case jobStatus.finished:\n if (this.collateResults) {\n let report = await this.getJobInfo();\n // fetch results for remain slices\n let fetchedSliceNumbers = this[INTERNAL_SYMBOL].resultsAvailable.reduce((a,e,i) => {\n if(e) a.push(i);\n return a;\n }, []);\n\n let allSliceNumbers = Array.from(Array(report.totalSlices)).map((e,i)=>i+1);\n let remainSliceNumbers = allSliceNumbers.filter( function(e) {\n return !fetchedSliceNumbers.includes(e);\n });\n\n if (remainSliceNumbers.length)\n {\n const promises = remainSliceNumbers.map(sliceNumber => this.results.fetch([sliceNumber], true));\n await Promise.all(promises);\n }\n }\n\n this.emit('complete', this.results);\n onComplete();\n break;\n case jobStatus.cancelled:\n onCancel(stopEvent);\n break;\n default:\n /**\n * Asserting that we should never be able to reach here. 
The only\n * scheduler events that should trigger the Job's 'stopped' event\n * are jobStatus.cancelled, jobStatus.finished, and sliceStatus.paused.\n */\n reject(\n new Error(\n `Unknown event \"${stopEvent.runStatus}\" caused the job to be stopped.`,\n ),\n );\n break;\n }\n });\n\n if (!this[INTERNAL_SYMBOL].payloadDetails.running) {\n const runStatus = this[INTERNAL_SYMBOL].payloadDetails.runStatus;\n this[INTERNAL_SYMBOL].events.emit('stopped', { runStatus });\n }\n }).finally(() => {\n const handleErr = (e) => {\n console.error('Error while closing job connection:');\n console.error(e);\n }\n\n // Create an async IIFE to not block the promise chain\n (async () => {\n //delay to let last few events to be received\n await new Promise((resolve) => setTimeout(resolve, 1000));\n \n // close all of the connections so that we don't cause node processes to hang.\n await this.eventSubscriber.close().catch(handleErr);\n this.deployConnection.off('close', this.openDeployConn);\n await this.deployConnection.close().catch(handleErr);\n\n this.bankConnection.off('close', this.openDeployConn)\n await this.bankConnection.close().catch(handleErr);\n })();\n });\n }\n\n /**\n * job.addListeners(): Private function used to set up event listeners to the scheduler\n * before deploying the job.\n */\n async [ADD_LISTENERS] () {\n // This is important: We need to flush the task queue before adding listeners\n // because we queue pending listeners by listening to the newListener event (in the constructor).\n // If we don't flush here, then the newListener events may fire after this function has run,\n // and the events won't be properly set up.\n await new Promise(resolve => setTimeout(resolve, 0));\n\n // @todo: Listen for an estimated cost, probably emit an \"estimated\" event when it comes in?\n // also @todo: Do the estimation task(s) on the scheduler and send an \"estimated\" event\n\n // Always listen to the stop event. 
It will resolve the work function promise, so is always needed.\n this.on('stop', (ev) => {\n this[INTERNAL_SYMBOL].events.emit('stopped', ev)\n });\n\n // Connect listeners that were set up before exec\n const evts = Array.from(this[INTERNAL_SYMBOL].subscribedEvents);\n if (evts.includes('result'))\n this.listeningForResults = true;\n this[INTERNAL_SYMBOL].subscribedEvents.clear();\n await this[LISTEN_TO_EVENTS](evts);\n\n // Connect listeners that are set up after exec\n this.on('newListener', (evt) => {\n if (evt === 'newListener') return;\n this[LISTEN_TO_EVENTS]([evt]);\n });\n \n if (this.collateResults && !this.listeningForResults) {\n // automatically add a listener for results\n this.on('result', () => {});\n }\n\n // Connect work event listeners that were set up before exec\n const workEvts = Array.from(this[INTERNAL_SYMBOL].subscribedWorkerEvents);\n if (debugging('dcp-client')) {\n console.debug('subscribedEvents', evts);\n console.debug('subscribedWorkerEvents', workEvts);\n }\n this[INTERNAL_SYMBOL].subscribedWorkerEvents.clear();\n for (let evt of workEvts) {\n await this[LISTEN_TO_WORK_EVENTS](evt);\n }\n\n // Connect work event listeners that are set up after exec\n this.work.on('newListener', (evt) => {\n if (evt === 'newListener') return;\n this[LISTEN_TO_WORK_EVENTS](evt);\n });\n }\n\n /**\n * Subscribes to either reliable events or optional events\n * @param {string[]} events \n */\n async [LISTEN_TO_EVENTS](events) {\n\n const reliableEvents = [];\n const optionalEvents = [];\n for (let eventName of events) {\n eventName = eventName.toLowerCase();\n if (this[INTERNAL_SYMBOL].subscribedEvents.has(eventName))\n {\n // already subscribed to this event\n continue;\n }\n else\n {\n this[INTERNAL_SYMBOL].subscribedEvents.add(eventName);\n \n if (this.eventTypes[eventName] && this.eventTypes[eventName].reliable)\n {\n reliableEvents.push(eventName)\n }\n else if (this.eventTypes[eventName] && !this.eventTypes[eventName].reliable)\n {\n optionalEvents.push(eventName)\n }\n else\n {\n // console.debug('606: listening for unexpected/unsupported event:', eventName);\n }\n }\n }\n if (debugging('dcp-client')) {\n console.debug('reliableEvents', reliableEvents);\n console.debug('optionalEvents', optionalEvents);\n }\n await this.eventSubscriber.subscribeManyEvents(reliableEvents, optionalEvents, { filter: { job: this.address } })\n }\n\n /**\n * Establishes listeners for worker events when requested by the client\n * @param {string} eventName \n */\n async [LISTEN_TO_WORK_EVENTS](eventName) {\n if (this[INTERNAL_SYMBOL].subscribedWorkerEvents.has(eventName)) {\n // already subscribed to this event\n return;\n }\n else\n {\n this[INTERNAL_SYMBOL].subscribedWorkerEvents.add(eventName);\n this.eventIntercepts.custom = (ev) => this.work.emit(eventName, ev)\n const optionalEvents = ['custom'];\n await this.eventSubscriber.subscribeManyEvents([], optionalEvents, { filter: { job: this.address } });\n }\n }\n\n /**\n * Takes result events as input, stores the result and fires off\n * events on the job handle as required. 
(result, duplicate-result)\n *\n * @param {object} ev - the event recieved from protocol.listen('/results/0xThisGenAdr')\n */\n async [ON_RESULT] (ev) {\n if (this[INTERNAL_SYMBOL].results === null) {\n // This should never happen - the onResult event should only be established/called\n // in addListeners which should also initialize the internal results array\n throw new Error('Job.onResult was invoked before initializing internal results');\n }\n \n const { result: _result, time } = ev.result;\n debugging('dcp-client') && console.debug('ON_RESULT', _result);\n let result = await fetchURI(_result);\n\n if (this[INTERNAL_SYMBOL].results[ev.sliceNumber]) {\n const changed = JSON.stringify(this[INTERNAL_SYMBOL].results[ev.sliceNumber]) !== JSON.stringify(result);\n this.emit('duplicate-result', { sliceNumber: ev.sliceNumber, changed });\n }\n\n this[INTERNAL_SYMBOL].results[ev.sliceNumber] = result;\n this[INTERNAL_SYMBOL].resultsAvailable[ev.sliceNumber] = true;\n this.emit('result', { sliceNumber: ev.sliceNumber, result });\n this.emit('resultsUpdated');\n }\n\n /**\n * Receives status events from the scheduler, updates the local status object\n * and emits a 'status' event\n *\n * @param {object} ev - the status event received from\n * protocol.listen('/status/0xThisGenAdr')\n * @param {boolean} emitStatus - value indicating whether or not the status\n * event should be emitted\n */\n [ON_STATUS]({ runStatus, total, distributed, computed }, emitStatus = true) {\n Object.assign(this[INTERNAL_SYMBOL].status, {\n runStatus,\n total,\n distributed,\n computed,\n });\n\n if (emitStatus) {\n this.emit('status', this.status);\n }\n }\n\n /**\n * Sends a request to the scheduler to deploy the job.\n */\n async [DEPLOY_JOB] () {\n const { payloadDetails } = this[INTERNAL_SYMBOL];\n \n this[SNAPSHOT](); /* .payloadDetails now up to date */\n \n /* Send sideloader bundle to the package server */\n if (DCP_ENV.platform === 'nodejs' && this[INTERNAL_SYMBOL].dependencies.length) {\n try\n {\n let {pkg, unresolved} = await this._publishLocalModules();\n\n payloadDetails.dependencies = unresolved;\n if (pkg)\n payloadDetails.dependencies.push(pkg.name + '/' + sideloaderModuleIdentifier); \n }\n catch(error)\n {\n throw new DCPError(`Error trying to communicate with package manager server: ${error}`);\n }\n \n }\n \n this.readyStateChange('preauth');\n\n /* eagerly connect to dependent services for better performance */\n computeGroups.keepAlive()\n\n const adhocId = payloadDetails.uuid.slice(payloadDetails.uuid.length - 6, payloadDetails.uuid.length);\n const schedId = await dcpConfig.scheduler.identity;\n const myId = await wallet.getId();\n const preauthToken = await bankUtil.preAuthorizePayment(schedId, payloadDetails.maxDeployPayment, this.paymentAccountKeystore);\n const { dataRange, dataValues, dataPattern, sliceCount } = marshalInputData(payloadDetails.data);\n if(dataValues)\n this[INTERNAL_SYMBOL].dataValues = dataValues;\n\n this.readyStateChange('deploying');\n\n /* Payload format is documented in scheduler-v4/libexec/job-submit/operations/submit.js */\n const submitPayload = {\n owner: myId.address,\n paymentAccount: this.paymentAccountKeystore.address,\n priority: 0, // @nyi\n\n workFunctionURI: payloadDetails.workFunctionURI,\n uuid: payloadDetails.uuid,\n mvMultSlicePayment: Number(payloadDetails.feeStructure.marketValue) || 0, // @todo: improve feeStructure internals to better reflect v4\n absoluteSlicePayment: Number(payloadDetails.feeStructure.maxPerRequest) || 0,\n requirePath: 
payloadDetails.requirePath,\n modulePath: payloadDetails.modulePath,\n dependencies: payloadDetails.dependencies,\n requirements: payloadDetails.requirements, /* capex */\n localExec: payloadDetails.localExec,\n force100pctCPUDensity: this.force100pctCPUDensity,\n estimationSlices: payloadDetails.estimationSlices,\n greedyEstimation: payloadDetails.greedyEstimation,\n workerConsole: this.workerConsole,\n isCI: this.isCI,\n\n description: payloadDetails.public.description || 'Discreetly making the world smarter',\n name: payloadDetails.public.name || 'Ad-Hoc Job' + adhocId,\n \n preauthToken, // XXXwg/er @todo: validate this after fleshing out the stub(s)\n\n resultStorageType: payloadDetails.resultStorageType, // @todo: implement other result types\n resultStorageDetails: payloadDetails.resultStorageDetails, // Content depends on resultStorageType\n resultStorageParams: payloadDetails.resultStorageParams, // Post params for off-prem storage\n dataRange,\n dataPattern,\n sliceCount\n };\n\n /* Determine thee type of the arguments option and set the submit message payload accordingly. */\n if (Array.isArray(payloadDetails.arguments) && payloadDetails.arguments.length === 1 && payloadDetails.arguments[0] instanceof DcpURL) {\n submitPayload.arguments = payloadDetails.arguments[0].href;\n } else if (payloadDetails.arguments instanceof RemoteDataSet) {\n submitPayload.marshaledArguments = kvinMarshal(payloadDetails.arguments.map(e => new URL(e)))\n } else if (payloadDetails.arguments) {\n try {\n submitPayload.marshaledArguments = kvinMarshal(Array.from(payloadDetails.arguments));\n } catch(e) {\n throw new Error(`Could not convert job arguments to Array (${e.message})`);\n }\n }\n \n if (payloadDetails.localExec && !DCP_ENV.isBrowserPlatform)\n {\n const workFunctionFile = createTempFile('dcp-localExec-workFunction-XXXXXXXXX', 'js');\n const argumentsFile = createTempFile('dcp-localExec-arguments-XXXXXXXXX', 'js');\n \n // For allowed origins of the localexec worker. Only allow the origins (files in this case) in this list.\n this.localExecAllowedFiles = [workFunctionFile.filename, argumentsFile.filename];\n\n // get the workFunctionURI string before writing to file to prevent the need to double-decode the work function in the worker.\n const workFunction = await fetchURI(payloadDetails.workFunctionURI);\n workFunctionFile.writeSync(workFunction);\n \n const workFunctionFileURL = new URL('file://' + workFunctionFile);\n submitPayload.workFunctionURI = workFunctionFileURL.href;\n payloadDetails.workFunctionURI = workFunctionFileURL.href;\n \n if (submitPayload.marshaledArguments)\n {\n argumentsFile.writeSync(JSON.stringify(submitPayload.marshaledArguments));\n const argumentsFileURL = new URL('file://' + argumentsFile.filename);\n submitPayload.marshaledArguments = kvinMarshal([argumentsFileURL]);\n }\n }\n\n // XXXpfr Excellent tracing.\n if (debugging('dcp-client')) {\n dumpObject(submitPayload, 'Submit: Job Index: submitPayload', 256);\n console.debug('Before Deploy', myId);\n }\n\n // Deploy the job!\n const deployed = await this.deployConnection.send('submit', submitPayload, myId);\n\n if (!deployed.success) {\n // Yes, it is possible for deployed or deployed.payload to be undefined.\n if (deployed.payload) {\n if (deployed.payload.code === 'ENOTFOUND')\n throw new DCPError(`Failed to submit job to scheduler. 
Account: ${submitPayload.paymentAccount} was not found or does not have sufficient balance (${deployed.payload.info.deployCost} DCCs needed to deploy this job)`, deployed.payload);\n throw new DCPError('Failed to submit job to scheduler', deployed.payload);\n }\n throw new DCPError('Failed to submit job to scheduler (no payload)', deployed ? deployed : '');\n }\n\n debugging('dcp-client') && console.debug('After Deploy', JSON.stringify(deployed));\n\n this.address = payloadDetails.address = deployed.payload.job;\n this[INTERNAL_SYMBOL].deployCost = deployed.payload.deployCost;\n\n if (!payloadDetails.status)\n payloadDetails.status = {\n runStatus: null,\n total: 0,\n computed: 0,\n distributed: 0,\n };\n \n payloadDetails.runStatus = payloadDetails.status.runStatus = deployed.payload.status;\n payloadDetails.status.total = deployed.payload.lastSliceNumber;\n payloadDetails.running = true;\n \n this[INTERNAL_SYMBOL].payloadDetails = {\n ...this[INTERNAL_SYMBOL].payloadDetails,\n ...payloadDetails,\n };\n }\n\n /**\n * This function is identical to exec, except that the job is executed locally\n * in the client.\n * @async\n * @param {number} cores - the number of local cores in which to execute the job.\n * @param {...any} args - The remaining arguments are identical to the arguments of exec\n * @return {Promise<ResultHandle>} - resolves with the results of the job, rejects on an error\n * @access public\n */\n localExec (cores = 1, ...args) {\n this[INTERNAL_SYMBOL].payloadDetails.localExec = true;\n this[INTERNAL_SYMBOL].payloadDetails.estimationSlices = 0;\n this[INTERNAL_SYMBOL].payloadDetails.greedyEstimation = false;\n this[INTERNAL_SYMBOL].payloadDetails.isCI = false;\n\n let worker;\n this.on('accepted', () => {\n // Start a worker for this job\n worker = new Worker({\n localExec: true,\n jobAddresses: [this.address],\n allowedOrigins: this.localExecAllowedFiles,\n paymentAddress: this[INTERNAL_SYMBOL].paymentAccountKeystore.address,\n maxWorkingSandboxes: cores,\n sandboxOptions: {\n ignoreNoProgress: true,\n SandboxConstructor: (DCP_ENV.platform === 'nodejs' &&\n (__webpack_require__(/*! ../worker/evaluators */ \"./src/dcp-client/worker/evaluators/index.js\").nodeEvaluatorFactory)())\n },\n });\n\n worker.start().catch((e) => {\n console.error(\"Failed to start worker for localExec:\");\n console.error(e.message);\n });\n });\n\n return this.exec(...args).finally(() => {\n if (worker) {\n setTimeout(() => {\n // stop the worker\n worker.stop(true);\n }, 3000);\n }\n });\n }\n\n /**\n * The current job status. Will have undefined values when the handle hasn't had exec called on it yet.\n * @access public\n * @readonly\n * @type {object}\n * @property {number} total Total number of slices in the job\n * @property {number} distributed Number of slices that have been distributed\n * @property {number} computed Number of slices that have completed execution (returned a result)\n * @property {string} runStatus Current runStatus of the job\n */\n get status () {\n // Shallow-copy to prevent modification\n return { ...this[INTERNAL_SYMBOL].status };\n }\n\n get requirePath() {\n return this[INTERNAL_SYMBOL].requirePath;\n }\n\n /**\n * This function specifies a module dependency (when the argument is a string)\n * or a list of dependencies (when the argument is an array) of the work\n * function. 
This function can be invoked multiple times before deployment.\n * @param {string | string[]} modulePaths - A string or array describing one\n * or more dependencies of the job.\n * @access public\n */\n requires(modulePaths) {\n if (\n typeof modulePaths !== 'string' &&\n (!Array.isArray(modulePaths) ||\n modulePaths.some((modulePath) => typeof modulePath !== 'string'))\n ) {\n throw new TypeError(\n 'The argument to dependencies is not a string or an array of strings',\n );\n } else if (modulePaths.length === 0) {\n throw new RangeError(\n 'The argument to dependencies cannot be an empty string or array',\n );\n } else if (\n Array.isArray(modulePaths) &&\n modulePaths.some((modulePath) => modulePath.length === 0)\n ) {\n throw new RangeError(\n 'The argument to dependencies cannot be an array containing an empty string',\n );\n }\n\n if (!Array.isArray(modulePaths)) {\n modulePaths = [modulePaths];\n }\n\n for (const modulePath of modulePaths) {\n if (modulePath[0] !== '.' && modulePath.indexOf('/') !== -1) {\n const modulePrefixRegEx = /^(.*)\\/.*?$/;\n const [, modulePrefix] = modulePath.match(modulePrefixRegEx);\n if (\n modulePrefix &&\n this[INTERNAL_SYMBOL].requirePath.indexOf(modulePrefix) === -1\n ) {\n this[INTERNAL_SYMBOL].requirePath.push(modulePrefix);\n }\n }\n\n this[INTERNAL_SYMBOL].dependencies.push(modulePath);\n }\n }\n\n get slicePaymentOffer () {\n return this[INTERNAL_SYMBOL].slicePaymentOffer;\n }\n\n /**\n * The keystore that will be used to pay for the job. Can be set with {@link Job.setPaymentAccountKeystore} or by providing a keystore to {@link Job.exec}.\n * @readonly\n * @access public\n * @type {module:dcp/wallet.AuthKeystore}\n */\n get paymentAccountKeystore () {\n return this[INTERNAL_SYMBOL].paymentAccountKeystore;\n }\n\n /** Set the account upon which funds will be drawn to pay for the job.\n * @param {module:dcp/wallet.AuthKeystore} keystore A keystore that representa a bank account.\n * @access public\n */\n setPaymentAccountKeystore (keystore) {\n if (this.address) {\n if (!keystore.address.eq(this[INTERNAL_SYMBOL].payloadDetails.owner)) {\n let message = \"Cannot change payment account after job has been deployed\";\n this.emit('EPERM', message);\n throw new Error(`EPERM: ${message}`);\n }\n }\n \n if (!(keystore instanceof wallet.Keystore)) {\n let e = new Error('Not an instance of Keystore: ' + keystore.toString())\n console.log(`Not an instance of Keystore: ${keystore}`)\n throw e\n }\n this[INTERNAL_SYMBOL].paymentAccountKeystore = keystore;\n }\n\n /** Set the slice payment offer. This is equivalent to the first argument to exec.\n * @param {number} slicePaymentOffer - The number of DCC the user is willing to pay to compute one slice of this job\n */\n setSlicePaymentOffer (slicePaymentOffer) {\n this[INTERNAL_SYMBOL].slicePaymentOffer = new SlicePaymentOffer(slicePaymentOffer);\n }\n\n /**\n * @param {URL|DcpURL} locationUrl - A URL object\n * @param {object} postParams - An object with any parameters that a user would like to be passed to a \n * remote result location. This object is capable of carry API keys for S3, \n * DropBox, etc. 
These parameters are passed as parameters in an \n * application/x-www-form-urlencoded request.\n */\n setResultStorage(locationUrl, postParams) {\n if (locationUrl instanceof URL || locationUrl instanceof DcpURL) {\n this[INTERNAL_SYMBOL].resultStorageDetails = locationUrl;\n } else {\n const e = new Error('Not an instance of a DCP URL: ' + locationUrl);\n console.log('Not an instance of a DCP URL ' + locationUrl);\n throw e;\n }\n\n // resultStorageParams contains any post params required for off-prem storage\n if (typeof postParams !== 'undefined' && typeof postParams === 'object' ) {\n this[INTERNAL_SYMBOL].resultStorageParams = postParams;\n } else {\n const e = new Error('Not an instance of a object: ' + postParams);\n console.log('Not an instance of an object ' + postParams);\n throw e;\n }\n\n // Some type of object here\n this[INTERNAL_SYMBOL].resultStorageType = 'pattern';\n }\n\n /** close an open job to indicate we are done adding data so it is okay to finish\n * the job at the appropriate time\n */\n async close() {\n return this.deployConnection.send('closeJob', {\n job: this.id,\n });\n }\n}\n\n/**\n * @typedef {object} MarshaledInputData\n * @property {any[]} [dataValues]\n * @property {string} [dataPattern]\n * @property {RangeObject} [dataRange]\n * @property {number} [sliceCount]\n */\n\n/**\n * Depending on the shape of the job's data, resolve it into a RangeObject, a\n * Pattern, or a values array, and return it in the appropriate property.\n *\n * @param {any} data Job's input data\n * @return {MarshaledInputData} An object with one of the following properties set:\n * - dataValues: job input is an array of arbitrary values \n * - dataPattern: job input is a URI pattern \n * - dataRange: job input is a RangeObject (and/or friends)\n */\nfunction marshalInputData(data) {\n if (!(data instanceof Object\n || data instanceof SuperRangeObject)) {\n throw new TypeError(`Invalid job data type: ${typeof data}`);\n }\n\n /**\n * @type MarshaledInputData\n */\n const marshalledInputData = {};\n\n // TODO(wesgarland): Make this more robust.\n if (data instanceof SuperRangeObject ||\n (data.hasOwnProperty('ranges') && data.ranges instanceof MultiRangeObject) ||\n (data.hasOwnProperty('start') && data.hasOwnProperty('end'))) {\n marshalledInputData.dataRange = data;\n } else if (Array.isArray(data)) {\n marshalledInputData.dataValues = data;\n } else if (data instanceof URL || data instanceof DcpURL) {\n marshalledInputData.dataPattern = String(data);\n } else if(data instanceof RemoteDataSet) {\n marshalledInputData.dataValues = data.map(e => new URL(e));\n } else if(data instanceof RemoteDataPattern) {\n marshalledInputData.dataPattern = data['pattern'];\n marshalledInputData.sliceCount = data['sliceCount'];\n }\n\n log('marshalledInputData:', marshalledInputData);\n return marshalledInputData;\n}\n/**\n * marshal the value using kvin or instance of the kvin (tunedKvin)\n * tunedKvin is defined if job.tuning.kvin is specified.\n *\n * @param {any} value \n * @return {object} A marshaled object\n * \n */\nfunction kvinMarshal (value) {\n if (tunedKvin)\n return tunedKvin.marshal(value);\n\n return kvin.marshal(value);\n}\n\nObject.assign(exports, {\n Job,\n SlicePaymentOffer,\n ResultHandle,\n});\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/job/index.js?");
4171
+ eval("/**\n * @file job/index.js\n * @author Eddie Roosenmaallen, eddie@kingsds.network\n * Matthew Palma, mpalma@kingsds.network\n * Wes Garland, wes@kingsds.network\n * Paul, paul@kingsds.network\n * @date November 2018\n * November 2018\n * February 2022\n * May 2022\n *\n * This module implements the Compute API's Job Handle\n *\n */\n/** @typedef {import('dcp/dcp-client/wallet/keystore').Keystore} Keystore */\n\n\nconst { BigNumber } = __webpack_require__(/*! bignumber.js */ \"./node_modules/bignumber.js/bignumber.js\");\nconst { v4: uuidv4 } = __webpack_require__(/*! uuid */ \"./node_modules/uuid/dist/esm-browser/index.js\");\nconst { EventEmitter, PropagatingEventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { RangeObject, MultiRangeObject, DistributionRange, SuperRangeObject } = __webpack_require__(/*! dcp/dcp-client/range-object */ \"./src/dcp-client/range-object.js\");\nconst { fetchURI, encodeDataURI, dumpObject } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { getTextEncoder, createTempFile } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst serialize = __webpack_require__(/*! dcp/utils/serialize */ \"./src/utils/serialize.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst { EventSubscriber } = __webpack_require__(/*! dcp/events/event-subscriber */ \"./src/events/event-subscriber.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst ClientModal = __webpack_require__(/*! dcp/dcp-client/client-modal */ \"./src/dcp-client/client-modal/index.js\");\nconst { Worker } = __webpack_require__(/*! dcp/dcp-client/worker */ \"./src/dcp-client/worker/index.js\");\nconst { RemoteDataSet } = __webpack_require__(/*! dcp/dcp-client/remote-data-set */ \"./src/dcp-client/remote-data-set.js\");\nconst { RemoteDataPattern } = __webpack_require__(/*! dcp/dcp-client/remote-data-pattern */ \"./src/dcp-client/remote-data-pattern.js\");\nconst { ResultHandle } = __webpack_require__(/*! ./result-handle */ \"./src/dcp-client/job/result-handle.js\");\nconst { SlicePaymentOffer } = __webpack_require__(/*! ./slice-payment-offer */ \"./src/dcp-client/job/slice-payment-offer.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst dcpPublish = __webpack_require__(/*! dcp/common/dcp-publish */ \"./src/common/dcp-publish.js\");\nconst computeGroups = __webpack_require__(/*! dcp/dcp-client/compute-groups */ \"./src/dcp-client/compute-groups/index.js\");\nconst schedulerConstants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { sliceStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { jobStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst bankUtil = __webpack_require__(/*! dcp/dcp-client/bank-util */ \"./src/dcp-client/bank-util.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp-client');\nconst kvin = __webpack_require__(/*! 
kvin */ \"./node_modules/kvin/kvin.js\");\nlet tunedKvin;\n\nconst TextEncoder = getTextEncoder();\n\nconst log = (...args) => {\n if (debugging('job')) {\n console.debug('dcp-client:job', ...args);\n }\n};\n\nconst ON_BROWSER = DCP_ENV.isBrowserPlatform;\nconst sideloaderModuleIdentifier = 'sideloader-v1';\n\n// Symbols used to hide private members and functions on the Job instance\nconst debugBuild = ((__webpack_require__(/*! dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug');\nconst INTERNAL_SYMBOL = debugBuild ? '_' : Symbol('Job Internals');\nconst SNAPSHOT = debugBuild ? '_snapshot' : Symbol('Job.snapshot');\nconst DEPLOY_JOB = debugBuild ? '_deploy' : Symbol('Job.deploy');\n\nconst ADD_LISTENERS = Symbol('Job.addListeners');\nconst LISTEN_TO_EVENTS = Symbol('Job.listenToEvents');\nconst LISTEN_TO_WORK_EVENTS = Symbol('Job.listenToWorkEvents');\nconst ON_RESULT = Symbol('Job.onResult');\nconst ON_STATUS = Symbol('Job.onStatus');\n\nexports.JOB_INTERNAL_SYMBOL = INTERNAL_SYMBOL; /* allow friends to access our guts, eg. job/result-handle */\n\nconst DEFAULT_REQUIREMENTS = {\n engine: {\n es7: null,\n spidermonkey: null\n },\n environment: {\n webgpu: null,\n offscreenCanvas: null,\n fdlibm: null\n },\n browser: {\n chrome: null\n },\n details: {\n offscreenCanvas: {\n bigTexture4096: null,\n bigTexture8192: null,\n bigTexture16384: null,\n bigTexture32768: null,\n }\n },\n discrete: null,\n useStrict: null,\n};\nconst ZERO_COST_HOLD_ADDRESS = '0x' + '0'.repeat(130);\n\n/** @typedef {import('../range-object').RangeLike} RangeLike */\n\n/**\n * Ensure input data is an appropriate format\n * @param {RangeObject | DistributionRange | RemoteDataSet | Array | Iterable}\n * inputData - A URI-shaped string, a [Multi]RangeObject-constructing value, or\n * an array of slice data\n * @return {RangeObject | RangeLike | DistributionRange | RemoteDataSet | Array}\n * The coerced input in an appropriate format ([Multi]RangeObject,\n * DistributionRange, RemoteDataSet, or array)\n */\nconst wrangleData = (inputData) => {\n if (typeof inputData === 'object' && !!inputData.ranges) { return new MultiRangeObject(inputData) }\n\n if (RangeObject.isRangelike(inputData)) { return inputData }\n if (RangeObject.isRangeObject(inputData)) { return inputData }\n if (DistributionRange.isDistribution(inputData)) { return inputData }\n if (RangeObject.isProtoRangelike(inputData)) { return new RangeObject(inputData) }\n if (DistributionRange.isProtoDistribution(inputData)) { return new DistributionRange(inputData) }\n if (RemoteDataSet.isRemoteDataSet(inputData)) { return inputData }\n if (RemoteDataPattern.isRemoteDataPattern(inputData)) { return inputData }\n\n return Array.isArray(inputData) ? 
inputData : [inputData];\n};\n\n// Used to validate the requirements object,\n// applies the default requirements schema\nconst applyObjectSchema = (obj, schema, errContext='', scope='') => {\n let checkedObjs = [];\n\n for (let p in schema) {\n let fullPropScope = scope.concat(p);\n if (!(p in obj)) {\n if (typeof schema[p] === 'object' && schema[p] !== null) {\n obj[p] = {};\n checkedObjs.push(fullPropScope);\n applyObjectSchema(obj[p], schema[p], errContext, fullPropScope.concat('.'));\n } else obj[p] = schema[p];\n } else if (typeof schema[p] === 'object' && schema[p] !== null && !checkedObjs.includes(fullPropScope)) {\n if (typeof obj[p] !== 'object') throw new Error(`${errContext}: Schema mismatch, property '${fullPropScope}' should be an object.`);\n else {\n checkedObjs.push(fullPropScope);\n applyObjectSchema(obj[p], schema[p], errContext, fullPropScope.concat('.'));\n }\n } else if ((typeof schema[p] !== 'object' || schema[p] === null)\n && typeof obj[p] !== 'boolean' && obj[p] !== null) {\n throw new Error(`${errContext}: Schema mismatch, object property '${fullPropScope}' should be a boolean.`);\n }\n }\n\n for (let p in obj) {\n let fullPropScope = scope.concat(p);\n if (!(p in schema)) throw new Error(`${errContext}: Schema mismatch, object has extra key '${fullPropScope}'.`);\n else if (typeof obj[p] === 'object' && obj[p] !== null && !checkedObjs.includes(fullPropScope)) {\n checkedObjs.push(fullPropScope);\n applyObjectSchema(obj[p], schema[p], errContext, fullPropScope.concat('.'));\n }\n }\n}\n\n/**\n * @classdesc The Compute API's Job Handle (see {@link https://docs.dcp.dev/specs/compute-api.html#job-handles|Compute API spec})\n * Job handles are objects which correspond to jobs. \n * They are created by some exports of the compute module, such as {@link module:dcp/compute.do|compute.do} and {@link module:dcp/compute.for|compute.for}.\n * @extends module:dcp/dcp-events.PropagatingEventEmitter\n * @hideconstructor\n * @access public\n */\nclass Job extends PropagatingEventEmitter {\n /**\n * This event is emitted when the job is accepted by the scheduler on deploy.\n * \n * @event Job#accepted\n * @access public\n * @type {object}\n * @property {object} job Original object that was delivered to the scheduler for deployment\n *//**\n * Fired when the job is cancelled.\n * \n * @event Job#cancel\n * @access public\n *//**\n * Fired when a result is returned.\n * \n * @event Job#result\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {string} task ID of the task (slice) the result came from\n * @property {number} sort The index of the slice\n * @property {object} result\n * @property {string} result.request\n * @property {*} result.result The value returned from the work function\n *//**\n * Fired when the result handle is modified, either when a new `result` event is fired or when the results are populated with `results.fetch()`\n * \n * @event Job#resultsUpdated\n * @access public\n *//**\n * Fired when the job has been completed.\n * \n * @event Job#complete\n * @access public\n * @type {ResultHandle}\n *//**\n * Fired when the job's status changes.\n * \n * @event Job#status\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} total Total number of slices in the job\n * @property {number} distributed Number of slices that have been distributed\n * @property {number} computed Number of slices that have completed execution (returned a result)\n * @property 
{string} runStatus Current runStatus of the job\n *//**\n * Fired when a slice throws an error.\n * \n * @event Job#error\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} sliceIndex Index of the slice that threw the error\n * @property {string} message The error message\n * @property {string} stack The error stacktrace\n * @property {string} name The error type name\n *//**\n * Fired when a slice uses one of the console log functions.\n * \n * @event Job#console\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} sliceIndex The index of the slice that produced this event\n * @property {string} level The log level, one of `debug`, `info`, `log`, `warn`, or `error`\n * @property {string} message The console log message\n *//**\n * Fired when a slice is stopped for not calling progress. Contains information about how long the slice ran for, and about the last reported progress calls.\n * \n * @event Job#noProgress\n * @access public\n * @type {object}\n * @property {string} jobAddress Address of the job\n * @property {number} sliceIndex The index of the slice that failed due to no progress\n * @property {number} timestamp How long the slice ran before failing\n * @property {object} progressReports\n * @property {object} progressReports.last The last progress report received from the worker\n * @property {number} progressReports.last.timestamp Time since the start of the slice\n * @property {number} progressReports.last.progress Progress value reported\n * @property {*} progressReports.last.value The last value that was passed to the progress function\n * @property {number} progressReports.last.throttledReports Number of calls to progress that were throttled since the last report\n * @property {object} progressReports.lastUpdate The last determinate (update to the progress param) progress report received from the worker\n * @property {number} progressReports.lastUpdate.timestamp\n * @property {number} progressReports.lastUpdate.progress\n * @property {*} progressReports.lastUpdate.value\n * @property {number} progressReports.lastUpdate.throttledReports\n *//**\n * Identical to `noProgress`, except that it also contains the data that the slice was executed with.\n * \n * @event Job#noProgressData\n * @access public\n * @type {object}\n * @property {*} data The data that the slice was executed with\n *//**\n * Fired when the job is paused due to running out of funds. 
The job can be resumed by escrowing more funds then resuming the job.\n * \n * Event payload is the estimated funds required to complete the job\n * \n * @event Job#ENOFUNDS\n * @access public\n * @type {BigNumber}\n *//**\n * Fired when the job is cancelled due to the work function not calling the `progress` method frequently enough.\n * \n * @event Job#ENOPROGRESS\n * @access public\n *//**\n * The job was cancelled because scheduler has determined that individual tasks in this job exceed the maximum allowable execution time.\n * \n * @event Job#ESLICETOOSLOW\n * @access public\n *//**\n * Fired when the job is cancelled because too many work functions are terminating with uncaught exceptions.\n * \n * @event Job#ETOOMANYERRORS\n * @access public\n */\n\n /**\n * @form1 new Job(job_shaped_object)\n * @form2 new Job('application_worker_address'[, data[, arguments]])\n * @form3b new Job('worker source'[, data[, arguments]])\n * @form3b new Job(worker_function[, data[, arguments]])\n */\n\n constructor() {\n super('Job');\n\n this.readyStateChange = (readyState) => {\n this.readyState = readyState;\n this.emit('readyStateChange', this.readyState);\n };\n this.readyStateChange(sliceStatus.new);\n \n /*\n * Private members\n */\n this[INTERNAL_SYMBOL] = {\n events: new EventEmitter('Job Internal'),\n connected: false, // set to true after first call to exec\n /**\n * This object holds details for generating DCPv4 messages about this job.\n * It is updated everytime we call SNAPSHOT.\n */\n payloadDetails: {\n localExec: false,\n },\n\n /**\n * The slicePaymentOffer default value is set to compute.marketValue, in .exec() \n */\n slicePaymentOffer: null,\n paymentAccountKeystore: null,\n\n /**\n * These are private but getters are provided so they can be modified but\n * not replaced.\n */\n /**\n * List of module prefixes using in CommonJS module resolution.\n * @type {string[]}\n */\n requirePath: [],\n\n /**\n * List of modules the job needs.\n * @type {string[]}\n */\n\n dependencies: [],\n\n // This array contains the names of worker events that\n // had listeners registered before exec is called, once\n // the job has been deployed then the proper event handlers\n // will be generated from this list\n subscribedEvents: new Set(),\n subscribedWorkerEvents: new Set(),\n\n results: [],\n resultsAvailable: [],\n resultStorageType: 'values',\n resultStorageDetails: undefined,\n resultStorageParams: undefined, //Holds the POST params and URL for off-prem storage\n\n // Tracks job progress\n status: {\n runStatus: null,\n total: null,\n distributed: null,\n computed: null,\n },\n\n // Cancel is special. We need to fire an `alert` when the job is canceled. \n // If they are listening for the (reliable) event then they need to be able to\n // prevent it. If not, then it'll be handled by the `exec` rejection via the 'stopped'\n // event. The result is that we want only one of two ways the `alert` can be fired\n // to be active based on whether or not the user is listening for cancel. \n // Once DCP-1150 lands, we won't need to listen on stopped since more failures will fire a cancel event.\n listeningForCancel: false,\n // TODO - cancel events should have more info in them. 
DCP-1150\n cancelAlert: () => ClientModal.alert(\"More details in console...\", {title: 'Job Canceled'}),\n\n listeningForError: false,\n errorAlert: (err) => ClientModal.alert(err, {title: 'Unexpected Error'}),\n\n listeningForNoFunds: false,\n noFundsAlert: (event) => {\n let msg = `Job \"${event.name}\" is paused due to insufficient funds. ${event.fundsRequired} DCC is required to compute remaining ${event.remainingSlices} slices.\\njobId: ${event.job}\\nbankAccount: ${event.bankAccount}`; \n ClientModal.alert(msg, { title: 'Job paused' })\n },\n };\n\n /*\n * Public members\n */\n // Deep copy the default requirements. I know, I hate it too\n /**\n * An object describing the requirements that workers must have to be eligible for this job. See\n * {@link https://docs.dcp.dev/specs/compute-api.html#requirements-objects|Requirements Objects}.\n *\n * @type {object}\n * @access public\n */\n this.requirements = JSON.parse(JSON.stringify(DEFAULT_REQUIREMENTS));\n this.schedulerURL = undefined;\n this.bankURL = undefined;\n this.deployURL = '';\n this.collateResults = true;\n this.listeningForResults = false;\n /**\n * @see {@link https://kingsds.atlassian.net/browse/DCP-1475?atlOrigin=eyJpIjoiNzg3NmEzOWE0OWI4NGZkNmI5NjU0MWNmZGY2OTYzZDUiLCJwIjoiaiJ9|Jira Issue}\n */\n let uuid = uuidv4();\n\n /**\n * An object describing the cost the user believes each the average slice will incur, in terms of CPU/GPU and I/O.\n * If defined, this object is used to provide initial scheduling hints and to calculate escrow amounts.\n *\n * @type {object}\n * @access public\n */\n this.initialSliceProfile = undefined;\n\n /**\n * A place to store public-facing attributes of the job. Anything stored on this object will be available inside the work \n * function (see {@link module:dcp/compute~sandboxEnv.work}). The properties documented here may be used by workers to display what jobs are currently being \n * worked on.\n * @access public\n * @property {string} name Public-facing name of this job.\n * @property {string} description Public-facing description for this job.\n * @property {string} link Public-facing link to external resource about this job.\n */\n this.public = {\n name: null,\n description: null,\n link: null,\n };\n\n this.contextId = null;\n this.force100pctCPUDensity = false;\n this.workerConsole = false;\n this.isCI = false;\n\n // The following 3 public members are only populated after the job has been deployed\n this.address = null;\n this.receipt = null;\n this.meanSliceProfile = null;\n\n /**\n * A number (can be null, undefined, or infinity) describing the estimationSlicesRemaining in the jpd (dcp-2593)\n * @type {number}\n * @access public\n */\n this.estimationSlices = undefined;\n \n /**\n * tunable parameters per job\n * @access public\n * @param {object} tuning \n * @param {string} tuning.kvin Encode the TypedArray into a string, trying multiple methods to determine optimum \n * size/performance. 
The this.tune variable affects the behavior of this code this:\n * @param {boolean} speed If true, only do naive encoding: floats get represented as byte-per-digit strings\n * @param {boolean} size If true, try the naive, ab8, and ab16 encodings; pick the smallest\n * If both are false try the naive encoding if under typedArrayPackThreshold and use if smaller\n * than ab8; otherwise, use ab8\n */\n this.tuning = {\n kvin: {\n size: false,\n speed: false,\n },\n }\n\n /**\n * When true, allows a job in estimation to have requestTask return multiple estimation slices.\n * This flag applies independent of infinite estimation, viz., this.estimationSlices === null .\n * @type {boolean}\n * @access public\n */\n this.greedyEstimation = false;\n\n /* We avoid using job.id internally because it is easy to confuse with db::jobs.id, but is an API\n * interface that we present to end-user developers so we need to keep it.\n */\n Object.defineProperty(this, 'id', {\n get: () => this.address,\n set: (id) => this.address = id\n });\n\n // \n /**\n * An EventEmitter for custom events dispatched by the work function.\n * @type {module:dcp/dcp-events.EventEmitter}\n * @access public\n * @example\n * // in sandbox\n * work.emit('myEventName', 1, [2], \"three\");\n * // clientside\n * job.work.on('myEventName', (num, arr, string) => { });\n */\n this.work = new EventEmitter('job.work');\n\n //Initialize the eventSubscriber so each job has unique eventSubscriber\n this.eventSubscriber = new EventSubscriber(this);\n \n // Some events can't be emitted 'naturally' without having weird/wrong output.\n // An example of this is results. When results are returned from the scheduler,\n // They come in as a dataURI of kvin-ified results. We need to parse all that before\n // We actually send it to the client. For such events, we will intercept them, parse\n // them as needed, then emit the event with the 'fixed' data to the client.\n \n const ceci = this\n const parseConsole = function deserializeConsoleMessage(ev) {\n ceci.emit('console', ev);\n }\n \n this.eventIntercepts = {\n result: (ev) => this[ON_RESULT](ev),\n status: (ev) => this[ON_STATUS](ev),\n cancel: (ev) => this[INTERNAL_SYMBOL].events.emit('stopped', ev),\n console: parseConsole,\n }\n\n this.eventTypes = (__webpack_require__(/*! 
dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\").eventTypes);\n\n this.work.on('newListener', (evt) => {\n if (!this[INTERNAL_SYMBOL].connected && evt !== 'newListener') {\n this[INTERNAL_SYMBOL].subscribedWorkerEvents.add(evt);\n }\n });\n\n this.on('newListener', (evt) => {\n if (!this[INTERNAL_SYMBOL].connected && evt !== 'newListener') {\n this[INTERNAL_SYMBOL].subscribedEvents.add(evt);\n }\n });\n // Form1: If arguments[0] is an object that looks like a job, this is a 'copy constructor'\n // where we inherit as much as possible from the original instance.\n if (typeof arguments[0] === 'object' &&\n arguments[0].type &&\n arguments[0].data &&\n arguments[0].public) {\n \n let src = arguments[0];\n\n this[INTERNAL_SYMBOL].payloadDetails = {\n ...src,\n data: wrangleData(src.data), // rehydrate ranges\n };\n\n if (src.feeStructure) {\n this.setSlicePaymentOffer(src.feeStructure);\n }\n \n if (src.address)\n this.address = src.address;\n if (src.payloadData.status)\n this[ON_STATUS](this[INTERNAL_SYMBOL].payload.status, false);\n if (src.public)\n Object.assign(this.public, src.public);\n } else {\n /* Forms 2-4 */ \n if (typeof arguments[0] === 'function')\n arguments[0] = arguments[0].toString();\n\n if (typeof arguments[0] === 'string') {\n const { encodeDataURI } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n this[INTERNAL_SYMBOL].workFunctionURI = encodeDataURI(arguments[0], 'application/javascript');\n } else if (DcpURL.isURL(arguments[0])) {\n this[INTERNAL_SYMBOL].workFunctionURI = arguments[0].href;\n } \n\n const wrangledInputData = wrangleData(arguments[1] || []);\n const wrangledArguments = wrangleData(arguments[2] || []);\n \n log('wrangledInputData:', wrangledInputData);\n log('wrangledArguments:', wrangledArguments);\n \n Object.assign(this[INTERNAL_SYMBOL].payloadDetails, {\n request: 'main',\n data: wrangledInputData,\n arguments: wrangledArguments,\n });\n }\n\n // This should happen last, it depends on the this.[INTERNAL_SYMBOL].payloadDetails.data array\n /**\n * A Result Handle object used to query and manipulate the output set. 
\n * Present once job has been deployed.\n * @type {ResultHandle}\n * @access public\n */\n this.results = new ResultHandle(this);\n\n /**\n * Read-only copy of the job's uuid (generated or rehydrated via form1 constructor)\n */\n Object.defineProperty(this, 'uuid', {\n get: () => uuid,\n configurable: false,\n enumerable: true,\n });\n \n // each entry contains the computeGroupID, joinKey, joinSecret, joinKeystore\n // Add to public compute group by default\n this.computeGroups = [ Object.assign({}, schedulerConstants.computeGroups.public) ];\n\n\n // Initialize to null so these properties are recognized for the Job class\n this.bankConnection = null;\n this.deployConnection = null;\n this.openBankConn = function openBankConn()\n {\n ceci.bankConnection = new protocolV4.Connection(dcpConfig.bank.services.bankTeller);\n ceci.bankConnection.on('close', ceci.openBankConn);\n }\n\n this.openDeployConn = function openDeployConn()\n {\n ceci.deployConnection = new protocolV4.Connection(dcpConfig.scheduler.services.jobSubmit);\n ceci.deployConnection.on('close', ceci.openDeployConn);\n }\n\n this.openBankConn();\n this.openDeployConn();\n }\n\n /** \n * Cancel the job\n * @access public\n * @param {string} reason If provided, will be sent to client\n */\n async cancel (reason = undefined) {\n const response = await this.deployConnection.send('cancelJob', {\n job: this.address,\n owner: this.paymentAccountKeystore.address,\n reason,\n }, this.paymentAccountKeystore);\n\n return response.payload;\n }\n\n /** \n * Resume this job\n * @access public\n */\n async resume() {\n const response = await this.schedulerConnection.send('resumeJob', {\n job: this.address,\n owner: this.paymentAccountKeystore.address,\n }, this.paymentAccountKeystore);\n\n return response.payload;\n }\n\n /**\n * Helper function for retrieving info about the job. The job must have already been deployed.\n * An alias for {@link module:dcp/compute.getJobInfo}.\n * @access public\n */\n async getJobInfo() {\n return await (__webpack_require__(/*! ../compute */ \"./src/dcp-client/compute.js\").compute.getJobInfo)(this.address);\n }\n\n /**\n * Helper function for retrieving info about the job's slices. The job must have already been deployed.\n * An alias for {@link module:dcp/compute.getSliceInfo}.\n * @access public\n */\n async getSliceInfo() {\n return await (__webpack_require__(/*! 
../compute */ \"./src/dcp-client/compute.js\").compute.getSliceInfo)(this.address);\n }\n \n /**\n * Helper function that tries to upload slicePile to scheduler for the job with the given address\n * If the connection throws, we will continue trying to upload until it has thrown errorTolerance times\n * However, if the upload is unsuccessful, we throw immediately.\n * @param {object} pileMessage \n * @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job \n */\n async safeSliceUpload(pileMessage)\n {\n let payload = undefined;\n let errorTolerance = dcpConfig.job.sliceUploadErrorTolerance; // copy number of times we will tolerate non-success when uploading slices directly from config\n\n while (true) // eslint-disable-line no-constant-condition\n {\n try\n {\n const start = Date.now();\n this.emit('x-dbg-uploadStart', pileMessage.signedMessage.length);\n payload = await this.deployConnection.sendPreparedMessage(pileMessage);\n if (!payload.success) {\n this.emit('x-dbg-uploadBackoff', pileMessage.signedMessage.length);\n throw new DCPError('Cannot upload slice data to scheduler','EUPLOADSCHED');\n }\n else {\n this.emit('x-dbg-uploadProgress', Date.now() - start);\n break;\n }\n }\n catch (error)\n {\n if (--errorTolerance <= 0) {\n this.emit('x-dbg-uploadError', error);\n throw error;\n }\n }\n }\n return payload;\n }\n \n /**\n * This function contains the actual logic behind staggered slice uploads\n * to the scheduler which makes quicker deployment possible.\n * \n * Note that we pass in mostToTake so that the uploadLogic function can update \n * it to the new value it needs to be, and then pass it back to the wrapper \n * function (addSlices) which actually does the work of picking up slices \n * and thus uses this value\n * @param {Array} pile the actual array of slices being uploaded to scheduler\n * @param {Number} mostToTake number of slices that should be taken by the wrapper function (addSlices) \n * which actually does the work of picking up slices and thus uses this value.\n * We pass in mostToTake so that the uploadLogic function can update it to the \n * new value it needs to be, and then pass it back to the wrapper\n * @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job\n */\n async sliceUploadLogic(pile, mostToTake)\n {\n const slicesTaken = pile.length;\n await this.deployConnection.keepalive();\n let pileMessage = await this.deployConnection.prepare('addSliceData', {\n job: this.address,\n dataValues: kvinMarshal(pile),\n });\n \n let pileSize = pileMessage.signedMessage.length;\n \n let newMostToTake;\n let uploadedSlices;\n\n // if the pile is larger than the ceiling but we only took one slice, there's no smaller pile we can make\n // so we upload it anyway but we don't try taking more next time cause we were over the ceiling (which \n // is a hard limit on upload sizes)\n if ((pileSize > dcpConfig.job.uploadSlicesCeiling) && (slicesTaken === 1))\n {\n uploadedSlices = await this.safeSliceUpload(pileMessage);\n newMostToTake = 1;\n }\n \n // if the pile is larger than the target but we only took one slice, there's no smaller pile we can make\n // so we upload it anyway and still try taking more\n else if ((pileSize > dcpConfig.job.uploadSlicesTarget) && (slicesTaken === 1))\n {\n uploadedSlices = await this.safeSliceUpload(pileMessage);\n newMostToTake = mostToTake * dcpConfig.job.uploadIncreaseFactor;\n }\n \n // otherwise, 
if the pile is smaller than the soft ceiling, send up the pile anyway (since piles are expensive to make) \n // but remember to include incrementFactor times as many slices in the next pile\n else if (pileSize <= dcpConfig.job.uploadSlicesTarget)\n {\n uploadedSlices = await this.safeSliceUpload(pileMessage);\n newMostToTake = mostToTake * dcpConfig.job.uploadIncreaseFactor;\n }\n \n // if the pile is over the ceiling then we do not upload and begin reassembling our piles from scratch\n else if (pileSize > dcpConfig.job.uploadSlicesCeiling)\n {\n newMostToTake = -1;\n }\n \n // if the pile is over the target (but implicitly under the ceiling), then upload the pile to scheduler but lower mostToTake\n // by a smaller factor than incrementFactor to allow us to begin \"centering\" sizes of piles around the target\n else if (pileSize > dcpConfig.job.uploadSlicesTarget)\n {\n uploadedSlices = await this.safeSliceUpload(pileMessage);\n newMostToTake = Math.ceil(mostToTake / ((2 / 3) * dcpConfig.job.uploadIncreaseFactor));\n }\n\n if (uploadedSlices && uploadedSlices.success && typeof uploadedSlices.payload.lastSliceNumber !== 'undefined')\n // must check if uploadedSlices exists first since if pileSize > ceiling then there will be no uploadedSlices\n this.status.total = uploadedSlices.payload.lastSliceNumber;\n\n let payload = uploadedSlices ? uploadedSlices.payload : undefined;\n return { payload, newMostToTake }; // in case the user needs lastSliceNumber's value\n }\n \n /**\n * Uploads slices to the scheduler in a staggered fashion\n * @param {Array} dataValues actual array of slices being uploaded to scheduler\n * @returns payload containing success property (pertaining to success of adding slices to job) as well as lastSliceNumber of job\n */\n async addSlices(dataValues)\n {\n if (!Array.isArray(dataValues))\n throw new TypeError('Only data-by-value jobs may dynamically add slices');\n\n let mostToTake = dcpConfig.job.uploadInitialNumberOfSlices; // maximum number of slices we could take in per pile\n let payload = undefined; // used in return value\n let slicesTaken = 0; // number of slices in the pile already\n let pile = [];\n \n for (let slice of dataValues)\n {\n pile.push(slice);\n slicesTaken++;\n if (slicesTaken === mostToTake)\n {\n let total = await this.sliceUploadLogic(pile, mostToTake);\n payload = total.payload;\n \n if (total.newMostToTake < 0)\n {\n /* if total.newMostToTake == -1 (only non-positive value returned), then the pile was not successfully\n * uploaded because it was over the ceiling and we need to upload the pile *itself* again, recursively\n */\n payload = await this.addSlices(pile);\n /* and next time, the number of slices we take is the number from this time *divided* by the incrementFactor\n * since we know invariably that number of slices was under the ceiling AND target\n * if you're curious why that's an invariant, this is because mostToTake only ever *increases* by being multiplied by \n * a factor of incrementFactor within sliceUploadLogic, and this only occurs when the pile being uploaded that time\n * was under the target\n */\n mostToTake = mostToTake / dcpConfig.job.uploadIncreaseFactor;\n }\n else\n {\n /* in all other cases (other than the pile size being over the ceiling) the sliceUploadLogic helper \n * determines the number of slices we should pick up next time, so we just use the value it spits out\n */\n mostToTake = total.newMostToTake;\n }\n \n // reset slicesTaken and pile since at this point we know for sure the pile has been uploaded\n 
pile = [];\n slicesTaken = 0;\n }\n else\n {\n continue;\n }\n }\n // upload the pile one last time in case we continued off the last slice with a non-empty pile\n if (pile.length !== 0)\n {\n let finalObj = await this.sliceUploadLogic(pile, mostToTake);\n payload = finalObj.payload;\n mostToTake = finalObj.newMostToTake;\n \n if (mostToTake < 0)\n {\n // if you need documentation on the next two lines, look inside the if (total.newMostToTake < 0) just above\n payload = await this.addSlices(pile);\n mostToTake = mostToTake / dcpConfig.job.uploadIncreaseFactor;\n }\n }\n\n // and finally assign whatever mostToTake was at the end of this run of the function to be returned \n // as part of the payload in case addSlices was called recursively\n payload.mostToTake = mostToTake;\n \n /* contains the job's lastSliceNumber (the only externally-meaningful value returned from \n * the uploading of slices to the scheduler) in case the calling function needs it \n */\n return payload;\n }\n\n /**\n * job.snapshot(): Private function used to populate the payloadDetails from private data,\n * inferred data, etc. Once this function has run, the payloadDetails are\n * considered authoritatively up to date until the calling function returns\n * or awaits.\n */\n [SNAPSHOT]() {\n const pd = this[INTERNAL_SYMBOL].payloadDetails;\n\n pd.type = 'ad-hoc'; /* @todo implement appliances */\n pd.uuid = this.uuid;\n pd.workFunctionURI = this[INTERNAL_SYMBOL].workFunctionURI;\n pd.dependencies = this[INTERNAL_SYMBOL].dependencies;\n pd.requirePath = this[INTERNAL_SYMBOL].requirePath;\n pd.modulePath = this[INTERNAL_SYMBOL].modulePath;\n pd.resultStorageType = this[INTERNAL_SYMBOL].resultStorageType;\n pd.resultStorageDetails = this[INTERNAL_SYMBOL].resultStorageDetails;\n pd.resultStorageParams = this[INTERNAL_SYMBOL].resultStorageParams;\n pd.force100pctCPUDensity = this[INTERNAL_SYMBOL].force100pctCPUDensity;\n\n pd.requirements = this.requirements;\n applyObjectSchema(pd.requirements, DEFAULT_REQUIREMENTS, 'Requirements Object');\n \n // @todo: 'figure this out' - wise words from eddie /mp jan 2019\n if (!pd.options) { pd.options = {}; }\n if (!pd.public) { pd.public = {}; } \n\n for (let p of ['name', 'description', 'link']) {\n if (typeof this.public[p] === 'string') {\n pd.public[p] = this.public[p];\n }\n }\n\n // The max value that the client is willing to spend to deploy\n // (list on the scheduler, doesn't include compute payment)\n // maxDeployPayment is the max the user is willing to pay to DCP (as a\n // Hold), in addition to the per-slice offer and associated scrape.\n // Currently calculated as `deployCost = costPerKB *\n // (JSON.stringify(job).length / 1024) // 1e-9 per kb`\n // @todo: figure this out / er nov 2018\n pd.maxDeployPayment = 1;\n \n // payloadDetails.timing can be provided as an initial estimate of slice time, to\n // give a more useful useful calculated heap value (heap.value is more or less\n // dcc-per-millisecond)\n pd.timing = pd.timing || 1; \n }\n\n /** Escrow additional funds for this job\n * @access public\n * @param {number|BigNumber} fundsRequired - A number or BigNumber instance representing the funds to escrow for this job\n */\n async escrow (fundsRequired) {\n if ((typeof fundsRequired !== 'number' && !BigNumber.isBigNumber(fundsRequired))\n || fundsRequired <= 0 || !Number.isFinite(fundsRequired) || Number.isNaN(fundsRequired)) {\n throw new Error(`Job.escrow: fundsRequired must be a number greater than zero. 
(not ${fundsRequired})`);\n }\n\n const response = await this.bankConnection.send('embiggenFeeStructure', {\n feeStructureAddress: this[INTERNAL_SYMBOL].payloadDetails.feeStructureId,\n additionalEscrow: new BigNumber(fundsRequired),\n fromAddress: this.paymentAccountKeystore.address,\n }, this.paymentAccountKeystore);\n\n this.receipt = response.payload;\n\n return this.receipt;\n }\n\n async _pack () {\n var retval = (__webpack_require__(/*! ./node-modules */ \"./src/dcp-client/job/node-modules.js\").createModuleBundle)(this[INTERNAL_SYMBOL].dependencies);\n return retval;\n }\n\n /** \n * Collect all of the dependencies together, throw them into a BravoJS\n * module which sideloads them as a side effect of declaration, and transmit\n * them to the package manager. Then we return the package descriptor object,\n * which is guaranteed to have only one file in it.\n *\n * @returns {object} with properties name and files[0]\n */\n async _publishLocalModules() {\n const { tempFile, hash, unresolved } = await this._pack();\n\n if (!tempFile) {\n return { unresolved };\n }\n\n const sideloaderFilename = tempFile.filename;\n const pkg = {\n name: `dcp-pkg-v1-localhost-${hash.toString('hex')}`,\n version: '1.0.0',\n files: {\n [sideloaderFilename]: `${sideloaderModuleIdentifier}.js`,\n },\n }\n\n await dcpPublish.publish(pkg);\n tempFile.remove();\n\n return { pkg, unresolved };\n }\n\n /**\n * Deploys the job to the scheduler.\n * @param {number | object} [slicePaymentOffer=compute.marketValue] - Amount\n * in DCC that the user is willing to pay per slice.\n * @param {Keystore} [paymentAccountKeystore=wallet.get] - An instance of the\n * Wallet API Keystore that's used as the payment account when executing the\n * job.\n * @param {object} [initialSliceProfile] - An object describing the cost the\n * user believes the average slice will incur.\n * @access public\n * @emits Job#accepted\n */\n async exec(slicePaymentOffer = (__webpack_require__(/*! 
../compute */ \"./src/dcp-client/compute.js\").compute.marketValue), paymentAccountKeystore, initialSliceProfile) {\n if (this[INTERNAL_SYMBOL].connected) {\n throw new Error('Exec called twice on the same job handle.');\n }\n\n if (this.estimationSlices === Infinity)\n this[INTERNAL_SYMBOL].payloadDetails.estimationSlices = null;\n else if (this.estimationSlices <= 0)\n throw new Error('Incorrect value for estimationSlices; it can be an integer or Infinity!');\n else\n this[INTERNAL_SYMBOL].payloadDetails.estimationSlices = this.estimationSlices;\n\n this[INTERNAL_SYMBOL].payloadDetails.greedyEstimation = this.greedyEstimation;\n\n if(this.tuning.kvin.speed || this.tuning.kvin.size)\n {\n tunedKvin = new kvin.KVIN();\n tunedKvin.tune = 'size';\n if(this.tuning.kvin.speed)\n tunedKvin.tune = 'speed';\n // If both size and speed are true, kvin will optimize based on speed\n if(this.tuning.kvin.speed && this.tuning.kvin.size)\n console.log('Slices and arguments are being uploaded with speed optimization.');\n }\n\n /* eagerly connect to depedent services for better performance */\n this.eventSubscriber.eventRouterConnection.keepalive();\n this.deployConnection.keepalive();\n\n this.readyStateChange('exec');\n\n if (typeof slicePaymentOffer !== 'undefined') this.setSlicePaymentOffer(slicePaymentOffer);\n if (typeof initialSliceProfile !== 'undefined') this.initialSliceProfile = initialSliceProfile;\n if (typeof paymentAccountKeystore !== 'undefined') {\n /** XXX @todo deprecate use of ethereum wallet objects */\n if (typeof paymentAccountKeystore === 'object' && paymentAccountKeystore.hasOwnProperty('_privKey')) {\n console.warn('* deprecated API * - job.exec invoked with ethereum wallet object as paymentAccountKeystore') /* /wg oct 2019 */\n paymentAccountKeystore = paymentAccountKeystore._privKey\n }\n /** XXX @todo deprecate use of private keys */\n if (wallet.isPrivateKey(paymentAccountKeystore)) {\n console.warn('* deprecated API * - job.exec invoked with private key as paymentAccountKeystore') /* /wg dec 2019 */\n paymentAccountKeystore = await new wallet.Keystore(paymentAccountKeystore, '');\n }\n\n this.setPaymentAccountKeystore(paymentAccountKeystore)\n }\n\n // Unlock\n if (this[INTERNAL_SYMBOL].paymentAccountKeystore) {\n // Throws if they fail to unlock, we allow this since the keystore was set programmatically. \n await this[INTERNAL_SYMBOL].paymentAccountKeystore.unlock(undefined, parseFloat(dcpConfig.job.maxDeployTime));\n } else {\n // If not set programmatically, we keep trying to get an unlocked keystore ... forever.\n let locked = true;\n let safety = 0; // no while loop shall go unguarded\n let ks;\n do {\n ks = null;\n // custom message for the browser modal to denote the purpose of keystore submission\n let msg = `This application is requesting a keystore file to execute ${this.public.description || this.public.name || 'this job'}. Please upload the corresponding keystore file. 
If you upload a keystore file which has been encrypted with a passphrase, the application will not be able to use it until it prompts for a passphrase and you enter it.`;\n try {\n ks = await wallet.get({ contextId: this.contextId, jobName: this.public.name, msg});\n } catch (e) {\n if (e.code !== ClientModal.CancelErrorCode) throw e;\n };\n if (ks) {\n try {\n await ks.unlock(undefined, parseFloat(dcpConfig.job.maxDeployTime));\n locked = false;\n } catch (e) {\n const expectedCodes = [wallet.unlockFailErrorCode, ClientModal.CancelErrorCode];\n if (!expectedCodes.includes(e.code)) throw e;\n }\n }\n if (safety++ > 1000) throw new Error('EINFINITY: job.exec tried wallet.get more than 1000 times.')\n } while (locked);\n this.setPaymentAccountKeystore(ks)\n }\n\n // We either have a valid keystore + password or we have rejected by this point.\n if (!this.slicePaymentOffer) {\n throw new Error('A payment profile must be assigned before executing the job');\n } else {\n let pd = this[INTERNAL_SYMBOL].payloadDetails;\n pd.feeStructure = this[INTERNAL_SYMBOL].slicePaymentOffer.toFeeStructure(pd.data.length);\n }\n\n if (!this.address) {\n try {\n this.readyStateChange('init');\n await this[DEPLOY_JOB]();\n this.readyStateChange('listeners');\n const listenersP = this[ADD_LISTENERS]();\n\n // localExec jobs are not entered in any compute group.\n if (!this[INTERNAL_SYMBOL].payloadDetails.localExec && this.computeGroups && this.computeGroups.length)\n {\n this.readyStateChange('compute-groups');\n computeGroups.addRef(); // Just in case we're doing a Promise.all on multiple execs.\n\n // Add this job to its currently-defined compute groups (as well as public group, if included)\n let success;\n try {\n const cgPayload = await computeGroups.addJobToGroups(this.address, this.computeGroups);\n success = true; // To support older version of CG service where addJobToGroups had void/undefined return.\n if (cgPayload) success = cgPayload.success;\n debugging('dcp-client') && console.debug('job/index: addJobToGroups cgPayload:', cgPayload ? cgPayload : 'cgPayload is not defined; probably from legacy CG service.');\n } catch (e) {\n debugging('dcp-client') && console.debug('job/index: addJobToGroups threw exception:', e);\n success = false;\n }\n\n computeGroups.closeServiceConnection().catch((err) => {\n console.error('Warning: could not close compute groups service connection', err)\n });\n\n /* Could not put the job in any compute group, even though the user wanted it to run. Cancel the job. */\n if (!success)\n {\n await this.cancel('compute-groups::Unable to join any compute groups');\n throw new DCPError(`Access Denied::Failed to add job ${this.address} to any of the desired compute groups`, 'DCPL-1100');\n }\n }\n\n await listenersP;\n\n // Upload slice data after CGs, but before `deployed` readystate.\n // This way, work can begin on the first slices while continuing to\n // upload additional slice input data\n let data = this[INTERNAL_SYMBOL].dataValues;\n\n // if job data is by value then upload data to the scheduler in a staggered fashion\n if (Array.isArray(data)) {\n this.readyStateChange('uploading');\n\n await this.addSlices(data).then(() => {\n return this.close();\n });\n }\n\n this.readyStateChange('deployed');\n this.emit('accepted');\n } catch (error) {\n if (ON_BROWSER) {\n await ClientModal.alert(error, { title: 'Failed to deploy job!' 
});\n }\n\n throw error;\n }\n } else {\n await this[ADD_LISTENERS]();\n this.readyStateChange('reconnected');\n }\n \n this[ON_STATUS](this[INTERNAL_SYMBOL].payloadDetails.status);\n this[INTERNAL_SYMBOL].connected = true;\n\n return new Promise((resolve, reject) => {\n const onComplete = () => resolve(this.results);\n const onCancel = (event) => {\n /**\n * FIXME(DCP-1150): Remove this since normal cancel event is noisy\n * enough to not need stopped event too.\n */\n if (ON_BROWSER && !this[INTERNAL_SYMBOL].listeningForCancel)\n this[INTERNAL_SYMBOL].cancelAlert(event.reason);\n this.emit('cancel', event);\n\n let errorMsg = event.reason;\n if (event.error)\n errorMsg = errorMsg +`\\n Recent error message: ${event.error.message}`\n \n reject(new DCPError(errorMsg, event.code));\n };\n\n this[INTERNAL_SYMBOL].events.once('stopped', async (stopEvent) => {\n if (this.receivedStop)\n {\n // The result submitter will ensure the client receives the stop event through the event router\n // by repeatedly sending stop messages if it detects something might have gone wrong. Sometimes\n // this detection is 'overeager', causing multiple stop events to be sent by the result submitter.\n // If multiple are received, ignore all after the first one.\n return;\n }\n this.receivedStop = true;\n this.emit('stopped', stopEvent.runStatus);\n switch (stopEvent.runStatus) {\n case jobStatus.finished:\n if (this.collateResults) {\n let report = await this.getJobInfo();\n // fetch results for remain slices\n let fetchedSliceNumbers = this[INTERNAL_SYMBOL].resultsAvailable.reduce((a,e,i) => {\n if(e) a.push(i);\n return a;\n }, []);\n\n let allSliceNumbers = Array.from(Array(report.totalSlices)).map((e,i)=>i+1);\n let remainSliceNumbers = allSliceNumbers.filter( function(e) {\n return !fetchedSliceNumbers.includes(e);\n });\n\n if (remainSliceNumbers.length)\n {\n const promises = remainSliceNumbers.map(sliceNumber => this.results.fetch([sliceNumber], true));\n await Promise.all(promises);\n }\n }\n\n this.emit('complete', this.results);\n onComplete();\n break;\n case jobStatus.cancelled:\n onCancel(stopEvent);\n break;\n default:\n /**\n * Asserting that we should never be able to reach here. 
The only\n * scheduler events that should trigger the Job's 'stopped' event\n * are jobStatus.cancelled, jobStatus.finished, and sliceStatus.paused.\n */\n reject(\n new Error(\n `Unknown event \"${stopEvent.runStatus}\" caused the job to be stopped.`,\n ),\n );\n break;\n }\n });\n\n if (!this[INTERNAL_SYMBOL].payloadDetails.running) {\n const runStatus = this[INTERNAL_SYMBOL].payloadDetails.runStatus;\n this[INTERNAL_SYMBOL].events.emit('stopped', { runStatus });\n }\n }).finally(() => {\n const handleErr = (e) => {\n console.error('Error while closing job connection:');\n console.error(e);\n }\n\n // Create an async IIFE to not block the promise chain\n (async () => {\n //delay to let last few events to be received\n await new Promise((resolve) => setTimeout(resolve, 1000));\n \n // close all of the connections so that we don't cause node processes to hang.\n await this.eventSubscriber.close().catch(handleErr);\n this.deployConnection.off('close', this.openDeployConn);\n await this.deployConnection.close().catch(handleErr);\n\n this.bankConnection.off('close', this.openBankConn)\n await this.bankConnection.close().catch(handleErr);\n \n await computeGroups.closeServiceConnection().catch((err) => {\n console.error('Warning: could not close compute groups service connection', err)\n });\n })();\n });\n }\n\n /**\n * job.addListeners(): Private function used to set up event listeners to the scheduler\n * before deploying the job.\n */\n async [ADD_LISTENERS] () {\n // This is important: We need to flush the task queue before adding listeners\n // because we queue pending listeners by listening to the newListener event (in the constructor).\n // If we don't flush here, then the newListener events may fire after this function has run,\n // and the events won't be properly set up.\n await new Promise(resolve => setTimeout(resolve, 0));\n\n // @todo: Listen for an estimated cost, probably emit an \"estimated\" event when it comes in?\n // also @todo: Do the estimation task(s) on the scheduler and send an \"estimated\" event\n\n // Always listen to the stop event. 
It will resolve the work function promise, so is always needed.\n this.on('stop', (ev) => {\n this[INTERNAL_SYMBOL].events.emit('stopped', ev)\n });\n\n // Connect listeners that were set up before exec\n const evts = Array.from(this[INTERNAL_SYMBOL].subscribedEvents);\n if (evts.includes('result'))\n this.listeningForResults = true;\n this[INTERNAL_SYMBOL].subscribedEvents.clear();\n await this[LISTEN_TO_EVENTS](evts);\n\n // Connect listeners that are set up after exec\n this.on('newListener', (evt) => {\n if (evt === 'newListener') return;\n this[LISTEN_TO_EVENTS]([evt]);\n });\n \n if (this.collateResults && !this.listeningForResults) {\n // automatically add a listener for results\n this.on('result', () => {});\n }\n\n // Connect work event listeners that were set up before exec\n const workEvts = Array.from(this[INTERNAL_SYMBOL].subscribedWorkerEvents);\n if (debugging('dcp-client')) {\n console.debug('subscribedEvents', evts);\n console.debug('subscribedWorkerEvents', workEvts);\n }\n this[INTERNAL_SYMBOL].subscribedWorkerEvents.clear();\n for (let evt of workEvts) {\n await this[LISTEN_TO_WORK_EVENTS](evt);\n }\n\n // Connect work event listeners that are set up after exec\n this.work.on('newListener', (evt) => {\n if (evt === 'newListener') return;\n this[LISTEN_TO_WORK_EVENTS](evt);\n });\n }\n\n /**\n * Subscribes to either reliable events or optional events\n * @param {string[]} events \n */\n async [LISTEN_TO_EVENTS](events) {\n\n const reliableEvents = [];\n const optionalEvents = [];\n for (let eventName of events) {\n eventName = eventName.toLowerCase();\n if (this[INTERNAL_SYMBOL].subscribedEvents.has(eventName))\n {\n // already subscribed to this event\n continue;\n }\n else\n {\n this[INTERNAL_SYMBOL].subscribedEvents.add(eventName);\n \n if (this.eventTypes[eventName] && this.eventTypes[eventName].reliable)\n {\n reliableEvents.push(eventName)\n }\n else if (this.eventTypes[eventName] && !this.eventTypes[eventName].reliable)\n {\n optionalEvents.push(eventName)\n }\n else\n {\n // console.debug('606: listening for unexpected/unsupported event:', eventName);\n }\n }\n }\n if (debugging('dcp-client')) {\n console.debug('reliableEvents', reliableEvents);\n console.debug('optionalEvents', optionalEvents);\n }\n await this.eventSubscriber.subscribeManyEvents(reliableEvents, optionalEvents, { filter: { job: this.address } })\n }\n\n /**\n * Establishes listeners for worker events when requested by the client\n * @param {string} eventName \n */\n async [LISTEN_TO_WORK_EVENTS](eventName) {\n if (this[INTERNAL_SYMBOL].subscribedWorkerEvents.has(eventName)) {\n // already subscribed to this event\n return;\n }\n else\n {\n this[INTERNAL_SYMBOL].subscribedWorkerEvents.add(eventName);\n this.eventIntercepts.custom = (ev) => this.work.emit(eventName, ev)\n const optionalEvents = ['custom'];\n await this.eventSubscriber.subscribeManyEvents([], optionalEvents, { filter: { job: this.address } });\n }\n }\n\n /**\n * Takes result events as input, stores the result and fires off\n * events on the job handle as required. 
(result, duplicate-result)\n *\n * @param {object} ev - the event recieved from protocol.listen('/results/0xThisGenAdr')\n */\n async [ON_RESULT] (ev) {\n if (this[INTERNAL_SYMBOL].results === null) {\n // This should never happen - the onResult event should only be established/called\n // in addListeners which should also initialize the internal results array\n throw new Error('Job.onResult was invoked before initializing internal results');\n }\n \n const { result: _result, time } = ev.result;\n debugging('dcp-client') && console.debug('ON_RESULT', _result);\n let result = await fetchURI(_result);\n\n if (this[INTERNAL_SYMBOL].results[ev.sliceNumber]) {\n const changed = JSON.stringify(this[INTERNAL_SYMBOL].results[ev.sliceNumber]) !== JSON.stringify(result);\n this.emit('duplicate-result', { sliceNumber: ev.sliceNumber, changed });\n }\n\n this[INTERNAL_SYMBOL].results[ev.sliceNumber] = result;\n this[INTERNAL_SYMBOL].resultsAvailable[ev.sliceNumber] = true;\n this.emit('result', { sliceNumber: ev.sliceNumber, result });\n this.emit('resultsUpdated');\n }\n\n /**\n * Receives status events from the scheduler, updates the local status object\n * and emits a 'status' event\n *\n * @param {object} ev - the status event received from\n * protocol.listen('/status/0xThisGenAdr')\n * @param {boolean} emitStatus - value indicating whether or not the status\n * event should be emitted\n */\n [ON_STATUS]({ runStatus, total, distributed, computed }, emitStatus = true) {\n Object.assign(this[INTERNAL_SYMBOL].status, {\n runStatus,\n total,\n distributed,\n computed,\n });\n\n if (emitStatus) {\n this.emit('status', this.status);\n }\n }\n\n /**\n * Sends a request to the scheduler to deploy the job.\n */\n async [DEPLOY_JOB] () {\n const { payloadDetails } = this[INTERNAL_SYMBOL];\n \n this[SNAPSHOT](); /* .payloadDetails now up to date */\n \n /* Send sideloader bundle to the package server */\n if (DCP_ENV.platform === 'nodejs' && this[INTERNAL_SYMBOL].dependencies.length) {\n try\n {\n let {pkg, unresolved} = await this._publishLocalModules();\n\n payloadDetails.dependencies = unresolved;\n if (pkg)\n payloadDetails.dependencies.push(pkg.name + '/' + sideloaderModuleIdentifier); \n }\n catch(error)\n {\n throw new DCPError(`Error trying to communicate with package manager server: ${error}`);\n }\n \n }\n \n this.readyStateChange('preauth');\n\n /* eagerly connect to dependent services for better performance */\n computeGroups.keepAlive()\n\n const adhocId = payloadDetails.uuid.slice(payloadDetails.uuid.length - 6, payloadDetails.uuid.length);\n const schedId = await dcpConfig.scheduler.identity;\n // The following check is needed for when using dcp-rtlink and loading the config through source, instead of using the dcp-client bundle\n let schedIdAddress = schedId;\n if(schedId.address){ \n schedIdAddress = schedId.address;\n }\n const myId = await wallet.getId();\n const preauthToken = await bankUtil.preAuthorizePayment(schedIdAddress, payloadDetails.maxDeployPayment, this.paymentAccountKeystore);\n const { dataRange, dataValues, dataPattern, sliceCount } = marshalInputData(payloadDetails.data);\n if(dataValues)\n this[INTERNAL_SYMBOL].dataValues = dataValues;\n\n this.readyStateChange('deploying');\n\n /* Payload format is documented in scheduler-v4/libexec/job-submit/operations/submit.js */\n const submitPayload = {\n owner: myId.address,\n paymentAccount: this.paymentAccountKeystore.address,\n priority: 0, // @nyi\n\n workFunctionURI: payloadDetails.workFunctionURI,\n uuid: 
payloadDetails.uuid,\n mvMultSlicePayment: Number(payloadDetails.feeStructure.marketValue) || 0, // @todo: improve feeStructure internals to better reflect v4\n absoluteSlicePayment: Number(payloadDetails.feeStructure.maxPerRequest) || 0,\n requirePath: payloadDetails.requirePath,\n modulePath: payloadDetails.modulePath,\n dependencies: payloadDetails.dependencies,\n requirements: payloadDetails.requirements, /* capex */\n localExec: payloadDetails.localExec,\n force100pctCPUDensity: this.force100pctCPUDensity,\n estimationSlices: payloadDetails.estimationSlices,\n greedyEstimation: payloadDetails.greedyEstimation,\n workerConsole: this.workerConsole,\n isCI: this.isCI,\n\n description: payloadDetails.public.description || 'Discreetly making the world smarter',\n name: payloadDetails.public.name || 'Ad-Hoc Job' + adhocId,\n \n preauthToken, // XXXwg/er @todo: validate this after fleshing out the stub(s)\n\n resultStorageType: payloadDetails.resultStorageType, // @todo: implement other result types\n resultStorageDetails: payloadDetails.resultStorageDetails, // Content depends on resultStorageType\n resultStorageParams: payloadDetails.resultStorageParams, // Post params for off-prem storage\n dataRange,\n dataPattern,\n sliceCount\n };\n\n /* Determine thee type of the arguments option and set the submit message payload accordingly. */\n if (Array.isArray(payloadDetails.arguments) && payloadDetails.arguments.length === 1 && payloadDetails.arguments[0] instanceof DcpURL) {\n submitPayload.arguments = payloadDetails.arguments[0].href;\n } else if (payloadDetails.arguments instanceof RemoteDataSet) {\n submitPayload.marshaledArguments = kvinMarshal(payloadDetails.arguments.map(e => new URL(e)))\n } else if (payloadDetails.arguments) {\n try {\n submitPayload.marshaledArguments = kvinMarshal(Array.from(payloadDetails.arguments));\n } catch(e) {\n throw new Error(`Could not convert job arguments to Array (${e.message})`);\n }\n }\n \n if (payloadDetails.localExec && !DCP_ENV.isBrowserPlatform)\n {\n const workFunctionFile = createTempFile('dcp-localExec-workFunction-XXXXXXXXX', 'js');\n const argumentsFile = createTempFile('dcp-localExec-arguments-XXXXXXXXX', 'js');\n \n // For allowed origins of the localexec worker. 
Only allow the origins (files in this case) in this list.\n this.localExecAllowedFiles = [workFunctionFile.filename, argumentsFile.filename];\n\n // get the workFunctionURI string before writing to file to prevent the need to double-decode the work function in the worker.\n const workFunction = await fetchURI(payloadDetails.workFunctionURI);\n workFunctionFile.writeSync(workFunction);\n \n const workFunctionFileURL = new URL('file://' + workFunctionFile);\n submitPayload.workFunctionURI = workFunctionFileURL.href;\n payloadDetails.workFunctionURI = workFunctionFileURL.href;\n \n if (submitPayload.marshaledArguments)\n {\n argumentsFile.writeSync(JSON.stringify(submitPayload.marshaledArguments));\n const argumentsFileURL = new URL('file://' + argumentsFile.filename);\n submitPayload.marshaledArguments = kvinMarshal([argumentsFileURL]);\n }\n }\n\n // XXXpfr Excellent tracing.\n if (debugging('dcp-client')) {\n dumpObject(submitPayload, 'Submit: Job Index: submitPayload', 256);\n console.debug('Before Deploy', myId);\n }\n\n // Deploy the job!\n const deployed = await this.deployConnection.send('submit', submitPayload, myId);\n\n if (!deployed.success) {\n // close all of the connections so that we don't cause node processes to hang.\n const handleErr = (e) =>\n {\n console.error('Error while closing job connection:');\n console.error(e);\n }\n \n this.eventSubscriber.close().catch(handleErr);\n \n this.deployConnection.off('close', this.openDeployConn);\n this.deployConnection.close().catch(handleErr);\n \n this.bankConnection.off('close', this.openBankConn)\n this.bankConnection.close().catch(handleErr);\n \n computeGroups.closeServiceConnection().catch(handleErr);\n \n // Yes, it is possible for deployed or deployed.payload to be undefined.\n if (deployed.payload) {\n if (deployed.payload.code === 'ENOTFOUND')\n throw new DCPError(`Failed to submit job to scheduler. Account: ${submitPayload.paymentAccount} was not found or does not have sufficient balance (${deployed.payload.info.deployCost} DCCs needed to deploy this job)`, deployed.payload); \n throw new DCPError('Failed to submit job to scheduler', deployed.payload);\n }\n throw new DCPError('Failed to submit job to scheduler (no payload)', deployed ? 
deployed : '');\n }\n\n debugging('dcp-client') && console.debug('After Deploy', JSON.stringify(deployed));\n\n this.address = payloadDetails.address = deployed.payload.job;\n this[INTERNAL_SYMBOL].deployCost = deployed.payload.deployCost;\n\n if (!payloadDetails.status)\n payloadDetails.status = {\n runStatus: null,\n total: 0,\n computed: 0,\n distributed: 0,\n };\n \n payloadDetails.runStatus = payloadDetails.status.runStatus = deployed.payload.status;\n payloadDetails.status.total = deployed.payload.lastSliceNumber;\n payloadDetails.running = true;\n \n this[INTERNAL_SYMBOL].payloadDetails = {\n ...this[INTERNAL_SYMBOL].payloadDetails,\n ...payloadDetails,\n };\n }\n\n /**\n * This function is identical to exec, except that the job is executed locally\n * in the client.\n * @async\n * @param {number} cores - the number of local cores in which to execute the job.\n * @param {...any} args - The remaining arguments are identical to the arguments of exec\n * @return {Promise<ResultHandle>} - resolves with the results of the job, rejects on an error\n * @access public\n */\n localExec (cores = 1, ...args) {\n this[INTERNAL_SYMBOL].payloadDetails.localExec = true;\n this[INTERNAL_SYMBOL].payloadDetails.estimationSlices = 0;\n this[INTERNAL_SYMBOL].payloadDetails.greedyEstimation = false;\n this[INTERNAL_SYMBOL].payloadDetails.isCI = false;\n\n let worker;\n this.on('accepted', () => {\n // Start a worker for this job\n worker = new Worker({\n localExec: true,\n jobAddresses: [this.address],\n allowedOrigins: this.localExecAllowedFiles,\n paymentAddress: this[INTERNAL_SYMBOL].paymentAccountKeystore.address,\n maxWorkingSandboxes: cores,\n sandboxOptions: {\n ignoreNoProgress: true,\n SandboxConstructor: (DCP_ENV.platform === 'nodejs' &&\n (__webpack_require__(/*! ../worker/evaluators */ \"./src/dcp-client/worker/evaluators/index.js\").nodeEvaluatorFactory)())\n },\n });\n\n worker.start().catch((e) => {\n console.error(\"Failed to start worker for localExec:\");\n console.error(e.message);\n });\n });\n\n return this.exec(...args).finally(() => {\n if (worker) {\n setTimeout(() => {\n // stop the worker\n worker.stop(true);\n }, 3000);\n }\n });\n }\n\n /**\n * The current job status. Will have undefined values when the handle hasn't had exec called on it yet.\n * @access public\n * @readonly\n * @type {object}\n * @property {number} total Total number of slices in the job\n * @property {number} distributed Number of slices that have been distributed\n * @property {number} computed Number of slices that have completed execution (returned a result)\n * @property {string} runStatus Current runStatus of the job\n */\n get status () {\n // Shallow-copy to prevent modification\n return { ...this[INTERNAL_SYMBOL].status };\n }\n\n get requirePath() {\n return this[INTERNAL_SYMBOL].requirePath;\n }\n\n /**\n * This function specifies a module dependency (when the argument is a string)\n * or a list of dependencies (when the argument is an array) of the work\n * function. 
This function can be invoked multiple times before deployment.\n * @param {string | string[]} modulePaths - A string or array describing one\n * or more dependencies of the job.\n * @access public\n */\n requires(modulePaths) {\n if (\n typeof modulePaths !== 'string' &&\n (!Array.isArray(modulePaths) ||\n modulePaths.some((modulePath) => typeof modulePath !== 'string'))\n ) {\n throw new TypeError(\n 'The argument to dependencies is not a string or an array of strings',\n );\n } else if (modulePaths.length === 0) {\n throw new RangeError(\n 'The argument to dependencies cannot be an empty string or array',\n );\n } else if (\n Array.isArray(modulePaths) &&\n modulePaths.some((modulePath) => modulePath.length === 0)\n ) {\n throw new RangeError(\n 'The argument to dependencies cannot be an array containing an empty string',\n );\n }\n\n if (!Array.isArray(modulePaths)) {\n modulePaths = [modulePaths];\n }\n\n for (const modulePath of modulePaths) {\n if (modulePath[0] !== '.' && modulePath.indexOf('/') !== -1) {\n const modulePrefixRegEx = /^(.*)\\/.*?$/;\n const [, modulePrefix] = modulePath.match(modulePrefixRegEx);\n if (\n modulePrefix &&\n this[INTERNAL_SYMBOL].requirePath.indexOf(modulePrefix) === -1\n ) {\n this[INTERNAL_SYMBOL].requirePath.push(modulePrefix);\n }\n }\n\n this[INTERNAL_SYMBOL].dependencies.push(modulePath);\n }\n }\n\n get slicePaymentOffer () {\n return this[INTERNAL_SYMBOL].slicePaymentOffer;\n }\n\n /**\n * The keystore that will be used to pay for the job. Can be set with {@link Job.setPaymentAccountKeystore} or by providing a keystore to {@link Job.exec}.\n * @readonly\n * @access public\n * @type {module:dcp/wallet.AuthKeystore}\n */\n get paymentAccountKeystore () {\n return this[INTERNAL_SYMBOL].paymentAccountKeystore;\n }\n\n /** Set the account upon which funds will be drawn to pay for the job.\n * @param {module:dcp/wallet.AuthKeystore} keystore A keystore that representa a bank account.\n * @access public\n */\n setPaymentAccountKeystore (keystore) {\n if (this.address) {\n if (!keystore.address.eq(this[INTERNAL_SYMBOL].payloadDetails.owner)) {\n let message = \"Cannot change payment account after job has been deployed\";\n this.emit('EPERM', message);\n throw new Error(`EPERM: ${message}`);\n }\n }\n \n if (!(keystore instanceof wallet.Keystore)) {\n let e = new Error('Not an instance of Keystore: ' + keystore.toString())\n console.log(`Not an instance of Keystore: ${keystore}`)\n throw e\n }\n this[INTERNAL_SYMBOL].paymentAccountKeystore = keystore;\n }\n\n /** Set the slice payment offer. This is equivalent to the first argument to exec.\n * @param {number} slicePaymentOffer - The number of DCC the user is willing to pay to compute one slice of this job\n */\n setSlicePaymentOffer (slicePaymentOffer) {\n this[INTERNAL_SYMBOL].slicePaymentOffer = new SlicePaymentOffer(slicePaymentOffer);\n }\n\n /**\n * @param {URL|DcpURL} locationUrl - A URL object\n * @param {object} postParams - An object with any parameters that a user would like to be passed to a \n * remote result location. This object is capable of carry API keys for S3, \n * DropBox, etc. 
These parameters are passed as parameters in an \n * application/x-www-form-urlencoded request.\n */\n setResultStorage(locationUrl, postParams) {\n if (locationUrl instanceof URL || locationUrl instanceof DcpURL) {\n this[INTERNAL_SYMBOL].resultStorageDetails = locationUrl;\n } else {\n const e = new Error('Not an instance of a DCP URL: ' + locationUrl);\n console.log('Not an instance of a DCP URL ' + locationUrl);\n throw e;\n }\n\n // resultStorageParams contains any post params required for off-prem storage\n if (typeof postParams !== 'undefined' && typeof postParams === 'object' ) {\n this[INTERNAL_SYMBOL].resultStorageParams = postParams;\n } else {\n const e = new Error('Not an instance of a object: ' + postParams);\n console.log('Not an instance of an object ' + postParams);\n throw e;\n }\n\n // Some type of object here\n this[INTERNAL_SYMBOL].resultStorageType = 'pattern';\n }\n\n /** close an open job to indicate we are done adding data so it is okay to finish\n * the job at the appropriate time\n */\n async close() {\n return this.deployConnection.send('closeJob', {\n job: this.id,\n });\n }\n}\n\n/**\n * @typedef {object} MarshaledInputData\n * @property {any[]} [dataValues]\n * @property {string} [dataPattern]\n * @property {RangeObject} [dataRange]\n * @property {number} [sliceCount]\n */\n\n/**\n * Depending on the shape of the job's data, resolve it into a RangeObject, a\n * Pattern, or a values array, and return it in the appropriate property.\n *\n * @param {any} data Job's input data\n * @return {MarshaledInputData} An object with one of the following properties set:\n * - dataValues: job input is an array of arbitrary values \n * - dataPattern: job input is a URI pattern \n * - dataRange: job input is a RangeObject (and/or friends)\n */\nfunction marshalInputData(data) {\n if (!(data instanceof Object\n || data instanceof SuperRangeObject)) {\n throw new TypeError(`Invalid job data type: ${typeof data}`);\n }\n\n /**\n * @type MarshaledInputData\n */\n const marshalledInputData = {};\n\n // TODO(wesgarland): Make this more robust.\n if (data instanceof SuperRangeObject ||\n (data.hasOwnProperty('ranges') && data.ranges instanceof MultiRangeObject) ||\n (data.hasOwnProperty('start') && data.hasOwnProperty('end'))) {\n marshalledInputData.dataRange = data;\n } else if (Array.isArray(data)) {\n marshalledInputData.dataValues = data;\n } else if (data instanceof URL || data instanceof DcpURL) {\n marshalledInputData.dataPattern = String(data);\n } else if(data instanceof RemoteDataSet) {\n marshalledInputData.dataValues = data.map(e => new URL(e));\n } else if(data instanceof RemoteDataPattern) {\n marshalledInputData.dataPattern = data['pattern'];\n marshalledInputData.sliceCount = data['sliceCount'];\n }\n\n log('marshalledInputData:', marshalledInputData);\n return marshalledInputData;\n}\n/**\n * marshal the value using kvin or instance of the kvin (tunedKvin)\n * tunedKvin is defined if job.tuning.kvin is specified.\n *\n * @param {any} value \n * @return {object} A marshaled object\n * \n */\nfunction kvinMarshal (value) {\n if (tunedKvin)\n return tunedKvin.marshal(value);\n\n return kvin.marshal(value);\n}\n\nObject.assign(exports, {\n Job,\n SlicePaymentOffer,\n ResultHandle,\n});\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/job/index.js?");
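For orientation, the module above (./src/dcp-client/job/index.js) documents the public surface of the Job handle: the constructor forms, exec(), the result/console/error/ENOFUNDS events, the job.work emitter for custom work-function events, requires(), and the results ResultHandle. The following is a minimal, non-authoritative usage sketch assembled only from that JSDoc; the module name, payment amount, keystore, and the way the Job instance is obtained are placeholders not shown in this diff.

// Minimal sketch of driving a Job handle, using only members documented above.
// `job` and `paymentKeystore` are assumed to be supplied by the caller.
async function runJob(job, paymentKeystore) {
  job.public.name = 'example job';            // public-facing metadata
  job.requires('some-package/module');        // placeholder module dependency

  job.on('readyStateChange', (state) => console.log('readyState:', state));
  job.on('console',  (ev)  => console.log(`slice ${ev.sliceIndex} [${ev.level}]:`, ev.message));
  job.on('error',    (ev)  => console.error(`slice ${ev.sliceIndex} threw ${ev.name}:`, ev.message));
  job.on('ENOFUNDS', (dcc) => console.warn('job paused; estimated DCC still required:', String(dcc)));
  job.on('result',   (ev)  => console.log(`slice ${ev.sliceNumber} result:`, ev.result));

  // Custom events emitted by the work function via work.emit(...) surface on job.work.
  job.work.on('myEventName', (...args) => console.log('work event:', args));

  // exec(slicePaymentOffer, paymentAccountKeystore) deploys the job and resolves
  // with the job's ResultHandle once the job finishes (rejects if it is cancelled).
  const results = await job.exec(0.0001 /* DCC per slice, placeholder */, paymentKeystore);
  return Array.from(results);
}

The addSlices()/sliceUploadLogic() pair above implements the staggered slice upload: slices accumulate into a pile, the pile is sent once it holds mostToTake slices, and mostToTake is grown or shrunk so the serialized pile size hovers around dcpConfig.job.uploadSlicesTarget, while piles over dcpConfig.job.uploadSlicesCeiling are re-batched rather than sent (unless they are a single irreducible slice). The sketch below restates only that sizing policy under those assumptions; serializedSize and sendPile are stand-ins for the kvin marshalling and the prepared-message upload, and the re-batching of an over-ceiling pile is simplified relative to the real code, which re-runs addSlices on the pile.

// Simplified, non-authoritative sketch of the pile-sizing policy described above.
// `cfg` mirrors the dcpConfig.job upload* settings; `serializedSize` and `sendPile`
// are placeholders for kvin marshalling and deployConnection.sendPreparedMessage.
async function staggeredUpload(slices, cfg, serializedSize, sendPile) {
  let mostToTake = cfg.uploadInitialNumberOfSlices;
  let pile = [];

  for (const slice of slices) {
    pile.push(slice);
    if (pile.length < mostToTake)
      continue;

    const size = serializedSize(pile);
    if (size > cfg.uploadSlicesCeiling && pile.length > 1) {
      // Over the hard ceiling: never send this pile as-is; re-batch it into smaller
      // piles, then take fewer slices per pile next time around.
      await staggeredUpload(pile, { ...cfg, uploadInitialNumberOfSlices: Math.ceil(pile.length / 2) },
                            serializedSize, sendPile);
      mostToTake = Math.max(1, Math.floor(mostToTake / cfg.uploadIncreaseFactor));
    } else if (size <= cfg.uploadSlicesTarget || pile.length === 1) {
      // Under the soft target, or a single slice that cannot be made smaller:
      // send it, and (unless that lone slice was over the ceiling) take more next time.
      await sendPile(pile);
      mostToTake = size > cfg.uploadSlicesCeiling ? 1 : mostToTake * cfg.uploadIncreaseFactor;
    } else {
      // Between target and ceiling: send it, but shrink the next pile toward the target.
      await sendPile(pile);
      mostToTake = Math.ceil(mostToTake / ((2 / 3) * cfg.uploadIncreaseFactor));
    }

    pile = [];
  }

  if (pile.length)
    await sendPile(pile); // flush the final partial pile (the real code re-applies the policy here)
}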
4172
4172
 
4173
4173
  /***/ }),
4174
4174
 
@@ -4189,7 +4189,7 @@ eval("/**\n * @file node-modules.js Node-specific support for sen
4189
4189
  \*********************************************/
4190
4190
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4191
4191
 
4192
- eval("/**\n * @file job/result-handle.js\n * @author Ryan Rossiter, ryan@kingsds.network\n * @date June 2020\n *\n * The ResultHandle acts as a proxy for a job's results, querying\n * internal results when available or the scheduler when the results\n * are not available locally.\n */\n\nconst { rehydrateRange } = __webpack_require__(/*! ../range-object */ \"./src/dcp-client/range-object.js\");\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst { fetchURI } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\n\nconst RH_INTERNAL_SYMBOL = Symbol('Result Handle Internals');\n// Array methods that don't modify the array, will be applied to the ResultHandle's values object\nconst RH_ARRAY_METHODS = [\n 'slice', 'filter', 'concat', 'find', 'findIndex', 'indexOf', 'map', 'reduce', 'includes', 'toString', 'forEach'\n];\n\n/**\n * This class represents a handle on a job's results. It can be used for accessing the job's results, or for querying the scheduler to fetch results.\n * In addition to the properties and methods of this class, the following standard array methods are also available for accessing the available results: `slice`, `filter`, `concat`, `find`, `findIndex`, `indexOf`, `map`, `reduce`, `includes`, `toString`, and `forEach`.\n * The results can also be accessed by index (`results[i]`), but an error will be thrown if the result for that index is not yet available.\n * @access public\n */\nclass ResultHandle extends Array {\n constructor(job) {\n super();\n\n const { Job, JOB_INTERNAL_SYMBOL } = __webpack_require__(/*! . */ \"./src/dcp-client/job/index.js\");\n if (!(job instanceof Job)) {\n throw new TypeError('ResultHandle must be constructed from a Job object');\n }\n\n /**\n * The length of the available results array.\n * @type {number}\n * @access public\n */\n this.length = 0; // overridden by the proxy, here for JSDoc purposes\n\n this[RH_INTERNAL_SYMBOL] = {\n job,\n keys: job[JOB_INTERNAL_SYMBOL].payloadDetails.data,\n values: job[JOB_INTERNAL_SYMBOL].results,\n valuesAvailable: job[JOB_INTERNAL_SYMBOL].resultsAvailable,\n };\n\n return new Proxy(this, {\n get: (target, name) => {\n if ((typeof name === 'string' || typeof name === 'number') && Number.isInteger(parseFloat(name))) {\n let i = parseFloat(name);\n if (target.isAvailable(i)) {\n return target[RH_INTERNAL_SYMBOL].values[i];\n } else {\n throw new Error(`Result ${i} is not available. 
It has either not been computed or you need to fetch it.`);\n }\n } else if (name === 'length') {\n return target.getLength();\n } else if (name === 'constructor') {\n return Array.constructor;\n } else if (RH_ARRAY_METHODS.includes(name)) {\n let values = target.values();\n return values[name].bind(values);\n } else if (name === RH_INTERNAL_SYMBOL) {\n return target[RH_INTERNAL_SYMBOL];\n } else {\n // only return methods on this class, don't allow access\n // to array methods other than the whitelisted ones\n let p = target.__proto__[name];\n if (typeof p === 'function') {\n return p.bind(target);\n } else {\n return p;\n }\n }\n }\n });\n }\n\n toJSON() {\n return JSON.stringify(this.values());\n }\n\n isAvailable(index) {\n return this[RH_INTERNAL_SYMBOL].valuesAvailable[index];\n }\n\n reset() {\n // quickly empty the values and valuesAvailable array\n this[RH_INTERNAL_SYMBOL].values.length\n = this[RH_INTERNAL_SYMBOL].valuesAvailable.length = 0;\n\n this[RH_INTERNAL_SYMBOL].values.length\n = this[RH_INTERNAL_SYMBOL].valuesAvailable.length\n = this[RH_INTERNAL_SYMBOL].keys.length;\n }\n\n /**\n * Returns an array of input values. Will only return input values that have a completed result available.\n * @access public\n * @returns {Array<*>}\n */\n keys() {\n // Keys can be a RangeObject, faster to iterate over valuesAvailable\n return this[RH_INTERNAL_SYMBOL].valuesAvailable.reduce((keysList, valueAvailable, sliceNumber) => {\n if (valueAvailable) keysList.push(this[RH_INTERNAL_SYMBOL].keys[sliceNumber - 1]);\n return keysList;\n }, []);\n }\n\n /**\n * Returns an array of results. Will only return results that have been received from the scheduler, if only one result is complete the array will contain one value.\n * @access public\n * @returns {Array<*>}\n */\n values() {\n return this[RH_INTERNAL_SYMBOL].values.filter((v, i) => this.isAvailable(i));\n }\n\n // Aliased as length in proxy, can't use getter because Array.length is not configurable\n getLength() {\n return this[RH_INTERNAL_SYMBOL].valuesAvailable.reduce((n, v) => (n + (v ? 
1 : 0)), 0);\n }\n\n [Symbol.iterator]() {\n let index = 0;\n let values = this.values(); // use available values\n\n return {\n next: () => ({\n value: values[index++],\n done: index > values.length\n })\n };\n }\n\n /** \n * Returns an array of [input, output] pairs, in sliceNumber order.\n * Return value is undefined if the input is not an ES5 primitive.\n * @access public\n * @returns {Array<*>}\n */\n entries() {\n return this[RH_INTERNAL_SYMBOL].valuesAvailable.reduce((keyValuePairs, valueAvailable, sliceNumber) => {\n if (valueAvailable) keyValuePairs.push([\n String(this[RH_INTERNAL_SYMBOL].keys[sliceNumber - 1]),\n this[RH_INTERNAL_SYMBOL].values[sliceNumber],\n ]);\n return keyValuePairs;\n }, []);\n }\n\n /** \n * Returns an Object associating input and output values where the inputs are ES5 primitive types.\n * Return value is undefined if the input is not an ES5 primitive.\n * @access public\n * @returns {object}\n */\n fromEntries() {\n return this.entries().reduce((o, [k, v]) => {\n o[k] = v; return o;\n }, {});\n }\n\n /** \n * Return the nth input value/input vector \n * @access public\n * @param {number} n Index in the input set to return the value for.\n * @returns {*} Input set value\n */\n key(n) {\n return this[RH_INTERNAL_SYMBOL].keys[n];\n }\n\n /** \n * Return the value corresponding to the provided key \n * @access public\n * @param {*} key Corresponds to a value in the job's input set.\n * @returns {*} Result corresponding to the input value\n */\n lookupValue(key) {\n // use keys instead of _keys so we only lookup on available results\n let ind = this.keys().indexOf(key);\n return this.values()[ind];\n }\n\n /**\n * Sends request to scheduler to fetch results, the retrieved results will be populated on this object.\n * @param {RangeObject} [rangeObject] - range object to query results\n * @param {string} [emitEvents] - if true, emits a `result` event for new results as they are added to the handle, or emits for all results if set to 'all'\n * @access public\n * @emits Job#resultsUpdated\n */\n async fetch(rangeObject, emitEvents) {\n const range = rangeObject && rehydrateRange(rangeObject);\n\n const job = this[RH_INTERNAL_SYMBOL].job;\n \n // Fetch any existing results\n let ks = await wallet.getId();\n const conn = new protocolV4.Connection(dcpConfig.scheduler.services.resultSubmitter.location, ks);\n\n const { success, payload } = await conn.send('fetchResult', {\n job: job.id,\n owner: job.paymentAccountKeystore.address,\n range,\n }, job.paymentAccountKeystore);\n\n // Unpack results, using fetchURI to decode/fetch result URIs\n await Promise.all(payload.map(async r => {\n if (!r) {\n console.warn(`ResultHandle.fetch: Received result was not defined (${r}), ignoring...`);\n return;\n }\n\n this[RH_INTERNAL_SYMBOL].values[r.slice] = await fetchURI(decodeURIComponent(r.value), dcpConfig.scheduler.location.origin);\n\n if (emitEvents && (emitEvents === 'all' || !this[RH_INTERNAL_SYMBOL].valuesAvailable[r.slice])) {\n job.emit('result', { sliceNumber: r.slice, result: this[RH_INTERNAL_SYMBOL].values[r.slice] });\n }\n this[RH_INTERNAL_SYMBOL].valuesAvailable[r.slice] = true;\n }));\n\n job.emit('resultsUpdated');\n await conn.close(null, true);\n }\n\n async list(rangeObject) {\n const range = rehydrateRange(rangeObject);\n throw new Error(\"ResultHandle.list not implemented\");\n }\n\n async delete(rangeObject) {\n const range = rehydrateRange(rangeObject);\n throw new Error(\"ResultHandle.delete not implemented\");\n }\n\n async stat(rangeObject) {\n const 
range = rehydrateRange(rangeObject);\n throw new Error(\"ResultHandle.stat not implemented\");\n }\n}\n\nObject.assign(exports, {\n ResultHandle,\n});\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/job/result-handle.js?");
4192
+ eval("/**\n * @file job/result-handle.js\n * @author Ryan Rossiter, ryan@kingsds.network\n * @date June 2020\n *\n * The ResultHandle acts as a proxy for a job's results, querying\n * internal results when available or the scheduler when the results\n * are not available locally.\n */\n\nconst { rehydrateRange } = __webpack_require__(/*! ../range-object */ \"./src/dcp-client/range-object.js\");\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst { fetchURI } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\n\nconst RH_INTERNAL_SYMBOL = Symbol('Result Handle Internals');\n// Array methods that don't modify the array, will be applied to the ResultHandle's values object\nconst RH_ARRAY_METHODS = [\n 'slice', 'filter', 'concat', 'find', 'findIndex', 'indexOf', 'map', 'reduce', 'includes', 'toString', 'forEach'\n];\n\n/**\n * This class represents a handle on a job's results. It can be used for accessing the job's results, or for querying the scheduler to fetch results.\n * In addition to the properties and methods of this class, the following standard array methods are also available for accessing the available results: `slice`, `filter`, `concat`, `find`, `findIndex`, `indexOf`, `map`, `reduce`, `includes`, `toString`, and `forEach`.\n * The results can also be accessed by index (`results[i]`), but an error will be thrown if the result for that index is not yet available.\n * @access public\n */\nclass ResultHandle extends Array {\n constructor(job) {\n super();\n\n const { Job, JOB_INTERNAL_SYMBOL } = __webpack_require__(/*! . */ \"./src/dcp-client/job/index.js\");\n if (!(job instanceof Job)) {\n throw new TypeError('ResultHandle must be constructed from a Job object');\n }\n\n /**\n * The length of the available results array.\n * @type {number}\n * @access public\n */\n this.length = 0; // overridden by the proxy, here for JSDoc purposes\n\n this[RH_INTERNAL_SYMBOL] = {\n job,\n keys: job[JOB_INTERNAL_SYMBOL].payloadDetails.data,\n values: job[JOB_INTERNAL_SYMBOL].results,\n valuesAvailable: job[JOB_INTERNAL_SYMBOL].resultsAvailable,\n };\n\n return new Proxy(this, {\n get: (target, name) => {\n if ((typeof name === 'string' || typeof name === 'number') && Number.isInteger(parseFloat(name))) {\n let i = parseFloat(name);\n if (target.isAvailable(i)) {\n return target[RH_INTERNAL_SYMBOL].values[i];\n } else {\n throw new Error(`Result ${i} is not available. 
It has either not been computed or you need to fetch it.`);\n }\n } else if (name === 'length') {\n return target.getLength();\n } else if (name === 'constructor') {\n return Array.constructor;\n } else if (RH_ARRAY_METHODS.includes(name)) {\n let values = target.values();\n return values[name].bind(values);\n } else if (name === RH_INTERNAL_SYMBOL) {\n return target[RH_INTERNAL_SYMBOL];\n } else {\n // only return methods on this class, don't allow access\n // to array methods other than the whitelisted ones\n let p = target.__proto__[name];\n if (typeof p === 'function') {\n return p.bind(target);\n } else {\n return p;\n }\n }\n }\n });\n }\n\n toJSON() {\n return JSON.stringify(this.values());\n }\n\n isAvailable(index) {\n return this[RH_INTERNAL_SYMBOL].valuesAvailable[index];\n }\n\n reset() {\n // quickly empty the values and valuesAvailable array\n this[RH_INTERNAL_SYMBOL].values.length\n = this[RH_INTERNAL_SYMBOL].valuesAvailable.length = 0;\n\n this[RH_INTERNAL_SYMBOL].values.length\n = this[RH_INTERNAL_SYMBOL].valuesAvailable.length\n = this[RH_INTERNAL_SYMBOL].keys.length;\n }\n\n /**\n * Returns an array of input values. Will only return input values that have a completed result available.\n * @access public\n * @returns {Array<*>}\n */\n keys() {\n // Keys can be a RangeObject, faster to iterate over valuesAvailable\n return this[RH_INTERNAL_SYMBOL].valuesAvailable.reduce((keysList, valueAvailable, sliceNumber) => {\n if (valueAvailable) keysList.push(this[RH_INTERNAL_SYMBOL].keys[sliceNumber - 1]);\n return keysList;\n }, []);\n }\n\n /**\n * Returns an array of results. Will only return results that have been received from the scheduler, if only one result is complete the array will contain one value.\n * @access public\n * @returns {Array<*>}\n */\n values() {\n return this[RH_INTERNAL_SYMBOL].values.filter((v, i) => this.isAvailable(i));\n }\n\n // Aliased as length in proxy, can't use getter because Array.length is not configurable\n getLength() {\n return this[RH_INTERNAL_SYMBOL].valuesAvailable.reduce((n, v) => (n + (v ? 
1 : 0)), 0);\n }\n\n [Symbol.iterator]() {\n let index = 0;\n let values = this.values(); // use available values\n\n return {\n next: () => ({\n value: values[index++],\n done: index > values.length\n })\n };\n }\n\n /** \n * Returns an array of [input, output] pairs, in sliceNumber order.\n * Return value is undefined if the input is not an ES5 primitive.\n * @access public\n * @returns {Array<*>}\n */\n entries() {\n return this[RH_INTERNAL_SYMBOL].valuesAvailable.reduce((keyValuePairs, valueAvailable, sliceNumber) => {\n if (valueAvailable) keyValuePairs.push([\n String(this[RH_INTERNAL_SYMBOL].keys[sliceNumber - 1]),\n this[RH_INTERNAL_SYMBOL].values[sliceNumber],\n ]);\n return keyValuePairs;\n }, []);\n }\n\n /** \n * Returns an Object associating input and output values where the inputs are ES5 primitive types.\n * Return value is undefined if the input is not an ES5 primitive.\n * @access public\n * @returns {object}\n */\n fromEntries() {\n return this.entries().reduce((o, [k, v]) => {\n o[k] = v; return o;\n }, {});\n }\n\n /** \n * Return the nth input value/input vector \n * @access public\n * @param {number} n Index in the input set to return the value for.\n * @returns {*} Input set value\n */\n key(n) {\n return this[RH_INTERNAL_SYMBOL].keys[n];\n }\n\n /** \n * Return the value corresponding to the provided key \n * @access public\n * @param {*} key Corresponds to a value in the job's input set.\n * @returns {*} Result corresponding to the input value\n */\n lookupValue(key) {\n // use keys instead of _keys so we only lookup on available results\n let ind = this.keys().indexOf(key);\n if (ind === -1)\n throw new DCPError('Argument passed into the function was not found in input set', 'ERANGE');\n\n return this.values()[ind];\n }\n\n /**\n * Sends request to scheduler to fetch results, the retrieved results will be populated on this object.\n * @param {RangeObject} [rangeObject] - range object to query results\n * @param {string} [emitEvents] - if true, emits a `result` event for new results as they are added to the handle, or emits for all results if set to 'all'\n * @access public\n * @emits Job#resultsUpdated\n */\n async fetch(rangeObject, emitEvents) {\n const range = rangeObject && rehydrateRange(rangeObject);\n\n const job = this[RH_INTERNAL_SYMBOL].job;\n \n // Fetch any existing results\n let ks = await wallet.getId();\n const conn = new protocolV4.Connection(dcpConfig.scheduler.services.resultSubmitter.location, ks);\n\n const { success, payload } = await conn.send('fetchResult', {\n job: job.id,\n owner: job.paymentAccountKeystore.address,\n range,\n }, job.paymentAccountKeystore);\n\n // Unpack results, using fetchURI to decode/fetch result URIs\n await Promise.all(payload.map(async r => {\n if (!r) {\n console.warn(`ResultHandle.fetch: Received result was not defined (${r}), ignoring...`);\n return;\n }\n\n this[RH_INTERNAL_SYMBOL].values[r.slice] = await fetchURI(decodeURIComponent(r.value), dcpConfig.scheduler.location.origin);\n\n if (emitEvents && (emitEvents === 'all' || !this[RH_INTERNAL_SYMBOL].valuesAvailable[r.slice])) {\n job.emit('result', { sliceNumber: r.slice, result: this[RH_INTERNAL_SYMBOL].values[r.slice] });\n }\n this[RH_INTERNAL_SYMBOL].valuesAvailable[r.slice] = true;\n }));\n\n job.emit('resultsUpdated');\n await conn.close(null, true);\n }\n\n async list(rangeObject) {\n const range = rehydrateRange(rangeObject);\n throw new Error(\"ResultHandle.list not implemented\");\n }\n\n async delete(rangeObject) {\n const range = 
rehydrateRange(rangeObject);\n throw new Error(\"ResultHandle.delete not implemented\");\n }\n\n async stat(rangeObject) {\n const range = rehydrateRange(rangeObject);\n throw new Error(\"ResultHandle.stat not implemented\");\n }\n}\n\nObject.assign(exports, {\n ResultHandle,\n});\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/job/result-handle.js?");
4193
4193
 
4194
4194
  /***/ }),
4195
4195
 
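Note on the hunk above (./src/dcp-client/job/result-handle.js): 4.2.8 adds a DCPError import and changes ResultHandle.lookupValue() so that a key missing from the available input set now throws a DCPError with code 'ERANGE' instead of silently returning undefined. Below is a minimal sketch of how calling code might absorb that change; the helper name, the `results` handle, the sample input value, and the assumption that DCPError exposes its second constructor argument as `error.code` are all illustrative, not taken from the package.

    // `results` stands for a ResultHandle from a finished job (for example, one
    // returned by awaiting the job); DCPError is assumed to expose its second
    // constructor argument as `error.code`.
    function printResultForInput(results, inputValue) {
      try {
        // As of 4.2.8 a key that has no available result throws here
        // instead of returning undefined.
        console.log(`output for ${inputValue}:`, results.lookupValue(inputValue));
      } catch (error) {
        if (error.code === 'ERANGE')
          console.warn(`no fetched result for ${inputValue} yet; try results.fetch() first`);
        else
          throw error;
      }
    }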
@@ -4259,7 +4259,7 @@ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_mod
4259
4259
  \*************************************************/
4260
4260
  /***/ ((module, __unused_webpack_exports, __webpack_require__) => {
4261
4261
 
4262
- eval("/**\n * @file /src/schedmsg/schedmsg-web.js\n * @author Ryan Rossiter, ryan@kingsds.network\n * @date March 2020\n *\n * This is the SchedMsg implementation for commands that are browser-specific\n * or have browser-specific behaviour.\n */\n\nconst { SchedMsg } = __webpack_require__(/*! ./schedmsg */ \"./src/dcp-client/schedmsg/schedmsg.js\");\n\nclass SchedMsgWeb extends SchedMsg {\n constructor(worker) {\n super(worker);\n this.modal = null;\n\n this.registerHandler('announce', this.onAnnouncement.bind(this));\n this.registerHandler('openPopup', this.onOpenPopup.bind(this));\n this.registerHandler('reload', this.onReload.bind(this));\n }\n\n onAnnouncement({ message }) {\n if (this.modal) {\n this.modal.close();\n }\n\n this.modal = window.userInterface.alert('Announcement', '' /* subtitle */, message,\n /* onClose */ () => this.modal = null);\n }\n\n onOpenPopup({ href }) {\n window.open(href);\n }\n\n onReload() {\n const hash = window.location.hash;\n\n let newUrl = window.location.href.replace(/#.*/, '');\n newUrl += (newUrl.indexOf('?') === -1 ? '?' : '&');\n newUrl += 'dcp=a97d1d3338ffb45271c76e88ac5c541b96e81039,' + Date.now() + hash;\n\n window.location.replace(newUrl);\n }\n}\n\nObject.assign(module.exports, {\n SchedMsgWeb\n});\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/schedmsg/schedmsg-web.js?");
4262
+ eval("/**\n * @file /src/schedmsg/schedmsg-web.js\n * @author Ryan Rossiter, ryan@kingsds.network\n * @date March 2020\n *\n * This is the SchedMsg implementation for commands that are browser-specific\n * or have browser-specific behaviour.\n */\n\nconst { SchedMsg } = __webpack_require__(/*! ./schedmsg */ \"./src/dcp-client/schedmsg/schedmsg.js\");\n\nclass SchedMsgWeb extends SchedMsg {\n constructor(worker) {\n super(worker);\n this.modal = null;\n\n this.registerHandler('announce', this.onAnnouncement.bind(this));\n this.registerHandler('openPopup', this.onOpenPopup.bind(this));\n this.registerHandler('reload', this.onReload.bind(this));\n }\n\n onAnnouncement({ message }) {\n if (this.modal) {\n this.modal.close();\n }\n\n this.modal = window.userInterface.alert('Announcement', '' /* subtitle */, message,\n /* onClose */ () => this.modal = null);\n }\n\n onOpenPopup({ href }) {\n window.open(href);\n }\n\n onReload() {\n const hash = window.location.hash;\n\n let newUrl = window.location.href.replace(/#.*/, '');\n newUrl += (newUrl.indexOf('?') === -1 ? '?' : '&');\n newUrl += 'dcp=fca68c239b7cda20a34513139c096b25d2576370,' + Date.now() + hash;\n\n window.location.replace(newUrl);\n }\n}\n\nObject.assign(module.exports, {\n SchedMsgWeb\n});\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/schedmsg/schedmsg-web.js?");
4263
4263
 
4264
4264
  /***/ }),
4265
4265
 
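Note on the hunk above (./src/dcp-client/schedmsg/schedmsg-web.js): the only change is the bundle fingerprint embedded in the `dcp=` cache-busting query parameter that onReload() appends before reloading the page. For illustration only, with a hypothetical page URL and fragment, the reload target is built roughly as follows.

    // Hypothetical page: https://example.com/worker#status
    const hash = '#status';
    let newUrl = 'https://example.com/worker';   // href with the fragment stripped
    newUrl += (newUrl.indexOf('?') === -1 ? '?' : '&');
    // 4.2.7 appended dcp=a97d1d3338ffb45271c76e88ac5c541b96e81039,<timestamp>
    // 4.2.8 appends  dcp=fca68c239b7cda20a34513139c096b25d2576370,<timestamp>
    newUrl += 'dcp=fca68c239b7cda20a34513139c096b25d2576370,' + Date.now() + hash;
    console.log(newUrl);
    // e.g. https://example.com/worker?dcp=fca68c239b7cda20a34513139c096b25d2576370,1700000000000#status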
@@ -4393,7 +4393,7 @@ eval("exports.BrowserEvaluator = __webpack_require__(/*! ./browser */ \"./src/dc
4393
4393
  \************************************************************/
4394
4394
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4395
4395
 
4396
- eval("/**\n * @file node-localExec.js Node-specific support for creating/running localExec\n * jobs with the Compute API.\n * @author Wes Garland, wes@kingsds.network\n * @date May 2020\n */\n\n/* global dcpConfig */\n// @ts-nocheck\n\nconst utils = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp-client');\nconst isDebugging = debugging('localExec');\n\nconst debug = (...args) => {\n if (isDebugging) {\n console.debug('dcp-client:localExec', ...args);\n }\n};\n\n/**\n * Factory function which makes NodeEvaluator constructors based on the current configuration.\n */\nexports.nodeEvaluatorFactory = function evaluators$$nodeEvaluatorFactory(inTesting = false)\n{\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n const path = __webpack_require__(/*! path */ \"./node_modules/path-browserify/index.js\");\n var controlCodeFiles; /* Evaluator's control code establishes the sandbox environment and postMessage/onmessage implementation */\n\n if (DCP_ENV.platform !== 'nodejs') {\n throw new Error(`Can't create NodeEvaluator constructor: platform is '${DCP_ENV.platform}'; must be 'nodejs'.`);\n }\n\n /**\n * The `standaloneWorker.js` and `evaluator-node.js` currently resides in\n * `dcp-client` and that this code can be executed from source or from the\n * dcp-client bundle, we need to detect whether we're running in either mode.\n *\n * If we're running from source, `dcp-rtlink` has most likely been initialized\n * so we can resolve `dcp-client` in the `node_modules` folder of the `dcp`\n * repo.\n *\n * Else if we're running from the bundle, `dcp-rtlink` has not been\n * initialized so this code is in the bundle being evaluated in the context of\n * `dcp-client/index.js`. In that case, we need to switch to relative paths to\n * resolve the modules.\n *\n * @todo TODO(bryan-hoang): Improve detection of whether the dcp-rtlink module has\n * been initialized. e.g. Exposed an API through the dcp-rtlink module\n */\n const hasRtlinkBeenInitialized =\n Object.keys(requireNative('module')._cache)\n .map((key) => path.basename(key))\n .indexOf('rtLink.js') !== -1;\n const dcpClientModulePrefix = hasRtlinkBeenInitialized ? 'dcp-client' : '.';\n const { workerFactory } = requireNative(\n `${dcpClientModulePrefix}/lib/standaloneWorker`,\n );\n const { Evaluator } = requireNative(`${dcpClientModulePrefix}/libexec/evaluator-node`);\n \n /**\n * @param socket \"server\" socket for the node evaluator\n */\n function localExecConnectHandler(socket)\n {\n new Evaluator(socket, socket, controlCodeFiles);\n }\n\n /**\n * Constructor for a NodeEvaluator. An instance of this class becomes the SandboxConstructor \n * in the implementation of job::localExec() when running under NodeJS.\n *\n * @todo Eliminate the extra wrapper here. It was previously necessary when we had to\n * capture the factory argument vector in a closure. 
It should be possible now to\n * implement this as a single function.\n */\n function NodeEvaluator()\n {\n const child_process = requireNative('child_process');\n const process = requireNative('process');\n const env = Object.assign({}, process.env, {\n /**\n * Assume the user is okay with the security implications of executing\n * their work locally. Needed to start the script without a warning\n * and delay. All other environment properties are inherited from the\n * calling process.\n */\n I_WANT_AN_INSECURE_DCP_WORKER: 'badly',\n DCP_SCHEDULER_LOCATION: `${dcpConfig.scheduler.location.href}`,\n });\n var command = false;\n var error;\n var pipe = (__webpack_require__(/*! dcp/utils/tmpfiles */ \"./src/utils/tmpfiles.js\").createTempSocket)(undefined, 'localExec-pipe', localExecConnectHandler);\n var options = {\n readStream: pipe,\n writeStream: pipe\n };\n\n /**\n * First, the set up files for a node evaluator are needed, so the output of\n * the `dcp-evaluator-start` script from the `dcp-worker` package will be\n * used to be the authoritative source of the locations of the files.\n */\n try {\n command = requireNative.resolve('dcp-worker/bin/dcp-evaluator-start');\n } catch (e) {\n /* Sometimes require can't resolve this module through `npm link`, even though it is\n * the \"right way\" to resolve a peer dependency, so we try to resolve from the main\n * module's POV also. This makes use of the 'main' section in dcp-worker/package.json.\n *\n * @todo XXXwg Mar 2022: I am pretty sure this isn't right. The top might run a risk of\n * finding old versions from a global or homedir install..?\n */\n error = e;\n debugging('localExec') && console.error('Error locating dcp-evaluator-start, trying again based on main module location', e);\n const moduleModule = requireNative('module');\n const createRequire = moduleModule.createRequire || moduleModule.createRequireFromPath;\n const mainModuleRequire = createRequire(`${requireNative.main.path}/`);\n\n try\n {\n command = mainModuleRequire.resolve('dcp-worker/bin/dcp-evaluator-start');\n } catch (err)\n {\n debugging('localExec') && console.error('Error locating dcp-evaluator-start based on main module location', err);\n }\n }\n\n if (!command)\n throw new DCPError(`Could not initialize dcp-worker/bin/dcp-evaluator-start - missing peer dependency? (${error.code})`, 'ENOWORKER');\n\n /* Ask dcp-evaluator-start for the sandbox setup files for a node evaluator */\n let sandboxType = 'node';\n if (inTesting)\n sandboxType = 'nodeTesting';\n delete env.DCP_DEBUG; /* don't polute stdio pipeline with debug status messages */\n const child = child_process.spawnSync(process.argv0,\n [command, `--sandbox-type=${sandboxType}`, '-FJ' ], { encoding: 'utf8', env });\n\n debug('Raw localExec evaluator output:', child.stdout);\n if (child.error) {\n console.log(\"ERROR: \",child.error);\n }\n\n if (child.status !== 0) {\n console.error('\\n',child.stderr, child.stdout);\n process.exit(child.status)\n }\n\n controlCodeFiles = JSON.parse(child.stdout);\n debug('Read controlCodeFiles:', controlCodeFiles);\n\n return new (workerFactory(options))();\n }\n\n NodeEvaluator.prototype = new EventEmitter('nodeLocalExec');\n return NodeEvaluator;\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/evaluators/node-localExec.js?");
4396
+ eval("/**\n * @file node-localExec.js Node-specific support for creating/running localExec\n * jobs with the Compute API.\n * @author Wes Garland, wes@kingsds.network\n * @date May 2020\n */\n\n/* global dcpConfig */\n// @ts-nocheck\n\nconst utils = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp-client');\nconst isDebugging = debugging('localExec');\n\nconst debug = (...args) => {\n if (isDebugging) {\n console.debug('dcp-client:localExec', ...args);\n }\n};\n\n/**\n * Factory function which makes NodeEvaluator constructors based on the current configuration.\n */\nexports.nodeEvaluatorFactory = function evaluators$$nodeEvaluatorFactory(inTesting = false)\n{\n const { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\n const path = __webpack_require__(/*! path */ \"./node_modules/path-browserify/index.js\");\n var controlCodeFiles; /* Evaluator's control code establishes the sandbox environment and postMessage/onmessage implementation */\n\n if (DCP_ENV.platform !== 'nodejs') {\n throw new Error(`Can't create NodeEvaluator constructor: platform is '${DCP_ENV.platform}'; must be 'nodejs'.`);\n }\n\n /**\n * The `standaloneWorker.js` and `evaluator-node.js` currently resides in\n * `dcp-client` and that this code can be executed from source or from the\n * dcp-client bundle, we need to detect whether we're running in either mode.\n *\n * If we're running from source, `dcp-rtlink` has most likely been initialized\n * so we can resolve `dcp-client` in the `node_modules` folder of the `dcp`\n * repo.\n *\n * Else if we're running from the bundle, `dcp-rtlink` has not been\n * initialized so this code is in the bundle being evaluated in the context of\n * `dcp-client/index.js`. In that case, we need to switch to relative paths to\n * resolve the modules.\n *\n * @todo TODO(bryan-hoang): Improve detection of whether the dcp-rtlink module has\n * been initialized. e.g. Exposed an API through the dcp-rtlink module\n */\n const hasRtlinkBeenInitialized =\n Object.keys(requireNative('module')._cache)\n .some((key) => key.includes('dcp-rtlink'))\n const dcpClientModulePrefix = hasRtlinkBeenInitialized ? 'dcp-client' : '.';\n const { workerFactory } = requireNative(\n `${dcpClientModulePrefix}/lib/standaloneWorker`,\n );\n const { Evaluator } = requireNative(`${dcpClientModulePrefix}/libexec/evaluator-node`);\n \n /**\n * @param socket \"server\" socket for the node evaluator\n */\n function localExecConnectHandler(socket)\n {\n new Evaluator(socket, socket, controlCodeFiles);\n }\n\n /**\n * Constructor for a NodeEvaluator. An instance of this class becomes the SandboxConstructor \n * in the implementation of job::localExec() when running under NodeJS.\n *\n * @todo Eliminate the extra wrapper here. It was previously necessary when we had to\n * capture the factory argument vector in a closure. 
It should be possible now to\n * implement this as a single function.\n */\n function NodeEvaluator()\n {\n const child_process = requireNative('child_process');\n const process = requireNative('process');\n const env = Object.assign({}, process.env, {\n /**\n * Assume the user is okay with the security implications of executing\n * their work locally. Needed to start the script without a warning\n * and delay. All other environment properties are inherited from the\n * calling process.\n */\n I_WANT_AN_INSECURE_DCP_WORKER: 'badly',\n DCP_SCHEDULER_LOCATION: `${dcpConfig.scheduler.location.href}`,\n });\n var command = false;\n var error;\n var pipe = (__webpack_require__(/*! dcp/utils/tmpfiles */ \"./src/utils/tmpfiles.js\").createTempSocket)(undefined, 'localExec-pipe', localExecConnectHandler);\n var options = {\n readStream: pipe,\n writeStream: pipe\n };\n\n /**\n * First, the set up files for a node evaluator are needed, so the output of\n * the `dcp-evaluator-start` script from the `dcp-worker` package will be\n * used to be the authoritative source of the locations of the files.\n */\n try {\n command = requireNative.resolve('dcp-worker/bin/dcp-evaluator-start');\n } catch (e) {\n /* Sometimes require can't resolve this module through `npm link`, even though it is\n * the \"right way\" to resolve a peer dependency, so we try to resolve from the main\n * module's POV also. This makes use of the 'main' section in dcp-worker/package.json.\n *\n * @todo XXXwg Mar 2022: I am pretty sure this isn't right. The top might run a risk of\n * finding old versions from a global or homedir install..?\n */\n error = e;\n debugging('localExec') && console.error('Error locating dcp-evaluator-start, trying again based on main module location', e);\n const moduleModule = requireNative('module');\n const createRequire = moduleModule.createRequire || moduleModule.createRequireFromPath;\n const mainModuleRequire = createRequire(`${requireNative.main.path}/`);\n\n try\n {\n command = mainModuleRequire.resolve('dcp-worker/bin/dcp-evaluator-start');\n } catch (err)\n {\n debugging('localExec') && console.error('Error locating dcp-evaluator-start based on main module location', err);\n }\n }\n\n if (!command)\n throw new DCPError(`Could not initialize dcp-worker/bin/dcp-evaluator-start - missing peer dependency? (${error.code})`, 'ENOWORKER');\n\n /* Ask dcp-evaluator-start for the sandbox setup files for a node evaluator */\n let sandboxType = 'node';\n if (inTesting)\n sandboxType = 'nodeTesting';\n delete env.DCP_DEBUG; /* don't polute stdio pipeline with debug status messages */\n const child = child_process.spawnSync(process.argv0,\n [command, `--sandbox-type=${sandboxType}`, '-FJ' ], { encoding: 'utf8', env });\n\n debug('Raw localExec evaluator output:', child.stdout);\n if (child.error) {\n console.log(\"ERROR: \",child.error);\n }\n\n if (child.status !== 0) {\n console.error('\\n',child.stderr, child.stdout);\n process.exit(child.status)\n }\n\n controlCodeFiles = JSON.parse(child.stdout);\n debug('Read controlCodeFiles:', controlCodeFiles);\n\n return new (workerFactory(options))();\n }\n\n NodeEvaluator.prototype = new EventEmitter('nodeLocalExec');\n return NodeEvaluator;\n}\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/evaluators/node-localExec.js?");
4397
4397
 
4398
4398
  /***/ }),
4399
4399
 
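Note on the hunk above (./src/dcp-client/worker/evaluators/node-localExec.js): the only change is the hasRtlinkBeenInitialized heuristic. Instead of basename-matching 'rtLink.js' against the keys of require('module')._cache, 4.2.8 checks whether any cached path contains 'dcp-rtlink'. A side-by-side sketch of the two predicates over a hypothetical module cache (the paths below are made up):

    const path = require('path');

    // Hypothetical keys of require('module')._cache on a machine where
    // dcp-rtlink has been loaded; real keys are absolute module paths.
    const cachedPaths = [
      '/home/user/app/node_modules/dcp-rtlink/rtLink.js',
      '/home/user/app/index.js',
    ];

    // 4.2.7: look for the exact basename 'rtLink.js'
    const oldCheck = cachedPaths.map((key) => path.basename(key)).indexOf('rtLink.js') !== -1;

    // 4.2.8: look for any cached path that mentions the dcp-rtlink package
    const newCheck = cachedPaths.some((key) => key.includes('dcp-rtlink'));

    console.log(oldCheck, newCheck); // true true for this cache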
@@ -4435,7 +4435,7 @@ eval("/**\n * @file worker/slice.js\n *\n * A wrapper for the slice object retur
4435
4435
  \***************************************************/
4436
4436
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4437
4437
 
4438
- eval("/**\n * @file worker/supervisor-cache.js\n *\n * A cache for the supervisor, anything the supervisor\n * may request that is cacheable (the same every time its requested)\n * can be cached in this class.\n *\n * Currently only jobs and modules are cached.\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * @date May 2019\n */\n\n/* global dcpConfig */\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { justFetch, fetchURI, dumpObject } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\n\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\nconst scopedKvin = new kvin.KVIN({Object: ({}).constructor,\n Array: ([]).constructor, \n Function: (()=>{}).constructor});\n \nclass SupervisorCache extends EventEmitter {\n constructor (supervisor) {\n \n super('SupervisorCache')\n this.supervisor = supervisor;\n this.cache = {\n job: {},\n module: {},\n dependency: {},\n }\n \n this.promises = {\n job: {},\n module: {},\n dependency: {},\n }\n\n this.lastAccess = {}\n for (let key in this.cache) {\n this.lastAccess[key] = {}\n }\n }\n\n /**\n * Returns an object listing what jobs and modules are currently cached.\n * @returns {object} - in the form: { job: [0xgen1, 0xgen2,...], modules: [modGroup1, modGroup2,...] }\n */\n get cacheDescription () {\n let description = {}\n for (let key in this.cache) {\n description[key] = Object.keys(this.cache[key])\n }\n return description\n }\n\n /**\n * Returns an array of all jobId currently cached\n * @returns {Array} all the jobId's in the cache\n */\n get jobs () {\n return Object.keys(this.cache.job);\n }\n\n /**\n * Attempts to look up an item from the cache.\n * If item is found its last access time is updated.\n *\n * @param {string} key - the cache to look in (job or module)\n * @param {string} id - the items identifier (jobAddress or module group name)\n *\n * @returns {any} The value of the stored item or null if nothing is found\n */\n fetch(key, id) {\n if (typeof this.cache[key] === 'undefined') {\n throw new Error(`${key} does not relate to any cache.`);\n }\n if (this.cache[key][id]) {\n this.lastAccess[key][id] = Date.now();\n return this.cache[key][id];\n }\n return null;\n }\n\n /**\n * Stores a fetched value for one of the caches.\n *\n * @param {string} key - the cache to store the item in\n * @param {string} id - the items identifier (job Address or module group name)\n * @param {any} value - the item to store\n *\n * @returns {any} - @value passed in\n */\n store (key, id, value) {\n if (typeof this.cache[key] === 'undefined') {\n throw new Error(`${key} does not relate to any cache.`)\n }\n this.cache[key][id] = value\n this.lastAccess[key][id] = Date.now()\n return value\n }\n\n /**\n * Removes a job or module group from the cache.\n *\n * @param {string} key - the cache to remove the item from (jobs or module groups)\n * @param {string} id - the items identifier (job Address or module group name)\n *\n * @returns {string} - @id passed in\n */\n remove (key, id) {\n if (typeof this.cache[key] === 'undefined') {\n throw new Error(`${key} does not relate to any cache.`)\n }\n delete this.cache[key][id];\n delete 
this.lastAccess[key][id];\n return id;\n }\n\n /** \n * Fetch a job from the job cache. If the job has components which are \n * which need to be fetched over the network, they are fetched before the\n * returned promise is resolved.\n *\n * The job cache is initially populated during fetchTask.\n */\n async fetchJob(address, allowedOrigins) {\n let job = this.fetch('job', address);\n\n if (!job) {\n let e = new Error(`No job in supervisor cache with address ${address}`);\n e.code = 'ENOJOB';\n throw e;\n }\n\n // XXXpfr Excellent tracing.\n if (debugging('worker')) {\n dumpObject(job, 'SupervisorCache.fetchJob: job', 128);\n }\n\n if (!job.workFunction) {\n job.workFunction = await fetchURI(job.codeLocation, allowedOrigins, dcpConfig.worker.allowOrigins.fetchWorkFunctions);\n if(job.requirements.useStrict)\n job.useStrict = true;\n delete job.codeLocation;\n }\n if (!job.arguments) {\n let promises = [];\n let uris = job.argumentsLocation;\n if (uris)\n for (let i = 0; i < uris.length; i++)\n promises.push(fetchURI(uris[i].value, allowedOrigins, dcpConfig.worker.allowOrigins.fetchArguments));\n\n job.arguments = await Promise.all(promises);\n \n // localExec jobs read arguments from a file, so need to ensure they are properly parsed after being read.\n if (this.supervisor.options.localExec && DCP_ENV.platform === 'nodejs')\n job.arguments[0] = scopedKvin.parse(job.arguments[0]);\n\n delete job.argumentsLocation;\n }\n // if job input data is range object, we send the range object URI to the worker\n if (!job.mro && job.MROLocation) {\n job.mro = await fetchURI(job.MROLocation, allowedOrigins, dcpConfig.worker.allowOrigins.fetchWorkFunctions);\n delete job.MROLocation;\n }\n \n return job;\n }\n\n /**\n * Attempts to fetch a module group from the cache and\n * if it's not found it attempts to fetch then store\n * the module group from the package manager.\n *\n * @param {array} modulesArray - the array of modules requested \n * - (when stringified it's the identifier of the module group)\n *\n * @returns {Promise<object>} - the module group\n * @throws when the module group can not be fetched\n */\n async fetchModule(modulesArray) {\n const cacheKey = JSON.stringify(modulesArray);\n let modules = this.fetch('module', cacheKey);\n if (modules !== null) {\n return modules;\n }\n\n if (this.promises.module[cacheKey]) {\n return this.promises.module[cacheKey];\n }\n\n const {\n success,\n payload: responsePayload,\n } = await this.supervisor.packageManagerConnection.send('fetchModule', {\n modules: modulesArray,\n });\n\n if (!success) {\n /**\n * Preserving the error message by not rewrapping it with DCPError incase\n * we want to let clients know which module couldn't be fetched.\n */\n throw responsePayload;\n }\n\n this.promises.module[cacheKey] = responsePayload;\n modules = await this.promises.module[cacheKey];\n delete this.promises.module[cacheKey];\n return this.store('module', cacheKey, modules);\n }\n\n /**\n * Attempts to fetch a dependency from the cache and\n * if it's not found it attempts to fetch then store\n * the dependency from the package manager.\n *\n * @param {string} dependencyUri - The URI of the dependency\n *\n * @returns {Promise<string>} file contents\n * @throws when the dependency can not be fetched\n */\n async fetchDependency(dependencyUri) {\n let dependency = this.fetch('dependency', dependencyUri);\n if (dependency !== null) {\n return dependency;\n }\n\n if (this.promises.dependency[dependencyUri]) {\n return this.promises.dependency[dependencyUri];\n 
}\n\n const url = dcpConfig.packageManager.location.resolve(dependencyUri);\n this.promises.dependency[dependencyUri] = justFetch(url, 'string', 'GET', true);\n\n dependency = await this.promises.dependency[dependencyUri];\n\n delete this.promises.dependency[dependencyUri];\n\n return this.store('dependency', dependencyUri, dependency);\n }\n}\n\nexports.SupervisorCache = SupervisorCache;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor-cache.js?");
4438
+ eval("/**\n * @file worker/supervisor-cache.js\n *\n * A cache for the supervisor, anything the supervisor\n * may request that is cacheable (the same every time its requested)\n * can be cached in this class.\n *\n * Currently only jobs and modules are cached.\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * @date May 2019\n */\n\n/* global dcpConfig */\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { justFetch, fetchURI, dumpObject } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\n\nconst kvin = __webpack_require__(/*! kvin */ \"./node_modules/kvin/kvin.js\");\nconst scopedKvin = new kvin.KVIN({Object: ({}).constructor,\n Array: ([]).constructor, \n Function: (()=>{}).constructor});\n \nclass SupervisorCache extends EventEmitter {\n constructor (supervisor) {\n \n super('SupervisorCache')\n this.supervisor = supervisor;\n this.cache = {\n job: {},\n module: {},\n dependency: {},\n }\n \n this.promises = {\n job: {},\n module: {},\n dependency: {},\n }\n\n this.lastAccess = {}\n for (let key in this.cache) {\n this.lastAccess[key] = {}\n }\n }\n\n /**\n * Returns an object listing what jobs and modules are currently cached.\n * @returns {object} - in the form: { job: [0xgen1, 0xgen2,...], modules: [modGroup1, modGroup2,...] }\n */\n get cacheDescription () {\n let description = {}\n for (let key in this.cache) {\n description[key] = Object.keys(this.cache[key])\n }\n return description\n }\n\n /**\n * Returns an array of all jobId currently cached\n * @returns {Array} all the jobId's in the cache\n */\n get jobs () {\n return Object.keys(this.cache.job);\n }\n\n /**\n * Attempts to look up an item from the cache.\n * If item is found its last access time is updated.\n *\n * @param {string} key - the cache to look in (job or module)\n * @param {string} id - the items identifier (jobAddress or module group name)\n *\n * @returns {any} The value of the stored item or null if nothing is found\n */\n fetch(key, id) {\n if (typeof this.cache[key] === 'undefined') {\n throw new Error(`${key} does not relate to any cache.`);\n }\n if (this.cache[key][id]) {\n this.lastAccess[key][id] = Date.now();\n return this.cache[key][id];\n }\n return null;\n }\n\n /**\n * Stores a fetched value for one of the caches.\n *\n * @param {string} key - the cache to store the item in\n * @param {string} id - the items identifier (job Address or module group name)\n * @param {any} value - the item to store\n *\n * @returns {any} - @value passed in\n */\n store (key, id, value) {\n if (typeof this.cache[key] === 'undefined') {\n throw new Error(`${key} does not relate to any cache.`)\n }\n this.cache[key][id] = value\n this.lastAccess[key][id] = Date.now()\n return value\n }\n\n /**\n * Removes a job or module group from the cache.\n *\n * @param {string} key - the cache to remove the item from (jobs or module groups)\n * @param {string} id - the items identifier (job Address or module group name)\n *\n * @returns {string} - @id passed in\n */\n remove (key, id) {\n if (typeof this.cache[key] === 'undefined') {\n throw new Error(`${key} does not relate to any cache.`)\n }\n delete this.cache[key][id];\n delete 
this.lastAccess[key][id];\n return id;\n }\n\n /** \n * Fetch a job from the job cache. If the job has components which are \n * which need to be fetched over the network, they are fetched before the\n * returned promise is resolved.\n *\n * The job cache is initially populated during fetchTask.\n */\n async fetchJob(address, allowedOrigins) {\n let job = this.fetch('job', address);\n\n if (!job) {\n let e = new Error(`No job in supervisor cache with address ${address}`);\n e.code = 'ENOJOB';\n throw e;\n }\n\n // XXXpfr Excellent tracing.\n if (debugging('worker')) {\n dumpObject(job, 'SupervisorCache.fetchJob: job', 128);\n }\n\n if (!job.workFunction) {\n job.workFunction = await fetchURI(job.codeLocation, allowedOrigins, dcpConfig.worker.allowOrigins.fetchWorkFunctions);\n if(job.requirements.useStrict)\n job.useStrict = true;\n delete job.codeLocation;\n }\n if (!job.arguments) {\n let promises = [];\n let uris = job.argumentsLocation;\n if (uris)\n for (let i = 0; i < uris.length; i++)\n promises.push(fetchURI(uris[i].value, allowedOrigins, dcpConfig.worker.allowOrigins.fetchArguments));\n\n job.arguments = await Promise.all(promises);\n \n // node localExec jobs read arguments from a file, so need to ensure they are properly parsed after being read.\n if (this.supervisor.options.localExec && !DCP_ENV.isBrowserPlatform)\n job.arguments = scopedKvin.parse(job.arguments[0]);\n\n delete job.argumentsLocation;\n }\n // if job input data is range object, we send the range object URI to the worker\n if (!job.mro && job.MROLocation) {\n job.mro = await fetchURI(job.MROLocation, allowedOrigins, dcpConfig.worker.allowOrigins.fetchWorkFunctions);\n delete job.MROLocation;\n }\n \n return job;\n }\n\n /**\n * Attempts to fetch a module group from the cache and\n * if it's not found it attempts to fetch then store\n * the module group from the package manager.\n *\n * @param {array} modulesArray - the array of modules requested \n * - (when stringified it's the identifier of the module group)\n *\n * @returns {Promise<object>} - the module group\n * @throws when the module group can not be fetched\n */\n async fetchModule(modulesArray) {\n const cacheKey = JSON.stringify(modulesArray);\n let modules = this.fetch('module', cacheKey);\n if (modules !== null) {\n return modules;\n }\n\n if (this.promises.module[cacheKey]) {\n return this.promises.module[cacheKey];\n }\n\n const {\n success,\n payload: responsePayload,\n } = await this.supervisor.packageManagerConnection.send('fetchModule', {\n modules: modulesArray,\n });\n\n if (!success) {\n /**\n * Preserving the error message by not rewrapping it with DCPError incase\n * we want to let clients know which module couldn't be fetched.\n */\n throw responsePayload;\n }\n\n this.promises.module[cacheKey] = responsePayload;\n modules = await this.promises.module[cacheKey];\n delete this.promises.module[cacheKey];\n return this.store('module', cacheKey, modules);\n }\n\n /**\n * Attempts to fetch a dependency from the cache and\n * if it's not found it attempts to fetch then store\n * the dependency from the package manager.\n *\n * @param {string} dependencyUri - The URI of the dependency\n *\n * @returns {Promise<string>} file contents\n * @throws when the dependency can not be fetched\n */\n async fetchDependency(dependencyUri) {\n let dependency = this.fetch('dependency', dependencyUri);\n if (dependency !== null) {\n return dependency;\n }\n\n if (this.promises.dependency[dependencyUri]) {\n return this.promises.dependency[dependencyUri];\n }\n\n 
const url = dcpConfig.packageManager.location.resolve(dependencyUri);\n this.promises.dependency[dependencyUri] = justFetch(url, 'string', 'GET', true);\n\n dependency = await this.promises.dependency[dependencyUri];\n\n delete this.promises.dependency[dependencyUri];\n\n return this.store('dependency', dependencyUri, dependency);\n }\n}\n\nexports.SupervisorCache = SupervisorCache;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor-cache.js?");
4439
4439
 
4440
4440
  /***/ }),
4441
4441
 
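Note on the hunk above (./src/dcp-client/worker/supervisor-cache.js): two things change in the localExec branch of SupervisorCache.fetchJob(). The platform guard becomes !DCP_ENV.isBrowserPlatform rather than an explicit platform === 'nodejs' check, and the KVIN-parsed payload now replaces job.arguments wholesale instead of only its first element. Below is a sketch of the second change, using JSON.parse as a stand-in for scopedKvin.parse and a hypothetical serialized argument vector.

    // Hypothetical result of fetching job.argumentsLocation: a single file
    // whose contents are a serialized argument vector.
    const parse = (s) => JSON.parse(s);   // stand-in for scopedKvin.parse()
    const fetched = ['["--fast",3]'];

    // 4.2.7: only element 0 was replaced, leaving the parsed vector nested
    const oldArguments = fetched.slice();
    oldArguments[0] = parse(oldArguments[0]);
    console.log(oldArguments);            // [ [ '--fast', 3 ] ]

    // 4.2.8: the parsed vector becomes job.arguments itself
    const newArguments = parse(fetched[0]);
    console.log(newArguments);            // [ '--fast', 3 ]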
@@ -4446,7 +4446,7 @@ eval("/**\n * @file worker/supervisor-cache.js\n *\n * A cache for the superviso
4446
4446
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4447
4447
 
4448
4448
  "use strict";
4449
- eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file worker/supervisor.js\n *\n * The component that controls each of the sandboxes\n * and distributes work to them. Also communicates with the\n * scheduler to fetch said work.\n *\n * The supervisor readies sandboxes before/while fetching slices.\n * This means sometimes there are extra instantiated WebWorkers\n * that are idle (in this.readiedSandboxes). Readied sandboxes can\n * be used for any slice. After a readied sandbox is given a slice\n * it becomes assigned to slice's job and can only do work\n * for that job.\n *\n * After a sandbox completes its work, the sandbox becomes cached\n * and can be reused if another slice with a matching job is fetched.\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * @date May 2019\n */\n\n/* global dcpConfig */\n// @ts-check\n\n\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst hash = __webpack_require__(/*! dcp/common/hash */ \"./src/common/hash.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { Sandbox, SandboxError } = __webpack_require__(/*! ./sandbox */ \"./src/dcp-client/worker/sandbox.js\");\nconst { Slice, SLICE_STATUS_UNASSIGNED, SLICE_STATUS_FAILED } = __webpack_require__(/*! ./slice */ \"./src/dcp-client/worker/slice.js\");\nconst { SupervisorCache } = __webpack_require__(/*! ./supervisor-cache */ \"./src/dcp-client/worker/supervisor-cache.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst { localStorage } = __webpack_require__(/*! dcp/common/dcp-localstorage */ \"./src/common/dcp-localstorage.js\");\nconst { booley, encodeDataURI, makeDataURI, leafMerge, a$sleepMs, justFetch, compressJobArray, toJobMap,\n compressSandboxes, compressSlices, truncateAddress, dumpSandboxesIfNotUnique, dumpSlicesIfNotUnique, generateOpaqueId } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { sliceStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { calculateJoinHash } = __webpack_require__(/*! dcp/dcp-client/compute-groups */ \"./src/dcp-client/compute-groups/index.js\");\nconst RingBuffer = __webpack_require__(/*! dcp/utils/ringBuffer */ \"./src/utils/ringBuffer.js\");\nconst supervisorTuning = dcpConfig.future('worker.tuning');\nconst tuning = {\n watchdogInterval: 7, /**< seconds - time between fetches when ENOTASK(? 
/wg nov 2019) */\n minSandboxStartDelay: 0.1, /**< seconds - minimum time between WebWorker starts */\n maxSandboxStartDelay: 0.7, /**< seconds - maximum delay time between WebWorker starts */\n ...supervisorTuning\n};\n\n/** Make timers 10x slower when running in niim */\nlet timeDilation = 1;\nif (DCP_ENV.platform === 'nodejs') {\n /** Make timers 10x slower when running in niim */\n timeDilation = (requireNative('module')._cache.niim instanceof requireNative('module').Module) ? 10 : 1;\n}\n\ndcpConfig.future('worker.sandbox', { progressReportInterval: (5 * 60 * 1000) });\nconst sandboxTuning = dcpConfig.worker.sandbox;\n\n/**\n * @typedef {*} address\n * @typedef {*} opaqueId\n */\n\n/**\n * @typedef {object} SandboxSlice\n * @property {Sandbox} sandbox\n * @property {Slice} slice\n */\n\n/**\n * @typedef {object} Signature\n * @property {Uint8Array} r\n * @property {Uint8Array} s\n * @property {Uint8Array} v\n */\n\n/**\n * @typedef {object} SignedAuthorizationMessageObject\n * @property {object} auth\n * @property {Signature} signature\n * @property {module:dcp/wallet.Address} owner\n */\n\n/** @typedef {import('.').Worker} Worker */\n/** @typedef {import('.').SupervisorOptions} SupervisorOptions */\n\nclass Supervisor extends EventEmitter {\n /**\n * @constructor\n * @param {Worker} worker\n * @param {SupervisorOptions} options\n */\n constructor (worker, options={}) {\n super('Supervisor');\n\n /** @type {Worker} */\n this.worker = worker;\n\n /** @type {Sandbox[]} */\n this.sandboxes = [];\n\n /** @type {Sandbox[]} */\n this.readiedSandboxes = [];\n\n /** @type {Sandbox[]} */\n this.assignedSandboxes = [];\n\n /** @type {Slice[]} */\n this.slices = [];\n\n /** @type {Slice[]} */\n this.queuedSlices = [];\n\n /** @type {Slice[]} */\n this.lostSlices = [];\n\n /** @type {boolean} */\n this.matching = false;\n\n /** @type {boolean} */\n this.isFetchingNewWork = false;\n\n /** @type {number} */\n this.numberOfCoresReserved = 0;\n\n /** @type {number} */\n this.addressTruncationLength = 20; // Set to -1 for no truncation.\n\n /** @type {Object[]} */\n this.rejectedJobs = [];\n this.rejectedJobReasons = [];\n\n if (!options) {\n console.error('Supervisor Options', options, new Error().stack);\n options = {};\n }\n\n /** @type {object} */\n this.options = {\n jobAddresses: options.jobAddresses || [/* all jobs unless priorityOnly */],\n ...options,\n };\n\n const { paymentAddress, identityKeystore } = options;\n if (paymentAddress) {\n if (paymentAddress instanceof wallet.Keystore) {\n this.paymentAddress = paymentAddress.address;\n } else {\n this.paymentAddress = new wallet.Address(paymentAddress);\n }\n } else {\n this.paymentAddress = null;\n }\n\n this._identityKeystore = identityKeystore;\n\n // In localExec, do not allow work function or arguments to come from the 'any' origins\n this.allowedOrigins = []\n if (this.options.localExec)\n {\n dcpConfig.worker.allowOrigins.fetchData = dcpConfig.worker.allowOrigins.fetchData.concat(dcpConfig.worker.allowOrigins.any)\n dcpConfig.worker.allowOrigins.sendResults = dcpConfig.worker.allowOrigins.sendResults.concat(dcpConfig.worker.allowOrigins.any)\n }\n else\n this.allowedOrigins = dcpConfig.worker.allowOrigins.any;\n\n if(options.allowedOrigins && options.allowedOrigins.length > 0)\n this.allowedOrigins = options.allowedOrigins.concat(this.allowedOrigins);\n\n /**\n * Maximum sandboxes allowed to work at a given time.\n * @type {number}\n */\n this.maxWorkingSandboxes = options.maxWorkingSandboxes || 1;\n\n /** @type {number} */\n 
this.defaultMaxGPUs = 1;\n // this.GPUsAssigned = 0;\n \n // Object.defineProperty(this, 'GPUsAssigned', {\n // get: () => this.allocatedSandboxes.filter(sb => !!sb.requiresGPU).length,\n // enumerable: true,\n // configurable: false,\n // });\n\n /**\n * TODO: Remove this when the supervisor sends all of the sandbox\n * capabilities to the scheduler when fetching work.\n * @type {object}\n */\n this.capabilities = null;\n\n /** @type {number} */\n this.lastProgressReport = 0;\n\n /** \n * An N-slot ring buffer of job addresses. Stores all jobs that have had no more than 1 slice run in the ring buffer.\n * Required for the implementation of discrete jobs \n * @type {RingBuffer} \n */\n this.ringBufferofJobs = new RingBuffer(200); // N = 200 should be more than enough.\n \n // @hack - dcp-env.isBrowserPlatform is not set unless the platform is _explicitly_ set,\n // using the default detected platform doesn't set it.\n // Fixing that causes an error in the wallet module's startup on web platform, which I\n // probably can't fix in a reasonable time this morning.\n // ~ER2020-02-20\n\n if (!options.maxWorkingSandboxes\n && DCP_ENV.browserPlatformList.includes(DCP_ENV.platform)\n && navigator.hardwareConcurrency > 1) {\n this.maxWorkingSandboxes = navigator.hardwareConcurrency - 1;\n if (typeof navigator.userAgent === 'string') {\n if (/(Android).*(Chrome|Chromium)/.exec(navigator.userAgent)) {\n this.maxWorkingSandboxes = 1;\n console.log('Doing work with Chromimum browsers on Android is currently limited to one sandbox');\n }\n }\n }\n\n /** @type {SupervisorCache} */\n this.cache = new SupervisorCache(this);\n /** @type {object} */\n this._connections = {}; /* active DCPv4 connections */\n // Call the watchdog every 7 seconds.\n this.watchdogInterval = setInterval(() => this.watchdog(), tuning.watchdogInterval * 1000);\n\n const ceci = this;\n\n // Initialize to null so these properties are recognized for the Supervisor class\n this.taskDistributorConnection = null;\n this.eventRouterConnection = null;\n this.resultSubmitterConnection = null;\n this.packageManagerConnection = null;\n this.openTaskDistributorConn = function openTaskDistributorConn()\n {\n let config = dcpConfig.scheduler.services.taskDistributor;\n ceci.taskDistributorConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'taskDistributor'));\n ceci.taskDistributorConnection.on('close', ceci.openTaskDistributorConn);\n }\n\n this.openEventRouterConn = function openEventRouterConn()\n {\n let config = dcpConfig.scheduler.services.eventRouter;\n ceci.eventRouterConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'eventRouter'));\n ceci.eventRouterConnection.on('close', ceci.openEventRouterConn);\n if (ceci.eventRouterMessageQueue.length)\n ceci.resendRejectedMessages(ceci.eventRouterConnection, ceci.eventRouterMessageQueue);\n }\n this.eventRouterMessageQueue = [];\n \n this.openResultSubmitterConn = function openResultSubmitterConn()\n {\n let config = dcpConfig.scheduler.services.resultSubmitter;\n ceci.resultSubmitterConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'resultSubmitter'));\n ceci.resultSubmitterConnection.on('close', ceci.openResultSubmitterConn);\n if (ceci.resultSubmitterMessageQueue.length)\n ceci.resendRejectedMessages(ceci.resultSubmitterConnection, ceci.resultSubmitterMessageQueue);\n }\n this.resultSubmitterMessageQueue = [];\n\n 
this.openPackageManagerConn = function openPackageManagerConn()\n {\n let config = dcpConfig.packageManager;\n ceci.packageManagerConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'packageManager'));\n ceci.packageManagerConnection.on('close', ceci.openPackageManagerConn);\n if (ceci.packageManagerMessageQueue.length)\n ceci.resendRejectedMessages(ceci.packageManagerConnection, ceci.packageManagerMessageQueue);\n }\n this.packageManagerMessageQueue = [];\n }\n\n /**\n * Return worker opaqueId.\n * @type {opaqueId}\n */\n get workerOpaqueId() {\n if (!this._workerOpaqueId)\n this._workerOpaqueId = localStorage.getItem('workerOpaqueId');\n\n if (!this._workerOpaqueId || this._workerOpaqueId.length !== constants.workerIdLength) {\n this._workerOpaqueId = generateOpaqueId();\n localStorage.setItem('workerOpaqueId', this._workerOpaqueId);\n }\n\n return this._workerOpaqueId;\n }\n\n /**\n * This getter is the absolute source-of-truth for what the\n * identity keystore is for this instance of the Supervisor.\n */\n get identityKeystore() {\n assert(this.defaultIdentityKeystore);\n\n return this._identityKeystore || this.defaultIdentityKeystore;\n }\n\n /**\n * Open all connections. Used when supervisor is instantiated or stopped/started\n * to initially open connections.\n */\n instantiateAllConnections() {\n if (!this.taskDistributorConnection)\n this.openTaskDistributorConn();\n \n if (!this.eventRouterConnection)\n this.openEventRouterConn();\n \n if (!this.resultSubmitterConnection)\n this.openResultSubmitterConn();\n\n if (!this.packageManagerConnection)\n this.openPackageManagerConn();\n }\n \n /**\n * Try sending messages that were rejected on an old instance of the given connection.\n */\n async resendRejectedMessages(connection, messageQueue) {\n var message = messageQueue.shift();\n\n do {\n try {\n await connection.send(message.operation, message.data);\n } catch (e) {\n debugging('supervisor') && console.error(`Failed to send message ${message.operation} to scheduler: ${e}. Will try again on a new connection.`);\n messageQueue.unshift(message);\n connection.close();\n break;\n }\n message = messageQueue.shift();\n } while (message)\n }\n\n /** Set the default identity keystore -- needs to happen before anything that talks\n * to the scheduler for work gets called. This is a wart and should be removed by\n * refactoring.\n *\n * The default identity keystore will be used if the Supervisor was not provided\n * with an alternate. This keystore will be located via the Wallet API, and \n * if not found, a randomized default identity will be generated. 
\n *\n * @param {object} ks An instance of wallet::Keystore -- if undefined, we pick the best default we can.\n * @returns {Promise<void>}\n */\n async setDefaultIdentityKeystore(ks) {\n try {\n if (ks) {\n this.defaultIdentityKeystore = ks;\n return;\n }\n\n if (this.defaultIdentityKeystore)\n return;\n\n try {\n this.defaultIdentityKeystore = await wallet.getId();\n } catch(e) {\n debugging('supervisor') && console.debug('supervisor: generating default identity', this.defaultIdentityKeystore.address);\n this.defaultIdentityKeystore = await new wallet.IdKeystore(null, '');\n }\n } finally {\n debugging('supervisor') && console.debug('supervisor: set default identity =', this.defaultIdentityKeystore.address);\n }\n }\n\n //\n // What follows is a bunch of utility properties and functions for creating filtered views\n // of the slices and sandboxes array.\n //\n /** XXXpfr @todo Write sort w/o using promises so we can get rid of async on all the compress functions. */\n\n /**\n * @deprecated -- Please do not use this.workingSandboxes; use this.allocatedSandboxes instead.\n * Sandboxes that are in WORKING state.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Sandbox[]}\n */\n get workingSandboxes() {\n return this.sandboxes.filter(sandbox => sandbox.isWorking);\n }\n\n /**\n * Use instead of this.workingSandboxes.\n *\n * When a sandbox is paired with a slice, execution is pending and sandbox.allocated=true and\n * sandbox.slice=slice and sandbox.jobAddress=slice.jobAddress. This is what 'allocated' means.\n * Immediately upon the exit of sandbox.work, sandbox.allocated=false is set and if an exception\n * wasn't thrown the sandbox is placed in this.assignedSandboxes.\n * Thus from the pov of supervisor, this.allocatedSandboxes is deterministic and this.workingSandboxes is not.\n * Please try to not use this.workingSandboxes. 
It is deprecated.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Sandbox[]}\n */\n get allocatedSandboxes() {\n return this.sandboxes.filter(sandbox => sandbox.allocated);\n }\n\n /**\n * Slices that are allocated.\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Slice[]}\n */\n get allocatedSlices() {\n return this.slices.filter(slice => slice.allocated);\n }\n\n /**\n * This property is used as the target number of sandboxes to be associated with slices and start working.\n *\n * It is used in this.watchdog as to prevent a call to this.work when unallocatedSpace <= 0.\n * It is also used in this.distributeQueuedSlices where it is passed as an argument to this.matchSlicesWithSandboxes to indicate how many sandboxes\n * to associate with slices and start working.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {number}\n */\n get unallocatedSpace() {\n return this.maxWorkingSandboxes - this.allocatedSandboxes.length - this.numberOfCoresReserved;\n }\n \n /**\n * Call acquire(numberOfCoresToReserve) to reserve numberOfCoresToReserve unallocated sandboxes as measured by unallocatedSpace.\n * Call release() to undo the previous acquire.\n * This pseudo-mutex technique helps prevent races in scheduling slices in Supervisor.\n * @param {number} numberOfCoresToReserve\n */\n acquire(numberOfCoresToReserve) { \n this.numberOfCoresReserved = numberOfCoresToReserve; \n }\n release() { \n this.numberOfCoresReserved = 0; \n }\n\n /**\n * Remove from this.slices.\n * @param {Slice} slice\n */\n removeSlice(slice) {\n this.removeElement(this.slices, slice);\n if (Supervisor.debugBuild) {\n if (this.queuedSlices.indexOf(slice) !== -1)\n throw new Error(`removeSlice: slice ${slice.identifier} is in queuedSlices; inconsistent state.`);\n if (this.lostSlices.length > 0) {\n console.warn(`removeSlice: slice ${slice.identifier}, found lostSlices ${this.lostSlices.map(s => s.identifier)}`);\n if (this.lostSlices.indexOf(slice) !== -1)\n throw new Error(`removeSlice: slice ${slice.identifier} is in lostSlices; inconsistent state.`);\n }\n }\n }\n\n /**\n * Remove from this.slices.\n * @param {Slice[]} slices\n */\n removeSlices(slices) {\n this.slices = this.slices.filter(slice => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove from this.queuedSlices.\n * @param {Slice[]} slices\n */\n removeQueuedSlices(slices) {\n this.queuedSlices = this.queuedSlices.filter(slice => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove from this.sandboxes, this.assignedSandboxes and this.readiedSandboxes.\n * @param {Sandbox} sandbox\n */\n removeSandbox(sandbox) {\n debugging('scheduler') && console.log(`removeSandbox ${sandbox.identifier}`);\n this.removeElement(this.sandboxes, sandbox);\n this.removeElement(this.assignedSandboxes, sandbox);\n\n // XXXpfr: April 13, 2022\n // I'm trying to understand and control when sandboxes get removed.\n // A sandbox in this.readiedSandboxes should never have returnSandbox/removeSandbox called on it except in stopWork.\n // Because of races and random worker crashes, it is hard to get this right, but I want to try.\n // If I don't fix this is the next 30 days or I forget, please delete this exception.\n if (false)\n {}\n\n this.removeElement(this.readiedSandboxes, sandbox);\n }\n\n /**\n * Remove from this.sandboxes and this.assignedSandboxes .\n * @param {Sandbox[]} 
sandboxes\n */\n async removeSandboxes(sandboxes) {\n debugging('scheduler') && console.log(`removeSandboxes: Remove ${sandboxes.length} sandboxes ${await this.dumpSandboxes(sandboxes)}`);\n this.sandboxes = this.sandboxes.filter(sandbox => sandboxes.indexOf(sandbox) === -1);\n this.assignedSandboxes = this.assignedSandboxes.filter(sandbox => sandboxes.indexOf(sandbox) === -1);\n\n if (Supervisor.debugBuild) {\n const readied = this.readiedSandboxes.filter(sandbox => sandboxes.indexOf(sandbox) !== -1);\n if (readied.length > 0)\n throw new Error(`removeSandboxes: sandboxes ${readied.map(s => s.identifier)} are in readiedSandboxes; inconsistent state.`);\n }\n }\n\n /**\n * Remove element from theArray.\n * @param {Array<*>} theArray\n * @param {object|number} element\n * @param {boolean} [assertExists = true]\n */\n removeElement(theArray, element, assertExists = false) {\n let index = theArray.indexOf(element);\n assert(index !== -1 || !assertExists);\n if (index !== -1) theArray.splice(index, 1);\n }\n\n /**\n * Log sliceArray.\n * @param {Slice[]} sliceArray\n * @param {string} [header]\n * @returns {Promise<string>}\n */\n async dumpSlices(sliceArray, header) {\n if (header) console.log(`\\n${header}`);\n return compressSlices(sliceArray, this.addressTruncationLength);\n }\n\n /**\n * Log sandboxArray.\n * @param {Sandbox[]} sandboxArray\n * @param {string} [header]\n * @returns {Promise<string>}\n */\n async dumpSandboxes(sandboxArray, header) {\n if (header) console.log(`\\n${header}`);\n return compressSandboxes(sandboxArray, this.addressTruncationLength);\n }\n\n /**\n * If the elements of sandboxSliceArray are not unique, log the duplicates and dump the array.\n * @param {SandboxSlice[]} sandboxSliceArray\n * @param {string} header\n */\n dumpSandboxSlicesIfNotUnique(sandboxSliceArray, header) {\n if (!this.isUniqueSandboxSlices(sandboxSliceArray, header))\n console.log(this.dumpSandboxSlices(sandboxSliceArray));\n }\n\n /**\n * Log { sandbox, slice }.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @returns {string}\n */\n dumpSandboxAndSlice(sandbox, slice) {\n return `${sandbox.id}~${slice.sliceNumber}.${this.dumpJobAddress(slice.jobAddress)}`;\n }\n\n /**\n * Log { sandbox, slice } with state/status.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @returns {string}\n */\n dumpStatefulSandboxAndSlice(sandbox, slice) {\n return `${sandbox.id}.${sandbox.state}~${slice.sliceNumber}.${this.dumpJobAddress(slice.jobAddress)}.${slice.status}`;\n }\n\n /**\n * Truncates jobAddress.toString() to this.addressTruncationLength digits.\n * @param {address} jobAddress\n * @returns {string}\n */\n dumpJobAddress(jobAddress) {\n return truncateAddress(jobAddress, this.addressTruncationLength /* digits*/);\n }\n\n /**\n * Dump sandboxSliceArray.\n * @param {SandboxSlice[]} sandboxSliceArray - input array of { sandbox, slice }\n * @param {string} [header] - optional header\n * @param {boolean} [stateFul] - when true, also includes slice.status and sandbox.state.\n * @returns {string}\n */\n dumpSandboxSlices(sandboxSliceArray, header, stateFul=false) {\n if (header) console.log(`\\n${header}`);\n const jobMap = {};\n sandboxSliceArray.forEach(ss => {\n const sss = stateFul ? 
`${ss.sandbox.id}.${ss.sandbox.state}~${ss.slice.sliceNumber}.${ss.slice.status}` : `${ss.sandbox.id}~${ss.slice.sliceNumber}`;\n if (!jobMap[ss.slice.jobAddress]) jobMap[ss.slice.jobAddress] = sss;\n else jobMap[ss.slice.jobAddress] += `,${sss}`;\n });\n let output = '';\n for (const [jobAddress, sss] of Object.entries(jobMap))\n output += `${this.dumpJobAddress(jobAddress)}:[${sss}]:`;\n return output;\n }\n\n /**\n * Check sandboxSliceArray for duplicates.\n * @param {SandboxSlice[]} sandboxSliceArray\n * @param {string} [header]\n * @param {function} [log]\n * @returns {boolean}\n */\n isUniqueSandboxSlices(sandboxSliceArray, header, log) {\n const result = [], slices = [], sandboxes = [];\n let once = true;\n sandboxSliceArray.forEach(x => {\n const sliceIndex = slices.indexOf(x.slice);\n const sandboxIndex = sandboxes.indexOf(x.sandbox);\n\n if (sandboxIndex >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x.sandbox) : console.log(`\\tWarning: Found duplicate sandbox ${x.sandbox.identifier}.`);\n } else sandboxes.push(x.sandbox);\n\n if (sliceIndex >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x.slice) : console.log(`\\tWarning: Found duplicate slice ${x.slice.identifier}.`);\n } else {\n slices.push(x.slice);\n if (sandboxIndex < 0) result.push(x);\n }\n });\n return sandboxSliceArray.length === result.length;\n }\n\n /**\n * Attempts to create and start a given number of sandboxes.\n * The sandboxes that are created can then be assigned for a\n * specific job at a later time. All created sandboxes\n * get put into the @this.readiedSandboxes array when allocateLocalSandboxes is false.\n *\n * @param {number} numSandboxes - the number of sandboxes to create\n * @param {boolean} [allocateLocalSandboxes=false] - when true, do not place in this.readiedSandboxes\n * @returns {Promise<Sandbox[]>} - resolves with array of created sandboxes, rejects otherwise\n * @throws when given a numSandboxes is not a number or if numSandboxes is Infinity\n */\n async readySandboxes (numSandboxes, allocateLocalSandboxes = false) {\n debugging('supervisor') && console.debug(`readySandboxes: Readying ${numSandboxes} sandboxes, total sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n \n if (typeof numSandboxes !== 'number' || Number.isNaN(numSandboxes) || numSandboxes === Infinity) {\n throw new Error(`${numSandboxes} is not a number of sandboxes that can be readied.`);\n }\n if (numSandboxes <= 0) {\n return [];\n }\n\n const sandboxStartPromises = [];\n const sandboxes = [];\n const errors = [];\n for (let i = 0; i < numSandboxes; i++) {\n const sandbox = new Sandbox(this.cache, {\n ...this.options.sandboxOptions,\n }, this.allowedOrigins);\n sandbox.addListener('ready', () => this.emit('sandboxReady', sandbox));\n sandbox.addListener('start', () => {\n this.emit('sandboxStart', sandbox);\n\n // When sliceNumber == 0, result-submitter status skips the slice,\n // so don't send it in the first place.\n // The 'start' event is fired when a worker starts up, hence there's no way\n // to determine whether sandbox has a valid slice without checking.\n if (sandbox.slice) {\n const jobAddress = sandbox.jobAddress;\n const sliceNumber = sandbox.slice.sliceNumber;\n // !authorizationMessage <==> sliceNumber === 0.\n const authorizationMessage = sandbox.slice.getAuthorizationMessage();\n\n if (authorizationMessage) {\n let statusPayload = {\n worker: this.workerOpaqueId,\n slices: [{\n 
job: jobAddress,\n sliceNumber: sliceNumber,\n status: 'begin',\n authorizationMessage,\n }],\n }\n \n this.resultSubmitterConnection.send('status', statusPayload).catch((error) => {\n debugging('supervisor') && console.error(`Error sending 'status' for slice ${sliceNumber} of job ${jobAddress}:\\n ${error}\\nWill try again on a new connection`);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: statusPayload });\n this.resultSubmitterConnection.close();\n });\n }\n }\n });\n sandbox.addListener('workEmit', ({ eventName, payload }) => {\n // Need to check if the sandbox hasn't been assigned a slice yet.\n if (!sandbox.slice) {\n if (Supervisor.debugBuild) {\n console.error(\n `Sandbox not assigned a slice before sending workEmit message to scheduler. 'workEmit' event originates from \"${eventName}\" event`, \n payload,\n );\n }\n }\n else\n {\n const jobAddress = sandbox.slice.jobAddress;\n const sliceNumber = sandbox.slice.sliceNumber;\n // sliceNumber can be zero if it came from a problem with loading modules.\n assert(jobAddress && (sliceNumber || sliceNumber === 0));\n // Send a work emit message from the sandbox to the event router\n // !authorizationMessage <==> sliceNumber === 0.\n let authorizationMessage;\n try {\n // Sometimes a sliceNumber===0 workEmit comes in before the client bundle is properly loaded.\n // Also happens with minor dcp-client version mismatches.\n authorizationMessage = sandbox.slice.getAuthorizationMessage();\n } catch(e) {\n authorizationMessage = null;\n }\n\n if (!authorizationMessage)\n {\n console.warn(`workEmit: missing authorization message for job ${jobAddress}, slice: ${sliceNumber}`);\n return Promise.resolve();\n }\n \n let workEmitPayload = {\n eventName,\n payload,\n job: jobAddress,\n slice: sliceNumber,\n worker: this.workerOpaqueId,\n authorizationMessage,\n }\n \n const workEmitPromise = this.eventRouterConnection.send('workEmit', workEmitPayload).catch(error => {\n debugging('supervisor') && console.warn(`workEmit: unable to send ${eventName} for slice ${sliceNumber} of job ${jobAddress}: ${error.message}.\\nTrying again on a new connection.`);\n this.eventRouterMessageQueue.push({ operation: 'workEmit', data: workEmitPayload })\n this.eventRouterConnection.close();\n if (Supervisor.debugBuild)\n console.error('workEmit error:', error);\n });\n\n if (Supervisor.debugBuild) {\n workEmitPromise.then(result => {\n if (!result || !result.success)\n console.warn('workEmit: event router did not accept event', result);\n });\n }\n }\n });\n\n // When any sbx completes, \n sandbox.addListener('complete', () => {\n this.watchdog();\n });\n\n sandbox.on('sandboxError', (error) => handleSandboxError(this, sandbox, error));\n \n sandbox.on('rejectedWorkMetrics', (data) =>{\n function updateRejectedMetrics(report) {\n ['total', 'CPU', 'webGL'].forEach((key) => {\n if (report[key]) sandbox.slice.rejectedTimeReport[key] += report[key];\n })\n }\n \n // If the slice already has rejected metrics, add this data to it. If not, assign this data to slices rejected metrics property\n if (sandbox.slice) {\n (sandbox.slice.rejectedTimeReport) ? 
updateRejectedMetrics(data.timeReport) : sandbox.slice.rejectedTimeReport = data.timeReport;\n }\n })\n \n // If the sandbox terminated and we are not shutting down, then should return all work which is currently\n // not being computed if all sandboxes are dead and the attempt to create a new one fails.\n sandbox.on('terminated',async () => {\n if (this.sandboxes.length > 0) {\n let terminatedSandboxes = this.sandboxes.filter(sbx => sbx.isTerminated);\n if (terminatedSandboxes.length === this.sandboxes.length) {\n debugging('supervisor') && console.debug(`readySandboxes: Create 1 sandbox in the sandbox-terminated-handler, total sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n await this.readySandboxes(1);\n \n // If we cannot create a new sandbox, that probably means we're on a screensaver worker\n // and the screensaver is down. So return the slices to the scheduler.\n if (this.sandboxes.length !== terminatedSandboxes.length + 1) {\n this.returnSlices(this.queuedSlices).then(() => {\n this.queuedSlices.length = 0;\n });\n }\n }\n }\n })\n\n const delayMs =\n 1000 *\n (tuning.minSandboxStartDelay +\n Math.random() *\n (tuning.maxSandboxStartDelay - tuning.minSandboxStartDelay));\n \n sandboxStartPromises.push(\n sandbox\n .start(delayMs)\n .then(() => {\n if (!allocateLocalSandboxes) this.readiedSandboxes.push(sandbox);\n this.sandboxes.push(sandbox);\n sandboxes.push(sandbox);\n }).catch((err) => {\n errors.push(err);\n this.returnSandbox(sandbox);\n if (err.code === 'ENOWORKER') {\n throw new DCPError(\"Cannot use localExec without dcp-worker installed. Use the command 'npm install dcp-worker' to install the neccessary modules.\", 'ENOWORKER');\n }\n }));\n }\n \n await Promise.all(sandboxStartPromises);\n\n if (errors.length) {\n console.warn(`Failed to ready ${errors.length} of ${numSandboxes} sandboxes.`, errors);\n throw new Error('Failed to ready sandboxes.');\n }\n\n debugging('supervisor') && console.log(`readySandboxes: Readied ${sandboxes.length} sandboxes ${JSON.stringify(sandboxes.map(sandbox => sandbox.id))}`);\n \n return sandboxes;\n }\n\n /**\n * Accepts a sandbox after it has finished working or encounters an error.\n * If the sandbox was terminated or if \"!slice || slice.failed\" then\n * the sandbox will be removed from the sandboxes array and terminated if necessary.\n * Otherwise it will try to distribute a slice to the sandbox immediately.\n *\n * @param {Sandbox} sandbox - the sandbox to return\n * @param {Slice} [slice] - the slice just worked on; !slice => terminate\n * @param {boolean} [verifySandboxIsNotTerminated=true] - if true, check sandbox is not already terminated\n */\n returnSandbox (sandbox, slice, verifySandboxIsNotTerminated=true) {\n if (!slice || slice.failed || sandbox.isTerminated) {\n \n this.removeSandbox(sandbox);\n \n if (!sandbox.isTerminated) {\n debugging('supervisor') && console.log(`Supervisor.returnSandbox: Terminating ${sandbox.identifier}${slice ? `~${slice.identifier}` : ''}, # of sandboxes ${this.sandboxes.length}`);\n sandbox.terminate(false);\n } else {\n debugging('supervisor') && console.log(`Supervisor.returnSandbox: Already terminated ${sandbox.identifier}${slice ? 
`~${slice.identifier}` : ''}, # of sandboxes ${this.sandboxes.length}`);\n // XXXpfr: April 13, 2022\n // I'm trying to understand and control when sandboxes get terminated.\n // Because of races and random worker crashes, it is impossible to not try to terminate a sandbox more than once.\n // But at some places where returnSandbox is we shouldn't see this behavior, hence this exception.\n // If I don't fix this is the next 30 days or I forget, please delete this exception.\n if (false)\n {}\n }\n }\n }\n\n /**\n * Terminates sandboxes, in order of creation, when the total started sandboxes exceeds the total allowed sandboxes.\n *\n * @returns {Promise<void>}\n */\n pruneSandboxes () {\n let numOver = this.sandboxes.length - (dcpConfig.worker.maxAllowedSandboxes + this.maxWorkingSandboxes);\n if (numOver <= 0) return;\n \n // Don't kill readied sandboxes while creating readied sandboxes.\n for (let index = 0; index < this.readiedSandboxes.length; ) {\n const sandbox = this.readiedSandboxes[index];\n // If the sandbox is allocated, advance to the next one in the list.\n if (sandbox.allocated) {\n index++;\n continue;\n }\n // Otherwise, remove this sandbox but look at the same array index in the next loop.\n debugging('supervisor') && console.log(`pruneSandboxes: Terminating readied sandbox ${sandbox.identifier}`);\n this.readiedSandboxes.splice(index, 1);\n this.returnSandbox(sandbox);\n\n if (--numOver <= 0) break;\n }\n\n if (numOver <= 0) return;\n for (let index = 0; index < this.assignedSandboxes.length; ) {\n const sandbox = this.assignedSandboxes[index];\n // If the sandbox is allocated, advance to the next one in the list.\n if (sandbox.allocated) {\n index++;\n continue;\n }\n // Otherwise, remove this sandbox but look at the same array index in the next loop.\n debugging('supervisor') && console.log(`pruneSandboxes: Terminating assigned sandbox ${sandbox.identifier}`);\n this.assignedSandboxes.splice(index, 1);\n this.returnSandbox(sandbox);\n\n if (--numOver <= 0) break;\n }\n }\n \n /**\n * Basic watch dog to check if there are idle sandboxes and\n * attempts to nudge the supervisor to feed them work.\n *\n * Run in an interval created in @constructor .\n * @returns {Promise<void>}\n */\n async watchdog () {\n if (!this.watchdogState)\n this.watchdogState = {};\n\n // Every 5 minutes, report progress of all working slices to the scheduler\n if (Date.now() > ((this.lastProgressReport || 0) + sandboxTuning.progressReportInterval)) {\n // console.log('454: Assembling progress update...');\n this.lastProgressReport = Date.now();\n\n //\n // Note: this.slices is the disjoint union of:\n // this.allocatedSlices, \n // this.queuedSlices, \n // this.slices.filter(slice => !slice.isUnassigned) .\n // When a slice is not in these 3 arrays, the slice is lost.\n //\n \n const currentLostSlices = this.slices.filter(slice => slice.isUnassigned \n && this.queuedSlices.indexOf(slice) === -1\n && this.allocatedSlices.indexOf(slice) === -1);\n\n if (currentLostSlices.length > 0) {\n this.lostSlices.push(...currentLostSlices);\n // Try to recover.\n // Needs more work and testing.\n // Test when we can come up with a decent lost slice repro case.\n // --> this.queuedSlices.push(...currentLostSlices);\n }\n\n if (this.lostSlices.length > 0) {\n if (true) { // Keep this on for awhile, until we know lost slices aren't happening.\n console.warn('Supervisor.watchdog: Found lost slices!');\n for (const slice of this.lostSlices)\n console.warn('\\t', slice.identifier);\n }\n this.lostSlices = 
this.lostSlices.filter(slice => slice.isUnassigned);\n }\n\n const slices = [];\n this.queuedSlices.forEach(slice => {\n assert(slice && slice.sliceNumber > 0);\n addToSlicePayload(slices, slice, sliceStatus.scheduled);\n });\n\n this.allocatedSlices.forEach(slice => {\n assert(slice && slice.sliceNumber > 0);\n addToSlicePayload(slices, slice, 'progress'); // Beacon.\n });\n\n if (slices.length) {\n // console.log('471: sending progress update...');\n const progressReportPayload = {\n worker: this.workerOpaqueId,\n slices,\n };\n\n this.resultSubmitterConnection.send('status', progressReportPayload)\n .catch(error => {\n debugging('supervisor') && console.error('479: Failed to send status update:', error/*.message*/);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: progressReportPayload })\n this.resultSubmitterConnection.close();\n });\n }\n }\n\n if (this.worker.working) {\n if (this.unallocatedSpace > 0) {\n await this.work().catch(err => {\n if (!this.watchdogState[err.code || '0'])\n this.watchdogState[err.code || '0'] = 0;\n if (Date.now() - this.watchdogState[err.code || '0'] > ((dcpConfig.worker.watchdogLogInterval * timeDilation || 120) * 1000))\n console.error('301: Failed to start work:', err);\n this.watchdogState[err.code || '0'] = Date.now();\n });\n }\n\n this.pruneSandboxes();\n }\n }\n\n /**\n * Gets the logical and physical number of cores and also\n * the total number of sandboxes the worker is allowed to run\n *\n */\n getStatisticsCPU() {\n if (DCP_ENV.isBrowserPlatform) {\n return {\n worker: this.workerOpaqueId,\n lCores: window.navigator.hardwareConcurrency,\n pCores: dcpConfig.worker.pCores || window.navigator.hardwareConcurrency,\n sandbox: this.maxWorkingSandboxes\n }\n }\n\n return {\n worker: this.workerOpaqueId,\n lCores: requireNative('os').cpus().length,\n pCores: requireNative('physical-cpu-count'),\n sandbox: this.maxWorkingSandboxes\n }\n }\n\n /**\n * Returns the number of unallocated sandbox slots to send to fetchTask.\n *\n * @returns {number}\n */\n numberOfAvailableSandboxSlots() {\n let numCores;\n if (this.options.priorityOnly && this.options.jobAddresses.length === 0) {\n numCores = 0;\n } else if (this.queuedSlices.length > 1) {\n // We have slices queued, no need to fetch\n numCores = 0;\n } else {\n // The queue is almost empty (there may be 0 or 1 element), fetch a full task.\n // The task is full, in the sense that it will contain slices whose\n // aggregate execution time is this.maxWorkingSandboxes * 5-minutes.\n // However, there can only be this.unallocatedSpace # of long slices.\n // Thus we need to know whether the last slice in this.queuedSlices is long or not.\n // (A long slice has estimated execution time >= 5-minutes.)\n const longSliceCount = (this.queuedSlices.length > 0 && this.queuedSlices[0].isLongSlice) ? 
1 : 0;\n numCores = this.unallocatedSpace - longSliceCount;\n }\n return numCores;\n }\n\n /**\n * Call to start doing work on the network.\n * This is the one place where requests to fetch new slices are made.\n * After the initial slices are fetched it calls this.distributeQueuedSlices.\n *\n * @returns {Promise<void>}, unallocatedSpace ${this.unallocatedSpace}\n */\n async work()\n {\n // When inside matchSlicesWithSandboxes, don't reenter Supervisor.work to fetch new work or create new sandboxes.\n if (this.matching) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.work: Do not interleave work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n return Promise.resolve();\n }\n\n await this.setDefaultIdentityKeystore();\n\n // Instantiate connections that don't exist.\n this.instantiateAllConnections();\n\n const numCores = this.numberOfAvailableSandboxSlots();\n\n debugging() && console.log(`Supervisor.work: Try to get ${numCores} slices in working sandboxes, unallocatedSpace ${this.unallocatedSpace}, queued slices ${this.queuedSlices.length}, # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching: ${this.isFetchingNewWork}`);\n \n // Fetch a new task if we have no more slices queued, then start workers\n try {\n if (numCores > 0 && !this.isFetchingNewWork) {\n this.isFetchingNewWork = true;\n\n /**\n * This will only ready sandboxes up to a total count of\n * maxWorkingSandboxes (in any state). It is not possible to know the\n * actual number of sandboxes required until we have the slices because we\n * may have sandboxes assigned for the slice's job already.\n *\n * If the evaluator cannot start (ie. if the evalServer is not running),\n * then the while loop will keep retrying until the evalServer comes online\n */\n if (this.maxWorkingSandboxes > this.sandboxes.length) {\n // Note: The old technique had \n // while (this.maxWorkingSandboxes > this.sandboxes.length) {....\n // and sometimes we'd get far too many sandboxes, because it would keep looping while waiting for\n // this.readySandboxes(this.maxWorkingSandboxes - this.sandboxes.length);\n // to construct the rest of the sandboxes. 
The fix is to only loop when the 1st \n // await this.readySandboxes(1) \n // is failing.\n let needFirstSandbox = true;\n while (needFirstSandbox) {\n debugging('supervisor') && console.log(`Supervisor.work: ready 1 sandbox, # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n await this.readySandboxes(1)\n .then(() => {\n debugging('supervisor') && console.log(`Supervisor.work: ready ${this.maxWorkingSandboxes - this.sandboxes.length} sandbox(es), # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n this.readySandboxes(this.maxWorkingSandboxes - this.sandboxes.length);\n needFirstSandbox = false;\n }).catch(error => {\n console.warn('906: failed to ready sandboxes; will retry', error.code, error.message);\n });\n }\n }\n\n /**\n * Temporary change: Assign the capabilities of one of readied sandboxes\n * before fetching slices from the scheduler.\n *\n * TODO: Remove this once fetchTask uses the capabilities of every\n * sandbox to fetch slices.\n */\n if (!this.capabilities) {\n this.capabilities = this.sandboxes[0].capabilities;\n this.emit('capabilitiesCalculated', this.capabilities);\n }\n\n if (DCP_ENV.isBrowserPlatform && this.capabilities.browser)\n this.capabilities.browser.chrome = DCP_ENV.isBrowserChrome;\n\n const fetchTimeout = setTimeout(() => {\n console.warn(`679: Fetch exceeded timeout, will reconnect at next watchdog interval`);\n \n this.taskDistributorConnection.close('Fetch timed out', Math.random() > 0.5).catch(error => {\n console.error(`931: Failed to close task-distributor connection`, error);\n });\n this.resultSubmitterConnection.close('Fetch timed out', Math.random() > 0.5).catch(error => {\n console.error(`920: Failed to close result-submitter connection`, error);\n });\n this.isFetchingNewWork = false;\n this.instantiateAllConnections();\n }, 3 * 60 * 1000); // max out at 3 minutes to fetch\n\n // ensure result submitter connection before fetching tasks\n try\n {\n await this.resultSubmitterConnection.keepalive();\n }\n catch (e)\n {\n console.error('Failed to connect to result submitter, refusing to fetch slices. Will try again at next fetch cycle.')\n debugging('supervisor') && console.log(`Error: ${e}`);\n this.isFetchingNewWork = false; // <-- done in the `finally` block, below\n clearTimeout(fetchTimeout);\n this.taskDistributorConnection.close('Failed to connect to result-submitter', true).catch(error => {\n console.error(`939: Failed to close task-distributor connection`, error);\n });\n this.resultSubmitterConnection.close('Failed to connect to result-submitter', true).catch(error => {\n console.error(`942: Failed to close result-submitter connection`, error);\n });\n return Promise.resolve();\n }\n await this.fetchTask(numCores).finally(() => {\n clearTimeout(fetchTimeout);\n this.isFetchingNewWork = false;\n });\n }\n\n this.distributeQueuedSlices().then(() => debugging('supervisor') && 'supervisor: finished distributeQueuedSlices()').catch((e) => {\n // We should never get here, because distributeQueuedSlices was changed\n // to try to catch everything and return slices and sandboxes.\n // If we do catch here it may mean a slice was lost. \n console.error('Supervisor.work catch handler for distributeQueuedSlices.', e);\n });\n // No catch(), because it will bubble outward to the caller\n } finally {\n }\n }\n\n /**\n * Generate the workerComputeGroups property of the requestTask message. 
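\n *\n * Informal sketch of the per-group transform implemented below (all names are the ones used in this method):\n *   joinHash     = group.joinHash ? group.joinHash.replace(/\\s+/g, '') : calculateJoinHash(groupCopy);\n *   joinHashHash = hash.calculate(hash.eh1, joinHash, this.taskDistributorConnection.dcpsid);\n * joinSecret and joinHash are then deleted from the copy, so only joinHashHash is ever transmitted.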
\n * \n * Concatenate the compute groups object from dcpConfig with the list of compute groups\n * from the supervisor, and remove the public group if accidentally present. Finally,\n * we transform joinSecrets/joinHash into joinHashHash for secure transmission.\n *\n * @note computeGroup objects with joinSecrets are mutated to record their hashes. This\n * affects the supervisor options and dcpConfig. Re-adding a joinSecret property\n * to one of these will cause the hash to be recomputed.\n */\n generateWorkerComputeGroups()\n {\n var computeGroups = Object.values(dcpConfig.worker.computeGroups || {});\n if (this.options.computeGroups)\n computeGroups = computeGroups.concat(this.options.computeGroups);\n computeGroups = computeGroups.filter(group => group.id !== constants.computeGroups.public.id);\n const hashedComputeGroups = [];\n for (const group of computeGroups)\n {\n const groupCopy = Object.assign({}, group);\n if ((group.joinSecret || group.joinHash) && (!group.joinHashHash || this.lastDcpsid !== this.taskDistributorConnection.dcpsid))\n {\n let joinHash;\n if (group.joinHash) {\n joinHash = group.joinHash.replace(/\\s+/g, ''); // strip whitespace\n } else {\n joinHash = calculateJoinHash(groupCopy);\n } \n\n groupCopy.joinHashHash = hash.calculate(hash.eh1, joinHash, this.taskDistributorConnection.dcpsid);\n delete groupCopy.joinSecret;\n delete groupCopy.joinHash;\n debugging('computeGroups') && console.debug(`Calculated joinHash=${joinHash} for`, groupCopy);\n }\n hashedComputeGroups.push(groupCopy);\n }\n this.lastDcpsid = this.taskDistributorConnection.dcpsid;\n debugging('computeGroups') && console.debug('Requesting ', computeGroups.length, 'non-public groups for session', this.lastDcpsid);\n return hashedComputeGroups;\n }\n\n /**\n * Remove all unreferenced jobs in `this.cache`.\n *\n * @param {any[]} newJobs - Jobs that should not be removed from\n * `this.cache`.\n */\n cleanJobCache(newJobs = []) {\n /* Delete all jobs in the supervisorCache that are not represented in this newJobs,\n * or in this.queuedSlices, or there is no sandbox assigned to these jobs.\n * Note: There can easily be 200+ places to check; using a lookup structure to maintain O(n).\n */\n if (this.cache.jobs.length > 0) {\n const jobAddressMap = {};\n Object.keys(newJobs).forEach(jobAddress => { jobAddressMap[jobAddress] = 1; });\n this.slices.forEach(slice => { if (!jobAddressMap[slice.jobAddress]) jobAddressMap[slice.jobAddress] = 1; });\n this.cache.jobs.forEach(jobAddress => {\n if (!jobAddressMap[jobAddress]) {\n this.cache.remove('job', jobAddress);\n // Remove and return the corresponding sandboxes from this.sandboxes.\n const deadSandboxes = this.sandboxes.filter(sb => sb.jobAddress === jobAddress);\n if (deadSandboxes.length > 0) {\n deadSandboxes.forEach(sandbox => { this.returnSandbox(sandbox); });\n debugging('supervisor') && console.log(`Supervisor.fetchTask: Deleting job ${jobAddress} from cache and assigned sandboxes ${deadSandboxes.map(s => s.id)}, # of sandboxes ${this.sandboxes.length}.`);\n }\n }\n });\n }\n }\n\n /**\n * Fetches a task, which contains job information and slices for sandboxes and\n * manages events related to fetching tasks so the UI can more clearly display\n * to user what is actually happening.\n * @param {number} numCores\n * @returns {Promise<void>} The requestTask request, resolve on success, rejects otherwise.\n * @emits Supervisor#fetchingTask\n * @emits Supervisor#fetchedTask\n */\n async fetchTask(numCores) {\n\n // Don't reenter\n if (this.matching || 
numCores <= 0) {\n // Interesting and noisy.\n debugging('supervisor') && console.log(`Supervisor.fetchTask: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return Promise.resolve();\n }\n\n //\n // Oversubscription mitigation.\n // Update when there are less available sandbox slots than numCores.\n const checkNumCores = this.numberOfAvailableSandboxSlots();\n if (numCores > checkNumCores) numCores = checkNumCores;\n if (numCores <= 0) return Promise.resolve();\n\n this.emit('fetchingTask');\n debugging('supervisor') && console.debug('supervisor: fetching task');\n const requestPayload = {\n numCores,\n coreStats: this.getStatisticsCPU(),\n numGPUs: this.defaultMaxGPUs,\n capabilities: this.capabilities,\n paymentAddress: this.paymentAddress,\n jobAddresses: this.options.jobAddresses, // when set, only fetches slices for these jobs\n localExec: this.options.localExec,\n workerComputeGroups: this.generateWorkerComputeGroups(),\n minimumWage: dcpConfig.worker.minimumWage || this.options.minimumWage,\n readyJobs: [ /* list of jobs addresses XXXwg */ ],\n previouslyWorkedJobs: this.ringBufferofJobs.buf, //Only discrete jobs\n rejectedJobs: this.rejectedJobs,\n };\n // workers should be part of the public compute group by default\n if (!booley(dcpConfig.worker.leavePublicGroup) && !booley(this.options.leavePublicGroup))\n requestPayload.workerComputeGroups.push(constants.computeGroups.public);\n debugging('computeGroups') && console.log(`Fetching work for ${requestPayload.workerComputeGroups.length} ComputeGroups: `, requestPayload.workerComputeGroups);\n\n debugging('supervisor') && console.log(`fetchTask wants ${numCores} slice(s), unallocatedSpace ${this.unallocatedSpace}, queuedSlices ${this.queuedSlices.length}`);\n try {\n debugging('requestTask') && console.debug('fetchTask: requestPayload', requestPayload);\n\n let result = await this.taskDistributorConnection.send('requestTask', requestPayload).catch((error) => {\n debugging('supervisor') && console.error(`Unable to request task from scheduler: ${error}. Will try again on a new connection.`);\n this.taskDistributorConnection.close(error, true);\n throw error; /* caught below */\n });\n let responsePayload = result.payload; \n\n if (!result.success) {\n debugging() && console.log('Task fetch failure; request=', requestPayload);\n debugging() && console.log('Task fetch failure; response=', result.payload);\n throw new DCPError('Unable to fetch task for worker', responsePayload);\n }\n\n const sliceCount = responsePayload.body.task.length || 0;\n\n /**\n * The fetchedTask event fires when the supervisor has finished trying to\n * fetch work from the scheduler (task-manager). 
The data emitted is the\n * number of new slices to work on in the fetched task.\n *\n * @event Supervisor#fetchedTask\n * @type {number}\n */\n this.emit('fetchedTask', sliceCount);\n\n if (sliceCount < 1) {\n return Promise.resolve();\n }\n\n /**\n * DCP-1698 Send auth msg with tasks to worker, then validate authority of worker to send slice info back to scheduler.\n * payload structure: { owner: this.address, signature: signature, auth: messageLightWeight, body: messageBody };\n * messageLightWeight: { workerId: worker, jobSlices, schedulerId, jobCommissions }\n * messageBody: { newJobs: await getNewJobsForTask(dbScheduler, task, request), task }\n */\n const { body, ...authorizationMessage } = responsePayload;\n const { newJobs, task } = body;\n assert(newJobs); // It should not be possible to have !newJobs -- we throw on !success.\n \n /*\n * Ensure all jobs received from the scheduler are:\n * 1. If we have specified specific jobs the worker may work on, the received jobs are in the specified job list\n * 2. If we are in localExec, at most 1 unique job type was received (since localExec workers are designated for only\n * one job)\n * If the received jobs are not within these parameters, stop the worker since the scheduler cannot be trusted at that point.\n */\n if ((this.options.jobAddresses.length && !Object.keys(newJobs).every((ele) => this.options.jobAddresses.includes(ele)))\n || (this.options.localExec && Object.keys(newJobs).length > 1))\n {\n console.error(\"Worker received slices it shouldn't have. Rejecting the work and stopping.\");\n process.exit(1);\n }\n\n debugging() && console.log(`Supervisor.fetchTask: task: ${task.length}/${numCores}, jobs: ${Object.keys(newJobs).length}, authSlices: ${await compressJobMap(authorizationMessage.auth.authSlices, true /* skipFirst*/, this.addressTruncationLength /* digits*/)}`);\n // Delete all jobs in the supervisorCache that are not represented in this task,\n // or in this.queuedSlices, or there is no sandbox assigned to these jobs.\n this.cleanJobCache(newJobs);\n\n for (const jobAddress of Object.keys(newJobs))\n if (!this.cache.cache.job[jobAddress])\n this.cache.store('job', jobAddress, newJobs[jobAddress]);\n\n // Memoize authMessage onto the Slice object, this should\n // follow it for its entire life in the worker.\n const tmpQueuedSlices = task.map(taskElement => new Slice(taskElement, authorizationMessage));\n\n // Make sure old stuff is up front.\n // matchSlicesWithSandboxes dequeues this.queuedSlices as follows:\n // slicesToMatch = this.queuedSlices.slice(0, numCores);\n this.slices.push(...tmpQueuedSlices);\n this.queuedSlices.push(...tmpQueuedSlices);\n \n // Populating the ring buffer based on job's discrete property \n Object.values(newJobs).forEach(job => {\n if(job.requirements.discrete && this.ringBufferofJobs.find(element => element === job.address) === undefined) {\n this.ringBufferofJobs.push(job.address);\n }\n });\n \n } catch (error) {\n this.emit('fetchTaskFailed', error);\n debugging('supervisor') && console.debug(`Supervisor.fetchTask failed!: error: ${error}`);\n }\n }\n\n /**\n * For each slice in this.queuedSlices, match with a sandbox in the following order:\n * 1. Try to find an already assigned sandbox in this.assignedSandboxes for the slice's job.\n * 2. Find a ready sandbox in this.readiedSandboxes that is unassigned.\n * 3. 
Ready a new sandbox and use that.\n *\n * Take great care in assuring sandboxes and slices are uniquely associated, viz.,\n * a given slice cannot be associated with multiple sandboxes and a given sandbox cannot be associated with multiple slices.\n * The lack of such uniqueness has been the root cause of several difficult bugs.\n *\n * Note: When a sandbox is paired with a slice, execution is pending and sandbox.allocated=true and\n * sandbox.slice=slice and sandbox.jobAddress=slice.jobAddress. This is what 'allocated' means.\n * Immediately upon the exit of sandbox.work, sandbox.allocated=false is set and if an exception\n * wasn't thrown, the paired slice is placed in this.assignedSandboxes.\n * Thus from the pov of supervisor, this.allocatedSandboxes is deterministic and this.workingSandboxes is not.\n * Please try to not use this.workingSandboxes. It is deprecated.\n *\n * The input is numCores, this,queuedSlices, this.assignedSandboxes and this.readiedSandboxes.\n * If there are not enough sandboxes, new readied sandboxes will be created using\n * await this.readySandboxes(...)\n * And it is this await boundary that has caused many bugs.\n * We try not to make assumptions about non-local state across the await boundary.\n *\n * @param {number} numCores - The number of available sandbox slots.\n * @param {boolean} [throwExceptions=true] - Whether to throw exceptions when checking for sanity.\n * @returns {Promise<SandboxSlice[]>} Returns SandboxSlice[], may have length zero.\n */\n async matchSlicesWithSandboxes (numCores, throwExceptions = true) {\n\n const sandboxSlices = [];\n if (this.queuedSlices.length === 0 || this.matching || numCores <= 0) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.matchSlicesWithSandboxes: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return sandboxSlices;\n }\n\n //\n // Oversubscription mitigation.\n // Update when there are less available sandbox slots than numCores.\n // We cannot use this.unallocatedSpace here because its value is artificially low or zero, because in\n // this.distributedQueuedSlices we use the pseudo-mutex trick: this.acquire(howManySandboxSlotsToReserve)/this.release().\n // Note: Do not use this.numberOfCoresReserved outside of a function locked with this.acquire(howManySandboxSlotsToReserve) .\n const checkNumCores = this.numberOfCoresReserved; // # of locked sandbox slots.\n if (numCores > checkNumCores) numCores = checkNumCores;\n if (numCores <= 0) return sandboxSlices;\n\n // Don't ask for more than we have.\n if (numCores > this.queuedSlices.length)\n numCores = this.queuedSlices.length;\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: numCores ${numCores}, queued slices ${this.queuedSlices.length}: assigned ${this.assignedSandboxes.length}, readied ${this.readiedSandboxes.length}, unallocated ${this.unallocatedSpace}, # of sandboxes: ${this.sandboxes.length}`);\n\n if (debugging('supervisor')) {\n dumpSlicesIfNotUnique(this.queuedSlices, 'Warning: this.queuedSlices slices are not unique -- this is ok when slice is rescheduled.');\n dumpSandboxesIfNotUnique(this.readiedSandboxes, 'Warning: this.readiedSandboxes sandboxes are not unique!');\n dumpSandboxesIfNotUnique(this.assignedSandboxes, 'Warning: this.assignedSandboxes sandboxes are not unique!');\n }\n\n // Three functions to 
validate slice and sandbox.\n function checkSlice(slice, checkAllocated=true) {\n if (!slice.isUnassigned) throw new DCPError(`Slice must be unassigned: ${slice.identifier}`);\n if (checkAllocated && slice.allocated) throw new DCPError(`Slice must not already be allocated: ${slice.identifier}`);\n }\n function checkSandbox(sandbox, isAssigned) {\n if (sandbox.allocated) throw new DCPError(`Assigned sandbox must not be already allocated: ${sandbox.identifier}`);\n if (isAssigned && !sandbox.isAssigned) throw new DCPError(`Assigned sandbox is not marked as assigned: ${sandbox.identifier}`);\n if (!isAssigned && !sandbox.isReadyForAssign) throw new DCPError(`Readied sandbox is not marked as ready for assign: ${sandbox.identifier}`);\n }\n\n // Sanity checks.\n if (throwExceptions) {\n this.assignedSandboxes.forEach(sandbox => { checkSandbox(sandbox, true /* isAssigned*/); });\n this.readiedSandboxes.forEach(sandbox => { checkSandbox(sandbox, false /* isAssigned*/); });\n this.queuedSlices.forEach(slice => { checkSlice(slice); });\n } else {\n this.assignedSandboxes = this.assignedSandboxes.filter(sandbox => !sandbox.allocated && sandbox.isAssigned);\n this.readiedSandboxes = this.readiedSandboxes.filter(sandbox => !sandbox.allocated && sandbox.isReadyForAssign);\n this.queuedSlices = this.queuedSlices.filter(slice => !slice.allocated && slice.isUnassigned);\n }\n\n const sandboxKind = {\n assigned: 0,\n ready: 1,\n new: 2,\n };\n\n const ceci = this;\n /**\n * Auxiliary function to pair a sandbox with a slice and mark the sandbox as allocated.\n * An allocated sandbox is reserved and will not be released until the slice completes execution on the sandbox.\n *\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @param {number} kind\n */\n function pair(sandbox, slice, kind) {\n checkSandbox(sandbox, kind === sandboxKind.assigned);\n checkSlice(slice, kind === sandboxKind.assigned);\n slice.allocated = true;\n sandbox.allocated = true;\n sandbox.jobAddress = slice.jobAddress; // So we can know which jobs to not delete from this.cache .\n sandbox.slice = slice;\n sandboxSlices.push({ sandbox, slice });\n if (Supervisor.sliceTiming) slice['pairingDelta'] = Date.now();\n if (debugging('supervisor')) {\n let fragment = 'New readied';\n if (kind === sandboxKind.assigned) fragment = 'Assigned';\n else if (kind === sandboxKind.ready) fragment = 'Readied';\n console.log(`matchSlicesWithSandboxes.pair: ${fragment} sandbox matched ${ceci.dumpSandboxAndSlice(sandbox, slice)}`);\n }\n }\n\n // These three arrays are used to track/store slices and sandboxes,\n // so that when an exception occurs, the following arrays are restored:\n // this.queuedSlices, this.assignedSandboxes, this.realizedSandboxes.\n let slicesToMatch = [];\n let trackAssignedSandboxes = [];\n let trackReadiedSandboxes = [];\n try\n {\n this.matching = true;\n\n let assignedCounter = 0; // How many assigned sandboxes are being used.\n let readyCounter = 0; // How many sandboxes used from the existing this.readiedSandboxes.\n let newCounter = 0; // How many sandboxes that needed to be newly created.\n\n //\n // The Ideas:\n // 1) We match each slice with a sandbox. First we match with assigned sandboxes in the order\n // that they appear in this.queuedSlices. 
Then we match in-order with existing this.readiedSandboxes\n // Then we match in-order with new new readied sandboxes created through\n // await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n // This allows us to try different orderings of execution of slices. E.g. Wes suggested\n // trying to execute slices from different jobs with maximal job diversity -- specifically\n // if there are 3 jobs j1,j2,j3, with slices s11, s12 from j1, s21, s22, s23 from j2 and\n // s31, s32 from j3, then we try to schedule, in order s11, s21, s31, s12, s22, s32, s23.\n //\n // 2) Before matching slices with sandboxes, we allocate available assigned and readied sandboxes\n // and if more are needed then we create and allocate new ones.\n //\n // 3) Finally we match slices with sandboxes and return an array of sandboxSlice pairs.\n //\n // Note: The ordering of sandboxSlices only partially corresponds to the order of this.queuedSlices.\n // It's easy to do. When pairing with assigned sandboxes, any slice in this.queuedSlices which doesn't\n // have an assigned sandbox, will add null to the sandboxSlices array. Then when pairing with readied sandboxes,\n // we fill-in the null entries in the sandboxSlices array.\n //\n /** XXXpfr @todo When it is needed, fix the ordering as described above. */\n\n // Get the slices that are being matched.\n slicesToMatch = this.queuedSlices.slice(0, numCores);\n this.queuedSlices = this.queuedSlices.slice(numCores);\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: slicesToMatch ${await this.dumpSlices(slicesToMatch)}`);\n\n // Create object map: jobAddress -> sandboxes with sandboxes.jobAddress === jobAddress .\n const jobSandboxMap = toJobMap(this.assignedSandboxes, sandbox => sandbox);\n \n // Create array to hold slices which do not have assigned sandboxes.\n // These slices will need to be paired with existing and possibly new readied sandboxes.\n // Specifically, the sandboxes from existing this.readiedSandboxes and new sandboxes\n // created through await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n const slicesThatNeedSandboxes = [];\n\n // Pair assigned sandboxes with slices.\n for (const slice of slicesToMatch) {\n const assigned = jobSandboxMap[slice.jobAddress];\n if (assigned && assigned.length > 0) {\n // Pair.\n const sandbox = assigned.pop();\n pair(sandbox, slice, sandboxKind.assigned);\n this.removeElement(this.assignedSandboxes, sandbox);\n // Track.\n trackAssignedSandboxes.push(sandbox);\n assignedCounter++;\n } else {\n // Don't lose track of these slices.\n slice.allocated = true;\n slicesThatNeedSandboxes.push(slice);\n }\n }\n\n // Pair readied sandboxes with slices.\n readyCounter = Math.min(slicesThatNeedSandboxes.length, this.readiedSandboxes.length);\n newCounter = slicesThatNeedSandboxes.length - readyCounter;\n // Track.\n trackReadiedSandboxes = this.readiedSandboxes.slice(0, readyCounter);\n this.readiedSandboxes = this.readiedSandboxes.slice(readyCounter);\n for (const sandbox of trackReadiedSandboxes) {\n // Pair.\n const slice = slicesThatNeedSandboxes.pop();\n pair(sandbox, slice, sandboxKind.ready);\n }\n \n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: assignedCounter ${assignedCounter}, readyCounter ${readyCounter}, newCounter ${newCounter}, numCores ${numCores}`)\n\n // Validate algorithm consistency.\n if (Supervisor.debugBuild && assignedCounter + readyCounter + newCounter !== numCores) {\n // Structured assert.\n throw new 
DCPError(`matchSlicesWithSandboxes: Algorithm is corrupt ${assignedCounter} + ${readyCounter} + ${newCounter} !== ${numCores}`);\n }\n\n // Here is an await boundary.\n // Accessing non-local data across an await boundary may result in the unexpected.\n\n // Create new readied sandboxes to associate with slicesThatNeedSandboxes.\n if (newCounter > 0) {\n // When allocateLocalSandboxes is true, this.readySandboxes does not place the new sandboxes\n // on this.readiedSandboxes. Hence the new sandboxes are private and nobody else can see them.\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: creating ${newCounter} new sandboxes, # of sandboxes ${this.sandboxes.length}`);\n const readied = await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n // Track.\n trackReadiedSandboxes.push(...readied);\n\n for (const sandbox of readied) {\n assert(slicesThatNeedSandboxes.length > 0);\n // Pair\n const slice = slicesThatNeedSandboxes.pop();\n pair(sandbox, slice, sandboxKind.new);\n }\n \n // Put back any extras. There should not be any unless readySandboxes returned less than asked for.\n if (slicesThatNeedSandboxes.length > 0) {\n slicesThatNeedSandboxes.forEach(slice => {\n slice.allocated = false;\n this.queuedSlices.push(slice);\n });\n }\n }\n\n if ( false || debugging()) {\n console.log(`matchSlicesWithSandboxes: Matches: ${ this.dumpSandboxSlices(sandboxSlices) }`);\n this.dumpSandboxSlicesIfNotUnique(sandboxSlices, 'Warning: sandboxSlices; { sandbox, slice } pairs are not unique!');\n }\n } catch (e) {\n // Clear allocations.\n slicesToMatch.forEach(slice => { slice.allocated = false; });\n trackAssignedSandboxes.forEach(sandbox => { sandbox.allocated = false; sandbox.slice = null; });\n trackReadiedSandboxes.forEach(sandbox => { sandbox.allocated = false; sandbox.slice = null; sandbox.jobAddress = null; });\n \n // Filter out redundancies -- there shouldn't be any...\n slicesToMatch = slicesToMatch.filter(slice => this.queuedSlices.indexOf(slice) === -1);\n trackAssignedSandboxes = trackAssignedSandboxes.filter(sb => this.assignedSandboxes.indexOf(sb) === -1);\n trackReadiedSandboxes = trackReadiedSandboxes.filter(sb => this.readiedSandboxes.indexOf(sb) === -1);\n\n // Sanity checks.\n slicesToMatch.forEach(slice => { checkSlice(slice) });\n trackAssignedSandboxes.forEach(sandbox => { checkSandbox(sandbox, true /* isAssigned*/); });\n trackReadiedSandboxes.forEach(sandbox => { checkSandbox(sandbox, false /* isAssigned*/); });\n\n // Restore arrays.\n this.queuedSlices.push(...slicesToMatch);\n this.assignedSandboxes.push(...trackAssignedSandboxes);\n this.readiedSandboxes.push(...trackReadiedSandboxes);\n \n console.error('Error in matchSlicesWithSandboxes: Attempting to recover slices and sandboxes.', e);\n return [];\n } finally {\n this.matching = false;\n }\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: allocated ${sandboxSlices.length} sandboxes, queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, # of sandboxes: ${this.sandboxes.length}.`);\n\n return sandboxSlices;\n }\n\n disassociateSandboxAndSlice(sandbox, slice) {\n this.returnSandbox(sandbox);\n sandbox.slice = null;\n this.returnSlice(slice);\n }\n\n /**\n * This method will call this.startSandboxWork(sandbox, slice) for each element { sandbox, slice }\n * of the array returned by this.matchSlicesWithSandboxes(availableSandboxes) until all allocated sandboxes\n * are 
working. It is possible for a sandbox to interleave with calling distributeQueuedSlices and leave a sandbox\n * that is not working. Moreover, this.queuedSlices may be exhausted before all sandboxes are working.\n * @returns {Promise<void>}\n */\n async distributeQueuedSlices () {\n const numCores = this.unallocatedSpace;\n\n // If there's nothing there, or we're reentering, bail out.\n if (this.queuedSlices.length === 0 || numCores <= 0 || this.matching) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.distributeQueuedSlices: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return Promise.resolve();\n }\n\n //\n // Use the pseudo-mutex to prevent uncontrolled interleaving with fetchTask,\n // matchSlicesWithSandboxes and distributeQueuedSlices\n let sandboxSlices;\n this.acquire(numCores);\n try {\n sandboxSlices = await this.matchSlicesWithSandboxes(numCores);\n } finally {\n this.release();\n }\n\n debugging('supervisor') && console.log(`distributeQueuedSlices: ${sandboxSlices.length} sandboxSlices ${this.dumpSandboxSlices(sandboxSlices)}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n\n for (let sandboxSlice of sandboxSlices) {\n\n const { sandbox, slice } = sandboxSlice;\n try {\n if (sandbox.isReadyForAssign) {\n try {\n let timeoutMs = Math.floor(Math.min(+Supervisor.lastAssignFailTimerMs || 0, 10 * 60 * 1000 /* 10m */));\n await a$sleepMs(timeoutMs);\n await this.assignJobToSandbox(sandbox, slice.jobAddress);\n } catch (e) {\n console.error(`Supervisor.distributeQueuedSlices: Could not assign slice ${slice.identifier} to sandbox ${sandbox.identifier}.`);\n if (Supervisor.debugBuild) console.error(`...exception`, e);\n Supervisor.lastAssignFailTimerMs = Supervisor.lastAssignFailTimerMs ? +Supervisor.lastAssignFailTimerMs * 1.25 : Math.random() * 200;\n this.disassociateSandboxAndSlice(sandbox, slice);\n continue;\n }\n }\n\n if (!Supervisor.lastAssignFailTimerMs)\n Supervisor.lastAssignFailTimerMs = Math.random() * 200;\n this.startSandboxWork(sandbox, slice);\n Supervisor.lastAssignFailTimerMs = false;\n\n } catch (e) {\n // We should never get here.\n console.error(`Supervisor.distributeQueuedSlices: Failed to execute slice ${slice.identifier} in sandbox ${sandbox.identifier}.`);\n if (Supervisor.debugBuild) console.error('...exception', e);\n this.disassociateSandboxAndSlice(sandbox, slice);\n }\n }\n }\n\n /**\n *\n * @param {Sandbox} sandbox\n * @param {opaqueId} jobAddress\n * @returns {Promise<void>}\n */\n async assignJobToSandbox(sandbox, jobAddress) {\n var ceci = this;\n\n try {\n return sandbox.assign(jobAddress); // Returns Promise.\n } catch(error) {\n // return slice to scheduler, log error\n console.error('Supervisor.assignJobToSandbox: Failed to assign job to sandbox.', {\n jobAddress: jobAddress.substr(0,10),\n error,\n });\n\n ceci.returnSandbox(sandbox);\n\n throw error;\n }\n }\n\n /**\n * Handles reassigning or returning a slice that was rejected by a sandbox.\n * \n * The sandbox will be terminated by this.returnSandbox in finalizeSandboxAndSlice. 
In this case,\n * if the slice does not have a rejected property already, reassign the slice to a new sandbox\n * and add a rejected property to the slice to indicate it has already rejected once, then set slice = null\n * in the return SandboxSlice so that finalizeSandboxAndSlice won't return slice to scheduler.\n * \n * If the slice rejects with a reason, or has a rejected time stamp (ie. has been rejected once already)\n * then return the slice and all slices from the job to the scheduler and\n * terminate all sandboxes with that jobAddress.\n * @param {Sandbox} sandbox \n * @param {Slice} slice\n * @returns {Promise<SandboxSlice>}\n */\n async handleWorkReject(sandbox, slice, rejectReason) {\n if (!this.rejectedJobReasons[slice.jobAddress])\n this.rejectedJobReasons[slice.jobAddress] = [];\n\n this.rejectedJobReasons[slice.jobAddress].push(rejectReason); // memoize reasons\n\n // First time rejecting without a reason. Try assigning slice to a new sandbox.\n if (rejectReason === 'false' && !slice.rejected) {\n // Set rejected.\n slice.rejected = Date.now();\n // Schedule the slice for execution.\n this.scheduleSlice(slice, true /* placeInTheFrontOfTheQueue*/, false /* noDuplicateExecution*/);\n \n // Null out slice so this.returnSlice will not be called in finalizeSandboxAndSlice.\n // But we still want this.returnSandbox to terminate the sandbox.\n slice = null;\n } else { // Slice has a reason OR rejected without a reason already and got stamped.\n \n // Purge all slices and sandboxes associated with slice.jobAddress .\n this.purgeAllWork(slice.jobAddress);\n // Clear jobAddress from this.cache .\n this.cleanJobCache();\n\n //\n // this.purgeAllWork(jobAddress) terminates all sandboxes with jobAddress,\n // and it also returns to scheduler all slices with jobAddress.\n // Therefore null out slice and sandbox so finalizeSandboxAndSlice doesn't do anything.\n // \n sandbox = null;\n slice = null;\n\n // Add to array of rejected jobs.\n let rejectedJob = {\n address: slice.jobAddress,\n reasons: this.rejectedJobReasons[slice.jobAddress],\n }\n this.rejectedJobs.push(rejectedJob);\n\n // Tell everyone all about it, when allowed.\n if (dcpConfig.worker.allowConsoleAccess || Supervisor.debugBuild)\n console.warn('Supervisor.handleWorkReject: The slice ${slice.identifier} was rejected twice; slice will be returned to the scheduler.');\n console.warn('Supervisor.handleWorkReject: All slices with the same jobAddress returned to the scheduler.');\n console.warn('Supervisor.handleWorkReject: All sandboxes with the same jobAddress are terminated.');\n }\n return { sandbox, slice };\n }\n\n /**\n * Schedule the slice to be executed.\n * If slice is already executing and noDuplicateExecution is true, return the slice with reason.\n * @param {Slice} slice\n * @param {boolean} [placeInTheFrontOfTheQueue=false]\n * @param {boolean} [noDuplicateExecution=true]\n * @param {string} [reason]\n */\n scheduleSlice(slice, placeInTheFrontOfTheQueue = false, noDuplicateExecution = true, reason) {\n // When noDuplicateExecution, if slice is already executing, do nothing.\n let workingSlices = [];\n if (noDuplicateExecution)\n workingSlices = this.allocatedSlices;\n\n if (!workingSlices.indexOf(slice)) {\n // Reset slice state to allow execution.\n slice.status = SLICE_STATUS_UNASSIGNED;\n // Enqueue in the to-be-executed queue.\n if (placeInTheFrontOfTheQueue) this.queuedSlices.unshift(slice);\n else this.queuedSlices.push(slice);\n }\n }\n\n /**\n * Purge all slices and sandboxes with this jobAddress.\n * 
@param {address} jobAddress\n * @param {boolean} [onlyPurgeQueuedAndAllocated=false]\n */\n purgeAllWork(jobAddress, onlyPurgeQueuedAndAllocated = false) {\n // Purge all slices and sandboxes associated with jobAddress .\n const deadSandboxes = this.sandboxes.filter(sandbox => sandbox.jobAddress === jobAddress);\n\n if (deadSandboxes.length > 0) {\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): sandboxes purged ${deadSandboxes.map(s => s.id)}, # of sandboxes ${this.sandboxes.length}`);\n deadSandboxes.forEach(sandbox => this.returnSandbox(sandbox));\n }\n\n let deadSlices;\n if (onlyPurgeQueuedAndAllocated) {\n deadSlices = this.queuedSlices.filter(slice => slice.jobAddress === jobAddress);\n if (deadSlices.length > 0 || this.allocatedSlices.length > 0)\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): dead queuedSlices ${deadSlices.map(s => s.sliceNumber)}, dead allocatedSlices ${this.allocatedSlices.map(s => s.sliceNumber)}`);\n deadSlices.push(...this.allocatedSlices);\n } else {\n deadSlices = this.slices.filter(slice => slice.jobAddress === jobAddress);\n }\n\n if (deadSlices.length > 0) {\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): slices purged ${deadSlices.map(s => s.sliceNumber)}, # of sandboxes ${this.sandboxes.length}`);\n this.returnSlices(deadSlices);\n this.removeQueuedSlices(deadSlices);\n }\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): Finished: slices ${this.slices.length}, queuedSlices ${this.queuedSlices.length}, assigned ${this.assignedSandboxes.length}, readied ${this.readiedSandboxes.length}, # of sandboxes ${this.sandboxes.length}`);\n }\n\n /**\n * Gives a slice to a sandbox which begins working. 
Handles collecting\n * the slice result (complete/fail) from the sandbox and submitting the result to the scheduler.\n * It will also return the sandbox to @this.returnSandbox when completed so the sandbox can be re-assigned.\n *\n * @param {Sandbox} sandbox - the sandbox to give the slice\n * @param {Slice} slice - the slice to distribute\n * @returns {Promise<void>} Promise returned from sandbox.run\n */\n async startSandboxWork (sandbox, slice) {\n var startDelayMs, reason = 'unknown';\n\n try {\n slice.markAsWorking();\n } catch (e) {\n // This will occur when the same slice is distributed twice.\n // It is normal because two sandboxes could finish at the same time and be assigned the\n // same slice before the slice is marked as working.\n debugging() && console.debug('startSandboxWork: slice.markAsWorking exception:', e);\n return Promise.resolve();\n }\n\n // sandbox.requiresGPU = slice.requiresGPU;\n // if (sandbox.requiresGPU) {\n // this.GPUsAssigned++;\n // }\n\n if (Supervisor.startSandboxWork_beenCalled)\n startDelayMs = 1000 * (tuning.minSandboxStartDelay + (Math.random() * (tuning.maxSandboxStartDelay - tuning.minSandboxStartDelay)));\n else {\n startDelayMs = 1000 * tuning.minSandboxStartDelay;\n Supervisor.startSandboxWork_beenCalled = true;\n }\n\n try {\n debugging() && console.log(`startSandboxWork: Started ${this.dumpStatefulSandboxAndSlice(sandbox, slice)}, total sandbox count: ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n if (Supervisor.sliceTiming) {\n slice['pairingDelta'] = Date.now() - slice['pairingDelta'];\n slice['executionDelta'] = Date.now();\n }\n let result;\n try {\n result = await sandbox.work(slice, startDelayMs);\n } finally {\n sandbox.allocated = false;\n slice.allocated = false;\n }\n if (Supervisor.sliceTiming) {\n slice['executionDelta'] = Date.now() - slice['executionDelta'];\n slice['resultDelta'] = Date.now();\n }\n slice.collectResult(result, true);\n // In watchdog, all sandboxes in working state, have their slice status sent to result submitter.\n // However, this can happen after the sandbox/slice has already sent results\n // to result submitter, in which case, the activeSlices table has already removed the row\n // corresponding to slice and hence is incapable of updating status.\n sandbox.changeWorkingToAssigned();\n this.assignedSandboxes.push(sandbox);\n debugging() && console.log(`startSandboxWork: Finished ${this.dumpStatefulSandboxAndSlice(sandbox, slice)}, total sandbox count: ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n } catch(error) {\n let logLevel;\n\n if (error instanceof SandboxError) {\n logLevel = 'warn';\n // The message and stack properties of error objects are not enumerable,\n // so they have to be copied into a plain object this way\n const errorResult = Object.getOwnPropertyNames(error).reduce((o, p) => {\n o[p] = error[p]; return o;\n }, { message: 'Unexpected worker error' });\n slice.collectResult(errorResult, false);\n } else {\n logLevel = 'error';\n // This error was unrelated to the work being done, so just return the slice in the finally block.\n // For extra safety the sandbox is terminated.\n slice.result = null;\n slice.status = SLICE_STATUS_FAILED; /** XXXpfr @todo terminating sandbox? 
*/\n }\n\n let errorString;\n switch (error.errorCode) {\n case 'ENOPROGRESS':\n reason = 'ENOPROGRESS';\n errorString = 'Supervisor.startSandboxWork - No progress error in sandbox.\\n';\n break;\n case 'ESLICETOOSLOW':\n reason = 'ESLICETOOSLOW';\n errorString = 'Supervisor.startSandboxWork - Slice too slow error in sandbox.\\n';\n break;\n case 'EUNCAUGHT':\n reason = 'EUNCAUGHT';\n errorString = `Supervisor.startSandboxWork - Uncaught error in sandbox ${error.message}.\\n`;\n break;\n case 'EFETCH':\n // reason = 'EFETCH'; The status.js processing cannot handle 'EFETCH'\n reason = 'unknown';\n errorString = `Supervisor.startSandboxWork - Could not fetch data: ${error.message}.\\n`;\n break;\n }\n\n if (error.name === 'EWORKREJECT') {\n error.stack = 'Sandbox was terminated by work.reject()';\n const ss = await this.handleWorkReject(sandbox, slice, error.message);\n sandbox = ss.sandbox; slice = ss.slice;\n }\n\n const errorObject = {\n jobAddress: slice.jobAddress.substr(0,10),\n sliceNumber: slice.sliceNumber,\n sandbox: sandbox.id,\n jobName: sandbox.public ? sandbox.public.name : 'unnamed',\n };\n\n // Always display max info under debug builds, otherwise maximal error\n // messages are displayed to the worker, only if both worker and client agree.\n let workerConsole = sandbox.supervisorCache.cache.job[slice.jobAddress].workerConsole;\n const displayMaxInfo = Supervisor.debugBuild || (workerConsole && dcpConfig.worker.allowConsoleAccess);\n\n if (!displayMaxInfo && errorString) {\n console[logLevel](errorString, errorObject);\n } else if (!displayMaxInfo && error.name === 'EWORKREJECT') {\n console[logLevel](`Supervisor.startSandboxWork - Sandbox rejected work: ${error.message}`)\n } else {\n if (displayMaxInfo)\n errorObject.stack += '\\n --------------------\\n' + (error.stack.split('\\n').slice(1).join('\\n'));\n console[logLevel](`Supervisor.startSandboxWork - Sandbox failed: ${error.message}\\n`, errorObject);\n }\n } finally {\n await this.finalizeSandboxAndSlice(sandbox, slice, reason);\n }\n }\n\n /**\n * If slice && slice.result, then call await this.recordResult(slice) and this.returnSandbox(sandbox, slice) will have no effect.\n * If slice && !slice.result, then call this.returnSlice(slice, reason) and then this.returnSandbox(sandbox, slice) which terminates sandbox.\n * If !slice && sandbox, then terminate the sandbox with this.returnSandbox(sandbox, slice) .\n * If !slice && !sandbox, then do nothing.\n * @param {Sandbox} [sandbox]\n * @param {Slice} [slice]\n * @param {string} [reason]\n */\n async finalizeSandboxAndSlice(sandbox, slice, reason) {\n debugging('supervisor') && console.log(`finalizeSandboxAndSlice: sandbox ${sandbox ? sandbox.identifier : 'nade'}, slice ${slice ? slice.identifier : 'nade'}`);\n if (slice) {\n if (slice.result) await this.recordResult(slice);\n else this.returnSlice(slice, reason);\n }\n // It is possible that sandbox is already terminated\n // Because sandbox.allocated=false as soon as sandbox.work(...) 
completes.\n // But the await at or in finalizeSandboxAndSlice may allow pruneSandboxes to slither in.\n if (sandbox) this.returnSandbox(sandbox, slice, false /* verifySandboxIsNotTerminated*/);\n }\n\n /**\n * Terminates sandboxes and returns slices.\n * Sets the working flag to false, call @this.work to start working again.\n * \n * If forceTerminate is true: Terminates all sandboxes and returns all slices.\n * If forceTerminate is false: Terminates non-allocated sandboxes and returns queued slices.\n *\n * @param {boolean} [forceTerminate = true] - true if you want to stop the sandboxes from completing their current slice.\n * @returns {Promise<void>}\n */\n async stopWork (forceTerminate = true) {\n debugging('supervisor') && console.log('stopWork(${forceTerminate}): terminating sandboxes and returning slices to scheduler.');\n if (forceTerminate) {\n while (this.sandboxes.length) {\n this.returnSandbox(this.sandboxes[0], null, false);\n }\n\n await this.returnSlices(this.slices).then(() => {\n this.queuedSlices.length = 0;\n });\n } else {\n // Only terminate idle sandboxes and return only queued slices\n let idleSandboxes = this.sandboxes.filter(w => !w.allocated);\n for (const sandbox of idleSandboxes) {\n this.returnSandbox(sandbox, null, false /* verifySandboxIsNotTerminated*/);\n }\n\n await this.returnSlices(this.queuedSlices).then(() => {\n this.queuedSlices.length = 0;\n });\n\n await new Promise((resolve, reject) => {\n let sandboxesRemaining = this.allocatedSandboxes.length;\n if (sandboxesRemaining === 0)\n {\n resolve();\n }\n // Resolve and finish work once all sandboxes have finished submitting their results.\n this.on('submitFinished', () => {\n sandboxesRemaining--;\n if (sandboxesRemaining === 0)\n {\n console.log('All sandboxes empty, stopping worker and closing all connections');\n resolve();\n }\n });\n });\n }\n\n if (this.resultSubmitterConnection) {\n this.resultSubmitterConnection.off('close', this.openResultSubmitterConn);\n this.resultSubmitterConnection.close();\n this.resultSubmitterConnection = null;\n }\n\n if (this.taskDistributorConnection) {\n this.taskDistributorConnection.off('close', this.openTaskDistributorConn);\n this.taskDistributorConnection.close();\n this.taskDistributorConnection = null;\n }\n\n if (this.packageManagerConnection) {\n this.packageManagerConnection.off('close', this.openPackageManagerConn);\n this.packageManagerConnection.close();\n this.packageManagerConnection = null;\n }\n\n if (this.eventRouterConnection) {\n this.eventRouterConnection.off('close', this.openEventRouterConn);\n this.eventRouterConnection.close();\n this.eventRouterConnection = null;\n }\n\n this.emit('stop');\n }\n\n /**\n * Takes a slice and returns it to the scheduler to be redistributed.\n * Usually called when an exception is thrown by sandbox.work(slice, startDelayMs) .\n * Or when the supervisor tells it to forcibly stop working.\n *\n * @param {Slice} slice - The slice to return to the scheduler.\n * @param {string} [reason] - Optional reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @returns {Promise<*>} - Response from the scheduler.\n */\n returnSlice (slice, reason) {\n // When sliceNumber === 0 don't send a status message.\n if (slice.sliceNumber === 0) return Promise.resolve();\n \n debugging() && console.log(`Supervisor.returnSlice: Returning slice ${slice.identifier} with reason ${reason}.`);\n \n const payload = slice.getReturnMessagePayload(this.workerOpaqueId, reason);\n return 
this.resultSubmitterConnection.send('status', payload)\n .then(response => {\n return response;\n }).catch(error => {\n debugging('supervisor') && console.error('Failed to return slice', {\n sliceNumber: slice.sliceNumber,\n jobAddress: slice.jobAddress,\n status: slice.status,\n error,\n }, 'Will try again on a new connection.');\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: payload });\n this.resultSubmitterConnection.close();\n });\n }\n\n /**\n * Bulk-return multiple slices, possibly for assorted jobs.\n * Returns slices to the scheduler to be redistributed.\n * Called in the sandbox terminate handler and purgeAllWork(jobAddress)\n * and stopWork(forceTerminate).\n *\n * @param {Slice[]} slices - The slices to return to the scheduler.\n * @returns {Promise<void>} - Response from the scheduler.\n */\n async returnSlices(slices) {\n if (!slices || !slices.length) return Promise.resolve();\n \n const slicePayload = [];\n slices.forEach(slice => { addToReturnSlicePayload(slicePayload, slice); });\n this.removeSlices(slices);\n\n debugging('supervisor') && console.log(`Supervisor.returnSlices: Returning slices ${await this.dumpSlices(slices)}.`);\n\n return this.resultSubmitterConnection.send('status', {\n worker: this.workerOpaqueId,\n slices: slicePayload,\n }).then(response => {\n return response;\n }).catch(error => {\n const errorInfo = slices.map(slice => slice.identifier);\n debugging('supervisor') && console.error('Failed to return slice(s)', { errorInfo, error }, 'Will try again on new connection.');\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: { worker: this.workerOpaqueId, slices: slicePayload } })\n this.resultSubmitterConnection.close();\n // Just in case the caller is expecing a DCP response\n return { success: false, payload: {} };\n });\n }\n\n /**\n * Submits the slice results to the scheduler, either to the\n * work submit or fail endpoints based on the slice status.\n * Then remove the slice from the @this.slices cache.\n *\n * @param {Slice} slice - The slice to submit.\n * @returns {Promise<void>}\n */\n async recordResult (slice) {\n // It is possible for slice.result to be undefined when there are upstream errors.\n if ( !(slice && slice.result))\n throw new Error(`recordResult: slice.result is undefined for slice ${slice.identifier}. 
This is ok when there are upstream errors.`);\n\n debugging('supervisor') && console.log(`supervisor: recording result for slice ${slice.identifier}.`);\n\n const jobAddress = slice.jobAddress;\n const sliceNumber = slice.sliceNumber;\n const authorizationMessage = slice.getAuthorizationMessage();\n\n /* @see result-submitter::result for full message details */\n const metrics = { GPUTime: 0, CPUTime: 0, CPUDensity: 0, GPUDensity: 0 };\n const payloadData = {\n slice: sliceNumber,\n job: jobAddress,\n worker: this.workerOpaqueId,\n paymentAddress: this.paymentAddress,\n metrics,\n authorizationMessage,\n }\n \n const timeReport = slice.timeReport;\n if (timeReport && timeReport.total > 0) {\n metrics.GPUTime = timeReport.webGL;\n metrics.CPUTime = timeReport.CPU;\n metrics.CPUDensity = metrics.CPUTime / timeReport.total;\n metrics.GPUDensity = metrics.GPUTime / timeReport.total;\n metrics.CPUTime = 1 + Math.floor(metrics.CPUTime);\n metrics.GPUTime = 1 + Math.floor(metrics.GPUTime);\n }\n \n this.emit('submittingResult');\n\n if (!slice.isFinished)\n throw new Error('Cannot record result for slice that is not finished');\n\n if (slice.resultStorageType === 'pattern') { /* This is a remote-storage slice. */\n const remoteResult = await this.sendResultToRemote(slice);\n payloadData.result = encodeDataURI(JSON.stringify(remoteResult));\n } else {\n payloadData.result = encodeDataURI(slice.result.result); /* XXXwg - result.result is awful */\n }\n debugging('supervisor') && console.log('Supervisor.recordResult: payloadData.result', payloadData.result.slice(0, 512));\n\n try {\n if (slice.completed) {\n\n /* work function returned a result */\n let resp = await this.resultSubmitterConnection.send(\n 'result',\n payloadData,\n ).catch((error) => {\n debugging('supervisor') && console.error(`Failed to submit result to scheduler for slice ${payloadData.slice} of job ${payloadData.job}:\\n ${error} \\nWill try again on new connection.`);\n this.resultSubmitterMessageQueue.push({ operation: 'result', data: payloadData });\n this.resultSubmitterConnection.close();\n error.handled = true;\n throw error; /* Caught in catch block below */\n });\n \n if (!resp.success)\n throw resp.payload;\n\n if (false) {}\n\n const receipt = {\n accepted: true,\n payment: resp.payload.slicePaymentAmount,\n };\n this.emit('submittedResult', resp.payload);\n this.emit('dccCredit', receipt);\n } else {\n /* slice did not complete for some reason */\n \n // If the slice from a job never completes and the job address exists in the ringBufferofJobs, \n // then we remove it to allow for another slice (from the same job) to be obtained by fetchTask\n this.ringBufferofJobs.buf = this.ringBufferofJobs.filter(element => element !== jobAddress);\n \n await this.returnSlice(slice);\n }\n } catch(error) {\n !error.handled && console.info(`1014: Failed to submit results for slice ${payloadData.slice} of job ${payloadData.job}`, error);\n this.emit('submitSliceFailed', error);\n } finally {\n this.emit('submitFinished');\n // Remove the slice from the slices array.\n this.removeSlice(slice);\n if (Supervisor.sliceTiming) {\n slice['resultDelta'] = Date.now() - slice['resultDelta'];\n console.log(`recordResult(${slice['pairingDelta']}, ${slice['executionDelta']}, ${slice['resultDelta']}): Completed slice ${slice.identifier}.`);\n } else\n debugging('supervisor') && console.log(`recordResult: Completed slice ${slice.identifier}.`);\n }\n }\n\n /**\n * Send a work function's result to a server that speaks our DCP Remote Data Server 
protocol.\n * The data server dcp-rds is been implemented in https://gitlab.com/Distributed-Compute-Protocol/dcp-rds .\n *\n * @param {Slice} slice - Slice object whose result we are sending.\n * @returns {Promise<object>} - Object of the form { success: true, href: 'http://127.0.0.1:3521/methods/download/jobs/34/result/10' } .\n * @throws When HTTP status not in the 2xx range.\n */\n async sendResultToRemote(slice) {\n const postParams = {\n ...slice.resultStorageParams\n };\n\n const sliceResultUri = makeDataURI('pattern', slice.resultStorageDetails, {\n slice: slice.sliceNumber,\n job: slice.jobAddress,\n });\n\n debugging() && console.log('sendResultToRemote sliceResultUri: ', sliceResultUri);\n\n const url = new DcpURL(sliceResultUri);\n\n // Note: sendResultToRemote was made a member function of class Supervisor to enable access to this.alowedOrigins .\n if (this.allowedOrigins.indexOf(url.origin) === -1 &&\n dcpConfig.worker.allowOrigins.sendResults.indexOf(url.origin) === -1) {\n throw new Error(`Invalid origin for remote result storage: '${url.origin}'`);\n }\n\n postParams.element = slice.sliceNumber;\n postParams.contentType = 'application/json'; // Currently data will be outputed as a JSON object, @todo: Support file upload.\n\n debugging() && console.log('sendResultToRemote: postParams: ', postParams);\n\n let result = slice.result.result;\n if (result) {\n postParams.content = JSON.stringify(result);\n } else {\n postParams.error = JSON.stringify(slice.error);\n }\n\n debugging('supervisor') && console.log('sendResultToRemote: content: ', (result ? postParams.content : postParams.error).slice(0, 512));\n\n //\n // Notes:\n // 1) In recordResults the response from justFetch is JSON serialized and encodeDataURI is called.\n // payloadData.result = await this.sendResultToRemote(slice);\n // payloadData.result = encodeDataURI(JSON.stringify(payloadData.result));\n // 2) We do further processing after the call to sendResultToRemote in recordResult, because\n // if we did it here there would be a perf hit. When the return value is a promise, it gets\n // folded into sendResultToRemote's main promise. If justFetch's promise wasn't a return value then\n // justFetch would be separately added to the micro-task-queue.\n return await justFetch(url, 'JSON', 'POST', false, postParams);\n }\n}\n\n/**\n * Sandbox has had an error which is not from the work function: kill it\n * and try to redo the slice.\n */\nfunction handleSandboxError(supervisor, sandbox, error) {\n const slice = sandbox.slice;\n\n slice.sandboxErrorCount = (slice.sandboxErrorCount || 0) + 1;\n sandbox.slice = null;\n supervisor.returnSandbox(sandbox); /* terminate the sandbox */\n slice.status = SLICE_STATUS_UNASSIGNED; /* ToT */\n console.warn(`Supervisor.handleSandboxError: Sandbox ${sandbox.identifier}...(${sandbox.public.name}/${slice.sandboxErrorCount}) with slice ${slice.identifier} had error.`, error);\n\n if (slice.sandboxErrorCount < dcpConfig.worker.maxSandboxErrorsPerSlice)\n supervisor.queuedSlices.push(slice);\n else {\n slice.error = error;\n supervisor.returnSlice(slice);\n }\n}\n\n/**\n * Add a slice to the slice payload being built. If a sliceList already exists for the\n * job-status-authMessage tuple, then the slice will be added to that, otherwise a new\n * sliceList will be added to the payload.\n *\n * @param {Object[]} slicePayload - Slice payload being built. Will be mutated in place.\n * @param {Slice} slice - The slice.\n * @param {String} status - Status update, eg. 
progress or scheduled.\n *\n * @returns {Object[]} mutated slicePayload array\n */\nfunction addToSlicePayload(slicePayload, slice, status) {\n // getAuthorizationMessage helps enforces the equivalence\n // !authorizationMessage <==> sliceNumber === 0\n const authorizationMessage = slice.getAuthorizationMessage();\n if (!authorizationMessage) return;\n\n // Try to find a sliceList in the payload which matches the job, status, and auth message\n let sliceList = slicePayload.find(desc => {\n return desc.job === slice.jobAddress\n && desc.status === status\n && desc.authorizationMessage === authorizationMessage;\n });\n\n // If we didn't find a sliceList, start a new one and add it to the payload\n if (!sliceList) {\n sliceList = {\n job: slice.jobAddress,\n sliceNumbers: [],\n status,\n authorizationMessage,\n };\n slicePayload.push(sliceList);\n }\n\n sliceList.sliceNumbers.push(slice.sliceNumber);\n\n return slicePayload;\n}\n\n/**\n * Add a slice to the returnSlice payload being built. If a sliceList already exists for the\n * job-isEstimation-authMessage-reason tuple, then the slice will be added to that, otherwise a new\n * sliceList will be added to the payload.\n *\n * @param {Object[]} slicePayload - Slice payload being built. Will be mutated in place.\n * @param {Slice} slice - The slice.\n * @param {String} [reason] - Optional reason to further characterize status; e.g. 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n *\n * @returns {Object[]} mutated slicePayload array\n */\nfunction addToReturnSlicePayload(slicePayload, slice, reason) {\n // getAuthorizationMessage helps enforces the equivalence\n // !authorizationMessage <==> sliceNumber === 0\n const authorizationMessage = slice.getAuthorizationMessage();\n if (!authorizationMessage) return;\n\n if (!reason) reason = slice.error ? 'EUNCAUGHT' : 'unknown';\n\n // Try to find a sliceList in the payload which matches the job, status, and auth message\n let sliceList = slicePayload.find(desc => {\n return desc.job === slice.jobAddress\n && desc.isEstimationSlice === slice.isEstimationSlice\n && desc.authorizationMessage === authorizationMessage\n && desc.reason === reason;\n });\n\n // If we didn't find a sliceList, start a new one and add it to the payload\n if (!sliceList) {\n sliceList = {\n job: slice.jobAddress,\n sliceNumbers: [],\n status: 'return',\n isEstimationSlice: slice.isEstimationSlice,\n authorizationMessage,\n reason,\n };\n slicePayload.push(sliceList);\n }\n\n sliceList.sliceNumbers.push(slice.sliceNumber);\n\n return slicePayload;\n}\n\n/**\n * Return DCPv4-specific connection options, composed of type-specific, URL-specific, \n * and worker-specific options, any/all of which can override the dcpConfig.dcp.connectOptions.\n * The order of precedence is the order of specificity.\n */\nfunction connectionOptions(url, label) {\n return leafMerge(/* ordered from most to least specific */\n dcpConfig.worker.dcp.connectionOptions.default,\n dcpConfig.worker.dcp.connectionOptions[label],\n dcpConfig.worker.dcp.connectionOptions[url.href]);\n}\n\n/** @type {number | boolean} */\nSupervisor.lastAssignFailTimerMs = false;\n/** @type {boolean} */\nSupervisor.startSandboxWork_beenCalled = false;\n/** @type {boolean} */\nSupervisor.debugBuild = ((__webpack_require__(/*! 
dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug');\n/**\n * When Supervisor.sliceTiming is set to be true, it displays the timings of a every slice\n * slice['pairingDelta'] = timespan of when slice is paired with sandbox until execution starts\n * slice['executionDelta'] = timespan of execution in sandbox\n * slice['resultDelta'] = timespan of when sandbox finishes executing until recordResult completes.\n * @type {boolean}\n */\nSupervisor.sliceTiming = false;\n\nexports.Supervisor = Supervisor;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor.js?");
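Note on the assign-retry backoff in distributeQueuedSlices above, which is easy to miss inside the bundled source: Supervisor.lastAssignFailTimerMs starts out false, is seeded with a random delay under 200 ms on the first failed assign, grows by a factor of 1.25 on each subsequent failure, is clamped to 10 minutes when used as the sleep before the next assign attempt, and is reset to false once startSandboxWork is reached. A minimal standalone sketch of that arithmetic (illustrative only, not part of the shipped bundle; function names are invented here):

// Compute the next value of the failure timer after an assign failure.
function nextAssignFailTimerMs(lastAssignFailTimerMs) {
  // false means "no recent assign failure": seed with a random delay < 200 ms.
  if (!lastAssignFailTimerMs)
    return Math.random() * 200;
  // Otherwise back off geometrically, matching the * 1.25 growth in the module above.
  return +lastAssignFailTimerMs * 1.25;
}

// Compute the sleep taken before the next sandbox.assign() attempt.
function sleepBeforeAssignMs(lastAssignFailTimerMs) {
  // The sleep is clamped to 10 minutes, as in distributeQueuedSlices above.
  return Math.floor(Math.min(+lastAssignFailTimerMs || 0, 10 * 60 * 1000));
}

// Example: a first failure seeds roughly 0-200 ms; two more failures grow that
// value by 1.25 each time; a successful assign resets the timer to false.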
4449
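In the replacement supervisor.js module that follows, sandbox start times are jittered so that WebWorkers are not all launched at once: readySandboxes draws a delay uniformly between tuning.minSandboxStartDelay and tuning.maxSandboxStartDelay (0.1 s and 0.7 s by default, overridable through the worker tuning in dcpConfig). A small sketch of that calculation under those default values (illustrative only, not part of the bundle):

// Defaults taken from the tuning object in the module below.
const tuning = { minSandboxStartDelay: 0.1, maxSandboxStartDelay: 0.7 };

// Uniform jitter in [min, max) seconds, converted to milliseconds.
function sandboxStartDelayMs() {
  return 1000 * (tuning.minSandboxStartDelay +
                 Math.random() * (tuning.maxSandboxStartDelay - tuning.minSandboxStartDelay));
}

// e.g. sandbox.start(sandboxStartDelayMs()) staggers WebWorker startup
// somewhere between 100 ms and 700 ms per sandbox.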
+ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file worker/supervisor.js\n *\n * The component that controls each of the sandboxes\n * and distributes work to them. Also communicates with the\n * scheduler to fetch said work.\n *\n * The supervisor readies sandboxes before/while fetching slices.\n * This means sometimes there are extra instantiated WebWorkers\n * that are idle (in this.readiedSandboxes). Readied sandboxes can\n * be used for any slice. After a readied sandbox is given a slice\n * it becomes assigned to slice's job and can only do work\n * for that job.\n *\n * After a sandbox completes its work, the sandbox becomes cached\n * and can be reused if another slice with a matching job is fetched.\n *\n * @author Matthew Palma, mpalma@kingsds.network\n * Ryan Rossiter, ryan@kingsds.network\n * @date May 2019\n */\n\n/* global dcpConfig */\n// @ts-check\n\n\nconst constants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst hash = __webpack_require__(/*! dcp/common/hash */ \"./src/common/hash.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst protocolV4 = __webpack_require__(/*! dcp/protocol-v4 */ \"./src/protocol-v4/index.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('worker');\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { Sandbox, SandboxError } = __webpack_require__(/*! ./sandbox */ \"./src/dcp-client/worker/sandbox.js\");\nconst { Slice, SLICE_STATUS_UNASSIGNED, SLICE_STATUS_FAILED } = __webpack_require__(/*! ./slice */ \"./src/dcp-client/worker/slice.js\");\nconst { SupervisorCache } = __webpack_require__(/*! ./supervisor-cache */ \"./src/dcp-client/worker/supervisor-cache.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst { localStorage } = __webpack_require__(/*! dcp/common/dcp-localstorage */ \"./src/common/dcp-localstorage.js\");\nconst { booley, encodeDataURI, makeDataURI, leafMerge, a$sleepMs, justFetch, compressJobMap, toJobMap,\n compressSandboxes, compressSlices, truncateAddress, dumpSandboxesIfNotUnique, dumpSlicesIfNotUnique, generateOpaqueId } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { sliceStatus } = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\nconst { calculateJoinHash } = __webpack_require__(/*! dcp/dcp-client/compute-groups */ \"./src/dcp-client/compute-groups/index.js\");\nconst RingBuffer = __webpack_require__(/*! dcp/utils/ringBuffer */ \"./src/utils/ringBuffer.js\");\nconst supervisorTuning = dcpConfig.future('worker.tuning');\nconst tuning = {\n watchdogInterval: 7, /**< seconds - time between fetches when ENOTASK(? 
/wg nov 2019) */\n minSandboxStartDelay: 0.1, /**< seconds - minimum time between WebWorker starts */\n maxSandboxStartDelay: 0.7, /**< seconds - maximum delay time between WebWorker starts */\n ...supervisorTuning\n};\n\n/** Make timers 10x slower when running in niim */\nlet timeDilation = 1;\nif (DCP_ENV.platform === 'nodejs') {\n /** Make timers 10x slower when running in niim */\n timeDilation = (requireNative('module')._cache.niim instanceof requireNative('module').Module) ? 10 : 1;\n}\n\ndcpConfig.future('worker.sandbox', { progressReportInterval: (5 * 60 * 1000) });\nconst sandboxTuning = dcpConfig.worker.sandbox;\n\n/**\n * @typedef {*} address\n * @typedef {*} opaqueId\n */\n\n/**\n * @typedef {object} SandboxSlice\n * @property {Sandbox} sandbox\n * @property {Slice} slice\n */\n\n/**\n * @typedef {object} Signature\n * @property {Uint8Array} r\n * @property {Uint8Array} s\n * @property {Uint8Array} v\n */\n\n/**\n * @typedef {object} SignedAuthorizationMessageObject\n * @property {object} auth\n * @property {Signature} signature\n * @property {module:dcp/wallet.Address} owner\n */\n\n/** @typedef {import('.').Worker} Worker */\n/** @typedef {import('.').SupervisorOptions} SupervisorOptions */\n\nclass Supervisor extends EventEmitter {\n /**\n * @constructor\n * @param {Worker} worker\n * @param {SupervisorOptions} options\n */\n constructor (worker, options={}) {\n super('Supervisor');\n\n /** @type {Worker} */\n this.worker = worker;\n\n /** @type {Sandbox[]} */\n this.sandboxes = [];\n\n /** @type {Sandbox[]} */\n this.readiedSandboxes = [];\n\n /** @type {Sandbox[]} */\n this.assignedSandboxes = [];\n\n /** @type {Slice[]} */\n this.slices = [];\n\n /** @type {Slice[]} */\n this.queuedSlices = [];\n\n /** @type {Slice[]} */\n this.lostSlices = [];\n\n /** @type {boolean} */\n this.matching = false;\n\n /** @type {boolean} */\n this.isFetchingNewWork = false;\n\n /** @type {number} */\n this.numberOfCoresReserved = 0;\n\n /** @type {number} */\n this.addressTruncationLength = 20; // Set to -1 for no truncation.\n\n /** @type {Object[]} */\n this.rejectedJobs = [];\n this.rejectedJobReasons = [];\n\n if (!options) {\n console.error('Supervisor Options', options, new Error().stack);\n options = {};\n }\n\n /** @type {object} */\n this.options = {\n jobAddresses: options.jobAddresses || [/* all jobs unless priorityOnly */],\n ...options,\n };\n\n const { paymentAddress, identityKeystore } = options;\n if (paymentAddress) {\n if (paymentAddress instanceof wallet.Keystore) {\n this.paymentAddress = paymentAddress.address;\n } else {\n this.paymentAddress = new wallet.Address(paymentAddress);\n }\n } else {\n this.paymentAddress = null;\n }\n\n this._identityKeystore = identityKeystore;\n\n // In localExec, do not allow work function or arguments to come from the 'any' origins\n this.allowedOrigins = []\n if (this.options.localExec)\n {\n dcpConfig.worker.allowOrigins.fetchData = dcpConfig.worker.allowOrigins.fetchData.concat(dcpConfig.worker.allowOrigins.any)\n dcpConfig.worker.allowOrigins.sendResults = dcpConfig.worker.allowOrigins.sendResults.concat(dcpConfig.worker.allowOrigins.any)\n }\n else\n this.allowedOrigins = dcpConfig.worker.allowOrigins.any;\n\n if(options.allowedOrigins && options.allowedOrigins.length > 0)\n this.allowedOrigins = options.allowedOrigins.concat(this.allowedOrigins);\n\n /**\n * Maximum sandboxes allowed to work at a given time.\n * @type {number}\n */\n this.maxWorkingSandboxes = options.maxWorkingSandboxes || 1;\n\n /** @type {number} */\n 
this.defaultMaxGPUs = 1;\n // this.GPUsAssigned = 0;\n \n // Object.defineProperty(this, 'GPUsAssigned', {\n // get: () => this.allocatedSandboxes.filter(sb => !!sb.requiresGPU).length,\n // enumerable: true,\n // configurable: false,\n // });\n\n /**\n * TODO: Remove this when the supervisor sends all of the sandbox\n * capabilities to the scheduler when fetching work.\n * @type {object}\n */\n this.capabilities = null;\n\n /** @type {number} */\n this.lastProgressReport = 0;\n\n /** \n * An N-slot ring buffer of job addresses. Stores all jobs that have had no more than 1 slice run in the ring buffer.\n * Required for the implementation of discrete jobs \n * @type {RingBuffer} \n */\n this.ringBufferofJobs = new RingBuffer(200); // N = 200 should be more than enough.\n \n // @hack - dcp-env.isBrowserPlatform is not set unless the platform is _explicitly_ set,\n // using the default detected platform doesn't set it.\n // Fixing that causes an error in the wallet module's startup on web platform, which I\n // probably can't fix in a reasonable time this morning.\n // ~ER2020-02-20\n\n if (!options.maxWorkingSandboxes\n && DCP_ENV.browserPlatformList.includes(DCP_ENV.platform)\n && navigator.hardwareConcurrency > 1) {\n this.maxWorkingSandboxes = navigator.hardwareConcurrency - 1;\n if (typeof navigator.userAgent === 'string') {\n if (/(Android).*(Chrome|Chromium)/.exec(navigator.userAgent)) {\n this.maxWorkingSandboxes = 1;\n console.log('Doing work with Chromimum browsers on Android is currently limited to one sandbox');\n }\n }\n }\n\n /** @type {SupervisorCache} */\n this.cache = new SupervisorCache(this);\n /** @type {object} */\n this._connections = {}; /* active DCPv4 connections */\n // Call the watchdog every 7 seconds.\n this.watchdogInterval = setInterval(() => this.watchdog(), tuning.watchdogInterval * 1000);\n if (DCP_ENV.platform === 'nodejs' && this.options.localExec) /* interval helps keep normal worker alive forever, which we don't want in localexec */\n this.watchdogInterval.unref();\n\n const ceci = this;\n\n // Initialize to null so these properties are recognized for the Supervisor class\n this.taskDistributorConnection = null;\n this.eventRouterConnection = null;\n this.resultSubmitterConnection = null;\n this.packageManagerConnection = null;\n this.openTaskDistributorConn = function openTaskDistributorConn()\n {\n let config = dcpConfig.scheduler.services.taskDistributor;\n ceci.taskDistributorConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'taskDistributor'));\n ceci.taskDistributorConnection.on('close', ceci.openTaskDistributorConn);\n }\n\n this.openEventRouterConn = function openEventRouterConn()\n {\n let config = dcpConfig.scheduler.services.eventRouter;\n ceci.eventRouterConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'eventRouter'));\n ceci.eventRouterConnection.on('close', ceci.openEventRouterConn);\n if (ceci.eventRouterMessageQueue.length)\n ceci.resendRejectedMessages(ceci.eventRouterConnection, ceci.eventRouterMessageQueue);\n }\n this.eventRouterMessageQueue = [];\n \n this.openResultSubmitterConn = function openResultSubmitterConn()\n {\n let config = dcpConfig.scheduler.services.resultSubmitter;\n ceci.resultSubmitterConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'resultSubmitter'));\n ceci.resultSubmitterConnection.on('close', ceci.openResultSubmitterConn);\n if 
(ceci.resultSubmitterMessageQueue.length)\n ceci.resendRejectedMessages(ceci.resultSubmitterConnection, ceci.resultSubmitterMessageQueue);\n }\n this.resultSubmitterMessageQueue = [];\n\n this.openPackageManagerConn = function openPackageManagerConn()\n {\n let config = dcpConfig.packageManager;\n ceci.packageManagerConnection = new protocolV4.Connection(config, ceci.identityKeystore, connectionOptions(config.location, 'packageManager'));\n ceci.packageManagerConnection.on('close', ceci.openPackageManagerConn);\n if (ceci.packageManagerMessageQueue.length)\n ceci.resendRejectedMessages(ceci.packageManagerConnection, ceci.packageManagerMessageQueue);\n }\n this.packageManagerMessageQueue = [];\n }\n\n /**\n * Return worker opaqueId.\n * @type {opaqueId}\n */\n get workerOpaqueId() {\n if (!this._workerOpaqueId)\n this._workerOpaqueId = localStorage.getItem('workerOpaqueId');\n\n if (!this._workerOpaqueId || this._workerOpaqueId.length !== constants.workerIdLength) {\n this._workerOpaqueId = generateOpaqueId();\n localStorage.setItem('workerOpaqueId', this._workerOpaqueId);\n }\n\n return this._workerOpaqueId;\n }\n\n /**\n * This getter is the absolute source-of-truth for what the\n * identity keystore is for this instance of the Supervisor.\n */\n get identityKeystore() {\n assert(this.defaultIdentityKeystore);\n\n return this._identityKeystore || this.defaultIdentityKeystore;\n }\n\n /**\n * Open all connections. Used when supervisor is instantiated or stopped/started\n * to initially open connections.\n */\n instantiateAllConnections() {\n if (!this.taskDistributorConnection)\n this.openTaskDistributorConn();\n \n if (!this.eventRouterConnection)\n this.openEventRouterConn();\n \n if (!this.resultSubmitterConnection)\n this.openResultSubmitterConn();\n\n if (!this.packageManagerConnection)\n this.openPackageManagerConn();\n }\n \n /**\n * Try sending messages that were rejected on an old instance of the given connection.\n */\n async resendRejectedMessages(connection, messageQueue) {\n var message = messageQueue.shift();\n\n do {\n try {\n await connection.send(message.operation, message.data);\n } catch (e) {\n debugging('supervisor') && console.error(`Failed to send message ${message.operation} to scheduler: ${e}. Will try again on a new connection.`);\n messageQueue.unshift(message);\n connection.close();\n break;\n }\n message = messageQueue.shift();\n } while (message)\n }\n\n /** Set the default identity keystore -- needs to happen before anything that talks\n * to the scheduler for work gets called. This is a wart and should be removed by\n * refactoring.\n *\n * The default identity keystore will be used if the Supervisor was not provided\n * with an alternate. This keystore will be located via the Wallet API, and \n * if not found, a randomized default identity will be generated. 
\n *\n * @param {object} ks An instance of wallet::Keystore -- if undefined, we pick the best default we can.\n * @returns {Promise<void>}\n */\n async setDefaultIdentityKeystore(ks) {\n try {\n if (ks) {\n this.defaultIdentityKeystore = ks;\n return;\n }\n\n if (this.defaultIdentityKeystore)\n return;\n\n try {\n this.defaultIdentityKeystore = await wallet.getId();\n } catch(e) {\n debugging('supervisor') && console.debug('Error generating default identity, try to do it another way.');\n this.defaultIdentityKeystore = await new wallet.IdKeystore(null, '');\n }\n } finally {\n if (this.defaultIdentityKeystore)\n debugging('supervisor') && console.debug('Set default identity =', this.defaultIdentityKeystore.address);\n else\n debugging('supervisor') && console.debug('Failed to set default identity, worker cannot work.');\n }\n }\n\n //\n // What follows is a bunch of utility properties and functions for creating filtered views\n // of the slices and sandboxes array.\n //\n /** XXXpfr @todo Write sort w/o using promises so we can get rid of async on all the compress functions. */\n\n /**\n * @deprecated -- Please do not use this.workingSandboxes; use this.allocatedSandboxes instead.\n * Sandboxes that are in WORKING state.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Sandbox[]}\n */\n get workingSandboxes() {\n return this.sandboxes.filter(sandbox => sandbox.isWorking);\n }\n\n /**\n * Use instead of this.workingSandboxes.\n *\n * When a sandbox is paired with a slice, execution is pending and sandbox.allocated=true and\n * sandbox.slice=slice and sandbox.jobAddress=slice.jobAddress. This is what 'allocated' means.\n * Immediately upon the exit of sandbox.work, sandbox.allocated=false is set and if an exception\n * wasn't thrown the sandbox is placed in this.assignedSandboxes.\n * Thus from the pov of supervisor, this.allocatedSandboxes is deterministic and this.workingSandboxes is not.\n * Please try to not use this.workingSandboxes. 
It is deprecated.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Sandbox[]}\n */\n get allocatedSandboxes() {\n return this.sandboxes.filter(sandbox => sandbox.allocated);\n }\n\n /**\n * Slices that are allocated.\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {Slice[]}\n */\n get allocatedSlices() {\n return this.slices.filter(slice => slice.allocated);\n }\n\n /**\n * This property is used as the target number of sandboxes to be associated with slices and start working.\n *\n * It is used in this.watchdog as to prevent a call to this.work when unallocatedSpace <= 0.\n * It is also used in this.distributeQueuedSlices where it is passed as an argument to this.matchSlicesWithSandboxes to indicate how many sandboxes\n * to associate with slices and start working.\n *\n * Warning: Do not rely on this information being 100% accurate -- it may change in the next instant.\n * @type {number}\n */\n get unallocatedSpace() {\n return this.maxWorkingSandboxes - this.allocatedSandboxes.length - this.numberOfCoresReserved;\n }\n \n /**\n * Call acquire(numberOfCoresToReserve) to reserve numberOfCoresToReserve unallocated sandboxes as measured by unallocatedSpace.\n * Call release() to undo the previous acquire.\n * This pseudo-mutex technique helps prevent races in scheduling slices in Supervisor.\n * @param {number} numberOfCoresToReserve\n */\n acquire(numberOfCoresToReserve) { \n this.numberOfCoresReserved = numberOfCoresToReserve; \n }\n release() { \n this.numberOfCoresReserved = 0; \n }\n\n /**\n * Remove from this.slices.\n * @param {Slice} slice\n */\n removeSlice(slice) {\n this.removeElement(this.slices, slice);\n if (Supervisor.debugBuild) {\n if (this.queuedSlices.indexOf(slice) !== -1)\n throw new Error(`removeSlice: slice ${slice.identifier} is in queuedSlices; inconsistent state.`);\n if (this.lostSlices.length > 0) {\n console.warn(`removeSlice: slice ${slice.identifier}, found lostSlices ${this.lostSlices.map(s => s.identifier)}`);\n if (this.lostSlices.indexOf(slice) !== -1)\n throw new Error(`removeSlice: slice ${slice.identifier} is in lostSlices; inconsistent state.`);\n }\n }\n }\n\n /**\n * Remove from this.slices.\n * @param {Slice[]} slices\n */\n removeSlices(slices) {\n this.slices = this.slices.filter(slice => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove from this.queuedSlices.\n * @param {Slice[]} slices\n */\n removeQueuedSlices(slices) {\n this.queuedSlices = this.queuedSlices.filter(slice => slices.indexOf(slice) === -1);\n }\n\n /**\n * Remove from this.sandboxes, this.assignedSandboxes and this.readiedSandboxes.\n * @param {Sandbox} sandbox\n */\n removeSandbox(sandbox) {\n debugging('scheduler') && console.log(`removeSandbox ${sandbox.identifier}`);\n this.removeElement(this.sandboxes, sandbox);\n this.removeElement(this.assignedSandboxes, sandbox);\n\n // XXXpfr: April 13, 2022\n // I'm trying to understand and control when sandboxes get removed.\n // A sandbox in this.readiedSandboxes should never have returnSandbox/removeSandbox called on it except in stopWork.\n // Because of races and random worker crashes, it is hard to get this right, but I want to try.\n // If I don't fix this is the next 30 days or I forget, please delete this exception.\n if (false)\n {}\n\n this.removeElement(this.readiedSandboxes, sandbox);\n }\n\n /**\n * Remove from this.sandboxes and this.assignedSandboxes .\n * @param {Sandbox[]} 
sandboxes\n */\n async removeSandboxes(sandboxes) {\n debugging('scheduler') && console.log(`removeSandboxes: Remove ${sandboxes.length} sandboxes ${this.dumpSandboxes(sandboxes)}`);\n this.sandboxes = this.sandboxes.filter(sandbox => sandboxes.indexOf(sandbox) === -1);\n this.assignedSandboxes = this.assignedSandboxes.filter(sandbox => sandboxes.indexOf(sandbox) === -1);\n\n if (Supervisor.debugBuild) {\n const readied = this.readiedSandboxes.filter(sandbox => sandboxes.indexOf(sandbox) !== -1);\n if (readied.length > 0)\n throw new Error(`removeSandboxes: sandboxes ${readied.map(s => s.identifier)} are in readiedSandboxes; inconsistent state.`);\n }\n }\n\n /**\n * Remove element from theArray.\n * @param {Array<*>} theArray\n * @param {object|number} element\n * @param {boolean} [assertExists = true]\n */\n removeElement(theArray, element, assertExists = false) {\n let index = theArray.indexOf(element);\n assert(index !== -1 || !assertExists);\n if (index !== -1) theArray.splice(index, 1);\n }\n\n /**\n * Log sliceArray.\n * @param {Slice[]} sliceArray\n * @param {string} [header]\n * @returns {string}\n */\n dumpSlices(sliceArray, header) {\n if (header) console.log(`\\n${header}`);\n return compressSlices(sliceArray, this.addressTruncationLength);\n }\n\n /**\n * Log sandboxArray.\n * @param {Sandbox[]} sandboxArray\n * @param {string} [header]\n * @returns {string}\n */\n dumpSandboxes(sandboxArray, header) {\n if (header) console.log(`\\n${header}`);\n return compressSandboxes(sandboxArray, this.addressTruncationLength);\n }\n\n /**\n * If the elements of sandboxSliceArray are not unique, log the duplicates and dump the array.\n * @param {SandboxSlice[]} sandboxSliceArray\n * @param {string} header\n */\n dumpSandboxSlicesIfNotUnique(sandboxSliceArray, header) {\n if (!this.isUniqueSandboxSlices(sandboxSliceArray, header))\n console.log(this.dumpSandboxSlices(sandboxSliceArray));\n }\n\n /**\n * Log { sandbox, slice }.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @returns {string}\n */\n dumpSandboxAndSlice(sandbox, slice) {\n return `${sandbox.id}~${slice.sliceNumber}.${this.dumpJobAddress(slice.jobAddress)}`;\n }\n\n /**\n * Log { sandbox, slice } with state/status.\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @returns {string}\n */\n dumpStatefulSandboxAndSlice(sandbox, slice) {\n return `${sandbox.id}.${sandbox.state}~${slice.sliceNumber}.${this.dumpJobAddress(slice.jobAddress)}.${slice.status}`;\n }\n\n /**\n * Truncates jobAddress.toString() to this.addressTruncationLength digits.\n * @param {address} jobAddress\n * @returns {string}\n */\n dumpJobAddress(jobAddress) {\n return truncateAddress(jobAddress, this.addressTruncationLength /* digits*/);\n }\n\n /**\n * Dump sandboxSliceArray.\n * @param {SandboxSlice[]} sandboxSliceArray - input array of { sandbox, slice }\n * @param {string} [header] - optional header\n * @param {boolean} [stateFul] - when true, also includes slice.status and sandbox.state.\n * @returns {string}\n */\n dumpSandboxSlices(sandboxSliceArray, header, stateFul=false) {\n if (header) console.log(`\\n${header}`);\n const jobMap = {};\n sandboxSliceArray.forEach(ss => {\n const sss = stateFul ? 
`${ss.sandbox.id}.${ss.sandbox.state}~${ss.slice.sliceNumber}.${ss.slice.status}` : `${ss.sandbox.id}~${ss.slice.sliceNumber}`;\n if (!jobMap[ss.slice.jobAddress]) jobMap[ss.slice.jobAddress] = sss;\n else jobMap[ss.slice.jobAddress] += `,${sss}`;\n });\n let output = '';\n for (const [jobAddress, sss] of Object.entries(jobMap))\n output += `${this.dumpJobAddress(jobAddress)}:[${sss}]:`;\n return output;\n }\n\n /**\n * Check sandboxSliceArray for duplicates.\n * @param {SandboxSlice[]} sandboxSliceArray\n * @param {string} [header]\n * @param {function} [log]\n * @returns {boolean}\n */\n isUniqueSandboxSlices(sandboxSliceArray, header, log) {\n const result = [], slices = [], sandboxes = [];\n let once = true;\n sandboxSliceArray.forEach(x => {\n const sliceIndex = slices.indexOf(x.slice);\n const sandboxIndex = sandboxes.indexOf(x.sandbox);\n\n if (sandboxIndex >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x.sandbox) : console.log(`\\tWarning: Found duplicate sandbox ${x.sandbox.identifier}.`);\n } else sandboxes.push(x.sandbox);\n\n if (sliceIndex >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x.slice) : console.log(`\\tWarning: Found duplicate slice ${x.slice.identifier}.`);\n } else {\n slices.push(x.slice);\n if (sandboxIndex < 0) result.push(x);\n }\n });\n return sandboxSliceArray.length === result.length;\n }\n\n /**\n * Attempts to create and start a given number of sandboxes.\n * The sandboxes that are created can then be assigned for a\n * specific job at a later time. All created sandboxes\n * get put into the @this.readiedSandboxes array when allocateLocalSandboxes is false.\n *\n * @param {number} numSandboxes - the number of sandboxes to create\n * @param {boolean} [allocateLocalSandboxes=false] - when true, do not place in this.readiedSandboxes\n * @returns {Promise<Sandbox[]>} - resolves with array of created sandboxes, rejects otherwise\n * @throws when given a numSandboxes is not a number or if numSandboxes is Infinity\n */\n async readySandboxes (numSandboxes, allocateLocalSandboxes = false) {\n debugging('supervisor') && console.debug(`readySandboxes: Readying ${numSandboxes} sandboxes, total sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n \n if (typeof numSandboxes !== 'number' || Number.isNaN(numSandboxes) || numSandboxes === Infinity) {\n throw new Error(`${numSandboxes} is not a number of sandboxes that can be readied.`);\n }\n if (numSandboxes <= 0) {\n return [];\n }\n\n const sandboxStartPromises = [];\n const sandboxes = [];\n const errors = [];\n for (let i = 0; i < numSandboxes; i++) {\n const sandbox = new Sandbox(this.cache, {\n ...this.options.sandboxOptions,\n }, this.allowedOrigins);\n sandbox.addListener('ready', () => this.emit('sandboxReady', sandbox));\n sandbox.addListener('start', () => {\n this.emit('sandboxStart', sandbox);\n\n // When sliceNumber == 0, result-submitter status skips the slice,\n // so don't send it in the first place.\n // The 'start' event is fired when a worker starts up, hence there's no way\n // to determine whether sandbox has a valid slice without checking.\n if (sandbox.slice) {\n const jobAddress = sandbox.jobAddress;\n const sliceNumber = sandbox.slice.sliceNumber;\n // !authorizationMessage <==> sliceNumber === 0.\n const authorizationMessage = sandbox.slice.getAuthorizationMessage();\n\n if (authorizationMessage) {\n let statusPayload = {\n worker: this.workerOpaqueId,\n slices: [{\n 
job: jobAddress,\n sliceNumber: sliceNumber,\n status: 'begin',\n authorizationMessage,\n }],\n }\n \n this.resultSubmitterConnection.send('status', statusPayload).catch((error) => {\n debugging('supervisor') && console.error(`Error sending 'status' for slice ${sliceNumber} of job ${jobAddress}:\\n ${error}\\nWill try again on a new connection`);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: statusPayload });\n this.resultSubmitterConnection.close();\n });\n }\n }\n });\n sandbox.addListener('workEmit', ({ eventName, payload }) => {\n // Need to check if the sandbox hasn't been assigned a slice yet.\n if (!sandbox.slice) {\n if (Supervisor.debugBuild) {\n console.error(\n `Sandbox not assigned a slice before sending workEmit message to scheduler. 'workEmit' event originates from \"${eventName}\" event`, \n payload,\n );\n }\n }\n else\n {\n const jobAddress = sandbox.slice.jobAddress;\n const sliceNumber = sandbox.slice.sliceNumber;\n // sliceNumber can be zero if it came from a problem with loading modules.\n assert(jobAddress && (sliceNumber || sliceNumber === 0));\n // Send a work emit message from the sandbox to the event router\n // !authorizationMessage <==> sliceNumber === 0.\n let authorizationMessage;\n try {\n // Sometimes a sliceNumber===0 workEmit comes in before the client bundle is properly loaded.\n // Also happens with minor dcp-client version mismatches.\n authorizationMessage = sandbox.slice.getAuthorizationMessage();\n } catch(e) {\n authorizationMessage = null;\n }\n\n if (!authorizationMessage)\n {\n console.warn(`workEmit: missing authorization message for job ${jobAddress}, slice: ${sliceNumber}`);\n return Promise.resolve();\n }\n \n let workEmitPayload = {\n eventName,\n payload,\n job: jobAddress,\n slice: sliceNumber,\n worker: this.workerOpaqueId,\n authorizationMessage,\n }\n \n const workEmitPromise = this.eventRouterConnection.send('workEmit', workEmitPayload).catch(error => {\n debugging('supervisor') && console.warn(`workEmit: unable to send ${eventName} for slice ${sliceNumber} of job ${jobAddress}: ${error.message}.\\nTrying again on a new connection.`);\n this.eventRouterMessageQueue.push({ operation: 'workEmit', data: workEmitPayload })\n this.eventRouterConnection.close();\n if (Supervisor.debugBuild)\n console.error('workEmit error:', error);\n });\n\n if (Supervisor.debugBuild) {\n workEmitPromise.then(result => {\n if (!result || !result.success)\n console.warn('workEmit: event router did not accept event', result);\n });\n }\n }\n });\n\n // When any sbx completes, \n sandbox.addListener('complete', () => {\n this.watchdog();\n });\n\n sandbox.on('sandboxError', (error) => handleSandboxError(this, sandbox, error));\n \n sandbox.on('rejectedWorkMetrics', (data) =>{\n function updateRejectedMetrics(report) {\n ['total', 'CPU', 'webGL'].forEach((key) => {\n if (report[key]) sandbox.slice.rejectedTimeReport[key] += report[key];\n })\n }\n \n // If the slice already has rejected metrics, add this data to it. If not, assign this data to slices rejected metrics property\n if (sandbox.slice) {\n (sandbox.slice.rejectedTimeReport) ? 
updateRejectedMetrics(data.timeReport) : sandbox.slice.rejectedTimeReport = data.timeReport;\n }\n })\n \n // If the sandbox terminated and we are not shutting down, then should return all work which is currently\n // not being computed if all sandboxes are dead and the attempt to create a new one fails.\n sandbox.on('terminated',async () => {\n if (this.sandboxes.length > 0) {\n let terminatedSandboxes = this.sandboxes.filter(sbx => sbx.isTerminated);\n if (terminatedSandboxes.length === this.sandboxes.length) {\n debugging('supervisor') && console.debug(`readySandboxes: Create 1 sandbox in the sandbox-terminated-handler, total sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n await this.readySandboxes(1);\n \n // If we cannot create a new sandbox, that probably means we're on a screensaver worker\n // and the screensaver is down. So return the slices to the scheduler.\n if (this.sandboxes.length !== terminatedSandboxes.length + 1) {\n this.returnSlices(this.queuedSlices).then(() => {\n this.queuedSlices.length = 0;\n });\n }\n }\n }\n })\n\n const delayMs =\n 1000 *\n (tuning.minSandboxStartDelay +\n Math.random() *\n (tuning.maxSandboxStartDelay - tuning.minSandboxStartDelay));\n \n sandboxStartPromises.push(\n sandbox\n .start(delayMs)\n .then(() => {\n if (!allocateLocalSandboxes) this.readiedSandboxes.push(sandbox);\n this.sandboxes.push(sandbox);\n sandboxes.push(sandbox);\n }).catch((err) => {\n errors.push(err);\n this.returnSandbox(sandbox);\n if (err.code === 'ENOWORKER') {\n throw new DCPError(\"Cannot use localExec without dcp-worker installed. Use the command 'npm install dcp-worker' to install the neccessary modules.\", 'ENOWORKER');\n }\n }));\n }\n \n await Promise.all(sandboxStartPromises);\n\n if (errors.length) {\n console.warn(`Failed to ready ${errors.length} of ${numSandboxes} sandboxes.`, errors);\n throw new Error('Failed to ready sandboxes.');\n }\n\n debugging('supervisor') && console.log(`readySandboxes: Readied ${sandboxes.length} sandboxes ${JSON.stringify(sandboxes.map(sandbox => sandbox.id))}`);\n \n return sandboxes;\n }\n\n /**\n * Accepts a sandbox after it has finished working or encounters an error.\n * If the sandbox was terminated or if \"!slice || slice.failed\" then\n * the sandbox will be removed from the sandboxes array and terminated if necessary.\n * Otherwise it will try to distribute a slice to the sandbox immediately.\n *\n * @param {Sandbox} sandbox - the sandbox to return\n * @param {Slice} [slice] - the slice just worked on; !slice => terminate\n * @param {boolean} [verifySandboxIsNotTerminated=true] - if true, check sandbox is not already terminated\n */\n returnSandbox (sandbox, slice, verifySandboxIsNotTerminated=true) {\n if (!slice || slice.failed || sandbox.isTerminated) {\n \n this.removeSandbox(sandbox);\n \n if (!sandbox.isTerminated) {\n debugging('supervisor') && console.log(`Supervisor.returnSandbox: Terminating ${sandbox.identifier}${slice ? `~${slice.identifier}` : ''}, # of sandboxes ${this.sandboxes.length}`);\n sandbox.terminate(false);\n } else {\n debugging('supervisor') && console.log(`Supervisor.returnSandbox: Already terminated ${sandbox.identifier}${slice ? 
`~${slice.identifier}` : ''}, # of sandboxes ${this.sandboxes.length}`);\n // XXXpfr: April 13, 2022\n // I'm trying to understand and control when sandboxes get terminated.\n // Because of races and random worker crashes, it is impossible to not try to terminate a sandbox more than once.\n // But at some places where returnSandbox is we shouldn't see this behavior, hence this exception.\n // If I don't fix this is the next 30 days or I forget, please delete this exception.\n if (false)\n {}\n }\n }\n }\n\n /**\n * Terminates sandboxes, in order of creation, when the total started sandboxes exceeds the total allowed sandboxes.\n *\n * @returns {Promise<void>}\n */\n pruneSandboxes () {\n let numOver = this.sandboxes.length - (dcpConfig.worker.maxAllowedSandboxes + this.maxWorkingSandboxes);\n if (numOver <= 0) return;\n \n // Don't kill readied sandboxes while creating readied sandboxes.\n for (let index = 0; index < this.readiedSandboxes.length; ) {\n const sandbox = this.readiedSandboxes[index];\n // If the sandbox is allocated, advance to the next one in the list.\n if (sandbox.allocated) {\n index++;\n continue;\n }\n // Otherwise, remove this sandbox but look at the same array index in the next loop.\n debugging('supervisor') && console.log(`pruneSandboxes: Terminating readied sandbox ${sandbox.identifier}`);\n this.readiedSandboxes.splice(index, 1);\n this.returnSandbox(sandbox);\n\n if (--numOver <= 0) break;\n }\n\n if (numOver <= 0) return;\n for (let index = 0; index < this.assignedSandboxes.length; ) {\n const sandbox = this.assignedSandboxes[index];\n // If the sandbox is allocated, advance to the next one in the list.\n if (sandbox.allocated) {\n index++;\n continue;\n }\n // Otherwise, remove this sandbox but look at the same array index in the next loop.\n debugging('supervisor') && console.log(`pruneSandboxes: Terminating assigned sandbox ${sandbox.identifier}`);\n this.assignedSandboxes.splice(index, 1);\n this.returnSandbox(sandbox);\n\n if (--numOver <= 0) break;\n }\n }\n \n /**\n * Basic watch dog to check if there are idle sandboxes and\n * attempts to nudge the supervisor to feed them work.\n *\n * Run in an interval created in @constructor .\n * @returns {Promise<void>}\n */\n async watchdog () {\n if (!this.watchdogState)\n this.watchdogState = {};\n\n // Every 5 minutes, report progress of all working slices to the scheduler\n if (Date.now() > ((this.lastProgressReport || 0) + sandboxTuning.progressReportInterval)) {\n // console.log('454: Assembling progress update...');\n this.lastProgressReport = Date.now();\n\n //\n // Note: this.slices is the disjoint union of:\n // this.allocatedSlices, \n // this.queuedSlices, \n // this.slices.filter(slice => !slice.isUnassigned) .\n // When a slice is not in these 3 arrays, the slice is lost.\n //\n \n const currentLostSlices = this.slices.filter(slice => slice.isUnassigned \n && this.queuedSlices.indexOf(slice) === -1\n && this.allocatedSlices.indexOf(slice) === -1);\n\n if (currentLostSlices.length > 0) {\n this.lostSlices.push(...currentLostSlices);\n // Try to recover.\n // Needs more work and testing.\n // Test when we can come up with a decent lost slice repro case.\n // --> this.queuedSlices.push(...currentLostSlices);\n }\n\n if (this.lostSlices.length > 0) {\n if (true) { // Keep this on for awhile, until we know lost slices aren't happening.\n console.warn('Supervisor.watchdog: Found lost slices!');\n for (const slice of this.lostSlices)\n console.warn('\\t', slice.identifier);\n }\n this.lostSlices = 
this.lostSlices.filter(slice => slice.isUnassigned);\n }\n\n const slices = [];\n this.queuedSlices.forEach(slice => {\n assert(slice && slice.sliceNumber > 0);\n addToSlicePayload(slices, slice, sliceStatus.scheduled);\n });\n\n this.allocatedSlices.forEach(slice => {\n assert(slice && slice.sliceNumber > 0);\n addToSlicePayload(slices, slice, 'progress'); // Beacon.\n });\n\n if (slices.length) {\n // console.log('471: sending progress update...');\n const progressReportPayload = {\n worker: this.workerOpaqueId,\n slices,\n };\n\n this.resultSubmitterConnection.send('status', progressReportPayload)\n .catch(error => {\n debugging('supervisor') && console.error('479: Failed to send status update:', error/*.message*/);\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: progressReportPayload })\n this.resultSubmitterConnection.close();\n });\n }\n }\n\n if (this.worker.working) {\n if (this.unallocatedSpace > 0) {\n await this.work().catch(err => {\n if (!this.watchdogState[err.code || '0'])\n this.watchdogState[err.code || '0'] = 0;\n if (Date.now() - this.watchdogState[err.code || '0'] > ((dcpConfig.worker.watchdogLogInterval * timeDilation || 120) * 1000))\n console.error('301: Failed to start work:', err);\n this.watchdogState[err.code || '0'] = Date.now();\n });\n }\n\n this.pruneSandboxes();\n }\n }\n\n /**\n * Gets the logical and physical number of cores and also\n * the total number of sandboxes the worker is allowed to run\n *\n */\n getStatisticsCPU() {\n if (DCP_ENV.isBrowserPlatform) {\n return {\n worker: this.workerOpaqueId,\n lCores: window.navigator.hardwareConcurrency,\n pCores: dcpConfig.worker.pCores || window.navigator.hardwareConcurrency,\n sandbox: this.maxWorkingSandboxes\n }\n }\n\n return {\n worker: this.workerOpaqueId,\n lCores: requireNative('os').cpus().length,\n pCores: requireNative('physical-cpu-count'),\n sandbox: this.maxWorkingSandboxes\n }\n }\n\n /**\n * Returns the number of unallocated sandbox slots to send to fetchTask.\n *\n * @returns {number}\n */\n numberOfAvailableSandboxSlots() {\n let numCores;\n if (this.options.priorityOnly && this.options.jobAddresses.length === 0) {\n numCores = 0;\n } else if (this.queuedSlices.length > 1) {\n // We have slices queued, no need to fetch\n numCores = 0;\n } else {\n // The queue is almost empty (there may be 0 or 1 element), fetch a full task.\n // The task is full, in the sense that it will contain slices whose\n // aggregate execution time is this.maxWorkingSandboxes * 5-minutes.\n // However, there can only be this.unallocatedSpace # of long slices.\n // Thus we need to know whether the last slice in this.queuedSlices is long or not.\n // (A long slice has estimated execution time >= 5-minutes.)\n const longSliceCount = (this.queuedSlices.length > 0 && this.queuedSlices[0].isLongSlice) ? 
1 : 0;\n numCores = this.unallocatedSpace - longSliceCount;\n }\n return numCores;\n }\n\n /**\n * Call to start doing work on the network.\n * This is the one place where requests to fetch new slices are made.\n * After the initial slices are fetched it calls this.distributeQueuedSlices.\n *\n * @returns {Promise<void>}, unallocatedSpace ${this.unallocatedSpace}\n */\n async work()\n {\n // When inside matchSlicesWithSandboxes, don't reenter Supervisor.work to fetch new work or create new sandboxes.\n if (this.matching) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.work: Do not interleave work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n return Promise.resolve();\n }\n\n await this.setDefaultIdentityKeystore();\n\n // Instantiate connections that don't exist.\n this.instantiateAllConnections();\n\n const numCores = this.numberOfAvailableSandboxSlots();\n\n debugging() && console.log(`Supervisor.work: Try to get ${numCores} slices in working sandboxes, unallocatedSpace ${this.unallocatedSpace}, queued slices ${this.queuedSlices.length}, # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching: ${this.isFetchingNewWork}`);\n \n // Fetch a new task if we have no more slices queued, then start workers\n try {\n if (numCores > 0 && !this.isFetchingNewWork) {\n this.isFetchingNewWork = true;\n\n /**\n * This will only ready sandboxes up to a total count of\n * maxWorkingSandboxes (in any state). It is not possible to know the\n * actual number of sandboxes required until we have the slices because we\n * may have sandboxes assigned for the slice's job already.\n *\n * If the evaluator cannot start (ie. if the evalServer is not running),\n * then the while loop will keep retrying until the evalServer comes online\n */\n if (this.maxWorkingSandboxes > this.sandboxes.length) {\n // Note: The old technique had \n // while (this.maxWorkingSandboxes > this.sandboxes.length) {....\n // and sometimes we'd get far too many sandboxes, because it would keep looping while waiting for\n // this.readySandboxes(this.maxWorkingSandboxes - this.sandboxes.length);\n // to construct the rest of the sandboxes. 
The fix is to only loop when the 1st \n // await this.readySandboxes(1) \n // is failing.\n let needFirstSandbox = true;\n while (needFirstSandbox) {\n debugging('supervisor') && console.log(`Supervisor.work: ready 1 sandbox, # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n await this.readySandboxes(1)\n .then(() => {\n debugging('supervisor') && console.log(`Supervisor.work: ready ${this.maxWorkingSandboxes - this.sandboxes.length} sandbox(es), # of sandboxes ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n this.readySandboxes(this.maxWorkingSandboxes - this.sandboxes.length);\n needFirstSandbox = false;\n }).catch(error => {\n console.warn('906: failed to ready sandboxes; will retry', error.code, error.message);\n });\n }\n }\n\n /**\n * Temporary change: Assign the capabilities of one of readied sandboxes\n * before fetching slices from the scheduler.\n *\n * TODO: Remove this once fetchTask uses the capabilities of every\n * sandbox to fetch slices.\n */\n if (!this.capabilities) {\n this.capabilities = this.sandboxes[0].capabilities;\n this.emit('capabilitiesCalculated', this.capabilities);\n }\n\n if (DCP_ENV.isBrowserPlatform && this.capabilities.browser)\n this.capabilities.browser.chrome = DCP_ENV.isBrowserChrome;\n\n const fetchTimeout = setTimeout(() => {\n console.warn(`679: Fetch exceeded timeout, will reconnect at next watchdog interval`);\n \n this.taskDistributorConnection.close('Fetch timed out', Math.random() > 0.5).catch(error => {\n console.error(`931: Failed to close task-distributor connection`, error);\n });\n this.resultSubmitterConnection.close('Fetch timed out', Math.random() > 0.5).catch(error => {\n console.error(`920: Failed to close result-submitter connection`, error);\n });\n this.isFetchingNewWork = false;\n this.instantiateAllConnections();\n }, 3 * 60 * 1000); // max out at 3 minutes to fetch\n\n // ensure result submitter connection before fetching tasks\n try\n {\n await this.resultSubmitterConnection.keepalive();\n }\n catch (e)\n {\n console.error('Failed to connect to result submitter, refusing to fetch slices. Will try again at next fetch cycle.')\n debugging('supervisor') && console.log(`Error: ${e}`);\n this.isFetchingNewWork = false; // <-- done in the `finally` block, below\n clearTimeout(fetchTimeout);\n this.taskDistributorConnection.close('Failed to connect to result-submitter', true).catch(error => {\n console.error(`939: Failed to close task-distributor connection`, error);\n });\n this.resultSubmitterConnection.close('Failed to connect to result-submitter', true).catch(error => {\n console.error(`942: Failed to close result-submitter connection`, error);\n });\n return Promise.resolve();\n }\n await this.fetchTask(numCores).finally(() => {\n clearTimeout(fetchTimeout);\n this.isFetchingNewWork = false;\n });\n }\n\n this.distributeQueuedSlices().then(() => debugging('supervisor') && 'supervisor: finished distributeQueuedSlices()').catch((e) => {\n // We should never get here, because distributeQueuedSlices was changed\n // to try to catch everything and return slices and sandboxes.\n // If we do catch here it may mean a slice was lost. \n console.error('Supervisor.work catch handler for distributeQueuedSlices.', e);\n });\n // No catch(), because it will bubble outward to the caller\n } finally {\n }\n }\n\n /**\n * Generate the workerComputeGroups property of the requestTask message. 
\n * \n * Concatenate the compute groups object from dcpConfig with the list of compute groups\n * from the supervisor, and remove the public group if accidentally present. Finally,\n * we transform joinSecrets/joinHash into joinHashHash for secure transmission.\n *\n * @note computeGroup objects with joinSecrets are mutated to record their hashes. This\n * affects the supervisor options and dcpConfig. Re-adding a joinSecret property\n * to one of these will cause the hash to be recomputed.\n */\n generateWorkerComputeGroups()\n {\n var computeGroups = Object.values(dcpConfig.worker.computeGroups || {});\n if (this.options.computeGroups)\n computeGroups = computeGroups.concat(this.options.computeGroups);\n computeGroups = computeGroups.filter(group => group.id !== constants.computeGroups.public.id);\n const hashedComputeGroups = [];\n for (const group of computeGroups)\n {\n const groupCopy = Object.assign({}, group);\n if ((group.joinSecret || group.joinHash) && (!group.joinHashHash || this.lastDcpsid !== this.taskDistributorConnection.dcpsid))\n {\n let joinHash;\n if (group.joinHash) {\n joinHash = group.joinHash.replace(/\\s+/g, ''); // strip whitespace\n } else {\n joinHash = calculateJoinHash(groupCopy);\n } \n\n groupCopy.joinHashHash = hash.calculate(hash.eh1, joinHash, this.taskDistributorConnection.dcpsid);\n delete groupCopy.joinSecret;\n delete groupCopy.joinHash;\n debugging('computeGroups') && console.debug(`Calculated joinHash=${joinHash} for`, groupCopy);\n }\n hashedComputeGroups.push(groupCopy);\n }\n this.lastDcpsid = this.taskDistributorConnection.dcpsid;\n debugging('computeGroups') && console.debug('Requesting ', computeGroups.length, 'non-public groups for session', this.lastDcpsid);\n return hashedComputeGroups;\n }\n\n /**\n * Remove all unreferenced jobs in `this.cache`.\n *\n * @param {any[]} newJobs - Jobs that should not be removed from\n * `this.cache`.\n */\n cleanJobCache(newJobs = []) {\n /* Delete all jobs in the supervisorCache that are not represented in this newJobs,\n * or in this.queuedSlices, or there is no sandbox assigned to these jobs.\n * Note: There can easily be 200+ places to check; using a lookup structure to maintain O(n).\n */\n if (this.cache.jobs.length > 0) {\n const jobAddressMap = {};\n Object.keys(newJobs).forEach(jobAddress => { jobAddressMap[jobAddress] = 1; });\n this.slices.forEach(slice => { if (!jobAddressMap[slice.jobAddress]) jobAddressMap[slice.jobAddress] = 1; });\n this.cache.jobs.forEach(jobAddress => {\n if (!jobAddressMap[jobAddress]) {\n this.cache.remove('job', jobAddress);\n // Remove and return the corresponding sandboxes from this.sandboxes.\n const deadSandboxes = this.sandboxes.filter(sb => sb.jobAddress === jobAddress);\n if (deadSandboxes.length > 0) {\n deadSandboxes.forEach(sandbox => { this.returnSandbox(sandbox); });\n debugging('supervisor') && console.log(`Supervisor.fetchTask: Deleting job ${jobAddress} from cache and assigned sandboxes ${deadSandboxes.map(s => s.id)}, # of sandboxes ${this.sandboxes.length}.`);\n }\n }\n });\n }\n }\n\n /**\n * Fetches a task, which contains job information and slices for sandboxes and\n * manages events related to fetching tasks so the UI can more clearly display\n * to user what is actually happening.\n * @param {number} numCores\n * @returns {Promise<void>} The requestTask request, resolve on success, rejects otherwise.\n * @emits Supervisor#fetchingTask\n * @emits Supervisor#fetchedTask\n */\n async fetchTask(numCores) {\n\n // Don't reenter\n if (this.matching || 
numCores <= 0) {\n // Interesting and noisy.\n debugging('supervisor') && console.log(`Supervisor.fetchTask: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return Promise.resolve();\n }\n\n //\n // Oversubscription mitigation.\n // Update when there are less available sandbox slots than numCores.\n const checkNumCores = this.numberOfAvailableSandboxSlots();\n if (numCores > checkNumCores) numCores = checkNumCores;\n if (numCores <= 0) return Promise.resolve();\n\n this.emit('fetchingTask');\n debugging('supervisor') && console.debug('supervisor: fetching task');\n const requestPayload = {\n numCores,\n coreStats: this.getStatisticsCPU(),\n numGPUs: this.defaultMaxGPUs,\n capabilities: this.capabilities,\n paymentAddress: this.paymentAddress,\n jobAddresses: this.options.jobAddresses || [], // force array; when set, only fetches slices for these jobs\n localExec: this.options.localExec,\n workerComputeGroups: this.generateWorkerComputeGroups(),\n minimumWage: dcpConfig.worker.minimumWage || this.options.minimumWage,\n readyJobs: [ /* list of jobs addresses XXXwg */ ],\n previouslyWorkedJobs: this.ringBufferofJobs.buf, //Only discrete jobs\n rejectedJobs: this.rejectedJobs,\n };\n // workers should be part of the public compute group by default\n if (!booley(dcpConfig.worker.leavePublicGroup) && !booley(this.options.leavePublicGroup))\n requestPayload.workerComputeGroups.push(constants.computeGroups.public);\n debugging('computeGroups') && console.log(`Fetching work for ${requestPayload.workerComputeGroups.length} ComputeGroups: `, requestPayload.workerComputeGroups);\n\n debugging('supervisor') && console.log(`fetchTask wants ${numCores} slice(s), unallocatedSpace ${this.unallocatedSpace}, queuedSlices ${this.queuedSlices.length}`);\n try {\n debugging('requestTask') && console.debug('fetchTask: requestPayload', requestPayload);\n\n let result = await this.taskDistributorConnection.send('requestTask', requestPayload).catch((error) => {\n debugging('supervisor') && console.error(`Unable to request task from scheduler: ${error}. Will try again on a new connection.`);\n this.taskDistributorConnection.close(error, true);\n throw error; /* caught below */\n });\n let responsePayload = result.payload; \n\n if (!result.success) {\n debugging() && console.log('Task fetch failure; request=', requestPayload);\n debugging() && console.log('Task fetch failure; response=', result.payload);\n throw new DCPError('Unable to fetch task for worker', responsePayload);\n }\n\n const sliceCount = responsePayload.body.task.length || 0;\n\n /**\n * The fetchedTask event fires when the supervisor has finished trying to\n * fetch work from the scheduler (task-manager). 
The data emitted is the\n * number of new slices to work on in the fetched task.\n *\n * @event Supervisor#fetchedTask\n * @type {number}\n */\n this.emit('fetchedTask', sliceCount);\n\n if (sliceCount < 1) {\n return Promise.resolve();\n }\n\n /**\n * DCP-1698 Send auth msg with tasks to worker, then validate authority of worker to send slice info back to scheduler.\n * payload structure: { owner: this.address, signature: signature, auth: messageLightWeight, body: messageBody };\n * messageLightWeight: { workerId: worker, jobSlices, schedulerId, jobCommissions }\n * messageBody: { newJobs: await getNewJobsForTask(dbScheduler, task, request), task }\n */\n const { body, ...authorizationMessage } = responsePayload;\n const { newJobs, task } = body;\n assert(newJobs); // It should not be possible to have !newJobs -- we throw on !success.\n \n /*\n * Ensure all jobs received from the scheduler are:\n * 1. If we have specified specific jobs the worker may work on, the received jobs are in the specified job list\n * 2. If we are in localExec, at most 1 unique job type was received (since localExec workers are designated for only\n * one job)\n * If the received jobs are not within these parameters, stop the worker since the scheduler cannot be trusted at that point.\n */\n if ((this.options.jobAddresses.length && !Object.keys(newJobs).every((ele) => this.options.jobAddresses.includes(ele)))\n || (this.options.localExec && Object.keys(newJobs).length > 1))\n {\n console.error(\"Worker received slices it shouldn't have. Rejecting the work and stopping.\");\n process.exit(1);\n }\n\n debugging() && console.log(`Supervisor.fetchTask: task: ${task.length}/${numCores}, jobs: ${Object.keys(newJobs).length}, authSlices: ${compressJobMap(authorizationMessage.auth.authSlices, true /* skipFirst*/, this.addressTruncationLength /* digits*/)}`);\n // Delete all jobs in the supervisorCache that are not represented in this task,\n // or in this.queuedSlices, or there is no sandbox assigned to these jobs.\n this.cleanJobCache(newJobs);\n\n for (const jobAddress of Object.keys(newJobs))\n if (!this.cache.cache.job[jobAddress])\n this.cache.store('job', jobAddress, newJobs[jobAddress]);\n\n // Memoize authMessage onto the Slice object, this should\n // follow it for its entire life in the worker.\n const tmpQueuedSlices = task.map(taskElement => new Slice(taskElement, authorizationMessage));\n\n // Make sure old stuff is up front.\n // matchSlicesWithSandboxes dequeues this.queuedSlices as follows:\n // slicesToMatch = this.queuedSlices.slice(0, numCores);\n this.slices.push(...tmpQueuedSlices);\n this.queuedSlices.push(...tmpQueuedSlices);\n \n // Populating the ring buffer based on job's discrete property \n Object.values(newJobs).forEach(job => {\n if(job.requirements.discrete && this.ringBufferofJobs.find(element => element === job.address) === undefined) {\n this.ringBufferofJobs.push(job.address);\n }\n });\n \n } catch (error) {\n this.emit('fetchTaskFailed', error);\n debugging('supervisor') && console.debug(`Supervisor.fetchTask failed!: error: ${error}`);\n }\n }\n\n /**\n * For each slice in this.queuedSlices, match with a sandbox in the following order:\n * 1. Try to find an already assigned sandbox in this.assignedSandboxes for the slice's job.\n * 2. Find a ready sandbox in this.readiedSandboxes that is unassigned.\n * 3. 
Ready a new sandbox and use that.\n *\n * Take great care in assuring sandboxes and slices are uniquely associated, viz.,\n * a given slice cannot be associated with multiple sandboxes and a given sandbox cannot be associated with multiple slices.\n * The lack of such uniqueness has been the root cause of several difficult bugs.\n *\n * Note: When a sandbox is paired with a slice, execution is pending and sandbox.allocated=true and\n * sandbox.slice=slice and sandbox.jobAddress=slice.jobAddress. This is what 'allocated' means.\n * Immediately upon the exit of sandbox.work, sandbox.allocated=false is set and if an exception\n * wasn't thrown, the paired slice is placed in this.assignedSandboxes.\n * Thus from the pov of supervisor, this.allocatedSandboxes is deterministic and this.workingSandboxes is not.\n * Please try to not use this.workingSandboxes. It is deprecated.\n *\n * The input is numCores, this,queuedSlices, this.assignedSandboxes and this.readiedSandboxes.\n * If there are not enough sandboxes, new readied sandboxes will be created using\n * await this.readySandboxes(...)\n * And it is this await boundary that has caused many bugs.\n * We try not to make assumptions about non-local state across the await boundary.\n *\n * @param {number} numCores - The number of available sandbox slots.\n * @param {boolean} [throwExceptions=true] - Whether to throw exceptions when checking for sanity.\n * @returns {Promise<SandboxSlice[]>} Returns SandboxSlice[], may have length zero.\n */\n async matchSlicesWithSandboxes (numCores, throwExceptions = true) {\n\n const sandboxSlices = [];\n if (this.queuedSlices.length === 0 || this.matching || numCores <= 0) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.matchSlicesWithSandboxes: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return sandboxSlices;\n }\n\n //\n // Oversubscription mitigation.\n // Update when there are less available sandbox slots than numCores.\n // We cannot use this.unallocatedSpace here because its value is artificially low or zero, because in\n // this.distributedQueuedSlices we use the pseudo-mutex trick: this.acquire(howManySandboxSlotsToReserve)/this.release().\n // Note: Do not use this.numberOfCoresReserved outside of a function locked with this.acquire(howManySandboxSlotsToReserve) .\n const checkNumCores = this.numberOfCoresReserved; // # of locked sandbox slots.\n if (numCores > checkNumCores) numCores = checkNumCores;\n if (numCores <= 0) return sandboxSlices;\n\n // Don't ask for more than we have.\n if (numCores > this.queuedSlices.length)\n numCores = this.queuedSlices.length;\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: numCores ${numCores}, queued slices ${this.queuedSlices.length}: assigned ${this.assignedSandboxes.length}, readied ${this.readiedSandboxes.length}, unallocated ${this.unallocatedSpace}, # of sandboxes: ${this.sandboxes.length}`);\n\n if (debugging('supervisor')) {\n dumpSlicesIfNotUnique(this.queuedSlices, 'Warning: this.queuedSlices slices are not unique -- this is ok when slice is rescheduled.');\n dumpSandboxesIfNotUnique(this.readiedSandboxes, 'Warning: this.readiedSandboxes sandboxes are not unique!');\n dumpSandboxesIfNotUnique(this.assignedSandboxes, 'Warning: this.assignedSandboxes sandboxes are not unique!');\n }\n\n // Three functions to 
validate slice and sandbox.\n function checkSlice(slice, checkAllocated=true) {\n if (!slice.isUnassigned) throw new DCPError(`Slice must be unassigned: ${slice.identifier}`);\n if (checkAllocated && slice.allocated) throw new DCPError(`Slice must not already be allocated: ${slice.identifier}`);\n }\n function checkSandbox(sandbox, isAssigned) {\n if (sandbox.allocated) throw new DCPError(`Assigned sandbox must not be already allocated: ${sandbox.identifier}`);\n if (isAssigned && !sandbox.isAssigned) throw new DCPError(`Assigned sandbox is not marked as assigned: ${sandbox.identifier}`);\n if (!isAssigned && !sandbox.isReadyForAssign) throw new DCPError(`Readied sandbox is not marked as ready for assign: ${sandbox.identifier}`);\n }\n\n // Sanity checks.\n if (throwExceptions) {\n this.assignedSandboxes.forEach(sandbox => { checkSandbox(sandbox, true /* isAssigned*/); });\n this.readiedSandboxes.forEach(sandbox => { checkSandbox(sandbox, false /* isAssigned*/); });\n this.queuedSlices.forEach(slice => { checkSlice(slice); });\n } else {\n this.assignedSandboxes = this.assignedSandboxes.filter(sandbox => !sandbox.allocated && sandbox.isAssigned);\n this.readiedSandboxes = this.readiedSandboxes.filter(sandbox => !sandbox.allocated && sandbox.isReadyForAssign);\n this.queuedSlices = this.queuedSlices.filter(slice => !slice.allocated && slice.isUnassigned);\n }\n\n const sandboxKind = {\n assigned: 0,\n ready: 1,\n new: 2,\n };\n\n const ceci = this;\n /**\n * Auxiliary function to pair a sandbox with a slice and mark the sandbox as allocated.\n * An allocated sandbox is reserved and will not be released until the slice completes execution on the sandbox.\n *\n * @param {Sandbox} sandbox\n * @param {Slice} slice\n * @param {number} kind\n */\n function pair(sandbox, slice, kind) {\n checkSandbox(sandbox, kind === sandboxKind.assigned);\n checkSlice(slice, kind === sandboxKind.assigned);\n slice.allocated = true;\n sandbox.allocated = true;\n sandbox.jobAddress = slice.jobAddress; // So we can know which jobs to not delete from this.cache .\n sandbox.slice = slice;\n sandboxSlices.push({ sandbox, slice });\n if (Supervisor.sliceTiming) slice['pairingDelta'] = Date.now();\n if (debugging('supervisor')) {\n let fragment = 'New readied';\n if (kind === sandboxKind.assigned) fragment = 'Assigned';\n else if (kind === sandboxKind.ready) fragment = 'Readied';\n console.log(`matchSlicesWithSandboxes.pair: ${fragment} sandbox matched ${ceci.dumpSandboxAndSlice(sandbox, slice)}`);\n }\n }\n\n // These three arrays are used to track/store slices and sandboxes,\n // so that when an exception occurs, the following arrays are restored:\n // this.queuedSlices, this.assignedSandboxes, this.realizedSandboxes.\n let slicesToMatch = [];\n let trackAssignedSandboxes = [];\n let trackReadiedSandboxes = [];\n try\n {\n this.matching = true;\n\n let assignedCounter = 0; // How many assigned sandboxes are being used.\n let readyCounter = 0; // How many sandboxes used from the existing this.readiedSandboxes.\n let newCounter = 0; // How many sandboxes that needed to be newly created.\n\n //\n // The Ideas:\n // 1) We match each slice with a sandbox. First we match with assigned sandboxes in the order\n // that they appear in this.queuedSlices. 
Then we match in-order with existing this.readiedSandboxes\n // Then we match in-order with new new readied sandboxes created through\n // await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n // This allows us to try different orderings of execution of slices. E.g. Wes suggested\n // trying to execute slices from different jobs with maximal job diversity -- specifically\n // if there are 3 jobs j1,j2,j3, with slices s11, s12 from j1, s21, s22, s23 from j2 and\n // s31, s32 from j3, then we try to schedule, in order s11, s21, s31, s12, s22, s32, s23.\n //\n // 2) Before matching slices with sandboxes, we allocate available assigned and readied sandboxes\n // and if more are needed then we create and allocate new ones.\n //\n // 3) Finally we match slices with sandboxes and return an array of sandboxSlice pairs.\n //\n // Note: The ordering of sandboxSlices only partially corresponds to the order of this.queuedSlices.\n // It's easy to do. When pairing with assigned sandboxes, any slice in this.queuedSlices which doesn't\n // have an assigned sandbox, will add null to the sandboxSlices array. Then when pairing with readied sandboxes,\n // we fill-in the null entries in the sandboxSlices array.\n //\n /** XXXpfr @todo When it is needed, fix the ordering as described above. */\n\n // Get the slices that are being matched.\n slicesToMatch = this.queuedSlices.slice(0, numCores);\n this.queuedSlices = this.queuedSlices.slice(numCores);\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: slicesToMatch ${this.dumpSlices(slicesToMatch)}`);\n\n // Create object map: jobAddress -> sandboxes with sandboxes.jobAddress === jobAddress .\n const jobSandboxMap = toJobMap(this.assignedSandboxes, sandbox => sandbox);\n \n // Create array to hold slices which do not have assigned sandboxes.\n // These slices will need to be paired with existing and possibly new readied sandboxes.\n // Specifically, the sandboxes from existing this.readiedSandboxes and new sandboxes\n // created through await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n const slicesThatNeedSandboxes = [];\n\n // Pair assigned sandboxes with slices.\n for (const slice of slicesToMatch) {\n const assigned = jobSandboxMap[slice.jobAddress];\n if (assigned && assigned.length > 0) {\n // Pair.\n const sandbox = assigned.pop();\n pair(sandbox, slice, sandboxKind.assigned);\n this.removeElement(this.assignedSandboxes, sandbox);\n // Track.\n trackAssignedSandboxes.push(sandbox);\n assignedCounter++;\n } else {\n // Don't lose track of these slices.\n slice.allocated = true;\n slicesThatNeedSandboxes.push(slice);\n }\n }\n\n // Pair readied sandboxes with slices.\n readyCounter = Math.min(slicesThatNeedSandboxes.length, this.readiedSandboxes.length);\n newCounter = slicesThatNeedSandboxes.length - readyCounter;\n // Track.\n trackReadiedSandboxes = this.readiedSandboxes.slice(0, readyCounter);\n this.readiedSandboxes = this.readiedSandboxes.slice(readyCounter);\n for (const sandbox of trackReadiedSandboxes) {\n // Pair.\n const slice = slicesThatNeedSandboxes.pop();\n pair(sandbox, slice, sandboxKind.ready);\n }\n \n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: assignedCounter ${assignedCounter}, readyCounter ${readyCounter}, newCounter ${newCounter}, numCores ${numCores}`)\n\n // Validate algorithm consistency.\n if (Supervisor.debugBuild && assignedCounter + readyCounter + newCounter !== numCores) {\n // Structured assert.\n throw new 
DCPError(`matchSlicesWithSandboxes: Algorithm is corrupt ${assignedCounter} + ${readyCounter} + ${newCounter} !== ${numCores}`);\n }\n\n // Here is an await boundary.\n // Accessing non-local data across an await boundary may result in the unexpected.\n\n // Create new readied sandboxes to associate with slicesThatNeedSandboxes.\n if (newCounter > 0) {\n // When allocateLocalSandboxes is true, this.readySandboxes does not place the new sandboxes\n // on this.readiedSandboxes. Hence the new sandboxes are private and nobody else can see them.\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: creating ${newCounter} new sandboxes, # of sandboxes ${this.sandboxes.length}`);\n const readied = await this.readySandboxes(newCounter, true /* allocateLocalSandboxes*/);\n // Track.\n trackReadiedSandboxes.push(...readied);\n\n for (const sandbox of readied) {\n assert(slicesThatNeedSandboxes.length > 0);\n // Pair\n const slice = slicesThatNeedSandboxes.pop();\n pair(sandbox, slice, sandboxKind.new);\n }\n \n // Put back any extras. There should not be any unless readySandboxes returned less than asked for.\n if (slicesThatNeedSandboxes.length > 0) {\n slicesThatNeedSandboxes.forEach(slice => {\n slice.allocated = false;\n this.queuedSlices.push(slice);\n });\n }\n }\n\n if ( false || debugging()) {\n console.log(`matchSlicesWithSandboxes: Matches: ${ this.dumpSandboxSlices(sandboxSlices) }`);\n this.dumpSandboxSlicesIfNotUnique(sandboxSlices, 'Warning: sandboxSlices; { sandbox, slice } pairs are not unique!');\n }\n } catch (e) {\n // Clear allocations.\n slicesToMatch.forEach(slice => { slice.allocated = false; });\n trackAssignedSandboxes.forEach(sandbox => { sandbox.allocated = false; sandbox.slice = null; });\n trackReadiedSandboxes.forEach(sandbox => { sandbox.allocated = false; sandbox.slice = null; sandbox.jobAddress = null; });\n \n // Filter out redundancies -- there shouldn't be any...\n slicesToMatch = slicesToMatch.filter(slice => this.queuedSlices.indexOf(slice) === -1);\n trackAssignedSandboxes = trackAssignedSandboxes.filter(sb => this.assignedSandboxes.indexOf(sb) === -1);\n trackReadiedSandboxes = trackReadiedSandboxes.filter(sb => this.readiedSandboxes.indexOf(sb) === -1);\n\n // Sanity checks.\n slicesToMatch.forEach(slice => { checkSlice(slice) });\n trackAssignedSandboxes.forEach(sandbox => { checkSandbox(sandbox, true /* isAssigned*/); });\n trackReadiedSandboxes.forEach(sandbox => { checkSandbox(sandbox, false /* isAssigned*/); });\n\n // Restore arrays.\n this.queuedSlices.push(...slicesToMatch);\n this.assignedSandboxes.push(...trackAssignedSandboxes);\n this.readiedSandboxes.push(...trackReadiedSandboxes);\n \n console.error('Error in matchSlicesWithSandboxes: Attempting to recover slices and sandboxes.', e);\n return [];\n } finally {\n this.matching = false;\n }\n\n debugging('supervisor') && console.log(`matchSlicesWithSandboxes: allocated ${sandboxSlices.length} sandboxes, queuedSlices ${this.queuedSlices.length}, unallocatedSpace ${this.unallocatedSpace}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, # of sandboxes: ${this.sandboxes.length}.`);\n\n return sandboxSlices;\n }\n\n disassociateSandboxAndSlice(sandbox, slice) {\n this.returnSandbox(sandbox);\n sandbox.slice = null;\n this.returnSlice(slice);\n }\n\n /**\n * This method will call this.startSandboxWork(sandbox, slice) for each element { sandbox, slice }\n * of the array returned by this.matchSlicesWithSandboxes(availableSandboxes) until all allocated sandboxes\n * are 
working. It is possible for a sandbox to interleave with calling distributeQueuedSlices and leave a sandbox\n * that is not working. Moreover, this.queuedSlices may be exhausted before all sandboxes are working.\n * @returns {Promise<void>}\n */\n async distributeQueuedSlices () {\n const numCores = this.unallocatedSpace;\n\n // If there's nothing there, or we're reentering, bail out.\n if (this.queuedSlices.length === 0 || numCores <= 0 || this.matching) {\n // Interesting and noisy.\n // debugging('supervisor') && console.log(`Supervisor.distributeQueuedSlices: Do not nest work, fetch or matching slices with sandboxes: queuedSlices ${this.queuedSlices.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}, numCores ${numCores}`);\n return Promise.resolve();\n }\n\n //\n // Use the pseudo-mutex to prevent uncontrolled interleaving with fetchTask,\n // matchSlicesWithSandboxes and distributeQueuedSlices\n let sandboxSlices;\n this.acquire(numCores);\n try {\n sandboxSlices = await this.matchSlicesWithSandboxes(numCores);\n } finally {\n this.release();\n }\n\n debugging('supervisor') && console.log(`distributeQueuedSlices: ${sandboxSlices.length} sandboxSlices ${this.dumpSandboxSlices(sandboxSlices)}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n\n for (let sandboxSlice of sandboxSlices) {\n\n const { sandbox, slice } = sandboxSlice;\n try {\n if (sandbox.isReadyForAssign) {\n try {\n let timeoutMs = Math.floor(Math.min(+Supervisor.lastAssignFailTimerMs || 0, 10 * 60 * 1000 /* 10m */));\n await a$sleepMs(timeoutMs);\n await this.assignJobToSandbox(sandbox, slice.jobAddress);\n } catch (e) {\n console.error(`Supervisor.distributeQueuedSlices: Could not assign slice ${slice.identifier} to sandbox ${sandbox.identifier}.`);\n if (Supervisor.debugBuild) console.error(`...exception`, e);\n Supervisor.lastAssignFailTimerMs = Supervisor.lastAssignFailTimerMs ? +Supervisor.lastAssignFailTimerMs * 1.25 : Math.random() * 200;\n this.disassociateSandboxAndSlice(sandbox, slice);\n continue;\n }\n }\n\n if (!Supervisor.lastAssignFailTimerMs)\n Supervisor.lastAssignFailTimerMs = Math.random() * 200;\n this.startSandboxWork(sandbox, slice);\n Supervisor.lastAssignFailTimerMs = false;\n\n } catch (e) {\n // We should never get here.\n console.error(`Supervisor.distributeQueuedSlices: Failed to execute slice ${slice.identifier} in sandbox ${sandbox.identifier}.`);\n if (Supervisor.debugBuild) console.error('...exception', e);\n this.disassociateSandboxAndSlice(sandbox, slice);\n }\n }\n }\n\n /**\n *\n * @param {Sandbox} sandbox\n * @param {opaqueId} jobAddress\n * @returns {Promise<void>}\n */\n async assignJobToSandbox(sandbox, jobAddress) {\n var ceci = this;\n\n try {\n return sandbox.assign(jobAddress); // Returns Promise.\n } catch(error) {\n // return slice to scheduler, log error\n console.error('Supervisor.assignJobToSandbox: Failed to assign job to sandbox.', {\n jobAddress: jobAddress.substr(0,10),\n error,\n });\n\n ceci.returnSandbox(sandbox);\n\n throw error;\n }\n }\n\n /**\n * Handles reassigning or returning a slice that was rejected by a sandbox.\n * \n * The sandbox will be terminated by this.returnSandbox in finalizeSandboxAndSlice. 
In this case,\n * if the slice does not have a rejected property already, reassign the slice to a new sandbox\n * and add a rejected property to the slice to indicate it has already rejected once, then set slice = null\n * in the return SandboxSlice so that finalizeSandboxAndSlice won't return slice to scheduler.\n * \n * If the slice rejects with a reason, or has a rejected timestamp (i.e. has been rejected once already)\n * then return the slice and all slices from the job to the scheduler and\n * terminate all sandboxes with that jobAddress.\n * @param {Sandbox} sandbox \n * @param {Slice} slice\n * @returns {Promise<SandboxSlice>}\n */\n async handleWorkReject(sandbox, slice, rejectReason) {\n if (!this.rejectedJobReasons[slice.jobAddress])\n this.rejectedJobReasons[slice.jobAddress] = [];\n\n this.rejectedJobReasons[slice.jobAddress].push(rejectReason); // memoize reasons\n\n // First time rejecting without a reason. Try assigning slice to a new sandbox.\n if (rejectReason === 'false' && !slice.rejected) {\n // Set rejected.\n slice.rejected = Date.now();\n // Schedule the slice for execution.\n this.scheduleSlice(slice, true /* placeInTheFrontOfTheQueue*/, false /* noDuplicateExecution*/);\n \n // Null out slice so this.returnSlice will not be called in finalizeSandboxAndSlice.\n // But we still want this.returnSandbox to terminate the sandbox.\n slice = null;\n } else { // Slice has a reason OR rejected without a reason already and got stamped.\n // Capture identifying information before the slice reference is nulled out below.\n const jobAddress = slice.jobAddress;\n const sliceIdentifier = slice.identifier;\n\n // Purge all slices and sandboxes associated with jobAddress .\n this.purgeAllWork(jobAddress);\n // Clear jobAddress from this.cache .\n this.cleanJobCache();\n\n //\n // this.purgeAllWork(jobAddress) terminates all sandboxes with jobAddress,\n // and it also returns to scheduler all slices with jobAddress.\n // Therefore null out slice and sandbox so finalizeSandboxAndSlice doesn't do anything.\n // \n sandbox = null;\n slice = null;\n\n // Add to array of rejected jobs.\n let rejectedJob = {\n address: jobAddress,\n reasons: this.rejectedJobReasons[jobAddress],\n }\n this.rejectedJobs.push(rejectedJob);\n\n // Tell everyone all about it, when allowed.\n if (dcpConfig.worker.allowConsoleAccess || Supervisor.debugBuild) {\n console.warn(`Supervisor.handleWorkReject: The slice ${sliceIdentifier} was rejected twice; slice will be returned to the scheduler.`);\n console.warn('Supervisor.handleWorkReject: All slices with the same jobAddress returned to the scheduler.');\n console.warn('Supervisor.handleWorkReject: All sandboxes with the same jobAddress are terminated.');\n }\n }\n return { sandbox, slice };\n }\n\n /**\n * Schedule the slice to be executed.\n * If slice is already executing and noDuplicateExecution is true, return the slice with reason.\n * @param {Slice} slice\n * @param {boolean} [placeInTheFrontOfTheQueue=false]\n * @param {boolean} [noDuplicateExecution=true]\n * @param {string} [reason]\n */\n scheduleSlice(slice, placeInTheFrontOfTheQueue = false, noDuplicateExecution = true, reason) {\n // When noDuplicateExecution, if slice is already executing, do nothing.\n let workingSlices = [];\n if (noDuplicateExecution)\n workingSlices = this.allocatedSlices;\n\n // indexOf returns -1 when the slice is not already executing; only then re-queue it.\n if (workingSlices.indexOf(slice) === -1) {\n // Reset slice state to allow execution.\n slice.status = SLICE_STATUS_UNASSIGNED;\n // Enqueue in the to-be-executed queue.\n if (placeInTheFrontOfTheQueue) this.queuedSlices.unshift(slice);\n else this.queuedSlices.push(slice);\n }\n }\n\n /**\n * Purge all slices and sandboxes with this jobAddress.\n * 
@param {address} jobAddress\n * @param {boolean} [onlyPurgeQueuedAndAllocated=false]\n */\n purgeAllWork(jobAddress, onlyPurgeQueuedAndAllocated = false) {\n // Purge all slices and sandboxes associated with jobAddress .\n const deadSandboxes = this.sandboxes.filter(sandbox => sandbox.jobAddress === jobAddress);\n\n if (deadSandboxes.length > 0) {\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): sandboxes purged ${deadSandboxes.map(s => s.id)}, # of sandboxes ${this.sandboxes.length}`);\n deadSandboxes.forEach(sandbox => this.returnSandbox(sandbox));\n }\n\n let deadSlices;\n if (onlyPurgeQueuedAndAllocated) {\n deadSlices = this.queuedSlices.filter(slice => slice.jobAddress === jobAddress);\n if (deadSlices.length > 0 || this.allocatedSlices.length > 0)\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): dead queuedSlices ${deadSlices.map(s => s.sliceNumber)}, dead allocatedSlices ${this.allocatedSlices.map(s => s.sliceNumber)}`);\n deadSlices.push(...this.allocatedSlices);\n } else {\n deadSlices = this.slices.filter(slice => slice.jobAddress === jobAddress);\n }\n\n if (deadSlices.length > 0) {\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): slices purged ${deadSlices.map(s => s.sliceNumber)}, # of sandboxes ${this.sandboxes.length}`);\n this.returnSlices(deadSlices);\n this.removeQueuedSlices(deadSlices);\n }\n debugging('supervisor') && console.log(`purgeAllWork(${this.dumpJobAddress(jobAddress)}): Finished: slices ${this.slices.length}, queuedSlices ${this.queuedSlices.length}, assigned ${this.assignedSandboxes.length}, readied ${this.readiedSandboxes.length}, # of sandboxes ${this.sandboxes.length}`);\n }\n\n /**\n * Gives a slice to a sandbox which begins working. 
Handles collecting\n * the slice result (complete/fail) from the sandbox and submitting the result to the scheduler.\n * It will also return the sandbox to @this.returnSandbox when completed so the sandbox can be re-assigned.\n *\n * @param {Sandbox} sandbox - the sandbox to give the slice\n * @param {Slice} slice - the slice to distribute\n * @returns {Promise<void>} Promise returned from sandbox.run\n */\n async startSandboxWork (sandbox, slice) {\n var startDelayMs, reason = 'unknown';\n\n try {\n slice.markAsWorking();\n } catch (e) {\n // This will occur when the same slice is distributed twice.\n // It is normal because two sandboxes could finish at the same time and be assigned the\n // same slice before the slice is marked as working.\n debugging() && console.debug('startSandboxWork: slice.markAsWorking exception:', e);\n return Promise.resolve();\n }\n\n // sandbox.requiresGPU = slice.requiresGPU;\n // if (sandbox.requiresGPU) {\n // this.GPUsAssigned++;\n // }\n\n if (Supervisor.startSandboxWork_beenCalled)\n startDelayMs = 1000 * (tuning.minSandboxStartDelay + (Math.random() * (tuning.maxSandboxStartDelay - tuning.minSandboxStartDelay)));\n else {\n startDelayMs = 1000 * tuning.minSandboxStartDelay;\n Supervisor.startSandboxWork_beenCalled = true;\n }\n\n try {\n debugging() && console.log(`startSandboxWork: Started ${this.dumpStatefulSandboxAndSlice(sandbox, slice)}, total sandbox count: ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n if (Supervisor.sliceTiming) {\n slice['pairingDelta'] = Date.now() - slice['pairingDelta'];\n slice['executionDelta'] = Date.now();\n }\n let result;\n try {\n result = await sandbox.work(slice, startDelayMs);\n } finally {\n sandbox.allocated = false;\n slice.allocated = false;\n }\n if (Supervisor.sliceTiming) {\n slice['executionDelta'] = Date.now() - slice['executionDelta'];\n slice['resultDelta'] = Date.now();\n }\n slice.collectResult(result, true);\n // In watchdog, all sandboxes in working state, have their slice status sent to result submitter.\n // However, this can happen after the sandbox/slice has already sent results\n // to result submitter, in which case, the activeSlices table has already removed the row\n // corresponding to slice and hence is incapable of updating status.\n sandbox.changeWorkingToAssigned();\n this.assignedSandboxes.push(sandbox);\n debugging() && console.log(`startSandboxWork: Finished ${this.dumpStatefulSandboxAndSlice(sandbox, slice)}, total sandbox count: ${this.sandboxes.length}, matching ${this.matching}, fetching ${this.isFetchingNewWork}`);\n } catch(error) {\n let logLevel;\n\n if (error instanceof SandboxError) {\n logLevel = 'warn';\n // The message and stack properties of error objects are not enumerable,\n // so they have to be copied into a plain object this way\n const errorResult = Object.getOwnPropertyNames(error).reduce((o, p) => {\n o[p] = error[p]; return o;\n }, { message: 'Unexpected worker error' });\n slice.collectResult(errorResult, false);\n } else {\n logLevel = 'error';\n // This error was unrelated to the work being done, so just return the slice in the finally block.\n // For extra safety the sandbox is terminated.\n slice.result = null;\n slice.status = SLICE_STATUS_FAILED; /** XXXpfr @todo terminating sandbox? 
*/\n }\n\n let errorString;\n switch (error.errorCode) {\n case 'ENOPROGRESS':\n reason = 'ENOPROGRESS';\n errorString = 'Supervisor.startSandboxWork - No progress error in sandbox.\\n';\n break;\n case 'ESLICETOOSLOW':\n reason = 'ESLICETOOSLOW';\n errorString = 'Supervisor.startSandboxWork - Slice too slow error in sandbox.\\n';\n break;\n case 'EUNCAUGHT':\n reason = 'EUNCAUGHT';\n errorString = `Supervisor.startSandboxWork - Uncaught error in sandbox ${error.message}.\\n`;\n break;\n case 'EFETCH':\n // reason = 'EFETCH'; The status.js processing cannot handle 'EFETCH'\n reason = 'unknown';\n errorString = `Supervisor.startSandboxWork - Could not fetch data: ${error.message}.\\n`;\n break;\n }\n\n if (error.name === 'EWORKREJECT') {\n error.stack = 'Sandbox was terminated by work.reject()';\n const ss = await this.handleWorkReject(sandbox, slice, error.message);\n sandbox = ss.sandbox; slice = ss.slice;\n }\n\n // handleWorkReject may have nulled out sandbox and/or slice; guard every access below.\n const errorObject = {\n jobAddress: slice ? slice.jobAddress.substr(0,10) : 'unknown',\n sliceNumber: slice ? slice.sliceNumber : 'unknown',\n sandbox: sandbox ? sandbox.id : 'unknown',\n jobName: (sandbox && sandbox.public) ? sandbox.public.name : 'unnamed',\n };\n\n // Always display max info under debug builds; otherwise maximal error\n // messages are displayed to the worker only if both worker and client agree.\n let workerConsole = sandbox && slice && sandbox.supervisorCache.cache.job[slice.jobAddress] && sandbox.supervisorCache.cache.job[slice.jobAddress].workerConsole;\n const displayMaxInfo = Supervisor.debugBuild || (workerConsole && dcpConfig.worker.allowConsoleAccess);\n\n if (!displayMaxInfo && errorString) {\n console[logLevel](errorString, errorObject);\n } else if (!displayMaxInfo && error.name === 'EWORKREJECT') {\n console[logLevel](`Supervisor.startSandboxWork - Sandbox rejected work: ${error.message}`);\n } else {\n if (displayMaxInfo)\n errorObject.stack = '\\n --------------------\\n' + (error.stack.split('\\n').slice(1).join('\\n'));\n console[logLevel](`Supervisor.startSandboxWork - Sandbox failed: ${error.message}\\n`, errorObject);\n }\n } finally {\n await this.finalizeSandboxAndSlice(sandbox, slice, reason);\n }\n }\n\n /**\n * If slice && slice.result, then call await this.recordResult(slice) and this.returnSandbox(sandbox, slice) will have no effect.\n * If slice && !slice.result, then call this.returnSlice(slice, reason) and then this.returnSandbox(sandbox, slice) which terminates sandbox.\n * If !slice && sandbox, then terminate the sandbox with this.returnSandbox(sandbox, slice) .\n * If !slice && !sandbox, then do nothing.\n * @param {Sandbox} [sandbox]\n * @param {Slice} [slice]\n * @param {string} [reason]\n */\n async finalizeSandboxAndSlice(sandbox, slice, reason) {\n debugging('supervisor') && console.log(`finalizeSandboxAndSlice: sandbox ${sandbox ? sandbox.identifier : 'none'}, slice ${slice ? slice.identifier : 'none'}`);\n if (slice) {\n if (slice.result) await this.recordResult(slice);\n else this.returnSlice(slice, reason);\n }\n // It is possible that sandbox is already terminated,\n // because sandbox.allocated=false as soon as sandbox.work(...) 
completes.\n // But the await at or in finalizeSandboxAndSlice may allow pruneSandboxes to slither in.\n if (sandbox) this.returnSandbox(sandbox, slice, false /* verifySandboxIsNotTerminated*/);\n }\n\n /**\n * Terminates sandboxes and returns slices.\n * Sets the working flag to false; call @this.work to start working again.\n * \n * If forceTerminate is true: Terminates all sandboxes and returns all slices.\n * If forceTerminate is false: Terminates non-allocated sandboxes and returns queued slices.\n *\n * @param {boolean} [forceTerminate = true] - true if you want to stop the sandboxes from completing their current slice.\n * @returns {Promise<void>}\n */\n async stopWork (forceTerminate = true) {\n debugging('supervisor') && console.log(`stopWork(${forceTerminate}): terminating sandboxes and returning slices to scheduler.`);\n if (forceTerminate) {\n while (this.sandboxes.length) {\n this.returnSandbox(this.sandboxes[0], null, false);\n }\n\n await this.returnSlices(this.slices).then(() => {\n this.queuedSlices.length = 0;\n });\n } else {\n // Only terminate idle sandboxes and return only queued slices\n let idleSandboxes = this.sandboxes.filter(w => !w.allocated);\n for (const sandbox of idleSandboxes) {\n this.returnSandbox(sandbox, null, false /* verifySandboxIsNotTerminated*/);\n }\n\n await this.returnSlices(this.queuedSlices).then(() => {\n this.queuedSlices.length = 0;\n });\n\n await new Promise((resolve) => {\n let sandboxesRemaining = this.allocatedSandboxes.length;\n if (sandboxesRemaining === 0)\n {\n resolve();\n return;\n }\n // Resolve and finish work once all sandboxes have finished submitting their results.\n this.on('submitFinished', () => {\n sandboxesRemaining--;\n if (sandboxesRemaining === 0)\n {\n console.log('All sandboxes empty, stopping worker and closing all connections');\n resolve();\n }\n });\n });\n }\n\n if (this.resultSubmitterConnection) {\n this.resultSubmitterConnection.off('close', this.openResultSubmitterConn);\n this.resultSubmitterConnection.close();\n this.resultSubmitterConnection = null;\n }\n\n if (this.taskDistributorConnection) {\n this.taskDistributorConnection.off('close', this.openTaskDistributorConn);\n this.taskDistributorConnection.close();\n this.taskDistributorConnection = null;\n }\n\n if (this.packageManagerConnection) {\n this.packageManagerConnection.off('close', this.openPackageManagerConn);\n this.packageManagerConnection.close();\n this.packageManagerConnection = null;\n }\n\n if (this.eventRouterConnection) {\n this.eventRouterConnection.off('close', this.openEventRouterConn);\n this.eventRouterConnection.close();\n this.eventRouterConnection = null;\n }\n\n this.emit('stop');\n }\n\n /**\n * Takes a slice and returns it to the scheduler to be redistributed.\n * Usually called when an exception is thrown by sandbox.work(slice, startDelayMs) .\n * Or when the supervisor tells it to forcibly stop working.\n *\n * @param {Slice} slice - The slice to return to the scheduler.\n * @param {string} [reason] - Optional reason for the return: 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n * @returns {Promise<*>} - Response from the scheduler.\n */\n returnSlice (slice, reason) {\n // When sliceNumber === 0 don't send a status message.\n if (slice.sliceNumber === 0) return Promise.resolve();\n \n debugging() && console.log(`Supervisor.returnSlice: Returning slice ${slice.identifier} with reason ${reason}.`);\n \n const payload = slice.getReturnMessagePayload(this.workerOpaqueId, reason);\n return 
this.resultSubmitterConnection.send('status', payload)\n .then(response => {\n return response;\n }).catch(error => {\n debugging('supervisor') && console.error('Failed to return slice', {\n sliceNumber: slice.sliceNumber,\n jobAddress: slice.jobAddress,\n status: slice.status,\n error,\n }, 'Will try again on a new connection.');\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: payload });\n this.resultSubmitterConnection.close();\n });\n }\n\n /**\n * Bulk-return multiple slices, possibly for assorted jobs.\n * Returns slices to the scheduler to be redistributed.\n * Called in the sandbox terminate handler and purgeAllWork(jobAddress)\n * and stopWork(forceTerminate).\n *\n * @param {Slice[]} slices - The slices to return to the scheduler.\n * @returns {Promise<void>} - Response from the scheduler.\n */\n async returnSlices(slices) {\n if (!slices || !slices.length) return Promise.resolve();\n \n const slicePayload = [];\n slices.forEach(slice => { addToReturnSlicePayload(slicePayload, slice); });\n this.removeSlices(slices);\n\n debugging('supervisor') && console.log(`Supervisor.returnSlices: Returning slices ${this.dumpSlices(slices)}.`);\n\n return this.resultSubmitterConnection.send('status', {\n worker: this.workerOpaqueId,\n slices: slicePayload,\n }).then(response => {\n return response;\n }).catch(error => {\n const errorInfo = slices.map(slice => slice.identifier);\n debugging('supervisor') && console.error('Failed to return slice(s)', { errorInfo, error }, 'Will try again on new connection.');\n this.resultSubmitterMessageQueue.push({ operation: 'status', data: { worker: this.workerOpaqueId, slices: slicePayload } })\n this.resultSubmitterConnection.close();\n // Just in case the caller is expecing a DCP response\n return { success: false, payload: {} };\n });\n }\n\n /**\n * Submits the slice results to the scheduler, either to the\n * work submit or fail endpoints based on the slice status.\n * Then remove the slice from the @this.slices cache.\n *\n * @param {Slice} slice - The slice to submit.\n * @returns {Promise<void>}\n */\n async recordResult (slice) {\n // It is possible for slice.result to be undefined when there are upstream errors.\n if ( !(slice && slice.result))\n throw new Error(`recordResult: slice.result is undefined for slice ${slice.identifier}. 
This is ok when there are upstream errors.`);\n\n debugging('supervisor') && console.log(`supervisor: recording result for slice ${slice.identifier}.`);\n\n const jobAddress = slice.jobAddress;\n const sliceNumber = slice.sliceNumber;\n const authorizationMessage = slice.getAuthorizationMessage();\n\n /* @see result-submitter::result for full message details */\n const metrics = { GPUTime: 0, CPUTime: 0, CPUDensity: 0, GPUDensity: 0 };\n const payloadData = {\n slice: sliceNumber,\n job: jobAddress,\n worker: this.workerOpaqueId,\n paymentAddress: this.paymentAddress,\n metrics,\n authorizationMessage,\n }\n \n const timeReport = slice.timeReport;\n if (timeReport && timeReport.total > 0) {\n metrics.GPUTime = timeReport.webGL;\n metrics.CPUTime = timeReport.CPU;\n metrics.CPUDensity = metrics.CPUTime / timeReport.total;\n metrics.GPUDensity = metrics.GPUTime / timeReport.total;\n metrics.CPUTime = 1 + Math.floor(metrics.CPUTime);\n metrics.GPUTime = 1 + Math.floor(metrics.GPUTime);\n }\n \n this.emit('submittingResult');\n\n if (!slice.isFinished)\n throw new Error('Cannot record result for slice that is not finished');\n\n if (slice.resultStorageType === 'pattern') { /* This is a remote-storage slice. */\n const remoteResult = await this.sendResultToRemote(slice);\n payloadData.result = encodeDataURI(JSON.stringify(remoteResult));\n } else {\n payloadData.result = encodeDataURI(slice.result.result); /* XXXwg - result.result is awful */\n }\n debugging('supervisor') && console.log('Supervisor.recordResult: payloadData.result', payloadData.result.slice(0, 512));\n\n try {\n if (slice.completed) {\n\n /* work function returned a result */\n let resp = await this.resultSubmitterConnection.send(\n 'result',\n payloadData,\n ).catch((error) => {\n debugging('supervisor') && console.error(`Failed to submit result to scheduler for slice ${payloadData.slice} of job ${payloadData.job}:\\n ${error} \\nWill try again on new connection.`);\n this.resultSubmitterMessageQueue.push({ operation: 'result', data: payloadData });\n this.resultSubmitterConnection.close();\n error.handled = true;\n throw error; /* Caught in catch block below */\n });\n \n if (!resp.success)\n throw resp.payload;\n\n if (false) {}\n\n const receipt = {\n accepted: true,\n payment: resp.payload.slicePaymentAmount,\n };\n this.emit('submittedResult', resp.payload);\n this.emit('dccCredit', receipt);\n } else {\n /* slice did not complete for some reason */\n \n // If the slice from a job never completes and the job address exists in the ringBufferofJobs, \n // then we remove it to allow for another slice (from the same job) to be obtained by fetchTask\n this.ringBufferofJobs.buf = this.ringBufferofJobs.filter(element => element !== jobAddress);\n \n await this.returnSlice(slice);\n }\n } catch(error) {\n !error.handled && console.info(`1014: Failed to submit results for slice ${payloadData.slice} of job ${payloadData.job}`, error);\n this.emit('submitSliceFailed', error);\n } finally {\n this.emit('submitFinished');\n // Remove the slice from the slices array.\n this.removeSlice(slice);\n if (Supervisor.sliceTiming) {\n slice['resultDelta'] = Date.now() - slice['resultDelta'];\n console.log(`recordResult(${slice['pairingDelta']}, ${slice['executionDelta']}, ${slice['resultDelta']}): Completed slice ${slice.identifier}.`);\n } else\n debugging('supervisor') && console.log(`recordResult: Completed slice ${slice.identifier}.`);\n }\n }\n\n /**\n * Send a work function's result to a server that speaks our DCP Remote Data Server 
protocol.\n * The data server dcp-rds is been implemented in https://gitlab.com/Distributed-Compute-Protocol/dcp-rds .\n *\n * @param {Slice} slice - Slice object whose result we are sending.\n * @returns {Promise<object>} - Object of the form { success: true, href: 'http://127.0.0.1:3521/methods/download/jobs/34/result/10' } .\n * @throws When HTTP status not in the 2xx range.\n */\n async sendResultToRemote(slice) {\n const postParams = {\n ...slice.resultStorageParams\n };\n\n const sliceResultUri = makeDataURI('pattern', slice.resultStorageDetails, {\n slice: slice.sliceNumber,\n job: slice.jobAddress,\n });\n\n debugging() && console.log('sendResultToRemote sliceResultUri: ', sliceResultUri);\n\n const url = new DcpURL(sliceResultUri);\n\n // Note: sendResultToRemote was made a member function of class Supervisor to enable access to this.alowedOrigins .\n if (this.allowedOrigins.indexOf(url.origin) === -1 &&\n dcpConfig.worker.allowOrigins.sendResults.indexOf(url.origin) === -1) {\n throw new Error(`Invalid origin for remote result storage: '${url.origin}'`);\n }\n\n postParams.element = slice.sliceNumber;\n postParams.contentType = 'application/json'; // Currently data will be outputed as a JSON object, @todo: Support file upload.\n\n debugging() && console.log('sendResultToRemote: postParams: ', postParams);\n\n let result = slice.result.result;\n if (result) {\n postParams.content = JSON.stringify(result);\n } else {\n postParams.error = JSON.stringify(slice.error);\n }\n\n debugging('supervisor') && console.log('sendResultToRemote: content: ', (result ? postParams.content : postParams.error).slice(0, 512));\n\n //\n // Notes:\n // 1) In recordResults the response from justFetch is JSON serialized and encodeDataURI is called.\n // payloadData.result = await this.sendResultToRemote(slice);\n // payloadData.result = encodeDataURI(JSON.stringify(payloadData.result));\n // 2) We do further processing after the call to sendResultToRemote in recordResult, because\n // if we did it here there would be a perf hit. When the return value is a promise, it gets\n // folded into sendResultToRemote's main promise. If justFetch's promise wasn't a return value then\n // justFetch would be separately added to the micro-task-queue.\n return await justFetch(url, 'JSON', 'POST', false, postParams);\n }\n}\n\n/**\n * Sandbox has had an error which is not from the work function: kill it\n * and try to redo the slice.\n */\nfunction handleSandboxError(supervisor, sandbox, error) {\n const slice = sandbox.slice;\n\n slice.sandboxErrorCount = (slice.sandboxErrorCount || 0) + 1;\n sandbox.slice = null;\n supervisor.returnSandbox(sandbox); /* terminate the sandbox */\n slice.status = SLICE_STATUS_UNASSIGNED; /* ToT */\n console.warn(`Supervisor.handleSandboxError: Sandbox ${sandbox.identifier}...(${sandbox.public.name}/${slice.sandboxErrorCount}) with slice ${slice.identifier} had error.`, error);\n\n if (slice.sandboxErrorCount < dcpConfig.worker.maxSandboxErrorsPerSlice)\n supervisor.queuedSlices.push(slice);\n else {\n slice.error = error;\n supervisor.returnSlice(slice);\n }\n}\n\n/**\n * Add a slice to the slice payload being built. If a sliceList already exists for the\n * job-status-authMessage tuple, then the slice will be added to that, otherwise a new\n * sliceList will be added to the payload.\n *\n * @param {Object[]} slicePayload - Slice payload being built. Will be mutated in place.\n * @param {Slice} slice - The slice.\n * @param {String} status - Status update, eg. 
progress or scheduled.\n *\n * @returns {Object[]} mutated slicePayload array\n */\nfunction addToSlicePayload(slicePayload, slice, status) {\n // getAuthorizationMessage helps enforces the equivalence\n // !authorizationMessage <==> sliceNumber === 0\n const authorizationMessage = slice.getAuthorizationMessage();\n if (!authorizationMessage) return;\n\n // Try to find a sliceList in the payload which matches the job, status, and auth message\n let sliceList = slicePayload.find(desc => {\n return desc.job === slice.jobAddress\n && desc.status === status\n && desc.authorizationMessage === authorizationMessage;\n });\n\n // If we didn't find a sliceList, start a new one and add it to the payload\n if (!sliceList) {\n sliceList = {\n job: slice.jobAddress,\n sliceNumbers: [],\n status,\n authorizationMessage,\n };\n slicePayload.push(sliceList);\n }\n\n sliceList.sliceNumbers.push(slice.sliceNumber);\n\n return slicePayload;\n}\n\n/**\n * Add a slice to the returnSlice payload being built. If a sliceList already exists for the\n * job-isEstimation-authMessage-reason tuple, then the slice will be added to that, otherwise a new\n * sliceList will be added to the payload.\n *\n * @param {Object[]} slicePayload - Slice payload being built. Will be mutated in place.\n * @param {Slice} slice - The slice.\n * @param {String} [reason] - Optional reason to further characterize status; e.g. 'ENOPROGRESS', 'EUNCAUGHT', 'ESLICETOOSLOW', 'unknown'.\n *\n * @returns {Object[]} mutated slicePayload array\n */\nfunction addToReturnSlicePayload(slicePayload, slice, reason) {\n // getAuthorizationMessage helps enforces the equivalence\n // !authorizationMessage <==> sliceNumber === 0\n const authorizationMessage = slice.getAuthorizationMessage();\n if (!authorizationMessage) return;\n\n if (!reason) reason = slice.error ? 'EUNCAUGHT' : 'unknown';\n\n // Try to find a sliceList in the payload which matches the job, status, and auth message\n let sliceList = slicePayload.find(desc => {\n return desc.job === slice.jobAddress\n && desc.isEstimationSlice === slice.isEstimationSlice\n && desc.authorizationMessage === authorizationMessage\n && desc.reason === reason;\n });\n\n // If we didn't find a sliceList, start a new one and add it to the payload\n if (!sliceList) {\n sliceList = {\n job: slice.jobAddress,\n sliceNumbers: [],\n status: 'return',\n isEstimationSlice: slice.isEstimationSlice,\n authorizationMessage,\n reason,\n };\n slicePayload.push(sliceList);\n }\n\n sliceList.sliceNumbers.push(slice.sliceNumber);\n\n return slicePayload;\n}\n\n/**\n * Return DCPv4-specific connection options, composed of type-specific, URL-specific, \n * and worker-specific options, any/all of which can override the dcpConfig.dcp.connectOptions.\n * The order of precedence is the order of specificity.\n */\nfunction connectionOptions(url, label) {\n return leafMerge(/* ordered from most to least specific */\n dcpConfig.worker.dcp.connectionOptions.default,\n dcpConfig.worker.dcp.connectionOptions[label],\n dcpConfig.worker.dcp.connectionOptions[url.href]);\n}\n\n/** @type {number | boolean} */\nSupervisor.lastAssignFailTimerMs = false;\n/** @type {boolean} */\nSupervisor.startSandboxWork_beenCalled = false;\n/** @type {boolean} */\nSupervisor.debugBuild = ((__webpack_require__(/*! 
dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug');\n/**\n * When Supervisor.sliceTiming is set to be true, it displays the timings of a every slice\n * slice['pairingDelta'] = timespan of when slice is paired with sandbox until execution starts\n * slice['executionDelta'] = timespan of execution in sandbox\n * slice['resultDelta'] = timespan of when sandbox finishes executing until recordResult completes.\n * @type {boolean}\n */\nSupervisor.sliceTiming = false;\n\nexports.Supervisor = Supervisor;\n\n\n//# sourceURL=webpack://dcp/./src/dcp-client/worker/supervisor.js?");
4450
4450
 
4451
4451
  /***/ }),
4452
4452
 
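For reference, a minimal standalone sketch (not part of the package) of the metrics computation performed by Supervisor.recordResult in the hunk above: densities are derived from the raw timings first, and only then are the CPU and GPU times bumped to whole units with 1 + Math.floor(...). The computeSliceMetrics helper and the sample timeReport numbers are hypothetical.

// Sketch only: derive slice metrics from a sandbox timeReport.
function computeSliceMetrics(timeReport) {
  const metrics = { GPUTime: 0, CPUTime: 0, CPUDensity: 0, GPUDensity: 0 };

  if (timeReport && timeReport.total > 0) {
    metrics.GPUTime = timeReport.webGL;
    metrics.CPUTime = timeReport.CPU;
    metrics.CPUDensity = metrics.CPUTime / timeReport.total;
    metrics.GPUDensity = metrics.GPUTime / timeReport.total;
    metrics.CPUTime = 1 + Math.floor(metrics.CPUTime);
    metrics.GPUTime = 1 + Math.floor(metrics.GPUTime);
  }

  return metrics;
}

// A timeReport of { total: 8, CPU: 6.5, webGL: 2.25 } (made-up numbers) yields
// { GPUTime: 3, CPUTime: 7, CPUDensity: 0.8125, GPUDensity: 0.28125 }.
console.log(computeSliceMetrics({ total: 8, CPU: 6.5, webGL: 2.25 }));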
@@ -4531,7 +4531,7 @@ eval("/**\n * @file protocol/connection/message.js\n * @author Ryan
4531
4531
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4532
4532
 
4533
4533
  "use strict";
4534
- eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file protocol/connection/connection.js\n * @author Ryan Rossiter\n * @author KC Erb\n * @author Wes Garland\n * @date January 2020, Feb 2021, Mar 2022\n *\n * A Connection object represents a connection to another DCP entity. \n * A DCP connection may 'live' longer than the underlying protocol's connection,\n * and the underlying protocol connection (or, indeed, protocol) may change\n * throughout the life of the DCP connection.\n * \n * DCP connections are uniquely identified by the DCP Session ID, specified by\n * the dcpsid property, present in every message body. This session id negotiated during connection,\n * with the initiator and target each providing half of the string.\n *\n * Connection instance events:\n * - session: dcpsid new session established\n * - connect: url UI hint: internet available\n * - disconnect: url UI hint: internet not available\n * - readyStateChange: *** DO NOT USE **\n * - error: error emitted when errors happen that would otherwise go uncaught\n * - close: connection instance is closing\n * - end: Connection instance is closed\n * - send: msgObj when a message is sent to the peer; does not wait for ack; may re-trigger on reconnect\n * - ready: when the connection is ready for traffic (constructor promises resolved)\n *\n * State Transition Diagram for Connection.state:\n *\n * initial connecting established disconnected close-wait closing closed\n * ===========================================================================================================================\n * |-- i:connect ---->\n * |-- t:accept ----->\n * |-- t:establishTarget -->\n * |-- i:connect ---------->\n * |-- transportDisconnectHandler -->\n * <-- i:_reconnect ----------------------------------------|\n * |-i:useNewTransport-->\n * <-- t:useNewTransport --------|\n * |-- closeWait ----------------------------------------------------------->\n * |-- closeWait ----------------------------------->\n * |-- closeWait -->\n * |-- doClose --------------->\n * |-- close ------------------------------------------------------------------------------------------------------------> \n * |-- close ---------------------------------------------------------------------------->\n * |-- close ---------------------------------------------------->\n * |-- close ------------------->\n * |-- doClose -->\n *\n *\n * Not until the established state can we count on things like a dcpsid, \n * peerAddress, identityPromise resolution and so on.\n * \n * Error Codes relevant to DCP Connections:\n * DCPC-1001 - CONNECTION CANNOT SEND WHEN IN CLOSING, CLOSE-WAIT OR CLOSED\n * DCPC-1002 - MESSAGE OWNER IS INVALID\n * DCPC-1003 - MESSAGE SIGNATURE INVALID \n * DCPC-1004 - MESSAGE BODY IS INVALID\n * DCPC-1005 - TRYING TO ESTABLISH TARGET AFTER TARGET ALREADY ESTABLISHED\n * DCPC-1006 - CONNECTION COULD NOT BE ESTABLISHED WITHIN 30 SECONDS\n * DCPC-1007 - RECEIVED MESSAGE PAYLOAD BEFORE CONNECT OPERATION\n * DCPC-1008 - TARGET RESPONDED WITH INVALID DCPSID\n * DCPC-1009 - MESSAGE IS OF UNKNOWN TYPE\n * DCPC-1010 - DUPLICATE TRANSMISSION RECEIPT\n * DCPC-1011 - DEFAULT ERROR CODE WHEN PEER SENDS CLOSE MESSAGE\n * DCPC-1012 - TRIED TO INITIATE CONNECTION AFTER SESSION ALREADY ESTABLISHED\n * DCPC-1013 - DEFAULT ERROR CODE WHEN CLOSING WITH REASON THATS NOT INSTANCE OF DCPERROR\n * DCPC-1014 - NO TRANSPORTS AVAILABLE\n * DCPC-1015 - CANNOT CONNECT WHEN 
CONNECTION ALREADY CLOSED\n * DCPC-1016 - ERROR CONNECTING VIA AVAILABLE TRANSPORTS\n * DCPC-1017 - FIRST PROTOCOL MESSAGE WAS DID NOT INVOLVE INITIAL CONNECT REQUEST\n */\n\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp');\nconst dcpEnv = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { leafMerge, a$sleepMs } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\n\nconst { Transport } = __webpack_require__(/*! ../transport */ \"./src/protocol-v4/transport/index.js\");\nconst { Sender } = __webpack_require__(/*! ./sender */ \"./src/protocol-v4/connection/sender.js\");\nconst { Receiver } = __webpack_require__(/*! ./receiver */ \"./src/protocol-v4/connection/receiver.js\");\nconst { MessageFactory } = __webpack_require__(/*! ./message-factory */ \"./src/protocol-v4/connection/message-factory.js\");\nconst { MessageLedger } = __webpack_require__(/*! ./message-ledger */ \"./src/protocol-v4/connection/message-ledger.js\");\nconst { getGlobalIdentityCache } = __webpack_require__(/*! ./identity-cache */ \"./src/protocol-v4/connection/identity-cache.js\");\nconst { makeEBOIterator, setImmediateN } = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\n\nconst { ConnectionMessage } = __webpack_require__(/*! ./connection-message */ \"./src/protocol-v4/connection/connection-message.js\");\nconst { ConnectionRequest } = __webpack_require__(/*! ./request */ \"./src/protocol-v4/connection/request.js\");\nconst { ConnectionResponse } = __webpack_require__(/*! ./response */ \"./src/protocol-v4/connection/response.js\");\nconst { ConnectionBatch } = __webpack_require__(/*! ./batch */ \"./src/protocol-v4/connection/batch.js\");\nconst { ConnectionAck } = __webpack_require__(/*! ./ack */ \"./src/protocol-v4/connection/ack.js\");\nconst { ErrorPayloadCtorFactory } = __webpack_require__(/*! ./error-payload */ \"./src/protocol-v4/connection/error-payload.js\");\nconst { role } = __webpack_require__(/*! ./connection-constants */ \"./src/protocol-v4/connection/connection-constants.js\");\n\nconst isDebugBuild = (__webpack_require__(/*! 
dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug';\n\nlet globalConnectionId = 0;\n\nconst CONNECTION_STATES = [\n 'initial',\n 'connecting', /* initiator: establish first transport instance connection; target: listening */\n 'established',\n 'disconnected', /* connection is still valid, but underlying transport is no longer connected */\n 'close-wait', /* Target of close message is in this state until response is acknowledged */\n 'closing',\n 'closed',\n]\n\nclass Connection extends EventEmitter {\n static get VERSION() {\n return '5.1.0'; // Semver format\n }\n\n static get VERSION_COMPATIBILITY() {\n return '^5.0.0'; // Semver format, can be a range\n }\n\n /**\n * Connection form 2:\n * @constructor\n * @param {object} [target]\n * @param {Promise} idKsPromise A promise which resolves to the identity keystore described\n * in form 1\n * @param {object} [options]\n * @see form 1\n */\n /**\n * Connection form 1\n * Create a DCP Connection object. This object could represent either the initiator or \n * target end of a connection, until it is specialized by either invoke the connect()\n * or accept() methods. Note that send() invokes connect() internally if not in an established\n * state.\n * @constructor\n * @note Connection objects exist for the lifetime of a given DCP connection \n * (session), whether or not the underlying transport (eg internet protocol) is connected or not. Once \n * the DCP session has ended, this object has no purpose and is not reusable.\n * \n * @param {object} target Object (usually a dcpConfig fragment) describing the target.\n * This object may contain the following properties; 'location' is\n * mandatory:\n * - location: a URL or DcpURL that is valid from the Internet\n * - friendLocation: a DcpURL that is valid from an intranet; if\n * both location and friendLocation specified, the best one will\n * be chosen by examining IP addresses (assuming an IP bearer).\n * - identity: an object with an address property which is a promise\n * that resolves to an instance of wallet.Address which represents\n * to the target's identity; this overrides the initiator's \n * identity cache unless options.strict is truey.\n * \n * @param {Keystore} [idKeystore] The keystore used to sign messages; used for non-repudiation.\n * If not specified, a dynamically-generated keystore will be used.\n * \n * @param {object} [options] Extra connection options that aren't defined via dcpConfig.dcp.connectionOptions.\n * These options include:\n * - identityUnlockTimeout: Number of (floating-point) seconds to leave the identity \n * keystore unlocked between invocations of Connection.send\n *\n * @returns instance of Connection that is specific to a target URL but not a role\n */\n constructor(target, idKeystore, options = {})\n {\n super('Connection');\n this.id = ++globalConnectionId;\n this.debugLabel = `connection(g:${this.id}):`;\n\n /* polymorphism strategy: rewrite to (configFragment, idksPromise, options) */\n if (target instanceof DcpURL)\n target = { location: target };\n else if (DcpURL.isURL(target))\n target = { location: new DcpURL(target) };\n else if (target instanceof String || typeof target === 'string')\n target = { location: new DcpURL(target) };\n assert(typeof target === 'object', target.location);\n\n /* idKeystore is always resolved by the time a session is established. 
*/\n if (!idKeystore)\n this.identityPromise = wallet.getId();\n else if (idKeystore instanceof Promise)\n this.identityPromise = idKeystore;\n else if (idKeystore instanceof wallet.Keystore)\n this.identityPromise = Promise.resolve(idKeystore); \n else if (idKeystore instanceof wallet.Address)\n this.identityPromise = Promise.resolve(new wallet.Keystore(idKeystore, '')); \n\n this.identityPromise.then((keystore) => {\n /* This always happens by the time a role is assumed */\n delete this.identityPromise;\n this.identity = keystore;\n this.emit('ready');\n debugging('connection') && console.debug(this.debugLabel, 'identity is', this.identity.address);\n });\n\n this.target = target;\n this.url = this.target.location;\n \n // Init internal state / vars\n this.state = new Synchronizer(CONNECTION_STATES[0], CONNECTION_STATES);\n // DO NOT USE this.state.on('change', (s) => this.emit('readyStateChange', s) );\n\n this.dcpsid = null;\n this.peerAddress = null;\n this.transport = null;\n this.messageFactory = new MessageFactory(this);\n this.messageLedger = new MessageLedger(this);\n this.authorizedSender = null;\n this.preDisconnectState = null;\n \n this.Message = ConnectionMessage(this);\n this.Request = ConnectionRequest(this.Message);\n this.Response = ConnectionResponse(this.Message);\n this.Batch = ConnectionBatch(this.Message);\n this.Ack = ConnectionAck(this.Message);\n this.ErrorPayload = ErrorPayloadCtorFactory(this);\n this.connectTime = Date.now();\n\n this.receiver = new Receiver(this);\n this.sender = new Sender(this);\n \n debugging('connection') && console.debug(this.debugLabel, `new connection#${this.id}; ${this.url}`);\n\n /* Create a connection config as this.options which takes into\n * account system defaults and overrides for specific urls, origins, etc.\n */\n this.options = leafMerge(\n ({ /* hardcoded defaults insulate us from missing web config */\n 'connectTimeout': 90,\n 'lingerTimeout': 1800,\n 'allowBatch': true,\n 'maxMessagesPerBatch': 100,\n 'identityUnlockTimeout': 300,\n 'ttl': {\n 'min': 15,\n 'max': 600,\n 'default': 120\n },\n 'transports': [ 'socketio' ],\n }),\n dcpConfig.dcp.connectionOptions.default,\n this.url && dcpConfig.dcp.connectionOptions[this.url.hostname],\n this.url && dcpConfig.dcp.connectionOptions[this.url.origin],\n options\n );\n\n /* draw out errors quickly in dev */\n if ((process.env.DCP_NETWORK_CONFIG_BUILD || dcpConfig.build) === 'debug')\n {\n this.options.maxMessagesPerBatch /= 10;\n \n /* short timeouts and debuggers don't get along well */\n if (dcpEnv.platform === 'nodejs' && !(requireNative('module')._cache.niim instanceof requireNative('module').Module))\n {\n this.options.connectTimeout /= 10;\n this.options.lingerTimeout /= 20;\n this.options.identityUnlockTimeout /= 10;\n }\n }\n\n assert(this.options.identityUnlockTimeout > 0);\n assert(this.options.connectTimeout > 0);\n assert(this.options.lingerTimeout > 0);\n assert(typeof this.options.ttl.min === 'number');\n assert(typeof this.options.ttl.max === 'number');\n assert(typeof this.options.ttl.default === 'number');\n \n this.backoffTimeIterator = makeEBOIterator(500, dcpConfig.build === 'debug' ? 5000 : 60000); /** XXXwg make this configurable */\n\n this.secureLocation = determineIfSecureLocation(this);\n this.loggableDest = '<generic>';\n }\n\n /**\n * Specialize an instance of Connection for either initiator or target behaviour. Once specialized,\n * the role cannot be changed. 
This happens based on methods invoked; connect() or accept() cause\n * the change.\n *\n * This specialization also implies that the connection is fully ready for use, including resolution\n * of the identity promise if necessary. This is perhaps not the best place to ensure that, but it\n * provides a reliable - and already async - waypoint to observe that event.\n */\n async a$assumeRole(myRole)\n {\n assert(myRole === role.initiator || myRole === role.target);\n\n if (this.role === myRole)\n return;\n this.role = myRole;\n \n if (this.role === role.target)\n {\n this.debugLabel = `connection(t:${this.id}):`;\n this.sender.debugLabel = `sender(t#${this.id}):`;\n this.messageLedger.debugLabel = `message-ledger(t#${this.id}):`;\n this.loggableDest = '<target>';\n this.hasNtp = true;\n }\n else\n {\n this.debugLabel = `connection(i:${this.id}):`;\n this.sender.debugLabel = `sender(i#${this.id}):`;\n this.messageLedger.debugLabel = `message-ledger(i#${this.id}):`;\n this.loggableDest = this.url.href;\n this.hasNtp = false;\n }\n\n debugging('connection') && console.debug(this.debugLabel, `connection #${this.id} is ${this.role} for ${this.url}`);\n if (!this.identity)\n {\n assert(this.identityPromise);\n debugging('connection') && console.debug(this.debugLabel, `waiting for identity resolution`);\n await this.identityPromise;\n }\n }\n\n /**\n * API to establish a DCP connection. Implied by send().\n *\n * When invoked by the initiator, this method establishes the connection by connecting\n * to the target url provided to the constructor.\n */\n async connect() // eslint-disable-line require-await\n {\n if (this.role == role.target)\n return;\n \n if (!this.role)\n await this.a$assumeRole(role.initiator);\n \n if (this.state.is('initial'))\n {\n if (!this.connectPromise)\n this.connectPromise = this.a$_connect().then(() => delete this.connectPromise);\n return this.connectPromise;\n }\n\n if (this.state.is('disconnected'))\n {\n if (!this.connectPromise)\n this.connectPromise = this.a$_reconnect().then(() => delete this.connectPromise);\n return this.connectPromise;\n }\n \n if (this.state.is('connecting'))\n {\n assert(this.connectPromise);\n return this.connectPromise;\n }\n\n if (this.state.is('established'))\n return;\n \n if (this.state.in(['closed', 'close-wait', 'closing']))\n throw new DCPError('Connection already closed', 'DCPC-1015');\n\n throw new Error('impossible');\n }\n\n /**\n * Performs a reconnection for connections which are in the disconnected state, and\n * tries to send any in-flight or enqueued messages as soon as that happens.\n */\n async a$_reconnect()\n {\n var transport;\n\n this.state.testAndSet('disconnected', 'connecting');\n try\n {\n do\n {\n transport = await this.a$connectToTargetTransport();\n } while (!transport && (this.transport && !this.transport.ready()) && !this.state.in(['closed', 'close-wait', 'closing']));\n\n debugging('connection') && console.debug(this.debugLabel, `reconnected via transport ${transport.socket.id}`);\n \n this.useNewTransport(transport);\n }\n catch (error)\n {\n if (error.code !== 'DCPC-1016' && error.code !== 'DCPC-1015')\n {\n /* Unreached unless there are bugs. 
*/\n throw error;\n } \n this.close(error, true);\n }\n }\n\n async a$_connect()\n {\n const connectTimeout = setTimeout(() => { this.close(new DCPError('Connection could not be established within thirty seconds', 'DCPC-1006'), true) }, 30000)\n \n if (dcpEnv.platform === 'nodejs')\n connectTimeout.unref()\n \n var presharedPeerAddress, establishResults;\n var targetIdentity = await this.target.identity;\n var transport;\n \n assert(this.role === role.initiator);\n\n this.state.set('initial', 'connecting');\n do\n {\n transport = await this.a$connectToTargetTransport().catch((error) =>\n {\n debugging('connection') && console.debug(`${this.debugLabel} error connecting to target on transport layer:`, error);\n return { ready: false };\n });\n\n /* This loop is very tricky from a concurrency POV. It is possible that between transport adoption\n * and this check that the underlying transport has disconnected, causing this.transport to be a\n * different value (undefined?) than transport, which could stall us before the state change.\n */\n } while(!transport.ready() && !this.transport.ready());\n this.adopt(transport);\n\n establishResults = await this.sender.establish().catch(error => {\n debugging('connection') && console.debug(this.debugLabel, `Could not establish DCP session over ${this.transport.name}:`, error);\n this.close(error, true);\n throw error;\n });\n const peerAddress = new wallet.Address(establishResults.peerAddress);\n const dcpsid = establishResults.dcpsid;\n debugging('connection') && console.debug(this.debugLabel, 'dcpsid is', dcpsid);\n \n if (!this.options.strict && targetIdentity && determineIfSecureConfig())\n {\n if ( false\n || typeof targetIdentity !== 'object'\n || typeof targetIdentity.address !== 'object'\n || !(targetIdentity.address instanceof wallet.Address))\n targetIdentity = { address: new wallet.Address(targetIdentity) }; /* map strings and Addresses to ks ducks */\n\n presharedPeerAddress = targetIdentity.address;\n debugging('connection') && console.debug(this.debugLabel, 'Using preshared peer address', presharedPeerAddress);\n }\n this.ensureIdentity(peerAddress, presharedPeerAddress);\n\n /* At this point, new session is valid & security checks out - make Connection instance usable */\n this.peerAddress = peerAddress;\n if (this.dcpsid)\n throw new DCPError(`Reached impossible state in connection.js; dcpsid already specified ${this.dcpsid} (${this.url})`, 'DCPC-1012');\n\n this.state.set('connecting', 'established'); /* established => dcpsid has been set */\n clearTimeout(connectTimeout);\n this.emit('session', (this.dcpsid = dcpsid));\n this.emit('connect', this.url);\n this.sender.notifyTransportReady();\n }\n\n /**\n * unreference any objects entrained by this connection so that it does not prevent\n * the node program from exiting naturally.\n */\n unref()\n {\n if (this.connectAbortTimer && this.connectAbortTimer.unref && dcpEnv.platform === 'nodejs')\n this.connectAbortTimer.unref();\n }\n\n /**\n * Method is invoked when the transport disconnects. Transport instance is responsible for its own\n * finalization; Connection instance is responsible for finding a new transport, resuming the\n * connection, and retransmitting any in-flight message.\n *\n * @param {object} transport the transport instance that triggered this handler. In some cases, it\n * is possible that this event is not serviced until after the connection\n * has already acquired a new transport instance, e.g. in a Target where\n * the initiator switched networks. 
This implies that it is possible for\n * more 'connect' events to be emitted than 'disconnect' events.\n */\n transportDisconnectHandler(transport)\n {\n try\n { \n if (this.state.in(['disconnected', 'closing', 'close-wait', 'closed'])) /* transports may fire this more than once */\n return;\n\n if (transport !== this.transport) /* event no longer relevant */\n return;\n\n if (this.transport)\n {\n transport.close();\n delete this.transport;\n }\n \n if (this.state.is('established'))\n {\n this.state.set('established', 'disconnected');\n this.emit('disconnect', this.url); /* UI hint: \"internet unavailable\" */\n debugging('connection') && console.debug(this.debugLabel, `Transport disconnected from ${this.url}; ${this.sender.inFlight ? 'have' : 'no'} in-flight message`);\n \n if (!this.dcpsid) /* hopefully impossible? */\n {\n debugging('connection') && console.debug(this.debugLabel, 'Not reconnecting - no session');\n return;\n }\n }\n \n if (this.role === role.target)\n {\n /* targets generally can't reconnect due to NAT */\n debugging('connection') && console.debug(this.debugLabel, `Waiting for initiator to reconnect for ${this.dcpsid}`);\n return;\n }\n \n if (this.dcpsid && !this.sender.inFlight && this.options.onDemand)\n {\n debugging('connection') && console.debug(this.debugLabel, `Not reconnecting ${this.dcpsid} until next message`);\n return;\n }\n \n if (this.state.is('connecting') && (!this.dcpsid || !this.peerAddress))\n {\n debugging('connection') && console.debug(this.debugLabel, `Disconnected while connecting, establishing transport and re-sending connect request.`);\n this.a$_reconnect();\n return;\n }\n\n /* At this point, we initiate a (re)connect attempt because either\n * - we haven't connected yet,\n * - we have something to send, or\n * - we are not an on-demand connection\n */\n if (!this.state.is('connecting'))\n this.connect();\n }\n catch(error)\n {\n debugging('connection') && console.debug(error);\n this.close(error, true);\n \n if (error.code !== 'DCPC-1016' && error.code !== 'DCPC-1015')\n {\n /* Unreached unless there are bugs. */\n throw error;\n }\n }\n }\n \n /**\n * Initiators only\n *\n * Connect to a target at the transport level.\n * - Rejects when we give up on all transports.\n * - Resolves with a transport instance when we connect to one.\n *\n * The connection attempt will keep a node program \"alive\" while it is happening.\n * The `autoUnref` connectionOption and unref() methods offer ways to make this not\n * happen.\n */\n async a$connectToTargetTransport()\n {\n const that = this;\n const availableTransports = [].concat(this.options.transports);\n var quitMsg = false; /* not falsey => reject asap, value is error message */\n var quitCode = undefined;\n var boSleepIntr; /* if not falsey, a function that interrupts the backoff sleep */\n var transportConnectIntr; /* if not falsey, a function that interrupts the current connection attempt */\n\n // Already trying to connect to target, don't try multiple times until we've aborted one attempt\n if (this.connectAbortTimer)\n return;\n \n /* This timer has the lifetime of the entire connection attempt. 
When we time out,\n * we set the quitMsg to get the retry loop to quit, then we interrupt the timer so\n * that we don't have to wait for the current backoff to expire before we notice, and\n * we expire the current attempt to connect right away as well.\n */\n this.connectAbortTimer = setTimeout(() => {\n quitMsg = 'connection timeout';\n if (boSleepIntr) boSleepIntr();\n if (transportConnectIntr) transportConnectIntr();\n }, this.options.connectTimeout * 1000);\n\n if (this.options.autoUnref)\n this.unref();\n\n /* cleanup code called on return/throw */\n function cleanup_ctt()\n {\n clearTimeout(that.connectAbortTimer);\n delete that.connectAbortTimer;\n }\n\n /* Connect to target with a specific transport. */\n /* Resolves with { bool success, obj transport } or rejects with { error } if the transport cannot connect*/\n function a$connectWithTransport(transportName)\n { \n transportConnectIntr = false;\n\n return new Promise((connectWithTransport_resolve, connectWithTransport_reject) => { \n const TransportClass = Transport.require(transportName);\n const transport = new TransportClass(that.target, that.options[transportName]);\n var ret = { transport };\n\n function cleanup_cwt()\n {\n for (let eventName of transport.eventNames())\n for (let listener of transport.listeners(eventName))\n transport.off(eventName, listener);\n }\n \n /* In the case where we have a race condition in the transport implementation, arrange things\n * so that we resolve with whatever fired last if we have a double-fire on the same pass of \n * the event loop.\n */\n transport.on('connect', () => { cleanup_cwt(); ret.success=true; connectWithTransport_resolve(ret) });\n transport.on('error', (error) => { cleanup_cwt(); connectWithTransport_reject(error) });\n transport.on('connect-failed', (error) => {\n cleanup_cwt();\n ret.success = false;\n ret.error = error;\n debugging() && console.debug(`Error connecting to ${that.url};`, error);\n connectWithTransport_resolve(ret);\n });\n \n /* let the connectAbortTimer interrupt this connect attempt */\n transportConnectIntr = () => { transport.close() };\n });\n }\n \n if (availableTransports.length === 0)\n {\n cleanup_ctt();\n return Promise.reject(new DCPError('no transports defined', 'DCPC-1014'));\n }\n \n /* Loop while trying each available transport in turn. 
Sleep with exponential backoff between runs */\n while (!quitMsg)\n {\n for (let transportName of availableTransports)\n {\n try\n {\n const { success, error, transport } = await a$connectWithTransport(transportName);\n\n /* Have connected to the remote at the transport level - OUT */\n if (success === true)\n {\n transportConnectIntr = false;\n cleanup_ctt();\n \n return transport;\n }\n\n /* Fast-fail: certain - but few - HTTP status codes let us know that this (or any) transport\n * will never work, so don't try those again.\n */\n if (error && error.httpStatus)\n {\n switch(error.httpStatus)\n {\n case 301: case 302: case 303: case 307: case 308:\n debugging('connection') && console.debug(this.debugLabel, `HTTP status ${error.httpStatus}; won't try again with ${transportName}`);\n availableTransports.splice(availableTransports.indexOf(transportName), 1);\n break;\n case 400: case 403: case 404:\n debugging('connection') && console.debug(this.debugLabel, `HTTP status ${error.httpStatus}; won't try again.`);\n quitMsg = error.message;\n quitCode = 'HTTP_' + error.httpStatus || 0;\n break;\n default:\n debugging('connection') && console.debug(this.debugLabel, `HTTP status ${error.httpStatus}; will try again with ${transportName}`);\n break;\n }\n }\n }\n catch (impossibleError)\n {\n /* transport connection attempts should never throw. */\n debugging('connection') && console.debug(this.debugLabel, `Error connecting to ${this.url} with ${transportName}; won't try again:`, impossibleError);\n availableTransports.splice(availableTransports.indexOf(transportName), 1);\n }\n }\n \n if (availableTransports.length === 0)\n {\n quitMsg = 'all transports exhausted';\n break;\n }\n \n /* Go to (interruptible) sleep for a while before trying again */\n const backoffTimeMs = this.backoffTimeIterator.next().value;\n debugging('connection') && console.debug(this.debugLabel, 'trying again in', Number(backoffTimeMs / 1000).toFixed(2), 'seconds');\n const boSleepPromise = a$sleepMs(backoffTimeMs);\n boSleepIntr = boSleepPromise.intr;\n await boSleepPromise;\n boSleepIntr = false;\n }\n\n /* The only way we get here is for us to discover that the connection is unconnectable - eg \n * reject timer has expired or similar.\n */\n cleanup_ctt();\n throw new DCPError(quitMsg, 'DCPC-1016', quitCode);\n }\n\n /**\n * Method which must be invoked whenever a new transport needs to be assigned to the connection. \n * If we have previously adopted a transport, we close it first, which will prevent \n * [by transport definition] any message handlers from firing, even if the old transport instance\n * has buffered traffic.\n *\n * @param {object} transport transport instance\n */\n adopt(transport)\n {\n if (this.transport)\n this.transport.close();\n\n transport.on('message', (m) => this.handleMessage(m));\n transport.on('end', () => this.transportDisconnectHandler(transport));\n transport.on('close', () => this.transportDisconnectHandler(transport));\n \n this.transport = transport;\n }\n \n \n /**\n * Method that gets invoked when there is a new transport available for adoption.\n * This will adjust the state of the connection, adopt the transport then tell\n * the sender to pump the message queue.\n * @param {object} transport transport instance \n */\n useNewTransport(transport)\n {\n if (this.state.in(['closing', 'close-wait', 'closed']))\n {\n debugging('connection') && console.debug(`${this.debugLabel} got a new transport during closing. 
closing the new transport and not completing transport adoption.`)\n transport.close();\n return;\n }\n \n var preDisconnectState = (!this.dcpsid || !this.peerAddress) ? 'connecting' : 'established';\n if (this.state.isNot(preDisconnectState))\n this.state.set(['connecting', 'disconnected'], preDisconnectState);\n this.adopt(transport);\n this.emit('connect', this.url); // UI hint: \"internet available\" \n this.sender.notifyTransportReady();\n }\n \n /**\n * Method that must be invoked by the target to \"accept\" a new DCP Connection request\n * at the transport layer.\n * @param {object} transport \n */\n async accept(transport)\n {\n assert(!this.role);\n await this.a$assumeRole(role.target);\n this.state.set('initial', 'connecting');\n this.adopt(transport);\n }\n \n /**\n * This method is invoked by the target when it has handled the initial connect request from\n * the initiator, which contains the peerAddress and the first half of the dcpsid (the second half is\n * populated by receiver.handleFirstRequest before being passed here). It transitions the connection \n * into an established state at the protocol level.\n * Note - this is really not the right design for this, but it is invoked from handleFirstRequest\n * in ./receiver.js\n *\n * @param {string} dcpsid dcpsid\n * @param {wallet.Address} peerAddress Address of peer\n */\n establishTarget(dcpsid, peerAddress) {\n assert(this.role === role.target);\n \n this.connectResponseId = Symbol('dummy'); // un-register ConnectResponse\n this.peerAddress = peerAddress;\n if (this.dcpsid)\n throw new DCPError(`Reached impossible state in connection.js; dcpsid already specified ${this.dcpsid}!=${dcpsid} (${this.url})`, 'DCPC-1005');\n this.emit('session', (this.dcpsid = dcpsid));\n debugging() && console.debug(this.debugLabel, 'dcpsid is', dcpsid);\n\n this.loggableDest = this.role === role.initiator ? this.url : peerAddress;\n this.state.set('connecting', 'established'); /* established => dcpsid has been set */\n\n debugging('connection') && console.debug(this.debugLabel, `Established session ${this.dcpsid} with ${this.peerAddress} for ${this.url}`);\n }\n\n /**\n * Check to see if the peer address conflicts with what we have in the global identity cache;\n * it does, throw an exception.\n */\n ensureIdentity (peerAddress, presharedPeerAddress)\n {\n let idc = getGlobalIdentityCache();\n let noConflict = idc.learnIdentity(this.url, peerAddress, presharedPeerAddress);\n\n if (!noConflict)\n throw new DCPError(`**** Security Error: Identity address ${peerAddress} does not match the saved key for ${this.url}`, 'DCPC-EADDRCHANGE');\n }\n \n \n /**\n * This method uses the first request (if we're target) or ack to the first request \n * (if we're initiator) to memoize the address of the peer authorized to send to us. \n * The first message must only be a request (since it comes from sender.specialFirstSend)\n * that has a connect operation, or an acknowledgement of the first request. 
\n * All future messages' owners will be validated against this authorized sender.\n * @param {Object} message \n */\n setAuthorizedSender(message)\n {\n if (message.body.type !== 'request' && message.body.type !== 'ack')\n throw new DCPError('First protocol message was not a request or ack', 'DCPC-1017');\n \n if (message.body.type === 'request' && message.body.payload.operation !== 'connect')\n throw new DCPError('First protocol message did not contain the correct payload', 'DCPC-1017');\n \n if (message.body.type === 'ack' && this.sender.inFlight.message.payload.operation !== 'connect')\n throw new DCPError('First protocol acknowledgement was not for connect request', 'DCPC-1017');\n \n this.authorizedSender = message.owner;\n }\n \n /**\n * Emits an error event with the relevant error and closes the connection immediately.\n * @param {string} errorMessage \n * @param {string} errorCode \n */\n \n handleValidationError(errorMessage, errorCode)\n {\n var messageError;\n \n debugging('connection') && console.debug(this.debugLabel, 'Message failed validation -', errorMessage);\n this.emit('error', (messageError = new DCPError(`message failed validation: ${errorMessage}`, errorCode)))\n this.close(messageError, true);\n }\n\n /**\n * This method validates the message owner, signature and body before passing it onto\n * either the receiver (for a request, response or batch) or the messageLedger (for an ack).\n * If it's a request, response or batch, this method also provokes the connection to \n * send an acknowledgement (ack) to the peer to let them know we got their message.\n * XXXwg this code needs an audit re error handling: what message error should we be emitting?\n * why do we keep working after we find an error?\n * XXXsc did some auditing. we happy now?\n * @param {string} JSON-encoded unvalidated message object\n */\n async handleMessage (messageJSON) {\n var validation;\n var message;\n\n if (this.state.is('closed')) {\n debugging('connection') && console.debug(this.debugLabel, 'handleMessage was called on a closed connection.');\n return;\n }\n\n try\n {\n message = typeof messageJSON === 'object' ? 
messageJSON : JSON.parse(messageJSON);\n debugging('wire') && console.debug(this.debugLabel, `handleMessage: ${String(message && message.body && message.body.type).padEnd(10, ' ')} <- ${this.loggableDest}`);\n }\n catch(error)\n {\n console.error('connection::handleMessage received unparseable message from peer:', error);\n this.emit('error', error);\n return;\n }\n \n /**\n * We always ack a duplicate transmission.\n * This must happen before validation since during startup we may lack a\n * nonce or dcpsid (depending on whether initiator or target + race).\n */\n if (this.isDuplicateTransmission(message)) {\n debugging('connection') && console.debug(this.debugLabel, 'duplicate message:', message.body);\n debugging('wire') && console.debug(this.debugLabel, `dup message ack: ${String(message.body.type).padEnd(10, ' ')} -> ${this.loggableDest}`);\n\n this.sendAck(this.lastAckSigned) \n return;\n }\n\n debugging('connection') && console.debug(this.debugLabel, `received message ${message.body.type} ${message.body.id}; nonce=`, message.body.nonce);\n\n validation = this.validateMessageOwner(message)\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, 'DCPC-1002');\n return;\n }\n \n validation = this.validateMessageSignature(message);\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, 'DCPC-1003');\n return;\n }\n\n validation = this.validateMessageBody(message);\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, validation.errorCode || 'DCPC-1004'); /* messages of type 'unhandled-message' may contain more information about the failure */\n return;\n }\n \n if (message.body.type === \"ack\") {\n const ack = new this.Ack(message.body);\n this.messageLedger.handleAck(ack);\n return;\n } else if (message.body.type !== 'unhandled-message') {\n this.lastMessage = message;\n await this.ackMessage(message);\n }\n \n this.receiver.handleMessage(message);\n }\n\n \n /**\n * This method takes either a Request, Response or Batch, creates an ack for it\n * and sends it to the peer. 
This ack contains the nonce we expect on the next\n * message from peer.\n * @param {Connection.Message} message \n */\n async ackMessage(message) {\n debugging('connection') && console.debug(this.debugLabel, 'acking message of type: ', message.body.type);\n const ack = new this.Ack(message);\n const signedMessage = await ack.sign(this.identity);\n\n debugging('wire') && console.debug(this.debugLabel, `ackMessage: ${String(message.body.type).padEnd(10, ' ')} -> ${this.loggableDest}`);\n\n this.sendAck(signedMessage);\n this.lastAck = ack;\n this.lastAckSigned = signedMessage;\n }\n\n /**\n * Checks if the batch we just received has the same nonce\n * as the most-recently received batch.\n * @param {object} messageJSON\n */\n isDuplicateTransmission(messageJSON) {\n return this.lastMessage && this.lastMessage.body.nonce && this.lastMessage.body.nonce === messageJSON.body.nonce;\n }\n\n /**\n * Validate that the message came from the appropriate sender.\n * @param {Object} message the message to validate\n * @returns {Object} returns an object `ret` with either ret.success = true, \n * or ret.success = false accompanied by another property ret.errorMessage\n */\n validateMessageOwner(message)\n {\n if (!this.authorizedSender)\n {\n /* Capture the initial identity of the remote end during the connect operation */\n this.setAuthorizedSender(message);\n return { success: true }\n }\n else if (message.owner !== this.authorizedSender)\n {\n return { success: false, errorMessage: \"message came from unauthorized sender\" }\n }\n return { success: true }\n }\n \n /**\n * Validate that the signature was generated from this message body\n * @param {Object} message\n * @returns {Object} with properties 'success' and 'errorMessage'. When the message is valid on its \n * face, the success property is true, otherwise it is is false. When it is false,\n * the errorMessage property will be a string explaining why.\n */\n validateMessageSignature(message)\n {\n if (!message.signature) {\n debugging('connection') && console.warn(\"Message does not have signature, aborting connection\");\n return { success: false, errorMessage: \"message is missing signature\" };\n }\n \n const owner = new wallet.Address(message.owner);\n const signatureValid = owner.verifySignature(message.body, message.signature);\n\n if (!signatureValid)\n {\n debugging('connection') && console.warn(\"Message has an invalid signature, aborting connection\");\n return { success: false, errorMessage: \"invalid message signature\" };\n }\n\n return { success: true };\n }\n \n /**\n * This method is used to perform validation on all types of messages.\n * It validates the DCPSID, nonce, and the peerAddress.\n * @param {Object} message\n * @returns {Object} with properties 'success' and 'errorMessage'. When the message is valid on its \n * face, the success property is true, otherwise it is is false. When it is false,\n * the errorMessage property will be a string explaining why.\n *\n */\n validateMessageBody(message)\n {\n try\n {\n if (message.body.type === 'unhandled-message')\n {\n /* This special message type may not have a dcpsid, peerAddress, etc., so it might not\n * validate. It is also not a \"real\" message and only used to report ConnectionManager routing \n * errors, so we just report here, drop it, and close the connection.\n *\n * Note also that this is probably the wrong way to handle this case - restarting daemons - but\n * that is a problem for another day. 
/wg nov 2021\n */\n debugging('connection') && console.warn(this.debugLabel, \"Target Error - target could not process message.\", JSON.stringify(message.body),\n \"Aborting connection.\");\n return { success: false, errorMessage: `target could not process message (${message.body.payload && message.body.payload.message || 'unknown error'})`, errorCode: message.body.payload && message.body.payload.code}\n }\n if (this.peerAddress && !this.peerAddress.eq(message.owner))\n {\n debugging('connection') && console.warn(this.debugLabel,\n \"Received message's owner address does not match peer address, aborting connection\\n\",\n \"(owner addr)\", message.owner, '\\n',\n \"(peer addr)\", this.peerAddress);\n return { success: false, errorMessage: \"received message owner does not match peer address\" };\n }\n\n if (this.state.in(['established', 'closing', 'close-wait']) && message.body.type !== 'unhandled-message')\n {\n const body = message.body;\n\n assert(this.peerAddress); /* should be set in connect */\n /**\n * Security note:\n * We don't require the dcpsid to match on an ack because the connect response\n * ack doesn't have a dcpsid until after it is processed. Also ack's are protected\n * by ack tokens and signatures, so this doesn't leave a hole, just an inconsistency.\n */\n if (body.type !== 'ack' && body.dcpsid !== this.dcpsid)\n {\n debugging('connection') && console.warn(this.debugLabel,\n \"Received message's DCPSID does not match, aborting connection\\n\",\n \"Message owner:\", message.owner, '\\n',\n \"(ours)\", this.dcpsid, (Date.now() - this.connectTime)/1000, \"seconds after connecting - state:\", this.state._, \"\\n\", \n \"(theirs)\", body.dcpsid);\n if(body.dcpsid.substring(0, body.dcpsid.length/2) !== this.dcpsid.substring(0, this.dcpsid.length/2)){\n debugging('connection') && console.warn(\" Left half of both DCPSID is different\");\n }\n if(body.dcpsid.substring(body.dcpsid.length/2 + 1, body.dcpsid.length) !== this.dcpsid.substring(this.dcpsid.length/2 + 1, body.dcpsid.length)){\n debugging('connection') && console.warn(\" Right half of both DCPSID is different\");\n }\n return { success: false, errorMessage: \"DCPSID do not match\" };\n }\n /* can get close in middle of connecting, which will have no nonce.*/\n if (body.type !== 'ack' && this.lastAck.nonce !== body.nonce && (body.payload && body.payload.operation !== 'close'))\n {\n /* When Target sends back ConnectionLessErrorResponse, it uses the nonce of the message that caused an error. */\n if (this.sender.inFlight && this.sender.inFlight.message.nonce === body.nonce)\n {\n debugging('connection') && console.debug(`${this.debugLabel} Received messages nonce matches nonce of our current inFlight message.`,\n \"There was a problem sending this message. Aborting connection. 
Reason:\\n\", body.payload);\n return { success: false, errorMessage: \"current inflight message returned an error\" }\n }\n debugging('connection') && console.warn(this.debugLabel,\"Received message's nonce does not match expected nonce, aborting connection\\n\");\n debugging('connection') && console.debug(this.debugLabel, this.lastAck.nonce, body.nonce);\n return { success: false, errorMessage: \"received message's nonce does not match expected nonce\" };\n }\n }\n\n return { success: true };\n }\n catch(error)\n {\n console.error('message validator failure:', error);\n return { success: false, errorMessage: 'validator exception ' + error.message };\n }\n\n return { success: false, errorMessage: 'impossible code reached' }; // eslint-disable-line no-unreachable\n }\n\n /**\n * Targets Only.\n * The receiver creates a special connect response and the connection\n * needs to know about it to get ready for the ack. See `isWaitingForAck`.\n * @param {Message} message message we are sending out and waiting to\n * ack'd, probably a batch containing the response.\n */\n registerConnectResponse(message) {\n this.connectResponseId = message.id;\n }\n\n /**\n * Targets only\n * During the connection process a target sends a connect\n * response to an initiator and the initiator will ack it. Since transports\n * are not tightly coupled, we have no authoritative way to route the ack back\n * to the right connection. So a connection briefly registers the ack it\n * is looking for in this case. It will formally validate the ack after routing.\n * @param {string} messageId id of the message this ack is acknowledging.\n */\n isWaitingForAck(messageId) {\n return messageId === this.connectResponseId;\n }\n\n /**\n * Put connection into close-wait state so that a call to `close`\n * in this state will *not* trigger sending a `close` message to the peer.\n * Then call close.\n *\n * @note: This function is called when the remote end of the transport sends\n * a close command, from receiver::handleOperation. This impllies that\n * that we must be in established or later state.\n */\n closeWait (errorCode = null)\n {\n var preCloseState, reason;\n \n debugging('connection') && console.debug(this.debugLabel, `responding to close. state=closeWait dcpsid=${this.dcpsid}`);\n\n if (this.state.is('closed'))\n {\n debugging('connection') && console.debug(this.debugLabel, `remote asked us to close a closed connection; dcpsid=${this.dcpsid}`);\n return;\n }\n\n // continue with close in either case\n reason = `Received close from peer with Error Code ${errorCode}`;\n if (this.role === role.target)\n reason += ` (${this.url})`;\n else\n reason += ` (${this.debugLabel}${this.peerAddress.address})`;\n\n reason = new DCPError(reason, errorCode || 'DCPC-1011');\n\n // If we're already closing, wait for it to complete then resolve\n // WARNING: any place we transition to closing or close-wait, we MUST guarantedd\n // that 'end' will be emitted, or this code will hang forever!\n if (this.state.in(['close-wait', 'closing'])) {\n return new Promise((resolve) => {\n this.once('end', resolve) /* eventually fired by doClose elsewhere */\n });\n }\n\n /* XXXwg - this should only be established->close-wait. Why more? 
*/\n this.state.set(['disconnected', 'connecting', 'established'], 'close-wait');\n \n /* Set preCloseState to close-wait so doClose doesn't send a close message back */\n preCloseState = this.state.valueOf();\n return this.doClose(preCloseState, reason, true);\n }\n\n /**\n * This method will begin closing the protocol connection. It transitions\n * the protocol into the correct state, and then begins the work of closing.\n * \n * @param {string|Error} [reason] Either an Error or a message to use in the Error that will reject pending sends.\n * @param {boolean} [immediate] When true, the connection will not deliver any pending messages and instead\n * immediately send the peer a 'close' request. \n *\n * @return a Promise which resolves when the connection has been confirmed closed and the end event has been fired.\n */\n close (reason='requested', immediate=false)\n {\n if (this.state.is('initial'))\n {\n this.state.set('initial', 'closed');\n this.emit('close'); /* Don't emit dcpsid */\n }\n if (this.state.is('closed')) return Promise.resolve();\n\n const preCloseState = this.state.valueOf();\n debugging('connection') && \n console.debug(this.debugLabel, \n `close; dcpsid=${this.dcpsid} state=${preCloseState} immediate=${immediate} reason:`, reason);\n\n // If we're already closing, wait for it to complete then resolve\n if (this.state.in(['close-wait', 'closing'])) {\n return new Promise((resolve) => {\n this.once('end', resolve)\n });\n }\n\n this.state.set(['connecting', 'established', 'disconnected'], 'closing');\n\n // Perform actual work of closing\n return this.doClose(preCloseState, reason, immediate);\n }\n\n /**\n * Sends close message to peer after sending all pending messages.\n * Note that close messages are sent without the expectation of a response.\n * @param {DCPError|string} reason reason for closing\n */\n async sendCloseGracefully(reason) \n {\n debugging('connection') && console.debug(`${this.debugLabel} gracefully sending close message to peer with reason ${reason}`)\n let errorCode = reason instanceof DCPError ? reason.code : 'DCPC-1011';\n \n /* This only resolves when close is next message in queue */\n const closeMessage = await this.prepare('close', { errorCode: errorCode });\n this.sendPreparedMessage(closeMessage);\n this.messageLedger.fulfillMessagePromise(closeMessage.message.id, {});\n }\n \n /**\n * Sends close message to peer immediately. Pending messages will not be sent.\n * Note that close messages are sent without expectation of response.\n * @param {DCPError|string} reason reason for closing\n */\n async sendCloseImmediately(reason)\n {\n debugging('connection') && console.debug(`${this.debugLabel} immediately sending close message to peer with reason ${reason}`);\n let errorCode = reason instanceof DCPError ? reason.code : 'DCPC-1011';\n \n /* Last param being `true` means that prepareMessage will return unsigned message. Does not queue message. */\n const closeMessage = await this.prepare('close', { errorCode: errorCode }, true);\n \n if (this.sender.inFlight)\n closeMessage.nonce = this.sender.inFlight.message.nonce;\n else\n closeMessage.nonce = this.sender.nonce;\n \n let signedCloseMessage = await closeMessage.sign();\n \n /* Overwrite the in-flight message because we don't care to deliver pending messages */\n this.sender.inFlight = { message: closeMessage, signedMessage: signedCloseMessage };\n this.sender.sendInFlightMessage();\n }\n \n /**\n * This method performs the core close functionality. 
It appropriately sends the close message\n * to the peer, fails any pending transmissions, shuts down our sender and underlying transport\n * and puts us into the 'closed' state, indicating this connection object is now useless.\n * When called from closeWait, it does not send a close message.\n * @param {string} preCloseState the state that the connection was in at the start of the\n * invocation of close() or closeWait()\n *\n * @note: this function is not reentrant due to closeGracefully\n */\n async doClose(preCloseState, reason, immediate) {\n const dcpsid = this.dcpsid;\n var rejectErr;\n\n try\n {\n // Emit the close event the moment we know we are going to close, \n // so we can catch the close event and reopen the connection\n //\n // This implies that no API functions which call doClose may await between\n // their invocation and their call to doClose!\n this.emit('close', dcpsid /* should be undefined in initial state */);\n\n assert(this.state.in(['closing', 'close-wait']));\n if (preCloseState === 'established' && this.transport) {\n try {\n if (immediate) {\n await this.sendCloseImmediately(reason);\n } else {\n await this.sendCloseGracefully(reason);\n }\n } catch(e) {\n debugging() && console.warn(`Warning: could not send close message to peer. connectionid=${this.id}, dcpsid=,${this.dcpsid}, url=${this.url ? this.url.href : 'unknown url'} - (${e.message})`);\n }\n }\n\n // can delete these now that we've sent the close message\n this.dcpsid = null;\n this.peerAddress = null;\n\n if (reason instanceof DCPError)\n rejectErr = reason;\n else\n {\n let errorMessage = reason instanceof Error ? reason : `Connection to ${this.loggableDest} closed (${reason})`;\n rejectErr = new DCPError(errorMessage, 'DCPC-1013');\n }\n \n // Reject any pending transmissions in the message ledger\n this.messageLedger.failAllTransmissions(rejectErr);\n \n if (this.transport)\n {\n try { this.sender.shutdown(); }\n catch(e) { debugging() && console.warn(this.debugLabel, `Warning: could not shutdown sender; dcpsid=,${dcpsid}`, e); }\n \n try { this.transport.close(); delete this.transport; }\n catch(e) { debugging() && console.warn(this.debugLabel, `Warning: could not close transport; dcpsid=,${dcpsid}`, e); }\n }\n } catch(error) {\n debugging() && console.warn(this.debugLabel, `could not close connection; dcpsid=${dcpsid}, url=${this.url ? this.url.href : 'unknown url'}:`, error);\n }\n finally\n {\n this.state.set(['closing', 'close-wait'], 'closed');\n this.emit('end'); /* end event resolves promises on other threads for closeWait and close (ugh) */\n }\n }\n/**\n * Prepares a non-batchable message that can be sent directly over the wire. Returns when\n * the message has been signed and is ready to be sent. The connection will not be able to send \n * any messages until the prepared message here is either sent or discarded. If 'canBatch = true',\n * will return the unsigned message instead. In this case, enqueuing is handled by\n * `async Connection.send()`, allowing the message to be put in a batch before being signed.\n * @param {...any} messageData Data to build message with. 
Format is:\n * `operation {string}, \n * data {Object} (optional),\n * identity {wallet.Keystore} (optional),\n * canBatch {boolean} (optional)`\n * @returns {Promise<Object>} a promise which resolves to { message, signedMessage }\n */\n\n async prepare(...messageData)\n {\n if (this.state.isNot('established'))\n {\n await this.connect().catch((e) => {\n if (e.code !== 'DCPC-1015') /* If we're closed already, then swallow the error */\n { \n this.close(e, true);\n throw e;\n }\n });\n }\n \n \n let signedMessage, message = messageData[0];\n let canBatch = false;\n \n if (typeof messageData[messageData.length - 1] === 'boolean')\n canBatch = messageData.pop();\n \n if (!message.id)\n {\n message = this.messageFactory.buildMessage(...messageData); \n }\n \n debugging('connection') && console.debug(`${this.debugLabel} Created message ${message.id}.`);\n \n message.ackToken = this.sender.makeAckToken();\n message.batchable = canBatch;\n \n if (canBatch)\n return Promise.resolve(message);\n \n debugging('connection') && console.debug(`${this.debugLabel} Preparing message ${message.id} for sending...`); \n const messageWithNonce = await new Promise((resolve) =>\n {\n // This event is fired in the sender by serviceQueue() when the message is at the top of the queue\n // and has a nonce it can sign with. At this point, we may return the prepared message.\n this.once(`${message.id} ready`, (message) => resolve(message))\n \n this.sender.queue.push(message)\n this.sender.requestQueueService()\n })\n \n signedMessage = await messageWithNonce.sign();\n \n debugging('connection') && console.debug(`${this.debugLabel} Finished preparing message. ${message.id} is ready to be sent.`);\n \n return { message: messageWithNonce, signedMessage: signedMessage };\n }\n\n /**\n * Sends a message to the connected peer. If the connection has not yet been established,\n * this routine will first invoke this.connect(). If the first argument has a 'signedMessage'\n * property, the message is assumed to be prepared and is sent immediately. If not, and the first\n * argument does not have an 'id' property, it will be sent to `async prepare()`, and then put\n * in the message queue.\n * \n * @param {...any} args 3 forms:\n * [operation]\n * [operation, data]\n * [operation, data, identity]\n * @returns {Promise<Response>} a promise which resolves to a response.\n */\n async send(...args)\n {\n if (!this.state.is('established'))\n await this.connect().catch((e) =>\n {\n if (e.code !== 'DCPC-1015') /* If we're closed already, then swallow the error */\n { \n this.close(e, true);\n throw e;\n }\n });\n\n let message = args[0];\n // ie. already prepared\n if (message.signedMessage)\n return this.sendPreparedMessage(message);\n \n // ie. message not hyrdated or is a response, which needs ack token\n if (!message.id || message.type === 'response')\n message = await this.prepare(...args, true);\n\n if (this.state.in(['closing', 'close-wait', 'closed']))\n throw new DCPError(`Connection (${this.id}) is ${this.state}; cannot send. 
(${this.loggableDest})`, 'DCPC-1001');\n\n return this.sender.enqueue(message);\n }\n \n /**\n * Set the sender's flight deck with the given message and send it.\n * Can only be passed a prepared message, which is a promise that only\n * resolves to a message when it is signed with the nonce, so it must\n * be the next message to be sent (or discarded).\n * @param {Object} messageObject\n * @returns {Promise<Response>} \n */\n sendPreparedMessage(messageObject)\n {\n if (!messageObject.signedMessage) return;\n \n const { message, signedMessage } = messageObject;\n assert(!this.sender.inFlight);\n this.sender.inFlight = { message: message, signedMessage: signedMessage };\n const messageSentPromise = this.messageLedger.addMessage(message);\n this.sender.sendInFlightMessage();\n \n return messageSentPromise;\n }\n \n /**\n * Send a signed ack directly over the wire. If we get a SocketIO.Send: Not Connected error, \n * wait until we're connected and then resend the ack.\n * @param {String} ack \n */\n sendAck(ack)\n {\n try\n {\n this.transport.send(ack)\n }\n catch(error)\n {\n // Transport was lost\n if (error.code === 'DCPC-1105')\n this.once('connect', () => this.sendAck(ack));\n else\n console.error(`${this.debugLabel} Error acking message to ${this.loggableDest}: ${error}`);\n }\n }\n \n /**\n * Discard a prepared message by removing it from the queue.\n * Returns nonce to sender and provokes queue service.\n * @param {Object} messageObject { message, signedMessage } message to discard \n */\n discardMessage(messageObject)\n {\n let { message } = messageObject;\n this.sender.nonce = message.nonce;\n delete message.nonce;\n message.type = 'unhandled-message';\n this.sender.requestQueueService();\n }\n\n /**\n * This routine returns the current time for the purposes of\n * populating the Request message payload.validity.time property.\n * \n * @returns {Number} the integer number of seconds which have elapsed since the epoch\n */\n currentTime() {\n let msSinceEpoch;\n if (this.hasNtp) {\n msSinceEpoch = Date.now();\n } else {\n const msSinceLastReceipt = performance.now() - this.receiver.lastResponseTiming.receivedMs;\n msSinceEpoch = this.receiver.lastResponseTiming.time * 1000 + msSinceLastReceipt;\n }\n return Math.floor(msSinceEpoch / 1000);\n }\n\n /**\n * This method sends a keepalive to the peer, and resolves when the response has been received.\n */\n keepalive() {\n return this.send('keepalive');\n }\n}\n\n/** \n * Determine if we got the scheduler config from a secure source, eg https or local disk.\n * We assume tha all https transactions have PKI-CA verified.\n *\n * @note protocol::getSchedulerConfigLocation() is populated via node-libs/config.js or dcp-client/index.js\n *\n * @returns true or falsey\n */\nfunction determineIfSecureConfig()\n{\n var schedulerConfigLocation = (__webpack_require__(/*! 
dcp/protocol-v4 */ \"./src/protocol-v4/index.js\").getSchedulerConfigLocation)();\n var schedulerConfigSecure;\n\n if (schedulerConfigLocation && (schedulerConfigLocation.protocol === 'https:' || schedulerConfigLocation.protocol === 'file:'))\n {\n debugging('strict-mode') && console.debug(`scheduler config location ${schedulerConfigLocation} is secure`); /* from casual eavesdropping */\n schedulerConfigSecure = true;\n }\n\n if (isDebugBuild)\n {\n debugging('strict-mode') && console.debug('scheduler config location is always secure for debug builds');\n schedulerConfigSecure = 'debug';\n }\n\n debugging('strict-mode') && console.debug(`Config Location ${schedulerConfigLocation} is ${!schedulerConfigSecure ? 'not secure' : 'secure-' + schedulerConfigSecure}`);\n return schedulerConfigSecure;\n}\n\n/**\n * Determine if a URL is secure by examinining the protocol, connection, and information about the \n * process; in particular, we try to determine if the dcp config was securely provided, because if \n * it wasn't, then we can't have a secure location, since the origin could be compromised.\n * \n * \"Secure\" in this case means \"secure against casual eavesdropping\", and this information should only\n * be used to refuse to send secrets over the transport or similar.\n *\n * @returns true or falsey\n */\nfunction determineIfSecureLocation(conn)\n{\n var isSecureConfig = determineIfSecureConfig();\n var secureLocation;\n\n if (!isSecureConfig) /* can't have a secure location without a secure configuration */\n return null;\n \n if (isDebugBuild || conn.url.protocol === 'https:' || conn.url.protocol === 'tcps:')\n secureLocation = true;\n else if (conn.role === role.initiator && conn.target.hasOwnProperty('friendLocation') && conn.url === conn.target.friendLocation)\n secureLocation = true;\n else if (conn.options.allowUnencryptedSecrets)\n secureLocation = 'override';\n else\n secureLocation = false;\n\n debugging('strict-mode') && console.debug(`Location ${conn.url} is ${!secureLocation ? 'not secure' : 'secure-' + secureLocation}`);\n \n return secureLocation;\n}\nexports.Connection = Connection;\n\n\n//# sourceURL=webpack://dcp/./src/protocol-v4/connection/connection.js?");
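The connection.js module shown in this hunk documents the DCP session lifecycle: a Connection is constructed against a target URL, connect() is implied by send(), and close() resolves only after the 'end' event fires. The following consumer-side sketch is illustrative only and is not part of the published bundle; it assumes dcpConfig has already been initialized by dcp-client, and the require path is hypothetical (inside the bundle the class is only reachable through webpack's module map as ./src/protocol-v4/connection/connection.js, which exports Connection).

const { Connection } = require('dcp/protocol-v4/connection/connection'); // hypothetical path; the bundle resolves this via its webpack module map

async function probeScheduler(url)
{
  // Form 1 constructor: target URL only; with no keystore argument the
  // default identity from wallet.getId() is used.
  const conn = new Connection(url);

  conn.on('session',    (dcpsid) => console.log('session established:', dcpsid));
  conn.on('disconnect', ()       => console.log('transport lost; initiator will retry'));
  conn.on('error',      (error)  => console.error('connection error:', error));

  // send() invokes connect() internally when the connection is not yet
  // in the established state, so an explicit connect() call is optional.
  const response = await conn.send('keepalive');

  // Graceful close: pending messages are flushed before the 'close'
  // operation is sent; the returned promise resolves after 'end' fires.
  await conn.close('probe finished');
  return response;
}

The same lifecycle applies to the new revision of the file added below; only a 'close' request or a validation failure (DCPC-1002/1003/1004) moves the connection out of the established state.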
4534
+ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file protocol/connection/connection.js\n * @author Ryan Rossiter\n * @author KC Erb\n * @author Wes Garland\n * @date January 2020, Feb 2021, Mar 2022\n *\n * A Connection object represents a connection to another DCP entity. \n * A DCP connection may 'live' longer than the underlying protocol's connection,\n * and the underlying protocol connection (or, indeed, protocol) may change\n * throughout the life of the DCP connection.\n * \n * DCP connections are uniquely identified by the DCP Session ID, specified by\n * the dcpsid property, present in every message body. This session id negotiated during connection,\n * with the initiator and target each providing half of the string.\n *\n * Connection instance events:\n * - session: dcpsid new session established\n * - connect: url UI hint: internet available\n * - disconnect: url UI hint: internet not available\n * - readyStateChange: *** DO NOT USE **\n * - error: error emitted when errors happen that would otherwise go uncaught\n * - close: connection instance is closing\n * - end: Connection instance is closed\n * - send: msgObj when a message is sent to the peer; does not wait for ack; may re-trigger on reconnect\n * - ready: when the connection is ready for traffic (constructor promises resolved)\n *\n * State Transition Diagram for Connection.state:\n *\n * initial connecting established disconnected close-wait closing closed\n * ===========================================================================================================================\n * |-- i:connect ---->\n * |-- t:accept ----->\n * |-- t:establishTarget -->\n * |-- i:connect ---------->\n * |-- transportDisconnectHandler -->\n * <-- i:_reconnect ----------------------------------------|\n * |-i:useNewTransport-->\n * <-- t:useNewTransport --------|\n * |-- closeWait ----------------------------------------------------------->\n * |-- closeWait ----------------------------------->\n * |-- closeWait -->\n * |-- doClose --------------->\n * |-- close ------------------------------------------------------------------------------------------------------------> \n * |-- close ---------------------------------------------------------------------------->\n * |-- close ---------------------------------------------------->\n * |-- close ------------------->\n * |-- doClose -->\n *\n *\n * Not until the established state can we count on things like a dcpsid, \n * peerAddress, identityPromise resolution and so on.\n * \n * Error Codes relevant to DCP Connections:\n * DCPC-1001 - CONNECTION CANNOT SEND WHEN IN CLOSING, CLOSE-WAIT OR CLOSED\n * DCPC-1002 - MESSAGE OWNER IS INVALID\n * DCPC-1003 - MESSAGE SIGNATURE INVALID \n * DCPC-1004 - MESSAGE BODY IS INVALID\n * DCPC-1005 - TRYING TO ESTABLISH TARGET AFTER TARGET ALREADY ESTABLISHED\n * DCPC-1006 - CONNECTION COULD NOT BE ESTABLISHED WITHIN 30 SECONDS\n * DCPC-1007 - RECEIVED MESSAGE PAYLOAD BEFORE CONNECT OPERATION\n * DCPC-1008 - TARGET RESPONDED WITH INVALID DCPSID\n * DCPC-1009 - MESSAGE IS OF UNKNOWN TYPE\n * DCPC-1010 - DUPLICATE TRANSMISSION RECEIPT\n * DCPC-1011 - DEFAULT ERROR CODE WHEN PEER SENDS CLOSE MESSAGE\n * DCPC-1012 - TRIED TO INITIATE CONNECTION AFTER SESSION ALREADY ESTABLISHED\n * DCPC-1013 - DEFAULT ERROR CODE WHEN CLOSING WITH REASON THATS NOT INSTANCE OF DCPERROR\n * DCPC-1014 - NO TRANSPORTS AVAILABLE\n * DCPC-1015 - CANNOT CONNECT WHEN 
CONNECTION ALREADY CLOSED\n * DCPC-1016 - ERROR CONNECTING VIA AVAILABLE TRANSPORTS\n * DCPC-1017 - FIRST PROTOCOL MESSAGE WAS DID NOT INVOLVE INITIAL CONNECT REQUEST\n */\n\n\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp');\nconst dcpEnv = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst wallet = __webpack_require__(/*! dcp/dcp-client/wallet */ \"./src/dcp-client/wallet/index.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { leafMerge, a$sleepMs } = __webpack_require__(/*! dcp/utils */ \"./src/utils/index.js\");\nconst { Synchronizer } = __webpack_require__(/*! dcp/common/concurrency */ \"./src/common/concurrency.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\n\nconst { Transport } = __webpack_require__(/*! ../transport */ \"./src/protocol-v4/transport/index.js\");\nconst { Sender } = __webpack_require__(/*! ./sender */ \"./src/protocol-v4/connection/sender.js\");\nconst { Receiver } = __webpack_require__(/*! ./receiver */ \"./src/protocol-v4/connection/receiver.js\");\nconst { MessageFactory } = __webpack_require__(/*! ./message-factory */ \"./src/protocol-v4/connection/message-factory.js\");\nconst { MessageLedger } = __webpack_require__(/*! ./message-ledger */ \"./src/protocol-v4/connection/message-ledger.js\");\nconst { getGlobalIdentityCache } = __webpack_require__(/*! ./identity-cache */ \"./src/protocol-v4/connection/identity-cache.js\");\nconst { makeEBOIterator, setImmediateN } = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\n\nconst { ConnectionMessage } = __webpack_require__(/*! ./connection-message */ \"./src/protocol-v4/connection/connection-message.js\");\nconst { ConnectionRequest } = __webpack_require__(/*! ./request */ \"./src/protocol-v4/connection/request.js\");\nconst { ConnectionResponse } = __webpack_require__(/*! ./response */ \"./src/protocol-v4/connection/response.js\");\nconst { ConnectionBatch } = __webpack_require__(/*! ./batch */ \"./src/protocol-v4/connection/batch.js\");\nconst { ConnectionAck } = __webpack_require__(/*! ./ack */ \"./src/protocol-v4/connection/ack.js\");\nconst { ErrorPayloadCtorFactory } = __webpack_require__(/*! ./error-payload */ \"./src/protocol-v4/connection/error-payload.js\");\nconst { role } = __webpack_require__(/*! ./connection-constants */ \"./src/protocol-v4/connection/connection-constants.js\");\n\nconst isDebugBuild = (__webpack_require__(/*! 
dcp/common/dcp-build */ \"./src/common/dcp-build.js\").build) === 'debug';\n\nlet globalConnectionId = 0;\n\nconst CONNECTION_STATES = [\n 'initial',\n 'connecting', /* initiator: establish first transport instance connection; target: listening */\n 'established',\n 'disconnected', /* connection is still valid, but underlying transport is no longer connected */\n 'close-wait', /* Target of close message is in this state until response is acknowledged */\n 'closing',\n 'closed',\n]\n\nclass Connection extends EventEmitter {\n static get VERSION() {\n return '5.1.0'; // Semver format\n }\n\n static get VERSION_COMPATIBILITY() {\n return '^5.0.0'; // Semver format, can be a range\n }\n\n /**\n * Connection form 2:\n * @constructor\n * @param {object} [target]\n * @param {Promise} idKsPromise A promise which resolves to the identity keystore described\n * in form 1\n * @param {object} [options]\n * @see form 1\n */\n /**\n * Connection form 1\n * Create a DCP Connection object. This object could represent either the initiator or \n * target end of a connection, until it is specialized by either invoke the connect()\n * or accept() methods. Note that send() invokes connect() internally if not in an established\n * state.\n * @constructor\n * @note Connection objects exist for the lifetime of a given DCP connection \n * (session), whether or not the underlying transport (eg internet protocol) is connected or not. Once \n * the DCP session has ended, this object has no purpose and is not reusable.\n * \n * @param {object} target Object (usually a dcpConfig fragment) describing the target.\n * This object may contain the following properties; 'location' is\n * mandatory:\n * - location: a URL or DcpURL that is valid from the Internet\n * - friendLocation: a DcpURL that is valid from an intranet; if\n * both location and friendLocation specified, the best one will\n * be chosen by examining IP addresses (assuming an IP bearer).\n * - identity: an object with an address property which is a promise\n * that resolves to an instance of wallet.Address which represents\n * to the target's identity; this overrides the initiator's \n * identity cache unless options.strict is truey.\n * \n * @param {Keystore} [idKeystore] The keystore used to sign messages; used for non-repudiation.\n * If not specified, a dynamically-generated keystore will be used.\n * \n * @param {object} [options] Extra connection options that aren't defined via dcpConfig.dcp.connectionOptions.\n * These options include:\n * - identityUnlockTimeout: Number of (floating-point) seconds to leave the identity \n * keystore unlocked between invocations of Connection.send\n *\n * @returns instance of Connection that is specific to a target URL but not a role\n */\n constructor(target, idKeystore, options = {})\n {\n super('Connection');\n this.id = ++globalConnectionId;\n this.debugLabel = `connection(g:${this.id}):`;\n\n /* polymorphism strategy: rewrite to (configFragment, idksPromise, options) */\n if (target instanceof DcpURL)\n target = { location: target };\n else if (DcpURL.isURL(target))\n target = { location: new DcpURL(target) };\n else if (target instanceof String || typeof target === 'string')\n target = { location: new DcpURL(target) };\n assert(typeof target === 'object', target.location);\n\n /* idKeystore is always resolved by the time a session is established. 
*/\n if (!idKeystore)\n this.identityPromise = wallet.getId();\n else if (idKeystore instanceof Promise)\n this.identityPromise = idKeystore;\n else if (idKeystore instanceof wallet.Keystore)\n this.identityPromise = Promise.resolve(idKeystore); \n else if (idKeystore instanceof wallet.Address)\n this.identityPromise = Promise.resolve(new wallet.Keystore(idKeystore, '')); \n\n this.identityPromise.then((keystore) => {\n /* This always happens by the time a role is assumed */\n delete this.identityPromise;\n this.identity = keystore;\n this.emit('ready');\n debugging('connection') && console.debug(this.debugLabel, 'identity is', this.identity.address);\n });\n\n this.target = target;\n this.url = this.target.location;\n \n // Init internal state / vars\n this.state = new Synchronizer(CONNECTION_STATES[0], CONNECTION_STATES);\n // DO NOT USE this.state.on('change', (s) => this.emit('readyStateChange', s) );\n\n this.dcpsid = null;\n this.peerAddress = null;\n this.transport = null;\n this.messageFactory = new MessageFactory(this);\n this.messageLedger = new MessageLedger(this);\n this.authorizedSender = null;\n this.preDisconnectState = null;\n \n this.Message = ConnectionMessage(this);\n this.Request = ConnectionRequest(this.Message);\n this.Response = ConnectionResponse(this.Message);\n this.Batch = ConnectionBatch(this.Message);\n this.Ack = ConnectionAck(this.Message);\n this.ErrorPayload = ErrorPayloadCtorFactory(this);\n this.connectTime = Date.now();\n\n this.receiver = new Receiver(this);\n this.sender = new Sender(this);\n \n debugging('connection') && console.debug(this.debugLabel, `new connection#${this.id}; ${this.url}`);\n\n /* Create a connection config as this.options which takes into\n * account system defaults and overrides for specific urls, origins, etc.\n */\n this.options = leafMerge(\n ({ /* hardcoded defaults insulate us from missing web config */\n 'connectTimeout': 90,\n 'lingerTimeout': 1800,\n 'allowBatch': true,\n 'maxMessagesPerBatch': 100,\n 'identityUnlockTimeout': 300,\n 'ttl': {\n 'min': 15,\n 'max': 600,\n 'default': 120\n },\n 'transports': [ 'socketio' ],\n }),\n dcpConfig.dcp.connectionOptions.default,\n this.url && dcpConfig.dcp.connectionOptions[this.url.hostname],\n this.url && dcpConfig.dcp.connectionOptions[this.url.origin],\n options\n );\n\n /* draw out errors quickly in dev */\n if ((process.env.DCP_NETWORK_CONFIG_BUILD || dcpConfig.build) === 'debug')\n {\n this.options.maxMessagesPerBatch /= 10;\n \n /* short timeouts and debuggers don't get along well */\n if (dcpEnv.platform === 'nodejs' && !(requireNative('module')._cache.niim instanceof requireNative('module').Module))\n {\n this.options.connectTimeout /= 10;\n this.options.lingerTimeout /= 20;\n this.options.identityUnlockTimeout /= 10;\n }\n }\n\n assert(this.options.identityUnlockTimeout > 0);\n assert(this.options.connectTimeout > 0);\n assert(this.options.lingerTimeout > 0);\n assert(typeof this.options.ttl.min === 'number');\n assert(typeof this.options.ttl.max === 'number');\n assert(typeof this.options.ttl.default === 'number');\n \n this.backoffTimeIterator = makeEBOIterator(500, dcpConfig.build === 'debug' ? 5000 : 60000); /** XXXwg make this configurable */\n\n this.secureLocation = determineIfSecureLocation(this);\n this.loggableDest = '<generic>';\n }\n\n /**\n * Specialize an instance of Connection for either initiator or target behaviour. Once specialized,\n * the role cannot be changed. 
This happens based on methods invoked; connect() or accept() cause\n * the change.\n *\n * This specialization also implies that the connection is fully ready for use, including resolution\n * of the identity promise if necessary. This is perhaps not the best place to ensure that, but it\n * provides a reliable - and already async - waypoint to observe that event.\n */\n async a$assumeRole(myRole)\n {\n assert(myRole === role.initiator || myRole === role.target);\n\n if (this.role === myRole)\n return;\n this.role = myRole;\n \n if (this.role === role.target)\n {\n this.debugLabel = `connection(t:${this.id}):`;\n this.sender.debugLabel = `sender(t#${this.id}):`;\n this.messageLedger.debugLabel = `message-ledger(t#${this.id}):`;\n this.loggableDest = '<target>';\n this.hasNtp = true;\n }\n else\n {\n this.debugLabel = `connection(i:${this.id}):`;\n this.sender.debugLabel = `sender(i#${this.id}):`;\n this.messageLedger.debugLabel = `message-ledger(i#${this.id}):`;\n this.loggableDest = this.url.href;\n this.hasNtp = false;\n }\n\n debugging('connection') && console.debug(this.debugLabel, `connection #${this.id} is ${this.role} for ${this.url}`);\n if (!this.identity)\n {\n assert(this.identityPromise);\n debugging('connection') && console.debug(this.debugLabel, `waiting for identity resolution`);\n await this.identityPromise;\n }\n }\n\n /**\n * API to establish a DCP connection. Implied by send().\n *\n * When invoked by the initiator, this method establishes the connection by connecting\n * to the target url provided to the constructor.\n */\n async connect() // eslint-disable-line require-await\n {\n if (this.role == role.target)\n return;\n \n if (!this.role)\n await this.a$assumeRole(role.initiator);\n \n if (this.state.is('initial'))\n {\n if (!this.connectPromise)\n this.connectPromise = this.a$_connect().then(() => delete this.connectPromise);\n return this.connectPromise;\n }\n\n if (this.state.is('disconnected'))\n {\n if (!this.connectPromise)\n this.connectPromise = this.a$_reconnect().then(() => delete this.connectPromise);\n return this.connectPromise;\n }\n \n if (this.state.is('connecting'))\n {\n assert(this.connectPromise);\n return this.connectPromise;\n }\n\n if (this.state.is('established'))\n return;\n \n if (this.state.in(['closed', 'close-wait', 'closing']))\n throw new DCPError('Connection already closed', 'DCPC-1015');\n\n throw new Error('impossible');\n }\n\n /**\n * Performs a reconnection for connections which are in the disconnected state, and\n * tries to send any in-flight or enqueued messages as soon as that happens.\n */\n async a$_reconnect()\n {\n var transport;\n\n this.state.testAndSet('disconnected', 'connecting');\n try\n {\n do\n {\n transport = await this.a$connectToTargetTransport();\n } while (!transport && (this.transport && !this.transport.ready()) && !this.state.in(['closed', 'close-wait', 'closing']));\n\n debugging('connection') && console.debug(this.debugLabel, `reconnected via transport ${transport.socket.id}`);\n \n this.useNewTransport(transport);\n }\n catch (error)\n {\n if (error.code !== 'DCPC-1016' && error.code !== 'DCPC-1015')\n {\n /* Unreached unless there are bugs. 
*/\n throw error;\n } \n this.close(error, true);\n }\n }\n\n async a$_connect()\n {\n const connectTimeout = setTimeout(() => { this.close(new DCPError('Connection could not be established within thirty seconds', 'DCPC-1006'), true) }, 30000)\n \n if (dcpEnv.platform === 'nodejs')\n connectTimeout.unref()\n \n var presharedPeerAddress, establishResults;\n var targetIdentity = await this.target.identity;\n var transport;\n \n assert(this.role === role.initiator);\n\n this.state.set('initial', 'connecting');\n do\n {\n transport = await this.a$connectToTargetTransport().catch((error) =>\n {\n debugging('connection') && console.debug(`${this.debugLabel} error connecting to target on transport layer:`, error);\n return { ready: () => {return false} };\n });\n } while(!transport.ready());\n this.adopt(transport);\n\n establishResults = await this.sender.establish().catch(error => {\n debugging('connection') && console.debug(this.debugLabel, `Could not establish DCP session over ${this.transport.name}:`, error);\n this.close(error, true);\n throw error;\n });\n const peerAddress = new wallet.Address(establishResults.peerAddress);\n const dcpsid = establishResults.dcpsid;\n debugging('connection') && console.debug(this.debugLabel, 'dcpsid is', dcpsid);\n \n if (!this.options.strict && targetIdentity && determineIfSecureConfig())\n {\n if ( false\n || typeof targetIdentity !== 'object'\n || typeof targetIdentity.address !== 'object'\n || !(targetIdentity.address instanceof wallet.Address))\n targetIdentity = { address: new wallet.Address(targetIdentity) }; /* map strings and Addresses to ks ducks */\n\n presharedPeerAddress = targetIdentity.address;\n debugging('connection') && console.debug(this.debugLabel, 'Using preshared peer address', presharedPeerAddress);\n }\n this.ensureIdentity(peerAddress, presharedPeerAddress);\n\n /* At this point, new session is valid & security checks out - make Connection instance usable */\n this.peerAddress = peerAddress;\n if (this.dcpsid)\n throw new DCPError(`Reached impossible state in connection.js; dcpsid already specified ${this.dcpsid} (${this.url})`, 'DCPC-1012');\n\n this.state.set('connecting', 'established'); /* established => dcpsid has been set */\n clearTimeout(connectTimeout);\n this.emit('session', (this.dcpsid = dcpsid));\n this.emit('connect', this.url);\n this.sender.notifyTransportReady();\n }\n\n /**\n * unreference any objects entrained by this connection so that it does not prevent\n * the node program from exiting naturally.\n */\n unref()\n {\n if (this.connectAbortTimer && this.connectAbortTimer.unref && dcpEnv.platform === 'nodejs')\n this.connectAbortTimer.unref();\n }\n\n /**\n * Method is invoked when the transport disconnects. Transport instance is responsible for its own\n * finalization; Connection instance is responsible for finding a new transport, resuming the\n * connection, and retransmitting any in-flight message.\n *\n * @param {object} transport the transport instance that triggered this handler. In some cases, it\n * is possible that this event is not serviced until after the connection\n * has already acquired a new transport instance, e.g. in a Target where\n * the initiator switched networks. 
This implies that it is possible for\n * more 'connect' events to be emitted than 'disconnect' events.\n */\n transportDisconnectHandler(transport)\n {\n try\n { \n if (this.state.in(['disconnected', 'closing', 'close-wait', 'closed'])) /* transports may fire this more than once */\n return;\n\n if (transport !== this.transport) /* event no longer relevant */\n return;\n\n if (this.transport)\n {\n transport.close();\n delete this.transport;\n }\n \n if (this.state.is('established'))\n {\n this.state.set('established', 'disconnected');\n this.emit('disconnect', this.url); /* UI hint: \"internet unavailable\" */\n debugging('connection') && console.debug(this.debugLabel, `Transport disconnected from ${this.url}; ${this.sender.inFlight ? 'have' : 'no'} in-flight message`);\n \n if (!this.dcpsid) /* hopefully impossible? */\n {\n debugging('connection') && console.debug(this.debugLabel, 'Not reconnecting - no session');\n return;\n }\n }\n \n if (this.role === role.target)\n {\n /* targets generally can't reconnect due to NAT */\n debugging('connection') && console.debug(this.debugLabel, `Waiting for initiator to reconnect for ${this.dcpsid}`);\n return;\n }\n \n if (this.dcpsid && !this.sender.inFlight && this.options.onDemand)\n {\n debugging('connection') && console.debug(this.debugLabel, `Not reconnecting ${this.dcpsid} until next message`);\n return;\n }\n \n if (this.state.is('connecting') && (!this.dcpsid || !this.peerAddress))\n {\n debugging('connection') && console.debug(this.debugLabel, `Disconnected while connecting, establishing transport and re-sending connect request.`);\n this.a$_reconnect();\n return;\n }\n\n /* At this point, we initiate a (re)connect attempt because either\n * - we haven't connected yet,\n * - we have something to send, or\n * - we are not an on-demand connection\n */\n if (!this.state.is('connecting'))\n this.connect();\n }\n catch(error)\n {\n debugging('connection') && console.debug(error);\n this.close(error, true);\n \n if (error.code !== 'DCPC-1016' && error.code !== 'DCPC-1015')\n {\n /* Unreached unless there are bugs. */\n throw error;\n }\n }\n }\n \n /**\n * Initiators only\n *\n * Connect to a target at the transport level.\n * - Rejects when we give up on all transports.\n * - Resolves with a transport instance when we connect to one.\n *\n * The connection attempt will keep a node program \"alive\" while it is happening.\n * The `autoUnref` connectionOption and unref() methods offer ways to make this not\n * happen.\n */\n async a$connectToTargetTransport()\n {\n const that = this;\n const availableTransports = [].concat(this.options.transports);\n var quitMsg = false; /* not falsey => reject asap, value is error message */\n var quitCode = undefined;\n var boSleepIntr; /* if not falsey, a function that interrupts the backoff sleep */\n var transportConnectIntr; /* if not falsey, a function that interrupts the current connection attempt */\n\n // Already trying to connect to target, don't try multiple times until we've aborted one attempt\n if (this.connectAbortTimer)\n return;\n \n /* This timer has the lifetime of the entire connection attempt. 
When we time out,\n * we set the quitMsg to get the retry loop to quit, then we interrupt the timer so\n * that we don't have to wait for the current backoff to expire before we notice, and\n * we expire the current attempt to connect right away as well.\n */\n this.connectAbortTimer = setTimeout(() => {\n quitMsg = 'connection timeout';\n if (boSleepIntr) boSleepIntr();\n if (transportConnectIntr) transportConnectIntr();\n }, this.options.connectTimeout * 1000);\n\n if (this.options.autoUnref)\n this.unref();\n\n /* cleanup code called on return/throw */\n function cleanup_ctt()\n {\n clearTimeout(that.connectAbortTimer);\n delete that.connectAbortTimer;\n }\n\n /* Connect to target with a specific transport. */\n /* Resolves with { bool success, obj transport } or rejects with { error } if the transport cannot connect*/\n function a$connectWithTransport(transportName)\n { \n transportConnectIntr = false;\n\n return new Promise((connectWithTransport_resolve, connectWithTransport_reject) => { \n const TransportClass = Transport.require(transportName);\n const transport = new TransportClass(that.target, that.options[transportName]);\n var ret = { transport };\n\n function cleanup_cwt()\n {\n for (let eventName of transport.eventNames())\n for (let listener of transport.listeners(eventName))\n transport.off(eventName, listener);\n }\n \n /* In the case where we have a race condition in the transport implementation, arrange things\n * so that we resolve with whatever fired last if we have a double-fire on the same pass of \n * the event loop.\n */\n transport.on('connect', () => { cleanup_cwt(); ret.success=true; connectWithTransport_resolve(ret) });\n transport.on('error', (error) => { cleanup_cwt(); connectWithTransport_reject(error) });\n transport.on('connect-failed', (error) => {\n cleanup_cwt();\n ret.success = false;\n ret.error = error;\n debugging() && console.debug(`Error connecting to ${that.url};`, error);\n connectWithTransport_resolve(ret);\n });\n \n /* let the connectAbortTimer interrupt this connect attempt */\n transportConnectIntr = () => { transport.close() };\n });\n }\n \n if (availableTransports.length === 0)\n {\n cleanup_ctt();\n return Promise.reject(new DCPError('no transports defined', 'DCPC-1014'));\n }\n \n /* Loop while trying each available transport in turn. 
Sleep with exponential backoff between runs */\n while (!quitMsg)\n {\n for (let transportName of availableTransports)\n {\n try\n {\n const { success, error, transport } = await a$connectWithTransport(transportName);\n\n /* Have connected to the remote at the transport level - OUT */\n if (success === true)\n {\n transportConnectIntr = false;\n cleanup_ctt();\n \n return transport;\n }\n\n /* Fast-fail: certain - but few - HTTP status codes let us know that this (or any) transport\n * will never work, so don't try those again.\n */\n if (error && error.httpStatus)\n {\n switch(error.httpStatus)\n {\n case 301: case 302: case 303: case 307: case 308:\n debugging('connection') && console.debug(this.debugLabel, `HTTP status ${error.httpStatus}; won't try again with ${transportName}`);\n availableTransports.splice(availableTransports.indexOf(transportName), 1);\n break;\n case 400: case 403: case 404:\n debugging('connection') && console.debug(this.debugLabel, `HTTP status ${error.httpStatus}; won't try again.`);\n quitMsg = error.message;\n quitCode = 'HTTP_' + error.httpStatus || 0;\n break;\n default:\n debugging('connection') && console.debug(this.debugLabel, `HTTP status ${error.httpStatus}; will try again with ${transportName}`);\n break;\n }\n }\n }\n catch (impossibleError)\n {\n /* transport connection attempts should never throw. */\n debugging('connection') && console.debug(this.debugLabel, `Error connecting to ${this.url} with ${transportName}; won't try again:`, impossibleError);\n availableTransports.splice(availableTransports.indexOf(transportName), 1);\n }\n }\n \n if (availableTransports.length === 0)\n {\n quitMsg = 'all transports exhausted';\n break;\n }\n \n /* Go to (interruptible) sleep for a while before trying again */\n const backoffTimeMs = this.backoffTimeIterator.next().value;\n debugging('connection') && console.debug(this.debugLabel, 'trying again in', Number(backoffTimeMs / 1000).toFixed(2), 'seconds');\n const boSleepPromise = a$sleepMs(backoffTimeMs);\n boSleepIntr = boSleepPromise.intr;\n await boSleepPromise;\n boSleepIntr = false;\n }\n\n /* The only way we get here is for us to discover that the connection is unconnectable - eg \n * reject timer has expired or similar.\n */\n cleanup_ctt();\n throw new DCPError(quitMsg, 'DCPC-1016', quitCode);\n }\n\n /**\n * Method which must be invoked whenever a new transport needs to be assigned to the connection. \n * If we have previously adopted a transport, we close it first, which will prevent \n * [by transport definition] any message handlers from firing, even if the old transport instance\n * has buffered traffic.\n *\n * @param {object} transport transport instance\n */\n adopt(transport)\n {\n if (this.transport)\n this.transport.close();\n\n transport.on('message', (m) => this.handleMessage(m));\n transport.on('end', () => this.transportDisconnectHandler(transport));\n transport.on('close', () => this.transportDisconnectHandler(transport));\n \n this.transport = transport;\n }\n \n \n /**\n * Method that gets invoked when there is a new transport available for adoption.\n * This will adjust the state of the connection, adopt the transport then tell\n * the sender to pump the message queue.\n * @param {object} transport transport instance \n */\n useNewTransport(transport)\n {\n if (this.state.in(['closing', 'close-wait', 'closed']))\n {\n debugging('connection') && console.debug(`${this.debugLabel} got a new transport during closing. 
closing the new transport and not completing transport adoption.`)\n transport.close();\n return;\n }\n \n var preDisconnectState = (!this.dcpsid || !this.peerAddress) ? 'connecting' : 'established';\n if (this.state.isNot(preDisconnectState))\n this.state.set(['connecting', 'disconnected'], preDisconnectState);\n this.adopt(transport);\n this.emit('connect', this.url); // UI hint: \"internet available\" \n this.sender.notifyTransportReady();\n }\n \n /**\n * Method that must be invoked by the target to \"accept\" a new DCP Connection request\n * at the transport layer.\n * @param {object} transport \n */\n async accept(transport)\n {\n assert(!this.role);\n await this.a$assumeRole(role.target);\n this.state.set('initial', 'connecting');\n this.adopt(transport);\n }\n \n /**\n * This method is invoked by the target when it has handled the initial connect request from\n * the initiator, which contains the peerAddress and the first half of the dcpsid (the second half is\n * populated by receiver.handleFirstRequest before being passed here). It transitions the connection \n * into an established state at the protocol level.\n * Note - this is really not the right design for this, but it is invoked from handleFirstRequest\n * in ./receiver.js\n *\n * @param {string} dcpsid dcpsid\n * @param {wallet.Address} peerAddress Address of peer\n */\n establishTarget(dcpsid, peerAddress) {\n assert(this.role === role.target);\n \n this.connectResponseId = Symbol('dummy'); // un-register ConnectResponse\n this.peerAddress = peerAddress;\n if (this.dcpsid)\n throw new DCPError(`Reached impossible state in connection.js; dcpsid already specified ${this.dcpsid}!=${dcpsid} (${this.url})`, 'DCPC-1005');\n this.emit('session', (this.dcpsid = dcpsid));\n debugging() && console.debug(this.debugLabel, 'dcpsid is', dcpsid);\n\n this.loggableDest = this.role === role.initiator ? this.url : peerAddress;\n this.state.set('connecting', 'established'); /* established => dcpsid has been set */\n\n debugging('connection') && console.debug(this.debugLabel, `Established session ${this.dcpsid} with ${this.peerAddress} for ${this.url}`);\n }\n\n /**\n * Check to see if the peer address conflicts with what we have in the global identity cache;\n * it does, throw an exception.\n */\n ensureIdentity (peerAddress, presharedPeerAddress)\n {\n let idc = getGlobalIdentityCache();\n let noConflict = idc.learnIdentity(this.url, peerAddress, presharedPeerAddress);\n\n if (!noConflict)\n throw new DCPError(`**** Security Error: Identity address ${peerAddress} does not match the saved key for ${this.url}`, 'DCPC-EADDRCHANGE');\n }\n \n \n /**\n * This method uses the first request (if we're target) or ack to the first request \n * (if we're initiator) to memoize the address of the peer authorized to send to us. \n * The first message must only be a request (since it comes from sender.specialFirstSend)\n * that has a connect operation, or an acknowledgement of the first request. 
\n * All future messages' owners will be validated against this authorized sender.\n * @param {Object} message \n */\n setAuthorizedSender(message)\n {\n if (message.body.type !== 'request' && message.body.type !== 'ack')\n throw new DCPError('First protocol message was not a request or ack', 'DCPC-1017');\n \n if (message.body.type === 'request' && message.body.payload.operation !== 'connect')\n throw new DCPError('First protocol message did not contain the correct payload', 'DCPC-1017');\n \n if (message.body.type === 'ack' && this.sender.inFlight.message.payload.operation !== 'connect')\n throw new DCPError('First protocol acknowledgement was not for connect request', 'DCPC-1017');\n \n this.authorizedSender = message.owner;\n }\n \n /**\n * Emits an error event with the relevant error and closes the connection immediately.\n * @param {string} errorMessage \n * @param {string} errorCode \n */\n \n handleValidationError(errorMessage, errorCode)\n {\n var messageError;\n \n debugging('connection') && console.debug(this.debugLabel, 'Message failed validation -', errorMessage);\n this.emit('error', (messageError = new DCPError(`message failed validation: ${errorMessage}`, errorCode)))\n this.close(messageError, true);\n }\n\n /**\n * This method validates the message owner, signature and body before passing it onto\n * either the receiver (for a request, response or batch) or the messageLedger (for an ack).\n * If it's a request, response or batch, this method also provokes the connection to \n * send an acknowledgement (ack) to the peer to let them know we got their message.\n * XXXwg this code needs an audit re error handling: what message error should we be emitting?\n * why do we keep working after we find an error?\n * XXXsc did some auditing. we happy now?\n * @param {string} JSON-encoded unvalidated message object\n */\n async handleMessage (messageJSON) {\n var validation;\n var message;\n\n if (this.state.is('closed')) {\n debugging('connection') && console.debug(this.debugLabel, 'handleMessage was called on a closed connection.');\n return;\n }\n\n try\n {\n message = typeof messageJSON === 'object' ? 
messageJSON : JSON.parse(messageJSON);\n debugging('wire') && console.debug(this.debugLabel, `handleMessage: ${String(message && message.body && message.body.type).padEnd(10, ' ')} <- ${this.loggableDest}`);\n }\n catch(error)\n {\n console.error('connection::handleMessage received unparseable message from peer:', error);\n this.emit('error', error);\n return;\n }\n \n /**\n * We always ack a duplicate transmission.\n * This must happen before validation since during startup we may lack a\n * nonce or dcpsid (depending on whether initiator or target + race).\n */\n if (this.isDuplicateTransmission(message)) {\n debugging('connection') && console.debug(this.debugLabel, 'duplicate message:', message.body);\n debugging('wire') && console.debug(this.debugLabel, `dup message ack: ${String(message.body.type).padEnd(10, ' ')} -> ${this.loggableDest}`);\n\n this.sendAck(this.lastAckSigned) \n return;\n }\n\n debugging('connection') && console.debug(this.debugLabel, `received message ${message.body.type} ${message.body.id}; nonce=`, message.body.nonce);\n\n validation = this.validateMessageOwner(message)\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, 'DCPC-1002');\n return;\n }\n \n validation = this.validateMessageSignature(message);\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, 'DCPC-1003');\n return;\n }\n\n validation = this.validateMessageBody(message);\n if (validation.success !== true)\n {\n this.handleValidationError(validation.errorMessage, validation.errorCode || 'DCPC-1004'); /* messages of type 'unhandled-message' may contain more information about the failure */\n return;\n }\n \n if (message.body.type === \"ack\") {\n const ack = new this.Ack(message.body);\n this.messageLedger.handleAck(ack);\n return;\n } else if (message.body.type !== 'unhandled-message') {\n this.lastMessage = message;\n await this.ackMessage(message);\n }\n \n this.receiver.handleMessage(message);\n }\n\n \n /**\n * This method takes either a Request, Response or Batch, creates an ack for it\n * and sends it to the peer. 
This ack contains the nonce we expect on the next\n * message from peer.\n * @param {Connection.Message} message \n */\n async ackMessage(message) {\n debugging('connection') && console.debug(this.debugLabel, 'acking message of type: ', message.body.type);\n const ack = new this.Ack(message);\n const signedMessage = await ack.sign(this.identity);\n\n debugging('wire') && console.debug(this.debugLabel, `ackMessage: ${String(message.body.type).padEnd(10, ' ')} -> ${this.loggableDest}`);\n\n this.sendAck(signedMessage);\n this.lastAck = ack;\n this.lastAckSigned = signedMessage;\n }\n\n /**\n * Checks if the batch we just received has the same nonce\n * as the most-recently received batch.\n * @param {object} messageJSON\n */\n isDuplicateTransmission(messageJSON) {\n return this.lastMessage && this.lastMessage.body.nonce && this.lastMessage.body.nonce === messageJSON.body.nonce;\n }\n\n /**\n * Validate that the message came from the appropriate sender.\n * @param {Object} message the message to validate\n * @returns {Object} returns an object `ret` with either ret.success = true, \n * or ret.success = false accompanied by another property ret.errorMessage\n */\n validateMessageOwner(message)\n {\n if (!this.authorizedSender)\n {\n /* Capture the initial identity of the remote end during the connect operation */\n this.setAuthorizedSender(message);\n return { success: true }\n }\n else if (message.owner !== this.authorizedSender)\n {\n return { success: false, errorMessage: \"message came from unauthorized sender\" }\n }\n return { success: true }\n }\n \n /**\n * Validate that the signature was generated from this message body\n * @param {Object} message\n * @returns {Object} with properties 'success' and 'errorMessage'. When the message is valid on its \n * face, the success property is true, otherwise it is is false. When it is false,\n * the errorMessage property will be a string explaining why.\n */\n validateMessageSignature(message)\n {\n if (!message.signature) {\n debugging('connection') && console.warn(\"Message does not have signature, aborting connection\");\n return { success: false, errorMessage: \"message is missing signature\" };\n }\n \n const owner = new wallet.Address(message.owner);\n const signatureValid = owner.verifySignature(message.body, message.signature);\n\n if (!signatureValid)\n {\n debugging('connection') && console.warn(\"Message has an invalid signature, aborting connection\");\n return { success: false, errorMessage: \"invalid message signature\" };\n }\n\n return { success: true };\n }\n \n /**\n * This method is used to perform validation on all types of messages.\n * It validates the DCPSID, nonce, and the peerAddress.\n * @param {Object} message\n * @returns {Object} with properties 'success' and 'errorMessage'. When the message is valid on its \n * face, the success property is true, otherwise it is is false. When it is false,\n * the errorMessage property will be a string explaining why.\n *\n */\n validateMessageBody(message)\n {\n try\n {\n if (message.body.type === 'unhandled-message')\n {\n /* This special message type may not have a dcpsid, peerAddress, etc., so it might not\n * validate. It is also not a \"real\" message and only used to report ConnectionManager routing \n * errors, so we just report here, drop it, and close the connection.\n *\n * Note also that this is probably the wrong way to handle this case - restarting daemons - but\n * that is a problem for another day. 
/wg nov 2021\n */\n debugging('connection') && console.warn(this.debugLabel, \"Target Error - target could not process message.\", JSON.stringify(message.body),\n \"Aborting connection.\");\n return { success: false, errorMessage: `target could not process message (${message.body.payload && message.body.payload.message || 'unknown error'})`, errorCode: message.body.payload && message.body.payload.code}\n }\n if (this.peerAddress && !this.peerAddress.eq(message.owner))\n {\n debugging('connection') && console.warn(this.debugLabel,\n \"Received message's owner address does not match peer address, aborting connection\\n\",\n \"(owner addr)\", message.owner, '\\n',\n \"(peer addr)\", this.peerAddress);\n return { success: false, errorMessage: \"received message owner does not match peer address\" };\n }\n\n if (this.state.in(['established', 'closing', 'close-wait']) && message.body.type !== 'unhandled-message')\n {\n const body = message.body;\n\n assert(this.peerAddress); /* should be set in connect */\n /**\n * Security note:\n * We don't require the dcpsid to match on an ack because the connect response\n * ack doesn't have a dcpsid until after it is processed. Also ack's are protected\n * by ack tokens and signatures, so this doesn't leave a hole, just an inconsistency.\n */\n if (body.type !== 'ack' && body.dcpsid !== this.dcpsid)\n {\n debugging('connection') && console.warn(this.debugLabel,\n \"Received message's DCPSID does not match, aborting connection\\n\",\n \"Message owner:\", message.owner, '\\n',\n \"(ours)\", this.dcpsid, (Date.now() - this.connectTime)/1000, \"seconds after connecting - state:\", this.state._, \"\\n\", \n \"(theirs)\", body.dcpsid);\n if(body.dcpsid.substring(0, body.dcpsid.length/2) !== this.dcpsid.substring(0, this.dcpsid.length/2)){\n debugging('connection') && console.warn(\" Left half of both DCPSID is different\");\n }\n if(body.dcpsid.substring(body.dcpsid.length/2 + 1, body.dcpsid.length) !== this.dcpsid.substring(this.dcpsid.length/2 + 1, body.dcpsid.length)){\n debugging('connection') && console.warn(\" Right half of both DCPSID is different\");\n }\n return { success: false, errorMessage: \"DCPSID do not match\" };\n }\n /* can get close in middle of connecting, which will have no nonce.*/\n if (body.type !== 'ack' && this.lastAck.nonce !== body.nonce && (body.payload && body.payload.operation !== 'close'))\n {\n /* When Target sends back ConnectionLessErrorResponse, it uses the nonce of the message that caused an error. */\n if (this.sender.inFlight && this.sender.inFlight.message.nonce === body.nonce)\n {\n debugging('connection') && console.debug(`${this.debugLabel} Received messages nonce matches nonce of our current inFlight message.`,\n \"There was a problem sending this message. Aborting connection. 
Reason:\\n\", body.payload);\n return { success: false, errorMessage: \"current inflight message returned an error\" }\n }\n debugging('connection') && console.warn(this.debugLabel,\"Received message's nonce does not match expected nonce, aborting connection\\n\");\n debugging('connection') && console.debug(this.debugLabel, this.lastAck.nonce, body.nonce);\n return { success: false, errorMessage: \"received message's nonce does not match expected nonce\" };\n }\n }\n\n return { success: true };\n }\n catch(error)\n {\n console.error('message validator failure:', error);\n return { success: false, errorMessage: 'validator exception ' + error.message };\n }\n\n return { success: false, errorMessage: 'impossible code reached' }; // eslint-disable-line no-unreachable\n }\n\n /**\n * Targets Only.\n * The receiver creates a special connect response and the connection\n * needs to know about it to get ready for the ack. See `isWaitingForAck`.\n * @param {Message} message message we are sending out and waiting to\n * ack'd, probably a batch containing the response.\n */\n registerConnectResponse(message) {\n this.connectResponseId = message.id;\n }\n\n /**\n * Targets only\n * During the connection process a target sends a connect\n * response to an initiator and the initiator will ack it. Since transports\n * are not tightly coupled, we have no authoritative way to route the ack back\n * to the right connection. So a connection briefly registers the ack it\n * is looking for in this case. It will formally validate the ack after routing.\n * @param {string} messageId id of the message this ack is acknowledging.\n */\n isWaitingForAck(messageId) {\n return messageId === this.connectResponseId;\n }\n\n /**\n * Put connection into close-wait state so that a call to `close`\n * in this state will *not* trigger sending a `close` message to the peer.\n * Then call close.\n *\n * @note: This function is called when the remote end of the transport sends\n * a close command, from receiver::handleOperation. This impllies that\n * that we must be in established or later state.\n */\n closeWait (errorCode = null)\n {\n var preCloseState, reason;\n \n debugging('connection') && console.debug(this.debugLabel, `responding to close. state=closeWait dcpsid=${this.dcpsid}`);\n\n if (this.state.is('closed'))\n {\n debugging('connection') && console.debug(this.debugLabel, `remote asked us to close a closed connection; dcpsid=${this.dcpsid}`);\n return;\n }\n\n // continue with close in either case\n reason = `Received close from peer with Error Code ${errorCode}`;\n if (this.role === role.target)\n reason += ` (${this.url})`;\n else\n reason += ` (${this.debugLabel}${this.peerAddress.address})`;\n\n reason = new DCPError(reason, errorCode || 'DCPC-1011');\n\n // If we're already closing, wait for it to complete then resolve\n // WARNING: any place we transition to closing or close-wait, we MUST guarantedd\n // that 'end' will be emitted, or this code will hang forever!\n if (this.state.in(['close-wait', 'closing'])) {\n return new Promise((resolve) => {\n this.once('end', resolve) /* eventually fired by doClose elsewhere */\n });\n }\n\n /* XXXwg - this should only be established->close-wait. Why more? 
*/\n this.state.set(['disconnected', 'connecting', 'established'], 'close-wait');\n \n /* Set preCloseState to close-wait so doClose doesn't send a close message back */\n preCloseState = this.state.valueOf();\n return this.doClose(preCloseState, reason, true);\n }\n\n /**\n * This method will begin closing the protocol connection. It transitions\n * the protocol into the correct state, and then begins the work of closing.\n * \n * @param {string|Error} [reason] Either an Error or a message to use in the Error that will reject pending sends.\n * @param {boolean} [immediate] When true, the connection will not deliver any pending messages and instead\n * immediately send the peer a 'close' request. \n *\n * @return a Promise which resolves when the connection has been confirmed closed and the end event has been fired.\n */\n close (reason='requested', immediate=false)\n {\n if (this.state.is('initial'))\n {\n this.state.set('initial', 'closed');\n this.emit('close'); /* Don't emit dcpsid */\n }\n if (this.state.is('closed')) return Promise.resolve();\n\n const preCloseState = this.state.valueOf();\n debugging('connection') && \n console.debug(this.debugLabel, \n `close; dcpsid=${this.dcpsid} state=${preCloseState} immediate=${immediate} reason:`, reason);\n\n // If we're already closing, wait for it to complete then resolve\n if (this.state.in(['close-wait', 'closing'])) {\n return new Promise((resolve) => {\n this.once('end', resolve)\n });\n }\n\n this.state.set(['connecting', 'established', 'disconnected'], 'closing');\n\n // Perform actual work of closing\n return this.doClose(preCloseState, reason, immediate);\n }\n\n /**\n * Sends close message to peer after sending all pending messages.\n * Note that close messages are sent without the expectation of a response.\n * @param {DCPError|string} reason reason for closing\n */\n async sendCloseGracefully(reason) \n {\n debugging('connection') && console.debug(`${this.debugLabel} gracefully sending close message to peer with reason ${reason}`)\n let errorCode = reason instanceof DCPError ? reason.code : 'DCPC-1011';\n \n /* This only resolves when close is next message in queue */\n const closeMessage = await this.prepare('close', { errorCode: errorCode });\n this.sendPreparedMessage(closeMessage);\n this.messageLedger.fulfillMessagePromise(closeMessage.message.id, {});\n }\n \n /**\n * Sends close message to peer immediately. Pending messages will not be sent.\n * Note that close messages are sent without expectation of response.\n * @param {DCPError|string} reason reason for closing\n */\n async sendCloseImmediately(reason)\n {\n debugging('connection') && console.debug(`${this.debugLabel} immediately sending close message to peer with reason ${reason}`);\n let errorCode = reason instanceof DCPError ? reason.code : 'DCPC-1011';\n \n /* Last param being `true` means that prepareMessage will return unsigned message. Does not queue message. */\n const closeMessage = await this.prepare('close', { errorCode: errorCode }, true);\n \n if (this.sender.inFlight)\n closeMessage.nonce = this.sender.inFlight.message.nonce;\n else\n closeMessage.nonce = this.sender.nonce;\n \n let signedCloseMessage = await closeMessage.sign();\n \n /* Overwrite the in-flight message because we don't care to deliver pending messages */\n this.sender.inFlight = { message: closeMessage, signedMessage: signedCloseMessage };\n this.sender.sendInFlightMessage();\n }\n \n /**\n * This method performs the core close functionality. 
It appropriately sends the close message\n * to the peer, fails any pending transmissions, shuts down our sender and underlying transport\n * and puts us into the 'closed' state, indicating this connection object is now useless.\n * When called from closeWait, it does not send a close message.\n * @param {string} preCloseState the state that the connection was in at the start of the\n * invocation of close() or closeWait()\n *\n * @note: this function is not reentrant due to closeGracefully\n */\n async doClose(preCloseState, reason, immediate) {\n const dcpsid = this.dcpsid;\n var rejectErr;\n\n try\n {\n // Emit the close event the moment we know we are going to close, \n // so we can catch the close event and reopen the connection\n //\n // This implies that no API functions which call doClose may await between\n // their invocation and their call to doClose!\n this.emit('close', dcpsid /* should be undefined in initial state */);\n\n assert(this.state.in(['closing', 'close-wait']));\n if (preCloseState === 'established' && this.transport) {\n try {\n if (immediate) {\n await this.sendCloseImmediately(reason);\n } else {\n await this.sendCloseGracefully(reason);\n }\n } catch(e) {\n debugging() && console.warn(`Warning: could not send close message to peer. connectionid=${this.id}, dcpsid=,${this.dcpsid}, url=${this.url ? this.url.href : 'unknown url'} - (${e.message})`);\n }\n }\n\n // can delete these now that we've sent the close message\n this.dcpsid = null;\n this.peerAddress = null;\n\n if (reason instanceof DCPError)\n rejectErr = reason;\n else\n {\n let errorMessage = reason instanceof Error ? reason : `Connection to ${this.loggableDest} closed (${reason})`;\n rejectErr = new DCPError(errorMessage, 'DCPC-1013');\n }\n \n // Reject any pending transmissions in the message ledger\n this.messageLedger.failAllTransmissions(rejectErr);\n \n if (this.transport)\n {\n try { this.sender.shutdown(); }\n catch(e) { debugging() && console.warn(this.debugLabel, `Warning: could not shutdown sender; dcpsid=,${dcpsid}`, e); }\n \n try { this.transport.close(); delete this.transport; }\n catch(e) { debugging() && console.warn(this.debugLabel, `Warning: could not close transport; dcpsid=,${dcpsid}`, e); }\n }\n } catch(error) {\n debugging() && console.warn(this.debugLabel, `could not close connection; dcpsid=${dcpsid}, url=${this.url ? this.url.href : 'unknown url'}:`, error);\n }\n finally\n {\n this.state.set(['closing', 'close-wait'], 'closed');\n this.emit('end'); /* end event resolves promises on other threads for closeWait and close (ugh) */\n }\n }\n/**\n * Prepares a non-batchable message that can be sent directly over the wire. Returns when\n * the message has been signed and is ready to be sent. The connection will not be able to send \n * any messages until the prepared message here is either sent or discarded. If 'canBatch = true',\n * will return the unsigned message instead. In this case, enqueuing is handled by\n * `async Connection.send()`, allowing the message to be put in a batch before being signed.\n * @param {...any} messageData Data to build message with. 
Format is:\n * `operation {string}, \n * data {Object} (optional),\n * identity {wallet.Keystore} (optional),\n * canBatch {boolean} (optional)`\n * @returns {Promise<Object>} a promise which resolves to { message, signedMessage }\n */\n\n async prepare(...messageData)\n {\n if (this.state.isNot('established'))\n {\n await this.connect().catch((e) => {\n if (e.code !== 'DCPC-1015') /* If we're closed already, then swallow the error */\n { \n this.close(e, true);\n throw e;\n }\n });\n }\n \n \n let signedMessage, message = messageData[0];\n let canBatch = false;\n \n if (typeof messageData[messageData.length - 1] === 'boolean')\n canBatch = messageData.pop();\n \n if (!message.id)\n {\n message = this.messageFactory.buildMessage(...messageData); \n }\n \n debugging('connection') && console.debug(`${this.debugLabel} Created message ${message.id}.`);\n \n message.ackToken = this.sender.makeAckToken();\n message.batchable = canBatch;\n \n if (canBatch)\n return Promise.resolve(message);\n \n debugging('connection') && console.debug(`${this.debugLabel} Preparing message ${message.id} for sending...`); \n const messageWithNonce = await new Promise((resolve) =>\n {\n // This event is fired in the sender by serviceQueue() when the message is at the top of the queue\n // and has a nonce it can sign with. At this point, we may return the prepared message.\n this.once(`${message.id} ready`, (message) => resolve(message))\n \n this.sender.queue.push(message)\n this.sender.requestQueueService()\n })\n \n signedMessage = await messageWithNonce.sign();\n \n debugging('connection') && console.debug(`${this.debugLabel} Finished preparing message. ${message.id} is ready to be sent.`);\n \n return { message: messageWithNonce, signedMessage: signedMessage };\n }\n\n /**\n * Sends a message to the connected peer. If the connection has not yet been established,\n * this routine will first invoke this.connect(). If the first argument has a 'signedMessage'\n * property, the message is assumed to be prepared and is sent immediately. If not, and the first\n * argument does not have an 'id' property, it will be sent to `async prepare()`, and then put\n * in the message queue.\n * \n * @param {...any} args 3 forms:\n * [operation]\n * [operation, data]\n * [operation, data, identity]\n * @returns {Promise<Response>} a promise which resolves to a response.\n */\n async send(...args)\n {\n if (!this.state.is('established'))\n await this.connect().catch((e) =>\n {\n if (e.code !== 'DCPC-1015') /* If we're closed already, then swallow the error */\n { \n this.close(e, true);\n throw e;\n }\n });\n\n let message = args[0];\n // ie. already prepared\n if (message.signedMessage)\n return this.sendPreparedMessage(message);\n \n // ie. message not hyrdated or is a response, which needs ack token\n if (!message.id || message.type === 'response')\n message = await this.prepare(...args, true);\n\n if (this.state.in(['closing', 'close-wait', 'closed']))\n throw new DCPError(`Connection (${this.id}) is ${this.state}; cannot send. 
(${this.loggableDest})`, 'DCPC-1001');\n\n return this.sender.enqueue(message);\n }\n \n /**\n * Set the sender's flight deck with the given message and send it.\n * Can only be passed a prepared message, which is a promise that only\n * resolves to a message when it is signed with the nonce, so it must\n * be the next message to be sent (or discarded).\n * @param {Object} messageObject\n * @returns {Promise<Response>} \n */\n sendPreparedMessage(messageObject)\n {\n if (!messageObject.signedMessage) return;\n \n const { message, signedMessage } = messageObject;\n assert(!this.sender.inFlight);\n this.sender.inFlight = { message: message, signedMessage: signedMessage };\n const messageSentPromise = this.messageLedger.addMessage(message);\n this.sender.sendInFlightMessage();\n \n return messageSentPromise;\n }\n \n /**\n * Send a signed ack directly over the wire. If we get a SocketIO.Send: Not Connected error, \n * wait until we're connected and then resend the ack.\n * @param {String} ack \n */\n sendAck(ack)\n {\n try\n {\n this.transport.send(ack)\n }\n catch(error)\n {\n // Transport was lost\n if (error.code === 'DCPC-1105')\n this.once('connect', () => this.sendAck(ack));\n else\n console.error(`${this.debugLabel} Error acking message to ${this.loggableDest}: ${error}`);\n }\n }\n \n /**\n * Discard a prepared message by removing it from the queue.\n * Returns nonce to sender and provokes queue service.\n * @param {Object} messageObject { message, signedMessage } message to discard \n */\n discardMessage(messageObject)\n {\n let { message } = messageObject;\n this.sender.nonce = message.nonce;\n delete message.nonce;\n message.type = 'unhandled-message';\n this.sender.requestQueueService();\n }\n\n /**\n * This routine returns the current time for the purposes of\n * populating the Request message payload.validity.time property.\n * \n * @returns {Number} the integer number of seconds which have elapsed since the epoch\n */\n currentTime() {\n let msSinceEpoch;\n if (this.hasNtp) {\n msSinceEpoch = Date.now();\n } else {\n const msSinceLastReceipt = performance.now() - this.receiver.lastResponseTiming.receivedMs;\n msSinceEpoch = this.receiver.lastResponseTiming.time * 1000 + msSinceLastReceipt;\n }\n return Math.floor(msSinceEpoch / 1000);\n }\n\n /**\n * This method sends a keepalive to the peer, and resolves when the response has been received.\n */\n keepalive() {\n return this.send('keepalive');\n }\n}\n\n/** \n * Determine if we got the scheduler config from a secure source, eg https or local disk.\n * We assume tha all https transactions have PKI-CA verified.\n *\n * @note protocol::getSchedulerConfigLocation() is populated via node-libs/config.js or dcp-client/index.js\n *\n * @returns true or falsey\n */\nfunction determineIfSecureConfig()\n{\n var schedulerConfigLocation = (__webpack_require__(/*! 
dcp/protocol-v4 */ \"./src/protocol-v4/index.js\").getSchedulerConfigLocation)();\n var schedulerConfigSecure;\n\n if (schedulerConfigLocation && (schedulerConfigLocation.protocol === 'https:' || schedulerConfigLocation.protocol === 'file:'))\n {\n debugging('strict-mode') && console.debug(`scheduler config location ${schedulerConfigLocation} is secure`); /* from casual eavesdropping */\n schedulerConfigSecure = true;\n }\n\n if (isDebugBuild)\n {\n debugging('strict-mode') && console.debug('scheduler config location is always secure for debug builds');\n schedulerConfigSecure = 'debug';\n }\n\n debugging('strict-mode') && console.debug(`Config Location ${schedulerConfigLocation} is ${!schedulerConfigSecure ? 'not secure' : 'secure-' + schedulerConfigSecure}`);\n return schedulerConfigSecure;\n}\n\n/**\n * Determine if a URL is secure by examinining the protocol, connection, and information about the \n * process; in particular, we try to determine if the dcp config was securely provided, because if \n * it wasn't, then we can't have a secure location, since the origin could be compromised.\n * \n * \"Secure\" in this case means \"secure against casual eavesdropping\", and this information should only\n * be used to refuse to send secrets over the transport or similar.\n *\n * @returns true or falsey\n */\nfunction determineIfSecureLocation(conn)\n{\n var isSecureConfig = determineIfSecureConfig();\n var secureLocation;\n\n if (!isSecureConfig) /* can't have a secure location without a secure configuration */\n return null;\n \n if (isDebugBuild || conn.url.protocol === 'https:' || conn.url.protocol === 'tcps:')\n secureLocation = true;\n else if (conn.role === role.initiator && conn.target.hasOwnProperty('friendLocation') && conn.url === conn.target.friendLocation)\n secureLocation = true;\n else if (conn.options.allowUnencryptedSecrets)\n secureLocation = 'override';\n else\n secureLocation = false;\n\n debugging('strict-mode') && console.debug(`Location ${conn.url} is ${!secureLocation ? 'not secure' : 'secure-' + secureLocation}`);\n \n return secureLocation;\n}\nexports.Connection = Connection;\n\n\n//# sourceURL=webpack://dcp/./src/protocol-v4/connection/connection.js?");
4535
4535
 
4536
4536
  /***/ }),
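For the Request payload.validity.time property, Connection.currentTime() in the module above extrapolates epoch time from the last response when NTP is unavailable. A small self-contained sketch of that calculation, assuming `lastResponseTiming` holds the peer-reported time in seconds plus the local performance.now() reading taken at receipt (field names as in the bundled source; the wrapper function itself is hypothetical):

/* Hedged sketch: clock extrapolation as in Connection.currentTime(). Returns
 * whole seconds since the epoch. Assumes lastResponseTiming = { time, receivedMs },
 * where `time` is the peer-reported epoch time in seconds and `receivedMs` is
 * the local performance.now() captured when that response arrived.
 */
function currentTimeSeconds(hasNtp, lastResponseTiming)
{
  let msSinceEpoch;

  if (hasNtp)
    msSinceEpoch = Date.now();
  else
    msSinceEpoch = lastResponseTiming.time * 1000
                 + (performance.now() - lastResponseTiming.receivedMs);

  return Math.floor(msSinceEpoch / 1000);
}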
4537
4537
 
@@ -4716,7 +4716,7 @@ eval("/**\n * @file listener.js\n * Generic API for transpor
4716
4716
  /***/ ((__unused_webpack_module, exports, __webpack_require__) => {
4717
4717
 
4718
4718
  "use strict";
4719
- eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file transport/socketio.js\n * @author Ryan Rossiter, ryan@kingsds.network\n * @author Wes Garland, wes@kingsds.network \n * @date January 2020, March 2022\n *\n * This module implements the SocketIO Transport that is\n * used by the protocol to connect and communicate with peers using\n * SocketIO connections.\n *\n * Transport can operate in either ack mode or non-ack-mode, depending on the value of this.ackMode.\n * Ack mode sends an extra socketio-layer packet back after each message is received, and theoretically\n * this can be use tod meter or measure bandwidth or potentially second-guess socketio's ability to\n * interleave its heartbeat messages OOB from the main traffic. This mode could potentially be enabled\n * on-demand as well, although it appears that it is not necessary to get the desired behaviour at this\n * point, so the feature is off-by-default (although reasonably well-tested).\n */\n\n\nconst { Transport } = __webpack_require__(/*! . */ \"./src/protocol-v4/transport/index.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { leafMerge } = __webpack_require__(/*! dcp/utils/obj-merge */ \"./src/utils/obj-merge.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { setImmediateN } = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\nconst { setImmediate } = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp');\nconst dcpEnv = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst bearer = __webpack_require__(/*! ./http-bearer */ \"./src/protocol-v4/transport/http-bearer.js\");\nconst semver = __webpack_require__(/*! 
semver */ \"./node_modules/semver/semver.js\");\n\nvar listenerSeq = 0;\nvar initiatorSeq = 0;\n\nif (debugging() && !process.env.DEBUG && dcpConfig.build === 'debug')\n{\n if (process.env.DEBUG)\n process.env.DEBUG += ',engine';\n else\n process.env.DEBUG = 'engine';\n\n if (debugging('socketio'))\n {\n /* DCP_DEBUG=dcp:socketio,dcp:verbose to see everything */\n process.env.DEBUG += ',socket.io-client:socket,socket.io:client,socket.io:server';\n if (debugging('verbose') && !debugging('all'))\n process.env.DEBUG += ',socket.io*'\n }\n}\n\nclass SocketIOTransport extends Transport\n{\n /**\n * @constructor\n * @param {socket} newSocket Target: an instance of socket.io Socket that represents\n * a new connection.\n * Initiator: unused\n */\n /**\n * @constructor create new instance of a socketio-flavoured Transport.\n *\n * @param {object} config dcpConfig fragment\n * @param {Url} url Initiator: has .location and optionally .friendLocation\n * Target: has .listen and .location\n *\n * @param {object} options The `socketio` property of a protocol-v4 connectionOptions \n * object, which was built from a combination of defaults and \n * various dcpConfig components.\n *\n * @returns instance suitable for target or initiator, depending on mode, but does not manage connections.\n */\n constructor(config, options={}, newSocket)\n {\n super('Protocol SocketIO Transport');\n const that = this;\n \n this.name = 'socketio';\n this.url = config.location;\n this.msgSequence = 0;\n this.isClosed = false; /* true => connection finalization begun */\n this.isFinal = false; /* true => connection finalization completed */\n this.hasConnected = false; /* true => one 'connect' event has been processed */\n this.rxFragments = {}; /* messages we are receiving in fragments */\n this.rxPending = []; /* strings which are messages that are complete and ready to emit, or objects that in this.rxFragments */\n this.txPending = []; /* messages we are receiving in fragments */\n this.txNumInFlight = 0; /* number of txPending sent but not acknowleged */\n this.txMaxInFlight = 2; /* less than 2 causes bad perf when mode.ack */\n this.debugInfo = { remoteLabel: '<unknown>' };\n this.peerVersion = false;\n this.mode = {\n ack: false,\n concat: false,\n };\n\n assert(DcpURL.isURL(this.url));\n assert(typeof options === 'object');\n\n this.options = this.buildOptions(options);\n /* Calculate the optimal maxHttpBufferSize based on maxInflight and maxFragmentSize.\n * Currently unsure of exactly what happens when socketio serializes our serialized\n * JSON. I suspect we may double-encode UTF-8 > 0x80. 
The largest UTF-8 character is\n * 5 bytes...so we ensure the buffer is overhead + 5length bytes.\n */\n const fragmentOverhead = 1000 + 64 * 1024; /* derived from concatenation algorithm */\n const maxFragmentSize = fragmentOverhead + options.maxFragmentSize;\n const minBufferSize = maxFragmentSize * 5 * this.txMaxInFlight;\n\n if (this.options.maxHttpBufferSize && (minBufferSize > this.options.maxHttpBufferSize))\n throw new DCPError(`http buffer size too small (${minBufferSize} > ${options.maxHttpBufferSize})`, 'DCP-1106');\n this.options.maxHttpBufferSize = Math.max(this.options.maxHttpBufferSize || 0, minBufferSize);\n\n if (!newSocket) /* Initiator */\n {\n this.debugLabel = `socketio(i:${++initiatorSeq}):`;\n this.rxReady = true; /* relax, said the night man */\n this.debugInfo.remoteLabel = config.location.href;\n if (this.options.disableModeAnnouncement !== false)\n this.sendModeList();\n bearer.a$isFriendlyUrl(config.friendLocation).then((useFriendLocation) => {\n const connectUrl = useFriendLocation ? config.friendLocation : config.location;\n\n debugging('socketio') && console.log(this.debugLabel, 'connecting to', connectUrl.href);\n this.socket = (__webpack_require__(/*! socket.io-client */ \"./node_modules/socket.io-client/build/cjs/index.js\").connect)(connectUrl.origin, this.options);\n this.socket.on('connect_error', (error) => !this.isClosed && this.handleConnectErrorEvent(error));\n this.socket.on('connect', () => !this.isClosed && this.handleConnectEvent());\n\n useSocket();\n }).catch((error) => {\n debugging() && console.error(this.debugLabel, error);\n this.emit('error', error);\n });\n }\n else /* Target - receives socketio instance from SocketIOListener.handleConnectionEvent */\n {\n if (dcpEnv.platform !== 'nodejs')\n throw new Error('socketio: target mode only supported in nodejs');\n\n this.debugLabel = 'socketio(t:<new>):';\n\n this.socket = newSocket;\n this.debugInfo.remoteLabel = this.socket.handshake.address.replace(/^::ffff:/,'');\n debugging('socketio') && console.debug(this.debugLabel, `new socket from ${this.debugInfo.remoteLabel}`\n + ` on ${this.socket.handshake && this.socket.handshake.url}, ${this.socket.id}`);\n useSocket();\n }\n\n function useSocket()\n {\n that.socket.compress(dcpConfig.build !== 'debug'); /* try to keep traffic sniffable in debug */\n that.socket.on('message', (msg) => !that.isFinal && that.handleMessageEvent(msg));\n that.socket.on('disconnect', (reason) => !that.isClosed && that.handleDisconnectEvent(reason));\n }\n }\n\n /**\n * API - determine if a transport is usable or not.\n * @returns true if the transport is not closed.\n */\n ready()\n {\n return !this.isClosed;\n }\n \n /**\n * Handle the socket.io disconnect event, which is fired upon disconnection.\n * For some reasons (explicit disconnection), the socket.io code will not try to reconnect, but\n * in all other cases, the socket.io client will normally wait for a small random delay and then \n * try to reconnect, but in our case, we simply tear the transport down and let the Connection\n * class handle reconnects, since it might prefer a different transport at this point anyhow.\n *\n * One tricky case here is ping timeout, since the ping timeout includes the time to transmit the\n * packet before it was sent.\n *\n * @param {string} reason Possible reasons for disconnection.\n */\n handleDisconnectEvent(reason)\n {\n debugging('socketio') && console.debug(this.debugLabel, `disconnected from ${this.debugInfo.remoteLabel}; reason=${reason}`);\n\n if (this.isClosed 
!== true)\n setImmediate(() => this.#close(reason));\n\n switch(reason)\n {\n case 'io client disconnect':\n return; /* we called socket.disconnect() from another \"thread\" */\n case 'io server disconnect':\n case 'ping timeout':\n case 'transport close':\n case 'transport error':\n this.emit('end', reason);\n /* fallthrough */\n default:\n }\n }\n\n /**\n * Handle a socketio message from the peer.\n *\n * Most of the time, a socketio message contains a DCP message, however we support other message\n * types as well, with a mechanism inspired by 3GPP TS 23.040 (GSM 03.40) message concatenation.\n */\n handleMessageEvent(msg)\n {\n if (typeof msg !== 'string')\n {\n debugging('socketio') && console.debug(this.debugLabel, `received ${typeof msg} message from peer`, this.debugInfo.remoteLabel);\n return;\n }\n\n try\n {\n switch (msg[0])\n {\n case '{': /* all DCP Messages are JSON */\n this.processMessage(msg);\n break;\n case 'E':\n this.processExtendedMessage(msg);\n break;\n case 'A':\n this.processAcknowledgement(msg);\n break;\n default:\n throw new DCPError(this.debugLabel, `Unrecognized message type indicator from ${this.debugInfo.remoteLabel} (${msg.charCodeAt(0)})`, 'DCPC-1102');\n }\n }\n catch(error)\n {\n debugging('socketio') && console.debug(this.debugLabel, 'handleMessageEvent: transport error, closing\\n ', error);\n this.emit('error', error);\n setImmediate(() => this.#close(error.message));\n }\n }\n\n /**\n * Process an ordinary message. This is just a hunk of JSON that gets passed up to the connection.\n * @param {string} msg\n */\n processMessage(msg)\n {\n this.rxPending.push(msg);\n this.emitReadyMessages();\n if (this.mode.ack)\n this.socket.send('A0000-ACK'); /* Ack w/o header = normal message */\n }\n\n /**\n * Remote has acknowledged the receipt of a message fragment. When in ack mode, this\n * triggers us to send more messages from the txPending queue. We always ack event when \n * not in ack-mode. 
See processExtendedMessage() comment for header description.\n *\n * Future: this is where maxInFlight might be tuned, based on how fast we get here\n *\n * @param {string} _msg\n */\n processAcknowledgement(_msg)\n {\n if (this.txNumInFlight > 0)\n this.txNumInFlight--;\n else\n {\n if (this.options.strict || !this.mode.ack)\n throw new DCPError(this.debugLabel, `Synchronization error: received unexpected acknowledgement message from ${this.debugInfo.remoteLabel}`, 'DCPC-1109');\n else\n this.mode.ack = true;\n }\n \n this.drainTxPending_soon();\n }\n\n /**\n * Extended messages have the following format:\n * Octet(s) Description\n * 0 E (69) or A (65)\n * 1..4 Header size (N) in hexadecimal\n * 5 Header Type\n * C (67) => concatenation\n * 6..6 + N: Header\n *\n * Upon receipt, extended messages are acknowledged by retransmitting their header, but\n * with the first character changed from E to A.\n *\n * @param {string} msg\n */\n processExtendedMessage(msg)\n {\n const headerSize = parseInt(msg.slice(1,5), 16);\n const headerType = msg[5];\n const header = msg.slice(6, 6 + headerSize);\n\n switch(headerType)\n {\n case 'M':\n this.processExtendedMessage_mode(header, msg.slice(6 + headerSize));\n break;\n case 'C':\n this.processExtendedMessage_concatenated(header, msg.slice(6 + headerSize));\n break;\n default:\n if (this.options.strict)\n throw new DCPError(`Unrecognized extended message header type indicator (${msg.charCodeAt(5)})`, 'DCPC-1101');\n }\n\n if (this.mode.ack)\n this.socket.send('A' + msg.slice(1, 6 + headerSize));\n }\n\n /**\n * We have received a fragment of a concatenation extended message. Memoize the fragment, and\n * if this is the last fragment, transform the fragments list into a pending message string and\n * then emits all ready messages (pending message strings).\n *\n * This code is capable of handling messages whose fragments arrive out of order. Complete messages \n * are emitted in the order that their first parts are received. Socketio is supposed to be total\n * order-aware but I wanted a safety belt for intermingled messages without occupying the entire\n * socketio network buffer on the first pass of the event loop. Because the first message fragment\n * is emitted to socketio on the same event loop pass as their Connection::send calls, I believe\n * that are no cases where messages will be emitted out of order at the other end, even when \n * intermingled on the wire, and the Nth message is much longer than any (N+k)th messages.\n *\n * @param {object} header the extended message header\n * @param {string} payload the payload of the extended message, i.e. 
a message fragment\n */\n processExtendedMessage_concatenated(header, payload)\n {\n try\n {\n header = JSON.parse(header);\n }\n catch(e)\n {\n throw new DCPError(`invalid extended message header '${header}'`, 'DCPC-1103');\n }\n\n if (!this.options.strict && !this.mode.concat)\n this.mode.concat = true;\n \n if (!(header.total <= this.options.maxFragmentCount))\n throw new DCPError('excessive message fragmentation', 'DCPC-1107'); /* make it more difficult for attacker to force OOM us */\n\n if (!(header.seq < header.total))\n throw new DCPError(`corrupt header for part ${header.seq + 1} of ${header.total} of message ${header.msgId}`, 'DCPC-1108');\n \n /* First fragment received, initial data structures and memoize the message's arrival order via pending list */\n if (!this.rxFragments[header.msgId])\n {\n const fragments = [];\n this.rxFragments[header.msgId] = fragments;\n this.rxPending.push(fragments);\n }\n\n this.rxFragments[header.msgId][header.seq] = payload;\n debugging('socketio') && console.debug(this.debugLabel, 'received part', header.seq + 1, 'of', header.total, 'for message', header.msgId);\n\n if (header.total !== this.rxFragments[header.msgId].filter(el => el !== undefined).length)\n return;\n\n debugging('socketio') && console.debug(this.debugLabel, 'received all parts of message', header.msgId);\n \n const idx = this.rxPending.indexOf(this.rxFragments[header.msgId])\n this.rxPending[idx] = this.rxFragments[header.msgId].join('');\n delete this.rxFragments[header.msgId];\n this.emitReadyMessages();\n }\n\n processExtendedMessage_mode(_header, payload)\n {\n var modeList;\n \n try\n {\n modeList = JSON.parse(payload);\n }\n catch(e)\n {\n throw new DCPError(`invalid mode message payload '${payload}'`, 'DCPC-1105');\n }\n\n for (let mode of modeList)\n {\n switch(mode)\n {\n case 'ack':\n this.mode.ack = true;\n break;\n case 'concat':\n this.mode.concat = true;\n break;\n default:\n debugging() && console.debug(this.debugLabel, 'unrecognized mode:', mode);\n break;\n }\n }\n\n this.sendModeList();\n }\n\n /* Internal method to tell peer what we can receive */\n sendModeList()\n {\n var rawMessage;\n var modeList = [];\n\n if (this.sentModeList)\n return;\n\n this.sentModeList = true;\n this.options.enableModeAck == true && modeList.push('ack');\n this.options.enableModeConcat !== false && modeList.push('concat');\n rawMessage = 'E0000M' + JSON.stringify(modeList);\n\n debugging('socketio') && console.debug(`${this.debugLabel} sending mode list to ${this.debugInfo.remoteLabel}`)\n if (this.socket)\n this.socket.send(rawMessage);\n else\n this.txPending.unshift({ message: rawMessage, msgSequence: ++this.msgSequence});\n }\n\n /**\n * Emit message events so that the connection can process them. The pending property is an\n * array which is used to order messages, so that they are emitted in the order they were\n * sent. 
There are edge cases in the concatenation code where a sender could theoretically\n * interleave multiple messages, and have a second, shorter, message fully received before \n * the first is fully transmitted.\n *\n * The pending property stores only strings or objects; objects are effectively positional\n * memoes which are turned into strings when they are ready to be emitted to the connection\n * for processing.\n */\n emitReadyMessages()\n {\n while (this.rxReady && this.rxPending.length && typeof this.rxPending[0] === 'string')\n this.emit('message', this.rxPending.shift());\n }\n \n /** \n * Close the current instance of Socket.\n *\n * This function effectively finalizes the socket so that it can't be used any more and\n * (hopefully) does not entrain garbage. A 'close' event is emitted at the end of socket\n * finalization and should be hooked by things that might entrain this transport instance to free\n * up memory, like maybe a list of active transports, etc.\n *\n * We add few extra hops on/off the event loop here to try and clean up any messages halfway in/out\n * the door that we might want to process. That happens between the setting of the isClosed and \n * isFinal flags.\n *\n * @param {string} reason\n */\n #close(reason)\n {\n const that = this;\n debugging('socketio') && console.debug(this.debugLabel, 'closing connection' + (reason ? ` (${reason})` : ''));\n\n function finalize(socket)\n {\n if (that.isFinal)\n return;\n\n if (socket)\n socket.removeAllListeners();\n that.isFinal = true;\n\n /* Free up memory that might be huge in case something entrains us */\n that.rxPending.length = 0;\n that.txPending.length = 0;\n for (let msgId in that.rxFragments)\n that.rxFragments[msgId].length = 0;\n }\n\n function closeSocket()\n {\n const socket = that.socket;\n \n try\n {\n if (socket)\n socket.disconnect();\n that.isClosed = true;\n that.emit('close', reason);\n delete that.socket;\n }\n finally\n {\n finalize(socket);\n }\n }\n\n if (!this.isClosed)\n {\n /* If we called close, drain any pending messages and then disconnect */\n if (reason === 'api-call')\n {\n this.drainTxPending();\n setImmediate(()=>closeSocket());\n }\n else\n closeSocket();\n }\n }\n\n /* API - close the transport, free up memory. */\n close()\n {\n assert(arguments.length === 0)\n this.#close('api-call');\n }\n\n /**\n * Handle the socketio connect_error event, which is fired when:\n * - the low-level connection cannot be established\n * - the connection is denied by the server in a middleware function\n *\n * In the first case, socket.io will automatically try to reconnect, after a delay,\n * except that will cancel that reconnection attempt here by killing the transport\n * instance so that the Connection class can try failing over to an alternate transport.\n *\n * @param {Error} error optional instance of error describing the underlying reason for the\n * connect failure. 
If the underlying reason is expressible by an\n * HTTP status code, this code will be placed as a Number in the\n * description property.\n */\n handleConnectErrorEvent(error)\n {\n debugging('socketio') && console.debug(this.debugLabel, `unable to connect to ${this.url}`, error);\n if (error.type === 'TransportError' && typeof error.description === 'number')\n error.httpStatus = Number(error.description);\n\n this.emit('connect-failed', error);\n this.#close(error.message);\n }\n\n /** \n * Handle the socketio connect event, which is fired by the Socket instance upon initiator\n * connection and reconnection to a target.\n */\n handleConnectEvent()\n {\n if (this.hasConnected === true)\n {\n /* this transport is not supposed to reuse connections, and we call .close() on disconnect \n * to prevent this. However, the API does not guarantee that will be race-free.\n */\n debugging('socketio') && console.debug(this.debugLabel, `*** reconnected to ${this.debugInfo.remoteLabel} (ignoring)`);\n return;\n }\n\n /* initial connection */\n this.hasConnected = true;\n debugging('socketio') && console.debug(this.debugLabel, 'connected to', this.debugInfo.remoteLabel);\n this.emit('connect');\n\n this.drainTxPending();\n }\n\n /**\n * API: Send a message to the transport instance on the other side of this connection.\n *\n * @param {string} message the message to send\n */\n send(message)\n {\n const msgSequence = ++this.msgSequence;\n\n debugging('socketio') && console.debug(this.debugLabel, `sending message ${msgSequence} to`, this.debugInfo.remoteLabel);\n debugging('socketio') && debugging('verbose') && !debugging('all') && console.debug(this.debugLabel, message);\n\n if (!this.socket)\n throw new DCPError('SocketIOTransport.send: Not connected', 'DCPC-1105');\n \n // iff message length is small enough for a fragment OR client is 5.0.0\n // send the message intact; big messages to modern clients get fragmented\n if ( false\n || !(message.length > this.options.maxFragmentSize)\n || !(this.peerVersion && semver.satisfies(this.peerVersion, '>= 5.1.0'))\n || !this.mode.concat)\n this.txPending.push({ message, msgSequence });\n else\n { /* This is a large message. Use message concatenation to send in fragments. A failure of any\n * fragment is a failure of the entire message and will be handled at the connection layer.\n */\n let total = Math.ceil(message.length / this.options.maxFragmentSize);\n \n for (let seq=0; seq < total; seq++)\n {\n let start = seq * this.options.maxFragmentSize;\n let fragment = message.slice(start, start + this.options.maxFragmentSize);\n let header = JSON.stringify({ msgId: msgSequence, seq, total });\n let extMessage = 'E' + header.length.toString(16).padStart(4, '0') + 'C' + header + fragment;\n\n this.txPending.push({ message: extMessage, msgSequence, seq, total });\n\n /* This should be impossible, but it also effectively kills the connection. */\n if (header.length > 0xFFFF)\n throw new DCPError('extended message header too long', 'DCPC-1104');\n }\n }\n\n this.drainTxPending_soon();\n }\n\n drainTxPending_soon()\n {\n if (this.drainingSoon)\n return;\n this.drainingSoon = true;\n\n setImmediate(() => this.drainTxPending());\n }\n \n /**\n * Drain messages from the txPending queue out to socketio's network buffer. 
We throw away traffic\n * when it can't be sent; this is no different from packets already on the wire when the other end\n * has changed IP number, etc.\n *\n * In ack mode, this drain is controlled by acknowledgement messages so that socketio can have the\n * opportunity to insert ping/pong messages in the data stream while sending a message large enough\n * to need to be sent via concatenation. [ NOTE - it appears to send ping/pong OOB in version 4.4, \n * so currently ack mode is off by default / wg Mar 2022 ]\n *\n * The reason we put non-concat messages through here is to avoid special casing a bunch of nasty\n * things for the sake of a little tiny bit of performance. Sending message<-ACK and queueing through\n * the drain code is necessary if we wind up interleaving multiple message sends in some interesting\n * edge cases; notably, fast-teardown, but a future Connection class that ran without nonces (or multiple\n * nonces) would need this, too.\n *\n * In non-ack mode, the drain is mostly uncontrolled, but we throw each message on a separate pass of\n * the event loop, to try and keep huge message blasts one connection from overwhelming a busy server,\n * as well allowing socketio the chance to send its heartbeat messages and make it less likely that it\n * will hit its http buffer size limit, because that causes an immediate disconnect.\n *\n * Non-ack-mode is compatible with previous dcp5 versions and probably a little faster.\n */\n drainTxPending()\n {\n const that = this;\n\n if (this.txPending.length)\n debugging('socketio') && console.debug(this.debugLabel, `drain tx pending queue, ${this.txPending.length} ready`\n + (this.mode.ack ? `; ${this.txMaxInFlight - this.txNumInFlight} slots available` : ''));\n delete this.drainingSoon;\n\n if (this.isClosed)\n return;\n \n for (let i=0;\n !this.isClosed && this.txPending.length && (this.txNumInFlight < this.txMaxInFlight || !this.mode.ack);\n i++)\n {\n const pendingElement = this.txPending.shift();\n\n if (i === 0)\n writeToSocket(pendingElement);\n else\n setImmediateN(() => writeToSocket(pendingElement), i);\n\n if (this.mode.ack)\n this.txNumInFlight++;\n\n if (this.txPending.length === 0) /* just sent the last message */\n this.emit('drain');\n }\n\n if (this.txPending.length)\n this.drainTxPending_soon();\n \n function writeToSocket(pendingElement)\n {\n if (that.isClosed)\n return;\n\n const { message, msgSequence, seq, total } = pendingElement;\n that.socket.send(message);\n\n if (typeof seq !== 'undefined')\n debugging('socketio') && console.debug(that.debugLabel, `sent fragment ${seq + 1}/${total} of message ${msgSequence} (${message.length} bytes)`);\n else\n debugging('socketio') && console.debug(that.debugLabel, `sent message ${msgSequence} (${message.length} bytes)`);\n }\n }\n}\nSocketIOTransport.prototype.buildOptions = buildOptions;\n\n/** \n * Build the socketio options object, based on the passed-in options, dcp debug state, and internal\n * object state. Method shared between SocketIOTransport and SocketIOListener.\n */\nfunction buildOptions(options)\n{\n options = leafMerge(\n /* Baked-in Defaults */\n ({\n autoUnref: dcpEnv.platform === 'nodejs' ? 
true : undefined, /* socket.io + webpack 5 { node:global } work-around */\n perMessageDeflate: dcpConfig.build !== 'debug',\n maxFragmentCount: 1000, /* used to limit memory on receiver */\n maxFragmentSize: 1e6, /* bytes */\n maxHttpBufferSize: false, /* bytes; false = auto */\n pingTimeout: 30 * 1e3, /* s */\n pingInterval: 90 * 1e3, /* s */\n transports: ['polling', 'websocket'],\n upgrade: true,\n rememberUpgrade: true,\n autoConnect: true,\n cors: {\n origin: '*',\n methods: ['GET', 'POST']\n },\n }),\n options,\n /* Absolutely mandatory */\n ({\n path: this.url.pathname // eslint-disable-line no-invalid-this\n }));\n \n /* draw out errors quickly in dev */\n if ((process.env.DCP_NETWORK_CONFIG_BUILD || dcpConfig.build) === 'debug')\n {\n options.maxFragmentSize /= 10;\n options.maxFragmentCount *= 10;\n\n /* short timeouts and debuggers don't get along well */\n if (dcpEnv.platform === 'nodejs' && !(requireNative('module')._cache.niim instanceof requireNative('module').Module))\n {\n options.pingTimeout /= 5;\n options.pingInterval /= 5;\n }\n }\n\n return options;\n}\n \n/**\n * @constructor\n * API - Factory which creates a new socketio listener, or throws. Emits 'listening' if the server \n * parameter is not provided and we create it, once it is ready for connections.\n * \n * @bug It is possible for a connection event to sneak in before the 'listening' event is fired, due to \n * the two-step event trigger. The right fix is to proxy the event, but dcp-events::event-emitter.proxy\n * itself currently has the same bug. For now, we just have callbacks in API. /wg apr 2022\n * \n * @param {object} config dcpConfig fragment - has .listen and .location\n * @param {object} target the instance of Target to bind to\n * @param {object} options [optional] A DCP config options variable (like dcpConfig.dcp.listenOptions)\n */\nfunction SocketIOListener(config, target, options)\n{\n var matchRegex;\n \n this.url = config.location;\n this.options = this.buildOptions(options);\n this.connSequence = 0;\n this.seq = ++listenerSeq;\n this.debugLabel = `socketio(l:${this.seq}):`;\n this.isShutdown = false;\n\n if (dcpEnv.platform !== 'nodejs')\n throw new Error('target mode only supported in nodejs');\n\n target.httpServer.on('listening', () => this.emit('listening', target.httpServer));\n\n matchRegex = '^' + this.url.pathname;\n if (matchRegex.slice(-1) !== '/')\n matchRegex += '\\\\/';\n else\n matchRegex = matchRegex.slice(0, -1) + \"\\\\/\"\n\n matchRegex += '\\\\?EIO=[0-9]';\n debugging('socketio') && console.debug(this.debugLabel, `expecting requests to match regex ${matchRegex}`);\n\n target.all(new RegExp(matchRegex), (request) => {\n /* Socket.io does this intrinsically because it plays with the server's events directly,\n * but we \"handle\" it explicitly here so that we don't trip the 404 handler. \n */\n debugging('socketio') && console.debug(this.debugLabel, 'Handling', request.url);\n }, true);\n\n /* While we normally want to inherit options from parent to child socket via this.options,\n * we don't want this to happen for maxHttpBufferSize due to the way it is used and calculated\n * in the transport constructor. 
A buffer that is too small (eg false) will cause us to not\n * recognized socket.io SIDs.\n */\n const socketOptions = Object.assign({}, this.options, { maxHttpBufferSize: 1e6 });\n\n this.socketServer = requireNative('socket.io')(target.httpServer, socketOptions);\n this.socketServer.on('connection', (socket) => !this.isShutdown && this.handleConnectionEvent(config, socket));\n debugging() && console.debug(this.debugLabel, 'Socketio listener initialized for path', this.options.path);\n}\nSocketIOListener.prototype = new EventEmitter('socketio(l)');\nSocketIOListener.prototype.buildOptions = buildOptions;\n\nSocketIOListener.prototype.close = function SocketIOListener$$close()\n{\n if (!this.isShutdown)\n this.shutdown();\n this.socketServer.close(() => this.emit('close'));\n}\n\n/** Stop accepting new connections */\nSocketIOListener.prototype.shutdown = function SocketIOListener$$shutdown()\n{\n this.isShutdown = true;\n}\n\n/** Remove any event loop references used by this transport instance */\nSocketIOListener.prototype.unref = function SocketIOListener$$unref()\n{\n void false;\n}\n\n/**\n * Handle the socketio connection event, which is fired upon a connection from client.\n * Used by Target->SocketIOListener to create transport instances.\n *\n * It is expected that code hooking the 'connection' event will hook the 'message' in\n * the same pass of the event loop as it is invoked; otherwise, messages may be missed.\n *\n * @param {object} socket a new connection emitted by the socket.io server\n */\nSocketIOListener.prototype.handleConnectionEvent = function SocketIOListener$$handleConnectionEvent(config, socket)\n{\n var transport = new SocketIOTransport(config, this.options, socket);\n\n this.url = config.location || config.listen;\n this.connSequence++;\n transport.debugLabel = `socketio(t:${this.seq}.${this.connSequence}):`\n\n /* Ensure we don't emit any message events until the application or Connection class\n * has had the chance to set up message handlers via the connection event.\n */\n transport.rxReady = false;\n this.on('connection', () => setImmediate(() => {\n transport.rxReady = true;\n transport.emitReadyMessages();\n }));\n\n this.emit('connection', transport);\n}\n\n/* Define API */\nexports.TransportClass = SocketIOTransport;\nexports.Listener = SocketIOListener;\n\n\n//# sourceURL=webpack://dcp/./src/protocol-v4/transport/socketio.js?");
4719
+ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * @file transport/socketio.js\n * @author Ryan Rossiter, ryan@kingsds.network\n * @author Wes Garland, wes@kingsds.network \n * @date January 2020, March 2022\n *\n * This module implements the SocketIO Transport that is\n * used by the protocol to connect and communicate with peers using\n * SocketIO connections.\n *\n * Transport can operate in either ack mode or non-ack-mode, depending on the value of this.ackMode.\n * Ack mode sends an extra socketio-layer packet back after each message is received, and theoretically\n * this can be use tod meter or measure bandwidth or potentially second-guess socketio's ability to\n * interleave its heartbeat messages OOB from the main traffic. This mode could potentially be enabled\n * on-demand as well, although it appears that it is not necessary to get the desired behaviour at this\n * point, so the feature is off-by-default (although reasonably well-tested).\n */\n\n\nconst { Transport } = __webpack_require__(/*! . */ \"./src/protocol-v4/transport/index.js\");\nconst { EventEmitter } = __webpack_require__(/*! dcp/common/dcp-events */ \"./src/common/dcp-events/index.js\");\nconst { leafMerge } = __webpack_require__(/*! dcp/utils/obj-merge */ \"./src/utils/obj-merge.js\");\nconst { DcpURL } = __webpack_require__(/*! dcp/common/dcp-url */ \"./src/common/dcp-url.js\");\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { setImmediateN } = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\nconst { setImmediate } = __webpack_require__(/*! dcp/common/dcp-timers */ \"./src/common/dcp-timers.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\n\nconst debugging = (__webpack_require__(/*! dcp/debugging */ \"./src/debugging.js\").scope)('dcp');\nconst dcpEnv = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst bearer = __webpack_require__(/*! ./http-bearer */ \"./src/protocol-v4/transport/http-bearer.js\");\nconst semver = __webpack_require__(/*! 
semver */ \"./node_modules/semver/semver.js\");\n\nvar listenerSeq = 0;\nvar initiatorSeq = 0;\n\nif (debugging() && !process.env.DEBUG && dcpConfig.build === 'debug')\n{\n if (process.env.DEBUG)\n process.env.DEBUG += ',engine';\n else\n process.env.DEBUG = 'engine';\n\n if (debugging('socketio'))\n {\n /* DCP_DEBUG=dcp:socketio,dcp:verbose to see everything */\n process.env.DEBUG += ',socket.io-client:socket,socket.io:client,socket.io:server';\n if (debugging('verbose') && !debugging('all'))\n process.env.DEBUG += ',socket.io*,engine.io*'\n }\n}\n\nclass SocketIOTransport extends Transport\n{\n /**\n * @constructor\n * @param {socket} newSocket Target: an instance of socket.io Socket that represents\n * a new connection.\n * Initiator: unused\n */\n /**\n * @constructor create new instance of a socketio-flavoured Transport.\n *\n * @param {object} config dcpConfig fragment\n * @param {Url} url Initiator: has .location and optionally .friendLocation\n * Target: has .listen and .location\n *\n * @param {object} options The `socketio` property of a protocol-v4 connectionOptions \n * object, which was built from a combination of defaults and \n * various dcpConfig components.\n *\n * @returns instance suitable for target or initiator, depending on mode, but does not manage connections.\n */\n constructor(config, options={}, newSocket)\n {\n super('Protocol SocketIO Transport');\n const that = this;\n \n this.name = 'socketio';\n this.url = config.location;\n this.msgSequence = 0;\n this.isClosed = false; /* true => connection finalization begun */\n this.isFinal = false; /* true => connection finalization completed */\n this.hasConnected = false; /* true => one 'connect' event has been processed */\n this.initTimeout = false; /* timeout when connecting to allow target to reply with their mode message + buffer size, crash if they don't reply */\n this.modeSet = false; /* true => received response mode message */\n this.bufferSizeSet = false; /* true => received target's buffer size */\n this.rxFragments = {}; /* messages we are receiving in fragments */\n this.rxPending = []; /* strings which are messages that are complete and ready to emit, or objects that in this.rxFragments */\n this.txPending = []; /* messages we are receiving in fragments */\n this.txNumInFlight = 0; /* number of txPending sent but not acknowleged */\n this.txMaxInFlight = 2; /* less than 2 causes bad perf when mode.ack */\n this.debugInfo = { remoteLabel: '<unknown>' };\n this.mode = {\n ack: false,\n concat: false,\n };\n\n assert(DcpURL.isURL(this.url));\n assert(typeof options === 'object');\n\n this.options = this.buildOptions(options);\n\n if (!newSocket) /* Initiator */\n {\n this.debugLabel = `socketio(i:${++initiatorSeq}):`;\n this.rxReady = true; /* relax, said the night man */\n this.debugInfo.remoteLabel = config.location.href;\n if (this.options.disableModeAnnouncement !== false)\n this.sendModeList();\n this.sendMaxMessageInfo();\n\n bearer.a$isFriendlyUrl(config.friendLocation).then((useFriendLocation) => {\n const connectUrl = useFriendLocation ? config.friendLocation : config.location;\n\n debugging('socketio') && console.log(this.debugLabel, 'connecting to', connectUrl.href);\n this.socket = (__webpack_require__(/*! 
socket.io-client */ \"./node_modules/socket.io-client/build/cjs/index.js\").connect)(connectUrl.origin, this.options);\n this.socket.on('connect_error', (error) => !this.isClosed && this.handleConnectErrorEvent(error));\n this.socket.on('connect', () => !this.isClosed && this.handleConnectEvent());\n\n useSocket();\n }).catch((error) => {\n debugging() && console.error(this.debugLabel, error);\n this.emit('error', error);\n });\n }\n else /* Target - receives socketio instance from SocketIOListener.handleConnectionEvent */\n {\n if (dcpEnv.platform !== 'nodejs')\n throw new Error('socketio: target mode only supported in nodejs');\n\n this.debugLabel = 'socketio(t:<new>):';\n\n this.socket = newSocket;\n this.debugInfo.remoteLabel = this.socket.handshake.address.replace(/^::ffff:/,'');\n debugging('socketio') && console.debug(this.debugLabel, `new socket from ${this.debugInfo.remoteLabel}`\n + ` on ${this.socket.handshake && this.socket.handshake.url}, ${this.socket.id}`);\n\n useSocket();\n }\n\n function useSocket()\n {\n that.socket.compress(dcpConfig.build !== 'debug'); /* try to keep traffic sniffable in debug */\n that.socket.on('message', (msg) => !that.isFinal && that.handleMessageEvent(msg));\n that.socket.on('disconnect', (reason) => !that.isClosed && that.handleDisconnectEvent(reason));\n }\n }\n\n /**\n * API - determine if a transport is usable or not.\n * @returns true if the transport is not closed.\n */\n ready()\n {\n return !this.isClosed;\n }\n \n /**\n * Handle the socket.io disconnect event, which is fired upon disconnection.\n * For some reasons (explicit disconnection), the socket.io code will not try to reconnect, but\n * in all other cases, the socket.io client will normally wait for a small random delay and then \n * try to reconnect, but in our case, we simply tear the transport down and let the Connection\n * class handle reconnects, since it might prefer a different transport at this point anyhow.\n *\n * One tricky case here is ping timeout, since the ping timeout includes the time to transmit the\n * packet before it was sent.\n *\n * @param {string} reason Possible reasons for disconnection.\n */\n handleDisconnectEvent(reason)\n {\n debugging('socketio') && console.debug(this.debugLabel, `disconnected from ${this.debugInfo.remoteLabel}; reason=${reason}`);\n\n if (this.isClosed !== true)\n setImmediate(() => this.#close(reason));\n\n switch(reason)\n {\n case 'io client disconnect':\n return; /* we called socket.disconnect() from another \"thread\" */\n case 'io server disconnect':\n case 'ping timeout':\n case 'transport close':\n case 'transport error':\n this.emit('end', reason);\n /* fallthrough */\n default:\n }\n }\n\n /**\n * Handle a socketio message from the peer.\n *\n * Most of the time, a socketio message contains a DCP message, however we support other message\n * types as well, with a mechanism inspired by 3GPP TS 23.040 (GSM 03.40) message concatenation.\n */\n handleMessageEvent(msg)\n {\n if (typeof msg !== 'string')\n {\n debugging('socketio') && console.debug(this.debugLabel, `received ${typeof msg} message from peer`, this.debugInfo.remoteLabel);\n return;\n }\n\n try\n {\n switch (msg[0])\n {\n case '{': /* all DCP Messages are JSON */\n this.processMessage(msg);\n break;\n case 'E':\n this.processExtendedMessage(msg);\n break;\n case 'A':\n this.processAcknowledgement(msg);\n break;\n case 'T':\n debugging('socketio') && console.debug('Received debug message:', msg);\n break;\n default:\n /* adhoc messages not supported in this 
transport */\n throw new DCPError(this.debugLabel, `Unrecognized message type indicator from ${this.debugInfo.remoteLabel} (${msg.charCodeAt(0)})`, 'DCPC-1102');\n }\n }\n catch(error)\n {\n debugging('socketio') && console.debug(this.debugLabel, 'handleMessageEvent: transport error, closing\\n ', error);\n this.emit('error', error);\n setImmediate(() => this.#close(error.message));\n }\n }\n\n /**\n * Process an ordinary message. This is just a hunk of JSON that gets passed up to the connection.\n * @param {string} msg\n */\n processMessage(msg)\n {\n this.rxPending.push(msg);\n this.emitReadyMessages();\n if (this.mode.ack)\n this.socket.send('A0000-ACK'); /* Ack w/o header = normal message */\n }\n\n /**\n * Remote has acknowledged the receipt of a message fragment. When in ack mode, this\n * triggers us to send more messages from the txPending queue. We always ack event when \n * not in ack-mode. See processExtendedMessage() comment for header description.\n *\n * Future: this is where maxInFlight might be tuned, based on how fast we get here\n *\n * @param {string} _msg\n */\n processAcknowledgement(_msg)\n {\n if (this.txNumInFlight > 0)\n this.txNumInFlight--;\n else\n {\n if (this.options.strict || !this.mode.ack)\n throw new DCPError(this.debugLabel, `Synchronization error: received unexpected acknowledgement message from ${this.debugInfo.remoteLabel}`, 'DCPC-1109');\n else\n this.mode.ack = true;\n }\n \n this.drainTxPending_soon();\n }\n\n /**\n * Extended messages have the following format:\n * Octet(s) Description\n * 0 E (69) or A (65)\n * 1..4 Header size (N) in hexadecimal\n * 5 Header Type\n * C (67) => concatenation\n * 6..6 + N: Header\n *\n * Upon receipt, extended messages are acknowledged by retransmitting their header, but\n * with the first character changed from E to A.\n *\n * @param {string} msg\n */\n processExtendedMessage(msg)\n {\n const headerSize = parseInt(msg.slice(1,5), 16);\n const headerType = msg[5];\n const header = msg.slice(6, 6 + headerSize);\n\n switch(headerType)\n {\n case 'M':\n this.processExtendedMessage_mode(header, msg.slice(6 + headerSize));\n break;\n case 'B':\n this.processExtendedMessage_maxBufferSize(msg.slice(6 + headerSize));\n break;\n case 'C':\n this.processExtendedMessage_concatenated(header, msg.slice(6 + headerSize));\n break;\n default:\n if (this.options.strict)\n throw new DCPError(`Unrecognized extended message header type indicator (${msg.charCodeAt(5)})`, 'DCPC-1101');\n }\n\n if (this.mode.ack)\n this.socket.send('A' + msg.slice(1, 6 + headerSize));\n }\n\n /**\n * We have received a fragment of a concatenation extended message. Memoize the fragment, and\n * if this is the last fragment, transform the fragments list into a pending message string and\n * then emits all ready messages (pending message strings).\n *\n * This code is capable of handling messages whose fragments arrive out of order. Complete messages \n * are emitted in the order that their first parts are received. Socketio is supposed to be total\n * order-aware but I wanted a safety belt for intermingled messages without occupying the entire\n * socketio network buffer on the first pass of the event loop. 
Because the first message fragment\n * is emitted to socketio on the same event loop pass as their Connection::send calls, I believe\n * that are no cases where messages will be emitted out of order at the other end, even when \n * intermingled on the wire, and the Nth message is much longer than any (N+k)th messages.\n *\n * @param {object} header the extended message header\n * @param {string} payload the payload of the extended message, i.e. a message fragment\n */\n processExtendedMessage_concatenated(header, payload)\n {\n try\n {\n header = JSON.parse(header);\n }\n catch(e)\n {\n throw new DCPError(`invalid extended message header '${header}'`, 'DCPC-1103');\n }\n\n if (!this.options.strict && !this.mode.concat)\n this.mode.concat = true;\n \n if (!(header.total <= this.options.maxFragmentCount))\n throw new DCPError('excessive message fragmentation', 'DCPC-1107'); /* make it more difficult for attacker to force OOM us */\n\n if (!(header.seq < header.total))\n throw new DCPError(`corrupt header for part ${header.seq + 1} of ${header.total} of message ${header.msgId}`, 'DCPC-1108');\n \n /* First fragment received, initial data structures and memoize the message's arrival order via pending list */\n if (!this.rxFragments[header.msgId])\n {\n const fragments = [];\n this.rxFragments[header.msgId] = fragments;\n this.rxPending.push(fragments);\n }\n\n this.rxFragments[header.msgId][header.seq] = payload;\n debugging('socketio') && console.debug(this.debugLabel, 'received part', header.seq + 1, 'of', header.total, 'for message', header.msgId);\n\n if (header.total !== this.rxFragments[header.msgId].filter(el => el !== undefined).length)\n return;\n\n debugging('socketio') && console.debug(this.debugLabel, 'received all parts of message', header.msgId);\n \n const idx = this.rxPending.indexOf(this.rxFragments[header.msgId])\n this.rxPending[idx] = this.rxFragments[header.msgId].join('');\n delete this.rxFragments[header.msgId];\n this.emitReadyMessages();\n }\n\n processExtendedMessage_mode(_header, payload)\n {\n var modeList;\n \n try\n {\n modeList = JSON.parse(payload);\n }\n catch(e)\n {\n throw new DCPError(`invalid mode message payload '${payload}'`, 'DCPC-1105');\n }\n\n for (let mode of modeList)\n {\n debugging('socketio') && console.debug(this.debugLabel, ` - remote supports ${mode} mode`); \n switch(mode)\n {\n case 'ack':\n this.mode.ack = true;\n break;\n case 'concat':\n this.mode.concat = true;\n break;\n default:\n debugging('socketio') && console.debug(this.debugLabel, ' - remote requested unrecognized mode:', mode);\n break;\n }\n }\n\n // true if we are connecting a new transport, expecting the initial mode reply, and have received the target's buffer size\n if (this.initTimeout && this.bufferSizeSet)\n {\n clearTimeout(this.initTimeout);\n delete this.initTimeout;\n this.emit('connect');\n }\n this.modeSet = true;\n this.sendModeList();\n }\n \n processExtendedMessage_maxBufferSize(payload)\n {\n var receivedOptions = JSON.parse(payload); \n \n this.options.txMaxFragmentSize = receivedOptions.maxFragmentSize;\n this.options.txMaxFragmentCount = receivedOptions.maxFragmentCount;\n \n if (receivedOptions.txMaxInFlight < this.txMaxInFlight)\n this.txMaxInFlight = receivedOptions.txMaxInFlight;\n if (this.options.maxFragmentSize < 0)\n throw new DCPError(`maxFragmentSize was set to < 1, (${this.options.txMaxFragmentSize}). 
Server requested bad fragment size, cannot connect.`, 'DCPC-1111');\n if (this.txMaxInFlight < 1)\n throw new DCPError(`txMaxInFlight was set to < 1, (${this.txMaxInFlight}). Cannot send messages with fewer than 1 in flight.`, 'DCPC-1111');\n \n // true if we are connecting a new transport, expecting the initial buffer reply, and have received the target's mode designation\n if (this.initTimeout && this.modeSet)\n {\n clearTimeout(this.initTimeout);\n delete this.initTimeout;\n this.emit('connect');\n }\n this.bufferSizeSet = true;\n\n this.sendMaxMessageInfo(); \n }\n\n /* Internal method to tell peer what we can receive */\n sendModeList()\n {\n var rawMessage;\n var modeList = [];\n\n if (this.sentModeList)\n return;\n\n this.sentModeList = true;\n this.options.enableModeAck == true && modeList.push('ack');\n this.options.enableModeConcat !== false && modeList.push('concat');\n rawMessage = 'E0000M' + JSON.stringify(modeList);\n\n debugging('socketio') && console.debug(`${this.debugLabel} sending mode list to ${this.debugInfo.remoteLabel}`)\n if (this.socket)\n this.socket.send(rawMessage);\n else\n this.txPending.unshift({ message: rawMessage, msgSequence: ++this.msgSequence});\n }\n \n /* Internal method to exchange desired fragment size/max inflight messages/max fragments per message */\n sendMaxMessageInfo()\n {\n if (this.sentMessageInfo)\n return;\n\n /* Calculate the optimal maxFragmentSize based on maxHttpBufferSize.\n * Currently unsure of exactly what happens when socketio serializes our serialized\n * JSON. I suspect we may double-encode UTF-8 > 0x80. The largest UTF-8 character is\n * 5 bytes...so we ensure the maxFragmentSize is (maxHttpBufferSize / 5) - (fragmentOverhead * 5)\n * to allow for worst-case scenario\n */\n const fragmentOverhead = 1000 + 64 * 1024; /* derived from concatenation algorithm */\n\n let desiredMaxFragmentSize;\n if (this.options.maxHttpBufferSize)\n desiredMaxFragmentSize = (this.options.maxHttpBufferSize / 5) - (fragmentOverhead * 5);\n else\n desiredMaxFragmentSize = this.options.maxFragmentSize;\n const options = {\n maxFragmentSize: desiredMaxFragmentSize,\n maxFragments: this.options.maxFragmentCount,\n txMaxInFlight: this.txMaxInFlight,\n };\n\n const message = 'E0000B' + JSON.stringify(options);\n\n debugging('socketio') && console.debug(`${this.debugLabel} sending max http buffer size to ${this.debugInfo.remoteLabel}`);\n if (this.socket)\n this.socket.send(message);\n else\n this.txPending.push({ message: message, msgSequence: ++this.msgSequence });\n \n this.sentMessageInfo = true;\n }\n\n /**\n * Emit message events so that the connection can process them. The pending property is an\n * array which is used to order messages, so that they are emitted in the order they were\n * sent. 
There are edge cases in the concatenation code where a sender could theoretically\n * interleave multiple messages, and have a second, shorter, message fully received before \n * the first is fully transmitted.\n *\n * The pending property stores only strings or objects; objects are effectively positional\n * memoes which are turned into strings when they are ready to be emitted to the connection\n * for processing.\n */\n emitReadyMessages()\n {\n while (this.rxReady && this.rxPending.length && typeof this.rxPending[0] === 'string')\n this.emit('message', this.rxPending.shift());\n }\n \n /** \n * Close the current instance of Socket.\n *\n * This function effectively finalizes the socket so that it can't be used any more and\n * (hopefully) does not entrain garbage. A 'close' event is emitted at the end of socket\n * finalization and should be hooked by things that might entrain this transport instance to free\n * up memory, like maybe a list of active transports, etc.\n *\n * We add few extra hops on/off the event loop here to try and clean up any messages halfway in/out\n * the door that we might want to process. That happens between the setting of the isClosed and \n * isFinal flags.\n *\n * @param {string} reason\n */\n #close(reason)\n {\n const that = this;\n debugging('socketio') && console.debug(this.debugLabel, 'closing connection' + (reason ? ` (${reason})` : ''));\n\n function finalize(socket)\n {\n if (that.isFinal)\n return;\n\n if (socket)\n socket.removeAllListeners();\n that.isFinal = true;\n\n /* Free up memory that might be huge in case something entrains us */\n that.rxPending.length = 0;\n that.txPending.length = 0;\n for (let msgId in that.rxFragments)\n that.rxFragments[msgId].length = 0;\n }\n\n function closeSocket()\n {\n const socket = that.socket;\n \n try\n {\n if (socket)\n socket.disconnect();\n that.isClosed = true;\n that.emit('close', reason);\n delete that.socket;\n }\n finally\n {\n finalize(socket);\n }\n }\n\n if (!this.isClosed)\n {\n /* If we called close, drain any pending messages and then disconnect */\n if (reason === 'api-call')\n {\n this.drainTxPending();\n setImmediate(()=>closeSocket());\n }\n else\n closeSocket();\n }\n }\n\n /* API - close the transport, free up memory. */\n close()\n {\n assert(arguments.length === 0)\n this.#close('api-call');\n }\n\n /**\n * Handle the socketio connect_error event, which is fired when:\n * - the low-level connection cannot be established\n * - the connection is denied by the server in a middleware function\n *\n * In the first case, socket.io will automatically try to reconnect, after a delay,\n * except that will cancel that reconnection attempt here by killing the transport\n * instance so that the Connection class can try failing over to an alternate transport.\n *\n * @param {Error} error optional instance of error describing the underlying reason for the\n * connect failure. 
If the underlying reason is expressible by an\n * HTTP status code, this code will be placed as a Number in the\n * description property.\n */\n handleConnectErrorEvent(error)\n {\n debugging('socketio') && console.debug(this.debugLabel, `unable to connect to ${this.url}`, error);\n if (error.type === 'TransportError' && typeof error.description === 'number')\n error.httpStatus = Number(error.description);\n\n this.emit('connect-failed', error);\n this.#close(error.message);\n }\n\n /** \n * Handle the socketio connect event, which is fired by the Socket instance upon initiator\n * connection and reconnection to a target.\n */\n handleConnectEvent()\n {\n if (this.hasConnected === true)\n {\n /* this transport is not supposed to reuse connections, and we call .close() on disconnect \n * to prevent this. However, the API does not guarantee that will be race-free.\n */\n debugging('socketio') && console.debug(this.debugLabel, `*** reconnected to ${this.debugInfo.remoteLabel} (ignoring)`);\n return;\n }\n\n /* initial connection */\n this.hasConnected = true;\n debugging('socketio') && console.debug(this.debugLabel, 'connected to', this.debugInfo.remoteLabel);\n\n this.initTimeout = setTimeout(() => this.#close('no mode message received'), this.options.establishTimeout) /* give target 10 second to reply with capabilities before assuming bad connection and crashing */\n if (dcpEnv.platform === 'nodejs')\n this.initTimeout.unref();\n \n this.drainTxPending();\n }\n\n /**\n * API: Send a message to the transport instance on the other side of this connection.\n *\n * @param {string} message the message to send\n */\n send(message)\n {\n const msgSequence = ++this.msgSequence;\n\n debugging('socketio') && console.debug(this.debugLabel, `sending message ${msgSequence} to`, this.debugInfo.remoteLabel);\n debugging('socketio') && debugging('verbose') && !debugging('all') && console.debug(this.debugLabel, message);\n\n if (!this.socket)\n throw new DCPError('SocketIOTransport.send: Not connected', 'DCPC-1110');\n \n if ( false\n || !(message.length > this.options.txMaxFragmentSize)\n || !this.mode.concat)\n this.txPending.push({ message, msgSequence });\n else\n { /* This is a large message. Use message concatenation to send in fragments. A failure of any\n * fragment is a failure of the entire message and will be handled at the connection layer.\n */\n let total = Math.ceil(message.length / this.options.txMaxFragmentSize);\n \n if (total > this.options.txMaxFragmentCount)\n throw new DCPError('extended message has more fragments than the peer will allow', 'DCP-1112');\n \n for (let seq=0; seq < total; seq++)\n {\n let start = seq * this.options.txMaxFragmentSize;\n let fragment = message.slice(start, start + this.options.txMaxFragmentSize);\n let header = JSON.stringify({ msgId: msgSequence, seq, total });\n let extMessage = 'E' + header.length.toString(16).padStart(4, '0') + 'C' + header + fragment;\n\n this.txPending.push({ message: extMessage, msgSequence, seq, total });\n\n /* This should be impossible, but it also effectively kills the connection. */\n if (header.length > 0xFFFF)\n throw new DCPError('extended message header too long', 'DCPC-1104');\n }\n }\n\n this.drainTxPending_soon();\n }\n\n drainTxPending_soon()\n {\n if (this.drainingSoon)\n return;\n this.drainingSoon = true;\n\n setImmediate(() => this.drainTxPending());\n }\n \n /**\n * Drain messages from the txPending queue out to socketio's network buffer. 
We throw away traffic\n * when it can't be sent; this is no different from packets already on the wire when the other end\n * has changed IP number, etc.\n *\n * In ack mode, this drain is controlled by acknowledgement messages so that socketio can have the\n * opportunity to insert ping/pong messages in the data stream while sending a message large enough\n * to need to be sent via concatenation. [ NOTE - it appears to send ping/pong OOB in version 4.4, \n * so currently ack mode is off by default / wg Mar 2022 ]\n *\n * The reason we put non-concat messages through here is to avoid special casing a bunch of nasty\n * things for the sake of a little tiny bit of performance. Sending message<-ACK and queueing through\n * the drain code is necessary if we wind up interleaving multiple message sends in some interesting\n * edge cases; notably, fast-teardown, but a future Connection class that ran without nonces (or multiple\n * nonces) would need this, too.\n *\n * In non-ack mode, the drain is mostly uncontrolled, but we throw each message on a separate pass of\n * the event loop, to try and keep huge message blasts one connection from overwhelming a busy server,\n * as well allowing socketio the chance to send its heartbeat messages and make it less likely that it\n * will hit its http buffer size limit, because that causes an immediate disconnect.\n *\n * Non-ack-mode is compatible with previous dcp5 versions and probably a little faster.\n */\n drainTxPending()\n {\n const that = this;\n\n if (this.txPending.length)\n debugging('socketio') && console.debug(this.debugLabel, `drain tx pending queue, ${this.txPending.length} ready`\n + (this.mode.ack ? `; ${this.txMaxInFlight - this.txNumInFlight} slots available` : ''));\n delete this.drainingSoon;\n\n if (this.isClosed)\n return;\n \n for (let i=0;\n !this.isClosed && this.txPending.length && (this.txNumInFlight < this.txMaxInFlight || !this.mode.ack);\n i++)\n {\n const pendingElement = this.txPending.shift();\n\n if (i === 0)\n writeToSocket(pendingElement);\n else\n setImmediateN(() => writeToSocket(pendingElement), i);\n\n if (this.mode.ack)\n this.txNumInFlight++;\n\n if (this.txPending.length === 0) /* just sent the last message */\n this.emit('drain');\n }\n\n if (this.txPending.length)\n this.drainTxPending_soon();\n \n function writeToSocket(pendingElement)\n {\n if (that.isClosed)\n return;\n\n const { message, msgSequence, seq, total } = pendingElement;\n that.socket.send(message);\n\n if (typeof seq !== 'undefined')\n debugging('socketio') && console.debug(that.debugLabel, `sent fragment ${seq + 1}/${total} of message ${msgSequence} (${message.length} bytes)`);\n else\n debugging('socketio') && console.debug(that.debugLabel, `sent message ${msgSequence} (${message.length} bytes)`);\n }\n }\n}\nSocketIOTransport.prototype.buildOptions = buildOptions;\n\n/** \n * Build the socketio options object, based on the passed-in options, dcp debug state, and internal\n * object state. Method shared between SocketIOTransport and SocketIOListener.\n */\nfunction buildOptions(options)\n{\n options = leafMerge(\n /* Baked-in Defaults */\n ({\n autoUnref: dcpEnv.platform === 'nodejs' ? 
true : undefined, /* socket.io + webpack 5 { node:global } work-around */\n perMessageDeflate: dcpConfig.build !== 'debug',\n maxFragmentCount: 1000, /* used to limit memory on receiver */\n maxFragmentSize: 1e6, /* bytes */\n txMaxFragmentCount:1000, /* peer's limit for fragments */\n txMaxFragmentSize: 1e6, /* peer's limit for fragment size */\n maxHttpBufferSize: false, /* bytes; false = auto */\n establishTimeout: 10 * 1000, /* 10 seconds */\n pingTimeout: 30 * 1e3, /* s */\n pingInterval: 90 * 1e3, /* s */\n transports: ['polling', 'websocket'],\n upgrade: true,\n rememberUpgrade: true,\n autoConnect: true,\n cors: {\n origin: '*',\n methods: ['GET', 'POST']\n },\n }),\n options,\n /* Absolutely mandatory */\n ({\n path: this.url.pathname // eslint-disable-line no-invalid-this\n }));\n\n /* draw out errors quickly in dev */\n if ((process.env.DCP_NETWORK_CONFIG_BUILD || dcpConfig.build) === 'debug')\n {\n /* short timeouts and debuggers don't get along well */\n if (dcpEnv.platform === 'nodejs' && !(requireNative('module')._cache.niim instanceof requireNative('module').Module))\n {\n options.pingTimeout /= 5;\n options.pingInterval /= 5;\n }\n }\n\n return options;\n}\n \n/**\n * @constructor\n * API - Factory which creates a new socketio listener, or throws. Emits 'listening' if the server \n * parameter is not provided and we create it, once it is ready for connections.\n * \n * @bug It is possible for a connection event to sneak in before the 'listening' event is fired, due to \n * the two-step event trigger. The right fix is to proxy the event, but dcp-events::event-emitter.proxy\n * itself currently has the same bug. For now, we just have callbacks in API. /wg apr 2022\n * \n * @param {object} config dcpConfig fragment - has .listen and .location\n * @param {object} target the instance of Target to bind to\n * @param {object} options [optional] A DCP config options variable (like dcpConfig.dcp.listenOptions)\n */\nfunction SocketIOListener(config, target, options)\n{\n var matchRegex;\n \n this.url = config.location;\n this.options = this.buildOptions(options);\n this.connSequence = 0;\n this.seq = ++listenerSeq;\n this.debugLabel = `socketio(l:${this.seq}):`;\n this.isShutdown = false;\n\n if (dcpEnv.platform !== 'nodejs')\n throw new Error('target mode only supported in nodejs');\n\n target.httpServer.on('listening', () => this.emit('listening', target.httpServer));\n\n matchRegex = '^' + this.url.pathname;\n if (matchRegex.slice(-1) !== '/')\n matchRegex += '\\\\/';\n else\n matchRegex = matchRegex.slice(0, -1) + \"\\\\/\"\n\n matchRegex += '\\\\?EIO=[0-9]';\n debugging('socketio') && console.debug(this.debugLabel, `expecting requests to match regex ${matchRegex}`);\n\n target.all(new RegExp(matchRegex), (request) => {\n /* Socket.io does this intrinsically because it plays with the server's events directly,\n * but we \"handle\" it explicitly here so that we don't trip the 404 handler. \n */\n debugging('socketio') && console.debug(this.debugLabel, 'Handling', request.url);\n }, true);\n\n /* While we normally want to inherit options from parent to child socket via this.options,\n * we don't want this to happen for maxHttpBufferSize due to the way it is used and calculated\n * in the transport constructor. 
A buffer that is too small (eg false) will cause us to not\n * recognized socket.io SIDs.\n */\n \n this.options.maxHttpBufferSize = 1e7;\n \n const socketOptions = this.options;\n\n this.socketServer = requireNative('socket.io')(target.httpServer, socketOptions);\n this.socketServer.on('connection', (socket) => !this.isShutdown && this.handleConnectionEvent(config, socket));\n debugging() && console.debug(this.debugLabel, 'Socketio listener initialized for path', this.options.path);\n}\nSocketIOListener.prototype = new EventEmitter('socketio(l)');\nSocketIOListener.prototype.buildOptions = buildOptions;\n\nSocketIOListener.prototype.close = function SocketIOListener$$close()\n{\n if (!this.isShutdown)\n this.shutdown();\n this.socketServer.close(() => this.emit('close'));\n}\n\n/** Stop accepting new connections */\nSocketIOListener.prototype.shutdown = function SocketIOListener$$shutdown()\n{\n this.isShutdown = true;\n}\n\n/** Remove any event loop references used by this transport instance */\nSocketIOListener.prototype.unref = function SocketIOListener$$unref()\n{\n void false;\n}\n\n/**\n * Handle the socketio connection event, which is fired upon a connection from client.\n * Used by Target->SocketIOListener to create transport instances.\n *\n * It is expected that code hooking the 'connection' event will hook the 'message' in\n * the same pass of the event loop as it is invoked; otherwise, messages may be missed.\n *\n * @param {object} socket a new connection emitted by the socket.io server\n */\nSocketIOListener.prototype.handleConnectionEvent = function SocketIOListener$$handleConnectionEvent(config, socket)\n{\n var transport = new SocketIOTransport(config, this.options, socket);\n\n this.url = config.location || config.listen;\n this.connSequence++;\n transport.debugLabel = `socketio(t:${this.seq}.${this.connSequence}):`\n\n /* Ensure we don't emit any message events until the application or Connection class\n * has had the chance to set up message handlers via the connection event.\n */\n transport.rxReady = false;\n this.on('connection', () => setImmediate(() => {\n transport.rxReady = true;\n transport.emitReadyMessages();\n }));\n\n this.emit('connection', transport);\n}\n\n/* Define API */\nexports.TransportClass = SocketIOTransport;\nexports.Listener = SocketIOListener;\n\n\n//# sourceURL=webpack://dcp/./src/protocol-v4/transport/socketio.js?");
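The send() and processExtendedMessage_concatenated() methods in the socketio transport above fragment oversized DCP messages using the extended-message format documented in processExtendedMessage(): octet 0 is 'E' (or 'A' for an acknowledgement), octets 1..4 give the header length in hexadecimal, octet 5 is the header type ('C' for concatenation), and the JSON header {msgId, seq, total} is followed by the payload fragment. Below is a minimal standalone sketch of that framing, not part of the dcp-client API; frameMessage, ackFor, and the 16-character fragment size are illustrative only, and the real transport only sends acknowledgements when ack mode has been negotiated.

// Illustrative sketch of the concatenation framing described above (not dcp-client code).
function frameMessage(message, msgId, maxFragmentSize) {
  const total = Math.ceil(message.length / maxFragmentSize);
  const frames = [];
  for (let seq = 0; seq < total; seq++) {
    const start = seq * maxFragmentSize;
    const fragment = message.slice(start, start + maxFragmentSize);
    const header = JSON.stringify({ msgId, seq, total });
    // 'E' + 4 hex digits of header length + 'C' + JSON header + payload fragment
    frames.push('E' + header.length.toString(16).padStart(4, '0') + 'C' + header + fragment);
  }
  return frames;
}

// The receiver acknowledges an extended message by echoing its header with 'A' as the first octet.
function ackFor(extendedMessage) {
  const headerSize = parseInt(extendedMessage.slice(1, 5), 16);
  return 'A' + extendedMessage.slice(1, 6 + headerSize);
}

const frames = frameMessage('{"operation":"ping","payload":"hello!!"}', 1, 16);
console.log(frames.length);     // 3
console.log(frames[0]);         // E001dC{"msgId":1,"seq":0,"total":3}{"operation":"pi
console.log(ackFor(frames[0])); // A001dC{"msgId":1,"seq":0,"total":3}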
4720
4720
 
4721
4721
  /***/ }),
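Before large messages flow, the transport above also exchanges capability frames: sendModeList() emits 'E0000M' followed by a JSON array of supported modes ('ack', 'concat'), and sendMaxMessageInfo() emits 'E0000B' followed by a JSON object of fragment limits; processExtendedMessage() dispatches on octet 5. The decoder below is an illustrative sketch only (parseExtendedFrame is a made-up name, and the sample limits simply echo the defaults visible in buildOptions() and the constructor above); it is not dcp-client code.

// Illustrative-only decoder for the capability frames shown above
// ('E' + 4 hex digits of header length + type octet + header + payload).
function parseExtendedFrame(msg) {
  const headerSize = parseInt(msg.slice(1, 5), 16);
  return {
    kind: msg[0],                         // 'E' = extended message, 'A' = acknowledgement
    type: msg[5],                         // 'M' = mode list, 'B' = buffer size, 'C' = concatenation
    header: msg.slice(6, 6 + headerSize), // empty for M/B frames (header size 0000)
    payload: msg.slice(6 + headerSize),
  };
}

console.log(parseExtendedFrame('E0000M' + JSON.stringify(['ack', 'concat'])));
// { kind: 'E', type: 'M', header: '', payload: '["ack","concat"]' }
console.log(parseExtendedFrame('E0000B' + JSON.stringify({ maxFragmentSize: 1e6, maxFragments: 1000, txMaxInFlight: 2 })));
// { kind: 'E', type: 'B', header: '', payload: '{"maxFragmentSize":1000000,"maxFragments":1000,"txMaxInFlight":2}' }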
4722
4722
 
@@ -4800,7 +4800,7 @@ eval("/**\n * @file utils/http.js Helper module for things rel
4800
4800
  /***/ ((module, exports, __webpack_require__) => {
4801
4801
 
4802
4802
  "use strict";
4803
- eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/* provided dependency */ var Buffer = __webpack_require__(/*! ./node_modules/node-polyfill-webpack-plugin/node_modules/buffer/index.js */ \"./node_modules/node-polyfill-webpack-plugin/node_modules/buffer/index.js\")[\"Buffer\"];\n/**\n * @file src/utils/index.js\n * @author Ryan Rossiter\n * @date Feb 2020\n *\n * Place to put little JS utilities. If they are more than a few lines, please `require` and `export` \n * them instead of making this monolithic.\n */\n\n\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst schedulerConstants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\n\n module.exports.paramUtils = __webpack_require__(/*! ./assert-params */ \"./src/utils/assert-params.js\");\n module.exports.httpUtils = __webpack_require__(/*! ./http */ \"./src/utils/http.js\");\n module.exports.serialize = __webpack_require__(/*! ./serialize */ \"./src/utils/serialize.js\");\n Object.assign(exports, __webpack_require__(/*! ./web-format-date */ \"./src/utils/web-format-date.js\"));\n Object.assign(exports, __webpack_require__(/*! ./confirm-prompt */ \"./src/utils/confirm-prompt.js\"));\n Object.assign(exports, __webpack_require__(/*! ./message-to-buffer */ \"./src/utils/message-to-buffer.js\"));\n\n/** @typedef {import('dcp/dcp-client/worker/slice').Slice} Slice */\n/** @typedef {import('dcp/dcp-client/worker/sandbox').Sandbox} Sandbox */\n\n /**\n * Writes object properties into another object matching shape.\n * For example: if obj = { a: { b: 1, c: 2}}\n * and source = { a: {c: 0, d: 1}}\n * then obj will be updated to { a: { b: 1, c: 0, d: 1 } }\n * compare this to Object.assign which gives { a: { c: 0, d: 1 } }\n */\nconst setObjProps = module.exports.setObjProps = (obj, source) => {\n for (let p in source) {\n if (typeof source[p] === 'object') setObjProps(obj[p], source[p]);\n else obj[p] = source[p];\n }\n}\n\n\n/**\n * Generates a new random opaqueId i.e. a 22-character base64 string.\n * Used for job and slice ids.\n */\nmodule.exports.generateOpaqueId = function utils$$generateOpaqueId()\n{\n if (!utils$$generateOpaqueId.nanoid)\n {\n const nanoidModule = (DCP_ENV.platform === 'nodejs') ? requireNative('nanoid') : __webpack_require__(/*! 
nanoid */ \"./node_modules/nanoid/index.browser.js\");\n utils$$generateOpaqueId.nanoid = nanoidModule.customAlphabet('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+', schedulerConstants.workerIdLength);\n }\n\n return utils$$generateOpaqueId.nanoid();\n}\n\n/**\n * Accepts an object and a list of properties to apply to the object to retreive a final value.\n * E.g.\n * object = { a: { b: { c: 4 } } }\n * listOfProperties = [\"a\", \"b\", \"c\"]\n * return = 4\n */\nmodule.exports.getNestedFromListOfProperties = function (object, listOfProperties) {\n return listOfProperties.reduce((o, k) => {\n if (typeof o !== 'object') return undefined;\n return o[k];\n }, object);\n}\n\n/**\n * Accepts an object and a dot-separated property name to retrieve from the object.\n * E.g.\n * object = { a: { b: { c: 4 } } }\n * property = 'a.b.c'\n * return = 4\n *\n * @param {object} object \n * @param {string} property \n */\nmodule.exports.getNestedProperty = function (object, property) {\n if (typeof property !== 'string') return undefined;\n return module.exports.getNestedFromListOfProperties(object, property.split(\".\"));\n}\n\n/**\n * Accepts an object and a list of properties and a value and mutates the object\n * to have the specified value at the location denoted by the list of properties.\n * Similar to getNestedFromListOfProperties, but sets instead of gets.\n */\nmodule.exports.setNestedFromListOfProperties = function (object, listOfProperties, value) {\n if(!listOfProperties.length || listOfProperties.length < 1) {\n throw new Error(\"listOfProperties must be an array of length >= 1\");\n }\n const indexOfLastProp = listOfProperties.length - 1;\n const pathToParent = listOfProperties.slice(0, indexOfLastProp);\n const parent = module.exports.getNestedFromListOfProperties(object, pathToParent);\n if(!parent) {\n throw new Error(\"Could not find value at:\", pathToParent, \"in object:\", object);\n }\n const lastProperty = listOfProperties[indexOfLastProp];\n parent[lastProperty] = value;\n}\n\n/**\n * Block the event loop for a specified time\n * @milliseconds the number of milliseconds to wait (integer)\n */\nexports.msleep = function utils$$msleep(milliseconds) {\n try\n {\n let sab = new SharedArrayBuffer(4);\n let int32 = new Int32Array(sab);\n Atomics.wait(int32, 0, 0, milliseconds);\n }\n catch(error)\n {\n console.error('Cannot msleep;', error);\n }\n}\n\n/**\n * Block the event loop for a specified time\n * @seconds the number of seconds to wait (float)\n */\nexports.sleep = function utils$$sleep(seconds) {\n return exports.msleep(seconds * 1000);\n}\n\n/** \n * Resolve a promise after a specified time.\n * \n * @param {number} ms the number of milliseconds after which to resolve the promise.\n * @returns Promise with an extra property, intr(). Calling this function causes the promise\n * to resolve immediately. 
If the promise was interrupted, it will resolve with false;\n * otherwise, true.\n */\nexports.a$sleepMs = function a$sleepMs(ms)\n{\n var interrupt;\n \n const ret = new Promise((resolve, reject) => {\n var resolved = false;\n const timerHnd = setTimeout(() => { resolved=true; resolve(false) }, ms);\n function a$sleepMs_intr()\n {\n clearTimeout(timerHnd);\n if (!resolved)\n resolve(true);\n }\n \n interrupt = a$sleepMs_intr;\n });\n\n ret.intr = () => interrupt();\n return ret;\n}\n\n/** \n * @see: a$sleepMs\n * @param {number} ms the number of milliseconds after which to resolve the promise.\n */\nexports.a$sleep = function a$sleep(seconds) {\n return exports.a$sleepMs(seconds * 1000);\n}\n\n/** \n * Returns the number of millisecond in a time expression.\n * @param s {number} The number of seconds\n * @returns {number}\n */\n/**\n * @param s {string} A complex time expression using m, w, d, s, h. '10d 6h 1s' means 10 days, 6 hours, and 1 second.\n */\nexports.ms = function utils$$ms(s)\n{\n let ms = 0;\n \n if (typeof s === 'number')\n return s * 1000;\n\n assert(typeof s === 'string');\n \n for (let expr of s.match(/[0-9.]+[smhdw]/g))\n {\n let unit = expr.slice(-1);\n let value = +expr.slice(0, -1);\n\n switch(unit)\n {\n case 's': {\n ms += value * 1000;\n break;\n }\n case 'm': {\n ms += value * 1000 * 60;\n break;\n }\n case 'h': {\n ms += value * 1000 * 60 * 60;\n break;\n }\n case 'd': {\n ms = value * 1000 * 60 * 60 * 24;\n break;\n }\n case 'w': {\n ms = value * 1000 * 60 * 60 * 24 * 7;\n break;\n }\n default: {\n throw new Error(`invalid time unit ${unit}`);\n }\n }\n }\n\n return ms;\n}\n\n/**\n * Returns a percentage as a number.\n * @param n {number} this number is returned\n */\n/**\n * @param n {string} this string is converted to a number and returned; it is divided by 100 if it ends in %.\n */\nexports.pct = function utils$$pct(n)\n{\n if (typeof n === 'number')\n return n / 100;\n\n if (n.match(/%$/))\n return +n / 100;\n\n return +n;\n}\n\n/**\n * Coerce human-written or registry-provided values into Boolean in a sensisble way.\n */\nexports.booley = function utils$$booley(check)\n{\n switch (typeof check)\n {\n case 'undefined':\n return false;\n case 'string':\n return check && (check !== 'false');\n case 'boolean':\n return check;\n case 'number':\n case 'object':\n return Boolean(check);\n default:\n throw new Error(`can't coerce ${typeof check} to booley`);\n }\n}\n\ntry {\n exports.useChalk = requireNative ? requireNative('tty').isatty(0) || process.env.FORCE_COLOR : false;\n}\ncatch (error) {\n if (error.message.includes('no native require'))\n exports.useChalk = false;\n else\n throw error;\n}\n\n/** \n * Factory function which constructs an error message relating to version mismatches\n * @param {string} oldThingLabel The name of the thing that is too old\n * @param {string} upgradeThingLabel [optional] The name of the thing that needs to be upgraded to fix that; if\n * unspecified, we use oldThingLabel.\n * @param {string} newThingLabel What the thing is that is that is newer than oldThingLabel\n * @param {string} minimumVersion The minimum version of the old thing that the new thing supports. 
\n * Obstensibibly semver, but unparsed.\n * @param {string} code [optional] A code property to add to the return value\n *\n * @returns instance of Error\n */\nexports.versionError = function dcpClient$$versionError(oldThingLabel, upgradeThingLabel, newThingLabel, minimumVersion, code)\n{\n function repeat(what, len) {\n let out = '';\n while (len--)\n out += what;\n return out;\n }\n\n function bold(string) {\n if ((__webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\").platform) === 'nodejs') {\n const chalk = new requireNative('chalk').constructor({enabled: exports.useChalk});\n\n return chalk.yellow(string);\n }\n\n return string;\n }\n \n var mainMessage = `${oldThingLabel} is too old; ${newThingLabel} needs version ${minimumVersion}`;\n var upgradeMessage = bold(`Please upgrade ${upgradeThingLabel || oldThingLabel}${repeat(\" \", mainMessage.length - 15 - upgradeThingLabel.length)}`);\n var error = new Error(`\n****${repeat('*', mainMessage.length)}****\n*** ${repeat(' ', mainMessage.length)} ***\n*** ${mainMessage} ***\n*** ${upgradeMessage} ***\n*** ${repeat(' ', mainMessage.length)} ***\n****${repeat('*', mainMessage.length)}****\n`);\n\n if (code)\n error.code = code;\n return error;\n}\n\n/*********************************************************************************************/\n\n//\n// Sandbox and Slice debugging and logging tools.\n//\n/** XXXpfr @todo Write sort w/o using promises so we can get rid of async on all the compress functions. */\n\n/**\n * Previous short form perf time stamp.\n * #Private\n * @type {Date}\n */\nlet previousTime = new Date();\n\n/**\n * Short form perf time format with timespan diff from last call.\n * @returns {string}\n */\nexports.shortTime = function util$$shortTime() {\n const currentTime = new Date();\n const diff = currentTime.getTime() - previousTime.getTime();\n previousTime = currentTime;\n return `diff=${diff}: ${currentTime.getMinutes()}.${currentTime.getSeconds()}.${currentTime.getMilliseconds()}`;\n}\n\n/**\n * Log sliceArray.\n * @param {Slice[]} sliceArray\n * @param {string} [header]\n */\nexports.dumpSlices = async function utils$$dumpSlices(sliceArray, header) {\n if (header) console.log(`\\n${header}`);\n console.log(await exports.compressSlices(sliceArray));\n}\n\n/**\n * Log sandboxArray.\n * @param {Sandbox[]} sandboxArray\n * @param {string} [header]\n */\nexports.dumpSandboxes = async function utils$$dumpSandboxes(sandboxArray, header) {\n if (header) console.log(`\\n${header}`);\n console.log(await exports.compressSandboxes(sandboxArray));\n}\n\n/**\n * If the elements of sliceArray are not unique, log the duplicates and log the full array.\n * @param {Slice[]} sliceArray\n * @param {string} [header]\n */\nexports.dumpSlicesIfNotUnique = async function utils$$dumpSlicesIfNotUnique(sliceArray, header) {\n if (!exports.isUniqueSlices(sliceArray, header))\n console.log(await exports.compressSlices(sliceArray));\n}\n\n/**\n * If the elements of sandboxArray are not unique, log the duplicates and log the full array.\n * @param {Sandbox[]} sandboxArray\n * @param {string} [header]\n */\nexports.dumpSandboxesIfNotUnique = async function utils$$dumpSandboxesIfNotUnique(sandboxArray, header) {\n if (!exports.isUniqueSandboxes(sandboxArray, header))\n console.log(await exports.compressSandboxes(sandboxArray));\n}\n\n/**\n * Checks whether the elements of sliceArray are unique and if not, log the duplicates.\n * @param {Slice[]} sliceArray\n * @param {string} [header]\n * @param {function} [log]\n * 
@returns {boolean}\n */\nexports.isUniqueSlices = function utils$$isUniqueSlices(sliceArray, header, log) {\n const slices = [];\n let once = true;\n sliceArray.forEach(x => {\n if (slices.indexOf(x) >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x) : console.log(`\\tFound duplicate slice ${x.identifier}.`);\n } else slices.push(x);\n });\n return sliceArray.length === slices.length;\n}\n\n/**\n * Checks whether the elements of sandboxArray are unique and if not, log the duplicates.\n * @param {Sandbox[]} sandboxArray\n * @param {string} [header]\n * @param {function} [log]\n * @returns {boolean}\n */\nexports.isUniqueSandboxes = function utils$$isUniqueSandboxes(sandboxArray, header, log) {\n const sandboxes = [];\n let once = true;\n sandboxArray.forEach(x => {\n if (sandboxes.indexOf(x) >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x) : console.log(`\\tFound duplicate sandbox ${x.identifier}.`);\n } else sandboxes.push(x);\n });\n return sandboxArray.length === sandboxes.length;\n}\n\n/**\n * Quck and dirty JSON serialization that ignores cycles.\n *\n * @param {*} o - entity to be serialized.\n * @param {number} [len=512] - number of string elements to return.\n * @returns {string}\n */\nexports.stringify = function _stringify(o, len = 512) {\n if (!o) return 'nada';\n let cache = [];\n const str = JSON.stringify(o, (key, value) => {\n if (typeof value === 'object' && value !== null) {\n if (cache.includes(value)) return;\n cache.push(value);\n }\n return value;\n });\n cache = null;\n return str ? str.slice(0, len) : null;\n}\n\n/**\n * Calls truncated JSON.stringify on theAnything.\n * @param {*} theAnything\n * @param {string} [header='dumpJSON']\n * @param {number} [truncationLength=512]\n */\nexports.dumpJSON = function utils$$dumpJSON(theAnything, header = 'dumpJSON: ', truncationLength = 512) {\n if (theAnything) {\n const strV = exports.stringify(theAnything, truncationLength);\n if (strV) console.log(`${header}: ${String(strV)}`);\n else console.log(`${header}:`, theAnything);\n } else console.log(`${header}:`, theAnything);\n}\n\n/**\n * Iterates over all property [key, value]-pairs of theObject and call truncated JSON.stringify on property values.\n * @param {object} theObject\n * @param {string} [header='dumpObject']\n * @param {number} [truncationLength=512]\n */\nexports.dumpObject = function utils$$dumpObject(theObject, header = 'dumpObject: ', truncationLength = 512, dumpKeys = true) {\n if (theObject) {\n if (dumpKeys) console.log(`${header}: dump the keys`, Object.keys(theObject));\n console.log(`${header}: dump the key-value entries...`);\n console.group();\n for (const [key, value] of Object.entries(theObject)) {\n if (value) {\n const strV = exports.stringify(value, truncationLength);\n if (strV) console.log(`${header}.${key}: ${String(strV)}`);\n else console.log(`${header}.${key}:`, value);\n } else console.log(`${header}.${key}:`, value);\n }\n console.groupEnd();\n }\n}\n\n/**\n * @param {object[]} jobArray\n * @param {function} functor\n * @returns {object}\n */\nexports.toJobMap = function utils$$toJobMap(jobArray, functor) {\n const jobMap = {};\n for (const x of jobArray) {\n if (!jobMap[x.jobAddress]) jobMap[x.jobAddress] = [functor(x)];\n else jobMap[x.jobAddress].push(functor(x));\n }\n return jobMap;\n}\n\n/**\n * @param {Sandbox[]} sandboxArray\n * @param {number} [digits=-1]\n * @returns {Promise<string>}\n */\nexports.compressSandboxes = async function 
utils$$compressSandboxes(sandboxArray, digits = -1) {\n const jobSandboxMap = exports.toJobMap(sandboxArray, sbx => sbx.id);\n return exports.compressJobMap(jobSandboxMap, false /* skipFirst*/, digits);\n}\n\n/**\n * @param {Slice[]} sliceArray\n * @param {number} [digits=-1]\n * @returns {Promise<string>}\n */\nexports.compressSlices = async function utils$$compressSlices(sliceArray, digits = -1) {\n const jobSliceMap = exports.toJobMap(sliceArray, slice => slice.sliceNumber);\n return exports.compressJobMap(jobSliceMap, false /* skipFirst*/, digits);\n}\n\n/**\n * @param {object} jobMap\n * @param {boolean} [skipFirst=false]\n * @param {number} [digits=-1]\n * @returns {Promise<string>}\n */\nexports.compressJobMap = async function utils$$compressJobMap(jobMap, skipFirst = false, digits = -1) {\n return exports.compressJobArray(Object.entries(jobMap), skipFirst, digits);\n}\n\n/**\n * @param {object[]} jobArray\n * @param {boolean} [skipFirst=false]\n * @param {number} [digits=-1]\n * @returns {Promise<string>}\n */\nexports.compressJobArray = async function utils$$compressJobArray(jobArray, skipFirst = false, digits = -1) {\n let output = '';\n for (let k = 0; k < jobArray.length; k++) {\n output += await exports.compressJobValue(jobArray[k], skipFirst, digits);\n }\n return output;\n}\n\n/**\n * @param {object|object[]} jobValue\n * @param {boolean} [skipFirst=false]\n * @param {number} [digits=-1]\n * @returns {Promise<string>}\n */\nexports.compressJobValue = async function utils$$compressJobValue(jobValue, skipFirst = false, digits = -1) {\n if (jobValue.job && jobValue.slices)\n return exports.compressJobEntry(jobValue.job, (skipFirst ? jobValue.slices.slice(1) : jobValue.slices), digits);\n if (jobValue.length === 2)\n return exports.compressJobEntry(jobValue[0], (skipFirst ? jobValue[1].slice(1) : jobValue[1]), digits);\n return 'nada';\n}\n\n/**\n * @param {*} jobAddress\n * @param {Array<number|Number>} sliceNumbers\n * @param {number} [digits=-1]\n * @returns {Promise<string>}\n */\nexports.compressJobEntry = async function utils$$compressJobEntry(jobAddress, sliceNumbers, digits = -1) {\n return `${exports.truncateAddress(jobAddress, digits)}:[${await exports.compressRange(sliceNumbers)}]:`;\n}\n\nexports.compressEnhancedJobEntry = async function utils$$compressEnhancedJobEntry(job, slices, digits = -1) {\n return `${job.id}.${exports.truncateAddress(job.address, digits)}:[${await exports.compressEnhancedRange(slices)}]:`;\n}\n\n/**\n * @param {*} address\n * @param {number} [digits=-1]\n * @returns {string}\n */\nexports.truncateAddress = function utils$$truncateAddress(address, digits = -1) {\n let value = address.toString();\n if (digits < 0) return value.startsWith('0x') ? value.substring(2) : fragment;\n return value.startsWith('0x') ? 
value.substring(2, 2 + digits) : value.substring(0, digits);\n}\n\n/**\n * Input [2, 3, 4, 7, 5, 8, 9, 13, 14, 15] returns '2-5,7-9,13-15'\n * Input [2, 3, 4, 7, 5, 8, 9, 13] returns '2-5,7-9,13'\n * Input [2, 3, 4, 7, 5, 4, 4, 8, 9] returns '2-4,4,4-5,7-9'\n * @param {Array<number|Number>} numberArray\n * @returns {Promise<string>}\n */\nexports.compressRange = async function utils$$compressRange(numberArray) {\n assert(numberArray && Array.isArray(numberArray));\n await numberArray.sort((x, y) => x - y); // increasing...\n let start = numberArray[0];\n let output = `${start}`;\n for (let k = 1; k < numberArray.length; k++) {\n assert(typeof numberArray[k] === 'number' || numberArray[k] && numberArray[k].constructor.name === 'Number');\n if (numberArray[k] - numberArray[k - 1] !== 1) {\n output += (numberArray[k - 1] > start) ? `-${numberArray[k - 1]},` : ',';\n start = numberArray[k];\n output += `${start}`;\n } else if (k === numberArray.length - 1) {\n output += `-${numberArray[k]}`;\n }\n }\n return output;\n}\n\nexports.compressEnhancedRange = async function utils$$compressEnhancedRange(slices) {\n assert(slices && Array.isArray(slices));\n await slices.sort((x, y) => x.sliceNumber - y.sliceNumber); // increasing...\n let start = slices[0];\n let output = fragment(start);\n for (let k = 1; k < slices.length; k++) {\n if (slices[k].sliceNumber - slices[k - 1].sliceNumber !== 1) {\n output += (slices[k - 1].sliceNumber > start.sliceNumber) ? `-${fragment(slices[k - 1])},` : ',';\n start = slices[k];\n output += `${fragment(start)}`;\n } else if (k === slices.length - 1) {\n output += `-${fragment(slices[k])}`;\n }\n }\n return output;\n}\n\nfunction fragment(slice) {\n return `${slice.sliceNumber}.${slice.isEstimationSlice}.${slice.isLongSlice}`;\n}\n\n/*********************************************************************************************/\n\nexports.shuffle = function utils$$shuffle(jobDescriptors, partitionPortions) {\n let jobDescriptorsInEstimation = [];\n let partitionPortionsInEstimation = [];\n for (let [index, jobDescriptor] of jobDescriptors.entries()) {\n if(jobDescriptor.job.status === schedulerConstants.jobStatus.estimation) {\n jobDescriptorsInEstimation.push(jobDescriptor);\n jobDescriptors.splice(index, 1);\n partitionPortionsInEstimation.push(partitionPortions[index]);\n partitionPortions.splice(index, 1) \n }\n }\n \n let currentIndex = jobDescriptors.length, randomIndex;\n // While there remain elements to shuffle...\n while (0 !== currentIndex && jobDescriptors.length > 0) {\n\n // Pick a remaining element...\n randomIndex = Math.floor(Math.random() * currentIndex);\n currentIndex--;\n\n // And swap it with the current element.\n [jobDescriptors[currentIndex], jobDescriptors[randomIndex]] = [\n jobDescriptors[randomIndex], jobDescriptors[currentIndex]];\n\n [partitionPortions[currentIndex], partitionPortions[randomIndex]] = [\n partitionPortions[randomIndex], partitionPortions[currentIndex]];\n }\n \n return [\n [...jobDescriptorsInEstimation, ...jobDescriptors],\n [...partitionPortionsInEstimation, ...partitionPortions]\n ]\n}\n\nexports.hashGeneration = function utils$$hashGeneration(object) {\n const { sha256 } = (__webpack_require__(/*! 
dcp/dcp-client/wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\")._internalEth.util);\n return sha256(Buffer.from(JSON.stringify(object)));\n}\n\n/**\n * @typedef {object} apiClientType\n * @property {boolean} success\n * @property {*} [payload]\n * @property {DCPError} [error]\n */\n\n/**\n * @typedef {object} apiServiceType\n * @property {boolean} success\n * @property {*} [payload]\n * @property {string} [message]\n * @property {string} [stack]\n * @property {string} [code]\n */\n\n/**\n * @param {apiServiceType} payload \n * @returns {apiClientType}\n */\nexports.reconstructServiceError = function utils$$reconstructServiceError(payload) {\n assert(!payload.success);\n let ex = new DCPError(payload.message);\n ex.stack = dcpConfig.worker.allowConsoleAccess ? payload.stack : '';\n if (payload.code) ex.code = payload.code;\n return exports._clientError(ex);\n}\n\n/**\n * @param {DCPError} ex\n * @returns {apiClientType}\n */\nexports._clientError = function utils$$_clientError(ex) {\n if (dcpConfig.worker.allowConsoleAccess) console.error(ex);\n return { success: false, error: ex };\n}\n/**\n * @param {string} message\n * @returns {apiClientType}\n */\nexports.clientError = function utils$$clientError(message) {\n const ex = new DCPError(message);\n return exports._clientError(ex);\n}\n\n/**\n * @param {DCPError} ex\n * @returns {apiServiceType}\n */\nexports._serviceError = function utils$$_serviceError(ex) {\n return { success: false, message: ex.message, stack: ex.stack, code: ex.code };\n}\n/**\n * @param {string} message\n * @param {string|object} codeEx\n * @returns {apiServiceType}\n */\nexports.serviceError = function utils$$serviceError(message, codeEx) {\n const ex = new DCPError(message, codeEx);\n return exports._serviceError(ex);\n}\n\n\nif ((__webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\").platform) === 'nodejs') {\n // Assigning the properties explicitly for VSCode intellisense.\n const tmpFiles = __webpack_require__(/*! ./tmpfiles */ \"./src/utils/tmpfiles.js\");\n exports.catFile = tmpFiles.catFile;\n exports.createTempFile = tmpFiles.createTempFile;\n exports.systemTempDir = tmpFiles.systemTempDir;\n\n Object.assign(exports, __webpack_require__(/*! ./readln */ \"./src/utils/readln.js\"));\n}\nObject.assign(exports, __webpack_require__(/*! ./sh */ \"./src/utils/sh.js\"));\nObject.assign(exports, __webpack_require__(/*! ./eventUtils */ \"./src/utils/eventUtils.js\"));\nObject.assign(exports, __webpack_require__(/*! ./obj-merge */ \"./src/utils/obj-merge.js\"));\nObject.assign(exports, __webpack_require__(/*! ./make-data-uri */ \"./src/utils/make-data-uri.js\"));\nObject.assign(exports, __webpack_require__(/*! ./just-fetch */ \"./src/utils/just-fetch.js\"));\nObject.assign(exports, __webpack_require__(/*! ./inventory */ \"./src/utils/inventory.js\"));\nObject.assign(exports, __webpack_require__(/*! ./fetch-keystore */ \"./src/utils/fetch-keystore.js\"));\n\nmodule.exports.fetchURI = __webpack_require__(/*! ./fetch-uri */ \"./src/utils/fetch-uri.js\").fetchURI;\nmodule.exports.encodeDataURI = __webpack_require__(/*! ./encodeDataURI */ \"./src/utils/encodeDataURI.js\").encodeDataURI;\n\n\n//# sourceURL=webpack://dcp/./src/utils/index.js?");
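The utils/index.js module above documents several small helpers via examples in its JSDoc, e.g. getNestedProperty({a:{b:{c:4}}}, 'a.b.c') === 4, and compressRange([2, 3, 4, 7, 5, 8, 9, 13, 14, 15]) resolving to '2-5,7-9,13-15'. The snippet below restates two of those helpers as standalone sketches purely for illustration; getNestedSketch and compressRangeSketch are made-up names and are not the exported dcp-client functions.

// Dotted-path getter, mirroring the getNestedProperty() JSDoc example above.
const getNestedSketch = (object, property) =>
  property.split('.').reduce((o, k) => (o && typeof o === 'object' ? o[k] : undefined), object);

// Range compression, mirroring the compressRange() behaviour documented above
// (the real helper is async and sorts its argument in place).
function compressRangeSketch(numberArray) {
  const sorted = [...numberArray].sort((x, y) => x - y);
  let start = sorted[0];
  let output = `${start}`;
  for (let k = 1; k < sorted.length; k++) {
    if (sorted[k] - sorted[k - 1] !== 1) {
      output += (sorted[k - 1] > start) ? `-${sorted[k - 1]},` : ',';
      start = sorted[k];
      output += `${start}`;
    } else if (k === sorted.length - 1) {
      output += `-${sorted[k]}`;
    }
  }
  return output;
}

console.log(getNestedSketch({ a: { b: { c: 4 } } }, 'a.b.c'));        // 4
console.log(compressRangeSketch([2, 3, 4, 7, 5, 8, 9, 13, 14, 15])); // 2-5,7-9,13-15
console.log(compressRangeSketch([2, 3, 4, 7, 5, 8, 9, 13]));         // 2-5,7-9,13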
4803
+ eval("/* provided dependency */ var process = __webpack_require__(/*! ./node_modules/process/browser.js */ \"./node_modules/process/browser.js\");\n/* provided dependency */ var Buffer = __webpack_require__(/*! ./node_modules/node-polyfill-webpack-plugin/node_modules/buffer/index.js */ \"./node_modules/node-polyfill-webpack-plugin/node_modules/buffer/index.js\")[\"Buffer\"];\n/**\n * @file src/utils/index.js\n * @author Ryan Rossiter\n * @date Feb 2020\n *\n * Place to put little JS utilities. If they are more than a few lines, please `require` and `export` \n * them instead of making this monolithic.\n */\n\n\nconst { DCPError } = __webpack_require__(/*! dcp/common/dcp-error */ \"./src/common/dcp-error.js\");\nconst { assert } = __webpack_require__(/*! dcp/common/dcp-assert */ \"./src/common/dcp-assert.js\");\nconst { requireNative } = __webpack_require__(/*! dcp/dcp-client/webpack-native-bridge */ \"./src/dcp-client/webpack-native-bridge.js\");\nconst DCP_ENV = __webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\");\nconst schedulerConstants = __webpack_require__(/*! dcp/common/scheduler-constants */ \"./src/common/scheduler-constants.js\");\n\n module.exports.paramUtils = __webpack_require__(/*! ./assert-params */ \"./src/utils/assert-params.js\");\n module.exports.httpUtils = __webpack_require__(/*! ./http */ \"./src/utils/http.js\");\n module.exports.serialize = __webpack_require__(/*! ./serialize */ \"./src/utils/serialize.js\");\n Object.assign(exports, __webpack_require__(/*! ./web-format-date */ \"./src/utils/web-format-date.js\"));\n Object.assign(exports, __webpack_require__(/*! ./confirm-prompt */ \"./src/utils/confirm-prompt.js\"));\n Object.assign(exports, __webpack_require__(/*! ./message-to-buffer */ \"./src/utils/message-to-buffer.js\"));\n\n/** @typedef {import('dcp/dcp-client/worker/slice').Slice} Slice */\n/** @typedef {import('dcp/dcp-client/worker/sandbox').Sandbox} Sandbox */\n\n /**\n * Writes object properties into another object matching shape.\n * For example: if obj = { a: { b: 1, c: 2}}\n * and source = { a: {c: 0, d: 1}}\n * then obj will be updated to { a: { b: 1, c: 0, d: 1 } }\n * compare this to Object.assign which gives { a: { c: 0, d: 1 } }\n */\nconst setObjProps = module.exports.setObjProps = (obj, source) => {\n for (let p in source) {\n if (typeof source[p] === 'object') setObjProps(obj[p], source[p]);\n else obj[p] = source[p];\n }\n}\n\n\n/**\n * Generates a new random opaqueId i.e. a 22-character base64 string.\n * Used for job and slice ids.\n */\nmodule.exports.generateOpaqueId = function utils$$generateOpaqueId()\n{\n if (!utils$$generateOpaqueId.nanoid)\n {\n const nanoidModule = (DCP_ENV.platform === 'nodejs') ? requireNative('nanoid') : __webpack_require__(/*! 
nanoid */ \"./node_modules/nanoid/index.browser.js\");\n utils$$generateOpaqueId.nanoid = nanoidModule.customAlphabet('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+', schedulerConstants.workerIdLength);\n }\n\n return utils$$generateOpaqueId.nanoid();\n}\n\n/**\n * Accepts an object and a list of properties to apply to the object to retreive a final value.\n * E.g.\n * object = { a: { b: { c: 4 } } }\n * listOfProperties = [\"a\", \"b\", \"c\"]\n * return = 4\n */\nmodule.exports.getNestedFromListOfProperties = function (object, listOfProperties) {\n return listOfProperties.reduce((o, k) => {\n if (typeof o !== 'object') return undefined;\n return o[k];\n }, object);\n}\n\n/**\n * Accepts an object and a dot-separated property name to retrieve from the object.\n * E.g.\n * object = { a: { b: { c: 4 } } }\n * property = 'a.b.c'\n * return = 4\n *\n * @param {object} object \n * @param {string} property \n */\nmodule.exports.getNestedProperty = function (object, property) {\n if (typeof property !== 'string') return undefined;\n return module.exports.getNestedFromListOfProperties(object, property.split(\".\"));\n}\n\n/**\n * Accepts an object and a list of properties and a value and mutates the object\n * to have the specified value at the location denoted by the list of properties.\n * Similar to getNestedFromListOfProperties, but sets instead of gets.\n */\nmodule.exports.setNestedFromListOfProperties = function (object, listOfProperties, value) {\n if(!listOfProperties.length || listOfProperties.length < 1) {\n throw new Error(\"listOfProperties must be an array of length >= 1\");\n }\n const indexOfLastProp = listOfProperties.length - 1;\n const pathToParent = listOfProperties.slice(0, indexOfLastProp);\n const parent = module.exports.getNestedFromListOfProperties(object, pathToParent);\n if(!parent) {\n throw new Error(\"Could not find value at:\", pathToParent, \"in object:\", object);\n }\n const lastProperty = listOfProperties[indexOfLastProp];\n parent[lastProperty] = value;\n}\n\n/**\n * Block the event loop for a specified time\n * @milliseconds the number of milliseconds to wait (integer)\n */\nexports.msleep = function utils$$msleep(milliseconds) {\n try\n {\n let sab = new SharedArrayBuffer(4);\n let int32 = new Int32Array(sab);\n Atomics.wait(int32, 0, 0, milliseconds);\n }\n catch(error)\n {\n console.error('Cannot msleep;', error);\n }\n}\n\n/**\n * Block the event loop for a specified time\n * @seconds the number of seconds to wait (float)\n */\nexports.sleep = function utils$$sleep(seconds) {\n return exports.msleep(seconds * 1000);\n}\n\n/** \n * Resolve a promise after a specified time.\n * \n * @param {number} ms the number of milliseconds after which to resolve the promise.\n * @returns Promise with an extra property, intr(). Calling this function causes the promise\n * to resolve immediately. 
If the promise was interrupted, it will resolve with false;\n * otherwise, true.\n */\nexports.a$sleepMs = function a$sleepMs(ms)\n{\n var interrupt;\n \n const ret = new Promise((resolve, reject) => {\n var resolved = false;\n const timerHnd = setTimeout(() => { resolved=true; resolve(false) }, ms);\n function a$sleepMs_intr()\n {\n clearTimeout(timerHnd);\n if (!resolved)\n resolve(true);\n }\n \n interrupt = a$sleepMs_intr;\n });\n\n ret.intr = () => interrupt();\n return ret;\n}\n\n/** \n * @see: a$sleepMs\n * @param {number} ms the number of milliseconds after which to resolve the promise.\n */\nexports.a$sleep = function a$sleep(seconds) {\n return exports.a$sleepMs(seconds * 1000);\n}\n\n/** \n * Returns the number of millisecond in a time expression.\n * @param s {number} The number of seconds\n * @returns {number}\n */\n/**\n * @param s {string} A complex time expression using m, w, d, s, h. '10d 6h 1s' means 10 days, 6 hours, and 1 second.\n */\nexports.ms = function utils$$ms(s)\n{\n let ms = 0;\n \n if (typeof s === 'number')\n return s * 1000;\n\n assert(typeof s === 'string');\n \n for (let expr of s.match(/[0-9.]+[smhdw]/g))\n {\n let unit = expr.slice(-1);\n let value = +expr.slice(0, -1);\n\n switch(unit)\n {\n case 's': {\n ms += value * 1000;\n break;\n }\n case 'm': {\n ms += value * 1000 * 60;\n break;\n }\n case 'h': {\n ms += value * 1000 * 60 * 60;\n break;\n }\n case 'd': {\n ms = value * 1000 * 60 * 60 * 24;\n break;\n }\n case 'w': {\n ms = value * 1000 * 60 * 60 * 24 * 7;\n break;\n }\n default: {\n throw new Error(`invalid time unit ${unit}`);\n }\n }\n }\n\n return ms;\n}\n\n/**\n * Returns a percentage as a number.\n * @param n {number} this number is returned\n */\n/**\n * @param n {string} this string is converted to a number and returned; it is divided by 100 if it ends in %.\n */\nexports.pct = function utils$$pct(n)\n{\n if (typeof n === 'number')\n return n / 100;\n\n if (n.match(/%$/))\n return +n / 100;\n\n return +n;\n}\n\n/**\n * Coerce human-written or registry-provided values into Boolean in a sensisble way.\n */\nexports.booley = function utils$$booley(check)\n{\n switch (typeof check)\n {\n case 'undefined':\n return false;\n case 'string':\n return check && (check !== 'false');\n case 'boolean':\n return check;\n case 'number':\n case 'object':\n return Boolean(check);\n default:\n throw new Error(`can't coerce ${typeof check} to booley`);\n }\n}\n\ntry {\n exports.useChalk = requireNative ? requireNative('tty').isatty(0) || process.env.FORCE_COLOR : false;\n}\ncatch (error) {\n if (error.message.includes('no native require'))\n exports.useChalk = false;\n else\n throw error;\n}\n\n/** \n * Factory function which constructs an error message relating to version mismatches\n * @param {string} oldThingLabel The name of the thing that is too old\n * @param {string} upgradeThingLabel [optional] The name of the thing that needs to be upgraded to fix that; if\n * unspecified, we use oldThingLabel.\n * @param {string} newThingLabel What the thing is that is that is newer than oldThingLabel\n * @param {string} minimumVersion The minimum version of the old thing that the new thing supports. 
\n * Obstensibibly semver, but unparsed.\n * @param {string} code [optional] A code property to add to the return value\n *\n * @returns instance of Error\n */\nexports.versionError = function dcpClient$$versionError(oldThingLabel, upgradeThingLabel, newThingLabel, minimumVersion, code)\n{\n function repeat(what, len) {\n let out = '';\n while (len--)\n out += what;\n return out;\n }\n\n function bold(string) {\n if ((__webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\").platform) === 'nodejs') {\n const chalk = new requireNative('chalk').constructor({enabled: exports.useChalk});\n\n return chalk.yellow(string);\n }\n\n return string;\n }\n \n var mainMessage = `${oldThingLabel} is too old; ${newThingLabel} needs version ${minimumVersion}`;\n var upgradeMessage = bold(`Please upgrade ${upgradeThingLabel || oldThingLabel}${repeat(\" \", mainMessage.length - 15 - upgradeThingLabel.length)}`);\n var error = new Error(`\n****${repeat('*', mainMessage.length)}****\n*** ${repeat(' ', mainMessage.length)} ***\n*** ${mainMessage} ***\n*** ${upgradeMessage} ***\n*** ${repeat(' ', mainMessage.length)} ***\n****${repeat('*', mainMessage.length)}****\n`);\n\n if (code)\n error.code = code;\n return error;\n}\n\n/**\n * Perf Timers:\n * shortTime\n * class PerfTimers\n *********************************************************************************************/\n\n/**\n * Previous short form perf time stamp.\n * @private\n * @type {Date}\n */\nlet previousTime = new Date();\n\n/**\n * Short form perf time format with timespan diff from last call.\n * @param {boolean} [displayDiffOnly=true]\n * @returns {string}\n */\nexports.shortTime = function util$$shortTime(displayDiffOnly=true) {\n const currentTime = new Date();\n const diff = currentTime.getTime() - previousTime.getTime();\n previousTime = currentTime;\n if (displayDiffOnly) return `diff=${diff}`;\n return `diff=${diff}: ${currentTime.getMinutes()}.${currentTime.getSeconds()}.${currentTime.getMilliseconds()}`;\n}\n\n/**\n * By default returns\n * functionName:lineNumber: message\n * When (!message || !message.length), returns\n * functionName:lineNumber\n *\n * When shortForm is true, returns\n * lineNumber: message\n *\n * @param {string} [message]\n * @param {boolean} [shortForm=false]\n * @returns {string}\n */\nexports.debugLine = function debugLine(message, depth = 2, shortForm = false) {\n const e = new Error();\n const frame = e.stack.split(\"\\n\")[depth];\n const lineNumber = frame.split(\":\").reverse()[1];\n const funcName = frame.split(\" \")[5];\n const header = shortForm ? 
'line#' : funcName;\n const headerAndLine = `${header}:${lineNumber}`;\n if (!message || !message.length) return headerAndLine;\n return headerAndLine + \" \" + message;\n}\n\n/**\n * class Tracer: Method mark, displays time span since the last mark.\n *\n * When perf testing the first step is to place a bunch of timer entries\n * in the code where the time difference from the last entry is displayed\n * This class Tracer is used as follows:\n * const tracer = new Tracer(tag);\n * ...............................\n * tracer.mark(); // displays dt1: lineNumber: tag -- where dt1 is time since constructor was called\n * ...............................\n * tracer.mark(); // displays dt2: lineNumber: tag -- where dt2 is time since previous mark\n * ...............................\n * tracer.mark(); // displays dt3: lineNumber: tag -- where dt3 is time since previous mark\n *\n * There are many variations possible as can be seen by the comment on the mark method.\n */\nclass Tracer {\n /**\n * \n * @param {boolean} [shortForm=true]\n * @param {string} [tag]\n */\n constructor(shortForm = true, tag) {\n this.shortForm = shortForm;\n this.tag = tag;\n this.previousTime = new Date();\n }\n /**\n * Let diff be the time span since the last call to mark or the Tracer constructor.\n * Let lineNumber be the line # of the file containing the call to mark.\n * Let line# be the string \"line#\"\n * Let funcName be the name of the function where mark was called.\n * When tag.length > 0,\n * displays: diff: line#:lineNumber: tag:message\n * or: diff: line#:lineNumber: tag when !message\n * or: diff: funcName:lineNumber tag:message when !shortForm\n * or: diff: funcName:lineNumber tag when !shortForm and !message\n * else\n * displays: diff: line#:lineNumber: message\n * or: diff: line#lineNumber: when !message\n * or: diff: funcName:lineNumber message when !shortForm\n * or: diff: funcName:lineNumber when !shortForm and !message\n * @param {string} [message]\n */\n mark(message) {\n const currentTime = new Date();\n const diff = currentTime.getTime() - this.previousTime.getTime();\n this.previousTime = currentTime;\n let tag = this.tag;\n if (tag && tag.length && message) tag = `${tag}:${message}`;\n else if (message) tag = message;\n console.log(`${diff}: ${exports.debugLine(tag, 3 /* depth*/, this.shortForm)}`);\n }\n}\nexports.Tracer = Tracer;\n\n/**\n* class PerfTimer: Construct a set of possibly overlapping timers.\n*\n* Example:\n* const perfTimers = new PerfTimers(['timer0', 'timer1', 'timer2']);\n* perfTimers.start('timer0');\n* perfTimers.start('timer1');\n* .........................\n* perfTimers.stop('timer1');\n* .........................\n* perfTimers.stop('timer0');\n* .........................\n* perfTimers.start('timer2');\n* ..........................\n* perfTimers.stop('timer2');\n* perfTimers.sum(name => name !== 'timer1');\n*\n* Should display something like:\n* timer1: 472ms\n* timer0: 1650ms\n* timer2: 2333ms\n* The sum of timers [timer0, timer2] is 3983ms\n*/\nclass PerfTimers {\n /**\n * @constructor\n * @param {string[]} perfTimerNames\n * @param {boolean} [disable=false]]\n * @param {boolean} [summaryView=false]]\n */\n constructor (perfTimerNames, disable = false, summaryView = false) {\n this.disable = disable;\n if (this.disable) return;\n assert(Array.isArray(perfTimerNames));\n /** @type {string[]} */\n this.perfTimerNames = perfTimerNames;\n /** @type {boolean} */\n this.summaryView = summaryView;\n /** @type {number[]} */\n this.perfTimers = Array.from({ length: 
perfTimerNames.length }, (v, i) => 0);\n /** @type {object} */\n this.perfTimerMap = {};\n for (let k = 0; k < this.perfTimerNames.length; k++)\n this.perfTimerMap[this.perfTimerNames[k]] = k;\n }\n /**\n * Start a timer on perfTimerName\n * @param {string} perfTimerName\n */\n start(perfTimerName) {\n if (this.disable) return;\n const slot = this.perfTimerMap[perfTimerName];\n if (slot === undefined) throw new Error(`PerfDebugging: perfTimer '${perfTimerName}' not found.`);\n this.perfTimers[slot] = Date.now();\n }\n /**\n * Stop a timer on perfTimerName and display the result.\n * @param {string} perfTimerName\n */\n stop(perfTimerName) {\n if (this.disable) return;\n const slot = this.perfTimerMap[perfTimerName];\n if (slot === undefined) throw new Error(`PerfDebugging: perfTimer '${perfTimerName}' not found.`);\n this.perfTimers[slot] = Date.now() - this.perfTimers[slot];\n if (!this.summaryView) console.log(`${perfTimerName}: ${this.perfTimers[slot]}ms`);\n }\n /** \n * @callback PredicateCallback\n * @param {string} value\n * @param {number} index\n * @param {string[]} array\n */\n /**\n * Use predicate to choose a subset of this.perfTimerNames,\n * sum up the corresponding timers and display the result.\n * @param {PredicateCallback} predicate\n * @param {boolean} [forceDisplay=false]\n */\n sum(predicate, forceDisplay = false) {\n if (this.disable || this.summaryView && !forceDisplay) return;\n const names = this.perfTimerNames.filter(predicate);\n const timers = this.perfTimers.map(k => this.perfTimerMap(names[k]));\n const sum = timers.reduce((previous, current) => previous + current, 0);\n console.log(`The sum of timers ${JSON.stringify(names)} is ${sum}ms`);\n }\n /**\n * Display summary.\n * @param {PredicateCallback} predicate\n * @param {boolean} [forceDisplay=false]\n */\n summary(predicate, forceDisplay = false) {\n if (this.disable) return;\n if (this.summaryView || forceDisplay) {\n for (let k = 0; k < this.perfTimers.length; k++)\n console.log(`${this.perfTimerNames[k]}: ${this.perfTimers[k]}ms`);\n }\n if (predicate) this.sum(predicate);\n }\n}\nexports.PerfTimers = PerfTimers;\n\n/**\n * Sandbox and Slice debugging and logging tools.\n * dumpSlices -- dump array of slices\n * dumpSandboxes -- dump array of sandboxes\n * dumpSlicesIfNotUnique -- dump array of slices when there are dups\n * dumpSandboxesIfNotUnique -- dump array of sandboxes when there are dups\n * isUniqueSlices -- detect dups in an array of slices\n * isUniqueSandboxes -- detect dups in an array of sandboxes\n *********************************************************************************************/\n\n/**\n * Log sliceArray.\n * @param {Slice[]} sliceArray\n * @param {string} [header]\n */\nexports.dumpSlices = function utils$$dumpSlices(sliceArray, header) {\n if (header) console.log(`\\n${header}`);\n console.log(exports.compressSlices(sliceArray));\n}\n\n/**\n * Log sandboxArray.\n * @param {Sandbox[]} sandboxArray\n * @param {string} [header]\n */\nexports.dumpSandboxes = function utils$$dumpSandboxes(sandboxArray, header) {\n if (header) console.log(`\\n${header}`);\n console.log(exports.compressSandboxes(sandboxArray));\n}\n\n/**\n * If the elements of sliceArray are not unique, log the duplicates and log the full array.\n * @param {Slice[]} sliceArray\n * @param {string} [header]\n */\nexports.dumpSlicesIfNotUnique = function utils$$dumpSlicesIfNotUnique(sliceArray, header) {\n if (!exports.isUniqueSlices(sliceArray, header))\n console.log(exports.compressSlices(sliceArray));\n}\n\n/**\n * 
If the elements of sandboxArray are not unique, log the duplicates and log the full array.\n * @param {Sandbox[]} sandboxArray\n * @param {string} [header]\n */\nexports.dumpSandboxesIfNotUnique = function utils$$dumpSandboxesIfNotUnique(sandboxArray, header) {\n if (!exports.isUniqueSandboxes(sandboxArray, header))\n console.log(exports.compressSandboxes(sandboxArray));\n}\n\n/**\n * Checks whether the elements of sliceArray are unique and if not, log the duplicates.\n * @param {Slice[]} sliceArray\n * @param {string} [header]\n * @param {function} [log]\n * @returns {boolean}\n */\nexports.isUniqueSlices = function utils$$isUniqueSlices(sliceArray, header, log) {\n const slices = [];\n let once = true;\n sliceArray.forEach(x => {\n if (slices.indexOf(x) >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x) : console.log(`\\tFound duplicate slice ${x.identifier}.`);\n } else slices.push(x);\n });\n return sliceArray.length === slices.length;\n}\n\n/**\n * Checks whether the elements of sandboxArray are unique and if not, log the duplicates.\n * @param {Sandbox[]} sandboxArray\n * @param {string} [header]\n * @param {function} [log]\n * @returns {boolean}\n */\nexports.isUniqueSandboxes = function utils$$isUniqueSandboxes(sandboxArray, header, log) {\n const sandboxes = [];\n let once = true;\n sandboxArray.forEach(x => {\n if (sandboxes.indexOf(x) >= 0) {\n if (once && header) console.log(`\\n${header}`); once = false;\n log ? log(x) : console.log(`\\tFound duplicate sandbox ${x.identifier}.`);\n } else sandboxes.push(x);\n });\n return sandboxArray.length === sandboxes.length;\n}\n\n/**\n * JSON Serialization\n * stringify -- ignore cycles\n * dumpJSON -- safely wrapped stringify with header\n * dumpObject -- Apply dumpJSON to every [key, value] of Object.entries(theObject)\n *********************************************************************************************/\n\n/**\n * Quck and dirty JSON serialization that ignores cycles.\n *\n * @param {*} theAnything - entity to be serialized.\n * @param {number} [truncationLength=512] - number of string elements to return.\n * @returns {string}\n */\nexports.stringify = function _stringify(theAnything, truncationLength = 512) {\n let cache = [];\n const str = JSON.stringify(theAnything, (key, value) => {\n if (typeof value === 'object' && value !== null) {\n if (cache.includes(value)) return;\n cache.push(value);\n }\n return value;\n });\n cache = null;\n return str ? 
str.slice(0, truncationLength) : null;\n}\n\n/**\n * Calls truncated JSON.stringify on theAnything.\n * @param {*} theAnything\n * @param {string} [header='dumpJSON']\n * @param {number} [truncationLength=512]\n */\nexports.dumpJSON = function utils$$dumpJSON(theAnything, header = 'dumpJSON: ', truncationLength = 512) {\n if (theAnything) {\n const strV = exports.stringify(theAnything, truncationLength);\n if (strV) console.log(`${header}: ${String(strV)}`);\n else console.log(`${header}:`, theAnything);\n } else console.log(`${header}:`, theAnything);\n}\n\n/**\n * Iterates over all property [key, value]-pairs of theObject and call truncated JSON.stringify on property values.\n * @param {object} theObject\n * @param {string} [header='dumpObject']\n * @param {number} [truncationLength=512]\n */\nexports.dumpObject = function utils$$dumpObject(theObject, header = 'dumpObject: ', truncationLength = 512, dumpKeys = true) {\n if (theObject) {\n if (dumpKeys) console.log(`${header}: dump the keys`, Object.keys(theObject));\n console.log(`${header}: dump the key-value entries...`);\n console.group();\n for (const [key, value] of Object.entries(theObject))\n exports.dumpJSON(value, `${header}.${key}`, truncationLength);\n console.groupEnd();\n }\n}\n\n/**\n * Compressed Representations and Maps\n * toJobMap\n * compressSandboxes\n * compressSlices\n * compressJobMap\n * compressJobArray\n * compressJobValue\n * compressJobEntry\n * compressEnhancedJobEntry\n * truncateAddress\n * compressRange\n * compressEnhancedRange\n *********************************************************************************************/\n\n/**\n * @param {object[]} jobArray\n * @param {function} functor\n * @returns {object}\n */\nexports.toJobMap = function utils$$toJobMap(jobArray, functor) {\n const jobMap = {};\n for (const x of jobArray) {\n if (!jobMap[x.jobAddress]) jobMap[x.jobAddress] = [functor(x)];\n else jobMap[x.jobAddress].push(functor(x));\n }\n return jobMap;\n}\n\n/**\n * @param {Sandbox[]} sandboxArray\n * @param {number} [digits=-1]\n * @returns {string}\n */\nexports.compressSandboxes = function utils$$compressSandboxes(sandboxArray, digits = -1) {\n const jobSandboxMap = exports.toJobMap(sandboxArray, sbx => sbx.id);\n return exports.compressJobMap(jobSandboxMap, false /* skipFirst*/, digits);\n}\n\n/**\n * @param {Slice[]} sliceArray\n * @param {number} [digits=-1]\n * @returns {string}\n */\nexports.compressSlices = function utils$$compressSlices(sliceArray, digits = -1) {\n const jobSliceMap = exports.toJobMap(sliceArray, slice => slice.sliceNumber);\n return exports.compressJobMap(jobSliceMap, false /* skipFirst*/, digits);\n}\n\n/**\n * @param {object} jobMap\n * @param {boolean} [skipFirst=false]\n * @param {number} [digits=-1]\n * @returns {string}\n */\nexports.compressJobMap = function utils$$compressJobMap(jobMap, skipFirst = false, digits = -1) {\n return exports.compressJobArray(Object.entries(jobMap), skipFirst, digits);\n}\n\n/**\n * @param {object[]} jobArray\n * @param {boolean} [skipFirst=false]\n * @param {number} [digits=-1]\n * @returns {string}\n */\nexports.compressJobArray = function utils$$compressJobArray(jobArray, skipFirst = false, digits = -1) {\n let output = '';\n for (let k = 0; k < jobArray.length; k++) {\n output += exports.compressJobValue(jobArray[k], skipFirst, digits);\n }\n return output;\n}\n\n/**\n * @param {object|object[]} jobValue\n * @param {boolean} [skipFirst=false]\n * @param {number} [digits=-1]\n * @returns {string}\n */\nexports.compressJobValue = 
function utils$$compressJobValue(jobValue, skipFirst = false, digits = -1) {\n if (jobValue.job && jobValue.slices)\n return exports.compressJobEntry(jobValue.job, (skipFirst ? jobValue.slices.slice(1) : jobValue.slices), digits);\n if (jobValue.length === 2)\n return exports.compressJobEntry(jobValue[0], (skipFirst ? jobValue[1].slice(1) : jobValue[1]), digits);\n return 'nada';\n}\n\n/**\n * @param {*} jobAddress\n * @param {Array<number|Number>} sliceNumbers\n * @param {number} [digits=-1]\n * @returns {string}\n */\nexports.compressJobEntry = function utils$$compressJobEntry(jobAddress, sliceNumbers, digits = -1) {\n return `${exports.truncateAddress(jobAddress, digits)}:[${exports.compressRange(sliceNumbers)}]:`;\n}\n\nexports.compressEnhancedJobEntry = function utils$$compressEnhancedJobEntry(job, slices, digits = -1) {\n return `${job.id}.${exports.truncateAddress(job.address, digits)}:[${exports.compressEnhancedRange(slices)}]:`;\n}\n\n/**\n * @param {*} address\n * @param {number} [digits=-1]\n * @returns {string}\n */\nexports.truncateAddress = function utils$$truncateAddress(address, digits = -1) {\n let value = address.toString();\n if (digits < 0) return value.startsWith('0x') ? value.substring(2) : fragment;\n return value.startsWith('0x') ? value.substring(2, 2 + digits) : value.substring(0, digits);\n}\n\n/**\n * Input [2, 3, 4, 7, 5, 8, 9, 13, 14, 15] returns '2-5,7-9,13-15'\n * Input [2, 3, 4, 7, 5, 8, 9, 13] returns '2-5,7-9,13'\n * Input [2, 3, 4, 7, 5, 4, 4, 8, 9] returns '2-4,4,4-5,7-9'\n * @param {Array<number|Number>} numberArray\n * @returns {string}\n */\nexports.compressRange = function utils$$compressRange(numberArray) {\n assert(numberArray && Array.isArray(numberArray));\n numberArray.sort((x, y) => x - y); // increasing...\n let start = numberArray[0];\n let output = `${start}`;\n for (let k = 1; k < numberArray.length; k++) {\n assert(typeof numberArray[k] === 'number' || numberArray[k] && numberArray[k].constructor.name === 'Number');\n if (numberArray[k] - numberArray[k - 1] !== 1) {\n output += (numberArray[k - 1] > start) ? `-${numberArray[k - 1]},` : ',';\n start = numberArray[k];\n output += `${start}`;\n } else if (k === numberArray.length - 1) {\n output += `-${numberArray[k]}`;\n }\n }\n return output;\n}\n\nexports.compressEnhancedRange = function utils$$compressEnhancedRange(slices) {\n assert(slices && Array.isArray(slices));\n slices.sort((x, y) => x.sliceNumber - y.sliceNumber); // increasing...\n let start = slices[0];\n let output = fragment(start);\n for (let k = 1; k < slices.length; k++) {\n if (slices[k].sliceNumber - slices[k - 1].sliceNumber !== 1) {\n output += (slices[k - 1].sliceNumber > start.sliceNumber) ? 
`-${fragment(slices[k - 1])},` : ',';\n start = slices[k];\n output += `${fragment(start)}`;\n } else if (k === slices.length - 1) {\n output += `-${fragment(slices[k])}`;\n }\n }\n return output;\n}\n\nfunction fragment(slice) {\n return `${slice.sliceNumber}.${slice.isEstimationSlice}.${slice.isLongSlice}`;\n}\n\n/**\n * shuffle\n * hashGeneration\n *********************************************************************************************/\n\nexports.shuffle = function utils$$shuffle(jobDescriptors, partitionPortions) {\n let jobDescriptorsInEstimation = [];\n let partitionPortionsInEstimation = [];\n for (let [index, jobDescriptor] of jobDescriptors.entries()) {\n if(jobDescriptor.job.status === schedulerConstants.jobStatus.estimation) {\n jobDescriptorsInEstimation.push(jobDescriptor);\n jobDescriptors.splice(index, 1);\n partitionPortionsInEstimation.push(partitionPortions[index]);\n partitionPortions.splice(index, 1) \n }\n }\n \n let currentIndex = jobDescriptors.length, randomIndex;\n // While there remain elements to shuffle...\n while (0 !== currentIndex && jobDescriptors.length > 0) {\n\n // Pick a remaining element...\n randomIndex = Math.floor(Math.random() * currentIndex);\n currentIndex--;\n\n // And swap it with the current element.\n [jobDescriptors[currentIndex], jobDescriptors[randomIndex]] = [\n jobDescriptors[randomIndex], jobDescriptors[currentIndex]];\n\n [partitionPortions[currentIndex], partitionPortions[randomIndex]] = [\n partitionPortions[randomIndex], partitionPortions[currentIndex]];\n }\n \n return [\n [...jobDescriptorsInEstimation, ...jobDescriptors],\n [...partitionPortionsInEstimation, ...partitionPortions]\n ]\n}\n\nexports.hashGeneration = function utils$$hashGeneration(object) {\n const { sha256 } = (__webpack_require__(/*! dcp/dcp-client/wallet/keystore */ \"./src/dcp-client/wallet/keystore.js\")._internalEth.util);\n return sha256(Buffer.from(JSON.stringify(object)));\n}\n\n/**\n * CG service error handling types and helpers.\n *********************************************************************************************/\n\n/**\n * @typedef {object} apiClientType\n * @property {boolean} success\n * @property {*} [payload]\n * @property {DCPError} [error]\n */\n\n/**\n * @typedef {object} apiServiceType\n * @property {boolean} success\n * @property {*} [payload]\n * @property {string} [message]\n * @property {string} [stack]\n * @property {string} [code]\n */\n\n/**\n * @param {apiServiceType} payload \n * @returns {apiClientType}\n */\nexports.reconstructServiceError = function utils$$reconstructServiceError(payload) {\n assert(!payload.success);\n let ex = new DCPError(payload.message);\n ex.stack = dcpConfig.worker.allowConsoleAccess ? 
payload.stack : '';\n if (payload.code) ex.code = payload.code;\n return exports._clientError(ex);\n}\n\n/**\n * @param {DCPError} ex\n * @returns {apiClientType}\n */\nexports._clientError = function utils$$_clientError(ex) {\n if (dcpConfig.worker.allowConsoleAccess) console.error(ex);\n return { success: false, error: ex };\n}\n/**\n * @param {string} message\n * @returns {apiClientType}\n */\nexports.clientError = function utils$$clientError(message) {\n const ex = new DCPError(message);\n return exports._clientError(ex);\n}\n\n/**\n * @param {DCPError} ex\n * @returns {apiServiceType}\n */\nexports._serviceError = function utils$$_serviceError(ex) {\n return { success: false, message: ex.message, stack: ex.stack, code: ex.code };\n}\n/**\n * @param {string} message\n * @param {string|object} codeEx\n * @returns {apiServiceType}\n */\nexports.serviceError = function utils$$serviceError(message, codeEx) {\n const ex = new DCPError(message, codeEx);\n return exports._serviceError(ex);\n}\n\n/**\n * exports\n *********************************************************************************************/\n\nif ((__webpack_require__(/*! dcp/common/dcp-env */ \"./src/common/dcp-env.js\").platform) === 'nodejs') {\n // Assigning the properties explicitly for VSCode intellisense.\n const tmpFiles = __webpack_require__(/*! ./tmpfiles */ \"./src/utils/tmpfiles.js\");\n exports.catFile = tmpFiles.catFile;\n exports.createTempFile = tmpFiles.createTempFile;\n exports.systemTempDir = tmpFiles.systemTempDir;\n\n Object.assign(exports, __webpack_require__(/*! ./readln */ \"./src/utils/readln.js\"));\n}\nObject.assign(exports, __webpack_require__(/*! ./sh */ \"./src/utils/sh.js\"));\nObject.assign(exports, __webpack_require__(/*! ./eventUtils */ \"./src/utils/eventUtils.js\"));\nObject.assign(exports, __webpack_require__(/*! ./obj-merge */ \"./src/utils/obj-merge.js\"));\nObject.assign(exports, __webpack_require__(/*! ./make-data-uri */ \"./src/utils/make-data-uri.js\"));\nObject.assign(exports, __webpack_require__(/*! ./just-fetch */ \"./src/utils/just-fetch.js\"));\nObject.assign(exports, __webpack_require__(/*! ./inventory */ \"./src/utils/inventory.js\"));\nObject.assign(exports, __webpack_require__(/*! ./fetch-keystore */ \"./src/utils/fetch-keystore.js\"));\n\nmodule.exports.fetchURI = __webpack_require__(/*! ./fetch-uri */ \"./src/utils/fetch-uri.js\").fetchURI;\nmodule.exports.encodeDataURI = __webpack_require__(/*! ./encodeDataURI */ \"./src/utils/encodeDataURI.js\").encodeDataURI;\n\n\n//# sourceURL=webpack://dcp/./src/utils/index.js?");
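For reference, the following standalone sketch mirrors the nested-property helpers bundled above (getNestedFromListOfProperties, getNestedProperty, setNestedFromListOfProperties). It targets plain Node.js; the function names, object shape, and URLs are made up for illustration and are not the package's public API.

  // Standalone sketch of the nested-property helpers in ./src/utils/index.js;
  // identifiers and data below are illustrative only.
  function getNested(object, dottedPath) {
    return dottedPath.split('.').reduce(
      (o, key) => (typeof o === 'object' && o !== null ? o[key] : undefined),
      object);
  }

  function setNested(object, dottedPath, value) {
    const keys = dottedPath.split('.');
    const last = keys.pop();
    const parent = keys.length ? getNested(object, keys.join('.')) : object;
    if (!parent || typeof parent !== 'object')
      throw new Error(`could not resolve '${keys.join('.')}'`);
    parent[last] = value;
  }

  const cfg = { scheduler: { location: { href: 'https://example.invalid/' } } };
  console.log(getNested(cfg, 'scheduler.location.href'));  // 'https://example.invalid/'
  setNested(cfg, 'scheduler.location.href', 'https://scheduler.example.invalid/');
  console.log(cfg.scheduler.location.href);                // updated value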
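The a$sleepMs helper above returns a promise augmented with an intr() method so a pending sleep can be cut short. The usage sketch below follows the shipped implementation, which resolves true when intr() fires first and false when the timeout elapses normally (the docblock in the bundle documents the opposite mapping); names here are illustrative, not the exported API.

  // Usage sketch for the interruptible-sleep pattern behind a$sleepMs().
  function sleepMs(msec) {
    let interrupt;
    const p = new Promise((resolve) => {
      const timer = setTimeout(() => resolve(false), msec);      // timed out
      interrupt = () => { clearTimeout(timer); resolve(true); }; // woken early
    });
    p.intr = () => interrupt();
    return p;
  }

  async function demo() {
    const nap = sleepMs(5000);
    setTimeout(() => nap.intr(), 100);  // wake the sleeper early
    console.log((await nap) ? 'interrupted' : 'timed out');
  }
  demo();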
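The ms() helper above converts time expressions to milliseconds: a bare number is treated as seconds, while strings combine unit suffixes s/m/h/d/w (e.g. '10d 6h 1s'). The sketch below is a minimal re-implementation of that behaviour, assuming every unit is accumulated with +=; the shipped 'd' and 'w' branches assign instead of adding, so an expression such as '1w 2d' would lose earlier terms.

  // Sketch of the ms() time-expression parser; all units accumulate.
  const UNIT_MS = { s: 1000, m: 60000, h: 3600000, d: 86400000, w: 604800000 };

  function ms(spec) {
    if (typeof spec === 'number')
      return spec * 1000;  // bare numbers are seconds
    let total = 0;
    for (const expr of spec.match(/[0-9.]+[smhdw]/g))
      total += Number(expr.slice(0, -1)) * UNIT_MS[expr.slice(-1)];
    return total;
  }

  console.log(ms('10d 6h 1s'));  // 885601000
  console.log(ms(1.5));          // 1500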
4804
4804
 
4805
4805
  /***/ }),
4806
4806