@promptbook/remote-server 0.92.0-3 → 0.92.0-31
This diff shows the changes between publicly released versions of the package as they appear in the supported public registries, and is provided for informational purposes only.
- package/esm/index.es.js +999 -298
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/browser.index.d.ts +2 -0
- package/esm/typings/src/_packages/core.index.d.ts +22 -6
- package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
- package/esm/typings/src/_packages/google.index.d.ts +2 -0
- package/esm/typings/src/_packages/types.index.d.ts +4 -2
- package/esm/typings/src/_packages/utils.index.d.ts +2 -0
- package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +1 -1
- package/esm/typings/src/collection/PipelineCollection.d.ts +0 -2
- package/esm/typings/src/collection/SimplePipelineCollection.d.ts +1 -1
- package/esm/typings/src/commands/FOREACH/ForeachJson.d.ts +6 -6
- package/esm/typings/src/commands/FOREACH/foreachCommandParser.d.ts +0 -2
- package/esm/typings/src/commands/FORMFACTOR/formfactorCommandParser.d.ts +1 -1
- package/esm/typings/src/commands/_BOILERPLATE/boilerplateCommandParser.d.ts +1 -1
- package/esm/typings/src/commands/_common/types/CommandParser.d.ts +36 -28
- package/esm/typings/src/config.d.ts +41 -11
- package/esm/typings/src/constants.d.ts +43 -2
- package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
- package/esm/typings/src/errors/0-BoilerplateError.d.ts +2 -2
- package/esm/typings/src/executables/$provideExecutablesForNode.d.ts +1 -1
- package/esm/typings/src/executables/apps/locateLibreoffice.d.ts +2 -1
- package/esm/typings/src/executables/apps/locatePandoc.d.ts +2 -1
- package/esm/typings/src/executables/locateApp.d.ts +2 -2
- package/esm/typings/src/executables/platforms/locateAppOnLinux.d.ts +2 -1
- package/esm/typings/src/executables/platforms/locateAppOnMacOs.d.ts +2 -1
- package/esm/typings/src/executables/platforms/locateAppOnWindows.d.ts +2 -1
- package/esm/typings/src/execution/AbstractTaskResult.d.ts +1 -1
- package/esm/typings/src/execution/CommonToolsOptions.d.ts +5 -1
- package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +2 -1
- package/esm/typings/src/execution/PipelineExecutorResult.d.ts +4 -2
- package/esm/typings/src/execution/createPipelineExecutor/$OngoingTaskResult.d.ts +12 -9
- package/esm/typings/src/execution/createPipelineExecutor/10-executePipeline.d.ts +13 -10
- package/esm/typings/src/execution/createPipelineExecutor/20-executeTask.d.ts +12 -9
- package/esm/typings/src/execution/createPipelineExecutor/30-executeFormatSubvalues.d.ts +15 -3
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +20 -14
- package/esm/typings/src/execution/createPipelineExecutor/computeCosineSimilarity.d.ts +13 -0
- package/esm/typings/src/execution/createPipelineExecutor/filterJustOutputParameters.d.ts +7 -6
- package/esm/typings/src/execution/createPipelineExecutor/getContextForTask.d.ts +5 -1
- package/esm/typings/src/execution/createPipelineExecutor/getExamplesForTask.d.ts +1 -1
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +21 -5
- package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +19 -5
- package/esm/typings/src/execution/createPipelineExecutor/knowledgePiecesToString.d.ts +9 -0
- package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +4 -4
- package/esm/typings/src/execution/utils/checkExpectations.d.ts +1 -1
- package/esm/typings/src/execution/utils/uncertainNumber.d.ts +3 -2
- package/esm/typings/src/formats/_common/{FormatDefinition.d.ts → FormatParser.d.ts} +8 -6
- package/esm/typings/src/formats/_common/FormatSubvalueParser.d.ts +66 -0
- package/esm/typings/src/formats/csv/CsvFormatParser.d.ts +17 -0
- package/esm/typings/src/formats/csv/CsvSettings.d.ts +2 -2
- package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
- package/esm/typings/src/formats/csv/utils/isValidCsvString.d.ts +1 -1
- package/esm/typings/src/formats/index.d.ts +2 -2
- package/esm/typings/src/formats/json/{JsonFormatDefinition.d.ts → JsonFormatParser.d.ts} +6 -6
- package/esm/typings/src/formats/json/utils/isValidJsonString.d.ts +1 -1
- package/esm/typings/src/formats/json/utils/jsonParse.d.ts +8 -0
- package/esm/typings/src/formats/text/{TextFormatDefinition.d.ts → TextFormatParser.d.ts} +7 -7
- package/esm/typings/src/formats/xml/XmlFormatParser.d.ts +19 -0
- package/esm/typings/src/formats/xml/utils/isValidXmlString.d.ts +1 -1
- package/esm/typings/src/formfactors/_boilerplate/BoilerplateFormfactorDefinition.d.ts +3 -2
- package/esm/typings/src/formfactors/_common/AbstractFormfactorDefinition.d.ts +16 -7
- package/esm/typings/src/formfactors/_common/FormfactorDefinition.d.ts +3 -1
- package/esm/typings/src/formfactors/_common/string_formfactor_name.d.ts +2 -1
- package/esm/typings/src/formfactors/chatbot/ChatbotFormfactorDefinition.d.ts +2 -2
- package/esm/typings/src/formfactors/completion/CompletionFormfactorDefinition.d.ts +29 -0
- package/esm/typings/src/formfactors/generator/GeneratorFormfactorDefinition.d.ts +2 -1
- package/esm/typings/src/formfactors/generic/GenericFormfactorDefinition.d.ts +2 -2
- package/esm/typings/src/formfactors/index.d.ts +33 -8
- package/esm/typings/src/formfactors/matcher/MatcherFormfactorDefinition.d.ts +4 -2
- package/esm/typings/src/formfactors/sheets/SheetsFormfactorDefinition.d.ts +3 -2
- package/esm/typings/src/formfactors/translator/TranslatorFormfactorDefinition.d.ts +3 -2
- package/esm/typings/src/high-level-abstractions/index.d.ts +2 -2
- package/esm/typings/src/llm-providers/_common/register/$llmToolsMetadataRegister.d.ts +3 -3
- package/esm/typings/src/llm-providers/_common/register/$llmToolsRegister.d.ts +3 -3
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +4 -4
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +4 -3
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsFromEnv.d.ts +17 -4
- package/esm/typings/src/llm-providers/_common/register/LlmToolsConfiguration.d.ts +11 -4
- package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +27 -5
- package/esm/typings/src/llm-providers/_common/register/LlmToolsOptions.d.ts +9 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +12 -3
- package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +10 -5
- package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +5 -3
- package/esm/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/_common/utils/count-total-usage/limitTotalUsage.d.ts +5 -5
- package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
- package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
- package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
- package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
- package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
- package/esm/typings/src/migrations/migratePipeline.d.ts +9 -0
- package/esm/typings/src/other/templates/getBookTemplates.d.ts +2 -2
- package/esm/typings/src/personas/preparePersona.d.ts +1 -1
- package/esm/typings/src/pipeline/PipelineInterface/PipelineInterface.d.ts +3 -3
- package/esm/typings/src/pipeline/PipelineInterface/constants.d.ts +1 -1
- package/esm/typings/src/pipeline/PipelineInterface/getPipelineInterface.d.ts +1 -1
- package/esm/typings/src/pipeline/PipelineInterface/isPipelineImplementingInterface.d.ts +5 -4
- package/esm/typings/src/pipeline/PipelineInterface/isPipelineInterfacesEqual.d.ts +1 -1
- package/esm/typings/src/pipeline/PipelineJson/CommonTaskJson.d.ts +9 -6
- package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
- package/esm/typings/src/pipeline/PipelineJson/PipelineJson.d.ts +3 -2
- package/esm/typings/src/pipeline/PipelineString.d.ts +3 -1
- package/esm/typings/src/pipeline/book-notation.d.ts +2 -2
- package/esm/typings/src/postprocessing/utils/extractJsonBlock.d.ts +1 -1
- package/esm/typings/src/prepare/prepareTasks.d.ts +7 -4
- package/esm/typings/src/remote-server/openapi-types.d.ts +348 -6
- package/esm/typings/src/remote-server/openapi.d.ts +398 -4
- package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +2 -1
- package/esm/typings/src/scrapers/_boilerplate/BoilerplateScraper.d.ts +3 -3
- package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +1 -1
- package/esm/typings/src/scrapers/_common/Converter.d.ts +3 -1
- package/esm/typings/src/scrapers/_common/Scraper.d.ts +4 -3
- package/esm/typings/src/scrapers/_common/ScraperIntermediateSource.d.ts +4 -2
- package/esm/typings/src/scrapers/_common/register/$provideFilesystemForNode.d.ts +2 -1
- package/esm/typings/src/scrapers/_common/register/$provideScrapersForBrowser.d.ts +6 -3
- package/esm/typings/src/scrapers/_common/register/$provideScrapersForNode.d.ts +3 -5
- package/esm/typings/src/scrapers/_common/register/$scrapersMetadataRegister.d.ts +3 -3
- package/esm/typings/src/scrapers/_common/register/$scrapersRegister.d.ts +3 -2
- package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +8 -5
- package/esm/typings/src/scrapers/_common/register/ScraperConstructor.d.ts +2 -1
- package/esm/typings/src/scrapers/_common/utils/getScraperIntermediateSource.d.ts +6 -5
- package/esm/typings/src/scrapers/_common/utils/makeKnowledgeSourceHandler.d.ts +3 -1
- package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +2 -1
- package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +4 -1
- package/esm/typings/src/scrapers/markitdown/MarkitdownScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +2 -1
- package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +3 -4
- package/esm/typings/src/scripting/javascript/postprocessing-functions.d.ts +5 -1
- package/esm/typings/src/storage/file-cache-storage/FileCacheStorage.d.ts +12 -5
- package/esm/typings/src/storage/file-cache-storage/FileCacheStorageOptions.d.ts +4 -2
- package/esm/typings/src/storage/file-cache-storage/utils/nameToSubfolderPath.d.ts +2 -1
- package/esm/typings/src/storage/local-storage/getIndexedDbStorage.d.ts +10 -0
- package/esm/typings/src/storage/local-storage/utils/makePromptbookStorageFromIndexedDb.d.ts +7 -0
- package/esm/typings/src/storage/local-storage/utils/makePromptbookStorageFromWebStorage.d.ts +2 -1
- package/esm/typings/src/types/IntermediateFilesStrategy.d.ts +2 -1
- package/esm/typings/src/types/ModelVariant.d.ts +5 -5
- package/esm/typings/src/types/typeAliases.d.ts +17 -13
- package/esm/typings/src/utils/$Register.d.ts +8 -7
- package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +2 -2
- package/esm/typings/src/utils/editable/edit-pipeline-string/deflatePipeline.d.ts +4 -1
- package/esm/typings/src/utils/editable/utils/isFlatPipeline.d.ts +2 -1
- package/esm/typings/src/utils/environment/$getGlobalScope.d.ts +2 -1
- package/esm/typings/src/utils/expectation-counters/index.d.ts +1 -1
- package/esm/typings/src/utils/markdown/extractAllListItemsFromMarkdown.d.ts +1 -1
- package/esm/typings/src/utils/normalization/nameToUriPart.d.ts +4 -4
- package/esm/typings/src/utils/normalization/nameToUriParts.d.ts +4 -4
- package/esm/typings/src/utils/normalization/normalize-to-kebab-case.d.ts +3 -3
- package/esm/typings/src/utils/normalization/normalizeTo_SCREAMING_CASE.d.ts +3 -3
- package/esm/typings/src/utils/normalization/normalizeTo_camelCase.d.ts +4 -4
- package/esm/typings/src/utils/normalization/normalizeTo_snake_case.d.ts +3 -3
- package/esm/typings/src/utils/normalization/removeDiacritics.d.ts +3 -3
- package/esm/typings/src/utils/normalization/searchKeywords.d.ts +4 -1
- package/esm/typings/src/utils/normalization/titleToName.d.ts +4 -4
- package/esm/typings/src/utils/organization/empty_object.d.ts +2 -2
- package/esm/typings/src/utils/organization/just_empty_object.d.ts +4 -4
- package/esm/typings/src/utils/parameters/mapAvailableToExpectedParameters.d.ts +7 -7
- package/esm/typings/src/utils/serialization/clonePipeline.d.ts +4 -3
- package/esm/typings/src/utils/serialization/deepClone.d.ts +5 -1
- package/esm/typings/src/utils/validators/javascriptName/isValidJavascriptName.d.ts +3 -3
- package/esm/typings/src/utils/validators/parameterName/validateParameterName.d.ts +5 -4
- package/esm/typings/src/version.d.ts +2 -1
- package/package.json +2 -2
- package/umd/index.umd.js +1004 -303
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/formats/_common/FormatSubvalueDefinition.d.ts +0 -31
- package/esm/typings/src/formats/csv/CsvFormatDefinition.d.ts +0 -17
- package/esm/typings/src/formats/xml/XmlFormatDefinition.d.ts +0 -19
package/umd/index.umd.js
CHANGED
@@ -1,8 +1,8 @@
(function (global, factory) {
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('express'), require('
- typeof define === 'function' && define.amd ? define(['exports', 'colors', 'express', 'http', 'socket.io', 'spacetrim', '
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-remote-server"] = {}, global.colors, global.express, global.
- })(this, (function (exports, colors, express, http, socket_io, spaceTrim,
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('express'), require('express-openapi-validator'), require('http'), require('socket.io'), require('spacetrim'), require('swagger-ui-express'), require('waitasecond'), require('crypto'), require('child_process'), require('fs/promises'), require('path'), require('rxjs'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('crypto-js'), require('mime-types'), require('papaparse')) :
+ typeof define === 'function' && define.amd ? define(['exports', 'colors', 'express', 'express-openapi-validator', 'http', 'socket.io', 'spacetrim', 'swagger-ui-express', 'waitasecond', 'crypto', 'child_process', 'fs/promises', 'path', 'rxjs', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'crypto-js', 'mime-types', 'papaparse'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-remote-server"] = {}, global.colors, global.express, global.OpenApiValidator, global.http, global.socket_io, global.spaceTrim, global.swaggerUi, global.waitasecond, global.crypto, global.child_process, global.promises, global.path, global.rxjs, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.cryptoJs, global.mimeTypes, global.papaparse));
+ })(this, (function (exports, colors, express, OpenApiValidator, http, socket_io, spaceTrim, swaggerUi, waitasecond, crypto, child_process, promises, path, rxjs, prettier, parserHtml, hexEncoder, sha256, cryptoJs, mimeTypes, papaparse) { 'use strict';

function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }

@@ -26,9 +26,9 @@

var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
var express__default = /*#__PURE__*/_interopDefaultLegacy(express);
+ var OpenApiValidator__namespace = /*#__PURE__*/_interopNamespace(OpenApiValidator);
var http__default = /*#__PURE__*/_interopDefaultLegacy(http);
var spaceTrim__default = /*#__PURE__*/_interopDefaultLegacy(spaceTrim);
- var OpenApiValidator__namespace = /*#__PURE__*/_interopNamespace(OpenApiValidator);
var swaggerUi__default = /*#__PURE__*/_interopDefaultLegacy(swaggerUi);
var parserHtml__default = /*#__PURE__*/_interopDefaultLegacy(parserHtml);
var hexEncoder__default = /*#__PURE__*/_interopDefaultLegacy(hexEncoder);
@@ -48,7 +48,7 @@
* @generated
* @see https://github.com/webgptorg/promptbook
*/
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-31';
/**
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
* Note: [💞] Ignore a discrepancy between file name and entity name
@@ -115,6 +115,21 @@
* @public exported from `@promptbook/core`
*/
const DEFAULT_MAX_FILE_SIZE = 100 * 1024 * 1024; // 100MB
+ /**
+ * Threshold value that determines when a dataset is considered "big"
+ * and may require special handling or optimizations
+ *
+ * For example, when error occurs in one item of the big dataset, it will not fail the whole pipeline
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const BIG_DATASET_TRESHOLD = 50;
+ /**
+ * Placeholder text used to represent a placeholder value of failed operation
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const FAILED_VALUE_PLACEHOLDER = '!?';
// <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
/**
* The maximum number of iterations for a loops
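For orientation, a minimal sketch of how a caller might use the two constants added above; the `items` array and `processItem` helper are hypothetical, only `BIG_DATASET_TRESHOLD` (50) and `FAILED_VALUE_PLACEHOLDER` ('!?') come from this release:

    // Hypothetical caller: tolerate per-item failures only when the dataset is "big"
    const isBigDataset = items.length > BIG_DATASET_TRESHOLD;
    const results = [];
    for (const item of items) {
        try {
            results.push(processItem(item)); // processItem is an assumed placeholder
        } catch (error) {
            if (!isBigDataset) {
                throw error; // a small dataset still fails the whole run
            }
            results.push(FAILED_VALUE_PLACEHOLDER); // '!?' marks the failed item
        }
    }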
@@ -194,7 +209,7 @@
const DEFAULT_SCRAPE_CACHE_DIRNAME = './.promptbook/scrape-cache';
// <- TODO: [🧜♂️]
/**
- *
+ * Default settings for parsing and generating CSV files in Promptbook.
*
* @public exported from `@promptbook/core`
*/
@@ -205,19 +220,19 @@
skipEmptyLines: true,
});
/**
- *
+ * Controls whether verbose logging is enabled by default throughout the application.
*
* @public exported from `@promptbook/core`
*/
let DEFAULT_IS_VERBOSE = false;
/**
- *
+ * Controls whether auto-installation of dependencies is enabled by default.
*
* @public exported from `@promptbook/core`
*/
const DEFAULT_IS_AUTO_INSTALLED = false;
/**
- *
+ * Indicates whether pipeline logic validation is enabled. When true, the pipeline logic is checked for consistency.
*
* @private within the repository
*/
@@ -872,7 +887,8 @@
*/

/**
- *
+ * Attempts to locate the specified application on a Linux system using the 'which' command.
+ * Returns the path to the executable if found, or null otherwise.
*
* @private within the repository
*/
@@ -892,7 +908,8 @@
*/

/**
- *
+ * Provides filesystem access (for example for Node.js-based scrapers)
+ * Creates a standardized filesystem interface that scrapers can use for file operations.
*
* @public exported from `@promptbook/node`
*/
@@ -938,7 +955,8 @@
// eslint-disable-next-line @typescript-eslint/no-var-requires
const userhome = require('userhome');
/**
- *
+ * Attempts to locate the specified application on a macOS system by checking standard application paths and using mdfind.
+ * Returns the path to the executable if found, or null otherwise.
*
* @private within the repository
*/
@@ -970,7 +988,8 @@
*/

/**
- *
+ * Attempts to locate the specified application on a Windows system by searching common installation directories.
+ * Returns the path to the executable if found, or null otherwise.
*
* @private within the repository
*/
@@ -1041,7 +1060,8 @@
*/

/**
- *
+ * Locates the LibreOffice executable on the current system by searching platform-specific paths.
+ * Returns the path to the executable if found, or null otherwise.
*
* @private within the repository
*/
@@ -1059,7 +1079,8 @@
*/

/**
- *
+ * Locates the Pandoc executable on the current system by searching platform-specific paths.
+ * Returns the path to the executable if found, or null otherwise.
*
* @private within the repository
*/
@@ -1077,7 +1098,7 @@
*/

/**
- *
+ * Provides paths to required executables (i.e. as Pandoc and LibreOffice) for Node.js environments.
*
* @public exported from `@promptbook/node`
*/
@@ -1277,8 +1298,12 @@
*/

/**
- *
+ * Creates a deep clone of the given object
+ *
+ * Note: This method only works for objects that are fully serializable to JSON and do not contain functions, Dates, or special types.
*
+ * @param objectValue The object to clone.
+ * @returns A deep, writable clone of the input object.
* @public exported from `@promptbook/utils`
*/
function deepClone(objectValue) {
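A small usage sketch of `deepClone` as documented above (JSON-serializable input only; the sample object is made up):

    const original = { name: 'example', tags: ['a', 'b'], nested: { count: 1 } };
    const copy = deepClone(original);
    copy.nested.count = 2; // mutating the clone...
    console.log(original.nested.count); // ...leaves the original untouched: 1
    // Per the note above, functions, Date instances and other non-JSON values are not supported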
@@ -1360,13 +1385,13 @@
*/
const REPLACING_NONCE = 'ptbkauk42kV2dzao34faw7FudQUHYPtW';
/**
- *
+ * Placeholder value indicating a parameter is missing its value.
*
* @private within the repository
*/
const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
/**
- *
+ * Placeholder value indicating a parameter is restricted and cannot be used directly.
*
* @private within the repository
*/
@@ -1797,15 +1822,21 @@
* @public exported from `@promptbook/core`
*/
function isPipelinePrepared(pipeline) {
- // Note: Ignoring `pipeline.preparations`
- // Note: Ignoring `pipeline.knowledgePieces`
+ // Note: Ignoring `pipeline.preparations`
+ // Note: Ignoring `pipeline.knowledgePieces`
if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
+ // TODO: !!! Comment this out
+ console.log('Pipeline is not prepared because title is undefined or empty', pipeline);
return false;
}
- if (!pipeline.personas.every((persona) => persona.
+ if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
+ // TODO: !!! Comment this out
+ console.log('Pipeline is not prepared because personas are not prepared', pipeline.personas);
return false;
}
if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
+ // TODO: !!! Comment this out
+ console.log('Pipeline is not prepared because knowledge sources are not prepared', pipeline.knowledgeSources);
return false;
}
/*
@@ -1830,7 +1861,7 @@
* Function isValidJsonString will tell you if the string is valid JSON or not
*
* @param value The string to check
- * @returns
+ * @returns `true` if the string is a valid JSON string, false otherwise
*
* @public exported from `@promptbook/utils`
*/
@@ -1848,6 +1879,42 @@
}
}

+ /**
+ * Converts a JavaScript Object Notation (JSON) string into an object.
+ *
+ * Note: This is wrapper around `JSON.parse()` with better error and type handling
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function jsonParse(value) {
+ if (value === undefined) {
+ throw new Error(`Can not parse JSON from undefined value.`);
+ }
+ else if (typeof value !== 'string') {
+ console.error('Can not parse JSON from non-string value.', { text: value });
+ throw new Error(spaceTrim__default["default"](`
+ Can not parse JSON from non-string value.
+
+ The value type: ${typeof value}
+ See more in console.
+ `));
+ }
+ try {
+ return JSON.parse(value);
+ }
+ catch (error) {
+ if (!(error instanceof Error)) {
+ throw error;
+ }
+ throw new Error(spaceTrim__default["default"]((block) => `
+ ${block(error.message)}
+
+ The JSON text:
+ ${block(value)}
+ `));
+ }
+ }
+
/**
* Recursively converts JSON strings to JSON objects

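The added `jsonParse` is a drop-in replacement for bare `JSON.parse` with richer errors; a short sketch of the behaviour implied by the implementation above:

    const ok = jsonParse('{"modelName":"gpt-4o","temperature":0.7}');
    // -> { modelName: 'gpt-4o', temperature: 0.7 }

    try {
        jsonParse('{ not valid json');
    } catch (error) {
        // the thrown Error message contains both the parser error and the offending JSON text
        console.error(error.message);
    }

    jsonParse(undefined); // throws: "Can not parse JSON from undefined value."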
@@ -1866,7 +1933,7 @@
const newObject = { ...object };
for (const [key, value] of Object.entries(object)) {
if (typeof value === 'string' && isValidJsonString(value)) {
- newObject[key] =
+ newObject[key] = jsonParse(value);
}
else {
newObject[key] = jsonStringsToJsons(value);
@@ -2043,7 +2110,75 @@
* TODO: [🐚] Split into more files and make `PrepareTask` & `RemoteTask` + split the function
*/

-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+ /**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const ZERO_VALUE = $deepFreeze({ value: 0 });
+ /**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
+ /**
+ * Represents the usage with no resources consumed
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const ZERO_USAGE = $deepFreeze({
+ price: ZERO_VALUE,
+ input: {
+ tokensCount: ZERO_VALUE,
+ charactersCount: ZERO_VALUE,
+ wordsCount: ZERO_VALUE,
+ sentencesCount: ZERO_VALUE,
+ linesCount: ZERO_VALUE,
+ paragraphsCount: ZERO_VALUE,
+ pagesCount: ZERO_VALUE,
+ },
+ output: {
+ tokensCount: ZERO_VALUE,
+ charactersCount: ZERO_VALUE,
+ wordsCount: ZERO_VALUE,
+ sentencesCount: ZERO_VALUE,
+ linesCount: ZERO_VALUE,
+ paragraphsCount: ZERO_VALUE,
+ pagesCount: ZERO_VALUE,
+ },
+ });
+ /**
+ * Represents the usage with unknown resources consumed
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const UNCERTAIN_USAGE = $deepFreeze({
+ price: UNCERTAIN_ZERO_VALUE,
+ input: {
+ tokensCount: UNCERTAIN_ZERO_VALUE,
+ charactersCount: UNCERTAIN_ZERO_VALUE,
+ wordsCount: UNCERTAIN_ZERO_VALUE,
+ sentencesCount: UNCERTAIN_ZERO_VALUE,
+ linesCount: UNCERTAIN_ZERO_VALUE,
+ paragraphsCount: UNCERTAIN_ZERO_VALUE,
+ pagesCount: UNCERTAIN_ZERO_VALUE,
+ },
+ output: {
+ tokensCount: UNCERTAIN_ZERO_VALUE,
+ charactersCount: UNCERTAIN_ZERO_VALUE,
+ wordsCount: UNCERTAIN_ZERO_VALUE,
+ sentencesCount: UNCERTAIN_ZERO_VALUE,
+ linesCount: UNCERTAIN_ZERO_VALUE,
+ paragraphsCount: UNCERTAIN_ZERO_VALUE,
+ pagesCount: UNCERTAIN_ZERO_VALUE,
+ },
+ });
+ /**
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
+
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];

/**
* Checks if value is valid email
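The `prepare-persona` pipeline above now produces a ranked `modelsRequirements` JSON array rather than the former single `modelRequirements` object; mirrored from the example embedded in the book source, the expected shape is:

    [
        { "modelName": "gpt-4o", "systemMessage": "You are experienced AI engineer and helpfull assistant.", "temperature": 0.7 },
        { "modelName": "claude-3-5-sonnet", "systemMessage": "You are a friendly and knowledgeable chatbot.", "temperature": 0.5 }
    ]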
@@ -2346,7 +2481,7 @@
*/
function unpreparePipeline(pipeline) {
let { personas, knowledgeSources, tasks } = pipeline;
- personas = personas.map((persona) => ({ ...persona,
+ personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
tasks = tasks.map((task) => {
let { dependentParameterNames } = task;
@@ -2387,7 +2522,7 @@
/**
* Constructs a pipeline collection from pipelines
*
- * @param pipelines
+ * @param pipelines Array of pipeline JSON objects to include in the collection
*
* Note: During the construction logic of all pipelines are validated
* Note: It is not recommended to use this constructor directly, use `createCollectionFromJson` *(or other variant)* instead
@@ -2520,74 +2655,6 @@
await Promise.all(tasks);
}

- /**
- * Represents the uncertain value
- *
- * @public exported from `@promptbook/core`
- */
- const ZERO_VALUE = $deepFreeze({ value: 0 });
- /**
- * Represents the uncertain value
- *
- * @public exported from `@promptbook/core`
- */
- const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
- /**
- * Represents the usage with no resources consumed
- *
- * @public exported from `@promptbook/core`
- */
- const ZERO_USAGE = $deepFreeze({
- price: ZERO_VALUE,
- input: {
- tokensCount: ZERO_VALUE,
- charactersCount: ZERO_VALUE,
- wordsCount: ZERO_VALUE,
- sentencesCount: ZERO_VALUE,
- linesCount: ZERO_VALUE,
- paragraphsCount: ZERO_VALUE,
- pagesCount: ZERO_VALUE,
- },
- output: {
- tokensCount: ZERO_VALUE,
- charactersCount: ZERO_VALUE,
- wordsCount: ZERO_VALUE,
- sentencesCount: ZERO_VALUE,
- linesCount: ZERO_VALUE,
- paragraphsCount: ZERO_VALUE,
- pagesCount: ZERO_VALUE,
- },
- });
- /**
- * Represents the usage with unknown resources consumed
- *
- * @public exported from `@promptbook/core`
- */
- $deepFreeze({
- price: UNCERTAIN_ZERO_VALUE,
- input: {
- tokensCount: UNCERTAIN_ZERO_VALUE,
- charactersCount: UNCERTAIN_ZERO_VALUE,
- wordsCount: UNCERTAIN_ZERO_VALUE,
- sentencesCount: UNCERTAIN_ZERO_VALUE,
- linesCount: UNCERTAIN_ZERO_VALUE,
- paragraphsCount: UNCERTAIN_ZERO_VALUE,
- pagesCount: UNCERTAIN_ZERO_VALUE,
- },
- output: {
- tokensCount: UNCERTAIN_ZERO_VALUE,
- charactersCount: UNCERTAIN_ZERO_VALUE,
- wordsCount: UNCERTAIN_ZERO_VALUE,
- sentencesCount: UNCERTAIN_ZERO_VALUE,
- linesCount: UNCERTAIN_ZERO_VALUE,
- paragraphsCount: UNCERTAIN_ZERO_VALUE,
- pagesCount: UNCERTAIN_ZERO_VALUE,
- },
- });
- /**
- * Note: [💞] Ignore a discrepancy between file name and entity name
- */
-
/**
* Function `addUsage` will add multiple usages into one
*
@@ -2934,27 +3001,48 @@
 pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
 tools,
 });
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
 const _llms = arrayableToArray(tools.llm);
 const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
- const availableModels = await llmTools.listModels()
- const availableModelNames = availableModels
+ const availableModels = (await llmTools.listModels())
 .filter(({ modelVariant }) => modelVariant === 'CHAT')
- .map(({ modelName }) =>
+ .map(({ modelName, modelDescription }) => ({
+ modelName,
+ modelDescription,
+ // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
+ }));
+ const result = await preparePersonaExecutor({
+ availableModels /* <- Note: Passing as JSON */,
+ personaDescription,
+ }).asPromise();
 const { outputParameters } = result;
- const {
+ const { modelsRequirements: modelsRequirementsJson } = outputParameters;
+ let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
 if (isVerbose) {
- console.info(`PERSONA ${personaDescription}`,
+ console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
 }
+ if (!Array.isArray(modelsRequirementsUnchecked)) {
+ // <- TODO: Book should have syntax and system to enforce shape of JSON
+ modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
+ /*
+ throw new UnexpectedError(
+ spaceTrim(
+ (block) => `
+ Invalid \`modelsRequirements\`:
+
+ \`\`\`json
+ ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
+ \`\`\`
+ `,
+ ),
+ );
+ */
+ }
+ const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
 modelVariant: 'CHAT',
+ ...modelRequirements,
+ }));
+ return {
+ modelsRequirements,
 };
 }
 /**
@@ -2965,7 +3053,8 @@
 */
 
 /**
- *
+ * Safely retrieves the global scope object (window in browser, global in Node.js)
+ * regardless of the JavaScript environment in which the code is running
 *
 * Note: `$` is used to indicate that this function is not a pure function - it access global scope
 *
@@ -2976,10 +3065,10 @@
 }
 
 /**
- *
+ * Normalizes a text string to SCREAMING_CASE (all uppercase with underscores).
 *
- * @param text
- * @returns
+ * @param text The text string to be converted to SCREAMING_CASE format.
+ * @returns The normalized text in SCREAMING_CASE format.
 * @example 'HELLO_WORLD'
 * @example 'I_LOVE_PROMPTBOOK'
 * @public exported from `@promptbook/utils`
@@ -3031,10 +3120,10 @@
 */
 
 /**
- *
+ * Normalizes a text string to snake_case format.
 *
- * @param text
- * @returns
+ * @param text The text string to be converted to snake_case format.
+ * @returns The normalized text in snake_case format.
 * @example 'hello_world'
 * @example 'i_love_promptbook'
 * @public exported from `@promptbook/utils`
@@ -3044,11 +3133,11 @@
 }
 
 /**
- *
+ * Global registry for storing and managing registered entities of a given type.
 *
 * Note: `$` is used to indicate that this function is not a pure function - it accesses and adds variables in global scope.
 *
- * @private internal utility, exported are only
+ * @private internal utility, exported are only singleton instances of this class
 */
 class $Register {
 constructor(registerName) {
@@ -3092,10 +3181,10 @@
 }
 
 /**
- *
+ * Global registry for storing metadata about all available scrapers and converters.
 *
- * Note: `$` is used to indicate that this interacts with the global scope
- * @singleton Only one instance of each register is created per build, but
+ * Note: `$` is used to indicate that this interacts with the global scope.
+ * @singleton Only one instance of each register is created per build, but there can be more in different contexts (e.g., tests).
 * @public exported from `@promptbook/core`
 */
 const $scrapersMetadataRegister = new $Register('scrapers_metadata');
@@ -3104,10 +3193,11 @@
 */
 
 /**
- *
+ * Registry for all available scrapers in the system.
+ * Central point for registering and accessing different types of content scrapers.
 *
 * Note: `$` is used to indicate that this interacts with the global scope
- * @singleton Only one instance of each register is created per build, but
+ * @singleton Only one instance of each register is created per build, but there can be more than one in different build modules
 * @public exported from `@promptbook/core`
 */
 const $scrapersRegister = new $Register('scraper_constructors');
@@ -3447,10 +3537,10 @@
 */
 
 /**
- *
+ * Removes diacritic marks (accents) from characters in a string.
 *
- * @param input
- * @returns
+ * @param input The string containing diacritics to be normalized.
+ * @returns The string with diacritics removed or normalized.
 * @public exported from `@promptbook/utils`
 */
 function removeDiacritics(input) {
@@ -3464,10 +3554,10 @@
 */
 
 /**
- *
+ * Converts a given text to kebab-case format.
 *
- * @param text
- * @returns
+ * @param text The text to be converted.
+ * @returns The kebab-case formatted string.
 * @example 'hello-world'
 * @example 'i-love-promptbook'
 * @public exported from `@promptbook/utils`
@@ -3536,7 +3626,8 @@
 */
 
 /**
- *
+ * Converts a name to a properly formatted subfolder path for cache storage.
+ * Handles normalization and path formatting to create consistent cache directory structures.
 *
 * @private for `FileCacheStorage`
 */
@@ -3616,11 +3707,11 @@
 }
 
 /**
- *
+ * Converts a title string into a normalized name.
 *
- * @param value
- * @returns
- * @example
+ * @param value The title string to be converted to a name.
+ * @returns A normalized name derived from the input title.
+ * @example 'Hello World!' -> 'hello-world'
 * @public exported from `@promptbook/utils`
 */
 function titleToName(value) {
@@ -3671,7 +3762,9 @@
 */
 
 /**
- *
+ * Factory function that creates a handler for processing knowledge sources.
+ * Provides standardized processing of different types of knowledge sources
+ * across various scraper implementations.
 *
 * @public exported from `@promptbook/core`
 */
@@ -3778,7 +3871,7 @@
 > },
 */
 async asJson() {
- return
+ return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
 },
 async asText() {
 return await tools.fs.readFile(filename, 'utf-8');
@@ -3912,9 +4005,12 @@
 */
 
 /**
- *
+ * Prepares tasks by adding knowledge to the prompt and ensuring all necessary parameters are included.
 *
- * @
+ * @param tasks Sequence of tasks that are chained together to form a pipeline
+ * @returns A promise that resolves to the prepared tasks.
+ *
+ * @private internal utility of `preparePipeline`
 */
 async function prepareTasks(pipeline, tools, options) {
 const { maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT } = options;
@@ -4036,14 +4132,14 @@
 // TODO: [🖌][🧠] Implement some `mapAsync` function
 const preparedPersonas = new Array(personas.length);
 await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
- const
+ const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
 rootDirname,
 maxParallelCount /* <- TODO: [🪂] */,
 isVerbose,
 });
 const preparedPersona = {
 ...persona,
-
+ modelsRequirements,
 preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
 // <- TODO: [🍙] Make some standard order of json properties
 };
@@ -4351,7 +4447,7 @@
 }
 
 /**
- *
+ * Contains configuration options for parsing and generating CSV files, such as delimiters and quoting rules.
 *
 * @public exported from `@promptbook/core`
 */
@@ -4360,11 +4456,29 @@
 // encoding: 'utf-8',
 });
 
+ /**
+ * Converts a CSV string into an object
+ *
+ * Note: This is wrapper around `papaparse.parse()` with better autohealing
+ *
+ * @private - for now until `@promptbook/csv` is released
+ */
+ function csvParse(value /* <- TODO: string_csv */, settings, schema /* <- TODO: Make CSV Schemas */) {
+ settings = { ...settings, ...MANDATORY_CSV_SETTINGS };
+ // Note: Autoheal invalid '\n' characters
+ if (settings.newline && !settings.newline.includes('\r') && value.includes('\r')) {
+ console.warn('CSV string contains carriage return characters, but in the CSV settings the `newline` setting does not include them. Autohealing the CSV string.');
+ value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
+ }
+ const csv = papaparse.parse(value, settings);
+ return csv;
+ }
+
 /**
 * Function to check if a string is valid CSV
 *
 * @param value The string to check
- * @returns
+ * @returns `true` if the string is a valid CSV string, false otherwise
 *
 * @public exported from `@promptbook/utils`
 */
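The new `csvParse` wrapper above normalizes carriage returns before handing the string to `papaparse.parse`. A minimal sketch of that autohealing step with hypothetical input data, using plain `papaparse` outside the package:

const papaparse = require('papaparse');
let value = 'name,score\r\nAlice,1\r\nBob,2'; // hypothetical CSV with Windows newlines
// Mirror the wrapper's warn-and-replace step so the parser only ever sees '\n'
value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
const csv = papaparse.parse(value, { header: true });
console.log(csv.errors.length); // 0
console.log(csv.data); // [ { name: 'Alice', score: '1' }, { name: 'Bob', score: '2' } ]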
@@ -4388,7 +4502,7 @@
 * @public exported from `@promptbook/core`
 * <- TODO: [🏢] Export from package `@promptbook/csv`
 */
- const
+ const CsvFormatParser = {
 formatName: 'CSV',
 aliases: ['SPREADSHEET', 'TABLE'],
 isValid(value, settings, schema) {
@@ -4400,12 +4514,12 @@
 heal(value, settings, schema) {
 throw new Error('Not implemented');
 },
-
+ subvalueParsers: [
 {
 subvalueName: 'ROW',
- async mapValues(
- const csv =
+ async mapValues(options) {
+ const { value, outputParameterName, settings, mapCallback, onProgress } = options;
+ const csv = csvParse(value, settings);
 if (csv.errors.length !== 0) {
 throw new CsvFormatError(spaceTrim__default["default"]((block) => `
 CSV parsing error
@@ -4420,23 +4534,37 @@
 ${block(value)}
 `));
 }
- const mappedData =
+ const mappedData = [];
+ const length = csv.data.length;
+ for (let index = 0; index < length; index++) {
+ const row = csv.data[index];
 if (row[outputParameterName]) {
 throw new CsvFormatError(`Can not overwrite existing column "${outputParameterName}" in CSV row`);
 }
-
+ const mappedRow = {
 ...row,
- [outputParameterName]: await mapCallback(row, index),
+ [outputParameterName]: await mapCallback(row, index, length),
 };
-
+ mappedData.push(mappedRow);
+ if (onProgress) {
+ // Note: Report the CSV with all rows mapped so far
+ /*
+ // TODO: [🛕] Report progress with all the rows including the pending ones
+ const progressData = mappedData.map((row, i) =>
+ i > index ? { ...row, [outputParameterName]: PENDING_VALUE_PLACEHOLDER } : row,
+ );
+ */
+ await onProgress(papaparse.unparse(mappedData, { ...settings, ...MANDATORY_CSV_SETTINGS }));
+ }
+ }
 return papaparse.unparse(mappedData, { ...settings, ...MANDATORY_CSV_SETTINGS });
 },
 },
 {
 subvalueName: 'CELL',
- async mapValues(
- const csv =
+ async mapValues(options) {
+ const { value, settings, mapCallback, onProgress } = options;
+ const csv = csvParse(value, settings);
 if (csv.errors.length !== 0) {
 throw new CsvFormatError(spaceTrim__default["default"]((block) => `
 CSV parsing error
@@ -4452,9 +4580,9 @@
 `));
 }
 const mappedData = await Promise.all(csv.data.map(async (row, rowIndex) => {
- return /* not await */ Promise.all(Object.entries(row).map(async ([key, value], columnIndex) => {
+ return /* not await */ Promise.all(Object.entries(row).map(async ([key, value], columnIndex, array) => {
 const index = rowIndex * Object.keys(row).length + columnIndex;
- return /* not await */ mapCallback({ [key]: value }, index);
+ return /* not await */ mapCallback({ [key]: value }, index, array.length);
 }));
 }));
 return papaparse.unparse(mappedData, { ...settings, ...MANDATORY_CSV_SETTINGS });
@@ -4463,10 +4591,10 @@
 ],
 };
 /**
- * TODO: [🍓] In `
- * TODO: [🍓] In `
- * TODO: [🍓] In `
- * TODO: [🍓] In `
+ * TODO: [🍓] In `CsvFormatParser` implement simple `isValid`
+ * TODO: [🍓] In `CsvFormatParser` implement partial `canBeValid`
+ * TODO: [🍓] In `CsvFormatParser` implement `heal
+ * TODO: [🍓] In `CsvFormatParser` implement `subvalueParsers`
 * TODO: [🏢] Allow to expect something inside CSV objects and other formats
 */
 
@@ -4475,7 +4603,7 @@
 *
 * @private still in development [🏢]
 */
- const
+ const JsonFormatParser = {
 formatName: 'JSON',
 mimeType: 'application/json',
 isValid(value, settings, schema) {
@@ -4487,28 +4615,28 @@
 heal(value, settings, schema) {
 throw new Error('Not implemented');
 },
-
+ subvalueParsers: [],
 };
 /**
 * TODO: [🧠] Maybe propper instance of object
 * TODO: [0] Make string_serialized_json
 * TODO: [1] Make type for JSON Settings and Schema
 * TODO: [🧠] What to use for validating JSONs - JSON Schema, ZoD, typescript types/interfaces,...?
- * TODO: [🍓] In `
- * TODO: [🍓] In `
- * TODO: [🍓] In `
- * TODO: [🍓] In `
+ * TODO: [🍓] In `JsonFormatParser` implement simple `isValid`
+ * TODO: [🍓] In `JsonFormatParser` implement partial `canBeValid`
+ * TODO: [🍓] In `JsonFormatParser` implement `heal
+ * TODO: [🍓] In `JsonFormatParser` implement `subvalueParsers`
 * TODO: [🏢] Allow to expect something inside JSON objects and other formats
 */
 
 /**
 * Definition for any text - this will be always valid
 *
- * Note: This is not useful for validation, but for splitting and mapping with `
+ * Note: This is not useful for validation, but for splitting and mapping with `subvalueParsers`
 *
 * @public exported from `@promptbook/core`
 */
- const
+ const TextFormatParser = {
 formatName: 'TEXT',
 isValid(value) {
 return typeof value === 'string';
@@ -4517,19 +4645,20 @@
 return typeof partialValue === 'string';
 },
 heal() {
- throw new UnexpectedError('It does not make sense to call `
+ throw new UnexpectedError('It does not make sense to call `TextFormatParser.heal`');
 },
-
+ subvalueParsers: [
 {
 subvalueName: 'LINE',
- async mapValues(
+ async mapValues(options) {
+ const { value, mapCallback, onProgress } = options;
 const lines = value.split('\n');
- const mappedLines = await Promise.all(lines.map((lineContent, lineNumber) =>
+ const mappedLines = await Promise.all(lines.map((lineContent, lineNumber, array) =>
 // TODO: [🧠] Maybe option to skip empty line
 /* not await */ mapCallback({
 lineContent,
 // TODO: [🧠] Maybe also put here `lineNumber`
- }, lineNumber)));
+ }, lineNumber, array.length)));
 return mappedLines.join('\n');
 },
 },
@@ -4539,10 +4668,10 @@
 /**
 * TODO: [1] Make type for XML Text and Schema
 * TODO: [🧠][🤠] Here should be all words, characters, lines, paragraphs, pages available as subvalues
- * TODO: [🍓] In `
- * TODO: [🍓] In `
- * TODO: [🍓] In `
- * TODO: [🍓] In `
+ * TODO: [🍓] In `TextFormatParser` implement simple `isValid`
+ * TODO: [🍓] In `TextFormatParser` implement partial `canBeValid`
+ * TODO: [🍓] In `TextFormatParser` implement `heal
+ * TODO: [🍓] In `TextFormatParser` implement `subvalueParsers`
 * TODO: [🏢] Allow to expect something inside each item of list and other formats
 */
 
@@ -4550,7 +4679,7 @@
 * Function to check if a string is valid XML
 *
 * @param value
- * @returns
+ * @returns `true` if the string is a valid XML string, false otherwise
 *
 * @public exported from `@promptbook/utils`
 */
@@ -4575,7 +4704,7 @@
 *
 * @private still in development [🏢]
 */
- const
+ const XmlFormatParser = {
 formatName: 'XML',
 mimeType: 'application/xml',
 isValid(value, settings, schema) {
@@ -4587,17 +4716,17 @@
 heal(value, settings, schema) {
 throw new Error('Not implemented');
 },
-
+ subvalueParsers: [],
 };
 /**
 * TODO: [🧠] Maybe propper instance of object
 * TODO: [0] Make string_serialized_xml
 * TODO: [1] Make type for XML Settings and Schema
 * TODO: [🧠] What to use for validating XMLs - XSD,...
- * TODO: [🍓] In `
- * TODO: [🍓] In `
- * TODO: [🍓] In `
- * TODO: [🍓] In `
+ * TODO: [🍓] In `XmlFormatParser` implement simple `isValid`
+ * TODO: [🍓] In `XmlFormatParser` implement partial `canBeValid`
+ * TODO: [🍓] In `XmlFormatParser` implement `heal
+ * TODO: [🍓] In `XmlFormatParser` implement `subvalueParsers`
 * TODO: [🏢] Allow to expect something inside XML and other formats
 */
 
@@ -4606,24 +4735,19 @@
 *
 * @private internal index of `...` <- TODO [🏢]
 */
- const FORMAT_DEFINITIONS = [
- JsonFormatDefinition,
- XmlFormatDefinition,
- TextFormatDefinition,
- CsvFormatDefinition,
- ];
+ const FORMAT_DEFINITIONS = [JsonFormatParser, XmlFormatParser, TextFormatParser, CsvFormatParser];
 /**
 * Note: [💞] Ignore a discrepancy between file name and entity name
 */
 
 /**
- * Maps available parameters to expected parameters
+ * Maps available parameters to expected parameters for a pipeline task.
 *
 * The strategy is:
- * 1)
- * 2)
+ * 1) First, match parameters by name where both available and expected.
+ * 2) Then, if there are unmatched expected and available parameters, map them by order.
 *
- * @throws {PipelineExecutionError}
+ * @throws {PipelineExecutionError} If the number of unmatched expected and available parameters does not match, or mapping is ambiguous.
 * @private within the repository used in `createPipelineExecutor`
 */
 function mapAvailableToExpectedParameters(options) {
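The mapping strategy documented in the hunk above can be illustrated with hypothetical parameters (values not taken from the package):

// expectedParameters:  { title: null, body: null }
// availableParameters: { body: 'Hello', subject: 'Greeting' }
// Step 1 pairs `body` by name; step 2 pairs the remaining `title` with the remaining
// `subject` by order, yielding { body: 'Hello', title: 'Greeting' }.
// A mismatch in the number of leftover expected vs. available parameters throws PipelineExecutionError.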
@@ -4646,7 +4770,7 @@
 else if (!availableParametersNames.has(parameterName) && expectedParameterNames.has(parameterName)) ;
 }
 if (expectedParameterNames.size === 0) {
- // Note: [👨👨👧] Now we can freeze `mappedParameters` to prevent
+ // Note: [👨👨👧] Now we can freeze `mappedParameters` to prevent accidental modifications after mapping
 Object.freeze(mappedParameters);
 return mappedParameters;
 }
@@ -4677,7 +4801,7 @@
 for (let i = 0; i < expectedParameterNames.size; i++) {
 mappedParameters[expectedParameterNamesArray[i]] = availableParameters[availableParametersNamesArray[i]];
 }
- // Note: [👨👨👧] Now we can freeze `mappedParameters` to prevent
+ // Note: [👨👨👧] Now we can freeze `mappedParameters` to prevent accidental modifications after mapping
 Object.freeze(mappedParameters);
 return mappedParameters;
 }
@@ -4781,7 +4905,7 @@
 }
 /**
 * TODO: Add some auto-healing logic + extract YAML, JSON5, TOML, etc.
- * TODO: [🏢] Make this logic part of `
+ * TODO: [🏢] Make this logic part of `JsonFormatParser` or `isValidJsonString`
 */
 
 /**
@@ -4841,10 +4965,12 @@
 throw new PipelineExecutionError('Parameter is already opened or not closed');
 }
 if (parameters[parameterName] === undefined) {
+ console.log('!!! templateParameters 1', { parameterName, template, parameters });
 throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
 }
 let parameterValue = parameters[parameterName];
 if (parameterValue === undefined) {
+ console.log('!!! templateParameters 2', { parameterName, template, parameters });
 throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
 }
 parameterValue = valueToString(parameterValue);
@@ -5000,7 +5126,7 @@
 PAGES: countPages,
 };
 /**
- * TODO: [🧠][🤠] This should be probbably as part of `
+ * TODO: [🧠][🤠] This should be probbably as part of `TextFormatParser`
 * Note: [💞] Ignore a discrepancy between file name and entity name
 */
 
@@ -5028,13 +5154,17 @@
 }
 /**
 * TODO: [💝] Unite object for expecting amount and format
- * TODO: [🧠][🤠] This should be part of `
+ * TODO: [🧠][🤠] This should be part of `TextFormatParser`
 * Note: [💝] and [🤠] are interconnected together
 */
 
 /**
- *
+ * Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
+ * (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
+ * Throws errors if execution fails after all attempts.
 *
+ * @param options - The options for execution, including task, parameters, pipeline, and configuration.
+ * @returns The result string of the executed task.
 * @private internal utility of `createPipelineExecutor`
 */
 async function executeAttempts(options) {
@@ -5256,7 +5386,7 @@
 if (task.format) {
 if (task.format === 'JSON') {
 if (!isValidJsonString($ongoingTaskResult.$resultString || '')) {
- // TODO: [🏢] Do more universally via `
+ // TODO: [🏢] Do more universally via `FormatParser`
 try {
 $ongoingTaskResult.$resultString = extractJsonBlock($ongoingTaskResult.$resultString || '');
 }
@@ -5358,12 +5488,16 @@
 */
 
 /**
- *
+ * Executes a pipeline task that requires mapping or iterating over subvalues of a parameter (such as rows in a CSV).
+ * Handles format and subformat resolution, error handling, and progress reporting.
+ *
+ * @param options - Options for execution, including task details and progress callback.
+ * @returns The result of the subvalue mapping or execution attempts.
 *
 * @private internal utility of `createPipelineExecutor`
 */
 async function executeFormatSubvalues(options) {
- const { task, jokerParameterNames, parameters, priority, csvSettings, pipelineIdentification } = options;
+ const { task, jokerParameterNames, parameters, priority, csvSettings, onProgress, pipelineIdentification } = options;
 if (task.foreach === undefined) {
 return /* not await */ executeAttempts(options);
 }
@@ -5394,16 +5528,16 @@
 ${block(pipelineIdentification)}
 `));
 }
- const
- if (
+ const subvalueParser = formatDefinition.subvalueParsers.find((subvalueParser) => [subvalueParser.subvalueName, ...(subvalueParser.aliases || [])].includes(task.foreach.subformatName));
+ if (subvalueParser === undefined) {
 throw new UnexpectedError(
 // <- TODO: [🧠][🧐] Should be formats fixed per promptbook version or behave as plugins (=> change UnexpectedError)
 spaceTrim__default["default"]((block) => `
 Unsupported subformat name "${task.foreach.subformatName}" for format "${task.foreach.formatName}"
 
 Available subformat names for format "${formatDefinition.formatName}":
- ${block(formatDefinition.
- .map((
+ ${block(formatDefinition.subvalueParsers
+ .map((subvalueParser) => subvalueParser.subvalueName)
 .map((subvalueName) => `- ${subvalueName}`)
 .join('\n'))}
 
@@ -5417,53 +5551,83 @@
 formatSettings = csvSettings;
 // <- TODO: [🤹♂️] More universal, make simmilar pattern for other formats for example \n vs \r\n in text
 }
- const resultString = await
+ const resultString = await subvalueParser.mapValues({
+ value: parameterValue,
+ outputParameterName: task.foreach.outputSubparameterName,
+ settings: formatSettings,
+ onProgress(partialResultString) {
+ return onProgress(Object.freeze({
+ [task.resultingParameterName]: partialResultString,
+ }));
+ },
+ async mapCallback(subparameters, index, length) {
+ let mappedParameters;
+ try {
+ mappedParameters = mapAvailableToExpectedParameters({
+ expectedParameters: Object.fromEntries(task.foreach.inputSubparameterNames.map((subparameterName) => [subparameterName, null])),
+ availableParameters: subparameters,
+ });
 }
+ catch (error) {
+ if (!(error instanceof PipelineExecutionError)) {
+ throw error;
+ }
+ const highLevelError = new PipelineExecutionError(spaceTrim__default["default"]((block) => `
+ ${error.message}
 
+ This is error in FOREACH command when mapping ${formatDefinition.formatName} ${subvalueParser.subvalueName} data (${index + 1}/${length})
+ You have probbably passed wrong data to pipeline or wrong data was generated which are processed by FOREACH command
 
+ ${block(pipelineIdentification)}
+ `));
+ if (length > BIG_DATASET_TRESHOLD) {
+ console.error(highLevelError);
+ return FAILED_VALUE_PLACEHOLDER;
+ }
+ throw highLevelError;
+ }
+ const allSubparameters = {
+ ...parameters,
+ ...mappedParameters,
+ };
+ Object.freeze(allSubparameters);
+ try {
+ const subresultString = await executeAttempts({
+ ...options,
+ priority: priority + index,
+ parameters: allSubparameters,
+ pipelineIdentification: spaceTrim__default["default"]((block) => `
+ ${block(pipelineIdentification)}
+ Subparameter index: ${index}
+ `),
+ });
+ return subresultString;
+ }
+ catch (error) {
+ if (length > BIG_DATASET_TRESHOLD) {
+ console.error(spaceTrim__default["default"]((block) => `
+ ${error.message}
+
+ This is error in FOREACH command when processing ${formatDefinition.formatName} ${subvalueParser.subvalueName} data (${index + 1}/${length})
+
+ ${block(pipelineIdentification)}
+ `));
+ return FAILED_VALUE_PLACEHOLDER;
+ }
+ throw error;
+ }
+ },
 });
 return resultString;
 }
 
 /**
- *
+ * Returns the context for a given task, typically used to provide additional information or variables
+ * required for the execution of the task within a pipeline. The context is returned as a string value
+ * that may include markdown formatting.
 *
+ * @param task - The task for which the context is being generated. This should be a deeply immutable TaskJson object.
+ * @returns The context as a string, formatted as markdown and parameter value.
 * @private internal utility of `createPipelineExecutor`
 */
 async function getContextForTask(task) {
@@ -5471,7 +5635,7 @@
 }
 
 /**
- *
+ * Retrieves example values or templates for a given task, used to guide or validate pipeline execution.
 *
 * @private internal utility of `createPipelineExecutor`
 */
@@ -5480,29 +5644,131 @@
 }
 
 /**
- *
+ * Computes the cosine similarity between two embedding vectors
+ *
+ * Note: This is helping function for RAG (retrieval-augmented generation)
+ *
+ * @param embeddingVector1
+ * @param embeddingVector2
+ * @returns Cosine similarity between the two vectors
+ *
+ * @public exported from `@promptbook/core`
+ */
+ function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
+ if (embeddingVector1.length !== embeddingVector2.length) {
+ throw new TypeError('Embedding vectors must have the same length');
+ }
+ const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
+ const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
+ const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
+ return 1 - dotProduct / (magnitude1 * magnitude2);
+ }
+
+ /**
+ *
+ * @param knowledgePieces
+ * @returns
 *
 * @private internal utility of `createPipelineExecutor`
 */
-
+ function knowledgePiecesToString(knowledgePieces) {
+ return knowledgePieces
+ .map((knowledgePiece) => {
+ const { content } = knowledgePiece;
+ return `- ${content}`;
+ })
+ .join('\n');
+ // <- TODO: [🧠] Some smarter aggregation of knowledge pieces, single-line vs multi-line vs mixed
 }
 
 /**
- *
+ * Retrieves the most relevant knowledge pieces for a given task using embedding-based similarity search.
+ * This is where retrieval-augmented generation (RAG) is performed to enhance the task with external knowledge.
 *
 * @private internal utility of `createPipelineExecutor`
 */
- async function
- const { preparedPipeline, task,
- const
- const
+ async function getKnowledgeForTask(options) {
+ const { tools, preparedPipeline, task, parameters } = options;
+ const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
+ const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
+ if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
+ return ''; // <- Note: Np knowledge present, return empty string
+ }
+ try {
+ // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
+ const _llms = arrayableToArray(tools.llm);
+ const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const taskEmbeddingPrompt = {
+ title: 'Knowledge Search',
+ modelRequirements: {
+ modelVariant: 'EMBEDDING',
+ modelName: firstKnowlegeIndex.modelName,
+ },
+ content: task.content,
+ parameters,
+ };
+ const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
+ const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
+ const { index } = knowledgePiece;
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
+ if (knowledgePieceIndex === undefined) {
+ return {
+ content: knowledgePiece.content,
+ relevance: 0,
+ };
+ }
+ const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
+ return {
+ content: knowledgePiece.content,
+ relevance,
+ };
+ });
+ const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
+ const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
+ console.log('!!! Embedding', {
+ task,
+ taskEmbeddingPrompt,
+ taskEmbeddingResult,
+ firstKnowlegePiece,
+ firstKnowlegeIndex,
+ knowledgePiecesWithRelevance,
+ knowledgePiecesSorted,
+ knowledgePiecesLimited,
+ });
+ return knowledgePiecesToString(knowledgePiecesLimited);
+ }
+ catch (error) {
+ assertsError(error);
+ console.error('Error in `getKnowledgeForTask`', error);
+ // Note: If the LLM fails, just return all knowledge pieces
+ return knowledgePiecesToString(preparedPipeline.knowledgePieces);
+ }
+ }
+ /**
+ * TODO: !!!! Verify if this is working
+ * TODO: [♨] Implement Better - use keyword search
+ * TODO: [♨] Examples of values
+ */
+
+ /**
+ * Retrieves all reserved parameters for a given pipeline task, including context, knowledge, examples, and metadata.
+ * Ensures all reserved parameters are defined and throws if any are missing.
+ *
+ * @param options - Options including tools, pipeline, task, and context.
+ * @returns An object containing all reserved parameters for the task.
+ *
+ * @private internal utility of `createPipelineExecutor`
+ */
+ async function getReservedParametersForTask(options) {
+ const { tools, preparedPipeline, task, parameters, pipelineIdentification } = options;
+ const context = await getContextForTask(); // <- [🏍]
+ const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task, parameters });
+ const examples = await getExamplesForTask();
+ const currentDate = new Date().toISOString(); // <- TODO: [🧠][💩] Better
+ const modelName = RESERVED_PARAMETER_MISSING_VALUE;
+ const reservedParameters = {
 content: RESERVED_PARAMETER_RESTRICTED,
 context,
 knowledge,
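For orientation, a minimal sketch exercising the newly added `computeCosineSimilarity` helper with hypothetical vectors (not taken from the package):

const a = [1, 0, 0];
const b = [0, 1, 0];
// dot product is 0 and both magnitudes are 1, so the helper returns 1 - 0 / (1 * 1) = 1
console.log(computeCosineSimilarity(a, b)); // 1 (orthogonal vectors)
// parallel vectors: dot = 2, magnitudes 1 and 2, so it returns 1 - 2 / 2 = 0
console.log(computeCosineSimilarity(a, [2, 0, 0])); // 0
// As written the returned value behaves like a distance (0 for identical directions),
// which matches how `getKnowledgeForTask` sorts knowledge pieces in ascending order above.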
@@ -5524,23 +5790,21 @@
 }
 
 /**
- *
+ * Executes a single task within a pipeline, handling parameter validation, error checking, and progress reporting.
+ *
+ * @param options - Options for execution, including the task, pipeline, parameters, and callbacks.
+ * @returns The output parameters produced by the task.
 *
 * @private internal utility of `createPipelineExecutor`
 */
 async function executeTask(options) {
 const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSupressed, } = options;
 const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
- await onProgress({
- outputParameters: {
- [currentTask.resultingParameterName]: '', // <- TODO: [🧠] What is the best value here?
- },
- });
 // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
 const usedParameterNames = extractParameterNamesFromTask(currentTask);
 const dependentParameterNames = new Set(currentTask.dependentParameterNames);
 // TODO: [👩🏾🤝👩🏻] Use here `mapAvailableToExpectedParameters`
- if (union(difference(usedParameterNames, dependentParameterNames), difference(dependentParameterNames, usedParameterNames)).size !== 0) {
+ if (difference(union(difference(usedParameterNames, dependentParameterNames), difference(dependentParameterNames, usedParameterNames)), new Set(RESERVED_PARAMETER_NAMES)).size !== 0) {
 throw new UnexpectedError(spaceTrim.spaceTrim((block) => `
 Dependent parameters are not consistent with used parameters:
 
@@ -5560,9 +5824,11 @@
 }
 const definedParameters = Object.freeze({
 ...(await getReservedParametersForTask({
+ tools,
 preparedPipeline,
 task: currentTask,
 pipelineIdentification,
+ parameters: parametersToPass,
 })),
 ...parametersToPass,
 });
@@ -5608,6 +5874,7 @@
 preparedPipeline,
 tools,
 $executionReport,
+ onProgress,
 pipelineIdentification,
 maxExecutionAttempts,
 maxParallelCount,
@@ -5635,7 +5902,8 @@
 */
 
 /**
- *
+ * Filters and returns only the output parameters from the provided pipeline execution options.
+ * Adds warnings for any expected output parameters that are missing.
 *
 * @private internal utility of `createPipelineExecutor`
 */
@@ -5660,9 +5928,12 @@
 }
 
 /**
- *
+ * Executes an entire pipeline, resolving tasks in dependency order, handling errors, and reporting progress.
 *
- * Note: This is not a `PipelineExecutor` (which is
+ * Note: This is not a `PipelineExecutor` (which is bound to a single pipeline), but a utility function used by `createPipelineExecutor` to create a `PipelineExecutor`.
+ *
+ * @param options - Options for execution, including input parameters, pipeline, and callbacks.
+ * @returns The result of the pipeline execution, including output parameters, errors, and usage statistics.
 *
 * @private internal utility of `createPipelineExecutor`
 */
@@ -5985,6 +6256,22 @@
 cacheDirname,
 intermediateFilesStrategy,
 isAutoInstalled,
+ }).catch((error) => {
+ assertsError(error);
+ return exportJson({
+ name: 'pipelineExecutorResult',
+ message: `Unuccessful PipelineExecutorResult, last catch`,
+ order: [],
+ value: {
+ isSuccessful: false,
+ errors: [serializeError(error)],
+ warnings: [],
+ usage: UNCERTAIN_USAGE,
+ executionReport: null,
+ outputParameters: {},
+ preparedPipeline,
+ },
+ });
 });
 };
 const pipelineExecutor = (inputParameters) => createTask({
@@ -6000,10 +6287,10 @@
 }
 
 /**
- *
+ * Register for LLM tools.
 *
 * Note: `$` is used to indicate that this interacts with the global scope
- * @singleton Only one instance of each register is created per build, but
+ * @singleton Only one instance of each register is created per build, but there can be more instances across different builds or environments.
 * @public exported from `@promptbook/core`
 */
 const $llmToolsRegister = new $Register('llm_execution_tools_constructors');
@@ -6012,10 +6299,10 @@
 */
 
 /**
- *
+ * Register for LLM tools metadata.
 *
 * Note: `$` is used to indicate that this interacts with the global scope
- * @singleton Only one instance of each register is created per build, but
+ * @singleton Only one instance of each register is created per build, but there can be more instances across different builds or environments.
 * @public exported from `@promptbook/core`
 */
 const $llmToolsMetadataRegister = new $Register('llm_tools_metadata');
@@ -6148,11 +6435,16 @@
 */
 
 /**
- *
+ * Creates LLM execution tools from provided configuration objects
+ *
+ * Instantiates and configures LLM tool instances for each configuration entry,
+ * combining them into a unified interface via MultipleLlmExecutionTools.
 *
 * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
 *
- * @
+ * @param configuration Array of LLM tool configurations to instantiate
+ * @param options Additional options for configuring the LLM tools
+ * @returns A unified interface combining all successfully instantiated LLM tools
 * @public exported from `@promptbook/core`
 */
 function createLlmToolsFromConfiguration(configuration, options = {}) {
@@ -6191,7 +6483,11 @@
 /**
 * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
 * TODO: [🧠][🎌] Dynamically install required providers
- * TODO:
+ * TODO: We should implement an interactive configuration wizard that would:
+ * 1. Detect which LLM providers are available in the environment
+ * 2. Guide users through required configuration settings for each provider
+ * 3. Allow testing connections before completing setup
+ * 4. Generate appropriate configuration code for application integration
 * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
 * TODO: [🧠] Is there some meaningfull way how to test this util
 * TODO: This should be maybe not under `_common` but under `utils`
@@ -6199,11 +6495,9 @@
 */
 
 /**
- *
- *
- *
- * 2) @@@
- *
+ * Provides a collection of scrapers optimized for Node.js environment.
+ * 1) `provideScrapersForNode` use as default
+ * 2) `provideScrapersForBrowser` use in limited browser environment *
 * @public exported from `@promptbook/node`
 */
 async function $provideScrapersForNode(tools, options) {
@@ -6228,11 +6522,11 @@
 */
 
 /**
- *
+ * Normalizes a given text to camelCase format.
 *
- * @param text
- * @param _isFirstLetterCapital
- * @returns
+ * @param text The text to be normalized.
+ * @param _isFirstLetterCapital Whether the first letter should be capitalized.
+ * @returns The camelCase formatted string.
 * @example 'helloWorld'
 * @example 'iLovePromptbook'
 * @public exported from `@promptbook/utils`
@@ -6362,11 +6656,11 @@
 }
 
 /**
- *
+ * Converts a name string into a URI-compatible format.
 *
- * @param name
- * @returns
- * @example
+ * @param name The string to be converted to a URI-compatible format.
+ * @returns A URI-compatible string derived from the input name.
+ * @example 'Hello World' -> 'hello-world'
 * @public exported from `@promptbook/utils`
 */
 function nameToUriPart(name) {
@@ -6380,11 +6674,11 @@
 }
 
 /**
- *
+ * Converts a given name into URI-compatible parts.
 *
- * @param name
- * @returns
- * @example
+ * @param name The name to be converted into URI parts.
+ * @returns An array of URI-compatible parts derived from the name.
+ * @example 'Example Name' -> ['example', 'name']
 * @public exported from `@promptbook/utils`
 */
 function nameToUriParts(name) {
@@ -6842,15 +7136,15 @@
 * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
 */
 
- // TODO:
- // TODO:
+ // TODO: [🥺] List running services from REMOTE_SERVER_URLS
+ // TODO: [🥺] Import directly from YML
 /**
- * @private
+ * @private [🥺] Decide how to expose this
 */
 const openapiJson = {
 openapi: '3.0.0',
 info: {
- title: 'Promptbook Remote Server API (
+ title: 'Promptbook Remote Server API ([🥺] From YML)',
 version: '1.0.0',
 description: 'API documentation for the Promptbook Remote Server',
 },
@@ -6862,6 +7156,13 @@
 responses: {
 '200': {
 description: 'Server details in markdown format.',
+ content: {
+ 'text/markdown': {
+ schema: {
+ type: 'string',
+ },
+ },
+ },
 },
 },
 },
@@ -6892,13 +7193,22 @@
 },
 },
 responses: {
- '
+ '201': {
 description: 'Successful login',
 content: {
 'application/json': {
 schema: {
 type: 'object',
 properties: {
+ isSuccess: {
+ type: 'boolean',
+ },
+ message: {
+ type: 'string',
+ },
+ error: {
+ type: 'object',
+ },
 identification: {
 type: 'object',
 },
@@ -6907,6 +7217,43 @@
 },
 },
 },
+ '400': {
+ description: 'Bad request or login failed',
+ content: {
+ 'application/json': {
+ schema: {
+ type: 'object',
+ properties: {
+ error: {
+ type: 'object',
+ },
+ },
+ },
+ },
+ },
+ },
+ '401': {
+ description: 'Authentication error',
+ content: {
+ 'application/json': {
+ schema: {
+ type: 'object',
+ properties: {
+ isSuccess: {
+ type: 'boolean',
+ enum: [false],
+ },
+ message: {
+ type: 'string',
+ },
+ error: {
+ type: 'object',
+ },
+ },
+ },
+ },
+ },
+ },
 },
 },
 },
@@ -6928,6 +7275,16 @@
 },
 },
 },
+ '500': {
+ description: 'No collection available',
+ content: {
+ 'text/plain': {
+ schema: {
+ type: 'string',
+ },
+ },
+ },
+ },
 },
 },
 },
@@ -6959,6 +7316,28 @@
 },
 '404': {
 description: 'Book not found.',
+ content: {
+ 'application/json': {
+ schema: {
+ type: 'object',
+ properties: {
+ error: {
+ type: 'object',
+ },
+ },
+ },
+ },
+ },
+ },
+ '500': {
+ description: 'No collection available',
+ content: {
+ 'text/plain': {
+ schema: {
+ type: 'string',
+ },
+ },
+ },
 },
 },
 },
@@ -6976,11 +7355,174 @@
|
|
|
6976
7355
|
type: 'array',
|
|
6977
7356
|
items: {
|
|
6978
7357
|
type: 'object',
|
|
7358
|
+
properties: {
|
|
7359
|
+
nonce: {
|
|
7360
|
+
type: 'string',
|
|
7361
|
+
},
|
|
7362
|
+
taskId: {
|
|
7363
|
+
type: 'string',
|
|
7364
|
+
},
|
|
7365
|
+
taskType: {
|
|
7366
|
+
type: 'string',
|
|
7367
|
+
},
|
|
7368
|
+
status: {
|
|
7369
|
+
type: 'string',
|
|
7370
|
+
},
|
|
7371
|
+
createdAt: {
|
|
7372
|
+
type: 'string',
|
|
7373
|
+
format: 'date-time',
|
|
7374
|
+
},
|
|
7375
|
+
updatedAt: {
|
|
7376
|
+
type: 'string',
|
|
7377
|
+
format: 'date-time',
|
|
7378
|
+
},
|
|
7379
|
+
},
|
|
7380
|
+
},
|
|
7381
|
+
},
|
|
7382
|
+
},
|
|
7383
|
+
},
|
|
7384
|
+
},
|
|
7385
|
+
},
|
|
7386
|
+
},
|
|
7387
|
+
},
|
|
7388
|
+
'/executions/last': {
|
|
7389
|
+
get: {
|
|
7390
|
+
summary: 'Get the last execution',
|
|
7391
|
+
description: 'Returns details of the last execution task.',
|
|
7392
|
+
responses: {
|
|
7393
|
+
'200': {
|
|
7394
|
+
description: 'The last execution task with full details.',
|
|
7395
|
+
content: {
|
|
7396
|
+
'application/json': {
|
|
7397
|
+
schema: {
|
|
7398
|
+
type: 'object',
|
|
7399
|
+
properties: {
|
|
7400
|
+
nonce: {
|
|
7401
|
+
type: 'string',
|
|
7402
|
+
},
|
|
7403
|
+
taskId: {
|
|
7404
|
+
type: 'string',
|
|
7405
|
+
},
|
|
7406
|
+
taskType: {
|
|
7407
|
+
type: 'string',
|
|
7408
|
+
},
|
|
7409
|
+
status: {
|
|
7410
|
+
type: 'string',
|
|
7411
|
+
},
|
|
7412
|
+
errors: {
|
|
7413
|
+
type: 'array',
|
|
7414
|
+
items: {
|
|
7415
|
+
type: 'object',
|
|
7416
|
+
},
|
|
7417
|
+
},
|
|
7418
|
+
warnings: {
|
|
7419
|
+
type: 'array',
|
|
7420
|
+
items: {
|
|
7421
|
+
type: 'object',
|
|
7422
|
+
},
|
|
7423
|
+
},
|
|
7424
|
+
createdAt: {
|
|
7425
|
+
type: 'string',
|
|
7426
|
+
format: 'date-time',
|
|
7427
|
+
},
|
|
7428
|
+
updatedAt: {
|
|
7429
|
+
type: 'string',
|
|
7430
|
+
format: 'date-time',
|
|
7431
|
+
},
|
|
7432
|
+
currentValue: {
|
|
7433
|
+
type: 'object',
|
|
7434
|
+
},
|
|
6979
7435
|
},
|
|
6980
7436
|
},
|
|
6981
7437
|
},
|
|
6982
7438
|
},
|
|
6983
7439
|
},
|
|
7440
|
+
'404': {
|
|
7441
|
+
description: 'No execution tasks found.',
|
|
7442
|
+
content: {
|
|
7443
|
+
'text/plain': {
|
|
7444
|
+
schema: {
|
|
7445
|
+
type: 'string',
|
|
7446
|
+
},
|
|
7447
|
+
},
|
|
7448
|
+
},
|
|
7449
|
+
},
|
|
7450
|
+
},
|
|
7451
|
+
},
|
|
7452
|
+
},
|
|
7453
|
+
'/executions/{taskId}': {
|
|
7454
|
+
get: {
|
|
7455
|
+
summary: 'Get specific execution',
|
|
7456
|
+
description: 'Returns details of a specific execution task.',
|
|
7457
|
+
parameters: [
|
|
7458
|
+
{
|
|
7459
|
+
in: 'path',
|
|
7460
|
+
name: 'taskId',
|
|
7461
|
+
required: true,
|
|
7462
|
+
schema: {
|
|
7463
|
+
type: 'string',
|
|
7464
|
+
},
|
|
7465
|
+
description: 'The ID of the execution task to retrieve.',
|
|
7466
|
+
},
|
|
7467
|
+
],
|
|
7468
|
+
responses: {
|
|
7469
|
+
'200': {
|
|
7470
|
+
description: 'The execution task with full details.',
|
|
7471
|
+
content: {
|
|
7472
|
+
'application/json': {
|
|
7473
|
+
schema: {
|
|
7474
|
+
type: 'object',
|
|
7475
|
+
properties: {
|
|
7476
|
+
nonce: {
|
|
7477
|
+
type: 'string',
|
|
7478
|
+
},
|
|
7479
|
+
taskId: {
|
|
7480
|
+
type: 'string',
|
|
7481
|
+
},
|
|
7482
|
+
taskType: {
|
|
7483
|
+
type: 'string',
|
|
7484
|
+
},
|
|
7485
|
+
status: {
|
|
7486
|
+
type: 'string',
|
|
7487
|
+
},
|
|
7488
|
+
errors: {
|
|
7489
|
+
type: 'array',
|
|
7490
|
+
items: {
|
|
7491
|
+
type: 'object',
|
|
7492
|
+
},
|
|
7493
|
+
},
|
|
7494
|
+
warnings: {
|
|
7495
|
+
type: 'array',
|
|
7496
|
+
items: {
|
|
7497
|
+
type: 'object',
|
|
7498
|
+
},
|
|
7499
|
+
},
|
|
7500
|
+
createdAt: {
|
|
7501
|
+
type: 'string',
|
|
7502
|
+
format: 'date-time',
|
|
7503
|
+
},
|
|
7504
|
+
updatedAt: {
|
|
7505
|
+
type: 'string',
|
|
7506
|
+
format: 'date-time',
|
|
7507
|
+
},
|
|
7508
|
+
currentValue: {
|
|
7509
|
+
type: 'object',
|
|
7510
|
+
},
|
|
7511
|
+
},
|
|
7512
|
+
},
|
|
7513
|
+
},
|
|
7514
|
+
},
|
|
7515
|
+
},
|
|
7516
|
+
'404': {
|
|
7517
|
+
description: 'Execution task not found.',
|
|
7518
|
+
content: {
|
|
7519
|
+
'text/plain': {
|
|
7520
|
+
schema: {
|
|
7521
|
+
type: 'string',
|
|
7522
|
+
},
|
|
7523
|
+
},
|
|
7524
|
+
},
|
|
7525
|
+
},
|
|
6984
7526
|
},
|
|
6985
7527
|
},
|
|
6986
7528
|
},
|
|
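The new `/executions/last` and `/executions/{taskId}` routes return the same task shape (`taskId`, `status`, `errors`, `warnings`, `currentValue`, timestamps). A small polling sketch against them; the base URL is an assumption:

```js
// Illustrative client for the execution endpoints above; the base URL is assumed.
const BASE_URL = 'http://localhost:3000';

async function getLastExecution() {
    const response = await fetch(`${BASE_URL}/executions/last`);
    return response.status === 404 ? null : response.json(); // 404 -> 'No execution tasks found.'
}

async function getExecution(taskId) {
    const response = await fetch(`${BASE_URL}/executions/${encodeURIComponent(taskId)}`);
    return response.status === 404 ? null : response.json(); // 404 -> 'Execution task not found.'
}
```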
```diff
@@ -6997,12 +7539,19 @@
                                 properties: {
                                     pipelineUrl: {
                                         type: 'string',
+                                        description: 'URL of the pipeline to execute',
+                                    },
+                                    book: {
+                                        type: 'string',
+                                        description: 'Alternative field for pipelineUrl',
                                     },
                                     inputParameters: {
                                         type: 'object',
+                                        description: 'Parameters for pipeline execution',
                                     },
                                     identification: {
                                         type: 'object',
+                                        description: 'User identification data',
                                     },
                                 },
                             },
```
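The request body now documents `book` as an alternative to `pipelineUrl`, plus `inputParameters` and `identification`. An example payload matching the schema; the URL and parameter names are placeholders:

```js
// Placeholder values - only the field names come from the schema above.
const executionRequest = {
    pipelineUrl: 'https://example.org/my-pipeline.book', // or pass `book` instead
    inputParameters: {
        topic: 'Sea turtles', // parameters expected by the chosen pipeline
    },
    identification: {
        // user identification data, as required by the server
    },
};
```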
```diff
@@ -7022,13 +7571,164 @@
                     },
                     '400': {
                         description: 'Invalid input.',
+                        content: {
+                            'application/json': {
+                                schema: {
+                                    type: 'object',
+                                    properties: {
+                                        error: {
+                                            type: 'object',
+                                        },
+                                    },
+                                },
+                            },
+                        },
+                    },
+                    '404': {
+                        description: 'Pipeline not found.',
+                        content: {
+                            'text/plain': {
+                                schema: {
+                                    type: 'string',
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+        '/api-docs': {
+            get: {
+                summary: 'API documentation UI',
+                description: 'Swagger UI for API documentation',
+                responses: {
+                    '200': {
+                        description: 'HTML Swagger UI',
+                    },
+                },
+            },
+        },
+        '/swagger': {
+            get: {
+                summary: 'API documentation UI (alternative path)',
+                description: 'Swagger UI for API documentation',
+                responses: {
+                    '200': {
+                        description: 'HTML Swagger UI',
+                    },
+                },
+            },
+        },
+        '/openapi': {
+            get: {
+                summary: 'OpenAPI specification',
+                description: 'Returns the OpenAPI JSON specification',
+                responses: {
+                    '200': {
+                        description: 'OpenAPI specification',
+                        content: {
+                            'application/json': {
+                                schema: {
+                                    type: 'object',
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    },
+    components: {
+        schemas: {
+            Error: {
+                type: 'object',
+                properties: {
+                    error: {
+                        type: 'object',
+                    },
+                },
+            },
+            ExecutionTaskSummary: {
+                type: 'object',
+                properties: {
+                    nonce: {
+                        type: 'string',
+                    },
+                    taskId: {
+                        type: 'string',
+                    },
+                    taskType: {
+                        type: 'string',
+                    },
+                    status: {
+                        type: 'string',
+                    },
+                    createdAt: {
+                        type: 'string',
+                        format: 'date-time',
+                    },
+                    updatedAt: {
+                        type: 'string',
+                        format: 'date-time',
+                    },
+                },
+            },
+            ExecutionTaskFull: {
+                type: 'object',
+                properties: {
+                    nonce: {
+                        type: 'string',
+                    },
+                    taskId: {
+                        type: 'string',
+                    },
+                    taskType: {
+                        type: 'string',
+                    },
+                    status: {
+                        type: 'string',
+                    },
+                    errors: {
+                        type: 'array',
+                        items: {
+                            type: 'object',
+                        },
+                    },
+                    warnings: {
+                        type: 'array',
+                        items: {
+                            type: 'object',
+                        },
+                    },
+                    createdAt: {
+                        type: 'string',
+                        format: 'date-time',
+                    },
+                    updatedAt: {
+                        type: 'string',
+                        format: 'date-time',
+                    },
+                    currentValue: {
+                        type: 'object',
                     },
                 },
             },
         },
     },
-
-
+    tags: [
+        {
+            name: 'Books',
+            description: 'Operations related to books and pipelines',
+        },
+        {
+            name: 'Executions',
+            description: 'Operations related to execution tasks',
+        },
+        {
+            name: 'Authentication',
+            description: 'Authentication operations',
+        },
+    ],
 };
@@ -7102,7 +7802,7 @@
         response.setHeader('X-Powered-By', 'Promptbook engine');
         next();
     });
-    // TODO:
+    // TODO: [🥺] Expose openapiJson to consumer and also allow to add new routes
     app.use(OpenApiValidator__namespace.middleware({
         apiSpec: openapiJson,
         ignorePaths(path) {
```
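For context, `express-openapi-validator` consumes a spec object roughly like this; only `apiSpec` and `ignorePaths` appear in the hunk above, everything else is an assumption for illustration:

```js
// Minimal sketch of wiring a spec object into express-openapi-validator.
import express from 'express';
import * as OpenApiValidator from 'express-openapi-validator';

const openapiJson = {
    openapi: '3.0.0',
    info: { title: 'Example API', version: '1.0.0' },
    paths: {},
};

const app = express();
app.use(express.json());
app.use(
    OpenApiValidator.middleware({
        apiSpec: openapiJson,
        ignorePaths: (path) => path.startsWith('/api-docs'), // e.g. skip documentation routes
    }),
);
app.listen(3000);
```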
```diff
@@ -7399,6 +8099,7 @@
                     promptResult = await llm.callCompletionModel(prompt);
                     break;
                 case 'EMBEDDING':
+                    console.log('!!! llm (EMBEDDING)', llm);
                     if (llm.callEmbeddingModel === undefined) {
                         // Note: [0] This check should not be a thing
                         throw new PipelineExecutionError(`Embedding model is not available`);
```
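The embedding branch gains a debug `console.log`, while the existing guard stays in place because `callEmbeddingModel` is optional on the LLM tools object. The guard pattern, shown standalone:

```js
// Guard pattern from the hunk above, in isolation; the error class is simplified to Error.
async function runEmbeddingPrompt(llm, prompt) {
    if (llm.callEmbeddingModel === undefined) {
        throw new Error('Embedding model is not available');
    }
    return llm.callEmbeddingModel(prompt);
}
```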