@promptbook/remote-server 0.92.0-3 → 0.92.0-30
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +999 -298
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/browser.index.d.ts +2 -0
- package/esm/typings/src/_packages/core.index.d.ts +22 -6
- package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
- package/esm/typings/src/_packages/google.index.d.ts +2 -0
- package/esm/typings/src/_packages/types.index.d.ts +4 -2
- package/esm/typings/src/_packages/utils.index.d.ts +2 -0
- package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +1 -1
- package/esm/typings/src/collection/PipelineCollection.d.ts +0 -2
- package/esm/typings/src/collection/SimplePipelineCollection.d.ts +1 -1
- package/esm/typings/src/commands/FOREACH/ForeachJson.d.ts +6 -6
- package/esm/typings/src/commands/FOREACH/foreachCommandParser.d.ts +0 -2
- package/esm/typings/src/commands/FORMFACTOR/formfactorCommandParser.d.ts +1 -1
- package/esm/typings/src/commands/_BOILERPLATE/boilerplateCommandParser.d.ts +1 -1
- package/esm/typings/src/commands/_common/types/CommandParser.d.ts +36 -28
- package/esm/typings/src/config.d.ts +41 -11
- package/esm/typings/src/constants.d.ts +43 -2
- package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
- package/esm/typings/src/errors/0-BoilerplateError.d.ts +2 -2
- package/esm/typings/src/executables/$provideExecutablesForNode.d.ts +1 -1
- package/esm/typings/src/executables/apps/locateLibreoffice.d.ts +2 -1
- package/esm/typings/src/executables/apps/locatePandoc.d.ts +2 -1
- package/esm/typings/src/executables/platforms/locateAppOnLinux.d.ts +2 -1
- package/esm/typings/src/executables/platforms/locateAppOnMacOs.d.ts +2 -1
- package/esm/typings/src/executables/platforms/locateAppOnWindows.d.ts +2 -1
- package/esm/typings/src/execution/AbstractTaskResult.d.ts +1 -1
- package/esm/typings/src/execution/CommonToolsOptions.d.ts +5 -1
- package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +2 -1
- package/esm/typings/src/execution/PipelineExecutorResult.d.ts +4 -2
- package/esm/typings/src/execution/createPipelineExecutor/$OngoingTaskResult.d.ts +12 -9
- package/esm/typings/src/execution/createPipelineExecutor/10-executePipeline.d.ts +12 -9
- package/esm/typings/src/execution/createPipelineExecutor/20-executeTask.d.ts +11 -8
- package/esm/typings/src/execution/createPipelineExecutor/30-executeFormatSubvalues.d.ts +15 -3
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +20 -14
- package/esm/typings/src/execution/createPipelineExecutor/computeCosineSimilarity.d.ts +13 -0
- package/esm/typings/src/execution/createPipelineExecutor/filterJustOutputParameters.d.ts +7 -6
- package/esm/typings/src/execution/createPipelineExecutor/getContextForTask.d.ts +5 -1
- package/esm/typings/src/execution/createPipelineExecutor/getExamplesForTask.d.ts +1 -1
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +21 -5
- package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +19 -5
- package/esm/typings/src/execution/createPipelineExecutor/knowledgePiecesToString.d.ts +9 -0
- package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +4 -4
- package/esm/typings/src/execution/utils/checkExpectations.d.ts +1 -1
- package/esm/typings/src/execution/utils/uncertainNumber.d.ts +3 -2
- package/esm/typings/src/formats/_common/{FormatDefinition.d.ts → FormatParser.d.ts} +8 -6
- package/esm/typings/src/formats/_common/FormatSubvalueParser.d.ts +66 -0
- package/esm/typings/src/formats/csv/CsvFormatParser.d.ts +17 -0
- package/esm/typings/src/formats/csv/CsvSettings.d.ts +2 -2
- package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
- package/esm/typings/src/formats/csv/utils/isValidCsvString.d.ts +1 -1
- package/esm/typings/src/formats/index.d.ts +2 -2
- package/esm/typings/src/formats/json/{JsonFormatDefinition.d.ts → JsonFormatParser.d.ts} +6 -6
- package/esm/typings/src/formats/json/utils/isValidJsonString.d.ts +1 -1
- package/esm/typings/src/formats/json/utils/jsonParse.d.ts +8 -0
- package/esm/typings/src/formats/text/{TextFormatDefinition.d.ts → TextFormatParser.d.ts} +7 -7
- package/esm/typings/src/formats/xml/XmlFormatParser.d.ts +19 -0
- package/esm/typings/src/formats/xml/utils/isValidXmlString.d.ts +1 -1
- package/esm/typings/src/formfactors/_boilerplate/BoilerplateFormfactorDefinition.d.ts +3 -2
- package/esm/typings/src/formfactors/_common/AbstractFormfactorDefinition.d.ts +16 -7
- package/esm/typings/src/formfactors/_common/FormfactorDefinition.d.ts +3 -1
- package/esm/typings/src/formfactors/_common/string_formfactor_name.d.ts +2 -1
- package/esm/typings/src/formfactors/chatbot/ChatbotFormfactorDefinition.d.ts +2 -2
- package/esm/typings/src/formfactors/completion/CompletionFormfactorDefinition.d.ts +29 -0
- package/esm/typings/src/formfactors/generator/GeneratorFormfactorDefinition.d.ts +2 -1
- package/esm/typings/src/formfactors/generic/GenericFormfactorDefinition.d.ts +2 -2
- package/esm/typings/src/formfactors/index.d.ts +33 -8
- package/esm/typings/src/formfactors/matcher/MatcherFormfactorDefinition.d.ts +4 -2
- package/esm/typings/src/formfactors/sheets/SheetsFormfactorDefinition.d.ts +3 -2
- package/esm/typings/src/formfactors/translator/TranslatorFormfactorDefinition.d.ts +3 -2
- package/esm/typings/src/high-level-abstractions/index.d.ts +2 -2
- package/esm/typings/src/llm-providers/_common/register/$llmToolsMetadataRegister.d.ts +3 -3
- package/esm/typings/src/llm-providers/_common/register/$llmToolsRegister.d.ts +3 -3
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +4 -4
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +4 -3
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsFromEnv.d.ts +17 -4
- package/esm/typings/src/llm-providers/_common/register/LlmToolsConfiguration.d.ts +11 -4
- package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +27 -5
- package/esm/typings/src/llm-providers/_common/register/LlmToolsOptions.d.ts +9 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +12 -3
- package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +10 -5
- package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +5 -3
- package/esm/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/_common/utils/count-total-usage/limitTotalUsage.d.ts +5 -5
- package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
- package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
- package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
- package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
- package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
- package/esm/typings/src/migrations/migratePipeline.d.ts +9 -0
- package/esm/typings/src/other/templates/getBookTemplates.d.ts +2 -2
- package/esm/typings/src/personas/preparePersona.d.ts +1 -1
- package/esm/typings/src/pipeline/PipelineInterface/PipelineInterface.d.ts +3 -3
- package/esm/typings/src/pipeline/PipelineInterface/constants.d.ts +1 -1
- package/esm/typings/src/pipeline/PipelineInterface/getPipelineInterface.d.ts +1 -1
- package/esm/typings/src/pipeline/PipelineInterface/isPipelineImplementingInterface.d.ts +5 -4
- package/esm/typings/src/pipeline/PipelineInterface/isPipelineInterfacesEqual.d.ts +1 -1
- package/esm/typings/src/pipeline/PipelineJson/CommonTaskJson.d.ts +9 -6
- package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
- package/esm/typings/src/pipeline/PipelineJson/PipelineJson.d.ts +3 -2
- package/esm/typings/src/pipeline/PipelineString.d.ts +3 -1
- package/esm/typings/src/pipeline/book-notation.d.ts +2 -2
- package/esm/typings/src/postprocessing/utils/extractJsonBlock.d.ts +1 -1
- package/esm/typings/src/prepare/prepareTasks.d.ts +7 -4
- package/esm/typings/src/remote-server/openapi-types.d.ts +348 -6
- package/esm/typings/src/remote-server/openapi.d.ts +398 -4
- package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +2 -1
- package/esm/typings/src/scrapers/_boilerplate/BoilerplateScraper.d.ts +3 -3
- package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +1 -1
- package/esm/typings/src/scrapers/_common/Converter.d.ts +3 -1
- package/esm/typings/src/scrapers/_common/Scraper.d.ts +4 -3
- package/esm/typings/src/scrapers/_common/ScraperIntermediateSource.d.ts +4 -2
- package/esm/typings/src/scrapers/_common/register/$provideFilesystemForNode.d.ts +2 -1
- package/esm/typings/src/scrapers/_common/register/$provideScrapersForBrowser.d.ts +6 -3
- package/esm/typings/src/scrapers/_common/register/$provideScrapersForNode.d.ts +3 -5
- package/esm/typings/src/scrapers/_common/register/$scrapersMetadataRegister.d.ts +3 -3
- package/esm/typings/src/scrapers/_common/register/$scrapersRegister.d.ts +3 -2
- package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +8 -5
- package/esm/typings/src/scrapers/_common/register/ScraperConstructor.d.ts +2 -1
- package/esm/typings/src/scrapers/_common/utils/getScraperIntermediateSource.d.ts +6 -5
- package/esm/typings/src/scrapers/_common/utils/makeKnowledgeSourceHandler.d.ts +3 -1
- package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +2 -1
- package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +4 -1
- package/esm/typings/src/scrapers/markitdown/MarkitdownScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +2 -1
- package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +3 -4
- package/esm/typings/src/scripting/javascript/postprocessing-functions.d.ts +5 -1
- package/esm/typings/src/storage/file-cache-storage/FileCacheStorage.d.ts +12 -5
- package/esm/typings/src/storage/file-cache-storage/FileCacheStorageOptions.d.ts +4 -2
- package/esm/typings/src/storage/file-cache-storage/utils/nameToSubfolderPath.d.ts +2 -1
- package/esm/typings/src/storage/local-storage/getIndexedDbStorage.d.ts +10 -0
- package/esm/typings/src/storage/local-storage/utils/makePromptbookStorageFromIndexedDb.d.ts +7 -0
- package/esm/typings/src/storage/local-storage/utils/makePromptbookStorageFromWebStorage.d.ts +2 -1
- package/esm/typings/src/types/IntermediateFilesStrategy.d.ts +2 -1
- package/esm/typings/src/types/ModelVariant.d.ts +5 -5
- package/esm/typings/src/types/typeAliases.d.ts +17 -13
- package/esm/typings/src/utils/$Register.d.ts +8 -7
- package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +2 -2
- package/esm/typings/src/utils/editable/edit-pipeline-string/deflatePipeline.d.ts +4 -1
- package/esm/typings/src/utils/editable/utils/isFlatPipeline.d.ts +2 -1
- package/esm/typings/src/utils/environment/$getGlobalScope.d.ts +2 -1
- package/esm/typings/src/utils/expectation-counters/index.d.ts +1 -1
- package/esm/typings/src/utils/markdown/extractAllListItemsFromMarkdown.d.ts +1 -1
- package/esm/typings/src/utils/normalization/nameToUriPart.d.ts +4 -4
- package/esm/typings/src/utils/normalization/nameToUriParts.d.ts +4 -4
- package/esm/typings/src/utils/normalization/normalize-to-kebab-case.d.ts +3 -3
- package/esm/typings/src/utils/normalization/normalizeTo_SCREAMING_CASE.d.ts +3 -3
- package/esm/typings/src/utils/normalization/normalizeTo_camelCase.d.ts +4 -4
- package/esm/typings/src/utils/normalization/normalizeTo_snake_case.d.ts +3 -3
- package/esm/typings/src/utils/normalization/removeDiacritics.d.ts +3 -3
- package/esm/typings/src/utils/normalization/searchKeywords.d.ts +4 -1
- package/esm/typings/src/utils/normalization/titleToName.d.ts +4 -4
- package/esm/typings/src/utils/organization/empty_object.d.ts +2 -2
- package/esm/typings/src/utils/organization/just_empty_object.d.ts +4 -4
- package/esm/typings/src/utils/parameters/mapAvailableToExpectedParameters.d.ts +7 -7
- package/esm/typings/src/utils/serialization/clonePipeline.d.ts +4 -3
- package/esm/typings/src/utils/serialization/deepClone.d.ts +5 -1
- package/esm/typings/src/utils/validators/javascriptName/isValidJavascriptName.d.ts +3 -3
- package/esm/typings/src/utils/validators/parameterName/validateParameterName.d.ts +5 -4
- package/esm/typings/src/version.d.ts +2 -1
- package/package.json +2 -2
- package/umd/index.umd.js +1004 -303
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/formats/_common/FormatSubvalueDefinition.d.ts +0 -31
- package/esm/typings/src/formats/csv/CsvFormatDefinition.d.ts +0 -17
- package/esm/typings/src/formats/xml/XmlFormatDefinition.d.ts +0 -19
package/esm/index.es.js
CHANGED
@@ -1,9 +1,9 @@
 import colors from 'colors';
 import express from 'express';
+import * as OpenApiValidator from 'express-openapi-validator';
 import http from 'http';
 import { Server } from 'socket.io';
 import spaceTrim, { spaceTrim as spaceTrim$1 } from 'spacetrim';
-import * as OpenApiValidator from 'express-openapi-validator';
 import swaggerUi from 'swagger-ui-express';
 import { forTime } from 'waitasecond';
 import { randomBytes } from 'crypto';
@@ -33,7 +33,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.92.0-3';
+const PROMPTBOOK_ENGINE_VERSION = '0.92.0-30';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -100,6 +100,21 @@ const DEFAULT_BOOK_TITLE = `✨ Untitled Book`;
  * @public exported from `@promptbook/core`
  */
 const DEFAULT_MAX_FILE_SIZE = 100 * 1024 * 1024; // 100MB
+/**
+ * Threshold value that determines when a dataset is considered "big"
+ * and may require special handling or optimizations
+ *
+ * For example, when error occurs in one item of the big dataset, it will not fail the whole pipeline
+ *
+ * @public exported from `@promptbook/core`
+ */
+const BIG_DATASET_TRESHOLD = 50;
+/**
+ * Placeholder text used to represent a placeholder value of failed operation
+ *
+ * @public exported from `@promptbook/core`
+ */
+const FAILED_VALUE_PLACEHOLDER = '!?';
 // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
 /**
  * The maximum number of iterations for a loops
@@ -179,7 +194,7 @@ const DEFAULT_DOWNLOAD_CACHE_DIRNAME = './.promptbook/download-cache';
 const DEFAULT_SCRAPE_CACHE_DIRNAME = './.promptbook/scrape-cache';
 // <- TODO: [🧜♂️]
 /**
- *
+ * Default settings for parsing and generating CSV files in Promptbook.
  *
  * @public exported from `@promptbook/core`
  */
@@ -190,19 +205,19 @@ const DEFAULT_CSV_SETTINGS = Object.freeze({
     skipEmptyLines: true,
 });
 /**
- *
+ * Controls whether verbose logging is enabled by default throughout the application.
  *
  * @public exported from `@promptbook/core`
  */
 let DEFAULT_IS_VERBOSE = false;
 /**
- *
+ * Controls whether auto-installation of dependencies is enabled by default.
  *
  * @public exported from `@promptbook/core`
  */
 const DEFAULT_IS_AUTO_INSTALLED = false;
 /**
- *
+ * Indicates whether pipeline logic validation is enabled. When true, the pipeline logic is checked for consistency.
  *
  * @private within the repository
  */
@@ -857,7 +872,8 @@ function $execCommand(options) {
  */
 
 /**
- *
+ * Attempts to locate the specified application on a Linux system using the 'which' command.
+ * Returns the path to the executable if found, or null otherwise.
  *
  * @private within the repository
  */
@@ -877,7 +893,8 @@ async function locateAppOnLinux({ linuxWhich, }) {
  */
 
 /**
- *
+ * Provides filesystem access (for example for Node.js-based scrapers)
+ * Creates a standardized filesystem interface that scrapers can use for file operations.
  *
  * @public exported from `@promptbook/node`
  */
@@ -923,7 +940,8 @@ async function isExecutable(path, fs) {
 // eslint-disable-next-line @typescript-eslint/no-var-requires
 const userhome = require('userhome');
 /**
- *
+ * Attempts to locate the specified application on a macOS system by checking standard application paths and using mdfind.
+ * Returns the path to the executable if found, or null otherwise.
  *
  * @private within the repository
  */
@@ -955,7 +973,8 @@ async function locateAppOnMacOs({ macOsName, }) {
  */
 
 /**
- *
+ * Attempts to locate the specified application on a Windows system by searching common installation directories.
+ * Returns the path to the executable if found, or null otherwise.
  *
  * @private within the repository
  */
@@ -1026,7 +1045,8 @@ function locateApp(options) {
  */
 
 /**
- *
+ * Locates the LibreOffice executable on the current system by searching platform-specific paths.
+ * Returns the path to the executable if found, or null otherwise.
  *
  * @private within the repository
  */
@@ -1044,7 +1064,8 @@ function locateLibreoffice() {
  */
 
 /**
- *
+ * Locates the Pandoc executable on the current system by searching platform-specific paths.
+ * Returns the path to the executable if found, or null otherwise.
  *
  * @private within the repository
  */
@@ -1062,7 +1083,7 @@ function locatePandoc() {
  */
 
 /**
- *
+ * Provides paths to required executables (i.e. as Pandoc and LibreOffice) for Node.js environments.
  *
  * @public exported from `@promptbook/node`
  */
@@ -1262,8 +1283,12 @@ function checkSerializableAsJson(options) {
  */
 
 /**
- *
+ * Creates a deep clone of the given object
+ *
+ * Note: This method only works for objects that are fully serializable to JSON and do not contain functions, Dates, or special types.
  *
+ * @param objectValue The object to clone.
+ * @returns A deep, writable clone of the input object.
  * @public exported from `@promptbook/utils`
  */
 function deepClone(objectValue) {
@@ -1345,13 +1370,13 @@ const ORDER_OF_PIPELINE_JSON = [
  */
 const REPLACING_NONCE = 'ptbkauk42kV2dzao34faw7FudQUHYPtW';
 /**
- *
+ * Placeholder value indicating a parameter is missing its value.
  *
  * @private within the repository
  */
 const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
 /**
- *
+ * Placeholder value indicating a parameter is restricted and cannot be used directly.
  *
  * @private within the repository
  */
@@ -1782,15 +1807,21 @@ function validatePipeline_InnerFunction(pipeline) {
  * @public exported from `@promptbook/core`
  */
 function isPipelinePrepared(pipeline) {
-    // Note: Ignoring `pipeline.preparations`
-    // Note: Ignoring `pipeline.knowledgePieces`
+    // Note: Ignoring `pipeline.preparations`
+    // Note: Ignoring `pipeline.knowledgePieces`
     if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
+        // TODO: !!! Comment this out
+        console.log('Pipeline is not prepared because title is undefined or empty', pipeline);
         return false;
     }
-    if (!pipeline.personas.every((persona) => persona.
+    if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
+        // TODO: !!! Comment this out
+        console.log('Pipeline is not prepared because personas are not prepared', pipeline.personas);
         return false;
     }
     if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
+        // TODO: !!! Comment this out
+        console.log('Pipeline is not prepared because knowledge sources are not prepared', pipeline.knowledgeSources);
         return false;
     }
     /*
@@ -1815,7 +1846,7 @@ function isPipelinePrepared(pipeline) {
  * Function isValidJsonString will tell you if the string is valid JSON or not
  *
  * @param value The string to check
- * @returns
+ * @returns `true` if the string is a valid JSON string, false otherwise
  *
  * @public exported from `@promptbook/utils`
  */
@@ -1833,6 +1864,42 @@ function isValidJsonString(value /* <- [👨⚖️] */) {
     }
 }
 
+/**
+ * Converts a JavaScript Object Notation (JSON) string into an object.
+ *
+ * Note: This is wrapper around `JSON.parse()` with better error and type handling
+ *
+ * @public exported from `@promptbook/utils`
+ */
+function jsonParse(value) {
+    if (value === undefined) {
+        throw new Error(`Can not parse JSON from undefined value.`);
+    }
+    else if (typeof value !== 'string') {
+        console.error('Can not parse JSON from non-string value.', { text: value });
+        throw new Error(spaceTrim(`
+            Can not parse JSON from non-string value.
+
+            The value type: ${typeof value}
+            See more in console.
+        `));
+    }
+    try {
+        return JSON.parse(value);
+    }
+    catch (error) {
+        if (!(error instanceof Error)) {
+            throw error;
+        }
+        throw new Error(spaceTrim((block) => `
+            ${block(error.message)}
+
+            The JSON text:
+            ${block(value)}
+        `));
+    }
+}
+
 /**
  * Recursively converts JSON strings to JSON objects
 
@@ -1851,7 +1918,7 @@ function jsonStringsToJsons(object) {
     const newObject = { ...object };
     for (const [key, value] of Object.entries(object)) {
         if (typeof value === 'string' && isValidJsonString(value)) {
-            newObject[key] =
+            newObject[key] = jsonParse(value);
         }
         else {
             newObject[key] = jsonStringsToJsons(value);
@@ -2028,7 +2095,75 @@ function createTask(options) {
  * TODO: [🐚] Split into more files and make `PrepareTask` & `RemoteTask` + split the function
  */
 
-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+/**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+const ZERO_VALUE = $deepFreeze({ value: 0 });
+/**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
+/**
+ * Represents the usage with no resources consumed
+ *
+ * @public exported from `@promptbook/core`
+ */
+const ZERO_USAGE = $deepFreeze({
+    price: ZERO_VALUE,
+    input: {
+        tokensCount: ZERO_VALUE,
+        charactersCount: ZERO_VALUE,
+        wordsCount: ZERO_VALUE,
+        sentencesCount: ZERO_VALUE,
+        linesCount: ZERO_VALUE,
+        paragraphsCount: ZERO_VALUE,
+        pagesCount: ZERO_VALUE,
+    },
+    output: {
+        tokensCount: ZERO_VALUE,
+        charactersCount: ZERO_VALUE,
+        wordsCount: ZERO_VALUE,
+        sentencesCount: ZERO_VALUE,
+        linesCount: ZERO_VALUE,
+        paragraphsCount: ZERO_VALUE,
+        pagesCount: ZERO_VALUE,
+    },
+});
+/**
+ * Represents the usage with unknown resources consumed
+ *
+ * @public exported from `@promptbook/core`
+ */
+const UNCERTAIN_USAGE = $deepFreeze({
+    price: UNCERTAIN_ZERO_VALUE,
+    input: {
+        tokensCount: UNCERTAIN_ZERO_VALUE,
+        charactersCount: UNCERTAIN_ZERO_VALUE,
+        wordsCount: UNCERTAIN_ZERO_VALUE,
+        sentencesCount: UNCERTAIN_ZERO_VALUE,
+        linesCount: UNCERTAIN_ZERO_VALUE,
+        paragraphsCount: UNCERTAIN_ZERO_VALUE,
+        pagesCount: UNCERTAIN_ZERO_VALUE,
+    },
+    output: {
+        tokensCount: UNCERTAIN_ZERO_VALUE,
+        charactersCount: UNCERTAIN_ZERO_VALUE,
+        wordsCount: UNCERTAIN_ZERO_VALUE,
+        sentencesCount: UNCERTAIN_ZERO_VALUE,
+        linesCount: UNCERTAIN_ZERO_VALUE,
+        paragraphsCount: UNCERTAIN_ZERO_VALUE,
+        pagesCount: UNCERTAIN_ZERO_VALUE,
+    },
+});
+/**
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
+
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
 
 /**
  * Checks if value is valid email
@@ -2331,7 +2466,7 @@ function extractParameterNames(template) {
  */
 function unpreparePipeline(pipeline) {
     let { personas, knowledgeSources, tasks } = pipeline;
-    personas = personas.map((persona) => ({ ...persona,
+    personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
     knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
     tasks = tasks.map((task) => {
         let { dependentParameterNames } = task;
@@ -2372,7 +2507,7 @@ class SimplePipelineCollection {
     /**
      * Constructs a pipeline collection from pipelines
      *
-     * @param pipelines
+     * @param pipelines Array of pipeline JSON objects to include in the collection
      *
      * Note: During the construction logic of all pipelines are validated
      * Note: It is not recommended to use this constructor directly, use `createCollectionFromJson` *(or other variant)* instead
@@ -2505,74 +2640,6 @@ async function forEachAsync(array, options, callbackfunction) {
 await Promise.all(tasks);
 }

-/**
-* Represents the uncertain value
-*
-* @public exported from `@promptbook/core`
-*/
-const ZERO_VALUE = $deepFreeze({ value: 0 });
-/**
-* Represents the uncertain value
-*
-* @public exported from `@promptbook/core`
-*/
-const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
-/**
-* Represents the usage with no resources consumed
-*
-* @public exported from `@promptbook/core`
-*/
-const ZERO_USAGE = $deepFreeze({
-price: ZERO_VALUE,
-input: {
-tokensCount: ZERO_VALUE,
-charactersCount: ZERO_VALUE,
-wordsCount: ZERO_VALUE,
-sentencesCount: ZERO_VALUE,
-linesCount: ZERO_VALUE,
-paragraphsCount: ZERO_VALUE,
-pagesCount: ZERO_VALUE,
-},
-output: {
-tokensCount: ZERO_VALUE,
-charactersCount: ZERO_VALUE,
-wordsCount: ZERO_VALUE,
-sentencesCount: ZERO_VALUE,
-linesCount: ZERO_VALUE,
-paragraphsCount: ZERO_VALUE,
-pagesCount: ZERO_VALUE,
-},
-});
-/**
-* Represents the usage with unknown resources consumed
-*
-* @public exported from `@promptbook/core`
-*/
-$deepFreeze({
-price: UNCERTAIN_ZERO_VALUE,
-input: {
-tokensCount: UNCERTAIN_ZERO_VALUE,
-charactersCount: UNCERTAIN_ZERO_VALUE,
-wordsCount: UNCERTAIN_ZERO_VALUE,
-sentencesCount: UNCERTAIN_ZERO_VALUE,
-linesCount: UNCERTAIN_ZERO_VALUE,
-paragraphsCount: UNCERTAIN_ZERO_VALUE,
-pagesCount: UNCERTAIN_ZERO_VALUE,
-},
-output: {
-tokensCount: UNCERTAIN_ZERO_VALUE,
-charactersCount: UNCERTAIN_ZERO_VALUE,
-wordsCount: UNCERTAIN_ZERO_VALUE,
-sentencesCount: UNCERTAIN_ZERO_VALUE,
-linesCount: UNCERTAIN_ZERO_VALUE,
-paragraphsCount: UNCERTAIN_ZERO_VALUE,
-pagesCount: UNCERTAIN_ZERO_VALUE,
-},
-});
-/**
-* Note: [💞] Ignore a discrepancy between file name and entity name
-*/
-
 /**
 * Function `addUsage` will add multiple usages into one
 *
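The removed block above took the frozen zero/uncertain usage constants (`ZERO_VALUE`, `UNCERTAIN_ZERO_VALUE`, `ZERO_USAGE`) out of this spot in the bundle. As a minimal, hedged sketch of how usage records of that shape combine (the package's real `addUsage` may differ in details), two counters can be added field by field while propagating the `isUncertain` flag:

```js
// Minimal sketch only — the real `addUsage` in @promptbook/core may differ.
// A counter is `{ value: number, isUncertain?: boolean }`; a usage has `price`, `input`, `output`.
function addCounters(a = { value: 0 }, b = { value: 0 }) {
    return {
        value: a.value + b.value,
        ...(a.isUncertain || b.isUncertain ? { isUncertain: true } : {}),
    };
}

function addTwoUsages(usageA, usageB) {
    const addSection = (x = {}, y = {}) =>
        Object.fromEntries(
            [...new Set([...Object.keys(x), ...Object.keys(y)])].map((key) => [key, addCounters(x[key], y[key])]),
        );
    return {
        price: addCounters(usageA.price, usageB.price),
        input: addSection(usageA.input, usageB.input),
        output: addSection(usageA.output, usageB.output),
    };
}
```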
@@ -2919,27 +2986,48 @@ async function preparePersona(personaDescription, tools, options) {
 pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
 tools,
 });
-// TODO: [🚐] Make arrayable LLMs -> single LLM DRY
 const _llms = arrayableToArray(tools.llm);
 const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
-const availableModels = await llmTools.listModels()
-const availableModelNames = availableModels
+const availableModels = (await llmTools.listModels())
 .filter(({ modelVariant }) => modelVariant === 'CHAT')
-.map(({ modelName }) =>
-
-
+.map(({ modelName, modelDescription }) => ({
+modelName,
+modelDescription,
+// <- Note: `modelTitle` and `modelVariant` is not relevant for this task
+}));
+const result = await preparePersonaExecutor({
+availableModels /* <- Note: Passing as JSON */,
+personaDescription,
+}).asPromise();
 const { outputParameters } = result;
-const {
-
+const { modelsRequirements: modelsRequirementsJson } = outputParameters;
+let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
 if (isVerbose) {
-console.info(`PERSONA ${personaDescription}`,
+console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
 }
-
-
+if (!Array.isArray(modelsRequirementsUnchecked)) {
+// <- TODO: Book should have syntax and system to enforce shape of JSON
+modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
+/*
+throw new UnexpectedError(
+spaceTrim(
+(block) => `
+Invalid \`modelsRequirements\`:
+
+\`\`\`json
+${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
+\`\`\`
+`,
+),
+);
+*/
+}
+const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
 modelVariant: 'CHAT',
-
-
-
+...modelRequirements,
+}));
+return {
+modelsRequirements,
 };
 }
 /**
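A hedged sketch of the model pre-filtering step introduced above, assuming (as the diff suggests) that `llmTools.listModels()` resolves to objects carrying `modelVariant`, `modelName` and `modelDescription`; only the reduced list is what gets serialized and handed to the persona-preparation pipeline:

```js
// Sketch only — mirrors the filtering/mapping shown in the hunk above.
async function pickChatModels(llmTools) {
    const allModels = await llmTools.listModels();
    return allModels
        .filter(({ modelVariant }) => modelVariant === 'CHAT')
        .map(({ modelName, modelDescription }) => ({ modelName, modelDescription }));
}
```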
@@ -2950,7 +3038,8 @@ async function preparePersona(personaDescription, tools, options) {
 */

 /**
-*
+* Safely retrieves the global scope object (window in browser, global in Node.js)
+* regardless of the JavaScript environment in which the code is running
 *
 * Note: `$` is used to indicate that this function is not a pure function - it access global scope
 *
@@ -2961,10 +3050,10 @@ function $getGlobalScope() {
|
|
|
2961
3050
|
}
|
|
2962
3051
|
|
|
2963
3052
|
/**
|
|
2964
|
-
*
|
|
3053
|
+
* Normalizes a text string to SCREAMING_CASE (all uppercase with underscores).
|
|
2965
3054
|
*
|
|
2966
|
-
* @param text
|
|
2967
|
-
* @returns
|
|
3055
|
+
* @param text The text string to be converted to SCREAMING_CASE format.
|
|
3056
|
+
* @returns The normalized text in SCREAMING_CASE format.
|
|
2968
3057
|
* @example 'HELLO_WORLD'
|
|
2969
3058
|
* @example 'I_LOVE_PROMPTBOOK'
|
|
2970
3059
|
* @public exported from `@promptbook/utils`
|
|
@@ -3016,10 +3105,10 @@ function normalizeTo_SCREAMING_CASE(text) {
|
|
|
3016
3105
|
*/
|
|
3017
3106
|
|
|
3018
3107
|
/**
|
|
3019
|
-
*
|
|
3108
|
+
* Normalizes a text string to snake_case format.
|
|
3020
3109
|
*
|
|
3021
|
-
* @param text
|
|
3022
|
-
* @returns
|
|
3110
|
+
* @param text The text string to be converted to snake_case format.
|
|
3111
|
+
* @returns The normalized text in snake_case format.
|
|
3023
3112
|
* @example 'hello_world'
|
|
3024
3113
|
* @example 'i_love_promptbook'
|
|
3025
3114
|
* @public exported from `@promptbook/utils`
|
|
@@ -3029,11 +3118,11 @@ function normalizeTo_snake_case(text) {
|
|
|
3029
3118
|
}
|
|
3030
3119
|
|
|
3031
3120
|
/**
|
|
3032
|
-
*
|
|
3121
|
+
* Global registry for storing and managing registered entities of a given type.
|
|
3033
3122
|
*
|
|
3034
3123
|
* Note: `$` is used to indicate that this function is not a pure function - it accesses and adds variables in global scope.
|
|
3035
3124
|
*
|
|
3036
|
-
* @private internal utility, exported are only
|
|
3125
|
+
* @private internal utility, exported are only singleton instances of this class
|
|
3037
3126
|
*/
|
|
3038
3127
|
class $Register {
|
|
3039
3128
|
constructor(registerName) {
|
|
@@ -3077,10 +3166,10 @@ class $Register {
|
|
|
3077
3166
|
}
|
|
3078
3167
|
|
|
3079
3168
|
/**
|
|
3080
|
-
*
|
|
3169
|
+
* Global registry for storing metadata about all available scrapers and converters.
|
|
3081
3170
|
*
|
|
3082
|
-
* Note: `$` is used to indicate that this interacts with the global scope
|
|
3083
|
-
* @singleton Only one instance of each register is created per build, but
|
|
3171
|
+
* Note: `$` is used to indicate that this interacts with the global scope.
|
|
3172
|
+
* @singleton Only one instance of each register is created per build, but there can be more in different contexts (e.g., tests).
|
|
3084
3173
|
* @public exported from `@promptbook/core`
|
|
3085
3174
|
*/
|
|
3086
3175
|
const $scrapersMetadataRegister = new $Register('scrapers_metadata');
|
|
@@ -3089,10 +3178,11 @@ const $scrapersMetadataRegister = new $Register('scrapers_metadata');
|
|
|
3089
3178
|
*/
|
|
3090
3179
|
|
|
3091
3180
|
/**
|
|
3092
|
-
*
|
|
3181
|
+
* Registry for all available scrapers in the system.
|
|
3182
|
+
* Central point for registering and accessing different types of content scrapers.
|
|
3093
3183
|
*
|
|
3094
3184
|
* Note: `$` is used to indicate that this interacts with the global scope
|
|
3095
|
-
* @singleton Only one instance of each register is created per build, but
|
|
3185
|
+
* @singleton Only one instance of each register is created per build, but there can be more than one in different build modules
|
|
3096
3186
|
* @public exported from `@promptbook/core`
|
|
3097
3187
|
*/
|
|
3098
3188
|
const $scrapersRegister = new $Register('scraper_constructors');
|
|
@@ -3432,10 +3522,10 @@ for (let i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
|
|
|
3432
3522
|
*/
|
|
3433
3523
|
|
|
3434
3524
|
/**
|
|
3435
|
-
*
|
|
3525
|
+
* Removes diacritic marks (accents) from characters in a string.
|
|
3436
3526
|
*
|
|
3437
|
-
* @param input
|
|
3438
|
-
* @returns
|
|
3527
|
+
* @param input The string containing diacritics to be normalized.
|
|
3528
|
+
* @returns The string with diacritics removed or normalized.
|
|
3439
3529
|
* @public exported from `@promptbook/utils`
|
|
3440
3530
|
*/
|
|
3441
3531
|
function removeDiacritics(input) {
|
|
@@ -3449,10 +3539,10 @@ function removeDiacritics(input) {
|
|
|
3449
3539
|
*/
|
|
3450
3540
|
|
|
3451
3541
|
/**
|
|
3452
|
-
*
|
|
3542
|
+
* Converts a given text to kebab-case format.
|
|
3453
3543
|
*
|
|
3454
|
-
* @param text
|
|
3455
|
-
* @returns
|
|
3544
|
+
* @param text The text to be converted.
|
|
3545
|
+
* @returns The kebab-case formatted string.
|
|
3456
3546
|
* @example 'hello-world'
|
|
3457
3547
|
* @example 'i-love-promptbook'
|
|
3458
3548
|
* @public exported from `@promptbook/utils`
|
|
@@ -3521,7 +3611,8 @@ function knowledgeSourceContentToName(knowledgeSourceContent) {
|
|
|
3521
3611
|
*/
|
|
3522
3612
|
|
|
3523
3613
|
/**
|
|
3524
|
-
*
|
|
3614
|
+
* Converts a name to a properly formatted subfolder path for cache storage.
|
|
3615
|
+
* Handles normalization and path formatting to create consistent cache directory structures.
|
|
3525
3616
|
*
|
|
3526
3617
|
* @private for `FileCacheStorage`
|
|
3527
3618
|
*/
|
|
@@ -3601,11 +3692,11 @@ function removeEmojis(text) {
|
|
|
3601
3692
|
}
|
|
3602
3693
|
|
|
3603
3694
|
/**
|
|
3604
|
-
*
|
|
3695
|
+
* Converts a title string into a normalized name.
|
|
3605
3696
|
*
|
|
3606
|
-
* @param value
|
|
3607
|
-
* @returns
|
|
3608
|
-
* @example
|
|
3697
|
+
* @param value The title string to be converted to a name.
|
|
3698
|
+
* @returns A normalized name derived from the input title.
|
|
3699
|
+
* @example 'Hello World!' -> 'hello-world'
|
|
3609
3700
|
* @public exported from `@promptbook/utils`
|
|
3610
3701
|
*/
|
|
3611
3702
|
function titleToName(value) {
|
|
@@ -3656,7 +3747,9 @@ const promptbookFetch = async (urlOrRequest, init) => {
|
|
|
3656
3747
|
*/
|
|
3657
3748
|
|
|
3658
3749
|
/**
|
|
3659
|
-
*
|
|
3750
|
+
* Factory function that creates a handler for processing knowledge sources.
|
|
3751
|
+
* Provides standardized processing of different types of knowledge sources
|
|
3752
|
+
* across various scraper implementations.
|
|
3660
3753
|
*
|
|
3661
3754
|
* @public exported from `@promptbook/core`
|
|
3662
3755
|
*/
|
|
@@ -3763,7 +3856,7 @@ async function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
 > },
 */
 async asJson() {
-return
+return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
 },
 async asText() {
 return await tools.fs.readFile(filename, 'utf-8');
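The fix above makes `asJson()` actually read and parse the file instead of returning nothing. A small, hedged sketch of a handler exposing both views over the same file; `JSON.parse` stands in for the package's own `jsonParse` helper, and a Node-style promise `fs` is assumed:

```js
// Hedged sketch, not the package's implementation.
function makeFileHandler(fs, filename) {
    return {
        async asText() {
            return await fs.readFile(filename, 'utf-8');
        },
        async asJson() {
            return JSON.parse(await fs.readFile(filename, 'utf-8'));
        },
    };
}
```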
@@ -3897,9 +3990,12 @@ TODO: [🧊] This is how it can look in future
|
|
|
3897
3990
|
*/
|
|
3898
3991
|
|
|
3899
3992
|
/**
|
|
3900
|
-
*
|
|
3993
|
+
* Prepares tasks by adding knowledge to the prompt and ensuring all necessary parameters are included.
|
|
3901
3994
|
*
|
|
3902
|
-
* @
|
|
3995
|
+
* @param tasks Sequence of tasks that are chained together to form a pipeline
|
|
3996
|
+
* @returns A promise that resolves to the prepared tasks.
|
|
3997
|
+
*
|
|
3998
|
+
* @private internal utility of `preparePipeline`
|
|
3903
3999
|
*/
|
|
3904
4000
|
async function prepareTasks(pipeline, tools, options) {
|
|
3905
4001
|
const { maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT } = options;
|
|
@@ -4021,14 +4117,14 @@ async function preparePipeline(pipeline, tools, options) {
 // TODO: [🖌][🧠] Implement some `mapAsync` function
 const preparedPersonas = new Array(personas.length);
 await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
-const
+const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
 rootDirname,
 maxParallelCount /* <- TODO: [🪂] */,
 isVerbose,
 });
 const preparedPersona = {
 ...persona,
-
+modelsRequirements,
 preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
 // <- TODO: [🍙] Make some standard order of json properties
 };
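The hunk above fills a preallocated array via `forEachAsync` with a `maxParallelCount` option, and its TODO mentions a `mapAsync` helper. A hedged, self-contained sketch of such a helper with bounded parallelism, independent of the package's own `forEachAsync`:

```js
// Sketch of a `mapAsync`-style helper with bounded parallelism (assumption, not the package's API).
async function mapAsync(items, maxParallelCount, callback) {
    const results = new Array(items.length);
    let cursor = 0;
    const workers = Array.from({ length: Math.min(maxParallelCount, items.length) }, async () => {
        while (cursor < items.length) {
            const index = cursor++; // safe: no await between read and increment
            results[index] = await callback(items[index], index);
        }
    });
    await Promise.all(workers);
    return results;
}
```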
@@ -4336,7 +4432,7 @@ function union(...sets) {
|
|
|
4336
4432
|
}
|
|
4337
4433
|
|
|
4338
4434
|
/**
|
|
4339
|
-
*
|
|
4435
|
+
* Contains configuration options for parsing and generating CSV files, such as delimiters and quoting rules.
|
|
4340
4436
|
*
|
|
4341
4437
|
* @public exported from `@promptbook/core`
|
|
4342
4438
|
*/
|
|
@@ -4345,11 +4441,29 @@ const MANDATORY_CSV_SETTINGS = Object.freeze({
 // encoding: 'utf-8',
 });

+/**
+* Converts a CSV string into an object
+*
+* Note: This is wrapper around `papaparse.parse()` with better autohealing
+*
+* @private - for now until `@promptbook/csv` is released
+*/
+function csvParse(value /* <- TODO: string_csv */, settings, schema /* <- TODO: Make CSV Schemas */) {
+settings = { ...settings, ...MANDATORY_CSV_SETTINGS };
+// Note: Autoheal invalid '\n' characters
+if (settings.newline && !settings.newline.includes('\r') && value.includes('\r')) {
+console.warn('CSV string contains carriage return characters, but in the CSV settings the `newline` setting does not include them. Autohealing the CSV string.');
+value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
+}
+const csv = parse(value, settings);
+return csv;
+}
+
 /**
 * Function to check if a string is valid CSV
 *
 * @param value The string to check
-* @returns
+* @returns `true` if the string is a valid CSV string, false otherwise
 *
 * @public exported from `@promptbook/utils`
 */
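The new `csvParse` above wraps papaparse's `parse` and normalizes stray carriage returns before parsing. A standalone sketch of the same autohealing idea using papaparse directly (defaults shown here, such as `header: true`, are illustrative assumptions rather than the package's settings):

```js
// Standalone sketch using papaparse directly; not the package's `csvParse`.
import { parse } from 'papaparse';

function parseCsvLenient(csvString, settings = {}) {
    let value = csvString;
    if (settings.newline && !settings.newline.includes('\r') && value.includes('\r')) {
        // Normalize CRLF / lone CR to LF so the configured `newline` matches the data
        value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
    }
    return parse(value, { header: true, skipEmptyLines: true, ...settings }); // -> { data, errors, meta }
}
```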
@@ -4373,7 +4487,7 @@ function isValidCsvString(value) {
|
|
|
4373
4487
|
* @public exported from `@promptbook/core`
|
|
4374
4488
|
* <- TODO: [🏢] Export from package `@promptbook/csv`
|
|
4375
4489
|
*/
|
|
4376
|
-
const
|
|
4490
|
+
const CsvFormatParser = {
|
|
4377
4491
|
formatName: 'CSV',
|
|
4378
4492
|
aliases: ['SPREADSHEET', 'TABLE'],
|
|
4379
4493
|
isValid(value, settings, schema) {
|
|
@@ -4385,12 +4499,12 @@ const CsvFormatDefinition = {
 heal(value, settings, schema) {
 throw new Error('Not implemented');
 },
-
+subvalueParsers: [
 {
 subvalueName: 'ROW',
-async mapValues(
-
-const csv =
+async mapValues(options) {
+const { value, outputParameterName, settings, mapCallback, onProgress } = options;
+const csv = csvParse(value, settings);
 if (csv.errors.length !== 0) {
 throw new CsvFormatError(spaceTrim((block) => `
 CSV parsing error
@@ -4405,23 +4519,37 @@ const CsvFormatDefinition = {
 ${block(value)}
 `));
 }
-const mappedData =
+const mappedData = [];
+const length = csv.data.length;
+for (let index = 0; index < length; index++) {
+const row = csv.data[index];
 if (row[outputParameterName]) {
 throw new CsvFormatError(`Can not overwrite existing column "${outputParameterName}" in CSV row`);
 }
-
+const mappedRow = {
 ...row,
-[outputParameterName]: await mapCallback(row, index),
+[outputParameterName]: await mapCallback(row, index, length),
 };
-
+mappedData.push(mappedRow);
+if (onProgress) {
+// Note: Report the CSV with all rows mapped so far
+/*
+// TODO: [🛕] Report progress with all the rows including the pending ones
+const progressData = mappedData.map((row, i) =>
+i > index ? { ...row, [outputParameterName]: PENDING_VALUE_PLACEHOLDER } : row,
+);
+*/
+await onProgress(unparse(mappedData, { ...settings, ...MANDATORY_CSV_SETTINGS }));
+}
+}
 return unparse(mappedData, { ...settings, ...MANDATORY_CSV_SETTINGS });
 },
 },
 {
 subvalueName: 'CELL',
-async mapValues(
-
-const csv =
+async mapValues(options) {
+const { value, settings, mapCallback, onProgress } = options;
+const csv = csvParse(value, settings);
 if (csv.errors.length !== 0) {
 throw new CsvFormatError(spaceTrim((block) => `
 CSV parsing error
@@ -4437,9 +4565,9 @@ const CsvFormatDefinition = {
 `));
 }
 const mappedData = await Promise.all(csv.data.map(async (row, rowIndex) => {
-return /* not await */ Promise.all(Object.entries(row).map(async ([key, value], columnIndex) => {
+return /* not await */ Promise.all(Object.entries(row).map(async ([key, value], columnIndex, array) => {
 const index = rowIndex * Object.keys(row).length + columnIndex;
-return /* not await */ mapCallback({ [key]: value }, index);
+return /* not await */ mapCallback({ [key]: value }, index, array.length);
 }));
 }));
 return unparse(mappedData, { ...settings, ...MANDATORY_CSV_SETTINGS });
@@ -4448,10 +4576,10 @@ const CsvFormatDefinition = {
 ],
 };
 /**
-* TODO: [🍓] In `
-* TODO: [🍓] In `
-* TODO: [🍓] In `
-* TODO: [🍓] In `
+* TODO: [🍓] In `CsvFormatParser` implement simple `isValid`
+* TODO: [🍓] In `CsvFormatParser` implement partial `canBeValid`
+* TODO: [🍓] In `CsvFormatParser` implement `heal
+* TODO: [🍓] In `CsvFormatParser` implement `subvalueParsers`
 * TODO: [🏢] Allow to expect something inside CSV objects and other formats
 */

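A hedged usage sketch of a ROW-style mapper in the spirit of the parser above: each row gets a new output column from an async callback, and progress is reported as the partially filled CSV. It uses papaparse's real `parse`/`unparse` but is not the package's `CsvFormatParser` itself:

```js
// Illustrative sketch, not the package's CsvFormatParser.
import { parse, unparse } from 'papaparse';

async function mapCsvRows(csvString, outputColumn, mapCallback, onProgress) {
    const { data } = parse(csvString, { header: true, skipEmptyLines: true });
    const mapped = [];
    for (let index = 0; index < data.length; index++) {
        mapped.push({ ...data[index], [outputColumn]: await mapCallback(data[index], index, data.length) });
        if (onProgress) {
            await onProgress(unparse(mapped)); // report all rows mapped so far
        }
    }
    return unparse(mapped);
}
```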
@@ -4460,7 +4588,7 @@ const CsvFormatDefinition = {
|
|
|
4460
4588
|
*
|
|
4461
4589
|
* @private still in development [🏢]
|
|
4462
4590
|
*/
|
|
4463
|
-
const
|
|
4591
|
+
const JsonFormatParser = {
|
|
4464
4592
|
formatName: 'JSON',
|
|
4465
4593
|
mimeType: 'application/json',
|
|
4466
4594
|
isValid(value, settings, schema) {
|
|
@@ -4472,28 +4600,28 @@ const JsonFormatDefinition = {
|
|
|
4472
4600
|
heal(value, settings, schema) {
|
|
4473
4601
|
throw new Error('Not implemented');
|
|
4474
4602
|
},
|
|
4475
|
-
|
|
4603
|
+
subvalueParsers: [],
|
|
4476
4604
|
};
|
|
4477
4605
|
/**
|
|
4478
4606
|
* TODO: [🧠] Maybe propper instance of object
|
|
4479
4607
|
* TODO: [0] Make string_serialized_json
|
|
4480
4608
|
* TODO: [1] Make type for JSON Settings and Schema
|
|
4481
4609
|
* TODO: [🧠] What to use for validating JSONs - JSON Schema, ZoD, typescript types/interfaces,...?
|
|
4482
|
-
* TODO: [🍓] In `
|
|
4483
|
-
* TODO: [🍓] In `
|
|
4484
|
-
* TODO: [🍓] In `
|
|
4485
|
-
* TODO: [🍓] In `
|
|
4610
|
+
* TODO: [🍓] In `JsonFormatParser` implement simple `isValid`
|
|
4611
|
+
* TODO: [🍓] In `JsonFormatParser` implement partial `canBeValid`
|
|
4612
|
+
* TODO: [🍓] In `JsonFormatParser` implement `heal
|
|
4613
|
+
* TODO: [🍓] In `JsonFormatParser` implement `subvalueParsers`
|
|
4486
4614
|
* TODO: [🏢] Allow to expect something inside JSON objects and other formats
|
|
4487
4615
|
*/
|
|
4488
4616
|
|
|
4489
4617
|
/**
|
|
4490
4618
|
* Definition for any text - this will be always valid
|
|
4491
4619
|
*
|
|
4492
|
-
* Note: This is not useful for validation, but for splitting and mapping with `
|
|
4620
|
+
* Note: This is not useful for validation, but for splitting and mapping with `subvalueParsers`
|
|
4493
4621
|
*
|
|
4494
4622
|
* @public exported from `@promptbook/core`
|
|
4495
4623
|
*/
|
|
4496
|
-
const
|
|
4624
|
+
const TextFormatParser = {
|
|
4497
4625
|
formatName: 'TEXT',
|
|
4498
4626
|
isValid(value) {
|
|
4499
4627
|
return typeof value === 'string';
|
|
@@ -4502,19 +4630,20 @@ const TextFormatDefinition = {
|
|
|
4502
4630
|
return typeof partialValue === 'string';
|
|
4503
4631
|
},
|
|
4504
4632
|
heal() {
|
|
4505
|
-
throw new UnexpectedError('It does not make sense to call `
|
|
4633
|
+
throw new UnexpectedError('It does not make sense to call `TextFormatParser.heal`');
|
|
4506
4634
|
},
|
|
4507
|
-
|
|
4635
|
+
subvalueParsers: [
|
|
4508
4636
|
{
|
|
4509
4637
|
subvalueName: 'LINE',
|
|
4510
|
-
async mapValues(
|
|
4638
|
+
async mapValues(options) {
|
|
4639
|
+
const { value, mapCallback, onProgress } = options;
|
|
4511
4640
|
const lines = value.split('\n');
|
|
4512
|
-
const mappedLines = await Promise.all(lines.map((lineContent, lineNumber) =>
|
|
4641
|
+
const mappedLines = await Promise.all(lines.map((lineContent, lineNumber, array) =>
|
|
4513
4642
|
// TODO: [🧠] Maybe option to skip empty line
|
|
4514
4643
|
/* not await */ mapCallback({
|
|
4515
4644
|
lineContent,
|
|
4516
4645
|
// TODO: [🧠] Maybe also put here `lineNumber`
|
|
4517
|
-
}, lineNumber)));
|
|
4646
|
+
}, lineNumber, array.length)));
|
|
4518
4647
|
return mappedLines.join('\n');
|
|
4519
4648
|
},
|
|
4520
4649
|
},
|
|
@@ -4524,10 +4653,10 @@ const TextFormatDefinition = {
|
|
|
4524
4653
|
/**
|
|
4525
4654
|
* TODO: [1] Make type for XML Text and Schema
|
|
4526
4655
|
* TODO: [🧠][🤠] Here should be all words, characters, lines, paragraphs, pages available as subvalues
|
|
4527
|
-
* TODO: [🍓] In `
|
|
4528
|
-
* TODO: [🍓] In `
|
|
4529
|
-
* TODO: [🍓] In `
|
|
4530
|
-
* TODO: [🍓] In `
|
|
4656
|
+
* TODO: [🍓] In `TextFormatParser` implement simple `isValid`
|
|
4657
|
+
* TODO: [🍓] In `TextFormatParser` implement partial `canBeValid`
|
|
4658
|
+
* TODO: [🍓] In `TextFormatParser` implement `heal
|
|
4659
|
+
* TODO: [🍓] In `TextFormatParser` implement `subvalueParsers`
|
|
4531
4660
|
* TODO: [🏢] Allow to expect something inside each item of list and other formats
|
|
4532
4661
|
*/
|
|
4533
4662
|
|
|
@@ -4535,7 +4664,7 @@ const TextFormatDefinition = {
|
|
|
4535
4664
|
* Function to check if a string is valid XML
|
|
4536
4665
|
*
|
|
4537
4666
|
* @param value
|
|
4538
|
-
* @returns
|
|
4667
|
+
* @returns `true` if the string is a valid XML string, false otherwise
|
|
4539
4668
|
*
|
|
4540
4669
|
* @public exported from `@promptbook/utils`
|
|
4541
4670
|
*/
|
|
@@ -4560,7 +4689,7 @@ function isValidXmlString(value) {
|
|
|
4560
4689
|
*
|
|
4561
4690
|
* @private still in development [🏢]
|
|
4562
4691
|
*/
|
|
4563
|
-
const
|
|
4692
|
+
const XmlFormatParser = {
|
|
4564
4693
|
formatName: 'XML',
|
|
4565
4694
|
mimeType: 'application/xml',
|
|
4566
4695
|
isValid(value, settings, schema) {
|
|
@@ -4572,17 +4701,17 @@ const XmlFormatDefinition = {
|
|
|
4572
4701
|
heal(value, settings, schema) {
|
|
4573
4702
|
throw new Error('Not implemented');
|
|
4574
4703
|
},
|
|
4575
|
-
|
|
4704
|
+
subvalueParsers: [],
|
|
4576
4705
|
};
|
|
4577
4706
|
/**
|
|
4578
4707
|
* TODO: [🧠] Maybe propper instance of object
|
|
4579
4708
|
* TODO: [0] Make string_serialized_xml
|
|
4580
4709
|
* TODO: [1] Make type for XML Settings and Schema
|
|
4581
4710
|
* TODO: [🧠] What to use for validating XMLs - XSD,...
|
|
4582
|
-
* TODO: [🍓] In `
|
|
4583
|
-
* TODO: [🍓] In `
|
|
4584
|
-
* TODO: [🍓] In `
|
|
4585
|
-
* TODO: [🍓] In `
|
|
4711
|
+
* TODO: [🍓] In `XmlFormatParser` implement simple `isValid`
|
|
4712
|
+
* TODO: [🍓] In `XmlFormatParser` implement partial `canBeValid`
|
|
4713
|
+
* TODO: [🍓] In `XmlFormatParser` implement `heal
|
|
4714
|
+
* TODO: [🍓] In `XmlFormatParser` implement `subvalueParsers`
|
|
4586
4715
|
* TODO: [🏢] Allow to expect something inside XML and other formats
|
|
4587
4716
|
*/
|
|
4588
4717
|
|
|
@@ -4591,24 +4720,19 @@ const XmlFormatDefinition = {
|
|
|
4591
4720
|
*
|
|
4592
4721
|
* @private internal index of `...` <- TODO [🏢]
|
|
4593
4722
|
*/
|
|
4594
|
-
const FORMAT_DEFINITIONS = [
|
|
4595
|
-
JsonFormatDefinition,
|
|
4596
|
-
XmlFormatDefinition,
|
|
4597
|
-
TextFormatDefinition,
|
|
4598
|
-
CsvFormatDefinition,
|
|
4599
|
-
];
|
|
4723
|
+
const FORMAT_DEFINITIONS = [JsonFormatParser, XmlFormatParser, TextFormatParser, CsvFormatParser];
|
|
4600
4724
|
/**
|
|
4601
4725
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
4602
4726
|
*/
|
|
4603
4727
|
|
|
4604
4728
|
/**
|
|
4605
|
-
* Maps available parameters to expected parameters
|
|
4729
|
+
* Maps available parameters to expected parameters for a pipeline task.
|
|
4606
4730
|
*
|
|
4607
4731
|
* The strategy is:
|
|
4608
|
-
* 1)
|
|
4609
|
-
* 2)
|
|
4732
|
+
* 1) First, match parameters by name where both available and expected.
|
|
4733
|
+
* 2) Then, if there are unmatched expected and available parameters, map them by order.
|
|
4610
4734
|
*
|
|
4611
|
-
* @throws {PipelineExecutionError}
|
|
4735
|
+
* @throws {PipelineExecutionError} If the number of unmatched expected and available parameters does not match, or mapping is ambiguous.
|
|
4612
4736
|
* @private within the repository used in `createPipelineExecutor`
|
|
4613
4737
|
*/
|
|
4614
4738
|
function mapAvailableToExpectedParameters(options) {
|
|
@@ -4631,7 +4755,7 @@ function mapAvailableToExpectedParameters(options) {
|
|
|
4631
4755
|
else if (!availableParametersNames.has(parameterName) && expectedParameterNames.has(parameterName)) ;
|
|
4632
4756
|
}
|
|
4633
4757
|
if (expectedParameterNames.size === 0) {
|
|
4634
|
-
// Note: [👨👨👧] Now we can freeze `mappedParameters` to prevent
|
|
4758
|
+
// Note: [👨👨👧] Now we can freeze `mappedParameters` to prevent accidental modifications after mapping
|
|
4635
4759
|
Object.freeze(mappedParameters);
|
|
4636
4760
|
return mappedParameters;
|
|
4637
4761
|
}
|
|
@@ -4662,7 +4786,7 @@ function mapAvailableToExpectedParameters(options) {
|
|
|
4662
4786
|
for (let i = 0; i < expectedParameterNames.size; i++) {
|
|
4663
4787
|
mappedParameters[expectedParameterNamesArray[i]] = availableParameters[availableParametersNamesArray[i]];
|
|
4664
4788
|
}
|
|
4665
|
-
// Note: [👨👨👧] Now we can freeze `mappedParameters` to prevent
|
|
4789
|
+
// Note: [👨👨👧] Now we can freeze `mappedParameters` to prevent accidental modifications after mapping
|
|
4666
4790
|
Object.freeze(mappedParameters);
|
|
4667
4791
|
return mappedParameters;
|
|
4668
4792
|
}
|
|
@@ -4766,7 +4890,7 @@ function extractJsonBlock(markdown) {
|
|
|
4766
4890
|
}
|
|
4767
4891
|
/**
|
|
4768
4892
|
* TODO: Add some auto-healing logic + extract YAML, JSON5, TOML, etc.
|
|
4769
|
-
* TODO: [🏢] Make this logic part of `
|
|
4893
|
+
* TODO: [🏢] Make this logic part of `JsonFormatParser` or `isValidJsonString`
|
|
4770
4894
|
*/
|
|
4771
4895
|
|
|
4772
4896
|
/**
|
|
@@ -4826,10 +4950,12 @@ function templateParameters(template, parameters) {
|
|
|
4826
4950
|
throw new PipelineExecutionError('Parameter is already opened or not closed');
|
|
4827
4951
|
}
|
|
4828
4952
|
if (parameters[parameterName] === undefined) {
|
|
4953
|
+
console.log('!!! templateParameters 1', { parameterName, template, parameters });
|
|
4829
4954
|
throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
|
|
4830
4955
|
}
|
|
4831
4956
|
let parameterValue = parameters[parameterName];
|
|
4832
4957
|
if (parameterValue === undefined) {
|
|
4958
|
+
console.log('!!! templateParameters 2', { parameterName, template, parameters });
|
|
4833
4959
|
throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
|
|
4834
4960
|
}
|
|
4835
4961
|
parameterValue = valueToString(parameterValue);
|
|
@@ -4985,7 +5111,7 @@ const CountUtils = {
|
|
|
4985
5111
|
PAGES: countPages,
|
|
4986
5112
|
};
|
|
4987
5113
|
/**
|
|
4988
|
-
* TODO: [🧠][🤠] This should be probbably as part of `
|
|
5114
|
+
* TODO: [🧠][🤠] This should be probbably as part of `TextFormatParser`
|
|
4989
5115
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
4990
5116
|
*/
|
|
4991
5117
|
|
|
@@ -5013,13 +5139,17 @@ function checkExpectations(expectations, value) {
|
|
|
5013
5139
|
}
|
|
5014
5140
|
/**
|
|
5015
5141
|
* TODO: [💝] Unite object for expecting amount and format
|
|
5016
|
-
* TODO: [🧠][🤠] This should be part of `
|
|
5142
|
+
* TODO: [🧠][🤠] This should be part of `TextFormatParser`
|
|
5017
5143
|
* Note: [💝] and [🤠] are interconnected together
|
|
5018
5144
|
*/
|
|
5019
5145
|
|
|
5020
5146
|
/**
|
|
5021
|
-
*
|
|
5147
|
+
* Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
|
|
5148
|
+
* (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
|
|
5149
|
+
* Throws errors if execution fails after all attempts.
|
|
5022
5150
|
*
|
|
5151
|
+
* @param options - The options for execution, including task, parameters, pipeline, and configuration.
|
|
5152
|
+
* @returns The result string of the executed task.
|
|
5023
5153
|
* @private internal utility of `createPipelineExecutor`
|
|
5024
5154
|
*/
|
|
5025
5155
|
async function executeAttempts(options) {
|
|
@@ -5241,7 +5371,7 @@ async function executeAttempts(options) {
|
|
|
5241
5371
|
if (task.format) {
|
|
5242
5372
|
if (task.format === 'JSON') {
|
|
5243
5373
|
if (!isValidJsonString($ongoingTaskResult.$resultString || '')) {
|
|
5244
|
-
// TODO: [🏢] Do more universally via `
|
|
5374
|
+
// TODO: [🏢] Do more universally via `FormatParser`
|
|
5245
5375
|
try {
|
|
5246
5376
|
$ongoingTaskResult.$resultString = extractJsonBlock($ongoingTaskResult.$resultString || '');
|
|
5247
5377
|
}
|
|
@@ -5343,12 +5473,16 @@ async function executeAttempts(options) {
 */

 /**
-*
+* Executes a pipeline task that requires mapping or iterating over subvalues of a parameter (such as rows in a CSV).
+* Handles format and subformat resolution, error handling, and progress reporting.
+*
+* @param options - Options for execution, including task details and progress callback.
+* @returns The result of the subvalue mapping or execution attempts.
 *
 * @private internal utility of `createPipelineExecutor`
 */
 async function executeFormatSubvalues(options) {
-const { task, jokerParameterNames, parameters, priority, csvSettings, pipelineIdentification } = options;
+const { task, jokerParameterNames, parameters, priority, csvSettings, onProgress, pipelineIdentification } = options;
 if (task.foreach === undefined) {
 return /* not await */ executeAttempts(options);
 }
@@ -5379,16 +5513,16 @@ async function executeFormatSubvalues(options) {
 ${block(pipelineIdentification)}
 `));
 }
-const
-if (
+const subvalueParser = formatDefinition.subvalueParsers.find((subvalueParser) => [subvalueParser.subvalueName, ...(subvalueParser.aliases || [])].includes(task.foreach.subformatName));
+if (subvalueParser === undefined) {
 throw new UnexpectedError(
 // <- TODO: [🧠][🧐] Should be formats fixed per promptbook version or behave as plugins (=> change UnexpectedError)
 spaceTrim((block) => `
 Unsupported subformat name "${task.foreach.subformatName}" for format "${task.foreach.formatName}"

 Available subformat names for format "${formatDefinition.formatName}":
-${block(formatDefinition.
-.map((
+${block(formatDefinition.subvalueParsers
+.map((subvalueParser) => subvalueParser.subvalueName)
 .map((subvalueName) => `- ${subvalueName}`)
 .join('\n'))}

@@ -5402,53 +5536,83 @@ async function executeFormatSubvalues(options) {
 formatSettings = csvSettings;
 // <- TODO: [🤹♂️] More universal, make simmilar pattern for other formats for example \n vs \r\n in text
 }
-const resultString = await
-
-
-
-
-
-
-
-
-
-
-
-
+const resultString = await subvalueParser.mapValues({
+value: parameterValue,
+outputParameterName: task.foreach.outputSubparameterName,
+settings: formatSettings,
+onProgress(partialResultString) {
+return onProgress(Object.freeze({
+[task.resultingParameterName]: partialResultString,
+}));
+},
+async mapCallback(subparameters, index, length) {
+let mappedParameters;
+try {
+mappedParameters = mapAvailableToExpectedParameters({
+expectedParameters: Object.fromEntries(task.foreach.inputSubparameterNames.map((subparameterName) => [subparameterName, null])),
+availableParameters: subparameters,
+});
 }
-
-
+catch (error) {
+if (!(error instanceof PipelineExecutionError)) {
+throw error;
+}
+const highLevelError = new PipelineExecutionError(spaceTrim((block) => `
+${error.message}

-
-
+This is error in FOREACH command when mapping ${formatDefinition.formatName} ${subvalueParser.subvalueName} data (${index + 1}/${length})
+You have probbably passed wrong data to pipeline or wrong data was generated which are processed by FOREACH command

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+${block(pipelineIdentification)}
+`));
+if (length > BIG_DATASET_TRESHOLD) {
+console.error(highLevelError);
+return FAILED_VALUE_PLACEHOLDER;
+}
+throw highLevelError;
+}
+const allSubparameters = {
+...parameters,
+...mappedParameters,
+};
+Object.freeze(allSubparameters);
+try {
+const subresultString = await executeAttempts({
+...options,
+priority: priority + index,
+parameters: allSubparameters,
+pipelineIdentification: spaceTrim((block) => `
+${block(pipelineIdentification)}
+Subparameter index: ${index}
+`),
+});
+return subresultString;
+}
+catch (error) {
+if (length > BIG_DATASET_TRESHOLD) {
+console.error(spaceTrim((block) => `
+${error.message}
+
+This is error in FOREACH command when processing ${formatDefinition.formatName} ${subvalueParser.subvalueName} data (${index + 1}/${length})
+
+${block(pipelineIdentification)}
+`));
+return FAILED_VALUE_PLACEHOLDER;
+}
+throw error;
+}
+},
 });
 return resultString;
 }

 /**
-*
+* Returns the context for a given task, typically used to provide additional information or variables
+* required for the execution of the task within a pipeline. The context is returned as a string value
+* that may include markdown formatting.
 *
+* @param task - The task for which the context is being generated. This should be a deeply immutable TaskJson object.
+* @returns The context as a string, formatted as markdown and parameter value.
 * @private internal utility of `createPipelineExecutor`
 */
 async function getContextForTask(task) {
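The `mapCallback` above degrades gracefully on large inputs: past a `BIG_DATASET_TRESHOLD` a failed item is logged and replaced with a placeholder instead of aborting the whole FOREACH. A hedged, generic sketch of that pattern; the constant names below are stand-ins, not the package's own values:

```js
// Generic "fail soft on big datasets" sketch; constants are illustrative assumptions.
const BIG_DATASET_THRESHOLD = 100;
const FAILED_PLACEHOLDER = '[failed]';

async function mapWithSoftFailures(items, mapOneItem) {
    return Promise.all(
        items.map(async (item, index) => {
            try {
                return await mapOneItem(item, index);
            } catch (error) {
                if (items.length > BIG_DATASET_THRESHOLD) {
                    console.error(`Item ${index + 1}/${items.length} failed`, error);
                    return FAILED_PLACEHOLDER; // keep going, mark only this item
                }
                throw error; // small datasets: fail loudly
            }
        }),
    );
}
```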
@@ -5456,7 +5620,7 @@ async function getContextForTask(task) {
 }

 /**
-*
+* Retrieves example values or templates for a given task, used to guide or validate pipeline execution.
 *
 * @private internal utility of `createPipelineExecutor`
 */
@@ -5465,28 +5629,130 @@ async function getExamplesForTask(task) {
 }

 /**
-*
+* Computes the cosine similarity between two embedding vectors
+*
+* Note: This is helping function for RAG (retrieval-augmented generation)
+*
+* @param embeddingVector1
+* @param embeddingVector2
+* @returns Cosine similarity between the two vectors
+*
+* @public exported from `@promptbook/core`
+*/
+function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
+if (embeddingVector1.length !== embeddingVector2.length) {
+throw new TypeError('Embedding vectors must have the same length');
+}
+const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
+const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
+const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
+return 1 - dotProduct / (magnitude1 * magnitude2);
+}
+
+/**
+*
+* @param knowledgePieces
+* @returns
 *
 * @private internal utility of `createPipelineExecutor`
 */
-
-
-
-
+function knowledgePiecesToString(knowledgePieces) {
+return knowledgePieces
+.map((knowledgePiece) => {
+const { content } = knowledgePiece;
+return `- ${content}`;
+})
+.join('\n');
+// <- TODO: [🧠] Some smarter aggregation of knowledge pieces, single-line vs multi-line vs mixed
 }

 /**
-*
+* Retrieves the most relevant knowledge pieces for a given task using embedding-based similarity search.
+* This is where retrieval-augmented generation (RAG) is performed to enhance the task with external knowledge.
 *
 * @private internal utility of `createPipelineExecutor`
 */
-async function
-const { preparedPipeline, task,
-const
-const
-
-
-
+async function getKnowledgeForTask(options) {
+const { tools, preparedPipeline, task, parameters } = options;
+const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
+const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
+// <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
+if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
+return ''; // <- Note: Np knowledge present, return empty string
+}
+try {
+// TODO: [🚐] Make arrayable LLMs -> single LLM DRY
+const _llms = arrayableToArray(tools.llm);
+const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+const taskEmbeddingPrompt = {
+title: 'Knowledge Search',
+modelRequirements: {
+modelVariant: 'EMBEDDING',
+modelName: firstKnowlegeIndex.modelName,
+},
+content: task.content,
+parameters,
+};
+const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
+const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
+const { index } = knowledgePiece;
+const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
+// <- TODO: Do not use just first knowledge piece and first index to determine embedding model
+if (knowledgePieceIndex === undefined) {
+return {
+content: knowledgePiece.content,
+relevance: 0,
+};
+}
+const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
+return {
+content: knowledgePiece.content,
+relevance,
+};
+});
+const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
+const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
+console.log('!!! Embedding', {
+task,
+taskEmbeddingPrompt,
+taskEmbeddingResult,
+firstKnowlegePiece,
+firstKnowlegeIndex,
+knowledgePiecesWithRelevance,
+knowledgePiecesSorted,
+knowledgePiecesLimited,
+});
+return knowledgePiecesToString(knowledgePiecesLimited);
+}
+catch (error) {
+assertsError(error);
+console.error('Error in `getKnowledgeForTask`', error);
+// Note: If the LLM fails, just return all knowledge pieces
+return knowledgePiecesToString(preparedPipeline.knowledgePieces);
+}
+}
+/**
+* TODO: !!!! Verify if this is working
+* TODO: [♨] Implement Better - use keyword search
+* TODO: [♨] Examples of values
+*/
+
+/**
+* Retrieves all reserved parameters for a given pipeline task, including context, knowledge, examples, and metadata.
+* Ensures all reserved parameters are defined and throws if any are missing.
+*
+* @param options - Options including tools, pipeline, task, and context.
+* @returns An object containing all reserved parameters for the task.
+*
+* @private internal utility of `createPipelineExecutor`
+*/
+async function getReservedParametersForTask(options) {
+const { tools, preparedPipeline, task, parameters, pipelineIdentification } = options;
+const context = await getContextForTask(); // <- [🏍]
+const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task, parameters });
+const examples = await getExamplesForTask();
+const currentDate = new Date().toISOString(); // <- TODO: [🧠][💩] Better
+const modelName = RESERVED_PARAMETER_MISSING_VALUE;
 const reservedParameters = {
 content: RESERVED_PARAMETER_RESTRICTED,
 context,
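Worth noting: `computeCosineSimilarity` as shipped returns `1 - cos(θ)`, which is a distance-like score where smaller means more similar; that matches the ascending sort and `slice(0, 5)` in `getKnowledgeForTask` above. A small sketch of ranking with the same convention:

```js
// Ranking sketch using the same convention as above: smaller score = more similar.
function cosineDistance(a, b) {
    if (a.length !== b.length) {
        throw new TypeError('Embedding vectors must have the same length');
    }
    const dot = a.reduce((sum, value, i) => sum + value * b[i], 0);
    const norm = (v) => Math.sqrt(v.reduce((sum, value) => sum + value * value, 0));
    return 1 - dot / (norm(a) * norm(b));
}

const queryEmbedding = [0.1, 0.9, 0.2];
const pieces = [
    { content: 'About apples', embedding: [0.1, 0.8, 0.3] },
    { content: 'About trains', embedding: [0.9, 0.1, 0.1] },
];
const ranked = pieces
    .map((piece) => ({ ...piece, score: cosineDistance(queryEmbedding, piece.embedding) }))
    .sort((a, b) => a.score - b.score); // most relevant first
console.log(ranked[0].content); // -> 'About apples'
```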
@@ -5509,23 +5775,21 @@ async function getReservedParametersForTask(options) {
 }

 /**
-*
+* Executes a single task within a pipeline, handling parameter validation, error checking, and progress reporting.
+*
+* @param options - Options for execution, including the task, pipeline, parameters, and callbacks.
+* @returns The output parameters produced by the task.
 *
 * @private internal utility of `createPipelineExecutor`
 */
 async function executeTask(options) {
 const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSupressed, } = options;
 const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
-await onProgress({
-outputParameters: {
-[currentTask.resultingParameterName]: '', // <- TODO: [🧠] What is the best value here?
-},
-});
 // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
 const usedParameterNames = extractParameterNamesFromTask(currentTask);
 const dependentParameterNames = new Set(currentTask.dependentParameterNames);
 // TODO: [👩🏾‍🤝‍👩🏻] Use here `mapAvailableToExpectedParameters`
-if (union(difference(usedParameterNames, dependentParameterNames), difference(dependentParameterNames, usedParameterNames)).size !== 0) {
+if (difference(union(difference(usedParameterNames, dependentParameterNames), difference(dependentParameterNames, usedParameterNames)), new Set(RESERVED_PARAMETER_NAMES)).size !== 0) {
 throw new UnexpectedError(spaceTrim$1((block) => `
 Dependent parameters are not consistent with used parameters:

@@ -5545,9 +5809,11 @@ async function executeTask(options) {
 }
 const definedParameters = Object.freeze({
 ...(await getReservedParametersForTask({
+tools,
 preparedPipeline,
 task: currentTask,
 pipelineIdentification,
+parameters: parametersToPass,
 })),
 ...parametersToPass,
 });
@@ -5593,6 +5859,7 @@ async function executeTask(options) {
 preparedPipeline,
 tools,
 $executionReport,
+onProgress,
 pipelineIdentification,
 maxExecutionAttempts,
 maxParallelCount,
@@ -5620,7 +5887,8 @@ async function executeTask(options) {
 */

 /**
-*
+* Filters and returns only the output parameters from the provided pipeline execution options.
+* Adds warnings for any expected output parameters that are missing.
 *
 * @private internal utility of `createPipelineExecutor`
 */
@@ -5645,9 +5913,12 @@ function filterJustOutputParameters(options) {
 }

 /**
-*
+* Executes an entire pipeline, resolving tasks in dependency order, handling errors, and reporting progress.
+*
+* Note: This is not a `PipelineExecutor` (which is bound to a single pipeline), but a utility function used by `createPipelineExecutor` to create a `PipelineExecutor`.
 *
-*
+* @param options - Options for execution, including input parameters, pipeline, and callbacks.
+* @returns The result of the pipeline execution, including output parameters, errors, and usage statistics.
 *
 * @private internal utility of `createPipelineExecutor`
 */
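The widened consistency check in `executeTask` above tolerates reserved parameter names by subtracting them from the used/dependent symmetric difference. A hedged sketch of the same set algebra with plain `Set` helpers (the helper names mirror the `difference`/`union` calls in the bundle but are defined locally here):

```js
// Local Set helpers mirroring the `difference`/`union` calls above (sketch only).
const difference = (a, b) => new Set([...a].filter((item) => !b.has(item)));
const union = (...sets) => new Set(sets.flatMap((s) => [...s]));

function findInconsistentParameters(usedNames, dependentNames, reservedNames) {
    // Symmetric difference of used vs dependent, minus the reserved names
    const mismatch = union(difference(usedNames, dependentNames), difference(dependentNames, usedNames));
    return difference(mismatch, reservedNames); // non-empty -> inconsistency worth reporting
}

const issues = findInconsistentParameters(
    new Set(['title', 'knowledge']),
    new Set(['title']),
    new Set(['knowledge', 'context']),
);
console.log(issues.size === 0); // -> true, `knowledge` is reserved so it is tolerated
```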
@@ -5970,6 +6241,22 @@ function createPipelineExecutor(options) {
 cacheDirname,
 intermediateFilesStrategy,
 isAutoInstalled,
+}).catch((error) => {
+assertsError(error);
+return exportJson({
+name: 'pipelineExecutorResult',
+message: `Unuccessful PipelineExecutorResult, last catch`,
+order: [],
+value: {
+isSuccessful: false,
+errors: [serializeError(error)],
+warnings: [],
+usage: UNCERTAIN_USAGE,
+executionReport: null,
+outputParameters: {},
+preparedPipeline,
+},
+});
 });
 };
 const pipelineExecutor = (inputParameters) => createTask({
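With the added `.catch` above, a rejected execution is converted into an unsuccessful result object rather than a thrown error. A hedged sketch of consuming such a result; the checked fields come from the failure object built above, while the surrounding task API (calling the executor and awaiting it as a promise) is only assumed here:

```js
// Hedged consumer sketch; `executor(inputParameters).asPromise()` is an assumption
// based on the surrounding code, the fields come from the failure object above.
async function runPipelineSafely(executor, inputParameters) {
    const result = await executor(inputParameters).asPromise();
    if (!result.isSuccessful) {
        for (const error of result.errors) {
            console.error('Pipeline failed:', error.message);
        }
        return null;
    }
    for (const warning of result.warnings) {
        console.warn('Pipeline warning:', warning.message);
    }
    return result.outputParameters;
}
```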
@@ -5985,10 +6272,10 @@ function createPipelineExecutor(options) {
|
|
|
5985
6272
|
}
|
|
5986
6273
|
|
|
5987
6274
|
/**
|
|
5988
|
-
*
|
|
6275
|
+
* Register for LLM tools.
|
|
5989
6276
|
*
|
|
5990
6277
|
* Note: `$` is used to indicate that this interacts with the global scope
|
|
5991
|
-
* @singleton Only one instance of each register is created per build, but
|
|
6278
|
+
* @singleton Only one instance of each register is created per build, but there can be more instances across different builds or environments.
|
|
5992
6279
|
* @public exported from `@promptbook/core`
|
|
5993
6280
|
*/
|
|
5994
6281
|
const $llmToolsRegister = new $Register('llm_execution_tools_constructors');
|
|
@@ -5997,10 +6284,10 @@ const $llmToolsRegister = new $Register('llm_execution_tools_constructors');
|
|
|
5997
6284
|
*/
|
|
5998
6285
|
|
|
5999
6286
|
/**
|
|
6000
|
-
*
|
|
6287
|
+
* Register for LLM tools metadata.
|
|
6001
6288
|
*
|
|
6002
6289
|
* Note: `$` is used to indicate that this interacts with the global scope
|
|
6003
|
-
* @singleton Only one instance of each register is created per build, but
|
|
6290
|
+
* @singleton Only one instance of each register is created per build, but there can be more instances across different builds or environments.
|
|
6004
6291
|
* @public exported from `@promptbook/core`
|
|
6005
6292
|
*/
|
|
6006
6293
|
const $llmToolsMetadataRegister = new $Register('llm_tools_metadata');
|
|
@@ -6133,11 +6420,16 @@ function $registeredLlmToolsMessage() {
|
|
|
6133
6420
|
*/
|
|
6134
6421
|
|
|
6135
6422
|
/**
|
|
6136
|
-
*
|
|
6423
|
+
* Creates LLM execution tools from provided configuration objects
|
|
6424
|
+
*
|
|
6425
|
+
* Instantiates and configures LLM tool instances for each configuration entry,
|
|
6426
|
+
* combining them into a unified interface via MultipleLlmExecutionTools.
|
|
6137
6427
|
*
|
|
6138
6428
|
* Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
|
|
6139
6429
|
*
|
|
6140
|
-
* @
|
|
6430
|
+
* @param configuration Array of LLM tool configurations to instantiate
|
|
6431
|
+
* @param options Additional options for configuring the LLM tools
|
|
6432
|
+
* @returns A unified interface combining all successfully instantiated LLM tools
|
|
6141
6433
|
* @public exported from `@promptbook/core`
|
|
6142
6434
|
*/
|
|
6143
6435
|
function createLlmToolsFromConfiguration(configuration, options = {}) {
|
|
@@ -6176,7 +6468,11 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
|
|
|
6176
6468
|
/**
|
|
6177
6469
|
* TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
|
|
6178
6470
|
* TODO: [🧠][🎌] Dynamically install required providers
|
|
6179
|
-
* TODO:
|
|
6471
|
+
* TODO: We should implement an interactive configuration wizard that would:
|
|
6472
|
+
* 1. Detect which LLM providers are available in the environment
|
|
6473
|
+
* 2. Guide users through required configuration settings for each provider
|
|
6474
|
+
* 3. Allow testing connections before completing setup
|
|
6475
|
+
* 4. Generate appropriate configuration code for application integration
|
|
6180
6476
|
* TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
|
|
6181
6477
|
* TODO: [🧠] Is there some meaningfull way how to test this util
|
|
6182
6478
|
* TODO: This should be maybe not under `_common` but under `utils`
|
|
@@ -6184,11 +6480,9 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
|
|
|
6184
6480
|
*/
|
|
6185
6481
|
|
|
6186
6482
|
/**
|
|
6187
|
-
*
|
|
6188
|
-
*
|
|
6189
|
-
*
|
|
6190
|
-
* 2) @@@
|
|
6191
|
-
*
|
|
6483
|
+
* Provides a collection of scrapers optimized for Node.js environment.
|
|
6484
|
+
* 1) `provideScrapersForNode` use as default
|
|
6485
|
+
* 2) `provideScrapersForBrowser` use in limited browser environment *
|
|
6192
6486
|
* @public exported from `@promptbook/node`
|
|
6193
6487
|
*/
|
|
6194
6488
|
async function $provideScrapersForNode(tools, options) {
|
|
@@ -6213,11 +6507,11 @@ async function $provideScrapersForNode(tools, options) {
|
|
|
6213
6507
|
*/
|
|
6214
6508
|
|
|
6215
6509
|
/**
|
|
6216
|
-
*
|
|
6510
|
+
* Normalizes a given text to camelCase format.
|
|
6217
6511
|
*
|
|
6218
|
-
* @param text
|
|
6219
|
-
* @param _isFirstLetterCapital
|
|
6220
|
-
* @returns
|
|
6512
|
+
* @param text The text to be normalized.
|
|
6513
|
+
* @param _isFirstLetterCapital Whether the first letter should be capitalized.
|
|
6514
|
+
* @returns The camelCase formatted string.
|
|
6221
6515
|
* @example 'helloWorld'
|
|
6222
6516
|
* @example 'iLovePromptbook'
|
|
6223
6517
|
* @public exported from `@promptbook/utils`
|
|
@@ -6347,11 +6641,11 @@ function parseKeywordsFromString(input) {
|
|
|
6347
6641
|
}
|
|
6348
6642
|
|
|
6349
6643
|
/**
|
|
6350
|
-
*
|
|
6644
|
+
* Converts a name string into a URI-compatible format.
|
|
6351
6645
|
*
|
|
6352
|
-
* @param name
|
|
6353
|
-
* @returns
|
|
6354
|
-
* @example
|
|
6646
|
+
* @param name The string to be converted to a URI-compatible format.
|
|
6647
|
+
* @returns A URI-compatible string derived from the input name.
|
|
6648
|
+
* @example 'Hello World' -> 'hello-world'
|
|
6355
6649
|
* @public exported from `@promptbook/utils`
|
|
6356
6650
|
*/
|
|
6357
6651
|
function nameToUriPart(name) {
|
|
@@ -6365,11 +6659,11 @@ function nameToUriPart(name) {
|
|
|
6365
6659
|
}
|
|
6366
6660
|
|
|
6367
6661
|
/**
|
|
6368
|
-
*
|
|
6662
|
+
* Converts a given name into URI-compatible parts.
|
|
6369
6663
|
*
|
|
6370
|
-
* @param name
|
|
6371
|
-
* @returns
|
|
6372
|
-
* @example
|
|
6664
|
+
* @param name The name to be converted into URI parts.
|
|
6665
|
+
* @returns An array of URI-compatible parts derived from the name.
|
|
6666
|
+
* @example 'Example Name' -> ['example', 'name']
|
|
6373
6667
|
* @public exported from `@promptbook/utils`
|
|
6374
6668
|
*/
|
|
6375
6669
|
function nameToUriParts(name) {
|
|
@@ -6827,15 +7121,15 @@ async function $provideScriptingForNode(options) {
 * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
 */

-// TODO:
-// TODO:
+// TODO: [🥺] List running services from REMOTE_SERVER_URLS
+// TODO: [🥺] Import directly from YML
 /**
-* @private
+* @private [🥺] Decide how to expose this
 */
 const openapiJson = {
 openapi: '3.0.0',
 info: {
-title: 'Promptbook Remote Server API (
+title: 'Promptbook Remote Server API ([🥺] From YML)',
 version: '1.0.0',
 description: 'API documentation for the Promptbook Remote Server',
 },
@@ -6847,6 +7141,13 @@ const openapiJson = {
 responses: {
 '200': {
 description: 'Server details in markdown format.',
+content: {
+'text/markdown': {
+schema: {
+type: 'string',
+},
+},
+},
 },
 },
 },
@@ -6877,13 +7178,22 @@ const openapiJson = {
|
|
|
6877
7178
|
},
|
|
6878
7179
|
},
|
|
6879
7180
|
responses: {
|
|
6880
|
-
'
|
|
7181
|
+
'201': {
|
|
6881
7182
|
description: 'Successful login',
|
|
6882
7183
|
content: {
|
|
6883
7184
|
'application/json': {
|
|
6884
7185
|
schema: {
|
|
6885
7186
|
type: 'object',
|
|
6886
7187
|
properties: {
|
|
7188
|
+
isSuccess: {
|
|
7189
|
+
type: 'boolean',
|
|
7190
|
+
},
|
|
7191
|
+
message: {
|
|
7192
|
+
type: 'string',
|
|
7193
|
+
},
|
|
7194
|
+
error: {
|
|
7195
|
+
type: 'object',
|
|
7196
|
+
},
|
|
6887
7197
|
identification: {
|
|
6888
7198
|
type: 'object',
|
|
6889
7199
|
},
|
|
@@ -6892,6 +7202,43 @@ const openapiJson = {
 },
 },
 },
+ '400': {
+ description: 'Bad request or login failed',
+ content: {
+ 'application/json': {
+ schema: {
+ type: 'object',
+ properties: {
+ error: {
+ type: 'object',
+ },
+ },
+ },
+ },
+ },
+ },
+ '401': {
+ description: 'Authentication error',
+ content: {
+ 'application/json': {
+ schema: {
+ type: 'object',
+ properties: {
+ isSuccess: {
+ type: 'boolean',
+ enum: [false],
+ },
+ message: {
+ type: 'string',
+ },
+ error: {
+ type: 'object',
+ },
+ },
+ },
+ },
+ },
+ },
 },
 },
 },
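The two hunks above flesh out the login operation's responses: a 201 with `isSuccess`, `message`, `error` and `identification`, plus 400 and 401 error shapes. The route itself is outside the visible hunks, so the `/login` path and the request-body fields below are assumptions used only for illustration:

```ts
// Placeholder base URL, path and credential fields; only the documented response
// fields (isSuccess, message, error, identification) come from the spec above.
const REMOTE_SERVER_URL = 'http://localhost:4460';

async function login(username: string, password: string) {
    const response = await fetch(`${REMOTE_SERVER_URL}/login`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ username, password }),
    });

    const body = await response.json();

    if (response.status === 201 && body.isSuccess) {
        return body.identification; // opaque identification object per the 201 schema
    }

    // 400 and 401 both carry an `error` object; 401 additionally fixes isSuccess to false
    throw new Error(body.message ?? 'Login failed');
}
```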
@@ -6913,6 +7260,16 @@ const openapiJson = {
 },
 },
 },
+ '500': {
+ description: 'No collection available',
+ content: {
+ 'text/plain': {
+ schema: {
+ type: 'string',
+ },
+ },
+ },
+ },
 },
 },
 },
@@ -6944,6 +7301,28 @@ const openapiJson = {
 },
 '404': {
 description: 'Book not found.',
+ content: {
+ 'application/json': {
+ schema: {
+ type: 'object',
+ properties: {
+ error: {
+ type: 'object',
+ },
+ },
+ },
+ },
+ },
+ },
+ '500': {
+ description: 'No collection available',
+ content: {
+ 'text/plain': {
+ schema: {
+ type: 'string',
+ },
+ },
+ },
 },
 },
 },
@@ -6961,6 +7340,28 @@ const openapiJson = {
 type: 'array',
 items: {
 type: 'object',
+ properties: {
+ nonce: {
+ type: 'string',
+ },
+ taskId: {
+ type: 'string',
+ },
+ taskType: {
+ type: 'string',
+ },
+ status: {
+ type: 'string',
+ },
+ createdAt: {
+ type: 'string',
+ format: 'date-time',
+ },
+ updatedAt: {
+ type: 'string',
+ format: 'date-time',
+ },
+ },
 },
 },
 },
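The list items documented above carry the same fields as the `ExecutionTaskSummary` schema added later under `components`. A TypeScript mirror of that documented shape, as a sketch only — the package's own typings remain the source of truth:

```ts
// Field names and formats are taken from the schema above; the interface name is
// borrowed from the ExecutionTaskSummary component schema defined further below.
interface ExecutionTaskSummary {
    nonce: string;
    taskId: string;
    taskType: string;
    status: string;
    createdAt: string; // date-time
    updatedAt: string; // date-time
}
```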
@@ -6969,6 +7370,147 @@ const openapiJson = {
 },
 },
 },
+ '/executions/last': {
+ get: {
+ summary: 'Get the last execution',
+ description: 'Returns details of the last execution task.',
+ responses: {
+ '200': {
+ description: 'The last execution task with full details.',
+ content: {
+ 'application/json': {
+ schema: {
+ type: 'object',
+ properties: {
+ nonce: {
+ type: 'string',
+ },
+ taskId: {
+ type: 'string',
+ },
+ taskType: {
+ type: 'string',
+ },
+ status: {
+ type: 'string',
+ },
+ errors: {
+ type: 'array',
+ items: {
+ type: 'object',
+ },
+ },
+ warnings: {
+ type: 'array',
+ items: {
+ type: 'object',
+ },
+ },
+ createdAt: {
+ type: 'string',
+ format: 'date-time',
+ },
+ updatedAt: {
+ type: 'string',
+ format: 'date-time',
+ },
+ currentValue: {
+ type: 'object',
+ },
+ },
+ },
+ },
+ },
+ },
+ '404': {
+ description: 'No execution tasks found.',
+ content: {
+ 'text/plain': {
+ schema: {
+ type: 'string',
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ '/executions/{taskId}': {
+ get: {
+ summary: 'Get specific execution',
+ description: 'Returns details of a specific execution task.',
+ parameters: [
+ {
+ in: 'path',
+ name: 'taskId',
+ required: true,
+ schema: {
+ type: 'string',
+ },
+ description: 'The ID of the execution task to retrieve.',
+ },
+ ],
+ responses: {
+ '200': {
+ description: 'The execution task with full details.',
+ content: {
+ 'application/json': {
+ schema: {
+ type: 'object',
+ properties: {
+ nonce: {
+ type: 'string',
+ },
+ taskId: {
+ type: 'string',
+ },
+ taskType: {
+ type: 'string',
+ },
+ status: {
+ type: 'string',
+ },
+ errors: {
+ type: 'array',
+ items: {
+ type: 'object',
+ },
+ },
+ warnings: {
+ type: 'array',
+ items: {
+ type: 'object',
+ },
+ },
+ createdAt: {
+ type: 'string',
+ format: 'date-time',
+ },
+ updatedAt: {
+ type: 'string',
+ format: 'date-time',
+ },
+ currentValue: {
+ type: 'object',
+ },
+ },
+ },
+ },
+ },
+ },
+ '404': {
+ description: 'Execution task not found.',
+ content: {
+ 'text/plain': {
+ schema: {
+ type: 'string',
+ },
+ },
+ },
+ },
+ },
+ },
+ },
 '/executions/new': {
 post: {
 summary: 'Start a new execution',
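The new `/executions/last` and `/executions/{taskId}` routes both return the full task shape (`nonce`, `taskId`, `taskType`, `status`, `errors`, `warnings`, timestamps and `currentValue`). A minimal client sketch against a placeholder server URL:

```ts
// Placeholder base URL; substitute the real remote-server address.
const REMOTE_SERVER_URL = 'http://localhost:4460';

async function getLastExecution() {
    const response = await fetch(`${REMOTE_SERVER_URL}/executions/last`);
    if (response.status === 404) {
        return null; // 'No execution tasks found.'
    }
    return response.json(); // nonce, taskId, taskType, status, errors, warnings, createdAt, updatedAt, currentValue
}

async function getExecution(taskId: string) {
    const response = await fetch(`${REMOTE_SERVER_URL}/executions/${encodeURIComponent(taskId)}`);
    if (response.status === 404) {
        throw new Error(`Execution task ${taskId} not found`);
    }
    return response.json();
}
```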
@@ -6982,12 +7524,19 @@ const openapiJson = {
 properties: {
 pipelineUrl: {
 type: 'string',
+ description: 'URL of the pipeline to execute',
+ },
+ book: {
+ type: 'string',
+ description: 'Alternative field for pipelineUrl',
 },
 inputParameters: {
 type: 'object',
+ description: 'Parameters for pipeline execution',
 },
 identification: {
 type: 'object',
+ description: 'User identification data',
 },
 },
 },
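The request body for `POST /executions/new` now documents its fields: `pipelineUrl` (with `book` as an alternative field), `inputParameters` and `identification`. A hedged client sketch; the example values are placeholders and the exact contents of `inputParameters` and `identification` depend on the pipeline and on how the caller authenticated:

```ts
// Placeholder base URL and values; only the field names come from the spec above.
const REMOTE_SERVER_URL = 'http://localhost:4460';

async function startNewExecution() {
    const response = await fetch(`${REMOTE_SERVER_URL}/executions/new`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            pipelineUrl: 'https://example.com/my-pipeline.book', // or `book` as the alternative field
            inputParameters: {}, // parameters for pipeline execution
            identification: {}, // user identification data
        }),
    });

    if (!response.ok) {
        // The following hunk documents 400 'Invalid input.' and 404 'Pipeline not found.'
        throw new Error(`Starting the execution failed with status ${response.status}`);
    }

    return response.json();
}
```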
@@ -7007,13 +7556,164 @@ const openapiJson = {
 },
 '400': {
 description: 'Invalid input.',
+ content: {
+ 'application/json': {
+ schema: {
+ type: 'object',
+ properties: {
+ error: {
+ type: 'object',
+ },
+ },
+ },
+ },
+ },
+ },
+ '404': {
+ description: 'Pipeline not found.',
+ content: {
+ 'text/plain': {
+ schema: {
+ type: 'string',
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ '/api-docs': {
+ get: {
+ summary: 'API documentation UI',
+ description: 'Swagger UI for API documentation',
+ responses: {
+ '200': {
+ description: 'HTML Swagger UI',
+ },
+ },
+ },
+ },
+ '/swagger': {
+ get: {
+ summary: 'API documentation UI (alternative path)',
+ description: 'Swagger UI for API documentation',
+ responses: {
+ '200': {
+ description: 'HTML Swagger UI',
+ },
+ },
+ },
+ },
+ '/openapi': {
+ get: {
+ summary: 'OpenAPI specification',
+ description: 'Returns the OpenAPI JSON specification',
+ responses: {
+ '200': {
+ description: 'OpenAPI specification',
+ content: {
+ 'application/json': {
+ schema: {
+ type: 'object',
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ components: {
+ schemas: {
+ Error: {
+ type: 'object',
+ properties: {
+ error: {
+ type: 'object',
+ },
+ },
+ },
+ ExecutionTaskSummary: {
+ type: 'object',
+ properties: {
+ nonce: {
+ type: 'string',
+ },
+ taskId: {
+ type: 'string',
+ },
+ taskType: {
+ type: 'string',
+ },
+ status: {
+ type: 'string',
+ },
+ createdAt: {
+ type: 'string',
+ format: 'date-time',
+ },
+ updatedAt: {
+ type: 'string',
+ format: 'date-time',
+ },
+ },
+ },
+ ExecutionTaskFull: {
+ type: 'object',
+ properties: {
+ nonce: {
+ type: 'string',
+ },
+ taskId: {
+ type: 'string',
+ },
+ taskType: {
+ type: 'string',
+ },
+ status: {
+ type: 'string',
+ },
+ errors: {
+ type: 'array',
+ items: {
+ type: 'object',
+ },
+ },
+ warnings: {
+ type: 'array',
+ items: {
+ type: 'object',
+ },
+ },
+ createdAt: {
+ type: 'string',
+ format: 'date-time',
+ },
+ updatedAt: {
+ type: 'string',
+ format: 'date-time',
+ },
+ currentValue: {
+ type: 'object',
 },
 },
 },
 },
 },
-
-
+ tags: [
+ {
+ name: 'Books',
+ description: 'Operations related to books and pipelines',
+ },
+ {
+ name: 'Executions',
+ description: 'Operations related to execution tasks',
+ },
+ {
+ name: 'Authentication',
+ description: 'Authentication operations',
+ },
+ ],
 };
 /**
 * Note: [💞] Ignore a discrepancy between file name and entity name
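With the `/openapi` route documented above, a client can pull the whole specification at runtime (while `/api-docs` and `/swagger` serve the interactive UI). A minimal sketch with a placeholder server URL:

```ts
// Placeholder base URL; substitute the real remote-server address.
const REMOTE_SERVER_URL = 'http://localhost:4460';

async function fetchOpenApiSpec() {
    const response = await fetch(`${REMOTE_SERVER_URL}/openapi`);
    const spec = await response.json();

    // e.g. '/executions/last', '/executions/{taskId}', '/executions/new', '/api-docs', ...
    console.log(Object.keys(spec.paths ?? {}));
    return spec;
}
```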
@@ -7087,7 +7787,7 @@ function startRemoteServer(options) {
 response.setHeader('X-Powered-By', 'Promptbook engine');
 next();
 });
- // TODO:
+ // TODO: [🥺] Expose openapiJson to consumer and also allow to add new routes
 app.use(OpenApiValidator.middleware({
 apiSpec: openapiJson,
 ignorePaths(path) {
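The server validates incoming requests against `openapiJson` through `express-openapi-validator`; the body of `ignorePaths` is outside the visible hunk. A self-contained sketch of the same wiring, where the stand-in spec, the ignored path prefix and the port are assumptions:

```ts
import express from 'express';
import * as OpenApiValidator from 'express-openapi-validator';

// Tiny stand-in spec; the real server passes the much larger `openapiJson` object shown above.
const apiSpec = {
    openapi: '3.0.0',
    info: { title: 'Promptbook Remote Server API', version: '1.0.0' },
    paths: {},
};

const app = express();
app.use(express.json());

app.use(
    OpenApiValidator.middleware({
        apiSpec: apiSpec as any, // cast kept loose only for this sketch
        // The real ignorePaths body is not visible in the hunk; skipping paths the spec
        // does not describe (e.g. the socket.io endpoint) is one plausible choice.
        ignorePaths: (path: string) => path.startsWith('/socket.io'),
    }),
);

app.listen(4460); // placeholder port
```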
@@ -7384,6 +8084,7 @@ function startRemoteServer(options) {
 promptResult = await llm.callCompletionModel(prompt);
 break;
 case 'EMBEDDING':
+ console.log('!!! llm (EMBEDDING)', llm);
 if (llm.callEmbeddingModel === undefined) {
 // Note: [0] This check should not be a thing
 throw new PipelineExecutionError(`Embedding model is not available`);