@promptbook/core 0.92.0-3 → 0.92.0-31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (172) hide show
  1. package/esm/index.es.js +990 -467
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/browser.index.d.ts +2 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +22 -6
  5. package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/google.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +4 -2
  8. package/esm/typings/src/_packages/utils.index.d.ts +2 -0
  9. package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +1 -1
  10. package/esm/typings/src/collection/PipelineCollection.d.ts +0 -2
  11. package/esm/typings/src/collection/SimplePipelineCollection.d.ts +1 -1
  12. package/esm/typings/src/commands/FOREACH/ForeachJson.d.ts +6 -6
  13. package/esm/typings/src/commands/FOREACH/foreachCommandParser.d.ts +0 -2
  14. package/esm/typings/src/commands/FORMFACTOR/formfactorCommandParser.d.ts +1 -1
  15. package/esm/typings/src/commands/_BOILERPLATE/boilerplateCommandParser.d.ts +1 -1
  16. package/esm/typings/src/commands/_common/types/CommandParser.d.ts +36 -28
  17. package/esm/typings/src/config.d.ts +41 -11
  18. package/esm/typings/src/constants.d.ts +43 -2
  19. package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
  20. package/esm/typings/src/errors/0-BoilerplateError.d.ts +2 -2
  21. package/esm/typings/src/executables/$provideExecutablesForNode.d.ts +1 -1
  22. package/esm/typings/src/executables/apps/locateLibreoffice.d.ts +2 -1
  23. package/esm/typings/src/executables/apps/locatePandoc.d.ts +2 -1
  24. package/esm/typings/src/executables/locateApp.d.ts +2 -2
  25. package/esm/typings/src/executables/platforms/locateAppOnLinux.d.ts +2 -1
  26. package/esm/typings/src/executables/platforms/locateAppOnMacOs.d.ts +2 -1
  27. package/esm/typings/src/executables/platforms/locateAppOnWindows.d.ts +2 -1
  28. package/esm/typings/src/execution/AbstractTaskResult.d.ts +1 -1
  29. package/esm/typings/src/execution/CommonToolsOptions.d.ts +5 -1
  30. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +2 -1
  31. package/esm/typings/src/execution/PipelineExecutorResult.d.ts +4 -2
  32. package/esm/typings/src/execution/createPipelineExecutor/$OngoingTaskResult.d.ts +12 -9
  33. package/esm/typings/src/execution/createPipelineExecutor/10-executePipeline.d.ts +13 -10
  34. package/esm/typings/src/execution/createPipelineExecutor/20-executeTask.d.ts +12 -9
  35. package/esm/typings/src/execution/createPipelineExecutor/30-executeFormatSubvalues.d.ts +15 -3
  36. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +20 -14
  37. package/esm/typings/src/execution/createPipelineExecutor/computeCosineSimilarity.d.ts +13 -0
  38. package/esm/typings/src/execution/createPipelineExecutor/filterJustOutputParameters.d.ts +7 -6
  39. package/esm/typings/src/execution/createPipelineExecutor/getContextForTask.d.ts +5 -1
  40. package/esm/typings/src/execution/createPipelineExecutor/getExamplesForTask.d.ts +1 -1
  41. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +21 -5
  42. package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +19 -5
  43. package/esm/typings/src/execution/createPipelineExecutor/knowledgePiecesToString.d.ts +9 -0
  44. package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +4 -4
  45. package/esm/typings/src/execution/utils/checkExpectations.d.ts +1 -1
  46. package/esm/typings/src/execution/utils/uncertainNumber.d.ts +3 -2
  47. package/esm/typings/src/formats/_common/{FormatDefinition.d.ts → FormatParser.d.ts} +8 -6
  48. package/esm/typings/src/formats/_common/FormatSubvalueParser.d.ts +66 -0
  49. package/esm/typings/src/formats/csv/CsvFormatParser.d.ts +17 -0
  50. package/esm/typings/src/formats/csv/CsvSettings.d.ts +2 -2
  51. package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
  52. package/esm/typings/src/formats/csv/utils/isValidCsvString.d.ts +1 -1
  53. package/esm/typings/src/formats/index.d.ts +2 -2
  54. package/esm/typings/src/formats/json/{JsonFormatDefinition.d.ts → JsonFormatParser.d.ts} +6 -6
  55. package/esm/typings/src/formats/json/utils/isValidJsonString.d.ts +1 -1
  56. package/esm/typings/src/formats/json/utils/jsonParse.d.ts +8 -0
  57. package/esm/typings/src/formats/text/{TextFormatDefinition.d.ts → TextFormatParser.d.ts} +7 -7
  58. package/esm/typings/src/formats/xml/XmlFormatParser.d.ts +19 -0
  59. package/esm/typings/src/formats/xml/utils/isValidXmlString.d.ts +1 -1
  60. package/esm/typings/src/formfactors/_boilerplate/BoilerplateFormfactorDefinition.d.ts +3 -2
  61. package/esm/typings/src/formfactors/_common/AbstractFormfactorDefinition.d.ts +16 -7
  62. package/esm/typings/src/formfactors/_common/FormfactorDefinition.d.ts +3 -1
  63. package/esm/typings/src/formfactors/_common/string_formfactor_name.d.ts +2 -1
  64. package/esm/typings/src/formfactors/chatbot/ChatbotFormfactorDefinition.d.ts +2 -2
  65. package/esm/typings/src/formfactors/completion/CompletionFormfactorDefinition.d.ts +29 -0
  66. package/esm/typings/src/formfactors/generator/GeneratorFormfactorDefinition.d.ts +2 -1
  67. package/esm/typings/src/formfactors/generic/GenericFormfactorDefinition.d.ts +2 -2
  68. package/esm/typings/src/formfactors/index.d.ts +33 -8
  69. package/esm/typings/src/formfactors/matcher/MatcherFormfactorDefinition.d.ts +4 -2
  70. package/esm/typings/src/formfactors/sheets/SheetsFormfactorDefinition.d.ts +3 -2
  71. package/esm/typings/src/formfactors/translator/TranslatorFormfactorDefinition.d.ts +3 -2
  72. package/esm/typings/src/high-level-abstractions/index.d.ts +2 -2
  73. package/esm/typings/src/llm-providers/_common/register/$llmToolsMetadataRegister.d.ts +3 -3
  74. package/esm/typings/src/llm-providers/_common/register/$llmToolsRegister.d.ts +3 -3
  75. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +4 -4
  76. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +4 -3
  77. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsFromEnv.d.ts +17 -4
  78. package/esm/typings/src/llm-providers/_common/register/LlmToolsConfiguration.d.ts +11 -4
  79. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +27 -5
  80. package/esm/typings/src/llm-providers/_common/register/LlmToolsOptions.d.ts +9 -2
  81. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +12 -3
  82. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +10 -5
  83. package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +5 -3
  84. package/esm/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +3 -3
  85. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/limitTotalUsage.d.ts +5 -5
  86. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  87. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
  88. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
  89. package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
  90. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
  91. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  92. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
  93. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
  94. package/esm/typings/src/migrations/migratePipeline.d.ts +9 -0
  95. package/esm/typings/src/other/templates/getBookTemplates.d.ts +2 -2
  96. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  97. package/esm/typings/src/pipeline/PipelineInterface/PipelineInterface.d.ts +3 -3
  98. package/esm/typings/src/pipeline/PipelineInterface/constants.d.ts +1 -1
  99. package/esm/typings/src/pipeline/PipelineInterface/getPipelineInterface.d.ts +1 -1
  100. package/esm/typings/src/pipeline/PipelineInterface/isPipelineImplementingInterface.d.ts +5 -4
  101. package/esm/typings/src/pipeline/PipelineInterface/isPipelineInterfacesEqual.d.ts +1 -1
  102. package/esm/typings/src/pipeline/PipelineJson/CommonTaskJson.d.ts +9 -6
  103. package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
  104. package/esm/typings/src/pipeline/PipelineJson/PipelineJson.d.ts +3 -2
  105. package/esm/typings/src/pipeline/PipelineString.d.ts +3 -1
  106. package/esm/typings/src/pipeline/book-notation.d.ts +2 -2
  107. package/esm/typings/src/postprocessing/utils/extractJsonBlock.d.ts +1 -1
  108. package/esm/typings/src/prepare/prepareTasks.d.ts +7 -4
  109. package/esm/typings/src/remote-server/openapi-types.d.ts +348 -6
  110. package/esm/typings/src/remote-server/openapi.d.ts +398 -4
  111. package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +2 -1
  112. package/esm/typings/src/scrapers/_boilerplate/BoilerplateScraper.d.ts +3 -3
  113. package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
  114. package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +1 -1
  115. package/esm/typings/src/scrapers/_common/Converter.d.ts +3 -1
  116. package/esm/typings/src/scrapers/_common/Scraper.d.ts +4 -3
  117. package/esm/typings/src/scrapers/_common/ScraperIntermediateSource.d.ts +4 -2
  118. package/esm/typings/src/scrapers/_common/register/$provideFilesystemForNode.d.ts +2 -1
  119. package/esm/typings/src/scrapers/_common/register/$provideScrapersForBrowser.d.ts +6 -3
  120. package/esm/typings/src/scrapers/_common/register/$provideScrapersForNode.d.ts +3 -5
  121. package/esm/typings/src/scrapers/_common/register/$scrapersMetadataRegister.d.ts +3 -3
  122. package/esm/typings/src/scrapers/_common/register/$scrapersRegister.d.ts +3 -2
  123. package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +8 -5
  124. package/esm/typings/src/scrapers/_common/register/ScraperConstructor.d.ts +2 -1
  125. package/esm/typings/src/scrapers/_common/utils/getScraperIntermediateSource.d.ts +6 -5
  126. package/esm/typings/src/scrapers/_common/utils/makeKnowledgeSourceHandler.d.ts +3 -1
  127. package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
  128. package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +2 -1
  129. package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +4 -1
  130. package/esm/typings/src/scrapers/markitdown/MarkitdownScraper.d.ts +1 -1
  131. package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +2 -1
  132. package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +3 -4
  133. package/esm/typings/src/scripting/javascript/postprocessing-functions.d.ts +5 -1
  134. package/esm/typings/src/storage/file-cache-storage/FileCacheStorage.d.ts +12 -5
  135. package/esm/typings/src/storage/file-cache-storage/FileCacheStorageOptions.d.ts +4 -2
  136. package/esm/typings/src/storage/file-cache-storage/utils/nameToSubfolderPath.d.ts +2 -1
  137. package/esm/typings/src/storage/local-storage/getIndexedDbStorage.d.ts +10 -0
  138. package/esm/typings/src/storage/local-storage/utils/makePromptbookStorageFromIndexedDb.d.ts +7 -0
  139. package/esm/typings/src/storage/local-storage/utils/makePromptbookStorageFromWebStorage.d.ts +2 -1
  140. package/esm/typings/src/types/IntermediateFilesStrategy.d.ts +2 -1
  141. package/esm/typings/src/types/ModelVariant.d.ts +5 -5
  142. package/esm/typings/src/types/typeAliases.d.ts +17 -13
  143. package/esm/typings/src/utils/$Register.d.ts +8 -7
  144. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +2 -2
  145. package/esm/typings/src/utils/editable/edit-pipeline-string/deflatePipeline.d.ts +4 -1
  146. package/esm/typings/src/utils/editable/utils/isFlatPipeline.d.ts +2 -1
  147. package/esm/typings/src/utils/environment/$getGlobalScope.d.ts +2 -1
  148. package/esm/typings/src/utils/expectation-counters/index.d.ts +1 -1
  149. package/esm/typings/src/utils/markdown/extractAllListItemsFromMarkdown.d.ts +1 -1
  150. package/esm/typings/src/utils/normalization/nameToUriPart.d.ts +4 -4
  151. package/esm/typings/src/utils/normalization/nameToUriParts.d.ts +4 -4
  152. package/esm/typings/src/utils/normalization/normalize-to-kebab-case.d.ts +3 -3
  153. package/esm/typings/src/utils/normalization/normalizeTo_SCREAMING_CASE.d.ts +3 -3
  154. package/esm/typings/src/utils/normalization/normalizeTo_camelCase.d.ts +4 -4
  155. package/esm/typings/src/utils/normalization/normalizeTo_snake_case.d.ts +3 -3
  156. package/esm/typings/src/utils/normalization/removeDiacritics.d.ts +3 -3
  157. package/esm/typings/src/utils/normalization/searchKeywords.d.ts +4 -1
  158. package/esm/typings/src/utils/normalization/titleToName.d.ts +4 -4
  159. package/esm/typings/src/utils/organization/empty_object.d.ts +2 -2
  160. package/esm/typings/src/utils/organization/just_empty_object.d.ts +4 -4
  161. package/esm/typings/src/utils/parameters/mapAvailableToExpectedParameters.d.ts +7 -7
  162. package/esm/typings/src/utils/serialization/clonePipeline.d.ts +4 -3
  163. package/esm/typings/src/utils/serialization/deepClone.d.ts +5 -1
  164. package/esm/typings/src/utils/validators/javascriptName/isValidJavascriptName.d.ts +3 -3
  165. package/esm/typings/src/utils/validators/parameterName/validateParameterName.d.ts +5 -4
  166. package/esm/typings/src/version.d.ts +2 -1
  167. package/package.json +1 -1
  168. package/umd/index.umd.js +1008 -477
  169. package/umd/index.umd.js.map +1 -1
  170. package/esm/typings/src/formats/_common/FormatSubvalueDefinition.d.ts +0 -31
  171. package/esm/typings/src/formats/csv/CsvFormatDefinition.d.ts +0 -17
  172. package/esm/typings/src/formats/xml/XmlFormatDefinition.d.ts +0 -19
package/esm/index.es.js CHANGED
@@ -1,8 +1,8 @@
1
1
  import spaceTrim, { spaceTrim as spaceTrim$1 } from 'spacetrim';
2
2
  import { format } from 'prettier';
3
3
  import parserHtml from 'prettier/parser-html';
4
- import { Subject } from 'rxjs';
5
4
  import { randomBytes } from 'crypto';
5
+ import { Subject } from 'rxjs';
6
6
  import { forTime } from 'waitasecond';
7
7
  import { parse, unparse } from 'papaparse';
8
8
  import hexEncoder from 'crypto-js/enc-hex';
@@ -27,7 +27,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
27
27
  * @generated
28
28
  * @see https://github.com/webgptorg/promptbook
29
29
  */
30
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-3';
30
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-31';
31
31
  /**
32
32
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
33
33
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -274,6 +274,28 @@ const DEFAULT_BOOK_OUTPUT_PARAMETER_NAME = 'result';
274
274
  * @public exported from `@promptbook/core`
275
275
  */
276
276
  const DEFAULT_MAX_FILE_SIZE = 100 * 1024 * 1024; // 100MB
277
+ /**
278
+ * Threshold value that determines when a dataset is considered "big"
279
+ * and may require special handling or optimizations
280
+ *
281
+ * For example, when an error occurs in one item of a big dataset, it will not fail the whole pipeline
282
+ *
283
+ * @public exported from `@promptbook/core`
284
+ */
285
+ const BIG_DATASET_TRESHOLD = 50;
286
+ /**
287
+ * Placeholder text used to represent a placeholder value of failed operation
288
+ *
289
+ * @public exported from `@promptbook/core`
290
+ */
291
+ const FAILED_VALUE_PLACEHOLDER = '!?';
292
+ /**
293
+ * Placeholder text used to represent operations or values that are still in progress
294
+ * or awaiting completion in UI displays and logging
295
+ *
296
+ * @public exported from `@promptbook/core`
297
+ */
298
+ const PENDING_VALUE_PLACEHOLDER = '…';
277
299
  // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
278
300
  /**
279
301
  * The maximum number of iterations for a loops
@@ -334,19 +356,21 @@ const DEFAULT_MAX_PARALLEL_COUNT = 5; // <- TODO: [🤹‍♂️]
334
356
  */
335
357
  const DEFAULT_MAX_EXECUTION_ATTEMPTS = 10; // <- TODO: [🤹‍♂️]
336
358
  /**
337
- * @@@
338
- * TODO: [🐝][main] !!3 Use
359
+ * The maximum depth to which knowledge sources will be scraped when building a knowledge base.
360
+ * This prevents infinite recursion and limits resource usage.
339
361
  *
340
362
  * @public exported from `@promptbook/core`
341
363
  */
342
364
  const DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH = 3;
365
+ // <- TODO: [🐝]
343
366
  /**
344
- * @@@
345
- * TODO: [🐝][main] !!3 Use
367
+ * The maximum total number of knowledge sources that will be scraped in a single operation.
368
+ * This acts as a global limit to avoid excessive resource consumption.
346
369
  *
347
370
  * @public exported from `@promptbook/core`
348
371
  */
349
372
  const DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL = 200;
373
+ // <- TODO: [🐝]
350
374
  /**
351
375
  * Where to store your books
352
376
  * This is kind of a "src" for your books
@@ -424,7 +448,7 @@ const MOMENT_ARG_THRESHOLDS = {
424
448
  const DEFAULT_REMOTE_SERVER_URL = REMOTE_SERVER_URLS[0].urls[0];
425
449
  // <- TODO: [🧜‍♂️]
426
450
  /**
427
- * @@@
451
+ * Default settings for parsing and generating CSV files in Promptbook.
428
452
  *
429
453
  * @public exported from `@promptbook/core`
430
454
  */
@@ -435,15 +459,15 @@ const DEFAULT_CSV_SETTINGS = Object.freeze({
435
459
  skipEmptyLines: true,
436
460
  });
437
461
  /**
438
- * @@@
462
+ * Controls whether verbose logging is enabled by default throughout the application.
439
463
  *
440
464
  * @public exported from `@promptbook/core`
441
465
  */
442
466
  let DEFAULT_IS_VERBOSE = false;
443
467
  /**
444
- * @@@
468
+ * Enables or disables verbose logging globally at runtime.
445
469
  *
446
- * Note: This is experimental feature
470
+ * Note: This is an experimental feature.
447
471
  *
448
472
  * @public exported from `@promptbook/core`
449
473
  */
@@ -451,7 +475,7 @@ function SET_IS_VERBOSE(isVerbose) {
451
475
  DEFAULT_IS_VERBOSE = isVerbose;
452
476
  }
453
477
  /**
454
- * @@@
478
+ * Controls whether auto-installation of dependencies is enabled by default.
455
479
  *
456
480
  * @public exported from `@promptbook/core`
457
481
  */
@@ -463,7 +487,15 @@ const DEFAULT_IS_AUTO_INSTALLED = false;
463
487
  */
464
488
  const DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME = `getPipelineCollection`;
465
489
  /**
466
- * @@@
490
+ * Default rate limits (requests per minute)
491
+ *
492
+ * Note: Adjust based on the provider tier you have
493
+ *
494
+ * @public exported from `@promptbook/core`
495
+ */
496
+ const DEFAULT_MAX_REQUESTS_PER_MINUTE = 60;
497
+ /**
498
+ * Indicates whether pipeline logic validation is enabled. When true, the pipeline logic is checked for consistency.
467
499
  *
468
500
  * @private within the repository
469
501
  */
@@ -596,7 +628,7 @@ function assertsError(whatWasThrown) {
596
628
  * Function isValidJsonString will tell you if the string is valid JSON or not
597
629
  *
598
630
  * @param value The string to check
599
- * @returns True if the string is a valid JSON string, false otherwise
631
+ * @returns `true` if the string is a valid JSON string, false otherwise
600
632
  *
601
633
  * @public exported from `@promptbook/utils`
602
634
  */
@@ -1007,8 +1039,12 @@ function checkSerializableAsJson(options) {
1007
1039
  */
1008
1040
 
1009
1041
  /**
1010
- * @@@
1042
+ * Creates a deep clone of the given object
1043
+ *
1044
+ * Note: This method only works for objects that are fully serializable to JSON and do not contain functions, Dates, or special types.
1011
1045
  *
1046
+ * @param objectValue The object to clone.
1047
+ * @returns A deep, writable clone of the input object.
1012
1048
  * @public exported from `@promptbook/utils`
1013
1049
  */
1014
1050
  function deepClone(objectValue) {
@@ -1063,6 +1099,42 @@ function exportJson(options) {
1063
1099
  * TODO: [🧠] Is there a way how to meaningfully test this utility
1064
1100
  */
1065
1101
 
1102
+ /**
1103
+ * How is the model provider trusted?
1104
+ *
1105
+ * @public exported from `@promptbook/core`
1106
+ */
1107
+ const MODEL_TRUST_LEVELS = {
1108
+ FULL: `Model is running on the local machine, training data and model weights are known, data are ethically sourced`,
1109
+ OPEN: `Model is open source, training data and model weights are known`,
1110
+ PARTIALLY_OPEN: `Model is open source, but training data and model weights are not (fully) known`,
1111
+ CLOSED_LOCAL: `Model can be run locally, but it is not open source`,
1112
+ CLOSED_FREE: `Model is behind API gateway but free to use`,
1113
+ CLOSED_BUSINESS: `Model is behind API gateway and paid but has good SLA, TOS, privacy policy and in general is a good to use in business applications`,
1114
+ CLOSED: `Model is behind API gateway and paid`,
1115
+ UNTRUSTED: `Model has questions about the training data and ethics, but it is not known if it is a problem or not`,
1116
+ VURNABLE: `Model has some known serious vulnerabilities, leaks, ethical problems, etc.`,
1117
+ };
1118
+ // <- TODO: Maybe do better levels of trust
1119
+ /**
1120
+ * How is the model provider important?
1121
+ *
1122
+ * @public exported from `@promptbook/core`
1123
+ */
1124
+ const MODEL_ORDERS = {
1125
+ /**
1126
+ * Top-tier models, e.g. OpenAI, Anthropic,...
1127
+ */
1128
+ TOP_TIER: 333,
1129
+ /**
1130
+ * Mid-tier models, e.g. Llama, Mistral, etc.
1131
+ */
1132
+ NORMAL: 100,
1133
+ /**
1134
+ * Low-tier models, e.g. Phi, Tiny, etc.
1135
+ */
1136
+ LOW_TIER: 0,
1137
+ };
1066
1138
  /**
1067
1139
  * Order of keys in the pipeline JSON
1068
1140
  *
@@ -1090,13 +1162,13 @@ const ORDER_OF_PIPELINE_JSON = [
1090
1162
  */
1091
1163
  const REPLACING_NONCE = 'ptbkauk42kV2dzao34faw7FudQUHYPtW';
1092
1164
  /**
1093
- * @@@
1165
+ * Placeholder value indicating a parameter is missing its value.
1094
1166
  *
1095
1167
  * @private within the repository
1096
1168
  */
1097
1169
  const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
1098
1170
  /**
1099
- * @@@
1171
+ * Placeholder value indicating a parameter is restricted and cannot be used directly.
1100
1172
  *
1101
1173
  * @private within the repository
1102
1174
  */
@@ -1554,7 +1626,7 @@ function extractParameterNames(template) {
1554
1626
  */
1555
1627
  function unpreparePipeline(pipeline) {
1556
1628
  let { personas, knowledgeSources, tasks } = pipeline;
1557
- personas = personas.map((persona) => ({ ...persona, modelRequirements: undefined, preparationIds: undefined }));
1629
+ personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
1558
1630
  knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
1559
1631
  tasks = tasks.map((task) => {
1560
1632
  let { dependentParameterNames } = task;
@@ -1595,7 +1667,7 @@ class SimplePipelineCollection {
1595
1667
  /**
1596
1668
  * Constructs a pipeline collection from pipelines
1597
1669
  *
1598
- * @param pipelines @@@
1670
+ * @param pipelines Array of pipeline JSON objects to include in the collection
1599
1671
  *
1600
1672
  * Note: During the construction logic of all pipelines are validated
1601
1673
  * Note: It is not recommended to use this constructor directly, use `createCollectionFromJson` *(or other variant)* instead
@@ -1821,7 +1893,7 @@ function createSubcollection(collection, predicate) {
1821
1893
  };
1822
1894
  }
1823
1895
 
1824
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the 
model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. 
It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 
words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
1896
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting 
models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- 
Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
1825
1897
 
1826
1898
  /**
1827
1899
  * This error type indicates that some tools are missing for pipeline execution or preparation
@@ -1849,15 +1921,21 @@ class MissingToolsError extends Error {
1849
1921
  * @public exported from `@promptbook/core`
1850
1922
  */
1851
1923
  function isPipelinePrepared(pipeline) {
1852
- // Note: Ignoring `pipeline.preparations` @@@
1853
- // Note: Ignoring `pipeline.knowledgePieces` @@@
1924
+ // Note: Ignoring `pipeline.preparations`
1925
+ // Note: Ignoring `pipeline.knowledgePieces`
1854
1926
  if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
1927
+ // TODO: !!! Comment this out
1928
+ console.log('Pipeline is not prepared because title is undefined or empty', pipeline);
1855
1929
  return false;
1856
1930
  }
1857
- if (!pipeline.personas.every((persona) => persona.modelRequirements !== undefined)) {
1931
+ if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
1932
+ // TODO: !!! Comment this out
1933
+ console.log('Pipeline is not prepared because personas are not prepared', pipeline.personas);
1858
1934
  return false;
1859
1935
  }
1860
1936
  if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
1937
+ // TODO: !!! Comment this out
1938
+ console.log('Pipeline is not prepared because knowledge sources are not prepared', pipeline.knowledgeSources);
1861
1939
  return false;
1862
1940
  }
1863
1941
  /*
@@ -1878,70 +1956,6 @@ function isPipelinePrepared(pipeline) {
1878
1956
  * - [♨] Are tasks prepared
1879
1957
  */
1880
1958
 
1881
- /**
1882
- * Generates random token
1883
- *
1884
- * Note: This function is cryptographically secure (it uses crypto.randomBytes internally)
1885
- *
1886
- * @private internal helper function
1887
- * @returns secure random token
1888
- */
1889
- function $randomToken(randomness) {
1890
- return randomBytes(randomness).toString('hex');
1891
- }
1892
- /**
1893
- * TODO: Maybe use nanoid instead https://github.com/ai/nanoid
1894
- */
1895
-
1896
- /**
1897
- * Recursively converts JSON strings to JSON objects
1898
-
1899
- * @public exported from `@promptbook/utils`
1900
- */
1901
- function jsonStringsToJsons(object) {
1902
- if (object === null) {
1903
- return object;
1904
- }
1905
- if (Array.isArray(object)) {
1906
- return object.map(jsonStringsToJsons);
1907
- }
1908
- if (typeof object !== 'object') {
1909
- return object;
1910
- }
1911
- const newObject = { ...object };
1912
- for (const [key, value] of Object.entries(object)) {
1913
- if (typeof value === 'string' && isValidJsonString(value)) {
1914
- newObject[key] = JSON.parse(value);
1915
- }
1916
- else {
1917
- newObject[key] = jsonStringsToJsons(value);
1918
- }
1919
- }
1920
- return newObject;
1921
- }
1922
- /**
1923
- * TODO: Type the return type correctly
1924
- */
1925
-
1926
- /**
1927
- * This error indicates errors during the execution of the pipeline
1928
- *
1929
- * @public exported from `@promptbook/core`
1930
- */
1931
- class PipelineExecutionError extends Error {
1932
- constructor(message) {
1933
- // Added id parameter
1934
- super(message);
1935
- this.name = 'PipelineExecutionError';
1936
- // TODO: [🐙] DRY - Maybe $randomId
1937
- this.id = `error-${$randomToken(8 /* <- TODO: To global config + Use Base58 to avoid similar char conflicts */)}`;
1938
- Object.setPrototypeOf(this, PipelineExecutionError.prototype);
1939
- }
1940
- }
1941
- /**
1942
- * TODO: [🧠][🌂] Add id to all errors
1943
- */
1944
-
1945
1959
  /**
1946
1960
  * This error indicates problems parsing the format value
1947
1961
  *
@@ -2076,6 +2090,40 @@ class NotYetImplementedError extends Error {
2076
2090
  }
2077
2091
  }
2078
2092
 
2093
+ /**
2094
+ * Generates random token
2095
+ *
2096
+ * Note: This function is cryptographically secure (it uses crypto.randomBytes internally)
2097
+ *
2098
+ * @private internal helper function
2099
+ * @returns secure random token
2100
+ */
2101
+ function $randomToken(randomness) {
2102
+ return randomBytes(randomness).toString('hex');
2103
+ }
2104
+ /**
2105
+ * TODO: Maybe use nanoid instead https://github.com/ai/nanoid
2106
+ */
2107
+
2108
+ /**
2109
+ * This error indicates errors during the execution of the pipeline
2110
+ *
2111
+ * @public exported from `@promptbook/core`
2112
+ */
2113
+ class PipelineExecutionError extends Error {
2114
+ constructor(message) {
2115
+ // Added id parameter
2116
+ super(message);
2117
+ this.name = 'PipelineExecutionError';
2118
+ // TODO: [🐙] DRY - Maybe $randomId
2119
+ this.id = `error-${$randomToken(8 /* <- TODO: To global config + Use Base58 to avoid similar char conflicts */)}`;
2120
+ Object.setPrototypeOf(this, PipelineExecutionError.prototype);
2121
+ }
2122
+ }
2123
+ /**
2124
+ * TODO: [🧠][🌂] Add id to all errors
2125
+ */
2126
+
2079
2127
  /**
2080
2128
  * Error thrown when a fetch request fails
2081
2129
  *
@@ -2151,6 +2199,101 @@ const ALL_ERRORS = {
2151
2199
  * Note: [💞] Ignore a discrepancy between file name and entity name
2152
2200
  */
2153
2201
 
2202
+ /**
2203
+ * Serializes an error into a [🚉] JSON-serializable object
2204
+ *
2205
+ * @public exported from `@promptbook/utils`
2206
+ */
2207
+ function serializeError(error) {
2208
+ const { name, message, stack } = error;
2209
+ const { id } = error;
2210
+ if (!Object.keys(ALL_ERRORS).includes(name)) {
2211
+ console.error(spaceTrim((block) => `
2212
+
2213
+ Cannot serialize error with name "${name}"
2214
+
2215
+ Authors of Promptbook probably forgot to add this error into the list of errors:
2216
+ https://github.com/webgptorg/promptbook/blob/main/src/errors/0-index.ts
2217
+
2218
+
2219
+ ${block(stack || message)}
2220
+
2221
+ `));
2222
+ }
2223
+ return {
2224
+ name: name,
2225
+ message,
2226
+ stack,
2227
+ id, // Include id in the serialized object
2228
+ };
2229
+ }
2230
+
2231
+ /**
2232
+ * Converts a JavaScript Object Notation (JSON) string into an object.
2233
+ *
2234
+ * Note: This is a wrapper around `JSON.parse()` with better error and type handling
2235
+ *
2236
+ * @public exported from `@promptbook/utils`
2237
+ */
2238
+ function jsonParse(value) {
2239
+ if (value === undefined) {
2240
+ throw new Error(`Can not parse JSON from undefined value.`);
2241
+ }
2242
+ else if (typeof value !== 'string') {
2243
+ console.error('Can not parse JSON from non-string value.', { text: value });
2244
+ throw new Error(spaceTrim(`
2245
+ Can not parse JSON from non-string value.
2246
+
2247
+ The value type: ${typeof value}
2248
+ See more in console.
2249
+ `));
2250
+ }
2251
+ try {
2252
+ return JSON.parse(value);
2253
+ }
2254
+ catch (error) {
2255
+ if (!(error instanceof Error)) {
2256
+ throw error;
2257
+ }
2258
+ throw new Error(spaceTrim((block) => `
2259
+ ${block(error.message)}
2260
+
2261
+ The JSON text:
2262
+ ${block(value)}
2263
+ `));
2264
+ }
2265
+ }
2266
+
2267
+ /**
2268
+ * Recursively converts JSON strings to JSON objects
2269
+
2270
+ * @public exported from `@promptbook/utils`
2271
+ */
2272
+ function jsonStringsToJsons(object) {
2273
+ if (object === null) {
2274
+ return object;
2275
+ }
2276
+ if (Array.isArray(object)) {
2277
+ return object.map(jsonStringsToJsons);
2278
+ }
2279
+ if (typeof object !== 'object') {
2280
+ return object;
2281
+ }
2282
+ const newObject = { ...object };
2283
+ for (const [key, value] of Object.entries(object)) {
2284
+ if (typeof value === 'string' && isValidJsonString(value)) {
2285
+ newObject[key] = jsonParse(value);
2286
+ }
2287
+ else {
2288
+ newObject[key] = jsonStringsToJsons(value);
2289
+ }
2290
+ }
2291
+ return newObject;
2292
+ }
2293
+ /**
2294
+ * TODO: Type the return type correctly
2295
+ */
2296
+
2154
2297
  /**
2155
2298
  * Deserializes the error object
2156
2299
  *
@@ -2316,116 +2459,6 @@ function createTask(options) {
2316
2459
  * TODO: [🐚] Split into more files and make `PrepareTask` & `RemoteTask` + split the function
2317
2460
  */
2318
2461
 
2319
- /**
2320
- * Serializes an error into a [🚉] JSON-serializable object
2321
- *
2322
- * @public exported from `@promptbook/utils`
2323
- */
2324
- function serializeError(error) {
2325
- const { name, message, stack } = error;
2326
- const { id } = error;
2327
- if (!Object.keys(ALL_ERRORS).includes(name)) {
2328
- console.error(spaceTrim((block) => `
2329
-
2330
- Cannot serialize error with name "${name}"
2331
-
2332
- Authors of Promptbook probably forgot to add this error into the list of errors:
2333
- https://github.com/webgptorg/promptbook/blob/main/src/errors/0-index.ts
2334
-
2335
-
2336
- ${block(stack || message)}
2337
-
2338
- `));
2339
- }
2340
- return {
2341
- name: name,
2342
- message,
2343
- stack,
2344
- id, // Include id in the serialized object
2345
- };
2346
- }
2347
-
2348
- /**
2349
- * Format either small or big number
2350
- *
2351
- * @public exported from `@promptbook/utils`
2352
- */
2353
- function numberToString(value) {
2354
- if (value === 0) {
2355
- return '0';
2356
- }
2357
- else if (Number.isNaN(value)) {
2358
- return VALUE_STRINGS.nan;
2359
- }
2360
- else if (value === Infinity) {
2361
- return VALUE_STRINGS.infinity;
2362
- }
2363
- else if (value === -Infinity) {
2364
- return VALUE_STRINGS.negativeInfinity;
2365
- }
2366
- for (let exponent = 0; exponent < 15; exponent++) {
2367
- const factor = 10 ** exponent;
2368
- const valueRounded = Math.round(value * factor) / factor;
2369
- if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
2370
- return valueRounded.toFixed(exponent);
2371
- }
2372
- }
2373
- return value.toString();
2374
- }
2375
-
2376
- /**
2377
- * Function `valueToString` will convert the given value to string
2378
- * This is useful and used in the `templateParameters` function
2379
- *
2380
- * Note: This function is not just calling `toString` method
2381
- * It's more complex and can handle this conversion specifically for LLM models
2382
- * See `VALUE_STRINGS`
2383
- *
2384
- * Note: There are 2 similar functions
2385
- * - `valueToString` converts value to string for LLM models as human-readable string
2386
- * - `asSerializable` converts value to string to preserve full information to be able to convert it back
2387
- *
2388
- * @public exported from `@promptbook/utils`
2389
- */
2390
- function valueToString(value) {
2391
- try {
2392
- if (value === '') {
2393
- return VALUE_STRINGS.empty;
2394
- }
2395
- else if (value === null) {
2396
- return VALUE_STRINGS.null;
2397
- }
2398
- else if (value === undefined) {
2399
- return VALUE_STRINGS.undefined;
2400
- }
2401
- else if (typeof value === 'string') {
2402
- return value;
2403
- }
2404
- else if (typeof value === 'number') {
2405
- return numberToString(value);
2406
- }
2407
- else if (value instanceof Date) {
2408
- return value.toISOString();
2409
- }
2410
- else {
2411
- try {
2412
- return JSON.stringify(value);
2413
- }
2414
- catch (error) {
2415
- if (error instanceof TypeError && error.message.includes('circular structure')) {
2416
- return VALUE_STRINGS.circular;
2417
- }
2418
- throw error;
2419
- }
2420
- }
2421
- }
2422
- catch (error) {
2423
- assertsError(error);
2424
- console.error(error);
2425
- return VALUE_STRINGS.unserializable;
2426
- }
2427
- }
2428
-
2429
2462
  /**
2430
2463
  * Represents the uncertain value
2431
2464
  *
@@ -2494,6 +2527,87 @@ const UNCERTAIN_USAGE = $deepFreeze({
2494
2527
  * Note: [💞] Ignore a discrepancy between file name and entity name
2495
2528
  */
2496
2529
 
2530
+ /**
2531
+ * Format either small or big number
2532
+ *
2533
+ * @public exported from `@promptbook/utils`
2534
+ */
2535
+ function numberToString(value) {
2536
+ if (value === 0) {
2537
+ return '0';
2538
+ }
2539
+ else if (Number.isNaN(value)) {
2540
+ return VALUE_STRINGS.nan;
2541
+ }
2542
+ else if (value === Infinity) {
2543
+ return VALUE_STRINGS.infinity;
2544
+ }
2545
+ else if (value === -Infinity) {
2546
+ return VALUE_STRINGS.negativeInfinity;
2547
+ }
2548
+ for (let exponent = 0; exponent < 15; exponent++) {
2549
+ const factor = 10 ** exponent;
2550
+ const valueRounded = Math.round(value * factor) / factor;
2551
+ if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
2552
+ return valueRounded.toFixed(exponent);
2553
+ }
2554
+ }
2555
+ return value.toString();
2556
+ }
2557
+
2558
+ /**
2559
+ * Function `valueToString` will convert the given value to string
2560
+ * This is useful and used in the `templateParameters` function
2561
+ *
2562
+ * Note: This function is not just calling `toString` method
2563
+ * It's more complex and can handle this conversion specifically for LLM models
2564
+ * See `VALUE_STRINGS`
2565
+ *
2566
+ * Note: There are 2 similar functions
2567
+ * - `valueToString` converts value to string for LLM models as human-readable string
2568
+ * - `asSerializable` converts value to string to preserve full information to be able to convert it back
2569
+ *
2570
+ * @public exported from `@promptbook/utils`
2571
+ */
2572
+ function valueToString(value) {
2573
+ try {
2574
+ if (value === '') {
2575
+ return VALUE_STRINGS.empty;
2576
+ }
2577
+ else if (value === null) {
2578
+ return VALUE_STRINGS.null;
2579
+ }
2580
+ else if (value === undefined) {
2581
+ return VALUE_STRINGS.undefined;
2582
+ }
2583
+ else if (typeof value === 'string') {
2584
+ return value;
2585
+ }
2586
+ else if (typeof value === 'number') {
2587
+ return numberToString(value);
2588
+ }
2589
+ else if (value instanceof Date) {
2590
+ return value.toISOString();
2591
+ }
2592
+ else {
2593
+ try {
2594
+ return JSON.stringify(value);
2595
+ }
2596
+ catch (error) {
2597
+ if (error instanceof TypeError && error.message.includes('circular structure')) {
2598
+ return VALUE_STRINGS.circular;
2599
+ }
2600
+ throw error;
2601
+ }
2602
+ }
2603
+ }
2604
+ catch (error) {
2605
+ assertsError(error);
2606
+ console.error(error);
2607
+ return VALUE_STRINGS.unserializable;
2608
+ }
2609
+ }
2610
+
2497
2611
  /**
2498
2612
  * Function `addUsage` will add multiple usages into one
2499
2613
  *
@@ -2702,7 +2816,7 @@ function union(...sets) {
2702
2816
  }
2703
2817
 
2704
2818
  /**
2705
- * @@@
2819
+ * Contains configuration options for parsing and generating CSV files, such as delimiters and quoting rules.
2706
2820
  *
2707
2821
  * @public exported from `@promptbook/core`
2708
2822
  */
@@ -2711,11 +2825,29 @@ const MANDATORY_CSV_SETTINGS = Object.freeze({
2711
2825
  // encoding: 'utf-8',
2712
2826
  });
2713
2827
 
2828
+ /**
2829
+ * Converts a CSV string into an object
2830
+ *
2831
+ * Note: This is a wrapper around `papaparse.parse()` with better autohealing
2832
+ *
2833
+ * @private - for now until `@promptbook/csv` is released
2834
+ */
2835
+ function csvParse(value /* <- TODO: string_csv */, settings, schema /* <- TODO: Make CSV Schemas */) {
2836
+ settings = { ...settings, ...MANDATORY_CSV_SETTINGS };
2837
+ // Note: Autoheal invalid '\r' characters
2838
+ if (settings.newline && !settings.newline.includes('\r') && value.includes('\r')) {
2839
+ console.warn('CSV string contains carriage return characters, but in the CSV settings the `newline` setting does not include them. Autohealing the CSV string.');
2840
+ value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
2841
+ }
2842
+ const csv = parse(value, settings);
2843
+ return csv;
2844
+ }
2845
+
2714
2846
  /**
2715
2847
  * Function to check if a string is valid CSV
2716
2848
  *
2717
2849
  * @param value The string to check
2718
- * @returns True if the string is a valid CSV string, false otherwise
2850
+ * @returns `true` if the string is a valid CSV string, `false` otherwise
2719
2851
  *
2720
2852
  * @public exported from `@promptbook/utils`
2721
2853
  */
@@ -2739,7 +2871,7 @@ function isValidCsvString(value) {
2739
2871
  * @public exported from `@promptbook/core`
2740
2872
  * <- TODO: [🏢] Export from package `@promptbook/csv`
2741
2873
  */
2742
- const CsvFormatDefinition = {
2874
+ const CsvFormatParser = {
2743
2875
  formatName: 'CSV',
2744
2876
  aliases: ['SPREADSHEET', 'TABLE'],
2745
2877
  isValid(value, settings, schema) {
@@ -2751,12 +2883,12 @@ const CsvFormatDefinition = {
2751
2883
  heal(value, settings, schema) {
2752
2884
  throw new Error('Not implemented');
2753
2885
  },
2754
- subvalueDefinitions: [
2886
+ subvalueParsers: [
2755
2887
  {
2756
2888
  subvalueName: 'ROW',
2757
- async mapValues(value, outputParameterName, settings, mapCallback) {
2758
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
2759
- const csv = parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
2889
+ async mapValues(options) {
2890
+ const { value, outputParameterName, settings, mapCallback, onProgress } = options;
2891
+ const csv = csvParse(value, settings);
2760
2892
  if (csv.errors.length !== 0) {
2761
2893
  throw new CsvFormatError(spaceTrim((block) => `
2762
2894
  CSV parsing error
@@ -2771,23 +2903,37 @@ const CsvFormatDefinition = {
2771
2903
  ${block(value)}
2772
2904
  `));
2773
2905
  }
2774
- const mappedData = await Promise.all(csv.data.map(async (row, index) => {
2906
+ const mappedData = [];
2907
+ const length = csv.data.length;
2908
+ for (let index = 0; index < length; index++) {
2909
+ const row = csv.data[index];
2775
2910
  if (row[outputParameterName]) {
2776
2911
  throw new CsvFormatError(`Can not overwrite existing column "${outputParameterName}" in CSV row`);
2777
2912
  }
2778
- return {
2913
+ const mappedRow = {
2779
2914
  ...row,
2780
- [outputParameterName]: await mapCallback(row, index),
2915
+ [outputParameterName]: await mapCallback(row, index, length),
2781
2916
  };
2782
- }));
2917
+ mappedData.push(mappedRow);
2918
+ if (onProgress) {
2919
+ // Note: Report the CSV with all rows mapped so far
2920
+ /*
2921
+ // TODO: [🛕] Report progress with all the rows including the pending ones
2922
+ const progressData = mappedData.map((row, i) =>
2923
+ i > index ? { ...row, [outputParameterName]: PENDING_VALUE_PLACEHOLDER } : row,
2924
+ );
2925
+ */
2926
+ await onProgress(unparse(mappedData, { ...settings, ...MANDATORY_CSV_SETTINGS }));
2927
+ }
2928
+ }
2783
2929
  return unparse(mappedData, { ...settings, ...MANDATORY_CSV_SETTINGS });
2784
2930
  },
2785
2931
  },
2786
2932
  {
2787
2933
  subvalueName: 'CELL',
2788
- async mapValues(value, outputParameterName, settings, mapCallback) {
2789
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
2790
- const csv = parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
2934
+ async mapValues(options) {
2935
+ const { value, settings, mapCallback, onProgress } = options;
2936
+ const csv = csvParse(value, settings);
2791
2937
  if (csv.errors.length !== 0) {
2792
2938
  throw new CsvFormatError(spaceTrim((block) => `
2793
2939
  CSV parsing error
@@ -2803,9 +2949,9 @@ const CsvFormatDefinition = {
2803
2949
  `));
2804
2950
  }
2805
2951
  const mappedData = await Promise.all(csv.data.map(async (row, rowIndex) => {
2806
- return /* not await */ Promise.all(Object.entries(row).map(async ([key, value], columnIndex) => {
2952
+ return /* not await */ Promise.all(Object.entries(row).map(async ([key, value], columnIndex, array) => {
2807
2953
  const index = rowIndex * Object.keys(row).length + columnIndex;
2808
- return /* not await */ mapCallback({ [key]: value }, index);
2954
+ return /* not await */ mapCallback({ [key]: value }, index, array.length);
2809
2955
  }));
2810
2956
  }));
2811
2957
  return unparse(mappedData, { ...settings, ...MANDATORY_CSV_SETTINGS });
@@ -2814,10 +2960,10 @@ const CsvFormatDefinition = {
2814
2960
  ],
2815
2961
  };
2816
2962
  /**
2817
- * TODO: [🍓] In `CsvFormatDefinition` implement simple `isValid`
2818
- * TODO: [🍓] In `CsvFormatDefinition` implement partial `canBeValid`
2819
- * TODO: [🍓] In `CsvFormatDefinition` implement `heal
2820
- * TODO: [🍓] In `CsvFormatDefinition` implement `subvalueDefinitions`
2963
+ * TODO: [🍓] In `CsvFormatParser` implement simple `isValid`
2964
+ * TODO: [🍓] In `CsvFormatParser` implement partial `canBeValid`
2965
+ * TODO: [🍓] In `CsvFormatParser` implement `heal`
2966
+ * TODO: [🍓] In `CsvFormatParser` implement `subvalueParsers`
2821
2967
  * TODO: [🏢] Allow to expect something inside CSV objects and other formats
2822
2968
  */
2823
2969
 
@@ -2826,7 +2972,7 @@ const CsvFormatDefinition = {
2826
2972
  *
2827
2973
  * @private still in development [🏢]
2828
2974
  */
2829
- const JsonFormatDefinition = {
2975
+ const JsonFormatParser = {
2830
2976
  formatName: 'JSON',
2831
2977
  mimeType: 'application/json',
2832
2978
  isValid(value, settings, schema) {
@@ -2838,28 +2984,28 @@ const JsonFormatDefinition = {
2838
2984
  heal(value, settings, schema) {
2839
2985
  throw new Error('Not implemented');
2840
2986
  },
2841
- subvalueDefinitions: [],
2987
+ subvalueParsers: [],
2842
2988
  };
2843
2989
  /**
2844
2990
  * TODO: [🧠] Maybe proper instance of object
2845
2991
  * TODO: [0] Make string_serialized_json
2846
2992
  * TODO: [1] Make type for JSON Settings and Schema
2847
2993
  * TODO: [🧠] What to use for validating JSONs - JSON Schema, ZoD, typescript types/interfaces,...?
2848
- * TODO: [🍓] In `JsonFormatDefinition` implement simple `isValid`
2849
- * TODO: [🍓] In `JsonFormatDefinition` implement partial `canBeValid`
2850
- * TODO: [🍓] In `JsonFormatDefinition` implement `heal
2851
- * TODO: [🍓] In `JsonFormatDefinition` implement `subvalueDefinitions`
2994
+ * TODO: [🍓] In `JsonFormatParser` implement simple `isValid`
2995
+ * TODO: [🍓] In `JsonFormatParser` implement partial `canBeValid`
2996
+ * TODO: [🍓] In `JsonFormatParser` implement `heal`
2997
+ * TODO: [🍓] In `JsonFormatParser` implement `subvalueParsers`
2852
2998
  * TODO: [🏢] Allow to expect something inside JSON objects and other formats
2853
2999
  */
2854
3000
 
2855
3001
  /**
2856
3002
  * Definition for any text - this will be always valid
2857
3003
  *
2858
- * Note: This is not useful for validation, but for splitting and mapping with `subvalueDefinitions`
3004
+ * Note: This is not useful for validation, but for splitting and mapping with `subvalueParsers`
2859
3005
  *
2860
3006
  * @public exported from `@promptbook/core`
2861
3007
  */
2862
- const TextFormatDefinition = {
3008
+ const TextFormatParser = {
2863
3009
  formatName: 'TEXT',
2864
3010
  isValid(value) {
2865
3011
  return typeof value === 'string';
@@ -2868,19 +3014,20 @@ const TextFormatDefinition = {
2868
3014
  return typeof partialValue === 'string';
2869
3015
  },
2870
3016
  heal() {
2871
- throw new UnexpectedError('It does not make sense to call `TextFormatDefinition.heal`');
3017
+ throw new UnexpectedError('It does not make sense to call `TextFormatParser.heal`');
2872
3018
  },
2873
- subvalueDefinitions: [
3019
+ subvalueParsers: [
2874
3020
  {
2875
3021
  subvalueName: 'LINE',
2876
- async mapValues(value, outputParameterName, settings, mapCallback) {
3022
+ async mapValues(options) {
3023
+ const { value, mapCallback, onProgress } = options;
2877
3024
  const lines = value.split('\n');
2878
- const mappedLines = await Promise.all(lines.map((lineContent, lineNumber) =>
3025
+ const mappedLines = await Promise.all(lines.map((lineContent, lineNumber, array) =>
2879
3026
  // TODO: [🧠] Maybe option to skip empty line
2880
3027
  /* not await */ mapCallback({
2881
3028
  lineContent,
2882
3029
  // TODO: [🧠] Maybe also put here `lineNumber`
2883
- }, lineNumber)));
3030
+ }, lineNumber, array.length)));
2884
3031
  return mappedLines.join('\n');
2885
3032
  },
2886
3033
  },
@@ -2890,10 +3037,10 @@ const TextFormatDefinition = {
2890
3037
  /**
2891
3038
  * TODO: [1] Make type for XML Text and Schema
2892
3039
  * TODO: [🧠][🤠] Here should be all words, characters, lines, paragraphs, pages available as subvalues
2893
- * TODO: [🍓] In `TextFormatDefinition` implement simple `isValid`
2894
- * TODO: [🍓] In `TextFormatDefinition` implement partial `canBeValid`
2895
- * TODO: [🍓] In `TextFormatDefinition` implement `heal
2896
- * TODO: [🍓] In `TextFormatDefinition` implement `subvalueDefinitions`
3040
+ * TODO: [🍓] In `TextFormatParser` implement simple `isValid`
3041
+ * TODO: [🍓] In `TextFormatParser` implement partial `canBeValid`
3042
+ * TODO: [🍓] In `TextFormatParser` implement `heal`
3043
+ * TODO: [🍓] In `TextFormatParser` implement `subvalueParsers`
2897
3044
  * TODO: [🏢] Allow to expect something inside each item of list and other formats
2898
3045
  */
2899
3046
 
@@ -2901,7 +3048,7 @@ const TextFormatDefinition = {
2901
3048
  * Function to check if a string is valid XML
2902
3049
  *
2903
3050
  * @param value
2904
- * @returns True if the string is a valid XML string, false otherwise
3051
+ * @returns `true` if the string is a valid XML string, `false` otherwise
2905
3052
  *
2906
3053
  * @public exported from `@promptbook/utils`
2907
3054
  */
@@ -2926,7 +3073,7 @@ function isValidXmlString(value) {
2926
3073
  *
2927
3074
  * @private still in development [🏢]
2928
3075
  */
2929
- const XmlFormatDefinition = {
3076
+ const XmlFormatParser = {
2930
3077
  formatName: 'XML',
2931
3078
  mimeType: 'application/xml',
2932
3079
  isValid(value, settings, schema) {
@@ -2938,17 +3085,17 @@ const XmlFormatDefinition = {
2938
3085
  heal(value, settings, schema) {
2939
3086
  throw new Error('Not implemented');
2940
3087
  },
2941
- subvalueDefinitions: [],
3088
+ subvalueParsers: [],
2942
3089
  };
2943
3090
  /**
2944
3091
  * TODO: [🧠] Maybe propper instance of object
2945
3092
  * TODO: [0] Make string_serialized_xml
2946
3093
  * TODO: [1] Make type for XML Settings and Schema
2947
3094
  * TODO: [🧠] What to use for validating XMLs - XSD,...
2948
- * TODO: [🍓] In `XmlFormatDefinition` implement simple `isValid`
2949
- * TODO: [🍓] In `XmlFormatDefinition` implement partial `canBeValid`
2950
- * TODO: [🍓] In `XmlFormatDefinition` implement `heal
2951
- * TODO: [🍓] In `XmlFormatDefinition` implement `subvalueDefinitions`
3095
+ * TODO: [🍓] In `XmlFormatParser` implement simple `isValid`
3096
+ * TODO: [🍓] In `XmlFormatParser` implement partial `canBeValid`
3097
+ * TODO: [🍓] In `XmlFormatParser` implement `heal`
3098
+ * TODO: [🍓] In `XmlFormatParser` implement `subvalueParsers`
2952
3099
  * TODO: [🏢] Allow to expect something inside XML and other formats
2953
3100
  */
2954
3101
 
@@ -2957,24 +3104,19 @@ const XmlFormatDefinition = {
2957
3104
  *
2958
3105
  * @private internal index of `...` <- TODO [🏢]
2959
3106
  */
2960
- const FORMAT_DEFINITIONS = [
2961
- JsonFormatDefinition,
2962
- XmlFormatDefinition,
2963
- TextFormatDefinition,
2964
- CsvFormatDefinition,
2965
- ];
3107
+ const FORMAT_DEFINITIONS = [JsonFormatParser, XmlFormatParser, TextFormatParser, CsvFormatParser];
2966
3108
  /**
2967
3109
  * Note: [💞] Ignore a discrepancy between file name and entity name
2968
3110
  */
2969
3111
 
2970
3112
  /**
2971
- * Maps available parameters to expected parameters
3113
+ * Maps available parameters to expected parameters for a pipeline task.
2972
3114
  *
2973
3115
  * The strategy is:
2974
- * 1) @@@
2975
- * 2) @@@
3116
+ * 1) First, match parameters by name where both available and expected.
3117
+ * 2) Then, if there are unmatched expected and available parameters, map them by order.
2976
3118
  *
2977
- * @throws {PipelineExecutionError} @@@
3119
+ * @throws {PipelineExecutionError} If the number of unmatched expected and available parameters does not match, or mapping is ambiguous.
2978
3120
  * @private within the repository used in `createPipelineExecutor`
2979
3121
  */
2980
3122
  function mapAvailableToExpectedParameters(options) {
@@ -2997,7 +3139,7 @@ function mapAvailableToExpectedParameters(options) {
2997
3139
  else if (!availableParametersNames.has(parameterName) && expectedParameterNames.has(parameterName)) ;
2998
3140
  }
2999
3141
  if (expectedParameterNames.size === 0) {
3000
- // Note: [👨‍👨‍👧] Now we can freeze `mappedParameters` to prevent @@@
3142
+ // Note: [👨‍👨‍👧] Now we can freeze `mappedParameters` to prevent accidental modifications after mapping
3001
3143
  Object.freeze(mappedParameters);
3002
3144
  return mappedParameters;
3003
3145
  }
@@ -3028,7 +3170,7 @@ function mapAvailableToExpectedParameters(options) {
3028
3170
  for (let i = 0; i < expectedParameterNames.size; i++) {
3029
3171
  mappedParameters[expectedParameterNamesArray[i]] = availableParameters[availableParametersNamesArray[i]];
3030
3172
  }
3031
- // Note: [👨‍👨‍👧] Now we can freeze `mappedParameters` to prevent @@@
3173
+ // Note: [👨‍👨‍👧] Now we can freeze `mappedParameters` to prevent accidental modifications after mapping
3032
3174
  Object.freeze(mappedParameters);
3033
3175
  return mappedParameters;
3034
3176
  }
@@ -3324,7 +3466,7 @@ function extractJsonBlock(markdown) {
3324
3466
  }
3325
3467
  /**
3326
3468
  * TODO: Add some auto-healing logic + extract YAML, JSON5, TOML, etc.
3327
- * TODO: [🏢] Make this logic part of `JsonFormatDefinition` or `isValidJsonString`
3469
+ * TODO: [🏢] Make this logic part of `JsonFormatParser` or `isValidJsonString`
3328
3470
  */
3329
3471
 
3330
3472
  /**
@@ -3403,10 +3545,12 @@ function templateParameters(template, parameters) {
3403
3545
  throw new PipelineExecutionError('Parameter is already opened or not closed');
3404
3546
  }
3405
3547
  if (parameters[parameterName] === undefined) {
3548
+ console.log('!!! templateParameters 1', { parameterName, template, parameters });
3406
3549
  throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
3407
3550
  }
3408
3551
  let parameterValue = parameters[parameterName];
3409
3552
  if (parameterValue === undefined) {
3553
+ console.log('!!! templateParameters 2', { parameterName, template, parameters });
3410
3554
  throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
3411
3555
  }
3412
3556
  parameterValue = valueToString(parameterValue);
@@ -3777,10 +3921,10 @@ for (let i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
3777
3921
  */
3778
3922
 
3779
3923
  /**
3780
- * @@@
3924
+ * Removes diacritic marks (accents) from characters in a string.
3781
3925
  *
3782
- * @param input @@@
3783
- * @returns @@@
3926
+ * @param input The string containing diacritics to be normalized.
3927
+ * @returns The string with diacritics removed or normalized.
3784
3928
  * @public exported from `@promptbook/utils`
3785
3929
  */
3786
3930
  function removeDiacritics(input) {
@@ -3823,7 +3967,7 @@ const CountUtils = {
3823
3967
  PAGES: countPages,
3824
3968
  };
3825
3969
  /**
3826
- * TODO: [🧠][🤠] This should be probbably as part of `TextFormatDefinition`
3970
+ * TODO: [🧠][🤠] This should be probably as part of `TextFormatParser`
3827
3971
  * Note: [💞] Ignore a discrepancy between file name and entity name
3828
3972
  */
3829
3973
 
@@ -3873,13 +4017,17 @@ function isPassingExpectations(expectations, value) {
3873
4017
  }
3874
4018
  /**
3875
4019
  * TODO: [💝] Unite object for expecting amount and format
3876
- * TODO: [🧠][🤠] This should be part of `TextFormatDefinition`
4020
+ * TODO: [🧠][🤠] This should be part of `TextFormatParser`
3877
4021
  * Note: [💝] and [🤠] are interconnected together
3878
4022
  */
3879
4023
 
3880
4024
  /**
3881
- * @@@
4025
+ * Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
4026
+ * (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
4027
+ * Throws errors if execution fails after all attempts.
3882
4028
  *
4029
+ * @param options - The options for execution, including task, parameters, pipeline, and configuration.
4030
+ * @returns The result string of the executed task.
3883
4031
  * @private internal utility of `createPipelineExecutor`
3884
4032
  */
3885
4033
  async function executeAttempts(options) {
@@ -4101,7 +4249,7 @@ async function executeAttempts(options) {
4101
4249
  if (task.format) {
4102
4250
  if (task.format === 'JSON') {
4103
4251
  if (!isValidJsonString($ongoingTaskResult.$resultString || '')) {
4104
- // TODO: [🏢] Do more universally via `FormatDefinition`
4252
+ // TODO: [🏢] Do more universally via `FormatParser`
4105
4253
  try {
4106
4254
  $ongoingTaskResult.$resultString = extractJsonBlock($ongoingTaskResult.$resultString || '');
4107
4255
  }
@@ -4203,12 +4351,16 @@ async function executeAttempts(options) {
4203
4351
  */
4204
4352
 
4205
4353
  /**
4206
- * @@@
4354
+ * Executes a pipeline task that requires mapping or iterating over subvalues of a parameter (such as rows in a CSV).
4355
+ * Handles format and subformat resolution, error handling, and progress reporting.
4356
+ *
4357
+ * @param options - Options for execution, including task details and progress callback.
4358
+ * @returns The result of the subvalue mapping or execution attempts.
4207
4359
  *
4208
4360
  * @private internal utility of `createPipelineExecutor`
4209
4361
  */
4210
4362
  async function executeFormatSubvalues(options) {
4211
- const { task, jokerParameterNames, parameters, priority, csvSettings, pipelineIdentification } = options;
4363
+ const { task, jokerParameterNames, parameters, priority, csvSettings, onProgress, pipelineIdentification } = options;
4212
4364
  if (task.foreach === undefined) {
4213
4365
  return /* not await */ executeAttempts(options);
4214
4366
  }
@@ -4239,16 +4391,16 @@ async function executeFormatSubvalues(options) {
4239
4391
  ${block(pipelineIdentification)}
4240
4392
  `));
4241
4393
  }
4242
- const subvalueDefinition = formatDefinition.subvalueDefinitions.find((subvalueDefinition) => [subvalueDefinition.subvalueName, ...(subvalueDefinition.aliases || [])].includes(task.foreach.subformatName));
4243
- if (subvalueDefinition === undefined) {
4394
+ const subvalueParser = formatDefinition.subvalueParsers.find((subvalueParser) => [subvalueParser.subvalueName, ...(subvalueParser.aliases || [])].includes(task.foreach.subformatName));
4395
+ if (subvalueParser === undefined) {
4244
4396
  throw new UnexpectedError(
4245
4397
  // <- TODO: [🧠][🧐] Should be formats fixed per promptbook version or behave as plugins (=> change UnexpectedError)
4246
4398
  spaceTrim((block) => `
4247
4399
  Unsupported subformat name "${task.foreach.subformatName}" for format "${task.foreach.formatName}"
4248
4400
 
4249
4401
  Available subformat names for format "${formatDefinition.formatName}":
4250
- ${block(formatDefinition.subvalueDefinitions
4251
- .map((subvalueDefinition) => subvalueDefinition.subvalueName)
4402
+ ${block(formatDefinition.subvalueParsers
4403
+ .map((subvalueParser) => subvalueParser.subvalueName)
4252
4404
  .map((subvalueName) => `- ${subvalueName}`)
4253
4405
  .join('\n'))}
4254
4406
 
@@ -4262,53 +4414,83 @@ async function executeFormatSubvalues(options) {
4262
4414
  formatSettings = csvSettings;
4263
4415
  // <- TODO: [🤹‍♂️] More universal, make simmilar pattern for other formats for example \n vs \r\n in text
4264
4416
  }
4265
- const resultString = await subvalueDefinition.mapValues(parameterValue, task.foreach.outputSubparameterName, formatSettings, async (subparameters, index) => {
4266
- let mappedParameters;
4267
- // TODO: [🤹‍♂️][🪂] Limit to N concurrent executions
4268
- // TODO: When done [🐚] Report progress also for each subvalue here
4269
- try {
4270
- mappedParameters = mapAvailableToExpectedParameters({
4271
- expectedParameters: Object.fromEntries(task.foreach.inputSubparameterNames.map((subparameterName) => [subparameterName, null])),
4272
- availableParameters: subparameters,
4273
- });
4274
- }
4275
- catch (error) {
4276
- if (!(error instanceof PipelineExecutionError)) {
4277
- throw error;
4417
+ const resultString = await subvalueParser.mapValues({
4418
+ value: parameterValue,
4419
+ outputParameterName: task.foreach.outputSubparameterName,
4420
+ settings: formatSettings,
4421
+ onProgress(partialResultString) {
4422
+ return onProgress(Object.freeze({
4423
+ [task.resultingParameterName]: partialResultString,
4424
+ }));
4425
+ },
4426
+ async mapCallback(subparameters, index, length) {
4427
+ let mappedParameters;
4428
+ try {
4429
+ mappedParameters = mapAvailableToExpectedParameters({
4430
+ expectedParameters: Object.fromEntries(task.foreach.inputSubparameterNames.map((subparameterName) => [subparameterName, null])),
4431
+ availableParameters: subparameters,
4432
+ });
4278
4433
  }
4279
- throw new PipelineExecutionError(spaceTrim((block) => `
4280
- ${error.message}
4434
+ catch (error) {
4435
+ if (!(error instanceof PipelineExecutionError)) {
4436
+ throw error;
4437
+ }
4438
+ const highLevelError = new PipelineExecutionError(spaceTrim((block) => `
4439
+ ${error.message}
4281
4440
 
4282
- This is error in FOREACH command
4283
- You have probbably passed wrong data to pipeline or wrong data was generated which are processed by FOREACH command
4441
+ This is error in FOREACH command when mapping ${formatDefinition.formatName} ${subvalueParser.subvalueName} data (${index + 1}/${length})
4442
+ You have probbably passed wrong data to pipeline or wrong data was generated which are processed by FOREACH command
4284
4443
 
4285
- ${block(pipelineIdentification)}
4286
- Subparameter index: ${index}
4287
- `));
4288
- }
4289
- const allSubparameters = {
4290
- ...parameters,
4291
- ...mappedParameters,
4292
- };
4293
- // Note: [👨‍👨‍👧] Now we can freeze `subparameters` because we are sure that all and only used parameters are defined and are not going to be changed
4294
- Object.freeze(allSubparameters);
4295
- const subresultString = await executeAttempts({
4296
- ...options,
4297
- priority: priority + index,
4298
- parameters: allSubparameters,
4299
- pipelineIdentification: spaceTrim((block) => `
4300
- ${block(pipelineIdentification)}
4301
- Subparameter index: ${index}
4302
- `),
4303
- });
4304
- return subresultString;
4444
+ ${block(pipelineIdentification)}
4445
+ `));
4446
+ if (length > BIG_DATASET_TRESHOLD) {
4447
+ console.error(highLevelError);
4448
+ return FAILED_VALUE_PLACEHOLDER;
4449
+ }
4450
+ throw highLevelError;
4451
+ }
4452
+ const allSubparameters = {
4453
+ ...parameters,
4454
+ ...mappedParameters,
4455
+ };
4456
+ Object.freeze(allSubparameters);
4457
+ try {
4458
+ const subresultString = await executeAttempts({
4459
+ ...options,
4460
+ priority: priority + index,
4461
+ parameters: allSubparameters,
4462
+ pipelineIdentification: spaceTrim((block) => `
4463
+ ${block(pipelineIdentification)}
4464
+ Subparameter index: ${index}
4465
+ `),
4466
+ });
4467
+ return subresultString;
4468
+ }
4469
+ catch (error) {
4470
+ if (length > BIG_DATASET_TRESHOLD) {
4471
+ console.error(spaceTrim((block) => `
4472
+ ${error.message}
4473
+
4474
+ This is error in FOREACH command when processing ${formatDefinition.formatName} ${subvalueParser.subvalueName} data (${index + 1}/${length})
4475
+
4476
+ ${block(pipelineIdentification)}
4477
+ `));
4478
+ return FAILED_VALUE_PLACEHOLDER;
4479
+ }
4480
+ throw error;
4481
+ }
4482
+ },
4305
4483
  });
4306
4484
  return resultString;
4307
4485
  }
4308
4486
 
4309
4487
  /**
4310
- * @@@
4488
+ * Returns the context for a given task, typically used to provide additional information or variables
4489
+ * required for the execution of the task within a pipeline. The context is returned as a string value
4490
+ * that may include markdown formatting.
4311
4491
  *
4492
+ * @param task - The task for which the context is being generated. This should be a deeply immutable TaskJson object.
4493
+ * @returns The context as a string, formatted as markdown and parameter value.
4312
4494
  * @private internal utility of `createPipelineExecutor`
4313
4495
  */
4314
4496
  async function getContextForTask(task) {
@@ -4316,7 +4498,7 @@ async function getContextForTask(task) {
4316
4498
  }
4317
4499
 
4318
4500
  /**
4319
- * @@@
4501
+ * Retrieves example values or templates for a given task, used to guide or validate pipeline execution.
4320
4502
  *
4321
4503
  * @private internal utility of `createPipelineExecutor`
4322
4504
  */
@@ -4325,25 +4507,127 @@ async function getExamplesForTask(task) {
4325
4507
  }
4326
4508
 
4327
4509
  /**
4328
- * @@@
4510
+ * Computes the cosine similarity between two embedding vectors
4511
+ *
4512
+ * Note: This is helping function for RAG (retrieval-augmented generation)
4513
+ *
4514
+ * @param embeddingVector1
4515
+ * @param embeddingVector2
4516
+ * @returns Cosine similarity between the two vectors
4517
+ *
4518
+ * @public exported from `@promptbook/core`
4519
+ */
4520
+ function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
4521
+ if (embeddingVector1.length !== embeddingVector2.length) {
4522
+ throw new TypeError('Embedding vectors must have the same length');
4523
+ }
4524
+ const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
4525
+ const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
4526
+ const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
4527
+ return 1 - dotProduct / (magnitude1 * magnitude2);
4528
+ }
4529
+
4530
+ /**
4531
+ *
4532
+ * @param knowledgePieces
4533
+ * @returns
4534
+ *
4535
+ * @private internal utility of `createPipelineExecutor`
4536
+ */
4537
+ function knowledgePiecesToString(knowledgePieces) {
4538
+ return knowledgePieces
4539
+ .map((knowledgePiece) => {
4540
+ const { content } = knowledgePiece;
4541
+ return `- ${content}`;
4542
+ })
4543
+ .join('\n');
4544
+ // <- TODO: [🧠] Some smarter aggregation of knowledge pieces, single-line vs multi-line vs mixed
4545
+ }
4546
+
4547
+ /**
4548
+ * Retrieves the most relevant knowledge pieces for a given task using embedding-based similarity search.
4549
+ * This is where retrieval-augmented generation (RAG) is performed to enhance the task with external knowledge.
4329
4550
  *
4330
4551
  * @private internal utility of `createPipelineExecutor`
4331
4552
  */
4332
4553
  async function getKnowledgeForTask(options) {
4333
- const { preparedPipeline, task } = options;
4334
- return preparedPipeline.knowledgePieces.map(({ content }) => `- ${content}`).join('\n');
4335
- // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
4554
+ const { tools, preparedPipeline, task, parameters } = options;
4555
+ const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
4556
+ const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
4557
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
4558
+ if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
4559
+ return ''; // <- Note: Np knowledge present, return empty string
4560
+ }
4561
+ try {
4562
+ // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4563
+ const _llms = arrayableToArray(tools.llm);
4564
+ const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4565
+ const taskEmbeddingPrompt = {
4566
+ title: 'Knowledge Search',
4567
+ modelRequirements: {
4568
+ modelVariant: 'EMBEDDING',
4569
+ modelName: firstKnowlegeIndex.modelName,
4570
+ },
4571
+ content: task.content,
4572
+ parameters,
4573
+ };
4574
+ const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
4575
+ const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
4576
+ const { index } = knowledgePiece;
4577
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
4578
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
4579
+ if (knowledgePieceIndex === undefined) {
4580
+ return {
4581
+ content: knowledgePiece.content,
4582
+ relevance: 0,
4583
+ };
4584
+ }
4585
+ const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
4586
+ return {
4587
+ content: knowledgePiece.content,
4588
+ relevance,
4589
+ };
4590
+ });
4591
+ const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
4592
+ const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
4593
+ console.log('!!! Embedding', {
4594
+ task,
4595
+ taskEmbeddingPrompt,
4596
+ taskEmbeddingResult,
4597
+ firstKnowlegePiece,
4598
+ firstKnowlegeIndex,
4599
+ knowledgePiecesWithRelevance,
4600
+ knowledgePiecesSorted,
4601
+ knowledgePiecesLimited,
4602
+ });
4603
+ return knowledgePiecesToString(knowledgePiecesLimited);
4604
+ }
4605
+ catch (error) {
4606
+ assertsError(error);
4607
+ console.error('Error in `getKnowledgeForTask`', error);
4608
+ // Note: If the LLM fails, just return all knowledge pieces
4609
+ return knowledgePiecesToString(preparedPipeline.knowledgePieces);
4610
+ }
4336
4611
  }
4612
+ /**
4613
+ * TODO: !!!! Verify if this is working
4614
+ * TODO: [♨] Implement Better - use keyword search
4615
+ * TODO: [♨] Examples of values
4616
+ */
4337
4617
 
4338
4618
  /**
4339
- * @@@
4619
+ * Retrieves all reserved parameters for a given pipeline task, including context, knowledge, examples, and metadata.
4620
+ * Ensures all reserved parameters are defined and throws if any are missing.
4621
+ *
4622
+ * @param options - Options including tools, pipeline, task, and context.
4623
+ * @returns An object containing all reserved parameters for the task.
4340
4624
  *
4341
4625
  * @private internal utility of `createPipelineExecutor`
4342
4626
  */
4343
4627
  async function getReservedParametersForTask(options) {
4344
- const { preparedPipeline, task, pipelineIdentification } = options;
4628
+ const { tools, preparedPipeline, task, parameters, pipelineIdentification } = options;
4345
4629
  const context = await getContextForTask(); // <- [🏍]
4346
- const knowledge = await getKnowledgeForTask({ preparedPipeline, task });
4630
+ const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task, parameters });
4347
4631
  const examples = await getExamplesForTask();
4348
4632
  const currentDate = new Date().toISOString(); // <- TODO: [🧠][💩] Better
4349
4633
  const modelName = RESERVED_PARAMETER_MISSING_VALUE;
@@ -4369,23 +4653,21 @@ async function getReservedParametersForTask(options) {
4369
4653
  }
4370
4654
 
4371
4655
  /**
4372
- * @@@
4656
+ * Executes a single task within a pipeline, handling parameter validation, error checking, and progress reporting.
4657
+ *
4658
+ * @param options - Options for execution, including the task, pipeline, parameters, and callbacks.
4659
+ * @returns The output parameters produced by the task.
4373
4660
  *
4374
4661
  * @private internal utility of `createPipelineExecutor`
4375
4662
  */
4376
4663
  async function executeTask(options) {
4377
4664
  const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSupressed, } = options;
4378
4665
  const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
4379
- await onProgress({
4380
- outputParameters: {
4381
- [currentTask.resultingParameterName]: '', // <- TODO: [🧠] What is the best value here?
4382
- },
4383
- });
4384
4666
  // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
4385
4667
  const usedParameterNames = extractParameterNamesFromTask(currentTask);
4386
4668
  const dependentParameterNames = new Set(currentTask.dependentParameterNames);
4387
4669
  // TODO: [👩🏾‍🤝‍👩🏻] Use here `mapAvailableToExpectedParameters`
4388
- if (union(difference(usedParameterNames, dependentParameterNames), difference(dependentParameterNames, usedParameterNames)).size !== 0) {
4670
+ if (difference(union(difference(usedParameterNames, dependentParameterNames), difference(dependentParameterNames, usedParameterNames)), new Set(RESERVED_PARAMETER_NAMES)).size !== 0) {
4389
4671
  throw new UnexpectedError(spaceTrim$1((block) => `
4390
4672
  Dependent parameters are not consistent with used parameters:
4391
4673
 
@@ -4405,9 +4687,11 @@ async function executeTask(options) {
4405
4687
  }
4406
4688
  const definedParameters = Object.freeze({
4407
4689
  ...(await getReservedParametersForTask({
4690
+ tools,
4408
4691
  preparedPipeline,
4409
4692
  task: currentTask,
4410
4693
  pipelineIdentification,
4694
+ parameters: parametersToPass,
4411
4695
  })),
4412
4696
  ...parametersToPass,
4413
4697
  });
@@ -4453,6 +4737,7 @@ async function executeTask(options) {
4453
4737
  preparedPipeline,
4454
4738
  tools,
4455
4739
  $executionReport,
4740
+ onProgress,
4456
4741
  pipelineIdentification,
4457
4742
  maxExecutionAttempts,
4458
4743
  maxParallelCount,
@@ -4480,7 +4765,8 @@ async function executeTask(options) {
4480
4765
  */
4481
4766
 
4482
4767
  /**
4483
- * @@@
4768
+ * Filters and returns only the output parameters from the provided pipeline execution options.
4769
+ * Adds warnings for any expected output parameters that are missing.
4484
4770
  *
4485
4771
  * @private internal utility of `createPipelineExecutor`
4486
4772
  */
@@ -4505,9 +4791,12 @@ function filterJustOutputParameters(options) {
4505
4791
  }
4506
4792
 
4507
4793
  /**
4508
- * @@@
4794
+ * Executes an entire pipeline, resolving tasks in dependency order, handling errors, and reporting progress.
4509
4795
  *
4510
- * Note: This is not a `PipelineExecutor` (which is binded with one exact pipeline), but a utility function of `createPipelineExecutor` which creates `PipelineExecutor`
4796
+ * Note: This is not a `PipelineExecutor` (which is bound to a single pipeline), but a utility function used by `createPipelineExecutor` to create a `PipelineExecutor`.
4797
+ *
4798
+ * @param options - Options for execution, including input parameters, pipeline, and callbacks.
4799
+ * @returns The result of the pipeline execution, including output parameters, errors, and usage statistics.
4511
4800
  *
4512
4801
  * @private internal utility of `createPipelineExecutor`
4513
4802
  */
@@ -4830,6 +5119,22 @@ function createPipelineExecutor(options) {
4830
5119
  cacheDirname,
4831
5120
  intermediateFilesStrategy,
4832
5121
  isAutoInstalled,
5122
+ }).catch((error) => {
5123
+ assertsError(error);
5124
+ return exportJson({
5125
+ name: 'pipelineExecutorResult',
5126
+ message: `Unuccessful PipelineExecutorResult, last catch`,
5127
+ order: [],
5128
+ value: {
5129
+ isSuccessful: false,
5130
+ errors: [serializeError(error)],
5131
+ warnings: [],
5132
+ usage: UNCERTAIN_USAGE,
5133
+ executionReport: null,
5134
+ outputParameters: {},
5135
+ preparedPipeline,
5136
+ },
5137
+ });
4833
5138
  });
4834
5139
  };
4835
5140
  const pipelineExecutor = (inputParameters) => createTask({
@@ -4961,27 +5266,48 @@ async function preparePersona(personaDescription, tools, options) {
4961
5266
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
4962
5267
  tools,
4963
5268
  });
4964
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4965
5269
  const _llms = arrayableToArray(tools.llm);
4966
5270
  const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4967
- const availableModels = await llmTools.listModels();
4968
- const availableModelNames = availableModels
5271
+ const availableModels = (await llmTools.listModels())
4969
5272
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
4970
- .map(({ modelName }) => modelName)
4971
- .join(',');
4972
- const result = await preparePersonaExecutor({ availableModelNames, personaDescription }).asPromise();
5273
+ .map(({ modelName, modelDescription }) => ({
5274
+ modelName,
5275
+ modelDescription,
5276
+ // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
5277
+ }));
5278
+ const result = await preparePersonaExecutor({
5279
+ availableModels /* <- Note: Passing as JSON */,
5280
+ personaDescription,
5281
+ }).asPromise();
4973
5282
  const { outputParameters } = result;
4974
- const { modelRequirements: modelRequirementsRaw } = outputParameters;
4975
- const modelRequirements = JSON.parse(modelRequirementsRaw);
5283
+ const { modelsRequirements: modelsRequirementsJson } = outputParameters;
5284
+ let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
4976
5285
  if (isVerbose) {
4977
- console.info(`PERSONA ${personaDescription}`, modelRequirements);
5286
+ console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
4978
5287
  }
4979
- const { modelName, systemMessage, temperature } = modelRequirements;
4980
- return {
5288
+ if (!Array.isArray(modelsRequirementsUnchecked)) {
5289
+ // <- TODO: Book should have syntax and system to enforce shape of JSON
5290
+ modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
5291
+ /*
5292
+ throw new UnexpectedError(
5293
+ spaceTrim(
5294
+ (block) => `
5295
+ Invalid \`modelsRequirements\`:
5296
+
5297
+ \`\`\`json
5298
+ ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
5299
+ \`\`\`
5300
+ `,
5301
+ ),
5302
+ );
5303
+ */
5304
+ }
5305
+ const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
4981
5306
  modelVariant: 'CHAT',
4982
- modelName,
4983
- systemMessage,
4984
- temperature,
5307
+ ...modelRequirements,
5308
+ }));
5309
+ return {
5310
+ modelsRequirements,
4985
5311
  };
4986
5312
  }
4987
5313
  /**
@@ -4992,7 +5318,8 @@ async function preparePersona(personaDescription, tools, options) {
4992
5318
  */
4993
5319
 
4994
5320
  /**
4995
- * @@@
5321
+ * Safely retrieves the global scope object (window in browser, global in Node.js)
5322
+ * regardless of the JavaScript environment in which the code is running
4996
5323
  *
4997
5324
  * Note: `$` is used to indicate that this function is not a pure function - it access global scope
4998
5325
  *
@@ -5003,10 +5330,10 @@ function $getGlobalScope() {
5003
5330
  }
5004
5331
 
5005
5332
  /**
5006
- * @@@
5333
+ * Normalizes a text string to SCREAMING_CASE (all uppercase with underscores).
5007
5334
  *
5008
- * @param text @@@
5009
- * @returns @@@
5335
+ * @param text The text string to be converted to SCREAMING_CASE format.
5336
+ * @returns The normalized text in SCREAMING_CASE format.
5010
5337
  * @example 'HELLO_WORLD'
5011
5338
  * @example 'I_LOVE_PROMPTBOOK'
5012
5339
  * @public exported from `@promptbook/utils`
@@ -5058,10 +5385,10 @@ function normalizeTo_SCREAMING_CASE(text) {
5058
5385
  */
5059
5386
 
5060
5387
  /**
5061
- * @@@
5388
+ * Normalizes a text string to snake_case format.
5062
5389
  *
5063
- * @param text @@@
5064
- * @returns @@@
5390
+ * @param text The text string to be converted to snake_case format.
5391
+ * @returns The normalized text in snake_case format.
5065
5392
  * @example 'hello_world'
5066
5393
  * @example 'i_love_promptbook'
5067
5394
  * @public exported from `@promptbook/utils`
@@ -5071,11 +5398,11 @@ function normalizeTo_snake_case(text) {
5071
5398
  }
5072
5399
 
5073
5400
  /**
5074
- * Register is @@@
5401
+ * Global registry for storing and managing registered entities of a given type.
5075
5402
  *
5076
5403
  * Note: `$` is used to indicate that this function is not a pure function - it accesses and adds variables in global scope.
5077
5404
  *
5078
- * @private internal utility, exported are only signleton instances of this class
5405
+ * @private internal utility, exported are only singleton instances of this class
5079
5406
  */
5080
5407
  class $Register {
5081
5408
  constructor(registerName) {
@@ -5125,10 +5452,10 @@ class $Register {
5125
5452
  }
5126
5453
 
5127
5454
  /**
5128
- * @@@
5455
+ * Global registry for storing metadata about all available scrapers and converters.
5129
5456
  *
5130
- * Note: `$` is used to indicate that this interacts with the global scope
5131
- * @singleton Only one instance of each register is created per build, but thare can be more @@@
5457
+ * Note: `$` is used to indicate that this interacts with the global scope.
5458
+ * @singleton Only one instance of each register is created per build, but there can be more in different contexts (e.g., tests).
5132
5459
  * @public exported from `@promptbook/core`
5133
5460
  */
5134
5461
  const $scrapersMetadataRegister = new $Register('scrapers_metadata');
@@ -5137,10 +5464,11 @@ const $scrapersMetadataRegister = new $Register('scrapers_metadata');
5137
5464
  */
5138
5465
 
5139
5466
  /**
5140
- * @@@
5467
+ * Registry for all available scrapers in the system.
5468
+ * Central point for registering and accessing different types of content scrapers.
5141
5469
  *
5142
5470
  * Note: `$` is used to indicate that this interacts with the global scope
5143
- * @singleton Only one instance of each register is created per build, but thare can be more @@@
5471
+ * @singleton Only one instance of each register is created per build, but there can be more than one in different build modules
5144
5472
  * @public exported from `@promptbook/core`
5145
5473
  */
5146
5474
  const $scrapersRegister = new $Register('scraper_constructors');
@@ -5236,10 +5564,10 @@ function $registeredScrapersMessage(availableScrapers) {
5236
5564
  */
5237
5565
 
5238
5566
  /**
5239
- * @@@
5567
+ * Converts a given text to kebab-case format.
5240
5568
  *
5241
- * @param text @@@
5242
- * @returns @@@
5569
+ * @param text The text to be converted.
5570
+ * @returns The kebab-case formatted string.
5243
5571
  * @example 'hello-world'
5244
5572
  * @example 'i-love-promptbook'
5245
5573
  * @public exported from `@promptbook/utils`
@@ -5308,7 +5636,8 @@ function knowledgeSourceContentToName(knowledgeSourceContent) {
5308
5636
  */
5309
5637
 
5310
5638
  /**
5311
- * @@@
5639
+ * Converts a name to a properly formatted subfolder path for cache storage.
5640
+ * Handles normalization and path formatting to create consistent cache directory structures.
5312
5641
  *
5313
5642
  * @private for `FileCacheStorage`
5314
5643
  */
@@ -5388,11 +5717,11 @@ function removeEmojis(text) {
5388
5717
  }
5389
5718
 
5390
5719
  /**
5391
- * @@@
5720
+ * Converts a title string into a normalized name.
5392
5721
  *
5393
- * @param value @@@
5394
- * @returns @@@
5395
- * @example @@@
5722
+ * @param value The title string to be converted to a name.
5723
+ * @returns A normalized name derived from the input title.
5724
+ * @example 'Hello World!' -> 'hello-world'
5396
5725
  * @public exported from `@promptbook/utils`
5397
5726
  */
5398
5727
  function titleToName(value) {
@@ -5443,7 +5772,9 @@ const promptbookFetch = async (urlOrRequest, init) => {
5443
5772
  */
5444
5773
 
5445
5774
  /**
5446
- * @@@
5775
+ * Factory function that creates a handler for processing knowledge sources.
5776
+ * Provides standardized processing of different types of knowledge sources
5777
+ * across various scraper implementations.
5447
5778
  *
5448
5779
  * @public exported from `@promptbook/core`
5449
5780
  */
@@ -5550,7 +5881,7 @@ async function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
5550
5881
  > },
5551
5882
  */
5552
5883
  async asJson() {
5553
- return JSON.parse(await tools.fs.readFile(filename, 'utf-8'));
5884
+ return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
5554
5885
  },
5555
5886
  async asText() {
5556
5887
  return await tools.fs.readFile(filename, 'utf-8');
@@ -5684,9 +6015,12 @@ TODO: [🧊] This is how it can look in future
5684
6015
  */
5685
6016
 
5686
6017
  /**
5687
- * @@@
6018
+ * Prepares tasks by adding knowledge to the prompt and ensuring all necessary parameters are included.
5688
6019
  *
5689
- * @public exported from `@promptbook/core`
6020
+ * @param tasks Sequence of tasks that are chained together to form a pipeline
6021
+ * @returns A promise that resolves to the prepared tasks.
6022
+ *
6023
+ * @private internal utility of `preparePipeline`
5690
6024
  */
5691
6025
  async function prepareTasks(pipeline, tools, options) {
5692
6026
  const { maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT } = options;
@@ -5808,14 +6142,14 @@ async function preparePipeline(pipeline, tools, options) {
5808
6142
  // TODO: [🖌][🧠] Implement some `mapAsync` function
5809
6143
  const preparedPersonas = new Array(personas.length);
5810
6144
  await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
5811
- const modelRequirements = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
6145
+ const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
5812
6146
  rootDirname,
5813
6147
  maxParallelCount /* <- TODO: [🪂] */,
5814
6148
  isVerbose,
5815
6149
  });
5816
6150
  const preparedPersona = {
5817
6151
  ...persona,
5818
- modelRequirements,
6152
+ modelsRequirements,
5819
6153
  preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
5820
6154
  // <- TODO: [🍙] Make some standard order of json properties
5821
6155
  };
@@ -6204,7 +6538,7 @@ const sectionCommandParser = {
6204
6538
  /**
6205
6539
  * Parses the boilerplate command
6206
6540
  *
6207
- * Note: @@@ This command is used as boilerplate for new commands - it should NOT be used in any `.book` file
6541
+ * Note: @@ This command is used as boilerplate for new commands - it should NOT be used in any `.book` file
6208
6542
  *
6209
6543
  * @see `documentationUrl` for more details
6210
6544
  * @private within the commands folder
@@ -6592,11 +6926,11 @@ const expectCommandParser = {
6592
6926
  };
6593
6927
 
6594
6928
  /**
6595
- * @@@
6929
+ * Normalizes a given text to camelCase format.
6596
6930
  *
6597
- * @param text @@@
6598
- * @param _isFirstLetterCapital @@@
6599
- * @returns @@@
6931
+ * @param text The text to be normalized.
6932
+ * @param _isFirstLetterCapital Whether the first letter should be capitalized.
6933
+ * @returns The camelCase formatted string.
6600
6934
  * @example 'helloWorld'
6601
6935
  * @example 'iLovePromptbook'
6602
6936
  * @public exported from `@promptbook/utils`
@@ -6667,11 +7001,12 @@ function removeQuotes(text) {
6667
7001
  }
6668
7002
 
6669
7003
  /**
6670
- * Function `validateParameterName` will @@@
7004
+ * Function `validateParameterName` will normalize and validate a parameter name for use in pipelines.
7005
+ * It removes diacritics, emojis, and quotes, normalizes to camelCase, and checks for reserved names and invalid characters.
6671
7006
  *
6672
- * @param parameterName @@@
6673
- * @returns @@@
6674
- * @throws {ParseError} @@@
7007
+ * @param parameterName The parameter name to validate and normalize.
7008
+ * @returns The validated and normalized parameter name.
7009
+ * @throws {ParseError} If the parameter name is empty, reserved, or contains invalid characters.
6675
7010
  * @private within the repository
6676
7011
  */
6677
7012
  function validateParameterName(parameterName) {
@@ -6741,8 +7076,6 @@ function validateParameterName(parameterName) {
6741
7076
  /**
6742
7077
  * Parses the foreach command
6743
7078
  *
6744
- * Note: @@@ This command is used as foreach for new commands - it should NOT be used in any `.book` file
6745
- *
6746
7079
  * @see `documentationUrl` for more details
6747
7080
  * @public exported from `@promptbook/editable`
6748
7081
  */
@@ -6799,14 +7132,14 @@ const foreachCommandParser = {
6799
7132
  `));
6800
7133
  // <- TODO: [🏢] List all supported format names
6801
7134
  }
6802
- const subvalueDefinition = formatDefinition.subvalueDefinitions.find((subvalueDefinition) => [subvalueDefinition.subvalueName, ...(subvalueDefinition.aliases || [])].includes(subformatName));
6803
- if (subvalueDefinition === undefined) {
7135
+ const subvalueParser = formatDefinition.subvalueParsers.find((subvalueParser) => [subvalueParser.subvalueName, ...(subvalueParser.aliases || [])].includes(subformatName));
7136
+ if (subvalueParser === undefined) {
6804
7137
  throw new ParseError(spaceTrim((block) => `
6805
7138
  Unsupported subformat name "${subformatName}" for format "${formatName}"
6806
7139
 
6807
7140
  Available subformat names for format "${formatDefinition.formatName}":
6808
- ${block(formatDefinition.subvalueDefinitions
6809
- .map((subvalueDefinition) => subvalueDefinition.subvalueName)
7141
+ ${block(formatDefinition.subvalueParsers
7142
+ .map((subvalueParser) => subvalueParser.subvalueName)
6810
7143
  .map((subvalueName) => `- ${subvalueName}`)
6811
7144
  .join('\n'))}
6812
7145
  `));
@@ -6983,14 +7316,14 @@ const formatCommandParser = {
6983
7316
  };
6984
7317
 
6985
7318
  /**
6986
- * @@@
7319
+ * Chatbot form factor definition for conversational interfaces that interact with users in a chat-like manner.
6987
7320
  *
6988
7321
  * @public exported from `@promptbook/core`
6989
7322
  */
6990
7323
  const ChatbotFormfactorDefinition = {
6991
7324
  name: 'CHATBOT',
6992
7325
  aliasNames: ['CHAT'],
6993
- description: `@@@`,
7326
+ description: `A chatbot form factor for conversational user interfaces.`,
6994
7327
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/174`,
6995
7328
  pipelineInterface: {
6996
7329
  inputParameters: [
@@ -7017,7 +7350,45 @@ const ChatbotFormfactorDefinition = {
7017
7350
  };
7018
7351
 
7019
7352
  /**
7020
- * Generator is form of app that @@@
7353
+ * Completion is formfactor that emulates completion models
7354
+ *
7355
+ * @public exported from `@promptbook/core`
7356
+ */
7357
+ const CompletionFormfactorDefinition = {
7358
+ name: 'COMPLETION',
7359
+ description: `Completion is formfactor that emulates completion models`,
7360
+ documentationUrl: `https://github.com/webgptorg/promptbook/discussions/@@`,
7361
+ // <- TODO: https://github.com/webgptorg/promptbook/discussions/new?category=concepts
7362
+ // "🔠 Completion Formfactor"
7363
+ pipelineInterface: {
7364
+ inputParameters: [
7365
+ {
7366
+ name: 'inputText',
7367
+ description: `Input text to be completed`,
7368
+ isInput: true,
7369
+ isOutput: false,
7370
+ },
7371
+ {
7372
+ name: 'instructions',
7373
+ description: `Additional instructions for the model, for example the required length, empty by default`,
7374
+ isInput: true,
7375
+ isOutput: false,
7376
+ },
7377
+ ],
7378
+ outputParameters: [
7379
+ {
7380
+ name: 'followingText',
7381
+ description: `Text that follows the input text`,
7382
+ isInput: false,
7383
+ isOutput: true,
7384
+ },
7385
+ ],
7386
+ },
7387
+ };
7388
+
7389
+ /**
7390
+ * Generator form factor represents an application that generates content or data based on user input or predefined rules.
7391
+ * This form factor is used for apps that produce outputs, such as text, images, or other media, based on provided input.
7021
7392
  *
7022
7393
  * @public exported from `@promptbook/core`
7023
7394
  */
@@ -7046,7 +7417,7 @@ const GeneratorFormfactorDefinition = {
7046
7417
  };
7047
7418
 
7048
7419
  /**
7049
- * @@@
7420
+ * Pipeline interface which is equivalent to `any`
7050
7421
  *
7051
7422
  * @see https://github.com/webgptorg/promptbook/discussions/171
7052
7423
  *
@@ -7061,13 +7432,13 @@ const GENERIC_PIPELINE_INTERFACE = {
7061
7432
  */
7062
7433
 
7063
7434
  /**
7064
- * @@@
7435
+ * A generic pipeline
7065
7436
  *
7066
7437
  * @public exported from `@promptbook/core`
7067
7438
  */
7068
7439
  const GenericFormfactorDefinition = {
7069
7440
  name: 'GENERIC',
7070
- description: `@@@`,
7441
+ description: `A generic pipeline`,
7071
7442
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/173`,
7072
7443
  pipelineInterface: GENERIC_PIPELINE_INTERFACE,
7073
7444
  };
@@ -7102,17 +7473,20 @@ const ImageGeneratorFormfactorDefinition = {
7102
7473
  };
7103
7474
 
7104
7475
  /**
7105
- * Matcher is form of app that @@@
7476
+ * Matcher is form of app that evaluates (spreadsheet) content against defined criteria or patterns,
7477
+ * determining if it matches or meets specific requirements. Used for classification,
7478
+ * validation, filtering, and quality assessment of inputs.
7106
7479
  *
7107
7480
  * @public exported from `@promptbook/core`
7108
7481
  */
7109
7482
  const MatcherFormfactorDefinition = {
7110
7483
  name: 'EXPERIMENTAL_MATCHER',
7111
- description: `@@@`,
7484
+ description: `An evaluation system that determines whether content meets specific criteria or patterns.
7485
+ Used for content validation, quality assessment, and intelligent filtering tasks. Currently in experimental phase.`,
7112
7486
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/177`,
7113
7487
  pipelineInterface: {
7114
7488
  inputParameters: [
7115
- /* @@@ */
7489
+ /* Input parameters for content to be matched and criteria to match against */
7116
7490
  {
7117
7491
  name: 'nonce',
7118
7492
  description: 'Just to prevent EXPERIMENTAL_MATCHER to be set as implicit formfactor',
@@ -7121,20 +7495,21 @@ const MatcherFormfactorDefinition = {
7121
7495
  },
7122
7496
  ],
7123
7497
  outputParameters: [
7124
- /* @@@ */
7498
+ /* Output parameters containing match results, confidence scores, and relevant metadata */
7125
7499
  ],
7126
7500
  },
7127
7501
  };
7128
7502
 
7129
7503
  /**
7130
- * Sheets is form of app that @@@
7504
+ * Sheets is form of app that processes tabular data in CSV format, allowing transformation
7505
+ * and analysis of structured data through AI-powered operations
7131
7506
  *
7132
7507
  * @public exported from `@promptbook/core`
7133
7508
  */
7134
7509
  const SheetsFormfactorDefinition = {
7135
7510
  name: 'SHEETS',
7136
7511
  aliasNames: ['SHEETS', 'SHEET'],
7137
- description: `@@@`,
7512
+ description: `A formfactor for processing spreadsheet-like data in CSV format, enabling AI transformations on tabular data`,
7138
7513
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/176`,
7139
7514
  pipelineInterface: {
7140
7515
  inputParameters: [
@@ -7157,13 +7532,16 @@ const SheetsFormfactorDefinition = {
7157
7532
  };
7158
7533
 
7159
7534
  /**
7160
- * Translator is form of app that @@@
7535
+ * Translator is form of app that transforms input text from one form to another,
7536
+ * such as language translation, style conversion, tone modification, or other text transformations.
7161
7537
  *
7162
7538
  * @public exported from `@promptbook/core`
7163
7539
  */
7164
7540
  const TranslatorFormfactorDefinition = {
7165
7541
  name: 'TRANSLATOR',
7166
- description: `@@@`,
7542
+ description: `A text transformation system that converts input content into different forms,
7543
+ including language translations, paraphrasing, style conversions, and tone adjustments.
7544
+ This form factor takes one input and produces one transformed output.`,
7167
7545
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/175`,
7168
7546
  pipelineInterface: {
7169
7547
  inputParameters: [
@@ -7200,6 +7578,8 @@ const FORMFACTOR_DEFINITIONS = [
7200
7578
  MatcherFormfactorDefinition,
7201
7579
  GeneratorFormfactorDefinition,
7202
7580
  ImageGeneratorFormfactorDefinition,
7581
+ CompletionFormfactorDefinition,
7582
+ // <- [🛬] When making new formfactor, copy the _boilerplate and link it here
7203
7583
  ];
7204
7584
  /**
7205
7585
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -7208,7 +7588,7 @@ const FORMFACTOR_DEFINITIONS = [
7208
7588
  /**
7209
7589
  * Parses the formfactor command
7210
7590
  *
7211
- * Note: @@@ This command is used as formfactor for new commands - it should NOT be used in any `.book` file
7591
+ * Note: This command is used as a formfactor for new commands and defines the app type format - it should NOT be used in any `.book` file
7212
7592
  *
7213
7593
  * @see `documentationUrl` for more details
7214
7594
  * @public exported from `@promptbook/editable`
@@ -7230,7 +7610,7 @@ const formfactorCommandParser = {
7230
7610
  /**
7231
7611
  * Description of the FORMFACTOR command
7232
7612
  */
7233
- description: `@@`,
7613
+ description: `Specifies the application type and interface requirements that this promptbook should conform to`,
7234
7614
  /**
7235
7615
  * Link to documentation
7236
7616
  */
@@ -7373,8 +7753,7 @@ const jokerCommandParser = {
7373
7753
  };
7374
7754
 
7375
7755
  /**
7376
- * @@@
7377
- *
7756
+ * @see {@link ModelVariant}
7378
7757
  * @public exported from `@promptbook/core`
7379
7758
  */
7380
7759
  const MODEL_VARIANTS = ['COMPLETION', 'CHAT', 'EMBEDDING' /* <- TODO [🏳] */ /* <- [🤖] */];
@@ -7806,10 +8185,10 @@ function $applyToTaskJson(command, $taskJson, $pipelineJson) {
7806
8185
  }
7807
8186
 
7808
8187
  /**
7809
- * @@@
8188
+ * Checks if the given value is a valid JavaScript identifier name.
7810
8189
  *
7811
- * @param javascriptName @@@
7812
- * @returns @@@
8190
+ * @param javascriptName The value to check for JavaScript identifier validity.
8191
+ * @returns `true` if the value is a valid JavaScript name, false otherwise.
7813
8192
  * @public exported from `@promptbook/utils`
7814
8193
  */
7815
8194
  function isValidJavascriptName(javascriptName) {
@@ -8289,7 +8668,10 @@ function parseCommand(raw, usagePlace) {
8289
8668
  `));
8290
8669
  }
8291
8670
  /**
8292
- * @@@
8671
+ * Generates a markdown-formatted message listing all supported commands
8672
+ * with their descriptions and documentation links
8673
+ *
8674
+ * @returns A formatted markdown string containing all available commands and their details
8293
8675
  */
8294
8676
  function getSupportedCommandsMessage() {
8295
8677
  return COMMANDS.flatMap(({ name, aliasNames, description, documentationUrl }) =>
@@ -8300,7 +8682,10 @@ function getSupportedCommandsMessage() {
8300
8682
  ]).join('\n');
8301
8683
  }
8302
8684
  /**
8303
- * @@@
8685
+ * Attempts to parse a command variant using the provided input parameters
8686
+ *
8687
+ * @param input Object containing command parsing information including raw command text and normalized values
8688
+ * @returns A parsed Command object if successful, or null if the command cannot be parsed
8304
8689
  */
8305
8690
  function parseCommandVariant(input) {
8306
8691
  const { commandNameRaw, usagePlace, normalized, args, raw, rawArgs } = input;
@@ -8347,7 +8732,7 @@ function parseCommandVariant(input) {
8347
8732
  }
8348
8733
 
8349
8734
  /**
8350
- * @@@
8735
+ * Extracts the interface (input and output parameters) from a pipeline.
8351
8736
  *
8352
8737
  * @deprecated https://github.com/webgptorg/promptbook/pull/186
8353
8738
  * @see https://github.com/webgptorg/promptbook/discussions/171
@@ -8380,7 +8765,7 @@ function getPipelineInterface(pipeline) {
8380
8765
  }
8381
8766
 
8382
8767
  /**
8383
- * @@@
8768
+ * Checks if two pipeline interfaces are structurally identical.
8384
8769
  *
8385
8770
  * @deprecated https://github.com/webgptorg/promptbook/pull/186
8386
8771
  * @see https://github.com/webgptorg/promptbook/discussions/171
@@ -8412,10 +8797,11 @@ function isPipelineInterfacesEqual(pipelineInterface1, pipelineInterface2) {
8412
8797
  }
8413
8798
 
8414
8799
  /**
8415
- * @@@
8800
+ * Checks if a given pipeline satisfies the requirements of a specified pipeline interface.
8416
8801
  *
8417
8802
  * @deprecated https://github.com/webgptorg/promptbook/pull/186
8418
8803
  * @see https://github.com/webgptorg/promptbook/discussions/171
8804
+ * @returns `true` if the pipeline implements the interface, `false` otherwise.
8419
8805
  *
8420
8806
  * @public exported from `@promptbook/core`
8421
8807
  */
@@ -8601,7 +8987,8 @@ function removeMarkdownComments(content) {
8601
8987
  }
8602
8988
 
8603
8989
  /**
8604
- * @@@
8990
+ * Utility to determine if a pipeline string is in flat format.
8991
+ * A flat pipeline is a simple text without proper structure (headers, blocks, etc).
8605
8992
  *
8606
8993
  * @public exported from `@promptbook/editable`
8607
8994
  */
@@ -8622,7 +9009,10 @@ function isFlatPipeline(pipelineString) {
8622
9009
  }
8623
9010
 
8624
9011
  /**
8625
- * @@@
9012
+ * Converts a pipeline structure to its string representation.
9013
+ *
9014
+ * Transforms a flat, simple pipeline into a properly formatted pipeline string
9015
+ * with sections for title, prompt, and return statement.
8626
9016
  *
8627
9017
  * @public exported from `@promptbook/editable`
8628
9018
  */
@@ -8679,7 +9069,7 @@ function deflatePipeline(pipelineString) {
8679
9069
  * Note: It can not work with html syntax and comments
8680
9070
  *
8681
9071
  * @param markdown any valid markdown
8682
- * @returns @@@
9072
+ * @returns An array of strings, each representing an individual list item found in the markdown
8683
9073
  * @public exported from `@promptbook/markdown-utils`
8684
9074
  */
8685
9075
  function extractAllListItemsFromMarkdown(markdown) {
@@ -9384,45 +9774,43 @@ function addAutoGeneratedSection(content, options) {
9384
9774
  */
9385
9775
  function renderPromptbookMermaid(pipelineJson, options) {
9386
9776
  const { linkTask = () => null } = options || {};
9777
+ const MERMAID_PREFIX = 'pipeline_';
9778
+ const MERMAID_KNOWLEDGE_NAME = MERMAID_PREFIX + 'knowledge';
9779
+ const MERMAID_RESERVED_NAME = MERMAID_PREFIX + 'reserved';
9780
+ const MERMAID_INPUT_NAME = MERMAID_PREFIX + 'input';
9781
+ const MERMAID_OUTPUT_NAME = MERMAID_PREFIX + 'output';
9387
9782
  const parameterNameToTaskName = (parameterName) => {
9783
+ if (parameterName === 'knowledge') {
9784
+ return MERMAID_KNOWLEDGE_NAME;
9785
+ }
9786
+ else if (RESERVED_PARAMETER_NAMES.includes(parameterName)) {
9787
+ return MERMAID_RESERVED_NAME;
9788
+ }
9388
9789
  const parameter = pipelineJson.parameters.find((parameter) => parameter.name === parameterName);
9389
9790
  if (!parameter) {
9390
9791
  throw new UnexpectedError(`Could not find {${parameterName}}`);
9391
- // <- TODO: !!6 This causes problems when {knowledge} and other reserved parameters are used
9792
+ // <- TODO: This causes problems when {knowledge} and other reserved parameters are used
9392
9793
  }
9393
9794
  if (parameter.isInput) {
9394
- return 'input';
9795
+ return MERMAID_INPUT_NAME;
9395
9796
  }
9396
9797
  const task = pipelineJson.tasks.find((task) => task.resultingParameterName === parameterName);
9397
9798
  if (!task) {
9398
9799
  throw new Error(`Could not find task for {${parameterName}}`);
9399
9800
  }
9400
- return task.name || normalizeTo_camelCase('task-' + titleToName(task.title));
9801
+ return MERMAID_PREFIX + (task.name || normalizeTo_camelCase('task-' + titleToName(task.title)));
9401
9802
  };
9402
- const promptbookMermaid = spaceTrim$1((block) => `
9403
-
9404
- %% 🔮 Tip: Open this on GitHub or in the VSCode website to see the Mermaid graph visually
9405
-
9406
- flowchart LR
9407
- subgraph "${pipelineJson.title}"
9408
-
9409
- direction TB
9410
-
9411
- input((Input)):::input
9412
- ${block(pipelineJson.tasks
9803
+ const inputAndIntermediateParametersMermaid = pipelineJson.tasks
9413
9804
  .flatMap(({ title, dependentParameterNames, resultingParameterName }) => [
9414
9805
  `${parameterNameToTaskName(resultingParameterName)}("${title}")`,
9415
9806
  ...dependentParameterNames.map((dependentParameterName) => `${parameterNameToTaskName(dependentParameterName)}--"{${dependentParameterName}}"-->${parameterNameToTaskName(resultingParameterName)}`),
9416
9807
  ])
9417
- .join('\n'))}
9418
-
9419
- ${block(pipelineJson.parameters
9808
+ .join('\n');
9809
+ const outputParametersMermaid = pipelineJson.parameters
9420
9810
  .filter(({ isOutput }) => isOutput)
9421
- .map(({ name }) => `${parameterNameToTaskName(name)}--"{${name}}"-->output`)
9422
- .join('\n'))}
9423
- output((Output)):::output
9424
-
9425
- ${block(pipelineJson.tasks
9811
+ .map(({ name }) => `${parameterNameToTaskName(name)}--"{${name}}"-->${MERMAID_OUTPUT_NAME}`)
9812
+ .join('\n');
9813
+ const linksMermaid = pipelineJson.tasks
9426
9814
  .map((task) => {
9427
9815
  const link = linkTask(task);
9428
9816
  if (link === null) {
@@ -9433,10 +9821,44 @@ function renderPromptbookMermaid(pipelineJson, options) {
9433
9821
  return `click ${taskName} href "${href}" "${title}";`;
9434
9822
  })
9435
9823
  .filter((line) => line !== '')
9436
- .join('\n'))}
9824
+ .join('\n');
9825
+ const interactionPointsMermaid = Object.entries({
9826
+ [MERMAID_INPUT_NAME]: 'Input',
9827
+ [MERMAID_OUTPUT_NAME]: 'Output',
9828
+ [MERMAID_RESERVED_NAME]: 'Other',
9829
+ [MERMAID_KNOWLEDGE_NAME]: 'Knowledge',
9830
+ })
9831
+ .filter(([MERMAID_NAME]) => (inputAndIntermediateParametersMermaid + outputParametersMermaid).includes(MERMAID_NAME))
9832
+ .map(([MERMAID_NAME, title]) => `${MERMAID_NAME}((${title})):::${MERMAID_NAME}`)
9833
+ .join('\n');
9834
+ const promptbookMermaid = spaceTrim$1((block) => `
9835
+
9836
+ %% 🔮 Tip: Open this on GitHub or in the VSCode website to see the Mermaid graph visually
9837
+
9838
+ flowchart LR
9839
+ subgraph "${pipelineJson.title}"
9840
+
9841
+ %% Basic configuration
9842
+ direction TB
9843
+
9844
+ %% Interaction points from pipeline to outside
9845
+ ${block(interactionPointsMermaid)}
9846
+
9847
+ %% Input and intermediate parameters
9848
+ ${block(inputAndIntermediateParametersMermaid)}
9849
+
9850
+
9851
+ %% Output parameters
9852
+ ${block(outputParametersMermaid)}
9437
9853
 
9438
- classDef input color: grey;
9439
- classDef output color: grey;
9854
+ %% Links
9855
+ ${block(linksMermaid)}
9856
+
9857
+ %% Styles
9858
+ classDef ${MERMAID_INPUT_NAME} color: grey;
9859
+ classDef ${MERMAID_OUTPUT_NAME} color: grey;
9860
+ classDef ${MERMAID_RESERVED_NAME} color: grey;
9861
+ classDef ${MERMAID_KNOWLEDGE_NAME} color: grey;
9440
9862
 
9441
9863
  end;
9442
9864
 
@@ -9513,7 +9935,7 @@ class CallbackInterfaceTools {
9513
9935
  }
9514
9936
 
9515
9937
  /**
9516
- * This error indicates @@@
9938
+ * This error indicates @@
9517
9939
  *
9518
9940
  * @public exported from `@promptbook/core`
9519
9941
  */
@@ -9525,7 +9947,7 @@ class BoilerplateError extends Error {
9525
9947
  }
9526
9948
  }
9527
9949
  /**
9528
- * TODO: @@@ Do not forget to add the error into `0-index.ts` ERRORS
9950
+ * TODO: @@ Do not forget to add the error into `0-index.ts` ERRORS
9529
9951
  */
9530
9952
 
9531
9953
  /**
@@ -9884,22 +10306,23 @@ function usageToHuman(usage) {
9884
10306
  */
9885
10307
 
9886
10308
  /**
9887
- * Boilerplate is form of app that @@@
10309
+ * Boilerplate is form of app that serves as a template structure for creating other formfactors
10310
+ * and should not be used directly in production.
9888
10311
  *
9889
10312
  * @public exported from `@promptbook/core`
9890
10313
  */
9891
10314
  const BoilerplateFormfactorDefinition = {
9892
10315
  name: 'BOILERPLATE',
9893
- description: `@@@`,
10316
+ description: `A template structure for creating new formfactors, providing the base architecture and interfaces that should be implemented.`,
9894
10317
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/@@`,
9895
10318
  // <- TODO: https://github.com/webgptorg/promptbook/discussions/new?category=concepts
9896
10319
  // "🔠 Boilerplate Formfactor"
9897
10320
  pipelineInterface: {
9898
10321
  inputParameters: [
9899
- /* @@@ */
10322
+ /* <- Example input parameters should be defined here */
9900
10323
  ],
9901
10324
  outputParameters: [
9902
- /* @@@ */
10325
+ /* <- Example output parameters should be defined here */
9903
10326
  ],
9904
10327
  },
9905
10328
  };
@@ -9980,10 +10403,10 @@ function filterModels(llmTools, modelFilter) {
9980
10403
  */
9981
10404
 
9982
10405
  /**
9983
- * @@@
10406
+ * Register for LLM tools metadata.
9984
10407
  *
9985
10408
  * Note: `$` is used to indicate that this interacts with the global scope
9986
- * @singleton Only one instance of each register is created per build, but thare can be more @@@
10409
+ * @singleton Only one instance of each register is created per build, but there can be more instances across different builds or environments.
9987
10410
  * @public exported from `@promptbook/core`
9988
10411
  */
9989
10412
  const $llmToolsMetadataRegister = new $Register('llm_tools_metadata');
@@ -9992,10 +10415,10 @@ const $llmToolsMetadataRegister = new $Register('llm_tools_metadata');
9992
10415
  */
9993
10416
 
9994
10417
  /**
9995
- * @@@
10418
+ * Register for LLM tools.
9996
10419
  *
9997
10420
  * Note: `$` is used to indicate that this interacts with the global scope
9998
- * @singleton Only one instance of each register is created per build, but thare can be more @@@
10421
+ * @singleton Only one instance of each register is created per build, but there can be more instances across different builds or environments.
9999
10422
  * @public exported from `@promptbook/core`
10000
10423
  */
10001
10424
  const $llmToolsRegister = new $Register('llm_execution_tools_constructors');
@@ -10146,11 +10569,16 @@ function $registeredLlmToolsMessage() {
10146
10569
  */
10147
10570
 
10148
10571
  /**
10149
- * @@@
10572
+ * Creates LLM execution tools from provided configuration objects
10573
+ *
10574
+ * Instantiates and configures LLM tool instances for each configuration entry,
10575
+ * combining them into a unified interface via MultipleLlmExecutionTools.
10150
10576
  *
10151
10577
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
10152
10578
  *
10153
- * @returns @@@
10579
+ * @param configuration Array of LLM tool configurations to instantiate
10580
+ * @param options Additional options for configuring the LLM tools
10581
+ * @returns A unified interface combining all successfully instantiated LLM tools
10154
10582
  * @public exported from `@promptbook/core`
10155
10583
  */
10156
10584
  function createLlmToolsFromConfiguration(configuration, options = {}) {
@@ -10189,7 +10617,11 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
10189
10617
  /**
10190
10618
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
10191
10619
  * TODO: [🧠][🎌] Dynamically install required providers
10192
- * TODO: @@@ write discussion about this - wizzard
10620
+ * TODO: We should implement an interactive configuration wizard that would:
10621
+ * 1. Detect which LLM providers are available in the environment
10622
+ * 2. Guide users through required configuration settings for each provider
10623
+ * 3. Allow testing connections before completing setup
10624
+ * 4. Generate appropriate configuration code for application integration
10193
10625
  * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
10194
10626
  * TODO: [🧠] Is there some meaningfull way how to test this util
10195
10627
  * TODO: This should be maybe not under `_common` but under `utils`
@@ -10285,13 +10717,36 @@ function cacheLlmTools(llmTools, options = {}) {
10285
10717
  const callCommonModel = async (prompt) => {
10286
10718
  const { parameters, content, modelRequirements } = prompt;
10287
10719
  // <- Note: These are relevant things from the prompt that the cache key should depend on.
10720
+ // TODO: Maybe some standalone function for normalization of content for cache
10721
+ let normalizedContent = content;
10722
+ normalizedContent = normalizedContent.replace(/\s+/g, ' ');
10723
+ normalizedContent = normalizedContent.split('\r\n').join('\n');
10724
+ normalizedContent = spaceTrim(normalizedContent);
10725
+ // Note: Do not need to save everything in the cache, just the relevant parameters
10726
+ const relevantParameterNames = extractParameterNames(content);
10727
+ const relevantParameters = Object.fromEntries(Object.entries(parameters).filter(([key]) => relevantParameterNames.has(key)));
10728
+ const keyHashBase = { relevantParameters, normalizedContent, modelRequirements };
10288
10729
  const key = titleToName(prompt.title.substring(0, MAX_FILENAME_LENGTH - 10) +
10289
10730
  '-' +
10290
- sha256(hexEncoder.parse(JSON.stringify({ parameters, content, modelRequirements }))).toString( /* hex */));
10731
+ sha256(hexEncoder.parse(JSON.stringify(keyHashBase)))
10732
+ .toString( /* hex */)
10733
+ .substring(0, 10 - 1));
10291
10734
  const cacheItem = !isCacheReloaded ? await storage.getItem(key) : null;
10292
10735
  if (cacheItem) {
10736
+ console.log('!!! Cache hit for key:', { key, keyHashBase });
10293
10737
  return cacheItem.promptResult;
10294
10738
  }
10739
+ console.log('!!! Cache miss for key:', key, {
10740
+ prompt,
10741
+ 'prompt.title': prompt.title,
10742
+ MAX_FILENAME_LENGTH,
10743
+ keyHashBase,
10744
+ parameters,
10745
+ relevantParameters,
10746
+ content,
10747
+ normalizedContent,
10748
+ modelRequirements,
10749
+ });
10295
10750
  let promptResult;
10296
10751
  variant: switch (prompt.modelRequirements.modelVariant) {
10297
10752
  case 'CHAT':
@@ -10312,7 +10767,16 @@ function cacheLlmTools(llmTools, options = {}) {
10312
10767
  await storage.setItem(key, {
10313
10768
  date: $getCurrentDate(),
10314
10769
  promptbookVersion: PROMPTBOOK_ENGINE_VERSION,
10315
- prompt,
10770
+ bookVersion: BOOK_LANGUAGE_VERSION,
10771
+ prompt: {
10772
+ ...prompt,
10773
+ parameters: Object.entries(parameters).length === Object.entries(relevantParameters).length
10774
+ ? parameters
10775
+ : {
10776
+ ...relevantParameters,
10777
+ note: `<- Note: Only relevant parameters are stored in the cache`,
10778
+ },
10779
+ },
10316
10780
  promptResult,
10317
10781
  });
10318
10782
  return promptResult;
@@ -10338,13 +10802,13 @@ function cacheLlmTools(llmTools, options = {}) {
10338
10802
  /**
10339
10803
  * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
10340
10804
  * TODO: [🧠] Is there some meaningfull way how to test this util
10341
- * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
10342
- * @@@ write discussion about this and storages
10343
- * @@@ write how to combine multiple interceptors
10805
+ * TODO: [👷‍♂️] Comprehensive manual about construction of llmTools
10806
+ * Detailed explanation about caching strategies and appropriate storage selection for different use cases
10807
+ * Examples of how to combine multiple interceptors for advanced caching, logging, and usage tracking
10344
10808
  */
10345
10809
 
10346
10810
  /**
10347
- * @@@
10811
+ * Wraps LlmExecutionTools to limit the total usage based on provided limits.
10348
10812
  *
10349
10813
  * @public exported from `@promptbook/core`
10350
10814
  */
@@ -10395,9 +10859,11 @@ const _AnthropicClaudeMetadataRegistration = $llmToolsMetadataRegister.register(
10395
10859
  packageName: '@promptbook/anthropic-claude',
10396
10860
  className: 'AnthropicClaudeExecutionTools',
10397
10861
  envVariables: ['ANTHROPIC_CLAUDE_API_KEY'],
10862
+ trustLevel: 'CLOSED',
10863
+ order: MODEL_ORDERS.TOP_TIER,
10398
10864
  getBoilerplateConfiguration() {
10399
10865
  return {
10400
- title: 'Anthropic Claude (boilerplate)',
10866
+ title: 'Anthropic Claude',
10401
10867
  packageName: '@promptbook/anthropic-claude',
10402
10868
  className: 'AnthropicClaudeExecutionTools',
10403
10869
  options: {
@@ -10440,9 +10906,11 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
10440
10906
  packageName: '@promptbook/azure-openai',
10441
10907
  className: 'AzureOpenAiExecutionTools',
10442
10908
  envVariables: ['AZUREOPENAI_RESOURCE_NAME', 'AZUREOPENAI_DEPLOYMENT_NAME', 'AZUREOPENAI_API_KEY'],
10909
+ trustLevel: 'CLOSED_BUSINESS',
10910
+ order: MODEL_ORDERS.NORMAL,
10443
10911
  getBoilerplateConfiguration() {
10444
10912
  return {
10445
- title: 'Azure Open AI (boilerplate)',
10913
+ title: 'Azure Open AI',
10446
10914
  packageName: '@promptbook/azure-openai',
10447
10915
  className: 'AzureOpenAiExecutionTools',
10448
10916
  options: {
@@ -10526,9 +10994,11 @@ const _DeepseekMetadataRegistration = $llmToolsMetadataRegister.register({
10526
10994
  packageName: '@promptbook/deepseek',
10527
10995
  className: 'DeepseekExecutionTools',
10528
10996
  envVariables: ['DEEPSEEK_GENERATIVE_AI_API_KEY'],
10997
+ trustLevel: 'UNTRUSTED',
10998
+ order: MODEL_ORDERS.NORMAL,
10529
10999
  getBoilerplateConfiguration() {
10530
11000
  return {
10531
- title: 'Deepseek (boilerplate)',
11001
+ title: 'Deepseek',
10532
11002
  packageName: '@promptbook/deepseek',
10533
11003
  className: 'DeepseekExecutionTools',
10534
11004
  options: {
@@ -10575,9 +11045,11 @@ const _GoogleMetadataRegistration = $llmToolsMetadataRegister.register({
10575
11045
  packageName: '@promptbook/google',
10576
11046
  className: 'GoogleExecutionTools',
10577
11047
  envVariables: ['GOOGLE_GENERATIVE_AI_API_KEY'],
11048
+ trustLevel: 'CLOSED',
11049
+ order: MODEL_ORDERS.NORMAL,
10578
11050
  getBoilerplateConfiguration() {
10579
11051
  return {
10580
- title: 'Google Gemini (boilerplate)',
11052
+ title: 'Google Gemini',
10581
11053
  packageName: '@promptbook/google',
10582
11054
  className: 'GoogleExecutionTools',
10583
11055
  options: {
@@ -10624,13 +11096,16 @@ const _OpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
10624
11096
  packageName: '@promptbook/openai',
10625
11097
  className: 'OpenAiExecutionTools',
10626
11098
  envVariables: ['OPENAI_API_KEY'],
11099
+ trustLevel: 'CLOSED',
11100
+ order: MODEL_ORDERS.TOP_TIER,
10627
11101
  getBoilerplateConfiguration() {
10628
11102
  return {
10629
- title: 'Open AI (boilerplate)',
11103
+ title: 'Open AI',
10630
11104
  packageName: '@promptbook/openai',
10631
11105
  className: 'OpenAiExecutionTools',
10632
11106
  options: {
10633
11107
  apiKey: 'sk-',
11108
+ maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
10634
11109
  },
10635
11110
  };
10636
11111
  },
@@ -10650,9 +11125,9 @@ const _OpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
10650
11125
  },
10651
11126
  });
10652
11127
  /**
10653
- * @@@ registration1 of default configuration for Open AI
11128
+ * Registration of the OpenAI Assistant metadata
10654
11129
  *
10655
- * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
11130
+ * Note: [🏐] Configurations registrations are done in the metadata registration section, but the constructor registration is handled separately.
10656
11131
  *
10657
11132
  * @public exported from `@promptbook/core`
10658
11133
  * @public exported from `@promptbook/wizzard`
@@ -10664,9 +11139,11 @@ const _OpenAiAssistantMetadataRegistration = $llmToolsMetadataRegister.register(
10664
11139
  className: 'OpenAiAssistantExecutionTools',
10665
11140
  envVariables: null,
10666
11141
  // <- TODO: ['OPENAI_API_KEY', 'OPENAI_ASSISTANT_ID']
11142
+ trustLevel: 'CLOSED',
11143
+ order: MODEL_ORDERS.NORMAL,
10667
11144
  getBoilerplateConfiguration() {
10668
11145
  return {
10669
- title: 'Open AI Assistant (boilerplate)',
11146
+ title: 'Open AI Assistant',
10670
11147
  packageName: '@promptbook/openai',
10671
11148
  className: 'OpenAiAssistantExecutionTools',
10672
11149
  options: {
@@ -10698,6 +11175,52 @@ const _OpenAiAssistantMetadataRegistration = $llmToolsMetadataRegister.register(
10698
11175
  * Note: [💞] Ignore a discrepancy between file name and entity name
10699
11176
  */
10700
11177
 
11178
+ /**
11179
+ * Migrates the pipeline to the latest version
11180
+ *
11181
+ * Note: Migration does not do heavy lifting like calling the LLMs, just lightweight changes of the structure
11182
+ *
11183
+ * @public exported from `@promptbook/core`
11184
+ */
11185
+ function migratePipeline(deprecatedPipeline) {
11186
+ /* eslint-disable prefer-const */
11187
+ let { pipelineUrl, sourceFile, title, bookVersion, description, formfactorName, parameters, tasks, knowledgeSources, knowledgePieces, personas, preparations, sources, } = deprecatedPipeline;
11188
+ let isChanged = false;
11189
+ personas = personas.map((persona) => {
11190
+ const migratedPersona = { ...persona }; /* <- TODO: [🌪] */
11191
+ if (migratedPersona.modelRequirements !== undefined) {
11192
+ isChanged = true;
11193
+ migratedPersona.modelsRequirements = [migratedPersona.modelRequirements];
11194
+ delete migratedPersona.modelRequirements;
11195
+ }
11196
+ return migratedPersona;
11197
+ });
11198
+ if (!isChanged) {
11199
+ // Note: If nothing to migrate, return the same pipeline
11200
+ return deprecatedPipeline;
11201
+ }
11202
+ const migratedPipeline = {
11203
+ pipelineUrl,
11204
+ sourceFile,
11205
+ title,
11206
+ bookVersion,
11207
+ description,
11208
+ formfactorName,
11209
+ parameters,
11210
+ tasks,
11211
+ knowledgeSources,
11212
+ knowledgePieces,
11213
+ personas,
11214
+ preparations,
11215
+ sources,
11216
+ // <- TODO: [🍙] Make some standard order of json properties
11217
+ };
11218
+ console.info(`Book automatically migrated`, { deprecatedPipeline, migratedPipeline });
11219
+ // console.info(`Book automatically migrated from ${} -> ${}`, {deprecatedPipeline,migratedPipeline})
11220
+ // <- TODO: Report the versions of the migration, DO not migrate backwards, throw `CompatibilityError` when given newer version than current version of the engine and link the NPM + Docker packages
11221
+ return migratedPipeline;
11222
+ }
11223
+
10701
11224
  /**
10702
11225
  * Function `isValidPipelineString` will validate the if the string is a valid pipeline string
10703
11226
  * It does not check if the string is fully logically correct, but if it is a string that can be a pipeline string or the string looks completely different.
@@ -10783,8 +11306,8 @@ function prompt(strings, ...values) {
10783
11306
  * 2) `promptTemplate` alias for `prompt`
10784
11307
  * 3) `book` for notating and validating entire books exported from `@promptbook/utils`
10785
11308
  *
10786
- * @param strings @@@
10787
- * @param values @@@
11309
+ * @param strings The static string parts of the template literal
11310
+ * @param values The dynamic values embedded within the template literal used as data
10788
11311
  * @returns the pipeline string
10789
11312
  * @public exported from `@promptbook/core`
10790
11313
  */
@@ -10851,14 +11374,14 @@ const boilerplateScraperMetadata = $deepFreeze({
10851
11374
  packageName: '@promptbook/boilerplate',
10852
11375
  className: 'BoilerplateScraper',
10853
11376
  mimeTypes: [
10854
- '@@@/@@@',
10855
- // <- TODO: @@@ Add compatible mime types with Boilerplate scraper
11377
+ '@@/@@',
11378
+ // <- TODO: @@ Add compatible mime types with Boilerplate scraper
10856
11379
  ],
10857
- documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@@',
11380
+ documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
10858
11381
  isAvilableInBrowser: false,
10859
11382
  // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
10860
11383
  requiredExecutables: [
10861
- /* @@@ 'Pandoc' */
11384
+ /* @@ 'Pandoc' */
10862
11385
  ],
10863
11386
  }); /* <- Note: [🤛] */
10864
11387
  /**
@@ -11130,5 +11653,5 @@ class PrefixStorage {
11130
11653
  }
11131
11654
  }
11132
11655
 
11133
- export { $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, AbstractFormatError, AuthenticationError, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CsvFormatDefinition, CsvFormatError, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_TITLE, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FORMFACTOR_DEFINITIONS, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotFoundError, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, PLAYGROUND_APP_ID, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, REMOTE_SERVER_URLS, RESERVED_PARAMETER_NAMES, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatDefinition, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, 
_AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, addUsage, book, cacheLlmTools, collectionToJson, compilePipeline, countUsage, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createLlmToolsFromConfiguration, createPipelineExecutor, createSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, getPipelineInterface, identificationToPromptbookToken, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, parsePipeline, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prepareTasks, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validatePipeline, validatePipelineString };
11656
+ export { $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, AbstractFormatError, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_TITLE, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotFoundError, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, REMOTE_SERVER_URLS, RESERVED_PARAMETER_NAMES, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, 
TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, addUsage, book, cacheLlmTools, collectionToJson, compilePipeline, computeCosineSimilarity, countUsage, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createLlmToolsFromConfiguration, createPipelineExecutor, createSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, getPipelineInterface, identificationToPromptbookToken, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, parsePipeline, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validatePipeline, validatePipelineString };
11134
11657
  //# sourceMappingURL=index.es.js.map