@promptbook/core 0.92.0-26 → 0.92.0-28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/esm/index.es.js +128 -83
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +4 -4
  4. package/esm/typings/src/commands/FOREACH/foreachCommandParser.d.ts +0 -2
  5. package/esm/typings/src/commands/_BOILERPLATE/boilerplateCommandParser.d.ts +1 -1
  6. package/esm/typings/src/constants.d.ts +35 -0
  7. package/esm/typings/src/executables/$provideExecutablesForNode.d.ts +1 -1
  8. package/esm/typings/src/executables/apps/locateLibreoffice.d.ts +2 -1
  9. package/esm/typings/src/executables/apps/locatePandoc.d.ts +2 -1
  10. package/esm/typings/src/executables/platforms/locateAppOnLinux.d.ts +2 -1
  11. package/esm/typings/src/executables/platforms/locateAppOnMacOs.d.ts +2 -1
  12. package/esm/typings/src/executables/platforms/locateAppOnWindows.d.ts +2 -1
  13. package/esm/typings/src/execution/AbstractTaskResult.d.ts +1 -1
  14. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +2 -1
  15. package/esm/typings/src/execution/PipelineExecutorResult.d.ts +1 -1
  16. package/esm/typings/src/execution/createPipelineExecutor/$OngoingTaskResult.d.ts +12 -9
  17. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +20 -14
  18. package/esm/typings/src/execution/createPipelineExecutor/filterJustOutputParameters.d.ts +7 -6
  19. package/esm/typings/src/execution/createPipelineExecutor/getContextForTask.d.ts +5 -1
  20. package/esm/typings/src/execution/createPipelineExecutor/getExamplesForTask.d.ts +1 -1
  21. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +8 -11
  22. package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +4 -4
  23. package/esm/typings/src/execution/utils/uncertainNumber.d.ts +3 -2
  24. package/esm/typings/src/formats/csv/CsvSettings.d.ts +2 -2
  25. package/esm/typings/src/formfactors/_common/AbstractFormfactorDefinition.d.ts +16 -7
  26. package/esm/typings/src/formfactors/_common/FormfactorDefinition.d.ts +3 -1
  27. package/esm/typings/src/formfactors/chatbot/ChatbotFormfactorDefinition.d.ts +2 -2
  28. package/esm/typings/src/formfactors/completion/CompletionFormfactorDefinition.d.ts +1 -1
  29. package/esm/typings/src/formfactors/generator/GeneratorFormfactorDefinition.d.ts +2 -1
  30. package/esm/typings/src/formfactors/generic/GenericFormfactorDefinition.d.ts +2 -2
  31. package/esm/typings/src/formfactors/index.d.ts +5 -5
  32. package/esm/typings/src/formfactors/matcher/MatcherFormfactorDefinition.d.ts +4 -2
  33. package/esm/typings/src/formfactors/translator/TranslatorFormfactorDefinition.d.ts +3 -2
  34. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +4 -3
  35. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsFromEnv.d.ts +17 -4
  36. package/esm/typings/src/llm-providers/_common/register/LlmToolsConfiguration.d.ts +11 -4
  37. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +21 -42
  38. package/esm/typings/src/llm-providers/_common/register/LlmToolsOptions.d.ts +5 -1
  39. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +12 -3
  40. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +5 -0
  41. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  42. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  43. package/esm/typings/src/llm-providers/google/google-models.d.ts +1 -1
  44. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  45. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
  46. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
  47. package/esm/typings/src/version.d.ts +1 -1
  48. package/package.json +1 -1
  49. package/umd/index.umd.js +129 -84
  50. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -27,7 +27,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
27
27
  * @generated
28
28
  * @see https://github.com/webgptorg/promptbook
29
29
  */
30
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-26';
30
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-28';
31
31
  /**
32
32
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
33
33
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1099,6 +1099,42 @@ function exportJson(options) {
1099
1099
  * TODO: [🧠] Is there a way how to meaningfully test this utility
1100
1100
  */
1101
1101
 
1102
+ /**
1103
+ * How is the model provider trusted?
1104
+ *
1105
+ * @public exported from `@promptbook/core`
1106
+ */
1107
+ const MODEL_TRUST_LEVELS = {
1108
+ FULL: `Model is running on the local machine, training data and model weights are known, data are ethically sourced`,
1109
+ OPEN: `Model is open source, training data and model weights are known`,
1110
+ PARTIALLY_OPEN: `Model is open source, but training data and model weights are not (fully) known`,
1111
+ CLOSED_LOCAL: `Model can be run locally, but it is not open source`,
1112
+ CLOSED_FREE: `Model is behind API gateway but free to use`,
1113
+ CLOSED_BUSINESS: `Model is behind API gateway and paid but has good SLA, TOS, privacy policy and in general is a good to use in business applications`,
1114
+ CLOSED: `Model is behind API gateway and paid`,
1115
+ UNTRUSTED: `Model has questions about the training data and ethics, but it is not known if it is a problem or not`,
1116
+ VURNABLE: `Model has some known serious vulnerabilities, leaks, ethical problems, etc.`,
1117
+ };
1118
+ // <- TODO: Maybe do better levels of trust
1119
+ /**
1120
+ * How is the model provider important?
1121
+ *
1122
+ * @public exported from `@promptbook/core`
1123
+ */
1124
+ const MODEL_ORDERS = {
1125
+ /**
1126
+ * Top-tier models, e.g. OpenAI, Anthropic,...
1127
+ */
1128
+ TOP_TIER: 333,
1129
+ /**
1130
+ * Mid-tier models, e.g. Llama, Mistral, etc.
1131
+ */
1132
+ NORMAL: 100,
1133
+ /**
1134
+ * Low-tier models, e.g. Phi, Tiny, etc.
1135
+ */
1136
+ LOW_TIER: 0,
1137
+ };
1102
1138
  /**
1103
1139
  * Order of keys in the pipeline JSON
1104
1140
  *
@@ -2780,7 +2816,7 @@ function union(...sets) {
2780
2816
  }
2781
2817
 
2782
2818
  /**
2783
- * @@@
2819
+ * Contains configuration options for parsing and generating CSV files, such as delimiters and quoting rules.
2784
2820
  *
2785
2821
  * @public exported from `@promptbook/core`
2786
2822
  */
@@ -3989,8 +4025,12 @@ function isPassingExpectations(expectations, value) {
3989
4025
  */
3990
4026
 
3991
4027
  /**
3992
- * @@@
4028
+ * Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
4029
+ * (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
4030
+ * Throws errors if execution fails after all attempts.
3993
4031
  *
4032
+ * @param options - The options for execution, including task, parameters, pipeline, and configuration.
4033
+ * @returns The result string of the executed task.
3994
4034
  * @private internal utility of `createPipelineExecutor`
3995
4035
  */
3996
4036
  async function executeAttempts(options) {
@@ -4448,8 +4488,12 @@ async function executeFormatSubvalues(options) {
4448
4488
  }
4449
4489
 
4450
4490
  /**
4451
- * @@@
4491
+ * Returns the context for a given task, typically used to provide additional information or variables
4492
+ * required for the execution of the task within a pipeline. The context is returned as a string value
4493
+ * that may include markdown formatting.
4452
4494
  *
4495
+ * @param task - The task for which the context is being generated. This should be a deeply immutable TaskJson object.
4496
+ * @returns The context as a string, formatted as markdown and parameter value.
4453
4497
  * @private internal utility of `createPipelineExecutor`
4454
4498
  */
4455
4499
  async function getContextForTask(task) {
@@ -4457,7 +4501,7 @@ async function getContextForTask(task) {
4457
4501
  }
4458
4502
 
4459
4503
  /**
4460
- * @@@
4504
+ * Retrieves example values or templates for a given task, used to guide or validate pipeline execution.
4461
4505
  *
4462
4506
  * @private internal utility of `createPipelineExecutor`
4463
4507
  */
@@ -4504,9 +4548,8 @@ function knowledgePiecesToString(knowledgePieces) {
4504
4548
  }
4505
4549
 
4506
4550
  /**
4507
- * @@@
4508
- *
4509
- * Here is the place where RAG (retrieval-augmented generation) happens
4551
+ * Retrieves the most relevant knowledge pieces for a given task using embedding-based similarity search.
4552
+ * This is where retrieval-augmented generation (RAG) is performed to enhance the task with external knowledge.
4510
4553
  *
4511
4554
  * @private internal utility of `createPipelineExecutor`
4512
4555
  */
@@ -4725,7 +4768,8 @@ async function executeTask(options) {
4725
4768
  */
4726
4769
 
4727
4770
  /**
4728
- * @@@
4771
+ * Filters and returns only the output parameters from the provided pipeline execution options.
4772
+ * Adds warnings for any expected output parameters that are missing.
4729
4773
  *
4730
4774
  * @private internal utility of `createPipelineExecutor`
4731
4775
  */
@@ -6497,7 +6541,7 @@ const sectionCommandParser = {
6497
6541
  /**
6498
6542
  * Parses the boilerplate command
6499
6543
  *
6500
- * Note: @@@ This command is used as boilerplate for new commands - it should NOT be used in any `.book` file
6544
+ * Note: @@ This command is used as boilerplate for new commands - it should NOT be used in any `.book` file
6501
6545
  *
6502
6546
  * @see `documentationUrl` for more details
6503
6547
  * @private within the commands folder
@@ -7035,8 +7079,6 @@ function validateParameterName(parameterName) {
7035
7079
  /**
7036
7080
  * Parses the foreach command
7037
7081
  *
7038
- * Note: @@@ This command is used as foreach for new commands - it should NOT be used in any `.book` file
7039
- *
7040
7082
  * @see `documentationUrl` for more details
7041
7083
  * @public exported from `@promptbook/editable`
7042
7084
  */
@@ -7277,14 +7319,14 @@ const formatCommandParser = {
7277
7319
  };
7278
7320
 
7279
7321
  /**
7280
- * @@@
7322
+ * Chatbot form factor definition for conversational interfaces that interact with users in a chat-like manner.
7281
7323
  *
7282
7324
  * @public exported from `@promptbook/core`
7283
7325
  */
7284
7326
  const ChatbotFormfactorDefinition = {
7285
7327
  name: 'CHATBOT',
7286
7328
  aliasNames: ['CHAT'],
7287
- description: `@@@`,
7329
+ description: `A chatbot form factor for conversational user interfaces.`,
7288
7330
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/174`,
7289
7331
  pipelineInterface: {
7290
7332
  inputParameters: [
@@ -7317,7 +7359,7 @@ const ChatbotFormfactorDefinition = {
7317
7359
  */
7318
7360
  const CompletionFormfactorDefinition = {
7319
7361
  name: 'COMPLETION',
7320
- description: `@@@`,
7362
+ description: `Completion is formfactor that emulates completion models`,
7321
7363
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/@@`,
7322
7364
  // <- TODO: https://github.com/webgptorg/promptbook/discussions/new?category=concepts
7323
7365
  // "🔠 Completion Formfactor"
@@ -7348,7 +7390,8 @@ const CompletionFormfactorDefinition = {
7348
7390
  };
7349
7391
 
7350
7392
  /**
7351
- * Generator is form of app that @@@
7393
+ * Generator form factor represents an application that generates content or data based on user input or predefined rules.
7394
+ * This form factor is used for apps that produce outputs, such as text, images, or other media, based on provided input.
7352
7395
  *
7353
7396
  * @public exported from `@promptbook/core`
7354
7397
  */
@@ -7392,13 +7435,13 @@ const GENERIC_PIPELINE_INTERFACE = {
7392
7435
  */
7393
7436
 
7394
7437
  /**
7395
- * @@@
7438
+ * A generic pipeline
7396
7439
  *
7397
7440
  * @public exported from `@promptbook/core`
7398
7441
  */
7399
7442
  const GenericFormfactorDefinition = {
7400
7443
  name: 'GENERIC',
7401
- description: `@@@`,
7444
+ description: `A generic pipeline`,
7402
7445
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/173`,
7403
7446
  pipelineInterface: GENERIC_PIPELINE_INTERFACE,
7404
7447
  };
@@ -7433,17 +7476,20 @@ const ImageGeneratorFormfactorDefinition = {
7433
7476
  };
7434
7477
 
7435
7478
  /**
7436
- * Matcher is form of app that @@@
7479
+ * Matcher is form of app that evaluates (spreadsheet) content against defined criteria or patterns,
7480
+ * determining if it matches or meets specific requirements. Used for classification,
7481
+ * validation, filtering, and quality assessment of inputs.
7437
7482
  *
7438
7483
  * @public exported from `@promptbook/core`
7439
7484
  */
7440
7485
  const MatcherFormfactorDefinition = {
7441
7486
  name: 'EXPERIMENTAL_MATCHER',
7442
- description: `@@@`,
7487
+ description: `An evaluation system that determines whether content meets specific criteria or patterns.
7488
+ Used for content validation, quality assessment, and intelligent filtering tasks. Currently in experimental phase.`,
7443
7489
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/177`,
7444
7490
  pipelineInterface: {
7445
7491
  inputParameters: [
7446
- /* @@@ */
7492
+ /* Input parameters for content to be matched and criteria to match against */
7447
7493
  {
7448
7494
  name: 'nonce',
7449
7495
  description: 'Just to prevent EXPERIMENTAL_MATCHER to be set as implicit formfactor',
@@ -7452,7 +7498,7 @@ const MatcherFormfactorDefinition = {
7452
7498
  },
7453
7499
  ],
7454
7500
  outputParameters: [
7455
- /* @@@ */
7501
+ /* Output parameters containing match results, confidence scores, and relevant metadata */
7456
7502
  ],
7457
7503
  },
7458
7504
  };
@@ -7489,13 +7535,16 @@ const SheetsFormfactorDefinition = {
7489
7535
  };
7490
7536
 
7491
7537
  /**
7492
- * Translator is form of app that @@@
7538
+ * Translator is form of app that transforms input text from one form to another,
7539
+ * such as language translation, style conversion, tone modification, or other text transformations.
7493
7540
  *
7494
7541
  * @public exported from `@promptbook/core`
7495
7542
  */
7496
7543
  const TranslatorFormfactorDefinition = {
7497
7544
  name: 'TRANSLATOR',
7498
- description: `@@@`,
7545
+ description: `A text transformation system that converts input content into different forms,
7546
+ including language translations, paraphrasing, style conversions, and tone adjustments.
7547
+ This form factor takes one input and produces one transformed output.`,
7499
7548
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/175`,
7500
7549
  pipelineInterface: {
7501
7550
  inputParameters: [
@@ -8622,7 +8671,10 @@ function parseCommand(raw, usagePlace) {
8622
8671
  `));
8623
8672
  }
8624
8673
  /**
8625
- * @@@
8674
+ * Generates a markdown-formatted message listing all supported commands
8675
+ * with their descriptions and documentation links
8676
+ *
8677
+ * @returns A formatted markdown string containing all available commands and their details
8626
8678
  */
8627
8679
  function getSupportedCommandsMessage() {
8628
8680
  return COMMANDS.flatMap(({ name, aliasNames, description, documentationUrl }) =>
@@ -8633,7 +8685,10 @@ function getSupportedCommandsMessage() {
8633
8685
  ]).join('\n');
8634
8686
  }
8635
8687
  /**
8636
- * @@@
8688
+ * Attempts to parse a command variant using the provided input parameters
8689
+ *
8690
+ * @param input Object containing command parsing information including raw command text and normalized values
8691
+ * @returns A parsed Command object if successful, or null if the command cannot be parsed
8637
8692
  */
8638
8693
  function parseCommandVariant(input) {
8639
8694
  const { commandNameRaw, usagePlace, normalized, args, raw, rawArgs } = input;
@@ -10494,11 +10549,16 @@ function $registeredLlmToolsMessage() {
10494
10549
  */
10495
10550
 
10496
10551
  /**
10497
- * @@@
10552
+ * Creates LLM execution tools from provided configuration objects
10553
+ *
10554
+ * Instantiates and configures LLM tool instances for each configuration entry,
10555
+ * combining them into a unified interface via MultipleLlmExecutionTools.
10498
10556
  *
10499
10557
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
10500
10558
  *
10501
- * @returns @@@
10559
+ * @param configuration Array of LLM tool configurations to instantiate
10560
+ * @param options Additional options for configuring the LLM tools
10561
+ * @returns A unified interface combining all successfully instantiated LLM tools
10502
10562
  * @public exported from `@promptbook/core`
10503
10563
  */
10504
10564
  function createLlmToolsFromConfiguration(configuration, options = {}) {
@@ -10537,55 +10597,17 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
10537
10597
  /**
10538
10598
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
10539
10599
  * TODO: [🧠][🎌] Dynamically install required providers
10540
- * TODO: @@@ write discussion about this - wizzard
10600
+ * TODO: We should implement an interactive configuration wizard that would:
10601
+ * 1. Detect which LLM providers are available in the environment
10602
+ * 2. Guide users through required configuration settings for each provider
10603
+ * 3. Allow testing connections before completing setup
10604
+ * 4. Generate appropriate configuration code for application integration
10541
10605
  * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
10542
10606
  * TODO: [🧠] Is there some meaningfull way how to test this util
10543
10607
  * TODO: This should be maybe not under `_common` but under `utils`
10544
10608
  * TODO: [®] DRY Register logic
10545
10609
  */
10546
10610
 
10547
- /**
10548
- * How is the model provider trusted?
10549
- *
10550
- * @public exported from `@promptbook/core`
10551
- */
10552
- const MODEL_TRUST_LEVEL = {
10553
- FULL: `Model is running on the local machine, training data and model weights are known, data are ethically sourced`,
10554
- OPEN: `Model is open source, training data and model weights are known`,
10555
- PARTIALLY_OPEN: `Model is open source, but training data and model weights are not (fully) known`,
10556
- CLOSED_LOCAL: `Model can be run locally, but it is not open source`,
10557
- CLOSED_FREE: `Model is behind API gateway but free to use`,
10558
- CLOSED_BUSINESS: `Model is behind API gateway and paid but has good SLA, TOS, privacy policy and in general is a good to use in business applications`,
10559
- CLOSED: `Model is behind API gateway and paid`,
10560
- UNTRUSTED: `Model has questions about the training data and ethics, but it is not known if it is a problem or not`,
10561
- VURNABLE: `Model has some known serious vulnerabilities, leaks, ethical problems, etc.`,
10562
- };
10563
- // <- TODO: Maybe do better levels of trust
10564
- /**
10565
- * How is the model provider important?
10566
- *
10567
- * @public exported from `@promptbook/core`
10568
- */
10569
- const MODEL_ORDER = {
10570
- /**
10571
- * Top-tier models, e.g. OpenAI, Anthropic,...
10572
- */
10573
- TOP_TIER: 333,
10574
- /**
10575
- * Mid-tier models, e.g. Llama, Mistral, etc.
10576
- */
10577
- NORMAL: 100,
10578
- /**
10579
- * Low-tier models, e.g. Phi, Tiny, etc.
10580
- */
10581
- LOW_TIER: 0,
10582
- };
10583
- /**
10584
- * TODO: Add configuration schema and maybe some documentation link
10585
- * TODO: Maybe constrain LlmToolsConfiguration[number] by generic to ensure that `createConfigurationFromEnv` and `getBoilerplateConfiguration` always create same `packageName` and `className`
10586
- * TODO: [®] DRY Register logic
10587
- */
10588
-
10589
10611
  /**
10590
10612
  * Stores data in memory (HEAP)
10591
10613
  *
@@ -10675,20 +10697,34 @@ function cacheLlmTools(llmTools, options = {}) {
10675
10697
  const callCommonModel = async (prompt) => {
10676
10698
  const { parameters, content, modelRequirements } = prompt;
10677
10699
  // <- Note: These are relevant things from the prompt that the cache key should depend on.
10700
+ // TODO: Maybe some standalone function for normalization of content for cache
10701
+ let normalizedContent = content;
10702
+ normalizedContent = normalizedContent.replace(/\s+/g, ' ');
10703
+ normalizedContent = normalizedContent.split('\r\n').join('\n');
10704
+ normalizedContent = spaceTrim(normalizedContent);
10705
+ // Note: Do not need to save everything in the cache, just the relevant parameters
10706
+ const relevantParameterNames = extractParameterNames(content);
10707
+ const relevantParameters = Object.fromEntries(Object.entries(parameters).filter(([key]) => relevantParameterNames.has(key)));
10708
+ const keyHashBase = { relevantParameters, normalizedContent, modelRequirements };
10678
10709
  const key = titleToName(prompt.title.substring(0, MAX_FILENAME_LENGTH - 10) +
10679
10710
  '-' +
10680
- sha256(hexEncoder.parse(JSON.stringify({ parameters, content, modelRequirements }))).toString( /* hex */));
10711
+ sha256(hexEncoder.parse(JSON.stringify(keyHashBase)))
10712
+ .toString( /* hex */)
10713
+ .substring(0, 10 - 1));
10681
10714
  const cacheItem = !isCacheReloaded ? await storage.getItem(key) : null;
10682
10715
  if (cacheItem) {
10683
- console.log('!!! Cache hit for key:', key);
10716
+ console.log('!!! Cache hit for key:', { key, keyHashBase });
10684
10717
  return cacheItem.promptResult;
10685
10718
  }
10686
10719
  console.log('!!! Cache miss for key:', key, {
10687
10720
  prompt,
10688
10721
  'prompt.title': prompt.title,
10689
10722
  MAX_FILENAME_LENGTH,
10723
+ keyHashBase,
10690
10724
  parameters,
10725
+ relevantParameters,
10691
10726
  content,
10727
+ normalizedContent,
10692
10728
  modelRequirements,
10693
10729
  });
10694
10730
  let promptResult;
@@ -10711,7 +10747,16 @@ function cacheLlmTools(llmTools, options = {}) {
10711
10747
  await storage.setItem(key, {
10712
10748
  date: $getCurrentDate(),
10713
10749
  promptbookVersion: PROMPTBOOK_ENGINE_VERSION,
10714
- prompt,
10750
+ bookVersion: BOOK_LANGUAGE_VERSION,
10751
+ prompt: {
10752
+ ...prompt,
10753
+ parameters: Object.entries(parameters).length === Object.entries(relevantParameters).length
10754
+ ? parameters
10755
+ : {
10756
+ ...relevantParameters,
10757
+ note: `<- Note: Only relevant parameters are stored in the cache`,
10758
+ },
10759
+ },
10715
10760
  promptResult,
10716
10761
  });
10717
10762
  return promptResult;
@@ -10795,7 +10840,7 @@ const _AnthropicClaudeMetadataRegistration = $llmToolsMetadataRegister.register(
10795
10840
  className: 'AnthropicClaudeExecutionTools',
10796
10841
  envVariables: ['ANTHROPIC_CLAUDE_API_KEY'],
10797
10842
  trustLevel: 'CLOSED',
10798
- order: MODEL_ORDER.TOP_TIER,
10843
+ order: MODEL_ORDERS.TOP_TIER,
10799
10844
  getBoilerplateConfiguration() {
10800
10845
  return {
10801
10846
  title: 'Anthropic Claude',
@@ -10842,7 +10887,7 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
10842
10887
  className: 'AzureOpenAiExecutionTools',
10843
10888
  envVariables: ['AZUREOPENAI_RESOURCE_NAME', 'AZUREOPENAI_DEPLOYMENT_NAME', 'AZUREOPENAI_API_KEY'],
10844
10889
  trustLevel: 'CLOSED_BUSINESS',
10845
- order: MODEL_ORDER.NORMAL,
10890
+ order: MODEL_ORDERS.NORMAL,
10846
10891
  getBoilerplateConfiguration() {
10847
10892
  return {
10848
10893
  title: 'Azure Open AI',
@@ -10930,7 +10975,7 @@ const _DeepseekMetadataRegistration = $llmToolsMetadataRegister.register({
10930
10975
  className: 'DeepseekExecutionTools',
10931
10976
  envVariables: ['DEEPSEEK_GENERATIVE_AI_API_KEY'],
10932
10977
  trustLevel: 'UNTRUSTED',
10933
- order: MODEL_ORDER.NORMAL,
10978
+ order: MODEL_ORDERS.NORMAL,
10934
10979
  getBoilerplateConfiguration() {
10935
10980
  return {
10936
10981
  title: 'Deepseek',
@@ -10981,7 +11026,7 @@ const _GoogleMetadataRegistration = $llmToolsMetadataRegister.register({
10981
11026
  className: 'GoogleExecutionTools',
10982
11027
  envVariables: ['GOOGLE_GENERATIVE_AI_API_KEY'],
10983
11028
  trustLevel: 'CLOSED',
10984
- order: MODEL_ORDER.NORMAL,
11029
+ order: MODEL_ORDERS.NORMAL,
10985
11030
  getBoilerplateConfiguration() {
10986
11031
  return {
10987
11032
  title: 'Google Gemini',
@@ -11032,7 +11077,7 @@ const _OpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
11032
11077
  className: 'OpenAiExecutionTools',
11033
11078
  envVariables: ['OPENAI_API_KEY'],
11034
11079
  trustLevel: 'CLOSED',
11035
- order: MODEL_ORDER.TOP_TIER,
11080
+ order: MODEL_ORDERS.TOP_TIER,
11036
11081
  getBoilerplateConfiguration() {
11037
11082
  return {
11038
11083
  title: 'Open AI',
@@ -11060,9 +11105,9 @@ const _OpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
11060
11105
  },
11061
11106
  });
11062
11107
  /**
11063
- * @@@ registration1 of default configuration for Open AI
11108
+ * Registration of the OpenAI Assistant metadata
11064
11109
  *
11065
- * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
11110
+ * Note: [🏐] Configurations registrations are done in the metadata registration section, but the constructor registration is handled separately.
11066
11111
  *
11067
11112
  * @public exported from `@promptbook/core`
11068
11113
  * @public exported from `@promptbook/wizzard`
@@ -11075,7 +11120,7 @@ const _OpenAiAssistantMetadataRegistration = $llmToolsMetadataRegister.register(
11075
11120
  envVariables: null,
11076
11121
  // <- TODO: ['OPENAI_API_KEY', 'OPENAI_ASSISTANT_ID']
11077
11122
  trustLevel: 'CLOSED',
11078
- order: MODEL_ORDER.NORMAL,
11123
+ order: MODEL_ORDERS.NORMAL,
11079
11124
  getBoilerplateConfiguration() {
11080
11125
  return {
11081
11126
  title: 'Open AI Assistant',
@@ -11588,5 +11633,5 @@ class PrefixStorage {
11588
11633
  }
11589
11634
  }
11590
11635
 
11591
- export { $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, AbstractFormatError, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_RPM, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_TITLE, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDER, MODEL_TRUST_LEVEL, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotFoundError, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, REMOTE_SERVER_URLS, RESERVED_PARAMETER_NAMES, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatParser, 
TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, addUsage, book, cacheLlmTools, collectionToJson, compilePipeline, computeCosineSimilarity, countUsage, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createLlmToolsFromConfiguration, createPipelineExecutor, createSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, getPipelineInterface, identificationToPromptbookToken, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, parsePipeline, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validatePipeline, validatePipelineString };
11636
+ export { $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, AbstractFormatError, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_RPM, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_TITLE, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotFoundError, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, REMOTE_SERVER_URLS, RESERVED_PARAMETER_NAMES, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatParser, 
TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, addUsage, book, cacheLlmTools, collectionToJson, compilePipeline, computeCosineSimilarity, countUsage, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createLlmToolsFromConfiguration, createPipelineExecutor, createSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, getPipelineInterface, identificationToPromptbookToken, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, parsePipeline, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validatePipeline, validatePipelineString };
11592
11637
  //# sourceMappingURL=index.es.js.map