@promptbook/cli 0.92.0-26 → 0.92.0-28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/esm/index.es.js +194 -128
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +4 -4
  4. package/esm/typings/src/commands/FOREACH/foreachCommandParser.d.ts +0 -2
  5. package/esm/typings/src/commands/_BOILERPLATE/boilerplateCommandParser.d.ts +1 -1
  6. package/esm/typings/src/constants.d.ts +35 -0
  7. package/esm/typings/src/executables/$provideExecutablesForNode.d.ts +1 -1
  8. package/esm/typings/src/executables/apps/locateLibreoffice.d.ts +2 -1
  9. package/esm/typings/src/executables/apps/locatePandoc.d.ts +2 -1
  10. package/esm/typings/src/executables/platforms/locateAppOnLinux.d.ts +2 -1
  11. package/esm/typings/src/executables/platforms/locateAppOnMacOs.d.ts +2 -1
  12. package/esm/typings/src/executables/platforms/locateAppOnWindows.d.ts +2 -1
  13. package/esm/typings/src/execution/AbstractTaskResult.d.ts +1 -1
  14. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +2 -1
  15. package/esm/typings/src/execution/PipelineExecutorResult.d.ts +1 -1
  16. package/esm/typings/src/execution/createPipelineExecutor/$OngoingTaskResult.d.ts +12 -9
  17. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +20 -14
  18. package/esm/typings/src/execution/createPipelineExecutor/filterJustOutputParameters.d.ts +7 -6
  19. package/esm/typings/src/execution/createPipelineExecutor/getContextForTask.d.ts +5 -1
  20. package/esm/typings/src/execution/createPipelineExecutor/getExamplesForTask.d.ts +1 -1
  21. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +8 -11
  22. package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +4 -4
  23. package/esm/typings/src/execution/utils/uncertainNumber.d.ts +3 -2
  24. package/esm/typings/src/formats/csv/CsvSettings.d.ts +2 -2
  25. package/esm/typings/src/formfactors/_common/AbstractFormfactorDefinition.d.ts +16 -7
  26. package/esm/typings/src/formfactors/_common/FormfactorDefinition.d.ts +3 -1
  27. package/esm/typings/src/formfactors/chatbot/ChatbotFormfactorDefinition.d.ts +2 -2
  28. package/esm/typings/src/formfactors/completion/CompletionFormfactorDefinition.d.ts +1 -1
  29. package/esm/typings/src/formfactors/generator/GeneratorFormfactorDefinition.d.ts +2 -1
  30. package/esm/typings/src/formfactors/generic/GenericFormfactorDefinition.d.ts +2 -2
  31. package/esm/typings/src/formfactors/index.d.ts +5 -5
  32. package/esm/typings/src/formfactors/matcher/MatcherFormfactorDefinition.d.ts +4 -2
  33. package/esm/typings/src/formfactors/translator/TranslatorFormfactorDefinition.d.ts +3 -2
  34. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +4 -3
  35. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsFromEnv.d.ts +17 -4
  36. package/esm/typings/src/llm-providers/_common/register/LlmToolsConfiguration.d.ts +11 -4
  37. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +21 -42
  38. package/esm/typings/src/llm-providers/_common/register/LlmToolsOptions.d.ts +5 -1
  39. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +12 -3
  40. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +5 -0
  41. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  42. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  43. package/esm/typings/src/llm-providers/google/google-models.d.ts +1 -1
  44. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  45. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
  46. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
  47. package/esm/typings/src/version.d.ts +1 -1
  48. package/package.json +1 -1
  49. package/umd/index.umd.js +194 -128
  50. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -47,7 +47,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
47
47
  * @generated
48
48
  * @see https://github.com/webgptorg/promptbook
49
49
  */
50
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-26';
50
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-28';
51
51
  /**
52
52
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
53
53
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1551,6 +1551,26 @@ function exportJson(options) {
1551
1551
  * TODO: [🧠] Is there a way how to meaningfully test this utility
1552
1552
  */
1553
1553
 
1554
+ // <- TODO: Maybe do better levels of trust
1555
+ /**
1556
+ * How is the model provider important?
1557
+ *
1558
+ * @public exported from `@promptbook/core`
1559
+ */
1560
+ const MODEL_ORDERS = {
1561
+ /**
1562
+ * Top-tier models, e.g. OpenAI, Anthropic,...
1563
+ */
1564
+ TOP_TIER: 333,
1565
+ /**
1566
+ * Mid-tier models, e.g. Llama, Mistral, etc.
1567
+ */
1568
+ NORMAL: 100,
1569
+ /**
1570
+ * Low-tier models, e.g. Phi, Tiny, etc.
1571
+ */
1572
+ LOW_TIER: 0,
1573
+ };
1554
1574
  /**
1555
1575
  * Order of keys in the pipeline JSON
1556
1576
  *
@@ -2663,6 +2683,23 @@ class RemoteLlmExecutionTools {
2663
2683
  * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
2664
2684
  */
2665
2685
 
2686
+ /**
2687
+ * Parses the task and returns the list of all parameter names
2688
+ *
2689
+ * @param template the string template with parameters in {curly} braces
2690
+ * @returns the list of parameter names
2691
+ * @public exported from `@promptbook/utils`
2692
+ */
2693
+ function extractParameterNames(template) {
2694
+ const matches = template.matchAll(/{\w+}/g);
2695
+ const parameterNames = new Set();
2696
+ for (const match of matches) {
2697
+ const parameterName = match[0].slice(1, -1);
2698
+ parameterNames.add(parameterName);
2699
+ }
2700
+ return parameterNames;
2701
+ }
2702
+
2666
2703
  /**
2667
2704
  * Stores data in memory (HEAP)
2668
2705
  *
@@ -2752,20 +2789,34 @@ function cacheLlmTools(llmTools, options = {}) {
2752
2789
  const callCommonModel = async (prompt) => {
2753
2790
  const { parameters, content, modelRequirements } = prompt;
2754
2791
  // <- Note: These are relevant things from the prompt that the cache key should depend on.
2792
+ // TODO: Maybe some standalone function for normalization of content for cache
2793
+ let normalizedContent = content;
2794
+ normalizedContent = normalizedContent.replace(/\s+/g, ' ');
2795
+ normalizedContent = normalizedContent.split('\r\n').join('\n');
2796
+ normalizedContent = spaceTrim(normalizedContent);
2797
+ // Note: Do not need to save everything in the cache, just the relevant parameters
2798
+ const relevantParameterNames = extractParameterNames(content);
2799
+ const relevantParameters = Object.fromEntries(Object.entries(parameters).filter(([key]) => relevantParameterNames.has(key)));
2800
+ const keyHashBase = { relevantParameters, normalizedContent, modelRequirements };
2755
2801
  const key = titleToName(prompt.title.substring(0, MAX_FILENAME_LENGTH - 10) +
2756
2802
  '-' +
2757
- sha256(hexEncoder.parse(JSON.stringify({ parameters, content, modelRequirements }))).toString( /* hex */));
2803
+ sha256(hexEncoder.parse(JSON.stringify(keyHashBase)))
2804
+ .toString( /* hex */)
2805
+ .substring(0, 10 - 1));
2758
2806
  const cacheItem = !isCacheReloaded ? await storage.getItem(key) : null;
2759
2807
  if (cacheItem) {
2760
- console.log('!!! Cache hit for key:', key);
2808
+ console.log('!!! Cache hit for key:', { key, keyHashBase });
2761
2809
  return cacheItem.promptResult;
2762
2810
  }
2763
2811
  console.log('!!! Cache miss for key:', key, {
2764
2812
  prompt,
2765
2813
  'prompt.title': prompt.title,
2766
2814
  MAX_FILENAME_LENGTH,
2815
+ keyHashBase,
2767
2816
  parameters,
2817
+ relevantParameters,
2768
2818
  content,
2819
+ normalizedContent,
2769
2820
  modelRequirements,
2770
2821
  });
2771
2822
  let promptResult;
@@ -2788,7 +2839,16 @@ function cacheLlmTools(llmTools, options = {}) {
2788
2839
  await storage.setItem(key, {
2789
2840
  date: $getCurrentDate(),
2790
2841
  promptbookVersion: PROMPTBOOK_ENGINE_VERSION,
2791
- prompt,
2842
+ bookVersion: BOOK_LANGUAGE_VERSION,
2843
+ prompt: {
2844
+ ...prompt,
2845
+ parameters: Object.entries(parameters).length === Object.entries(relevantParameters).length
2846
+ ? parameters
2847
+ : {
2848
+ ...relevantParameters,
2849
+ note: `<- Note: Only relevant parameters are stored in the cache`,
2850
+ },
2851
+ },
2792
2852
  promptResult,
2793
2853
  });
2794
2854
  return promptResult;
@@ -3230,11 +3290,16 @@ function joinLlmExecutionTools(...llmExecutionTools) {
3230
3290
  */
3231
3291
 
3232
3292
  /**
3233
- * @@@
3293
+ * Creates LLM execution tools from provided configuration objects
3294
+ *
3295
+ * Instantiates and configures LLM tool instances for each configuration entry,
3296
+ * combining them into a unified interface via MultipleLlmExecutionTools.
3234
3297
  *
3235
3298
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
3236
3299
  *
3237
- * @returns @@@
3300
+ * @param configuration Array of LLM tool configurations to instantiate
3301
+ * @param options Additional options for configuring the LLM tools
3302
+ * @returns A unified interface combining all successfully instantiated LLM tools
3238
3303
  * @public exported from `@promptbook/core`
3239
3304
  */
3240
3305
  function createLlmToolsFromConfiguration(configuration, options = {}) {
@@ -3273,7 +3338,11 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
3273
3338
  /**
3274
3339
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
3275
3340
  * TODO: [🧠][🎌] Dynamically install required providers
3276
- * TODO: @@@ write discussion about this - wizzard
3341
+ * TODO: We should implement an interactive configuration wizard that would:
3342
+ * 1. Detect which LLM providers are available in the environment
3343
+ * 2. Guide users through required configuration settings for each provider
3344
+ * 3. Allow testing connections before completing setup
3345
+ * 4. Generate appropriate configuration code for application integration
3277
3346
  * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
3278
3347
  * TODO: [🧠] Is there some meaningfull way how to test this util
3279
3348
  * TODO: This should be maybe not under `_common` but under `utils`
@@ -3281,11 +3350,14 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
3281
3350
  */
3282
3351
 
3283
3352
  /**
3284
- * @@@
3353
+ * Automatically configures LLM tools from environment variables in Node.js
3354
+ *
3355
+ * This utility function detects available LLM providers based on environment variables
3356
+ * and creates properly configured LLM execution tools for each detected provider.
3285
3357
  *
3286
3358
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
3287
3359
  *
3288
- * @@@ .env
3360
+ * Supports environment variables from .env files when dotenv is configured
3289
3361
  * Note: `$` is used to indicate that this function is not a pure function - it uses filesystem to access `.env` file
3290
3362
  *
3291
3363
  * It looks for environment variables:
@@ -3293,7 +3365,8 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
3293
3365
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
3294
3366
  * - ...
3295
3367
  *
3296
- * @returns @@@
3368
+ * @param options Configuration options for the LLM tools
3369
+ * @returns A unified interface containing all detected and configured LLM tools
3297
3370
  * @public exported from `@promptbook/node`
3298
3371
  */
3299
3372
  async function $provideLlmToolsFromEnv(options = {}) {
@@ -3319,7 +3392,16 @@ async function $provideLlmToolsFromEnv(options = {}) {
3319
3392
  return createLlmToolsFromConfiguration(configuration, options);
3320
3393
  }
3321
3394
  /**
3322
- * TODO: @@@ write `$provideLlmToolsFromEnv` vs `$provideLlmToolsConfigurationFromEnv` vs `createLlmToolsFromConfiguration`
3395
+ * TODO: The architecture for LLM tools configuration consists of three key functions:
3396
+ * 1. `$provideLlmToolsFromEnv` - High-level function that detects available providers from env vars and returns ready-to-use LLM tools
3397
+ * 2. `$provideLlmToolsConfigurationFromEnv` - Middle layer that extracts configuration objects from environment variables
3398
+ * 3. `createLlmToolsFromConfiguration` - Low-level function that instantiates LLM tools from explicit configuration
3399
+ *
3400
+ * This layered approach allows flexibility in how tools are configured:
3401
+ * - Use $provideLlmToolsFromEnv for automatic detection and setup in Node.js environments
3402
+ * - Use $provideLlmToolsConfigurationFromEnv to extract config objects for modification before instantiation
3403
+ * - Use createLlmToolsFromConfiguration for explicit control over tool configurations
3404
+ *
3323
3405
  * TODO: [🧠][🍛] Which name is better `$provideLlmToolsFromEnv` or `$provideLlmToolsFromEnvironment`?
3324
3406
  * TODO: [🧠] Is there some meaningfull way how to test this util
3325
3407
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
@@ -3732,7 +3814,8 @@ function $execCommand(options) {
3732
3814
  */
3733
3815
 
3734
3816
  /**
3735
- * @@@
3817
+ * Attempts to locate the specified application on a Linux system using the 'which' command.
3818
+ * Returns the path to the executable if found, or null otherwise.
3736
3819
  *
3737
3820
  * @private within the repository
3738
3821
  */
@@ -3775,7 +3858,8 @@ async function isExecutable(path, fs) {
3775
3858
  // eslint-disable-next-line @typescript-eslint/no-var-requires
3776
3859
  const userhome = require('userhome');
3777
3860
  /**
3778
- * @@@
3861
+ * Attempts to locate the specified application on a macOS system by checking standard application paths and using mdfind.
3862
+ * Returns the path to the executable if found, or null otherwise.
3779
3863
  *
3780
3864
  * @private within the repository
3781
3865
  */
@@ -3807,7 +3891,8 @@ async function locateAppOnMacOs({ macOsName, }) {
3807
3891
  */
3808
3892
 
3809
3893
  /**
3810
- * @@@
3894
+ * Attempts to locate the specified application on a Windows system by searching common installation directories.
3895
+ * Returns the path to the executable if found, or null otherwise.
3811
3896
  *
3812
3897
  * @private within the repository
3813
3898
  */
@@ -3878,7 +3963,8 @@ function locateApp(options) {
3878
3963
  */
3879
3964
 
3880
3965
  /**
3881
- * @@@
3966
+ * Locates the LibreOffice executable on the current system by searching platform-specific paths.
3967
+ * Returns the path to the executable if found, or null otherwise.
3882
3968
  *
3883
3969
  * @private within the repository
3884
3970
  */
@@ -3896,7 +3982,8 @@ function locateLibreoffice() {
3896
3982
  */
3897
3983
 
3898
3984
  /**
3899
- * @@@
3985
+ * Locates the Pandoc executable on the current system by searching platform-specific paths.
3986
+ * Returns the path to the executable if found, or null otherwise.
3900
3987
  *
3901
3988
  * @private within the repository
3902
3989
  */
@@ -3914,7 +4001,7 @@ function locatePandoc() {
3914
4001
  */
3915
4002
 
3916
4003
  /**
3917
- * @@@
4004
+ * Provides paths to required executables (i.e. as Pandoc and LibreOffice) for Node.js environments.
3918
4005
  *
3919
4006
  * @public exported from `@promptbook/node`
3920
4007
  */
@@ -4787,23 +4874,6 @@ function taskParameterJsonToString(taskParameterJson) {
4787
4874
  * TODO: [🧠] Should be in generated .book.md file GENERATOR_WARNING
4788
4875
  */
4789
4876
 
4790
- /**
4791
- * Parses the task and returns the list of all parameter names
4792
- *
4793
- * @param template the string template with parameters in {curly} braces
4794
- * @returns the list of parameter names
4795
- * @public exported from `@promptbook/utils`
4796
- */
4797
- function extractParameterNames(template) {
4798
- const matches = template.matchAll(/{\w+}/g);
4799
- const parameterNames = new Set();
4800
- for (const match of matches) {
4801
- const parameterName = match[0].slice(1, -1);
4802
- parameterNames.add(parameterName);
4803
- }
4804
- return parameterNames;
4805
- }
4806
-
4807
4877
  /**
4808
4878
  * Unprepare just strips the preparation data of the pipeline
4809
4879
  *
@@ -5440,7 +5510,7 @@ function union(...sets) {
5440
5510
  }
5441
5511
 
5442
5512
  /**
5443
- * @@@
5513
+ * Contains configuration options for parsing and generating CSV files, such as delimiters and quoting rules.
5444
5514
  *
5445
5515
  * @public exported from `@promptbook/core`
5446
5516
  */
@@ -6157,8 +6227,12 @@ function checkExpectations(expectations, value) {
6157
6227
  */
6158
6228
 
6159
6229
  /**
6160
- * @@@
6230
+ * Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
6231
+ * (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
6232
+ * Throws errors if execution fails after all attempts.
6161
6233
  *
6234
+ * @param options - The options for execution, including task, parameters, pipeline, and configuration.
6235
+ * @returns The result string of the executed task.
6162
6236
  * @private internal utility of `createPipelineExecutor`
6163
6237
  */
6164
6238
  async function executeAttempts(options) {
@@ -6616,8 +6690,12 @@ async function executeFormatSubvalues(options) {
6616
6690
  }
6617
6691
 
6618
6692
  /**
6619
- * @@@
6693
+ * Returns the context for a given task, typically used to provide additional information or variables
6694
+ * required for the execution of the task within a pipeline. The context is returned as a string value
6695
+ * that may include markdown formatting.
6620
6696
  *
6697
+ * @param task - The task for which the context is being generated. This should be a deeply immutable TaskJson object.
6698
+ * @returns The context as a string, formatted as markdown and parameter value.
6621
6699
  * @private internal utility of `createPipelineExecutor`
6622
6700
  */
6623
6701
  async function getContextForTask(task) {
@@ -6625,7 +6703,7 @@ async function getContextForTask(task) {
6625
6703
  }
6626
6704
 
6627
6705
  /**
6628
- * @@@
6706
+ * Retrieves example values or templates for a given task, used to guide or validate pipeline execution.
6629
6707
  *
6630
6708
  * @private internal utility of `createPipelineExecutor`
6631
6709
  */
@@ -6672,9 +6750,8 @@ function knowledgePiecesToString(knowledgePieces) {
6672
6750
  }
6673
6751
 
6674
6752
  /**
6675
- * @@@
6676
- *
6677
- * Here is the place where RAG (retrieval-augmented generation) happens
6753
+ * Retrieves the most relevant knowledge pieces for a given task using embedding-based similarity search.
6754
+ * This is where retrieval-augmented generation (RAG) is performed to enhance the task with external knowledge.
6678
6755
  *
6679
6756
  * @private internal utility of `createPipelineExecutor`
6680
6757
  */
@@ -6893,7 +6970,8 @@ async function executeTask(options) {
6893
6970
  */
6894
6971
 
6895
6972
  /**
6896
- * @@@
6973
+ * Filters and returns only the output parameters from the provided pipeline execution options.
6974
+ * Adds warnings for any expected output parameters that are missing.
6897
6975
  *
6898
6976
  * @private internal utility of `createPipelineExecutor`
6899
6977
  */
@@ -8191,7 +8269,7 @@ const sectionCommandParser = {
8191
8269
  /**
8192
8270
  * Parses the boilerplate command
8193
8271
  *
8194
- * Note: @@@ This command is used as boilerplate for new commands - it should NOT be used in any `.book` file
8272
+ * Note: @@ This command is used as boilerplate for new commands - it should NOT be used in any `.book` file
8195
8273
  *
8196
8274
  * @see `documentationUrl` for more details
8197
8275
  * @private within the commands folder
@@ -8729,8 +8807,6 @@ function validateParameterName(parameterName) {
8729
8807
  /**
8730
8808
  * Parses the foreach command
8731
8809
  *
8732
- * Note: @@@ This command is used as foreach for new commands - it should NOT be used in any `.book` file
8733
- *
8734
8810
  * @see `documentationUrl` for more details
8735
8811
  * @public exported from `@promptbook/editable`
8736
8812
  */
@@ -8971,14 +9047,14 @@ const formatCommandParser = {
8971
9047
  };
8972
9048
 
8973
9049
  /**
8974
- * @@@
9050
+ * Chatbot form factor definition for conversational interfaces that interact with users in a chat-like manner.
8975
9051
  *
8976
9052
  * @public exported from `@promptbook/core`
8977
9053
  */
8978
9054
  const ChatbotFormfactorDefinition = {
8979
9055
  name: 'CHATBOT',
8980
9056
  aliasNames: ['CHAT'],
8981
- description: `@@@`,
9057
+ description: `A chatbot form factor for conversational user interfaces.`,
8982
9058
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/174`,
8983
9059
  pipelineInterface: {
8984
9060
  inputParameters: [
@@ -9011,7 +9087,7 @@ const ChatbotFormfactorDefinition = {
9011
9087
  */
9012
9088
  const CompletionFormfactorDefinition = {
9013
9089
  name: 'COMPLETION',
9014
- description: `@@@`,
9090
+ description: `Completion is formfactor that emulates completion models`,
9015
9091
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/@@`,
9016
9092
  // <- TODO: https://github.com/webgptorg/promptbook/discussions/new?category=concepts
9017
9093
  // "🔠 Completion Formfactor"
@@ -9042,7 +9118,8 @@ const CompletionFormfactorDefinition = {
9042
9118
  };
9043
9119
 
9044
9120
  /**
9045
- * Generator is form of app that @@@
9121
+ * Generator form factor represents an application that generates content or data based on user input or predefined rules.
9122
+ * This form factor is used for apps that produce outputs, such as text, images, or other media, based on provided input.
9046
9123
  *
9047
9124
  * @public exported from `@promptbook/core`
9048
9125
  */
@@ -9086,13 +9163,13 @@ const GENERIC_PIPELINE_INTERFACE = {
9086
9163
  */
9087
9164
 
9088
9165
  /**
9089
- * @@@
9166
+ * A generic pipeline
9090
9167
  *
9091
9168
  * @public exported from `@promptbook/core`
9092
9169
  */
9093
9170
  const GenericFormfactorDefinition = {
9094
9171
  name: 'GENERIC',
9095
- description: `@@@`,
9172
+ description: `A generic pipeline`,
9096
9173
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/173`,
9097
9174
  pipelineInterface: GENERIC_PIPELINE_INTERFACE,
9098
9175
  };
@@ -9127,17 +9204,20 @@ const ImageGeneratorFormfactorDefinition = {
9127
9204
  };
9128
9205
 
9129
9206
  /**
9130
- * Matcher is form of app that @@@
9207
+ * Matcher is form of app that evaluates (spreadsheet) content against defined criteria or patterns,
9208
+ * determining if it matches or meets specific requirements. Used for classification,
9209
+ * validation, filtering, and quality assessment of inputs.
9131
9210
  *
9132
9211
  * @public exported from `@promptbook/core`
9133
9212
  */
9134
9213
  const MatcherFormfactorDefinition = {
9135
9214
  name: 'EXPERIMENTAL_MATCHER',
9136
- description: `@@@`,
9215
+ description: `An evaluation system that determines whether content meets specific criteria or patterns.
9216
+ Used for content validation, quality assessment, and intelligent filtering tasks. Currently in experimental phase.`,
9137
9217
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/177`,
9138
9218
  pipelineInterface: {
9139
9219
  inputParameters: [
9140
- /* @@@ */
9220
+ /* Input parameters for content to be matched and criteria to match against */
9141
9221
  {
9142
9222
  name: 'nonce',
9143
9223
  description: 'Just to prevent EXPERIMENTAL_MATCHER to be set as implicit formfactor',
@@ -9146,7 +9226,7 @@ const MatcherFormfactorDefinition = {
9146
9226
  },
9147
9227
  ],
9148
9228
  outputParameters: [
9149
- /* @@@ */
9229
+ /* Output parameters containing match results, confidence scores, and relevant metadata */
9150
9230
  ],
9151
9231
  },
9152
9232
  };
@@ -9183,13 +9263,16 @@ const SheetsFormfactorDefinition = {
9183
9263
  };
9184
9264
 
9185
9265
  /**
9186
- * Translator is form of app that @@@
9266
+ * Translator is form of app that transforms input text from one form to another,
9267
+ * such as language translation, style conversion, tone modification, or other text transformations.
9187
9268
  *
9188
9269
  * @public exported from `@promptbook/core`
9189
9270
  */
9190
9271
  const TranslatorFormfactorDefinition = {
9191
9272
  name: 'TRANSLATOR',
9192
- description: `@@@`,
9273
+ description: `A text transformation system that converts input content into different forms,
9274
+ including language translations, paraphrasing, style conversions, and tone adjustments.
9275
+ This form factor takes one input and produces one transformed output.`,
9193
9276
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/175`,
9194
9277
  pipelineInterface: {
9195
9278
  inputParameters: [
@@ -10316,7 +10399,10 @@ function parseCommand(raw, usagePlace) {
10316
10399
  `));
10317
10400
  }
10318
10401
  /**
10319
- * @@@
10402
+ * Generates a markdown-formatted message listing all supported commands
10403
+ * with their descriptions and documentation links
10404
+ *
10405
+ * @returns A formatted markdown string containing all available commands and their details
10320
10406
  */
10321
10407
  function getSupportedCommandsMessage() {
10322
10408
  return COMMANDS.flatMap(({ name, aliasNames, description, documentationUrl }) =>
@@ -10327,7 +10413,10 @@ function getSupportedCommandsMessage() {
10327
10413
  ]).join('\n');
10328
10414
  }
10329
10415
  /**
10330
- * @@@
10416
+ * Attempts to parse a command variant using the provided input parameters
10417
+ *
10418
+ * @param input Object containing command parsing information including raw command text and normalized values
10419
+ * @returns A parsed Command object if successful, or null if the command cannot be parsed
10331
10420
  */
10332
10421
  function parseCommandVariant(input) {
10333
10422
  const { commandNameRaw, usagePlace, normalized, args, raw, rawArgs } = input;
@@ -14866,37 +14955,6 @@ const _CLI = {
14866
14955
  * Note: [🟡] Code in this file should never be published outside of `@promptbook/cli`
14867
14956
  */
14868
14957
 
14869
- /**
14870
- * How is the model provider trusted?
14871
- *
14872
- * @public exported from `@promptbook/core`
14873
- */
14874
- // <- TODO: Maybe do better levels of trust
14875
- /**
14876
- * How is the model provider important?
14877
- *
14878
- * @public exported from `@promptbook/core`
14879
- */
14880
- const MODEL_ORDER = {
14881
- /**
14882
- * Top-tier models, e.g. OpenAI, Anthropic,...
14883
- */
14884
- TOP_TIER: 333,
14885
- /**
14886
- * Mid-tier models, e.g. Llama, Mistral, etc.
14887
- */
14888
- NORMAL: 100,
14889
- /**
14890
- * Low-tier models, e.g. Phi, Tiny, etc.
14891
- */
14892
- LOW_TIER: 0,
14893
- };
14894
- /**
14895
- * TODO: Add configuration schema and maybe some documentation link
14896
- * TODO: Maybe constrain LlmToolsConfiguration[number] by generic to ensure that `createConfigurationFromEnv` and `getBoilerplateConfiguration` always create same `packageName` and `className`
14897
- * TODO: [®] DRY Register logic
14898
- */
14899
-
14900
14958
  /**
14901
14959
  * Registration of LLM provider metadata
14902
14960
  *
@@ -14912,7 +14970,7 @@ const _AnthropicClaudeMetadataRegistration = $llmToolsMetadataRegister.register(
14912
14970
  className: 'AnthropicClaudeExecutionTools',
14913
14971
  envVariables: ['ANTHROPIC_CLAUDE_API_KEY'],
14914
14972
  trustLevel: 'CLOSED',
14915
- order: MODEL_ORDER.TOP_TIER,
14973
+ order: MODEL_ORDERS.TOP_TIER,
14916
14974
  getBoilerplateConfiguration() {
14917
14975
  return {
14918
14976
  title: 'Anthropic Claude',
@@ -14957,7 +15015,7 @@ function computeUsage(value) {
14957
15015
  /**
14958
15016
  * List of available Anthropic Claude models with pricing
14959
15017
  *
14960
- * Note: Done at 2024-08-16
15018
+ * Note: Done at 2025-05-06
14961
15019
  *
14962
15020
  * @see https://docs.anthropic.com/en/docs/models-overview
14963
15021
  * @public exported from `@promptbook/anthropic-claude`
@@ -14971,8 +15029,8 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
14971
15029
  modelName: 'claude-3-5-sonnet-20240620',
14972
15030
  modelDescription: 'Latest Claude model with great reasoning, coding, and language understanding capabilities. 200K context window. Optimized balance of intelligence and speed.',
14973
15031
  pricing: {
14974
- prompt: computeUsage(`$3.00 / 1M tokens`),
14975
- output: computeUsage(`$15.00 / 1M tokens`),
15032
+ prompt: computeUsage(`$2.50 / 1M tokens`),
15033
+ output: computeUsage(`$12.50 / 1M tokens`),
14976
15034
  },
14977
15035
  },
14978
15036
  {
@@ -14981,8 +15039,8 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
14981
15039
  modelName: 'claude-3-opus-20240229',
14982
15040
  modelDescription: 'Most capable Claude model excelling at complex reasoning, coding, and detailed instruction following. 200K context window. Best for sophisticated tasks requiring nuanced understanding.',
14983
15041
  pricing: {
14984
- prompt: computeUsage(`$15.00 / 1M tokens`),
14985
- output: computeUsage(`$75.00 / 1M tokens`),
15042
+ prompt: computeUsage(`$12.00 / 1M tokens`),
15043
+ output: computeUsage(`$60.00 / 1M tokens`),
14986
15044
  },
14987
15045
  },
14988
15046
  {
@@ -15041,8 +15099,8 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
15041
15099
  modelName: 'claude-3-7-sonnet-20250219',
15042
15100
  modelDescription: 'Latest generation Claude model with advanced reasoning and language understanding. Enhanced capabilities over 3.5 with improved domain knowledge. 200K context window.',
15043
15101
  pricing: {
15044
- prompt: computeUsage(`$3.00 / 1M tokens`),
15045
- output: computeUsage(`$15.00 / 1M tokens`),
15102
+ prompt: computeUsage(`$2.50 / 1M tokens`),
15103
+ output: computeUsage(`$12.50 / 1M tokens`),
15046
15104
  },
15047
15105
  },
15048
15106
  {
@@ -15089,14 +15147,18 @@ function computeUsageCounts(content) {
15089
15147
  /**
15090
15148
  * Make UncertainNumber
15091
15149
  *
15092
- * @param value
15150
+ * @param value value of the uncertain number, if `NaN` or `undefined`, it will be set to 0 and `isUncertain=true`
15151
+ * @param isUncertain if `true`, the value is uncertain, otherwise depends on the value
15093
15152
  *
15094
15153
  * @private utility for initializating UncertainNumber
15095
15154
  */
15096
- function uncertainNumber(value) {
15155
+ function uncertainNumber(value, isUncertain) {
15097
15156
  if (value === null || value === undefined || Number.isNaN(value)) {
15098
15157
  return UNCERTAIN_ZERO_VALUE;
15099
15158
  }
15159
+ if (isUncertain === true) {
15160
+ return { value, isUncertain };
15161
+ }
15100
15162
  return { value };
15101
15163
  }
15102
15164
 
@@ -15452,7 +15514,7 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
15452
15514
  className: 'AzureOpenAiExecutionTools',
15453
15515
  envVariables: ['AZUREOPENAI_RESOURCE_NAME', 'AZUREOPENAI_DEPLOYMENT_NAME', 'AZUREOPENAI_API_KEY'],
15454
15516
  trustLevel: 'CLOSED_BUSINESS',
15455
- order: MODEL_ORDER.NORMAL,
15517
+ order: MODEL_ORDERS.NORMAL,
15456
15518
  getBoilerplateConfiguration() {
15457
15519
  return {
15458
15520
  title: 'Azure Open AI',
@@ -15510,7 +15572,7 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
15510
15572
  /**
15511
15573
  * List of available OpenAI models with pricing
15512
15574
  *
15513
- * Note: Done at 2024-05-20
15575
+ * Note: Done at 2025-05-06
15514
15576
  *
15515
15577
  * @see https://platform.openai.com/docs/models/
15516
15578
  * @see https://openai.com/api/pricing/
@@ -15771,7 +15833,7 @@ const OPENAI_MODELS = exportJson({
15771
15833
  modelName: 'text-embedding-3-large',
15772
15834
  modelDescription: "OpenAI's most capable text embedding model designed for high-quality embeddings for complex similarity tasks and information retrieval.",
15773
15835
  pricing: {
15774
- prompt: computeUsage(`$0.13 / 1M tokens`),
15836
+ prompt: computeUsage(`$0.13 / 1M tokens`),
15775
15837
  // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
15776
15838
  output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
15777
15839
  },
@@ -15864,8 +15926,8 @@ const OPENAI_MODELS = exportJson({
15864
15926
  modelName: 'gpt-4o-mini',
15865
15927
  modelDescription: 'Smaller, more cost-effective version of GPT-4o with good performance across text, vision, and audio tasks at reduced complexity.',
15866
15928
  pricing: {
15867
- prompt: computeUsage(`$3.00 / 1M tokens`),
15868
- output: computeUsage(`$9.00 / 1M tokens`),
15929
+ prompt: computeUsage(`$0.15 / 1M tokens`),
15930
+ output: computeUsage(`$0.60 / 1M tokens`),
15869
15931
  },
15870
15932
  },
15871
15933
  /**/
@@ -16315,7 +16377,7 @@ const _DeepseekMetadataRegistration = $llmToolsMetadataRegister.register({
16315
16377
  className: 'DeepseekExecutionTools',
16316
16378
  envVariables: ['DEEPSEEK_GENERATIVE_AI_API_KEY'],
16317
16379
  trustLevel: 'UNTRUSTED',
16318
- order: MODEL_ORDER.NORMAL,
16380
+ order: MODEL_ORDERS.NORMAL,
16319
16381
  getBoilerplateConfiguration() {
16320
16382
  return {
16321
16383
  title: 'Deepseek',
@@ -16518,7 +16580,7 @@ function createExecutionToolsFromVercelProvider(options) {
16518
16580
  /**
16519
16581
  * List of available Deepseek models with descriptions
16520
16582
  *
16521
- * Note: Done at 2025-04-22
16583
+ * Note: Done at 2025-05-06
16522
16584
  *
16523
16585
  * @see https://www.deepseek.com/models
16524
16586
  * @public exported from `@promptbook/deepseek`
@@ -16532,8 +16594,8 @@ const DEEPSEEK_MODELS = exportJson({
16532
16594
  modelName: 'deepseek-chat',
16533
16595
  modelDescription: 'General-purpose language model with strong performance across conversation, reasoning, and content generation. 128K context window with excellent instruction following capabilities.',
16534
16596
  pricing: {
16535
- prompt: computeUsage(`$1.00 / 1M tokens`),
16536
- output: computeUsage(`$2.00 / 1M tokens`),
16597
+ prompt: computeUsage(`$0.80 / 1M tokens`),
16598
+ output: computeUsage(`$1.60 / 1M tokens`),
16537
16599
  },
16538
16600
  },
16539
16601
  {
@@ -16542,8 +16604,8 @@ const DEEPSEEK_MODELS = exportJson({
16542
16604
  modelName: 'deepseek-reasoner',
16543
16605
  modelDescription: 'Specialized model focused on complex reasoning tasks like mathematical problem-solving and logical analysis. Enhanced step-by-step reasoning with explicit chain-of-thought processes. 128K context window.',
16544
16606
  pricing: {
16545
- prompt: computeUsage(`$4.00 / 1M tokens`),
16546
- output: computeUsage(`$8.00 / 1M tokens`),
16607
+ prompt: computeUsage(`$3.50 / 1M tokens`),
16608
+ output: computeUsage(`$7.00 / 1M tokens`),
16547
16609
  },
16548
16610
  },
16549
16611
  {
@@ -16638,7 +16700,7 @@ const _GoogleMetadataRegistration = $llmToolsMetadataRegister.register({
16638
16700
  className: 'GoogleExecutionTools',
16639
16701
  envVariables: ['GOOGLE_GENERATIVE_AI_API_KEY'],
16640
16702
  trustLevel: 'CLOSED',
16641
- order: MODEL_ORDER.NORMAL,
16703
+ order: MODEL_ORDERS.NORMAL,
16642
16704
  getBoilerplateConfiguration() {
16643
16705
  return {
16644
16706
  title: 'Google Gemini',
@@ -16677,7 +16739,7 @@ const _GoogleMetadataRegistration = $llmToolsMetadataRegister.register({
16677
16739
  /**
16678
16740
  * List of available Google models with descriptions
16679
16741
  *
16680
- * Note: Done at 2025-04-22
16742
+ * Note: Done at 2025-05-06
16681
16743
  *
16682
16744
  * @see https://ai.google.dev/models/gemini
16683
16745
  * @public exported from `@promptbook/google`
@@ -16691,8 +16753,8 @@ const GOOGLE_MODELS = exportJson({
16691
16753
  modelName: 'gemini-2.5-pro-preview-03-25',
16692
16754
  modelDescription: 'Latest advanced multimodal model with exceptional reasoning, tool use, and instruction following. 1M token context window with improved vision capabilities for complex visual tasks.',
16693
16755
  pricing: {
16694
- prompt: computeUsage(`$7.00 / 1M tokens`),
16695
- output: computeUsage(`$21.00 / 1M tokens`),
16756
+ prompt: computeUsage(`$8.00 / 1M tokens`),
16757
+ output: computeUsage(`$24.00 / 1M tokens`),
16696
16758
  },
16697
16759
  },
16698
16760
  {
@@ -16731,8 +16793,8 @@ const GOOGLE_MODELS = exportJson({
16731
16793
  modelName: 'gemini-1.5-flash',
16732
16794
  modelDescription: 'Efficient model balancing speed and quality for general-purpose applications. 1M token context window with good multimodal capabilities and quick response times.',
16733
16795
  pricing: {
16734
- prompt: computeUsage(`$0.35 / 1M tokens`),
16735
- output: computeUsage(`$1.05 / 1M tokens`),
16796
+ prompt: computeUsage(`$0.25 / 1M tokens`),
16797
+ output: computeUsage(`$0.75 / 1M tokens`),
16736
16798
  },
16737
16799
  },
16738
16800
  {
@@ -16799,8 +16861,8 @@ const GOOGLE_MODELS = exportJson({
16799
16861
  modelName: 'gemini-1.5-pro',
16800
16862
  modelDescription: 'Flagship multimodal model with strong performance across text, code, vision, and audio tasks. 1M token context window with excellent reasoning capabilities.',
16801
16863
  pricing: {
16802
- prompt: computeUsage(`$7.00 / 1M tokens`),
16803
- output: computeUsage(`$21.00 / 1M tokens`),
16864
+ prompt: computeUsage(`$6.00 / 1M tokens`),
16865
+ output: computeUsage(`$18.00 / 1M tokens`),
16804
16866
  },
16805
16867
  },
16806
16868
  {
@@ -16903,7 +16965,7 @@ const _OpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
16903
16965
  className: 'OpenAiExecutionTools',
16904
16966
  envVariables: ['OPENAI_API_KEY'],
16905
16967
  trustLevel: 'CLOSED',
16906
- order: MODEL_ORDER.TOP_TIER,
16968
+ order: MODEL_ORDERS.TOP_TIER,
16907
16969
  getBoilerplateConfiguration() {
16908
16970
  return {
16909
16971
  title: 'Open AI',
@@ -16931,9 +16993,9 @@ const _OpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
16931
16993
  },
16932
16994
  });
16933
16995
  /**
16934
- * @@@ registration1 of default configuration for Open AI
16996
+ * Registration of the OpenAI Assistant metadata
16935
16997
  *
16936
- * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
16998
+ * Note: [🏐] Configurations registrations are done in the metadata registration section, but the constructor registration is handled separately.
16937
16999
  *
16938
17000
  * @public exported from `@promptbook/core`
16939
17001
  * @public exported from `@promptbook/wizzard`
@@ -16946,7 +17008,7 @@ const _OpenAiAssistantMetadataRegistration = $llmToolsMetadataRegister.register(
16946
17008
  envVariables: null,
16947
17009
  // <- TODO: ['OPENAI_API_KEY', 'OPENAI_ASSISTANT_ID']
16948
17010
  trustLevel: 'CLOSED',
16949
- order: MODEL_ORDER.NORMAL,
17011
+ order: MODEL_ORDERS.NORMAL,
16950
17012
  getBoilerplateConfiguration() {
16951
17013
  return {
16952
17014
  title: 'Open AI Assistant',
@@ -17003,10 +17065,14 @@ resultContent, rawResponse) {
17003
17065
  }
17004
17066
  const inputTokens = rawResponse.usage.prompt_tokens;
17005
17067
  const outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
17068
+ let isUncertain = false;
17006
17069
  let modelInfo = OPENAI_MODELS.find((model) => model.modelName === rawResponse.model);
17007
17070
  if (modelInfo === undefined) {
17008
- // Note: Model is not in the list of known models, maybe just a different version of the same model
17009
- modelInfo = OPENAI_MODELS.find((model) => model.modelName.startsWith(rawResponse.model || SALT_NONCE));
17071
+ // Note: Model is not in the list of known models, fallback to the family of the models and mark price as uncertain
17072
+ modelInfo = OPENAI_MODELS.find((model) => (rawResponse.model || SALT_NONCE).startsWith(model.modelName));
17073
+ if (modelInfo !== undefined) {
17074
+ isUncertain = true;
17075
+ }
17010
17076
  }
17011
17077
  console.log('!!! computeOpenAiUsage', {
17012
17078
  inputTokens,
@@ -17022,7 +17088,7 @@ resultContent, rawResponse) {
17022
17088
  price = uncertainNumber();
17023
17089
  }
17024
17090
  else {
17025
- price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
17091
+ price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output, isUncertain);
17026
17092
  }
17027
17093
  return {
17028
17094
  price,
@@ -17611,9 +17677,9 @@ const createOpenAiExecutionTools = Object.assign((options) => {
17611
17677
  */
17612
17678
  const _OpenAiRegistration = $llmToolsRegister.register(createOpenAiExecutionTools);
17613
17679
  /**
17614
- * @@@ registration2
17680
+ * Registration of the OpenAI Assistant provider
17615
17681
  *
17616
- * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
17682
+ * Note: [🏐] Configurations registrations are done in register-constructor.ts BUT constructor register-constructor.ts
17617
17683
  *
17618
17684
  * @public exported from `@promptbook/openai`
17619
17685
  * @public exported from `@promptbook/wizzard`