@promptbook/wizard 0.101.0-9 → 0.102.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. package/esm/index.es.js +423 -250
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +30 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +12 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +12 -0
  6. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  8. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  10. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +0 -12
  11. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
  12. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +0 -12
  13. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +0 -12
  14. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
  15. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +0 -12
  16. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +0 -12
  17. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
  18. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
  19. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
  20. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +23 -14
  21. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -14
  22. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +0 -12
  23. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
  24. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
  25. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +0 -12
  26. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +0 -12
  27. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
  28. package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
  29. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  30. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
  31. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
  32. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
  33. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
  34. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
  35. package/esm/typings/src/book-components/Chat/Chat/Chat.d.ts +5 -1
  36. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
  37. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  38. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  39. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  40. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  41. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  42. package/esm/typings/src/book-components/Chat/utils/savePlugins.d.ts +55 -0
  43. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  44. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  45. package/esm/typings/src/execution/PromptResult.d.ts +2 -4
  46. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  47. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  48. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  49. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  50. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  51. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  52. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  53. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  54. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
  55. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  56. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  57. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
  58. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  59. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  60. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  61. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  62. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
  63. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
  64. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  65. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
  66. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
  67. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
  68. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
  69. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
  70. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
  71. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  72. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  73. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  74. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  75. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  76. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  77. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
  78. package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
  79. package/esm/typings/src/version.d.ts +1 -1
  80. package/package.json +2 -2
  81. package/umd/index.umd.js +423 -250
  82. package/umd/index.umd.js.map +1 -1
  83. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  84. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  85. package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
  86. package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
  87. package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
  88. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
  89. package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/index.es.js CHANGED
@@ -36,7 +36,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-9';
+ const PROMPTBOOK_ENGINE_VERSION = '0.102.0-0';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1337,76 +1337,6 @@ function deserializeError(error) {
  return deserializedError;
  }

- /**
- * Predefined profiles for LLM providers to maintain consistency across the application
- * These profiles represent each provider as a virtual persona in chat interfaces
- *
- * @private !!!!
- */
- const LLM_PROVIDER_PROFILES = {
- OPENAI: {
- name: 'OPENAI',
- fullname: 'OpenAI GPT',
- color: '#10a37f', // OpenAI's signature green
- // Note: avatarSrc could be added when we have provider logos available
- },
- ANTHROPIC: {
- name: 'ANTHROPIC',
- fullname: 'Anthropic Claude',
- color: '#d97706', // Anthropic's orange/amber color
- },
- AZURE_OPENAI: {
- name: 'AZURE_OPENAI',
- fullname: 'Azure OpenAI',
- color: '#0078d4', // Microsoft Azure blue
- },
- GOOGLE: {
- name: 'GOOGLE',
- fullname: 'Google Gemini',
- color: '#4285f4', // Google blue
- },
- DEEPSEEK: {
- name: 'DEEPSEEK',
- fullname: 'DeepSeek',
- color: '#7c3aed', // Purple color for DeepSeek
- },
- OLLAMA: {
- name: 'OLLAMA',
- fullname: 'Ollama',
- color: '#059669', // Emerald green for local models
- },
- REMOTE: {
- name: 'REMOTE',
- fullname: 'Remote Server',
- color: '#6b7280', // Gray for remote/proxy connections
- },
- MOCKED_ECHO: {
- name: 'MOCKED_ECHO',
- fullname: 'Echo (Test)',
- color: '#8b5cf6', // Purple for test/mock tools
- },
- MOCKED_FAKE: {
- name: 'MOCKED_FAKE',
- fullname: 'Fake LLM (Test)',
- color: '#ec4899', // Pink for fake/test tools
- },
- VERCEL: {
- name: 'VERCEL',
- fullname: 'Vercel AI',
- color: '#000000', // Vercel's black
- },
- MULTIPLE: {
- name: 'MULTIPLE',
- fullname: 'Multiple Providers',
- color: '#6366f1', // Indigo for combined/multiple providers
- },
- };
- /**
- * TODO: Refactor this - each profile must be alongside the provider definition
- * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
- * Note: [💞] Ignore a discrepancy between file name and entity name
- */
-
  /**
  * Tests if given string is valid URL.
  *
@@ -1500,6 +1430,14 @@ async function createRemoteClient(options) {
  function keepUnused(...valuesToKeep) {
  }

+ /**
+ * Profile for Remote provider
+ */
+ const REMOTE_PROVIDER_PROFILE = {
+ name: 'REMOTE',
+ fullname: 'Remote Server',
+ color: '#6b7280',
+ };
  /**
  * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
  *
@@ -1522,7 +1460,7 @@ class RemoteLlmExecutionTools {
  return `Models from Promptbook remote server ${this.options.remoteServerUrl}`;
  }
  get profile() {
- return LLM_PROVIDER_PROFILES.REMOTE;
+ return REMOTE_PROVIDER_PROFILE;
  }
  /**
  * Check the configuration of all execution tools
@@ -2495,6 +2433,14 @@ resultContent, rawResponse) {
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */

+ /**
+ * Profile for Anthropic Claude provider
+ */
+ const ANTHROPIC_PROVIDER_PROFILE = {
+ name: 'ANTHROPIC',
+ fullname: 'Anthropic Claude',
+ color: '#d97706',
+ };
  /**
  * Execution Tools for calling Anthropic Claude API.
  *
@@ -2523,7 +2469,7 @@ class AnthropicClaudeExecutionTools {
  return 'Use all models provided by Anthropic Claude';
  }
  get profile() {
- return LLM_PROVIDER_PROFILES.ANTHROPIC;
+ return ANTHROPIC_PROVIDER_PROFILE;
  }
  async getClient() {
  if (this.client === null) {
@@ -2814,7 +2760,7 @@ const OPENAI_MODELS = exportJson({
2814
2760
  modelVariant: 'CHAT',
2815
2761
  modelTitle: 'gpt-5-mini',
2816
2762
  modelName: 'gpt-5-mini',
2817
- modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
2763
+ modelDescription: 'A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.',
2818
2764
  pricing: {
2819
2765
  prompt: pricing(`$0.25 / 1M tokens`),
2820
2766
  output: pricing(`$2.00 / 1M tokens`),
@@ -2826,7 +2772,7 @@ const OPENAI_MODELS = exportJson({
2826
2772
  modelVariant: 'CHAT',
2827
2773
  modelTitle: 'gpt-5-nano',
2828
2774
  modelName: 'gpt-5-nano',
2829
- modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
2775
+ modelDescription: 'The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.',
2830
2776
  pricing: {
2831
2777
  prompt: pricing(`$0.05 / 1M tokens`),
2832
2778
  output: pricing(`$0.40 / 1M tokens`),
@@ -2838,7 +2784,7 @@ const OPENAI_MODELS = exportJson({
2838
2784
  modelVariant: 'CHAT',
2839
2785
  modelTitle: 'gpt-4.1',
2840
2786
  modelName: 'gpt-4.1',
2841
- modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
2787
+ modelDescription: 'Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.',
2842
2788
  pricing: {
2843
2789
  prompt: pricing(`$3.00 / 1M tokens`),
2844
2790
  output: pricing(`$12.00 / 1M tokens`),
@@ -2850,7 +2796,7 @@ const OPENAI_MODELS = exportJson({
2850
2796
  modelVariant: 'CHAT',
2851
2797
  modelTitle: 'gpt-4.1-mini',
2852
2798
  modelName: 'gpt-4.1-mini',
2853
- modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
2799
+ modelDescription: 'Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.',
2854
2800
  pricing: {
2855
2801
  prompt: pricing(`$0.80 / 1M tokens`),
2856
2802
  output: pricing(`$3.20 / 1M tokens`),
@@ -2862,7 +2808,7 @@ const OPENAI_MODELS = exportJson({
2862
2808
  modelVariant: 'CHAT',
2863
2809
  modelTitle: 'gpt-4.1-nano',
2864
2810
  modelName: 'gpt-4.1-nano',
2865
- modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
2811
+ modelDescription: 'Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.',
2866
2812
  pricing: {
2867
2813
  prompt: pricing(`$0.20 / 1M tokens`),
2868
2814
  output: pricing(`$0.80 / 1M tokens`),
@@ -2874,7 +2820,7 @@ const OPENAI_MODELS = exportJson({
2874
2820
  modelVariant: 'CHAT',
2875
2821
  modelTitle: 'o3',
2876
2822
  modelName: 'o3',
2877
- modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
2823
+ modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.',
2878
2824
  pricing: {
2879
2825
  prompt: pricing(`$15.00 / 1M tokens`),
2880
2826
  output: pricing(`$60.00 / 1M tokens`),
@@ -2886,7 +2832,7 @@ const OPENAI_MODELS = exportJson({
2886
2832
  modelVariant: 'CHAT',
2887
2833
  modelTitle: 'o3-pro',
2888
2834
  modelName: 'o3-pro',
2889
- modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
2835
+ modelDescription: 'Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.',
2890
2836
  pricing: {
2891
2837
  prompt: pricing(`$30.00 / 1M tokens`),
2892
2838
  output: pricing(`$120.00 / 1M tokens`),
@@ -2898,7 +2844,7 @@ const OPENAI_MODELS = exportJson({
2898
2844
  modelVariant: 'CHAT',
2899
2845
  modelTitle: 'o4-mini',
2900
2846
  modelName: 'o4-mini',
2901
- modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
2847
+ modelDescription: 'Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.',
2902
2848
  pricing: {
2903
2849
  prompt: pricing(`$4.00 / 1M tokens`),
2904
2850
  output: pricing(`$16.00 / 1M tokens`),
@@ -2910,7 +2856,7 @@ const OPENAI_MODELS = exportJson({
2910
2856
  modelVariant: 'CHAT',
2911
2857
  modelTitle: 'o3-deep-research',
2912
2858
  modelName: 'o3-deep-research',
2913
- modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
2859
+ modelDescription: 'Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.',
2914
2860
  pricing: {
2915
2861
  prompt: pricing(`$25.00 / 1M tokens`),
2916
2862
  output: pricing(`$100.00 / 1M tokens`),
@@ -2922,7 +2868,7 @@ const OPENAI_MODELS = exportJson({
2922
2868
  modelVariant: 'CHAT',
2923
2869
  modelTitle: 'o4-mini-deep-research',
2924
2870
  modelName: 'o4-mini-deep-research',
2925
- modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
2871
+ modelDescription: 'Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.',
2926
2872
  pricing: {
2927
2873
  prompt: pricing(`$12.00 / 1M tokens`),
2928
2874
  output: pricing(`$48.00 / 1M tokens`),
@@ -3378,6 +3324,14 @@ const OPENAI_MODELS = exportJson({
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */

+ /**
+ * Profile for Azure OpenAI provider
+ */
+ const AZURE_OPENAI_PROVIDER_PROFILE = {
+ name: 'AZURE_OPENAI',
+ fullname: 'Azure OpenAI',
+ color: '#0078d4',
+ };
  /**
  * Execution Tools for calling Azure OpenAI API.
  *
@@ -3406,6 +3360,9 @@ class AzureOpenAiExecutionTools {
  get description() {
  return 'Use all models trained by OpenAI provided by Azure';
  }
+ get profile() {
+ return AZURE_OPENAI_PROVIDER_PROFILE;
+ }
  async getClient() {
  if (this.client === null) {
  this.client = new OpenAIClient(`https://${this.options.resourceName}.openai.azure.com/`, new AzureKeyCredential(this.options.apiKey));
@@ -3803,6 +3760,14 @@ function asSerializable(value) {
  }
  }

+ /**
+ * Profile for Vercel AI adapter
+ */
+ const VERCEL_PROVIDER_PROFILE = {
+ name: 'VERCEL',
+ fullname: 'Vercel AI',
+ color: '#000000',
+ };
  /**
  * Adapter which creates Promptbook execution tools from Vercel provider
  *
@@ -3825,6 +3790,7 @@ function createExecutionToolsFromVercelProvider(options) {
  return {
  title,
  description,
+ profile: VERCEL_PROVIDER_PROFILE,
  checkConfiguration() {
  // Note: There is no way how to check configuration of Vercel provider
  return Promise.resolve();
@@ -4106,6 +4072,14 @@ const DEEPSEEK_MODELS = exportJson({
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */

+ /**
+ * Profile for Deepseek provider
+ */
+ const DEEPSEEK_PROVIDER_PROFILE = {
+ name: 'DEEPSEEK',
+ fullname: 'DeepSeek',
+ color: '#7c3aed',
+ };
  /**
  * Execution Tools for calling Deepseek API.
  *
@@ -4123,13 +4097,17 @@ const createDeepseekExecutionTools = Object.assign((options) => {
  ...options,
  // apiKey: process.env.DEEPSEEK_GENERATIVE_AI_API_KEY,
  });
- return createExecutionToolsFromVercelProvider({
+ const baseTools = createExecutionToolsFromVercelProvider({
  title: 'Deepseek',
  description: 'Implementation of Deepseek models',
  vercelProvider: deepseekVercelProvider,
  availableModels: DEEPSEEK_MODELS,
  ...options,
  });
+ return {
+ ...baseTools,
+ profile: DEEPSEEK_PROVIDER_PROFILE,
+ };
  }, {
  packageName: '@promptbook/deepseek',
  className: 'DeepseekExecutionTools',
@@ -4431,6 +4409,14 @@ const GOOGLE_MODELS = exportJson({
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */

+ /**
+ * Profile for Google Gemini provider
+ */
+ const GOOGLE_PROVIDER_PROFILE = {
+ name: 'GOOGLE',
+ fullname: 'Google Gemini',
+ color: '#4285f4',
+ };
  /**
  * Execution Tools for calling Google Gemini API.
  *
@@ -4448,13 +4434,17 @@ const createGoogleExecutionTools = Object.assign((options) => {
  ...options,
  /// apiKey: process.env.GOOGLE_GENERATIVE_AI_API_KEY,
  });
- return createExecutionToolsFromVercelProvider({
+ const baseTools = createExecutionToolsFromVercelProvider({
  title: 'Google',
  description: 'Implementation of Google models',
  vercelProvider: googleGeminiVercelProvider,
  availableModels: GOOGLE_MODELS,
  ...options,
  });
+ return {
+ ...baseTools,
+ profile: GOOGLE_PROVIDER_PROFILE,
+ };
  }, {
  packageName: '@promptbook/google',
  className: 'GoogleExecutionTools',
@@ -4622,6 +4612,62 @@ resultContent, rawResponse) {
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */

+ /**
+ * Parses an OpenAI error message to identify which parameter is unsupported
+ *
+ * @param errorMessage The error message from OpenAI API
+ * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+ function parseUnsupportedParameterError(errorMessage) {
+ // Pattern to match "Unsupported value: 'parameter' does not support ..."
+ const unsupportedValueMatch = errorMessage.match(/Unsupported value:\s*'([^']+)'\s*does not support/i);
+ if (unsupportedValueMatch === null || unsupportedValueMatch === void 0 ? void 0 : unsupportedValueMatch[1]) {
+ return unsupportedValueMatch[1];
+ }
+ // Pattern to match "'parameter' of type ... is not supported with this model"
+ const parameterTypeMatch = errorMessage.match(/'([^']+)'\s*of type.*is not supported with this model/i);
+ if (parameterTypeMatch === null || parameterTypeMatch === void 0 ? void 0 : parameterTypeMatch[1]) {
+ return parameterTypeMatch[1];
+ }
+ return null;
+ }
+ /**
+ * Creates a copy of model requirements with the specified parameter removed
+ *
+ * @param modelRequirements Original model requirements
+ * @param unsupportedParameter The parameter to remove
+ * @returns New model requirements without the unsupported parameter
+ * @private utility of LLM Tools
+ */
+ function removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter) {
+ const newRequirements = { ...modelRequirements };
+ // Map of parameter names that might appear in error messages to ModelRequirements properties
+ const parameterMap = {
+ temperature: 'temperature',
+ max_tokens: 'maxTokens',
+ maxTokens: 'maxTokens',
+ seed: 'seed',
+ };
+ const propertyToRemove = parameterMap[unsupportedParameter];
+ if (propertyToRemove && propertyToRemove in newRequirements) {
+ delete newRequirements[propertyToRemove];
+ }
+ return newRequirements;
+ }
+ /**
+ * Checks if an error is an "Unsupported value" error from OpenAI
+ * @param error The error to check
+ * @returns true if this is an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+ function isUnsupportedParameterError(error) {
+ const errorMessage = error.message.toLowerCase();
+ return (errorMessage.includes('unsupported value:') ||
+ errorMessage.includes('is not supported with this model') ||
+ errorMessage.includes('does not support'));
+ }
+
  /**
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
  *
@@ -4639,6 +4685,10 @@ class OpenAiCompatibleExecutionTools {
  * OpenAI API client.
  */
  this.client = null;
+ /**
+ * Tracks models and parameters that have already been retried to prevent infinite loops
+ */
+ this.retriedUnsupportedParameters = new Set();
  // TODO: Allow configuring rate limits via options
  this.limiter = new Bottleneck({
  minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
@@ -4700,21 +4750,27 @@ class OpenAiCompatibleExecutionTools {
4700
4750
  * Calls OpenAI compatible API to use a chat model.
4701
4751
  */
4702
4752
  async callChatModel(prompt) {
4753
+ return this.callChatModelWithRetry(prompt, prompt.modelRequirements);
4754
+ }
4755
+ /**
4756
+ * Internal method that handles parameter retry for chat model calls
4757
+ */
4758
+ async callChatModelWithRetry(prompt, currentModelRequirements) {
4703
4759
  var _a;
4704
4760
  if (this.options.isVerbose) {
4705
- console.info(`💬 ${this.title} callChatModel call`, { prompt });
4761
+ console.info(`💬 ${this.title} callChatModel call`, { prompt, currentModelRequirements });
4706
4762
  }
4707
- const { content, parameters, modelRequirements, format } = prompt;
4763
+ const { content, parameters, format } = prompt;
4708
4764
  const client = await this.getClient();
4709
4765
  // TODO: [☂] Use here more modelRequirements
4710
- if (modelRequirements.modelVariant !== 'CHAT') {
4766
+ if (currentModelRequirements.modelVariant !== 'CHAT') {
4711
4767
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
4712
4768
  }
4713
- const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
4769
+ const modelName = currentModelRequirements.modelName || this.getDefaultChatModel().modelName;
4714
4770
  const modelSettings = {
4715
4771
  model: modelName,
4716
- max_tokens: modelRequirements.maxTokens,
4717
- temperature: modelRequirements.temperature,
4772
+ max_tokens: currentModelRequirements.maxTokens,
4773
+ temperature: currentModelRequirements.temperature,
4718
4774
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
4719
4775
  // <- Note: [🧆]
4720
4776
  }; // <- TODO: [💩] Guard here types better
@@ -4729,12 +4785,12 @@ class OpenAiCompatibleExecutionTools {
4729
4785
  const rawRequest = {
4730
4786
  ...modelSettings,
4731
4787
  messages: [
4732
- ...(modelRequirements.systemMessage === undefined
4788
+ ...(currentModelRequirements.systemMessage === undefined
4733
4789
  ? []
4734
4790
  : [
4735
4791
  {
4736
4792
  role: 'system',
4737
- content: modelRequirements.systemMessage,
4793
+ content: currentModelRequirements.systemMessage,
4738
4794
  },
4739
4795
  ]),
4740
4796
  {
@@ -4748,69 +4804,110 @@ class OpenAiCompatibleExecutionTools {
4748
4804
  if (this.options.isVerbose) {
4749
4805
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
4750
4806
  }
4751
- const rawResponse = await this.limiter
4752
- .schedule(() => this.makeRequestWithRetry(() => client.chat.completions.create(rawRequest)))
4753
- .catch((error) => {
4754
- assertsError(error);
4807
+ try {
4808
+ const rawResponse = await this.limiter
4809
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
4810
+ .catch((error) => {
4811
+ assertsError(error);
4812
+ if (this.options.isVerbose) {
4813
+ console.info(colors.bgRed('error'), error);
4814
+ }
4815
+ throw error;
4816
+ });
4755
4817
  if (this.options.isVerbose) {
4756
- console.info(colors.bgRed('error'), error);
4818
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
4757
4819
  }
4758
- throw error;
4759
- });
4760
- if (this.options.isVerbose) {
4761
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
4762
- }
4763
- const complete = $getCurrentDate();
4764
- if (!rawResponse.choices[0]) {
4765
- throw new PipelineExecutionError(`No choises from ${this.title}`);
4766
- }
4767
- if (rawResponse.choices.length > 1) {
4768
- // TODO: This should be maybe only warning
4769
- throw new PipelineExecutionError(`More than one choise from ${this.title}`);
4820
+ const complete = $getCurrentDate();
4821
+ if (!rawResponse.choices[0]) {
4822
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
4823
+ }
4824
+ if (rawResponse.choices.length > 1) {
4825
+ // TODO: This should be maybe only warning
4826
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
4827
+ }
4828
+ const resultContent = rawResponse.choices[0].message.content;
4829
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
4830
+ if (resultContent === null) {
4831
+ throw new PipelineExecutionError(`No response message from ${this.title}`);
4832
+ }
4833
+ return exportJson({
4834
+ name: 'promptResult',
4835
+ message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
4836
+ order: [],
4837
+ value: {
4838
+ content: resultContent,
4839
+ modelName: rawResponse.model || modelName,
4840
+ timing: {
4841
+ start,
4842
+ complete,
4843
+ },
4844
+ usage,
4845
+ rawPromptContent,
4846
+ rawRequest,
4847
+ rawResponse,
4848
+ // <- [🗯]
4849
+ },
4850
+ });
4770
4851
  }
4771
- const resultContent = rawResponse.choices[0].message.content;
4772
- const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
4773
- if (resultContent === null) {
4774
- throw new PipelineExecutionError(`No response message from ${this.title}`);
4852
+ catch (error) {
4853
+ assertsError(error);
4854
+ // Check if this is an unsupported parameter error
4855
+ if (!isUnsupportedParameterError(error)) {
4856
+ throw error;
4857
+ }
4858
+ // Parse which parameter is unsupported
4859
+ const unsupportedParameter = parseUnsupportedParameterError(error.message);
4860
+ if (!unsupportedParameter) {
4861
+ if (this.options.isVerbose) {
4862
+ console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
4863
+ }
4864
+ throw error;
4865
+ }
4866
+ // Create a unique key for this model + parameter combination to prevent infinite loops
4867
+ const retryKey = `${modelName}-${unsupportedParameter}`;
4868
+ if (this.retriedUnsupportedParameters.has(retryKey)) {
4869
+ // Already retried this parameter, throw the error
4870
+ if (this.options.isVerbose) {
4871
+ console.warn(colors.bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
4872
+ }
4873
+ throw error;
4874
+ }
4875
+ // Mark this parameter as retried
4876
+ this.retriedUnsupportedParameters.add(retryKey);
4877
+ // Log warning in verbose mode
4878
+ if (this.options.isVerbose) {
4879
+ console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
4880
+ }
4881
+ // Remove the unsupported parameter and retry
4882
+ const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
4883
+ return this.callChatModelWithRetry(prompt, modifiedModelRequirements);
4775
4884
  }
4776
- return exportJson({
4777
- name: 'promptResult',
4778
- message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
4779
- order: [],
4780
- value: {
4781
- content: resultContent,
4782
- modelName: rawResponse.model || modelName,
4783
- timing: {
4784
- start,
4785
- complete,
4786
- },
4787
- usage,
4788
- rawPromptContent,
4789
- rawRequest,
4790
- rawResponse,
4791
- // <- [🗯]
4792
- },
4793
- });
4794
4885
  }
4795
4886
  /**
4796
4887
  * Calls OpenAI API to use a complete model.
4797
4888
  */
4798
4889
  async callCompletionModel(prompt) {
4890
+ return this.callCompletionModelWithRetry(prompt, prompt.modelRequirements);
4891
+ }
4892
+ /**
4893
+ * Internal method that handles parameter retry for completion model calls
4894
+ */
4895
+ async callCompletionModelWithRetry(prompt, currentModelRequirements) {
4799
4896
  var _a;
4800
4897
  if (this.options.isVerbose) {
4801
- console.info(`🖋 ${this.title} callCompletionModel call`, { prompt });
4898
+ console.info(`🖋 ${this.title} callCompletionModel call`, { prompt, currentModelRequirements });
4802
4899
  }
4803
- const { content, parameters, modelRequirements } = prompt;
4900
+ const { content, parameters } = prompt;
4804
4901
  const client = await this.getClient();
4805
4902
  // TODO: [☂] Use here more modelRequirements
4806
- if (modelRequirements.modelVariant !== 'COMPLETION') {
4903
+ if (currentModelRequirements.modelVariant !== 'COMPLETION') {
4807
4904
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
4808
4905
  }
4809
- const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
4906
+ const modelName = currentModelRequirements.modelName || this.getDefaultCompletionModel().modelName;
4810
4907
  const modelSettings = {
4811
4908
  model: modelName,
4812
- max_tokens: modelRequirements.maxTokens,
4813
- temperature: modelRequirements.temperature,
4909
+ max_tokens: currentModelRequirements.maxTokens,
4910
+ temperature: currentModelRequirements.temperature,
4814
4911
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
4815
4912
  // <- Note: [🧆]
4816
4913
  };
@@ -4824,46 +4921,81 @@ class OpenAiCompatibleExecutionTools {
4824
4921
  if (this.options.isVerbose) {
4825
4922
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
4826
4923
  }
4827
- const rawResponse = await this.limiter
4828
- .schedule(() => this.makeRequestWithRetry(() => client.completions.create(rawRequest)))
4829
- .catch((error) => {
4830
- assertsError(error);
4924
+ try {
4925
+ const rawResponse = await this.limiter
4926
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.completions.create(rawRequest)))
4927
+ .catch((error) => {
4928
+ assertsError(error);
4929
+ if (this.options.isVerbose) {
4930
+ console.info(colors.bgRed('error'), error);
4931
+ }
4932
+ throw error;
4933
+ });
4831
4934
  if (this.options.isVerbose) {
4832
- console.info(colors.bgRed('error'), error);
4935
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
4833
4936
  }
4834
- throw error;
4835
- });
4836
- if (this.options.isVerbose) {
4837
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
4838
- }
4839
- const complete = $getCurrentDate();
4840
- if (!rawResponse.choices[0]) {
4841
- throw new PipelineExecutionError(`No choises from ${this.title}`);
4937
+ const complete = $getCurrentDate();
4938
+ if (!rawResponse.choices[0]) {
4939
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
4940
+ }
4941
+ if (rawResponse.choices.length > 1) {
4942
+ // TODO: This should be maybe only warning
4943
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
4944
+ }
4945
+ const resultContent = rawResponse.choices[0].text;
4946
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
4947
+ return exportJson({
4948
+ name: 'promptResult',
4949
+ message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
4950
+ order: [],
4951
+ value: {
4952
+ content: resultContent,
4953
+ modelName: rawResponse.model || modelName,
4954
+ timing: {
4955
+ start,
4956
+ complete,
4957
+ },
4958
+ usage,
4959
+ rawPromptContent,
4960
+ rawRequest,
4961
+ rawResponse,
4962
+ // <- [🗯]
4963
+ },
4964
+ });
4842
4965
  }
4843
- if (rawResponse.choices.length > 1) {
4844
- // TODO: This should be maybe only warning
4845
- throw new PipelineExecutionError(`More than one choise from ${this.title}`);
4966
+ catch (error) {
4967
+ assertsError(error);
4968
+ // Check if this is an unsupported parameter error
4969
+ if (!isUnsupportedParameterError(error)) {
4970
+ throw error;
4971
+ }
4972
+ // Parse which parameter is unsupported
4973
+ const unsupportedParameter = parseUnsupportedParameterError(error.message);
4974
+ if (!unsupportedParameter) {
4975
+ if (this.options.isVerbose) {
4976
+ console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
4977
+ }
4978
+ throw error;
4979
+ }
4980
+ // Create a unique key for this model + parameter combination to prevent infinite loops
4981
+ const retryKey = `${modelName}-${unsupportedParameter}`;
4982
+ if (this.retriedUnsupportedParameters.has(retryKey)) {
4983
+ // Already retried this parameter, throw the error
4984
+ if (this.options.isVerbose) {
4985
+ console.warn(colors.bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
4986
+ }
4987
+ throw error;
4988
+ }
4989
+ // Mark this parameter as retried
4990
+ this.retriedUnsupportedParameters.add(retryKey);
4991
+ // Log warning in verbose mode
4992
+ if (this.options.isVerbose) {
4993
+ console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
4994
+ }
4995
+ // Remove the unsupported parameter and retry
4996
+ const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
4997
+ return this.callCompletionModelWithRetry(prompt, modifiedModelRequirements);
4846
4998
  }
4847
- const resultContent = rawResponse.choices[0].text;
4848
- const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
4849
- return exportJson({
4850
- name: 'promptResult',
4851
- message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
4852
- order: [],
4853
- value: {
4854
- content: resultContent,
4855
- modelName: rawResponse.model || modelName,
4856
- timing: {
4857
- start,
4858
- complete,
4859
- },
4860
- usage,
4861
- rawPromptContent,
4862
- rawRequest,
4863
- rawResponse,
4864
- // <- [🗯]
4865
- },
4866
- });
4867
4999
  }
4868
5000
  /**
4869
5001
  * Calls OpenAI compatible API to use a embedding model
@@ -4889,7 +5021,7 @@ class OpenAiCompatibleExecutionTools {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
  const rawResponse = await this.limiter
- .schedule(() => this.makeRequestWithRetry(() => client.embeddings.create(rawRequest)))
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.embeddings.create(rawRequest)))
  .catch((error) => {
  assertsError(error);
  if (this.options.isVerbose) {
@@ -4951,7 +5083,7 @@ class OpenAiCompatibleExecutionTools {
  /**
  * Makes a request with retry logic for network errors like ECONNRESET
  */
- async makeRequestWithRetry(requestFn) {
+ async makeRequestWithNetworkRetry(requestFn) {
  let lastError;
  for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
  try {
@@ -4963,8 +5095,8 @@ class OpenAiCompatibleExecutionTools {
  // Check if this is a retryable network error
  const isRetryableError = this.isRetryableNetworkError(error);
  if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
- if (this.options.isVerbose) {
- console.info(colors.bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
+ if (this.options.isVerbose && this.isRetryableNetworkError(error)) {
+ console.info(colors.bgRed('Final network error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
  }
  throw error;
  }
@@ -4974,7 +5106,7 @@ class OpenAiCompatibleExecutionTools {
  const jitterDelay = Math.random() * 500; // Add some randomness
  const totalDelay = backoffDelay + jitterDelay;
  if (this.options.isVerbose) {
- console.info(colors.bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
+ console.info(colors.bgYellow('Retrying network request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
  }
  // Wait before retrying
  await new Promise((resolve) => setTimeout(resolve, totalDelay));
@@ -5023,6 +5155,7 @@ class OpenAiCompatibleExecutionTools {
  * TODO: [🛄] Maybe make custom `OpenAiCompatibleError`
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ * TODO: [🧠][🦢] Make reverse adapter from LlmExecutionTools to OpenAI-compatible:
  */

  /**
@@ -5278,6 +5411,14 @@ const OLLAMA_MODELS = exportJson({
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */

+ /**
+ * Profile for Ollama provider
+ */
+ const OLLAMA_PROVIDER_PROFILE = {
+ name: 'OLLAMA',
+ fullname: 'Ollama',
+ color: '#059669',
+ };
  /**
  * Execution Tools for calling Ollama API
  *
@@ -5300,6 +5441,9 @@ class OllamaExecutionTools extends OpenAiCompatibleExecutionTools {
  get description() {
  return 'Use all models provided by Ollama';
  }
+ get profile() {
+ return OLLAMA_PROVIDER_PROFILE;
+ }
  /**
  * List all available models (non dynamically)
  *
@@ -5503,6 +5647,14 @@ const _OpenAiCompatibleMetadataRegistration = $llmToolsMetadataRegister.register
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */

+ /**
+ * Profile for OpenAI provider
+ */
+ const OPENAI_PROVIDER_PROFILE = {
+ name: 'OPENAI',
+ fullname: 'OpenAI GPT',
+ color: '#10a37f',
+ };
  /**
  * Execution Tools for calling OpenAI API
  *
@@ -5525,7 +5677,7 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
  return 'Use all models provided by OpenAI';
  }
  get profile() {
- return LLM_PROVIDER_PROFILES.OPENAI;
+ return OPENAI_PROVIDER_PROFILE;
  }
  /*
  Note: Commenting this out to avoid circular dependency
@@ -6355,11 +6507,12 @@ async function getScraperIntermediateSource(source, options) {
  catch (error) {
  // Note: If we can't create cache directory, continue without it
  // This handles read-only filesystems, permission issues, and missing parent directories
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) ;
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) ;
  else {
  // Re-throw other unexpected errors
  throw error;
@@ -7701,6 +7854,33 @@ function countUsage(llmTools) {
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */

+ /**
+ * Takes an item or an array of items and returns an array of items
+ *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
+ */
+ function arrayableToArray(input) {
+ if (input === undefined) {
+ return [];
+ }
+ if (input instanceof Array) {
+ return input;
+ }
+ return [input];
+ }
+
+ /**
+ * Profile for Multiple providers aggregation
+ */
+ const MULTIPLE_PROVIDER_PROFILE = {
+ name: 'MULTIPLE',
+ fullname: 'Multiple Providers',
+ color: '#6366f1',
+ };
  /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
  *
@@ -7711,12 +7891,10 @@ class MultipleLlmExecutionTools {
  /**
  * Gets array of execution tools in order of priority
  */
- constructor(...llmExecutionTools) {
+ constructor(title, ...llmExecutionTools) {
+ this.title = title;
  this.llmExecutionTools = llmExecutionTools;
  }
- get title() {
- return 'Multiple LLM Providers';
- }
  get description() {
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
  .map(({ title, description }, index) => {
@@ -7738,7 +7916,7 @@ class MultipleLlmExecutionTools {
7738
7916
  `);
7739
7917
  }
7740
7918
  get profile() {
7741
- return LLM_PROVIDER_PROFILES.MULTIPLE;
7919
+ return MULTIPLE_PROVIDER_PROFILE;
7742
7920
  }
7743
7921
  /**
7744
7922
  * Check the configuration of all execution tools
@@ -7802,7 +7980,7 @@ class MultipleLlmExecutionTools {
7802
7980
  return await llmExecutionTools.callEmbeddingModel(prompt);
7803
7981
  // <- case [🤖]:
7804
7982
  default:
7805
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
7983
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
7806
7984
  }
7807
7985
  }
7808
7986
  catch (error) {
@@ -7823,7 +8001,7 @@ class MultipleLlmExecutionTools {
7823
8001
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
7824
8002
  // 3) ...
7825
8003
  spaceTrim((block) => `
7826
- All execution tools failed:
8004
+ All execution tools of ${this.title} failed:
7827
8005
 
7828
8006
  ${block(errors
7829
8007
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -7832,11 +8010,11 @@ class MultipleLlmExecutionTools {
7832
8010
  `));
7833
8011
  }
7834
8012
  else if (this.llmExecutionTools.length === 0) {
7835
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
8013
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
7836
8014
  }
7837
8015
  else {
7838
8016
  throw new PipelineExecutionError(spaceTrim((block) => `
7839
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
8017
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
7840
8018
 
7841
8019
  Available \`LlmExecutionTools\`:
7842
8020
  ${block(this.description)}
@@ -7866,7 +8044,7 @@ class MultipleLlmExecutionTools {
7866
8044
  *
7867
8045
  * @public exported from `@promptbook/core`
7868
8046
  */
7869
- function joinLlmExecutionTools(...llmExecutionTools) {
8047
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
7870
8048
  if (llmExecutionTools.length === 0) {
7871
8049
  const warningMessage = spaceTrim(`
7872
8050
  You have not provided any \`LlmExecutionTools\`
@@ -7898,30 +8076,27 @@ function joinLlmExecutionTools(...llmExecutionTools) {
7898
8076
  };
7899
8077
  */
7900
8078
  }
7901
- return new MultipleLlmExecutionTools(...llmExecutionTools);
8079
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
7902
8080
  }
7903
8081
  /**
7904
8082
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
7905
8083
  */
7906
8084
 
7907
8085
  /**
7908
- * Takes an item or an array of items and returns an array of items
8086
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
7909
8087
  *
7910
- * 1) Any item except array and undefined returns array with that one item (also null)
7911
- * 2) Undefined returns empty array
7912
- * 3) Array returns itself
7913
- *
7914
- * @private internal utility
8088
+ * @public exported from `@promptbook/core`
7915
8089
  */
7916
- function arrayableToArray(input) {
7917
- if (input === undefined) {
7918
- return [];
7919
- }
7920
- if (input instanceof Array) {
7921
- return input;
7922
- }
7923
- return [input];
8090
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
8091
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
8092
+ const llmTools = _llms.length === 1
8093
+ ? _llms[0]
8094
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
8095
+ return llmTools;
7924
8096
  }
8097
+ /**
8098
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
8099
+ */
7925
8100
 
7926
8101
  /**
7927
8102
  * Prepares the persona for the pipeline
@@ -7940,8 +8115,7 @@ async function preparePersona(personaDescription, tools, options) {
7940
8115
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
7941
8116
  tools,
7942
8117
  });
7943
- const _llms = arrayableToArray(tools.llm);
7944
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
8118
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
7945
8119
  const availableModels = (await llmTools.listModels())
7946
8120
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
7947
8121
  .map(({ modelName, modelDescription }) => ({
@@ -7985,6 +8159,7 @@ async function preparePersona(personaDescription, tools, options) {
7985
8159
  };
7986
8160
  }
7987
8161
  /**
8162
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
7988
8163
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
7989
8164
  * TODO: [🏢] Check validity of `modelName` in pipeline
7990
8165
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -8557,9 +8732,7 @@ async function preparePipeline(pipeline, tools, options) {
8557
8732
  if (tools === undefined || tools.llm === undefined) {
8558
8733
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
8559
8734
  }
8560
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
8561
- const _llms = arrayableToArray(tools.llm);
8562
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
8735
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
8563
8736
  const llmToolsWithUsage = countUsage(llmTools);
8564
8737
  // <- TODO: [🌯]
8565
8738
  /*
@@ -9429,9 +9602,7 @@ async function executeAttempts(options) {
9429
9602
  $scriptPipelineExecutionErrors: [],
9430
9603
  $failedResults: [], // Track all failed attempts
9431
9604
  };
9432
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
9433
- const _llms = arrayableToArray(tools.llm);
9434
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
9605
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
9435
9606
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
9436
9607
  const isJokerAttempt = attemptIndex < 0;
9437
9608
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -9951,9 +10122,7 @@ async function getKnowledgeForTask(options) {
  return ''; // <- Note: Np knowledge present, return empty string
  }
  try {
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  const taskEmbeddingPrompt = {
  title: 'Knowledge Search',
  modelRequirements: {
@@ -10554,13 +10723,13 @@ function createPipelineExecutor(options) {
  // Calculate and update tldr based on pipeline progress
  const cv = newOngoingResult;
  // Calculate progress based on parameters resolved vs total parameters
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
  let resolvedParameters = 0;
  let currentTaskTitle = '';
  // Get the resolved parameters from output parameters
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
  // Count how many output parameters have non-empty values
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
  }
  // Try to determine current task from execution report
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
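The hunk above is formatting-only (parentheses added around single arrow-function parameters); the behavior of the progress counter is unchanged. For context, it counts an output parameter as resolved only when its value is neither `undefined`, `null`, nor blank. A self-contained illustration of that filter (the parameter values are made up):

```ts
// Illustrative values; the filter expression mirrors the bundle code in the hunk above.
const outputParameters: Record<string, unknown> = { summary: 'Done', draft: '', notes: null };

const resolvedParameters = Object.values(outputParameters).filter(
    (value) => value !== undefined && value !== null && String(value).trim() !== '',
).length;

console.log(resolvedParameters); // -> 1, only `summary` counts as resolved
```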
@@ -10670,9 +10839,7 @@ class MarkdownScraper {
  throw new MissingToolsError('LLM tools are required for scraping external files');
  // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
  }
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(llm);
  // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
  const collection = createCollectionFromJson(...PipelineCollection);
  const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({
@@ -11449,11 +11616,12 @@ class MarkitdownScraper {
  catch (error) {
  // Note: If we can't write to cache, we'll continue without caching
  // This handles read-only filesystems like Vercel
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) ;
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) ;
  else {
  // Re-throw other unexpected errors
  throw error;
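This hunk, and the matching ones in `WebsiteScraper`, `FileCacheStorage`, and `$getCompiledBook` further down, only re-wraps the condition across lines; the logic is unchanged: cache-write failures that look like read-only or permission problems are swallowed, everything else is re-thrown. A hedged sketch of the pattern with a hypothetical `writeCacheFile` callback (the package inlines this logic per scraper rather than sharing a helper):

```ts
// Hypothetical helper for illustration only; the error markers are taken from the diff.
const IGNORABLE_FS_ERROR_MARKS = ['EROFS', 'read-only', 'EACCES', 'EPERM', 'ENOENT'];

async function tryWriteCache(writeCacheFile: () => Promise<void>): Promise<void> {
    try {
        await writeCacheFile();
    } catch (error) {
        const message = error instanceof Error ? error.message : '';
        if (IGNORABLE_FS_ERROR_MARKS.some((mark) => message.includes(mark))) {
            return; // Caching is optional; continue without it (e.g. on Vercel's read-only filesystem)
        }
        throw error; // Re-throw other unexpected errors
    }
}
```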
@@ -11753,11 +11921,12 @@ class WebsiteScraper {
  catch (error) {
  // Note: If we can't write to cache, we'll continue without caching
  // This handles read-only filesystems like Vercel
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) ;
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) ;
  else {
  // Re-throw other unexpected errors
  throw error;
@@ -12511,11 +12680,12 @@ class FileCacheStorage {
  catch (error) {
  // Note: If we can't write to cache, silently ignore the error
  // This handles read-only filesystems, permission issues, and missing parent directories
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) {
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) {
  // Silently ignore filesystem errors - caching is optional
  return;
  }
@@ -12808,7 +12978,7 @@ async function $provideLlmToolsConfigurationFromEnv() {
  * @public exported from `@promptbook/core`
  */
  function createLlmToolsFromConfiguration(configuration, options = {}) {
- const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
+ const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
  const llmTools = configuration.map((llmConfiguration) => {
  const registeredItem = $llmToolsRegister
  .list()
@@ -12840,7 +13010,7 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
  ...llmConfiguration.options,
  });
  });
- return joinLlmExecutionTools(...llmTools);
+ return joinLlmExecutionTools(title, ...llmTools);
  }
  /**
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
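Together with the new `title` option above (defaulting to 'LLM Tools from Configuration'), `joinLlmExecutionTools` is now called with a human-readable title as its first argument. A minimal sketch of the new call-site shape, with stand-in tool objects rather than real providers:

```ts
// Stand-ins for real LLM execution tools; only the argument shape is taken from the diff.
type ToolsLike = { title: string };

function joinLlmExecutionToolsSketch(title: string, ...llmTools: ToolsLike[]): ToolsLike {
    return { title }; // the real function also joins the listed providers into one
}

const providerA: ToolsLike = { title: 'Provider A (stand-in)' };
const providerB: ToolsLike = { title: 'Provider B (stand-in)' };

// Before this version: joinLlmExecutionTools(...llmTools)
// From this version:   joinLlmExecutionTools(title, ...llmTools)
const joined = joinLlmExecutionToolsSketch('LLM Tools from Configuration', providerA, providerB);
```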
@@ -12957,7 +13127,9 @@ async function $provideLlmToolsForWizardOrCli(options) {
  });
  }
  else if (strategy === 'BRING_YOUR_OWN_KEYS') {
- llmExecutionTools = await $provideLlmToolsFromEnv();
+ llmExecutionTools = await $provideLlmToolsFromEnv({
+ title: 'LLM Tools for wizard or CLI with BYOK strategy',
+ });
  }
  else {
  throw new UnexpectedError(`\`$provideLlmToolsForWizardOrCli\` wrong strategy "${strategy}"`);
@@ -16990,7 +17162,7 @@ async function $provideExecutionToolsForNode(options) {
  throw new EnvironmentMismatchError('Function `$getExecutionToolsForNode` works only in Node.js environment');
  }
  const fs = $provideFilesystemForNode();
- const llm = await $provideLlmToolsFromEnv(options);
+ const llm = await $provideLlmToolsFromEnv({ title: 'LLM Tools for Node.js', ...options });
  const executables = await $provideExecutablesForNode();
  const tools = {
  llm,
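In `$provideExecutionToolsForNode`, the default title is placed before `...options`, so a caller-supplied `title` still takes precedence over 'LLM Tools for Node.js'. That is plain object-spread semantics; a quick self-contained check (option names other than `title` are illustrative):

```ts
// Same spread order as in the diff: the default first, caller options last.
const withDefaults = (options: { title?: string; isVerbose?: boolean }) => ({
    title: 'LLM Tools for Node.js',
    ...options,
});

console.log(withDefaults({}).title);                   // 'LLM Tools for Node.js'
console.log(withDefaults({ title: 'My tools' }).title); // 'My tools', the caller override wins
```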
@@ -17399,11 +17571,12 @@ async function $getCompiledBook(tools, pipelineSource, options) {
  catch (error) {
  // Note: Ignore filesystem errors (like EROFS on read-only systems like Vercel)
  // The compiled book can still be used even if it can't be cached
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) ;
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) ;
  else {
  // Re-throw other unexpected errors
  throw error;