@promptbook/wizard 0.101.0-2 → 0.101.0-20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/esm/index.es.js +329 -174
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +20 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +14 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +14 -0
  6. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +41 -3
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  8. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  10. package/esm/typings/src/book-2.0/agent-source/parseParameters.d.ts +13 -0
  11. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +8 -2
  12. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +59 -0
  13. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +8 -2
  14. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +45 -0
  15. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +1 -1
  16. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +46 -0
  17. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +47 -0
  18. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +62 -0
  19. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +31 -4
  20. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +20 -2
  21. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +8 -2
  22. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +46 -0
  23. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +8 -2
  24. package/esm/typings/src/book-2.0/commitments/index.d.ts +7 -3
  25. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  26. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +2 -2
  27. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +63 -0
  28. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/index.d.ts +3 -0
  29. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +15 -0
  30. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +4 -0
  31. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +26 -0
  32. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  33. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  34. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  35. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  36. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  37. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  38. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  39. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  40. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  41. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  42. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  43. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  44. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  45. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  46. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  47. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
  48. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  49. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  50. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +54 -0
  51. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  52. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  53. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  54. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  55. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  56. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
  57. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
  58. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +15 -8
  59. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  60. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  61. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  62. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  63. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  64. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  65. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +1 -0
  66. package/esm/typings/src/version.d.ts +1 -1
  67. package/package.json +2 -2
  68. package/umd/index.umd.js +329 -174
  69. package/umd/index.umd.js.map +1 -1
  70. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  71. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  72. package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
  73. package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
  74. package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
  75. package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
  76. /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/index.es.js CHANGED
@@ -36,7 +36,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-2';
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-20';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2814,7 +2814,7 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-5-mini',
  modelName: 'gpt-5-mini',
- modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
+ modelDescription: 'A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.',
  pricing: {
  prompt: pricing(`$0.25 / 1M tokens`),
  output: pricing(`$2.00 / 1M tokens`),
@@ -2826,7 +2826,7 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-5-nano',
  modelName: 'gpt-5-nano',
- modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
+ modelDescription: 'The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.',
  pricing: {
  prompt: pricing(`$0.05 / 1M tokens`),
  output: pricing(`$0.40 / 1M tokens`),
@@ -2838,7 +2838,7 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4.1',
  modelName: 'gpt-4.1',
- modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
+ modelDescription: 'Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.',
  pricing: {
  prompt: pricing(`$3.00 / 1M tokens`),
  output: pricing(`$12.00 / 1M tokens`),
@@ -2850,7 +2850,7 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4.1-mini',
  modelName: 'gpt-4.1-mini',
- modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
+ modelDescription: 'Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.',
  pricing: {
  prompt: pricing(`$0.80 / 1M tokens`),
  output: pricing(`$3.20 / 1M tokens`),
@@ -2862,7 +2862,7 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4.1-nano',
  modelName: 'gpt-4.1-nano',
- modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
+ modelDescription: 'Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.',
  pricing: {
  prompt: pricing(`$0.20 / 1M tokens`),
  output: pricing(`$0.80 / 1M tokens`),
@@ -2874,7 +2874,7 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'o3',
  modelName: 'o3',
- modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
+ modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.',
  pricing: {
  prompt: pricing(`$15.00 / 1M tokens`),
  output: pricing(`$60.00 / 1M tokens`),
@@ -2886,7 +2886,7 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'o3-pro',
  modelName: 'o3-pro',
- modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
+ modelDescription: 'Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.',
  pricing: {
  prompt: pricing(`$30.00 / 1M tokens`),
  output: pricing(`$120.00 / 1M tokens`),
@@ -2898,7 +2898,7 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'o4-mini',
  modelName: 'o4-mini',
- modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
+ modelDescription: 'Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.',
  pricing: {
  prompt: pricing(`$4.00 / 1M tokens`),
  output: pricing(`$16.00 / 1M tokens`),
@@ -2910,7 +2910,7 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'o3-deep-research',
  modelName: 'o3-deep-research',
- modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
+ modelDescription: 'Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.',
  pricing: {
  prompt: pricing(`$25.00 / 1M tokens`),
  output: pricing(`$100.00 / 1M tokens`),
@@ -2922,7 +2922,7 @@ const OPENAI_MODELS = exportJson({
  modelVariant: 'CHAT',
  modelTitle: 'o4-mini-deep-research',
  modelName: 'o4-mini-deep-research',
- modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
+ modelDescription: 'Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.',
  pricing: {
  prompt: pricing(`$12.00 / 1M tokens`),
  output: pricing(`$48.00 / 1M tokens`),
@@ -4622,6 +4622,62 @@ resultContent, rawResponse) {
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */
 
+ /**
+ * Parses an OpenAI error message to identify which parameter is unsupported
+ *
+ * @param errorMessage The error message from OpenAI API
+ * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+ function parseUnsupportedParameterError(errorMessage) {
+ // Pattern to match "Unsupported value: 'parameter' does not support ..."
+ const unsupportedValueMatch = errorMessage.match(/Unsupported value:\s*'([^']+)'\s*does not support/i);
+ if (unsupportedValueMatch === null || unsupportedValueMatch === void 0 ? void 0 : unsupportedValueMatch[1]) {
+ return unsupportedValueMatch[1];
+ }
+ // Pattern to match "'parameter' of type ... is not supported with this model"
+ const parameterTypeMatch = errorMessage.match(/'([^']+)'\s*of type.*is not supported with this model/i);
+ if (parameterTypeMatch === null || parameterTypeMatch === void 0 ? void 0 : parameterTypeMatch[1]) {
+ return parameterTypeMatch[1];
+ }
+ return null;
+ }
+ /**
+ * Creates a copy of model requirements with the specified parameter removed
+ *
+ * @param modelRequirements Original model requirements
+ * @param unsupportedParameter The parameter to remove
+ * @returns New model requirements without the unsupported parameter
+ * @private utility of LLM Tools
+ */
+ function removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter) {
+ const newRequirements = { ...modelRequirements };
+ // Map of parameter names that might appear in error messages to ModelRequirements properties
+ const parameterMap = {
+ temperature: 'temperature',
+ max_tokens: 'maxTokens',
+ maxTokens: 'maxTokens',
+ seed: 'seed',
+ };
+ const propertyToRemove = parameterMap[unsupportedParameter];
+ if (propertyToRemove && propertyToRemove in newRequirements) {
+ delete newRequirements[propertyToRemove];
+ }
+ return newRequirements;
+ }
+ /**
+ * Checks if an error is an "Unsupported value" error from OpenAI
+ * @param error The error to check
+ * @returns true if this is an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+ function isUnsupportedParameterError(error) {
+ const errorMessage = error.message.toLowerCase();
+ return (errorMessage.includes('unsupported value:') ||
+ errorMessage.includes('is not supported with this model') ||
+ errorMessage.includes('does not support'));
+ }
+
  /**
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
  *
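
The three helpers added in this hunk are plain string/object utilities, so they can be exercised outside the class. A minimal TypeScript sketch, with the regexes copied from the hunk above (the optional-chaining form is what the shipped `void 0` checks compile down from) and a made-up error message in the shape they target:

```ts
// Typed restatement of two helpers from the hunk above; the sample error is invented.
function parseUnsupportedParameterError(errorMessage: string): string | null {
    const unsupportedValueMatch = errorMessage.match(/Unsupported value:\s*'([^']+)'\s*does not support/i);
    if (unsupportedValueMatch?.[1]) {
        return unsupportedValueMatch[1];
    }
    const parameterTypeMatch = errorMessage.match(/'([^']+)'\s*of type.*is not supported with this model/i);
    if (parameterTypeMatch?.[1]) {
        return parameterTypeMatch[1];
    }
    return null;
}

function isUnsupportedParameterError(error: Error): boolean {
    const message = error.message.toLowerCase();
    return (
        message.includes('unsupported value:') ||
        message.includes('is not supported with this model') ||
        message.includes('does not support')
    );
}

const sampleError = new Error("Unsupported value: 'temperature' does not support 0.7 with this model.");
console.log(isUnsupportedParameterError(sampleError)); // true
console.log(parseUnsupportedParameterError(sampleError.message)); // 'temperature'
```
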
@@ -4639,6 +4695,10 @@ class OpenAiCompatibleExecutionTools {
  * OpenAI API client.
  */
  this.client = null;
+ /**
+ * Tracks models and parameters that have already been retried to prevent infinite loops
+ */
+ this.retriedUnsupportedParameters = new Set();
  // TODO: Allow configuring rate limits via options
  this.limiter = new Bottleneck({
  minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
@@ -4700,21 +4760,27 @@ class OpenAiCompatibleExecutionTools {
  * Calls OpenAI compatible API to use a chat model.
  */
  async callChatModel(prompt) {
+ return this.callChatModelWithRetry(prompt, prompt.modelRequirements);
+ }
+ /**
+ * Internal method that handles parameter retry for chat model calls
+ */
+ async callChatModelWithRetry(prompt, currentModelRequirements) {
  var _a;
  if (this.options.isVerbose) {
- console.info(`💬 ${this.title} callChatModel call`, { prompt });
+ console.info(`💬 ${this.title} callChatModel call`, { prompt, currentModelRequirements });
  }
- const { content, parameters, modelRequirements, format } = prompt;
+ const { content, parameters, format } = prompt;
  const client = await this.getClient();
  // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'CHAT') {
+ if (currentModelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
  }
- const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+ const modelName = currentModelRequirements.modelName || this.getDefaultChatModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens,
- temperature: modelRequirements.temperature,
+ max_tokens: currentModelRequirements.maxTokens,
+ temperature: currentModelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
  }; // <- TODO: [💩] Guard here types better
@@ -4729,12 +4795,12 @@ class OpenAiCompatibleExecutionTools {
  const rawRequest = {
  ...modelSettings,
  messages: [
- ...(modelRequirements.systemMessage === undefined
+ ...(currentModelRequirements.systemMessage === undefined
  ? []
  : [
  {
  role: 'system',
- content: modelRequirements.systemMessage,
+ content: currentModelRequirements.systemMessage,
  },
  ]),
  {
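
The `messages` array above is built with a conditional spread, so a `system` turn only appears when `systemMessage` is defined. A standalone sketch of the pattern, with simplified types:

```ts
// Conditional-spread pattern from the request construction above, simplified.
type ChatMessage = { role: 'system' | 'user'; content: string };

function buildMessages(userContent: string, systemMessage?: string): ChatMessage[] {
    return [
        ...(systemMessage === undefined ? [] : [{ role: 'system' as const, content: systemMessage }]),
        { role: 'user' as const, content: userContent },
    ];
}

console.log(buildMessages('Hello'));             // [{ role: 'user', content: 'Hello' }]
console.log(buildMessages('Hello', 'Be terse')); // system turn first, then the user turn
```
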
@@ -4748,69 +4814,110 @@ class OpenAiCompatibleExecutionTools {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- const rawResponse = await this.limiter
- .schedule(() => this.makeRequestWithRetry(() => client.chat.completions.create(rawRequest)))
- .catch((error) => {
- assertsError(error);
+ try {
+ const rawResponse = await this.limiter
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
+ .catch((error) => {
+ assertsError(error);
+ if (this.options.isVerbose) {
+ console.info(colors.bgRed('error'), error);
+ }
+ throw error;
+ });
  if (this.options.isVerbose) {
- console.info(colors.bgRed('error'), error);
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
  }
- throw error;
- });
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- const complete = $getCurrentDate();
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError(`No choises from ${this.title}`);
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should be maybe only warning
- throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+ const complete = $getCurrentDate();
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
+ }
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should be maybe only warning
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+ }
+ const resultContent = rawResponse.choices[0].message.content;
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+ if (resultContent === null) {
+ throw new PipelineExecutionError(`No response message from ${this.title}`);
+ }
+ return exportJson({
+ name: 'promptResult',
+ message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
+ order: [],
+ value: {
+ content: resultContent,
+ modelName: rawResponse.model || modelName,
+ timing: {
+ start,
+ complete,
+ },
+ usage,
+ rawPromptContent,
+ rawRequest,
+ rawResponse,
+ // <- [🗯]
+ },
+ });
  }
- const resultContent = rawResponse.choices[0].message.content;
- const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
- if (resultContent === null) {
- throw new PipelineExecutionError(`No response message from ${this.title}`);
+ catch (error) {
+ assertsError(error);
+ // Check if this is an unsupported parameter error
+ if (!isUnsupportedParameterError(error)) {
+ throw error;
+ }
+ // Parse which parameter is unsupported
+ const unsupportedParameter = parseUnsupportedParameterError(error.message);
+ if (!unsupportedParameter) {
+ if (this.options.isVerbose) {
+ console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+ }
+ throw error;
+ }
+ // Create a unique key for this model + parameter combination to prevent infinite loops
+ const retryKey = `${modelName}-${unsupportedParameter}`;
+ if (this.retriedUnsupportedParameters.has(retryKey)) {
+ // Already retried this parameter, throw the error
+ if (this.options.isVerbose) {
+ console.warn(colors.bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+ }
+ throw error;
+ }
+ // Mark this parameter as retried
+ this.retriedUnsupportedParameters.add(retryKey);
+ // Log warning in verbose mode
+ if (this.options.isVerbose) {
+ console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+ }
+ // Remove the unsupported parameter and retry
+ const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+ return this.callChatModelWithRetry(prompt, modifiedModelRequirements);
  }
- return exportJson({
- name: 'promptResult',
- message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
- order: [],
- value: {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start,
- complete,
- },
- usage,
- rawPromptContent,
- rawRequest,
- rawResponse,
- // <- [🗯]
- },
- });
  }
  /**
  * Calls OpenAI API to use a complete model.
  */
  async callCompletionModel(prompt) {
+ return this.callCompletionModelWithRetry(prompt, prompt.modelRequirements);
+ }
+ /**
+ * Internal method that handles parameter retry for completion model calls
+ */
+ async callCompletionModelWithRetry(prompt, currentModelRequirements) {
  var _a;
  if (this.options.isVerbose) {
- console.info(`🖋 ${this.title} callCompletionModel call`, { prompt });
+ console.info(`🖋 ${this.title} callCompletionModel call`, { prompt, currentModelRequirements });
  }
- const { content, parameters, modelRequirements } = prompt;
+ const { content, parameters } = prompt;
  const client = await this.getClient();
  // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'COMPLETION') {
+ if (currentModelRequirements.modelVariant !== 'COMPLETION') {
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
  }
- const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
+ const modelName = currentModelRequirements.modelName || this.getDefaultCompletionModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens,
- temperature: modelRequirements.temperature,
+ max_tokens: currentModelRequirements.maxTokens,
+ temperature: currentModelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
  };
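
Condensed, the new retry flow is: catch the error, confirm it is an unsupported-parameter complaint, check a `model-parameter` key against the `retriedUnsupportedParameters` set, then strip the parameter and recurse. A self-contained sketch with a stub standing in for the OpenAI client (model name and error text are invented):

```ts
// Self-contained model of the retry loop above; `fakeCreate` stands in for the SDK.
type Requirements = { modelName: string; temperature?: number };

const retriedUnsupportedParameters = new Set<string>();

async function fakeCreate(req: Requirements): Promise<string> {
    if (req.temperature !== undefined) {
        throw new Error("Unsupported value: 'temperature' does not support 0.7 with this model.");
    }
    return `ok from ${req.modelName}`;
}

async function callWithRetry(req: Requirements): Promise<string> {
    try {
        return await fakeCreate(req);
    } catch (error) {
        if (!(error instanceof Error) || !error.message.includes('does not support')) {
            throw error; // Not an unsupported-parameter error
        }
        const retryKey = `${req.modelName}-temperature`;
        if (retriedUnsupportedParameters.has(retryKey)) {
            throw error; // Each model+parameter pair is only retried once
        }
        retriedUnsupportedParameters.add(retryKey);
        const { temperature: _dropped, ...rest } = req; // Remove the offending parameter
        return callWithRetry(rest);
    }
}

callWithRetry({ modelName: 'gpt-5-nano', temperature: 0.7 }).then(console.log); // "ok from gpt-5-nano"
```
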
@@ -4824,46 +4931,81 @@ class OpenAiCompatibleExecutionTools {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- const rawResponse = await this.limiter
- .schedule(() => this.makeRequestWithRetry(() => client.completions.create(rawRequest)))
- .catch((error) => {
- assertsError(error);
+ try {
+ const rawResponse = await this.limiter
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.completions.create(rawRequest)))
+ .catch((error) => {
+ assertsError(error);
+ if (this.options.isVerbose) {
+ console.info(colors.bgRed('error'), error);
+ }
+ throw error;
+ });
  if (this.options.isVerbose) {
- console.info(colors.bgRed('error'), error);
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
  }
- throw error;
- });
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- const complete = $getCurrentDate();
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError(`No choises from ${this.title}`);
+ const complete = $getCurrentDate();
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
+ }
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should be maybe only warning
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+ }
+ const resultContent = rawResponse.choices[0].text;
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+ return exportJson({
+ name: 'promptResult',
+ message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
+ order: [],
+ value: {
+ content: resultContent,
+ modelName: rawResponse.model || modelName,
+ timing: {
+ start,
+ complete,
+ },
+ usage,
+ rawPromptContent,
+ rawRequest,
+ rawResponse,
+ // <- [🗯]
+ },
+ });
  }
- if (rawResponse.choices.length > 1) {
- // TODO: This should be maybe only warning
- throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+ catch (error) {
+ assertsError(error);
+ // Check if this is an unsupported parameter error
+ if (!isUnsupportedParameterError(error)) {
+ throw error;
+ }
+ // Parse which parameter is unsupported
+ const unsupportedParameter = parseUnsupportedParameterError(error.message);
+ if (!unsupportedParameter) {
+ if (this.options.isVerbose) {
+ console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+ }
+ throw error;
+ }
+ // Create a unique key for this model + parameter combination to prevent infinite loops
+ const retryKey = `${modelName}-${unsupportedParameter}`;
+ if (this.retriedUnsupportedParameters.has(retryKey)) {
+ // Already retried this parameter, throw the error
+ if (this.options.isVerbose) {
+ console.warn(colors.bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+ }
+ throw error;
+ }
+ // Mark this parameter as retried
+ this.retriedUnsupportedParameters.add(retryKey);
+ // Log warning in verbose mode
+ if (this.options.isVerbose) {
+ console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+ }
+ // Remove the unsupported parameter and retry
+ const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+ return this.callCompletionModelWithRetry(prompt, modifiedModelRequirements);
  }
- const resultContent = rawResponse.choices[0].text;
- const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
- return exportJson({
- name: 'promptResult',
- message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
- order: [],
- value: {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start,
- complete,
- },
- usage,
- rawPromptContent,
- rawRequest,
- rawResponse,
- // <- [🗯]
- },
- });
  }
  /**
  * Calls OpenAI compatible API to use a embedding model
@@ -4889,7 +5031,7 @@ class OpenAiCompatibleExecutionTools {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
  const rawResponse = await this.limiter
- .schedule(() => this.makeRequestWithRetry(() => client.embeddings.create(rawRequest)))
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.embeddings.create(rawRequest)))
  .catch((error) => {
  assertsError(error);
  if (this.options.isVerbose) {
@@ -4951,7 +5093,7 @@ class OpenAiCompatibleExecutionTools {
  /**
  * Makes a request with retry logic for network errors like ECONNRESET
  */
- async makeRequestWithRetry(requestFn) {
+ async makeRequestWithNetworkRetry(requestFn) {
  let lastError;
  for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
  try {
@@ -4963,8 +5105,8 @@ class OpenAiCompatibleExecutionTools {
  // Check if this is a retryable network error
  const isRetryableError = this.isRetryableNetworkError(error);
  if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
- if (this.options.isVerbose) {
- console.info(colors.bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
+ if (this.options.isVerbose && this.isRetryableNetworkError(error)) {
+ console.info(colors.bgRed('Final network error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
  }
  throw error;
  }
@@ -4974,7 +5116,7 @@ class OpenAiCompatibleExecutionTools {
  const jitterDelay = Math.random() * 500; // Add some randomness
  const totalDelay = backoffDelay + jitterDelay;
  if (this.options.isVerbose) {
- console.info(colors.bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
+ console.info(colors.bgYellow('Retrying network request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
  }
  // Wait before retrying
  await new Promise((resolve) => setTimeout(resolve, totalDelay));
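
The wait between network retries is an exponential backoff plus up to 500 ms of random jitter, which keeps many clients from retrying in lockstep. A sketch of the schedule; the base delay and cap here are illustrative assumptions, not the package's constants:

```ts
// Illustrative backoff schedule; baseMs and capMs are assumed values.
function retryDelayMs(attempt: number, baseMs = 1_000, capMs = 30_000): number {
    const backoffDelay = Math.min(baseMs * 2 ** (attempt - 1), capMs);
    const jitterDelay = Math.random() * 500; // Add some randomness
    return backoffDelay + jitterDelay;
}

for (let attempt = 1; attempt <= 3; attempt++) {
    console.log(`attempt ${attempt}: wait ~${Math.round(retryDelayMs(attempt))} ms`);
}
```
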
@@ -6355,11 +6497,12 @@ async function getScraperIntermediateSource(source, options) {
  catch (error) {
  // Note: If we can't create cache directory, continue without it
  // This handles read-only filesystems, permission issues, and missing parent directories
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) ;
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) ;
  else {
  // Re-throw other unexpected errors
  throw error;
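
This hunk, and the three matching ones further down, only re-wrap an existing guard: cache writes stay best-effort when the filesystem is read-only or restricted. The pattern as a standalone Node sketch (the helper name and path handling are hypothetical):

```ts
import { writeFile } from 'node:fs/promises';

// Best-effort cache write: swallow filesystem errors that indicate a read-only
// or restricted environment (e.g. Vercel), re-throw everything else.
const IGNORABLE_FS_ERRORS = ['EROFS', 'read-only', 'EACCES', 'EPERM', 'ENOENT'];

async function tryWriteCache(path: string, content: string): Promise<void> {
    try {
        await writeFile(path, content, 'utf-8');
    } catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        if (IGNORABLE_FS_ERRORS.some((needle) => message.includes(needle))) {
            return; // Caching is optional; continue without it
        }
        throw error; // Unexpected errors still surface
    }
}
```
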
@@ -7701,6 +7844,25 @@ function countUsage(llmTools) {
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
 
+ /**
+ * Takes an item or an array of items and returns an array of items
+ *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
+ */
+ function arrayableToArray(input) {
+ if (input === undefined) {
+ return [];
+ }
+ if (input instanceof Array) {
+ return input;
+ }
+ return [input];
+ }
+
  /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
  *
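
`arrayableToArray` has moved up so it is defined before its new call site in `getSingleLlmExecutionTools` (see the later hunk). Its three documented cases, in a typed restatement:

```ts
function arrayableToArray<TItem>(input: undefined | TItem | TItem[]): TItem[] {
    if (input === undefined) {
        return [];
    }
    if (input instanceof Array) {
        return input;
    }
    return [input];
}

console.log(arrayableToArray(undefined));  // [] (undefined means "no items")
console.log(arrayableToArray('one'));      // ['one']
console.log(arrayableToArray(null));       // [null] (null is still one item)
console.log(arrayableToArray(['a', 'b'])); // ['a', 'b'] (arrays pass through)
```
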
@@ -7711,12 +7873,10 @@ class MultipleLlmExecutionTools {
  /**
  * Gets array of execution tools in order of priority
  */
- constructor(...llmExecutionTools) {
+ constructor(title, ...llmExecutionTools) {
+ this.title = title;
  this.llmExecutionTools = llmExecutionTools;
  }
- get title() {
- return 'Multiple LLM Providers';
- }
  get description() {
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
  .map(({ title, description }, index) => {
@@ -7802,7 +7962,7 @@ class MultipleLlmExecutionTools {
  return await llmExecutionTools.callEmbeddingModel(prompt);
  // <- case [🤖]:
  default:
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
  }
  }
  catch (error) {
@@ -7823,7 +7983,7 @@ class MultipleLlmExecutionTools {
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
  // 3) ...
  spaceTrim((block) => `
- All execution tools failed:
+ All execution tools of ${this.title} failed:
 
  ${block(errors
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -7832,11 +7992,11 @@ class MultipleLlmExecutionTools {
  `));
  }
  else if (this.llmExecutionTools.length === 0) {
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
  }
  else {
  throw new PipelineExecutionError(spaceTrim((block) => `
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
 
  Available \`LlmExecutionTools\`:
  ${block(this.description)}
@@ -7866,7 +8026,7 @@ class MultipleLlmExecutionTools {
  *
  * @public exported from `@promptbook/core`
  */
- function joinLlmExecutionTools(...llmExecutionTools) {
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
  if (llmExecutionTools.length === 0) {
  const warningMessage = spaceTrim(`
  You have not provided any \`LlmExecutionTools\`
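
`joinLlmExecutionTools` and the `MultipleLlmExecutionTools` constructor above now take an explicit `title`, which flows into the aggregate error messages. A simplified, self-contained sketch of the threading (`NamedTools`, `MultipleToolsSketch`, and `joinToolsSketch` are stand-ins, not the package's types):

```ts
interface NamedTools {
    title: string;
}

class MultipleToolsSketch implements NamedTools {
    public readonly tools: NamedTools[];
    public constructor(public readonly title: string, ...tools: NamedTools[]) {
        this.tools = tools;
    }
}

function joinToolsSketch(title: string | undefined, ...tools: NamedTools[]): MultipleToolsSketch {
    return new MultipleToolsSketch(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...tools);
}

const joined = joinToolsSketch('Playground providers', { title: 'OpenAI' }, { title: 'Claude' });
console.log(`All execution tools of ${joined.title} failed:`); // <- the title now names the failing group
```
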
@@ -7898,30 +8058,27 @@ function joinLlmExecutionTools(...llmExecutionTools) {
  };
  */
  }
- return new MultipleLlmExecutionTools(...llmExecutionTools);
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
  }
  /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
 
  /**
- * Takes an item or an array of items and returns an array of items
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @private internal utility
+ * @public exported from `@promptbook/core`
  */
- function arrayableToArray(input) {
- if (input === undefined) {
- return [];
- }
- if (input instanceof Array) {
- return input;
- }
- return [input];
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+ const llmTools = _llms.length === 1
+ ? _llms[0]
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+ return llmTools;
  }
+ /**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
 
  /**
  * Prepares the persona for the pipeline
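
The repeated `arrayableToArray` + length-check idiom at the call sites below collapses into this helper: a single tool passes through unchanged, several get joined under a descriptive default title. A behavior sketch with stand-in types:

```ts
type Tool = { title: string };

const toArray = <T>(input: undefined | T | T[]): T[] =>
    input === undefined ? [] : Array.isArray(input) ? input : [input];

// Stand-in for `joinLlmExecutionTools`; only the title matters for this sketch.
const joinSketch = (title: string, ..._tools: Tool[]): Tool => ({ title });

function getSingleSketch(oneOrMore: undefined | Tool | Tool[]): Tool {
    const llms = toArray(oneOrMore);
    return llms.length === 1
        ? llms[0]
        : joinSketch('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ...llms);
}

console.log(getSingleSketch({ title: 'OpenAI' }).title); // 'OpenAI' (passthrough, no wrapper)
console.log(getSingleSketch([{ title: 'OpenAI' }, { title: 'Claude' }]).title);
// 'Multiple LLM Providers joined by `getSingleLlmExecutionTools`'
```
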
@@ -7940,8 +8097,7 @@ async function preparePersona(personaDescription, tools, options) {
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
  tools,
  });
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  const availableModels = (await llmTools.listModels())
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
  .map(({ modelName, modelDescription }) => ({
@@ -7985,6 +8141,7 @@ async function preparePersona(personaDescription, tools, options) {
  };
  }
  /**
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [🏢] Check validity of `modelName` in pipeline
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -8557,9 +8714,7 @@ async function preparePipeline(pipeline, tools, options) {
  if (tools === undefined || tools.llm === undefined) {
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
  }
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  const llmToolsWithUsage = countUsage(llmTools);
  // <- TODO: [🌯]
  /*
@@ -9429,9 +9584,7 @@ async function executeAttempts(options) {
  $scriptPipelineExecutionErrors: [],
  $failedResults: [], // Track all failed attempts
  };
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
  const isJokerAttempt = attemptIndex < 0;
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -9951,9 +10104,7 @@ async function getKnowledgeForTask(options) {
  return ''; // <- Note: Np knowledge present, return empty string
  }
  try {
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  const taskEmbeddingPrompt = {
  title: 'Knowledge Search',
  modelRequirements: {
@@ -10554,13 +10705,13 @@ function createPipelineExecutor(options) {
  // Calculate and update tldr based on pipeline progress
  const cv = newOngoingResult;
  // Calculate progress based on parameters resolved vs total parameters
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
  let resolvedParameters = 0;
  let currentTaskTitle = '';
  // Get the resolved parameters from output parameters
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
  // Count how many output parameters have non-empty values
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
  }
  // Try to determine current task from execution report
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
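
The `tldr` progress in this hunk is plain arithmetic: the share of non-input pipeline parameters whose output value is non-empty. An isolated sketch (the real code additionally derives `currentTaskTitle` from the execution report):

```ts
type Parameter = { name: string; isInput: boolean };

function computeProgress(parameters: Parameter[], outputParameters: Record<string, unknown>): number {
    const totalParameters = parameters.filter((p) => !p.isInput).length;
    const resolvedParameters = Object.values(outputParameters).filter(
        (value) => value !== undefined && value !== null && String(value).trim() !== '',
    ).length;
    return totalParameters === 0 ? 0 : resolvedParameters / totalParameters;
}

const parameters: Parameter[] = [
    { name: 'topic', isInput: true }, // input parameters are excluded from the total
    { name: 'outline', isInput: false },
    { name: 'article', isInput: false },
];
console.log(computeProgress(parameters, { outline: 'I. Introduction' })); // 0.5
```
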
@@ -10670,9 +10821,7 @@ class MarkdownScraper {
  throw new MissingToolsError('LLM tools are required for scraping external files');
  // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
  }
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(llm);
  // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
  const collection = createCollectionFromJson(...PipelineCollection);
  const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({
@@ -11449,11 +11598,12 @@ class MarkitdownScraper {
  catch (error) {
  // Note: If we can't write to cache, we'll continue without caching
  // This handles read-only filesystems like Vercel
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) ;
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) ;
  else {
  // Re-throw other unexpected errors
  throw error;
@@ -11753,11 +11903,12 @@ class WebsiteScraper {
  catch (error) {
  // Note: If we can't write to cache, we'll continue without caching
  // This handles read-only filesystems like Vercel
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) ;
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) ;
  else {
  // Re-throw other unexpected errors
  throw error;
@@ -12511,11 +12662,12 @@ class FileCacheStorage {
  catch (error) {
  // Note: If we can't write to cache, silently ignore the error
  // This handles read-only filesystems, permission issues, and missing parent directories
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) {
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) {
  // Silently ignore filesystem errors - caching is optional
  return;
  }
@@ -12808,7 +12960,7 @@ async function $provideLlmToolsConfigurationFromEnv() {
  * @public exported from `@promptbook/core`
  */
  function createLlmToolsFromConfiguration(configuration, options = {}) {
- const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
+ const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
  const llmTools = configuration.map((llmConfiguration) => {
  const registeredItem = $llmToolsRegister
  .list()
@@ -12840,7 +12992,7 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
  ...llmConfiguration.options,
  });
  });
- return joinLlmExecutionTools(...llmTools);
+ return joinLlmExecutionTools(title, ...llmTools);
  }
  /**
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
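
`createLlmToolsFromConfiguration` gains an optional `title` with a fallback, destructured alongside the existing options. The default-resolution pattern in isolation (here `false` stands in for `DEFAULT_IS_VERBOSE`):

```ts
type Options = { title?: string; isVerbose?: boolean; userId?: string };

function resolveOptions(options: Options = {}) {
    const { title = 'LLM Tools from Configuration', isVerbose = false, userId } = options;
    return { title, isVerbose, userId };
}

console.log(resolveOptions().title);                                  // 'LLM Tools from Configuration'
console.log(resolveOptions({ title: 'Production LLM stack' }).title); // 'Production LLM stack'
```
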
@@ -12957,7 +13109,9 @@ async function $provideLlmToolsForWizardOrCli(options) {
  });
  }
  else if (strategy === 'BRING_YOUR_OWN_KEYS') {
- llmExecutionTools = await $provideLlmToolsFromEnv();
+ llmExecutionTools = await $provideLlmToolsFromEnv({
+ title: 'LLM Tools for wizard or CLI with BYOK strategy',
+ });
  }
  else {
  throw new UnexpectedError(`\`$provideLlmToolsForWizardOrCli\` wrong strategy "${strategy}"`);
@@ -16990,7 +17144,7 @@ async function $provideExecutionToolsForNode(options) {
  throw new EnvironmentMismatchError('Function `$getExecutionToolsForNode` works only in Node.js environment');
  }
  const fs = $provideFilesystemForNode();
- const llm = await $provideLlmToolsFromEnv(options);
+ const llm = await $provideLlmToolsFromEnv({ title: 'LLM Tools for Node.js', ...options });
  const executables = await $provideExecutablesForNode();
  const tools = {
  llm,
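
Note the ordering in `{ title: 'LLM Tools for Node.js', ...options }`: because the spread comes second, the default title applies only when the caller did not pass one. A two-line check of that semantics:

```ts
type EnvOptions = { title?: string; isVerbose?: boolean };

const withDefaultTitle = (options: EnvOptions = {}): EnvOptions =>
    ({ title: 'LLM Tools for Node.js', ...options });

console.log(withDefaultTitle().title);                      // 'LLM Tools for Node.js'
console.log(withDefaultTitle({ title: 'My tools' }).title); // 'My tools' (caller wins)
```
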
@@ -17399,11 +17553,12 @@ async function $getCompiledBook(tools, pipelineSource, options) {
  catch (error) {
  // Note: Ignore filesystem errors (like EROFS on read-only systems like Vercel)
  // The compiled book can still be used even if it can't be cached
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) ;
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) ;
  else {
  // Re-throw other unexpected errors
  throw error;