@promptbook/openai 0.101.0-8 → 0.101.0

This diff compares the contents of two publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.
Files changed (88)
  1. package/README.md +0 -4
  2. package/esm/index.es.js +266 -177
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/components.index.d.ts +14 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +12 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +8 -0
  7. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
  8. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  10. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  11. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +2 -8
  12. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
  13. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +2 -8
  14. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +2 -8
  15. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
  16. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +2 -8
  17. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +2 -8
  18. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
  19. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
  20. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
  21. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +25 -10
  22. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -8
  23. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +2 -8
  24. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
  25. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
  26. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +2 -8
  27. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +2 -8
  28. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
  29. package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
  30. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  31. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
  32. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
  33. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
  34. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
  35. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
  36. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
  37. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  38. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  39. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  40. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  41. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  42. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  43. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  44. package/esm/typings/src/execution/PromptResult.d.ts +2 -4
  45. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  46. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  47. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  48. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  49. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  50. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  51. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  52. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  53. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
  54. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  55. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  56. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
  57. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  58. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  59. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  60. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  61. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
  62. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
  63. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  64. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
  65. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
  66. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
  67. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
  68. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
  69. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
  70. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  71. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  72. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  73. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  74. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  75. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  76. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
  77. package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
  78. package/esm/typings/src/version.d.ts +1 -1
  79. package/package.json +2 -2
  80. package/umd/index.umd.js +266 -177
  81. package/umd/index.umd.js.map +1 -1
  82. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  83. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  84. package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
  85. package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
  86. package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
  87. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
  88. /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/index.es.js CHANGED
@@ -19,7 +19,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
   * @generated
   * @see https://github.com/webgptorg/promptbook
   */
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-8';
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0';
  /**
   * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
   * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -866,76 +866,6 @@ function templateParameters(template, parameters) {
      return replacedTemplates;
  }
 
- /**
-  * Predefined profiles for LLM providers to maintain consistency across the application
-  * These profiles represent each provider as a virtual persona in chat interfaces
-  *
-  * @private !!!!
-  */
- const LLM_PROVIDER_PROFILES = {
-     OPENAI: {
-         name: 'OPENAI',
-         fullname: 'OpenAI GPT',
-         color: '#10a37f', // OpenAI's signature green
-         // Note: avatarSrc could be added when we have provider logos available
-     },
-     ANTHROPIC: {
-         name: 'ANTHROPIC',
-         fullname: 'Anthropic Claude',
-         color: '#d97706', // Anthropic's orange/amber color
-     },
-     AZURE_OPENAI: {
-         name: 'AZURE_OPENAI',
-         fullname: 'Azure OpenAI',
-         color: '#0078d4', // Microsoft Azure blue
-     },
-     GOOGLE: {
-         name: 'GOOGLE',
-         fullname: 'Google Gemini',
-         color: '#4285f4', // Google blue
-     },
-     DEEPSEEK: {
-         name: 'DEEPSEEK',
-         fullname: 'DeepSeek',
-         color: '#7c3aed', // Purple color for DeepSeek
-     },
-     OLLAMA: {
-         name: 'OLLAMA',
-         fullname: 'Ollama',
-         color: '#059669', // Emerald green for local models
-     },
-     REMOTE: {
-         name: 'REMOTE',
-         fullname: 'Remote Server',
-         color: '#6b7280', // Gray for remote/proxy connections
-     },
-     MOCKED_ECHO: {
-         name: 'MOCKED_ECHO',
-         fullname: 'Echo (Test)',
-         color: '#8b5cf6', // Purple for test/mock tools
-     },
-     MOCKED_FAKE: {
-         name: 'MOCKED_FAKE',
-         fullname: 'Fake LLM (Test)',
-         color: '#ec4899', // Pink for fake/test tools
-     },
-     VERCEL: {
-         name: 'VERCEL',
-         fullname: 'Vercel AI',
-         color: '#000000', // Vercel's black
-     },
-     MULTIPLE: {
-         name: 'MULTIPLE',
-         fullname: 'Multiple Providers',
-         color: '#6366f1', // Indigo for combined/multiple providers
-     },
- };
- /**
-  * TODO: Refactor this - each profile must be alongside the provider definition
-  * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
-  * Note: [💞] Ignore a discrepancy between file name and entity name
-  */
-
  /**
   * Counts number of characters in the text
   *
@@ -1396,7 +1326,7 @@ const OPENAI_MODELS = exportJson({
          modelVariant: 'CHAT',
          modelTitle: 'gpt-5-mini',
          modelName: 'gpt-5-mini',
-         modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
+         modelDescription: 'A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.',
          pricing: {
              prompt: pricing(`$0.25 / 1M tokens`),
              output: pricing(`$2.00 / 1M tokens`),
@@ -1408,7 +1338,7 @@ const OPENAI_MODELS = exportJson({
          modelVariant: 'CHAT',
          modelTitle: 'gpt-5-nano',
          modelName: 'gpt-5-nano',
-         modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
+         modelDescription: 'The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.',
          pricing: {
              prompt: pricing(`$0.05 / 1M tokens`),
              output: pricing(`$0.40 / 1M tokens`),
@@ -1420,7 +1350,7 @@ const OPENAI_MODELS = exportJson({
          modelVariant: 'CHAT',
          modelTitle: 'gpt-4.1',
          modelName: 'gpt-4.1',
-         modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
+         modelDescription: 'Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.',
          pricing: {
              prompt: pricing(`$3.00 / 1M tokens`),
              output: pricing(`$12.00 / 1M tokens`),
@@ -1432,7 +1362,7 @@ const OPENAI_MODELS = exportJson({
          modelVariant: 'CHAT',
          modelTitle: 'gpt-4.1-mini',
          modelName: 'gpt-4.1-mini',
-         modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
+         modelDescription: 'Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.',
          pricing: {
              prompt: pricing(`$0.80 / 1M tokens`),
              output: pricing(`$3.20 / 1M tokens`),
@@ -1444,7 +1374,7 @@ const OPENAI_MODELS = exportJson({
          modelVariant: 'CHAT',
          modelTitle: 'gpt-4.1-nano',
          modelName: 'gpt-4.1-nano',
-         modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
+         modelDescription: 'Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.',
          pricing: {
              prompt: pricing(`$0.20 / 1M tokens`),
              output: pricing(`$0.80 / 1M tokens`),
@@ -1456,7 +1386,7 @@ const OPENAI_MODELS = exportJson({
          modelVariant: 'CHAT',
          modelTitle: 'o3',
          modelName: 'o3',
-         modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
+         modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.',
          pricing: {
              prompt: pricing(`$15.00 / 1M tokens`),
              output: pricing(`$60.00 / 1M tokens`),
@@ -1468,7 +1398,7 @@ const OPENAI_MODELS = exportJson({
          modelVariant: 'CHAT',
          modelTitle: 'o3-pro',
          modelName: 'o3-pro',
-         modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
+         modelDescription: 'Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.',
          pricing: {
              prompt: pricing(`$30.00 / 1M tokens`),
              output: pricing(`$120.00 / 1M tokens`),
@@ -1480,7 +1410,7 @@ const OPENAI_MODELS = exportJson({
          modelVariant: 'CHAT',
          modelTitle: 'o4-mini',
          modelName: 'o4-mini',
-         modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
+         modelDescription: 'Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.',
          pricing: {
              prompt: pricing(`$4.00 / 1M tokens`),
              output: pricing(`$16.00 / 1M tokens`),
@@ -1492,7 +1422,7 @@ const OPENAI_MODELS = exportJson({
          modelVariant: 'CHAT',
          modelTitle: 'o3-deep-research',
          modelName: 'o3-deep-research',
-         modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
+         modelDescription: 'Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.',
          pricing: {
              prompt: pricing(`$25.00 / 1M tokens`),
              output: pricing(`$100.00 / 1M tokens`),
@@ -1504,7 +1434,7 @@ const OPENAI_MODELS = exportJson({
          modelVariant: 'CHAT',
          modelTitle: 'o4-mini-deep-research',
          modelName: 'o4-mini-deep-research',
-         modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
+         modelDescription: 'Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.',
          pricing: {
              prompt: pricing(`$12.00 / 1M tokens`),
              output: pricing(`$48.00 / 1M tokens`),
@@ -2012,6 +1942,62 @@ resultContent, rawResponse) {
   * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
   */
 
+ /**
+  * Parses an OpenAI error message to identify which parameter is unsupported
+  *
+  * @param errorMessage The error message from OpenAI API
+  * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+  * @private utility of LLM Tools
+  */
+ function parseUnsupportedParameterError(errorMessage) {
+     // Pattern to match "Unsupported value: 'parameter' does not support ..."
+     const unsupportedValueMatch = errorMessage.match(/Unsupported value:\s*'([^']+)'\s*does not support/i);
+     if (unsupportedValueMatch === null || unsupportedValueMatch === void 0 ? void 0 : unsupportedValueMatch[1]) {
+         return unsupportedValueMatch[1];
+     }
+     // Pattern to match "'parameter' of type ... is not supported with this model"
+     const parameterTypeMatch = errorMessage.match(/'([^']+)'\s*of type.*is not supported with this model/i);
+     if (parameterTypeMatch === null || parameterTypeMatch === void 0 ? void 0 : parameterTypeMatch[1]) {
+         return parameterTypeMatch[1];
+     }
+     return null;
+ }
+ /**
+  * Creates a copy of model requirements with the specified parameter removed
+  *
+  * @param modelRequirements Original model requirements
+  * @param unsupportedParameter The parameter to remove
+  * @returns New model requirements without the unsupported parameter
+  * @private utility of LLM Tools
+  */
+ function removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter) {
+     const newRequirements = { ...modelRequirements };
+     // Map of parameter names that might appear in error messages to ModelRequirements properties
+     const parameterMap = {
+         temperature: 'temperature',
+         max_tokens: 'maxTokens',
+         maxTokens: 'maxTokens',
+         seed: 'seed',
+     };
+     const propertyToRemove = parameterMap[unsupportedParameter];
+     if (propertyToRemove && propertyToRemove in newRequirements) {
+         delete newRequirements[propertyToRemove];
+     }
+     return newRequirements;
+ }
+ /**
+  * Checks if an error is an "Unsupported value" error from OpenAI
+  * @param error The error to check
+  * @returns true if this is an unsupported parameter error
+  * @private utility of LLM Tools
+  */
+ function isUnsupportedParameterError(error) {
+     const errorMessage = error.message.toLowerCase();
+     return (errorMessage.includes('unsupported value:') ||
+         errorMessage.includes('is not supported with this model') ||
+         errorMessage.includes('does not support'));
+ }
+
  /**
   * Execution Tools for calling OpenAI API or other OpenAI compatible provider
   *
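Note: together these three helpers implement the new degrade-and-retry behavior: isUnsupportedParameterError() gates the catch block, parseUnsupportedParameterError() extracts the offending parameter name, and removeUnsupportedModelRequirement() strips it before a single retry. A minimal sketch of how they compose (the error text and requirements object below are hypothetical inputs, not from the package):

    const error = new Error("Unsupported value: 'temperature' does not support 0.7 with this model.");
    const requirements = { modelVariant: 'CHAT', modelName: 'o3', temperature: 0.7 };
    if (isUnsupportedParameterError(error)) {
        const parameter = parseUnsupportedParameterError(error.message); // → 'temperature'
        const retried = removeUnsupportedModelRequirement(requirements, parameter);
        // retried → { modelVariant: 'CHAT', modelName: 'o3' }
        // callChatModelWithRetry() then re-issues the request with `retried`, after
        // recording the `${modelName}-${parameter}` key so the same parameter is
        // never retried twice for one model (see the class changes below).
    }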
@@ -2029,6 +2015,10 @@ class OpenAiCompatibleExecutionTools {
       * OpenAI API client.
       */
      this.client = null;
+     /**
+      * Tracks models and parameters that have already been retried to prevent infinite loops
+      */
+     this.retriedUnsupportedParameters = new Set();
      // TODO: Allow configuring rate limits via options
      this.limiter = new Bottleneck({
          minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
@@ -2090,21 +2080,27 @@
       * Calls OpenAI compatible API to use a chat model.
       */
      async callChatModel(prompt) {
+         return this.callChatModelWithRetry(prompt, prompt.modelRequirements);
+     }
+     /**
+      * Internal method that handles parameter retry for chat model calls
+      */
+     async callChatModelWithRetry(prompt, currentModelRequirements) {
          var _a;
          if (this.options.isVerbose) {
-             console.info(`💎 ${this.title} callChatModel call`, { prompt });
+             console.info(`💎 ${this.title} callChatModel call`, { prompt, currentModelRequirements });
          }
-         const { content, parameters, modelRequirements, format } = prompt;
+         const { content, parameters, format } = prompt;
          const client = await this.getClient();
          // TODO: [☂] Use here more modelRequirements
-         if (modelRequirements.modelVariant !== 'CHAT') {
+         if (currentModelRequirements.modelVariant !== 'CHAT') {
              throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
          }
-         const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+         const modelName = currentModelRequirements.modelName || this.getDefaultChatModel().modelName;
          const modelSettings = {
              model: modelName,
-             max_tokens: modelRequirements.maxTokens,
-             temperature: modelRequirements.temperature,
+             max_tokens: currentModelRequirements.maxTokens,
+             temperature: currentModelRequirements.temperature,
              // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
              // <- Note: [🧆]
          }; // <- TODO: [💩] Guard here types better
@@ -2119,12 +2115,12 @@
          const rawRequest = {
              ...modelSettings,
              messages: [
-                 ...(modelRequirements.systemMessage === undefined
+                 ...(currentModelRequirements.systemMessage === undefined
                      ? []
                      : [
                          {
                              role: 'system',
-                             content: modelRequirements.systemMessage,
+                             content: currentModelRequirements.systemMessage,
                          },
                      ]),
                  {
@@ -2138,69 +2134,110 @@
          if (this.options.isVerbose) {
              console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
          }
-         const rawResponse = await this.limiter
-             .schedule(() => this.makeRequestWithRetry(() => client.chat.completions.create(rawRequest)))
-             .catch((error) => {
-             assertsError(error);
+         try {
+             const rawResponse = await this.limiter
+                 .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
+                 .catch((error) => {
+                 assertsError(error);
+                 if (this.options.isVerbose) {
+                     console.info(colors.bgRed('error'), error);
+                 }
+                 throw error;
+             });
              if (this.options.isVerbose) {
-                 console.info(colors.bgRed('error'), error);
+                 console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
              }
-             throw error;
-         });
-         if (this.options.isVerbose) {
-             console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-         }
-         const complete = $getCurrentDate();
-         if (!rawResponse.choices[0]) {
-             throw new PipelineExecutionError(`No choises from ${this.title}`);
-         }
-         if (rawResponse.choices.length > 1) {
-             // TODO: This should be maybe only warning
-             throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+             const complete = $getCurrentDate();
+             if (!rawResponse.choices[0]) {
+                 throw new PipelineExecutionError(`No choises from ${this.title}`);
+             }
+             if (rawResponse.choices.length > 1) {
+                 // TODO: This should be maybe only warning
+                 throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+             }
+             const resultContent = rawResponse.choices[0].message.content;
+             const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+             if (resultContent === null) {
+                 throw new PipelineExecutionError(`No response message from ${this.title}`);
+             }
+             return exportJson({
+                 name: 'promptResult',
+                 message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
+                 order: [],
+                 value: {
+                     content: resultContent,
+                     modelName: rawResponse.model || modelName,
+                     timing: {
+                         start,
+                         complete,
+                     },
+                     usage,
+                     rawPromptContent,
+                     rawRequest,
+                     rawResponse,
+                     // <- [🗯]
+                 },
+             });
          }
-         const resultContent = rawResponse.choices[0].message.content;
-         const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
-         if (resultContent === null) {
-             throw new PipelineExecutionError(`No response message from ${this.title}`);
+         catch (error) {
+             assertsError(error);
+             // Check if this is an unsupported parameter error
+             if (!isUnsupportedParameterError(error)) {
+                 throw error;
+             }
+             // Parse which parameter is unsupported
+             const unsupportedParameter = parseUnsupportedParameterError(error.message);
+             if (!unsupportedParameter) {
+                 if (this.options.isVerbose) {
+                     console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                 }
+                 throw error;
+             }
+             // Create a unique key for this model + parameter combination to prevent infinite loops
+             const retryKey = `${modelName}-${unsupportedParameter}`;
+             if (this.retriedUnsupportedParameters.has(retryKey)) {
+                 // Already retried this parameter, throw the error
+                 if (this.options.isVerbose) {
+                     console.warn(colors.bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+                 }
+                 throw error;
+             }
+             // Mark this parameter as retried
+             this.retriedUnsupportedParameters.add(retryKey);
+             // Log warning in verbose mode
+             if (this.options.isVerbose) {
+                 console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+             }
+             // Remove the unsupported parameter and retry
+             const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+             return this.callChatModelWithRetry(prompt, modifiedModelRequirements);
          }
-         return exportJson({
-             name: 'promptResult',
-             message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
-             order: [],
-             value: {
-                 content: resultContent,
-                 modelName: rawResponse.model || modelName,
-                 timing: {
-                     start,
-                     complete,
-                 },
-                 usage,
-                 rawPromptContent,
-                 rawRequest,
-                 rawResponse,
-                 // <- [🗯]
-             },
-         });
      }
      /**
       * Calls OpenAI API to use a complete model.
       */
      async callCompletionModel(prompt) {
+         return this.callCompletionModelWithRetry(prompt, prompt.modelRequirements);
+     }
+     /**
+      * Internal method that handles parameter retry for completion model calls
+      */
+     async callCompletionModelWithRetry(prompt, currentModelRequirements) {
          var _a;
          if (this.options.isVerbose) {
-             console.info(`🖋 ${this.title} callCompletionModel call`, { prompt });
+             console.info(`🖋 ${this.title} callCompletionModel call`, { prompt, currentModelRequirements });
          }
-         const { content, parameters, modelRequirements } = prompt;
+         const { content, parameters } = prompt;
          const client = await this.getClient();
          // TODO: [☂] Use here more modelRequirements
-         if (modelRequirements.modelVariant !== 'COMPLETION') {
+         if (currentModelRequirements.modelVariant !== 'COMPLETION') {
              throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
          }
-         const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
+         const modelName = currentModelRequirements.modelName || this.getDefaultCompletionModel().modelName;
          const modelSettings = {
              model: modelName,
-             max_tokens: modelRequirements.maxTokens,
-             temperature: modelRequirements.temperature,
+             max_tokens: currentModelRequirements.maxTokens,
+             temperature: currentModelRequirements.temperature,
              // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
              // <- Note: [🧆]
          };
@@ -2214,46 +2251,81 @@
          if (this.options.isVerbose) {
              console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
          }
-         const rawResponse = await this.limiter
-             .schedule(() => this.makeRequestWithRetry(() => client.completions.create(rawRequest)))
-             .catch((error) => {
-             assertsError(error);
+         try {
+             const rawResponse = await this.limiter
+                 .schedule(() => this.makeRequestWithNetworkRetry(() => client.completions.create(rawRequest)))
+                 .catch((error) => {
+                 assertsError(error);
+                 if (this.options.isVerbose) {
+                     console.info(colors.bgRed('error'), error);
+                 }
+                 throw error;
+             });
              if (this.options.isVerbose) {
-                 console.info(colors.bgRed('error'), error);
+                 console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
              }
-             throw error;
-         });
-         if (this.options.isVerbose) {
-             console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-         }
-         const complete = $getCurrentDate();
-         if (!rawResponse.choices[0]) {
-             throw new PipelineExecutionError(`No choises from ${this.title}`);
+             const complete = $getCurrentDate();
+             if (!rawResponse.choices[0]) {
+                 throw new PipelineExecutionError(`No choises from ${this.title}`);
+             }
+             if (rawResponse.choices.length > 1) {
+                 // TODO: This should be maybe only warning
+                 throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+             }
+             const resultContent = rawResponse.choices[0].text;
+             const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+             return exportJson({
+                 name: 'promptResult',
+                 message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
+                 order: [],
+                 value: {
+                     content: resultContent,
+                     modelName: rawResponse.model || modelName,
+                     timing: {
+                         start,
+                         complete,
+                     },
+                     usage,
+                     rawPromptContent,
+                     rawRequest,
+                     rawResponse,
+                     // <- [🗯]
+                 },
+             });
          }
-         if (rawResponse.choices.length > 1) {
-             // TODO: This should be maybe only warning
-             throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+         catch (error) {
+             assertsError(error);
+             // Check if this is an unsupported parameter error
+             if (!isUnsupportedParameterError(error)) {
+                 throw error;
+             }
+             // Parse which parameter is unsupported
+             const unsupportedParameter = parseUnsupportedParameterError(error.message);
+             if (!unsupportedParameter) {
+                 if (this.options.isVerbose) {
+                     console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                 }
+                 throw error;
+             }
+             // Create a unique key for this model + parameter combination to prevent infinite loops
+             const retryKey = `${modelName}-${unsupportedParameter}`;
+             if (this.retriedUnsupportedParameters.has(retryKey)) {
+                 // Already retried this parameter, throw the error
+                 if (this.options.isVerbose) {
+                     console.warn(colors.bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+                 }
+                 throw error;
+             }
+             // Mark this parameter as retried
+             this.retriedUnsupportedParameters.add(retryKey);
+             // Log warning in verbose mode
+             if (this.options.isVerbose) {
+                 console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+             }
+             // Remove the unsupported parameter and retry
+             const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+             return this.callCompletionModelWithRetry(prompt, modifiedModelRequirements);
          }
-         const resultContent = rawResponse.choices[0].text;
-         const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
-         return exportJson({
-             name: 'promptResult',
-             message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
-             order: [],
-             value: {
-                 content: resultContent,
-                 modelName: rawResponse.model || modelName,
-                 timing: {
-                     start,
-                     complete,
-                 },
-                 usage,
-                 rawPromptContent,
-                 rawRequest,
-                 rawResponse,
-                 // <- [🗯]
-             },
-         });
      }
      /**
       * Calls OpenAI compatible API to use a embedding model
@@ -2279,7 +2351,7 @@
              console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
          }
          const rawResponse = await this.limiter
-             .schedule(() => this.makeRequestWithRetry(() => client.embeddings.create(rawRequest)))
+             .schedule(() => this.makeRequestWithNetworkRetry(() => client.embeddings.create(rawRequest)))
              .catch((error) => {
              assertsError(error);
              if (this.options.isVerbose) {
@@ -2341,7 +2413,7 @@
      /**
       * Makes a request with retry logic for network errors like ECONNRESET
       */
-     async makeRequestWithRetry(requestFn) {
+     async makeRequestWithNetworkRetry(requestFn) {
          let lastError;
          for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
              try {
@@ -2353,8 +2425,8 @@
              // Check if this is a retryable network error
              const isRetryableError = this.isRetryableNetworkError(error);
              if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
-                 if (this.options.isVerbose) {
-                     console.info(colors.bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
+                 if (this.options.isVerbose && this.isRetryableNetworkError(error)) {
+                     console.info(colors.bgRed('Final network error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
                  }
                  throw error;
              }
@@ -2364,7 +2436,7 @@
              const jitterDelay = Math.random() * 500; // Add some randomness
              const totalDelay = backoffDelay + jitterDelay;
              if (this.options.isVerbose) {
-                 console.info(colors.bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
+                 console.info(colors.bgYellow('Retrying network request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
              }
              // Wait before retrying
              await new Promise((resolve) => setTimeout(resolve, totalDelay));
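Note: the total delay above combines exponential backoff with up to 500 ms of random jitter. A standalone sketch of the schedule; the exact backoffDelay formula and CONNECTION_RETRIES_LIMIT are defined outside this hunk, so the 1-second doubling base below is an illustrative assumption:

    function retryDelayMs(attempt) {
        const backoffDelay = 1000 * 2 ** (attempt - 1); // assumed exponential base, not confirmed by this hunk
        const jitterDelay = Math.random() * 500; // same jitter as the code above
        return backoffDelay + jitterDelay; // attempt 1 ≈ 1.0-1.5 s, attempt 2 ≈ 2.0-2.5 s, ...
    }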
@@ -2413,8 +2485,17 @@
   * TODO: [🛄] Maybe make custom `OpenAiCompatibleError`
   * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
   * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+  * TODO: [🧠][🦢] Make reverse adapter from LlmExecutionTools to OpenAI-compatible:
   */
 
+ /**
+  * Profile for OpenAI provider
+  */
+ const OPENAI_PROVIDER_PROFILE = {
+     name: 'OPENAI',
+     fullname: 'OpenAI GPT',
+     color: '#10a37f',
+ };
  /**
   * Execution Tools for calling OpenAI API
   *
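Note: this provider-local constant replaces the OPENAI entry of the central LLM_PROVIDER_PROFILES map deleted above; the next hunk switches the profile getter over to it. A hypothetical sketch (not from the package) of how a chat UI might consume the profile:

    function renderProviderChip(tools) {
        // `tools` is any LlmExecutionTools instance, e.g. an OpenAiExecutionTools
        const { name, fullname, color } = tools.profile; // 'OPENAI', 'OpenAI GPT', '#10a37f'
        return `<span style="color: ${color}" title="${name}">${fullname}</span>`;
    }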
@@ -2437,7 +2518,7 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
          return 'Use all models provided by OpenAI';
      }
      get profile() {
-         return LLM_PROVIDER_PROFILES.OPENAI;
+         return OPENAI_PROVIDER_PROFILE;
      }
      /*
          Note: Commenting this out to avoid circular dependency
@@ -3015,6 +3096,14 @@ async function createRemoteClient(options) {
      });
  }
 
+ /**
+  * Profile for Remote provider
+  */
+ const REMOTE_PROVIDER_PROFILE = {
+     name: 'REMOTE',
+     fullname: 'Remote Server',
+     color: '#6b7280',
+ };
  /**
   * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
   *
@@ -3037,7 +3126,7 @@ class RemoteLlmExecutionTools {
          return `Models from Promptbook remote server ${this.options.remoteServerUrl}`;
      }
      get profile() {
-         return LLM_PROVIDER_PROFILES.REMOTE;
+         return REMOTE_PROVIDER_PROFILE;
      }
      /**
       * Check the configuration of all execution tools