@promptbook/openai 0.100.0-6 → 0.100.0-64

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129)
  1. package/README.md +7 -10
  2. package/esm/index.es.js +304 -12
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/color.index.d.ts +50 -0
  5. package/esm/typings/src/_packages/components.index.d.ts +36 -0
  6. package/esm/typings/src/_packages/core.index.d.ts +30 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +38 -0
  8. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.d.ts +30 -0
  9. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.test.d.ts +1 -0
  10. package/esm/typings/src/book-2.0/agent-source/string_book.d.ts +26 -0
  11. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +38 -0
  12. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +39 -0
  13. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +45 -0
  14. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +44 -0
  15. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +56 -0
  16. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +39 -0
  17. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +49 -0
  18. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +46 -0
  19. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +44 -0
  20. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +44 -0
  21. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +38 -0
  22. package/esm/typings/src/book-2.0/commitments/_base/BaseCommitmentDefinition.d.ts +52 -0
  23. package/esm/typings/src/book-2.0/commitments/_base/BookCommitment.d.ts +5 -0
  24. package/esm/typings/src/book-2.0/commitments/_base/CommitmentDefinition.d.ts +48 -0
  25. package/esm/typings/src/book-2.0/commitments/_base/NotYetImplementedCommitmentDefinition.d.ts +22 -0
  26. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +19 -0
  27. package/esm/typings/src/book-2.0/commitments/_misc/AgentModelRequirements.d.ts +37 -0
  28. package/esm/typings/src/book-2.0/commitments/_misc/AgentSourceParseResult.d.ts +18 -0
  29. package/esm/typings/src/book-2.0/commitments/_misc/ParsedCommitment.d.ts +22 -0
  30. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirements.d.ts +62 -0
  31. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirementsWithCommitments.d.ts +36 -0
  32. package/esm/typings/src/book-2.0/commitments/_misc/createCommitmentRegex.d.ts +20 -0
  33. package/esm/typings/src/book-2.0/commitments/_misc/parseAgentSourceWithCommitments.d.ts +24 -0
  34. package/esm/typings/src/book-2.0/commitments/_misc/removeCommentsFromSystemMessage.d.ts +11 -0
  35. package/esm/typings/src/book-2.0/commitments/index.d.ts +56 -0
  36. package/esm/typings/src/book-2.0/utils/profileImageUtils.d.ts +39 -0
  37. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +35 -0
  38. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChipFromSource.d.ts +21 -0
  39. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/index.d.ts +2 -0
  40. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +26 -0
  41. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfileFromSource.d.ts +19 -0
  42. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +35 -0
  43. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +15 -0
  44. package/esm/typings/src/book-components/BookEditor/config.d.ts +10 -0
  45. package/esm/typings/src/book-components/BookEditor/injectCssModuleIntoShadowRoot.d.ts +11 -0
  46. package/esm/typings/src/book-components/Chat/Chat/Chat.d.ts +20 -0
  47. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +110 -0
  48. package/esm/typings/src/book-components/Chat/LlmChat/LlmChat.d.ts +14 -0
  49. package/esm/typings/src/book-components/Chat/LlmChat/LlmChat.test.d.ts +1 -0
  50. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +24 -0
  51. package/esm/typings/src/book-components/Chat/types/ChatMessage.d.ts +16 -0
  52. package/esm/typings/src/book-components/Chat/types/ChatParticipant.d.ts +32 -0
  53. package/esm/typings/src/book-components/Chat/utils/ChatPersistence.d.ts +25 -0
  54. package/esm/typings/src/book-components/Chat/utils/ExportFormat.d.ts +4 -0
  55. package/esm/typings/src/book-components/Chat/utils/addUtmParamsToUrl.d.ts +7 -0
  56. package/esm/typings/src/book-components/Chat/utils/createShortLinkForChat.d.ts +7 -0
  57. package/esm/typings/src/book-components/Chat/utils/downloadFile.d.ts +6 -0
  58. package/esm/typings/src/book-components/Chat/utils/exportChatHistory.d.ts +9 -0
  59. package/esm/typings/src/book-components/Chat/utils/generatePdfContent.d.ts +8 -0
  60. package/esm/typings/src/book-components/Chat/utils/generateQrDataUrl.d.ts +7 -0
  61. package/esm/typings/src/book-components/Chat/utils/getPromptbookBranding.d.ts +6 -0
  62. package/esm/typings/src/book-components/Chat/utils/messagesToHtml.d.ts +8 -0
  63. package/esm/typings/src/book-components/Chat/utils/messagesToJson.d.ts +7 -0
  64. package/esm/typings/src/book-components/Chat/utils/messagesToMarkdown.d.ts +8 -0
  65. package/esm/typings/src/book-components/Chat/utils/messagesToText.d.ts +8 -0
  66. package/esm/typings/src/book-components/_common/react-utils/classNames.d.ts +7 -0
  67. package/esm/typings/src/book-components/_common/react-utils/collectCssTextsForClass.d.ts +7 -0
  68. package/esm/typings/src/book-components/_common/react-utils/escapeHtml.d.ts +6 -0
  69. package/esm/typings/src/book-components/_common/react-utils/escapeRegex.d.ts +6 -0
  70. package/esm/typings/src/config.d.ts +19 -0
  71. package/esm/typings/src/execution/AvailableModel.d.ts +4 -0
  72. package/esm/typings/src/execution/ExecutionTask.d.ts +27 -1
  73. package/esm/typings/src/execution/LlmExecutionTools.d.ts +8 -0
  74. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +6 -1
  75. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +0 -3
  76. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +81 -0
  77. package/esm/typings/src/llm-providers/_common/profiles/test/llmProviderProfiles.test.d.ts +1 -0
  78. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -0
  79. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +5 -5
  80. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  81. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  82. package/esm/typings/src/llm-providers/google/google-models.d.ts +1 -1
  83. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +5 -0
  84. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +1 -1
  85. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +8 -0
  86. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +5 -0
  87. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  88. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -0
  89. package/esm/typings/src/pipeline/book-notation.d.ts +2 -1
  90. package/esm/typings/src/playground/permanent/error-handling-playground.d.ts +5 -0
  91. package/esm/typings/src/types/ModelRequirements.d.ts +0 -2
  92. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  93. package/esm/typings/src/utils/color/$randomColor.d.ts +11 -0
  94. package/esm/typings/src/utils/color/Color.d.ts +180 -0
  95. package/esm/typings/src/utils/color/css-colors.d.ts +159 -0
  96. package/esm/typings/src/utils/color/internal-utils/checkChannelValue.d.ts +14 -0
  97. package/esm/typings/src/utils/color/internal-utils/hslToRgb.d.ts +17 -0
  98. package/esm/typings/src/utils/color/internal-utils/rgbToHsl.d.ts +17 -0
  99. package/esm/typings/src/utils/color/operators/ColorTransformer.d.ts +5 -0
  100. package/esm/typings/src/utils/color/operators/darken.d.ts +9 -0
  101. package/esm/typings/src/utils/color/operators/furthest.d.ts +16 -0
  102. package/esm/typings/src/utils/color/operators/grayscale.d.ts +9 -0
  103. package/esm/typings/src/utils/color/operators/lighten.d.ts +12 -0
  104. package/esm/typings/src/utils/color/operators/mixWithColor.d.ts +11 -0
  105. package/esm/typings/src/utils/color/operators/nearest.d.ts +10 -0
  106. package/esm/typings/src/utils/color/operators/negative.d.ts +7 -0
  107. package/esm/typings/src/utils/color/operators/negativeLightness.d.ts +7 -0
  108. package/esm/typings/src/utils/color/operators/withAlpha.d.ts +9 -0
  109. package/esm/typings/src/utils/color/utils/areColorsEqual.d.ts +14 -0
  110. package/esm/typings/src/utils/color/utils/colorDistance.d.ts +21 -0
  111. package/esm/typings/src/utils/color/utils/colorHue.d.ts +11 -0
  112. package/esm/typings/src/utils/color/utils/colorHueDistance.d.ts +11 -0
  113. package/esm/typings/src/utils/color/utils/colorHueDistance.test.d.ts +1 -0
  114. package/esm/typings/src/utils/color/utils/colorLuminance.d.ts +9 -0
  115. package/esm/typings/src/utils/color/utils/colorSatulightion.d.ts +7 -0
  116. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +9 -0
  117. package/esm/typings/src/utils/color/utils/colorToDataUrl.d.ts +10 -0
  118. package/esm/typings/src/utils/color/utils/mixColors.d.ts +11 -0
  119. package/esm/typings/src/utils/organization/preserve.d.ts +21 -0
  120. package/esm/typings/src/utils/take/classes/TakeChain.d.ts +11 -0
  121. package/esm/typings/src/utils/take/interfaces/ITakeChain.d.ts +12 -0
  122. package/esm/typings/src/utils/take/interfaces/Takeable.d.ts +7 -0
  123. package/esm/typings/src/utils/take/take.d.ts +12 -0
  124. package/esm/typings/src/utils/take/take.test.d.ts +1 -0
  125. package/esm/typings/src/version.d.ts +1 -1
  126. package/package.json +2 -2
  127. package/umd/index.umd.js +304 -12
  128. package/umd/index.umd.js.map +1 -1
  129. package/esm/typings/src/scripting/javascript/utils/preserve.d.ts +0 -14
package/README.md CHANGED
@@ -10,14 +10,18 @@ Write AI applications using plain human language across multiple models and plat
10
10
  [![NPM Version of ![Promptbook logo - cube with letters P and B](./design/logo-h1.png) Promptbook](https://badge.fury.io/js/promptbook.svg)](https://www.npmjs.com/package/promptbook)
11
11
  [![Quality of package ![Promptbook logo - cube with letters P and B](./design/logo-h1.png) Promptbook](https://packagequality.com/shield/promptbook.svg)](https://packagequality.com/#?package=promptbook)
12
12
  [![Known Vulnerabilities](https://snyk.io/test/github/webgptorg/promptbook/badge.svg)](https://snyk.io/test/github/webgptorg/promptbook)
13
- [![Build Status](https://github.com/webgptorg/promptbook/actions/workflows/ci.yml/badge.svg)](https://github.com/webgptorg/promptbook/actions)
14
- [![Coverage Status](https://coveralls.io/repos/github/webgptorg/promptbook/badge.svg?branch=main)](https://coveralls.io/github/webgptorg/promptbook?branch=main)
13
+ [![🧪 Test Books](https://github.com/webgptorg/promptbook/actions/workflows/test-books.yml/badge.svg)](https://github.com/webgptorg/promptbook/actions/workflows/test-books.yml)
14
+ [![🧪 Test build](https://github.com/webgptorg/promptbook/actions/workflows/test-build.yml/badge.svg)](https://github.com/webgptorg/promptbook/actions/workflows/test-build.yml)
15
+ [![🧪 Lint](https://github.com/webgptorg/promptbook/actions/workflows/test-lint.yml/badge.svg)](https://github.com/webgptorg/promptbook/actions/workflows/test-lint.yml)
16
+ [![🧪 Spell check](https://github.com/webgptorg/promptbook/actions/workflows/test-spell-check.yml/badge.svg)](https://github.com/webgptorg/promptbook/actions/workflows/test-spell-check.yml)
17
+ [![🧪 Test types](https://github.com/webgptorg/promptbook/actions/workflows/test-types.yml/badge.svg)](https://github.com/webgptorg/promptbook/actions/workflows/test-types.yml)
15
18
  [![Issues](https://img.shields.io/github/issues/webgptorg/promptbook.svg?style=flat)](https://github.com/webgptorg/promptbook/issues)
16
19
 
17
20
 
18
21
 
19
22
  ## 🌟 New Features
20
23
 
24
+ - 🚀 **GPT-5 Support** - Now includes OpenAI's most advanced language model with unprecedented reasoning capabilities and 200K context window
21
25
  - 💡 VS Code support for `.book` files with syntax highlighting and IntelliSense
22
26
  - 🐳 Official Docker image (`hejny/promptbook`) for seamless containerized usage
23
27
  - 🔥 Native support for OpenAI `o3-mini`, GPT-4 and other leading LLMs
@@ -312,8 +316,6 @@ Rest of the documentation is common for **entire promptbook ecosystem**:
312
316
 
313
317
  During the computer revolution, we have seen [multiple generations of computer languages](https://github.com/webgptorg/promptbook/discussions/180), from the physical rewiring of the vacuum tubes through low-level machine code to the high-level languages like Python or JavaScript. And now, we're on the edge of the **next revolution**!
314
318
 
315
-
316
-
317
319
  It's a revolution of writing software in **plain human language** that is understandable and executable by both humans and machines – and it's going to change everything!
318
320
 
319
321
  The incredible growth in power of microprocessors and the Moore's Law have been the driving force behind the ever-more powerful languages, and it's been an amazing journey! Similarly, the large language models (like GPT or Claude) are the next big thing in language technology, and they're set to transform the way we interact with computers.
@@ -439,8 +441,6 @@ Join our growing community of developers and users:
439
441
 
440
442
  _A concise, Markdown-based DSL for crafting AI workflows and automations._
441
443
 
442
-
443
-
444
444
  ### Introduction
445
445
 
446
446
  Book is a Markdown-based language that simplifies the creation of AI applications, workflows, and automations. With human-readable commands, you can define inputs, outputs, personas, knowledge sources, and actions—without needing model-specific details.
@@ -490,8 +490,6 @@ Personas can have access to different knowledge, tools and actions. They can als
490
490
 
491
491
  - [PERSONA](https://github.com/webgptorg/promptbook/blob/main/documents/commands/PERSONA.md)
492
492
 
493
-
494
-
495
493
  ### **3. How:** Knowledge, Instruments and Actions
496
494
 
497
495
  The resources used by the personas are used to do the work.
@@ -566,6 +564,7 @@ Or you can install them separately:
566
564
  - **[@promptbook/editable](https://www.npmjs.com/package/@promptbook/editable)** - Editable book as native javascript object with imperative object API
567
565
  - **[@promptbook/templates](https://www.npmjs.com/package/@promptbook/templates)** - Useful templates and examples of books which can be used as a starting point
568
566
  - **[@promptbook/types](https://www.npmjs.com/package/@promptbook/types)** - Just typescript types used in the library
567
+ - **[@promptbook/color](https://www.npmjs.com/package/@promptbook/color)** - Color manipulation library
569
568
  - ⭐ **[@promptbook/cli](https://www.npmjs.com/package/@promptbook/cli)** - Command line interface utilities for promptbooks
570
569
  - 🐋 **[Docker image](https://hub.docker.com/r/hejny/promptbook/)** - Promptbook server
571
570
 
@@ -591,8 +590,6 @@ The following glossary is used to clarify certain concepts:
591
590
 
592
591
  _Note: This section is not a complete dictionary, more list of general AI / LLM terms that has connection with Promptbook_
593
592
 
594
-
595
-
596
593
  ### 💯 Core concepts
597
594
 
598
595
  - [📚 Collection of pipelines](https://github.com/webgptorg/promptbook/discussions/65)
package/esm/index.es.js CHANGED
@@ -19,7 +19,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
19
19
  * @generated
20
20
  * @see https://github.com/webgptorg/promptbook
21
21
  */
22
- const PROMPTBOOK_ENGINE_VERSION = '0.100.0-6';
22
+ const PROMPTBOOK_ENGINE_VERSION = '0.100.0-64';
23
23
  /**
24
24
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
25
25
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -310,6 +310,13 @@ Object.freeze({
310
310
  * @public exported from `@promptbook/core`
311
311
  */
312
312
  const DEFAULT_MAX_REQUESTS_PER_MINUTE = 60;
313
+ /**
314
+ * API request timeout in milliseconds
315
+ * Can be overridden via API_REQUEST_TIMEOUT environment variable
316
+ *
317
+ * @public exported from `@promptbook/core`
318
+ */
319
+ const API_REQUEST_TIMEOUT = parseInt(process.env.API_REQUEST_TIMEOUT || '90000');
313
320
  /**
314
321
  * Note: [💞] Ignore a discrepancy between file name and entity name
315
322
  * TODO: [🧠][🧜‍♂️] Maybe join remoteServerUrl and path into single value
@@ -858,6 +865,76 @@ function templateParameters(template, parameters) {
858
865
  return replacedTemplates;
859
866
  }
860
867
 
868
+ /**
869
+ * Predefined profiles for LLM providers to maintain consistency across the application
870
+ * These profiles represent each provider as a virtual persona in chat interfaces
871
+ *
872
+ * @private !!!!
873
+ */
874
+ const LLM_PROVIDER_PROFILES = {
875
+ OPENAI: {
876
+ name: 'OPENAI',
877
+ fullname: 'OpenAI GPT',
878
+ color: '#10a37f', // OpenAI's signature green
879
+ // Note: avatarSrc could be added when we have provider logos available
880
+ },
881
+ ANTHROPIC: {
882
+ name: 'ANTHROPIC',
883
+ fullname: 'Anthropic Claude',
884
+ color: '#d97706', // Anthropic's orange/amber color
885
+ },
886
+ AZURE_OPENAI: {
887
+ name: 'AZURE_OPENAI',
888
+ fullname: 'Azure OpenAI',
889
+ color: '#0078d4', // Microsoft Azure blue
890
+ },
891
+ GOOGLE: {
892
+ name: 'GOOGLE',
893
+ fullname: 'Google Gemini',
894
+ color: '#4285f4', // Google blue
895
+ },
896
+ DEEPSEEK: {
897
+ name: 'DEEPSEEK',
898
+ fullname: 'DeepSeek',
899
+ color: '#7c3aed', // Purple color for DeepSeek
900
+ },
901
+ OLLAMA: {
902
+ name: 'OLLAMA',
903
+ fullname: 'Ollama',
904
+ color: '#059669', // Emerald green for local models
905
+ },
906
+ REMOTE: {
907
+ name: 'REMOTE',
908
+ fullname: 'Remote Server',
909
+ color: '#6b7280', // Gray for remote/proxy connections
910
+ },
911
+ MOCKED_ECHO: {
912
+ name: 'MOCKED_ECHO',
913
+ fullname: 'Echo (Test)',
914
+ color: '#8b5cf6', // Purple for test/mock tools
915
+ },
916
+ MOCKED_FAKE: {
917
+ name: 'MOCKED_FAKE',
918
+ fullname: 'Fake LLM (Test)',
919
+ color: '#ec4899', // Pink for fake/test tools
920
+ },
921
+ VERCEL: {
922
+ name: 'VERCEL',
923
+ fullname: 'Vercel AI',
924
+ color: '#000000', // Vercel's black
925
+ },
926
+ MULTIPLE: {
927
+ name: 'MULTIPLE',
928
+ fullname: 'Multiple Providers',
929
+ color: '#6366f1', // Indigo for combined/multiple providers
930
+ },
931
+ };
932
+ /**
933
+ * TODO: Refactor this - each profile must be alongside the provider definition
934
+ * TODO: Unite `AvatarProfileProps` and `ChatParticipant`
935
+ * Note: [💞] Ignore a discrepancy between file name and entity name
936
+ */
937
+
861
938
  /**
862
939
  * Counts number of characters in the text
863
940
  *
@@ -1283,7 +1360,7 @@ function pricing(value) {
1283
1360
  /**
1284
1361
  * List of available OpenAI models with pricing
1285
1362
  *
1286
- * Note: Done at 2025-05-06
1363
+ * Note: Synced with official API docs at 2025-08-20
1287
1364
  *
1288
1365
  * @see https://platform.openai.com/docs/models/
1289
1366
  * @see https://openai.com/api/pricing/
@@ -1292,6 +1369,138 @@ function pricing(value) {
1292
1369
  const OPENAI_MODELS = exportJson({
1293
1370
  name: 'OPENAI_MODELS',
1294
1371
  value: [
1372
+ /**/
1373
+ {
1374
+ modelVariant: 'CHAT',
1375
+ modelTitle: 'gpt-5',
1376
+ modelName: 'gpt-5',
1377
+ modelDescription: "OpenAI's most advanced language model with unprecedented reasoning capabilities and 200K context window. Features revolutionary improvements in complex problem-solving, scientific reasoning, and creative tasks. Demonstrates human-level performance across diverse domains with enhanced safety measures and alignment. Represents the next generation of AI with superior understanding, nuanced responses, and advanced multimodal capabilities.",
1378
+ pricing: {
1379
+ prompt: pricing(`$1.25 / 1M tokens`),
1380
+ output: pricing(`$10.00 / 1M tokens`),
1381
+ },
1382
+ },
1383
+ /**/
1384
+ /**/
1385
+ {
1386
+ modelVariant: 'CHAT',
1387
+ modelTitle: 'gpt-5-mini',
1388
+ modelName: 'gpt-5-mini',
1389
+ modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
1390
+ pricing: {
1391
+ prompt: pricing(`$0.25 / 1M tokens`),
1392
+ output: pricing(`$2.00 / 1M tokens`),
1393
+ },
1394
+ },
1395
+ /**/
1396
+ /**/
1397
+ {
1398
+ modelVariant: 'CHAT',
1399
+ modelTitle: 'gpt-5-nano',
1400
+ modelName: 'gpt-5-nano',
1401
+ modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
1402
+ pricing: {
1403
+ prompt: pricing(`$0.05 / 1M tokens`),
1404
+ output: pricing(`$0.40 / 1M tokens`),
1405
+ },
1406
+ },
1407
+ /**/
1408
+ /**/
1409
+ {
1410
+ modelVariant: 'CHAT',
1411
+ modelTitle: 'gpt-4.1',
1412
+ modelName: 'gpt-4.1',
1413
+ modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
1414
+ pricing: {
1415
+ prompt: pricing(`$3.00 / 1M tokens`),
1416
+ output: pricing(`$12.00 / 1M tokens`),
1417
+ },
1418
+ },
1419
+ /**/
1420
+ /**/
1421
+ {
1422
+ modelVariant: 'CHAT',
1423
+ modelTitle: 'gpt-4.1-mini',
1424
+ modelName: 'gpt-4.1-mini',
1425
+ modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
1426
+ pricing: {
1427
+ prompt: pricing(`$0.80 / 1M tokens`),
1428
+ output: pricing(`$3.20 / 1M tokens`),
1429
+ },
1430
+ },
1431
+ /**/
1432
+ /**/
1433
+ {
1434
+ modelVariant: 'CHAT',
1435
+ modelTitle: 'gpt-4.1-nano',
1436
+ modelName: 'gpt-4.1-nano',
1437
+ modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
1438
+ pricing: {
1439
+ prompt: pricing(`$0.20 / 1M tokens`),
1440
+ output: pricing(`$0.80 / 1M tokens`),
1441
+ },
1442
+ },
1443
+ /**/
1444
+ /**/
1445
+ {
1446
+ modelVariant: 'CHAT',
1447
+ modelTitle: 'o3',
1448
+ modelName: 'o3',
1449
+ modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
1450
+ pricing: {
1451
+ prompt: pricing(`$15.00 / 1M tokens`),
1452
+ output: pricing(`$60.00 / 1M tokens`),
1453
+ },
1454
+ },
1455
+ /**/
1456
+ /**/
1457
+ {
1458
+ modelVariant: 'CHAT',
1459
+ modelTitle: 'o3-pro',
1460
+ modelName: 'o3-pro',
1461
+ modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
1462
+ pricing: {
1463
+ prompt: pricing(`$30.00 / 1M tokens`),
1464
+ output: pricing(`$120.00 / 1M tokens`),
1465
+ },
1466
+ },
1467
+ /**/
1468
+ /**/
1469
+ {
1470
+ modelVariant: 'CHAT',
1471
+ modelTitle: 'o4-mini',
1472
+ modelName: 'o4-mini',
1473
+ modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
1474
+ pricing: {
1475
+ prompt: pricing(`$4.00 / 1M tokens`),
1476
+ output: pricing(`$16.00 / 1M tokens`),
1477
+ },
1478
+ },
1479
+ /**/
1480
+ /**/
1481
+ {
1482
+ modelVariant: 'CHAT',
1483
+ modelTitle: 'o3-deep-research',
1484
+ modelName: 'o3-deep-research',
1485
+ modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
1486
+ pricing: {
1487
+ prompt: pricing(`$25.00 / 1M tokens`),
1488
+ output: pricing(`$100.00 / 1M tokens`),
1489
+ },
1490
+ },
1491
+ /**/
1492
+ /**/
1493
+ {
1494
+ modelVariant: 'CHAT',
1495
+ modelTitle: 'o4-mini-deep-research',
1496
+ modelName: 'o4-mini-deep-research',
1497
+ modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
1498
+ pricing: {
1499
+ prompt: pricing(`$12.00 / 1M tokens`),
1500
+ output: pricing(`$48.00 / 1M tokens`),
1501
+ },
1502
+ },
1503
+ /**/
1295
1504
  /*/
1296
1505
  {
1297
1506
  modelTitle: 'dall-e-3',
@@ -1821,7 +2030,18 @@ class OpenAiCompatibleExecutionTools {
1821
2030
  const openAiOptions = { ...this.options };
1822
2031
  delete openAiOptions.isVerbose;
1823
2032
  delete openAiOptions.userId;
1824
- this.client = new OpenAI(openAiOptions);
2033
+ // Enhanced configuration for better ECONNRESET handling
2034
+ const enhancedOptions = {
2035
+ ...openAiOptions,
2036
+ timeout: API_REQUEST_TIMEOUT,
2037
+ maxRetries: CONNECTION_RETRIES_LIMIT,
2038
+ defaultHeaders: {
2039
+ Connection: 'keep-alive',
2040
+ 'Keep-Alive': 'timeout=30, max=100',
2041
+ ...openAiOptions.defaultHeaders,
2042
+ },
2043
+ };
2044
+ this.client = new OpenAI(enhancedOptions);
1825
2045
  }
1826
2046
  return this.client;
1827
2047
  }
@@ -1874,7 +2094,6 @@ class OpenAiCompatibleExecutionTools {
1874
2094
  const modelSettings = {
1875
2095
  model: modelName,
1876
2096
  max_tokens: modelRequirements.maxTokens,
1877
- // <- TODO: [🌾] Make some global max cap for maxTokens
1878
2097
  temperature: modelRequirements.temperature,
1879
2098
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1880
2099
  // <- Note: [🧆]
@@ -1910,7 +2129,7 @@ class OpenAiCompatibleExecutionTools {
1910
2129
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1911
2130
  }
1912
2131
  const rawResponse = await this.limiter
1913
- .schedule(() => client.chat.completions.create(rawRequest))
2132
+ .schedule(() => this.makeRequestWithRetry(() => client.chat.completions.create(rawRequest)))
1914
2133
  .catch((error) => {
1915
2134
  assertsError(error);
1916
2135
  if (this.options.isVerbose) {
@@ -1970,8 +2189,7 @@ class OpenAiCompatibleExecutionTools {
1970
2189
  const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
1971
2190
  const modelSettings = {
1972
2191
  model: modelName,
1973
- max_tokens: modelRequirements.maxTokens || 2000,
1974
- // <- TODO: [🌾] Make some global max cap for maxTokens
2192
+ max_tokens: modelRequirements.maxTokens,
1975
2193
  temperature: modelRequirements.temperature,
1976
2194
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1977
2195
  // <- Note: [🧆]
@@ -1987,7 +2205,7 @@ class OpenAiCompatibleExecutionTools {
1987
2205
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1988
2206
  }
1989
2207
  const rawResponse = await this.limiter
1990
- .schedule(() => client.completions.create(rawRequest))
2208
+ .schedule(() => this.makeRequestWithRetry(() => client.completions.create(rawRequest)))
1991
2209
  .catch((error) => {
1992
2210
  assertsError(error);
1993
2211
  if (this.options.isVerbose) {
@@ -2051,7 +2269,7 @@ class OpenAiCompatibleExecutionTools {
2051
2269
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2052
2270
  }
2053
2271
  const rawResponse = await this.limiter
2054
- .schedule(() => client.embeddings.create(rawRequest))
2272
+ .schedule(() => this.makeRequestWithRetry(() => client.embeddings.create(rawRequest)))
2055
2273
  .catch((error) => {
2056
2274
  assertsError(error);
2057
2275
  if (this.options.isVerbose) {
@@ -2109,6 +2327,76 @@ class OpenAiCompatibleExecutionTools {
2109
2327
  }
2110
2328
  return model;
2111
2329
  }
2330
+ // <- Note: [🤖] getDefaultXxxModel
2331
+ /**
2332
+ * Makes a request with retry logic for network errors like ECONNRESET
2333
+ */
2334
+ async makeRequestWithRetry(requestFn) {
2335
+ let lastError;
2336
+ for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
2337
+ try {
2338
+ return await requestFn();
2339
+ }
2340
+ catch (error) {
2341
+ assertsError(error);
2342
+ lastError = error;
2343
+ // Check if this is a retryable network error
2344
+ const isRetryableError = this.isRetryableNetworkError(error);
2345
+ if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
2346
+ if (this.options.isVerbose) {
2347
+ console.info(colors.bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
2348
+ }
2349
+ throw error;
2350
+ }
2351
+ // Calculate exponential backoff delay
2352
+ const baseDelay = 1000; // 1 second
2353
+ const backoffDelay = baseDelay * Math.pow(2, attempt - 1);
2354
+ const jitterDelay = Math.random() * 500; // Add some randomness
2355
+ const totalDelay = backoffDelay + jitterDelay;
2356
+ if (this.options.isVerbose) {
2357
+ console.info(colors.bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
2358
+ }
2359
+ // Wait before retrying
2360
+ await new Promise((resolve) => setTimeout(resolve, totalDelay));
2361
+ }
2362
+ }
2363
+ throw lastError;
2364
+ }
2365
+ /**
2366
+ * Determines if an error is retryable (network-related errors)
2367
+ */
2368
+ isRetryableNetworkError(error) {
2369
+ const errorMessage = error.message.toLowerCase();
2370
+ const errorCode = error.code;
2371
+ // Network connection errors that should be retried
2372
+ const retryableErrors = [
2373
+ 'econnreset',
2374
+ 'enotfound',
2375
+ 'econnrefused',
2376
+ 'etimedout',
2377
+ 'socket hang up',
2378
+ 'network error',
2379
+ 'fetch failed',
2380
+ 'connection reset',
2381
+ 'connection refused',
2382
+ 'timeout',
2383
+ ];
2384
+ // Check error message
2385
+ if (retryableErrors.some((retryableError) => errorMessage.includes(retryableError))) {
2386
+ return true;
2387
+ }
2388
+ // Check error code
2389
+ if (errorCode && retryableErrors.includes(errorCode.toLowerCase())) {
2390
+ return true;
2391
+ }
2392
+ // Check for specific HTTP status codes that are retryable
2393
+ const errorWithStatus = error;
2394
+ const httpStatus = errorWithStatus.status || errorWithStatus.statusCode;
2395
+ if (httpStatus && [429, 500, 502, 503, 504].includes(httpStatus)) {
2396
+ return true;
2397
+ }
2398
+ return false;
2399
+ }
2112
2400
  }
2113
2401
  /**
2114
2402
  * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
@@ -2138,6 +2426,9 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
2138
2426
  get description() {
2139
2427
  return 'Use all models provided by OpenAI';
2140
2428
  }
2429
+ get profile() {
2430
+ return LLM_PROVIDER_PROFILES.OPENAI;
2431
+ }
2141
2432
  /*
2142
2433
  Note: Commenting this out to avoid circular dependency
2143
2434
  /**
@@ -2162,7 +2453,7 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
2162
2453
  * Default model for chat variant.
2163
2454
  */
2164
2455
  getDefaultChatModel() {
2165
- return this.getDefaultModel('gpt-4-turbo');
2456
+ return this.getDefaultModel('gpt-5');
2166
2457
  }
2167
2458
  /**
2168
2459
  * Default model for completion variant.
@@ -2230,8 +2521,6 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
2230
2521
  const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
2231
2522
  const modelSettings = {
2232
2523
  model: modelName,
2233
- max_tokens: modelRequirements.maxTokens,
2234
- // <- TODO: [🌾] Make some global max cap for maxTokens
2235
2524
 
2236
2525
  temperature: modelRequirements.temperature,
2237
2526
 
@@ -2736,6 +3025,9 @@ class RemoteLlmExecutionTools {
2736
3025
  get description() {
2737
3026
  return `Models from Promptbook remote server ${this.options.remoteServerUrl}`;
2738
3027
  }
3028
+ get profile() {
3029
+ return LLM_PROVIDER_PROFILES.REMOTE;
3030
+ }
2739
3031
  /**
2740
3032
  * Check the configuration of all execution tools
2741
3033
  */