@promptbook/openai 0.100.0-9 → 0.100.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (130)
  1. package/README.md +7 -14
  2. package/esm/index.es.js +304 -12
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/color.index.d.ts +50 -0
  5. package/esm/typings/src/_packages/components.index.d.ts +36 -0
  6. package/esm/typings/src/_packages/core.index.d.ts +30 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +38 -0
  8. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.d.ts +30 -0
  9. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.test.d.ts +1 -0
  10. package/esm/typings/src/book-2.0/agent-source/string_book.d.ts +26 -0
  11. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +38 -0
  12. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +39 -0
  13. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +45 -0
  14. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +44 -0
  15. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +56 -0
  16. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +39 -0
  17. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +49 -0
  18. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +46 -0
  19. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +44 -0
  20. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +44 -0
  21. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +38 -0
  22. package/esm/typings/src/book-2.0/commitments/_base/BaseCommitmentDefinition.d.ts +52 -0
  23. package/esm/typings/src/book-2.0/commitments/_base/BookCommitment.d.ts +5 -0
  24. package/esm/typings/src/book-2.0/commitments/_base/CommitmentDefinition.d.ts +48 -0
  25. package/esm/typings/src/book-2.0/commitments/_base/NotYetImplementedCommitmentDefinition.d.ts +22 -0
  26. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +19 -0
  27. package/esm/typings/src/book-2.0/commitments/_misc/AgentModelRequirements.d.ts +37 -0
  28. package/esm/typings/src/book-2.0/commitments/_misc/AgentSourceParseResult.d.ts +18 -0
  29. package/esm/typings/src/book-2.0/commitments/_misc/ParsedCommitment.d.ts +22 -0
  30. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirements.d.ts +62 -0
  31. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirementsWithCommitments.d.ts +36 -0
  32. package/esm/typings/src/book-2.0/commitments/_misc/createCommitmentRegex.d.ts +20 -0
  33. package/esm/typings/src/book-2.0/commitments/_misc/parseAgentSourceWithCommitments.d.ts +24 -0
  34. package/esm/typings/src/book-2.0/commitments/_misc/removeCommentsFromSystemMessage.d.ts +11 -0
  35. package/esm/typings/src/book-2.0/commitments/index.d.ts +56 -0
  36. package/esm/typings/src/book-2.0/utils/profileImageUtils.d.ts +39 -0
  37. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +35 -0
  38. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChipFromSource.d.ts +21 -0
  39. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/index.d.ts +2 -0
  40. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +29 -0
  41. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfileFromSource.d.ts +19 -0
  42. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +40 -0
  43. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +15 -0
  44. package/esm/typings/src/book-components/BookEditor/config.d.ts +10 -0
  45. package/esm/typings/src/book-components/BookEditor/injectCssModuleIntoShadowRoot.d.ts +11 -0
  46. package/esm/typings/src/book-components/Chat/Chat/Chat.d.ts +20 -0
  47. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +110 -0
  48. package/esm/typings/src/book-components/Chat/LlmChat/LlmChat.d.ts +14 -0
  49. package/esm/typings/src/book-components/Chat/LlmChat/LlmChat.test.d.ts +1 -0
  50. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +24 -0
  51. package/esm/typings/src/book-components/Chat/types/ChatMessage.d.ts +16 -0
  52. package/esm/typings/src/book-components/Chat/types/ChatParticipant.d.ts +32 -0
  53. package/esm/typings/src/book-components/Chat/utils/ChatPersistence.d.ts +25 -0
  54. package/esm/typings/src/book-components/Chat/utils/ExportFormat.d.ts +4 -0
  55. package/esm/typings/src/book-components/Chat/utils/addUtmParamsToUrl.d.ts +7 -0
  56. package/esm/typings/src/book-components/Chat/utils/createShortLinkForChat.d.ts +7 -0
  57. package/esm/typings/src/book-components/Chat/utils/downloadFile.d.ts +6 -0
  58. package/esm/typings/src/book-components/Chat/utils/exportChatHistory.d.ts +9 -0
  59. package/esm/typings/src/book-components/Chat/utils/generatePdfContent.d.ts +8 -0
  60. package/esm/typings/src/book-components/Chat/utils/generateQrDataUrl.d.ts +7 -0
  61. package/esm/typings/src/book-components/Chat/utils/getPromptbookBranding.d.ts +6 -0
  62. package/esm/typings/src/book-components/Chat/utils/messagesToHtml.d.ts +8 -0
  63. package/esm/typings/src/book-components/Chat/utils/messagesToJson.d.ts +7 -0
  64. package/esm/typings/src/book-components/Chat/utils/messagesToMarkdown.d.ts +8 -0
  65. package/esm/typings/src/book-components/Chat/utils/messagesToText.d.ts +8 -0
  66. package/esm/typings/src/book-components/_common/react-utils/classNames.d.ts +7 -0
  67. package/esm/typings/src/book-components/_common/react-utils/collectCssTextsForClass.d.ts +7 -0
  68. package/esm/typings/src/book-components/_common/react-utils/escapeHtml.d.ts +6 -0
  69. package/esm/typings/src/book-components/_common/react-utils/escapeRegex.d.ts +6 -0
  70. package/esm/typings/src/config.d.ts +19 -0
  71. package/esm/typings/src/execution/AvailableModel.d.ts +4 -0
  72. package/esm/typings/src/execution/ExecutionTask.d.ts +27 -1
  73. package/esm/typings/src/execution/LlmExecutionTools.d.ts +11 -6
  74. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +6 -1
  75. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +0 -3
  76. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +81 -0
  77. package/esm/typings/src/llm-providers/_common/profiles/test/llmProviderProfiles.test.d.ts +1 -0
  78. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +3 -4
  79. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -0
  80. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +5 -5
  81. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  82. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  83. package/esm/typings/src/llm-providers/google/google-models.d.ts +1 -1
  84. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +5 -0
  85. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +1 -1
  86. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +8 -0
  87. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +5 -0
  88. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  89. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -0
  90. package/esm/typings/src/pipeline/book-notation.d.ts +2 -1
  91. package/esm/typings/src/playground/permanent/error-handling-playground.d.ts +5 -0
  92. package/esm/typings/src/types/ModelRequirements.d.ts +0 -2
  93. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  94. package/esm/typings/src/utils/color/$randomColor.d.ts +11 -0
  95. package/esm/typings/src/utils/color/Color.d.ts +179 -0
  96. package/esm/typings/src/utils/color/css-colors.d.ts +159 -0
  97. package/esm/typings/src/utils/color/internal-utils/checkChannelValue.d.ts +14 -0
  98. package/esm/typings/src/utils/color/internal-utils/hslToRgb.d.ts +17 -0
  99. package/esm/typings/src/utils/color/internal-utils/rgbToHsl.d.ts +17 -0
  100. package/esm/typings/src/utils/color/operators/ColorTransformer.d.ts +5 -0
  101. package/esm/typings/src/utils/color/operators/darken.d.ts +9 -0
  102. package/esm/typings/src/utils/color/operators/furthest.d.ts +16 -0
  103. package/esm/typings/src/utils/color/operators/grayscale.d.ts +9 -0
  104. package/esm/typings/src/utils/color/operators/lighten.d.ts +12 -0
  105. package/esm/typings/src/utils/color/operators/mixWithColor.d.ts +11 -0
  106. package/esm/typings/src/utils/color/operators/nearest.d.ts +10 -0
  107. package/esm/typings/src/utils/color/operators/negative.d.ts +7 -0
  108. package/esm/typings/src/utils/color/operators/negativeLightness.d.ts +7 -0
  109. package/esm/typings/src/utils/color/operators/withAlpha.d.ts +9 -0
  110. package/esm/typings/src/utils/color/utils/areColorsEqual.d.ts +14 -0
  111. package/esm/typings/src/utils/color/utils/colorDistance.d.ts +21 -0
  112. package/esm/typings/src/utils/color/utils/colorHue.d.ts +11 -0
  113. package/esm/typings/src/utils/color/utils/colorHueDistance.d.ts +11 -0
  114. package/esm/typings/src/utils/color/utils/colorHueDistance.test.d.ts +1 -0
  115. package/esm/typings/src/utils/color/utils/colorLuminance.d.ts +9 -0
  116. package/esm/typings/src/utils/color/utils/colorSatulightion.d.ts +7 -0
  117. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +9 -0
  118. package/esm/typings/src/utils/color/utils/colorToDataUrl.d.ts +10 -0
  119. package/esm/typings/src/utils/color/utils/mixColors.d.ts +11 -0
  120. package/esm/typings/src/utils/organization/preserve.d.ts +21 -0
  121. package/esm/typings/src/utils/take/classes/TakeChain.d.ts +11 -0
  122. package/esm/typings/src/utils/take/interfaces/ITakeChain.d.ts +12 -0
  123. package/esm/typings/src/utils/take/interfaces/Takeable.d.ts +7 -0
  124. package/esm/typings/src/utils/take/take.d.ts +12 -0
  125. package/esm/typings/src/utils/take/take.test.d.ts +1 -0
  126. package/esm/typings/src/version.d.ts +1 -1
  127. package/package.json +2 -2
  128. package/umd/index.umd.js +304 -12
  129. package/umd/index.umd.js.map +1 -1
  130. package/esm/typings/src/scripting/javascript/utils/preserve.d.ts +0 -14
@@ -0,0 +1,11 @@
1
+ import { Color } from '../Color';
2
+ /**
3
+ * Calculates hue distance of two colors
4
+ *
5
+ * @returns hue distance in degrees <0-180)
6
+ *
7
+ * @see https://en.wikipedia.org/wiki/HSL_and_HSV#Hue_and_chroma
8
+ *
9
+ * @public exported from `@promptbook/color`
10
+ */
11
+ export declare function colorHueDistance(color1: Color, color2: Color): number;
@@ -0,0 +1,9 @@
1
+ import { Color } from '../Color';
2
+ /**
3
+ * Calculates luminance of the color
4
+ *
5
+ * @see https://en.wikipedia.org/wiki/Relative_luminance
6
+ *
7
+ * @public exported from `@promptbook/color`
8
+ */
9
+ export declare function colorLuminance(color: Color): number;
@@ -0,0 +1,7 @@
1
+ import { Color } from '../Color';
2
+ /**
3
+ * Calculates saturation*luminance of the color
4
+ *
5
+ * @public exported from `@promptbook/color`
6
+ */
7
+ export declare function colorSatulightion(color: Color): number;
@@ -0,0 +1,9 @@
1
+ import { Color } from "../Color";
2
+ /**
3
+ * Calculates saturation of the color
4
+ *
5
+ * @see https://en.wikipedia.org/wiki/HSL_and_HSV#Saturation
6
+ *
7
+ * @public exported from `@promptbook/color`
8
+ */
9
+ export declare function colorSaturation(color: Color): number;
@@ -0,0 +1,10 @@
1
+ import { Color } from '../Color';
2
+ /**
3
+ * Makes data url from color
4
+ *
5
+ * @public exported from `@promptbook/color`
6
+ */
7
+ export declare function colorToDataUrl(color: Color): string;
8
+ /**
9
+ * TODO: Make as functions NOT const
10
+ */
@@ -0,0 +1,11 @@
1
+ import type { WithTake } from '../../take/interfaces/ITakeChain';
2
+ import { Color } from '../Color';
3
+ /**
4
+ * Mixes an array of colors and returns the average color
5
+ *
6
+ * @param {...Color} colors - The array of colors to be mixed.
7
+ * @returns {WithTake<Color>} - The mixed color.
8
+ *
9
+ * @public exported from `@promptbook/color`
10
+ */
11
+ export declare function mixColors(...colors: Array<Color>): WithTake<Color>;
@@ -0,0 +1,21 @@
1
+ import type { really_any } from './really_any';
2
+ /**
3
+ * Does nothing, but preserves the function in the bundle
4
+ * Compiler is tricked into thinking the function is used
5
+ *
6
+ * @param value any function to preserve
7
+ * @returns nothing
8
+ * @private within the repository
9
+ */
10
+ export declare function $preserve(...value: Array<really_any>): void;
11
+ /**
12
+ * DO NOT USE THIS FUNCTION
13
+ * Only purpose of this function is to trick the compiler and javascript engine
14
+ * that `_preserved` array can be used in the future and should not be garbage collected
15
+ *
16
+ * @private internal for `preserve`
17
+ */
18
+ export declare function __DO_NOT_USE_getPreserved(): Array<really_any>;
19
+ /**
20
+ * Note: [💞] Ignore a discrepancy between file name and entity name
21
+ */
@@ -0,0 +1,11 @@
1
+ import type { ITakeChain } from '../interfaces/ITakeChain';
2
+ import type { Takeable } from '../interfaces/Takeable';
3
+ /**
4
+ * @private util of `@promptbook/color`
5
+ * @deprecated [🤡] Use some better functional library instead of `TakeChain`
6
+ */
7
+ export declare class TakeChain<TValue extends Takeable> implements ITakeChain<TValue> {
8
+ value: TValue;
9
+ constructor(value: TValue);
10
+ then<TResultValue extends Takeable>(callback: (oldValue: TValue) => TResultValue): TResultValue & ITakeChain<TResultValue>;
11
+ }
@@ -0,0 +1,12 @@
1
+ import type { Takeable } from './Takeable';
2
+ /**
3
+ * Represents any value with take chain functionality
4
+ *
5
+ * @private util of `@promptbook/color`
6
+ * @deprecated [🤡] Use some better functional library instead of `TakeChain`
7
+ */
8
+ export type WithTake<TValue extends Takeable> = TValue & ITakeChain<TValue>;
9
+ export interface ITakeChain<TValue extends Takeable> {
10
+ readonly value: TValue;
11
+ then<TResultValue extends Takeable>(callback: (value: TValue) => TResultValue): WithTake<TResultValue>;
12
+ }
@@ -0,0 +1,7 @@
1
+ /**
2
+ * Represents a type that can be chained in take pipe
3
+ *
4
+ * @private util of `@promptbook/color`
5
+ * @deprecated [🤡] Use some better functional library instead of `TakeChain`
6
+ */
7
+ export type Takeable = Exclude<object, null>;
@@ -0,0 +1,12 @@
1
+ import type { WithTake } from './interfaces/ITakeChain';
2
+ import type { Takeable } from './interfaces/Takeable';
3
+ /**
4
+ * A function that takes an initial value and returns a proxy object with chainable methods.
5
+ *
6
+ * @param {*} initialValue - The initial value.
7
+ * @returns {Proxy<WithTake<TValue>>} - A proxy object with a `take` method.
8
+ *
9
+ * @private util of `@promptbook/color`
10
+ * @deprecated [🤡] Use some better functional library instead of `TakeChain`
11
+ */
12
+ export declare function take<TValue extends Takeable>(initialValue: TValue): WithTake<TValue>;
@@ -0,0 +1 @@
1
+ export {};
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.100.0-8`).
18
+ * It follows semantic versioning (e.g., `0.100.0`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/openai",
3
- "version": "0.100.0-9",
3
+ "version": "0.100.1",
4
4
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -102,7 +102,7 @@
102
102
  "module": "./esm/index.es.js",
103
103
  "typings": "./esm/typings/src/_packages/openai.index.d.ts",
104
104
  "peerDependencies": {
105
- "@promptbook/core": "0.100.0-9"
105
+ "@promptbook/core": "0.100.1"
106
106
  },
107
107
  "dependencies": {
108
108
  "bottleneck": "^2.19.5",
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
25
25
  * @generated
26
26
  * @see https://github.com/webgptorg/promptbook
27
27
  */
28
- const PROMPTBOOK_ENGINE_VERSION = '0.100.0-9';
28
+ const PROMPTBOOK_ENGINE_VERSION = '0.100.1';
29
29
  /**
30
30
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
31
31
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -316,6 +316,13 @@
316
316
  * @public exported from `@promptbook/core`
317
317
  */
318
318
  const DEFAULT_MAX_REQUESTS_PER_MINUTE = 60;
319
+ /**
320
+ * API request timeout in milliseconds
321
+ * Can be overridden via API_REQUEST_TIMEOUT environment variable
322
+ *
323
+ * @public exported from `@promptbook/core`
324
+ */
325
+ const API_REQUEST_TIMEOUT = parseInt(process.env.API_REQUEST_TIMEOUT || '90000');
319
326
  /**
320
327
  * Note: [💞] Ignore a discrepancy between file name and entity name
321
328
  * TODO: [🧠][🧜‍♂️] Maybe join remoteServerUrl and path into single value
@@ -864,6 +871,76 @@
864
871
  return replacedTemplates;
865
872
  }
866
873
 
874
+ /**
875
+ * Predefined profiles for LLM providers to maintain consistency across the application
876
+ * These profiles represent each provider as a virtual persona in chat interfaces
877
+ *
878
+ * @private !!!!
879
+ */
880
+ const LLM_PROVIDER_PROFILES = {
881
+ OPENAI: {
882
+ name: 'OPENAI',
883
+ fullname: 'OpenAI GPT',
884
+ color: '#10a37f', // OpenAI's signature green
885
+ // Note: avatarSrc could be added when we have provider logos available
886
+ },
887
+ ANTHROPIC: {
888
+ name: 'ANTHROPIC',
889
+ fullname: 'Anthropic Claude',
890
+ color: '#d97706', // Anthropic's orange/amber color
891
+ },
892
+ AZURE_OPENAI: {
893
+ name: 'AZURE_OPENAI',
894
+ fullname: 'Azure OpenAI',
895
+ color: '#0078d4', // Microsoft Azure blue
896
+ },
897
+ GOOGLE: {
898
+ name: 'GOOGLE',
899
+ fullname: 'Google Gemini',
900
+ color: '#4285f4', // Google blue
901
+ },
902
+ DEEPSEEK: {
903
+ name: 'DEEPSEEK',
904
+ fullname: 'DeepSeek',
905
+ color: '#7c3aed', // Purple color for DeepSeek
906
+ },
907
+ OLLAMA: {
908
+ name: 'OLLAMA',
909
+ fullname: 'Ollama',
910
+ color: '#059669', // Emerald green for local models
911
+ },
912
+ REMOTE: {
913
+ name: 'REMOTE',
914
+ fullname: 'Remote Server',
915
+ color: '#6b7280', // Gray for remote/proxy connections
916
+ },
917
+ MOCKED_ECHO: {
918
+ name: 'MOCKED_ECHO',
919
+ fullname: 'Echo (Test)',
920
+ color: '#8b5cf6', // Purple for test/mock tools
921
+ },
922
+ MOCKED_FAKE: {
923
+ name: 'MOCKED_FAKE',
924
+ fullname: 'Fake LLM (Test)',
925
+ color: '#ec4899', // Pink for fake/test tools
926
+ },
927
+ VERCEL: {
928
+ name: 'VERCEL',
929
+ fullname: 'Vercel AI',
930
+ color: '#000000', // Vercel's black
931
+ },
932
+ MULTIPLE: {
933
+ name: 'MULTIPLE',
934
+ fullname: 'Multiple Providers',
935
+ color: '#6366f1', // Indigo for combined/multiple providers
936
+ },
937
+ };
938
+ /**
939
+ * TODO: Refactor this - each profile must be alongside the provider definition
940
+ * TODO: [🕛] Unite `AvatarProfileProps`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
941
+ * Note: [💞] Ignore a discrepancy between file name and entity name
942
+ */
943
+
867
944
  /**
868
945
  * Counts number of characters in the text
869
946
  *
@@ -1289,7 +1366,7 @@
1289
1366
  /**
1290
1367
  * List of available OpenAI models with pricing
1291
1368
  *
1292
- * Note: Done at 2025-05-06
1369
+ * Note: Synced with official API docs at 2025-08-20
1293
1370
  *
1294
1371
  * @see https://platform.openai.com/docs/models/
1295
1372
  * @see https://openai.com/api/pricing/
@@ -1298,6 +1375,138 @@
1298
1375
  const OPENAI_MODELS = exportJson({
1299
1376
  name: 'OPENAI_MODELS',
1300
1377
  value: [
1378
+ /**/
1379
+ {
1380
+ modelVariant: 'CHAT',
1381
+ modelTitle: 'gpt-5',
1382
+ modelName: 'gpt-5',
1383
+ modelDescription: "OpenAI's most advanced language model with unprecedented reasoning capabilities and 200K context window. Features revolutionary improvements in complex problem-solving, scientific reasoning, and creative tasks. Demonstrates human-level performance across diverse domains with enhanced safety measures and alignment. Represents the next generation of AI with superior understanding, nuanced responses, and advanced multimodal capabilities.",
1384
+ pricing: {
1385
+ prompt: pricing(`$1.25 / 1M tokens`),
1386
+ output: pricing(`$10.00 / 1M tokens`),
1387
+ },
1388
+ },
1389
+ /**/
1390
+ /**/
1391
+ {
1392
+ modelVariant: 'CHAT',
1393
+ modelTitle: 'gpt-5-mini',
1394
+ modelName: 'gpt-5-mini',
1395
+ modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
1396
+ pricing: {
1397
+ prompt: pricing(`$0.25 / 1M tokens`),
1398
+ output: pricing(`$2.00 / 1M tokens`),
1399
+ },
1400
+ },
1401
+ /**/
1402
+ /**/
1403
+ {
1404
+ modelVariant: 'CHAT',
1405
+ modelTitle: 'gpt-5-nano',
1406
+ modelName: 'gpt-5-nano',
1407
+ modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
1408
+ pricing: {
1409
+ prompt: pricing(`$0.05 / 1M tokens`),
1410
+ output: pricing(`$0.40 / 1M tokens`),
1411
+ },
1412
+ },
1413
+ /**/
1414
+ /**/
1415
+ {
1416
+ modelVariant: 'CHAT',
1417
+ modelTitle: 'gpt-4.1',
1418
+ modelName: 'gpt-4.1',
1419
+ modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
1420
+ pricing: {
1421
+ prompt: pricing(`$3.00 / 1M tokens`),
1422
+ output: pricing(`$12.00 / 1M tokens`),
1423
+ },
1424
+ },
1425
+ /**/
1426
+ /**/
1427
+ {
1428
+ modelVariant: 'CHAT',
1429
+ modelTitle: 'gpt-4.1-mini',
1430
+ modelName: 'gpt-4.1-mini',
1431
+ modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
1432
+ pricing: {
1433
+ prompt: pricing(`$0.80 / 1M tokens`),
1434
+ output: pricing(`$3.20 / 1M tokens`),
1435
+ },
1436
+ },
1437
+ /**/
1438
+ /**/
1439
+ {
1440
+ modelVariant: 'CHAT',
1441
+ modelTitle: 'gpt-4.1-nano',
1442
+ modelName: 'gpt-4.1-nano',
1443
+ modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
1444
+ pricing: {
1445
+ prompt: pricing(`$0.20 / 1M tokens`),
1446
+ output: pricing(`$0.80 / 1M tokens`),
1447
+ },
1448
+ },
1449
+ /**/
1450
+ /**/
1451
+ {
1452
+ modelVariant: 'CHAT',
1453
+ modelTitle: 'o3',
1454
+ modelName: 'o3',
1455
+ modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
1456
+ pricing: {
1457
+ prompt: pricing(`$15.00 / 1M tokens`),
1458
+ output: pricing(`$60.00 / 1M tokens`),
1459
+ },
1460
+ },
1461
+ /**/
1462
+ /**/
1463
+ {
1464
+ modelVariant: 'CHAT',
1465
+ modelTitle: 'o3-pro',
1466
+ modelName: 'o3-pro',
1467
+ modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
1468
+ pricing: {
1469
+ prompt: pricing(`$30.00 / 1M tokens`),
1470
+ output: pricing(`$120.00 / 1M tokens`),
1471
+ },
1472
+ },
1473
+ /**/
1474
+ /**/
1475
+ {
1476
+ modelVariant: 'CHAT',
1477
+ modelTitle: 'o4-mini',
1478
+ modelName: 'o4-mini',
1479
+ modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
1480
+ pricing: {
1481
+ prompt: pricing(`$4.00 / 1M tokens`),
1482
+ output: pricing(`$16.00 / 1M tokens`),
1483
+ },
1484
+ },
1485
+ /**/
1486
+ /**/
1487
+ {
1488
+ modelVariant: 'CHAT',
1489
+ modelTitle: 'o3-deep-research',
1490
+ modelName: 'o3-deep-research',
1491
+ modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
1492
+ pricing: {
1493
+ prompt: pricing(`$25.00 / 1M tokens`),
1494
+ output: pricing(`$100.00 / 1M tokens`),
1495
+ },
1496
+ },
1497
+ /**/
1498
+ /**/
1499
+ {
1500
+ modelVariant: 'CHAT',
1501
+ modelTitle: 'o4-mini-deep-research',
1502
+ modelName: 'o4-mini-deep-research',
1503
+ modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
1504
+ pricing: {
1505
+ prompt: pricing(`$12.00 / 1M tokens`),
1506
+ output: pricing(`$48.00 / 1M tokens`),
1507
+ },
1508
+ },
1509
+ /**/
1301
1510
  /*/
1302
1511
  {
1303
1512
  modelTitle: 'dall-e-3',
@@ -1827,7 +2036,18 @@
1827
2036
  const openAiOptions = { ...this.options };
1828
2037
  delete openAiOptions.isVerbose;
1829
2038
  delete openAiOptions.userId;
1830
- this.client = new OpenAI__default["default"](openAiOptions);
2039
+ // Enhanced configuration for better ECONNRESET handling
2040
+ const enhancedOptions = {
2041
+ ...openAiOptions,
2042
+ timeout: API_REQUEST_TIMEOUT,
2043
+ maxRetries: CONNECTION_RETRIES_LIMIT,
2044
+ defaultHeaders: {
2045
+ Connection: 'keep-alive',
2046
+ 'Keep-Alive': 'timeout=30, max=100',
2047
+ ...openAiOptions.defaultHeaders,
2048
+ },
2049
+ };
2050
+ this.client = new OpenAI__default["default"](enhancedOptions);
1831
2051
  }
1832
2052
  return this.client;
1833
2053
  }
@@ -1880,7 +2100,6 @@
1880
2100
  const modelSettings = {
1881
2101
  model: modelName,
1882
2102
  max_tokens: modelRequirements.maxTokens,
1883
- // <- TODO: [🌾] Make some global max cap for maxTokens
1884
2103
  temperature: modelRequirements.temperature,
1885
2104
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1886
2105
  // <- Note: [🧆]
@@ -1916,7 +2135,7 @@
1916
2135
  console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1917
2136
  }
1918
2137
  const rawResponse = await this.limiter
1919
- .schedule(() => client.chat.completions.create(rawRequest))
2138
+ .schedule(() => this.makeRequestWithRetry(() => client.chat.completions.create(rawRequest)))
1920
2139
  .catch((error) => {
1921
2140
  assertsError(error);
1922
2141
  if (this.options.isVerbose) {
@@ -1976,8 +2195,7 @@
1976
2195
  const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
1977
2196
  const modelSettings = {
1978
2197
  model: modelName,
1979
- max_tokens: modelRequirements.maxTokens || 2000,
1980
- // <- TODO: [🌾] Make some global max cap for maxTokens
2198
+ max_tokens: modelRequirements.maxTokens,
1981
2199
  temperature: modelRequirements.temperature,
1982
2200
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1983
2201
  // <- Note: [🧆]
@@ -1993,7 +2211,7 @@
1993
2211
  console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1994
2212
  }
1995
2213
  const rawResponse = await this.limiter
1996
- .schedule(() => client.completions.create(rawRequest))
2214
+ .schedule(() => this.makeRequestWithRetry(() => client.completions.create(rawRequest)))
1997
2215
  .catch((error) => {
1998
2216
  assertsError(error);
1999
2217
  if (this.options.isVerbose) {
@@ -2057,7 +2275,7 @@
2057
2275
  console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2058
2276
  }
2059
2277
  const rawResponse = await this.limiter
2060
- .schedule(() => client.embeddings.create(rawRequest))
2278
+ .schedule(() => this.makeRequestWithRetry(() => client.embeddings.create(rawRequest)))
2061
2279
  .catch((error) => {
2062
2280
  assertsError(error);
2063
2281
  if (this.options.isVerbose) {
@@ -2115,6 +2333,76 @@
2115
2333
  }
2116
2334
  return model;
2117
2335
  }
2336
+ // <- Note: [🤖] getDefaultXxxModel
2337
+ /**
2338
+ * Makes a request with retry logic for network errors like ECONNRESET
2339
+ */
2340
+ async makeRequestWithRetry(requestFn) {
2341
+ let lastError;
2342
+ for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
2343
+ try {
2344
+ return await requestFn();
2345
+ }
2346
+ catch (error) {
2347
+ assertsError(error);
2348
+ lastError = error;
2349
+ // Check if this is a retryable network error
2350
+ const isRetryableError = this.isRetryableNetworkError(error);
2351
+ if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
2352
+ if (this.options.isVerbose) {
2353
+ console.info(colors__default["default"].bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
2354
+ }
2355
+ throw error;
2356
+ }
2357
+ // Calculate exponential backoff delay
2358
+ const baseDelay = 1000; // 1 second
2359
+ const backoffDelay = baseDelay * Math.pow(2, attempt - 1);
2360
+ const jitterDelay = Math.random() * 500; // Add some randomness
2361
+ const totalDelay = backoffDelay + jitterDelay;
2362
+ if (this.options.isVerbose) {
2363
+ console.info(colors__default["default"].bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
2364
+ }
2365
+ // Wait before retrying
2366
+ await new Promise((resolve) => setTimeout(resolve, totalDelay));
2367
+ }
2368
+ }
2369
+ throw lastError;
2370
+ }
2371
+ /**
2372
+ * Determines if an error is retryable (network-related errors)
2373
+ */
2374
+ isRetryableNetworkError(error) {
2375
+ const errorMessage = error.message.toLowerCase();
2376
+ const errorCode = error.code;
2377
+ // Network connection errors that should be retried
2378
+ const retryableErrors = [
2379
+ 'econnreset',
2380
+ 'enotfound',
2381
+ 'econnrefused',
2382
+ 'etimedout',
2383
+ 'socket hang up',
2384
+ 'network error',
2385
+ 'fetch failed',
2386
+ 'connection reset',
2387
+ 'connection refused',
2388
+ 'timeout',
2389
+ ];
2390
+ // Check error message
2391
+ if (retryableErrors.some((retryableError) => errorMessage.includes(retryableError))) {
2392
+ return true;
2393
+ }
2394
+ // Check error code
2395
+ if (errorCode && retryableErrors.includes(errorCode.toLowerCase())) {
2396
+ return true;
2397
+ }
2398
+ // Check for specific HTTP status codes that are retryable
2399
+ const errorWithStatus = error;
2400
+ const httpStatus = errorWithStatus.status || errorWithStatus.statusCode;
2401
+ if (httpStatus && [429, 500, 502, 503, 504].includes(httpStatus)) {
2402
+ return true;
2403
+ }
2404
+ return false;
2405
+ }
2118
2406
  }
2119
2407
  /**
2120
2408
  * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
@@ -2144,6 +2432,9 @@
2144
2432
  get description() {
2145
2433
  return 'Use all models provided by OpenAI';
2146
2434
  }
2435
+ get profile() {
2436
+ return LLM_PROVIDER_PROFILES.OPENAI;
2437
+ }
2147
2438
  /*
2148
2439
  Note: Commenting this out to avoid circular dependency
2149
2440
  /**
@@ -2168,7 +2459,7 @@
2168
2459
  * Default model for chat variant.
2169
2460
  */
2170
2461
  getDefaultChatModel() {
2171
- return this.getDefaultModel('gpt-4-turbo');
2462
+ return this.getDefaultModel('gpt-5');
2172
2463
  }
2173
2464
  /**
2174
2465
  * Default model for completion variant.
@@ -2236,8 +2527,6 @@
2236
2527
  const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
2237
2528
  const modelSettings = {
2238
2529
  model: modelName,
2239
- max_tokens: modelRequirements.maxTokens,
2240
- // <- TODO: [🌾] Make some global max cap for maxTokens
2241
2530
 
2242
2531
  temperature: modelRequirements.temperature,
2243
2532
 
@@ -2742,6 +3031,9 @@
2742
3031
  get description() {
2743
3032
  return `Models from Promptbook remote server ${this.options.remoteServerUrl}`;
2744
3033
  }
3034
+ get profile() {
3035
+ return LLM_PROVIDER_PROFILES.REMOTE;
3036
+ }
2745
3037
  /**
2746
3038
  * Check the configuration of all execution tools
2747
3039
  */