@promptbook/ollama 0.100.0-9 → 0.100.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129)
  1. package/README.md +7 -14
  2. package/esm/index.es.js +253 -10
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/color.index.d.ts +50 -0
  5. package/esm/typings/src/_packages/components.index.d.ts +36 -0
  6. package/esm/typings/src/_packages/core.index.d.ts +30 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +38 -0
  8. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.d.ts +30 -0
  9. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.test.d.ts +1 -0
  10. package/esm/typings/src/book-2.0/agent-source/string_book.d.ts +26 -0
  11. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +38 -0
  12. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +39 -0
  13. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +45 -0
  14. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +44 -0
  15. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +56 -0
  16. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +39 -0
  17. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +49 -0
  18. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +46 -0
  19. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +44 -0
  20. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +44 -0
  21. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +38 -0
  22. package/esm/typings/src/book-2.0/commitments/_base/BaseCommitmentDefinition.d.ts +52 -0
  23. package/esm/typings/src/book-2.0/commitments/_base/BookCommitment.d.ts +5 -0
  24. package/esm/typings/src/book-2.0/commitments/_base/CommitmentDefinition.d.ts +48 -0
  25. package/esm/typings/src/book-2.0/commitments/_base/NotYetImplementedCommitmentDefinition.d.ts +22 -0
  26. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +19 -0
  27. package/esm/typings/src/book-2.0/commitments/_misc/AgentModelRequirements.d.ts +37 -0
  28. package/esm/typings/src/book-2.0/commitments/_misc/AgentSourceParseResult.d.ts +18 -0
  29. package/esm/typings/src/book-2.0/commitments/_misc/ParsedCommitment.d.ts +22 -0
  30. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirements.d.ts +62 -0
  31. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirementsWithCommitments.d.ts +36 -0
  32. package/esm/typings/src/book-2.0/commitments/_misc/createCommitmentRegex.d.ts +20 -0
  33. package/esm/typings/src/book-2.0/commitments/_misc/parseAgentSourceWithCommitments.d.ts +24 -0
  34. package/esm/typings/src/book-2.0/commitments/_misc/removeCommentsFromSystemMessage.d.ts +11 -0
  35. package/esm/typings/src/book-2.0/commitments/index.d.ts +56 -0
  36. package/esm/typings/src/book-2.0/utils/profileImageUtils.d.ts +39 -0
  37. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +35 -0
  38. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChipFromSource.d.ts +21 -0
  39. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/index.d.ts +2 -0
  40. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +26 -0
  41. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfileFromSource.d.ts +19 -0
  42. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +35 -0
  43. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +15 -0
  44. package/esm/typings/src/book-components/BookEditor/config.d.ts +10 -0
  45. package/esm/typings/src/book-components/BookEditor/injectCssModuleIntoShadowRoot.d.ts +11 -0
  46. package/esm/typings/src/book-components/Chat/Chat/Chat.d.ts +20 -0
  47. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +110 -0
  48. package/esm/typings/src/book-components/Chat/LlmChat/LlmChat.d.ts +14 -0
  49. package/esm/typings/src/book-components/Chat/LlmChat/LlmChat.test.d.ts +1 -0
  50. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +24 -0
  51. package/esm/typings/src/book-components/Chat/types/ChatMessage.d.ts +16 -0
  52. package/esm/typings/src/book-components/Chat/types/ChatParticipant.d.ts +32 -0
  53. package/esm/typings/src/book-components/Chat/utils/ChatPersistence.d.ts +25 -0
  54. package/esm/typings/src/book-components/Chat/utils/ExportFormat.d.ts +4 -0
  55. package/esm/typings/src/book-components/Chat/utils/addUtmParamsToUrl.d.ts +7 -0
  56. package/esm/typings/src/book-components/Chat/utils/createShortLinkForChat.d.ts +7 -0
  57. package/esm/typings/src/book-components/Chat/utils/downloadFile.d.ts +6 -0
  58. package/esm/typings/src/book-components/Chat/utils/exportChatHistory.d.ts +9 -0
  59. package/esm/typings/src/book-components/Chat/utils/generatePdfContent.d.ts +8 -0
  60. package/esm/typings/src/book-components/Chat/utils/generateQrDataUrl.d.ts +7 -0
  61. package/esm/typings/src/book-components/Chat/utils/getPromptbookBranding.d.ts +6 -0
  62. package/esm/typings/src/book-components/Chat/utils/messagesToHtml.d.ts +8 -0
  63. package/esm/typings/src/book-components/Chat/utils/messagesToJson.d.ts +7 -0
  64. package/esm/typings/src/book-components/Chat/utils/messagesToMarkdown.d.ts +8 -0
  65. package/esm/typings/src/book-components/Chat/utils/messagesToText.d.ts +8 -0
  66. package/esm/typings/src/book-components/_common/react-utils/classNames.d.ts +7 -0
  67. package/esm/typings/src/book-components/_common/react-utils/collectCssTextsForClass.d.ts +7 -0
  68. package/esm/typings/src/book-components/_common/react-utils/escapeHtml.d.ts +6 -0
  69. package/esm/typings/src/book-components/_common/react-utils/escapeRegex.d.ts +6 -0
  70. package/esm/typings/src/config.d.ts +19 -0
  71. package/esm/typings/src/execution/AvailableModel.d.ts +4 -0
  72. package/esm/typings/src/execution/ExecutionTask.d.ts +27 -1
  73. package/esm/typings/src/execution/LlmExecutionTools.d.ts +8 -0
  74. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +6 -1
  75. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +0 -3
  76. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +81 -0
  77. package/esm/typings/src/llm-providers/_common/profiles/test/llmProviderProfiles.test.d.ts +1 -0
  78. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -0
  79. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +5 -5
  80. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  81. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  82. package/esm/typings/src/llm-providers/google/google-models.d.ts +1 -1
  83. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +5 -0
  84. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +1 -1
  85. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +8 -0
  86. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +5 -0
  87. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  88. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -0
  89. package/esm/typings/src/pipeline/book-notation.d.ts +2 -1
  90. package/esm/typings/src/playground/permanent/error-handling-playground.d.ts +5 -0
  91. package/esm/typings/src/types/ModelRequirements.d.ts +0 -2
  92. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  93. package/esm/typings/src/utils/color/$randomColor.d.ts +11 -0
  94. package/esm/typings/src/utils/color/Color.d.ts +180 -0
  95. package/esm/typings/src/utils/color/css-colors.d.ts +159 -0
  96. package/esm/typings/src/utils/color/internal-utils/checkChannelValue.d.ts +14 -0
  97. package/esm/typings/src/utils/color/internal-utils/hslToRgb.d.ts +17 -0
  98. package/esm/typings/src/utils/color/internal-utils/rgbToHsl.d.ts +17 -0
  99. package/esm/typings/src/utils/color/operators/ColorTransformer.d.ts +5 -0
  100. package/esm/typings/src/utils/color/operators/darken.d.ts +9 -0
  101. package/esm/typings/src/utils/color/operators/furthest.d.ts +16 -0
  102. package/esm/typings/src/utils/color/operators/grayscale.d.ts +9 -0
  103. package/esm/typings/src/utils/color/operators/lighten.d.ts +12 -0
  104. package/esm/typings/src/utils/color/operators/mixWithColor.d.ts +11 -0
  105. package/esm/typings/src/utils/color/operators/nearest.d.ts +10 -0
  106. package/esm/typings/src/utils/color/operators/negative.d.ts +7 -0
  107. package/esm/typings/src/utils/color/operators/negativeLightness.d.ts +7 -0
  108. package/esm/typings/src/utils/color/operators/withAlpha.d.ts +9 -0
  109. package/esm/typings/src/utils/color/utils/areColorsEqual.d.ts +14 -0
  110. package/esm/typings/src/utils/color/utils/colorDistance.d.ts +21 -0
  111. package/esm/typings/src/utils/color/utils/colorHue.d.ts +11 -0
  112. package/esm/typings/src/utils/color/utils/colorHueDistance.d.ts +11 -0
  113. package/esm/typings/src/utils/color/utils/colorHueDistance.test.d.ts +1 -0
  114. package/esm/typings/src/utils/color/utils/colorLuminance.d.ts +9 -0
  115. package/esm/typings/src/utils/color/utils/colorSatulightion.d.ts +7 -0
  116. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +9 -0
  117. package/esm/typings/src/utils/color/utils/colorToDataUrl.d.ts +10 -0
  118. package/esm/typings/src/utils/color/utils/mixColors.d.ts +11 -0
  119. package/esm/typings/src/utils/organization/preserve.d.ts +21 -0
  120. package/esm/typings/src/utils/take/classes/TakeChain.d.ts +11 -0
  121. package/esm/typings/src/utils/take/interfaces/ITakeChain.d.ts +12 -0
  122. package/esm/typings/src/utils/take/interfaces/Takeable.d.ts +7 -0
  123. package/esm/typings/src/utils/take/take.d.ts +12 -0
  124. package/esm/typings/src/utils/take/take.test.d.ts +1 -0
  125. package/esm/typings/src/version.d.ts +1 -1
  126. package/package.json +2 -2
  127. package/umd/index.umd.js +253 -10
  128. package/umd/index.umd.js.map +1 -1
  129. package/esm/typings/src/scripting/javascript/utils/preserve.d.ts +0 -14
package/esm/typings/src/utils/take/classes/TakeChain.d.ts ADDED
@@ -0,0 +1,11 @@
+ import type { ITakeChain } from '../interfaces/ITakeChain';
+ import type { Takeable } from '../interfaces/Takeable';
+ /**
+  * @private util of `@promptbook/color`
+  * @deprecated [🤡] Use some better functional library instead of `TakeChain`
+  */
+ export declare class TakeChain<TValue extends Takeable> implements ITakeChain<TValue> {
+     value: TValue;
+     constructor(value: TValue);
+     then<TResultValue extends Takeable>(callback: (oldValue: TValue) => TResultValue): TResultValue & ITakeChain<TResultValue>;
+ }
package/esm/typings/src/utils/take/interfaces/ITakeChain.d.ts ADDED
@@ -0,0 +1,12 @@
+ import type { Takeable } from './Takeable';
+ /**
+  * Represents any value with take chain functionality
+  *
+  * @private util of `@promptbook/color`
+  * @deprecated [🤡] Use some better functional library instead of `TakeChain`
+  */
+ export type WithTake<TValue extends Takeable> = TValue & ITakeChain<TValue>;
+ export interface ITakeChain<TValue extends Takeable> {
+     readonly value: TValue;
+     then<TResultValue extends Takeable>(callback: (value: TValue) => TResultValue): WithTake<TResultValue>;
+ }
package/esm/typings/src/utils/take/interfaces/Takeable.d.ts ADDED
@@ -0,0 +1,7 @@
+ /**
+  * Represents a type that can be chained in take pipe
+  *
+  * @private util of `@promptbook/color`
+  * @deprecated [🤡] Use some better functional library instead of `TakeChain`
+  */
+ export type Takeable = Exclude<object, null>;
package/esm/typings/src/utils/take/take.d.ts ADDED
@@ -0,0 +1,12 @@
+ import type { WithTake } from './interfaces/ITakeChain';
+ import type { Takeable } from './interfaces/Takeable';
+ /**
+  * A function that takes an initial value and returns a proxy object with chainable methods.
+  *
+  * @param {*} initialValue - The initial value.
+  * @returns {Proxy<WithTake<TValue>>} - A proxy object with a `take` method.
+  *
+  * @private util of `@promptbook/color`
+  * @deprecated [🤡] Use some better functional library instead of `TakeChain`
+  */
+ export declare function take<TValue extends Takeable>(initialValue: TValue): WithTake<TValue>;
package/esm/typings/src/utils/take/take.test.d.ts ADDED
@@ -0,0 +1 @@
+ export {};
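
Note: Taken together, the four `take` typings above describe a small fluent-chaining helper: `take(value)` wraps any non-null object so that successive `then` calls transform it while the result stays chainable. A minimal usage sketch against these typings (the color object is a made-up example value):

    import { take } from './take';

    // Each `then` maps the wrapped value to a new chainable value;
    // per `Takeable`, every intermediate value must be a non-null object.
    const result = take({ r: 255, g: 0, b: 0 })
        .then((color) => ({ ...color, g: 128 }))
        .then((color) => ({ css: `rgb(${color.r}, ${color.g}, ${color.b})` }));

    console.info(result.css); // → 'rgb(255, 128, 0)'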
package/esm/typings/src/version.d.ts CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
   * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.100.0-8`).
+ * It follows semantic versioning (e.g., `0.100.0-65`).
   *
   * @generated
   */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "@promptbook/ollama",
-     "version": "0.100.0-9",
+     "version": "0.100.0",
      "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
      "private": false,
      "sideEffects": false,
@@ -94,7 +94,7 @@
      "module": "./esm/index.es.js",
      "typings": "./esm/typings/src/_packages/ollama.index.d.ts",
      "peerDependencies": {
-         "@promptbook/core": "0.100.0-9"
+         "@promptbook/core": "0.100.0"
      },
      "dependencies": {
          "bottleneck": "^2.19.5",
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
   * @generated
   * @see https://github.com/webgptorg/promptbook
   */
- const PROMPTBOOK_ENGINE_VERSION = '0.100.0-9';
+ const PROMPTBOOK_ENGINE_VERSION = '0.100.0';
  /**
   * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
   * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -229,6 +229,13 @@
   * @public exported from `@promptbook/utils`
   */
  const SMALL_NUMBER = 0.001;
+ // <- TODO: [⏳] Standardize timeouts, Make DEFAULT_TIMEOUT_MS as global constant
+ /**
+  * How many times to retry the connections
+  *
+  * @private within the repository - too low-level in comparison with other `MAX_...`
+  */
+ const CONNECTION_RETRIES_LIMIT = 5;
  // <- TODO: [🧜‍♂️]
  /**
   * Default settings for parsing and generating CSV files in Promptbook.
@@ -249,6 +256,13 @@
   * @public exported from `@promptbook/core`
   */
  const DEFAULT_MAX_REQUESTS_PER_MINUTE = 60;
+ /**
+  * API request timeout in milliseconds
+  * Can be overridden via API_REQUEST_TIMEOUT environment variable
+  *
+  * @public exported from `@promptbook/core`
+  */
+ const API_REQUEST_TIMEOUT = parseInt(process.env.API_REQUEST_TIMEOUT || '90000');
  /**
   * Note: [💞] Ignore a discrepancy between file name and entity name
   * TODO: [🧠][🧜‍♂️] Maybe join remoteServerUrl and path into single value
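
Note: `API_REQUEST_TIMEOUT` is read once when the bundle initializes, so an override has to be present in the environment before the first import. A minimal sketch (the value 300000 is illustrative):

    // Allow up to 5 minutes per API request, e.g. for slow self-hosted models.
    // Must run before `@promptbook/core` (or this bundle) is first loaded,
    // because the constant is evaluated at module initialization.
    process.env.API_REQUEST_TIMEOUT = '300000';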
@@ -1076,7 +1090,7 @@
  /**
   * List of available OpenAI models with pricing
   *
- * Note: Done at 2025-05-06
+ * Note: Synced with official API docs at 2025-08-20
   *
   * @see https://platform.openai.com/docs/models/
   * @see https://openai.com/api/pricing/
@@ -1085,6 +1099,138 @@
  const OPENAI_MODELS = exportJson({
      name: 'OPENAI_MODELS',
      value: [
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'gpt-5',
+             modelName: 'gpt-5',
+             modelDescription: "OpenAI's most advanced language model with unprecedented reasoning capabilities and 200K context window. Features revolutionary improvements in complex problem-solving, scientific reasoning, and creative tasks. Demonstrates human-level performance across diverse domains with enhanced safety measures and alignment. Represents the next generation of AI with superior understanding, nuanced responses, and advanced multimodal capabilities.",
+             pricing: {
+                 prompt: pricing(`$1.25 / 1M tokens`),
+                 output: pricing(`$10.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'gpt-5-mini',
+             modelName: 'gpt-5-mini',
+             modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
+             pricing: {
+                 prompt: pricing(`$0.25 / 1M tokens`),
+                 output: pricing(`$2.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'gpt-5-nano',
+             modelName: 'gpt-5-nano',
+             modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
+             pricing: {
+                 prompt: pricing(`$0.05 / 1M tokens`),
+                 output: pricing(`$0.40 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'gpt-4.1',
+             modelName: 'gpt-4.1',
+             modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
+             pricing: {
+                 prompt: pricing(`$3.00 / 1M tokens`),
+                 output: pricing(`$12.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'gpt-4.1-mini',
+             modelName: 'gpt-4.1-mini',
+             modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
+             pricing: {
+                 prompt: pricing(`$0.80 / 1M tokens`),
+                 output: pricing(`$3.20 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'gpt-4.1-nano',
+             modelName: 'gpt-4.1-nano',
+             modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
+             pricing: {
+                 prompt: pricing(`$0.20 / 1M tokens`),
+                 output: pricing(`$0.80 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'o3',
+             modelName: 'o3',
+             modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
+             pricing: {
+                 prompt: pricing(`$15.00 / 1M tokens`),
+                 output: pricing(`$60.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'o3-pro',
+             modelName: 'o3-pro',
+             modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
+             pricing: {
+                 prompt: pricing(`$30.00 / 1M tokens`),
+                 output: pricing(`$120.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'o4-mini',
+             modelName: 'o4-mini',
+             modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
+             pricing: {
+                 prompt: pricing(`$4.00 / 1M tokens`),
+                 output: pricing(`$16.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'o3-deep-research',
+             modelName: 'o3-deep-research',
+             modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
+             pricing: {
+                 prompt: pricing(`$25.00 / 1M tokens`),
+                 output: pricing(`$100.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'o4-mini-deep-research',
+             modelName: 'o4-mini-deep-research',
+             modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
+             pricing: {
+                 prompt: pricing(`$12.00 / 1M tokens`),
+                 output: pricing(`$48.00 / 1M tokens`),
+             },
+         },
+         /**/
          /*/
          {
              modelTitle: 'dall-e-3',
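
Note: The `pricing(...)` strings above are per-million-token rates, so a call's cost is the prompt and output token counts scaled separately. A worked example with hypothetical token counts for `gpt-5`:

    // 10K prompt tokens and 2K output tokens at $1.25 / $10.00 per 1M tokens:
    const cost = (10_000 / 1_000_000) * 1.25 + (2_000 / 1_000_000) * 10.0;
    // → 0.0125 + 0.02 = $0.0325 for the call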
@@ -1791,7 +1937,18 @@
          const openAiOptions = { ...this.options };
          delete openAiOptions.isVerbose;
          delete openAiOptions.userId;
-         this.client = new OpenAI__default["default"](openAiOptions);
+         // Enhanced configuration for better ECONNRESET handling
+         const enhancedOptions = {
+             ...openAiOptions,
+             timeout: API_REQUEST_TIMEOUT,
+             maxRetries: CONNECTION_RETRIES_LIMIT,
+             defaultHeaders: {
+                 Connection: 'keep-alive',
+                 'Keep-Alive': 'timeout=30, max=100',
+                 ...openAiOptions.defaultHeaders,
+             },
+         };
+         this.client = new OpenAI__default["default"](enhancedOptions);
      }
      return this.client;
  }
@@ -1844,7 +2001,6 @@
      const modelSettings = {
          model: modelName,
          max_tokens: modelRequirements.maxTokens,
-         // <- TODO: [🌾] Make some global max cap for maxTokens
          temperature: modelRequirements.temperature,
          // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
          // <- Note: [🧆]
@@ -1880,7 +2036,7 @@
      console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
  const rawResponse = await this.limiter
-     .schedule(() => client.chat.completions.create(rawRequest))
+     .schedule(() => this.makeRequestWithRetry(() => client.chat.completions.create(rawRequest)))
      .catch((error) => {
          assertsError(error);
          if (this.options.isVerbose) {
@@ -1940,8 +2096,7 @@
  const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
  const modelSettings = {
      model: modelName,
-     max_tokens: modelRequirements.maxTokens || 2000,
-     // <- TODO: [🌾] Make some global max cap for maxTokens
+     max_tokens: modelRequirements.maxTokens,
      temperature: modelRequirements.temperature,
      // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
      // <- Note: [🧆]
@@ -1957,7 +2112,7 @@
      console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
  const rawResponse = await this.limiter
-     .schedule(() => client.completions.create(rawRequest))
+     .schedule(() => this.makeRequestWithRetry(() => client.completions.create(rawRequest)))
      .catch((error) => {
          assertsError(error);
          if (this.options.isVerbose) {
@@ -2021,7 +2176,7 @@
      console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
  const rawResponse = await this.limiter
-     .schedule(() => client.embeddings.create(rawRequest))
+     .schedule(() => this.makeRequestWithRetry(() => client.embeddings.create(rawRequest)))
      .catch((error) => {
          assertsError(error);
          if (this.options.isVerbose) {
@@ -2079,6 +2234,76 @@
          }
          return model;
      }
+     // <- Note: [🤖] getDefaultXxxModel
+     /**
+      * Makes a request with retry logic for network errors like ECONNRESET
+      */
+     async makeRequestWithRetry(requestFn) {
+         let lastError;
+         for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
+             try {
+                 return await requestFn();
+             }
+             catch (error) {
+                 assertsError(error);
+                 lastError = error;
+                 // Check if this is a retryable network error
+                 const isRetryableError = this.isRetryableNetworkError(error);
+                 if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
+                     if (this.options.isVerbose) {
+                         console.info(colors__default["default"].bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
+                     }
+                     throw error;
+                 }
+                 // Calculate exponential backoff delay
+                 const baseDelay = 1000; // 1 second
+                 const backoffDelay = baseDelay * Math.pow(2, attempt - 1);
+                 const jitterDelay = Math.random() * 500; // Add some randomness
+                 const totalDelay = backoffDelay + jitterDelay;
+                 if (this.options.isVerbose) {
+                     console.info(colors__default["default"].bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
+                 }
+                 // Wait before retrying
+                 await new Promise((resolve) => setTimeout(resolve, totalDelay));
+             }
+         }
+         throw lastError;
+     }
+     /**
+      * Determines if an error is retryable (network-related errors)
+      */
+     isRetryableNetworkError(error) {
+         const errorMessage = error.message.toLowerCase();
+         const errorCode = error.code;
+         // Network connection errors that should be retried
+         const retryableErrors = [
+             'econnreset',
+             'enotfound',
+             'econnrefused',
+             'etimedout',
+             'socket hang up',
+             'network error',
+             'fetch failed',
+             'connection reset',
+             'connection refused',
+             'timeout',
+         ];
+         // Check error message
+         if (retryableErrors.some((retryableError) => errorMessage.includes(retryableError))) {
+             return true;
+         }
+         // Check error code
+         if (errorCode && retryableErrors.includes(errorCode.toLowerCase())) {
+             return true;
+         }
+         // Check for specific HTTP status codes that are retryable
+         const errorWithStatus = error;
+         const httpStatus = errorWithStatus.status || errorWithStatus.statusCode;
+         if (httpStatus && [429, 500, 502, 503, 504].includes(httpStatus)) {
+             return true;
+         }
+         return false;
+     }
  }
  /**
   * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
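
Note: With `baseDelay` = 1000 ms, the wait after a failed attempt n is 2^(n-1) seconds plus up to 500 ms of jitter, i.e. roughly 1–1.5 s, 2–2.5 s, 4–4.5 s and 8–8.5 s between the five attempts; the final failure rethrows immediately. The same delay computation in isolation:

    // Mirrors the backoff logic above; `attempt` is 1-based.
    function retryDelayMs(attempt: number): number {
        const baseDelay = 1000; // 1 second
        const backoffDelay = baseDelay * Math.pow(2, attempt - 1); // 1s, 2s, 4s, 8s
        const jitterDelay = Math.random() * 500; // de-synchronizes concurrent clients
        return backoffDelay + jitterDelay;
    }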
@@ -2090,7 +2315,7 @@
  /**
   * List of available models in Ollama library
   *
- * Note: Done at 2025-05-19
+ * Note: Synced with official API docs at 2025-08-20
   *
   * @see https://ollama.com/library
   * @public exported from `@promptbook/ollama`
@@ -2098,6 +2323,24 @@
  const OLLAMA_MODELS = exportJson({
      name: 'OLLAMA_MODELS',
      value: [
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'llama3.3',
+             modelName: 'llama3.3',
+             modelDescription: 'Meta Llama 3.3 (70B parameters) with 128K context window. Latest generation foundation model with significantly enhanced reasoning, instruction following, and multilingual capabilities. Features improved performance on complex tasks and better factual accuracy compared to Llama 3.1.',
+         },
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'llama3.2',
+             modelName: 'llama3.2',
+             modelDescription: 'Meta Llama 3.2 (1B-90B parameters) with 128K context window. Enhanced model with improved reasoning capabilities, better instruction following, and multimodal support in larger variants. Features significant performance improvements over Llama 3.1 across diverse tasks.',
+         },
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'llama3.1',
+             modelName: 'llama3.1',
+             modelDescription: 'Meta Llama 3.1 (8B-405B parameters) with 128K context window. Advanced foundation model with enhanced reasoning, improved multilingual capabilities, and better performance on complex tasks. Features significant improvements in code generation and mathematical reasoning.',
+         },
          {
              modelVariant: 'CHAT',
              modelTitle: 'llama3',
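
Note: `OLLAMA_MODELS` is a plain (frozen) array, so the new entries can be inspected directly; a sketch listing the chat-capable model names via the package's public export:

    import { OLLAMA_MODELS } from '@promptbook/ollama';

    // Collect the names of all chat-variant models, now including llama3.3.
    const chatModelNames = OLLAMA_MODELS
        .filter(({ modelVariant }) => modelVariant === 'CHAT')
        .map(({ modelName }) => modelName);
    // → ['llama3.3', 'llama3.2', 'llama3.1', 'llama3', ...]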