@danya-ai/cli 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184):
  1. package/LICENSE +201 -0
  2. package/README.md +336 -0
  3. package/cli-acp.js +82 -0
  4. package/cli.js +105 -0
  5. package/dist/REPL-EYUOXCEC.js +42 -0
  6. package/dist/REPL-EYUOXCEC.js.map +7 -0
  7. package/dist/acp-S5WNCLMD.js +1372 -0
  8. package/dist/acp-S5WNCLMD.js.map +7 -0
  9. package/dist/agentsValidate-RQ2QDGNY.js +373 -0
  10. package/dist/agentsValidate-RQ2QDGNY.js.map +7 -0
  11. package/dist/ask-TX526UBD.js +129 -0
  12. package/dist/ask-TX526UBD.js.map +7 -0
  13. package/dist/autoUpdater-63RAZ24N.js +17 -0
  14. package/dist/autoUpdater-63RAZ24N.js.map +7 -0
  15. package/dist/chunk-2VQWLLDU.js +16 -0
  16. package/dist/chunk-2VQWLLDU.js.map +7 -0
  17. package/dist/chunk-4CLHMO4I.js +656 -0
  18. package/dist/chunk-4CLHMO4I.js.map +7 -0
  19. package/dist/chunk-4ZNNWJZU.js +5696 -0
  20. package/dist/chunk-4ZNNWJZU.js.map +7 -0
  21. package/dist/chunk-66EZC7Y7.js +149 -0
  22. package/dist/chunk-66EZC7Y7.js.map +7 -0
  23. package/dist/chunk-6EPQRP3S.js +96 -0
  24. package/dist/chunk-6EPQRP3S.js.map +7 -0
  25. package/dist/chunk-77IRSDFR.js +195 -0
  26. package/dist/chunk-77IRSDFR.js.map +7 -0
  27. package/dist/chunk-7RZNLBEK.js +136 -0
  28. package/dist/chunk-7RZNLBEK.js.map +7 -0
  29. package/dist/chunk-BNBV2FXC.js +19 -0
  30. package/dist/chunk-BNBV2FXC.js.map +7 -0
  31. package/dist/chunk-CQCREBDO.js +248 -0
  32. package/dist/chunk-CQCREBDO.js.map +7 -0
  33. package/dist/chunk-D77XS6TB.js +74 -0
  34. package/dist/chunk-D77XS6TB.js.map +7 -0
  35. package/dist/chunk-DHYBJN3V.js +474 -0
  36. package/dist/chunk-DHYBJN3V.js.map +7 -0
  37. package/dist/chunk-DLSLSLTR.js +842 -0
  38. package/dist/chunk-DLSLSLTR.js.map +7 -0
  39. package/dist/chunk-ELAE6Z4H.js +514 -0
  40. package/dist/chunk-ELAE6Z4H.js.map +7 -0
  41. package/dist/chunk-ELZQD7ZR.js +531 -0
  42. package/dist/chunk-ELZQD7ZR.js.map +7 -0
  43. package/dist/chunk-F6DEGMX6.js +31269 -0
  44. package/dist/chunk-F6DEGMX6.js.map +7 -0
  45. package/dist/chunk-GDF2AON2.js +124 -0
  46. package/dist/chunk-GDF2AON2.js.map +7 -0
  47. package/dist/chunk-H7BGBV4P.js +498 -0
  48. package/dist/chunk-H7BGBV4P.js.map +7 -0
  49. package/dist/chunk-HIIHGKXP.js +24 -0
  50. package/dist/chunk-HIIHGKXP.js.map +7 -0
  51. package/dist/chunk-HJCCXED7.js +17 -0
  52. package/dist/chunk-HJCCXED7.js.map +7 -0
  53. package/dist/chunk-IQ6VZB2Y.js +139 -0
  54. package/dist/chunk-IQ6VZB2Y.js.map +7 -0
  55. package/dist/chunk-J4D7AELD.js +518 -0
  56. package/dist/chunk-J4D7AELD.js.map +7 -0
  57. package/dist/chunk-JVGG2YQR.js +23 -0
  58. package/dist/chunk-JVGG2YQR.js.map +7 -0
  59. package/dist/chunk-LGEK2NV7.js +939 -0
  60. package/dist/chunk-LGEK2NV7.js.map +7 -0
  61. package/dist/chunk-LWXT5RGE.js +95 -0
  62. package/dist/chunk-LWXT5RGE.js.map +7 -0
  63. package/dist/chunk-M3TKNAUR.js +35 -0
  64. package/dist/chunk-M3TKNAUR.js.map +7 -0
  65. package/dist/chunk-MRFO7QO5.js +170 -0
  66. package/dist/chunk-MRFO7QO5.js.map +7 -0
  67. package/dist/chunk-MVN3DHQF.js +95 -0
  68. package/dist/chunk-MVN3DHQF.js.map +7 -0
  69. package/dist/chunk-O25PXGOC.js +772 -0
  70. package/dist/chunk-O25PXGOC.js.map +7 -0
  71. package/dist/chunk-OBGVKM3N.js +1618 -0
  72. package/dist/chunk-OBGVKM3N.js.map +7 -0
  73. package/dist/chunk-OV5HJXXQ.js +198 -0
  74. package/dist/chunk-OV5HJXXQ.js.map +7 -0
  75. package/dist/chunk-P5VWDMRD.js +249 -0
  76. package/dist/chunk-P5VWDMRD.js.map +7 -0
  77. package/dist/chunk-PDSAJX7G.js +49 -0
  78. package/dist/chunk-PDSAJX7G.js.map +7 -0
  79. package/dist/chunk-RHNEZOPO.js +739 -0
  80. package/dist/chunk-RHNEZOPO.js.map +7 -0
  81. package/dist/chunk-SQGAHZPM.js +3004 -0
  82. package/dist/chunk-SQGAHZPM.js.map +7 -0
  83. package/dist/chunk-U7Z4MXY4.js +21 -0
  84. package/dist/chunk-U7Z4MXY4.js.map +7 -0
  85. package/dist/chunk-UNCTVIS7.js +146 -0
  86. package/dist/chunk-UNCTVIS7.js.map +7 -0
  87. package/dist/chunk-VMEOI6MH.js +1103 -0
  88. package/dist/chunk-VMEOI6MH.js.map +7 -0
  89. package/dist/chunk-WAY3DKFO.js +47 -0
  90. package/dist/chunk-WAY3DKFO.js.map +7 -0
  91. package/dist/chunk-XEYEKVFT.js +24 -0
  92. package/dist/chunk-XEYEKVFT.js.map +7 -0
  93. package/dist/chunk-Y4BQ36T4.js +796 -0
  94. package/dist/chunk-Y4BQ36T4.js.map +7 -0
  95. package/dist/chunk-Y5LQPJWK.js +12 -0
  96. package/dist/chunk-Y5LQPJWK.js.map +7 -0
  97. package/dist/chunk-YIJWUNWF.js +1260 -0
  98. package/dist/chunk-YIJWUNWF.js.map +7 -0
  99. package/dist/chunk-YMIWYEZ7.js +34 -0
  100. package/dist/chunk-YMIWYEZ7.js.map +7 -0
  101. package/dist/cli-PQNZWJX4.js +3952 -0
  102. package/dist/cli-PQNZWJX4.js.map +7 -0
  103. package/dist/commands-HOBCZ3VQ.js +46 -0
  104. package/dist/commands-HOBCZ3VQ.js.map +7 -0
  105. package/dist/config-MLH7ZTFA.js +81 -0
  106. package/dist/config-MLH7ZTFA.js.map +7 -0
  107. package/dist/context-FZ6G4J63.js +30 -0
  108. package/dist/context-FZ6G4J63.js.map +7 -0
  109. package/dist/costTracker-5WKZXN5S.js +19 -0
  110. package/dist/costTracker-5WKZXN5S.js.map +7 -0
  111. package/dist/customCommands-EB4MMZSS.js +25 -0
  112. package/dist/customCommands-EB4MMZSS.js.map +7 -0
  113. package/dist/env-VMEIP4EW.js +28 -0
  114. package/dist/env-VMEIP4EW.js.map +7 -0
  115. package/dist/index.js +36 -0
  116. package/dist/index.js.map +7 -0
  117. package/dist/kodeAgentSessionId-WUT74FSH.js +16 -0
  118. package/dist/kodeAgentSessionId-WUT74FSH.js.map +7 -0
  119. package/dist/kodeAgentSessionLoad-KR4JSD6D.js +21 -0
  120. package/dist/kodeAgentSessionLoad-KR4JSD6D.js.map +7 -0
  121. package/dist/kodeAgentSessionResume-BCD6UV74.js +18 -0
  122. package/dist/kodeAgentSessionResume-BCD6UV74.js.map +7 -0
  123. package/dist/kodeAgentStreamJson-EDHHWNNX.js +15 -0
  124. package/dist/kodeAgentStreamJson-EDHHWNNX.js.map +7 -0
  125. package/dist/kodeAgentStreamJsonSession-G4RBNZRN.js +133 -0
  126. package/dist/kodeAgentStreamJsonSession-G4RBNZRN.js.map +7 -0
  127. package/dist/kodeAgentStructuredStdio-UA5P5UNU.js +11 -0
  128. package/dist/kodeAgentStructuredStdio-UA5P5UNU.js.map +7 -0
  129. package/dist/kodeHooks-EHM6GSIQ.js +37 -0
  130. package/dist/kodeHooks-EHM6GSIQ.js.map +7 -0
  131. package/dist/llm-SJXCV7DA.js +3138 -0
  132. package/dist/llm-SJXCV7DA.js.map +7 -0
  133. package/dist/llmLazy-2QYJVD6K.js +15 -0
  134. package/dist/llmLazy-2QYJVD6K.js.map +7 -0
  135. package/dist/loader-LJX77EFL.js +28 -0
  136. package/dist/loader-LJX77EFL.js.map +7 -0
  137. package/dist/mcp-DOROSLPN.js +49 -0
  138. package/dist/mcp-DOROSLPN.js.map +7 -0
  139. package/dist/mentionProcessor-5UZRHCGH.js +215 -0
  140. package/dist/mentionProcessor-5UZRHCGH.js.map +7 -0
  141. package/dist/messages-N5KBI53P.js +65 -0
  142. package/dist/messages-N5KBI53P.js.map +7 -0
  143. package/dist/model-HPLBR53R.js +30 -0
  144. package/dist/model-HPLBR53R.js.map +7 -0
  145. package/dist/openai-YP4OJYKF.js +29 -0
  146. package/dist/openai-YP4OJYKF.js.map +7 -0
  147. package/dist/outputStyles-NNALI5D7.js +28 -0
  148. package/dist/outputStyles-NNALI5D7.js.map +7 -0
  149. package/dist/package.json +4 -0
  150. package/dist/pluginRuntime-JYYI5BSQ.js +220 -0
  151. package/dist/pluginRuntime-JYYI5BSQ.js.map +7 -0
  152. package/dist/pluginValidation-JWUFPZUE.js +17 -0
  153. package/dist/pluginValidation-JWUFPZUE.js.map +7 -0
  154. package/dist/prompts-B2SS7CWI.js +50 -0
  155. package/dist/prompts-B2SS7CWI.js.map +7 -0
  156. package/dist/query-HIK457UU.js +50 -0
  157. package/dist/query-HIK457UU.js.map +7 -0
  158. package/dist/responsesStreaming-L2BSN37C.js +10 -0
  159. package/dist/responsesStreaming-L2BSN37C.js.map +7 -0
  160. package/dist/ripgrep-GCKI4UTL.js +17 -0
  161. package/dist/ripgrep-GCKI4UTL.js.map +7 -0
  162. package/dist/skillMarketplace-PCTUUX46.js +37 -0
  163. package/dist/skillMarketplace-PCTUUX46.js.map +7 -0
  164. package/dist/state-XJICGOUA.js +18 -0
  165. package/dist/state-XJICGOUA.js.map +7 -0
  166. package/dist/theme-DP7O4SGH.js +14 -0
  167. package/dist/theme-DP7O4SGH.js.map +7 -0
  168. package/dist/toolPermissionContext-DHAGUPEW.js +17 -0
  169. package/dist/toolPermissionContext-DHAGUPEW.js.map +7 -0
  170. package/dist/toolPermissionSettings-PT65MQIQ.js +18 -0
  171. package/dist/toolPermissionSettings-PT65MQIQ.js.map +7 -0
  172. package/dist/tools-BHW37PCF.js +47 -0
  173. package/dist/tools-BHW37PCF.js.map +7 -0
  174. package/dist/userInput-XDRYT5TI.js +316 -0
  175. package/dist/userInput-XDRYT5TI.js.map +7 -0
  176. package/dist/uuid-QUYJMIUV.js +9 -0
  177. package/dist/uuid-QUYJMIUV.js.map +7 -0
  178. package/dist/yoga.wasm +0 -0
  179. package/package.json +115 -0
  180. package/scripts/binary-utils.cjs +62 -0
  181. package/scripts/cli-acp-wrapper.cjs +82 -0
  182. package/scripts/cli-wrapper.cjs +105 -0
  183. package/scripts/postinstall.js +144 -0
  184. package/yoga.wasm +0 -0
@@ -0,0 +1,939 @@
1
+ import { createRequire as __kodeCreateRequire } from "node:module";
2
+ const require = __kodeCreateRequire(import.meta.url);
3
+ import {
4
+ getSessionState,
5
+ setSessionState
6
+ } from "./chunk-XEYEKVFT.js";
7
+ import {
8
+ getGlobalConfig
9
+ } from "./chunk-DLSLSLTR.js";
10
+ import {
11
+ debug,
12
+ getCurrentRequest,
13
+ logAPIError
14
+ } from "./chunk-Y4BQ36T4.js";
15
+
16
+ // src/services/ai/openai.ts
17
// Inert stand-in for a proxy dispatcher (e.g. undici's ProxyAgent).
// NOTE(review): the constructor discards the proxy URL entirely, so every
// `...proxy ? { dispatcher: proxy } : {}` spread below is a no-op — confirm
// whether proxy support was intentionally compiled out of this bundle.
var ProxyAgent = class {
  constructor(_url) {
  }
};
21
// Retry tuning: exponential backoff starting at 1s, capped at 32s, with up
// to 10% random jitter; a server-supplied Retry-After is honored up to 60s.
var RETRY_CONFIG = {
  BASE_DELAY_MS: 1e3,
  MAX_DELAY_MS: 32e3,
  MAX_SERVER_DELAY_MS: 6e4,
  JITTER_FACTOR: 0.1
};
/**
 * Compute the delay (ms) before retry number `attempt`.
 *
 * @param {number} attempt - 1-based retry attempt; drives the exponential term
 *   (attempt 1 -> BASE_DELAY_MS, attempt 2 -> 2x, ...).
 * @param {string} [retryAfter] - Optional Retry-After header value in seconds;
 *   when it parses to a positive number it wins, capped at MAX_SERVER_DELAY_MS.
 * @returns {number} delay in milliseconds.
 */
function getRetryDelay(attempt, retryAfter) {
  if (retryAfter) {
    // Always pass a radix; avoid the coercing global isNaN.
    const retryAfterMs = Number.parseInt(retryAfter, 10) * 1e3;
    if (!Number.isNaN(retryAfterMs) && retryAfterMs > 0) {
      return Math.min(retryAfterMs, RETRY_CONFIG.MAX_SERVER_DELAY_MS);
    }
  }
  // Exponential backoff with jitter to avoid thundering-herd retries.
  const delay = RETRY_CONFIG.BASE_DELAY_MS * Math.pow(2, attempt - 1);
  const jitter = Math.random() * RETRY_CONFIG.JITTER_FACTOR * delay;
  return Math.min(delay + jitter, RETRY_CONFIG.MAX_DELAY_MS);
}
38
/**
 * Sleep for `delayMs`, rejecting early with "Request was aborted" if the
 * signal fires (or is already aborted).
 *
 * Fix: the original registered an abort listener but never removed it when
 * the timer completed normally, leaking one listener per retry on a
 * long-lived AbortSignal. The listener is now detached on resolution.
 *
 * @param {number} delayMs - Milliseconds to wait.
 * @param {AbortSignal} [signal] - Optional cancellation signal.
 * @returns {Promise<void>}
 */
function abortableDelay(delayMs, signal) {
  return new Promise((resolve, reject) => {
    if (signal?.aborted) {
      reject(new Error("Request was aborted"));
      return;
    }
    const abortHandler = () => {
      clearTimeout(timeoutId);
      reject(new Error("Request was aborted"));
    };
    const timeoutId = setTimeout(() => {
      // Detach the abort listener so repeated delays on the same signal
      // do not accumulate listeners.
      signal?.removeEventListener("abort", abortHandler);
      resolve();
    }, delayMs);
    signal?.addEventListener("abort", abortHandler, { once: true });
  });
}
56
// Build the composite session-state key identifying one
// (endpoint, model, error-type) combination.
function getModelErrorKey(baseURL, model, type) {
  return [baseURL, model, type].join(":");
}
59
// True when this (endpoint, model, error-type) combination has a recorded
// error in session state.
function hasModelError(baseURL, model, type) {
  const key = getModelErrorKey(baseURL, model, type);
  return Boolean(getSessionState("modelErrors")[key]);
}
62
// Record an error message for this (endpoint, model, error-type) combination
// in session state, so later requests can pre-apply the matching fix.
function setModelError(baseURL, model, type, error) {
  const key = getModelErrorKey(baseURL, model, type);
  setSessionState("modelErrors", { [key]: error });
}
67
// Error handlers specific to GPT-5-family models. Each entry pairs a `detect`
// predicate (matched against the lowercased API error message) with an async
// `fix` that mutates the request options in place so the retry can succeed.
// These run BEFORE the generic ERROR_HANDLERS (see applyModelErrorFixes).
var GPT5_ERROR_HANDLERS = [
  {
    type: "max_completion_tokens" /* MaxCompletionTokens */,
    // Matches the many phrasings providers use to say `max_tokens` must be
    // replaced with `max_completion_tokens`.
    detect: (errMsg) => {
      const lowerMsg = errMsg.toLowerCase();
      return lowerMsg.includes("unsupported parameter: 'max_tokens'") && lowerMsg.includes("'max_completion_tokens'") || lowerMsg.includes("max_tokens") && lowerMsg.includes("max_completion_tokens") || lowerMsg.includes("max_tokens") && lowerMsg.includes("not supported") || lowerMsg.includes("max_tokens") && lowerMsg.includes("use max_completion_tokens") || lowerMsg.includes("invalid parameter") && lowerMsg.includes("max_tokens") || lowerMsg.includes("parameter error") && lowerMsg.includes("max_tokens");
    },
    // Renames max_tokens -> max_completion_tokens; the value is unchanged.
    // NOTE(review): the log records `to: opts.max_tokens` before the rename,
    // so `from` and `to` are always identical — presumably intentional since
    // only the key changes, but confirm the log fields are as intended.
    fix: async (opts) => {
      debug.api("GPT5_FIX_MAX_TOKENS", {
        from: opts.max_tokens,
        to: opts.max_tokens
      });
      if ("max_tokens" in opts) {
        opts.max_completion_tokens = opts.max_tokens;
        delete opts.max_tokens;
      }
    }
  },
  {
    type: "temperature_restriction" /* TemperatureRestriction */,
    // Some GPT-5 deployments reject any temperature other than 1.
    detect: (errMsg) => {
      const lowerMsg = errMsg.toLowerCase();
      return lowerMsg.includes("temperature") && (lowerMsg.includes("only supports") || lowerMsg.includes("must be 1") || lowerMsg.includes("invalid temperature"));
    },
    fix: async (opts) => {
      debug.api("GPT5_FIX_TEMPERATURE", {
        from: opts.temperature,
        to: 1
      });
      opts.temperature = 1;
    }
  }
];
100
// Generic provider-error handlers, applied to all models. Each `fix` mutates
// the request options in place; only the first matching handler runs per
// request (see applyModelErrorFixes / the retry loops).
var ERROR_HANDLERS = [
  {
    // Provider limits tool descriptions to 1024 characters.
    type: "1024" /* MaxLength */,
    detect: (errMsg) => errMsg.includes("Expected a string with maximum length 1024"),
    // Truncate each over-long tool description at a line boundary under 1024
    // chars, and move the overflow into an extra system message appended
    // right after the last existing system message.
    fix: async (opts) => {
      const toolDescriptions = {};
      for (const tool of opts.tools || []) {
        if (tool.function.description.length <= 1024) continue;
        let str = "";
        let remainder = "";
        for (let line of tool.function.description.split("\n")) {
          // Greedy split: lines fitting under the limit stay in the
          // description; everything after the first overflow moves out.
          if (str.length + line.length < 1024) {
            str += line + "\n";
          } else {
            remainder += line + "\n";
          }
        }
        tool.function.description = str;
        toolDescriptions[tool.function.name] = remainder;
      }
      if (Object.keys(toolDescriptions).length > 0) {
        let content = "<additional-tool-usage-instructions>\n\n";
        for (const [name, description] of Object.entries(toolDescriptions)) {
          content += `<${name}>
${description}
</${name}>

`;
        }
        content += "</additional-tool-usage-instructions>";
        // Insert after the LAST system message so ordering of system
        // instructions is preserved.
        for (let i = opts.messages.length - 1; i >= 0; i--) {
          if (opts.messages[i].role === "system") {
            opts.messages.splice(i + 1, 0, {
              role: "system",
              content
            });
            break;
          }
        }
      }
    }
  },
  {
    // Same max_tokens rename as the GPT-5 handler, for the exact OpenAI
    // error wording.
    type: "max_completion_tokens" /* MaxCompletionTokens */,
    detect: (errMsg) => errMsg.includes("Use 'max_completion_tokens'"),
    fix: async (opts) => {
      opts.max_completion_tokens = opts.max_tokens;
      delete opts.max_tokens;
    }
  },
  {
    // Provider does not accept the stream_options parameter at all.
    type: "stream_options" /* StreamOptions */,
    detect: (errMsg) => errMsg.includes("stream_options"),
    fix: async (opts) => {
      delete opts.stream_options;
    }
  },
  {
    // Provider rejects `citations` fields on message content parts.
    type: "citations" /* Citations */,
    detect: (errMsg) => errMsg.includes("Extra inputs are not permitted") && errMsg.includes("citations"),
    // Strip `citations` from every content part (array form) or from the
    // content object itself (object form); string content is left alone.
    fix: async (opts) => {
      if (!opts.messages) return;
      for (const message of opts.messages) {
        if (!message) continue;
        if (Array.isArray(message.content)) {
          for (const item of message.content) {
            if (item && typeof item === "object") {
              const itemObj = item;
              if ("citations" in itemObj) {
                delete itemObj.citations;
              }
            }
          }
        } else if (message.content && typeof message.content === "object") {
          const contentObj = message.content;
          if ("citations" in contentObj) {
            delete contentObj.citations;
          }
        }
      }
    }
  }
];
183
// Per-model capability flags. Reasoning-family models take
// `max_completion_tokens` instead of `max_tokens`; GPT-5 variants add
// Responses-API support, a fixed temperature of 1, and extra tool controls.
var MODEL_FEATURES = {
  o1: { usesMaxCompletionTokens: true },
  "o1-preview": { usesMaxCompletionTokens: true },
  "o1-mini": { usesMaxCompletionTokens: true },
  "o1-pro": { usesMaxCompletionTokens: true },
  "o3-mini": { usesMaxCompletionTokens: true },
  "gpt-5": {
    usesMaxCompletionTokens: true,
    supportsResponsesAPI: true,
    requiresTemperatureOne: true,
    supportsVerbosityControl: true,
    supportsCustomTools: true,
    supportsAllowedTools: true
  },
  "gpt-5-mini": {
    usesMaxCompletionTokens: true,
    supportsResponsesAPI: true,
    requiresTemperatureOne: true,
    supportsVerbosityControl: true,
    supportsCustomTools: true,
    supportsAllowedTools: true
  },
  "gpt-5-nano": {
    usesMaxCompletionTokens: true,
    supportsResponsesAPI: true,
    requiresTemperatureOne: true,
    supportsVerbosityControl: true,
    supportsCustomTools: true,
    supportsAllowedTools: true
  },
  "gpt-5-chat-latest": {
    usesMaxCompletionTokens: true,
    supportsResponsesAPI: false,
    requiresTemperatureOne: true,
    supportsVerbosityControl: true
  }
};
// Resolve capability flags for a model name. Lookup order: exact table hit,
// then any name containing "gpt-5" (case-insensitive), then the first table
// key the name contains as a substring, then a conservative default.
function getModelFeatures(modelName) {
  if (typeof modelName !== "string" || modelName === "") {
    return { usesMaxCompletionTokens: false };
  }
  const exact = MODEL_FEATURES[modelName];
  if (exact) {
    return exact;
  }
  if (modelName.toLowerCase().includes("gpt-5")) {
    // Unknown GPT-5 variant: assume the full GPT-5 feature set.
    return {
      usesMaxCompletionTokens: true,
      supportsResponsesAPI: true,
      requiresTemperatureOne: true,
      supportsVerbosityControl: true,
      supportsCustomTools: true,
      supportsAllowedTools: true
    };
  }
  const partial = Object.entries(MODEL_FEATURES).find(
    ([key]) => modelName.includes(key)
  );
  if (partial) {
    return partial[1];
  }
  return { usesMaxCompletionTokens: false };
}
244
/**
 * Normalize request options in place for the target model BEFORE sending:
 * rename max_tokens -> max_completion_tokens, pin temperature to 1 where the
 * model requires it, and strip parameters GPT-5 rejects.
 *
 * Fix: the original ended with an `else { if (features.usesMaxCompletionTokens
 * && ...) }` branch that could never run — on that path the outer condition
 * already guaranteed `features.usesMaxCompletionTokens` was false. The dead
 * branch is removed; behavior is unchanged.
 *
 * @param {object} opts - Mutable chat-completion request options.
 */
function applyModelSpecificTransformations(opts) {
  if (!opts.model || typeof opts.model !== "string") {
    return;
  }
  const features = getModelFeatures(opts.model);
  const isGPT5 = opts.model.toLowerCase().includes("gpt-5");
  if (!isGPT5 && !features.usesMaxCompletionTokens) {
    // Model has none of these restrictions; nothing to normalize.
    return;
  }
  // Rename max_tokens unless the caller already set max_completion_tokens.
  if ("max_tokens" in opts && !("max_completion_tokens" in opts)) {
    debug.api("OPENAI_TRANSFORM_MAX_TOKENS", {
      model: opts.model,
      from: opts.max_tokens
    });
    opts.max_completion_tokens = opts.max_tokens;
    delete opts.max_tokens;
  }
  if (features.requiresTemperatureOne && "temperature" in opts) {
    if (opts.temperature !== 1 && opts.temperature !== void 0) {
      debug.api("OPENAI_TRANSFORM_TEMPERATURE", {
        model: opts.model,
        from: opts.temperature,
        to: 1
      });
      opts.temperature = 1;
    }
  }
  if (isGPT5) {
    // Parameters the GPT-5 endpoints reject outright.
    delete opts.frequency_penalty;
    delete opts.presence_penalty;
    delete opts.logit_bias;
    delete opts.user;
    // Default reasoning effort when the model supports verbosity control.
    if (!opts.reasoning_effort && features.supportsVerbosityControl) {
      opts.reasoning_effort = "medium";
    }
  }
}
285
// Pre-apply the first fix whose error type was previously recorded for this
// (endpoint, model) pair, so known-bad parameters are corrected before the
// request is even sent. GPT-5 models get their dedicated handlers first.
// At most one fix is applied per call.
async function applyModelErrorFixes(opts, baseURL) {
  const handlers = opts.model.startsWith("gpt-5")
    ? [...GPT5_ERROR_HANDLERS, ...ERROR_HANDLERS]
    : ERROR_HANDLERS;
  for (const handler of handlers) {
    if (!hasModelError(baseURL, opts.model, handler.type)) {
      continue;
    }
    await handler.fix(opts);
    return;
  }
}
295
// POST the request to one or more candidate endpoints under `baseURL`,
// falling through to the next candidate on a 404 or a network error.
// MiniMax gets its vendor path first, then the standard /chat/completions;
// all other providers try /chat/completions only.
// Returns { response, endpoint } for the first usable response (including
// non-404 error responses, which the caller inspects); throws the last
// network error if every endpoint failed.
async function tryWithEndpointFallback(baseURL, opts, headers, provider, proxy, signal) {
  const endpointsToTry = [];
  if (provider === "minimax") {
    endpointsToTry.push("/text/chatcompletion_v2", "/chat/completions");
  } else {
    endpointsToTry.push("/chat/completions");
  }
  let lastError = null;
  for (const endpoint of endpointsToTry) {
    try {
      const response = await fetch(`${baseURL}${endpoint}`, {
        method: "POST",
        headers,
        // Force stream:true into the body when the caller asked to stream.
        body: JSON.stringify(opts.stream ? { ...opts, stream: true } : opts),
        ...proxy ? { dispatcher: proxy } : {},
        signal
      });
      if (response.ok) {
        return { response, endpoint };
      }
      // 404 likely means this path doesn't exist on this provider — try the
      // next candidate if there is one.
      // NOTE(review): a 404 on the LAST endpoint of a multi-endpoint list
      // also `continue`s, so the 404 response is discarded and the function
      // falls through to throw "All endpoints failed" — confirm intended.
      if (response.status === 404 && endpointsToTry.length > 1) {
        debug.api("OPENAI_ENDPOINT_FALLBACK", {
          endpoint,
          status: 404,
          reason: "not_found"
        });
        continue;
      }
      // Non-404 error responses are returned for the caller's retry logic.
      return { response, endpoint };
    } catch (error) {
      lastError = error;
      // Network error: fall through to the next endpoint unless this was
      // the last one.
      if (endpointsToTry.indexOf(endpoint) < endpointsToTry.length - 1) {
        debug.api("OPENAI_ENDPOINT_FALLBACK", {
          endpoint,
          reason: "network_error",
          error: error instanceof Error ? error.message : String(error)
        });
        continue;
      }
    }
  }
  throw lastError || new Error("All endpoints failed");
}
338
// Core OpenAI-compatible chat-completion call with self-healing retries.
//
// Flow: normalize opts for the model, pre-apply previously-recorded error
// fixes, POST (via endpoint fallback for OpenAI-compatible providers), and on
// an error response try to match a known error handler — record it, fix opts,
// and recurse with attempt+1. Unmatched errors back off exponentially and
// recurse. Returns an async chunk iterator when opts.stream is set, otherwise
// the parsed JSON response.
//
// NOTE(review): the streaming provider list includes "glm-coding" but the
// non-streaming list below does not — confirm whether the omission is
// deliberate. Both paths duplicate the same error-handling logic.
async function getCompletionWithProfile(modelProfile, opts, attempt = 0, maxAttempts = 10, signal) {
  if (attempt >= maxAttempts) {
    throw new Error("Max attempts reached");
  }
  const provider = modelProfile?.provider || "anthropic";
  const baseURL = modelProfile?.baseURL;
  const apiKey = modelProfile?.apiKey;
  const proxy = getGlobalConfig().proxy ? new ProxyAgent(getGlobalConfig().proxy) : void 0;
  const headers = {
    "Content-Type": "application/json"
  };
  // Azure authenticates via the api-key header; everyone else uses Bearer.
  if (apiKey) {
    if (provider === "azure") {
      headers["api-key"] = apiKey;
    } else {
      headers["Authorization"] = `Bearer ${apiKey}`;
    }
  }
  // In-place normalization for the target model, then fixes learned from
  // earlier failed requests against this endpoint/model.
  applyModelSpecificTransformations(opts);
  await applyModelErrorFixes(opts, baseURL || "");
  debug.api("OPENAI_API_CALL_START", {
    endpoint: baseURL || "DEFAULT_OPENAI",
    model: opts.model,
    provider,
    apiKeyConfigured: !!apiKey,
    apiKeyPrefix: apiKey ? apiKey.substring(0, 8) : null,
    maxTokens: opts.max_tokens,
    temperature: opts.temperature,
    messageCount: opts.messages?.length || 0,
    streamMode: opts.stream,
    timestamp: (/* @__PURE__ */ new Date()).toISOString(),
    modelProfileModelName: modelProfile?.modelName,
    modelProfileName: modelProfile?.name
  });
  // Tool messages must carry string content: flatten content-part arrays to
  // text joined by blank lines, and JSON-stringify any other non-string form.
  opts.messages = opts.messages.map((msg) => {
    if (msg.role === "tool") {
      if (Array.isArray(msg.content)) {
        return {
          ...msg,
          content: msg.content.map((c) => c.text || "").filter(Boolean).join("\n\n") || "(empty content)"
        };
      } else if (typeof msg.content !== "string") {
        return {
          ...msg,
          content: typeof msg.content === "undefined" ? "(empty content)" : JSON.stringify(msg.content)
        };
      }
    }
    return msg;
  });
  const azureApiVersion = "2024-06-01";
  let endpoint = "/chat/completions";
  if (provider === "azure") {
    endpoint = `/chat/completions?api-version=${azureApiVersion}`;
  } else if (provider === "minimax") {
    endpoint = "/text/chatcompletion_v2";
  }
  try {
    if (opts.stream) {
      // ---- Streaming path ----
      const isOpenAICompatible2 = [
        "minimax",
        "kimi",
        "deepseek",
        "siliconflow",
        "qwen",
        "glm",
        "glm-coding",
        "baidu-qianfan",
        "openai",
        "mistral",
        "xai",
        "groq",
        "custom-openai"
      ].includes(provider);
      let response2;
      let usedEndpoint2;
      if (isOpenAICompatible2 && provider !== "azure") {
        const result = await tryWithEndpointFallback(
          baseURL,
          opts,
          headers,
          provider,
          proxy,
          signal
        );
        response2 = result.response;
        usedEndpoint2 = result.endpoint;
      } else {
        response2 = await fetch(`${baseURL}${endpoint}`, {
          method: "POST",
          headers,
          body: JSON.stringify({ ...opts, stream: true }),
          ...proxy ? { dispatcher: proxy } : {},
          signal
        });
        usedEndpoint2 = endpoint;
      }
      if (!response2.ok) {
        if (signal?.aborted) {
          throw new Error("Request cancelled by user");
        }
        try {
          const errorData = await response2.json();
          const hasError = (data) => {
            return typeof data === "object" && data !== null;
          };
          const errorMessage = hasError(errorData) ? errorData.error?.message || errorData.message || `HTTP ${response2.status}` : `HTTP ${response2.status}`;
          const isGPT5 = opts.model.startsWith("gpt-5");
          const handlers = isGPT5 ? [...GPT5_ERROR_HANDLERS, ...ERROR_HANDLERS] : ERROR_HANDLERS;
          // First matching handler: record the error type for future calls,
          // fix opts, and retry immediately (no backoff).
          for (const handler of handlers) {
            if (handler.detect(errorMessage)) {
              debug.api("OPENAI_MODEL_ERROR_DETECTED", {
                model: opts.model,
                type: handler.type,
                errorMessage,
                status: response2.status
              });
              setModelError(
                baseURL || "",
                opts.model,
                handler.type,
                errorMessage
              );
              await handler.fix(opts);
              debug.api("OPENAI_MODEL_ERROR_FIXED", {
                model: opts.model,
                type: handler.type
              });
              return getCompletionWithProfile(
                modelProfile,
                opts,
                attempt + 1,
                maxAttempts,
                signal
              );
            }
          }
          debug.warn("OPENAI_API_ERROR_UNHANDLED", {
            model: opts.model,
            status: response2.status,
            errorMessage
          });
          logAPIError({
            model: opts.model,
            endpoint: `${baseURL}${endpoint}`,
            status: response2.status,
            error: errorMessage,
            request: opts,
            response: errorData,
            provider
          });
        } catch (parseError) {
          debug.warn("OPENAI_API_ERROR_PARSE_FAILED", {
            model: opts.model,
            status: response2.status,
            error: parseError instanceof Error ? parseError.message : String(parseError)
          });
          logAPIError({
            model: opts.model,
            endpoint: `${baseURL}${endpoint}`,
            status: response2.status,
            error: `Could not parse error response: ${parseError.message}`,
            request: opts,
            response: { parseError: parseError.message },
            provider
          });
        }
        // Unhandled error: exponential backoff, then retry.
        const delayMs = getRetryDelay(attempt);
        debug.warn("OPENAI_API_RETRY", {
          model: opts.model,
          status: response2.status,
          attempt: attempt + 1,
          maxAttempts,
          delayMs
        });
        try {
          await abortableDelay(delayMs, signal);
        } catch (error) {
          if (error.message === "Request was aborted") {
            throw new Error("Request cancelled by user");
          }
          throw error;
        }
        return getCompletionWithProfile(
          modelProfile,
          opts,
          attempt + 1,
          maxAttempts,
          signal
        );
      }
      // Success: hand the SSE body to the chunk parser.
      const stream = createStreamProcessor(response2.body, signal);
      return stream;
    }
    // ---- Non-streaming path (mirrors the streaming error handling) ----
    const isOpenAICompatible = [
      "minimax",
      "kimi",
      "deepseek",
      "siliconflow",
      "qwen",
      "glm",
      "baidu-qianfan",
      "openai",
      "mistral",
      "xai",
      "groq",
      "custom-openai"
    ].includes(provider);
    let response;
    let usedEndpoint;
    if (isOpenAICompatible && provider !== "azure") {
      const result = await tryWithEndpointFallback(
        baseURL,
        opts,
        headers,
        provider,
        proxy,
        signal
      );
      response = result.response;
      usedEndpoint = result.endpoint;
    } else {
      response = await fetch(`${baseURL}${endpoint}`, {
        method: "POST",
        headers,
        body: JSON.stringify(opts),
        ...proxy ? { dispatcher: proxy } : {},
        signal
      });
      usedEndpoint = endpoint;
    }
    if (!response.ok) {
      if (signal?.aborted) {
        throw new Error("Request cancelled by user");
      }
      try {
        const errorData = await response.json();
        const hasError = (data) => {
          return typeof data === "object" && data !== null;
        };
        const errorMessage = hasError(errorData) ? errorData.error?.message || errorData.message || `HTTP ${response.status}` : `HTTP ${response.status}`;
        const isGPT5 = opts.model.startsWith("gpt-5");
        const handlers = isGPT5 ? [...GPT5_ERROR_HANDLERS, ...ERROR_HANDLERS] : ERROR_HANDLERS;
        for (const handler of handlers) {
          if (handler.detect(errorMessage)) {
            debug.api("OPENAI_MODEL_ERROR_DETECTED", {
              model: opts.model,
              type: handler.type,
              errorMessage,
              status: response.status
            });
            setModelError(baseURL || "", opts.model, handler.type, errorMessage);
            await handler.fix(opts);
            debug.api("OPENAI_MODEL_ERROR_FIXED", {
              model: opts.model,
              type: handler.type
            });
            return getCompletionWithProfile(
              modelProfile,
              opts,
              attempt + 1,
              maxAttempts,
              signal
            );
          }
        }
        // NOTE(review): unlike the streaming path, this branch does not call
        // logAPIError — confirm whether that asymmetry is intentional.
        debug.warn("OPENAI_API_ERROR_UNHANDLED", {
          model: opts.model,
          status: response.status,
          errorMessage
        });
      } catch (parseError) {
        debug.warn("OPENAI_API_ERROR_PARSE_FAILED", {
          model: opts.model,
          status: response.status,
          error: parseError instanceof Error ? parseError.message : String(parseError)
        });
      }
      const delayMs = getRetryDelay(attempt);
      debug.warn("OPENAI_API_RETRY", {
        model: opts.model,
        status: response.status,
        attempt: attempt + 1,
        maxAttempts,
        delayMs
      });
      try {
        await abortableDelay(delayMs, signal);
      } catch (error) {
        if (error.message === "Request was aborted") {
          throw new Error("Request cancelled by user");
        }
        throw error;
      }
      return getCompletionWithProfile(
        modelProfile,
        opts,
        attempt + 1,
        maxAttempts,
        signal
      );
    }
    const responseData = await response.json();
    return responseData;
  } catch (error) {
    // Network-level failure (fetch threw): back off and retry unless aborted
    // or out of attempts.
    if (signal?.aborted) {
      throw new Error("Request cancelled by user");
    }
    if (attempt < maxAttempts) {
      if (signal?.aborted) {
        throw new Error("Request cancelled by user");
      }
      const delayMs = getRetryDelay(attempt);
      debug.warn("OPENAI_NETWORK_RETRY", {
        model: opts.model,
        attempt: attempt + 1,
        maxAttempts,
        delayMs,
        error: error instanceof Error ? error.message : String(error)
      });
      try {
        await abortableDelay(delayMs, signal);
      } catch (error2) {
        if (error2.message === "Request was aborted") {
          throw new Error("Request cancelled by user");
        }
        throw error2;
      }
      return getCompletionWithProfile(
        modelProfile,
        opts,
        attempt + 1,
        maxAttempts,
        signal
      );
    }
    throw error;
  }
}
677
/**
 * Turn a raw SSE response body into an async generator of parsed JSON chunks.
 *
 * Each `data: {...}` line yields one parsed object; `data: [DONE]`, blank
 * lines, and unparseable payloads are skipped (the latter with a warning).
 * Reading stops on abort, stream end, or a read error; any bytes left in the
 * buffer after the stream ends are given one final parse pass. The reader
 * lock is always released.
 *
 * @param {ReadableStream} stream - The response body; must be non-null.
 * @param {AbortSignal} [signal] - Optional cancellation signal.
 * @returns {AsyncGenerator<object>} parsed SSE payloads.
 */
function createStreamProcessor(stream, signal) {
  if (!stream) {
    throw new Error("Stream is null or undefined");
  }
  async function* iterate() {
    const reader = stream.getReader();
    const decoder = new TextDecoder("utf-8");
    let pending = "";
    try {
      for (;;) {
        if (signal?.aborted) {
          break;
        }
        let readResult;
        try {
          readResult = await reader.read();
        } catch (readErr) {
          // Abort-triggered read failures are expected; anything else is
          // logged before we stop consuming.
          if (!signal?.aborted) {
            debug.warn("OPENAI_STREAM_READ_ERROR", {
              error: readErr instanceof Error ? readErr.message : String(readErr)
            });
          }
          break;
        }
        if (readResult.done) {
          break;
        }
        pending += decoder.decode(readResult.value, { stream: true });
        // Emit every complete line currently buffered.
        let newlineAt = pending.indexOf("\n");
        while (newlineAt !== -1) {
          const line = pending.substring(0, newlineAt).trim();
          pending = pending.substring(newlineAt + 1);
          newlineAt = pending.indexOf("\n");
          if (line === "data: [DONE]" || !line.startsWith("data: ")) {
            continue;
          }
          const payload = line.slice(6).trim();
          if (!payload) {
            continue;
          }
          try {
            yield JSON.parse(payload);
          } catch (parseErr) {
            debug.warn("OPENAI_STREAM_JSON_PARSE_ERROR", {
              data: payload,
              error: parseErr instanceof Error ? parseErr.message : String(parseErr)
            });
          }
        }
      }
      // Stream ended: flush whatever is left in the buffer (e.g. a final
      // data line without a trailing newline).
      if (pending.trim()) {
        for (const line of pending.trim().split("\n")) {
          if (!line.startsWith("data: ") || line === "data: [DONE]") {
            continue;
          }
          const payload = line.slice(6).trim();
          if (!payload) {
            continue;
          }
          try {
            yield JSON.parse(payload);
          } catch (parseErr) {
            debug.warn("OPENAI_STREAM_FINAL_JSON_PARSE_ERROR", {
              data: payload,
              error: parseErr instanceof Error ? parseErr.message : String(parseErr)
            });
          }
        }
      }
    } catch (err) {
      debug.warn("OPENAI_STREAM_UNEXPECTED_ERROR", {
        error: err instanceof Error ? err.message : String(err)
      });
    } finally {
      try {
        reader.releaseLock();
      } catch (releaseErr) {
        debug.warn("OPENAI_STREAM_RELEASE_LOCK_ERROR", {
          error: releaseErr instanceof Error ? releaseErr.message : String(releaseErr)
        });
      }
    }
  }
  return iterate();
}
764
/**
 * Public alias for `createStreamProcessor`: turns an SSE byte stream into
 * an async iterator of parsed JSON event objects.
 *
 * @param {ReadableStream<Uint8Array>} stream - SSE response body.
 * @param {AbortSignal} [signal] - optional cancellation signal.
 * @returns {AsyncGenerator<object>} parsed event objects.
 */
function streamCompletion(stream, signal) {
  const events = createStreamProcessor(stream, signal);
  return events;
}
/**
 * POST a payload to the OpenAI GPT-5 Responses API (`{baseURL}/responses`).
 *
 * @param {object} modelProfile - provider profile; reads `baseURL`
 *   (defaults to the official OpenAI endpoint) and `apiKey`.
 * @param {object} request - raw Responses API payload, serialized as-is.
 * @param {AbortSignal} [signal] - optional cancellation signal passed to fetch.
 * @returns {Promise<Response>} the fetch Response (guaranteed `response.ok`).
 * @throws {Error} on non-2xx status (with status + body text), on
 *   cancellation ("Request cancelled by user"), or on network failure.
 */
async function callGPT5ResponsesAPI(modelProfile, request, signal) {
  const baseURL = modelProfile?.baseURL || "https://api.openai.com/v1";
  const apiKey = modelProfile?.apiKey;
  // Read the config once (the original called getGlobalConfig() twice
  // inside a single expression).
  const proxyUrl = getGlobalConfig().proxy;
  const proxy = proxyUrl ? new ProxyAgent(proxyUrl) : void 0;
  const headers = {
    "Content-Type": "application/json",
    Authorization: `Bearer ${apiKey}`
  };
  const responsesParams = request;
  try {
    const response = await fetch(`${baseURL}/responses`, {
      method: "POST",
      headers,
      body: JSON.stringify(responsesParams),
      ...proxy ? { dispatcher: proxy } : {},
      signal
    });
    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(
        `GPT-5 Responses API error: ${response.status} ${response.statusText} - ${errorText}`
      );
    }
    return response;
  } catch (error) {
    if (signal?.aborted) {
      // Keep the user-facing message but preserve the underlying abort
      // error for diagnostics (the original discarded it).
      throw new Error("Request cancelled by user", { cause: error });
    }
    throw error;
  }
}
/**
 * Route a GPT-5 completion request through the Chat Completions path,
 * applying provider-specific adjustments (Azure, custom-openai) and
 * logging why the Responses API is not used (third-party provider, or
 * streaming which the Responses API does not support here).
 *
 * @param {object} modelProfile - provider profile (baseURL, provider, ...).
 * @param {object} opts - completion options; `model`, `stream`, and
 *   optionally `reasoning_effort` are read. Never mutated.
 * @param {number} [attempt=0] - current retry attempt.
 * @param {number} [maxAttempts=10] - retry ceiling.
 * @param {AbortSignal} [signal] - optional cancellation signal.
 * @returns {Promise<*>} whatever `getCompletionWithProfile` resolves to.
 */
async function getGPT5CompletionWithProfile(modelProfile, opts, attempt = 0, maxAttempts = 10, signal) {
  const features = getModelFeatures(opts.model);
  const isOfficialOpenAI = !modelProfile.baseURL || modelProfile.baseURL.includes("api.openai.com");
  // Options actually forwarded downstream; may diverge from `opts` for
  // providers that reject certain fields.
  let effectiveOpts = opts;
  if (!isOfficialOpenAI) {
    debug.api("GPT5_THIRD_PARTY_PROVIDER", {
      model: opts.model,
      baseURL: modelProfile.baseURL,
      provider: modelProfile.provider,
      supportsResponsesAPI: features.supportsResponsesAPI,
      requestId: getCurrentRequest()?.id
    });
    debug.api("GPT5_PROVIDER_THIRD_PARTY_NOTICE", {
      model: opts.model,
      provider: modelProfile.provider,
      baseURL: modelProfile.baseURL
    });
    if (modelProfile.provider === "azure") {
      // Azure does not accept reasoning_effort. Strip it on a shallow
      // copy instead of `delete opts.reasoning_effort`, which mutated
      // the caller-owned options object.
      const { reasoning_effort, ...rest } = opts;
      effectiveOpts = rest;
    } else if (modelProfile.provider === "custom-openai") {
      debug.api("GPT5_CUSTOM_PROVIDER_OPTIMIZATIONS", {
        model: opts.model,
        provider: modelProfile.provider
      });
    }
  } else if (opts.stream) {
    debug.api("GPT5_STREAMING_MODE", {
      model: opts.model,
      baseURL: modelProfile.baseURL || "official",
      reason: "responses_api_no_streaming",
      requestId: getCurrentRequest()?.id
    });
    debug.api("GPT5_STREAMING_FALLBACK_TO_CHAT_COMPLETIONS", {
      model: opts.model,
      reason: "responses_api_no_streaming"
    });
  }
  debug.api("USING_CHAT_COMPLETIONS_FOR_GPT5", {
    model: opts.model,
    baseURL: modelProfile.baseURL || "official",
    provider: modelProfile.provider,
    reason: isOfficialOpenAI ? "streaming_or_fallback" : "third_party_provider",
    requestId: getCurrentRequest()?.id
  });
  return await getCompletionWithProfile(
    modelProfile,
    effectiveOpts,
    attempt,
    maxAttempts,
    signal
  );
}
/**
 * Fetch the model list from an OpenAI-compatible `/models` endpoint.
 *
 * @param {string} baseURL - API base URL; `/v1` is appended when the URL
 *   does not already contain a version segment (e.g. `/v1`, `/v2`).
 * @param {string} apiKey - bearer token for the Authorization header.
 * @returns {Promise<Array>} the raw model entries from the provider.
 * @throws {Error} with a user-facing message describing the failure.
 */
async function fetchCustomModels(baseURL, apiKey) {
  // Errors built here carry a marker so the outer catch can rethrow them
  // verbatim. The original matched on message substrings, which silently
  // dropped the 429 message and the generic non-2xx message (neither
  // contains a whitelisted substring), collapsing them into the
  // catch-all error below.
  const userFacing = (message) => {
    const err = new Error(message);
    err.isUserFacing = true;
    return err;
  };
  try {
    const hasVersionNumber = /\/v\d+/.test(baseURL);
    const cleanBaseURL = baseURL.replace(/\/+$/, "");
    const modelsURL = hasVersionNumber ? `${cleanBaseURL}/models` : `${cleanBaseURL}/v1/models`;
    const response = await fetch(modelsURL, {
      method: "GET",
      headers: {
        Authorization: `Bearer ${apiKey}`,
        "Content-Type": "application/json"
      }
    });
    if (!response.ok) {
      // Map well-known statuses to actionable messages.
      if (response.status === 401) {
        throw userFacing(
          "Invalid API key. Please check your API key and try again."
        );
      }
      if (response.status === 403) {
        throw userFacing(
          "API key does not have permission to access models. Please check your API key permissions."
        );
      }
      if (response.status === 404) {
        throw userFacing(
          "API endpoint not found. Please check if the base URL is correct and supports the /models endpoint."
        );
      }
      if (response.status === 429) {
        throw userFacing(
          "Too many requests. Please wait a moment and try again."
        );
      }
      if (response.status >= 500) {
        throw userFacing(
          "API service is temporarily unavailable. Please try again later."
        );
      }
      throw userFacing(
        `Unable to connect to API (${response.status}). Please check your base URL, API key, and internet connection.`
      );
    }
    const data = await response.json();
    // Providers disagree on the envelope: OpenAI uses { data: [...] },
    // some return a bare array, others { models: [...] }.
    if (typeof data === "object" && data !== null && Array.isArray(data.data)) {
      return data.data;
    }
    if (Array.isArray(data)) {
      return data;
    }
    if (typeof data === "object" && data !== null && Array.isArray(data.models)) {
      return data.models;
    }
    throw userFacing(
      'API returned unexpected response format. Expected an array of models or an object with a "data" or "models" array.'
    );
  } catch (error) {
    // Our own user-facing errors pass through untouched.
    if (error instanceof Error && error.isUserFacing) {
      throw error;
    }
    debug.warn("CUSTOM_API_MODELS_FETCH_FAILED", {
      baseURL,
      error: error instanceof Error ? error.message : String(error)
    });
    if (error instanceof Error && error.message.includes("fetch")) {
      throw new Error(
        "Unable to connect to the API. Please check the base URL and your internet connection."
      );
    }
    throw new Error(
      "Failed to fetch models from custom API. Please check your configuration and try again."
    );
  }
}
+
930
+ export {
931
+ getModelFeatures,
932
+ applyModelSpecificTransformations,
933
+ getCompletionWithProfile,
934
+ createStreamProcessor,
935
+ streamCompletion,
936
+ callGPT5ResponsesAPI,
937
+ getGPT5CompletionWithProfile,
938
+ fetchCustomModels
939
+ };