pybao-cli 1.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (185)
  1. package/LICENSE +201 -0
  2. package/README.md +440 -0
  3. package/README.zh-CN.md +338 -0
  4. package/cli-acp.js +82 -0
  5. package/cli.js +105 -0
  6. package/dist/REPL-WPV32MTF.js +42 -0
  7. package/dist/REPL-WPV32MTF.js.map +7 -0
  8. package/dist/acp-75HO2LBV.js +1357 -0
  9. package/dist/acp-75HO2LBV.js.map +7 -0
  10. package/dist/agentsValidate-6Z57ARKC.js +373 -0
  11. package/dist/agentsValidate-6Z57ARKC.js.map +7 -0
  12. package/dist/ask-NXXXCGY4.js +125 -0
  13. package/dist/ask-NXXXCGY4.js.map +7 -0
  14. package/dist/autoUpdater-PJMGNPUG.js +17 -0
  15. package/dist/autoUpdater-PJMGNPUG.js.map +7 -0
  16. package/dist/chunk-27GYWUY2.js +72 -0
  17. package/dist/chunk-27GYWUY2.js.map +7 -0
  18. package/dist/chunk-3DFBSQIT.js +23 -0
  19. package/dist/chunk-3DFBSQIT.js.map +7 -0
  20. package/dist/chunk-3KNGJX7Q.js +794 -0
  21. package/dist/chunk-3KNGJX7Q.js.map +7 -0
  22. package/dist/chunk-3PDD7M4T.js +164 -0
  23. package/dist/chunk-3PDD7M4T.js.map +7 -0
  24. package/dist/chunk-3ZNSAB7B.js +515 -0
  25. package/dist/chunk-3ZNSAB7B.js.map +7 -0
  26. package/dist/chunk-4SNFQYCY.js +511 -0
  27. package/dist/chunk-4SNFQYCY.js.map +7 -0
  28. package/dist/chunk-4XPNRLJG.js +1609 -0
  29. package/dist/chunk-4XPNRLJG.js.map +7 -0
  30. package/dist/chunk-5P7HBXTD.js +12 -0
  31. package/dist/chunk-5P7HBXTD.js.map +7 -0
  32. package/dist/chunk-6RZIUY5K.js +191 -0
  33. package/dist/chunk-6RZIUY5K.js.map +7 -0
  34. package/dist/chunk-6WELHKDA.js +240 -0
  35. package/dist/chunk-6WELHKDA.js.map +7 -0
  36. package/dist/chunk-7AAE6EO2.js +145 -0
  37. package/dist/chunk-7AAE6EO2.js.map +7 -0
  38. package/dist/chunk-A3BVXXA3.js +47 -0
  39. package/dist/chunk-A3BVXXA3.js.map +7 -0
  40. package/dist/chunk-A6PUMROK.js +152 -0
  41. package/dist/chunk-A6PUMROK.js.map +7 -0
  42. package/dist/chunk-BH3Y62E3.js +11 -0
  43. package/dist/chunk-BH3Y62E3.js.map +7 -0
  44. package/dist/chunk-BJSWTHRM.js +16 -0
  45. package/dist/chunk-BJSWTHRM.js.map +7 -0
  46. package/dist/chunk-BQA2EOUU.js +124 -0
  47. package/dist/chunk-BQA2EOUU.js.map +7 -0
  48. package/dist/chunk-CZZKRPE2.js +19 -0
  49. package/dist/chunk-CZZKRPE2.js.map +7 -0
  50. package/dist/chunk-ERMQRV55.js +24 -0
  51. package/dist/chunk-ERMQRV55.js.map +7 -0
  52. package/dist/chunk-HB2P6645.js +34 -0
  53. package/dist/chunk-HB2P6645.js.map +7 -0
  54. package/dist/chunk-HIRIJ2LQ.js +1256 -0
  55. package/dist/chunk-HIRIJ2LQ.js.map +7 -0
  56. package/dist/chunk-ICTEVBLN.js +735 -0
  57. package/dist/chunk-ICTEVBLN.js.map +7 -0
  58. package/dist/chunk-JKGOGSFT.js +128 -0
  59. package/dist/chunk-JKGOGSFT.js.map +7 -0
  60. package/dist/chunk-JZDE77EH.js +836 -0
  61. package/dist/chunk-JZDE77EH.js.map +7 -0
  62. package/dist/chunk-M624LT6O.js +17 -0
  63. package/dist/chunk-M624LT6O.js.map +7 -0
  64. package/dist/chunk-OMELVAJD.js +96 -0
  65. package/dist/chunk-OMELVAJD.js.map +7 -0
  66. package/dist/chunk-OUXHGDLH.js +95 -0
  67. package/dist/chunk-OUXHGDLH.js.map +7 -0
  68. package/dist/chunk-PCXUZ6AT.js +249 -0
  69. package/dist/chunk-PCXUZ6AT.js.map +7 -0
  70. package/dist/chunk-Q24ZGKIE.js +1097 -0
  71. package/dist/chunk-Q24ZGKIE.js.map +7 -0
  72. package/dist/chunk-QBHEERCF.js +30254 -0
  73. package/dist/chunk-QBHEERCF.js.map +7 -0
  74. package/dist/chunk-QIHB5PYM.js +472 -0
  75. package/dist/chunk-QIHB5PYM.js.map +7 -0
  76. package/dist/chunk-RQVLBMP7.js +24 -0
  77. package/dist/chunk-RQVLBMP7.js.map +7 -0
  78. package/dist/chunk-SWYJOV5E.js +490 -0
  79. package/dist/chunk-SWYJOV5E.js.map +7 -0
  80. package/dist/chunk-T6GVXTNQ.js +21 -0
  81. package/dist/chunk-T6GVXTNQ.js.map +7 -0
  82. package/dist/chunk-T7GPUZVK.js +766 -0
  83. package/dist/chunk-T7GPUZVK.js.map +7 -0
  84. package/dist/chunk-TXFCNQDE.js +2934 -0
  85. package/dist/chunk-TXFCNQDE.js.map +7 -0
  86. package/dist/chunk-UNNVICVU.js +95 -0
  87. package/dist/chunk-UNNVICVU.js.map +7 -0
  88. package/dist/chunk-UUNVJZWA.js +515 -0
  89. package/dist/chunk-UUNVJZWA.js.map +7 -0
  90. package/dist/chunk-VRGR4ZTQ.js +49 -0
  91. package/dist/chunk-VRGR4ZTQ.js.map +7 -0
  92. package/dist/chunk-VTVTEE5N.js +2613 -0
  93. package/dist/chunk-VTVTEE5N.js.map +7 -0
  94. package/dist/chunk-WPTPPOYN.js +936 -0
  95. package/dist/chunk-WPTPPOYN.js.map +7 -0
  96. package/dist/chunk-XXFY63TM.js +196 -0
  97. package/dist/chunk-XXFY63TM.js.map +7 -0
  98. package/dist/chunk-Z3HMXDXP.js +654 -0
  99. package/dist/chunk-Z3HMXDXP.js.map +7 -0
  100. package/dist/chunk-ZJGXEWKF.js +138 -0
  101. package/dist/chunk-ZJGXEWKF.js.map +7 -0
  102. package/dist/cli-RFYBXM7F.js +3917 -0
  103. package/dist/cli-RFYBXM7F.js.map +7 -0
  104. package/dist/commands-YOXMODDO.js +46 -0
  105. package/dist/commands-YOXMODDO.js.map +7 -0
  106. package/dist/config-5OPX3H2K.js +81 -0
  107. package/dist/config-5OPX3H2K.js.map +7 -0
  108. package/dist/context-THRRBPFP.js +30 -0
  109. package/dist/context-THRRBPFP.js.map +7 -0
  110. package/dist/costTracker-ELNBZ2DN.js +19 -0
  111. package/dist/costTracker-ELNBZ2DN.js.map +7 -0
  112. package/dist/customCommands-4XOZH44N.js +25 -0
  113. package/dist/customCommands-4XOZH44N.js.map +7 -0
  114. package/dist/env-EL4KBHMB.js +22 -0
  115. package/dist/env-EL4KBHMB.js.map +7 -0
  116. package/dist/index.js +34 -0
  117. package/dist/index.js.map +7 -0
  118. package/dist/kodeAgentSessionId-PROTVRBR.js +13 -0
  119. package/dist/kodeAgentSessionId-PROTVRBR.js.map +7 -0
  120. package/dist/kodeAgentSessionLoad-UMPV7MC3.js +18 -0
  121. package/dist/kodeAgentSessionLoad-UMPV7MC3.js.map +7 -0
  122. package/dist/kodeAgentSessionResume-YJS4FVQM.js +16 -0
  123. package/dist/kodeAgentSessionResume-YJS4FVQM.js.map +7 -0
  124. package/dist/kodeAgentStreamJson-3T26CHCP.js +13 -0
  125. package/dist/kodeAgentStreamJson-3T26CHCP.js.map +7 -0
  126. package/dist/kodeAgentStreamJsonSession-BZS2VDCY.js +131 -0
  127. package/dist/kodeAgentStreamJsonSession-BZS2VDCY.js.map +7 -0
  128. package/dist/kodeAgentStructuredStdio-TNB6U6SP.js +10 -0
  129. package/dist/kodeAgentStructuredStdio-TNB6U6SP.js.map +7 -0
  130. package/dist/kodeHooks-VUAWIY2D.js +36 -0
  131. package/dist/kodeHooks-VUAWIY2D.js.map +7 -0
  132. package/dist/llm-A3BCM4Q2.js +3118 -0
  133. package/dist/llm-A3BCM4Q2.js.map +7 -0
  134. package/dist/llmLazy-ZJSRLZVD.js +15 -0
  135. package/dist/llmLazy-ZJSRLZVD.js.map +7 -0
  136. package/dist/loader-HZQBWO74.js +28 -0
  137. package/dist/loader-HZQBWO74.js.map +7 -0
  138. package/dist/mcp-XKOJ55B2.js +49 -0
  139. package/dist/mcp-XKOJ55B2.js.map +7 -0
  140. package/dist/mentionProcessor-ANYU5MLF.js +211 -0
  141. package/dist/mentionProcessor-ANYU5MLF.js.map +7 -0
  142. package/dist/messages-75DL5XBP.js +63 -0
  143. package/dist/messages-75DL5XBP.js.map +7 -0
  144. package/dist/model-OPJGJZRC.js +30 -0
  145. package/dist/model-OPJGJZRC.js.map +7 -0
  146. package/dist/openai-DT54BAFP.js +29 -0
  147. package/dist/openai-DT54BAFP.js.map +7 -0
  148. package/dist/outputStyles-TPFVI52O.js +28 -0
  149. package/dist/outputStyles-TPFVI52O.js.map +7 -0
  150. package/dist/package.json +4 -0
  151. package/dist/pluginRuntime-W74PYSZ4.js +218 -0
  152. package/dist/pluginRuntime-W74PYSZ4.js.map +7 -0
  153. package/dist/pluginValidation-FALYRVI2.js +17 -0
  154. package/dist/pluginValidation-FALYRVI2.js.map +7 -0
  155. package/dist/prompts-J4TPRMJ3.js +48 -0
  156. package/dist/prompts-J4TPRMJ3.js.map +7 -0
  157. package/dist/query-K3QKBVDN.js +50 -0
  158. package/dist/query-K3QKBVDN.js.map +7 -0
  159. package/dist/responsesStreaming-HMB74TRD.js +10 -0
  160. package/dist/responsesStreaming-HMB74TRD.js.map +7 -0
  161. package/dist/ripgrep-XJGSUBG7.js +17 -0
  162. package/dist/ripgrep-XJGSUBG7.js.map +7 -0
  163. package/dist/skillMarketplace-AUGKNCPW.js +37 -0
  164. package/dist/skillMarketplace-AUGKNCPW.js.map +7 -0
  165. package/dist/state-DQYRXKTG.js +16 -0
  166. package/dist/state-DQYRXKTG.js.map +7 -0
  167. package/dist/theme-MS5HDUBJ.js +14 -0
  168. package/dist/theme-MS5HDUBJ.js.map +7 -0
  169. package/dist/toolPermissionContext-GYD5LYFK.js +17 -0
  170. package/dist/toolPermissionContext-GYD5LYFK.js.map +7 -0
  171. package/dist/toolPermissionSettings-4MPZVYDR.js +18 -0
  172. package/dist/toolPermissionSettings-4MPZVYDR.js.map +7 -0
  173. package/dist/tools-QW6SIJLJ.js +47 -0
  174. package/dist/tools-QW6SIJLJ.js.map +7 -0
  175. package/dist/userInput-F2PGBRFU.js +311 -0
  176. package/dist/userInput-F2PGBRFU.js.map +7 -0
  177. package/dist/uuid-GYYCQ6QK.js +9 -0
  178. package/dist/uuid-GYYCQ6QK.js.map +7 -0
  179. package/dist/yoga.wasm +0 -0
  180. package/package.json +136 -0
  181. package/scripts/binary-utils.cjs +62 -0
  182. package/scripts/cli-acp-wrapper.cjs +82 -0
  183. package/scripts/cli-wrapper.cjs +105 -0
  184. package/scripts/postinstall.js +144 -0
  185. package/yoga.wasm +0 -0
package/dist/chunk-WPTPPOYN.js
@@ -0,0 +1,936 @@
+ import { createRequire as __pybCreateRequire } from "node:module";
+ const require = __pybCreateRequire(import.meta.url);
+ import {
+   getSessionState,
+   setSessionState
+ } from "./chunk-ERMQRV55.js";
+ import {
+   getGlobalConfig
+ } from "./chunk-JZDE77EH.js";
+ import {
+   debug,
+   getCurrentRequest,
+   logAPIError
+ } from "./chunk-3KNGJX7Q.js";
+
+ // src/services/ai/openai.ts
+ import { ProxyAgent, fetch } from "undici";
+ var RETRY_CONFIG = {
+   BASE_DELAY_MS: 1e3,
+   MAX_DELAY_MS: 32e3,
+   MAX_SERVER_DELAY_MS: 6e4,
+   JITTER_FACTOR: 0.1
+ };
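+ // Retry helpers: exponential backoff with jitter, capped at MAX_DELAY_MS; a
+ // server-supplied Retry-After value wins, capped at MAX_SERVER_DELAY_MS.
+ // abortableDelay waits while honoring an AbortSignal.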
+ function getRetryDelay(attempt, retryAfter) {
+   if (retryAfter) {
+     const retryAfterMs = parseInt(retryAfter) * 1e3;
+     if (!isNaN(retryAfterMs) && retryAfterMs > 0) {
+       return Math.min(retryAfterMs, RETRY_CONFIG.MAX_SERVER_DELAY_MS);
+     }
+   }
+   const delay = RETRY_CONFIG.BASE_DELAY_MS * Math.pow(2, attempt - 1);
+   const jitter = Math.random() * RETRY_CONFIG.JITTER_FACTOR * delay;
+   return Math.min(delay + jitter, RETRY_CONFIG.MAX_DELAY_MS);
+ }
+ function abortableDelay(delayMs, signal) {
+   return new Promise((resolve, reject) => {
+     if (signal?.aborted) {
+       reject(new Error("Request was aborted"));
+       return;
+     }
+     const timeoutId = setTimeout(() => {
+       resolve();
+     }, delayMs);
+     if (signal) {
+       const abortHandler = () => {
+         clearTimeout(timeoutId);
+         reject(new Error("Request was aborted"));
+       };
+       signal.addEventListener("abort", abortHandler, { once: true });
+     }
+   });
+ }
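+ // Session-scoped memo of per-model parameter quirks, keyed baseURL:model:errorType,
+ // so a quirk detected once is re-applied to later requests by applyModelErrorFixes.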
+ function getModelErrorKey(baseURL, model, type) {
+   return `${baseURL}:${model}:${type}`;
+ }
+ function hasModelError(baseURL, model, type) {
+   return !!getSessionState("modelErrors")[getModelErrorKey(baseURL, model, type)];
+ }
+ function setModelError(baseURL, model, type, error) {
+   setSessionState("modelErrors", {
+     [getModelErrorKey(baseURL, model, type)]: error
+   });
+ }
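+ // Error-driven request fixups: each handler's detect() matches provider error
+ // text and fix() mutates the request options in place before the retry. The
+ // GPT-5 handlers are checked ahead of the generic ERROR_HANDLERS set.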
+ var GPT5_ERROR_HANDLERS = [
+   {
+     type: "max_completion_tokens" /* MaxCompletionTokens */,
+     detect: (errMsg) => {
+       const lowerMsg = errMsg.toLowerCase();
+       return lowerMsg.includes("unsupported parameter: 'max_tokens'") && lowerMsg.includes("'max_completion_tokens'") || lowerMsg.includes("max_tokens") && lowerMsg.includes("max_completion_tokens") || lowerMsg.includes("max_tokens") && lowerMsg.includes("not supported") || lowerMsg.includes("max_tokens") && lowerMsg.includes("use max_completion_tokens") || lowerMsg.includes("invalid parameter") && lowerMsg.includes("max_tokens") || lowerMsg.includes("parameter error") && lowerMsg.includes("max_tokens");
+     },
+     fix: async (opts) => {
+       debug.api("GPT5_FIX_MAX_TOKENS", {
+         from: opts.max_tokens,
+         to: opts.max_tokens
+       });
+       if ("max_tokens" in opts) {
+         opts.max_completion_tokens = opts.max_tokens;
+         delete opts.max_tokens;
+       }
+     }
+   },
+   {
+     type: "temperature_restriction" /* TemperatureRestriction */,
+     detect: (errMsg) => {
+       const lowerMsg = errMsg.toLowerCase();
+       return lowerMsg.includes("temperature") && (lowerMsg.includes("only supports") || lowerMsg.includes("must be 1") || lowerMsg.includes("invalid temperature"));
+     },
+     fix: async (opts) => {
+       debug.api("GPT5_FIX_TEMPERATURE", {
+         from: opts.temperature,
+         to: 1
+       });
+       opts.temperature = 1;
+     }
+   }
+ ];
+ var ERROR_HANDLERS = [
+   {
+     type: "1024" /* MaxLength */,
+     detect: (errMsg) => errMsg.includes("Expected a string with maximum length 1024"),
+     fix: async (opts) => {
+       const toolDescriptions = {};
+       for (const tool of opts.tools || []) {
+         if (tool.function.description.length <= 1024) continue;
+         let str = "";
+         let remainder = "";
+         for (let line of tool.function.description.split("\n")) {
+           if (str.length + line.length < 1024) {
+             str += line + "\n";
+           } else {
+             remainder += line + "\n";
+           }
+         }
+         tool.function.description = str;
+         toolDescriptions[tool.function.name] = remainder;
+       }
+       if (Object.keys(toolDescriptions).length > 0) {
+         let content = "<additional-tool-usage-instructions>\n\n";
+         for (const [name, description] of Object.entries(toolDescriptions)) {
+           content += `<${name}>
+ ${description}
+ </${name}>
+
+ `;
+         }
+         content += "</additional-tool-usage-instructions>";
+         for (let i = opts.messages.length - 1; i >= 0; i--) {
+           if (opts.messages[i].role === "system") {
+             opts.messages.splice(i + 1, 0, {
+               role: "system",
+               content
+             });
+             break;
+           }
+         }
+       }
+     }
+   },
+   {
+     type: "max_completion_tokens" /* MaxCompletionTokens */,
+     detect: (errMsg) => errMsg.includes("Use 'max_completion_tokens'"),
+     fix: async (opts) => {
+       opts.max_completion_tokens = opts.max_tokens;
+       delete opts.max_tokens;
+     }
+   },
+   {
+     type: "stream_options" /* StreamOptions */,
+     detect: (errMsg) => errMsg.includes("stream_options"),
+     fix: async (opts) => {
+       delete opts.stream_options;
+     }
+   },
+   {
+     type: "citations" /* Citations */,
+     detect: (errMsg) => errMsg.includes("Extra inputs are not permitted") && errMsg.includes("citations"),
+     fix: async (opts) => {
+       if (!opts.messages) return;
+       for (const message of opts.messages) {
+         if (!message) continue;
+         if (Array.isArray(message.content)) {
+           for (const item of message.content) {
+             if (item && typeof item === "object") {
+               const itemObj = item;
+               if ("citations" in itemObj) {
+                 delete itemObj.citations;
+               }
+             }
+           }
+         } else if (message.content && typeof message.content === "object") {
+           const contentObj = message.content;
+           if ("citations" in contentObj) {
+             delete contentObj.citations;
+           }
+         }
+       }
+     }
+   }
+ ];
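+ // Capability table for models with non-standard parameter handling;
+ // getModelFeatures() below falls back to substring matching for unlisted names.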
+ var MODEL_FEATURES = {
+   o1: { usesMaxCompletionTokens: true },
+   "o1-preview": { usesMaxCompletionTokens: true },
+   "o1-mini": { usesMaxCompletionTokens: true },
+   "o1-pro": { usesMaxCompletionTokens: true },
+   "o3-mini": { usesMaxCompletionTokens: true },
+   "gpt-5": {
+     usesMaxCompletionTokens: true,
+     supportsResponsesAPI: true,
+     requiresTemperatureOne: true,
+     supportsVerbosityControl: true,
+     supportsCustomTools: true,
+     supportsAllowedTools: true
+   },
+   "gpt-5-mini": {
+     usesMaxCompletionTokens: true,
+     supportsResponsesAPI: true,
+     requiresTemperatureOne: true,
+     supportsVerbosityControl: true,
+     supportsCustomTools: true,
+     supportsAllowedTools: true
+   },
+   "gpt-5-nano": {
+     usesMaxCompletionTokens: true,
+     supportsResponsesAPI: true,
+     requiresTemperatureOne: true,
+     supportsVerbosityControl: true,
+     supportsCustomTools: true,
+     supportsAllowedTools: true
+   },
+   "gpt-5-chat-latest": {
+     usesMaxCompletionTokens: true,
+     supportsResponsesAPI: false,
+     requiresTemperatureOne: true,
+     supportsVerbosityControl: true
+   }
+ };
+ function getModelFeatures(modelName) {
+   if (!modelName || typeof modelName !== "string") {
+     return { usesMaxCompletionTokens: false };
+   }
+   if (MODEL_FEATURES[modelName]) {
+     return MODEL_FEATURES[modelName];
+   }
+   if (modelName.toLowerCase().includes("gpt-5")) {
+     return {
+       usesMaxCompletionTokens: true,
+       supportsResponsesAPI: true,
+       requiresTemperatureOne: true,
+       supportsVerbosityControl: true,
+       supportsCustomTools: true,
+       supportsAllowedTools: true
+     };
+   }
+   for (const [key, features] of Object.entries(MODEL_FEATURES)) {
+     if (modelName.includes(key)) {
+       return features;
+     }
+   }
+   return { usesMaxCompletionTokens: false };
+ }
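+ // Proactive request shaping before dispatch: rename max_tokens to
+ // max_completion_tokens where required, pin temperature to 1, and strip
+ // parameters GPT-5 endpoints reject (defaulting reasoning_effort to "medium").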
+ function applyModelSpecificTransformations(opts) {
+   if (!opts.model || typeof opts.model !== "string") {
+     return;
+   }
+   const features = getModelFeatures(opts.model);
+   const isGPT5 = opts.model.toLowerCase().includes("gpt-5");
+   if (isGPT5 || features.usesMaxCompletionTokens) {
+     if ("max_tokens" in opts && !("max_completion_tokens" in opts)) {
+       debug.api("OPENAI_TRANSFORM_MAX_TOKENS", {
+         model: opts.model,
+         from: opts.max_tokens
+       });
+       opts.max_completion_tokens = opts.max_tokens;
+       delete opts.max_tokens;
+     }
+     if (features.requiresTemperatureOne && "temperature" in opts) {
+       if (opts.temperature !== 1 && opts.temperature !== void 0) {
+         debug.api("OPENAI_TRANSFORM_TEMPERATURE", {
+           model: opts.model,
+           from: opts.temperature,
+           to: 1
+         });
+         opts.temperature = 1;
+       }
+     }
+     if (isGPT5) {
+       delete opts.frequency_penalty;
+       delete opts.presence_penalty;
+       delete opts.logit_bias;
+       delete opts.user;
+       if (!opts.reasoning_effort && features.supportsVerbosityControl) {
+         opts.reasoning_effort = "medium";
+       }
+     }
+   } else {
+     if (features.usesMaxCompletionTokens && "max_tokens" in opts && !("max_completion_tokens" in opts)) {
+       opts.max_completion_tokens = opts.max_tokens;
+       delete opts.max_tokens;
+     }
+   }
+ }
+ async function applyModelErrorFixes(opts, baseURL) {
+   const isGPT5 = opts.model.startsWith("gpt-5");
+   const handlers = isGPT5 ? [...GPT5_ERROR_HANDLERS, ...ERROR_HANDLERS] : ERROR_HANDLERS;
+   for (const handler of handlers) {
+     if (hasModelError(baseURL, opts.model, handler.type)) {
+       await handler.fix(opts);
+       return;
+     }
+   }
+ }
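+ // MiniMax exposes /text/chatcompletion_v2 alongside /chat/completions;
+ // candidate endpoints are tried in order, falling through on 404 or network error.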
+ async function tryWithEndpointFallback(baseURL, opts, headers, provider, proxy, signal) {
+   const endpointsToTry = [];
+   if (provider === "minimax") {
+     endpointsToTry.push("/text/chatcompletion_v2", "/chat/completions");
+   } else {
+     endpointsToTry.push("/chat/completions");
+   }
+   let lastError = null;
+   for (const endpoint of endpointsToTry) {
+     try {
+       const response = await fetch(`${baseURL}${endpoint}`, {
+         method: "POST",
+         headers,
+         body: JSON.stringify(opts.stream ? { ...opts, stream: true } : opts),
+         dispatcher: proxy,
+         signal
+       });
+       if (response.ok) {
+         return { response, endpoint };
+       }
+       if (response.status === 404 && endpointsToTry.length > 1) {
+         debug.api("OPENAI_ENDPOINT_FALLBACK", {
+           endpoint,
+           status: 404,
+           reason: "not_found"
+         });
+         continue;
+       }
+       return { response, endpoint };
+     } catch (error) {
+       lastError = error;
+       if (endpointsToTry.indexOf(endpoint) < endpointsToTry.length - 1) {
+         debug.api("OPENAI_ENDPOINT_FALLBACK", {
+           endpoint,
+           reason: "network_error",
+           error: error instanceof Error ? error.message : String(error)
+         });
+         continue;
+       }
+     }
+   }
+   throw lastError || new Error("All endpoints failed");
+ }
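+ // Main completion entry point: builds auth headers, applies the transforms and
+ // memoized fixes above, then issues the request with backoff-based retries.
+ // Streaming response bodies are handed to createStreamProcessor.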
+ async function getCompletionWithProfile(modelProfile, opts, attempt = 0, maxAttempts = 10, signal) {
+   if (attempt >= maxAttempts) {
+     throw new Error("Max attempts reached");
+   }
+   const provider = modelProfile?.provider || "anthropic";
+   const baseURL = modelProfile?.baseURL;
+   const apiKey = modelProfile?.apiKey;
+   const proxy = getGlobalConfig().proxy ? new ProxyAgent(getGlobalConfig().proxy) : void 0;
+   const headers = {
+     "Content-Type": "application/json"
+   };
+   if (apiKey) {
+     if (provider === "azure") {
+       headers["api-key"] = apiKey;
+     } else {
+       headers["Authorization"] = `Bearer ${apiKey}`;
+     }
+   }
+   applyModelSpecificTransformations(opts);
+   await applyModelErrorFixes(opts, baseURL || "");
+   debug.api("OPENAI_API_CALL_START", {
+     endpoint: baseURL || "DEFAULT_OPENAI",
+     model: opts.model,
+     provider,
+     apiKeyConfigured: !!apiKey,
+     apiKeyPrefix: apiKey ? apiKey.substring(0, 8) : null,
+     maxTokens: opts.max_tokens,
+     temperature: opts.temperature,
+     messageCount: opts.messages?.length || 0,
+     streamMode: opts.stream,
+     timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+     modelProfileModelName: modelProfile?.modelName,
+     modelProfileName: modelProfile?.name
+   });
+   opts.messages = opts.messages.map((msg) => {
+     if (msg.role === "tool") {
+       if (Array.isArray(msg.content)) {
+         return {
+           ...msg,
+           content: msg.content.map((c) => c.text || "").filter(Boolean).join("\n\n") || "(empty content)"
+         };
+       } else if (typeof msg.content !== "string") {
+         return {
+           ...msg,
+           content: typeof msg.content === "undefined" ? "(empty content)" : JSON.stringify(msg.content)
+         };
+       }
+     }
+     return msg;
+   });
+   const azureApiVersion = "2024-06-01";
+   let endpoint = "/chat/completions";
+   if (provider === "azure") {
+     endpoint = `/chat/completions?api-version=${azureApiVersion}`;
+   } else if (provider === "minimax") {
+     endpoint = "/text/chatcompletion_v2";
+   }
+   try {
+     if (opts.stream) {
+       const isOpenAICompatible2 = [
+         "minimax",
+         "kimi",
+         "deepseek",
+         "siliconflow",
+         "qwen",
+         "glm",
+         "glm-coding",
+         "baidu-qianfan",
+         "openai",
+         "mistral",
+         "xai",
+         "groq",
+         "custom-openai"
+       ].includes(provider);
+       let response2;
+       let usedEndpoint2;
+       if (isOpenAICompatible2 && provider !== "azure") {
+         const result = await tryWithEndpointFallback(
+           baseURL,
+           opts,
+           headers,
+           provider,
+           proxy,
+           signal
+         );
+         response2 = result.response;
+         usedEndpoint2 = result.endpoint;
+       } else {
+         response2 = await fetch(`${baseURL}${endpoint}`, {
+           method: "POST",
+           headers,
+           body: JSON.stringify({ ...opts, stream: true }),
+           dispatcher: proxy,
+           signal
+         });
+         usedEndpoint2 = endpoint;
+       }
+       if (!response2.ok) {
+         if (signal?.aborted) {
+           throw new Error("Request cancelled by user");
+         }
+         try {
+           const errorData = await response2.json();
+           const hasError = (data) => {
+             return typeof data === "object" && data !== null;
+           };
+           const errorMessage = hasError(errorData) ? errorData.error?.message || errorData.message || `HTTP ${response2.status}` : `HTTP ${response2.status}`;
+           const isGPT5 = opts.model.startsWith("gpt-5");
+           const handlers = isGPT5 ? [...GPT5_ERROR_HANDLERS, ...ERROR_HANDLERS] : ERROR_HANDLERS;
+           for (const handler of handlers) {
+             if (handler.detect(errorMessage)) {
+               debug.api("OPENAI_MODEL_ERROR_DETECTED", {
+                 model: opts.model,
+                 type: handler.type,
+                 errorMessage,
+                 status: response2.status
+               });
+               setModelError(
+                 baseURL || "",
+                 opts.model,
+                 handler.type,
+                 errorMessage
+               );
+               await handler.fix(opts);
+               debug.api("OPENAI_MODEL_ERROR_FIXED", {
+                 model: opts.model,
+                 type: handler.type
+               });
+               return getCompletionWithProfile(
+                 modelProfile,
+                 opts,
+                 attempt + 1,
+                 maxAttempts,
+                 signal
+               );
+             }
+           }
+           debug.warn("OPENAI_API_ERROR_UNHANDLED", {
+             model: opts.model,
+             status: response2.status,
+             errorMessage
+           });
+           logAPIError({
+             model: opts.model,
+             endpoint: `${baseURL}${endpoint}`,
+             status: response2.status,
+             error: errorMessage,
+             request: opts,
+             response: errorData,
+             provider
+           });
+         } catch (parseError) {
+           debug.warn("OPENAI_API_ERROR_PARSE_FAILED", {
+             model: opts.model,
+             status: response2.status,
+             error: parseError instanceof Error ? parseError.message : String(parseError)
+           });
+           logAPIError({
+             model: opts.model,
+             endpoint: `${baseURL}${endpoint}`,
+             status: response2.status,
+             error: `Could not parse error response: ${parseError.message}`,
+             request: opts,
+             response: { parseError: parseError.message },
+             provider
+           });
+         }
+         const delayMs = getRetryDelay(attempt);
+         debug.warn("OPENAI_API_RETRY", {
+           model: opts.model,
+           status: response2.status,
+           attempt: attempt + 1,
+           maxAttempts,
+           delayMs
+         });
+         try {
+           await abortableDelay(delayMs, signal);
+         } catch (error) {
+           if (error.message === "Request was aborted") {
+             throw new Error("Request cancelled by user");
+           }
+           throw error;
+         }
+         return getCompletionWithProfile(
+           modelProfile,
+           opts,
+           attempt + 1,
+           maxAttempts,
+           signal
+         );
+       }
+       const stream = createStreamProcessor(response2.body, signal);
+       return stream;
+     }
+     const isOpenAICompatible = [
+       "minimax",
+       "kimi",
+       "deepseek",
+       "siliconflow",
+       "qwen",
+       "glm",
+       "baidu-qianfan",
+       "openai",
+       "mistral",
+       "xai",
+       "groq",
+       "custom-openai"
+     ].includes(provider);
+     let response;
+     let usedEndpoint;
+     if (isOpenAICompatible && provider !== "azure") {
+       const result = await tryWithEndpointFallback(
+         baseURL,
+         opts,
+         headers,
+         provider,
+         proxy,
+         signal
+       );
+       response = result.response;
+       usedEndpoint = result.endpoint;
+     } else {
+       response = await fetch(`${baseURL}${endpoint}`, {
+         method: "POST",
+         headers,
+         body: JSON.stringify(opts),
+         dispatcher: proxy,
+         signal
+       });
+       usedEndpoint = endpoint;
+     }
+     if (!response.ok) {
+       if (signal?.aborted) {
+         throw new Error("Request cancelled by user");
+       }
+       try {
+         const errorData = await response.json();
+         const hasError = (data) => {
+           return typeof data === "object" && data !== null;
+         };
+         const errorMessage = hasError(errorData) ? errorData.error?.message || errorData.message || `HTTP ${response.status}` : `HTTP ${response.status}`;
+         const isGPT5 = opts.model.startsWith("gpt-5");
+         const handlers = isGPT5 ? [...GPT5_ERROR_HANDLERS, ...ERROR_HANDLERS] : ERROR_HANDLERS;
+         for (const handler of handlers) {
+           if (handler.detect(errorMessage)) {
+             debug.api("OPENAI_MODEL_ERROR_DETECTED", {
+               model: opts.model,
+               type: handler.type,
+               errorMessage,
+               status: response.status
+             });
+             setModelError(baseURL || "", opts.model, handler.type, errorMessage);
+             await handler.fix(opts);
+             debug.api("OPENAI_MODEL_ERROR_FIXED", {
+               model: opts.model,
+               type: handler.type
+             });
+             return getCompletionWithProfile(
+               modelProfile,
+               opts,
+               attempt + 1,
+               maxAttempts,
+               signal
+             );
+           }
+         }
+         debug.warn("OPENAI_API_ERROR_UNHANDLED", {
+           model: opts.model,
+           status: response.status,
+           errorMessage
+         });
+       } catch (parseError) {
+         debug.warn("OPENAI_API_ERROR_PARSE_FAILED", {
+           model: opts.model,
+           status: response.status,
+           error: parseError instanceof Error ? parseError.message : String(parseError)
+         });
+       }
+       const delayMs = getRetryDelay(attempt);
+       debug.warn("OPENAI_API_RETRY", {
+         model: opts.model,
+         status: response.status,
+         attempt: attempt + 1,
+         maxAttempts,
+         delayMs
+       });
+       try {
+         await abortableDelay(delayMs, signal);
+       } catch (error) {
+         if (error.message === "Request was aborted") {
+           throw new Error("Request cancelled by user");
+         }
+         throw error;
+       }
+       return getCompletionWithProfile(
+         modelProfile,
+         opts,
+         attempt + 1,
+         maxAttempts,
+         signal
+       );
+     }
+     const responseData = await response.json();
+     return responseData;
+   } catch (error) {
+     if (signal?.aborted) {
+       throw new Error("Request cancelled by user");
+     }
+     if (attempt < maxAttempts) {
+       if (signal?.aborted) {
+         throw new Error("Request cancelled by user");
+       }
+       const delayMs = getRetryDelay(attempt);
+       debug.warn("OPENAI_NETWORK_RETRY", {
+         model: opts.model,
+         attempt: attempt + 1,
+         maxAttempts,
+         delayMs,
+         error: error instanceof Error ? error.message : String(error)
+       });
+       try {
+         await abortableDelay(delayMs, signal);
+       } catch (error2) {
+         if (error2.message === "Request was aborted") {
+           throw new Error("Request cancelled by user");
+         }
+         throw error2;
+       }
+       return getCompletionWithProfile(
+         modelProfile,
+         opts,
+         attempt + 1,
+         maxAttempts,
+         signal
+       );
+     }
+     throw error;
+   }
+ }
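+ // Turns an SSE response body into an async generator: buffers partial chunks,
+ // parses each "data:" line as JSON, skips [DONE], and flushes any complete
+ // lines left in the buffer once the stream ends.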
+ function createStreamProcessor(stream, signal) {
+   if (!stream) {
+     throw new Error("Stream is null or undefined");
+   }
+   return (async function* () {
+     const reader = stream.getReader();
+     const decoder = new TextDecoder("utf-8");
+     let buffer = "";
+     try {
+       while (true) {
+         if (signal?.aborted) {
+           break;
+         }
+         let readResult;
+         try {
+           readResult = await reader.read();
+         } catch (e) {
+           if (signal?.aborted) {
+             break;
+           }
+           debug.warn("OPENAI_STREAM_READ_ERROR", {
+             error: e instanceof Error ? e.message : String(e)
+           });
+           break;
+         }
+         const { done, value } = readResult;
+         if (done) {
+           break;
+         }
+         const chunk = decoder.decode(value, { stream: true });
+         buffer += chunk;
+         let lineEnd = buffer.indexOf("\n");
+         while (lineEnd !== -1) {
+           const line = buffer.substring(0, lineEnd).trim();
+           buffer = buffer.substring(lineEnd + 1);
+           if (line === "data: [DONE]") {
+             continue;
+           }
+           if (line.startsWith("data: ")) {
+             const data = line.slice(6).trim();
+             if (!data) continue;
+             try {
+               const parsed = JSON.parse(data);
+               yield parsed;
+             } catch (e) {
+               debug.warn("OPENAI_STREAM_JSON_PARSE_ERROR", {
+                 data,
+                 error: e instanceof Error ? e.message : String(e)
+               });
+             }
+           }
+           lineEnd = buffer.indexOf("\n");
+         }
+       }
+       if (buffer.trim()) {
+         const lines = buffer.trim().split("\n");
+         for (const line of lines) {
+           if (line.startsWith("data: ") && line !== "data: [DONE]") {
+             const data = line.slice(6).trim();
+             if (!data) continue;
+             try {
+               const parsed = JSON.parse(data);
+               yield parsed;
+             } catch (e) {
+               debug.warn("OPENAI_STREAM_FINAL_JSON_PARSE_ERROR", {
+                 data,
+                 error: e instanceof Error ? e.message : String(e)
+               });
+             }
+           }
+         }
+       }
+     } catch (e) {
+       debug.warn("OPENAI_STREAM_UNEXPECTED_ERROR", {
+         error: e instanceof Error ? e.message : String(e)
+       });
+     } finally {
+       try {
+         reader.releaseLock();
+       } catch (e) {
+         debug.warn("OPENAI_STREAM_RELEASE_LOCK_ERROR", {
+           error: e instanceof Error ? e.message : String(e)
+         });
+       }
+     }
+   })();
+ }
+ function streamCompletion(stream, signal) {
+   return createStreamProcessor(stream, signal);
+ }
+ async function callGPT5ResponsesAPI(modelProfile, request, signal) {
+   const baseURL = modelProfile?.baseURL || "https://api.openai.com/v1";
+   const apiKey = modelProfile?.apiKey;
+   const proxy = getGlobalConfig().proxy ? new ProxyAgent(getGlobalConfig().proxy) : void 0;
+   const headers = {
+     "Content-Type": "application/json",
+     Authorization: `Bearer ${apiKey}`
+   };
+   const responsesParams = request;
+   try {
+     const response = await fetch(`${baseURL}/responses`, {
+       method: "POST",
+       headers,
+       body: JSON.stringify(responsesParams),
+       dispatcher: proxy,
+       signal
+     });
+     if (!response.ok) {
+       const errorText = await response.text();
+       throw new Error(
+         `GPT-5 Responses API error: ${response.status} ${response.statusText} - ${errorText}`
+       );
+     }
+     return response;
+   } catch (error) {
+     if (signal?.aborted) {
+       throw new Error("Request cancelled by user");
+     }
+     throw error;
+   }
+ }
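+ // GPT-5 wrapper: logs provider and streaming context, strips reasoning_effort
+ // for Azure, then always delegates to the chat-completions path above.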
+ async function getGPT5CompletionWithProfile(modelProfile, opts, attempt = 0, maxAttempts = 10, signal) {
+   const features = getModelFeatures(opts.model);
+   const isOfficialOpenAI = !modelProfile.baseURL || modelProfile.baseURL.includes("api.openai.com");
+   if (!isOfficialOpenAI) {
+     debug.api("GPT5_THIRD_PARTY_PROVIDER", {
+       model: opts.model,
+       baseURL: modelProfile.baseURL,
+       provider: modelProfile.provider,
+       supportsResponsesAPI: features.supportsResponsesAPI,
+       requestId: getCurrentRequest()?.id
+     });
+     debug.api("GPT5_PROVIDER_THIRD_PARTY_NOTICE", {
+       model: opts.model,
+       provider: modelProfile.provider,
+       baseURL: modelProfile.baseURL
+     });
+     if (modelProfile.provider === "azure") {
+       delete opts.reasoning_effort;
+     } else if (modelProfile.provider === "custom-openai") {
+       debug.api("GPT5_CUSTOM_PROVIDER_OPTIMIZATIONS", {
+         model: opts.model,
+         provider: modelProfile.provider
+       });
+     }
+   } else if (opts.stream) {
+     debug.api("GPT5_STREAMING_MODE", {
+       model: opts.model,
+       baseURL: modelProfile.baseURL || "official",
+       reason: "responses_api_no_streaming",
+       requestId: getCurrentRequest()?.id
+     });
+     debug.api("GPT5_STREAMING_FALLBACK_TO_CHAT_COMPLETIONS", {
+       model: opts.model,
+       reason: "responses_api_no_streaming"
+     });
+   }
+   debug.api("USING_CHAT_COMPLETIONS_FOR_GPT5", {
+     model: opts.model,
+     baseURL: modelProfile.baseURL || "official",
+     provider: modelProfile.provider,
+     reason: isOfficialOpenAI ? "streaming_or_fallback" : "third_party_provider",
+     requestId: getCurrentRequest()?.id
+   });
+   return await getCompletionWithProfile(
+     modelProfile,
+     opts,
+     attempt,
+     maxAttempts,
+     signal
+   );
+ }
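+ // Model discovery for custom OpenAI-compatible endpoints: accepts {data: []},
+ // bare-array, and {models: []} response shapes, and maps HTTP status codes to
+ // user-facing error messages.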
+ async function fetchCustomModels(baseURL, apiKey) {
+   try {
+     const hasVersionNumber = /\/v\d+/.test(baseURL);
+     const cleanBaseURL = baseURL.replace(/\/+$/, "");
+     const modelsURL = hasVersionNumber ? `${cleanBaseURL}/models` : `${cleanBaseURL}/v1/models`;
+     const response = await fetch(modelsURL, {
+       method: "GET",
+       headers: {
+         Authorization: `Bearer ${apiKey}`,
+         "Content-Type": "application/json"
+       }
+     });
+     if (!response.ok) {
+       if (response.status === 401) {
+         throw new Error(
+           "Invalid API key. Please check your API key and try again."
+         );
+       } else if (response.status === 403) {
+         throw new Error(
+           "API key does not have permission to access models. Please check your API key permissions."
+         );
+       } else if (response.status === 404) {
+         throw new Error(
+           "API endpoint not found. Please check if the base URL is correct and supports the /models endpoint."
+         );
+       } else if (response.status === 429) {
+         throw new Error(
+           "Too many requests. Please wait a moment and try again."
+         );
+       } else if (response.status >= 500) {
+         throw new Error(
+           "API service is temporarily unavailable. Please try again later."
+         );
+       } else {
+         throw new Error(
+           `Unable to connect to API (${response.status}). Please check your base URL, API key, and internet connection.`
+         );
+       }
+     }
+     const data = await response.json();
+     const hasDataArray = (obj) => {
+       return typeof obj === "object" && obj !== null && "data" in obj && Array.isArray(obj.data);
+     };
+     const hasModelsArray = (obj) => {
+       return typeof obj === "object" && obj !== null && "models" in obj && Array.isArray(obj.models);
+     };
+     let models = [];
+     if (hasDataArray(data)) {
+       models = data.data;
+     } else if (Array.isArray(data)) {
+       models = data;
+     } else if (hasModelsArray(data)) {
+       models = data.models;
+     } else {
+       throw new Error(
+         'API returned unexpected response format. Expected an array of models or an object with a "data" or "models" array.'
+       );
+     }
+     if (!Array.isArray(models)) {
+       throw new Error("API response format error: models data is not an array.");
+     }
+     return models;
+   } catch (error) {
+     if (error instanceof Error && (error.message.includes("API key") || error.message.includes("API endpoint") || error.message.includes("API service") || error.message.includes("response format"))) {
+       throw error;
+     }
+     debug.warn("CUSTOM_API_MODELS_FETCH_FAILED", {
+       baseURL,
+       error: error instanceof Error ? error.message : String(error)
+     });
+     if (error instanceof Error && error.message.includes("fetch")) {
+       throw new Error(
+         "Unable to connect to the API. Please check the base URL and your internet connection."
+       );
+     }
+     throw new Error(
+       "Failed to fetch models from custom API. Please check your configuration and try again."
+     );
+   }
+ }
+
+ export {
+   getModelFeatures,
+   applyModelSpecificTransformations,
+   getCompletionWithProfile,
+   createStreamProcessor,
+   streamCompletion,
+   callGPT5ResponsesAPI,
+   getGPT5CompletionWithProfile,
+   fetchCustomModels
+ };