@shareai-lab/kode 2.0.2 → 2.0.3

This diff shows the publicly available content of the two package versions as published to their respective registries. It is provided for informational purposes only.
Files changed (343)
  1. package/LICENSE +201 -0
  2. package/README.md +649 -25
  3. package/README.zh-CN.md +579 -0
  4. package/cli-acp.js +3 -17
  5. package/cli.js +5 -7
  6. package/dist/chunks/Doctor-M3J7GRTJ.js +12 -0
  7. package/dist/chunks/LogList-ISWZ6DDD.js +121 -0
  8. package/dist/chunks/LogList-ISWZ6DDD.js.map +7 -0
  9. package/dist/chunks/REPL-RQ6LO6S7.js +56 -0
  10. package/dist/chunks/ResumeConversation-6DMVBEGH.js +56 -0
  11. package/dist/chunks/agentLoader-FCRG3TFJ.js +31 -0
  12. package/dist/{agentsValidate-XP3CFN6F.js → chunks/agentsValidate-PEWMYN4Q.js} +97 -69
  13. package/dist/chunks/agentsValidate-PEWMYN4Q.js.map +7 -0
  14. package/dist/{ask-3G5H5KD5.js → chunks/ask-D7SOHJ6Z.js} +36 -44
  15. package/dist/chunks/ask-D7SOHJ6Z.js.map +7 -0
  16. package/dist/chunks/autoUpdater-CNESBOKO.js +19 -0
  17. package/dist/{chunk-EH34V7CY.js → chunks/chunk-2JN5MY67.js} +12 -14
  18. package/dist/chunks/chunk-2JN5MY67.js.map +7 -0
  19. package/dist/chunks/chunk-2QONJ5MG.js +14 -0
  20. package/dist/chunks/chunk-2QONJ5MG.js.map +7 -0
  21. package/dist/chunks/chunk-2WEXPKHH.js +903 -0
  22. package/dist/chunks/chunk-2WEXPKHH.js.map +7 -0
  23. package/dist/{chunk-K2MI4TPB.js → chunks/chunk-3BYE3ME6.js} +717 -792
  24. package/dist/chunks/chunk-3BYE3ME6.js.map +7 -0
  25. package/dist/chunks/chunk-3JDNWX7W.js +1264 -0
  26. package/dist/chunks/chunk-3JDNWX7W.js.map +7 -0
  27. package/dist/chunks/chunk-3OEJVB5A.js +906 -0
  28. package/dist/chunks/chunk-3OEJVB5A.js.map +7 -0
  29. package/dist/chunks/chunk-3TNIOEBO.js +369 -0
  30. package/dist/chunks/chunk-3TNIOEBO.js.map +7 -0
  31. package/dist/chunks/chunk-4A46ZXMJ.js +67 -0
  32. package/dist/chunks/chunk-4A46ZXMJ.js.map +7 -0
  33. package/dist/{chunk-4GAIJGRH.js → chunks/chunk-4ATBQOFO.js} +107 -55
  34. package/dist/chunks/chunk-4ATBQOFO.js.map +7 -0
  35. package/dist/chunks/chunk-4CRUCZR4.js +0 -0
  36. package/dist/{chunk-54DNHKOD.js → chunks/chunk-4EO6SIQY.js} +32 -75
  37. package/dist/chunks/chunk-4EO6SIQY.js.map +7 -0
  38. package/dist/chunks/chunk-53M46S5I.js +64 -0
  39. package/dist/chunks/chunk-53M46S5I.js.map +7 -0
  40. package/dist/{chunk-JC6NCUG5.js → chunks/chunk-54KOYG5C.js} +0 -2
  41. package/dist/{chunk-EZXMVTDU.js → chunks/chunk-6BAS4WY6.js} +29 -45
  42. package/dist/chunks/chunk-6BAS4WY6.js.map +7 -0
  43. package/dist/{chunk-BHGTA6JQ.js → chunks/chunk-6KRRFSDN.js} +4 -6
  44. package/dist/chunks/chunk-6KRRFSDN.js.map +7 -0
  45. package/dist/chunks/chunk-6LJNZK4K.js +39 -0
  46. package/dist/chunks/chunk-6LJNZK4K.js.map +7 -0
  47. package/dist/chunks/chunk-6ZWEOSEI.js +666 -0
  48. package/dist/chunks/chunk-6ZWEOSEI.js.map +7 -0
  49. package/dist/chunks/chunk-77XDJMBP.js +3326 -0
  50. package/dist/chunks/chunk-77XDJMBP.js.map +7 -0
  51. package/dist/chunks/chunk-7RRW4NTB.js +6454 -0
  52. package/dist/chunks/chunk-7RRW4NTB.js.map +7 -0
  53. package/dist/chunks/chunk-7X3TW4JB.js +4520 -0
  54. package/dist/chunks/chunk-7X3TW4JB.js.map +7 -0
  55. package/dist/chunks/chunk-B3MW3YGY.js +1409 -0
  56. package/dist/chunks/chunk-B3MW3YGY.js.map +7 -0
  57. package/dist/chunks/chunk-BBJFHTBC.js +28 -0
  58. package/dist/chunks/chunk-BBJFHTBC.js.map +7 -0
  59. package/dist/chunks/chunk-BHDHXOXB.js +24 -0
  60. package/dist/chunks/chunk-BHDHXOXB.js.map +7 -0
  61. package/dist/{chunk-OZNRLY3E.js → chunks/chunk-BTA7SZ26.js} +152 -223
  62. package/dist/chunks/chunk-BTA7SZ26.js.map +7 -0
  63. package/dist/chunks/chunk-CDGRYGPZ.js +103 -0
  64. package/dist/chunks/chunk-CDGRYGPZ.js.map +7 -0
  65. package/dist/{chunk-S6HRABTA.js → chunks/chunk-CP6E5UG6.js} +1 -4
  66. package/dist/chunks/chunk-CP6E5UG6.js.map +7 -0
  67. package/dist/{chunk-2PMO2FS2.js → chunks/chunk-DQ4JHXMT.js} +462 -424
  68. package/dist/chunks/chunk-DQ4JHXMT.js.map +7 -0
  69. package/dist/chunks/chunk-DXD76CMV.js +208 -0
  70. package/dist/chunks/chunk-DXD76CMV.js.map +7 -0
  71. package/dist/chunks/chunk-GCQCAXJZ.js +0 -0
  72. package/dist/chunks/chunk-GELCZWMB.js +42 -0
  73. package/dist/chunks/chunk-GELCZWMB.js.map +7 -0
  74. package/dist/{chunk-NQLEUHMS.js → chunks/chunk-HJYOH4HC.js} +23 -18
  75. package/dist/chunks/chunk-HJYOH4HC.js.map +7 -0
  76. package/dist/chunks/chunk-HPYNW6TT.js +744 -0
  77. package/dist/chunks/chunk-HPYNW6TT.js.map +7 -0
  78. package/dist/{chunk-2KWKUXLT.js → chunks/chunk-HRJ3ICQK.js} +59 -55
  79. package/dist/chunks/chunk-HRJ3ICQK.js.map +7 -0
  80. package/dist/{chunk-ZQU3TXLC.js → chunks/chunk-IFCIADS3.js} +571 -573
  81. package/dist/chunks/chunk-IFCIADS3.js.map +7 -0
  82. package/dist/chunks/chunk-IN7XZ7BC.js +27 -0
  83. package/dist/chunks/chunk-IN7XZ7BC.js.map +7 -0
  84. package/dist/chunks/chunk-L7P4M4KW.js +193 -0
  85. package/dist/chunks/chunk-L7P4M4KW.js.map +7 -0
  86. package/dist/chunks/chunk-LB6TCPDI.js +0 -0
  87. package/dist/{chunk-3RUXVV4S.js → chunks/chunk-LOCXPQNJ.js} +1 -4
  88. package/dist/{chunk-3RUXVV4S.js.map → chunks/chunk-LOCXPQNJ.js.map} +2 -2
  89. package/dist/{chunk-IE2CG2TV.js → chunks/chunk-LOD5ZHCI.js} +213 -208
  90. package/dist/chunks/chunk-LOD5ZHCI.js.map +7 -0
  91. package/dist/{chunk-S3J2TLV6.js → chunks/chunk-M7P3QNRU.js} +1 -4
  92. package/dist/{chunk-S3J2TLV6.js.map → chunks/chunk-M7P3QNRU.js.map} +2 -2
  93. package/dist/chunks/chunk-PPHLQVL7.js +4234 -0
  94. package/dist/chunks/chunk-PPHLQVL7.js.map +7 -0
  95. package/dist/{chunk-ABLVTESJ.js → chunks/chunk-QAXE37B5.js} +1 -4
  96. package/dist/chunks/chunk-QAXE37B5.js.map +7 -0
  97. package/dist/chunks/chunk-QHQOBUF6.js +60 -0
  98. package/dist/chunks/chunk-QHQOBUF6.js.map +7 -0
  99. package/dist/{chunk-SRZZFAS7.js → chunks/chunk-RPJXO7GG.js} +241 -214
  100. package/dist/chunks/chunk-RPJXO7GG.js.map +7 -0
  101. package/dist/{chunk-NPFOMITO.js → chunks/chunk-SWQV4KSY.js} +1 -4
  102. package/dist/{chunk-NPFOMITO.js.map → chunks/chunk-SWQV4KSY.js.map} +2 -2
  103. package/dist/chunks/chunk-SZLAPULP.js +28 -0
  104. package/dist/chunks/chunk-SZLAPULP.js.map +7 -0
  105. package/dist/{chunk-SDGKPKDK.js → chunks/chunk-T7RB5V5J.js} +23 -25
  106. package/dist/chunks/chunk-T7RB5V5J.js.map +7 -0
  107. package/dist/{chunk-HN4E4UUQ.js → chunks/chunk-TI2CTTMA.js} +25 -17
  108. package/dist/chunks/chunk-TI2CTTMA.js.map +7 -0
  109. package/dist/{chunk-G6I7XROM.js → chunks/chunk-TNGVRTO5.js} +45 -20
  110. package/dist/chunks/chunk-TNGVRTO5.js.map +7 -0
  111. package/dist/chunks/chunk-TNWB3U5Y.js +2077 -0
  112. package/dist/chunks/chunk-TNWB3U5Y.js.map +7 -0
  113. package/dist/chunks/chunk-U2IHWPCU.js +12 -0
  114. package/dist/chunks/chunk-U2IHWPCU.js.map +7 -0
  115. package/dist/{chunk-KAA5BGMQ.js → chunks/chunk-UNOY3VJ2.js} +1 -4
  116. package/dist/{chunk-KAA5BGMQ.js.map → chunks/chunk-UNOY3VJ2.js.map} +2 -2
  117. package/dist/{chunk-3TXNP6HH.js → chunks/chunk-UVDJL6ZZ.js} +97 -58
  118. package/dist/chunks/chunk-UVDJL6ZZ.js.map +7 -0
  119. package/dist/chunks/chunk-VNCW4C2Z.js +13452 -0
  120. package/dist/chunks/chunk-VNCW4C2Z.js.map +7 -0
  121. package/dist/chunks/chunk-W5EGGA44.js +15 -0
  122. package/dist/chunks/chunk-W5EGGA44.js.map +7 -0
  123. package/dist/chunks/chunk-XR2W3MAM.js +1533 -0
  124. package/dist/chunks/chunk-XR2W3MAM.js.map +7 -0
  125. package/dist/{chunk-QYFKRZQC.js → chunks/chunk-YIO5EBMQ.js} +423 -377
  126. package/dist/chunks/chunk-YIO5EBMQ.js.map +7 -0
  127. package/dist/chunks/chunk-ZBVLKZ5V.js +1062 -0
  128. package/dist/chunks/chunk-ZBVLKZ5V.js.map +7 -0
  129. package/dist/{chunk-E6YNABER.js → chunks/chunk-ZCLTZIVP.js} +1 -4
  130. package/dist/chunks/chunk-ZCLTZIVP.js.map +7 -0
  131. package/dist/chunks/client-SILZNM5N.js +42 -0
  132. package/dist/{config-6ZMBCL23.js → chunks/config-25HRTPSP.js} +48 -10
  133. package/dist/chunks/cost-tracker-Z2UZT2J5.js +28 -0
  134. package/dist/{customCommands-DNEJS3ZU.js → chunks/customCommands-TYMYZRG5.js} +11 -8
  135. package/dist/chunks/engine-MRVF6FK6.js +39 -0
  136. package/dist/{env-OFAXZ3XG.js → chunks/env-TJ5NOBEB.js} +7 -5
  137. package/dist/{kodeAgentSessionId-X6XWQW7B.js → chunks/kodeAgentSessionId-VTNISJ2L.js} +2 -4
  138. package/dist/chunks/kodeAgentSessionLoad-YB2RKBGJ.js +15 -0
  139. package/dist/chunks/kodeAgentSessionResume-DZSIVKVA.js +13 -0
  140. package/dist/chunks/kodeAgentStreamJson-X5PLS2S6.js +11 -0
  141. package/dist/{kodeAgentStreamJsonSession-GRWG3SPE.js → chunks/kodeAgentStreamJsonSession-RDXM4XYF.js} +38 -24
  142. package/dist/chunks/kodeAgentStreamJsonSession-RDXM4XYF.js.map +7 -0
  143. package/dist/{chunk-4RTX4AG4.js → chunks/kodeAgentStructuredStdio-SVGDSB4P.js} +14 -9
  144. package/dist/chunks/kodeAgentStructuredStdio-SVGDSB4P.js.map +7 -0
  145. package/dist/{kodeHooks-TDMXFWSO.js → chunks/kodeHooks-RVKYRJHG.js} +11 -9
  146. package/dist/{llm-XVXWYOHK.js → chunks/llm-62N6T5ZT.js} +1734 -1526
  147. package/dist/chunks/llm-62N6T5ZT.js.map +7 -0
  148. package/dist/chunks/llmLazy-ZUSSE3ZA.js +13 -0
  149. package/dist/{mentionProcessor-YD7YXYGF.js → chunks/mentionProcessor-RJW5UPJD.js} +46 -16
  150. package/dist/chunks/mentionProcessor-RJW5UPJD.js.map +7 -0
  151. package/dist/{messages-OFUJSPRV.js → chunks/messages-EEWWLPHN.js} +2 -6
  152. package/dist/chunks/model-5TIEKQPD.js +37 -0
  153. package/dist/{openai-5G5D5Q4B.js → chunks/openai-XXK3YZG4.js} +13 -10
  154. package/dist/{outputStyles-HLDXFQK3.js → chunks/outputStyles-FAJTXN2A.js} +6 -9
  155. package/dist/chunks/permissions-HO7INPWM.js +27 -0
  156. package/dist/{pluginRuntime-FPTKK6NY.js → chunks/pluginRuntime-C7K5ULK2.js} +31 -48
  157. package/dist/chunks/pluginRuntime-C7K5ULK2.js.map +7 -0
  158. package/dist/chunks/pluginValidation-DAM7WRTC.js +20 -0
  159. package/dist/chunks/registry-XYJXMOA5.js +60 -0
  160. package/dist/chunks/responsesStreaming-JNGE2P3D.js +8 -0
  161. package/dist/chunks/runNonTextPrintMode-SVBLCZQX.js +577 -0
  162. package/dist/chunks/runNonTextPrintMode-SVBLCZQX.js.map +7 -0
  163. package/dist/chunks/server-REXXF5IK.js +46 -0
  164. package/dist/{skillMarketplace-PSNKDINM.js → chunks/skillMarketplace-N4HVHNST.js} +8 -6
  165. package/dist/chunks/src-OROQIWP3.js +44 -0
  166. package/dist/chunks/src-QXLGGMUW.js +1647 -0
  167. package/dist/chunks/src-QXLGGMUW.js.map +7 -0
  168. package/dist/{cli-SRV2INSL.js → chunks/src-SSDT6MVP.js} +2659 -3384
  169. package/dist/chunks/src-SSDT6MVP.js.map +7 -0
  170. package/dist/chunks/theme-YBJUIMWK.js +10 -0
  171. package/dist/{toolPermissionContext-65L65VEZ.js → chunks/toolPermissionContext-MOCTRR7N.js} +2 -4
  172. package/dist/chunks/toolPermissionSettings-EV2EJAXL.js +18 -0
  173. package/dist/chunks/toolPermissionSettings-EV2EJAXL.js.map +7 -0
  174. package/dist/chunks/uuid-6577SO6X.js +7 -0
  175. package/dist/chunks/uuid-6577SO6X.js.map +7 -0
  176. package/dist/chunks/webOnlyMode-ALXX7UQY.js +66 -0
  177. package/dist/chunks/webOnlyMode-ALXX7UQY.js.map +7 -0
  178. package/dist/entrypoints/cli.js +10 -0
  179. package/dist/entrypoints/cli.js.map +7 -0
  180. package/dist/entrypoints/daemon.js +10 -0
  181. package/dist/entrypoints/daemon.js.map +7 -0
  182. package/dist/entrypoints/mcp.js +71 -0
  183. package/dist/entrypoints/mcp.js.map +7 -0
  184. package/dist/index.js +6 -7
  185. package/dist/index.js.map +3 -3
  186. package/dist/sdk/client.cjs +391 -0
  187. package/dist/sdk/client.cjs.map +7 -0
  188. package/dist/sdk/client.js +364 -0
  189. package/dist/sdk/client.js.map +7 -0
  190. package/dist/sdk/core.cjs +19932 -0
  191. package/dist/sdk/core.cjs.map +7 -0
  192. package/dist/sdk/core.js +19893 -0
  193. package/dist/sdk/core.js.map +7 -0
  194. package/dist/sdk/daemon-client.cjs +257 -0
  195. package/dist/sdk/daemon-client.cjs.map +7 -0
  196. package/dist/sdk/daemon-client.js +221 -0
  197. package/dist/sdk/daemon-client.js.map +7 -0
  198. package/dist/sdk/protocol.cjs +170 -0
  199. package/dist/sdk/protocol.cjs.map +7 -0
  200. package/dist/sdk/protocol.js +140 -0
  201. package/dist/sdk/protocol.js.map +7 -0
  202. package/dist/sdk/runtime-node.cjs +236 -0
  203. package/dist/sdk/runtime-node.cjs.map +7 -0
  204. package/dist/sdk/runtime-node.js +222 -0
  205. package/dist/sdk/runtime-node.js.map +7 -0
  206. package/dist/sdk/runtime.cjs +17 -0
  207. package/dist/sdk/runtime.cjs.map +7 -0
  208. package/dist/sdk/runtime.js +0 -0
  209. package/dist/sdk/runtime.js.map +7 -0
  210. package/dist/sdk/tools.cjs +30300 -0
  211. package/dist/sdk/tools.cjs.map +7 -0
  212. package/dist/sdk/tools.js +30282 -0
  213. package/dist/sdk/tools.js.map +7 -0
  214. package/dist/webui/assets/index-5hlfByVS.css +1 -0
  215. package/dist/webui/assets/index-BR9lm1lA.js +82 -0
  216. package/dist/webui/index.html +28 -0
  217. package/package.json +93 -22
  218. package/scripts/binary-utils.cjs +12 -4
  219. package/scripts/cli-acp-wrapper.cjs +3 -17
  220. package/scripts/cli-wrapper.cjs +5 -7
  221. package/scripts/postinstall.js +8 -4
  222. package/dist/REPL-GIU4ZIXM.js +0 -42
  223. package/dist/acp-H3VJ77YG.js +0 -1357
  224. package/dist/acp-H3VJ77YG.js.map +0 -7
  225. package/dist/agentsValidate-XP3CFN6F.js.map +0 -7
  226. package/dist/ask-3G5H5KD5.js.map +0 -7
  227. package/dist/autoUpdater-DNRMJWFQ.js +0 -17
  228. package/dist/chunk-2KWKUXLT.js.map +0 -7
  229. package/dist/chunk-2PMO2FS2.js.map +0 -7
  230. package/dist/chunk-3TXNP6HH.js.map +0 -7
  231. package/dist/chunk-4GAIJGRH.js.map +0 -7
  232. package/dist/chunk-4RTX4AG4.js.map +0 -7
  233. package/dist/chunk-54DNHKOD.js.map +0 -7
  234. package/dist/chunk-67PY5IX6.js +0 -34
  235. package/dist/chunk-67PY5IX6.js.map +0 -7
  236. package/dist/chunk-6DRDLOLP.js +0 -2613
  237. package/dist/chunk-6DRDLOLP.js.map +0 -7
  238. package/dist/chunk-7CQVZNQV.js +0 -1609
  239. package/dist/chunk-7CQVZNQV.js.map +0 -7
  240. package/dist/chunk-ABLVTESJ.js.map +0 -7
  241. package/dist/chunk-AIMIPK4B.js +0 -835
  242. package/dist/chunk-AIMIPK4B.js.map +0 -7
  243. package/dist/chunk-BHGTA6JQ.js.map +0 -7
  244. package/dist/chunk-CIG63V4E.js +0 -72
  245. package/dist/chunk-CIG63V4E.js.map +0 -7
  246. package/dist/chunk-E6YNABER.js.map +0 -7
  247. package/dist/chunk-EH34V7CY.js.map +0 -7
  248. package/dist/chunk-EZXMVTDU.js.map +0 -7
  249. package/dist/chunk-FH5CHM6L.js +0 -148
  250. package/dist/chunk-FH5CHM6L.js.map +0 -7
  251. package/dist/chunk-G6I7XROM.js.map +0 -7
  252. package/dist/chunk-HN4E4UUQ.js.map +0 -7
  253. package/dist/chunk-HSPVVDIW.js +0 -30198
  254. package/dist/chunk-HSPVVDIW.js.map +0 -7
  255. package/dist/chunk-IE2CG2TV.js.map +0 -7
  256. package/dist/chunk-K2MI4TPB.js.map +0 -7
  257. package/dist/chunk-MN77D2F7.js +0 -2931
  258. package/dist/chunk-MN77D2F7.js.map +0 -7
  259. package/dist/chunk-NQLEUHMS.js.map +0 -7
  260. package/dist/chunk-OIFQB3S4.js +0 -515
  261. package/dist/chunk-OIFQB3S4.js.map +0 -7
  262. package/dist/chunk-OWTG2W3A.js +0 -164
  263. package/dist/chunk-OWTG2W3A.js.map +0 -7
  264. package/dist/chunk-OZNRLY3E.js.map +0 -7
  265. package/dist/chunk-QYFKRZQC.js.map +0 -7
  266. package/dist/chunk-S6HRABTA.js.map +0 -7
  267. package/dist/chunk-SDGKPKDK.js.map +0 -7
  268. package/dist/chunk-SRZZFAS7.js.map +0 -7
  269. package/dist/chunk-UKHTVRJM.js +0 -47
  270. package/dist/chunk-UKHTVRJM.js.map +0 -7
  271. package/dist/chunk-UYXEDKOZ.js +0 -24
  272. package/dist/chunk-UYXEDKOZ.js.map +0 -7
  273. package/dist/chunk-VBXVYQYY.js +0 -145
  274. package/dist/chunk-VBXVYQYY.js.map +0 -7
  275. package/dist/chunk-WVHORZQ5.js +0 -17
  276. package/dist/chunk-WVHORZQ5.js.map +0 -7
  277. package/dist/chunk-WWUWDNWW.js +0 -49
  278. package/dist/chunk-WWUWDNWW.js.map +0 -7
  279. package/dist/chunk-Z33T5YN5.js +0 -654
  280. package/dist/chunk-Z33T5YN5.js.map +0 -7
  281. package/dist/chunk-ZQU3TXLC.js.map +0 -7
  282. package/dist/cli-SRV2INSL.js.map +0 -7
  283. package/dist/commands-TWH6PGVG.js +0 -46
  284. package/dist/context-JQIOOI4W.js +0 -30
  285. package/dist/costTracker-6SL26FDB.js +0 -19
  286. package/dist/kodeAgentSessionLoad-6N27AC5K.js +0 -18
  287. package/dist/kodeAgentSessionResume-HUSAEO24.js +0 -16
  288. package/dist/kodeAgentStreamJson-NXFN7TXH.js +0 -13
  289. package/dist/kodeAgentStreamJsonSession-GRWG3SPE.js.map +0 -7
  290. package/dist/kodeAgentStructuredStdio-HGWJT7CU.js +0 -10
  291. package/dist/llm-XVXWYOHK.js.map +0 -7
  292. package/dist/llmLazy-7TD5N7XP.js +0 -15
  293. package/dist/loader-AUXIJTY6.js +0 -28
  294. package/dist/mcp-BXJ3K7NZ.js +0 -49
  295. package/dist/mentionProcessor-YD7YXYGF.js.map +0 -7
  296. package/dist/model-KPYCXWBK.js +0 -30
  297. package/dist/pluginRuntime-FPTKK6NY.js.map +0 -7
  298. package/dist/pluginValidation-DSFXZ4GF.js +0 -17
  299. package/dist/prompts-LWLAJRS2.js +0 -48
  300. package/dist/query-HVPWL27C.js +0 -50
  301. package/dist/responsesStreaming-AW344PQO.js +0 -10
  302. package/dist/ripgrep-YOPCY2GO.js +0 -17
  303. package/dist/state-KNRWP3FO.js +0 -16
  304. package/dist/theme-7S2QN2FO.js +0 -14
  305. package/dist/toolPermissionSettings-GPOBH4IV.js +0 -18
  306. package/dist/tools-FZU2FZBD.js +0 -47
  307. package/dist/userInput-VHNBN2MW.js +0 -311
  308. package/dist/userInput-VHNBN2MW.js.map +0 -7
  309. package/dist/uuid-QN2CNKKN.js +0 -9
  310. /package/dist/{REPL-GIU4ZIXM.js.map → chunks/Doctor-M3J7GRTJ.js.map} +0 -0
  311. /package/dist/{autoUpdater-DNRMJWFQ.js.map → chunks/REPL-RQ6LO6S7.js.map} +0 -0
  312. /package/dist/{chunk-JC6NCUG5.js.map → chunks/ResumeConversation-6DMVBEGH.js.map} +0 -0
  313. /package/dist/{commands-TWH6PGVG.js.map → chunks/agentLoader-FCRG3TFJ.js.map} +0 -0
  314. /package/dist/{config-6ZMBCL23.js.map → chunks/autoUpdater-CNESBOKO.js.map} +0 -0
  315. /package/dist/{context-JQIOOI4W.js.map → chunks/chunk-4CRUCZR4.js.map} +0 -0
  316. /package/dist/{costTracker-6SL26FDB.js.map → chunks/chunk-54KOYG5C.js.map} +0 -0
  317. /package/dist/{customCommands-DNEJS3ZU.js.map → chunks/chunk-GCQCAXJZ.js.map} +0 -0
  318. /package/dist/{env-OFAXZ3XG.js.map → chunks/chunk-LB6TCPDI.js.map} +0 -0
  319. /package/dist/{kodeAgentSessionId-X6XWQW7B.js.map → chunks/client-SILZNM5N.js.map} +0 -0
  320. /package/dist/{kodeAgentSessionLoad-6N27AC5K.js.map → chunks/config-25HRTPSP.js.map} +0 -0
  321. /package/dist/{kodeAgentSessionResume-HUSAEO24.js.map → chunks/cost-tracker-Z2UZT2J5.js.map} +0 -0
  322. /package/dist/{kodeAgentStreamJson-NXFN7TXH.js.map → chunks/customCommands-TYMYZRG5.js.map} +0 -0
  323. /package/dist/{kodeAgentStructuredStdio-HGWJT7CU.js.map → chunks/engine-MRVF6FK6.js.map} +0 -0
  324. /package/dist/{kodeHooks-TDMXFWSO.js.map → chunks/env-TJ5NOBEB.js.map} +0 -0
  325. /package/dist/{llmLazy-7TD5N7XP.js.map → chunks/kodeAgentSessionId-VTNISJ2L.js.map} +0 -0
  326. /package/dist/{loader-AUXIJTY6.js.map → chunks/kodeAgentSessionLoad-YB2RKBGJ.js.map} +0 -0
  327. /package/dist/{mcp-BXJ3K7NZ.js.map → chunks/kodeAgentSessionResume-DZSIVKVA.js.map} +0 -0
  328. /package/dist/{messages-OFUJSPRV.js.map → chunks/kodeAgentStreamJson-X5PLS2S6.js.map} +0 -0
  329. /package/dist/{model-KPYCXWBK.js.map → chunks/kodeHooks-RVKYRJHG.js.map} +0 -0
  330. /package/dist/{openai-5G5D5Q4B.js.map → chunks/llmLazy-ZUSSE3ZA.js.map} +0 -0
  331. /package/dist/{outputStyles-HLDXFQK3.js.map → chunks/messages-EEWWLPHN.js.map} +0 -0
  332. /package/dist/{pluginValidation-DSFXZ4GF.js.map → chunks/model-5TIEKQPD.js.map} +0 -0
  333. /package/dist/{prompts-LWLAJRS2.js.map → chunks/openai-XXK3YZG4.js.map} +0 -0
  334. /package/dist/{query-HVPWL27C.js.map → chunks/outputStyles-FAJTXN2A.js.map} +0 -0
  335. /package/dist/{responsesStreaming-AW344PQO.js.map → chunks/permissions-HO7INPWM.js.map} +0 -0
  336. /package/dist/{ripgrep-YOPCY2GO.js.map → chunks/pluginValidation-DAM7WRTC.js.map} +0 -0
  337. /package/dist/{skillMarketplace-PSNKDINM.js.map → chunks/registry-XYJXMOA5.js.map} +0 -0
  338. /package/dist/{state-KNRWP3FO.js.map → chunks/responsesStreaming-JNGE2P3D.js.map} +0 -0
  339. /package/dist/{theme-7S2QN2FO.js.map → chunks/server-REXXF5IK.js.map} +0 -0
  340. /package/dist/{toolPermissionContext-65L65VEZ.js.map → chunks/skillMarketplace-N4HVHNST.js.map} +0 -0
  341. /package/dist/{toolPermissionSettings-GPOBH4IV.js.map → chunks/src-OROQIWP3.js.map} +0 -0
  342. /package/dist/{tools-FZU2FZBD.js.map → chunks/theme-YBJUIMWK.js.map} +0 -0
  343. /package/dist/{uuid-QN2CNKKN.js.map → chunks/toolPermissionContext-MOCTRR7N.js.map} +0 -0
@@ -1,55 +1,152 @@
- import { createRequire as __kodeCreateRequire } from "node:module";
- const require = __kodeCreateRequire(import.meta.url);
  import {
  getSessionState,
  setSessionState
- } from "./chunk-E6YNABER.js";
- import {
- getGlobalConfig
- } from "./chunk-AIMIPK4B.js";
+ } from "./chunk-ZCLTZIVP.js";
  import {
  debug,
  getCurrentRequest,
  logAPIError
- } from "./chunk-QYFKRZQC.js";
+ } from "./chunk-YIO5EBMQ.js";
+ import {
+ getGlobalConfig
+ } from "./chunk-XR2W3MAM.js";
 
- // src/services/ai/openai.ts
- import { ProxyAgent, fetch } from "undici";
- var RETRY_CONFIG = {
- BASE_DELAY_MS: 1e3,
- MAX_DELAY_MS: 32e3,
- MAX_SERVER_DELAY_MS: 6e4,
- JITTER_FACTOR: 0.1
+ // packages/core/src/ai/openai/completion.ts
+ import { ProxyAgent as ProxyAgentCtor, fetch as fetch2 } from "undici";
+
+ // packages/core/src/constants/models/providers.ts
+ var providers = {
+ kimi: {
+ name: "Kimi (Moonshot)",
+ baseURL: "https://api.moonshot.cn/v1"
+ },
+ anthropic: {
+ name: "Messages API (Native)",
+ baseURL: "https://api.anthropic.com"
+ },
+ burncloud: {
+ name: "BurnCloud (All models)",
+ baseURL: "https://ai.burncloud.com/v1"
+ },
+ deepseek: {
+ name: "DeepSeek",
+ baseURL: "https://api.deepseek.com"
+ },
+ qwen: {
+ name: "Qwen (Alibaba)",
+ baseURL: "https://dashscope.aliyuncs.com/compatible-mode/v1"
+ },
+ openai: {
+ name: "OpenAI",
+ baseURL: "https://api.openai.com/v1"
+ },
+ ollama: {
+ name: "Ollama",
+ baseURL: "http://localhost:11434/v1"
+ },
+ gemini: {
+ name: "Gemini",
+ baseURL: "https://generativelanguage.googleapis.com/v1beta/openai"
+ },
+ "custom-openai": {
+ name: "Custom OpenAI-Compatible API",
+ baseURL: ""
+ // Will be configured by user
+ },
+ openrouter: {
+ name: "OpenRouter",
+ baseURL: "https://openrouter.ai/api/v1"
+ },
+ minimax: {
+ name: "MiniMax",
+ baseURL: "https://api.minimaxi.com/v1"
+ },
+ "minimax-coding": {
+ name: "MiniMax Coding Plan",
+ baseURL: "https://api.minimaxi.com/anthropic"
+ },
+ siliconflow: {
+ name: "SiliconFlow",
+ baseURL: "https://api.siliconflow.cn/v1"
+ },
+ glm: {
+ name: "GLM (Zhipu AI)",
+ baseURL: "https://open.bigmodel.cn/api/paas/v4"
+ },
+ "glm-coding": {
+ name: "GLM Coding Plan",
+ baseURL: "https://open.bigmodel.cn/api/coding/paas/v4"
+ },
+ "baidu-qianfan": {
+ name: "Baidu Qianfan",
+ baseURL: "https://qianfan.baidubce.com/v2"
+ },
+ mistral: {
+ name: "Mistral",
+ baseURL: "https://api.mistral.ai/v1"
+ },
+ xai: {
+ name: "xAI",
+ baseURL: "https://api.x.ai/v1"
+ },
+ groq: {
+ name: "Groq",
+ baseURL: "https://api.groq.com/openai/v1"
+ },
+ azure: {
+ name: "Azure OpenAI",
+ baseURL: ""
+ // Will be dynamically constructed based on resource name
+ }
  };
- function getRetryDelay(attempt, retryAfter) {
- if (retryAfter) {
- const retryAfterMs = parseInt(retryAfter) * 1e3;
- if (!isNaN(retryAfterMs) && retryAfterMs > 0) {
- return Math.min(retryAfterMs, RETRY_CONFIG.MAX_SERVER_DELAY_MS);
- }
+
+ // packages/core/src/ai/openai/endpointFallback.ts
+ import { fetch } from "undici";
+ async function tryWithEndpointFallback(baseURL, opts, headers, provider, proxy, signal) {
+ const endpointsToTry = [];
+ if (provider === "minimax") {
+ endpointsToTry.push("/text/chatcompletion_v2", "/chat/completions");
+ } else {
+ endpointsToTry.push("/chat/completions");
  }
- const delay = RETRY_CONFIG.BASE_DELAY_MS * Math.pow(2, attempt - 1);
- const jitter = Math.random() * RETRY_CONFIG.JITTER_FACTOR * delay;
- return Math.min(delay + jitter, RETRY_CONFIG.MAX_DELAY_MS);
- }
- function abortableDelay(delayMs, signal) {
- return new Promise((resolve, reject) => {
- if (signal?.aborted) {
- reject(new Error("Request was aborted"));
- return;
- }
- const timeoutId = setTimeout(() => {
- resolve();
- }, delayMs);
- if (signal) {
- const abortHandler = () => {
- clearTimeout(timeoutId);
- reject(new Error("Request was aborted"));
- };
- signal.addEventListener("abort", abortHandler, { once: true });
+ let lastError = null;
+ for (const endpoint of endpointsToTry) {
+ try {
+ const response = await fetch(`${baseURL}${endpoint}`, {
+ method: "POST",
+ headers,
+ body: JSON.stringify(opts.stream ? { ...opts, stream: true } : opts),
+ dispatcher: proxy,
+ signal
+ });
+ if (response.ok) {
+ return { response, endpoint };
+ }
+ if (response.status === 404 && endpointsToTry.length > 1) {
+ debug.api("OPENAI_ENDPOINT_FALLBACK", {
+ endpoint,
+ status: 404,
+ reason: "not_found"
+ });
+ continue;
+ }
+ return { response, endpoint };
+ } catch (error) {
+ lastError = error;
+ if (endpointsToTry.indexOf(endpoint) < endpointsToTry.length - 1) {
+ debug.api("OPENAI_ENDPOINT_FALLBACK", {
+ endpoint,
+ reason: "network_error",
+ error: error instanceof Error ? error.message : String(error)
+ });
+ continue;
+ }
  }
- });
+ }
+ throw lastError instanceof Error ? lastError : new Error("All endpoints failed");
  }
+
+ // packages/core/src/ai/openai/modelErrors.ts
  function getModelErrorKey(baseURL, model, type) {
  return `${baseURL}:${model}:${type}`;
  }
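The behavioral piece of this hunk is the new endpointFallback module: for MiniMax the client now probes the legacy `/text/chatcompletion_v2` route before the standard `/chat/completions` route, falling through on a 404 or a network error. A minimal standalone sketch of that order; the `post` helper here is hypothetical (the bundle uses undici's `fetch` with a proxy dispatcher and abort signal):

```ts
// Sketch of tryWithEndpointFallback's fallback order; `post` is a
// hypothetical stand-in for the undici fetch call in the bundle.
async function tryEndpoints(
  baseURL: string,
  provider: string,
  post: (url: string) => Promise<Response>,
): Promise<{ response: Response; endpoint: string }> {
  const endpoints =
    provider === "minimax"
      ? ["/text/chatcompletion_v2", "/chat/completions"] // legacy route first
      : ["/chat/completions"];
  let lastError: unknown = null;
  for (const endpoint of endpoints) {
    try {
      const response = await post(`${baseURL}${endpoint}`);
      // Only a 404 with another candidate remaining triggers fallback;
      // any other status is returned to the caller as-is.
      if (response.status === 404 && endpoints.length > 1) continue;
      return { response, endpoint };
    } catch (err) {
      lastError = err; // network error: try the next endpoint, if any
    }
  }
  throw lastError instanceof Error ? lastError : new Error("All endpoints failed");
}
```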
@@ -104,24 +201,20 @@ var ERROR_HANDLERS = [
  if (tool.function.description.length <= 1024) continue;
  let str = "";
  let remainder = "";
- for (let line of tool.function.description.split("\n")) {
+ for (const line of tool.function.description.split("\\n")) {
  if (str.length + line.length < 1024) {
- str += line + "\n";
+ str += line + "\\n";
  } else {
- remainder += line + "\n";
+ remainder += line + "\\n";
  }
  }
  tool.function.description = str;
  toolDescriptions[tool.function.name] = remainder;
  }
  if (Object.keys(toolDescriptions).length > 0) {
- let content = "<additional-tool-usage-instructions>\n\n";
+ let content = "<additional-tool-usage-instructions>\\n\\n";
  for (const [name, description] of Object.entries(toolDescriptions)) {
- content += `<${name}>
- ${description}
- </${name}>
-
- `;
+ content += `<${name}>\\n${description}\\n</${name}>\\n\\n`;
  }
  content += "</additional-tool-usage-instructions>";
  for (let i = opts.messages.length - 1; i >= 0; i--) {
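This hunk is the tool-description budget: any tool description over 1024 characters is split at line boundaries, the prefix stays on the tool, and the remainder is re-injected as `<name>…</name>` blocks inside an `<additional-tool-usage-instructions>` message. A standalone sketch of the split rule (the function name is illustrative; the condition mirrors the diff):

```ts
// Sketch of the per-line split applied to oversized descriptions. Lines are
// assigned greedily: while the kept prefix has budget left, the line stays
// on the tool; otherwise it goes to the remainder.
function splitDescription(description: string): { kept: string; remainder: string } {
  let kept = "";
  let remainder = "";
  for (const line of description.split("\n")) {
    if (kept.length + line.length < 1024) kept += line + "\n";
    else remainder += line + "\n";
  }
  return { kept, remainder };
}
```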
@@ -177,6 +270,43 @@ ${description}
  }
  }
  ];
+ function handlersForModel(model) {
+ return model.startsWith("gpt-5") ? [...GPT5_ERROR_HANDLERS, ...ERROR_HANDLERS] : ERROR_HANDLERS;
+ }
+ async function applyModelErrorFixes(opts, baseURL) {
+ for (const handler of handlersForModel(opts.model)) {
+ if (hasModelError(baseURL, opts.model, handler.type)) {
+ await handler.fix(opts);
+ return;
+ }
+ }
+ }
+ async function maybeFixModelError(args) {
+ for (const handler of handlersForModel(args.opts.model)) {
+ if (!handler.detect(args.errorMessage)) continue;
+ debug.api("OPENAI_MODEL_ERROR_DETECTED", {
+ model: args.opts.model,
+ type: handler.type,
+ errorMessage: args.errorMessage,
+ status: args.status
+ });
+ setModelError(
+ args.baseURL,
+ args.opts.model,
+ handler.type,
+ args.errorMessage
+ );
+ await handler.fix(args.opts);
+ debug.api("OPENAI_MODEL_ERROR_FIXED", {
+ model: args.opts.model,
+ type: handler.type
+ });
+ return true;
+ }
+ return false;
+ }
+
+ // packages/core/src/ai/openai/modelFeatures.ts
  var MODEL_FEATURES = {
  o1: { usesMaxCompletionTokens: true },
  "o1-preview": { usesMaxCompletionTokens: true },
@@ -279,447 +409,84 @@ function applyModelSpecificTransformations(opts) {
  }
  }
  }
- async function applyModelErrorFixes(opts, baseURL) {
- const isGPT5 = opts.model.startsWith("gpt-5");
- const handlers = isGPT5 ? [...GPT5_ERROR_HANDLERS, ...ERROR_HANDLERS] : ERROR_HANDLERS;
- for (const handler of handlers) {
- if (hasModelError(baseURL, opts.model, handler.type)) {
- await handler.fix(opts);
- return;
- }
- }
- }
- async function tryWithEndpointFallback(baseURL, opts, headers, provider, proxy, signal) {
- const endpointsToTry = [];
- if (provider === "minimax") {
- endpointsToTry.push("/text/chatcompletion_v2", "/chat/completions");
- } else {
- endpointsToTry.push("/chat/completions");
- }
- let lastError = null;
- for (const endpoint of endpointsToTry) {
- try {
- const response = await fetch(`${baseURL}${endpoint}`, {
- method: "POST",
- headers,
- body: JSON.stringify(opts.stream ? { ...opts, stream: true } : opts),
- dispatcher: proxy,
- signal
- });
- if (response.ok) {
- return { response, endpoint };
- }
- if (response.status === 404 && endpointsToTry.length > 1) {
- debug.api("OPENAI_ENDPOINT_FALLBACK", {
- endpoint,
- status: 404,
- reason: "not_found"
- });
- continue;
- }
- return { response, endpoint };
- } catch (error) {
- lastError = error;
- if (endpointsToTry.indexOf(endpoint) < endpointsToTry.length - 1) {
- debug.api("OPENAI_ENDPOINT_FALLBACK", {
- endpoint,
- reason: "network_error",
- error: error instanceof Error ? error.message : String(error)
- });
- continue;
- }
+
+ // packages/core/src/ai/openai/retry.ts
+ var RETRY_CONFIG = {
+ BASE_DELAY_MS: 1e3,
+ MAX_DELAY_MS: 32e3,
+ MAX_SERVER_DELAY_MS: 6e4,
+ JITTER_FACTOR: 0.1
+ };
+ function getRetryDelay(attempt, retryAfter) {
+ if (retryAfter) {
+ const retryAfterMs = parseInt(retryAfter) * 1e3;
+ if (!isNaN(retryAfterMs) && retryAfterMs > 0) {
+ return Math.min(retryAfterMs, RETRY_CONFIG.MAX_SERVER_DELAY_MS);
  }
  }
- throw lastError || new Error("All endpoints failed");
+ const delay = RETRY_CONFIG.BASE_DELAY_MS * Math.pow(2, attempt - 1);
+ const jitter = Math.random() * RETRY_CONFIG.JITTER_FACTOR * delay;
+ return Math.min(delay + jitter, RETRY_CONFIG.MAX_DELAY_MS);
  }
- async function getCompletionWithProfile(modelProfile, opts, attempt = 0, maxAttempts = 10, signal) {
- if (attempt >= maxAttempts) {
- throw new Error("Max attempts reached");
- }
- const provider = modelProfile?.provider || "anthropic";
- const baseURL = modelProfile?.baseURL;
- const apiKey = modelProfile?.apiKey;
- const proxy = getGlobalConfig().proxy ? new ProxyAgent(getGlobalConfig().proxy) : void 0;
- const headers = {
- "Content-Type": "application/json"
- };
- if (apiKey) {
- if (provider === "azure") {
- headers["api-key"] = apiKey;
- } else {
- headers["Authorization"] = `Bearer ${apiKey}`;
- }
- }
- applyModelSpecificTransformations(opts);
- await applyModelErrorFixes(opts, baseURL || "");
- debug.api("OPENAI_API_CALL_START", {
- endpoint: baseURL || "DEFAULT_OPENAI",
- model: opts.model,
- provider,
- apiKeyConfigured: !!apiKey,
- apiKeyPrefix: apiKey ? apiKey.substring(0, 8) : null,
- maxTokens: opts.max_tokens,
- temperature: opts.temperature,
- messageCount: opts.messages?.length || 0,
- streamMode: opts.stream,
- timestamp: (/* @__PURE__ */ new Date()).toISOString(),
- modelProfileModelName: modelProfile?.modelName,
- modelProfileName: modelProfile?.name
- });
- opts.messages = opts.messages.map((msg) => {
- if (msg.role === "tool") {
- if (Array.isArray(msg.content)) {
- return {
- ...msg,
- content: msg.content.map((c) => c.text || "").filter(Boolean).join("\n\n") || "(empty content)"
- };
- } else if (typeof msg.content !== "string") {
- return {
- ...msg,
- content: typeof msg.content === "undefined" ? "(empty content)" : JSON.stringify(msg.content)
- };
- }
- }
- return msg;
- });
- const azureApiVersion = "2024-06-01";
- let endpoint = "/chat/completions";
- if (provider === "azure") {
- endpoint = `/chat/completions?api-version=${azureApiVersion}`;
- } else if (provider === "minimax") {
- endpoint = "/text/chatcompletion_v2";
- }
- try {
- if (opts.stream) {
- const isOpenAICompatible2 = [
- "minimax",
- "kimi",
- "deepseek",
- "siliconflow",
- "qwen",
- "glm",
- "glm-coding",
- "baidu-qianfan",
- "openai",
- "mistral",
- "xai",
- "groq",
- "custom-openai"
- ].includes(provider);
- let response2;
- let usedEndpoint2;
- if (isOpenAICompatible2 && provider !== "azure") {
- const result = await tryWithEndpointFallback(
- baseURL,
- opts,
- headers,
- provider,
- proxy,
- signal
- );
- response2 = result.response;
- usedEndpoint2 = result.endpoint;
- } else {
- response2 = await fetch(`${baseURL}${endpoint}`, {
- method: "POST",
- headers,
- body: JSON.stringify({ ...opts, stream: true }),
- dispatcher: proxy,
- signal
- });
- usedEndpoint2 = endpoint;
- }
- if (!response2.ok) {
- if (signal?.aborted) {
- throw new Error("Request cancelled by user");
- }
- try {
- const errorData = await response2.json();
- const hasError = (data) => {
- return typeof data === "object" && data !== null;
- };
- const errorMessage = hasError(errorData) ? errorData.error?.message || errorData.message || `HTTP ${response2.status}` : `HTTP ${response2.status}`;
- const isGPT5 = opts.model.startsWith("gpt-5");
- const handlers = isGPT5 ? [...GPT5_ERROR_HANDLERS, ...ERROR_HANDLERS] : ERROR_HANDLERS;
- for (const handler of handlers) {
- if (handler.detect(errorMessage)) {
- debug.api("OPENAI_MODEL_ERROR_DETECTED", {
- model: opts.model,
- type: handler.type,
- errorMessage,
- status: response2.status
- });
- setModelError(
- baseURL || "",
- opts.model,
- handler.type,
- errorMessage
- );
- await handler.fix(opts);
- debug.api("OPENAI_MODEL_ERROR_FIXED", {
- model: opts.model,
- type: handler.type
- });
- return getCompletionWithProfile(
- modelProfile,
- opts,
- attempt + 1,
- maxAttempts,
- signal
- );
- }
- }
- debug.warn("OPENAI_API_ERROR_UNHANDLED", {
- model: opts.model,
- status: response2.status,
- errorMessage
- });
- logAPIError({
- model: opts.model,
- endpoint: `${baseURL}${endpoint}`,
- status: response2.status,
- error: errorMessage,
- request: opts,
- response: errorData,
- provider
- });
- } catch (parseError) {
- debug.warn("OPENAI_API_ERROR_PARSE_FAILED", {
- model: opts.model,
- status: response2.status,
- error: parseError instanceof Error ? parseError.message : String(parseError)
- });
- logAPIError({
- model: opts.model,
- endpoint: `${baseURL}${endpoint}`,
- status: response2.status,
- error: `Could not parse error response: ${parseError.message}`,
- request: opts,
- response: { parseError: parseError.message },
- provider
- });
- }
- const delayMs = getRetryDelay(attempt);
- debug.warn("OPENAI_API_RETRY", {
- model: opts.model,
- status: response2.status,
- attempt: attempt + 1,
- maxAttempts,
- delayMs
- });
- try {
- await abortableDelay(delayMs, signal);
- } catch (error) {
- if (error.message === "Request was aborted") {
- throw new Error("Request cancelled by user");
- }
- throw error;
- }
- return getCompletionWithProfile(
- modelProfile,
- opts,
- attempt + 1,
- maxAttempts,
- signal
- );
- }
- const stream = createStreamProcessor(response2.body, signal);
- return stream;
- }
- const isOpenAICompatible = [
- "minimax",
- "kimi",
- "deepseek",
- "siliconflow",
- "qwen",
- "glm",
- "baidu-qianfan",
- "openai",
- "mistral",
- "xai",
- "groq",
- "custom-openai"
- ].includes(provider);
- let response;
- let usedEndpoint;
- if (isOpenAICompatible && provider !== "azure") {
- const result = await tryWithEndpointFallback(
- baseURL,
- opts,
- headers,
- provider,
- proxy,
- signal
- );
- response = result.response;
- usedEndpoint = result.endpoint;
- } else {
- response = await fetch(`${baseURL}${endpoint}`, {
- method: "POST",
- headers,
- body: JSON.stringify(opts),
- dispatcher: proxy,
- signal
- });
- usedEndpoint = endpoint;
- }
- if (!response.ok) {
- if (signal?.aborted) {
- throw new Error("Request cancelled by user");
- }
- try {
- const errorData = await response.json();
- const hasError = (data) => {
- return typeof data === "object" && data !== null;
- };
- const errorMessage = hasError(errorData) ? errorData.error?.message || errorData.message || `HTTP ${response.status}` : `HTTP ${response.status}`;
- const isGPT5 = opts.model.startsWith("gpt-5");
- const handlers = isGPT5 ? [...GPT5_ERROR_HANDLERS, ...ERROR_HANDLERS] : ERROR_HANDLERS;
- for (const handler of handlers) {
- if (handler.detect(errorMessage)) {
- debug.api("OPENAI_MODEL_ERROR_DETECTED", {
- model: opts.model,
- type: handler.type,
- errorMessage,
- status: response.status
- });
- setModelError(baseURL || "", opts.model, handler.type, errorMessage);
- await handler.fix(opts);
- debug.api("OPENAI_MODEL_ERROR_FIXED", {
- model: opts.model,
- type: handler.type
- });
- return getCompletionWithProfile(
- modelProfile,
- opts,
- attempt + 1,
- maxAttempts,
- signal
- );
- }
- }
- debug.warn("OPENAI_API_ERROR_UNHANDLED", {
- model: opts.model,
- status: response.status,
- errorMessage
- });
- } catch (parseError) {
- debug.warn("OPENAI_API_ERROR_PARSE_FAILED", {
- model: opts.model,
- status: response.status,
- error: parseError instanceof Error ? parseError.message : String(parseError)
- });
- }
- const delayMs = getRetryDelay(attempt);
- debug.warn("OPENAI_API_RETRY", {
- model: opts.model,
- status: response.status,
- attempt: attempt + 1,
- maxAttempts,
- delayMs
- });
- try {
- await abortableDelay(delayMs, signal);
- } catch (error) {
- if (error.message === "Request was aborted") {
- throw new Error("Request cancelled by user");
- }
- throw error;
- }
- return getCompletionWithProfile(
- modelProfile,
- opts,
- attempt + 1,
- maxAttempts,
- signal
- );
- }
- const responseData = await response.json();
- return responseData;
- } catch (error) {
+ function abortableDelay(delayMs, signal) {
+ return new Promise((resolve, reject) => {
  if (signal?.aborted) {
- throw new Error("Request cancelled by user");
+ reject(new Error("Request was aborted"));
+ return;
  }
- if (attempt < maxAttempts) {
- if (signal?.aborted) {
- throw new Error("Request cancelled by user");
- }
- const delayMs = getRetryDelay(attempt);
- debug.warn("OPENAI_NETWORK_RETRY", {
- model: opts.model,
- attempt: attempt + 1,
- maxAttempts,
- delayMs,
- error: error instanceof Error ? error.message : String(error)
- });
- try {
- await abortableDelay(delayMs, signal);
- } catch (error2) {
- if (error2.message === "Request was aborted") {
- throw new Error("Request cancelled by user");
- }
- throw error2;
- }
- return getCompletionWithProfile(
- modelProfile,
- opts,
- attempt + 1,
- maxAttempts,
- signal
- );
+ const timeoutId = setTimeout(() => resolve(), delayMs);
+ if (signal) {
+ const abortHandler = () => {
+ clearTimeout(timeoutId);
+ reject(new Error("Request was aborted"));
+ };
+ signal.addEventListener("abort", abortHandler, { once: true });
  }
- throw error;
- }
+ });
  }
+
+ // packages/core/src/ai/openai/stream.ts
  function createStreamProcessor(stream, signal) {
- if (!stream) {
- throw new Error("Stream is null or undefined");
- }
  return (async function* () {
  const reader = stream.getReader();
  const decoder = new TextDecoder("utf-8");
  let buffer = "";
  try {
  while (true) {
- if (signal?.aborted) {
- break;
- }
+ if (signal?.aborted) break;
  let readResult;
  try {
  readResult = await reader.read();
  } catch (e) {
- if (signal?.aborted) {
- break;
- }
+ if (signal?.aborted) break;
  debug.warn("OPENAI_STREAM_READ_ERROR", {
  error: e instanceof Error ? e.message : String(e)
  });
  break;
  }
  const { done, value } = readResult;
- if (done) {
- break;
- }
- const chunk = decoder.decode(value, { stream: true });
- buffer += chunk;
+ if (done) break;
+ const chunk = value instanceof Uint8Array ? value : new Uint8Array();
+ buffer += decoder.decode(chunk, { stream: true });
  let lineEnd = buffer.indexOf("\n");
  while (lineEnd !== -1) {
  const line = buffer.substring(0, lineEnd).trim();
  buffer = buffer.substring(lineEnd + 1);
  if (line === "data: [DONE]") {
+ lineEnd = buffer.indexOf("\n");
  continue;
  }
  if (line.startsWith("data: ")) {
  const data = line.slice(6).trim();
- if (!data) continue;
- try {
- const parsed = JSON.parse(data);
- yield parsed;
- } catch (e) {
- debug.warn("OPENAI_STREAM_JSON_PARSE_ERROR", {
- data,
- error: e instanceof Error ? e.message : String(e)
- });
+ if (data) {
+ try {
+ yield JSON.parse(data);
+ } catch (e) {
+ debug.warn("OPENAI_STREAM_JSON_PARSE_ERROR", {
+ data,
+ error: e instanceof Error ? e.message : String(e)
+ });
+ }
  }
  }
  lineEnd = buffer.indexOf("\n");
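The relocated retry.ts keeps the 2.0.2 schedule: exponential backoff from BASE_DELAY_MS with up to 10% random jitter, capped at MAX_DELAY_MS, while a server-supplied Retry-After header wins outright (capped at MAX_SERVER_DELAY_MS, 60 s). Ignoring jitter, the schedule works out as below:

```ts
// Worked example of getRetryDelay() above with no Retry-After header and
// jitter omitted: delay = min(1000 * 2^(attempt - 1), 32000).
const BASE_DELAY_MS = 1_000;
const MAX_DELAY_MS = 32_000;
for (let attempt = 1; attempt <= 7; attempt++) {
  const delay = Math.min(BASE_DELAY_MS * 2 ** (attempt - 1), MAX_DELAY_MS);
  console.log(attempt, delay); // 1→1000, 2→2000, 3→4000, 4→8000, 5→16000, 6→32000, 7→32000
}
```

Note that the completion loop below passes its zero-based attempt counter, so the first retry waits roughly 500 ms (1000 × 2^-1) before this schedule kicks in.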
@@ -728,18 +495,16 @@ function createStreamProcessor(stream, signal) {
  if (buffer.trim()) {
  const lines = buffer.trim().split("\n");
  for (const line of lines) {
- if (line.startsWith("data: ") && line !== "data: [DONE]") {
- const data = line.slice(6).trim();
- if (!data) continue;
- try {
- const parsed = JSON.parse(data);
- yield parsed;
- } catch (e) {
- debug.warn("OPENAI_STREAM_FINAL_JSON_PARSE_ERROR", {
- data,
- error: e instanceof Error ? e.message : String(e)
- });
- }
+ if (!line.startsWith("data: ") || line === "data: [DONE]") continue;
+ const data = line.slice(6).trim();
+ if (!data) continue;
+ try {
+ yield JSON.parse(data);
+ } catch (e) {
+ debug.warn("OPENAI_STREAM_FINAL_JSON_PARSE_ERROR", {
+ data,
+ error: e instanceof Error ? e.message : String(e)
+ });
  }
  }
  }
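This hunk and the loop in the previous one implement the same SSE framing: split the buffer on newlines, take `data: ` payloads, skip `data: [DONE]`, and JSON-parse the rest, logging and skipping malformed chunks. As a standalone sketch (the function name is illustrative):

```ts
// Sketch of the per-line SSE handling in createStreamProcessor: returns the
// parsed JSON payload, or null for non-data lines, [DONE], and parse errors.
function parseSSELine(line: string): unknown {
  const trimmed = line.trim();
  if (!trimmed.startsWith("data: ") || trimmed === "data: [DONE]") return null;
  const payload = trimmed.slice(6).trim();
  if (!payload) return null;
  try {
    return JSON.parse(payload);
  } catch {
    return null; // the bundle logs OPENAI_STREAM_*_PARSE_ERROR and moves on
  }
}
```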
@@ -761,65 +526,286 @@ function createStreamProcessor(stream, signal) {
  function streamCompletion(stream, signal) {
  return createStreamProcessor(stream, signal);
  }
- async function callGPT5ResponsesAPI(modelProfile, request, signal) {
- const baseURL = modelProfile?.baseURL || "https://api.openai.com/v1";
- const apiKey = modelProfile?.apiKey;
- const proxy = getGlobalConfig().proxy ? new ProxyAgent(getGlobalConfig().proxy) : void 0;
- const headers = {
- "Content-Type": "application/json",
- Authorization: `Bearer ${apiKey}`
- };
- const responsesParams = request;
- try {
- const response = await fetch(`${baseURL}/responses`, {
- method: "POST",
- headers,
- body: JSON.stringify(responsesParams),
- dispatcher: proxy,
- signal
- });
- if (!response.ok) {
- const errorText = await response.text();
- throw new Error(
- `GPT-5 Responses API error: ${response.status} ${response.statusText} - ${errorText}`
- );
+
+ // packages/core/src/ai/openai/completion.ts
+ var STREAM_OPENAI_COMPATIBLE = [
+ "minimax",
+ "kimi",
+ "deepseek",
+ "siliconflow",
+ "qwen",
+ "glm",
+ "glm-coding",
+ "baidu-qianfan",
+ "openai",
+ "mistral",
+ "xai",
+ "groq",
+ "custom-openai"
+ ];
+ var NON_STREAM_OPENAI_COMPATIBLE = [
+ "minimax",
+ "kimi",
+ "deepseek",
+ "siliconflow",
+ "qwen",
+ "glm",
+ "baidu-qianfan",
+ "openai",
+ "mistral",
+ "xai",
+ "groq",
+ "custom-openai"
+ ];
+ function throwIfAborted(signal) {
+ if (signal?.aborted) throw new Error("Request cancelled by user");
+ }
+ function normalizeToolMessages(opts) {
+ opts.messages = opts.messages.map((msg) => {
+ if (msg.role !== "tool") return msg;
+ if (Array.isArray(msg.content)) {
+ return {
+ ...msg,
+ content: msg.content.map((c) => c.text || "").filter(Boolean).join("\\n\\n") || "(empty content)"
+ };
  }
- return response;
- } catch (error) {
- if (signal?.aborted) {
- throw new Error("Request cancelled by user");
+ if (typeof msg.content !== "string") {
+ return {
+ ...msg,
+ content: typeof msg.content === "undefined" ? "(empty content)" : JSON.stringify(msg.content)
+ };
  }
- throw error;
+ return msg;
+ });
+ }
+ function parseErrorMessage(errorData, status) {
+ if (typeof errorData === "object" && errorData !== null) {
+ const record = errorData;
+ const errorObj = typeof record.error === "object" && record.error !== null ? record.error : null;
+ const nested = errorObj?.message;
+ if (typeof nested === "string" && nested.trim()) return nested;
+ const direct = record.message;
+ if (typeof direct === "string" && direct.trim()) return direct;
+ }
+ return `HTTP ${status}`;
+ }
+ function endpointForProvider(provider) {
+ const azureApiVersion = "2024-06-01";
+ if (provider === "azure") {
+ return `/chat/completions?api-version=${azureApiVersion}`;
  }
+ if (provider === "minimax") {
+ return "/text/chatcompletion_v2";
+ }
+ return "/chat/completions";
  }
+ function createProxy() {
+ return getGlobalConfig().proxy ? new ProxyAgentCtor(getGlobalConfig().proxy) : void 0;
+ }
+ function createHeaders(provider, apiKey) {
+ const headers = { "Content-Type": "application/json" };
+ if (apiKey) {
+ if (provider === "azure") {
+ headers["api-key"] = apiKey;
+ } else {
+ headers.Authorization = `Bearer ${apiKey}`;
+ }
+ }
+ return headers;
+ }
+ async function fetchCompletionResponse(args) {
+ const isOpenAICompatible = args.stream ? STREAM_OPENAI_COMPATIBLE.includes(
+ args.provider
+ ) : NON_STREAM_OPENAI_COMPATIBLE.includes(
+ args.provider
+ );
+ if (isOpenAICompatible && args.provider !== "azure") {
+ return await tryWithEndpointFallback(
+ args.baseURL,
+ args.opts,
+ args.headers,
+ args.provider,
+ args.proxy,
+ args.signal
+ );
+ }
+ const response = await fetch2(`${args.baseURL}${args.endpoint}`, {
+ method: "POST",
+ headers: args.headers,
+ body: JSON.stringify(
+ args.stream ? { ...args.opts, stream: true } : args.opts
+ ),
+ dispatcher: args.proxy,
+ signal: args.signal
+ });
+ return { response, endpoint: args.endpoint };
+ }
+ async function getCompletionWithProfile(modelProfile, opts, attempt = 0, maxAttempts = 10, signal) {
+ const profile = modelProfile;
+ const provider = profile?.provider || "anthropic";
+ const providerConfig = providers[provider];
+ const baseURL = profile?.baseURL || providerConfig?.baseURL || "";
+ const apiKey = profile?.apiKey;
+ const proxy = createProxy();
+ const headers = createHeaders(provider, apiKey);
+ for (let currentAttempt = attempt; currentAttempt < maxAttempts; currentAttempt++) {
+ throwIfAborted(signal);
+ applyModelSpecificTransformations(opts);
+ await applyModelErrorFixes(opts, baseURL || "");
+ normalizeToolMessages(opts);
+ debug.api("OPENAI_API_CALL_START", {
+ endpoint: baseURL || "DEFAULT_OPENAI",
+ model: opts.model,
+ provider,
+ apiKeyConfigured: !!apiKey,
+ apiKeyPrefix: apiKey ? apiKey.substring(0, 8) : null,
+ maxTokens: opts.max_tokens,
+ temperature: opts.temperature,
+ messageCount: opts.messages?.length || 0,
+ streamMode: opts.stream,
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+ modelProfileModelName: profile?.modelName,
+ modelProfileName: profile?.name
+ });
+ const endpoint = endpointForProvider(provider);
+ try {
+ const wantsStream = !!opts.stream;
+ const { response, endpoint: usedEndpoint } = await fetchCompletionResponse({
+ baseURL,
+ endpoint,
+ provider,
+ proxy,
+ headers,
+ opts,
+ stream: wantsStream,
+ signal
+ });
+ if (!response.ok) {
+ throwIfAborted(signal);
+ try {
+ const errorData = await response.json();
+ const errorMessage = parseErrorMessage(errorData, response.status);
+ const fixed = await maybeFixModelError({
+ baseURL: baseURL || "",
+ opts,
+ errorMessage,
+ status: response.status
+ });
+ if (fixed) {
+ continue;
+ }
+ debug.warn("OPENAI_API_ERROR_UNHANDLED", {
+ model: opts.model,
+ status: response.status,
+ errorMessage
+ });
+ if (wantsStream) {
+ logAPIError({
+ model: opts.model,
+ endpoint: `${baseURL}${usedEndpoint}`,
+ status: response.status,
+ error: errorMessage,
+ request: opts,
+ response: errorData,
+ provider
+ });
+ }
+ } catch (parseError) {
+ debug.warn("OPENAI_API_ERROR_PARSE_FAILED", {
+ model: opts.model,
+ status: response.status,
+ error: parseError instanceof Error ? parseError.message : String(parseError)
+ });
+ if (wantsStream) {
+ logAPIError({
+ model: opts.model,
+ endpoint: `${baseURL}${usedEndpoint}`,
+ status: response.status,
+ error: `Could not parse error response: ${parseError instanceof Error ? parseError.message : String(parseError)}`,
+ request: opts,
+ response: {
+ parseError: parseError instanceof Error ? parseError.message : String(parseError)
+ },
+ provider
+ });
+ }
+ }
+ debug.warn("OPENAI_API_RETRY", {
+ model: opts.model,
+ status: response.status,
+ attempt: currentAttempt + 1,
+ maxAttempts,
+ delayMs: getRetryDelay(currentAttempt)
+ });
+ await abortableDelay(getRetryDelay(currentAttempt), signal).catch(
+ (err) => {
+ if (err instanceof Error && err.message === "Request was aborted") {
+ throw new Error("Request cancelled by user");
+ }
+ throw err;
+ }
+ );
+ continue;
+ }
+ if (wantsStream) {
+ const body = response.body;
+ if (!body) throw new Error("Stream is null or undefined");
+ return createStreamProcessor(body, signal);
+ }
+ return await response.json();
+ } catch (error) {
+ throwIfAborted(signal);
+ if (currentAttempt + 1 >= maxAttempts) {
+ throw error;
+ }
+ debug.warn("OPENAI_NETWORK_RETRY", {
+ model: opts.model,
+ attempt: currentAttempt + 1,
+ maxAttempts,
+ delayMs: getRetryDelay(currentAttempt),
+ error: error instanceof Error ? error.message : String(error)
+ });
+ await abortableDelay(getRetryDelay(currentAttempt), signal).catch((err) => {
+ if (err instanceof Error && err.message === "Request was aborted") {
+ throw new Error("Request cancelled by user");
+ }
+ throw err;
+ });
+ }
+ }
+ throw new Error("Max attempts reached");
+ }
+
+ // packages/core/src/ai/openai/gpt5.ts
  async function getGPT5CompletionWithProfile(modelProfile, opts, attempt = 0, maxAttempts = 10, signal) {
+ const profile = modelProfile;
  const features = getModelFeatures(opts.model);
- const isOfficialOpenAI = !modelProfile.baseURL || modelProfile.baseURL.includes("api.openai.com");
+ const isOfficialOpenAI = !profile?.baseURL || profile.baseURL.includes("api.openai.com");
  if (!isOfficialOpenAI) {
  debug.api("GPT5_THIRD_PARTY_PROVIDER", {
  model: opts.model,
- baseURL: modelProfile.baseURL,
- provider: modelProfile.provider,
+ baseURL: profile?.baseURL,
+ provider: profile?.provider,
  supportsResponsesAPI: features.supportsResponsesAPI,
  requestId: getCurrentRequest()?.id
  });
  debug.api("GPT5_PROVIDER_THIRD_PARTY_NOTICE", {
  model: opts.model,
- provider: modelProfile.provider,
- baseURL: modelProfile.baseURL
+ provider: profile?.provider,
+ baseURL: profile?.baseURL
  });
- if (modelProfile.provider === "azure") {
+ if (profile?.provider === "azure") {
  delete opts.reasoning_effort;
- } else if (modelProfile.provider === "custom-openai") {
+ } else if (profile?.provider === "custom-openai") {
  debug.api("GPT5_CUSTOM_PROVIDER_OPTIMIZATIONS", {
  model: opts.model,
- provider: modelProfile.provider
+ provider: profile?.provider
  });
  }
  } else if (opts.stream) {
  debug.api("GPT5_STREAMING_MODE", {
  model: opts.model,
- baseURL: modelProfile.baseURL || "official",
+ baseURL: profile?.baseURL || "official",
  reason: "responses_api_no_streaming",
  requestId: getCurrentRequest()?.id
  });
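The structural change in this hunk: 2.0.2's getCompletionWithProfile retried by recursing into itself from half a dozen call sites, while 2.0.3 hoists the retries into one bounded for-loop, where a fixable model error or a retryable HTTP failure simply `continue`s. Reduced to a skeleton (hypothetical names, same control flow; abort handling and logging omitted):

```ts
// Skeleton of the 2.0.3 retry loop: one attempt per iteration, retry
// signalled by the attempt function, exhaustion surfaced as an exception.
async function withRetries<T>(
  maxAttempts: number,
  attemptOnce: () => Promise<{ done: true; value: T } | { done: false }>,
  delayFor: (attempt: number) => number,
): Promise<T> {
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const result = await attemptOnce();
    if (result.done) return result.value;
    await new Promise((resolve) => setTimeout(resolve, delayFor(attempt)));
  }
  throw new Error("Max attempts reached");
}
```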
@@ -830,8 +816,8 @@ async function getGPT5CompletionWithProfile(modelProfile, opts, attempt = 0, max
  }
  debug.api("USING_CHAT_COMPLETIONS_FOR_GPT5", {
  model: opts.model,
- baseURL: modelProfile.baseURL || "official",
- provider: modelProfile.provider,
+ baseURL: profile?.baseURL || "official",
+ provider: profile?.provider,
  reason: isOfficialOpenAI ? "streaming_or_fallback" : "third_party_provider",
  requestId: getCurrentRequest()?.id
  });
@@ -843,94 +829,106 @@ async function getGPT5CompletionWithProfile(modelProfile, opts, attempt = 0, max
  signal
  );
  }
- async function fetchCustomModels(baseURL, apiKey) {
+
+ // packages/core/src/ai/openai/responsesApi.ts
+ import { ProxyAgent as ProxyAgentCtor2, fetch as fetch3 } from "undici";
+ async function callGPT5ResponsesAPI(modelProfile, request, signal) {
+ const profile = modelProfile;
+ const baseURL = profile?.baseURL || "https://api.openai.com/v1";
+ const apiKey = profile?.apiKey;
+ const proxy = getGlobalConfig().proxy ? new ProxyAgentCtor2(getGlobalConfig().proxy) : void 0;
+ const headers = {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`
+ };
  try {
- const hasVersionNumber = /\/v\d+/.test(baseURL);
- const cleanBaseURL = baseURL.replace(/\/+$/, "");
- const modelsURL = hasVersionNumber ? `${cleanBaseURL}/models` : `${cleanBaseURL}/v1/models`;
- const response = await fetch(modelsURL, {
- method: "GET",
- headers: {
- Authorization: `Bearer ${apiKey}`,
- "Content-Type": "application/json"
- }
+ const response = await fetch3(`${baseURL}/responses`, {
+ method: "POST",
+ headers,
+ body: JSON.stringify(request),
+ dispatcher: proxy,
+ signal
  });
  if (!response.ok) {
- if (response.status === 401) {
- throw new Error(
- "Invalid API key. Please check your API key and try again."
- );
- } else if (response.status === 403) {
- throw new Error(
- "API key does not have permission to access models. Please check your API key permissions."
- );
- } else if (response.status === 404) {
- throw new Error(
- "API endpoint not found. Please check if the base URL is correct and supports the /models endpoint."
- );
- } else if (response.status === 429) {
- throw new Error(
- "Too many requests. Please wait a moment and try again."
- );
- } else if (response.status >= 500) {
- throw new Error(
- "API service is temporarily unavailable. Please try again later."
- );
- } else {
- throw new Error(
- `Unable to connect to API (${response.status}). Please check your base URL, API key, and internet connection.`
- );
- }
+ const errorText = await response.text();
+ throw new Error(
+ `GPT-5 Responses API error: ${response.status} ${response.statusText} - ${errorText}`
+ );
  }
- const data = await response.json();
- const hasDataArray = (obj) => {
- return typeof obj === "object" && obj !== null && "data" in obj && Array.isArray(obj.data);
- };
- const hasModelsArray = (obj) => {
- return typeof obj === "object" && obj !== null && "models" in obj && Array.isArray(obj.models);
- };
- let models = [];
- if (hasDataArray(data)) {
- models = data.data;
- } else if (Array.isArray(data)) {
- models = data;
- } else if (hasModelsArray(data)) {
- models = data.models;
- } else {
+ return response;
+ } catch (error) {
+ if (signal?.aborted) {
+ throw new Error("Request cancelled by user");
+ }
+ throw error;
+ }
+ }
+
+ // packages/core/src/ai/openai/customModels.ts
+ import { fetch as fetch4 } from "undici";
+ function asRecord(value) {
+ if (typeof value !== "object" || value === null) return null;
+ return value;
+ }
+ function extractModelArray(value) {
+ const record = asRecord(value);
+ if (!record) return null;
+ if (Array.isArray(record.data)) return record.data;
+ if (Array.isArray(record.models)) return record.models;
+ return null;
+ }
+ async function fetchCustomModels(baseURL, apiKey) {
+ const hasVersionNumber = /\/v\d+/.test(baseURL);
+ const cleanBaseURL = baseURL.replace(/\/+$/, "");
+ const modelsURL = hasVersionNumber ? `${cleanBaseURL}/models` : `${cleanBaseURL}/v1/models`;
+ const response = await fetch4(modelsURL, {
+ method: "GET",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ }
+ });
+ if (!response.ok) {
+ if (response.status === 401) {
  throw new Error(
- 'API returned unexpected response format. Expected an array of models or an object with a "data" or "models" array.'
+ "Invalid API key. Please check your API key and try again."
  );
  }
- if (!Array.isArray(models)) {
- throw new Error("API response format error: models data is not an array.");
+ if (response.status === 403) {
+ throw new Error(
+ "API key does not have permission to access models. Please check your API key permissions."
+ );
  }
- return models;
- } catch (error) {
- if (error instanceof Error && (error.message.includes("API key") || error.message.includes("API endpoint") || error.message.includes("API service") || error.message.includes("response format"))) {
- throw error;
+ if (response.status === 404) {
+ throw new Error(
+ "API endpoint not found. Please check if the base URL is correct and supports the /models endpoint."
+ );
  }
- debug.warn("CUSTOM_API_MODELS_FETCH_FAILED", {
- baseURL,
- error: error instanceof Error ? error.message : String(error)
- });
- if (error instanceof Error && error.message.includes("fetch")) {
+ if (response.status === 429) {
  throw new Error(
- "Unable to connect to the API. Please check the base URL and your internet connection."
+ "Rate limit exceeded. Please wait a moment and try again."
  );
  }
  throw new Error(
- "Failed to fetch models from custom API. Please check your configuration and try again."
+ `Failed to fetch models: HTTP ${response.status} ${response.statusText}`
  );
  }
+ const json = await response.json();
+ const models = extractModelArray(json);
+ if (!models) {
+ throw new Error("Invalid response format: missing models array");
+ }
+ return models;
  }
 
  export {
+ providers,
  getModelFeatures,
  applyModelSpecificTransformations,
- getCompletionWithProfile,
  createStreamProcessor,
  streamCompletion,
- callGPT5ResponsesAPI,
+ getCompletionWithProfile,
  getGPT5CompletionWithProfile,
+ callGPT5ResponsesAPI,
  fetchCustomModels
  };
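One behavioral note from this last hunk: fetchCustomModels now accepts only `{ data: [...] }` (OpenAI style) or `{ models: [...] }` payloads; the 2.0.2 code also accepted a bare top-level array, which 2.0.3's extractModelArray rejects. A sketch of the accepted shapes (function name illustrative):

```ts
// Sketch of extractModelArray's accepted response shapes. A bare array,
// accepted in 2.0.2, now falls through to null ("Invalid response format").
function extractModels(json: unknown): unknown[] | null {
  if (typeof json !== "object" || json === null) return null;
  const record = json as Record<string, unknown>;
  if (Array.isArray(record.data)) return record.data;     // OpenAI: { data: [...] }
  if (Array.isArray(record.models)) return record.models; // alt: { models: [...] }
  return null;
}
```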