piclaw 0.0.19 → 0.0.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (270) hide show
  1. package/.output/nitro.json +1 -1
  2. package/.output/public/assets/defult-D5RLDUrI.js +1 -0
  3. package/.output/public/assets/{dist-CMBqBOCp.js → dist-BH_oa-kv.js} +1 -1
  4. package/.output/public/assets/index-7JvURuHy.js +204 -0
  5. package/.output/public/assets/index-K43slwjJ.css +1 -0
  6. package/.output/public/index.html +11 -2
  7. package/.output/server/_...path_.get.mjs +16 -0
  8. package/.output/server/_chunks/app.mjs +261 -181
  9. package/.output/server/_chunks/browser.mjs +4 -1
  10. package/.output/server/_chunks/config.mjs +4 -0
  11. package/.output/server/_chunks/db.mjs +32 -28
  12. package/.output/server/_chunks/device-bus.mjs +123 -0
  13. package/.output/server/_chunks/dummy.mjs +1 -1
  14. package/.output/server/_chunks/logger.mjs +23 -0
  15. package/.output/server/_chunks/login.mjs +1 -1
  16. package/.output/server/_chunks/notes.mjs +1 -3
  17. package/.output/server/_chunks/renderer-template.mjs +1 -1
  18. package/.output/server/_chunks/sandbox.mjs +217 -0
  19. package/.output/server/_chunks/server.mjs +2302 -122
  20. package/.output/server/_chunks/terminal.mjs +63 -8
  21. package/.output/server/_chunks/uploads.mjs +60 -0
  22. package/.output/server/_chunks/virtual.mjs +192 -54
  23. package/.output/server/_id_.delete.mjs +5 -2
  24. package/.output/server/_id_.patch.mjs +2 -0
  25. package/.output/server/_id_2.delete.mjs +8 -0
  26. package/.output/server/_jid_.delete.mjs +5 -2
  27. package/.output/server/_jid_.patch.mjs +37 -4
  28. package/.output/server/_jid_2.delete.mjs +5 -2
  29. package/.output/server/_libs/@acemir/cssom+[...].mjs +2269 -1137
  30. package/.output/server/_libs/@google/genai.mjs +337 -273
  31. package/.output/server/_libs/@mariozechner/pi-agent-core+[...].mjs +381 -2073
  32. package/.output/server/_libs/@mariozechner/pi-coding-agent+[...].mjs +231 -131
  33. package/.output/server/_libs/_.mjs +3 -2
  34. package/.output/server/_libs/_10.mjs +2 -4
  35. package/.output/server/_libs/_11.mjs +2 -4
  36. package/.output/server/_libs/_12.mjs +2 -3
  37. package/.output/server/_libs/_13.mjs +2 -3
  38. package/.output/server/_libs/_14.mjs +2 -4
  39. package/.output/server/_libs/_15.mjs +2 -4
  40. package/.output/server/_libs/_16.mjs +2 -3
  41. package/.output/server/_libs/_17.mjs +2 -4
  42. package/.output/server/_libs/_18.mjs +2 -2
  43. package/.output/server/_libs/_19.mjs +2 -2
  44. package/.output/server/_libs/_2.mjs +3 -3
  45. package/.output/server/_libs/_20.mjs +2 -2
  46. package/.output/server/_libs/_21.mjs +2 -2
  47. package/.output/server/_libs/_22.mjs +2 -2
  48. package/.output/server/_libs/_23.mjs +2 -2
  49. package/.output/server/_libs/_24.mjs +2 -2
  50. package/.output/server/_libs/_25.mjs +2 -2
  51. package/.output/server/_libs/_26.mjs +2 -2
  52. package/.output/server/_libs/_27.mjs +2 -2
  53. package/.output/server/_libs/_28.mjs +2 -2
  54. package/.output/server/_libs/_29.mjs +2 -2
  55. package/.output/server/_libs/_3.mjs +3 -3
  56. package/.output/server/_libs/_30.mjs +2 -2
  57. package/.output/server/_libs/_31.mjs +2 -2
  58. package/.output/server/_libs/_32.mjs +2 -2
  59. package/.output/server/_libs/_33.mjs +2 -2
  60. package/.output/server/_libs/_34.mjs +2 -2
  61. package/.output/server/_libs/_35.mjs +2 -2
  62. package/.output/server/_libs/_36.mjs +2 -2
  63. package/.output/server/_libs/_37.mjs +2 -2
  64. package/.output/server/_libs/_38.mjs +2 -2
  65. package/.output/server/_libs/_39.mjs +2 -2
  66. package/.output/server/_libs/_4.mjs +4 -3
  67. package/.output/server/_libs/_40.mjs +2 -2
  68. package/.output/server/_libs/_41.mjs +2 -2
  69. package/.output/server/_libs/_42.mjs +2 -2
  70. package/.output/server/_libs/_43.mjs +2 -2
  71. package/.output/server/_libs/_44.mjs +2 -2
  72. package/.output/server/_libs/_45.mjs +2 -2
  73. package/.output/server/_libs/_46.mjs +2 -2
  74. package/.output/server/_libs/_47.mjs +2 -2
  75. package/.output/server/_libs/_48.mjs +2 -2
  76. package/.output/server/_libs/_49.mjs +2 -2
  77. package/.output/server/_libs/_5.mjs +2 -3
  78. package/.output/server/_libs/_50.mjs +2 -2
  79. package/.output/server/_libs/_51.mjs +2 -2
  80. package/.output/server/_libs/_52.mjs +2 -2
  81. package/.output/server/_libs/_53.mjs +2 -2
  82. package/.output/server/_libs/_54.mjs +2 -2
  83. package/.output/server/_libs/_55.mjs +2 -2
  84. package/.output/server/_libs/_56.mjs +2 -2
  85. package/.output/server/_libs/_57.mjs +2 -2
  86. package/.output/server/_libs/_58.mjs +2 -2
  87. package/.output/server/_libs/_59.mjs +2 -2
  88. package/.output/server/_libs/_6.mjs +2 -3
  89. package/.output/server/_libs/_60.mjs +2 -2
  90. package/.output/server/_libs/_61.mjs +2 -2
  91. package/.output/server/_libs/_62.mjs +2 -2
  92. package/.output/server/_libs/_63.mjs +2 -2
  93. package/.output/server/_libs/_64.mjs +2 -2
  94. package/.output/server/_libs/_65.mjs +2 -2
  95. package/.output/server/_libs/_66.mjs +2 -2
  96. package/.output/server/_libs/_67.mjs +2 -2
  97. package/.output/server/_libs/_68.mjs +2 -2
  98. package/.output/server/_libs/_69.mjs +2 -2
  99. package/.output/server/_libs/_7.mjs +2 -5
  100. package/.output/server/_libs/_70.mjs +2 -2
  101. package/.output/server/_libs/_71.mjs +2 -2
  102. package/.output/server/_libs/_72.mjs +2 -2
  103. package/.output/server/_libs/_73.mjs +2 -2
  104. package/.output/server/_libs/_74.mjs +2 -2
  105. package/.output/server/_libs/_75.mjs +2 -2
  106. package/.output/server/_libs/_76.mjs +2 -2
  107. package/.output/server/_libs/_77.mjs +2 -2
  108. package/.output/server/_libs/_78.mjs +2 -2
  109. package/.output/server/_libs/_79.mjs +2 -2
  110. package/.output/server/_libs/_8.mjs +2 -3
  111. package/.output/server/_libs/_80.mjs +2 -2
  112. package/.output/server/_libs/_81.mjs +2 -2
  113. package/.output/server/_libs/_82.mjs +2 -2
  114. package/.output/server/_libs/_83.mjs +2 -2
  115. package/.output/server/_libs/_84.mjs +2 -2
  116. package/.output/server/_libs/_85.mjs +2 -2
  117. package/.output/server/_libs/_86.mjs +2 -2
  118. package/.output/server/_libs/_87.mjs +2 -2
  119. package/.output/server/_libs/_88.mjs +2 -2
  120. package/.output/server/_libs/_89.mjs +2 -2
  121. package/.output/server/_libs/_9.mjs +2 -4
  122. package/.output/server/_libs/_90.mjs +5 -2
  123. package/.output/server/_libs/_91.mjs +3 -2
  124. package/.output/server/_libs/_92.mjs +2 -2
  125. package/.output/server/_libs/_93.mjs +2 -2
  126. package/.output/server/_libs/_94.mjs +2 -2
  127. package/.output/server/_libs/agent-base.mjs +1 -1
  128. package/.output/server/_libs/cheerio+[...].mjs +1 -1
  129. package/.output/server/_libs/data-uri-to-buffer.mjs +2 -67
  130. package/.output/server/_libs/data-urls+[...].mjs +1 -1
  131. package/.output/server/_libs/diff.mjs +1 -1
  132. package/.output/server/_libs/exodus__bytes.mjs +99 -81
  133. package/.output/server/_libs/fetch-blob+node-domexception.mjs +1 -1
  134. package/.output/server/_libs/h3+rou3+srvx.mjs +34 -4
  135. package/.output/server/_libs/html-encoding-sniffer.mjs +1 -1
  136. package/.output/server/_libs/https-proxy-agent.mjs +2 -2
  137. package/.output/server/_libs/jsdom.mjs +1 -1
  138. package/.output/server/_libs/just-bash+[...].mjs +4676 -3916
  139. package/.output/server/_libs/mariozechner__jiti.mjs +1 -1
  140. package/.output/server/_libs/mariozechner__pi-ai.mjs +1472 -0
  141. package/.output/server/_libs/md4x.mjs +1 -1
  142. package/.output/server/_libs/mime.mjs +838 -1
  143. package/.output/server/_libs/node-fetch.mjs +4 -4
  144. package/.output/server/_libs/node-liblzma.mjs +1 -1
  145. package/.output/server/_libs/silvia-odwyer__photon-node.mjs +1 -1
  146. package/.output/server/_routes/api/auth/approve.mjs +2 -0
  147. package/.output/server/_routes/api/auth/revoke.mjs +2 -0
  148. package/.output/server/_routes/api/auth/status.mjs +25 -6
  149. package/.output/server/_routes/api/browser2.mjs +1 -1
  150. package/.output/server/_routes/api/config2.mjs +2 -0
  151. package/.output/server/_routes/api/device_events.mjs +36 -0
  152. package/.output/server/_routes/api/files/groups.mjs +1 -2
  153. package/.output/server/_routes/api/files/raw.mjs +1 -1
  154. package/.output/server/_routes/api/groups.mjs +5 -3
  155. package/.output/server/_routes/api/groups2.mjs +18 -6
  156. package/.output/server/_routes/api/health.mjs +1 -2
  157. package/.output/server/_routes/api/messages.mjs +7 -1
  158. package/.output/server/_routes/api/notes/delete.mjs +4 -1
  159. package/.output/server/_routes/api/notes/write.mjs +2 -0
  160. package/.output/server/_routes/api/ntfy/setup.mjs +8 -0
  161. package/.output/server/_routes/api/pi/apikey.mjs +3 -2
  162. package/.output/server/_routes/api/pi/apikey_providers.mjs +1 -2
  163. package/.output/server/_routes/api/pi/commands.mjs +13 -3
  164. package/.output/server/_routes/api/pi/login/events.mjs +0 -1
  165. package/.output/server/_routes/api/pi/login/respond.mjs +2 -1
  166. package/.output/server/_routes/api/pi/login.mjs +1 -2
  167. package/.output/server/_routes/api/pi/logout.mjs +2 -1
  168. package/.output/server/_routes/api/pi/models.mjs +1 -2
  169. package/.output/server/_routes/api/pi/models_config2.mjs +2 -0
  170. package/.output/server/_routes/api/pi/settings2.mjs +2 -0
  171. package/.output/server/_routes/api/pi/status.mjs +1 -2
  172. package/.output/server/_routes/api/proxy.mjs +19 -1
  173. package/.output/server/_routes/api/sandbox.mjs +26 -0
  174. package/.output/server/_routes/api/sandbox2.mjs +17 -0
  175. package/.output/server/_routes/api/send.mjs +26 -18
  176. package/.output/server/_routes/api/status.mjs +1 -3
  177. package/.output/server/_routes/api/stop.mjs +11 -0
  178. package/.output/server/_routes/api/store/plugins.mjs +75 -0
  179. package/.output/server/_routes/api/store/skills.mjs +11 -0
  180. package/.output/server/_routes/api/tasks2.mjs +3 -2
  181. package/.output/server/_routes/api/telegram/setup.mjs +5 -2
  182. package/.output/server/_routes/api/telegram/status.mjs +1 -2
  183. package/.output/server/_routes/api/terminal2.mjs +2 -1
  184. package/.output/server/_routes/api/tunnel/setup.mjs +4 -2
  185. package/.output/server/_runtime.mjs +1 -2
  186. package/.output/server/_utils.mjs +10 -2
  187. package/.output/server/index.mjs +1 -1
  188. package/.output/server/node_modules/amdefine/amdefine.js +301 -0
  189. package/.output/server/node_modules/amdefine/package.json +16 -0
  190. package/.output/server/node_modules/compressjs/lib/BWT.js +420 -0
  191. package/.output/server/node_modules/compressjs/lib/BWTC.js +234 -0
  192. package/.output/server/node_modules/compressjs/lib/BitStream.js +108 -0
  193. package/.output/server/node_modules/compressjs/lib/Bzip2.js +936 -0
  194. package/.output/server/node_modules/compressjs/lib/CRC32.js +105 -0
  195. package/.output/server/node_modules/compressjs/lib/Context1Model.js +56 -0
  196. package/.output/server/node_modules/compressjs/lib/DefSumModel.js +152 -0
  197. package/.output/server/node_modules/compressjs/lib/DeflateDistanceModel.js +55 -0
  198. package/.output/server/node_modules/compressjs/lib/Dmc.js +197 -0
  199. package/.output/server/node_modules/compressjs/lib/DummyRangeCoder.js +81 -0
  200. package/.output/server/node_modules/compressjs/lib/FenwickModel.js +194 -0
  201. package/.output/server/node_modules/compressjs/lib/Huffman.js +514 -0
  202. package/.output/server/node_modules/compressjs/lib/HuffmanAllocator.js +227 -0
  203. package/.output/server/node_modules/compressjs/lib/LogDistanceModel.js +46 -0
  204. package/.output/server/node_modules/compressjs/lib/Lzjb.js +300 -0
  205. package/.output/server/node_modules/compressjs/lib/LzjbR.js +241 -0
  206. package/.output/server/node_modules/compressjs/lib/Lzp3.js +273 -0
  207. package/.output/server/node_modules/compressjs/lib/MTFModel.js +208 -0
  208. package/.output/server/node_modules/compressjs/lib/NoModel.js +46 -0
  209. package/.output/server/node_modules/compressjs/lib/PPM.js +343 -0
  210. package/.output/server/node_modules/compressjs/lib/RangeCoder.js +238 -0
  211. package/.output/server/node_modules/compressjs/lib/Simple.js +111 -0
  212. package/.output/server/node_modules/compressjs/lib/Stream.js +53 -0
  213. package/.output/server/node_modules/compressjs/lib/Util.js +324 -0
  214. package/.output/server/node_modules/compressjs/lib/freeze.js +14 -0
  215. package/.output/server/node_modules/compressjs/main.js +29 -0
  216. package/.output/server/node_modules/compressjs/package.json +35 -0
  217. package/.output/server/package.json +2 -1
  218. package/README.md +10 -1
  219. package/lib/index.d.mts +1 -0
  220. package/lib/index.mjs +1 -0
  221. package/lib/piclaw.mjs +100 -0
  222. package/lib/utils.mjs +96 -0
  223. package/package.json +16 -11
  224. package/.output/public/assets/defult-CMO6TZ5a.js +0 -1
  225. package/.output/public/assets/index-jdnbJw-M.js +0 -204
  226. package/.output/public/assets/index-ooXrRwgl.css +0 -1
  227. package/.output/server/_chunks/commands.mjs +0 -282
  228. package/.output/server/_chunks/pi.mjs +0 -202
  229. package/.output/server/_chunks/session.mjs +0 -1114
  230. package/.output/server/_libs/@aws-crypto/crc32+[...].mjs +0 -299
  231. package/.output/server/_libs/@aws-sdk/client-bedrock-runtime+[...].mjs +0 -17828
  232. package/.output/server/_libs/@aws-sdk/credential-provider-http+[...].mjs +0 -122
  233. package/.output/server/_libs/@aws-sdk/credential-provider-ini+[...].mjs +0 -417
  234. package/.output/server/_libs/@aws-sdk/credential-provider-process+[...].mjs +0 -54
  235. package/.output/server/_libs/@aws-sdk/credential-provider-sso+[...].mjs +0 -1151
  236. package/.output/server/_libs/@aws-sdk/credential-provider-web-identity+[...].mjs +0 -50
  237. package/.output/server/_libs/@smithy/credential-provider-imds+[...].mjs +0 -369
  238. package/.output/server/_libs/@tootallnate/quickjs-emscripten+[...].mjs +0 -3011
  239. package/.output/server/_libs/_100.mjs +0 -2
  240. package/.output/server/_libs/_101.mjs +0 -2
  241. package/.output/server/_libs/_102.mjs +0 -5
  242. package/.output/server/_libs/_103.mjs +0 -3
  243. package/.output/server/_libs/_104.mjs +0 -2
  244. package/.output/server/_libs/_105.mjs +0 -3
  245. package/.output/server/_libs/_106.mjs +0 -2
  246. package/.output/server/_libs/_107.mjs +0 -2
  247. package/.output/server/_libs/_95.mjs +0 -2
  248. package/.output/server/_libs/_96.mjs +0 -2
  249. package/.output/server/_libs/_97.mjs +0 -2
  250. package/.output/server/_libs/_98.mjs +0 -2
  251. package/.output/server/_libs/_99.mjs +0 -2
  252. package/.output/server/_libs/amdefine.mjs +0 -188
  253. package/.output/server/_libs/ast-types.mjs +0 -2270
  254. package/.output/server/_libs/aws-sdk__nested-clients.mjs +0 -3141
  255. package/.output/server/_libs/basic-ftp.mjs +0 -1906
  256. package/.output/server/_libs/compressjs.mjs +0 -50
  257. package/.output/server/_libs/degenerator+[...].mjs +0 -9964
  258. package/.output/server/_libs/get-uri.mjs +0 -413
  259. package/.output/server/_libs/http-proxy-agent.mjs +0 -123
  260. package/.output/server/_libs/ip-address.mjs +0 -1423
  261. package/.output/server/_libs/lru-cache.mjs +0 -732
  262. package/.output/server/_libs/netmask.mjs +0 -139
  263. package/.output/server/_libs/pac-proxy-agent+[...].mjs +0 -3104
  264. package/.output/server/_libs/proxy-agent+proxy-from-env.mjs +0 -204
  265. package/.output/server/_libs/smithy__core.mjs +0 -192
  266. package/.output/server/node_modules/tslib/modules/index.js +0 -70
  267. package/.output/server/node_modules/tslib/modules/package.json +0 -3
  268. package/.output/server/node_modules/tslib/package.json +0 -47
  269. package/.output/server/node_modules/tslib/tslib.js +0 -484
  270. package/bin/piclaw.mjs +0 -195
@@ -1,5 +1,4 @@
1
- import { n as __esmMin, o as __toCommonJS, r as __exportAll, s as __toESM, t as __commonJSMin } from "../../_runtime.mjs";
2
- import { a as StopReason, c as BedrockRuntimeClient, i as ImageFormat, n as CacheTTL, o as ToolResultStatus, r as ConversationRole, s as ConverseStreamCommand, t as CachePointType } from "../@aws-sdk/client-bedrock-runtime+[...].mjs";
1
+ import { a as __toCommonJS, n as __esmMin, o as __toESM, r as __exportAll, t as __commonJSMin } from "../../_runtime.mjs";
3
2
  import { t as Anthropic } from "../anthropic-ai__sdk.mjs";
4
3
  import { i as ThinkingLevel, n as FunctionCallingConfigMode, r as GoogleGenAI, t as FinishReason } from "../@google/genai.mjs";
5
4
  var value_exports = /* @__PURE__ */ __exportAll({
@@ -3321,14 +3320,18 @@ function clearApiProviders() {
3321
3320
  var _existsSync = null;
3322
3321
  var _homedir = null;
3323
3322
  var _join = null;
3323
+ var dynamicImport$2 = (specifier) => import(specifier);
3324
+ var NODE_FS_SPECIFIER = "node:fs";
3325
+ var NODE_OS_SPECIFIER$1 = "node:os";
3326
+ var NODE_PATH_SPECIFIER = "node:path";
3324
3327
  if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) {
3325
- import("node:fs").then((m) => {
3328
+ dynamicImport$2(NODE_FS_SPECIFIER).then((m) => {
3326
3329
  _existsSync = m.existsSync;
3327
3330
  });
3328
- import("node:os").then((m) => {
3331
+ dynamicImport$2(NODE_OS_SPECIFIER$1).then((m) => {
3329
3332
  _homedir = m.homedir;
3330
3333
  });
3331
- import("node:path").then((m) => {
3334
+ dynamicImport$2(NODE_PATH_SPECIFIER).then((m) => {
3332
3335
  _join = m.join;
3333
3336
  });
3334
3337
  }
@@ -3372,6 +3375,7 @@ function getEnvApiKey(provider) {
3372
3375
  "minimax-cn": "MINIMAX_CN_API_KEY",
3373
3376
  huggingface: "HF_TOKEN",
3374
3377
  opencode: "OPENCODE_API_KEY",
3378
+ "opencode-go": "OPENCODE_API_KEY",
3375
3379
  "kimi-coding": "KIMI_API_KEY"
3376
3380
  }[provider];
3377
3381
  return envVar ? process.env[envVar] : void 0;
@@ -6677,6 +6681,23 @@ const MODELS = {
6677
6681
  contextWindow: 1e6,
6678
6682
  maxTokens: 64e3
6679
6683
  },
6684
+ "gemini-3.1-flash-lite-preview": {
6685
+ id: "gemini-3.1-flash-lite-preview",
6686
+ name: "Gemini 3.1 Flash Lite Preview",
6687
+ api: "google-generative-ai",
6688
+ provider: "google",
6689
+ baseUrl: "https://generativelanguage.googleapis.com/v1beta",
6690
+ reasoning: true,
6691
+ input: ["text", "image"],
6692
+ cost: {
6693
+ input: 0,
6694
+ output: 0,
6695
+ cacheRead: 0,
6696
+ cacheWrite: 0
6697
+ },
6698
+ contextWindow: 1048576,
6699
+ maxTokens: 65536
6700
+ },
6680
6701
  "gemini-3.1-pro-preview": {
6681
6702
  id: "gemini-3.1-pro-preview",
6682
6703
  name: "Gemini 3.1 Pro Preview",
@@ -6866,9 +6887,9 @@ const MODELS = {
6866
6887
  contextWindow: 1048576,
6867
6888
  maxTokens: 65535
6868
6889
  },
6869
- "gemini-3-pro-high": {
6870
- id: "gemini-3-pro-high",
6871
- name: "Gemini 3 Pro High (Antigravity)",
6890
+ "gemini-3.1-pro-high": {
6891
+ id: "gemini-3.1-pro-high",
6892
+ name: "Gemini 3.1 Pro High (Antigravity)",
6872
6893
  api: "google-gemini-cli",
6873
6894
  provider: "google-antigravity",
6874
6895
  baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com",
@@ -6883,9 +6904,9 @@ const MODELS = {
6883
6904
  contextWindow: 1048576,
6884
6905
  maxTokens: 65535
6885
6906
  },
6886
- "gemini-3-pro-low": {
6887
- id: "gemini-3-pro-low",
6888
- name: "Gemini 3 Pro Low (Antigravity)",
6907
+ "gemini-3.1-pro-low": {
6908
+ id: "gemini-3.1-pro-low",
6909
+ name: "Gemini 3.1 Pro Low (Antigravity)",
6889
6910
  api: "google-gemini-cli",
6890
6911
  provider: "google-antigravity",
6891
6912
  baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com",
@@ -9661,6 +9682,59 @@ const MODELS = {
9661
9682
  maxTokens: 131072
9662
9683
  }
9663
9684
  },
9685
+ "opencode-go": {
9686
+ "glm-5": {
9687
+ id: "glm-5",
9688
+ name: "GLM-5",
9689
+ api: "openai-completions",
9690
+ provider: "opencode-go",
9691
+ baseUrl: "https://opencode.ai/zen/go/v1",
9692
+ reasoning: true,
9693
+ input: ["text"],
9694
+ cost: {
9695
+ input: 1,
9696
+ output: 3.2,
9697
+ cacheRead: .2,
9698
+ cacheWrite: 0
9699
+ },
9700
+ contextWindow: 204800,
9701
+ maxTokens: 131072
9702
+ },
9703
+ "kimi-k2.5": {
9704
+ id: "kimi-k2.5",
9705
+ name: "Kimi K2.5",
9706
+ api: "openai-completions",
9707
+ provider: "opencode-go",
9708
+ baseUrl: "https://opencode.ai/zen/go/v1",
9709
+ reasoning: true,
9710
+ input: ["text", "image"],
9711
+ cost: {
9712
+ input: .6,
9713
+ output: 3,
9714
+ cacheRead: .1,
9715
+ cacheWrite: 0
9716
+ },
9717
+ contextWindow: 262144,
9718
+ maxTokens: 65536
9719
+ },
9720
+ "minimax-m2.5": {
9721
+ id: "minimax-m2.5",
9722
+ name: "MiniMax M2.5",
9723
+ api: "anthropic-messages",
9724
+ provider: "opencode-go",
9725
+ baseUrl: "https://opencode.ai/zen/go",
9726
+ reasoning: true,
9727
+ input: ["text"],
9728
+ cost: {
9729
+ input: .3,
9730
+ output: 1.2,
9731
+ cacheRead: .03,
9732
+ cacheWrite: 0
9733
+ },
9734
+ contextWindow: 204800,
9735
+ maxTokens: 131072
9736
+ }
9737
+ },
9664
9738
  "openrouter": {
9665
9739
  "ai21/jamba-large-1.7": {
9666
9740
  id: "ai21/jamba-large-1.7",
@@ -10036,6 +10110,23 @@ const MODELS = {
10036
10110
  contextWindow: 131e3,
10037
10111
  maxTokens: 4096
10038
10112
  },
10113
+ "arcee-ai/trinity-mini": {
10114
+ id: "arcee-ai/trinity-mini",
10115
+ name: "Arcee AI: Trinity Mini",
10116
+ api: "openai-completions",
10117
+ provider: "openrouter",
10118
+ baseUrl: "https://openrouter.ai/api/v1",
10119
+ reasoning: true,
10120
+ input: ["text"],
10121
+ cost: {
10122
+ input: .045,
10123
+ output: .15,
10124
+ cacheRead: 0,
10125
+ cacheWrite: 0
10126
+ },
10127
+ contextWindow: 131072,
10128
+ maxTokens: 131072
10129
+ },
10039
10130
  "arcee-ai/trinity-mini:free": {
10040
10131
  id: "arcee-ai/trinity-mini:free",
10041
10132
  name: "Arcee AI: Trinity Mini (free)",
@@ -10546,6 +10637,23 @@ const MODELS = {
10546
10637
  contextWindow: 1048576,
10547
10638
  maxTokens: 65536
10548
10639
  },
10640
+ "google/gemini-3.1-flash-lite-preview": {
10641
+ id: "google/gemini-3.1-flash-lite-preview",
10642
+ name: "Google: Gemini 3.1 Flash Lite Preview",
10643
+ api: "openai-completions",
10644
+ provider: "openrouter",
10645
+ baseUrl: "https://openrouter.ai/api/v1",
10646
+ reasoning: true,
10647
+ input: ["text", "image"],
10648
+ cost: {
10649
+ input: .25,
10650
+ output: 1.5,
10651
+ cacheRead: .024999999999999998,
10652
+ cacheWrite: .08333333333333334
10653
+ },
10654
+ contextWindow: 1048576,
10655
+ maxTokens: 65536
10656
+ },
10549
10657
  "google/gemini-3.1-pro-preview": {
10550
10658
  id: "google/gemini-3.1-pro-preview",
10551
10659
  name: "Google: Gemini 3.1 Pro Preview",
@@ -10624,8 +10732,8 @@ const MODELS = {
10624
10732
  input: ["text"],
10625
10733
  cost: {
10626
10734
  input: .25,
10627
- output: 1,
10628
- cacheRead: 0,
10735
+ output: .75,
10736
+ cacheRead: .024999999999999998,
10629
10737
  cacheWrite: 0
10630
10738
  },
10631
10739
  contextWindow: 128e3,
@@ -10641,8 +10749,8 @@ const MODELS = {
10641
10749
  input: ["text"],
10642
10750
  cost: {
10643
10751
  input: .25,
10644
- output: 1,
10645
- cacheRead: 0,
10752
+ output: .75,
10753
+ cacheRead: .024999999999999998,
10646
10754
  cacheWrite: 0
10647
10755
  },
10648
10756
  contextWindow: 128e3,
@@ -12093,6 +12201,23 @@ const MODELS = {
12093
12201
  contextWindow: 4e5,
12094
12202
  maxTokens: 128e3
12095
12203
  },
12204
+ "openai/gpt-5.3-chat": {
12205
+ id: "openai/gpt-5.3-chat",
12206
+ name: "OpenAI: GPT-5.3 Chat",
12207
+ api: "openai-completions",
12208
+ provider: "openrouter",
12209
+ baseUrl: "https://openrouter.ai/api/v1",
12210
+ reasoning: false,
12211
+ input: ["text", "image"],
12212
+ cost: {
12213
+ input: 1.75,
12214
+ output: 14,
12215
+ cacheRead: .175,
12216
+ cacheWrite: 0
12217
+ },
12218
+ contextWindow: 128e3,
12219
+ maxTokens: 16384
12220
+ },
12096
12221
  "openai/gpt-5.3-codex": {
12097
12222
  id: "openai/gpt-5.3-codex",
12098
12223
  name: "OpenAI: GPT-5.3-Codex",
@@ -12459,9 +12584,9 @@ const MODELS = {
12459
12584
  reasoning: false,
12460
12585
  input: ["text"],
12461
12586
  cost: {
12462
- input: 1.5999999999999999,
12463
- output: 6.3999999999999995,
12464
- cacheRead: .32,
12587
+ input: 1.04,
12588
+ output: 4.16,
12589
+ cacheRead: .20800000000000002,
12465
12590
  cacheWrite: 0
12466
12591
  },
12467
12592
  contextWindow: 32768,
@@ -12493,8 +12618,8 @@ const MODELS = {
12493
12618
  reasoning: false,
12494
12619
  input: ["text"],
12495
12620
  cost: {
12496
- input: .39999999999999997,
12497
- output: 1.2,
12621
+ input: .26,
12622
+ output: .78,
12498
12623
  cacheRead: 0,
12499
12624
  cacheWrite: 0
12500
12625
  },
@@ -12510,8 +12635,8 @@ const MODELS = {
12510
12635
  reasoning: true,
12511
12636
  input: ["text"],
12512
12637
  cost: {
12513
- input: .39999999999999997,
12514
- output: 1.2,
12638
+ input: .26,
12639
+ output: .78,
12515
12640
  cacheRead: 0,
12516
12641
  cacheWrite: 0
12517
12642
  },
@@ -12527,9 +12652,9 @@ const MODELS = {
12527
12652
  reasoning: false,
12528
12653
  input: ["text"],
12529
12654
  cost: {
12530
- input: .049999999999999996,
12531
- output: .19999999999999998,
12532
- cacheRead: .01,
12655
+ input: .0325,
12656
+ output: .13,
12657
+ cacheRead: .006500000000000001,
12533
12658
  cacheWrite: 0
12534
12659
  },
12535
12660
  contextWindow: 131072,
@@ -12765,9 +12890,9 @@ const MODELS = {
12765
12890
  reasoning: false,
12766
12891
  input: ["text"],
12767
12892
  cost: {
12768
- input: .3,
12769
- output: 1.5,
12770
- cacheRead: .06,
12893
+ input: .195,
12894
+ output: .975,
12895
+ cacheRead: .039,
12771
12896
  cacheWrite: 0
12772
12897
  },
12773
12898
  contextWindow: 1e6,
@@ -12799,9 +12924,9 @@ const MODELS = {
12799
12924
  reasoning: false,
12800
12925
  input: ["text"],
12801
12926
  cost: {
12802
- input: 1,
12803
- output: 5,
12804
- cacheRead: .19999999999999998,
12927
+ input: .65,
12928
+ output: 3.25,
12929
+ cacheRead: .13,
12805
12930
  cacheWrite: 0
12806
12931
  },
12807
12932
  contextWindow: 1e6,
@@ -12867,8 +12992,8 @@ const MODELS = {
12867
12992
  reasoning: true,
12868
12993
  input: ["text"],
12869
12994
  cost: {
12870
- input: 1.2,
12871
- output: 6,
12995
+ input: .78,
12996
+ output: 3.9,
12872
12997
  cacheRead: 0,
12873
12998
  cacheWrite: 0
12874
12999
  },
@@ -13054,8 +13179,8 @@ const MODELS = {
13054
13179
  reasoning: true,
13055
13180
  input: ["text", "image"],
13056
13181
  cost: {
13057
- input: .39999999999999997,
13058
- output: 3.1999999999999997,
13182
+ input: .26,
13183
+ output: 2.08,
13059
13184
  cacheRead: 0,
13060
13185
  cacheWrite: 0
13061
13186
  },
@@ -13071,8 +13196,8 @@ const MODELS = {
13071
13196
  reasoning: true,
13072
13197
  input: ["text", "image"],
13073
13198
  cost: {
13074
- input: .3,
13075
- output: 2.4,
13199
+ input: .195,
13200
+ output: 1.56,
13076
13201
  cacheRead: 0,
13077
13202
  cacheWrite: 0
13078
13203
  },
@@ -13088,13 +13213,13 @@ const MODELS = {
13088
13213
  reasoning: true,
13089
13214
  input: ["text", "image"],
13090
13215
  cost: {
13091
- input: .25,
13092
- output: 1,
13216
+ input: .1625,
13217
+ output: 1.3,
13093
13218
  cacheRead: 0,
13094
13219
  cacheWrite: 0
13095
13220
  },
13096
13221
  contextWindow: 262144,
13097
- maxTokens: 262144
13222
+ maxTokens: 65536
13098
13223
  },
13099
13224
  "qwen/qwen3.5-397b-a17b": {
13100
13225
  id: "qwen/qwen3.5-397b-a17b",
@@ -13105,9 +13230,9 @@ const MODELS = {
13105
13230
  reasoning: true,
13106
13231
  input: ["text", "image"],
13107
13232
  cost: {
13108
- input: .55,
13109
- output: 3.5,
13110
- cacheRead: .55,
13233
+ input: .39,
13234
+ output: 2.34,
13235
+ cacheRead: 0,
13111
13236
  cacheWrite: 0
13112
13237
  },
13113
13238
  contextWindow: 262144,
@@ -13139,8 +13264,8 @@ const MODELS = {
13139
13264
  reasoning: true,
13140
13265
  input: ["text", "image"],
13141
13266
  cost: {
13142
- input: .39999999999999997,
13143
- output: 2.4,
13267
+ input: .26,
13268
+ output: 1.56,
13144
13269
  cacheRead: 0,
13145
13270
  cacheWrite: 0
13146
13271
  },
@@ -13300,18 +13425,18 @@ const MODELS = {
13300
13425
  contextWindow: 163840,
13301
13426
  maxTokens: 163840
13302
13427
  },
13303
- "upstage/solar-pro-3:free": {
13304
- id: "upstage/solar-pro-3:free",
13305
- name: "Upstage: Solar Pro 3 (free)",
13428
+ "upstage/solar-pro-3": {
13429
+ id: "upstage/solar-pro-3",
13430
+ name: "Upstage: Solar Pro 3",
13306
13431
  api: "openai-completions",
13307
13432
  provider: "openrouter",
13308
13433
  baseUrl: "https://openrouter.ai/api/v1",
13309
13434
  reasoning: true,
13310
13435
  input: ["text"],
13311
13436
  cost: {
13312
- input: 0,
13313
- output: 0,
13314
- cacheRead: 0,
13437
+ input: .15,
13438
+ output: .6,
13439
+ cacheRead: .015,
13315
13440
  cacheWrite: 0
13316
13441
  },
13317
13442
  contextWindow: 128e3,
@@ -13496,13 +13621,13 @@ const MODELS = {
13496
13621
  reasoning: true,
13497
13622
  input: ["text"],
13498
13623
  cost: {
13499
- input: .55,
13500
- output: 2,
13501
- cacheRead: 0,
13624
+ input: .6,
13625
+ output: 2.2,
13626
+ cacheRead: .11,
13502
13627
  cacheWrite: 0
13503
13628
  },
13504
- contextWindow: 131e3,
13505
- maxTokens: 131e3
13629
+ contextWindow: 131072,
13630
+ maxTokens: 98304
13506
13631
  },
13507
13632
  "z-ai/glm-4.5-air": {
13508
13633
  id: "z-ai/glm-4.5-air",
@@ -13564,13 +13689,13 @@ const MODELS = {
13564
13689
  reasoning: true,
13565
13690
  input: ["text"],
13566
13691
  cost: {
13567
- input: .35,
13568
- output: 1.71,
13692
+ input: .39,
13693
+ output: 1.9,
13569
13694
  cacheRead: 0,
13570
13695
  cacheWrite: 0
13571
13696
  },
13572
- contextWindow: 202752,
13573
- maxTokens: 131072
13697
+ contextWindow: 204800,
13698
+ maxTokens: 204800
13574
13699
  },
13575
13700
  "z-ai/glm-4.6:exacto": {
13576
13701
  id: "z-ai/glm-4.6:exacto",
@@ -14373,6 +14498,23 @@ const MODELS = {
14373
14498
  contextWindow: 1e6,
14374
14499
  maxTokens: 64e3
14375
14500
  },
14501
+ "google/gemini-3.1-flash-lite-preview": {
14502
+ id: "google/gemini-3.1-flash-lite-preview",
14503
+ name: "Gemini 3.1 Flash Lite Preview",
14504
+ api: "anthropic-messages",
14505
+ provider: "vercel-ai-gateway",
14506
+ baseUrl: "https://ai-gateway.vercel.sh",
14507
+ reasoning: true,
14508
+ input: ["text", "image"],
14509
+ cost: {
14510
+ input: .25,
14511
+ output: 1.5,
14512
+ cacheRead: 0,
14513
+ cacheWrite: 0
14514
+ },
14515
+ contextWindow: 1e6,
14516
+ maxTokens: 65e3
14517
+ },
14376
14518
  "google/gemini-3.1-pro-preview": {
14377
14519
  id: "google/gemini-3.1-pro-preview",
14378
14520
  name: "Gemini 3.1 Pro Preview",
@@ -14390,6 +14532,23 @@ const MODELS = {
14390
14532
  contextWindow: 1e6,
14391
14533
  maxTokens: 64e3
14392
14534
  },
14535
+ "inception/mercury-2": {
14536
+ id: "inception/mercury-2",
14537
+ name: "Mercury 2",
14538
+ api: "anthropic-messages",
14539
+ provider: "vercel-ai-gateway",
14540
+ baseUrl: "https://ai-gateway.vercel.sh",
14541
+ reasoning: true,
14542
+ input: ["text"],
14543
+ cost: {
14544
+ input: .25,
14545
+ output: .75,
14546
+ cacheRead: .024999999999999998,
14547
+ cacheWrite: 0
14548
+ },
14549
+ contextWindow: 128e3,
14550
+ maxTokens: 128e3
14551
+ },
14393
14552
  "inception/mercury-coder-small": {
14394
14553
  id: "inception/mercury-coder-small",
14395
14554
  name: "Mercury Coder Small Beta",
@@ -15291,6 +15450,23 @@ const MODELS = {
15291
15450
  contextWindow: 4e5,
15292
15451
  maxTokens: 128e3
15293
15452
  },
15453
+ "openai/gpt-5.3-chat": {
15454
+ id: "openai/gpt-5.3-chat",
15455
+ name: "GPT-5.3 Chat",
15456
+ api: "anthropic-messages",
15457
+ provider: "vercel-ai-gateway",
15458
+ baseUrl: "https://ai-gateway.vercel.sh",
15459
+ reasoning: true,
15460
+ input: ["text", "image"],
15461
+ cost: {
15462
+ input: 1.75,
15463
+ output: 14,
15464
+ cacheRead: .175,
15465
+ cacheWrite: 0
15466
+ },
15467
+ contextWindow: 128e3,
15468
+ maxTokens: 16384
15469
+ },
15294
15470
  "openai/gpt-5.3-codex": {
15295
15471
  id: "openai/gpt-5.3-codex",
15296
15472
  name: "GPT 5.3 Codex",
@@ -17068,13 +17244,13 @@ function transformMessages(messages, model, normalizeToolCallId) {
17068
17244
  * Resolve cache retention preference.
17069
17245
  * Defaults to "short" and uses PI_CACHE_RETENTION for backward compatibility.
17070
17246
  */
17071
- function resolveCacheRetention$2(cacheRetention) {
17247
+ function resolveCacheRetention$1(cacheRetention) {
17072
17248
  if (cacheRetention) return cacheRetention;
17073
17249
  if (typeof process !== "undefined" && process.env.PI_CACHE_RETENTION === "long") return "long";
17074
17250
  return "short";
17075
17251
  }
17076
17252
  function getCacheControl(baseUrl, cacheRetention) {
17077
- const retention = resolveCacheRetention$2(cacheRetention);
17253
+ const retention = resolveCacheRetention$1(cacheRetention);
17078
17254
  if (retention === "none") return { retention };
17079
17255
  const ttl = retention === "long" && baseUrl.includes("api.anthropic.com") ? "1h" : void 0;
17080
17256
  return {
@@ -17329,7 +17505,7 @@ const streamAnthropic = (model, context, options) => {
17329
17505
  }
17330
17506
  }
17331
17507
  } else if (event.type === "message_delta") {
17332
- if (event.delta.stop_reason) output.stopReason = mapStopReason$4(event.delta.stop_reason);
17508
+ if (event.delta.stop_reason) output.stopReason = mapStopReason$3(event.delta.stop_reason);
17333
17509
  if (event.usage.input_tokens != null) output.usage.input = event.usage.input_tokens;
17334
17510
  if (event.usage.output_tokens != null) output.usage.output = event.usage.output_tokens;
17335
17511
  if (event.usage.cache_read_input_tokens != null) output.usage.cacheRead = event.usage.cache_read_input_tokens;
@@ -17362,14 +17538,14 @@ const streamAnthropic = (model, context, options) => {
17362
17538
  /**
17363
17539
  * Check if a model supports adaptive thinking (Opus 4.6 and Sonnet 4.6)
17364
17540
  */
17365
- function supportsAdaptiveThinking$1(modelId) {
17541
+ function supportsAdaptiveThinking(modelId) {
17366
17542
  return modelId.includes("opus-4-6") || modelId.includes("opus-4.6") || modelId.includes("sonnet-4-6") || modelId.includes("sonnet-4.6");
17367
17543
  }
17368
17544
  /**
17369
17545
  * Map ThinkingLevel to Anthropic effort levels for adaptive thinking.
17370
17546
  * Note: effort "max" is only valid on Opus 4.6.
17371
17547
  */
17372
- function mapThinkingLevelToEffort$1(level, modelId) {
17548
+ function mapThinkingLevelToEffort(level, modelId) {
17373
17549
  switch (level) {
17374
17550
  case "minimal": return "low";
17375
17551
  case "low": return "low";
@@ -17387,8 +17563,8 @@ const streamSimpleAnthropic = (model, context, options) => {
17387
17563
  ...base,
17388
17564
  thinkingEnabled: false
17389
17565
  });
17390
- if (supportsAdaptiveThinking$1(model.id)) {
17391
- const effort = mapThinkingLevelToEffort$1(options.reasoning, model.id);
17566
+ if (supportsAdaptiveThinking(model.id)) {
17567
+ const effort = mapThinkingLevelToEffort(options.reasoning, model.id);
17392
17568
  return streamAnthropic(model, context, {
17393
17569
  ...base,
17394
17570
  thinkingEnabled: true,
@@ -17407,7 +17583,7 @@ function isOAuthToken(apiKey) {
17407
17583
  return apiKey.includes("sk-ant-oat");
17408
17584
  }
17409
17585
  function createClient$5(model, apiKey, interleavedThinking, optionsHeaders, dynamicHeaders) {
17410
- const needsInterleavedBeta = interleavedThinking && !supportsAdaptiveThinking$1(model.id);
17586
+ const needsInterleavedBeta = interleavedThinking && !supportsAdaptiveThinking(model.id);
17411
17587
  if (model.provider === "github-copilot") {
17412
17588
  const betaFeatures = [];
17413
17589
  if (needsInterleavedBeta) betaFeatures.push("interleaved-thinking-2025-05-14");
@@ -17462,7 +17638,7 @@ function buildParams$5(model, context, isOAuthToken, options) {
17462
17638
  const { cacheControl } = getCacheControl(model.baseUrl, options?.cacheRetention);
17463
17639
  const params = {
17464
17640
  model: model.id,
17465
- messages: convertMessages$3(context.messages, model, isOAuthToken, cacheControl),
17641
+ messages: convertMessages$2(context.messages, model, isOAuthToken, cacheControl),
17466
17642
  max_tokens: options?.maxTokens || model.maxTokens / 3 | 0,
17467
17643
  stream: true
17468
17644
  };
@@ -17484,7 +17660,7 @@ function buildParams$5(model, context, isOAuthToken, options) {
17484
17660
  }];
17485
17661
  if (options?.temperature !== void 0 && !options?.thinkingEnabled) params.temperature = options.temperature;
17486
17662
  if (context.tools) params.tools = convertTools$2(context.tools, isOAuthToken);
17487
- if (options?.thinkingEnabled && model.reasoning) if (supportsAdaptiveThinking$1(model.id)) {
17663
+ if (options?.thinkingEnabled && model.reasoning) if (supportsAdaptiveThinking(model.id)) {
17488
17664
  params.thinking = { type: "adaptive" };
17489
17665
  if (options.effort) params.output_config = { effort: options.effort };
17490
17666
  } else params.thinking = {
@@ -17499,12 +17675,12 @@ function buildParams$5(model, context, isOAuthToken, options) {
17499
17675
  else params.tool_choice = options.toolChoice;
17500
17676
  return params;
17501
17677
  }
17502
- function normalizeToolCallId$1(id) {
17678
+ function normalizeToolCallId(id) {
17503
17679
  return id.replace(/[^a-zA-Z0-9_-]/g, "_").slice(0, 64);
17504
17680
  }
17505
- function convertMessages$3(messages, model, isOAuthToken, cacheControl) {
17681
+ function convertMessages$2(messages, model, isOAuthToken, cacheControl) {
17506
17682
  const params = [];
17507
- const transformedMessages = transformMessages(messages, model, normalizeToolCallId$1);
17683
+ const transformedMessages = transformMessages(messages, model, normalizeToolCallId);
17508
17684
  for (let i = 0; i < transformedMessages.length; i++) {
17509
17685
  const msg = transformedMessages[i];
17510
17686
  if (msg.role === "user") if (typeof msg.content === "string") {
@@ -17631,7 +17807,7 @@ function convertTools$2(tools, isOAuthToken) {
17631
17807
  };
17632
17808
  });
17633
17809
  }
17634
- function mapStopReason$4(reason) {
17810
+ function mapStopReason$3(reason) {
17635
17811
  switch (reason) {
17636
17812
  case "end_turn": return "stop";
17637
17813
  case "max_tokens": return "length";
@@ -24191,12 +24367,12 @@ async function processResponsesStream(openaiStream, output, stream, model, optio
24191
24367
  const serviceTier = response?.service_tier ?? options.serviceTier;
24192
24368
  options.applyServiceTierPricing(output.usage, serviceTier);
24193
24369
  }
24194
- output.stopReason = mapStopReason$3(response?.status);
24370
+ output.stopReason = mapStopReason$2(response?.status);
24195
24371
  if (output.content.some((b) => b.type === "toolCall") && output.stopReason === "stop") output.stopReason = "toolUse";
24196
24372
  } else if (event.type === "error") throw new Error(`Error Code ${event.code}: ${event.message}` || "Unknown error");
24197
24373
  else if (event.type === "response.failed") throw new Error("Unknown error");
24198
24374
  }
24199
- function mapStopReason$3(status) {
24375
+ function mapStopReason$2(status) {
24200
24376
  if (!status) return "stop";
24201
24377
  switch (status) {
24202
24378
  case "completed": return "stop";
@@ -24424,7 +24600,7 @@ function requiresToolCallId(modelId) {
24424
24600
  /**
24425
24601
  * Convert internal messages to Gemini Content[] format.
24426
24602
  */
24427
- function convertMessages$2(model, context) {
24603
+ function convertMessages$1(model, context) {
24428
24604
  const contents = [];
24429
24605
  const normalizeToolCallId = (id) => {
24430
24606
  if (!requiresToolCallId(model.id)) return id;
@@ -24551,7 +24727,7 @@ function mapToolChoice(choice) {
24551
24727
  /**
24552
24728
  * Map Gemini FinishReason to our StopReason.
24553
24729
  */
24554
- function mapStopReason$2(reason) {
24730
+ function mapStopReason$1(reason) {
24555
24731
  switch (reason) {
24556
24732
  case FinishReason.STOP: return "stop";
24557
24733
  case FinishReason.MAX_TOKENS: return "length";
@@ -24733,7 +24909,7 @@ const streamGoogle = (model, context, options) => {
24733
24909
  }
24734
24910
  }
24735
24911
  if (candidate?.finishReason) {
24736
- output.stopReason = mapStopReason$2(candidate.finishReason);
24912
+ output.stopReason = mapStopReason$1(candidate.finishReason);
24737
24913
  if (output.content.some((b) => b.type === "toolCall")) output.stopReason = "toolUse";
24738
24914
  }
24739
24915
  if (chunk.usageMetadata) {
@@ -24798,7 +24974,7 @@ const streamSimpleGoogle = (model, context, options) => {
24798
24974
  });
24799
24975
  const effort = clampReasoning(options.reasoning);
24800
24976
  const googleModel = model;
24801
- if (isGemini3ProModel$1(googleModel) || isGemini3FlashModel$1(googleModel)) return streamGoogle(model, context, {
24977
+ if (isGemini3ProModel$2(googleModel) || isGemini3FlashModel$2(googleModel)) return streamGoogle(model, context, {
24802
24978
  ...base,
24803
24979
  thinking: {
24804
24980
  enabled: true,
@@ -24829,7 +25005,7 @@ function createClient$3(model, apiKey, optionsHeaders) {
24829
25005
  });
24830
25006
  }
24831
25007
  function buildParams$3(model, context, options = {}) {
24832
- const contents = convertMessages$2(model, context);
25008
+ const contents = convertMessages$1(model, context);
24833
25009
  const generationConfig = {};
24834
25010
  if (options.temperature !== void 0) generationConfig.temperature = options.temperature;
24835
25011
  if (options.maxTokens !== void 0) generationConfig.maxOutputTokens = options.maxTokens;
@@ -24856,14 +25032,14 @@ function buildParams$3(model, context, options = {}) {
24856
25032
  config
24857
25033
  };
24858
25034
  }
24859
- function isGemini3ProModel$1(model) {
24860
- return model.id.includes("3-pro");
25035
+ function isGemini3ProModel$2(model) {
25036
+ return /gemini-3(?:\.\d+)?-pro/.test(model.id.toLowerCase());
24861
25037
  }
24862
- function isGemini3FlashModel$1(model) {
24863
- return model.id.includes("3-flash");
25038
+ function isGemini3FlashModel$2(model) {
25039
+ return /gemini-3(?:\.\d+)?-flash/.test(model.id.toLowerCase());
24864
25040
  }
24865
25041
  function getGemini3ThinkingLevel$1(effort, model) {
24866
- if (isGemini3ProModel$1(model)) switch (effort) {
25042
+ if (isGemini3ProModel$2(model)) switch (effort) {
24867
25043
  case "minimal":
24868
25044
  case "low": return "LOW";
24869
25045
  case "medium":
@@ -24908,7 +25084,7 @@ var GEMINI_CLI_HEADERS = {
24908
25084
  pluginType: "GEMINI"
24909
25085
  })
24910
25086
  };
24911
- var DEFAULT_ANTIGRAVITY_VERSION = "1.15.8";
25087
+ var DEFAULT_ANTIGRAVITY_VERSION = "1.18.3";
24912
25088
  function getAntigravityHeaders() {
24913
25089
  const version = process.env.PI_AI_ANTIGRAVITY_VERSION || DEFAULT_ANTIGRAVITY_VERSION;
24914
25090
  return {
@@ -25002,6 +25178,15 @@ function isClaudeThinkingModel(modelId) {
25002
25178
  const normalized = modelId.toLowerCase();
25003
25179
  return normalized.includes("claude") && normalized.includes("thinking");
25004
25180
  }
25181
+ function isGemini3ProModel$1(modelId) {
25182
+ return /gemini-3(?:\.1)?-pro/.test(modelId.toLowerCase());
25183
+ }
25184
+ function isGemini3FlashModel$1(modelId) {
25185
+ return /gemini-3(?:\.1)?-flash/.test(modelId.toLowerCase());
25186
+ }
25187
+ function isGemini3Model(modelId) {
25188
+ return isGemini3ProModel$1(modelId) || isGemini3FlashModel$1(modelId);
25189
+ }
25005
25190
  /**
25006
25191
  * Check if an error is retryable (rate limit, server error, network error, etc.)
25007
25192
  */
@@ -25404,7 +25589,7 @@ const streamSimpleGoogleGeminiCli = (model, context, options) => {
25404
25589
  thinking: { enabled: false }
25405
25590
  });
25406
25591
  const effort = clampReasoning(options.reasoning);
25407
- if (model.id.includes("3-pro") || model.id.includes("3-flash")) return streamGoogleGeminiCli(model, context, {
25592
+ if (isGemini3Model(model.id)) return streamGoogleGeminiCli(model, context, {
25408
25593
  ...base,
25409
25594
  thinking: {
25410
25595
  enabled: true,
@@ -25432,7 +25617,7 @@ const streamSimpleGoogleGeminiCli = (model, context, options) => {
25432
25617
  });
25433
25618
  };
25434
25619
  function buildRequest(model, context, projectId, options = {}, isAntigravity = false) {
25435
- const contents = convertMessages$2(model, context);
25620
+ const contents = convertMessages$1(model, context);
25436
25621
  const generationConfig = {};
25437
25622
  if (options.temperature !== void 0) generationConfig.temperature = options.temperature;
25438
25623
  if (options.maxTokens !== void 0) generationConfig.maxOutputTokens = options.maxTokens;
@@ -25471,7 +25656,7 @@ function buildRequest(model, context, projectId, options = {}, isAntigravity = f
25471
25656
  };
25472
25657
  }
25473
25658
  function getGeminiCliThinkingLevel(effort, modelId) {
25474
- if (modelId.includes("3-pro")) switch (effort) {
25659
+ if (isGemini3ProModel$1(modelId)) switch (effort) {
25475
25660
  case "minimal":
25476
25661
  case "low": return "LOW";
25477
25662
  case "medium":
@@ -25639,7 +25824,7 @@ const streamGoogleVertex = (model, context, options) => {
25639
25824
  }
25640
25825
  }
25641
25826
  if (candidate?.finishReason) {
25642
- output.stopReason = mapStopReason$2(candidate.finishReason);
25827
+ output.stopReason = mapStopReason$1(candidate.finishReason);
25643
25828
  if (output.content.some((b) => b.type === "toolCall")) output.stopReason = "toolUse";
25644
25829
  }
25645
25830
  if (chunk.usageMetadata) {
@@ -25742,7 +25927,7 @@ function resolveLocation(options) {
25742
25927
  return location;
25743
25928
  }
25744
25929
  function buildParams$2(model, context, options = {}) {
25745
- const contents = convertMessages$2(model, context);
25930
+ const contents = convertMessages$1(model, context);
25746
25931
  const generationConfig = {};
25747
25932
  if (options.temperature !== void 0) generationConfig.temperature = options.temperature;
25748
25933
  if (options.maxTokens !== void 0) generationConfig.maxOutputTokens = options.maxTokens;
@@ -25770,10 +25955,10 @@ function buildParams$2(model, context, options = {}) {
25770
25955
  };
25771
25956
  }
25772
25957
  function isGemini3ProModel(model) {
25773
- return model.id.includes("3-pro");
25958
+ return /gemini-3(?:\.\d+)?-pro/.test(model.id.toLowerCase());
25774
25959
  }
25775
25960
  function isGemini3FlashModel(model) {
25776
- return model.id.includes("3-flash");
25961
+ return /gemini-3(?:\.\d+)?-flash/.test(model.id.toLowerCase());
25777
25962
  }
25778
25963
  function getGemini3ThinkingLevel(effort, model) {
25779
25964
  if (isGemini3ProModel(model)) switch (effort) {
@@ -25917,7 +26102,7 @@ const streamOpenAICompletions = (model, context, options) => {
25917
26102
  }
25918
26103
  const choice = chunk.choices?.[0];
25919
26104
  if (!choice) continue;
25920
- if (choice.finish_reason) output.stopReason = mapStopReason$1(choice.finish_reason);
26105
+ if (choice.finish_reason) output.stopReason = mapStopReason(choice.finish_reason);
25921
26106
  if (choice.delta) {
25922
26107
  if (choice.delta.content !== null && choice.delta.content !== void 0 && choice.delta.content.length > 0) {
25923
26108
  if (!currentBlock || currentBlock.type !== "text") {
@@ -26085,7 +26270,7 @@ function createClient$1(model, context, apiKey, optionsHeaders) {
26085
26270
  }
26086
26271
  function buildParams$1(model, context, options) {
26087
26272
  const compat = getCompat(model);
26088
- const messages = convertMessages$1(model, context, compat);
26273
+ const messages = convertMessages(model, context, compat);
26089
26274
  maybeAddOpenRouterAnthropicCacheControl(model, messages);
26090
26275
  const params = {
26091
26276
  model: model.id,
@@ -26101,7 +26286,7 @@ function buildParams$1(model, context, options) {
26101
26286
  else if (hasToolHistory(context.messages)) params.tools = [];
26102
26287
  if (options?.toolChoice) params.tool_choice = options.toolChoice;
26103
26288
  if ((compat.thinkingFormat === "zai" || compat.thinkingFormat === "qwen") && model.reasoning) params.enable_thinking = !!options?.reasoningEffort;
26104
- else if (options?.reasoningEffort && model.reasoning && compat.supportsReasoningEffort) params.reasoning_effort = options.reasoningEffort;
26289
+ else if (options?.reasoningEffort && model.reasoning && compat.supportsReasoningEffort) params.reasoning_effort = mapReasoningEffort(options.reasoningEffort, compat.reasoningEffortMap);
26105
26290
  if (model.baseUrl.includes("openrouter.ai") && model.compat?.openRouterRouting) params.provider = model.compat.openRouterRouting;
26106
26291
  if (model.baseUrl.includes("ai-gateway.vercel.sh") && model.compat?.vercelGatewayRouting) {
26107
26292
  const routing = model.compat.vercelGatewayRouting;
@@ -26114,6 +26299,9 @@ function buildParams$1(model, context, options) {
26114
26299
  }
26115
26300
  return params;
26116
26301
  }
26302
+ function mapReasoningEffort(effort, reasoningEffortMap) {
26303
+ return reasoningEffortMap[effort] ?? effort;
26304
+ }
26117
26305
  function maybeAddOpenRouterAnthropicCacheControl(model, messages) {
26118
26306
  if (model.provider !== "openrouter" || !model.id.startsWith("anthropic/")) return;
26119
26307
  for (let i = messages.length - 1; i >= 0; i--) {
@@ -26137,7 +26325,7 @@ function maybeAddOpenRouterAnthropicCacheControl(model, messages) {
26137
26325
  }
26138
26326
  }
26139
26327
  }
26140
- function convertMessages$1(model, context, compat) {
26328
+ function convertMessages(model, context, compat) {
26141
26329
  const params = [];
26142
26330
  const normalizeToolCallId = (id) => {
26143
26331
  if (compat.requiresMistralToolIds) return normalizeMistralToolId(id);
@@ -26289,7 +26477,7 @@ function convertTools(tools, compat) {
26289
26477
  }
26290
26478
  }));
26291
26479
  }
26292
- function mapStopReason$1(reason) {
26480
+ function mapStopReason(reason) {
26293
26481
  if (reason === null) return "stop";
26294
26482
  switch (reason) {
26295
26483
  case "stop": return "stop";
@@ -26315,11 +26503,20 @@ function detectCompat(model) {
26315
26503
  const isNonStandard = provider === "cerebras" || baseUrl.includes("cerebras.ai") || provider === "xai" || baseUrl.includes("api.x.ai") || provider === "mistral" || baseUrl.includes("mistral.ai") || baseUrl.includes("chutes.ai") || baseUrl.includes("deepseek.com") || isZai || provider === "opencode" || baseUrl.includes("opencode.ai");
26316
26504
  const useMaxTokens = provider === "mistral" || baseUrl.includes("mistral.ai") || baseUrl.includes("chutes.ai");
26317
26505
  const isGrok = provider === "xai" || baseUrl.includes("api.x.ai");
26506
+ const isGroq = provider === "groq" || baseUrl.includes("groq.com");
26318
26507
  const isMistral = provider === "mistral" || baseUrl.includes("mistral.ai");
26508
+ const reasoningEffortMap = isGroq && model.id === "qwen/qwen3-32b" ? {
26509
+ minimal: "default",
26510
+ low: "default",
26511
+ medium: "default",
26512
+ high: "default",
26513
+ xhigh: "default"
26514
+ } : {};
26319
26515
  return {
26320
26516
  supportsStore: !isNonStandard,
26321
26517
  supportsDeveloperRole: !isNonStandard,
26322
26518
  supportsReasoningEffort: !isGrok && !isZai,
26519
+ reasoningEffortMap,
26323
26520
  supportsUsageInStreaming: true,
26324
26521
  maxTokensField: useMaxTokens ? "max_tokens" : "max_completion_tokens",
26325
26522
  requiresToolResultName: isMistral,
@@ -26343,6 +26540,7 @@ function getCompat(model) {
26343
26540
  supportsStore: model.compat.supportsStore ?? detected.supportsStore,
26344
26541
  supportsDeveloperRole: model.compat.supportsDeveloperRole ?? detected.supportsDeveloperRole,
26345
26542
  supportsReasoningEffort: model.compat.supportsReasoningEffort ?? detected.supportsReasoningEffort,
26543
+ reasoningEffortMap: model.compat.reasoningEffortMap ?? detected.reasoningEffortMap,
26346
26544
  supportsUsageInStreaming: model.compat.supportsUsageInStreaming ?? detected.supportsUsageInStreaming,
26347
26545
  maxTokensField: model.compat.maxTokensField ?? detected.maxTokensField,
26348
26546
  requiresToolResultName: model.compat.requiresToolResultName ?? detected.requiresToolResultName,
@@ -26364,7 +26562,7 @@ var OPENAI_TOOL_CALL_PROVIDERS = new Set([
26364
26562
  * Resolve cache retention preference.
26365
26563
  * Defaults to "short" and uses PI_CACHE_RETENTION for backward compatibility.
26366
26564
  */
26367
- function resolveCacheRetention$1(cacheRetention) {
26565
+ function resolveCacheRetention(cacheRetention) {
26368
26566
  if (cacheRetention) return cacheRetention;
26369
26567
  if (typeof process !== "undefined" && process.env.PI_CACHE_RETENTION === "long") return "long";
26370
26568
  return "short";
@@ -26475,7 +26673,7 @@ function createClient(model, context, apiKey, optionsHeaders) {
26475
26673
  }
26476
26674
  function buildParams(model, context, options) {
26477
26675
  const messages = convertResponsesMessages(model, context, OPENAI_TOOL_CALL_PROVIDERS);
26478
- const cacheRetention = resolveCacheRetention$1(options?.cacheRetention);
26676
+ const cacheRetention = resolveCacheRetention(options?.cacheRetention);
26479
26677
  const params = {
26480
26678
  model: model.id,
26481
26679
  input: messages,
@@ -26521,510 +26719,14 @@ function applyServiceTierPricing(usage, serviceTier) {
26521
26719
  usage.cost.cacheWrite *= multiplier;
26522
26720
  usage.cost.total = usage.cost.input + usage.cost.output + usage.cost.cacheRead + usage.cost.cacheWrite;
26523
26721
  }
26524
- const streamBedrock = (model, context, options = {}) => {
26525
- const stream = new AssistantMessageEventStream();
26526
- (async () => {
26527
- const output = {
26528
- role: "assistant",
26529
- content: [],
26530
- api: "bedrock-converse-stream",
26531
- provider: model.provider,
26532
- model: model.id,
26533
- usage: {
26534
- input: 0,
26535
- output: 0,
26536
- cacheRead: 0,
26537
- cacheWrite: 0,
26538
- totalTokens: 0,
26539
- cost: {
26540
- input: 0,
26541
- output: 0,
26542
- cacheRead: 0,
26543
- cacheWrite: 0,
26544
- total: 0
26545
- }
26546
- },
26547
- stopReason: "stop",
26548
- timestamp: Date.now()
26549
- };
26550
- const blocks = output.content;
26551
- const config = {
26552
- region: options.region,
26553
- profile: options.profile
26554
- };
26555
- if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) {
26556
- config.region = config.region || process.env.AWS_REGION || process.env.AWS_DEFAULT_REGION;
26557
- if (process.env.AWS_BEDROCK_SKIP_AUTH === "1") config.credentials = {
26558
- accessKeyId: "dummy-access-key",
26559
- secretAccessKey: "dummy-secret-key"
26560
- };
26561
- if (process.env.HTTP_PROXY || process.env.HTTPS_PROXY || process.env.NO_PROXY || process.env.http_proxy || process.env.https_proxy || process.env.no_proxy) {
26562
- const nodeHttpHandler = await import("../_2.mjs");
26563
- const agent = new (await (import("../_105.mjs").then((m) => /* @__PURE__ */ __toESM(m.default, 1)))).ProxyAgent();
26564
- config.requestHandler = new nodeHttpHandler.NodeHttpHandler({
26565
- httpAgent: agent,
26566
- httpsAgent: agent
26567
- });
26568
- } else if (process.env.AWS_BEDROCK_FORCE_HTTP1 === "1") config.requestHandler = new (await (import("../_2.mjs"))).NodeHttpHandler();
26569
- }
26570
- config.region = config.region || "us-east-1";
26571
- try {
26572
- const client = new BedrockRuntimeClient(config);
26573
- const cacheRetention = resolveCacheRetention(options.cacheRetention);
26574
- const commandInput = {
26575
- modelId: model.id,
26576
- messages: convertMessages(context, model, cacheRetention),
26577
- system: buildSystemPrompt(context.systemPrompt, model, cacheRetention),
26578
- inferenceConfig: {
26579
- maxTokens: options.maxTokens,
26580
- temperature: options.temperature
26581
- },
26582
- toolConfig: convertToolConfig(context.tools, options.toolChoice),
26583
- additionalModelRequestFields: buildAdditionalModelRequestFields(model, options)
26584
- };
26585
- options?.onPayload?.(commandInput);
26586
- const command = new ConverseStreamCommand(commandInput);
26587
- const response = await client.send(command, { abortSignal: options.signal });
26588
- for await (const item of response.stream) if (item.messageStart) {
26589
- if (item.messageStart.role !== ConversationRole.ASSISTANT) throw new Error("Unexpected assistant message start but got user message start instead");
26590
- stream.push({
26591
- type: "start",
26592
- partial: output
26593
- });
26594
- } else if (item.contentBlockStart) handleContentBlockStart(item.contentBlockStart, blocks, output, stream);
26595
- else if (item.contentBlockDelta) handleContentBlockDelta(item.contentBlockDelta, blocks, output, stream);
26596
- else if (item.contentBlockStop) handleContentBlockStop(item.contentBlockStop, blocks, output, stream);
26597
- else if (item.messageStop) output.stopReason = mapStopReason(item.messageStop.stopReason);
26598
- else if (item.metadata) handleMetadata(item.metadata, model, output);
26599
- else if (item.internalServerException) throw new Error(`Internal server error: ${item.internalServerException.message}`);
26600
- else if (item.modelStreamErrorException) throw new Error(`Model stream error: ${item.modelStreamErrorException.message}`);
26601
- else if (item.validationException) throw new Error(`Validation error: ${item.validationException.message}`);
26602
- else if (item.throttlingException) throw new Error(`Throttling error: ${item.throttlingException.message}`);
26603
- else if (item.serviceUnavailableException) throw new Error(`Service unavailable: ${item.serviceUnavailableException.message}`);
26604
- if (options.signal?.aborted) throw new Error("Request was aborted");
26605
- if (output.stopReason === "error" || output.stopReason === "aborted") throw new Error("An unknown error occurred");
26606
- stream.push({
26607
- type: "done",
26608
- reason: output.stopReason,
26609
- message: output
26610
- });
26611
- stream.end();
26612
- } catch (error) {
26613
- for (const block of output.content) {
26614
- delete block.index;
26615
- delete block.partialJson;
26616
- }
26617
- output.stopReason = options.signal?.aborted ? "aborted" : "error";
26618
- output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
26619
- stream.push({
26620
- type: "error",
26621
- reason: output.stopReason,
26622
- error: output
26623
- });
26624
- stream.end();
26625
- }
26626
- })();
26627
- return stream;
26628
- };
26629
- const streamSimpleBedrock = (model, context, options) => {
26630
- const base = buildBaseOptions(model, options, void 0);
26631
- if (!options?.reasoning) return streamBedrock(model, context, {
26632
- ...base,
26633
- reasoning: void 0
26634
- });
26635
- if (model.id.includes("anthropic.claude") || model.id.includes("anthropic/claude")) {
26636
- if (supportsAdaptiveThinking(model.id)) return streamBedrock(model, context, {
26637
- ...base,
26638
- reasoning: options.reasoning,
26639
- thinkingBudgets: options.thinkingBudgets
26640
- });
26641
- const adjusted = adjustMaxTokensForThinking(base.maxTokens || 0, model.maxTokens, options.reasoning, options.thinkingBudgets);
26642
- return streamBedrock(model, context, {
26643
- ...base,
26644
- maxTokens: adjusted.maxTokens,
26645
- reasoning: options.reasoning,
26646
- thinkingBudgets: {
26647
- ...options.thinkingBudgets || {},
26648
- [clampReasoning(options.reasoning)]: adjusted.thinkingBudget
26649
- }
26650
- });
26651
- }
26652
- return streamBedrock(model, context, {
26653
- ...base,
26654
- reasoning: options.reasoning,
26655
- thinkingBudgets: options.thinkingBudgets
26656
- });
26657
- };
26658
- function handleContentBlockStart(event, blocks, output, stream) {
26659
- const index = event.contentBlockIndex;
26660
- const start = event.start;
26661
- if (start?.toolUse) {
26662
- const block = {
26663
- type: "toolCall",
26664
- id: start.toolUse.toolUseId || "",
26665
- name: start.toolUse.name || "",
26666
- arguments: {},
26667
- partialJson: "",
26668
- index
26669
- };
26670
- output.content.push(block);
26671
- stream.push({
26672
- type: "toolcall_start",
26673
- contentIndex: blocks.length - 1,
26674
- partial: output
26675
- });
26676
- }
26677
- }
26678
- function handleContentBlockDelta(event, blocks, output, stream) {
26679
- const contentBlockIndex = event.contentBlockIndex;
26680
- const delta = event.delta;
26681
- let index = blocks.findIndex((b) => b.index === contentBlockIndex);
26682
- let block = blocks[index];
26683
- if (delta?.text !== void 0) {
26684
- if (!block) {
26685
- const newBlock = {
26686
- type: "text",
26687
- text: "",
26688
- index: contentBlockIndex
26689
- };
26690
- output.content.push(newBlock);
26691
- index = blocks.length - 1;
26692
- block = blocks[index];
26693
- stream.push({
26694
- type: "text_start",
26695
- contentIndex: index,
26696
- partial: output
26697
- });
26698
- }
26699
- if (block.type === "text") {
26700
- block.text += delta.text;
26701
- stream.push({
26702
- type: "text_delta",
26703
- contentIndex: index,
26704
- delta: delta.text,
26705
- partial: output
26706
- });
26707
- }
26708
- } else if (delta?.toolUse && block?.type === "toolCall") {
26709
- block.partialJson = (block.partialJson || "") + (delta.toolUse.input || "");
26710
- block.arguments = parseStreamingJson(block.partialJson);
26711
- stream.push({
26712
- type: "toolcall_delta",
26713
- contentIndex: index,
26714
- delta: delta.toolUse.input || "",
26715
- partial: output
26716
- });
26717
- } else if (delta?.reasoningContent) {
26718
- let thinkingBlock = block;
26719
- let thinkingIndex = index;
26720
- if (!thinkingBlock) {
26721
- const newBlock = {
26722
- type: "thinking",
26723
- thinking: "",
26724
- thinkingSignature: "",
26725
- index: contentBlockIndex
26726
- };
26727
- output.content.push(newBlock);
26728
- thinkingIndex = blocks.length - 1;
26729
- thinkingBlock = blocks[thinkingIndex];
26730
- stream.push({
26731
- type: "thinking_start",
26732
- contentIndex: thinkingIndex,
26733
- partial: output
26734
- });
26735
- }
26736
- if (thinkingBlock?.type === "thinking") {
26737
- if (delta.reasoningContent.text) {
26738
- thinkingBlock.thinking += delta.reasoningContent.text;
26739
- stream.push({
26740
- type: "thinking_delta",
26741
- contentIndex: thinkingIndex,
26742
- delta: delta.reasoningContent.text,
26743
- partial: output
26744
- });
26745
- }
26746
- if (delta.reasoningContent.signature) thinkingBlock.thinkingSignature = (thinkingBlock.thinkingSignature || "") + delta.reasoningContent.signature;
26747
- }
26748
- }
26749
- }
26750
- function handleMetadata(event, model, output) {
26751
- if (event.usage) {
26752
- output.usage.input = event.usage.inputTokens || 0;
26753
- output.usage.output = event.usage.outputTokens || 0;
26754
- output.usage.cacheRead = event.usage.cacheReadInputTokens || 0;
26755
- output.usage.cacheWrite = event.usage.cacheWriteInputTokens || 0;
26756
- output.usage.totalTokens = event.usage.totalTokens || output.usage.input + output.usage.output;
26757
- calculateCost(model, output.usage);
26758
- }
26759
- }
26760
- function handleContentBlockStop(event, blocks, output, stream) {
26761
- const index = blocks.findIndex((b) => b.index === event.contentBlockIndex);
26762
- const block = blocks[index];
26763
- if (!block) return;
26764
- delete block.index;
26765
- switch (block.type) {
26766
- case "text":
26767
- stream.push({
26768
- type: "text_end",
26769
- contentIndex: index,
26770
- content: block.text,
26771
- partial: output
26772
- });
26773
- break;
26774
- case "thinking":
26775
- stream.push({
26776
- type: "thinking_end",
26777
- contentIndex: index,
26778
- content: block.thinking,
26779
- partial: output
26780
- });
26781
- break;
26782
- case "toolCall":
26783
- block.arguments = parseStreamingJson(block.partialJson);
26784
- delete block.partialJson;
26785
- stream.push({
26786
- type: "toolcall_end",
26787
- contentIndex: index,
26788
- toolCall: block,
26789
- partial: output
26790
- });
26791
- break;
26792
- }
26793
- }
26794
- /**
26795
- * Check if the model supports adaptive thinking (Opus 4.6 and Sonnet 4.6).
26796
- */
26797
- function supportsAdaptiveThinking(modelId) {
26798
- return modelId.includes("opus-4-6") || modelId.includes("opus-4.6") || modelId.includes("sonnet-4-6") || modelId.includes("sonnet-4.6");
26799
- }
26800
- function mapThinkingLevelToEffort(level, modelId) {
26801
- switch (level) {
26802
- case "minimal":
26803
- case "low": return "low";
26804
- case "medium": return "medium";
26805
- case "high": return "high";
26806
- case "xhigh": return modelId.includes("opus-4-6") || modelId.includes("opus-4.6") ? "max" : "high";
26807
- default: return "high";
26808
- }
26809
- }
26810
- /**
26811
- * Resolve cache retention preference.
26812
- * Defaults to "short" and uses PI_CACHE_RETENTION for backward compatibility.
26813
- */
26814
- function resolveCacheRetention(cacheRetention) {
26815
- if (cacheRetention) return cacheRetention;
26816
- if (typeof process !== "undefined" && process.env.PI_CACHE_RETENTION === "long") return "long";
26817
- return "short";
26818
- }
26819
- /**
26820
- * Check if the model supports prompt caching.
26821
- * Supported: Claude 3.5 Haiku, Claude 3.7 Sonnet, Claude 4.x models
26822
- */
26823
- function supportsPromptCaching(model) {
26824
- if (model.cost.cacheRead || model.cost.cacheWrite) return true;
26825
- const id = model.id.toLowerCase();
26826
- if (id.includes("claude") && (id.includes("-4-") || id.includes("-4."))) return true;
26827
- if (id.includes("claude-3-7-sonnet")) return true;
26828
- if (id.includes("claude-3-5-haiku")) return true;
26829
- return false;
26830
- }
26831
- /**
26832
- * Check if the model supports thinking signatures in reasoningContent.
26833
- * Only Anthropic Claude models support the signature field.
26834
- * Other models (OpenAI, Qwen, Minimax, Moonshot, etc.) reject it with:
26835
- * "This model doesn't support the reasoningContent.reasoningText.signature field"
26836
- */
26837
- function supportsThinkingSignature(model) {
26838
- const id = model.id.toLowerCase();
26839
- return id.includes("anthropic.claude") || id.includes("anthropic/claude");
26840
- }
26841
- function buildSystemPrompt(systemPrompt, model, cacheRetention) {
26842
- if (!systemPrompt) return void 0;
26843
- const blocks = [{ text: sanitizeSurrogates(systemPrompt) }];
26844
- if (cacheRetention !== "none" && supportsPromptCaching(model)) blocks.push({ cachePoint: {
26845
- type: CachePointType.DEFAULT,
26846
- ...cacheRetention === "long" ? { ttl: CacheTTL.ONE_HOUR } : {}
26847
- } });
26848
- return blocks;
26849
- }
26850
- function normalizeToolCallId(id) {
26851
- const sanitized = id.replace(/[^a-zA-Z0-9_-]/g, "_");
26852
- return sanitized.length > 64 ? sanitized.slice(0, 64) : sanitized;
26853
- }
26854
- function convertMessages(context, model, cacheRetention) {
26855
- const result = [];
26856
- const transformedMessages = transformMessages(context.messages, model, normalizeToolCallId);
26857
- for (let i = 0; i < transformedMessages.length; i++) {
26858
- const m = transformedMessages[i];
26859
- switch (m.role) {
26860
- case "user":
26861
- result.push({
26862
- role: ConversationRole.USER,
26863
- content: typeof m.content === "string" ? [{ text: sanitizeSurrogates(m.content) }] : m.content.map((c) => {
26864
- switch (c.type) {
26865
- case "text": return { text: sanitizeSurrogates(c.text) };
26866
- case "image": return { image: createImageBlock(c.mimeType, c.data) };
26867
- default: throw new Error("Unknown user content type");
26868
- }
26869
- })
26870
- });
26871
- break;
26872
- case "assistant": {
26873
- if (m.content.length === 0) continue;
26874
- const contentBlocks = [];
26875
- for (const c of m.content) switch (c.type) {
26876
- case "text":
26877
- if (c.text.trim().length === 0) continue;
26878
- contentBlocks.push({ text: sanitizeSurrogates(c.text) });
26879
- break;
26880
- case "toolCall":
26881
- contentBlocks.push({ toolUse: {
26882
- toolUseId: c.id,
26883
- name: c.name,
26884
- input: c.arguments
26885
- } });
26886
- break;
26887
- case "thinking":
26888
- if (c.thinking.trim().length === 0) continue;
26889
- if (supportsThinkingSignature(model)) contentBlocks.push({ reasoningContent: { reasoningText: {
26890
- text: sanitizeSurrogates(c.thinking),
26891
- signature: c.thinkingSignature
26892
- } } });
26893
- else contentBlocks.push({ reasoningContent: { reasoningText: { text: sanitizeSurrogates(c.thinking) } } });
26894
- break;
26895
- default: throw new Error("Unknown assistant content type");
26896
- }
26897
- if (contentBlocks.length === 0) continue;
26898
- result.push({
26899
- role: ConversationRole.ASSISTANT,
26900
- content: contentBlocks
26901
- });
26902
- break;
26903
- }
26904
- case "toolResult": {
26905
- const toolResults = [];
26906
- toolResults.push({ toolResult: {
26907
- toolUseId: m.toolCallId,
26908
- content: m.content.map((c) => c.type === "image" ? { image: createImageBlock(c.mimeType, c.data) } : { text: sanitizeSurrogates(c.text) }),
26909
- status: m.isError ? ToolResultStatus.ERROR : ToolResultStatus.SUCCESS
26910
- } });
26911
- let j = i + 1;
26912
- while (j < transformedMessages.length && transformedMessages[j].role === "toolResult") {
26913
- const nextMsg = transformedMessages[j];
26914
- toolResults.push({ toolResult: {
26915
- toolUseId: nextMsg.toolCallId,
26916
- content: nextMsg.content.map((c) => c.type === "image" ? { image: createImageBlock(c.mimeType, c.data) } : { text: sanitizeSurrogates(c.text) }),
26917
- status: nextMsg.isError ? ToolResultStatus.ERROR : ToolResultStatus.SUCCESS
26918
- } });
26919
- j++;
26920
- }
26921
- i = j - 1;
26922
- result.push({
26923
- role: ConversationRole.USER,
26924
- content: toolResults
26925
- });
26926
- break;
26927
- }
26928
- default: throw new Error("Unknown message role");
26929
- }
26930
- }
26931
- if (cacheRetention !== "none" && supportsPromptCaching(model) && result.length > 0) {
26932
- const lastMessage = result[result.length - 1];
26933
- if (lastMessage.role === ConversationRole.USER && lastMessage.content) lastMessage.content.push({ cachePoint: {
26934
- type: CachePointType.DEFAULT,
26935
- ...cacheRetention === "long" ? { ttl: CacheTTL.ONE_HOUR } : {}
26936
- } });
26937
- }
26938
- return result;
26939
- }
26940
- function convertToolConfig(tools, toolChoice) {
26941
- if (!tools?.length || toolChoice === "none") return void 0;
26942
- const bedrockTools = tools.map((tool) => ({ toolSpec: {
26943
- name: tool.name,
26944
- description: tool.description,
26945
- inputSchema: { json: tool.parameters }
26946
- } }));
26947
- let bedrockToolChoice;
26948
- switch (toolChoice) {
26949
- case "auto":
26950
- bedrockToolChoice = { auto: {} };
26951
- break;
26952
- case "any":
26953
- bedrockToolChoice = { any: {} };
26954
- break;
26955
- default: if (toolChoice?.type === "tool") bedrockToolChoice = { tool: { name: toolChoice.name } };
26956
- }
26957
- return {
26958
- tools: bedrockTools,
26959
- toolChoice: bedrockToolChoice
26960
- };
26961
- }
26962
- function mapStopReason(reason) {
26963
- switch (reason) {
26964
- case StopReason.END_TURN:
26965
- case StopReason.STOP_SEQUENCE: return "stop";
26966
- case StopReason.MAX_TOKENS:
26967
- case StopReason.MODEL_CONTEXT_WINDOW_EXCEEDED: return "length";
26968
- case StopReason.TOOL_USE: return "toolUse";
26969
- default: return "error";
26970
- }
26971
- }
26972
- function buildAdditionalModelRequestFields(model, options) {
26973
- if (!options.reasoning || !model.reasoning) return;
26974
- if (model.id.includes("anthropic.claude") || model.id.includes("anthropic/claude")) {
26975
- const result = supportsAdaptiveThinking(model.id) ? {
26976
- thinking: { type: "adaptive" },
26977
- output_config: { effort: mapThinkingLevelToEffort(options.reasoning, model.id) }
26978
- } : (() => {
26979
- const defaultBudgets = {
26980
- minimal: 1024,
26981
- low: 2048,
26982
- medium: 8192,
26983
- high: 16384,
26984
- xhigh: 16384
26985
- };
26986
- const level = options.reasoning === "xhigh" ? "high" : options.reasoning;
26987
- return { thinking: {
26988
- type: "enabled",
26989
- budget_tokens: options.thinkingBudgets?.[level] ?? defaultBudgets[options.reasoning]
26990
- } };
26991
- })();
26992
- if (!supportsAdaptiveThinking(model.id) && (options.interleavedThinking ?? true)) result.anthropic_beta = ["interleaved-thinking-2025-05-14"];
26993
- return result;
26994
- }
26995
- }
26996
- function createImageBlock(mimeType, data) {
26997
- let format;
26998
- switch (mimeType) {
26999
- case "image/jpeg":
27000
- case "image/jpg":
27001
- format = ImageFormat.JPEG;
27002
- break;
27003
- case "image/png":
27004
- format = ImageFormat.PNG;
27005
- break;
27006
- case "image/gif":
27007
- format = ImageFormat.GIF;
27008
- break;
27009
- case "image/webp":
27010
- format = ImageFormat.WEBP;
27011
- break;
27012
- default: throw new Error(`Unknown image type: ${mimeType}`);
27013
- }
27014
- const binaryString = atob(data);
27015
- const bytes = new Uint8Array(binaryString.length);
27016
- for (let i = 0; i < binaryString.length; i++) bytes[i] = binaryString.charCodeAt(i);
27017
- return {
27018
- source: { bytes },
27019
- format
27020
- };
27021
- }
27022
26722
  var _os = null;
27023
- if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) import("node:os").then((m) => {
26723
+ var dynamicImport$1 = (specifier) => import(specifier);
26724
+ var NODE_OS_SPECIFIER = "node:os";
26725
+ if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) dynamicImport$1(NODE_OS_SPECIFIER).then((m) => {
27024
26726
  _os = m;
27025
26727
  });
27026
26728
  var DEFAULT_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
27027
- var JWT_CLAIM_PATH$1 = "https://api.openai.com/auth";
26729
+ var JWT_CLAIM_PATH = "https://api.openai.com/auth";
27028
26730
  var MAX_RETRIES = 3;
27029
26731
  var BASE_DELAY_MS = 1e3;
27030
26732
  var CODEX_TOOL_CALL_PROVIDERS = new Set([
@@ -27587,7 +27289,7 @@ function extractAccountId(token) {
27587
27289
  try {
27588
27290
  const parts = token.split(".");
27589
27291
  if (parts.length !== 3) throw new Error("Invalid token");
27590
- const accountId = JSON.parse(atob(parts[1]))?.[JWT_CLAIM_PATH$1]?.chatgpt_account_id;
27292
+ const accountId = JSON.parse(atob(parts[1]))?.[JWT_CLAIM_PATH]?.chatgpt_account_id;
27591
27293
  if (!accountId) throw new Error("No account ID in token");
27592
27294
  return accountId;
27593
27295
  } catch {
@@ -27608,6 +27310,78 @@ function buildHeaders(initHeaders, additionalHeaders, accountId, token, sessionI
27608
27310
  if (sessionId) headers.set("session_id", sessionId);
27609
27311
  return headers;
27610
27312
  }
27313
+ var dynamicImport = (specifier) => import(specifier);
27314
+ var BEDROCK_PROVIDER_SPECIFIER = "./amazon-bedrock.js";
27315
+ var bedrockProviderModuleOverride;
27316
+ function setBedrockProviderModule(module) {
27317
+ bedrockProviderModuleOverride = module;
27318
+ }
27319
+ async function loadBedrockProviderModule() {
27320
+ if (bedrockProviderModuleOverride) return bedrockProviderModuleOverride;
27321
+ return await dynamicImport(BEDROCK_PROVIDER_SPECIFIER);
27322
+ }
27323
+ function forwardStream(target, source) {
27324
+ (async () => {
27325
+ for await (const event of source) target.push(event);
27326
+ target.end();
27327
+ })();
27328
+ }
27329
+ function createLazyLoadErrorMessage(model, error) {
27330
+ return {
27331
+ role: "assistant",
27332
+ content: [],
27333
+ api: "bedrock-converse-stream",
27334
+ provider: model.provider,
27335
+ model: model.id,
27336
+ usage: {
27337
+ input: 0,
27338
+ output: 0,
27339
+ cacheRead: 0,
27340
+ cacheWrite: 0,
27341
+ totalTokens: 0,
27342
+ cost: {
27343
+ input: 0,
27344
+ output: 0,
27345
+ cacheRead: 0,
27346
+ cacheWrite: 0,
27347
+ total: 0
27348
+ }
27349
+ },
27350
+ stopReason: "error",
27351
+ errorMessage: error instanceof Error ? error.message : String(error),
27352
+ timestamp: Date.now()
27353
+ };
27354
+ }
27355
+ function streamBedrockLazy(model, context, options) {
27356
+ const outer = new AssistantMessageEventStream();
27357
+ loadBedrockProviderModule().then((module) => {
27358
+ forwardStream(outer, module.streamBedrock(model, context, options));
27359
+ }).catch((error) => {
27360
+ const message = createLazyLoadErrorMessage(model, error);
27361
+ outer.push({
27362
+ type: "error",
27363
+ reason: "error",
27364
+ error: message
27365
+ });
27366
+ outer.end(message);
27367
+ });
27368
+ return outer;
27369
+ }
27370
+ function streamSimpleBedrockLazy(model, context, options) {
27371
+ const outer = new AssistantMessageEventStream();
27372
+ loadBedrockProviderModule().then((module) => {
27373
+ forwardStream(outer, module.streamSimpleBedrock(model, context, options));
27374
+ }).catch((error) => {
27375
+ const message = createLazyLoadErrorMessage(model, error);
27376
+ outer.push({
27377
+ type: "error",
27378
+ reason: "error",
27379
+ error: message
27380
+ });
27381
+ outer.end(message);
27382
+ });
27383
+ return outer;
27384
+ }
27611
27385
  function registerBuiltInApiProviders() {
27612
27386
  registerApiProvider({
27613
27387
  api: "anthropic-messages",
@@ -27651,8 +27425,8 @@ function registerBuiltInApiProviders() {
27651
27425
  });
27652
27426
  registerApiProvider({
27653
27427
  api: "bedrock-converse-stream",
27654
- stream: streamBedrock,
27655
- streamSimple: streamSimpleBedrock
27428
+ stream: streamBedrockLazy,
27429
+ streamSimple: streamSimpleBedrockLazy
27656
27430
  });
27657
27431
  }
27658
27432
  function resetApiProviders() {
@@ -27678,1448 +27452,6 @@ async function completeSimple(model, context, options) {
27678
27452
  return streamSimple(model, context, options).result();
27679
27453
  }
27680
27454
  /**
27681
- * PKCE utilities using Web Crypto API.
27682
- * Works in both Node.js 20+ and browsers.
27683
- */
27684
- /**
27685
- * Encode bytes as base64url string.
27686
- */
27687
- function base64urlEncode(bytes) {
27688
- let binary = "";
27689
- for (const byte of bytes) binary += String.fromCharCode(byte);
27690
- return btoa(binary).replace(/\+/g, "-").replace(/\//g, "_").replace(/=/g, "");
27691
- }
27692
- /**
27693
- * Generate PKCE code verifier and challenge.
27694
- * Uses Web Crypto API for cross-platform compatibility.
27695
- */
27696
- async function generatePKCE() {
27697
- const verifierBytes = new Uint8Array(32);
27698
- crypto.getRandomValues(verifierBytes);
27699
- const verifier = base64urlEncode(verifierBytes);
27700
- const data = new TextEncoder().encode(verifier);
27701
- const hashBuffer = await crypto.subtle.digest("SHA-256", data);
27702
- return {
27703
- verifier,
27704
- challenge: base64urlEncode(new Uint8Array(hashBuffer))
27705
- };
27706
- }
27707
- /**
27708
- * Anthropic OAuth flow (Claude Pro/Max)
27709
- */
27710
- var decode$3 = (s) => atob(s);
27711
- var CLIENT_ID$4 = decode$3("OWQxYzI1MGEtZTYxYi00NGQ5LTg4ZWQtNTk0NGQxOTYyZjVl");
27712
- var AUTHORIZE_URL$1 = "https://claude.ai/oauth/authorize";
27713
- var TOKEN_URL$3 = "https://console.anthropic.com/v1/oauth/token";
27714
- var REDIRECT_URI$3 = "https://console.anthropic.com/oauth/code/callback";
27715
- var SCOPES$2 = "org:create_api_key user:profile user:inference";
27716
- /**
27717
- * Login with Anthropic OAuth (device code flow)
27718
- *
27719
- * @param onAuthUrl - Callback to handle the authorization URL (e.g., open browser)
27720
- * @param onPromptCode - Callback to prompt user for the authorization code
27721
- */
27722
- async function loginAnthropic(onAuthUrl, onPromptCode) {
27723
- const { verifier, challenge } = await generatePKCE();
27724
- onAuthUrl(`${AUTHORIZE_URL$1}?${new URLSearchParams({
27725
- code: "true",
27726
- client_id: CLIENT_ID$4,
27727
- response_type: "code",
27728
- redirect_uri: REDIRECT_URI$3,
27729
- scope: SCOPES$2,
27730
- code_challenge: challenge,
27731
- code_challenge_method: "S256",
27732
- state: verifier
27733
- }).toString()}`);
27734
- const splits = (await onPromptCode()).split("#");
27735
- const code = splits[0];
27736
- const state = splits[1];
27737
- const tokenResponse = await fetch(TOKEN_URL$3, {
27738
- method: "POST",
27739
- headers: { "Content-Type": "application/json" },
27740
- body: JSON.stringify({
27741
- grant_type: "authorization_code",
27742
- client_id: CLIENT_ID$4,
27743
- code,
27744
- state,
27745
- redirect_uri: REDIRECT_URI$3,
27746
- code_verifier: verifier
27747
- })
27748
- });
27749
- if (!tokenResponse.ok) {
27750
- const error = await tokenResponse.text();
27751
- throw new Error(`Token exchange failed: ${error}`);
27752
- }
27753
- const tokenData = await tokenResponse.json();
27754
- const expiresAt = Date.now() + tokenData.expires_in * 1e3 - 300 * 1e3;
27755
- return {
27756
- refresh: tokenData.refresh_token,
27757
- access: tokenData.access_token,
27758
- expires: expiresAt
27759
- };
27760
- }
27761
- /**
27762
- * Refresh Anthropic OAuth token
27763
- */
27764
- async function refreshAnthropicToken(refreshToken) {
27765
- const response = await fetch(TOKEN_URL$3, {
27766
- method: "POST",
27767
- headers: { "Content-Type": "application/json" },
27768
- body: JSON.stringify({
27769
- grant_type: "refresh_token",
27770
- client_id: CLIENT_ID$4,
27771
- refresh_token: refreshToken
27772
- })
27773
- });
27774
- if (!response.ok) {
27775
- const error = await response.text();
27776
- throw new Error(`Anthropic token refresh failed: ${error}`);
27777
- }
27778
- const data = await response.json();
27779
- return {
27780
- refresh: data.refresh_token,
27781
- access: data.access_token,
27782
- expires: Date.now() + data.expires_in * 1e3 - 300 * 1e3
27783
- };
27784
- }
27785
- const anthropicOAuthProvider = {
27786
- id: "anthropic",
27787
- name: "Anthropic (Claude Pro/Max)",
27788
- async login(callbacks) {
27789
- return loginAnthropic((url) => callbacks.onAuth({ url }), () => callbacks.onPrompt({ message: "Paste the authorization code:" }));
27790
- },
27791
- async refreshToken(credentials) {
27792
- return refreshAnthropicToken(credentials.refresh);
27793
- },
27794
- getApiKey(credentials) {
27795
- return credentials.access;
27796
- }
27797
- };
27798
- /**
27799
- * GitHub Copilot OAuth flow
27800
- */
27801
- var decode$2 = (s) => atob(s);
27802
- var CLIENT_ID$3 = decode$2("SXYxLmI1MDdhMDhjODdlY2ZlOTg=");
27803
- var COPILOT_HEADERS = {
27804
- "User-Agent": "GitHubCopilotChat/0.35.0",
27805
- "Editor-Version": "vscode/1.107.0",
27806
- "Editor-Plugin-Version": "copilot-chat/0.35.0",
27807
- "Copilot-Integration-Id": "vscode-chat"
27808
- };
27809
- function normalizeDomain(input) {
27810
- const trimmed = input.trim();
27811
- if (!trimmed) return null;
27812
- try {
27813
- return (trimmed.includes("://") ? new URL(trimmed) : new URL(`https://${trimmed}`)).hostname;
27814
- } catch {
27815
- return null;
27816
- }
27817
- }
27818
- function getUrls(domain) {
27819
- return {
27820
- deviceCodeUrl: `https://${domain}/login/device/code`,
27821
- accessTokenUrl: `https://${domain}/login/oauth/access_token`,
27822
- copilotTokenUrl: `https://api.${domain}/copilot_internal/v2/token`
27823
- };
27824
- }
27825
- /**
27826
- * Parse the proxy-ep from a Copilot token and convert to API base URL.
27827
- * Token format: tid=...;exp=...;proxy-ep=proxy.individual.githubcopilot.com;...
27828
- * Returns API URL like https://api.individual.githubcopilot.com
27829
- */
27830
- function getBaseUrlFromToken(token) {
27831
- const match = token.match(/proxy-ep=([^;]+)/);
27832
- if (!match) return null;
27833
- return `https://${match[1].replace(/^proxy\./, "api.")}`;
27834
- }
27835
- function getGitHubCopilotBaseUrl(token, enterpriseDomain) {
27836
- if (token) {
27837
- const urlFromToken = getBaseUrlFromToken(token);
27838
- if (urlFromToken) return urlFromToken;
27839
- }
27840
- if (enterpriseDomain) return `https://copilot-api.${enterpriseDomain}`;
27841
- return "https://api.individual.githubcopilot.com";
27842
- }
27843
- async function fetchJson(url, init) {
27844
- const response = await fetch(url, init);
27845
- if (!response.ok) {
27846
- const text = await response.text();
27847
- throw new Error(`${response.status} ${response.statusText}: ${text}`);
27848
- }
27849
- return response.json();
27850
- }
27851
- async function startDeviceFlow(domain) {
27852
- const data = await fetchJson(getUrls(domain).deviceCodeUrl, {
27853
- method: "POST",
27854
- headers: {
27855
- Accept: "application/json",
27856
- "Content-Type": "application/json",
27857
- "User-Agent": "GitHubCopilotChat/0.35.0"
27858
- },
27859
- body: JSON.stringify({
27860
- client_id: CLIENT_ID$3,
27861
- scope: "read:user"
27862
- })
27863
- });
27864
- if (!data || typeof data !== "object") throw new Error("Invalid device code response");
27865
- const deviceCode = data.device_code;
27866
- const userCode = data.user_code;
27867
- const verificationUri = data.verification_uri;
27868
- const interval = data.interval;
27869
- const expiresIn = data.expires_in;
27870
- if (typeof deviceCode !== "string" || typeof userCode !== "string" || typeof verificationUri !== "string" || typeof interval !== "number" || typeof expiresIn !== "number") throw new Error("Invalid device code response fields");
27871
- return {
27872
- device_code: deviceCode,
27873
- user_code: userCode,
27874
- verification_uri: verificationUri,
27875
- interval,
27876
- expires_in: expiresIn
27877
- };
27878
- }
27879
- /**
27880
- * Sleep that can be interrupted by an AbortSignal
27881
- */
27882
- function abortableSleep(ms, signal) {
27883
- return new Promise((resolve, reject) => {
27884
- if (signal?.aborted) {
27885
- reject(/* @__PURE__ */ new Error("Login cancelled"));
27886
- return;
27887
- }
27888
- const timeout = setTimeout(resolve, ms);
27889
- signal?.addEventListener("abort", () => {
27890
- clearTimeout(timeout);
27891
- reject(/* @__PURE__ */ new Error("Login cancelled"));
27892
- }, { once: true });
27893
- });
27894
- }
27895
- async function pollForGitHubAccessToken(domain, deviceCode, intervalSeconds, expiresIn, signal) {
27896
- const urls = getUrls(domain);
27897
- const deadline = Date.now() + expiresIn * 1e3;
27898
- let intervalMs = Math.max(1e3, Math.floor(intervalSeconds * 1e3));
27899
- while (Date.now() < deadline) {
27900
- if (signal?.aborted) throw new Error("Login cancelled");
27901
- const raw = await fetchJson(urls.accessTokenUrl, {
27902
- method: "POST",
27903
- headers: {
27904
- Accept: "application/json",
27905
- "Content-Type": "application/json",
27906
- "User-Agent": "GitHubCopilotChat/0.35.0"
27907
- },
27908
- body: JSON.stringify({
27909
- client_id: CLIENT_ID$3,
27910
- device_code: deviceCode,
27911
- grant_type: "urn:ietf:params:oauth:grant-type:device_code"
27912
- })
27913
- });
27914
- if (raw && typeof raw === "object" && typeof raw.access_token === "string") return raw.access_token;
27915
- if (raw && typeof raw === "object" && typeof raw.error === "string") {
27916
- const err = raw.error;
27917
- if (err === "authorization_pending") {
27918
- await abortableSleep(intervalMs, signal);
27919
- continue;
27920
- }
27921
- if (err === "slow_down") {
27922
- intervalMs += 5e3;
27923
- await abortableSleep(intervalMs, signal);
27924
- continue;
27925
- }
27926
- throw new Error(`Device flow failed: ${err}`);
27927
- }
27928
- await abortableSleep(intervalMs, signal);
27929
- }
27930
- throw new Error("Device flow timed out");
27931
- }
27932
- /**
27933
- * Refresh GitHub Copilot token
27934
- */
27935
- async function refreshGitHubCopilotToken(refreshToken, enterpriseDomain) {
27936
- const raw = await fetchJson(getUrls(enterpriseDomain || "github.com").copilotTokenUrl, { headers: {
27937
- Accept: "application/json",
27938
- Authorization: `Bearer ${refreshToken}`,
27939
- ...COPILOT_HEADERS
27940
- } });
27941
- if (!raw || typeof raw !== "object") throw new Error("Invalid Copilot token response");
27942
- const token = raw.token;
27943
- const expiresAt = raw.expires_at;
27944
- if (typeof token !== "string" || typeof expiresAt !== "number") throw new Error("Invalid Copilot token response fields");
27945
- return {
27946
- refresh: refreshToken,
27947
- access: token,
27948
- expires: expiresAt * 1e3 - 300 * 1e3,
27949
- enterpriseUrl: enterpriseDomain
27950
- };
27951
- }
27952
- /**
27953
- * Enable a model for the user's GitHub Copilot account.
27954
- * This is required for some models (like Claude, Grok) before they can be used.
27955
- */
27956
- async function enableGitHubCopilotModel(token, modelId, enterpriseDomain) {
27957
- const url = `${getGitHubCopilotBaseUrl(token, enterpriseDomain)}/models/${modelId}/policy`;
27958
- try {
27959
- return (await fetch(url, {
27960
- method: "POST",
27961
- headers: {
27962
- "Content-Type": "application/json",
27963
- Authorization: `Bearer ${token}`,
27964
- ...COPILOT_HEADERS,
27965
- "openai-intent": "chat-policy",
27966
- "x-interaction-type": "chat-policy"
27967
- },
27968
- body: JSON.stringify({ state: "enabled" })
27969
- })).ok;
27970
- } catch {
27971
- return false;
27972
- }
27973
- }
27974
- /**
27975
- * Enable all known GitHub Copilot models that may require policy acceptance.
27976
- * Called after successful login to ensure all models are available.
27977
- */
27978
- async function enableAllGitHubCopilotModels(token, enterpriseDomain, onProgress) {
27979
- const models = getModels("github-copilot");
27980
- await Promise.all(models.map(async (model) => {
27981
- const success = await enableGitHubCopilotModel(token, model.id, enterpriseDomain);
27982
- onProgress?.(model.id, success);
27983
- }));
27984
- }
27985
- /**
27986
- * Login with GitHub Copilot OAuth (device code flow)
27987
- *
27988
- * @param options.onAuth - Callback with URL and optional instructions (user code)
27989
- * @param options.onPrompt - Callback to prompt user for input
27990
- * @param options.onProgress - Optional progress callback
27991
- * @param options.signal - Optional AbortSignal for cancellation
27992
- */
27993
- async function loginGitHubCopilot(options) {
27994
- const input = await options.onPrompt({
27995
- message: "GitHub Enterprise URL/domain (blank for github.com)",
27996
- placeholder: "company.ghe.com",
27997
- allowEmpty: true
27998
- });
27999
- if (options.signal?.aborted) throw new Error("Login cancelled");
28000
- const trimmed = input.trim();
28001
- const enterpriseDomain = normalizeDomain(input);
28002
- if (trimmed && !enterpriseDomain) throw new Error("Invalid GitHub Enterprise URL/domain");
28003
- const domain = enterpriseDomain || "github.com";
28004
- const device = await startDeviceFlow(domain);
28005
- options.onAuth(device.verification_uri, `Enter code: ${device.user_code}`);
28006
- const credentials = await refreshGitHubCopilotToken(await pollForGitHubAccessToken(domain, device.device_code, device.interval, device.expires_in, options.signal), enterpriseDomain ?? void 0);
28007
- options.onProgress?.("Enabling models...");
28008
- await enableAllGitHubCopilotModels(credentials.access, enterpriseDomain ?? void 0);
28009
- return credentials;
28010
- }
28011
- const githubCopilotOAuthProvider = {
28012
- id: "github-copilot",
28013
- name: "GitHub Copilot",
28014
- async login(callbacks) {
28015
- return loginGitHubCopilot({
28016
- onAuth: (url, instructions) => callbacks.onAuth({
28017
- url,
28018
- instructions
28019
- }),
28020
- onPrompt: callbacks.onPrompt,
28021
- onProgress: callbacks.onProgress,
28022
- signal: callbacks.signal
28023
- });
28024
- },
28025
- async refreshToken(credentials) {
28026
- const creds = credentials;
28027
- return refreshGitHubCopilotToken(creds.refresh, creds.enterpriseUrl);
28028
- },
28029
- getApiKey(credentials) {
28030
- return credentials.access;
28031
- },
28032
- modifyModels(models, credentials) {
28033
- const creds = credentials;
28034
- const domain = creds.enterpriseUrl ? normalizeDomain(creds.enterpriseUrl) ?? void 0 : void 0;
28035
- const baseUrl = getGitHubCopilotBaseUrl(creds.access, domain);
28036
- return models.map((m) => m.provider === "github-copilot" ? {
28037
- ...m,
28038
- baseUrl
28039
- } : m);
28040
- }
28041
- };
28042
- /**
28043
- * Antigravity OAuth flow (Gemini 3, Claude, GPT-OSS via Google Cloud)
28044
- * Uses different OAuth credentials than google-gemini-cli for access to additional models.
28045
- *
28046
- * NOTE: This module uses Node.js http.createServer for the OAuth callback.
28047
- * It is only intended for CLI use, not browser environments.
28048
- */
28049
- var _createServer$1 = null;
28050
- var _httpImportPromise$1 = null;
28051
- if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) _httpImportPromise$1 = import("node:http").then((m) => {
28052
- _createServer$1 = m.createServer;
28053
- });
28054
- var decode$1 = (s) => atob(s);
28055
- var CLIENT_ID$2 = decode$1("MTA3MTAwNjA2MDU5MS10bWhzc2luMmgyMWxjcmUyMzV2dG9sb2poNGc0MDNlcC5hcHBzLmdvb2dsZXVzZXJjb250ZW50LmNvbQ==");
28056
- var CLIENT_SECRET$1 = decode$1("R09DU1BYLUs1OEZXUjQ4NkxkTEoxbUxCOHNYQzR6NnFEQWY=");
28057
- var REDIRECT_URI$2 = "http://localhost:51121/oauth-callback";
28058
- var SCOPES$1 = [
28059
- "https://www.googleapis.com/auth/cloud-platform",
28060
- "https://www.googleapis.com/auth/userinfo.email",
28061
- "https://www.googleapis.com/auth/userinfo.profile",
28062
- "https://www.googleapis.com/auth/cclog",
28063
- "https://www.googleapis.com/auth/experimentsandconfigs"
28064
- ];
28065
- var AUTH_URL$1 = "https://accounts.google.com/o/oauth2/v2/auth";
28066
- var TOKEN_URL$2 = "https://oauth2.googleapis.com/token";
28067
- var DEFAULT_PROJECT_ID = "rising-fact-p41fc";
28068
- /**
28069
- * Start a local HTTP server to receive the OAuth callback
28070
- */
28071
- async function getNodeCreateServer$1() {
28072
- if (_createServer$1) return _createServer$1;
28073
- if (_httpImportPromise$1) await _httpImportPromise$1;
28074
- if (_createServer$1) return _createServer$1;
28075
- throw new Error("Antigravity OAuth is only available in Node.js environments");
28076
- }
28077
/**
 * Start a local HTTP server on 127.0.0.1:51121 to receive the Antigravity
 * OAuth redirect at /oauth-callback.
 *
 * Resolves with { server, cancelWait, waitForCode }; waitForCode polls
 * every 100ms until a code/state pair arrives or cancelWait() is called.
 *
 * Fix: query-string values (notably `error`) are now HTML-escaped before
 * being echoed into the response page; previously they were reflected
 * verbatim, allowing reflected XSS via a crafted callback URL.
 */
async function startCallbackServer$1() {
	const createServer = await getNodeCreateServer$1();
	// Minimal HTML escaper for untrusted values echoed into responses.
	const escapeHtml = (value) => String(value).replace(/[&<>"']/g, (ch) => ({
		"&": "&amp;",
		"<": "&lt;",
		">": "&gt;",
		'"': "&quot;",
		"'": "&#39;"
	})[ch]);
	return new Promise((resolve, reject) => {
		let result = null;
		let cancelled = false;
		const server = createServer((req, res) => {
			const url = new URL(req.url || "", `http://localhost:51121`);
			if (url.pathname === "/oauth-callback") {
				const code = url.searchParams.get("code");
				const state = url.searchParams.get("state");
				const error = url.searchParams.get("error");
				if (error) {
					res.writeHead(400, { "Content-Type": "text/html" });
					res.end(`<html><body><h1>Authentication Failed</h1><p>Error: ${escapeHtml(error)}</p><p>You can close this window.</p></body></html>`);
					return;
				}
				if (code && state) {
					res.writeHead(200, { "Content-Type": "text/html" });
					res.end(`<html><body><h1>Authentication Successful</h1><p>You can close this window and return to the terminal.</p></body></html>`);
					result = {
						code,
						state
					};
				} else {
					res.writeHead(400, { "Content-Type": "text/html" });
					res.end(`<html><body><h1>Authentication Failed</h1><p>Missing code or state parameter.</p></body></html>`);
				}
			} else {
				res.writeHead(404);
				res.end();
			}
		});
		// Surface bind/listen errors (e.g. port already in use) to the caller.
		server.on("error", (err) => {
			reject(err);
		});
		server.listen(51121, "127.0.0.1", () => {
			resolve({
				server,
				// Stops the waitForCode polling loop on its next tick.
				cancelWait: () => {
					cancelled = true;
				},
				waitForCode: async () => {
					const sleep = () => new Promise((r) => setTimeout(r, 100));
					while (!result && !cancelled) await sleep();
					return result;
				}
			});
		});
	});
}
28127
/**
 * Extract the `code` and `state` query parameters from a pasted redirect URL.
 * Returns an empty object when the input is blank or not a parseable URL.
 */
function parseRedirectUrl$1(input) {
	const trimmed = input.trim();
	if (trimmed.length === 0) return {};
	let url;
	try {
		url = new URL(trimmed);
	} catch {
		return {};
	}
	const params = url.searchParams;
	return {
		code: params.get("code") ?? void 0,
		state: params.get("state") ?? void 0
	};
}
28143
/**
 * Discover the user's Cloud AI Companion project for Antigravity.
 *
 * Tries the prod and daily-sandbox cloudcode endpoints in order; any
 * network/HTTP failure is deliberately swallowed so login can proceed.
 * Falls back to DEFAULT_PROJECT_ID (module-level constant) when no
 * project is reported.
 *
 * @param {string} accessToken - OAuth bearer token.
 * @param {(msg: string) => void} [onProgress] - Optional status callback.
 * @returns {Promise<string>} Project id.
 */
async function discoverProject$1(accessToken, onProgress) {
	const headers = {
		Authorization: `Bearer ${accessToken}`,
		"Content-Type": "application/json",
		// Mimics the official client's identification headers.
		"User-Agent": "google-api-nodejs-client/9.15.1",
		"X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1",
		"Client-Metadata": JSON.stringify({
			ideType: "IDE_UNSPECIFIED",
			platform: "PLATFORM_UNSPECIFIED",
			pluginType: "GEMINI"
		})
	};
	const endpoints = ["https://cloudcode-pa.googleapis.com", "https://daily-cloudcode-pa.sandbox.googleapis.com"];
	onProgress?.("Checking for existing project...");
	for (const endpoint of endpoints) try {
		const loadResponse = await fetch(`${endpoint}/v1internal:loadCodeAssist`, {
			method: "POST",
			headers,
			body: JSON.stringify({ metadata: {
				ideType: "IDE_UNSPECIFIED",
				platform: "PLATFORM_UNSPECIFIED",
				pluginType: "GEMINI"
			} })
		});
		if (loadResponse.ok) {
			const data = await loadResponse.json();
			// The API may return the project as a bare string or as an
			// object carrying an `id` field; accept both shapes.
			if (typeof data.cloudaicompanionProject === "string" && data.cloudaicompanionProject) return data.cloudaicompanionProject;
			if (data.cloudaicompanionProject && typeof data.cloudaicompanionProject === "object" && data.cloudaicompanionProject.id) return data.cloudaicompanionProject.id;
		}
	} catch {}
	// Best-effort: no endpoint answered usefully, use the baked-in default.
	onProgress?.("Using default project...");
	return DEFAULT_PROJECT_ID;
}
28179
/**
 * Best-effort lookup of the authenticated user's email via the Google
 * userinfo endpoint. Returns undefined on any network or HTTP failure.
 */
async function getUserEmail$1(accessToken) {
	const headers = { Authorization: `Bearer ${accessToken}` };
	try {
		const response = await fetch("https://www.googleapis.com/oauth2/v1/userinfo?alt=json", { headers });
		if (!response.ok) return void 0;
		const info = await response.json();
		return info.email;
	} catch {
		return void 0;
	}
}
28188
/**
 * Exchange an Antigravity refresh token for a fresh access token.
 * Keeps the previous refresh token when Google does not rotate it, and
 * expires the access token 5 minutes early as a safety margin.
 * @throws Error when the token endpoint responds with a non-2xx status.
 */
async function refreshAntigravityToken(refreshToken, projectId) {
	const form = new URLSearchParams({
		client_id: CLIENT_ID$2,
		client_secret: CLIENT_SECRET$1,
		refresh_token: refreshToken,
		grant_type: "refresh_token"
	});
	const response = await fetch(TOKEN_URL$2, {
		method: "POST",
		headers: { "Content-Type": "application/x-www-form-urlencoded" },
		body: form
	});
	if (!response.ok) {
		const error = await response.text();
		throw new Error(`Antigravity token refresh failed: ${error}`);
	}
	const data = await response.json();
	// 5-minute safety buffer before the real expiry.
	const bufferMs = 300 * 1e3;
	return {
		refresh: data.refresh_token || refreshToken,
		access: data.access_token,
		expires: Date.now() + data.expires_in * 1e3 - bufferMs,
		projectId
	};
}
28214
/**
 * Login with Antigravity OAuth
 *
 * Runs the full PKCE authorization-code flow: hands the consent URL to
 * onAuth, waits for the local callback server (optionally racing a manual
 * paste), validates the CSRF state, exchanges the code for tokens, then
 * resolves the user's email and project id.
 *
 * @param onAuth - Callback with URL and optional instructions
 * @param onProgress - Optional progress callback
 * @param onManualCodeInput - Optional promise that resolves with user-pasted redirect URL.
 * Races with browser callback - whichever completes first wins.
 * @returns {{refresh, access, expires, projectId, email}} credentials
 * @throws on state mismatch, missing code/refresh token, or HTTP failure.
 */
async function loginAntigravity(onAuth, onProgress, onManualCodeInput) {
	const { verifier, challenge } = await generatePKCE();
	onProgress?.("Starting local server for OAuth callback...");
	const server = await startCallbackServer$1();
	let code;
	try {
		onAuth({
			// NOTE: the PKCE verifier doubles as the OAuth `state` value,
			// so the CSRF checks below compare state against `verifier`.
			url: `${AUTH_URL$1}?${new URLSearchParams({
				client_id: CLIENT_ID$2,
				response_type: "code",
				redirect_uri: REDIRECT_URI$2,
				scope: SCOPES$1.join(" "),
				code_challenge: challenge,
				code_challenge_method: "S256",
				state: verifier,
				access_type: "offline",
				prompt: "consent"
			}).toString()}`,
			instructions: "Complete the sign-in in your browser."
		});
		onProgress?.("Waiting for OAuth callback...");
		if (onManualCodeInput) {
			let manualInput;
			let manualError;
			// Manual paste races the callback server; whichever finishes
			// first cancels the other's wait via cancelWait().
			const manualPromise = onManualCodeInput().then((input) => {
				manualInput = input;
				server.cancelWait();
			}).catch((err) => {
				manualError = err instanceof Error ? err : new Error(String(err));
				server.cancelWait();
			});
			const result = await server.waitForCode();
			if (manualError) throw manualError;
			if (result?.code) {
				if (result.state !== verifier) throw new Error("OAuth state mismatch - possible CSRF attack");
				code = result.code;
			} else if (manualInput) {
				const parsed = parseRedirectUrl$1(manualInput);
				if (parsed.state && parsed.state !== verifier) throw new Error("OAuth state mismatch - possible CSRF attack");
				code = parsed.code;
			}
			if (!code) {
				// Server wait was cancelled or yielded nothing; block on the
				// manual-input promise and re-parse whatever arrived.
				await manualPromise;
				if (manualError) throw manualError;
				if (manualInput) {
					const parsed = parseRedirectUrl$1(manualInput);
					if (parsed.state && parsed.state !== verifier) throw new Error("OAuth state mismatch - possible CSRF attack");
					code = parsed.code;
				}
			}
		} else {
			const result = await server.waitForCode();
			if (result?.code) {
				if (result.state !== verifier) throw new Error("OAuth state mismatch - possible CSRF attack");
				code = result.code;
			}
		}
		if (!code) throw new Error("No authorization code received");
		onProgress?.("Exchanging authorization code for tokens...");
		const tokenResponse = await fetch(TOKEN_URL$2, {
			method: "POST",
			headers: { "Content-Type": "application/x-www-form-urlencoded" },
			body: new URLSearchParams({
				client_id: CLIENT_ID$2,
				client_secret: CLIENT_SECRET$1,
				code,
				grant_type: "authorization_code",
				redirect_uri: REDIRECT_URI$2,
				code_verifier: verifier
			})
		});
		if (!tokenResponse.ok) {
			const error = await tokenResponse.text();
			throw new Error(`Token exchange failed: ${error}`);
		}
		const tokenData = await tokenResponse.json();
		if (!tokenData.refresh_token) throw new Error("No refresh token received. Please try again.");
		onProgress?.("Getting user info...");
		const email = await getUserEmail$1(tokenData.access_token);
		const projectId = await discoverProject$1(tokenData.access_token, onProgress);
		// Expire 5 minutes early so callers refresh before the hard expiry.
		const expiresAt = Date.now() + tokenData.expires_in * 1e3 - 300 * 1e3;
		return {
			refresh: tokenData.refresh_token,
			access: tokenData.access_token,
			expires: expiresAt,
			projectId,
			email
		};
	} finally {
		// Always tear down the local callback server, success or failure.
		server.server.close();
	}
}
28314
/**
 * OAuth provider descriptor for Antigravity, exposing the generic
 * login / refreshToken / getApiKey hooks.
 */
const antigravityOAuthProvider = {
	id: "google-antigravity",
	name: "Antigravity (Gemini 3, Claude, GPT-OSS)",
	// Signals callers that this flow spins up a local callback server.
	usesCallbackServer: true,
	async login(callbacks) {
		return loginAntigravity(callbacks.onAuth, callbacks.onProgress, callbacks.onManualCodeInput);
	},
	async refreshToken(credentials) {
		const creds = credentials;
		if (!creds.projectId) throw new Error("Antigravity credentials missing projectId");
		return refreshAntigravityToken(creds.refresh, creds.projectId);
	},
	// The "API key" is a JSON envelope carrying both the bearer token and
	// the project id; presumably unpacked by the request layer — verify.
	getApiKey(credentials) {
		const creds = credentials;
		return JSON.stringify({
			token: creds.access,
			projectId: creds.projectId
		});
	}
};
28334
/**
 * Gemini CLI OAuth flow (Google Cloud Code Assist)
 * Standard Gemini models only (gemini-2.0-flash, gemini-2.5-*)
 *
 * NOTE: This module uses Node.js http.createServer for the OAuth callback.
 * It is only intended for CLI use, not browser environments.
 */
// Lazily imported node:http.createServer; stays null outside Node/Bun.
var _createServer = null;
var _httpImportPromise = null;
// Kick off the dynamic import at module load so getNodeCreateServer()
// can simply await the stored promise later.
if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) _httpImportPromise = import("node:http").then((m) => {
	_createServer = m.createServer;
});
// Light obfuscation only: these are base64-encoded OAuth client values
// for an installed-app flow (shipped in the binary, not true secrets).
var decode = (s) => atob(s);
var CLIENT_ID$1 = decode("NjgxMjU1ODA5Mzk1LW9vOGZ0Mm9wcmRybnA5ZTNhcWY2YXYzaG1kaWIxMzVqLmFwcHMuZ29vZ2xldXNlcmNvbnRlbnQuY29t");
var CLIENT_SECRET = decode("R09DU1BYLTR1SGdNUG0tMW83U2stZ2VWNkN1NWNsWEZzeGw=");
// Must match the port the local callback server listens on (8085).
var REDIRECT_URI$1 = "http://localhost:8085/oauth2callback";
var SCOPES = [
	"https://www.googleapis.com/auth/cloud-platform",
	"https://www.googleapis.com/auth/userinfo.email",
	"https://www.googleapis.com/auth/userinfo.profile"
];
var AUTH_URL = "https://accounts.google.com/o/oauth2/v2/auth";
var TOKEN_URL$1 = "https://oauth2.googleapis.com/token";
var CODE_ASSIST_ENDPOINT = "https://cloudcode-pa.googleapis.com";
28358
/**
 * Resolve Node's http.createServer, waiting for the lazy dynamic import
 * started at module load to settle first.
 * @throws outside Node.js/Bun, where node:http was never imported.
 */
async function getNodeCreateServer() {
	if (!_createServer) {
		if (_httpImportPromise) await _httpImportPromise;
	}
	if (!_createServer) throw new Error("Gemini CLI OAuth is only available in Node.js environments");
	return _createServer;
}
28367
/**
 * Start a local HTTP server on 127.0.0.1:8085 to receive the Gemini CLI
 * OAuth redirect at /oauth2callback.
 *
 * Resolves with { server, cancelWait, waitForCode }; waitForCode polls
 * every 100ms until a code/state pair arrives or cancelWait() is called.
 *
 * Fix: query-string values (notably `error`) are now HTML-escaped before
 * being echoed into the response page; previously they were reflected
 * verbatim, allowing reflected XSS via a crafted callback URL.
 */
async function startCallbackServer() {
	const createServer = await getNodeCreateServer();
	// Minimal HTML escaper for untrusted values echoed into responses.
	const escapeHtml = (value) => String(value).replace(/[&<>"']/g, (ch) => ({
		"&": "&amp;",
		"<": "&lt;",
		">": "&gt;",
		'"': "&quot;",
		"'": "&#39;"
	})[ch]);
	return new Promise((resolve, reject) => {
		let result = null;
		let cancelled = false;
		const server = createServer((req, res) => {
			const url = new URL(req.url || "", `http://localhost:8085`);
			if (url.pathname === "/oauth2callback") {
				const code = url.searchParams.get("code");
				const state = url.searchParams.get("state");
				const error = url.searchParams.get("error");
				if (error) {
					res.writeHead(400, { "Content-Type": "text/html" });
					res.end(`<html><body><h1>Authentication Failed</h1><p>Error: ${escapeHtml(error)}</p><p>You can close this window.</p></body></html>`);
					return;
				}
				if (code && state) {
					res.writeHead(200, { "Content-Type": "text/html" });
					res.end(`<html><body><h1>Authentication Successful</h1><p>You can close this window and return to the terminal.</p></body></html>`);
					result = {
						code,
						state
					};
				} else {
					res.writeHead(400, { "Content-Type": "text/html" });
					res.end(`<html><body><h1>Authentication Failed</h1><p>Missing code or state parameter.</p></body></html>`);
				}
			} else {
				res.writeHead(404);
				res.end();
			}
		});
		// Surface bind/listen errors (e.g. port already in use) to the caller.
		server.on("error", (err) => {
			reject(err);
		});
		server.listen(8085, "127.0.0.1", () => {
			resolve({
				server,
				// Stops the waitForCode polling loop on its next tick.
				cancelWait: () => {
					cancelled = true;
				},
				waitForCode: async () => {
					const sleep = () => new Promise((r) => setTimeout(r, 100));
					while (!result && !cancelled) await sleep();
					return result;
				}
			});
		});
	});
}
28417
/**
 * Pull the `code` and `state` query parameters out of a pasted redirect
 * URL. Blank or unparseable input yields an empty object.
 */
function parseRedirectUrl(input) {
	const candidate = input.trim();
	if (!candidate) return {};
	let parsed;
	try {
		parsed = new URL(candidate);
	} catch {
		return {};
	}
	const code = parsed.searchParams.get("code");
	const state = parsed.searchParams.get("state");
	return {
		code: code === null ? void 0 : code,
		state: state === null ? void 0 : state
	};
}
28433
// Tier identifiers returned by loadCodeAssist.
var TIER_FREE = "free-tier";
var TIER_LEGACY = "legacy-tier";
var TIER_STANDARD = "standard-tier";
/**
 * Sleep helper used while polling the onboarding long-running operation.
 */
function wait(ms) {
	return new Promise((resolve) => {
		setTimeout(resolve, ms);
	});
}
/**
 * Pick the default tier from the list returned by loadCodeAssist.
 * Falls back to the legacy tier when the list is absent, empty, or has
 * no entry flagged as default.
 */
function getDefaultTier(allowedTiers) {
	const fallback = { id: TIER_LEGACY };
	if (!allowedTiers?.length) return fallback;
	for (const tier of allowedTiers) {
		if (tier.isDefault) return tier;
	}
	return fallback;
}
/**
 * Detect the VPC Service Controls rejection shape in an error payload:
 * any error detail whose reason is "SECURITY_POLICY_VIOLATED".
 */
function isVpcScAffectedUser(payload) {
	if (typeof payload !== "object" || payload === null) return false;
	if (!("error" in payload)) return false;
	const details = payload.error?.details;
	if (!Array.isArray(details)) return false;
	return details.some((detail) => detail.reason === "SECURITY_POLICY_VIOLATED");
}
28456
/**
 * Poll a Cloud Code Assist long-running operation until `done` is true.
 *
 * Waits 5s between attempts; there is no attempt cap, so termination
 * relies on the operation completing or a poll request failing.
 *
 * @param {string} operationName - LRO resource name from onboardUser.
 * @param {Record<string, string>} headers - Auth headers to reuse.
 * @param {(msg: string) => void} [onProgress]
 * @returns {Promise<object>} The completed operation payload.
 * @throws Error on a non-2xx poll response.
 */
async function pollOperation(operationName, headers, onProgress) {
	let attempt = 0;
	while (true) {
		// First attempt is immediate; subsequent ones wait 5 seconds.
		if (attempt > 0) {
			onProgress?.(`Waiting for project provisioning (attempt ${attempt + 1})...`);
			await wait(5e3);
		}
		const response = await fetch(`${CODE_ASSIST_ENDPOINT}/v1internal/${operationName}`, {
			method: "GET",
			headers
		});
		if (!response.ok) throw new Error(`Failed to poll operation: ${response.status} ${response.statusText}`);
		const data = await response.json();
		if (data.done) return data;
		attempt += 1;
	}
}
28476
/**
 * Discover or provision a Google Cloud project for the user
 *
 * Flow: loadCodeAssist → if the account already has a tier, use its
 * project (or the env-provided one); otherwise onboard the user on the
 * default tier, polling the returned long-running operation until a
 * project id materializes.
 *
 * VPC-SC note: a SECURITY_POLICY_VIOLATED rejection is treated as an
 * already-onboarded standard-tier account rather than a fatal error.
 *
 * @param {string} accessToken
 * @param {(msg: string) => void} [onProgress]
 * @returns {Promise<string>} project id
 * @throws when no project can be determined and no
 *         GOOGLE_CLOUD_PROJECT(_ID) env var is set.
 */
async function discoverProject(accessToken, onProgress) {
	// Explicit project override from the environment, if any.
	const envProjectId = process.env.GOOGLE_CLOUD_PROJECT || process.env.GOOGLE_CLOUD_PROJECT_ID;
	const headers = {
		Authorization: `Bearer ${accessToken}`,
		"Content-Type": "application/json",
		"User-Agent": "google-api-nodejs-client/9.15.1",
		"X-Goog-Api-Client": "gl-node/22.17.0"
	};
	onProgress?.("Checking for existing Cloud Code Assist project...");
	const loadResponse = await fetch(`${CODE_ASSIST_ENDPOINT}/v1internal:loadCodeAssist`, {
		method: "POST",
		headers,
		body: JSON.stringify({
			cloudaicompanionProject: envProjectId,
			metadata: {
				ideType: "IDE_UNSPECIFIED",
				platform: "PLATFORM_UNSPECIFIED",
				pluginType: "GEMINI",
				duetProject: envProjectId
			}
		})
	});
	let data;
	if (!loadResponse.ok) {
		let errorPayload;
		try {
			// Clone so the body can still be read as text below on the
			// non-VPC-SC error path.
			errorPayload = await loadResponse.clone().json();
		} catch {
			errorPayload = void 0;
		}
		if (isVpcScAffectedUser(errorPayload)) data = { currentTier: { id: TIER_STANDARD } };
		else {
			const errorText = await loadResponse.text();
			throw new Error(`loadCodeAssist failed: ${loadResponse.status} ${loadResponse.statusText}: ${errorText}`);
		}
	} else data = await loadResponse.json();
	// Already onboarded: prefer the server-reported project, then the env.
	if (data.currentTier) {
		if (data.cloudaicompanionProject) return data.cloudaicompanionProject;
		if (envProjectId) return envProjectId;
		throw new Error("This account requires setting the GOOGLE_CLOUD_PROJECT or GOOGLE_CLOUD_PROJECT_ID environment variable. See https://goo.gle/gemini-cli-auth-docs#workspace-gca");
	}
	const tierId = getDefaultTier(data.allowedTiers)?.id ?? TIER_FREE;
	// Non-free tiers require an explicit user-supplied project.
	if (tierId !== TIER_FREE && !envProjectId) throw new Error("This account requires setting the GOOGLE_CLOUD_PROJECT or GOOGLE_CLOUD_PROJECT_ID environment variable. See https://goo.gle/gemini-cli-auth-docs#workspace-gca");
	onProgress?.("Provisioning Cloud Code Assist project (this may take a moment)...");
	const onboardBody = {
		tierId,
		metadata: {
			ideType: "IDE_UNSPECIFIED",
			platform: "PLATFORM_UNSPECIFIED",
			pluginType: "GEMINI"
		}
	};
	if (tierId !== TIER_FREE && envProjectId) {
		onboardBody.cloudaicompanionProject = envProjectId;
		onboardBody.metadata.duetProject = envProjectId;
	}
	const onboardResponse = await fetch(`${CODE_ASSIST_ENDPOINT}/v1internal:onboardUser`, {
		method: "POST",
		headers,
		body: JSON.stringify(onboardBody)
	});
	if (!onboardResponse.ok) {
		const errorText = await onboardResponse.text();
		throw new Error(`onboardUser failed: ${onboardResponse.status} ${onboardResponse.statusText}: ${errorText}`);
	}
	// onboardUser returns a long-running operation; poll until done.
	let lroData = await onboardResponse.json();
	if (!lroData.done && lroData.name) lroData = await pollOperation(lroData.name, headers, onProgress);
	const projectId = lroData.response?.cloudaicompanionProject?.id;
	if (projectId) return projectId;
	if (envProjectId) return envProjectId;
	throw new Error("Could not discover or provision a Google Cloud project. Try setting the GOOGLE_CLOUD_PROJECT or GOOGLE_CLOUD_PROJECT_ID environment variable. See https://goo.gle/gemini-cli-auth-docs#workspace-gca");
}
28551
/**
 * Fetch the user's email address from Google's OAuth2 userinfo endpoint.
 * Any failure (non-2xx status, network error) yields undefined.
 */
async function getUserEmail(accessToken) {
	try {
		const response = await fetch("https://www.googleapis.com/oauth2/v1/userinfo?alt=json", {
			headers: { Authorization: `Bearer ${accessToken}` }
		});
		if (response.ok) {
			const body = await response.json();
			return body.email;
		}
	} catch {}
	return void 0;
}
28560
/**
 * Refresh the Google Cloud Code Assist OAuth access token.
 * Keeps the previous refresh token when none is rotated and shortens the
 * expiry by a 5-minute buffer.
 * @throws Error when the refresh request is rejected.
 */
async function refreshGoogleCloudToken(refreshToken, projectId) {
	const response = await fetch(TOKEN_URL$1, {
		method: "POST",
		headers: { "Content-Type": "application/x-www-form-urlencoded" },
		body: new URLSearchParams([
			["client_id", CLIENT_ID$1],
			["client_secret", CLIENT_SECRET],
			["refresh_token", refreshToken],
			["grant_type", "refresh_token"]
		])
	});
	if (response.ok === false) {
		const detail = await response.text();
		throw new Error(`Google Cloud token refresh failed: ${detail}`);
	}
	const tokens = await response.json();
	const expiresAt = Date.now() + tokens.expires_in * 1e3 - 300 * 1e3;
	return {
		refresh: tokens.refresh_token || refreshToken,
		access: tokens.access_token,
		expires: expiresAt,
		projectId
	};
}
28586
/**
 * Login with Gemini CLI (Google Cloud Code Assist) OAuth
 *
 * Runs the PKCE authorization-code flow: hands the consent URL to onAuth,
 * waits for the local callback server (optionally racing a manual paste),
 * validates the CSRF state, exchanges the code for tokens, then resolves
 * the user's email and Cloud project id.
 *
 * @param onAuth - Callback with URL and optional instructions
 * @param onProgress - Optional progress callback
 * @param onManualCodeInput - Optional promise that resolves with user-pasted redirect URL.
 * Races with browser callback - whichever completes first wins.
 * @returns {{refresh, access, expires, projectId, email}} credentials
 * @throws on state mismatch, missing code/refresh token, or HTTP failure.
 */
async function loginGeminiCli(onAuth, onProgress, onManualCodeInput) {
	const { verifier, challenge } = await generatePKCE();
	onProgress?.("Starting local server for OAuth callback...");
	const server = await startCallbackServer();
	let code;
	try {
		onAuth({
			// NOTE: the PKCE verifier doubles as the OAuth `state` value,
			// so the CSRF checks below compare state against `verifier`.
			url: `${AUTH_URL}?${new URLSearchParams({
				client_id: CLIENT_ID$1,
				response_type: "code",
				redirect_uri: REDIRECT_URI$1,
				scope: SCOPES.join(" "),
				code_challenge: challenge,
				code_challenge_method: "S256",
				state: verifier,
				access_type: "offline",
				prompt: "consent"
			}).toString()}`,
			instructions: "Complete the sign-in in your browser."
		});
		onProgress?.("Waiting for OAuth callback...");
		if (onManualCodeInput) {
			let manualInput;
			let manualError;
			// Manual paste races the callback server; whichever finishes
			// first cancels the other's wait via cancelWait().
			const manualPromise = onManualCodeInput().then((input) => {
				manualInput = input;
				server.cancelWait();
			}).catch((err) => {
				manualError = err instanceof Error ? err : new Error(String(err));
				server.cancelWait();
			});
			const result = await server.waitForCode();
			if (manualError) throw manualError;
			if (result?.code) {
				if (result.state !== verifier) throw new Error("OAuth state mismatch - possible CSRF attack");
				code = result.code;
			} else if (manualInput) {
				const parsed = parseRedirectUrl(manualInput);
				if (parsed.state && parsed.state !== verifier) throw new Error("OAuth state mismatch - possible CSRF attack");
				code = parsed.code;
			}
			if (!code) {
				// Server wait was cancelled or yielded nothing; block on the
				// manual-input promise and re-parse whatever arrived.
				await manualPromise;
				if (manualError) throw manualError;
				if (manualInput) {
					const parsed = parseRedirectUrl(manualInput);
					if (parsed.state && parsed.state !== verifier) throw new Error("OAuth state mismatch - possible CSRF attack");
					code = parsed.code;
				}
			}
		} else {
			const result = await server.waitForCode();
			if (result?.code) {
				if (result.state !== verifier) throw new Error("OAuth state mismatch - possible CSRF attack");
				code = result.code;
			}
		}
		if (!code) throw new Error("No authorization code received");
		onProgress?.("Exchanging authorization code for tokens...");
		const tokenResponse = await fetch(TOKEN_URL$1, {
			method: "POST",
			headers: { "Content-Type": "application/x-www-form-urlencoded" },
			body: new URLSearchParams({
				client_id: CLIENT_ID$1,
				client_secret: CLIENT_SECRET,
				code,
				grant_type: "authorization_code",
				redirect_uri: REDIRECT_URI$1,
				code_verifier: verifier
			})
		});
		if (!tokenResponse.ok) {
			const error = await tokenResponse.text();
			throw new Error(`Token exchange failed: ${error}`);
		}
		const tokenData = await tokenResponse.json();
		if (!tokenData.refresh_token) throw new Error("No refresh token received. Please try again.");
		onProgress?.("Getting user info...");
		const email = await getUserEmail(tokenData.access_token);
		const projectId = await discoverProject(tokenData.access_token, onProgress);
		// Expire 5 minutes early so callers refresh before the hard expiry.
		const expiresAt = Date.now() + tokenData.expires_in * 1e3 - 300 * 1e3;
		return {
			refresh: tokenData.refresh_token,
			access: tokenData.access_token,
			expires: expiresAt,
			projectId,
			email
		};
	} finally {
		// Always tear down the local callback server, success or failure.
		server.server.close();
	}
}
28686
/**
 * OAuth provider descriptor for Google Cloud Code Assist (Gemini CLI),
 * exposing the generic login / refreshToken / getApiKey hooks.
 */
const geminiCliOAuthProvider = {
	id: "google-gemini-cli",
	name: "Google Cloud Code Assist (Gemini CLI)",
	// Signals callers that this flow spins up a local callback server.
	usesCallbackServer: true,
	async login(callbacks) {
		return loginGeminiCli(callbacks.onAuth, callbacks.onProgress, callbacks.onManualCodeInput);
	},
	async refreshToken(credentials) {
		const creds = credentials;
		if (!creds.projectId) throw new Error("Google Cloud credentials missing projectId");
		return refreshGoogleCloudToken(creds.refresh, creds.projectId);
	},
	// The "API key" is a JSON envelope of bearer token + project id.
	getApiKey(credentials) {
		const creds = credentials;
		return JSON.stringify({
			token: creds.access,
			projectId: creds.projectId
		});
	}
};
28706
/**
 * OpenAI Codex (ChatGPT OAuth) flow
 *
 * NOTE: This module uses Node.js crypto and http for the OAuth callback.
 * It is only intended for CLI use, not browser environments.
 */
// Lazily imported Node built-ins; remain null outside Node/Bun.
// NOTE(review): these dynamic imports are neither awaited nor error-handled —
// a call into createState()/startLocalOAuthServer() that races module load
// could still see null and throw the "Node.js environments" error.
var _randomBytes = null;
var _http = null;
if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) {
	import("node:crypto").then((m) => {
		_randomBytes = m.randomBytes;
	});
	import("node:http").then((m) => {
		_http = m;
	});
}
// Public OAuth client id for the Codex CLI installed-app flow.
var CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann";
var AUTHORIZE_URL = "https://auth.openai.com/oauth/authorize";
var TOKEN_URL = "https://auth.openai.com/oauth/token";
// Must match the local callback server bound to 127.0.0.1:1455.
var REDIRECT_URI = "http://localhost:1455/auth/callback";
var SCOPE = "openid profile email offline_access";
// Namespaced JWT claim that carries the ChatGPT account id.
var JWT_CLAIM_PATH = "https://api.openai.com/auth";
// Static page shown in the browser after a successful callback.
var SUCCESS_HTML = `<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Authentication successful</title>
</head>
<body>
<p>Authentication successful. Return to your terminal to continue.</p>
</body>
</html>`;
28739
/**
 * Generate a random 32-hex-character `state` value for the OAuth request.
 * @throws when the lazily imported node:crypto module is unavailable
 *         (i.e. outside Node.js/Bun).
 */
function createState() {
	if (_randomBytes === null) throw new Error("OpenAI Codex OAuth is only available in Node.js environments");
	const bytes = _randomBytes(16);
	return bytes.toString("hex");
}
28743
/**
 * Normalize whatever the user pasted into `{ code, state }`.
 * Accepted shapes: a full redirect URL, a `code#state` pair, a raw query
 * string containing `code=...`, or a bare authorization code.
 */
function parseAuthorizationInput(input) {
	const value = input.trim();
	if (!value) return {};
	// Shape 1: a complete redirect URL.
	try {
		const url = new URL(value);
		const q = url.searchParams;
		return {
			code: q.get("code") ?? void 0,
			state: q.get("state") ?? void 0
		};
	} catch {}
	// Shape 2: the simplified flow's "code#state" pair.
	if (value.includes("#")) {
		const pieces = value.split("#", 2);
		return {
			code: pieces[0],
			state: pieces[1]
		};
	}
	// Shape 3: a bare query string.
	if (value.includes("code=")) {
		const params = new URLSearchParams(value);
		return {
			code: params.get("code") ?? void 0,
			state: params.get("state") ?? void 0
		};
	}
	// Shape 4: just the code itself.
	return { code: value };
}
28769
/**
 * Decode the payload (second segment) of a JWT without verifying it.
 *
 * JWT segments are base64url-encoded (RFC 7515): they use `-`/`_` instead
 * of `+`/`/` and drop `=` padding. The previous implementation passed the
 * raw segment straight to atob(), which rejects base64url characters, so
 * any token whose payload contained `-` or `_` decoded to null. Convert
 * to standard base64 (re-padding to a multiple of 4) first, and decode
 * the resulting bytes as UTF-8 so non-ASCII claim values survive.
 *
 * @param {string} token - Raw JWT (`header.payload.signature`).
 * @returns {object|null} Parsed payload object, or null on any error.
 */
function decodeJwt(token) {
	try {
		const parts = token.split(".");
		if (parts.length !== 3) return null;
		const payload = parts[1] ?? "";
		// base64url -> base64, restoring '=' padding.
		const base64 = payload.replace(/-/g, "+").replace(/_/g, "/").padEnd(Math.ceil(payload.length / 4) * 4, "=");
		const bytes = Uint8Array.from(atob(base64), (c) => c.charCodeAt(0));
		return JSON.parse(new TextDecoder().decode(bytes));
	} catch {
		return null;
	}
}
28780
/**
 * Exchange an authorization code (plus PKCE verifier) for Codex tokens.
 * Resolves to { type: "failed" } rather than throwing so callers can
 * branch on the result shape; failures are logged to stderr.
 */
async function exchangeAuthorizationCode(code, verifier, redirectUri = REDIRECT_URI) {
	const body = new URLSearchParams({
		grant_type: "authorization_code",
		client_id: CLIENT_ID,
		code,
		code_verifier: verifier,
		redirect_uri: redirectUri
	});
	const response = await fetch(TOKEN_URL, {
		method: "POST",
		headers: { "Content-Type": "application/x-www-form-urlencoded" },
		body
	});
	if (!response.ok) {
		const text = await response.text().catch(() => "");
		console.error("[openai-codex] code->token failed:", response.status, text);
		return { type: "failed" };
	}
	const json = await response.json();
	const complete = json.access_token && json.refresh_token && typeof json.expires_in === "number";
	if (!complete) {
		console.error("[openai-codex] token response missing fields:", json);
		return { type: "failed" };
	}
	return {
		type: "success",
		access: json.access_token,
		refresh: json.refresh_token,
		expires: Date.now() + json.expires_in * 1e3
	};
}
28809
/**
 * Refresh the Codex access token. Never throws: every failure mode is
 * logged and collapsed into { type: "failed" }.
 */
async function refreshAccessToken(refreshToken) {
	try {
		const form = new URLSearchParams({
			grant_type: "refresh_token",
			refresh_token: refreshToken,
			client_id: CLIENT_ID
		});
		const response = await fetch(TOKEN_URL, {
			method: "POST",
			headers: { "Content-Type": "application/x-www-form-urlencoded" },
			body: form
		});
		if (!response.ok) {
			const detail = await response.text().catch(() => "");
			console.error("[openai-codex] Token refresh failed:", response.status, detail);
			return { type: "failed" };
		}
		const payload = await response.json();
		const usable = payload.access_token && payload.refresh_token && typeof payload.expires_in === "number";
		if (!usable) {
			console.error("[openai-codex] Token refresh response missing fields:", payload);
			return { type: "failed" };
		}
		return {
			type: "success",
			access: payload.access_token,
			refresh: payload.refresh_token,
			expires: Date.now() + payload.expires_in * 1e3
		};
	} catch (error) {
		console.error("[openai-codex] Token refresh error:", error);
		return { type: "failed" };
	}
}
28841
/**
 * Build the Codex authorization URL plus the PKCE verifier and CSRF state
 * the caller must keep for the subsequent token exchange.
 */
async function createAuthorizationFlow(originator = "pi") {
	const { verifier, challenge } = await generatePKCE();
	const state = createState();
	const query = {
		response_type: "code",
		client_id: CLIENT_ID,
		redirect_uri: REDIRECT_URI,
		scope: SCOPE,
		code_challenge: challenge,
		code_challenge_method: "S256",
		state,
		id_token_add_organizations: "true",
		codex_cli_simplified_flow: "true",
		originator
	};
	const url = new URL(AUTHORIZE_URL);
	for (const [key, value] of Object.entries(query)) url.searchParams.set(key, value);
	return {
		verifier,
		state,
		url: url.toString()
	};
}
28861
/**
 * Start the local Codex OAuth callback server on 127.0.0.1:1455.
 *
 * Resolves with { close, cancelWait, waitForCode }. waitForCode polls
 * every 100ms for up to 60s (600 iterations) and yields { code } once the
 * browser redirect arrives, or null on cancel/timeout. If the port cannot
 * be bound, resolves with a stub whose waitForCode always returns null so
 * the caller falls back to manual code paste.
 *
 * @param {string} state - Expected CSRF state; mismatched callbacks get 400.
 * @throws when node:http was never imported (non-Node environment).
 */
function startLocalOAuthServer(state) {
	if (!_http) throw new Error("OpenAI Codex OAuth is only available in Node.js environments");
	let lastCode = null;
	let cancelled = false;
	const server = _http.createServer((req, res) => {
		try {
			const url = new URL(req.url || "", "http://localhost");
			if (url.pathname !== "/auth/callback") {
				res.statusCode = 404;
				res.end("Not found");
				return;
			}
			// Reject callbacks whose state doesn't match ours (CSRF guard).
			if (url.searchParams.get("state") !== state) {
				res.statusCode = 400;
				res.end("State mismatch");
				return;
			}
			const code = url.searchParams.get("code");
			if (!code) {
				res.statusCode = 400;
				res.end("Missing authorization code");
				return;
			}
			res.statusCode = 200;
			res.setHeader("Content-Type", "text/html; charset=utf-8");
			res.end(SUCCESS_HTML);
			lastCode = code;
		} catch {
			res.statusCode = 500;
			res.end("Internal error");
		}
	});
	return new Promise((resolve) => {
		server.listen(1455, "127.0.0.1", () => {
			resolve({
				close: () => server.close(),
				cancelWait: () => {
					cancelled = true;
				},
				waitForCode: async () => {
					const sleep = () => new Promise((r) => setTimeout(r, 100));
					// 600 * 100ms = 60 second overall timeout.
					for (let i = 0; i < 600; i += 1) {
						if (lastCode) return { code: lastCode };
						if (cancelled) return null;
						await sleep();
					}
					return null;
				}
			});
		}).on("error", (err) => {
			// Bind failure (e.g. port in use): degrade to manual paste mode.
			console.error("[openai-codex] Failed to bind http://127.0.0.1:1455 (", err.code, ") Falling back to manual paste.");
			resolve({
				close: () => {
					try {
						server.close();
					} catch {}
				},
				cancelWait: () => {},
				waitForCode: async () => null
			});
		});
	});
}
28924
/**
 * Pull the ChatGPT account id out of the access token's namespaced auth
 * claim. Returns null when the token cannot be decoded or the claim is
 * absent/empty.
 */
function getAccountId(accessToken) {
	const claims = decodeJwt(accessToken);
	const auth = claims?.[JWT_CLAIM_PATH];
	const accountId = auth?.chatgpt_account_id;
	if (typeof accountId !== "string" || accountId.length === 0) return null;
	return accountId;
}
28928
- /**
28929
- * Login with OpenAI Codex OAuth
28930
- *
28931
- * @param options.onAuth - Called with URL and instructions when auth starts
28932
- * @param options.onPrompt - Called to prompt user for manual code paste (fallback if no onManualCodeInput)
28933
- * @param options.onProgress - Optional progress messages
28934
- * @param options.onManualCodeInput - Optional promise that resolves with user-pasted code.
28935
- * Races with browser callback - whichever completes first wins.
28936
- * Useful for showing paste input immediately alongside browser flow.
28937
- * @param options.originator - OAuth originator parameter (defaults to "pi")
28938
- */
28939
- async function loginOpenAICodex(options) {
28940
- const { verifier, state, url } = await createAuthorizationFlow(options.originator);
28941
- const server = await startLocalOAuthServer(state);
28942
- options.onAuth({
28943
- url,
28944
- instructions: "A browser window should open. Complete login to finish."
28945
- });
28946
- let code;
28947
- try {
28948
- if (options.onManualCodeInput) {
28949
- let manualCode;
28950
- let manualError;
28951
- const manualPromise = options.onManualCodeInput().then((input) => {
28952
- manualCode = input;
28953
- server.cancelWait();
28954
- }).catch((err) => {
28955
- manualError = err instanceof Error ? err : new Error(String(err));
28956
- server.cancelWait();
28957
- });
28958
- const result = await server.waitForCode();
28959
- if (manualError) throw manualError;
28960
- if (result?.code) code = result.code;
28961
- else if (manualCode) {
28962
- const parsed = parseAuthorizationInput(manualCode);
28963
- if (parsed.state && parsed.state !== state) throw new Error("State mismatch");
28964
- code = parsed.code;
28965
- }
28966
- if (!code) {
28967
- await manualPromise;
28968
- if (manualError) throw manualError;
28969
- if (manualCode) {
28970
- const parsed = parseAuthorizationInput(manualCode);
28971
- if (parsed.state && parsed.state !== state) throw new Error("State mismatch");
28972
- code = parsed.code;
28973
- }
28974
- }
28975
- } else {
28976
- const result = await server.waitForCode();
28977
- if (result?.code) code = result.code;
28978
- }
28979
- if (!code) {
28980
- const parsed = parseAuthorizationInput(await options.onPrompt({ message: "Paste the authorization code (or full redirect URL):" }));
28981
- if (parsed.state && parsed.state !== state) throw new Error("State mismatch");
28982
- code = parsed.code;
28983
- }
28984
- if (!code) throw new Error("Missing authorization code");
28985
- const tokenResult = await exchangeAuthorizationCode(code, verifier);
28986
- if (tokenResult.type !== "success") throw new Error("Token exchange failed");
28987
- const accountId = getAccountId(tokenResult.access);
28988
- if (!accountId) throw new Error("Failed to extract accountId from token");
28989
- return {
28990
- access: tokenResult.access,
28991
- refresh: tokenResult.refresh,
28992
- expires: tokenResult.expires,
28993
- accountId
28994
- };
28995
- } finally {
28996
- server.close();
28997
- }
28998
- }
28999
- /**
29000
- * Refresh OpenAI Codex OAuth token
29001
- */
29002
- async function refreshOpenAICodexToken(refreshToken) {
29003
- const result = await refreshAccessToken(refreshToken);
29004
- if (result.type !== "success") throw new Error("Failed to refresh OpenAI Codex token");
29005
- const accountId = getAccountId(result.access);
29006
- if (!accountId) throw new Error("Failed to extract accountId from token");
29007
- return {
29008
- access: result.access,
29009
- refresh: result.refresh,
29010
- expires: result.expires,
29011
- accountId
29012
- };
29013
- }
29014
- const openaiCodexOAuthProvider = {
29015
- id: "openai-codex",
29016
- name: "ChatGPT Plus/Pro (Codex Subscription)",
29017
- usesCallbackServer: true,
29018
- async login(callbacks) {
29019
- return loginOpenAICodex({
29020
- onAuth: callbacks.onAuth,
29021
- onPrompt: callbacks.onPrompt,
29022
- onProgress: callbacks.onProgress,
29023
- onManualCodeInput: callbacks.onManualCodeInput
29024
- });
29025
- },
29026
- async refreshToken(credentials) {
29027
- return refreshOpenAICodexToken(credentials.refresh);
29028
- },
29029
- getApiKey(credentials) {
29030
- return credentials.access;
29031
- }
29032
- };
29033
- var BUILT_IN_OAUTH_PROVIDERS = [
29034
- anthropicOAuthProvider,
29035
- githubCopilotOAuthProvider,
29036
- geminiCliOAuthProvider,
29037
- antigravityOAuthProvider,
29038
- openaiCodexOAuthProvider
29039
- ];
29040
- var oauthProviderRegistry = new Map(BUILT_IN_OAUTH_PROVIDERS.map((provider) => [provider.id, provider]));
29041
- /**
29042
- * Get an OAuth provider by ID
29043
- */
29044
- function getOAuthProvider(id) {
29045
- return oauthProviderRegistry.get(id);
29046
- }
29047
- /**
29048
- * Register a custom OAuth provider
29049
- */
29050
- function registerOAuthProvider(provider) {
29051
- oauthProviderRegistry.set(provider.id, provider);
29052
- }
29053
- /**
29054
- * Unregister an OAuth provider.
29055
- *
29056
- * If the provider is built-in, restores the built-in implementation.
29057
- * Custom providers are removed completely.
29058
- */
29059
- function unregisterOAuthProvider(id) {
29060
- const builtInProvider = BUILT_IN_OAUTH_PROVIDERS.find((provider) => provider.id === id);
29061
- if (builtInProvider) {
29062
- oauthProviderRegistry.set(id, builtInProvider);
29063
- return;
29064
- }
29065
- oauthProviderRegistry.delete(id);
29066
- }
29067
- /**
29068
- * Reset OAuth providers to built-ins.
29069
- */
29070
- function resetOAuthProviders() {
29071
- oauthProviderRegistry.clear();
29072
- for (const provider of BUILT_IN_OAUTH_PROVIDERS) oauthProviderRegistry.set(provider.id, provider);
29073
- }
29074
- /**
29075
- * Get all registered OAuth providers
29076
- */
29077
- function getOAuthProviders() {
29078
- return Array.from(oauthProviderRegistry.values());
29079
- }
29080
- /**
29081
- * @deprecated Use getOAuthProviders() which returns OAuthProviderInterface[]
29082
- */
29083
- function getOAuthProviderInfoList() {
29084
- return getOAuthProviders().map((p) => ({
29085
- id: p.id,
29086
- name: p.name,
29087
- available: true
29088
- }));
29089
- }
29090
- /**
29091
- * Refresh token for any OAuth provider.
29092
- * @deprecated Use getOAuthProvider(id).refreshToken() instead
29093
- */
29094
- async function refreshOAuthToken(providerId, credentials) {
29095
- const provider = getOAuthProvider(providerId);
29096
- if (!provider) throw new Error(`Unknown OAuth provider: ${providerId}`);
29097
- return provider.refreshToken(credentials);
29098
- }
29099
- /**
29100
- * Get API key for a provider from OAuth credentials.
29101
- * Automatically refreshes expired tokens.
29102
- *
29103
- * @returns API key string and updated credentials, or null if no credentials
29104
- * @throws Error if refresh fails
29105
- */
29106
- async function getOAuthApiKey(providerId, credentials) {
29107
- const provider = getOAuthProvider(providerId);
29108
- if (!provider) throw new Error(`Unknown OAuth provider: ${providerId}`);
29109
- let creds = credentials[providerId];
29110
- if (!creds) return null;
29111
- if (Date.now() >= creds.expires) try {
29112
- creds = await provider.refreshToken(creds);
29113
- } catch (_error) {
29114
- throw new Error(`Failed to refresh OAuth token for ${providerId}`);
29115
- }
29116
- const apiKey = provider.getApiKey(creds);
29117
- return {
29118
- newCredentials: creds,
29119
- apiKey
29120
- };
29121
- }
29122
- /**
29123
27455
  * Regex patterns to detect context overflow errors from different providers.
29124
27456
  *
29125
27457
  * These patterns match error messages returned when the input exceeds
@@ -35252,51 +33584,28 @@ var dist_exports$1 = /* @__PURE__ */ __exportAll({
35252
33584
  EventStream: () => EventStream$1,
35253
33585
  StringEnum: () => StringEnum,
35254
33586
  Type: () => Type,
35255
- anthropicOAuthProvider: () => anthropicOAuthProvider,
35256
- antigravityOAuthProvider: () => antigravityOAuthProvider,
35257
33587
  buildRequest: () => buildRequest,
35258
33588
  calculateCost: () => calculateCost,
35259
33589
  clearApiProviders: () => clearApiProviders,
35260
33590
  complete: () => complete,
35261
33591
  completeSimple: () => completeSimple,
35262
- convertMessages: () => convertMessages$1,
33592
+ convertMessages: () => convertMessages,
35263
33593
  createAssistantMessageEventStream: () => createAssistantMessageEventStream,
35264
33594
  extractRetryDelay: () => extractRetryDelay,
35265
- geminiCliOAuthProvider: () => geminiCliOAuthProvider,
35266
33595
  getApiProvider: () => getApiProvider,
35267
33596
  getApiProviders: () => getApiProviders,
35268
33597
  getEnvApiKey: () => getEnvApiKey,
35269
- getGitHubCopilotBaseUrl: () => getGitHubCopilotBaseUrl,
35270
33598
  getModel: () => getModel,
35271
33599
  getModels: () => getModels,
35272
- getOAuthApiKey: () => getOAuthApiKey,
35273
- getOAuthProvider: () => getOAuthProvider,
35274
- getOAuthProviderInfoList: () => getOAuthProviderInfoList,
35275
- getOAuthProviders: () => getOAuthProviders,
35276
33600
  getOverflowPatterns: () => getOverflowPatterns,
35277
33601
  getProviders: () => getProviders,
35278
- githubCopilotOAuthProvider: () => githubCopilotOAuthProvider,
35279
33602
  isContextOverflow: () => isContextOverflow,
35280
- loginAnthropic: () => loginAnthropic,
35281
- loginAntigravity: () => loginAntigravity,
35282
- loginGeminiCli: () => loginGeminiCli,
35283
- loginGitHubCopilot: () => loginGitHubCopilot,
35284
- loginOpenAICodex: () => loginOpenAICodex,
35285
33603
  modelsAreEqual: () => modelsAreEqual,
35286
- normalizeDomain: () => normalizeDomain,
35287
- openaiCodexOAuthProvider: () => openaiCodexOAuthProvider,
35288
33604
  parseStreamingJson: () => parseStreamingJson,
35289
- refreshAnthropicToken: () => refreshAnthropicToken,
35290
- refreshAntigravityToken: () => refreshAntigravityToken,
35291
- refreshGitHubCopilotToken: () => refreshGitHubCopilotToken,
35292
- refreshGoogleCloudToken: () => refreshGoogleCloudToken,
35293
- refreshOAuthToken: () => refreshOAuthToken,
35294
- refreshOpenAICodexToken: () => refreshOpenAICodexToken,
35295
33605
  registerApiProvider: () => registerApiProvider,
35296
33606
  registerBuiltInApiProviders: () => registerBuiltInApiProviders,
35297
- registerOAuthProvider: () => registerOAuthProvider,
35298
33607
  resetApiProviders: () => resetApiProviders,
35299
- resetOAuthProviders: () => resetOAuthProviders,
33608
+ setBedrockProviderModule: () => setBedrockProviderModule,
35300
33609
  stream: () => stream,
35301
33610
  streamAnthropic: () => streamAnthropic,
35302
33611
  streamAzureOpenAIResponses: () => streamAzureOpenAIResponses,
@@ -35315,7 +33624,6 @@ var dist_exports$1 = /* @__PURE__ */ __exportAll({
35315
33624
  streamSimpleOpenAIResponses: () => streamSimpleOpenAIResponses,
35316
33625
  supportsXhigh: () => supportsXhigh,
35317
33626
  unregisterApiProviders: () => unregisterApiProviders,
35318
- unregisterOAuthProvider: () => unregisterOAuthProvider,
35319
33627
  validateToolArguments: () => validateToolArguments,
35320
33628
  validateToolCall: () => validateToolCall
35321
33629
  });
@@ -36302,4 +34610,4 @@ var dist_exports = /* @__PURE__ */ __exportAll({
36302
34610
  agentLoopContinue: () => agentLoopContinue,
36303
34611
  streamProxy: () => streamProxy
36304
34612
  });
36305
- export { IsSymbol$2 as $, Has$1 as A, IsArray$2 as B, KeyOfPattern as C, Get as D, Never as E, Kind as F, IsFunction$2 as G, IsBigInt$2 as H, TransformKind as I, IsNull$2 as J, IsInteger$2 as K, TypeBoxError as L, IsSchema$1 as M, IsTransform$1 as N, Has as O, IsUndefined$1 as P, IsString$2 as Q, TypeSystemPolicy as R, KeyOfPropertyEntries as S, Ref as T, IsBoolean$2 as U, IsAsyncIterator$2 as V, IsDate$2 as W, IsObject$2 as X, IsNumber$2 as Y, IsPromise$2 as Z, getEnvApiKey as _, isContextOverflow as a, Type as b, getOAuthProviders as c, completeSimple as d, IsUint8Array$2 as et, resetApiProviders as f, supportsXhigh as g, modelsAreEqual as h, require_ajv as i, IsSchema as j, Get$1 as k, registerOAuthProvider as l, getProviders as m, Agent as n, IsValueType as nt, getOAuthApiKey as o, getModels as p, IsIterator$2 as q, dist_exports$1 as r, getOAuthProvider as s, dist_exports as t, IsUndefined$2 as tt, resetOAuthProviders as u, registerApiProvider as v, KeyOfPropertyKeys as w, ExtendsUndefinedCheck as x, esm_exports as y, HasPropertyKey as z };
34613
+ export { Kind as A, IsFunction$2 as B, Has as C, IsSchema$1 as D, IsSchema as E, IsArray$2 as F, IsObject$2 as G, IsIterator$2 as H, IsAsyncIterator$2 as I, IsSymbol$2 as J, IsPromise$2 as K, IsBigInt$2 as L, TypeBoxError as M, TypeSystemPolicy as N, IsTransform$1 as O, HasPropertyKey as P, IsBoolean$2 as R, Get as S, Has$1 as T, IsNull$2 as U, IsInteger$2 as V, IsNumber$2 as W, IsUndefined$2 as X, IsUint8Array$2 as Y, IsValueType as Z, KeyOfPropertyEntries as _, isContextOverflow as a, Ref as b, getModels as c, supportsXhigh as d, getEnvApiKey as f, ExtendsUndefinedCheck as g, Type as h, require_ajv as i, TransformKind as j, IsUndefined$1 as k, getProviders as l, esm_exports as m, Agent as n, completeSimple as o, registerApiProvider as p, IsString$2 as q, dist_exports$1 as r, resetApiProviders as s, dist_exports as t, modelsAreEqual as u, KeyOfPattern as v, Get$1 as w, Never as x, KeyOfPropertyKeys as y, IsDate$2 as z };