@stonerzju/opencode 1.2.16-offline.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (496) hide show
  1. package/AGENTS.md +10 -0
  2. package/BUN_SHELL_MIGRATION_PLAN.md +136 -0
  3. package/Dockerfile +18 -0
  4. package/README.md +15 -0
  5. package/bin/opencode +179 -0
  6. package/bunfig.toml +7 -0
  7. package/drizzle.config.ts +10 -0
  8. package/migration/20260127222353_familiar_lady_ursula/migration.sql +90 -0
  9. package/migration/20260127222353_familiar_lady_ursula/snapshot.json +796 -0
  10. package/migration/20260211171708_add_project_commands/migration.sql +1 -0
  11. package/migration/20260211171708_add_project_commands/snapshot.json +806 -0
  12. package/migration/20260213144116_wakeful_the_professor/migration.sql +11 -0
  13. package/migration/20260213144116_wakeful_the_professor/snapshot.json +897 -0
  14. package/migration/20260225215848_workspace/migration.sql +7 -0
  15. package/migration/20260225215848_workspace/snapshot.json +959 -0
  16. package/package.json +140 -0
  17. package/package.json.bak +140 -0
  18. package/parsers-config.ts +254 -0
  19. package/script/build.ts +224 -0
  20. package/script/check-migrations.ts +16 -0
  21. package/script/postinstall.mjs +131 -0
  22. package/script/publish.ts +181 -0
  23. package/script/schema.ts +63 -0
  24. package/script/seed-e2e.ts +50 -0
  25. package/src/acp/README.md +174 -0
  26. package/src/acp/agent.ts +1741 -0
  27. package/src/acp/session.ts +116 -0
  28. package/src/acp/types.ts +23 -0
  29. package/src/agent/agent.ts +339 -0
  30. package/src/agent/generate.txt +75 -0
  31. package/src/agent/prompt/compaction.txt +14 -0
  32. package/src/agent/prompt/explore.txt +18 -0
  33. package/src/agent/prompt/summary.txt +11 -0
  34. package/src/agent/prompt/title.txt +44 -0
  35. package/src/auth/index.ts +68 -0
  36. package/src/bun/index.ts +131 -0
  37. package/src/bun/registry.ts +50 -0
  38. package/src/bus/bus-event.ts +43 -0
  39. package/src/bus/global.ts +10 -0
  40. package/src/bus/index.ts +105 -0
  41. package/src/cli/bootstrap.ts +17 -0
  42. package/src/cli/cmd/acp.ts +70 -0
  43. package/src/cli/cmd/agent.ts +257 -0
  44. package/src/cli/cmd/auth.ts +449 -0
  45. package/src/cli/cmd/cmd.ts +7 -0
  46. package/src/cli/cmd/db.ts +118 -0
  47. package/src/cli/cmd/debug/agent.ts +167 -0
  48. package/src/cli/cmd/debug/config.ts +16 -0
  49. package/src/cli/cmd/debug/file.ts +97 -0
  50. package/src/cli/cmd/debug/index.ts +48 -0
  51. package/src/cli/cmd/debug/lsp.ts +52 -0
  52. package/src/cli/cmd/debug/ripgrep.ts +87 -0
  53. package/src/cli/cmd/debug/scrap.ts +16 -0
  54. package/src/cli/cmd/debug/skill.ts +16 -0
  55. package/src/cli/cmd/debug/snapshot.ts +52 -0
  56. package/src/cli/cmd/export.ts +88 -0
  57. package/src/cli/cmd/generate.ts +38 -0
  58. package/src/cli/cmd/github.ts +1631 -0
  59. package/src/cli/cmd/import.ts +170 -0
  60. package/src/cli/cmd/mcp.ts +754 -0
  61. package/src/cli/cmd/models.ts +77 -0
  62. package/src/cli/cmd/pr.ts +112 -0
  63. package/src/cli/cmd/run.ts +625 -0
  64. package/src/cli/cmd/serve.ts +31 -0
  65. package/src/cli/cmd/session.ts +156 -0
  66. package/src/cli/cmd/stats.ts +410 -0
  67. package/src/cli/cmd/tui/app.tsx +845 -0
  68. package/src/cli/cmd/tui/attach.ts +88 -0
  69. package/src/cli/cmd/tui/component/border.tsx +21 -0
  70. package/src/cli/cmd/tui/component/dialog-agent.tsx +31 -0
  71. package/src/cli/cmd/tui/component/dialog-command.tsx +147 -0
  72. package/src/cli/cmd/tui/component/dialog-mcp.tsx +86 -0
  73. package/src/cli/cmd/tui/component/dialog-model.tsx +165 -0
  74. package/src/cli/cmd/tui/component/dialog-provider.tsx +259 -0
  75. package/src/cli/cmd/tui/component/dialog-session-list.tsx +108 -0
  76. package/src/cli/cmd/tui/component/dialog-session-rename.tsx +31 -0
  77. package/src/cli/cmd/tui/component/dialog-skill.tsx +36 -0
  78. package/src/cli/cmd/tui/component/dialog-stash.tsx +87 -0
  79. package/src/cli/cmd/tui/component/dialog-status.tsx +167 -0
  80. package/src/cli/cmd/tui/component/dialog-tag.tsx +44 -0
  81. package/src/cli/cmd/tui/component/dialog-theme-list.tsx +50 -0
  82. package/src/cli/cmd/tui/component/logo.tsx +85 -0
  83. package/src/cli/cmd/tui/component/prompt/autocomplete.tsx +667 -0
  84. package/src/cli/cmd/tui/component/prompt/frecency.tsx +90 -0
  85. package/src/cli/cmd/tui/component/prompt/history.tsx +108 -0
  86. package/src/cli/cmd/tui/component/prompt/index.tsx +1155 -0
  87. package/src/cli/cmd/tui/component/prompt/stash.tsx +101 -0
  88. package/src/cli/cmd/tui/component/spinner.tsx +24 -0
  89. package/src/cli/cmd/tui/component/textarea-keybindings.ts +73 -0
  90. package/src/cli/cmd/tui/component/tips.tsx +152 -0
  91. package/src/cli/cmd/tui/component/todo-item.tsx +32 -0
  92. package/src/cli/cmd/tui/context/args.tsx +15 -0
  93. package/src/cli/cmd/tui/context/directory.ts +13 -0
  94. package/src/cli/cmd/tui/context/exit.tsx +53 -0
  95. package/src/cli/cmd/tui/context/helper.tsx +25 -0
  96. package/src/cli/cmd/tui/context/keybind.tsx +102 -0
  97. package/src/cli/cmd/tui/context/kv.tsx +52 -0
  98. package/src/cli/cmd/tui/context/local.tsx +406 -0
  99. package/src/cli/cmd/tui/context/prompt.tsx +18 -0
  100. package/src/cli/cmd/tui/context/route.tsx +46 -0
  101. package/src/cli/cmd/tui/context/sdk.tsx +101 -0
  102. package/src/cli/cmd/tui/context/sync.tsx +488 -0
  103. package/src/cli/cmd/tui/context/theme/aura.json +69 -0
  104. package/src/cli/cmd/tui/context/theme/ayu.json +80 -0
  105. package/src/cli/cmd/tui/context/theme/carbonfox.json +248 -0
  106. package/src/cli/cmd/tui/context/theme/catppuccin-frappe.json +233 -0
  107. package/src/cli/cmd/tui/context/theme/catppuccin-macchiato.json +233 -0
  108. package/src/cli/cmd/tui/context/theme/catppuccin.json +112 -0
  109. package/src/cli/cmd/tui/context/theme/cobalt2.json +228 -0
  110. package/src/cli/cmd/tui/context/theme/cursor.json +249 -0
  111. package/src/cli/cmd/tui/context/theme/dracula.json +219 -0
  112. package/src/cli/cmd/tui/context/theme/everforest.json +241 -0
  113. package/src/cli/cmd/tui/context/theme/flexoki.json +237 -0
  114. package/src/cli/cmd/tui/context/theme/github.json +233 -0
  115. package/src/cli/cmd/tui/context/theme/gruvbox.json +242 -0
  116. package/src/cli/cmd/tui/context/theme/kanagawa.json +77 -0
  117. package/src/cli/cmd/tui/context/theme/lucent-orng.json +237 -0
  118. package/src/cli/cmd/tui/context/theme/material.json +235 -0
  119. package/src/cli/cmd/tui/context/theme/matrix.json +77 -0
  120. package/src/cli/cmd/tui/context/theme/mercury.json +252 -0
  121. package/src/cli/cmd/tui/context/theme/monokai.json +221 -0
  122. package/src/cli/cmd/tui/context/theme/nightowl.json +221 -0
  123. package/src/cli/cmd/tui/context/theme/nord.json +223 -0
  124. package/src/cli/cmd/tui/context/theme/one-dark.json +84 -0
  125. package/src/cli/cmd/tui/context/theme/orng.json +249 -0
  126. package/src/cli/cmd/tui/context/theme/osaka-jade.json +93 -0
  127. package/src/cli/cmd/tui/context/theme/palenight.json +222 -0
  128. package/src/cli/cmd/tui/context/theme/rosepine.json +234 -0
  129. package/src/cli/cmd/tui/context/theme/solarized.json +223 -0
  130. package/src/cli/cmd/tui/context/theme/synthwave84.json +226 -0
  131. package/src/cli/cmd/tui/context/theme/tokyonight.json +243 -0
  132. package/src/cli/cmd/tui/context/theme/vercel.json +245 -0
  133. package/src/cli/cmd/tui/context/theme/vesper.json +218 -0
  134. package/src/cli/cmd/tui/context/theme/zenburn.json +223 -0
  135. package/src/cli/cmd/tui/context/theme.tsx +1152 -0
  136. package/src/cli/cmd/tui/context/tui-config.tsx +9 -0
  137. package/src/cli/cmd/tui/event.ts +48 -0
  138. package/src/cli/cmd/tui/routes/home.tsx +145 -0
  139. package/src/cli/cmd/tui/routes/session/dialog-fork-from-timeline.tsx +64 -0
  140. package/src/cli/cmd/tui/routes/session/dialog-message.tsx +109 -0
  141. package/src/cli/cmd/tui/routes/session/dialog-subagent.tsx +26 -0
  142. package/src/cli/cmd/tui/routes/session/dialog-timeline.tsx +47 -0
  143. package/src/cli/cmd/tui/routes/session/footer.tsx +91 -0
  144. package/src/cli/cmd/tui/routes/session/header.tsx +135 -0
  145. package/src/cli/cmd/tui/routes/session/index.tsx +2219 -0
  146. package/src/cli/cmd/tui/routes/session/permission.tsx +685 -0
  147. package/src/cli/cmd/tui/routes/session/question.tsx +466 -0
  148. package/src/cli/cmd/tui/routes/session/sidebar.tsx +321 -0
  149. package/src/cli/cmd/tui/thread.ts +199 -0
  150. package/src/cli/cmd/tui/ui/dialog-alert.tsx +59 -0
  151. package/src/cli/cmd/tui/ui/dialog-confirm.tsx +85 -0
  152. package/src/cli/cmd/tui/ui/dialog-export-options.tsx +207 -0
  153. package/src/cli/cmd/tui/ui/dialog-help.tsx +40 -0
  154. package/src/cli/cmd/tui/ui/dialog-prompt.tsx +80 -0
  155. package/src/cli/cmd/tui/ui/dialog-select.tsx +401 -0
  156. package/src/cli/cmd/tui/ui/dialog.tsx +182 -0
  157. package/src/cli/cmd/tui/ui/link.tsx +28 -0
  158. package/src/cli/cmd/tui/ui/spinner.ts +368 -0
  159. package/src/cli/cmd/tui/ui/toast.tsx +100 -0
  160. package/src/cli/cmd/tui/util/clipboard.ts +164 -0
  161. package/src/cli/cmd/tui/util/editor.ts +33 -0
  162. package/src/cli/cmd/tui/util/selection.ts +25 -0
  163. package/src/cli/cmd/tui/util/signal.ts +7 -0
  164. package/src/cli/cmd/tui/util/terminal.ts +114 -0
  165. package/src/cli/cmd/tui/util/transcript.ts +98 -0
  166. package/src/cli/cmd/tui/win32.ts +129 -0
  167. package/src/cli/cmd/tui/worker.ts +157 -0
  168. package/src/cli/cmd/uninstall.ts +356 -0
  169. package/src/cli/cmd/upgrade.ts +73 -0
  170. package/src/cli/cmd/web.ts +81 -0
  171. package/src/cli/cmd/workspace-serve.ts +16 -0
  172. package/src/cli/error.ts +57 -0
  173. package/src/cli/logo.ts +6 -0
  174. package/src/cli/network.ts +60 -0
  175. package/src/cli/ui.ts +116 -0
  176. package/src/cli/upgrade.ts +25 -0
  177. package/src/command/index.ts +150 -0
  178. package/src/command/template/initialize.txt +10 -0
  179. package/src/command/template/review.txt +101 -0
  180. package/src/config/config.ts +1408 -0
  181. package/src/config/markdown.ts +99 -0
  182. package/src/config/migrate-tui-config.ts +155 -0
  183. package/src/config/paths.ts +174 -0
  184. package/src/config/tui-schema.ts +34 -0
  185. package/src/config/tui.ts +118 -0
  186. package/src/control/control.sql.ts +22 -0
  187. package/src/control/index.ts +67 -0
  188. package/src/control-plane/adaptors/index.ts +10 -0
  189. package/src/control-plane/adaptors/types.ts +7 -0
  190. package/src/control-plane/adaptors/worktree.ts +26 -0
  191. package/src/control-plane/config.ts +10 -0
  192. package/src/control-plane/session-proxy-middleware.ts +46 -0
  193. package/src/control-plane/sse.ts +66 -0
  194. package/src/control-plane/workspace-server/routes.ts +33 -0
  195. package/src/control-plane/workspace-server/server.ts +24 -0
  196. package/src/control-plane/workspace.sql.ts +12 -0
  197. package/src/control-plane/workspace.ts +160 -0
  198. package/src/env/index.ts +28 -0
  199. package/src/file/ignore.ts +82 -0
  200. package/src/file/index.ts +646 -0
  201. package/src/file/ripgrep.ts +372 -0
  202. package/src/file/time.ts +71 -0
  203. package/src/file/watcher.ts +128 -0
  204. package/src/flag/flag.ts +109 -0
  205. package/src/format/formatter.ts +395 -0
  206. package/src/format/index.ts +140 -0
  207. package/src/global/index.ts +54 -0
  208. package/src/id/id.ts +84 -0
  209. package/src/ide/index.ts +76 -0
  210. package/src/index.ts +210 -0
  211. package/src/installation/index.ts +266 -0
  212. package/src/lsp/client.ts +251 -0
  213. package/src/lsp/index.ts +485 -0
  214. package/src/lsp/language.ts +120 -0
  215. package/src/lsp/server.ts +2142 -0
  216. package/src/mcp/auth.ts +130 -0
  217. package/src/mcp/index.ts +937 -0
  218. package/src/mcp/oauth-callback.ts +200 -0
  219. package/src/mcp/oauth-provider.ts +176 -0
  220. package/src/patch/index.ts +680 -0
  221. package/src/permission/arity.ts +163 -0
  222. package/src/permission/index.ts +210 -0
  223. package/src/permission/next.ts +286 -0
  224. package/src/plugin/codex.ts +624 -0
  225. package/src/plugin/copilot.ts +327 -0
  226. package/src/plugin/index.ts +143 -0
  227. package/src/project/bootstrap.ts +33 -0
  228. package/src/project/instance.ts +114 -0
  229. package/src/project/project.sql.ts +15 -0
  230. package/src/project/project.ts +441 -0
  231. package/src/project/state.ts +70 -0
  232. package/src/project/vcs.ts +76 -0
  233. package/src/provider/auth.ts +147 -0
  234. package/src/provider/error.ts +189 -0
  235. package/src/provider/models.ts +146 -0
  236. package/src/provider/provider.ts +1338 -0
  237. package/src/provider/sdk/copilot/README.md +5 -0
  238. package/src/provider/sdk/copilot/chat/convert-to-openai-compatible-chat-messages.ts +164 -0
  239. package/src/provider/sdk/copilot/chat/get-response-metadata.ts +15 -0
  240. package/src/provider/sdk/copilot/chat/map-openai-compatible-finish-reason.ts +17 -0
  241. package/src/provider/sdk/copilot/chat/openai-compatible-api-types.ts +64 -0
  242. package/src/provider/sdk/copilot/chat/openai-compatible-chat-language-model.ts +780 -0
  243. package/src/provider/sdk/copilot/chat/openai-compatible-chat-options.ts +28 -0
  244. package/src/provider/sdk/copilot/chat/openai-compatible-metadata-extractor.ts +44 -0
  245. package/src/provider/sdk/copilot/chat/openai-compatible-prepare-tools.ts +87 -0
  246. package/src/provider/sdk/copilot/copilot-provider.ts +100 -0
  247. package/src/provider/sdk/copilot/index.ts +2 -0
  248. package/src/provider/sdk/copilot/openai-compatible-error.ts +27 -0
  249. package/src/provider/sdk/copilot/responses/convert-to-openai-responses-input.ts +303 -0
  250. package/src/provider/sdk/copilot/responses/map-openai-responses-finish-reason.ts +22 -0
  251. package/src/provider/sdk/copilot/responses/openai-config.ts +18 -0
  252. package/src/provider/sdk/copilot/responses/openai-error.ts +22 -0
  253. package/src/provider/sdk/copilot/responses/openai-responses-api-types.ts +207 -0
  254. package/src/provider/sdk/copilot/responses/openai-responses-language-model.ts +1732 -0
  255. package/src/provider/sdk/copilot/responses/openai-responses-prepare-tools.ts +177 -0
  256. package/src/provider/sdk/copilot/responses/openai-responses-settings.ts +1 -0
  257. package/src/provider/sdk/copilot/responses/tool/code-interpreter.ts +88 -0
  258. package/src/provider/sdk/copilot/responses/tool/file-search.ts +128 -0
  259. package/src/provider/sdk/copilot/responses/tool/image-generation.ts +115 -0
  260. package/src/provider/sdk/copilot/responses/tool/local-shell.ts +65 -0
  261. package/src/provider/sdk/copilot/responses/tool/web-search-preview.ts +104 -0
  262. package/src/provider/sdk/copilot/responses/tool/web-search.ts +103 -0
  263. package/src/provider/transform.ts +955 -0
  264. package/src/pty/index.ts +324 -0
  265. package/src/question/index.ts +171 -0
  266. package/src/scheduler/index.ts +61 -0
  267. package/src/server/error.ts +36 -0
  268. package/src/server/event.ts +7 -0
  269. package/src/server/mdns.ts +60 -0
  270. package/src/server/routes/config.ts +92 -0
  271. package/src/server/routes/experimental.ts +270 -0
  272. package/src/server/routes/file.ts +197 -0
  273. package/src/server/routes/global.ts +185 -0
  274. package/src/server/routes/mcp.ts +225 -0
  275. package/src/server/routes/permission.ts +68 -0
  276. package/src/server/routes/project.ts +82 -0
  277. package/src/server/routes/provider.ts +165 -0
  278. package/src/server/routes/pty.ts +200 -0
  279. package/src/server/routes/question.ts +98 -0
  280. package/src/server/routes/session.ts +974 -0
  281. package/src/server/routes/tui.ts +379 -0
  282. package/src/server/routes/workspace.ts +104 -0
  283. package/src/server/server.ts +623 -0
  284. package/src/session/compaction.ts +261 -0
  285. package/src/session/index.ts +877 -0
  286. package/src/session/instruction.ts +192 -0
  287. package/src/session/llm.ts +279 -0
  288. package/src/session/message-v2.ts +899 -0
  289. package/src/session/message.ts +189 -0
  290. package/src/session/processor.ts +421 -0
  291. package/src/session/prompt/anthropic-20250930.txt +166 -0
  292. package/src/session/prompt/anthropic.txt +105 -0
  293. package/src/session/prompt/beast.txt +147 -0
  294. package/src/session/prompt/build-switch.txt +5 -0
  295. package/src/session/prompt/codex_header.txt +79 -0
  296. package/src/session/prompt/copilot-gpt-5.txt +143 -0
  297. package/src/session/prompt/gemini.txt +155 -0
  298. package/src/session/prompt/max-steps.txt +16 -0
  299. package/src/session/prompt/plan-reminder-anthropic.txt +67 -0
  300. package/src/session/prompt/plan.txt +26 -0
  301. package/src/session/prompt/qwen.txt +109 -0
  302. package/src/session/prompt/trinity.txt +97 -0
  303. package/src/session/prompt.ts +1959 -0
  304. package/src/session/retry.ts +101 -0
  305. package/src/session/revert.ts +138 -0
  306. package/src/session/session.sql.ts +88 -0
  307. package/src/session/status.ts +76 -0
  308. package/src/session/summary.ts +161 -0
  309. package/src/session/system.ts +54 -0
  310. package/src/session/todo.ts +56 -0
  311. package/src/share/share-next.ts +210 -0
  312. package/src/share/share.sql.ts +13 -0
  313. package/src/shell/shell.ts +68 -0
  314. package/src/skill/discovery.ts +98 -0
  315. package/src/skill/index.ts +1 -0
  316. package/src/skill/skill.ts +189 -0
  317. package/src/snapshot/index.ts +297 -0
  318. package/src/sql.d.ts +4 -0
  319. package/src/storage/db.ts +155 -0
  320. package/src/storage/json-migration.ts +425 -0
  321. package/src/storage/schema.sql.ts +10 -0
  322. package/src/storage/schema.ts +5 -0
  323. package/src/storage/storage.ts +220 -0
  324. package/src/tool/apply_patch.ts +281 -0
  325. package/src/tool/apply_patch.txt +33 -0
  326. package/src/tool/bash.ts +274 -0
  327. package/src/tool/bash.txt +115 -0
  328. package/src/tool/batch.ts +181 -0
  329. package/src/tool/batch.txt +24 -0
  330. package/src/tool/codesearch.ts +132 -0
  331. package/src/tool/codesearch.txt +12 -0
  332. package/src/tool/edit.ts +654 -0
  333. package/src/tool/edit.txt +10 -0
  334. package/src/tool/external-directory.ts +32 -0
  335. package/src/tool/glob.ts +78 -0
  336. package/src/tool/glob.txt +6 -0
  337. package/src/tool/grep.ts +156 -0
  338. package/src/tool/grep.txt +8 -0
  339. package/src/tool/invalid.ts +17 -0
  340. package/src/tool/ls.ts +121 -0
  341. package/src/tool/ls.txt +1 -0
  342. package/src/tool/lsp.ts +97 -0
  343. package/src/tool/lsp.txt +19 -0
  344. package/src/tool/multiedit.ts +46 -0
  345. package/src/tool/multiedit.txt +41 -0
  346. package/src/tool/plan-enter.txt +14 -0
  347. package/src/tool/plan-exit.txt +13 -0
  348. package/src/tool/plan.ts +131 -0
  349. package/src/tool/question.ts +33 -0
  350. package/src/tool/question.txt +10 -0
  351. package/src/tool/read.ts +293 -0
  352. package/src/tool/read.txt +14 -0
  353. package/src/tool/registry.ts +173 -0
  354. package/src/tool/skill.ts +123 -0
  355. package/src/tool/task.ts +165 -0
  356. package/src/tool/task.txt +60 -0
  357. package/src/tool/todo.ts +53 -0
  358. package/src/tool/todoread.txt +14 -0
  359. package/src/tool/todowrite.txt +167 -0
  360. package/src/tool/tool.ts +89 -0
  361. package/src/tool/truncation.ts +107 -0
  362. package/src/tool/webfetch.ts +206 -0
  363. package/src/tool/webfetch.txt +13 -0
  364. package/src/tool/websearch.ts +150 -0
  365. package/src/tool/websearch.txt +14 -0
  366. package/src/tool/write.ts +84 -0
  367. package/src/tool/write.txt +8 -0
  368. package/src/util/abort.ts +35 -0
  369. package/src/util/archive.ts +16 -0
  370. package/src/util/color.ts +19 -0
  371. package/src/util/context.ts +25 -0
  372. package/src/util/defer.ts +12 -0
  373. package/src/util/eventloop.ts +20 -0
  374. package/src/util/filesystem.ts +189 -0
  375. package/src/util/fn.ts +11 -0
  376. package/src/util/format.ts +20 -0
  377. package/src/util/git.ts +35 -0
  378. package/src/util/glob.ts +34 -0
  379. package/src/util/iife.ts +3 -0
  380. package/src/util/keybind.ts +103 -0
  381. package/src/util/lazy.ts +23 -0
  382. package/src/util/locale.ts +81 -0
  383. package/src/util/lock.ts +98 -0
  384. package/src/util/log.ts +182 -0
  385. package/src/util/process.ts +126 -0
  386. package/src/util/proxied.ts +3 -0
  387. package/src/util/queue.ts +32 -0
  388. package/src/util/rpc.ts +66 -0
  389. package/src/util/scrap.ts +10 -0
  390. package/src/util/signal.ts +12 -0
  391. package/src/util/timeout.ts +14 -0
  392. package/src/util/token.ts +7 -0
  393. package/src/util/wildcard.ts +59 -0
  394. package/src/worktree/index.ts +643 -0
  395. package/sst-env.d.ts +10 -0
  396. package/test/AGENTS.md +81 -0
  397. package/test/acp/agent-interface.test.ts +51 -0
  398. package/test/acp/event-subscription.test.ts +683 -0
  399. package/test/agent/agent.test.ts +689 -0
  400. package/test/bun.test.ts +53 -0
  401. package/test/cli/github-action.test.ts +197 -0
  402. package/test/cli/github-remote.test.ts +80 -0
  403. package/test/cli/import.test.ts +38 -0
  404. package/test/cli/plugin-auth-picker.test.ts +120 -0
  405. package/test/cli/tui/transcript.test.ts +322 -0
  406. package/test/config/agent-color.test.ts +71 -0
  407. package/test/config/config.test.ts +1886 -0
  408. package/test/config/fixtures/empty-frontmatter.md +4 -0
  409. package/test/config/fixtures/frontmatter.md +28 -0
  410. package/test/config/fixtures/markdown-header.md +11 -0
  411. package/test/config/fixtures/no-frontmatter.md +1 -0
  412. package/test/config/fixtures/weird-model-id.md +13 -0
  413. package/test/config/markdown.test.ts +228 -0
  414. package/test/config/tui.test.ts +510 -0
  415. package/test/control-plane/session-proxy-middleware.test.ts +147 -0
  416. package/test/control-plane/sse.test.ts +56 -0
  417. package/test/control-plane/workspace-server-sse.test.ts +65 -0
  418. package/test/control-plane/workspace-sync.test.ts +97 -0
  419. package/test/file/ignore.test.ts +10 -0
  420. package/test/file/index.test.ts +394 -0
  421. package/test/file/path-traversal.test.ts +198 -0
  422. package/test/file/ripgrep.test.ts +39 -0
  423. package/test/file/time.test.ts +361 -0
  424. package/test/fixture/db.ts +11 -0
  425. package/test/fixture/fixture.ts +45 -0
  426. package/test/fixture/lsp/fake-lsp-server.js +77 -0
  427. package/test/fixture/skills/agents-sdk/SKILL.md +152 -0
  428. package/test/fixture/skills/agents-sdk/references/callable.md +92 -0
  429. package/test/fixture/skills/cloudflare/SKILL.md +211 -0
  430. package/test/fixture/skills/index.json +6 -0
  431. package/test/ide/ide.test.ts +82 -0
  432. package/test/keybind.test.ts +421 -0
  433. package/test/lsp/client.test.ts +95 -0
  434. package/test/mcp/headers.test.ts +153 -0
  435. package/test/mcp/oauth-browser.test.ts +249 -0
  436. package/test/memory/abort-leak.test.ts +136 -0
  437. package/test/patch/patch.test.ts +348 -0
  438. package/test/permission/arity.test.ts +33 -0
  439. package/test/permission/next.test.ts +689 -0
  440. package/test/permission-task.test.ts +319 -0
  441. package/test/plugin/auth-override.test.ts +44 -0
  442. package/test/plugin/codex.test.ts +123 -0
  443. package/test/preload.ts +80 -0
  444. package/test/project/project.test.ts +348 -0
  445. package/test/project/worktree-remove.test.ts +65 -0
  446. package/test/provider/amazon-bedrock.test.ts +446 -0
  447. package/test/provider/copilot/convert-to-copilot-messages.test.ts +523 -0
  448. package/test/provider/copilot/copilot-chat-model.test.ts +592 -0
  449. package/test/provider/gitlab-duo.test.ts +262 -0
  450. package/test/provider/provider.test.ts +2220 -0
  451. package/test/provider/transform.test.ts +2353 -0
  452. package/test/pty/pty-output-isolation.test.ts +140 -0
  453. package/test/question/question.test.ts +300 -0
  454. package/test/scheduler.test.ts +73 -0
  455. package/test/server/global-session-list.test.ts +89 -0
  456. package/test/server/session-list.test.ts +90 -0
  457. package/test/server/session-select.test.ts +78 -0
  458. package/test/session/compaction.test.ts +423 -0
  459. package/test/session/instruction.test.ts +170 -0
  460. package/test/session/llm.test.ts +667 -0
  461. package/test/session/message-v2.test.ts +924 -0
  462. package/test/session/prompt.test.ts +211 -0
  463. package/test/session/retry.test.ts +188 -0
  464. package/test/session/revert-compact.test.ts +285 -0
  465. package/test/session/session.test.ts +71 -0
  466. package/test/session/structured-output-integration.test.ts +233 -0
  467. package/test/session/structured-output.test.ts +385 -0
  468. package/test/skill/discovery.test.ts +110 -0
  469. package/test/skill/skill.test.ts +388 -0
  470. package/test/snapshot/snapshot.test.ts +1180 -0
  471. package/test/storage/json-migration.test.ts +846 -0
  472. package/test/tool/__snapshots__/tool.test.ts.snap +9 -0
  473. package/test/tool/apply_patch.test.ts +566 -0
  474. package/test/tool/bash.test.ts +402 -0
  475. package/test/tool/edit.test.ts +496 -0
  476. package/test/tool/external-directory.test.ts +127 -0
  477. package/test/tool/fixtures/large-image.png +0 -0
  478. package/test/tool/fixtures/models-api.json +38413 -0
  479. package/test/tool/grep.test.ts +110 -0
  480. package/test/tool/question.test.ts +107 -0
  481. package/test/tool/read.test.ts +504 -0
  482. package/test/tool/registry.test.ts +122 -0
  483. package/test/tool/skill.test.ts +112 -0
  484. package/test/tool/truncation.test.ts +160 -0
  485. package/test/tool/webfetch.test.ts +100 -0
  486. package/test/tool/write.test.ts +348 -0
  487. package/test/util/filesystem.test.ts +443 -0
  488. package/test/util/format.test.ts +59 -0
  489. package/test/util/glob.test.ts +164 -0
  490. package/test/util/iife.test.ts +36 -0
  491. package/test/util/lazy.test.ts +50 -0
  492. package/test/util/lock.test.ts +72 -0
  493. package/test/util/process.test.ts +59 -0
  494. package/test/util/timeout.test.ts +21 -0
  495. package/test/util/wildcard.test.ts +90 -0
  496. package/tsconfig.json +16 -0
@@ -0,0 +1,2353 @@
1
+ import { describe, expect, test } from "bun:test"
2
+ import { ProviderTransform } from "../../src/provider/transform"
3
+
4
// Output-token ceiling fixture. Not referenced in this portion of the file —
// presumably consumed by tests further down; verify before removing.
const OUTPUT_TOKEN_MAX = 32000
5
+
6
+ describe("ProviderTransform.options - setCacheKey", () => {
7
+ const sessionID = "test-session-123"
8
+
9
+ const mockModel = {
10
+ id: "anthropic/claude-3-5-sonnet",
11
+ providerID: "anthropic",
12
+ api: {
13
+ id: "claude-3-5-sonnet-20241022",
14
+ url: "https://api.anthropic.com",
15
+ npm: "@ai-sdk/anthropic",
16
+ },
17
+ name: "Claude 3.5 Sonnet",
18
+ capabilities: {
19
+ temperature: true,
20
+ reasoning: false,
21
+ attachment: true,
22
+ toolcall: true,
23
+ input: { text: true, audio: false, image: true, video: false, pdf: true },
24
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
25
+ interleaved: false,
26
+ },
27
+ cost: {
28
+ input: 0.003,
29
+ output: 0.015,
30
+ cache: { read: 0.0003, write: 0.00375 },
31
+ },
32
+ limit: {
33
+ context: 200000,
34
+ output: 8192,
35
+ },
36
+ status: "active",
37
+ options: {},
38
+ headers: {},
39
+ } as any
40
+
41
+ test("should set promptCacheKey when providerOptions.setCacheKey is true", () => {
42
+ const result = ProviderTransform.options({
43
+ model: mockModel,
44
+ sessionID,
45
+ providerOptions: { setCacheKey: true },
46
+ })
47
+ expect(result.promptCacheKey).toBe(sessionID)
48
+ })
49
+
50
+ test("should not set promptCacheKey when providerOptions.setCacheKey is false", () => {
51
+ const result = ProviderTransform.options({
52
+ model: mockModel,
53
+ sessionID,
54
+ providerOptions: { setCacheKey: false },
55
+ })
56
+ expect(result.promptCacheKey).toBeUndefined()
57
+ })
58
+
59
+ test("should not set promptCacheKey when providerOptions is undefined", () => {
60
+ const result = ProviderTransform.options({
61
+ model: mockModel,
62
+ sessionID,
63
+ providerOptions: undefined,
64
+ })
65
+ expect(result.promptCacheKey).toBeUndefined()
66
+ })
67
+
68
+ test("should not set promptCacheKey when providerOptions does not have setCacheKey", () => {
69
+ const result = ProviderTransform.options({ model: mockModel, sessionID, providerOptions: {} })
70
+ expect(result.promptCacheKey).toBeUndefined()
71
+ })
72
+
73
+ test("should set promptCacheKey for openai provider regardless of setCacheKey", () => {
74
+ const openaiModel = {
75
+ ...mockModel,
76
+ providerID: "openai",
77
+ api: {
78
+ id: "gpt-4",
79
+ url: "https://api.openai.com",
80
+ npm: "@ai-sdk/openai",
81
+ },
82
+ }
83
+ const result = ProviderTransform.options({ model: openaiModel, sessionID, providerOptions: {} })
84
+ expect(result.promptCacheKey).toBe(sessionID)
85
+ })
86
+
87
+ test("should set store=false for openai provider", () => {
88
+ const openaiModel = {
89
+ ...mockModel,
90
+ providerID: "openai",
91
+ api: {
92
+ id: "gpt-4",
93
+ url: "https://api.openai.com",
94
+ npm: "@ai-sdk/openai",
95
+ },
96
+ }
97
+ const result = ProviderTransform.options({
98
+ model: openaiModel,
99
+ sessionID,
100
+ providerOptions: {},
101
+ })
102
+ expect(result.store).toBe(false)
103
+ })
104
+ })
105
+
106
+ describe("ProviderTransform.options - gpt-5 textVerbosity", () => {
107
+ const sessionID = "test-session-123"
108
+
109
+ const createGpt5Model = (apiId: string) =>
110
+ ({
111
+ id: `openai/${apiId}`,
112
+ providerID: "openai",
113
+ api: {
114
+ id: apiId,
115
+ url: "https://api.openai.com",
116
+ npm: "@ai-sdk/openai",
117
+ },
118
+ name: apiId,
119
+ capabilities: {
120
+ temperature: true,
121
+ reasoning: true,
122
+ attachment: true,
123
+ toolcall: true,
124
+ input: { text: true, audio: false, image: true, video: false, pdf: false },
125
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
126
+ interleaved: false,
127
+ },
128
+ cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
129
+ limit: { context: 128000, output: 4096 },
130
+ status: "active",
131
+ options: {},
132
+ headers: {},
133
+ }) as any
134
+
135
+ test("gpt-5.2 should have textVerbosity set to low", () => {
136
+ const model = createGpt5Model("gpt-5.2")
137
+ const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
138
+ expect(result.textVerbosity).toBe("low")
139
+ })
140
+
141
+ test("gpt-5.1 should have textVerbosity set to low", () => {
142
+ const model = createGpt5Model("gpt-5.1")
143
+ const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
144
+ expect(result.textVerbosity).toBe("low")
145
+ })
146
+
147
+ test("gpt-5.2-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
148
+ const model = createGpt5Model("gpt-5.2-chat-latest")
149
+ const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
150
+ expect(result.textVerbosity).toBeUndefined()
151
+ })
152
+
153
+ test("gpt-5.1-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
154
+ const model = createGpt5Model("gpt-5.1-chat-latest")
155
+ const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
156
+ expect(result.textVerbosity).toBeUndefined()
157
+ })
158
+
159
+ test("gpt-5.2-chat should NOT have textVerbosity set", () => {
160
+ const model = createGpt5Model("gpt-5.2-chat")
161
+ const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
162
+ expect(result.textVerbosity).toBeUndefined()
163
+ })
164
+
165
+ test("gpt-5-chat should NOT have textVerbosity set", () => {
166
+ const model = createGpt5Model("gpt-5-chat")
167
+ const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
168
+ expect(result.textVerbosity).toBeUndefined()
169
+ })
170
+
171
+ test("gpt-5.2-codex should NOT have textVerbosity set (codex models excluded)", () => {
172
+ const model = createGpt5Model("gpt-5.2-codex")
173
+ const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
174
+ expect(result.textVerbosity).toBeUndefined()
175
+ })
176
+ })
177
+
178
+ describe("ProviderTransform.options - gateway", () => {
179
+ const sessionID = "test-session-123"
180
+
181
+ const createModel = (id: string) =>
182
+ ({
183
+ id,
184
+ providerID: "vercel",
185
+ api: {
186
+ id,
187
+ url: "https://ai-gateway.vercel.sh/v3/ai",
188
+ npm: "@ai-sdk/gateway",
189
+ },
190
+ name: id,
191
+ capabilities: {
192
+ temperature: true,
193
+ reasoning: true,
194
+ attachment: true,
195
+ toolcall: true,
196
+ input: { text: true, audio: false, image: true, video: false, pdf: true },
197
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
198
+ interleaved: false,
199
+ },
200
+ cost: {
201
+ input: 0.001,
202
+ output: 0.002,
203
+ cache: { read: 0.0001, write: 0.0002 },
204
+ },
205
+ limit: {
206
+ context: 200_000,
207
+ output: 8192,
208
+ },
209
+ status: "active",
210
+ options: {},
211
+ headers: {},
212
+ release_date: "2024-01-01",
213
+ }) as any
214
+
215
+ test("puts gateway defaults under gateway key", () => {
216
+ const model = createModel("anthropic/claude-sonnet-4")
217
+ const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
218
+ expect(result).toEqual({
219
+ gateway: {
220
+ caching: "auto",
221
+ },
222
+ })
223
+ })
224
+ })
225
+
226
+ describe("ProviderTransform.providerOptions", () => {
227
+ const createModel = (overrides: Partial<any> = {}) =>
228
+ ({
229
+ id: "test/test-model",
230
+ providerID: "test",
231
+ api: {
232
+ id: "test-model",
233
+ url: "https://api.test.com",
234
+ npm: "@ai-sdk/openai",
235
+ },
236
+ name: "Test Model",
237
+ capabilities: {
238
+ temperature: true,
239
+ reasoning: true,
240
+ attachment: true,
241
+ toolcall: true,
242
+ input: { text: true, audio: false, image: true, video: false, pdf: false },
243
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
244
+ interleaved: false,
245
+ },
246
+ cost: {
247
+ input: 0.001,
248
+ output: 0.002,
249
+ cache: { read: 0.0001, write: 0.0002 },
250
+ },
251
+ limit: {
252
+ context: 200_000,
253
+ output: 64_000,
254
+ },
255
+ status: "active",
256
+ options: {},
257
+ headers: {},
258
+ release_date: "2024-01-01",
259
+ ...overrides,
260
+ }) as any
261
+
262
+ test("uses sdk key for non-gateway models", () => {
263
+ const model = createModel({
264
+ providerID: "my-bedrock",
265
+ api: {
266
+ id: "anthropic.claude-sonnet-4",
267
+ url: "https://bedrock.aws",
268
+ npm: "@ai-sdk/amazon-bedrock",
269
+ },
270
+ })
271
+
272
+ expect(ProviderTransform.providerOptions(model, { cachePoint: { type: "default" } })).toEqual({
273
+ bedrock: { cachePoint: { type: "default" } },
274
+ })
275
+ })
276
+
277
+ test("uses gateway model provider slug for gateway models", () => {
278
+ const model = createModel({
279
+ providerID: "vercel",
280
+ api: {
281
+ id: "anthropic/claude-sonnet-4",
282
+ url: "https://ai-gateway.vercel.sh/v3/ai",
283
+ npm: "@ai-sdk/gateway",
284
+ },
285
+ })
286
+
287
+ expect(ProviderTransform.providerOptions(model, { thinking: { type: "enabled", budgetTokens: 12_000 } })).toEqual({
288
+ anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
289
+ })
290
+ })
291
+
292
+ test("falls back to gateway key when gateway api id is unscoped", () => {
293
+ const model = createModel({
294
+ id: "anthropic/claude-sonnet-4",
295
+ providerID: "vercel",
296
+ api: {
297
+ id: "claude-sonnet-4",
298
+ url: "https://ai-gateway.vercel.sh/v3/ai",
299
+ npm: "@ai-sdk/gateway",
300
+ },
301
+ })
302
+
303
+ expect(ProviderTransform.providerOptions(model, { thinking: { type: "enabled", budgetTokens: 12_000 } })).toEqual({
304
+ gateway: { thinking: { type: "enabled", budgetTokens: 12_000 } },
305
+ })
306
+ })
307
+
308
+ test("splits gateway routing options from provider-specific options", () => {
309
+ const model = createModel({
310
+ providerID: "vercel",
311
+ api: {
312
+ id: "anthropic/claude-sonnet-4",
313
+ url: "https://ai-gateway.vercel.sh/v3/ai",
314
+ npm: "@ai-sdk/gateway",
315
+ },
316
+ })
317
+
318
+ expect(
319
+ ProviderTransform.providerOptions(model, {
320
+ gateway: { order: ["vertex", "anthropic"] },
321
+ thinking: { type: "enabled", budgetTokens: 12_000 },
322
+ }),
323
+ ).toEqual({
324
+ gateway: { order: ["vertex", "anthropic"] },
325
+ anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
326
+ } as any)
327
+ })
328
+
329
+ test("falls back to gateway key when model id has no provider slug", () => {
330
+ const model = createModel({
331
+ id: "claude-sonnet-4",
332
+ providerID: "vercel",
333
+ api: {
334
+ id: "claude-sonnet-4",
335
+ url: "https://ai-gateway.vercel.sh/v3/ai",
336
+ npm: "@ai-sdk/gateway",
337
+ },
338
+ })
339
+
340
+ expect(ProviderTransform.providerOptions(model, { reasoningEffort: "high" })).toEqual({
341
+ gateway: { reasoningEffort: "high" },
342
+ })
343
+ })
344
+
345
+ test("maps amazon slug to bedrock for provider options", () => {
346
+ const model = createModel({
347
+ providerID: "vercel",
348
+ api: {
349
+ id: "amazon/nova-2-lite",
350
+ url: "https://ai-gateway.vercel.sh/v3/ai",
351
+ npm: "@ai-sdk/gateway",
352
+ },
353
+ })
354
+
355
+ expect(ProviderTransform.providerOptions(model, { reasoningConfig: { type: "enabled" } })).toEqual({
356
+ bedrock: { reasoningConfig: { type: "enabled" } },
357
+ })
358
+ })
359
+
360
+ test("uses groq slug for groq models", () => {
361
+ const model = createModel({
362
+ providerID: "vercel",
363
+ api: {
364
+ id: "groq/llama-3.3-70b-versatile",
365
+ url: "https://ai-gateway.vercel.sh/v3/ai",
366
+ npm: "@ai-sdk/gateway",
367
+ },
368
+ })
369
+
370
+ expect(ProviderTransform.providerOptions(model, { reasoningFormat: "parsed" })).toEqual({
371
+ groq: { reasoningFormat: "parsed" },
372
+ })
373
+ })
374
+ })
375
+
376
+ describe("ProviderTransform.schema - gemini array items", () => {
377
+ test("adds missing items for array properties", () => {
378
+ const geminiModel = {
379
+ providerID: "google",
380
+ api: {
381
+ id: "gemini-3-pro",
382
+ },
383
+ } as any
384
+
385
+ const schema = {
386
+ type: "object",
387
+ properties: {
388
+ nodes: { type: "array" },
389
+ edges: { type: "array", items: { type: "string" } },
390
+ },
391
+ } as any
392
+
393
+ const result = ProviderTransform.schema(geminiModel, schema) as any
394
+
395
+ expect(result.properties.nodes.items).toBeDefined()
396
+ expect(result.properties.edges.items.type).toBe("string")
397
+ })
398
+ })
399
+
400
+ describe("ProviderTransform.schema - gemini nested array items", () => {
401
+ const geminiModel = {
402
+ providerID: "google",
403
+ api: {
404
+ id: "gemini-3-pro",
405
+ },
406
+ } as any
407
+
408
+ test("adds type to 2D array with empty inner items", () => {
409
+ const schema = {
410
+ type: "object",
411
+ properties: {
412
+ values: {
413
+ type: "array",
414
+ items: {
415
+ type: "array",
416
+ items: {}, // Empty items object
417
+ },
418
+ },
419
+ },
420
+ } as any
421
+
422
+ const result = ProviderTransform.schema(geminiModel, schema) as any
423
+
424
+ // Inner items should have a default type
425
+ expect(result.properties.values.items.items.type).toBe("string")
426
+ })
427
+
428
+ test("adds items and type to 2D array with missing inner items", () => {
429
+ const schema = {
430
+ type: "object",
431
+ properties: {
432
+ data: {
433
+ type: "array",
434
+ items: { type: "array" }, // No items at all
435
+ },
436
+ },
437
+ } as any
438
+
439
+ const result = ProviderTransform.schema(geminiModel, schema) as any
440
+
441
+ expect(result.properties.data.items.items).toBeDefined()
442
+ expect(result.properties.data.items.items.type).toBe("string")
443
+ })
444
+
445
+ test("handles deeply nested arrays (3D)", () => {
446
+ const schema = {
447
+ type: "object",
448
+ properties: {
449
+ matrix: {
450
+ type: "array",
451
+ items: {
452
+ type: "array",
453
+ items: {
454
+ type: "array",
455
+ // No items
456
+ },
457
+ },
458
+ },
459
+ },
460
+ } as any
461
+
462
+ const result = ProviderTransform.schema(geminiModel, schema) as any
463
+
464
+ expect(result.properties.matrix.items.items.items).toBeDefined()
465
+ expect(result.properties.matrix.items.items.items.type).toBe("string")
466
+ })
467
+
468
+ test("preserves existing item types in nested arrays", () => {
469
+ const schema = {
470
+ type: "object",
471
+ properties: {
472
+ numbers: {
473
+ type: "array",
474
+ items: {
475
+ type: "array",
476
+ items: { type: "number" }, // Has explicit type
477
+ },
478
+ },
479
+ },
480
+ } as any
481
+
482
+ const result = ProviderTransform.schema(geminiModel, schema) as any
483
+
484
+ // Should preserve the explicit type
485
+ expect(result.properties.numbers.items.items.type).toBe("number")
486
+ })
487
+
488
+ test("handles mixed nested structures with objects and arrays", () => {
489
+ const schema = {
490
+ type: "object",
491
+ properties: {
492
+ spreadsheetData: {
493
+ type: "object",
494
+ properties: {
495
+ rows: {
496
+ type: "array",
497
+ items: {
498
+ type: "array",
499
+ items: {}, // Empty items
500
+ },
501
+ },
502
+ },
503
+ },
504
+ },
505
+ } as any
506
+
507
+ const result = ProviderTransform.schema(geminiModel, schema) as any
508
+
509
+ expect(result.properties.spreadsheetData.properties.rows.items.items.type).toBe("string")
510
+ })
511
+ })
512
+
513
+ describe("ProviderTransform.schema - gemini non-object properties removal", () => {
514
+ const geminiModel = {
515
+ providerID: "google",
516
+ api: {
517
+ id: "gemini-3-pro",
518
+ },
519
+ } as any
520
+
521
+ test("removes properties from non-object types", () => {
522
+ const schema = {
523
+ type: "object",
524
+ properties: {
525
+ data: {
526
+ type: "string",
527
+ properties: { invalid: { type: "string" } },
528
+ },
529
+ },
530
+ } as any
531
+
532
+ const result = ProviderTransform.schema(geminiModel, schema) as any
533
+
534
+ expect(result.properties.data.type).toBe("string")
535
+ expect(result.properties.data.properties).toBeUndefined()
536
+ })
537
+
538
+ test("removes required from non-object types", () => {
539
+ const schema = {
540
+ type: "object",
541
+ properties: {
542
+ data: {
543
+ type: "array",
544
+ items: { type: "string" },
545
+ required: ["invalid"],
546
+ },
547
+ },
548
+ } as any
549
+
550
+ const result = ProviderTransform.schema(geminiModel, schema) as any
551
+
552
+ expect(result.properties.data.type).toBe("array")
553
+ expect(result.properties.data.required).toBeUndefined()
554
+ })
555
+
556
+ test("removes properties and required from nested non-object types", () => {
557
+ const schema = {
558
+ type: "object",
559
+ properties: {
560
+ outer: {
561
+ type: "object",
562
+ properties: {
563
+ inner: {
564
+ type: "number",
565
+ properties: { bad: { type: "string" } },
566
+ required: ["bad"],
567
+ },
568
+ },
569
+ },
570
+ },
571
+ } as any
572
+
573
+ const result = ProviderTransform.schema(geminiModel, schema) as any
574
+
575
+ expect(result.properties.outer.properties.inner.type).toBe("number")
576
+ expect(result.properties.outer.properties.inner.properties).toBeUndefined()
577
+ expect(result.properties.outer.properties.inner.required).toBeUndefined()
578
+ })
579
+
580
+ test("keeps properties and required on object types", () => {
581
+ const schema = {
582
+ type: "object",
583
+ properties: {
584
+ data: {
585
+ type: "object",
586
+ properties: { name: { type: "string" } },
587
+ required: ["name"],
588
+ },
589
+ },
590
+ } as any
591
+
592
+ const result = ProviderTransform.schema(geminiModel, schema) as any
593
+
594
+ expect(result.properties.data.type).toBe("object")
595
+ expect(result.properties.data.properties).toBeDefined()
596
+ expect(result.properties.data.required).toEqual(["name"])
597
+ })
598
+
599
+ test("does not affect non-gemini providers", () => {
600
+ const openaiModel = {
601
+ providerID: "openai",
602
+ api: {
603
+ id: "gpt-4",
604
+ },
605
+ } as any
606
+
607
+ const schema = {
608
+ type: "object",
609
+ properties: {
610
+ data: {
611
+ type: "string",
612
+ properties: { invalid: { type: "string" } },
613
+ },
614
+ },
615
+ } as any
616
+
617
+ const result = ProviderTransform.schema(openaiModel, schema) as any
618
+
619
+ expect(result.properties.data.properties).toBeDefined()
620
+ })
621
+ })
622
+
623
+ describe("ProviderTransform.message - DeepSeek reasoning content", () => {
624
+ test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
625
+ const msgs = [
626
+ {
627
+ role: "assistant",
628
+ content: [
629
+ { type: "reasoning", text: "Let me think about this..." },
630
+ {
631
+ type: "tool-call",
632
+ toolCallId: "test",
633
+ toolName: "bash",
634
+ input: { command: "echo hello" },
635
+ },
636
+ ],
637
+ },
638
+ ] as any[]
639
+
640
+ const result = ProviderTransform.message(
641
+ msgs,
642
+ {
643
+ id: "deepseek/deepseek-chat",
644
+ providerID: "deepseek",
645
+ api: {
646
+ id: "deepseek-chat",
647
+ url: "https://api.deepseek.com",
648
+ npm: "@ai-sdk/openai-compatible",
649
+ },
650
+ name: "DeepSeek Chat",
651
+ capabilities: {
652
+ temperature: true,
653
+ reasoning: true,
654
+ attachment: false,
655
+ toolcall: true,
656
+ input: { text: true, audio: false, image: false, video: false, pdf: false },
657
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
658
+ interleaved: {
659
+ field: "reasoning_content",
660
+ },
661
+ },
662
+ cost: {
663
+ input: 0.001,
664
+ output: 0.002,
665
+ cache: { read: 0.0001, write: 0.0002 },
666
+ },
667
+ limit: {
668
+ context: 128000,
669
+ output: 8192,
670
+ },
671
+ status: "active",
672
+ options: {},
673
+ headers: {},
674
+ release_date: "2023-04-01",
675
+ },
676
+ {},
677
+ )
678
+
679
+ expect(result).toHaveLength(1)
680
+ expect(result[0].content).toEqual([
681
+ {
682
+ type: "tool-call",
683
+ toolCallId: "test",
684
+ toolName: "bash",
685
+ input: { command: "echo hello" },
686
+ },
687
+ ])
688
+ expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
689
+ })
690
+
691
+ test("Non-DeepSeek providers leave reasoning content unchanged", () => {
692
+ const msgs = [
693
+ {
694
+ role: "assistant",
695
+ content: [
696
+ { type: "reasoning", text: "Should not be processed" },
697
+ { type: "text", text: "Answer" },
698
+ ],
699
+ },
700
+ ] as any[]
701
+
702
+ const result = ProviderTransform.message(
703
+ msgs,
704
+ {
705
+ id: "openai/gpt-4",
706
+ providerID: "openai",
707
+ api: {
708
+ id: "gpt-4",
709
+ url: "https://api.openai.com",
710
+ npm: "@ai-sdk/openai",
711
+ },
712
+ name: "GPT-4",
713
+ capabilities: {
714
+ temperature: true,
715
+ reasoning: false,
716
+ attachment: true,
717
+ toolcall: true,
718
+ input: { text: true, audio: false, image: true, video: false, pdf: false },
719
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
720
+ interleaved: false,
721
+ },
722
+ cost: {
723
+ input: 0.03,
724
+ output: 0.06,
725
+ cache: { read: 0.001, write: 0.002 },
726
+ },
727
+ limit: {
728
+ context: 128000,
729
+ output: 4096,
730
+ },
731
+ status: "active",
732
+ options: {},
733
+ headers: {},
734
+ release_date: "2023-04-01",
735
+ },
736
+ {},
737
+ )
738
+
739
+ expect(result[0].content).toEqual([
740
+ { type: "reasoning", text: "Should not be processed" },
741
+ { type: "text", text: "Answer" },
742
+ ])
743
+ expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
744
+ })
745
+ })
746
+
747
+ describe("ProviderTransform.message - empty image handling", () => {
748
+ const mockModel = {
749
+ id: "anthropic/claude-3-5-sonnet",
750
+ providerID: "anthropic",
751
+ api: {
752
+ id: "claude-3-5-sonnet-20241022",
753
+ url: "https://api.anthropic.com",
754
+ npm: "@ai-sdk/anthropic",
755
+ },
756
+ name: "Claude 3.5 Sonnet",
757
+ capabilities: {
758
+ temperature: true,
759
+ reasoning: false,
760
+ attachment: true,
761
+ toolcall: true,
762
+ input: { text: true, audio: false, image: true, video: false, pdf: true },
763
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
764
+ interleaved: false,
765
+ },
766
+ cost: {
767
+ input: 0.003,
768
+ output: 0.015,
769
+ cache: { read: 0.0003, write: 0.00375 },
770
+ },
771
+ limit: {
772
+ context: 200000,
773
+ output: 8192,
774
+ },
775
+ status: "active",
776
+ options: {},
777
+ headers: {},
778
+ } as any
779
+
780
+ test("should replace empty base64 image with error text", () => {
781
+ const msgs = [
782
+ {
783
+ role: "user",
784
+ content: [
785
+ { type: "text", text: "What is in this image?" },
786
+ { type: "image", image: "data:image/png;base64," },
787
+ ],
788
+ },
789
+ ] as any[]
790
+
791
+ const result = ProviderTransform.message(msgs, mockModel, {})
792
+
793
+ expect(result).toHaveLength(1)
794
+ expect(result[0].content).toHaveLength(2)
795
+ expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
796
+ expect(result[0].content[1]).toEqual({
797
+ type: "text",
798
+ text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
799
+ })
800
+ })
801
+
802
+ test("should keep valid base64 images unchanged", () => {
803
+ const validBase64 =
804
+ "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
805
+ const msgs = [
806
+ {
807
+ role: "user",
808
+ content: [
809
+ { type: "text", text: "What is in this image?" },
810
+ { type: "image", image: `data:image/png;base64,${validBase64}` },
811
+ ],
812
+ },
813
+ ] as any[]
814
+
815
+ const result = ProviderTransform.message(msgs, mockModel, {})
816
+
817
+ expect(result).toHaveLength(1)
818
+ expect(result[0].content).toHaveLength(2)
819
+ expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
820
+ expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
821
+ })
822
+
823
+ test("should handle mixed valid and empty images", () => {
824
+ const validBase64 =
825
+ "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
826
+ const msgs = [
827
+ {
828
+ role: "user",
829
+ content: [
830
+ { type: "text", text: "Compare these images" },
831
+ { type: "image", image: `data:image/png;base64,${validBase64}` },
832
+ { type: "image", image: "data:image/jpeg;base64," },
833
+ ],
834
+ },
835
+ ] as any[]
836
+
837
+ const result = ProviderTransform.message(msgs, mockModel, {})
838
+
839
+ expect(result).toHaveLength(1)
840
+ expect(result[0].content).toHaveLength(3)
841
+ expect(result[0].content[0]).toEqual({ type: "text", text: "Compare these images" })
842
+ expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
843
+ expect(result[0].content[2]).toEqual({
844
+ type: "text",
845
+ text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
846
+ })
847
+ })
848
+ })
849
+
850
+ describe("ProviderTransform.message - anthropic empty content filtering", () => {
851
+ const anthropicModel = {
852
+ id: "anthropic/claude-3-5-sonnet",
853
+ providerID: "anthropic",
854
+ api: {
855
+ id: "claude-3-5-sonnet-20241022",
856
+ url: "https://api.anthropic.com",
857
+ npm: "@ai-sdk/anthropic",
858
+ },
859
+ name: "Claude 3.5 Sonnet",
860
+ capabilities: {
861
+ temperature: true,
862
+ reasoning: false,
863
+ attachment: true,
864
+ toolcall: true,
865
+ input: { text: true, audio: false, image: true, video: false, pdf: true },
866
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
867
+ interleaved: false,
868
+ },
869
+ cost: {
870
+ input: 0.003,
871
+ output: 0.015,
872
+ cache: { read: 0.0003, write: 0.00375 },
873
+ },
874
+ limit: {
875
+ context: 200000,
876
+ output: 8192,
877
+ },
878
+ status: "active",
879
+ options: {},
880
+ headers: {},
881
+ } as any
882
+
883
+ test("filters out messages with empty string content", () => {
884
+ const msgs = [
885
+ { role: "user", content: "Hello" },
886
+ { role: "assistant", content: "" },
887
+ { role: "user", content: "World" },
888
+ ] as any[]
889
+
890
+ const result = ProviderTransform.message(msgs, anthropicModel, {})
891
+
892
+ expect(result).toHaveLength(2)
893
+ expect(result[0].content).toBe("Hello")
894
+ expect(result[1].content).toBe("World")
895
+ })
896
+
897
+ test("filters out empty text parts from array content", () => {
898
+ const msgs = [
899
+ {
900
+ role: "assistant",
901
+ content: [
902
+ { type: "text", text: "" },
903
+ { type: "text", text: "Hello" },
904
+ { type: "text", text: "" },
905
+ ],
906
+ },
907
+ ] as any[]
908
+
909
+ const result = ProviderTransform.message(msgs, anthropicModel, {})
910
+
911
+ expect(result).toHaveLength(1)
912
+ expect(result[0].content).toHaveLength(1)
913
+ expect(result[0].content[0]).toEqual({ type: "text", text: "Hello" })
914
+ })
915
+
916
+ test("filters out empty reasoning parts from array content", () => {
917
+ const msgs = [
918
+ {
919
+ role: "assistant",
920
+ content: [
921
+ { type: "reasoning", text: "" },
922
+ { type: "text", text: "Answer" },
923
+ { type: "reasoning", text: "" },
924
+ ],
925
+ },
926
+ ] as any[]
927
+
928
+ const result = ProviderTransform.message(msgs, anthropicModel, {})
929
+
930
+ expect(result).toHaveLength(1)
931
+ expect(result[0].content).toHaveLength(1)
932
+ expect(result[0].content[0]).toEqual({ type: "text", text: "Answer" })
933
+ })
934
+
935
+ test("removes entire message when all parts are empty", () => {
936
+ const msgs = [
937
+ { role: "user", content: "Hello" },
938
+ {
939
+ role: "assistant",
940
+ content: [
941
+ { type: "text", text: "" },
942
+ { type: "reasoning", text: "" },
943
+ ],
944
+ },
945
+ { role: "user", content: "World" },
946
+ ] as any[]
947
+
948
+ const result = ProviderTransform.message(msgs, anthropicModel, {})
949
+
950
+ expect(result).toHaveLength(2)
951
+ expect(result[0].content).toBe("Hello")
952
+ expect(result[1].content).toBe("World")
953
+ })
954
+
955
+ test("keeps non-text/reasoning parts even if text parts are empty", () => {
956
+ const msgs = [
957
+ {
958
+ role: "assistant",
959
+ content: [
960
+ { type: "text", text: "" },
961
+ { type: "tool-call", toolCallId: "123", toolName: "bash", input: { command: "ls" } },
962
+ ],
963
+ },
964
+ ] as any[]
965
+
966
+ const result = ProviderTransform.message(msgs, anthropicModel, {})
967
+
968
+ expect(result).toHaveLength(1)
969
+ expect(result[0].content).toHaveLength(1)
970
+ expect(result[0].content[0]).toEqual({
971
+ type: "tool-call",
972
+ toolCallId: "123",
973
+ toolName: "bash",
974
+ input: { command: "ls" },
975
+ })
976
+ })
977
+
978
+ test("keeps messages with valid text alongside empty parts", () => {
979
+ const msgs = [
980
+ {
981
+ role: "assistant",
982
+ content: [
983
+ { type: "reasoning", text: "Thinking..." },
984
+ { type: "text", text: "" },
985
+ { type: "text", text: "Result" },
986
+ ],
987
+ },
988
+ ] as any[]
989
+
990
+ const result = ProviderTransform.message(msgs, anthropicModel, {})
991
+
992
+ expect(result).toHaveLength(1)
993
+ expect(result[0].content).toHaveLength(2)
994
+ expect(result[0].content[0]).toEqual({ type: "reasoning", text: "Thinking..." })
995
+ expect(result[0].content[1]).toEqual({ type: "text", text: "Result" })
996
+ })
997
+
998
+ test("does not filter for non-anthropic providers", () => {
999
+ const openaiModel = {
1000
+ ...anthropicModel,
1001
+ providerID: "openai",
1002
+ api: {
1003
+ id: "gpt-4",
1004
+ url: "https://api.openai.com",
1005
+ npm: "@ai-sdk/openai",
1006
+ },
1007
+ }
1008
+
1009
+ const msgs = [
1010
+ { role: "assistant", content: "" },
1011
+ {
1012
+ role: "assistant",
1013
+ content: [{ type: "text", text: "" }],
1014
+ },
1015
+ ] as any[]
1016
+
1017
+ const result = ProviderTransform.message(msgs, openaiModel, {})
1018
+
1019
+ expect(result).toHaveLength(2)
1020
+ expect(result[0].content).toBe("")
1021
+ expect(result[1].content).toHaveLength(1)
1022
+ })
1023
+ })
1024
+
1025
+ describe("ProviderTransform.message - strip openai metadata when store=false", () => {
1026
+ const openaiModel = {
1027
+ id: "openai/gpt-5",
1028
+ providerID: "openai",
1029
+ api: {
1030
+ id: "gpt-5",
1031
+ url: "https://api.openai.com",
1032
+ npm: "@ai-sdk/openai",
1033
+ },
1034
+ name: "GPT-5",
1035
+ capabilities: {
1036
+ temperature: true,
1037
+ reasoning: true,
1038
+ attachment: true,
1039
+ toolcall: true,
1040
+ input: { text: true, audio: false, image: true, video: false, pdf: false },
1041
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
1042
+ interleaved: false,
1043
+ },
1044
+ cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
1045
+ limit: { context: 128000, output: 4096 },
1046
+ status: "active",
1047
+ options: {},
1048
+ headers: {},
1049
+ } as any
1050
+
1051
+ test("preserves itemId and reasoningEncryptedContent when store=false", () => {
1052
+ const msgs = [
1053
+ {
1054
+ role: "assistant",
1055
+ content: [
1056
+ {
1057
+ type: "reasoning",
1058
+ text: "thinking...",
1059
+ providerOptions: {
1060
+ openai: {
1061
+ itemId: "rs_123",
1062
+ reasoningEncryptedContent: "encrypted",
1063
+ },
1064
+ },
1065
+ },
1066
+ {
1067
+ type: "text",
1068
+ text: "Hello",
1069
+ providerOptions: {
1070
+ openai: {
1071
+ itemId: "msg_456",
1072
+ },
1073
+ },
1074
+ },
1075
+ ],
1076
+ },
1077
+ ] as any[]
1078
+
1079
+ const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
1080
+
1081
+ expect(result).toHaveLength(1)
1082
+ expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
1083
+ expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
1084
+ })
1085
+
1086
+ test("preserves itemId and reasoningEncryptedContent when store=false even when not openai", () => {
1087
+ const zenModel = {
1088
+ ...openaiModel,
1089
+ providerID: "zen",
1090
+ }
1091
+ const msgs = [
1092
+ {
1093
+ role: "assistant",
1094
+ content: [
1095
+ {
1096
+ type: "reasoning",
1097
+ text: "thinking...",
1098
+ providerOptions: {
1099
+ openai: {
1100
+ itemId: "rs_123",
1101
+ reasoningEncryptedContent: "encrypted",
1102
+ },
1103
+ },
1104
+ },
1105
+ {
1106
+ type: "text",
1107
+ text: "Hello",
1108
+ providerOptions: {
1109
+ openai: {
1110
+ itemId: "msg_456",
1111
+ },
1112
+ },
1113
+ },
1114
+ ],
1115
+ },
1116
+ ] as any[]
1117
+
1118
+ const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[]
1119
+
1120
+ expect(result).toHaveLength(1)
1121
+ expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
1122
+ expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
1123
+ })
1124
+
1125
+ test("preserves other openai options including itemId", () => {
1126
+ const msgs = [
1127
+ {
1128
+ role: "assistant",
1129
+ content: [
1130
+ {
1131
+ type: "text",
1132
+ text: "Hello",
1133
+ providerOptions: {
1134
+ openai: {
1135
+ itemId: "msg_123",
1136
+ otherOption: "value",
1137
+ },
1138
+ },
1139
+ },
1140
+ ],
1141
+ },
1142
+ ] as any[]
1143
+
1144
+ const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
1145
+
1146
+ expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
1147
+ expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value")
1148
+ })
1149
+
1150
+ test("preserves metadata for openai package when store is true", () => {
1151
+ const msgs = [
1152
+ {
1153
+ role: "assistant",
1154
+ content: [
1155
+ {
1156
+ type: "text",
1157
+ text: "Hello",
1158
+ providerOptions: {
1159
+ openai: {
1160
+ itemId: "msg_123",
1161
+ },
1162
+ },
1163
+ },
1164
+ ],
1165
+ },
1166
+ ] as any[]
1167
+
1168
+ // openai package preserves itemId regardless of store value
1169
+ const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[]
1170
+
1171
+ expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
1172
+ })
1173
+
1174
+ test("preserves metadata for non-openai packages when store is false", () => {
1175
+ const anthropicModel = {
1176
+ ...openaiModel,
1177
+ providerID: "anthropic",
1178
+ api: {
1179
+ id: "claude-3",
1180
+ url: "https://api.anthropic.com",
1181
+ npm: "@ai-sdk/anthropic",
1182
+ },
1183
+ }
1184
+ const msgs = [
1185
+ {
1186
+ role: "assistant",
1187
+ content: [
1188
+ {
1189
+ type: "text",
1190
+ text: "Hello",
1191
+ providerOptions: {
1192
+ openai: {
1193
+ itemId: "msg_123",
1194
+ },
1195
+ },
1196
+ },
1197
+ ],
1198
+ },
1199
+ ] as any[]
1200
+
1201
+ // store=false preserves metadata for non-openai packages
1202
+ const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[]
1203
+
1204
+ expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
1205
+ })
1206
+
1207
+ test("preserves metadata using providerID key when store is false", () => {
1208
+ const opencodeModel = {
1209
+ ...openaiModel,
1210
+ providerID: "opencode",
1211
+ api: {
1212
+ id: "opencode-test",
1213
+ url: "https://api.opencode.ai",
1214
+ npm: "@ai-sdk/openai-compatible",
1215
+ },
1216
+ }
1217
+ const msgs = [
1218
+ {
1219
+ role: "assistant",
1220
+ content: [
1221
+ {
1222
+ type: "text",
1223
+ text: "Hello",
1224
+ providerOptions: {
1225
+ opencode: {
1226
+ itemId: "msg_123",
1227
+ otherOption: "value",
1228
+ },
1229
+ },
1230
+ },
1231
+ ],
1232
+ },
1233
+ ] as any[]
1234
+
1235
+ const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
1236
+
1237
+ expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_123")
1238
+ expect(result[0].content[0].providerOptions?.opencode?.otherOption).toBe("value")
1239
+ })
1240
+
1241
+ test("preserves itemId across all providerOptions keys", () => {
1242
+ const opencodeModel = {
1243
+ ...openaiModel,
1244
+ providerID: "opencode",
1245
+ api: {
1246
+ id: "opencode-test",
1247
+ url: "https://api.opencode.ai",
1248
+ npm: "@ai-sdk/openai-compatible",
1249
+ },
1250
+ }
1251
+ const msgs = [
1252
+ {
1253
+ role: "assistant",
1254
+ providerOptions: {
1255
+ openai: { itemId: "msg_root" },
1256
+ opencode: { itemId: "msg_opencode" },
1257
+ extra: { itemId: "msg_extra" },
1258
+ },
1259
+ content: [
1260
+ {
1261
+ type: "text",
1262
+ text: "Hello",
1263
+ providerOptions: {
1264
+ openai: { itemId: "msg_openai_part" },
1265
+ opencode: { itemId: "msg_opencode_part" },
1266
+ extra: { itemId: "msg_extra_part" },
1267
+ },
1268
+ },
1269
+ ],
1270
+ },
1271
+ ] as any[]
1272
+
1273
+ const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
1274
+
1275
+ expect(result[0].providerOptions?.openai?.itemId).toBe("msg_root")
1276
+ expect(result[0].providerOptions?.opencode?.itemId).toBe("msg_opencode")
1277
+ expect(result[0].providerOptions?.extra?.itemId).toBe("msg_extra")
1278
+ expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_openai_part")
1279
+ expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_opencode_part")
1280
+ expect(result[0].content[0].providerOptions?.extra?.itemId).toBe("msg_extra_part")
1281
+ })
1282
+
1283
+ test("does not strip metadata for non-openai packages when store is not false", () => {
1284
+ const anthropicModel = {
1285
+ ...openaiModel,
1286
+ providerID: "anthropic",
1287
+ api: {
1288
+ id: "claude-3",
1289
+ url: "https://api.anthropic.com",
1290
+ npm: "@ai-sdk/anthropic",
1291
+ },
1292
+ }
1293
+ const msgs = [
1294
+ {
1295
+ role: "assistant",
1296
+ content: [
1297
+ {
1298
+ type: "text",
1299
+ text: "Hello",
1300
+ providerOptions: {
1301
+ openai: {
1302
+ itemId: "msg_123",
1303
+ },
1304
+ },
1305
+ },
1306
+ ],
1307
+ },
1308
+ ] as any[]
1309
+
1310
+ const result = ProviderTransform.message(msgs, anthropicModel, {}) as any[]
1311
+
1312
+ expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
1313
+ })
1314
+ })
1315
+
1316
+ describe("ProviderTransform.message - providerOptions key remapping", () => {
1317
+ const createModel = (providerID: string, npm: string) =>
1318
+ ({
1319
+ id: `${providerID}/test-model`,
1320
+ providerID,
1321
+ api: {
1322
+ id: "test-model",
1323
+ url: "https://api.test.com",
1324
+ npm,
1325
+ },
1326
+ name: "Test Model",
1327
+ capabilities: {
1328
+ temperature: true,
1329
+ reasoning: false,
1330
+ attachment: true,
1331
+ toolcall: true,
1332
+ input: { text: true, audio: false, image: true, video: false, pdf: true },
1333
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
1334
+ interleaved: false,
1335
+ },
1336
+ cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
1337
+ limit: { context: 128000, output: 8192 },
1338
+ status: "active",
1339
+ options: {},
1340
+ headers: {},
1341
+ }) as any
1342
+
1343
+ test("azure keeps 'azure' key and does not remap to 'openai'", () => {
1344
+ const model = createModel("azure", "@ai-sdk/azure")
1345
+ const msgs = [
1346
+ {
1347
+ role: "user",
1348
+ content: "Hello",
1349
+ providerOptions: {
1350
+ azure: { someOption: "value" },
1351
+ },
1352
+ },
1353
+ ] as any[]
1354
+
1355
+ const result = ProviderTransform.message(msgs, model, {})
1356
+
1357
+ expect(result[0].providerOptions?.azure).toEqual({ someOption: "value" })
1358
+ expect(result[0].providerOptions?.openai).toBeUndefined()
1359
+ })
1360
+
1361
+ test("copilot remaps providerID to 'copilot' key", () => {
1362
+ const model = createModel("github-copilot", "@ai-sdk/github-copilot")
1363
+ const msgs = [
1364
+ {
1365
+ role: "user",
1366
+ content: "Hello",
1367
+ providerOptions: {
1368
+ copilot: { someOption: "value" },
1369
+ },
1370
+ },
1371
+ ] as any[]
1372
+
1373
+ const result = ProviderTransform.message(msgs, model, {})
1374
+
1375
+ expect(result[0].providerOptions?.copilot).toEqual({ someOption: "value" })
1376
+ expect(result[0].providerOptions?.["github-copilot"]).toBeUndefined()
1377
+ })
1378
+
1379
+ test("bedrock remaps providerID to 'bedrock' key", () => {
1380
+ const model = createModel("my-bedrock", "@ai-sdk/amazon-bedrock")
1381
+ const msgs = [
1382
+ {
1383
+ role: "user",
1384
+ content: "Hello",
1385
+ providerOptions: {
1386
+ "my-bedrock": { someOption: "value" },
1387
+ },
1388
+ },
1389
+ ] as any[]
1390
+
1391
+ const result = ProviderTransform.message(msgs, model, {})
1392
+
1393
+ expect(result[0].providerOptions?.bedrock).toEqual({ someOption: "value" })
1394
+ expect(result[0].providerOptions?.["my-bedrock"]).toBeUndefined()
1395
+ })
1396
+ })
1397
+
1398
+ describe("ProviderTransform.message - claude w/bedrock custom inference profile", () => {
1399
+ test("adds cachePoint", () => {
1400
+ const model = {
1401
+ id: "amazon-bedrock/custom-claude-sonnet-4.5",
1402
+ providerID: "amazon-bedrock",
1403
+ api: {
1404
+ id: "arn:aws:bedrock:xxx:yyy:application-inference-profile/zzz",
1405
+ url: "https://api.test.com",
1406
+ npm: "@ai-sdk/amazon-bedrock",
1407
+ },
1408
+ name: "Custom inference profile",
1409
+ capabilities: {},
1410
+ options: {},
1411
+ headers: {},
1412
+ } as any
1413
+
1414
+ const msgs = [
1415
+ {
1416
+ role: "user",
1417
+ content: "Hello",
1418
+ },
1419
+ ] as any[]
1420
+
1421
+ const result = ProviderTransform.message(msgs, model, {})
1422
+
1423
+ expect(result[0].providerOptions?.bedrock).toEqual(
1424
+ expect.objectContaining({
1425
+ cachePoint: {
1426
+ type: "default",
1427
+ },
1428
+ }),
1429
+ )
1430
+ })
1431
+ })
1432
+
1433
+ describe("ProviderTransform.message - cache control on gateway", () => {
1434
+ const createModel = (overrides: Partial<any> = {}) =>
1435
+ ({
1436
+ id: "anthropic/claude-sonnet-4",
1437
+ providerID: "vercel",
1438
+ api: {
1439
+ id: "anthropic/claude-sonnet-4",
1440
+ url: "https://ai-gateway.vercel.sh/v3/ai",
1441
+ npm: "@ai-sdk/gateway",
1442
+ },
1443
+ name: "Claude Sonnet 4",
1444
+ capabilities: {
1445
+ temperature: true,
1446
+ reasoning: true,
1447
+ attachment: true,
1448
+ toolcall: true,
1449
+ input: { text: true, audio: false, image: true, video: false, pdf: true },
1450
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
1451
+ interleaved: false,
1452
+ },
1453
+ cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
1454
+ limit: { context: 200_000, output: 8192 },
1455
+ status: "active",
1456
+ options: {},
1457
+ headers: {},
1458
+ ...overrides,
1459
+ }) as any
1460
+
1461
+ test("gateway does not set cache control for anthropic models", () => {
1462
+ const model = createModel()
1463
+ const msgs = [
1464
+ {
1465
+ role: "system",
1466
+ content: [{ type: "text", text: "You are a helpful assistant" }],
1467
+ },
1468
+ {
1469
+ role: "user",
1470
+ content: "Hello",
1471
+ },
1472
+ ] as any[]
1473
+
1474
+ const result = ProviderTransform.message(msgs, model, {}) as any[]
1475
+
1476
+ expect(result[0].content[0].providerOptions).toBeUndefined()
1477
+ expect(result[0].providerOptions).toBeUndefined()
1478
+ })
1479
+
1480
+ test("non-gateway anthropic keeps existing cache control behavior", () => {
1481
+ const model = createModel({
1482
+ providerID: "anthropic",
1483
+ api: {
1484
+ id: "claude-sonnet-4",
1485
+ url: "https://api.anthropic.com",
1486
+ npm: "@ai-sdk/anthropic",
1487
+ },
1488
+ })
1489
+ const msgs = [
1490
+ {
1491
+ role: "system",
1492
+ content: "You are a helpful assistant",
1493
+ },
1494
+ {
1495
+ role: "user",
1496
+ content: "Hello",
1497
+ },
1498
+ ] as any[]
1499
+
1500
+ const result = ProviderTransform.message(msgs, model, {}) as any[]
1501
+
1502
+ expect(result[0].providerOptions).toEqual({
1503
+ anthropic: {
1504
+ cacheControl: {
1505
+ type: "ephemeral",
1506
+ },
1507
+ },
1508
+ openrouter: {
1509
+ cacheControl: {
1510
+ type: "ephemeral",
1511
+ },
1512
+ },
1513
+ bedrock: {
1514
+ cachePoint: {
1515
+ type: "default",
1516
+ },
1517
+ },
1518
+ openaiCompatible: {
1519
+ cache_control: {
1520
+ type: "ephemeral",
1521
+ },
1522
+ },
1523
+ copilot: {
1524
+ copilot_cache_control: {
1525
+ type: "ephemeral",
1526
+ },
1527
+ },
1528
+ })
1529
+ })
1530
+ })
1531
+
1532
+ describe("ProviderTransform.variants", () => {
1533
+ const createMockModel = (overrides: Partial<any> = {}): any => ({
1534
+ id: "test/test-model",
1535
+ providerID: "test",
1536
+ api: {
1537
+ id: "test-model",
1538
+ url: "https://api.test.com",
1539
+ npm: "@ai-sdk/openai",
1540
+ },
1541
+ name: "Test Model",
1542
+ capabilities: {
1543
+ temperature: true,
1544
+ reasoning: true,
1545
+ attachment: true,
1546
+ toolcall: true,
1547
+ input: { text: true, audio: false, image: true, video: false, pdf: false },
1548
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
1549
+ interleaved: false,
1550
+ },
1551
+ cost: {
1552
+ input: 0.001,
1553
+ output: 0.002,
1554
+ cache: { read: 0.0001, write: 0.0002 },
1555
+ },
1556
+ limit: {
1557
+ context: 200_000,
1558
+ output: 64_000,
1559
+ },
1560
+ status: "active",
1561
+ options: {},
1562
+ headers: {},
1563
+ release_date: "2024-01-01",
1564
+ ...overrides,
1565
+ })
1566
+
1567
+ test("returns empty object when model has no reasoning capabilities", () => {
1568
+ const model = createMockModel({
1569
+ capabilities: { reasoning: false },
1570
+ })
1571
+ const result = ProviderTransform.variants(model)
1572
+ expect(result).toEqual({})
1573
+ })
1574
+
1575
+ test("deepseek returns empty object", () => {
1576
+ const model = createMockModel({
1577
+ id: "deepseek/deepseek-chat",
1578
+ providerID: "deepseek",
1579
+ api: {
1580
+ id: "deepseek-chat",
1581
+ url: "https://api.deepseek.com",
1582
+ npm: "@ai-sdk/openai-compatible",
1583
+ },
1584
+ })
1585
+ const result = ProviderTransform.variants(model)
1586
+ expect(result).toEqual({})
1587
+ })
1588
+
1589
+ test("minimax returns empty object", () => {
1590
+ const model = createMockModel({
1591
+ id: "minimax/minimax-model",
1592
+ providerID: "minimax",
1593
+ api: {
1594
+ id: "minimax-model",
1595
+ url: "https://api.minimax.com",
1596
+ npm: "@ai-sdk/openai-compatible",
1597
+ },
1598
+ })
1599
+ const result = ProviderTransform.variants(model)
1600
+ expect(result).toEqual({})
1601
+ })
1602
+
1603
+ test("glm returns empty object", () => {
1604
+ const model = createMockModel({
1605
+ id: "glm/glm-4",
1606
+ providerID: "glm",
1607
+ api: {
1608
+ id: "glm-4",
1609
+ url: "https://api.glm.com",
1610
+ npm: "@ai-sdk/openai-compatible",
1611
+ },
1612
+ })
1613
+ const result = ProviderTransform.variants(model)
1614
+ expect(result).toEqual({})
1615
+ })
1616
+
1617
+ test("mistral returns empty object", () => {
1618
+ const model = createMockModel({
1619
+ id: "mistral/mistral-large",
1620
+ providerID: "mistral",
1621
+ api: {
1622
+ id: "mistral-large-latest",
1623
+ url: "https://api.mistral.com",
1624
+ npm: "@ai-sdk/mistral",
1625
+ },
1626
+ })
1627
+ const result = ProviderTransform.variants(model)
1628
+ expect(result).toEqual({})
1629
+ })
1630
+
1631
+ describe("@openrouter/ai-sdk-provider", () => {
1632
+ test("returns empty object for non-qualifying models", () => {
1633
+ const model = createMockModel({
1634
+ id: "openrouter/test-model",
1635
+ providerID: "openrouter",
1636
+ api: {
1637
+ id: "test-model",
1638
+ url: "https://openrouter.ai",
1639
+ npm: "@openrouter/ai-sdk-provider",
1640
+ },
1641
+ })
1642
+ const result = ProviderTransform.variants(model)
1643
+ expect(result).toEqual({})
1644
+ })
1645
+
1646
+ test("gpt models return OPENAI_EFFORTS with reasoning", () => {
1647
+ const model = createMockModel({
1648
+ id: "openrouter/gpt-4",
1649
+ providerID: "openrouter",
1650
+ api: {
1651
+ id: "gpt-4",
1652
+ url: "https://openrouter.ai",
1653
+ npm: "@openrouter/ai-sdk-provider",
1654
+ },
1655
+ })
1656
+ const result = ProviderTransform.variants(model)
1657
+ expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
1658
+ expect(result.low).toEqual({ reasoning: { effort: "low" } })
1659
+ expect(result.high).toEqual({ reasoning: { effort: "high" } })
1660
+ })
1661
+
1662
+ test("gemini-3 returns OPENAI_EFFORTS with reasoning", () => {
1663
+ const model = createMockModel({
1664
+ id: "openrouter/gemini-3-5-pro",
1665
+ providerID: "openrouter",
1666
+ api: {
1667
+ id: "gemini-3-5-pro",
1668
+ url: "https://openrouter.ai",
1669
+ npm: "@openrouter/ai-sdk-provider",
1670
+ },
1671
+ })
1672
+ const result = ProviderTransform.variants(model)
1673
+ expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
1674
+ })
1675
+
1676
+ test("grok-4 returns empty object", () => {
1677
+ const model = createMockModel({
1678
+ id: "openrouter/grok-4",
1679
+ providerID: "openrouter",
1680
+ api: {
1681
+ id: "grok-4",
1682
+ url: "https://openrouter.ai",
1683
+ npm: "@openrouter/ai-sdk-provider",
1684
+ },
1685
+ })
1686
+ const result = ProviderTransform.variants(model)
1687
+ expect(result).toEqual({})
1688
+ })
1689
+
1690
+ test("grok-3-mini returns low and high with reasoning", () => {
1691
+ const model = createMockModel({
1692
+ id: "openrouter/grok-3-mini",
1693
+ providerID: "openrouter",
1694
+ api: {
1695
+ id: "grok-3-mini",
1696
+ url: "https://openrouter.ai",
1697
+ npm: "@openrouter/ai-sdk-provider",
1698
+ },
1699
+ })
1700
+ const result = ProviderTransform.variants(model)
1701
+ expect(Object.keys(result)).toEqual(["low", "high"])
1702
+ expect(result.low).toEqual({ reasoning: { effort: "low" } })
1703
+ expect(result.high).toEqual({ reasoning: { effort: "high" } })
1704
+ })
1705
+ })
1706
+
1707
+ describe("@ai-sdk/gateway", () => {
1708
+ test("anthropic sonnet 4.6 models return adaptive thinking options", () => {
1709
+ const model = createMockModel({
1710
+ id: "anthropic/claude-sonnet-4-6",
1711
+ providerID: "gateway",
1712
+ api: {
1713
+ id: "anthropic/claude-sonnet-4-6",
1714
+ url: "https://gateway.ai",
1715
+ npm: "@ai-sdk/gateway",
1716
+ },
1717
+ })
1718
+ const result = ProviderTransform.variants(model)
1719
+ expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
1720
+ expect(result.medium).toEqual({
1721
+ thinking: {
1722
+ type: "adaptive",
1723
+ },
1724
+ effort: "medium",
1725
+ })
1726
+ })
1727
+
1728
+ test("anthropic sonnet 4.6 dot-format models return adaptive thinking options", () => {
1729
+ const model = createMockModel({
1730
+ id: "anthropic/claude-sonnet-4-6",
1731
+ providerID: "gateway",
1732
+ api: {
1733
+ id: "anthropic/claude-sonnet-4.6",
1734
+ url: "https://gateway.ai",
1735
+ npm: "@ai-sdk/gateway",
1736
+ },
1737
+ })
1738
+ const result = ProviderTransform.variants(model)
1739
+ expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
1740
+ expect(result.medium).toEqual({
1741
+ thinking: {
1742
+ type: "adaptive",
1743
+ },
1744
+ effort: "medium",
1745
+ })
1746
+ })
1747
+
1748
+ test("anthropic opus 4.6 dot-format models return adaptive thinking options", () => {
1749
+ const model = createMockModel({
1750
+ id: "anthropic/claude-opus-4-6",
1751
+ providerID: "gateway",
1752
+ api: {
1753
+ id: "anthropic/claude-opus-4.6",
1754
+ url: "https://gateway.ai",
1755
+ npm: "@ai-sdk/gateway",
1756
+ },
1757
+ })
1758
+ const result = ProviderTransform.variants(model)
1759
+ expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
1760
+ expect(result.high).toEqual({
1761
+ thinking: {
1762
+ type: "adaptive",
1763
+ },
1764
+ effort: "high",
1765
+ })
1766
+ })
1767
+
1768
+ test("anthropic models return anthropic thinking options", () => {
1769
+ const model = createMockModel({
1770
+ id: "anthropic/claude-sonnet-4",
1771
+ providerID: "gateway",
1772
+ api: {
1773
+ id: "anthropic/claude-sonnet-4",
1774
+ url: "https://gateway.ai",
1775
+ npm: "@ai-sdk/gateway",
1776
+ },
1777
+ })
1778
+ const result = ProviderTransform.variants(model)
1779
+ expect(Object.keys(result)).toEqual(["high", "max"])
1780
+ expect(result.high).toEqual({
1781
+ thinking: {
1782
+ type: "enabled",
1783
+ budgetTokens: 16000,
1784
+ },
1785
+ })
1786
+ expect(result.max).toEqual({
1787
+ thinking: {
1788
+ type: "enabled",
1789
+ budgetTokens: 31999,
1790
+ },
1791
+ })
1792
+ })
1793
+
1794
+ test("returns OPENAI_EFFORTS with reasoningEffort", () => {
1795
+ const model = createMockModel({
1796
+ id: "gateway/gateway-model",
1797
+ providerID: "gateway",
1798
+ api: {
1799
+ id: "gateway-model",
1800
+ url: "https://gateway.ai",
1801
+ npm: "@ai-sdk/gateway",
1802
+ },
1803
+ })
1804
+ const result = ProviderTransform.variants(model)
1805
+ expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
1806
+ expect(result.low).toEqual({ reasoningEffort: "low" })
1807
+ expect(result.high).toEqual({ reasoningEffort: "high" })
1808
+ })
1809
+ })
1810
+
1811
+ describe("@ai-sdk/github-copilot", () => {
1812
+ test("standard models return low, medium, high", () => {
1813
+ const model = createMockModel({
1814
+ id: "gpt-4.5",
1815
+ providerID: "github-copilot",
1816
+ api: {
1817
+ id: "gpt-4.5",
1818
+ url: "https://api.githubcopilot.com",
1819
+ npm: "@ai-sdk/github-copilot",
1820
+ },
1821
+ })
1822
+ const result = ProviderTransform.variants(model)
1823
+ expect(Object.keys(result)).toEqual(["low", "medium", "high"])
1824
+ expect(result.low).toEqual({
1825
+ reasoningEffort: "low",
1826
+ reasoningSummary: "auto",
1827
+ include: ["reasoning.encrypted_content"],
1828
+ })
1829
+ })
1830
+
1831
+ test("gpt-5.1-codex-max includes xhigh", () => {
1832
+ const model = createMockModel({
1833
+ id: "gpt-5.1-codex-max",
1834
+ providerID: "github-copilot",
1835
+ api: {
1836
+ id: "gpt-5.1-codex-max",
1837
+ url: "https://api.githubcopilot.com",
1838
+ npm: "@ai-sdk/github-copilot",
1839
+ },
1840
+ })
1841
+ const result = ProviderTransform.variants(model)
1842
+ expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
1843
+ })
1844
+
1845
+ test("gpt-5.1-codex-mini does not include xhigh", () => {
1846
+ const model = createMockModel({
1847
+ id: "gpt-5.1-codex-mini",
1848
+ providerID: "github-copilot",
1849
+ api: {
1850
+ id: "gpt-5.1-codex-mini",
1851
+ url: "https://api.githubcopilot.com",
1852
+ npm: "@ai-sdk/github-copilot",
1853
+ },
1854
+ })
1855
+ const result = ProviderTransform.variants(model)
1856
+ expect(Object.keys(result)).toEqual(["low", "medium", "high"])
1857
+ })
1858
+
1859
+ test("gpt-5.1-codex does not include xhigh", () => {
1860
+ const model = createMockModel({
1861
+ id: "gpt-5.1-codex",
1862
+ providerID: "github-copilot",
1863
+ api: {
1864
+ id: "gpt-5.1-codex",
1865
+ url: "https://api.githubcopilot.com",
1866
+ npm: "@ai-sdk/github-copilot",
1867
+ },
1868
+ })
1869
+ const result = ProviderTransform.variants(model)
1870
+ expect(Object.keys(result)).toEqual(["low", "medium", "high"])
1871
+ })
1872
+
1873
+ test("gpt-5.2 includes xhigh", () => {
1874
+ const model = createMockModel({
1875
+ id: "gpt-5.2",
1876
+ providerID: "github-copilot",
1877
+ api: {
1878
+ id: "gpt-5.2",
1879
+ url: "https://api.githubcopilot.com",
1880
+ npm: "@ai-sdk/github-copilot",
1881
+ },
1882
+ })
1883
+ const result = ProviderTransform.variants(model)
1884
+ expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
1885
+ expect(result.xhigh).toEqual({
1886
+ reasoningEffort: "xhigh",
1887
+ reasoningSummary: "auto",
1888
+ include: ["reasoning.encrypted_content"],
1889
+ })
1890
+ })
1891
+
1892
+ test("gpt-5.2-codex includes xhigh", () => {
1893
+ const model = createMockModel({
1894
+ id: "gpt-5.2-codex",
1895
+ providerID: "github-copilot",
1896
+ api: {
1897
+ id: "gpt-5.2-codex",
1898
+ url: "https://api.githubcopilot.com",
1899
+ npm: "@ai-sdk/github-copilot",
1900
+ },
1901
+ })
1902
+ const result = ProviderTransform.variants(model)
1903
+ expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
1904
+ })
1905
+ })
1906
+
1907
+ describe("@ai-sdk/cerebras", () => {
1908
+ test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
1909
+ const model = createMockModel({
1910
+ id: "cerebras/llama-4",
1911
+ providerID: "cerebras",
1912
+ api: {
1913
+ id: "llama-4-sc",
1914
+ url: "https://api.cerebras.ai",
1915
+ npm: "@ai-sdk/cerebras",
1916
+ },
1917
+ })
1918
+ const result = ProviderTransform.variants(model)
1919
+ expect(Object.keys(result)).toEqual(["low", "medium", "high"])
1920
+ expect(result.low).toEqual({ reasoningEffort: "low" })
1921
+ expect(result.high).toEqual({ reasoningEffort: "high" })
1922
+ })
1923
+ })
1924
+
1925
+ describe("@ai-sdk/togetherai", () => {
1926
+ test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
1927
+ const model = createMockModel({
1928
+ id: "togetherai/llama-4",
1929
+ providerID: "togetherai",
1930
+ api: {
1931
+ id: "llama-4-sc",
1932
+ url: "https://api.togetherai.com",
1933
+ npm: "@ai-sdk/togetherai",
1934
+ },
1935
+ })
1936
+ const result = ProviderTransform.variants(model)
1937
+ expect(Object.keys(result)).toEqual(["low", "medium", "high"])
1938
+ expect(result.low).toEqual({ reasoningEffort: "low" })
1939
+ expect(result.high).toEqual({ reasoningEffort: "high" })
1940
+ })
1941
+ })
1942
+
1943
+ describe("@ai-sdk/xai", () => {
1944
+ test("grok-3 returns empty object", () => {
1945
+ const model = createMockModel({
1946
+ id: "xai/grok-3",
1947
+ providerID: "xai",
1948
+ api: {
1949
+ id: "grok-3",
1950
+ url: "https://api.x.ai",
1951
+ npm: "@ai-sdk/xai",
1952
+ },
1953
+ })
1954
+ const result = ProviderTransform.variants(model)
1955
+ expect(result).toEqual({})
1956
+ })
1957
+
1958
+ test("grok-3-mini returns low and high with reasoningEffort", () => {
1959
+ const model = createMockModel({
1960
+ id: "xai/grok-3-mini",
1961
+ providerID: "xai",
1962
+ api: {
1963
+ id: "grok-3-mini",
1964
+ url: "https://api.x.ai",
1965
+ npm: "@ai-sdk/xai",
1966
+ },
1967
+ })
1968
+ const result = ProviderTransform.variants(model)
1969
+ expect(Object.keys(result)).toEqual(["low", "high"])
1970
+ expect(result.low).toEqual({ reasoningEffort: "low" })
1971
+ expect(result.high).toEqual({ reasoningEffort: "high" })
1972
+ })
1973
+ })
1974
+
1975
+ describe("@ai-sdk/deepinfra", () => {
1976
+ test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
1977
+ const model = createMockModel({
1978
+ id: "deepinfra/llama-4",
1979
+ providerID: "deepinfra",
1980
+ api: {
1981
+ id: "llama-4-sc",
1982
+ url: "https://api.deepinfra.com",
1983
+ npm: "@ai-sdk/deepinfra",
1984
+ },
1985
+ })
1986
+ const result = ProviderTransform.variants(model)
1987
+ expect(Object.keys(result)).toEqual(["low", "medium", "high"])
1988
+ expect(result.low).toEqual({ reasoningEffort: "low" })
1989
+ expect(result.high).toEqual({ reasoningEffort: "high" })
1990
+ })
1991
+ })
1992
+
1993
+ describe("@ai-sdk/openai-compatible", () => {
1994
+ test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
1995
+ const model = createMockModel({
1996
+ id: "custom-provider/custom-model",
1997
+ providerID: "custom-provider",
1998
+ api: {
1999
+ id: "custom-model",
2000
+ url: "https://api.custom.com",
2001
+ npm: "@ai-sdk/openai-compatible",
2002
+ },
2003
+ })
2004
+ const result = ProviderTransform.variants(model)
2005
+ expect(Object.keys(result)).toEqual(["low", "medium", "high"])
2006
+ expect(result.low).toEqual({ reasoningEffort: "low" })
2007
+ expect(result.high).toEqual({ reasoningEffort: "high" })
2008
+ })
2009
+ })
2010
+
2011
+ describe("@ai-sdk/azure", () => {
2012
+ test("o1-mini returns empty object", () => {
2013
+ const model = createMockModel({
2014
+ id: "o1-mini",
2015
+ providerID: "azure",
2016
+ api: {
2017
+ id: "o1-mini",
2018
+ url: "https://azure.com",
2019
+ npm: "@ai-sdk/azure",
2020
+ },
2021
+ })
2022
+ const result = ProviderTransform.variants(model)
2023
+ expect(result).toEqual({})
2024
+ })
2025
+
2026
+ test("standard azure models return custom efforts with reasoningSummary", () => {
2027
+ const model = createMockModel({
2028
+ id: "o1",
2029
+ providerID: "azure",
2030
+ api: {
2031
+ id: "o1",
2032
+ url: "https://azure.com",
2033
+ npm: "@ai-sdk/azure",
2034
+ },
2035
+ })
2036
+ const result = ProviderTransform.variants(model)
2037
+ expect(Object.keys(result)).toEqual(["low", "medium", "high"])
2038
+ expect(result.low).toEqual({
2039
+ reasoningEffort: "low",
2040
+ reasoningSummary: "auto",
2041
+ include: ["reasoning.encrypted_content"],
2042
+ })
2043
+ })
2044
+
2045
+ test("gpt-5 adds minimal effort", () => {
2046
+ const model = createMockModel({
2047
+ id: "gpt-5",
2048
+ providerID: "azure",
2049
+ api: {
2050
+ id: "gpt-5",
2051
+ url: "https://azure.com",
2052
+ npm: "@ai-sdk/azure",
2053
+ },
2054
+ })
2055
+ const result = ProviderTransform.variants(model)
2056
+ expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
2057
+ })
2058
+ })
2059
+
2060
+ describe("@ai-sdk/openai", () => {
2061
+ test("gpt-5-pro returns empty object", () => {
2062
+ const model = createMockModel({
2063
+ id: "gpt-5-pro",
2064
+ providerID: "openai",
2065
+ api: {
2066
+ id: "gpt-5-pro",
2067
+ url: "https://api.openai.com",
2068
+ npm: "@ai-sdk/openai",
2069
+ },
2070
+ })
2071
+ const result = ProviderTransform.variants(model)
2072
+ expect(result).toEqual({})
2073
+ })
2074
+
2075
+ test("standard openai models return custom efforts with reasoningSummary", () => {
2076
+ const model = createMockModel({
2077
+ id: "gpt-5",
2078
+ providerID: "openai",
2079
+ api: {
2080
+ id: "gpt-5",
2081
+ url: "https://api.openai.com",
2082
+ npm: "@ai-sdk/openai",
2083
+ },
2084
+ release_date: "2024-06-01",
2085
+ })
2086
+ const result = ProviderTransform.variants(model)
2087
+ expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
2088
+ expect(result.low).toEqual({
2089
+ reasoningEffort: "low",
2090
+ reasoningSummary: "auto",
2091
+ include: ["reasoning.encrypted_content"],
2092
+ })
2093
+ })
2094
+
2095
+ test("models after 2025-11-13 include 'none' effort", () => {
2096
+ const model = createMockModel({
2097
+ id: "gpt-5-nano",
2098
+ providerID: "openai",
2099
+ api: {
2100
+ id: "gpt-5-nano",
2101
+ url: "https://api.openai.com",
2102
+ npm: "@ai-sdk/openai",
2103
+ },
2104
+ release_date: "2025-11-14",
2105
+ })
2106
+ const result = ProviderTransform.variants(model)
2107
+ expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high"])
2108
+ })
2109
+
2110
+ test("models after 2025-12-04 include 'xhigh' effort", () => {
2111
+ const model = createMockModel({
2112
+ id: "openai/gpt-5-chat",
2113
+ providerID: "openai",
2114
+ api: {
2115
+ id: "gpt-5-chat",
2116
+ url: "https://api.openai.com",
2117
+ npm: "@ai-sdk/openai",
2118
+ },
2119
+ release_date: "2025-12-05",
2120
+ })
2121
+ const result = ProviderTransform.variants(model)
2122
+ expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
2123
+ })
2124
+ })
2125
+
2126
+ describe("@ai-sdk/anthropic", () => {
2127
+ test("sonnet 4.6 returns adaptive thinking options", () => {
2128
+ const model = createMockModel({
2129
+ id: "anthropic/claude-sonnet-4-6",
2130
+ providerID: "anthropic",
2131
+ api: {
2132
+ id: "claude-sonnet-4-6",
2133
+ url: "https://api.anthropic.com",
2134
+ npm: "@ai-sdk/anthropic",
2135
+ },
2136
+ })
2137
+ const result = ProviderTransform.variants(model)
2138
+ expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
2139
+ expect(result.high).toEqual({
2140
+ thinking: {
2141
+ type: "adaptive",
2142
+ },
2143
+ effort: "high",
2144
+ })
2145
+ })
2146
+
2147
+ test("returns high and max with thinking config", () => {
2148
+ const model = createMockModel({
2149
+ id: "anthropic/claude-4",
2150
+ providerID: "anthropic",
2151
+ api: {
2152
+ id: "claude-4",
2153
+ url: "https://api.anthropic.com",
2154
+ npm: "@ai-sdk/anthropic",
2155
+ },
2156
+ })
2157
+ const result = ProviderTransform.variants(model)
2158
+ expect(Object.keys(result)).toEqual(["high", "max"])
2159
+ expect(result.high).toEqual({
2160
+ thinking: {
2161
+ type: "enabled",
2162
+ budgetTokens: 16000,
2163
+ },
2164
+ })
2165
+ expect(result.max).toEqual({
2166
+ thinking: {
2167
+ type: "enabled",
2168
+ budgetTokens: 31999,
2169
+ },
2170
+ })
2171
+ })
2172
+ })
2173
+
2174
+ describe("@ai-sdk/amazon-bedrock", () => {
2175
+ test("anthropic sonnet 4.6 returns adaptive reasoning options", () => {
2176
+ const model = createMockModel({
2177
+ id: "bedrock/anthropic-claude-sonnet-4-6",
2178
+ providerID: "bedrock",
2179
+ api: {
2180
+ id: "anthropic.claude-sonnet-4-6",
2181
+ url: "https://bedrock.amazonaws.com",
2182
+ npm: "@ai-sdk/amazon-bedrock",
2183
+ },
2184
+ })
2185
+ const result = ProviderTransform.variants(model)
2186
+ expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
2187
+ expect(result.max).toEqual({
2188
+ reasoningConfig: {
2189
+ type: "adaptive",
2190
+ maxReasoningEffort: "max",
2191
+ },
2192
+ })
2193
+ })
2194
+
2195
+ test("returns WIDELY_SUPPORTED_EFFORTS with reasoningConfig", () => {
2196
+ const model = createMockModel({
2197
+ id: "bedrock/llama-4",
2198
+ providerID: "bedrock",
2199
+ api: {
2200
+ id: "llama-4-sc",
2201
+ url: "https://bedrock.amazonaws.com",
2202
+ npm: "@ai-sdk/amazon-bedrock",
2203
+ },
2204
+ })
2205
+ const result = ProviderTransform.variants(model)
2206
+ expect(Object.keys(result)).toEqual(["low", "medium", "high"])
2207
+ expect(result.low).toEqual({
2208
+ reasoningConfig: {
2209
+ type: "enabled",
2210
+ maxReasoningEffort: "low",
2211
+ },
2212
+ })
2213
+ })
2214
+ })
2215
+
2216
+ describe("@ai-sdk/google", () => {
2217
+ test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
2218
+ const model = createMockModel({
2219
+ id: "google/gemini-2.5-pro",
2220
+ providerID: "google",
2221
+ api: {
2222
+ id: "gemini-2.5-pro",
2223
+ url: "https://generativelanguage.googleapis.com",
2224
+ npm: "@ai-sdk/google",
2225
+ },
2226
+ })
2227
+ const result = ProviderTransform.variants(model)
2228
+ expect(Object.keys(result)).toEqual(["high", "max"])
2229
+ expect(result.high).toEqual({
2230
+ thinkingConfig: {
2231
+ includeThoughts: true,
2232
+ thinkingBudget: 16000,
2233
+ },
2234
+ })
2235
+ expect(result.max).toEqual({
2236
+ thinkingConfig: {
2237
+ includeThoughts: true,
2238
+ thinkingBudget: 24576,
2239
+ },
2240
+ })
2241
+ })
2242
+
2243
+ test("other gemini models return low and high with thinkingLevel", () => {
2244
+ const model = createMockModel({
2245
+ id: "google/gemini-2.0-pro",
2246
+ providerID: "google",
2247
+ api: {
2248
+ id: "gemini-2.0-pro",
2249
+ url: "https://generativelanguage.googleapis.com",
2250
+ npm: "@ai-sdk/google",
2251
+ },
2252
+ })
2253
+ const result = ProviderTransform.variants(model)
2254
+ expect(Object.keys(result)).toEqual(["low", "high"])
2255
+ expect(result.low).toEqual({
2256
+ thinkingConfig: {
2257
+ includeThoughts: true,
2258
+ thinkingLevel: "low",
2259
+ },
2260
+ })
2261
+ expect(result.high).toEqual({
2262
+ thinkingConfig: {
2263
+ includeThoughts: true,
2264
+ thinkingLevel: "high",
2265
+ },
2266
+ })
2267
+ })
2268
+ })
2269
+
2270
+ describe("@ai-sdk/google-vertex", () => {
2271
+ test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
2272
+ const model = createMockModel({
2273
+ id: "google-vertex/gemini-2.5-pro",
2274
+ providerID: "google-vertex",
2275
+ api: {
2276
+ id: "gemini-2.5-pro",
2277
+ url: "https://vertexai.googleapis.com",
2278
+ npm: "@ai-sdk/google-vertex",
2279
+ },
2280
+ })
2281
+ const result = ProviderTransform.variants(model)
2282
+ expect(Object.keys(result)).toEqual(["high", "max"])
2283
+ })
2284
+
2285
+ test("other vertex models return low and high with thinkingLevel", () => {
2286
+ const model = createMockModel({
2287
+ id: "google-vertex/gemini-2.0-pro",
2288
+ providerID: "google-vertex",
2289
+ api: {
2290
+ id: "gemini-2.0-pro",
2291
+ url: "https://vertexai.googleapis.com",
2292
+ npm: "@ai-sdk/google-vertex",
2293
+ },
2294
+ })
2295
+ const result = ProviderTransform.variants(model)
2296
+ expect(Object.keys(result)).toEqual(["low", "high"])
2297
+ })
2298
+ })
2299
+
2300
+ describe("@ai-sdk/cohere", () => {
2301
+ test("returns empty object", () => {
2302
+ const model = createMockModel({
2303
+ id: "cohere/command-r",
2304
+ providerID: "cohere",
2305
+ api: {
2306
+ id: "command-r",
2307
+ url: "https://api.cohere.com",
2308
+ npm: "@ai-sdk/cohere",
2309
+ },
2310
+ })
2311
+ const result = ProviderTransform.variants(model)
2312
+ expect(result).toEqual({})
2313
+ })
2314
+ })
2315
+
2316
+ describe("@ai-sdk/groq", () => {
2317
+ test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
2318
+ const model = createMockModel({
2319
+ id: "groq/llama-4",
2320
+ providerID: "groq",
2321
+ api: {
2322
+ id: "llama-4-sc",
2323
+ url: "https://api.groq.com",
2324
+ npm: "@ai-sdk/groq",
2325
+ },
2326
+ })
2327
+ const result = ProviderTransform.variants(model)
2328
+ expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
2329
+ expect(result.none).toEqual({
2330
+ reasoningEffort: "none",
2331
+ })
2332
+ expect(result.low).toEqual({
2333
+ reasoningEffort: "low",
2334
+ })
2335
+ })
2336
+ })
2337
+
2338
+ describe("@ai-sdk/perplexity", () => {
2339
+ test("returns empty object", () => {
2340
+ const model = createMockModel({
2341
+ id: "perplexity/sonar-plus",
2342
+ providerID: "perplexity",
2343
+ api: {
2344
+ id: "sonar-plus",
2345
+ url: "https://api.perplexity.ai",
2346
+ npm: "@ai-sdk/perplexity",
2347
+ },
2348
+ })
2349
+ const result = ProviderTransform.variants(model)
2350
+ expect(result).toEqual({})
2351
+ })
2352
+ })
2353
+ })