jonsoc 1.1.43

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (420) hide show
  1. package/AGENTS.md +27 -0
  2. package/Dockerfile +18 -0
  3. package/PUBLISHING_GUIDE.md +151 -0
  4. package/README.md +58 -0
  5. package/bin/jonsoc +279 -0
  6. package/bunfig.toml +7 -0
  7. package/package.json +147 -0
  8. package/package.json.placeholder +11 -0
  9. package/parsers-config.ts +253 -0
  10. package/script/build.ts +115 -0
  11. package/script/publish-registries.ts +197 -0
  12. package/script/publish.ts +110 -0
  13. package/script/schema.ts +47 -0
  14. package/script/seed-e2e.ts +50 -0
  15. package/src/acp/README.md +164 -0
  16. package/src/acp/agent.ts +1437 -0
  17. package/src/acp/session.ts +105 -0
  18. package/src/acp/types.ts +22 -0
  19. package/src/agent/agent.ts +347 -0
  20. package/src/agent/generate.txt +75 -0
  21. package/src/agent/prompt/compaction.txt +12 -0
  22. package/src/agent/prompt/explore.txt +18 -0
  23. package/src/agent/prompt/summary.txt +11 -0
  24. package/src/agent/prompt/title.txt +44 -0
  25. package/src/auth/index.ts +73 -0
  26. package/src/brand/index.ts +73 -0
  27. package/src/bun/index.ts +139 -0
  28. package/src/bus/bus-event.ts +43 -0
  29. package/src/bus/global.ts +10 -0
  30. package/src/bus/index.ts +105 -0
  31. package/src/cli/bootstrap.ts +17 -0
  32. package/src/cli/cmd/acp.ts +69 -0
  33. package/src/cli/cmd/agent.ts +257 -0
  34. package/src/cli/cmd/auth.ts +405 -0
  35. package/src/cli/cmd/cmd.ts +7 -0
  36. package/src/cli/cmd/debug/agent.ts +166 -0
  37. package/src/cli/cmd/debug/config.ts +16 -0
  38. package/src/cli/cmd/debug/file.ts +97 -0
  39. package/src/cli/cmd/debug/index.ts +48 -0
  40. package/src/cli/cmd/debug/lsp.ts +52 -0
  41. package/src/cli/cmd/debug/ripgrep.ts +87 -0
  42. package/src/cli/cmd/debug/scrap.ts +16 -0
  43. package/src/cli/cmd/debug/skill.ts +16 -0
  44. package/src/cli/cmd/debug/snapshot.ts +52 -0
  45. package/src/cli/cmd/export.ts +88 -0
  46. package/src/cli/cmd/generate.ts +38 -0
  47. package/src/cli/cmd/github.ts +1548 -0
  48. package/src/cli/cmd/import.ts +99 -0
  49. package/src/cli/cmd/mcp.ts +765 -0
  50. package/src/cli/cmd/models.ts +77 -0
  51. package/src/cli/cmd/pr.ts +112 -0
  52. package/src/cli/cmd/run.ts +395 -0
  53. package/src/cli/cmd/serve.ts +20 -0
  54. package/src/cli/cmd/session.ts +135 -0
  55. package/src/cli/cmd/stats.ts +402 -0
  56. package/src/cli/cmd/tui/app.tsx +923 -0
  57. package/src/cli/cmd/tui/attach.ts +39 -0
  58. package/src/cli/cmd/tui/component/border.tsx +21 -0
  59. package/src/cli/cmd/tui/component/dialog-agent.tsx +31 -0
  60. package/src/cli/cmd/tui/component/dialog-command.tsx +162 -0
  61. package/src/cli/cmd/tui/component/dialog-error-log.tsx +155 -0
  62. package/src/cli/cmd/tui/component/dialog-mcp.tsx +86 -0
  63. package/src/cli/cmd/tui/component/dialog-model.tsx +234 -0
  64. package/src/cli/cmd/tui/component/dialog-provider.tsx +256 -0
  65. package/src/cli/cmd/tui/component/dialog-session-list.tsx +114 -0
  66. package/src/cli/cmd/tui/component/dialog-session-rename.tsx +31 -0
  67. package/src/cli/cmd/tui/component/dialog-stash.tsx +87 -0
  68. package/src/cli/cmd/tui/component/dialog-status.tsx +164 -0
  69. package/src/cli/cmd/tui/component/dialog-tag.tsx +44 -0
  70. package/src/cli/cmd/tui/component/dialog-theme-list.tsx +50 -0
  71. package/src/cli/cmd/tui/component/dynamic-layout.tsx +86 -0
  72. package/src/cli/cmd/tui/component/inspector-overlay.tsx +247 -0
  73. package/src/cli/cmd/tui/component/logo.tsx +88 -0
  74. package/src/cli/cmd/tui/component/prompt/autocomplete.tsx +653 -0
  75. package/src/cli/cmd/tui/component/prompt/frecency.tsx +89 -0
  76. package/src/cli/cmd/tui/component/prompt/history.tsx +108 -0
  77. package/src/cli/cmd/tui/component/prompt/index.tsx +1347 -0
  78. package/src/cli/cmd/tui/component/prompt/stash.tsx +101 -0
  79. package/src/cli/cmd/tui/component/textarea-keybindings.ts +73 -0
  80. package/src/cli/cmd/tui/component/tips.tsx +153 -0
  81. package/src/cli/cmd/tui/component/todo-item.tsx +32 -0
  82. package/src/cli/cmd/tui/context/args.tsx +14 -0
  83. package/src/cli/cmd/tui/context/directory.ts +13 -0
  84. package/src/cli/cmd/tui/context/error-log.tsx +56 -0
  85. package/src/cli/cmd/tui/context/exit.tsx +26 -0
  86. package/src/cli/cmd/tui/context/helper.tsx +25 -0
  87. package/src/cli/cmd/tui/context/inspector.tsx +57 -0
  88. package/src/cli/cmd/tui/context/keybind.tsx +108 -0
  89. package/src/cli/cmd/tui/context/kv.tsx +53 -0
  90. package/src/cli/cmd/tui/context/layout.tsx +240 -0
  91. package/src/cli/cmd/tui/context/local.tsx +402 -0
  92. package/src/cli/cmd/tui/context/prompt.tsx +18 -0
  93. package/src/cli/cmd/tui/context/route.tsx +51 -0
  94. package/src/cli/cmd/tui/context/sdk.tsx +94 -0
  95. package/src/cli/cmd/tui/context/sync.tsx +449 -0
  96. package/src/cli/cmd/tui/context/theme/aura.json +69 -0
  97. package/src/cli/cmd/tui/context/theme/ayu.json +80 -0
  98. package/src/cli/cmd/tui/context/theme/carbonfox.json +248 -0
  99. package/src/cli/cmd/tui/context/theme/catppuccin-frappe.json +233 -0
  100. package/src/cli/cmd/tui/context/theme/catppuccin-macchiato.json +233 -0
  101. package/src/cli/cmd/tui/context/theme/catppuccin.json +112 -0
  102. package/src/cli/cmd/tui/context/theme/cobalt2.json +228 -0
  103. package/src/cli/cmd/tui/context/theme/cursor.json +249 -0
  104. package/src/cli/cmd/tui/context/theme/dracula.json +219 -0
  105. package/src/cli/cmd/tui/context/theme/everforest.json +241 -0
  106. package/src/cli/cmd/tui/context/theme/flexoki.json +237 -0
  107. package/src/cli/cmd/tui/context/theme/github.json +233 -0
  108. package/src/cli/cmd/tui/context/theme/gruvbox.json +242 -0
  109. package/src/cli/cmd/tui/context/theme/jonsoc.json +245 -0
  110. package/src/cli/cmd/tui/context/theme/kanagawa.json +77 -0
  111. package/src/cli/cmd/tui/context/theme/lucent-orng.json +237 -0
  112. package/src/cli/cmd/tui/context/theme/material.json +235 -0
  113. package/src/cli/cmd/tui/context/theme/matrix.json +77 -0
  114. package/src/cli/cmd/tui/context/theme/mercury.json +252 -0
  115. package/src/cli/cmd/tui/context/theme/monokai.json +221 -0
  116. package/src/cli/cmd/tui/context/theme/nightowl.json +221 -0
  117. package/src/cli/cmd/tui/context/theme/nord.json +223 -0
  118. package/src/cli/cmd/tui/context/theme/one-dark.json +84 -0
  119. package/src/cli/cmd/tui/context/theme/orng.json +249 -0
  120. package/src/cli/cmd/tui/context/theme/osaka-jade.json +93 -0
  121. package/src/cli/cmd/tui/context/theme/palenight.json +222 -0
  122. package/src/cli/cmd/tui/context/theme/rosepine.json +234 -0
  123. package/src/cli/cmd/tui/context/theme/solarized.json +223 -0
  124. package/src/cli/cmd/tui/context/theme/synthwave84.json +226 -0
  125. package/src/cli/cmd/tui/context/theme/tokyonight.json +243 -0
  126. package/src/cli/cmd/tui/context/theme/vercel.json +245 -0
  127. package/src/cli/cmd/tui/context/theme/vesper.json +218 -0
  128. package/src/cli/cmd/tui/context/theme/zenburn.json +223 -0
  129. package/src/cli/cmd/tui/context/theme.tsx +1153 -0
  130. package/src/cli/cmd/tui/event.ts +48 -0
  131. package/src/cli/cmd/tui/hooks/use-command-registry.tsx +184 -0
  132. package/src/cli/cmd/tui/routes/home.tsx +198 -0
  133. package/src/cli/cmd/tui/routes/session/dialog-fork-from-timeline.tsx +64 -0
  134. package/src/cli/cmd/tui/routes/session/dialog-message.tsx +109 -0
  135. package/src/cli/cmd/tui/routes/session/dialog-subagent.tsx +26 -0
  136. package/src/cli/cmd/tui/routes/session/dialog-timeline.tsx +47 -0
  137. package/src/cli/cmd/tui/routes/session/footer.tsx +91 -0
  138. package/src/cli/cmd/tui/routes/session/git-commit.tsx +59 -0
  139. package/src/cli/cmd/tui/routes/session/git-history.tsx +122 -0
  140. package/src/cli/cmd/tui/routes/session/header.tsx +185 -0
  141. package/src/cli/cmd/tui/routes/session/index.tsx +2363 -0
  142. package/src/cli/cmd/tui/routes/session/navigator-ui.tsx +214 -0
  143. package/src/cli/cmd/tui/routes/session/navigator.tsx +1124 -0
  144. package/src/cli/cmd/tui/routes/session/panel-explorer.tsx +553 -0
  145. package/src/cli/cmd/tui/routes/session/panel-viewer.tsx +386 -0
  146. package/src/cli/cmd/tui/routes/session/permission.tsx +501 -0
  147. package/src/cli/cmd/tui/routes/session/question.tsx +507 -0
  148. package/src/cli/cmd/tui/routes/session/sidebar.tsx +365 -0
  149. package/src/cli/cmd/tui/routes/session/vcs-diff-viewer.tsx +37 -0
  150. package/src/cli/cmd/tui/routes/ui-settings.tsx +449 -0
  151. package/src/cli/cmd/tui/thread.ts +172 -0
  152. package/src/cli/cmd/tui/ui/dialog-alert.tsx +90 -0
  153. package/src/cli/cmd/tui/ui/dialog-confirm.tsx +83 -0
  154. package/src/cli/cmd/tui/ui/dialog-export-options.tsx +204 -0
  155. package/src/cli/cmd/tui/ui/dialog-help.tsx +38 -0
  156. package/src/cli/cmd/tui/ui/dialog-prompt.tsx +77 -0
  157. package/src/cli/cmd/tui/ui/dialog-select.tsx +384 -0
  158. package/src/cli/cmd/tui/ui/dialog.tsx +170 -0
  159. package/src/cli/cmd/tui/ui/link.tsx +28 -0
  160. package/src/cli/cmd/tui/ui/spinner.ts +375 -0
  161. package/src/cli/cmd/tui/ui/toast.tsx +100 -0
  162. package/src/cli/cmd/tui/util/clipboard.ts +255 -0
  163. package/src/cli/cmd/tui/util/editor.ts +32 -0
  164. package/src/cli/cmd/tui/util/signal.ts +7 -0
  165. package/src/cli/cmd/tui/util/terminal.ts +114 -0
  166. package/src/cli/cmd/tui/util/transcript.ts +98 -0
  167. package/src/cli/cmd/tui/worker.ts +152 -0
  168. package/src/cli/cmd/uninstall.ts +362 -0
  169. package/src/cli/cmd/upgrade.ts +73 -0
  170. package/src/cli/cmd/web.ts +81 -0
  171. package/src/cli/error.ts +57 -0
  172. package/src/cli/network.ts +53 -0
  173. package/src/cli/ui.ts +119 -0
  174. package/src/cli/upgrade.ts +25 -0
  175. package/src/command/index.ts +131 -0
  176. package/src/command/template/initialize.txt +10 -0
  177. package/src/command/template/review.txt +99 -0
  178. package/src/config/config.ts +1404 -0
  179. package/src/config/markdown.ts +93 -0
  180. package/src/env/index.ts +26 -0
  181. package/src/file/ignore.ts +83 -0
  182. package/src/file/index.ts +432 -0
  183. package/src/file/ripgrep.ts +407 -0
  184. package/src/file/time.ts +69 -0
  185. package/src/file/watcher.ts +127 -0
  186. package/src/flag/flag.ts +80 -0
  187. package/src/format/formatter.ts +357 -0
  188. package/src/format/index.ts +137 -0
  189. package/src/global/index.ts +58 -0
  190. package/src/id/id.ts +83 -0
  191. package/src/ide/index.ts +76 -0
  192. package/src/index.ts +208 -0
  193. package/src/installation/index.ts +258 -0
  194. package/src/lsp/client.ts +252 -0
  195. package/src/lsp/index.ts +485 -0
  196. package/src/lsp/language.ts +119 -0
  197. package/src/lsp/server.ts +2046 -0
  198. package/src/mcp/auth.ts +135 -0
  199. package/src/mcp/index.ts +934 -0
  200. package/src/mcp/oauth-callback.ts +200 -0
  201. package/src/mcp/oauth-provider.ts +155 -0
  202. package/src/patch/index.ts +680 -0
  203. package/src/permission/arity.ts +163 -0
  204. package/src/permission/index.ts +210 -0
  205. package/src/permission/next.ts +280 -0
  206. package/src/plugin/codex.ts +500 -0
  207. package/src/plugin/copilot.ts +283 -0
  208. package/src/plugin/index.ts +135 -0
  209. package/src/project/bootstrap.ts +35 -0
  210. package/src/project/instance.ts +91 -0
  211. package/src/project/project.ts +371 -0
  212. package/src/project/state.ts +66 -0
  213. package/src/project/vcs.ts +151 -0
  214. package/src/provider/auth.ts +147 -0
  215. package/src/provider/models-macro.ts +14 -0
  216. package/src/provider/models.ts +114 -0
  217. package/src/provider/provider.ts +1220 -0
  218. package/src/provider/sdk/openai-compatible/src/README.md +5 -0
  219. package/src/provider/sdk/openai-compatible/src/index.ts +2 -0
  220. package/src/provider/sdk/openai-compatible/src/openai-compatible-provider.ts +100 -0
  221. package/src/provider/sdk/openai-compatible/src/responses/convert-to-openai-responses-input.ts +303 -0
  222. package/src/provider/sdk/openai-compatible/src/responses/map-openai-responses-finish-reason.ts +22 -0
  223. package/src/provider/sdk/openai-compatible/src/responses/openai-config.ts +18 -0
  224. package/src/provider/sdk/openai-compatible/src/responses/openai-error.ts +22 -0
  225. package/src/provider/sdk/openai-compatible/src/responses/openai-responses-api-types.ts +207 -0
  226. package/src/provider/sdk/openai-compatible/src/responses/openai-responses-language-model.ts +1732 -0
  227. package/src/provider/sdk/openai-compatible/src/responses/openai-responses-prepare-tools.ts +177 -0
  228. package/src/provider/sdk/openai-compatible/src/responses/openai-responses-settings.ts +1 -0
  229. package/src/provider/sdk/openai-compatible/src/responses/tool/code-interpreter.ts +88 -0
  230. package/src/provider/sdk/openai-compatible/src/responses/tool/file-search.ts +128 -0
  231. package/src/provider/sdk/openai-compatible/src/responses/tool/image-generation.ts +115 -0
  232. package/src/provider/sdk/openai-compatible/src/responses/tool/local-shell.ts +65 -0
  233. package/src/provider/sdk/openai-compatible/src/responses/tool/web-search-preview.ts +104 -0
  234. package/src/provider/sdk/openai-compatible/src/responses/tool/web-search.ts +103 -0
  235. package/src/provider/transform.ts +742 -0
  236. package/src/pty/index.ts +241 -0
  237. package/src/question/index.ts +176 -0
  238. package/src/scheduler/index.ts +61 -0
  239. package/src/server/error.ts +36 -0
  240. package/src/server/event.ts +7 -0
  241. package/src/server/mdns.ts +59 -0
  242. package/src/server/routes/config.ts +92 -0
  243. package/src/server/routes/experimental.ts +208 -0
  244. package/src/server/routes/file.ts +227 -0
  245. package/src/server/routes/global.ts +135 -0
  246. package/src/server/routes/mcp.ts +225 -0
  247. package/src/server/routes/permission.ts +68 -0
  248. package/src/server/routes/project.ts +82 -0
  249. package/src/server/routes/provider.ts +165 -0
  250. package/src/server/routes/pty.ts +169 -0
  251. package/src/server/routes/question.ts +98 -0
  252. package/src/server/routes/session.ts +939 -0
  253. package/src/server/routes/tui.ts +379 -0
  254. package/src/server/server.ts +663 -0
  255. package/src/session/compaction.ts +225 -0
  256. package/src/session/index.ts +498 -0
  257. package/src/session/llm.ts +288 -0
  258. package/src/session/message-v2.ts +740 -0
  259. package/src/session/message.ts +189 -0
  260. package/src/session/processor.ts +406 -0
  261. package/src/session/prompt/anthropic-20250930.txt +168 -0
  262. package/src/session/prompt/anthropic.txt +172 -0
  263. package/src/session/prompt/anthropic_spoof.txt +1 -0
  264. package/src/session/prompt/beast.txt +149 -0
  265. package/src/session/prompt/build-switch.txt +5 -0
  266. package/src/session/prompt/codex_header.txt +81 -0
  267. package/src/session/prompt/copilot-gpt-5.txt +145 -0
  268. package/src/session/prompt/gemini.txt +157 -0
  269. package/src/session/prompt/max-steps.txt +16 -0
  270. package/src/session/prompt/plan-reminder-anthropic.txt +67 -0
  271. package/src/session/prompt/plan.txt +26 -0
  272. package/src/session/prompt/qwen.txt +111 -0
  273. package/src/session/prompt.ts +1815 -0
  274. package/src/session/retry.ts +90 -0
  275. package/src/session/revert.ts +121 -0
  276. package/src/session/status.ts +76 -0
  277. package/src/session/summary.ts +150 -0
  278. package/src/session/system.ts +156 -0
  279. package/src/session/todo.ts +37 -0
  280. package/src/share/share-next.ts +204 -0
  281. package/src/share/share.ts +95 -0
  282. package/src/shell/shell.ts +67 -0
  283. package/src/skill/index.ts +1 -0
  284. package/src/skill/skill.ts +135 -0
  285. package/src/snapshot/index.ts +236 -0
  286. package/src/storage/storage.ts +227 -0
  287. package/src/tool/apply_patch.ts +279 -0
  288. package/src/tool/apply_patch.txt +33 -0
  289. package/src/tool/bash.ts +258 -0
  290. package/src/tool/bash.txt +115 -0
  291. package/src/tool/batch.ts +175 -0
  292. package/src/tool/batch.txt +24 -0
  293. package/src/tool/codesearch.ts +132 -0
  294. package/src/tool/codesearch.txt +12 -0
  295. package/src/tool/edit.ts +645 -0
  296. package/src/tool/edit.txt +10 -0
  297. package/src/tool/external-directory.ts +32 -0
  298. package/src/tool/glob.ts +77 -0
  299. package/src/tool/glob.txt +6 -0
  300. package/src/tool/grep.ts +154 -0
  301. package/src/tool/grep.txt +8 -0
  302. package/src/tool/invalid.ts +17 -0
  303. package/src/tool/ls.ts +121 -0
  304. package/src/tool/ls.txt +1 -0
  305. package/src/tool/lsp.ts +96 -0
  306. package/src/tool/lsp.txt +19 -0
  307. package/src/tool/multiedit.ts +46 -0
  308. package/src/tool/multiedit.txt +41 -0
  309. package/src/tool/plan-enter.txt +14 -0
  310. package/src/tool/plan-exit.txt +13 -0
  311. package/src/tool/plan.ts +130 -0
  312. package/src/tool/question.ts +33 -0
  313. package/src/tool/question.txt +10 -0
  314. package/src/tool/read.ts +202 -0
  315. package/src/tool/read.txt +12 -0
  316. package/src/tool/registry.ts +162 -0
  317. package/src/tool/skill.ts +82 -0
  318. package/src/tool/task.ts +188 -0
  319. package/src/tool/task.txt +60 -0
  320. package/src/tool/todo.ts +53 -0
  321. package/src/tool/todoread.txt +14 -0
  322. package/src/tool/todowrite.txt +167 -0
  323. package/src/tool/tool.ts +88 -0
  324. package/src/tool/truncation.ts +106 -0
  325. package/src/tool/webfetch.ts +182 -0
  326. package/src/tool/webfetch.txt +13 -0
  327. package/src/tool/websearch.ts +150 -0
  328. package/src/tool/websearch.txt +14 -0
  329. package/src/tool/write.ts +80 -0
  330. package/src/tool/write.txt +8 -0
  331. package/src/util/archive.ts +16 -0
  332. package/src/util/color.ts +19 -0
  333. package/src/util/context.ts +25 -0
  334. package/src/util/defer.ts +12 -0
  335. package/src/util/eventloop.ts +20 -0
  336. package/src/util/filesystem.ts +93 -0
  337. package/src/util/fn.ts +11 -0
  338. package/src/util/format.ts +20 -0
  339. package/src/util/iife.ts +3 -0
  340. package/src/util/keybind.ts +103 -0
  341. package/src/util/lazy.ts +18 -0
  342. package/src/util/locale.ts +81 -0
  343. package/src/util/lock.ts +98 -0
  344. package/src/util/log.ts +180 -0
  345. package/src/util/queue.ts +32 -0
  346. package/src/util/rpc.ts +66 -0
  347. package/src/util/scrap.ts +10 -0
  348. package/src/util/signal.ts +12 -0
  349. package/src/util/timeout.ts +14 -0
  350. package/src/util/token.ts +7 -0
  351. package/src/util/wildcard.ts +56 -0
  352. package/src/worktree/index.ts +524 -0
  353. package/sst-env.d.ts +9 -0
  354. package/test/acp/agent-interface.test.ts +51 -0
  355. package/test/acp/event-subscription.test.ts +436 -0
  356. package/test/agent/agent.test.ts +638 -0
  357. package/test/bun.test.ts +53 -0
  358. package/test/cli/cmd/tui/fileref.test.ts +30 -0
  359. package/test/cli/github-action.test.ts +129 -0
  360. package/test/cli/github-remote.test.ts +80 -0
  361. package/test/cli/tui/navigator_logic.test.ts +99 -0
  362. package/test/cli/tui/transcript.test.ts +297 -0
  363. package/test/cli/ui.test.ts +80 -0
  364. package/test/config/agent-color.test.ts +66 -0
  365. package/test/config/config.test.ts +1613 -0
  366. package/test/config/fixtures/empty-frontmatter.md +4 -0
  367. package/test/config/fixtures/frontmatter.md +28 -0
  368. package/test/config/fixtures/no-frontmatter.md +1 -0
  369. package/test/config/markdown.test.ts +192 -0
  370. package/test/file/ignore.test.ts +10 -0
  371. package/test/file/path-traversal.test.ts +198 -0
  372. package/test/fixture/fixture.ts +45 -0
  373. package/test/fixture/lsp/fake-lsp-server.js +77 -0
  374. package/test/ide/ide.test.ts +82 -0
  375. package/test/keybind.test.ts +421 -0
  376. package/test/lsp/client.test.ts +95 -0
  377. package/test/mcp/headers.test.ts +153 -0
  378. package/test/mcp/oauth-browser.test.ts +261 -0
  379. package/test/patch/patch.test.ts +348 -0
  380. package/test/permission/arity.test.ts +33 -0
  381. package/test/permission/next.test.ts +690 -0
  382. package/test/permission-task.test.ts +319 -0
  383. package/test/plugin/codex.test.ts +123 -0
  384. package/test/preload.ts +67 -0
  385. package/test/project/project.test.ts +120 -0
  386. package/test/provider/amazon-bedrock.test.ts +268 -0
  387. package/test/provider/gitlab-duo.test.ts +286 -0
  388. package/test/provider/provider.test.ts +2149 -0
  389. package/test/provider/transform.test.ts +1631 -0
  390. package/test/question/question.test.ts +300 -0
  391. package/test/scheduler.test.ts +73 -0
  392. package/test/server/session-list.test.ts +39 -0
  393. package/test/server/session-select.test.ts +78 -0
  394. package/test/session/compaction.test.ts +293 -0
  395. package/test/session/llm.test.ts +90 -0
  396. package/test/session/message-v2.test.ts +786 -0
  397. package/test/session/retry.test.ts +131 -0
  398. package/test/session/revert-compact.test.ts +285 -0
  399. package/test/session/session.test.ts +71 -0
  400. package/test/skill/skill.test.ts +185 -0
  401. package/test/snapshot/snapshot.test.ts +939 -0
  402. package/test/tool/__snapshots__/tool.test.ts.snap +9 -0
  403. package/test/tool/apply_patch.test.ts +499 -0
  404. package/test/tool/bash.test.ts +320 -0
  405. package/test/tool/external-directory.test.ts +126 -0
  406. package/test/tool/fixtures/large-image.png +0 -0
  407. package/test/tool/fixtures/models-api.json +33453 -0
  408. package/test/tool/grep.test.ts +109 -0
  409. package/test/tool/question.test.ts +105 -0
  410. package/test/tool/read.test.ts +332 -0
  411. package/test/tool/registry.test.ts +76 -0
  412. package/test/tool/truncation.test.ts +159 -0
  413. package/test/util/filesystem.test.ts +39 -0
  414. package/test/util/format.test.ts +59 -0
  415. package/test/util/iife.test.ts +36 -0
  416. package/test/util/lazy.test.ts +50 -0
  417. package/test/util/lock.test.ts +72 -0
  418. package/test/util/timeout.test.ts +21 -0
  419. package/test/util/wildcard.test.ts +75 -0
  420. package/tsconfig.json +16 -0
@@ -0,0 +1,1631 @@
1
+ import { describe, expect, test } from "bun:test"
2
+ import { ProviderTransform } from "../../src/provider/transform"
3
+
4
+ const OUTPUT_TOKEN_MAX = 32000
5
+
6
+ describe("ProviderTransform.options - setCacheKey", () => {
7
+ const sessionID = "test-session-123"
8
+
9
+ const mockModel = {
10
+ id: "anthropic/claude-3-5-sonnet",
11
+ providerID: "anthropic",
12
+ api: {
13
+ id: "claude-3-5-sonnet-20241022",
14
+ url: "https://api.anthropic.com",
15
+ npm: "@ai-sdk/anthropic",
16
+ },
17
+ name: "Claude 3.5 Sonnet",
18
+ capabilities: {
19
+ temperature: true,
20
+ reasoning: false,
21
+ attachment: true,
22
+ toolcall: true,
23
+ input: { text: true, audio: false, image: true, video: false, pdf: true },
24
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
25
+ interleaved: false,
26
+ },
27
+ cost: {
28
+ input: 0.003,
29
+ output: 0.015,
30
+ cache: { read: 0.0003, write: 0.00375 },
31
+ },
32
+ limit: {
33
+ context: 200000,
34
+ output: 8192,
35
+ },
36
+ status: "active",
37
+ options: {},
38
+ headers: {},
39
+ } as any
40
+
41
+ test("should set promptCacheKey when providerOptions.setCacheKey is true", () => {
42
+ const result = ProviderTransform.options({
43
+ model: mockModel,
44
+ sessionID,
45
+ providerOptions: { setCacheKey: true },
46
+ })
47
+ expect(result.promptCacheKey).toBe(sessionID)
48
+ })
49
+
50
+ test("should not set promptCacheKey when providerOptions.setCacheKey is false", () => {
51
+ const result = ProviderTransform.options({
52
+ model: mockModel,
53
+ sessionID,
54
+ providerOptions: { setCacheKey: false },
55
+ })
56
+ expect(result.promptCacheKey).toBeUndefined()
57
+ })
58
+
59
+ test("should not set promptCacheKey when providerOptions is undefined", () => {
60
+ const result = ProviderTransform.options({
61
+ model: mockModel,
62
+ sessionID,
63
+ providerOptions: undefined,
64
+ })
65
+ expect(result.promptCacheKey).toBeUndefined()
66
+ })
67
+
68
+ test("should not set promptCacheKey when providerOptions does not have setCacheKey", () => {
69
+ const result = ProviderTransform.options({ model: mockModel, sessionID, providerOptions: {} })
70
+ expect(result.promptCacheKey).toBeUndefined()
71
+ })
72
+
73
+ test("should set promptCacheKey for openai provider regardless of setCacheKey", () => {
74
+ const openaiModel = {
75
+ ...mockModel,
76
+ providerID: "openai",
77
+ api: {
78
+ id: "gpt-4",
79
+ url: "https://api.openai.com",
80
+ npm: "@ai-sdk/openai",
81
+ },
82
+ }
83
+ const result = ProviderTransform.options({ model: openaiModel, sessionID, providerOptions: {} })
84
+ expect(result.promptCacheKey).toBe(sessionID)
85
+ })
86
+
87
+ test("should set store=false for openai provider", () => {
88
+ const openaiModel = {
89
+ ...mockModel,
90
+ providerID: "openai",
91
+ api: {
92
+ id: "gpt-4",
93
+ url: "https://api.openai.com",
94
+ npm: "@ai-sdk/openai",
95
+ },
96
+ }
97
+ const result = ProviderTransform.options({
98
+ model: openaiModel,
99
+ sessionID,
100
+ providerOptions: {},
101
+ })
102
+ expect(result.store).toBe(false)
103
+ })
104
+ })
105
+
106
+ describe("ProviderTransform.maxOutputTokens", () => {
107
+ test("returns 32k when modelLimit > 32k", () => {
108
+ const modelLimit = 100000
109
+ const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
110
+ expect(result).toBe(OUTPUT_TOKEN_MAX)
111
+ })
112
+
113
+ test("returns modelLimit when modelLimit < 32k", () => {
114
+ const modelLimit = 16000
115
+ const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
116
+ expect(result).toBe(16000)
117
+ })
118
+
119
+ describe("azure", () => {
120
+ test("returns 32k when modelLimit > 32k", () => {
121
+ const modelLimit = 100000
122
+ const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
123
+ expect(result).toBe(OUTPUT_TOKEN_MAX)
124
+ })
125
+
126
+ test("returns modelLimit when modelLimit < 32k", () => {
127
+ const modelLimit = 16000
128
+ const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
129
+ expect(result).toBe(16000)
130
+ })
131
+ })
132
+
133
+ describe("bedrock", () => {
134
+ test("returns 32k when modelLimit > 32k", () => {
135
+ const modelLimit = 100000
136
+ const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
137
+ expect(result).toBe(OUTPUT_TOKEN_MAX)
138
+ })
139
+
140
+ test("returns modelLimit when modelLimit < 32k", () => {
141
+ const modelLimit = 16000
142
+ const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
143
+ expect(result).toBe(16000)
144
+ })
145
+ })
146
+
147
+ describe("anthropic without thinking options", () => {
148
+ test("returns 32k when modelLimit > 32k", () => {
149
+ const modelLimit = 100000
150
+ const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
151
+ expect(result).toBe(OUTPUT_TOKEN_MAX)
152
+ })
153
+
154
+ test("returns modelLimit when modelLimit < 32k", () => {
155
+ const modelLimit = 16000
156
+ const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
157
+ expect(result).toBe(16000)
158
+ })
159
+ })
160
+
161
+ describe("anthropic with thinking options", () => {
162
+ test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
163
+ const modelLimit = 100000
164
+ const options = {
165
+ thinking: {
166
+ type: "enabled",
167
+ budgetTokens: 10000,
168
+ },
169
+ }
170
+ const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
171
+ expect(result).toBe(OUTPUT_TOKEN_MAX)
172
+ })
173
+
174
+ test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
175
+ const modelLimit = 50000
176
+ const options = {
177
+ thinking: {
178
+ type: "enabled",
179
+ budgetTokens: 30000,
180
+ },
181
+ }
182
+ const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
183
+ expect(result).toBe(20000)
184
+ })
185
+
186
+ test("returns 32k when thinking type is not enabled", () => {
187
+ const modelLimit = 100000
188
+ const options = {
189
+ thinking: {
190
+ type: "disabled",
191
+ budgetTokens: 10000,
192
+ },
193
+ }
194
+ const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
195
+ expect(result).toBe(OUTPUT_TOKEN_MAX)
196
+ })
197
+ })
198
+ })
199
+
200
+ describe("ProviderTransform.schema - gemini array items", () => {
201
+ test("adds missing items for array properties", () => {
202
+ const geminiModel = {
203
+ providerID: "google",
204
+ api: {
205
+ id: "gemini-3-pro",
206
+ },
207
+ } as any
208
+
209
+ const schema = {
210
+ type: "object",
211
+ properties: {
212
+ nodes: { type: "array" },
213
+ edges: { type: "array", items: { type: "string" } },
214
+ },
215
+ } as any
216
+
217
+ const result = ProviderTransform.schema(geminiModel, schema) as any
218
+
219
+ expect(result.properties.nodes.items).toBeDefined()
220
+ expect(result.properties.edges.items.type).toBe("string")
221
+ })
222
+ })
223
+
224
+ describe("ProviderTransform.message - DeepSeek reasoning content", () => {
225
+ test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
226
+ const msgs = [
227
+ {
228
+ role: "assistant",
229
+ content: [
230
+ { type: "reasoning", text: "Let me think about this..." },
231
+ {
232
+ type: "tool-call",
233
+ toolCallId: "test",
234
+ toolName: "bash",
235
+ input: { command: "echo hello" },
236
+ },
237
+ ],
238
+ },
239
+ ] as any[]
240
+
241
+ const result = ProviderTransform.message(
242
+ msgs,
243
+ {
244
+ id: "deepseek/deepseek-chat",
245
+ providerID: "deepseek",
246
+ api: {
247
+ id: "deepseek-chat",
248
+ url: "https://api.deepseek.com",
249
+ npm: "@ai-sdk/openai-compatible",
250
+ },
251
+ name: "DeepSeek Chat",
252
+ capabilities: {
253
+ temperature: true,
254
+ reasoning: true,
255
+ attachment: false,
256
+ toolcall: true,
257
+ input: { text: true, audio: false, image: false, video: false, pdf: false },
258
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
259
+ interleaved: {
260
+ field: "reasoning_content",
261
+ },
262
+ },
263
+ cost: {
264
+ input: 0.001,
265
+ output: 0.002,
266
+ cache: { read: 0.0001, write: 0.0002 },
267
+ },
268
+ limit: {
269
+ context: 128000,
270
+ output: 8192,
271
+ },
272
+ status: "active",
273
+ options: {},
274
+ headers: {},
275
+ release_date: "2023-04-01",
276
+ },
277
+ {},
278
+ )
279
+
280
+ expect(result).toHaveLength(1)
281
+ expect(result[0].content).toEqual([
282
+ {
283
+ type: "tool-call",
284
+ toolCallId: "test",
285
+ toolName: "bash",
286
+ input: { command: "echo hello" },
287
+ },
288
+ ])
289
+ expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
290
+ })
291
+
292
// Providers other than DeepSeek should not get reasoning parts rewritten into
// the openaiCompatible `reasoning_content` provider option — the content array
// is expected to pass through ProviderTransform.message untouched.
test("Non-DeepSeek providers leave reasoning content unchanged", () => {
  const msgs = [
    {
      role: "assistant",
      content: [
        { type: "reasoning", text: "Should not be processed" },
        { type: "text", text: "Answer" },
      ],
    },
  ] as any[]

  // Inline OpenAI model fixture; providerID/api.npm are what routing keys off,
  // the remaining fields just satisfy the model shape.
  const result = ProviderTransform.message(
    msgs,
    {
      id: "openai/gpt-4",
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
      name: "GPT-4",
      capabilities: {
        temperature: true,
        reasoning: false,
        attachment: true,
        toolcall: true,
        input: { text: true, audio: false, image: true, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: {
        input: 0.03,
        output: 0.06,
        cache: { read: 0.001, write: 0.002 },
      },
      limit: {
        context: 128000,
        output: 4096,
      },
      status: "active",
      options: {},
      headers: {},
      release_date: "2023-04-01",
    },
    {},
  )

  // Parts are unchanged and no reasoning_content option was injected.
  expect(result[0].content).toEqual([
    { type: "reasoning", text: "Should not be processed" },
    { type: "text", text: "Answer" },
  ])
  expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
})
346
+ })
347
+
348
// Verifies how ProviderTransform.message treats image parts in user messages:
// an empty base64 data URL is expected to be replaced in place with an error
// text part, while valid image payloads pass through unchanged.
describe("ProviderTransform.message - empty image handling", () => {
  // Minimal Anthropic model fixture; cast to any so only the fields the
  // transform reads need to be realistic.
  const mockModel = {
    id: "anthropic/claude-3-5-sonnet",
    providerID: "anthropic",
    api: {
      id: "claude-3-5-sonnet-20241022",
      url: "https://api.anthropic.com",
      npm: "@ai-sdk/anthropic",
    },
    name: "Claude 3.5 Sonnet",
    capabilities: {
      temperature: true,
      reasoning: false,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: true },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.003,
      output: 0.015,
      cache: { read: 0.0003, write: 0.00375 },
    },
    limit: {
      context: 200000,
      output: 8192,
    },
    status: "active",
    options: {},
    headers: {},
  } as any

  test("should replace empty base64 image with error text", () => {
    const msgs = [
      {
        role: "user",
        content: [
          { type: "text", text: "What is in this image?" },
          // data URL with nothing after the comma — an "empty" image payload
          { type: "image", image: "data:image/png;base64," },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, mockModel, {})

    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(2)
    expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
    // the empty image becomes a text part with an explicit error message
    expect(result[0].content[1]).toEqual({
      type: "text",
      text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
    })
  })

  test("should keep valid base64 images unchanged", () => {
    // base64 of a tiny valid PNG
    const validBase64 =
      "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
    const msgs = [
      {
        role: "user",
        content: [
          { type: "text", text: "What is in this image?" },
          { type: "image", image: `data:image/png;base64,${validBase64}` },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, mockModel, {})

    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(2)
    expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
    expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  })

  test("should handle mixed valid and empty images", () => {
    const validBase64 =
      "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
    const msgs = [
      {
        role: "user",
        content: [
          { type: "text", text: "Compare these images" },
          { type: "image", image: `data:image/png;base64,${validBase64}` },
          { type: "image", image: "data:image/jpeg;base64," },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, mockModel, {})

    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(3)
    expect(result[0].content[0]).toEqual({ type: "text", text: "Compare these images" })
    // valid image is preserved; only the empty one is replaced, in position
    expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
    expect(result[0].content[2]).toEqual({
      type: "text",
      text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
    })
  })
})
450
+
451
// Verifies that for the Anthropic provider ProviderTransform.message drops
// empty content: whole messages with "" string content, empty text/reasoning
// parts inside array content, and entire messages once all parts are empty —
// while non-anthropic providers are left alone.
describe("ProviderTransform.message - anthropic empty content filtering", () => {
  // Anthropic model fixture (same shape as the other fixtures in this file).
  const anthropicModel = {
    id: "anthropic/claude-3-5-sonnet",
    providerID: "anthropic",
    api: {
      id: "claude-3-5-sonnet-20241022",
      url: "https://api.anthropic.com",
      npm: "@ai-sdk/anthropic",
    },
    name: "Claude 3.5 Sonnet",
    capabilities: {
      temperature: true,
      reasoning: false,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: true },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.003,
      output: 0.015,
      cache: { read: 0.0003, write: 0.00375 },
    },
    limit: {
      context: 200000,
      output: 8192,
    },
    status: "active",
    options: {},
    headers: {},
  } as any

  test("filters out messages with empty string content", () => {
    const msgs = [
      { role: "user", content: "Hello" },
      { role: "assistant", content: "" },
      { role: "user", content: "World" },
    ] as any[]

    const result = ProviderTransform.message(msgs, anthropicModel, {})

    // the empty assistant message is removed entirely
    expect(result).toHaveLength(2)
    expect(result[0].content).toBe("Hello")
    expect(result[1].content).toBe("World")
  })

  test("filters out empty text parts from array content", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "text", text: "" },
          { type: "text", text: "Hello" },
          { type: "text", text: "" },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, anthropicModel, {})

    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(1)
    expect(result[0].content[0]).toEqual({ type: "text", text: "Hello" })
  })

  test("filters out empty reasoning parts from array content", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "" },
          { type: "text", text: "Answer" },
          { type: "reasoning", text: "" },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, anthropicModel, {})

    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(1)
    expect(result[0].content[0]).toEqual({ type: "text", text: "Answer" })
  })

  test("removes entire message when all parts are empty", () => {
    const msgs = [
      { role: "user", content: "Hello" },
      {
        role: "assistant",
        content: [
          { type: "text", text: "" },
          { type: "reasoning", text: "" },
        ],
      },
      { role: "user", content: "World" },
    ] as any[]

    const result = ProviderTransform.message(msgs, anthropicModel, {})

    // once every part is filtered the message itself disappears
    expect(result).toHaveLength(2)
    expect(result[0].content).toBe("Hello")
    expect(result[1].content).toBe("World")
  })

  test("keeps non-text/reasoning parts even if text parts are empty", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "text", text: "" },
          { type: "tool-call", toolCallId: "123", toolName: "bash", input: { command: "ls" } },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, anthropicModel, {})

    // tool-call parts are never treated as "empty"
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(1)
    expect(result[0].content[0]).toEqual({
      type: "tool-call",
      toolCallId: "123",
      toolName: "bash",
      input: { command: "ls" },
    })
  })

  test("keeps messages with valid text alongside empty parts", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Thinking..." },
          { type: "text", text: "" },
          { type: "text", text: "Result" },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, anthropicModel, {})

    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(2)
    expect(result[0].content[0]).toEqual({ type: "reasoning", text: "Thinking..." })
    expect(result[0].content[1]).toEqual({ type: "text", text: "Result" })
  })

  test("does not filter for non-anthropic providers", () => {
    // same fixture but re-keyed to openai — filtering must not apply
    const openaiModel = {
      ...anthropicModel,
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
    }

    const msgs = [
      { role: "assistant", content: "" },
      {
        role: "assistant",
        content: [{ type: "text", text: "" }],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, openaiModel, {})

    expect(result).toHaveLength(2)
    expect(result[0].content).toBe("")
    expect(result[1].content).toHaveLength(1)
  })
})
625
+
626
// Verifies that per-part providerOptions metadata (itemId,
// reasoningEncryptedContent, and any other keys) survives
// ProviderTransform.message regardless of the `store` flag, the provider
// package, or which providerOptions key the metadata lives under.
// NOTE(review): despite the describe title saying "strip", every assertion
// here checks that metadata is PRESERVED — the title appears to predate a
// behavior change; confirm against ProviderTransform.message.
describe("ProviderTransform.message - strip openai metadata when store=false", () => {
  // OpenAI model fixture shared by the tests below.
  const openaiModel = {
    id: "openai/gpt-5",
    providerID: "openai",
    api: {
      id: "gpt-5",
      url: "https://api.openai.com",
      npm: "@ai-sdk/openai",
    },
    name: "GPT-5",
    capabilities: {
      temperature: true,
      reasoning: true,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: false },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
    limit: { context: 128000, output: 4096 },
    status: "active",
    options: {},
    headers: {},
  } as any

  test("preserves itemId and reasoningEncryptedContent when store=false", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "thinking...",
            providerOptions: {
              openai: {
                itemId: "rs_123",
                reasoningEncryptedContent: "encrypted",
              },
            },
          },
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_456",
              },
            },
          },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]

    expect(result).toHaveLength(1)
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
    expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  })

  test("preserves itemId and reasoningEncryptedContent when store=false even when not openai", () => {
    // same metadata but routed through a different providerID
    const zenModel = {
      ...openaiModel,
      providerID: "zen",
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "thinking...",
            providerOptions: {
              openai: {
                itemId: "rs_123",
                reasoningEncryptedContent: "encrypted",
              },
            },
          },
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_456",
              },
            },
          },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[]

    expect(result).toHaveLength(1)
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
    expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  })

  test("preserves other openai options including itemId", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
                otherOption: "value",
              },
            },
          },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]

    // non-itemId options ride along untouched
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
    expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value")
  })

  test("preserves metadata for openai package when store is true", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
              },
            },
          },
        ],
      },
    ] as any[]

    // openai package preserves itemId regardless of store value
    const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[]

    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  })

  test("preserves metadata for non-openai packages when store is false", () => {
    const anthropicModel = {
      ...openaiModel,
      providerID: "anthropic",
      api: {
        id: "claude-3",
        url: "https://api.anthropic.com",
        npm: "@ai-sdk/anthropic",
      },
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
              },
            },
          },
        ],
      },
    ] as any[]

    // store=false preserves metadata for non-openai packages
    const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[]

    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  })

  test("preserves metadata using providerID key when store is false", () => {
    // metadata stored under the provider's own key instead of "openai"
    const jonsocModel = {
      ...openaiModel,
      providerID: "jonsoc",
      api: {
        id: "jonsoc-test",
        url: "https://api.jonsoc.com",
        npm: "@ai-sdk/openai-compatible",
      },
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              jonsoc: {
                itemId: "msg_123",
                otherOption: "value",
              },
            },
          },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, jonsocModel, { store: false }) as any[]

    expect(result[0].content[0].providerOptions?.jonsoc?.itemId).toBe("msg_123")
    expect(result[0].content[0].providerOptions?.jonsoc?.otherOption).toBe("value")
  })

  test("preserves itemId across all providerOptions keys", () => {
    const jonsocModel = {
      ...openaiModel,
      providerID: "jonsoc",
      api: {
        id: "jonsoc-test",
        url: "https://api.jonsoc.com",
        npm: "@ai-sdk/openai-compatible",
      },
    }
    // metadata at both the message level and the part level, under three keys
    const msgs = [
      {
        role: "assistant",
        providerOptions: {
          openai: { itemId: "msg_root" },
          jonsoc: { itemId: "msg_jonsoc" },
          extra: { itemId: "msg_extra" },
        },
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: { itemId: "msg_openai_part" },
              jonsoc: { itemId: "msg_jonsoc_part" },
              extra: { itemId: "msg_extra_part" },
            },
          },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, jonsocModel, { store: false }) as any[]

    expect(result[0].providerOptions?.openai?.itemId).toBe("msg_root")
    expect(result[0].providerOptions?.jonsoc?.itemId).toBe("msg_jonsoc")
    expect(result[0].providerOptions?.extra?.itemId).toBe("msg_extra")
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_openai_part")
    expect(result[0].content[0].providerOptions?.jonsoc?.itemId).toBe("msg_jonsoc_part")
    expect(result[0].content[0].providerOptions?.extra?.itemId).toBe("msg_extra_part")
  })

  test("does not strip metadata for non-openai packages when store is not false", () => {
    const anthropicModel = {
      ...openaiModel,
      providerID: "anthropic",
      api: {
        id: "claude-3",
        url: "https://api.anthropic.com",
        npm: "@ai-sdk/anthropic",
      },
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
              },
            },
          },
        ],
      },
    ] as any[]

    // empty options object: store is unset, not false
    const result = ProviderTransform.message(msgs, anthropicModel, {}) as any[]

    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  })
})
916
+
917
// Verifies how ProviderTransform.message remaps the providerOptions key from
// the configured providerID to the key the underlying npm package expects
// (e.g. custom bedrock providerIDs -> "bedrock"), and where it must NOT remap
// (azure keeps its own key).
describe("ProviderTransform.message - providerOptions key remapping", () => {
  // Factory for a minimal model whose providerID and npm package vary per test.
  const createModel = (providerID: string, npm: string) =>
    ({
      id: `${providerID}/test-model`,
      providerID,
      api: {
        id: "test-model",
        url: "https://api.test.com",
        npm,
      },
      name: "Test Model",
      capabilities: {
        temperature: true,
        reasoning: false,
        attachment: true,
        toolcall: true,
        input: { text: true, audio: false, image: true, video: false, pdf: true },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
      limit: { context: 128000, output: 8192 },
      status: "active",
      options: {},
      headers: {},
    }) as any

  test("azure keeps 'azure' key and does not remap to 'openai'", () => {
    const model = createModel("azure", "@ai-sdk/azure")
    const msgs = [
      {
        role: "user",
        content: "Hello",
        providerOptions: {
          azure: { someOption: "value" },
        },
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, model, {})

    expect(result[0].providerOptions?.azure).toEqual({ someOption: "value" })
    expect(result[0].providerOptions?.openai).toBeUndefined()
  })

  test("openai with github-copilot npm remaps providerID to 'openai'", () => {
    const model = createModel("github-copilot", "@ai-sdk/github-copilot")
    const msgs = [
      {
        role: "user",
        content: "Hello",
        providerOptions: {
          "github-copilot": { someOption: "value" },
        },
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, model, {})

    // options move under "openai"; the original key is dropped
    expect(result[0].providerOptions?.openai).toEqual({ someOption: "value" })
    expect(result[0].providerOptions?.["github-copilot"]).toBeUndefined()
  })

  test("bedrock remaps providerID to 'bedrock' key", () => {
    const model = createModel("my-bedrock", "@ai-sdk/amazon-bedrock")
    const msgs = [
      {
        role: "user",
        content: "Hello",
        providerOptions: {
          "my-bedrock": { someOption: "value" },
        },
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, model, {})

    expect(result[0].providerOptions?.bedrock).toEqual({ someOption: "value" })
    expect(result[0].providerOptions?.["my-bedrock"]).toBeUndefined()
  })
})
998
+
999
// Verifies that Claude served through an Amazon Bedrock custom application
// inference profile (api.id is an ARN, not a model slug) still gets the
// ephemeral cachePoint injected into its bedrock providerOptions.
describe("ProviderTransform.message - claude w/bedrock custom inference profile", () => {
  test("adds cachePoint", () => {
    const model = {
      id: "amazon-bedrock/custom-claude-sonnet-4.5",
      providerID: "amazon-bedrock",
      api: {
        // ARN form of a Bedrock application inference profile
        id: "arn:aws:bedrock:xxx:yyy:application-inference-profile/zzz",
        url: "https://api.test.com",
        npm: "@ai-sdk/amazon-bedrock",
      },
      name: "Custom inference profile",
      capabilities: {},
      options: {},
      headers: {},
    } as any

    const msgs = [
      {
        role: "user",
        content: "Hello",
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, model, {})

    // objectContaining: other bedrock options may be present alongside
    expect(result[0].providerOptions?.bedrock).toEqual(
      expect.objectContaining({
        cachePoint: {
          type: "ephemeral",
        },
      }),
    )
  })
})
1033
+
1034
+ describe("ProviderTransform.variants", () => {
1035
// Factory for the ProviderTransform.variants tests: a complete model object
// with reasoning enabled by default; individual tests override providerID,
// api, release_date, or capabilities via `overrides` (shallow spread, so an
// override of `capabilities` replaces the whole object).
const createMockModel = (overrides: Partial<any> = {}): any => ({
  id: "test/test-model",
  providerID: "test",
  api: {
    id: "test-model",
    url: "https://api.test.com",
    npm: "@ai-sdk/openai",
  },
  name: "Test Model",
  capabilities: {
    temperature: true,
    reasoning: true,
    attachment: true,
    toolcall: true,
    input: { text: true, audio: false, image: true, video: false, pdf: false },
    output: { text: true, audio: false, image: false, video: false, pdf: false },
    interleaved: false,
  },
  cost: {
    input: 0.001,
    output: 0.002,
    cache: { read: 0.0001, write: 0.0002 },
  },
  limit: {
    context: 128000,
    output: 8192,
  },
  status: "active",
  options: {},
  headers: {},
  release_date: "2024-01-01",
  ...overrides,
})
1068
+
1069
// Providers/models for which ProviderTransform.variants offers no reasoning
// effort variants at all: non-reasoning models, plus deepseek, minimax, glm,
// and mistral.
test("returns empty object when model has no reasoning capabilities", () => {
  const model = createMockModel({
    capabilities: { reasoning: false },
  })
  const result = ProviderTransform.variants(model)
  expect(result).toEqual({})
})

test("deepseek returns empty object", () => {
  const model = createMockModel({
    id: "deepseek/deepseek-chat",
    providerID: "deepseek",
    api: {
      id: "deepseek-chat",
      url: "https://api.deepseek.com",
      npm: "@ai-sdk/openai-compatible",
    },
  })
  const result = ProviderTransform.variants(model)
  expect(result).toEqual({})
})

test("minimax returns empty object", () => {
  const model = createMockModel({
    id: "minimax/minimax-model",
    providerID: "minimax",
    api: {
      id: "minimax-model",
      url: "https://api.minimax.com",
      npm: "@ai-sdk/openai-compatible",
    },
  })
  const result = ProviderTransform.variants(model)
  expect(result).toEqual({})
})

test("glm returns empty object", () => {
  const model = createMockModel({
    id: "glm/glm-4",
    providerID: "glm",
    api: {
      id: "glm-4",
      url: "https://api.glm.com",
      npm: "@ai-sdk/openai-compatible",
    },
  })
  const result = ProviderTransform.variants(model)
  expect(result).toEqual({})
})

test("mistral returns empty object", () => {
  const model = createMockModel({
    id: "mistral/mistral-large",
    providerID: "mistral",
    api: {
      id: "mistral-large-latest",
      url: "https://api.mistral.com",
      npm: "@ai-sdk/mistral",
    },
  })
  const result = ProviderTransform.variants(model)
  expect(result).toEqual({})
})
1132
+
1133
// Variants for models routed through @openrouter/ai-sdk-provider: gpt-* and
// gemini-3* get the full OPENAI_EFFORTS set shaped as { reasoning: { effort } };
// grok-3-mini gets low/high only; other models get none.
describe("@openrouter/ai-sdk-provider", () => {
  test("returns empty object for non-qualifying models", () => {
    const model = createMockModel({
      id: "openrouter/test-model",
      providerID: "openrouter",
      api: {
        id: "test-model",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })

  test("gpt models return OPENAI_EFFORTS with reasoning", () => {
    const model = createMockModel({
      id: "openrouter/gpt-4",
      providerID: "openrouter",
      api: {
        id: "gpt-4",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    // openrouter nests the effort under a `reasoning` object
    expect(result.low).toEqual({ reasoning: { effort: "low" } })
    expect(result.high).toEqual({ reasoning: { effort: "high" } })
  })

  test("gemini-3 returns OPENAI_EFFORTS with reasoning", () => {
    const model = createMockModel({
      id: "openrouter/gemini-3-5-pro",
      providerID: "openrouter",
      api: {
        id: "gemini-3-5-pro",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  })

  test("grok-4 returns empty object", () => {
    const model = createMockModel({
      id: "openrouter/grok-4",
      providerID: "openrouter",
      api: {
        id: "grok-4",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })

  test("grok-3-mini returns low and high with reasoning", () => {
    const model = createMockModel({
      id: "openrouter/grok-3-mini",
      providerID: "openrouter",
      api: {
        id: "grok-3-mini",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "high"])
    expect(result.low).toEqual({ reasoning: { effort: "low" } })
    expect(result.high).toEqual({ reasoning: { effort: "high" } })
  })
})
1208
+
1209
// @ai-sdk/gateway: full OPENAI_EFFORTS set, flat `reasoningEffort` shape.
describe("@ai-sdk/gateway", () => {
  test("returns OPENAI_EFFORTS with reasoningEffort", () => {
    const model = createMockModel({
      id: "gateway/gateway-model",
      providerID: "gateway",
      api: {
        id: "gateway-model",
        url: "https://gateway.ai",
        npm: "@ai-sdk/gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    expect(result.low).toEqual({ reasoningEffort: "low" })
    expect(result.high).toEqual({ reasoningEffort: "high" })
  })
})
1226
+
1227
// @ai-sdk/cerebras: the widely-supported low/medium/high effort set, flat
// `reasoningEffort` shape.
describe("@ai-sdk/cerebras", () => {
  test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
    const model = createMockModel({
      id: "cerebras/llama-4",
      providerID: "cerebras",
      api: {
        id: "llama-4-sc",
        url: "https://api.cerebras.ai",
        npm: "@ai-sdk/cerebras",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
    expect(result.low).toEqual({ reasoningEffort: "low" })
    expect(result.high).toEqual({ reasoningEffort: "high" })
  })
})
1244
+
1245
// @ai-sdk/togetherai: same widely-supported low/medium/high set as cerebras.
describe("@ai-sdk/togetherai", () => {
  test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
    const model = createMockModel({
      id: "togetherai/llama-4",
      providerID: "togetherai",
      api: {
        id: "llama-4-sc",
        url: "https://api.togetherai.com",
        npm: "@ai-sdk/togetherai",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
    expect(result.low).toEqual({ reasoningEffort: "low" })
    expect(result.high).toEqual({ reasoningEffort: "high" })
  })
})
1262
+
1263
// @ai-sdk/xai: grok-3 gets no variants; grok-3-mini gets low/high only.
describe("@ai-sdk/xai", () => {
  test("grok-3 returns empty object", () => {
    const model = createMockModel({
      id: "xai/grok-3",
      providerID: "xai",
      api: {
        id: "grok-3",
        url: "https://api.x.ai",
        npm: "@ai-sdk/xai",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })

  test("grok-3-mini returns low and high with reasoningEffort", () => {
    const model = createMockModel({
      id: "xai/grok-3-mini",
      providerID: "xai",
      api: {
        id: "grok-3-mini",
        url: "https://api.x.ai",
        npm: "@ai-sdk/xai",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "high"])
    expect(result.low).toEqual({ reasoningEffort: "low" })
    expect(result.high).toEqual({ reasoningEffort: "high" })
  })
})
1294
+
1295
// @ai-sdk/deepinfra: widely-supported low/medium/high set, flat reasoningEffort.
describe("@ai-sdk/deepinfra", () => {
  test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
    const model = createMockModel({
      id: "deepinfra/llama-4",
      providerID: "deepinfra",
      api: {
        id: "llama-4-sc",
        url: "https://api.deepinfra.com",
        npm: "@ai-sdk/deepinfra",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
    expect(result.low).toEqual({ reasoningEffort: "low" })
    expect(result.high).toEqual({ reasoningEffort: "high" })
  })
})
1312
+
1313
// Generic @ai-sdk/openai-compatible providers (custom providerIDs) also get
// the widely-supported low/medium/high set.
describe("@ai-sdk/openai-compatible", () => {
  test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
    const model = createMockModel({
      id: "custom-provider/custom-model",
      providerID: "custom-provider",
      api: {
        id: "custom-model",
        url: "https://api.custom.com",
        npm: "@ai-sdk/openai-compatible",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
    expect(result.low).toEqual({ reasoningEffort: "low" })
    expect(result.high).toEqual({ reasoningEffort: "high" })
  })
})
1330
+
1331
// @ai-sdk/azure: o1-mini gets no variants; other models get efforts that also
// request a reasoning summary and encrypted reasoning content; gpt-5 adds the
// "minimal" effort level.
describe("@ai-sdk/azure", () => {
  test("o1-mini returns empty object", () => {
    const model = createMockModel({
      id: "o1-mini",
      providerID: "azure",
      api: {
        id: "o1-mini",
        url: "https://azure.com",
        npm: "@ai-sdk/azure",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })

  test("standard azure models return custom efforts with reasoningSummary", () => {
    const model = createMockModel({
      id: "o1",
      providerID: "azure",
      api: {
        id: "o1",
        url: "https://azure.com",
        npm: "@ai-sdk/azure",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
    expect(result.low).toEqual({
      reasoningEffort: "low",
      reasoningSummary: "auto",
      include: ["reasoning.encrypted_content"],
    })
  })

  test("gpt-5 adds minimal effort", () => {
    const model = createMockModel({
      id: "gpt-5",
      providerID: "azure",
      api: {
        id: "gpt-5",
        url: "https://azure.com",
        npm: "@ai-sdk/azure",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
  })
})
1379
+
1380
// @ai-sdk/openai: gpt-5-pro gets no variants; other models get effort variants
// with reasoningSummary + encrypted reasoning content, and the available
// effort levels grow with the model's release_date ("none" after 2025-11-13,
// "xhigh" after 2025-12-04).
describe("@ai-sdk/openai", () => {
  test("gpt-5-pro returns empty object", () => {
    const model = createMockModel({
      id: "gpt-5-pro",
      providerID: "openai",
      api: {
        id: "gpt-5-pro",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })

  test("standard openai models return custom efforts with reasoningSummary", () => {
    const model = createMockModel({
      id: "gpt-5",
      providerID: "openai",
      api: {
        id: "gpt-5",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
      // pre-cutoff date: no "none"/"xhigh" levels expected
      release_date: "2024-06-01",
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
    expect(result.low).toEqual({
      reasoningEffort: "low",
      reasoningSummary: "auto",
      include: ["reasoning.encrypted_content"],
    })
  })

  test("models after 2025-11-13 include 'none' effort", () => {
    const model = createMockModel({
      id: "gpt-5-nano",
      providerID: "openai",
      api: {
        id: "gpt-5-nano",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
      release_date: "2025-11-14",
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high"])
  })

  test("models after 2025-12-04 include 'xhigh' effort", () => {
    const model = createMockModel({
      id: "openai/gpt-5-chat",
      providerID: "openai",
      api: {
        id: "gpt-5-chat",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
      release_date: "2025-12-05",
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  })
})
1445
+
1446
+ describe("@ai-sdk/anthropic", () => {
1447
+ test("returns high and max with thinking config", () => {
1448
+ const model = createMockModel({
1449
+ id: "anthropic/claude-4",
1450
+ providerID: "anthropic",
1451
+ api: {
1452
+ id: "claude-4",
1453
+ url: "https://api.anthropic.com",
1454
+ npm: "@ai-sdk/anthropic",
1455
+ },
1456
+ })
1457
+ const result = ProviderTransform.variants(model)
1458
+ expect(Object.keys(result)).toEqual(["high", "max"])
1459
+ expect(result.high).toEqual({
1460
+ thinking: {
1461
+ type: "enabled",
1462
+ budgetTokens: 16000,
1463
+ },
1464
+ })
1465
+ expect(result.max).toEqual({
1466
+ thinking: {
1467
+ type: "enabled",
1468
+ budgetTokens: 31999,
1469
+ },
1470
+ })
1471
+ })
1472
+ })
1473
+
1474
+ describe("@ai-sdk/amazon-bedrock", () => {
1475
+ test("returns WIDELY_SUPPORTED_EFFORTS with reasoningConfig", () => {
1476
+ const model = createMockModel({
1477
+ id: "bedrock/llama-4",
1478
+ providerID: "bedrock",
1479
+ api: {
1480
+ id: "llama-4-sc",
1481
+ url: "https://bedrock.amazonaws.com",
1482
+ npm: "@ai-sdk/amazon-bedrock",
1483
+ },
1484
+ })
1485
+ const result = ProviderTransform.variants(model)
1486
+ expect(Object.keys(result)).toEqual(["low", "medium", "high"])
1487
+ expect(result.low).toEqual({
1488
+ reasoningConfig: {
1489
+ type: "enabled",
1490
+ maxReasoningEffort: "low",
1491
+ },
1492
+ })
1493
+ })
1494
+ })
1495
+
1496
+ describe("@ai-sdk/google", () => {
1497
+ test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
1498
+ const model = createMockModel({
1499
+ id: "google/gemini-2.5-pro",
1500
+ providerID: "google",
1501
+ api: {
1502
+ id: "gemini-2.5-pro",
1503
+ url: "https://generativelanguage.googleapis.com",
1504
+ npm: "@ai-sdk/google",
1505
+ },
1506
+ })
1507
+ const result = ProviderTransform.variants(model)
1508
+ expect(Object.keys(result)).toEqual(["high", "max"])
1509
+ expect(result.high).toEqual({
1510
+ thinkingConfig: {
1511
+ includeThoughts: true,
1512
+ thinkingBudget: 16000,
1513
+ },
1514
+ })
1515
+ expect(result.max).toEqual({
1516
+ thinkingConfig: {
1517
+ includeThoughts: true,
1518
+ thinkingBudget: 24576,
1519
+ },
1520
+ })
1521
+ })
1522
+
1523
+ test("other gemini models return low and high with thinkingLevel", () => {
1524
+ const model = createMockModel({
1525
+ id: "google/gemini-2.0-pro",
1526
+ providerID: "google",
1527
+ api: {
1528
+ id: "gemini-2.0-pro",
1529
+ url: "https://generativelanguage.googleapis.com",
1530
+ npm: "@ai-sdk/google",
1531
+ },
1532
+ })
1533
+ const result = ProviderTransform.variants(model)
1534
+ expect(Object.keys(result)).toEqual(["low", "high"])
1535
+ expect(result.low).toEqual({
1536
+ includeThoughts: true,
1537
+ thinkingLevel: "low",
1538
+ })
1539
+ expect(result.high).toEqual({
1540
+ includeThoughts: true,
1541
+ thinkingLevel: "high",
1542
+ })
1543
+ })
1544
+ })
1545
+
1546
+ describe("@ai-sdk/google-vertex", () => {
1547
+ test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
1548
+ const model = createMockModel({
1549
+ id: "google-vertex/gemini-2.5-pro",
1550
+ providerID: "google-vertex",
1551
+ api: {
1552
+ id: "gemini-2.5-pro",
1553
+ url: "https://vertexai.googleapis.com",
1554
+ npm: "@ai-sdk/google-vertex",
1555
+ },
1556
+ })
1557
+ const result = ProviderTransform.variants(model)
1558
+ expect(Object.keys(result)).toEqual(["high", "max"])
1559
+ })
1560
+
1561
+ test("other vertex models return low and high with thinkingLevel", () => {
1562
+ const model = createMockModel({
1563
+ id: "google-vertex/gemini-2.0-pro",
1564
+ providerID: "google-vertex",
1565
+ api: {
1566
+ id: "gemini-2.0-pro",
1567
+ url: "https://vertexai.googleapis.com",
1568
+ npm: "@ai-sdk/google-vertex",
1569
+ },
1570
+ })
1571
+ const result = ProviderTransform.variants(model)
1572
+ expect(Object.keys(result)).toEqual(["low", "high"])
1573
+ })
1574
+ })
1575
+
1576
+ describe("@ai-sdk/cohere", () => {
1577
+ test("returns empty object", () => {
1578
+ const model = createMockModel({
1579
+ id: "cohere/command-r",
1580
+ providerID: "cohere",
1581
+ api: {
1582
+ id: "command-r",
1583
+ url: "https://api.cohere.com",
1584
+ npm: "@ai-sdk/cohere",
1585
+ },
1586
+ })
1587
+ const result = ProviderTransform.variants(model)
1588
+ expect(result).toEqual({})
1589
+ })
1590
+ })
1591
+
1592
+ describe("@ai-sdk/groq", () => {
1593
+ test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
1594
+ const model = createMockModel({
1595
+ id: "groq/llama-4",
1596
+ providerID: "groq",
1597
+ api: {
1598
+ id: "llama-4-sc",
1599
+ url: "https://api.groq.com",
1600
+ npm: "@ai-sdk/groq",
1601
+ },
1602
+ })
1603
+ const result = ProviderTransform.variants(model)
1604
+ expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
1605
+ expect(result.none).toEqual({
1606
+ includeThoughts: true,
1607
+ thinkingLevel: "none",
1608
+ })
1609
+ expect(result.low).toEqual({
1610
+ includeThoughts: true,
1611
+ thinkingLevel: "low",
1612
+ })
1613
+ })
1614
+ })
1615
+
1616
+ describe("@ai-sdk/perplexity", () => {
1617
+ test("returns empty object", () => {
1618
+ const model = createMockModel({
1619
+ id: "perplexity/sonar-plus",
1620
+ providerID: "perplexity",
1621
+ api: {
1622
+ id: "sonar-plus",
1623
+ url: "https://api.perplexity.ai",
1624
+ npm: "@ai-sdk/perplexity",
1625
+ },
1626
+ })
1627
+ const result = ProviderTransform.variants(model)
1628
+ expect(result).toEqual({})
1629
+ })
1630
+ })
1631
+ })