chad-code 1.3.1 → 1.3.2

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
Files changed (337)
  1. package/README.md +55 -6
  2. package/package.json +36 -98
  3. package/AGENTS.md +0 -27
  4. package/Dockerfile +0 -18
  5. package/README.npm.md +0 -64
  6. package/bunfig.toml +0 -7
  7. package/eslint.config.js +0 -29
  8. package/parsers-config.ts +0 -253
  9. package/script/build.ts +0 -167
  10. package/script/publish-registries.ts +0 -187
  11. package/script/publish.ts +0 -93
  12. package/script/schema.ts +0 -47
  13. package/src/acp/README.md +0 -164
  14. package/src/acp/agent.ts +0 -1086
  15. package/src/acp/session.ts +0 -101
  16. package/src/acp/types.ts +0 -22
  17. package/src/agent/agent.ts +0 -253
  18. package/src/agent/generate.txt +0 -75
  19. package/src/agent/prompt/compaction.txt +0 -12
  20. package/src/agent/prompt/explore.txt +0 -18
  21. package/src/agent/prompt/summary.txt +0 -11
  22. package/src/agent/prompt/title.txt +0 -36
  23. package/src/auth/index.ts +0 -70
  24. package/src/bun/index.ts +0 -130
  25. package/src/bus/bus-event.ts +0 -43
  26. package/src/bus/global.ts +0 -10
  27. package/src/bus/index.ts +0 -105
  28. package/src/cli/bootstrap.ts +0 -17
  29. package/src/cli/cmd/acp.ts +0 -69
  30. package/src/cli/cmd/agent.ts +0 -257
  31. package/src/cli/cmd/auth.ts +0 -132
  32. package/src/cli/cmd/cmd.ts +0 -7
  33. package/src/cli/cmd/debug/agent.ts +0 -28
  34. package/src/cli/cmd/debug/config.ts +0 -15
  35. package/src/cli/cmd/debug/file.ts +0 -91
  36. package/src/cli/cmd/debug/index.ts +0 -45
  37. package/src/cli/cmd/debug/lsp.ts +0 -48
  38. package/src/cli/cmd/debug/ripgrep.ts +0 -83
  39. package/src/cli/cmd/debug/scrap.ts +0 -15
  40. package/src/cli/cmd/debug/skill.ts +0 -15
  41. package/src/cli/cmd/debug/snapshot.ts +0 -48
  42. package/src/cli/cmd/export.ts +0 -88
  43. package/src/cli/cmd/generate.ts +0 -38
  44. package/src/cli/cmd/github.ts +0 -32
  45. package/src/cli/cmd/import.ts +0 -98
  46. package/src/cli/cmd/mcp.ts +0 -670
  47. package/src/cli/cmd/models.ts +0 -42
  48. package/src/cli/cmd/pr.ts +0 -112
  49. package/src/cli/cmd/run.ts +0 -374
  50. package/src/cli/cmd/serve.ts +0 -16
  51. package/src/cli/cmd/session.ts +0 -135
  52. package/src/cli/cmd/stats.ts +0 -402
  53. package/src/cli/cmd/tui/app.tsx +0 -705
  54. package/src/cli/cmd/tui/attach.ts +0 -32
  55. package/src/cli/cmd/tui/component/border.tsx +0 -21
  56. package/src/cli/cmd/tui/component/dialog-agent.tsx +0 -31
  57. package/src/cli/cmd/tui/component/dialog-command.tsx +0 -124
  58. package/src/cli/cmd/tui/component/dialog-mcp.tsx +0 -86
  59. package/src/cli/cmd/tui/component/dialog-model.tsx +0 -232
  60. package/src/cli/cmd/tui/component/dialog-provider.tsx +0 -228
  61. package/src/cli/cmd/tui/component/dialog-session-list.tsx +0 -115
  62. package/src/cli/cmd/tui/component/dialog-session-rename.tsx +0 -31
  63. package/src/cli/cmd/tui/component/dialog-stash.tsx +0 -86
  64. package/src/cli/cmd/tui/component/dialog-status.tsx +0 -162
  65. package/src/cli/cmd/tui/component/dialog-tag.tsx +0 -44
  66. package/src/cli/cmd/tui/component/dialog-theme-list.tsx +0 -50
  67. package/src/cli/cmd/tui/component/did-you-know.tsx +0 -85
  68. package/src/cli/cmd/tui/component/logo.tsx +0 -43
  69. package/src/cli/cmd/tui/component/prompt/autocomplete.tsx +0 -654
  70. package/src/cli/cmd/tui/component/prompt/history.tsx +0 -108
  71. package/src/cli/cmd/tui/component/prompt/index.tsx +0 -1078
  72. package/src/cli/cmd/tui/component/prompt/stash.tsx +0 -101
  73. package/src/cli/cmd/tui/component/textarea-keybindings.ts +0 -73
  74. package/src/cli/cmd/tui/component/tips.ts +0 -92
  75. package/src/cli/cmd/tui/component/todo-item.tsx +0 -32
  76. package/src/cli/cmd/tui/context/args.tsx +0 -14
  77. package/src/cli/cmd/tui/context/directory.ts +0 -13
  78. package/src/cli/cmd/tui/context/exit.tsx +0 -23
  79. package/src/cli/cmd/tui/context/helper.tsx +0 -25
  80. package/src/cli/cmd/tui/context/keybind.tsx +0 -101
  81. package/src/cli/cmd/tui/context/kv.tsx +0 -49
  82. package/src/cli/cmd/tui/context/local.tsx +0 -392
  83. package/src/cli/cmd/tui/context/prompt.tsx +0 -18
  84. package/src/cli/cmd/tui/context/route.tsx +0 -46
  85. package/src/cli/cmd/tui/context/sdk.tsx +0 -75
  86. package/src/cli/cmd/tui/context/sync.tsx +0 -384
  87. package/src/cli/cmd/tui/context/theme/aura.json +0 -69
  88. package/src/cli/cmd/tui/context/theme/ayu.json +0 -80
  89. package/src/cli/cmd/tui/context/theme/catppuccin-frappe.json +0 -233
  90. package/src/cli/cmd/tui/context/theme/catppuccin-macchiato.json +0 -233
  91. package/src/cli/cmd/tui/context/theme/catppuccin.json +0 -112
  92. package/src/cli/cmd/tui/context/theme/chad.json +0 -245
  93. package/src/cli/cmd/tui/context/theme/cobalt2.json +0 -228
  94. package/src/cli/cmd/tui/context/theme/cursor.json +0 -249
  95. package/src/cli/cmd/tui/context/theme/dracula.json +0 -219
  96. package/src/cli/cmd/tui/context/theme/everforest.json +0 -241
  97. package/src/cli/cmd/tui/context/theme/flexoki.json +0 -237
  98. package/src/cli/cmd/tui/context/theme/github.json +0 -233
  99. package/src/cli/cmd/tui/context/theme/gruvbox.json +0 -95
  100. package/src/cli/cmd/tui/context/theme/kanagawa.json +0 -77
  101. package/src/cli/cmd/tui/context/theme/lucent-orng.json +0 -227
  102. package/src/cli/cmd/tui/context/theme/material.json +0 -235
  103. package/src/cli/cmd/tui/context/theme/matrix.json +0 -77
  104. package/src/cli/cmd/tui/context/theme/mercury.json +0 -252
  105. package/src/cli/cmd/tui/context/theme/monokai.json +0 -221
  106. package/src/cli/cmd/tui/context/theme/nightowl.json +0 -221
  107. package/src/cli/cmd/tui/context/theme/nord.json +0 -223
  108. package/src/cli/cmd/tui/context/theme/one-dark.json +0 -84
  109. package/src/cli/cmd/tui/context/theme/orng.json +0 -245
  110. package/src/cli/cmd/tui/context/theme/osaka-jade.json +0 -93
  111. package/src/cli/cmd/tui/context/theme/palenight.json +0 -222
  112. package/src/cli/cmd/tui/context/theme/rosepine.json +0 -234
  113. package/src/cli/cmd/tui/context/theme/solarized.json +0 -223
  114. package/src/cli/cmd/tui/context/theme/synthwave84.json +0 -226
  115. package/src/cli/cmd/tui/context/theme/tokyonight.json +0 -243
  116. package/src/cli/cmd/tui/context/theme/vercel.json +0 -245
  117. package/src/cli/cmd/tui/context/theme/vesper.json +0 -218
  118. package/src/cli/cmd/tui/context/theme/zenburn.json +0 -223
  119. package/src/cli/cmd/tui/context/theme.tsx +0 -1137
  120. package/src/cli/cmd/tui/event.ts +0 -46
  121. package/src/cli/cmd/tui/routes/home.tsx +0 -138
  122. package/src/cli/cmd/tui/routes/session/dialog-fork-from-timeline.tsx +0 -64
  123. package/src/cli/cmd/tui/routes/session/dialog-message.tsx +0 -109
  124. package/src/cli/cmd/tui/routes/session/dialog-subagent.tsx +0 -26
  125. package/src/cli/cmd/tui/routes/session/dialog-timeline.tsx +0 -47
  126. package/src/cli/cmd/tui/routes/session/footer.tsx +0 -88
  127. package/src/cli/cmd/tui/routes/session/header.tsx +0 -125
  128. package/src/cli/cmd/tui/routes/session/index.tsx +0 -1814
  129. package/src/cli/cmd/tui/routes/session/permission.tsx +0 -416
  130. package/src/cli/cmd/tui/routes/session/sidebar.tsx +0 -318
  131. package/src/cli/cmd/tui/spawn.ts +0 -48
  132. package/src/cli/cmd/tui/thread.ts +0 -111
  133. package/src/cli/cmd/tui/ui/dialog-alert.tsx +0 -57
  134. package/src/cli/cmd/tui/ui/dialog-confirm.tsx +0 -83
  135. package/src/cli/cmd/tui/ui/dialog-export-options.tsx +0 -204
  136. package/src/cli/cmd/tui/ui/dialog-help.tsx +0 -38
  137. package/src/cli/cmd/tui/ui/dialog-prompt.tsx +0 -77
  138. package/src/cli/cmd/tui/ui/dialog-select.tsx +0 -345
  139. package/src/cli/cmd/tui/ui/dialog.tsx +0 -171
  140. package/src/cli/cmd/tui/ui/link.tsx +0 -28
  141. package/src/cli/cmd/tui/ui/spinner.ts +0 -368
  142. package/src/cli/cmd/tui/ui/toast.tsx +0 -100
  143. package/src/cli/cmd/tui/util/clipboard.ts +0 -127
  144. package/src/cli/cmd/tui/util/editor.ts +0 -32
  145. package/src/cli/cmd/tui/util/signal.ts +0 -7
  146. package/src/cli/cmd/tui/util/terminal.ts +0 -114
  147. package/src/cli/cmd/tui/util/transcript.ts +0 -98
  148. package/src/cli/cmd/tui/worker.ts +0 -68
  149. package/src/cli/cmd/uninstall.ts +0 -344
  150. package/src/cli/cmd/upgrade.ts +0 -67
  151. package/src/cli/cmd/web.ts +0 -73
  152. package/src/cli/error.ts +0 -56
  153. package/src/cli/network.ts +0 -53
  154. package/src/cli/ui.ts +0 -87
  155. package/src/cli/upgrade.ts +0 -25
  156. package/src/command/index.ts +0 -131
  157. package/src/command/template/initialize.txt +0 -10
  158. package/src/command/template/review.txt +0 -97
  159. package/src/config/config.ts +0 -1124
  160. package/src/config/markdown.ts +0 -41
  161. package/src/env/index.ts +0 -26
  162. package/src/file/ignore.ts +0 -83
  163. package/src/file/index.ts +0 -411
  164. package/src/file/ripgrep.ts +0 -402
  165. package/src/file/time.ts +0 -64
  166. package/src/file/watcher.ts +0 -117
  167. package/src/flag/flag.ts +0 -52
  168. package/src/format/formatter.ts +0 -359
  169. package/src/format/index.ts +0 -137
  170. package/src/global/index.ts +0 -55
  171. package/src/id/id.ts +0 -73
  172. package/src/ide/index.ts +0 -77
  173. package/src/index.ts +0 -159
  174. package/src/installation/index.ts +0 -198
  175. package/src/lsp/client.ts +0 -252
  176. package/src/lsp/index.ts +0 -485
  177. package/src/lsp/language.ts +0 -119
  178. package/src/lsp/server.ts +0 -2023
  179. package/src/mcp/auth.ts +0 -135
  180. package/src/mcp/index.ts +0 -874
  181. package/src/mcp/oauth-callback.ts +0 -200
  182. package/src/mcp/oauth-provider.ts +0 -154
  183. package/src/patch/index.ts +0 -622
  184. package/src/permission/arity.ts +0 -163
  185. package/src/permission/index.ts +0 -210
  186. package/src/permission/next.ts +0 -268
  187. package/src/plugin/index.ts +0 -106
  188. package/src/project/bootstrap.ts +0 -31
  189. package/src/project/instance.ts +0 -78
  190. package/src/project/project.ts +0 -263
  191. package/src/project/state.ts +0 -65
  192. package/src/project/vcs.ts +0 -76
  193. package/src/provider/auth.ts +0 -143
  194. package/src/provider/models-macro.ts +0 -4
  195. package/src/provider/models.ts +0 -77
  196. package/src/provider/provider.ts +0 -516
  197. package/src/provider/transform.ts +0 -114
  198. package/src/pty/index.ts +0 -212
  199. package/src/server/error.ts +0 -36
  200. package/src/server/mdns.ts +0 -57
  201. package/src/server/project.ts +0 -79
  202. package/src/server/server.ts +0 -2866
  203. package/src/server/tui.ts +0 -71
  204. package/src/session/compaction.ts +0 -225
  205. package/src/session/index.ts +0 -469
  206. package/src/session/llm.ts +0 -213
  207. package/src/session/message-v2.ts +0 -742
  208. package/src/session/message.ts +0 -189
  209. package/src/session/processor.ts +0 -402
  210. package/src/session/prompt/anthropic-20250930.txt +0 -166
  211. package/src/session/prompt/anthropic.txt +0 -105
  212. package/src/session/prompt/anthropic_spoof.txt +0 -1
  213. package/src/session/prompt/beast.txt +0 -147
  214. package/src/session/prompt/build-switch.txt +0 -5
  215. package/src/session/prompt/codex.txt +0 -318
  216. package/src/session/prompt/copilot-gpt-5.txt +0 -143
  217. package/src/session/prompt/gemini.txt +0 -155
  218. package/src/session/prompt/max-steps.txt +0 -16
  219. package/src/session/prompt/plan-reminder-anthropic.txt +0 -67
  220. package/src/session/prompt/plan.txt +0 -26
  221. package/src/session/prompt/qwen.txt +0 -109
  222. package/src/session/prompt.ts +0 -1621
  223. package/src/session/retry.ts +0 -90
  224. package/src/session/revert.ts +0 -108
  225. package/src/session/status.ts +0 -76
  226. package/src/session/summary.ts +0 -194
  227. package/src/session/system.ts +0 -108
  228. package/src/session/todo.ts +0 -37
  229. package/src/share/share-next.ts +0 -194
  230. package/src/share/share.ts +0 -23
  231. package/src/shell/shell.ts +0 -67
  232. package/src/skill/index.ts +0 -1
  233. package/src/skill/skill.ts +0 -124
  234. package/src/snapshot/index.ts +0 -197
  235. package/src/storage/storage.ts +0 -226
  236. package/src/tool/bash.ts +0 -262
  237. package/src/tool/bash.txt +0 -116
  238. package/src/tool/batch.ts +0 -175
  239. package/src/tool/batch.txt +0 -24
  240. package/src/tool/codesearch.ts +0 -132
  241. package/src/tool/codesearch.txt +0 -12
  242. package/src/tool/edit.ts +0 -655
  243. package/src/tool/edit.txt +0 -10
  244. package/src/tool/glob.ts +0 -75
  245. package/src/tool/glob.txt +0 -6
  246. package/src/tool/grep.ts +0 -132
  247. package/src/tool/grep.txt +0 -8
  248. package/src/tool/invalid.ts +0 -17
  249. package/src/tool/ls.ts +0 -119
  250. package/src/tool/ls.txt +0 -1
  251. package/src/tool/lsp.ts +0 -94
  252. package/src/tool/lsp.txt +0 -19
  253. package/src/tool/multiedit.ts +0 -46
  254. package/src/tool/multiedit.txt +0 -41
  255. package/src/tool/patch.ts +0 -210
  256. package/src/tool/patch.txt +0 -1
  257. package/src/tool/read.ts +0 -191
  258. package/src/tool/read.txt +0 -12
  259. package/src/tool/registry.ts +0 -137
  260. package/src/tool/skill.ts +0 -77
  261. package/src/tool/task.ts +0 -167
  262. package/src/tool/task.txt +0 -60
  263. package/src/tool/todo.ts +0 -53
  264. package/src/tool/todoread.txt +0 -14
  265. package/src/tool/todowrite.txt +0 -167
  266. package/src/tool/tool.ts +0 -73
  267. package/src/tool/webfetch.ts +0 -182
  268. package/src/tool/webfetch.txt +0 -13
  269. package/src/tool/websearch.ts +0 -144
  270. package/src/tool/websearch.txt +0 -11
  271. package/src/tool/write.ts +0 -84
  272. package/src/tool/write.txt +0 -8
  273. package/src/util/archive.ts +0 -16
  274. package/src/util/color.ts +0 -19
  275. package/src/util/context.ts +0 -25
  276. package/src/util/defer.ts +0 -12
  277. package/src/util/eventloop.ts +0 -20
  278. package/src/util/filesystem.ts +0 -83
  279. package/src/util/fn.ts +0 -11
  280. package/src/util/iife.ts +0 -3
  281. package/src/util/keybind.ts +0 -102
  282. package/src/util/lazy.ts +0 -18
  283. package/src/util/locale.ts +0 -81
  284. package/src/util/lock.ts +0 -98
  285. package/src/util/log.ts +0 -180
  286. package/src/util/queue.ts +0 -32
  287. package/src/util/rpc.ts +0 -42
  288. package/src/util/scrap.ts +0 -10
  289. package/src/util/signal.ts +0 -12
  290. package/src/util/timeout.ts +0 -14
  291. package/src/util/token.ts +0 -7
  292. package/src/util/wildcard.ts +0 -54
  293. package/src/worktree/index.ts +0 -217
  294. package/sst-env.d.ts +0 -9
  295. package/test/agent/agent.test.ts +0 -448
  296. package/test/bun.test.ts +0 -53
  297. package/test/cli/github-action.test.ts +0 -129
  298. package/test/cli/github-remote.test.ts +0 -80
  299. package/test/cli/tui/transcript.test.ts +0 -297
  300. package/test/config/agent-color.test.ts +0 -66
  301. package/test/config/config.test.ts +0 -870
  302. package/test/config/markdown.test.ts +0 -89
  303. package/test/file/ignore.test.ts +0 -10
  304. package/test/file/path-traversal.test.ts +0 -115
  305. package/test/fixture/fixture.ts +0 -45
  306. package/test/fixture/lsp/fake-lsp-server.js +0 -77
  307. package/test/ide/ide.test.ts +0 -82
  308. package/test/keybind.test.ts +0 -421
  309. package/test/lsp/client.test.ts +0 -95
  310. package/test/mcp/headers.test.ts +0 -153
  311. package/test/patch/patch.test.ts +0 -348
  312. package/test/permission/arity.test.ts +0 -33
  313. package/test/permission/next.test.ts +0 -652
  314. package/test/preload.ts +0 -63
  315. package/test/project/project.test.ts +0 -120
  316. package/test/provider/amazon-bedrock.test.ts +0 -236
  317. package/test/provider/provider.test.ts +0 -2127
  318. package/test/provider/transform.test.ts +0 -980
  319. package/test/server/session-select.test.ts +0 -78
  320. package/test/session/compaction.test.ts +0 -251
  321. package/test/session/message-v2.test.ts +0 -570
  322. package/test/session/retry.test.ts +0 -131
  323. package/test/session/revert-compact.test.ts +0 -285
  324. package/test/session/session.test.ts +0 -71
  325. package/test/skill/skill.test.ts +0 -185
  326. package/test/snapshot/snapshot.test.ts +0 -939
  327. package/test/tool/__snapshots__/tool.test.ts.snap +0 -9
  328. package/test/tool/bash.test.ts +0 -232
  329. package/test/tool/grep.test.ts +0 -109
  330. package/test/tool/patch.test.ts +0 -261
  331. package/test/tool/read.test.ts +0 -167
  332. package/test/util/iife.test.ts +0 -36
  333. package/test/util/lazy.test.ts +0 -50
  334. package/test/util/timeout.test.ts +0 -21
  335. package/test/util/wildcard.test.ts +0 -55
  336. package/tsconfig.json +0 -16
  337. /package/{script/postinstall.mjs → postinstall.mjs} +0 -0
package/test/provider/transform.test.ts
@@ -1,980 +0,0 @@
- import { describe, expect, test } from "bun:test"
- import { ProviderTransform } from "../../src/provider/transform"
-
- const OUTPUT_TOKEN_MAX = 32000
-
- describe("ProviderTransform.options - setCacheKey", () => {
-   const sessionID = "test-session-123"
-
-   const mockModel = {
-     id: "anthropic/claude-3-5-sonnet",
-     providerID: "anthropic",
-     api: {
-       id: "claude-3-5-sonnet-20241022",
-       url: "https://api.anthropic.com",
-       npm: "@ai-sdk/anthropic",
-     },
-     name: "Claude 3.5 Sonnet",
-     capabilities: {
-       temperature: true,
-       reasoning: false,
-       attachment: true,
-       toolcall: true,
-       input: { text: true, audio: false, image: true, video: false, pdf: true },
-       output: { text: true, audio: false, image: false, video: false, pdf: false },
-       interleaved: false,
-     },
-     cost: {
-       input: 0.003,
-       output: 0.015,
-       cache: { read: 0.0003, write: 0.00375 },
-     },
-     limit: {
-       context: 200000,
-       output: 8192,
-     },
-     status: "active",
-     options: {},
-     headers: {},
-   } as any
-
-   test("should set promptCacheKey when providerOptions.setCacheKey is true", () => {
-     const result = ProviderTransform.options(mockModel, sessionID, { setCacheKey: true })
-     expect(result.promptCacheKey).toBe(sessionID)
-   })
-
-   test("should not set promptCacheKey when providerOptions.setCacheKey is false", () => {
-     const result = ProviderTransform.options(mockModel, sessionID, { setCacheKey: false })
-     expect(result.promptCacheKey).toBeUndefined()
-   })
-
-   test("should not set promptCacheKey when providerOptions is undefined", () => {
-     const result = ProviderTransform.options(mockModel, sessionID, undefined)
-     expect(result.promptCacheKey).toBeUndefined()
-   })
-
-   test("should not set promptCacheKey when providerOptions does not have setCacheKey", () => {
-     const result = ProviderTransform.options(mockModel, sessionID, {})
-     expect(result.promptCacheKey).toBeUndefined()
-   })
-
-   test("should set promptCacheKey for openai provider regardless of setCacheKey", () => {
-     const openaiModel = {
-       ...mockModel,
-       providerID: "openai",
-       api: {
-         id: "gpt-4",
-         url: "https://api.openai.com",
-         npm: "@ai-sdk/openai",
-       },
-     }
-     const result = ProviderTransform.options(openaiModel, sessionID, {})
-     expect(result.promptCacheKey).toBe(sessionID)
-   })
- })
-
- describe("ProviderTransform.maxOutputTokens", () => {
-   test("returns 32k when modelLimit > 32k", () => {
-     const modelLimit = 100000
-     const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
-     expect(result).toBe(OUTPUT_TOKEN_MAX)
-   })
-
-   test("returns modelLimit when modelLimit < 32k", () => {
-     const modelLimit = 16000
-     const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
-     expect(result).toBe(16000)
-   })
-
-   describe("azure", () => {
-     test("returns 32k when modelLimit > 32k", () => {
-       const modelLimit = 100000
-       const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
-       expect(result).toBe(OUTPUT_TOKEN_MAX)
-     })
-
-     test("returns modelLimit when modelLimit < 32k", () => {
-       const modelLimit = 16000
-       const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
-       expect(result).toBe(16000)
-     })
-   })
-
-   describe("bedrock", () => {
-     test("returns 32k when modelLimit > 32k", () => {
-       const modelLimit = 100000
-       const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
-       expect(result).toBe(OUTPUT_TOKEN_MAX)
-     })
-
-     test("returns modelLimit when modelLimit < 32k", () => {
-       const modelLimit = 16000
-       const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
-       expect(result).toBe(16000)
-     })
-   })
-
-   describe("anthropic without thinking options", () => {
-     test("returns 32k when modelLimit > 32k", () => {
-       const modelLimit = 100000
-       const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
-       expect(result).toBe(OUTPUT_TOKEN_MAX)
-     })
-
-     test("returns modelLimit when modelLimit < 32k", () => {
-       const modelLimit = 16000
-       const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
-       expect(result).toBe(16000)
-     })
-   })
-
-   describe("anthropic with thinking options", () => {
-     test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
-       const modelLimit = 100000
-       const options = {
-         thinking: {
-           type: "enabled",
-           budgetTokens: 10000,
-         },
-       }
-       const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
-       expect(result).toBe(OUTPUT_TOKEN_MAX)
-     })
-
-     test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
-       const modelLimit = 50000
-       const options = {
-         thinking: {
-           type: "enabled",
-           budgetTokens: 30000,
-         },
-       }
-       const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
-       expect(result).toBe(20000)
-     })
-
-     test("returns 32k when thinking type is not enabled", () => {
-       const modelLimit = 100000
-       const options = {
-         thinking: {
-           type: "disabled",
-           budgetTokens: 10000,
-         },
-       }
-       const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
-       expect(result).toBe(OUTPUT_TOKEN_MAX)
-     })
-   })
- })
-
- describe("ProviderTransform.schema - gemini array items", () => {
-   test("adds missing items for array properties", () => {
-     const geminiModel = {
-       providerID: "google",
-       api: {
-         id: "gemini-3-pro",
-       },
-     } as any
-
-     const schema = {
-       type: "object",
-       properties: {
-         nodes: { type: "array" },
-         edges: { type: "array", items: { type: "string" } },
-       },
-     } as any
-
-     const result = ProviderTransform.schema(geminiModel, schema) as any
-
-     expect(result.properties.nodes.items).toBeDefined()
-     expect(result.properties.edges.items.type).toBe("string")
-   })
- })
-
- describe("ProviderTransform.message - DeepSeek reasoning content", () => {
-   test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
-     const msgs = [
-       {
-         role: "assistant",
-         content: [
-           { type: "reasoning", text: "Let me think about this..." },
-           {
-             type: "tool-call",
-             toolCallId: "test",
-             toolName: "bash",
-             input: { command: "echo hello" },
-           },
-         ],
-       },
-     ] as any[]
-
-     const result = ProviderTransform.message(msgs, {
-       id: "deepseek/deepseek-chat",
-       providerID: "deepseek",
-       api: {
-         id: "deepseek-chat",
-         url: "https://api.deepseek.com",
-         npm: "@ai-sdk/openai-compatible",
-       },
-       name: "DeepSeek Chat",
-       capabilities: {
-         temperature: true,
-         reasoning: true,
-         attachment: false,
-         toolcall: true,
-         input: { text: true, audio: false, image: false, video: false, pdf: false },
-         output: { text: true, audio: false, image: false, video: false, pdf: false },
-         interleaved: {
-           field: "reasoning_content",
-         },
-       },
-       cost: {
-         input: 0.001,
-         output: 0.002,
-         cache: { read: 0.0001, write: 0.0002 },
-       },
-       limit: {
-         context: 128000,
-         output: 8192,
-       },
-       status: "active",
-       options: {},
-       headers: {},
-       release_date: "2023-04-01",
-     })
-
-     expect(result).toHaveLength(1)
-     expect(result[0].content).toEqual([
-       {
-         type: "tool-call",
-         toolCallId: "test",
-         toolName: "bash",
-         input: { command: "echo hello" },
-       },
-     ])
-     expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
-   })
-
-   test("Non-DeepSeek providers leave reasoning content unchanged", () => {
-     const msgs = [
-       {
-         role: "assistant",
-         content: [
-           { type: "reasoning", text: "Should not be processed" },
-           { type: "text", text: "Answer" },
-         ],
-       },
-     ] as any[]
-
-     const result = ProviderTransform.message(msgs, {
-       id: "openai/gpt-4",
-       providerID: "openai",
-       api: {
-         id: "gpt-4",
-         url: "https://api.openai.com",
-         npm: "@ai-sdk/openai",
-       },
-       name: "GPT-4",
-       capabilities: {
-         temperature: true,
-         reasoning: false,
-         attachment: true,
-         toolcall: true,
-         input: { text: true, audio: false, image: true, video: false, pdf: false },
-         output: { text: true, audio: false, image: false, video: false, pdf: false },
-         interleaved: false,
-       },
-       cost: {
-         input: 0.03,
-         output: 0.06,
-         cache: { read: 0.001, write: 0.002 },
-       },
-       limit: {
-         context: 128000,
-         output: 4096,
-       },
-       status: "active",
-       options: {},
-       headers: {},
-       release_date: "2023-04-01",
-     })
-
-     expect(result[0].content).toEqual([
-       { type: "reasoning", text: "Should not be processed" },
-       { type: "text", text: "Answer" },
-     ])
-     expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
-   })
- })
-
- describe("ProviderTransform.message - empty image handling", () => {
-   const mockModel = {
-     id: "anthropic/claude-3-5-sonnet",
-     providerID: "anthropic",
-     api: {
-       id: "claude-3-5-sonnet-20241022",
-       url: "https://api.anthropic.com",
-       npm: "@ai-sdk/anthropic",
-     },
-     name: "Claude 3.5 Sonnet",
-     capabilities: {
-       temperature: true,
-       reasoning: false,
-       attachment: true,
-       toolcall: true,
-       input: { text: true, audio: false, image: true, video: false, pdf: true },
-       output: { text: true, audio: false, image: false, video: false, pdf: false },
-       interleaved: false,
-     },
-     cost: {
-       input: 0.003,
-       output: 0.015,
-       cache: { read: 0.0003, write: 0.00375 },
-     },
-     limit: {
-       context: 200000,
-       output: 8192,
-     },
-     status: "active",
-     options: {},
-     headers: {},
-   } as any
-
-   test("should replace empty base64 image with error text", () => {
-     const msgs = [
-       {
-         role: "user",
-         content: [
-           { type: "text", text: "What is in this image?" },
-           { type: "image", image: "data:image/png;base64," },
-         ],
-       },
-     ] as any[]
-
-     const result = ProviderTransform.message(msgs, mockModel)
-
-     expect(result).toHaveLength(1)
-     expect(result[0].content).toHaveLength(2)
-     expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
-     expect(result[0].content[1]).toEqual({
-       type: "text",
-       text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
-     })
-   })
-
-   test("should keep valid base64 images unchanged", () => {
-     const validBase64 =
-       "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
-     const msgs = [
-       {
-         role: "user",
-         content: [
-           { type: "text", text: "What is in this image?" },
-           { type: "image", image: `data:image/png;base64,${validBase64}` },
-         ],
-       },
-     ] as any[]
-
-     const result = ProviderTransform.message(msgs, mockModel)
-
-     expect(result).toHaveLength(1)
-     expect(result[0].content).toHaveLength(2)
-     expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
-     expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
-   })
-
-   test("should handle mixed valid and empty images", () => {
-     const validBase64 =
-       "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
-     const msgs = [
-       {
-         role: "user",
-         content: [
-           { type: "text", text: "Compare these images" },
-           { type: "image", image: `data:image/png;base64,${validBase64}` },
-           { type: "image", image: "data:image/jpeg;base64," },
-         ],
-       },
-     ] as any[]
-
-     const result = ProviderTransform.message(msgs, mockModel)
-
-     expect(result).toHaveLength(1)
-     expect(result[0].content).toHaveLength(3)
-     expect(result[0].content[0]).toEqual({ type: "text", text: "Compare these images" })
-     expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
-     expect(result[0].content[2]).toEqual({
-       type: "text",
-       text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
-     })
-   })
- })
-
- describe("ProviderTransform.variants", () => {
-   const createMockModel = (overrides: Partial<any> = {}): any => ({
-     id: "test/test-model",
-     providerID: "test",
-     api: {
-       id: "test-model",
-       url: "https://api.test.com",
-       npm: "@ai-sdk/openai",
-     },
-     name: "Test Model",
-     capabilities: {
-       temperature: true,
-       reasoning: true,
-       attachment: true,
-       toolcall: true,
-       input: { text: true, audio: false, image: true, video: false, pdf: false },
-       output: { text: true, audio: false, image: false, video: false, pdf: false },
-       interleaved: false,
-     },
-     cost: {
-       input: 0.001,
-       output: 0.002,
-       cache: { read: 0.0001, write: 0.0002 },
-     },
-     limit: {
-       context: 128000,
-       output: 8192,
-     },
-     status: "active",
-     options: {},
-     headers: {},
-     release_date: "2024-01-01",
-     ...overrides,
-   })
-
-   test("returns empty object when model has no reasoning capabilities", () => {
-     const model = createMockModel({
-       capabilities: { reasoning: false },
-     })
-     const result = ProviderTransform.variants(model)
-     expect(result).toEqual({})
-   })
-
-   test("deepseek returns empty object", () => {
-     const model = createMockModel({
-       id: "deepseek/deepseek-chat",
-       providerID: "deepseek",
-       api: {
-         id: "deepseek-chat",
-         url: "https://api.deepseek.com",
-         npm: "@ai-sdk/openai-compatible",
-       },
-     })
-     const result = ProviderTransform.variants(model)
-     expect(result).toEqual({})
-   })
-
-   test("minimax returns empty object", () => {
-     const model = createMockModel({
-       id: "minimax/minimax-model",
-       providerID: "minimax",
-       api: {
-         id: "minimax-model",
-         url: "https://api.minimax.com",
-         npm: "@ai-sdk/openai-compatible",
-       },
-     })
-     const result = ProviderTransform.variants(model)
-     expect(result).toEqual({})
-   })
-
-   test("glm returns empty object", () => {
-     const model = createMockModel({
-       id: "glm/glm-4",
-       providerID: "glm",
-       api: {
-         id: "glm-4",
-         url: "https://api.glm.com",
-         npm: "@ai-sdk/openai-compatible",
-       },
-     })
-     const result = ProviderTransform.variants(model)
-     expect(result).toEqual({})
-   })
-
-   test("mistral returns empty object", () => {
-     const model = createMockModel({
-       id: "mistral/mistral-large",
-       providerID: "mistral",
-       api: {
-         id: "mistral-large-latest",
-         url: "https://api.mistral.com",
-         npm: "@ai-sdk/mistral",
-       },
-     })
-     const result = ProviderTransform.variants(model)
-     expect(result).toEqual({})
-   })
-
-   describe("@openrouter/ai-sdk-provider", () => {
-     test("returns empty object for non-qualifying models", () => {
-       const model = createMockModel({
-         id: "openrouter/test-model",
-         providerID: "openrouter",
-         api: {
-           id: "test-model",
-           url: "https://openrouter.ai",
-           npm: "@openrouter/ai-sdk-provider",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(result).toEqual({})
-     })
-
-     test("gpt models return OPENAI_EFFORTS with reasoning", () => {
-       const model = createMockModel({
-         id: "openrouter/gpt-4",
-         providerID: "openrouter",
-         api: {
-           id: "gpt-4",
-           url: "https://openrouter.ai",
-           npm: "@openrouter/ai-sdk-provider",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
-       expect(result.low).toEqual({ reasoning: { effort: "low" } })
-       expect(result.high).toEqual({ reasoning: { effort: "high" } })
-     })
-
-     test("gemini-3 returns OPENAI_EFFORTS with reasoning", () => {
-       const model = createMockModel({
-         id: "openrouter/gemini-3-5-pro",
-         providerID: "openrouter",
-         api: {
-           id: "gemini-3-5-pro",
-           url: "https://openrouter.ai",
-           npm: "@openrouter/ai-sdk-provider",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
-     })
-
-     test("grok-4 returns OPENAI_EFFORTS with reasoning", () => {
-       const model = createMockModel({
-         id: "openrouter/grok-4",
-         providerID: "openrouter",
-         api: {
-           id: "grok-4",
-           url: "https://openrouter.ai",
-           npm: "@openrouter/ai-sdk-provider",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
-     })
-   })
-
-   describe("@ai-sdk/gateway", () => {
-     test("returns OPENAI_EFFORTS with reasoningEffort", () => {
-       const model = createMockModel({
-         id: "gateway/gateway-model",
-         providerID: "gateway",
-         api: {
-           id: "gateway-model",
-           url: "https://gateway.ai",
-           npm: "@ai-sdk/gateway",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
-       expect(result.low).toEqual({ reasoningEffort: "low" })
-       expect(result.high).toEqual({ reasoningEffort: "high" })
-     })
-   })
-
-   describe("@ai-sdk/cerebras", () => {
-     test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
-       const model = createMockModel({
-         id: "cerebras/llama-4",
-         providerID: "cerebras",
-         api: {
-           id: "llama-4-sc",
-           url: "https://api.cerebras.ai",
-           npm: "@ai-sdk/cerebras",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["low", "medium", "high"])
-       expect(result.low).toEqual({ reasoningEffort: "low" })
-       expect(result.high).toEqual({ reasoningEffort: "high" })
-     })
-   })
-
-   describe("@ai-sdk/togetherai", () => {
-     test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
-       const model = createMockModel({
-         id: "togetherai/llama-4",
-         providerID: "togetherai",
-         api: {
-           id: "llama-4-sc",
-           url: "https://api.togetherai.com",
-           npm: "@ai-sdk/togetherai",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["low", "medium", "high"])
-       expect(result.low).toEqual({ reasoningEffort: "low" })
-       expect(result.high).toEqual({ reasoningEffort: "high" })
-     })
-   })
-
-   describe("@ai-sdk/xai", () => {
-     test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
-       const model = createMockModel({
-         id: "xai/grok-3",
-         providerID: "xai",
-         api: {
-           id: "grok-3",
-           url: "https://api.x.ai",
-           npm: "@ai-sdk/xai",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["low", "medium", "high"])
-       expect(result.low).toEqual({ reasoningEffort: "low" })
-       expect(result.high).toEqual({ reasoningEffort: "high" })
-     })
-   })
-
-   describe("@ai-sdk/deepinfra", () => {
-     test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
-       const model = createMockModel({
-         id: "deepinfra/llama-4",
-         providerID: "deepinfra",
-         api: {
-           id: "llama-4-sc",
-           url: "https://api.deepinfra.com",
-           npm: "@ai-sdk/deepinfra",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["low", "medium", "high"])
-       expect(result.low).toEqual({ reasoningEffort: "low" })
-       expect(result.high).toEqual({ reasoningEffort: "high" })
-     })
-   })
-
-   describe("@ai-sdk/openai-compatible", () => {
-     test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
-       const model = createMockModel({
-         id: "custom-provider/custom-model",
-         providerID: "custom-provider",
-         api: {
-           id: "custom-model",
-           url: "https://api.custom.com",
-           npm: "@ai-sdk/openai-compatible",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["low", "medium", "high"])
-       expect(result.low).toEqual({ reasoningEffort: "low" })
-       expect(result.high).toEqual({ reasoningEffort: "high" })
-     })
-   })
-
-   describe("@ai-sdk/azure", () => {
-     test("o1-mini returns empty object", () => {
-       const model = createMockModel({
-         id: "o1-mini",
-         providerID: "azure",
-         api: {
-           id: "o1-mini",
-           url: "https://azure.com",
-           npm: "@ai-sdk/azure",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(result).toEqual({})
-     })
-
-     test("standard azure models return custom efforts with reasoningSummary", () => {
-       const model = createMockModel({
-         id: "o1",
-         providerID: "azure",
-         api: {
-           id: "o1",
-           url: "https://azure.com",
-           npm: "@ai-sdk/azure",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["low", "medium", "high"])
-       expect(result.low).toEqual({
-         reasoningEffort: "low",
-         reasoningSummary: "auto",
-         include: ["reasoning.encrypted_content"],
-       })
-     })
-
-     test("gpt-5 adds minimal effort", () => {
-       const model = createMockModel({
-         id: "gpt-5",
-         providerID: "azure",
-         api: {
-           id: "gpt-5",
-           url: "https://azure.com",
-           npm: "@ai-sdk/azure",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
-     })
-   })
-
-   describe("@ai-sdk/openai", () => {
-     test("gpt-5-pro returns empty object", () => {
-       const model = createMockModel({
-         id: "gpt-5-pro",
-         providerID: "openai",
-         api: {
-           id: "gpt-5-pro",
-           url: "https://api.openai.com",
-           npm: "@ai-sdk/openai",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(result).toEqual({})
-     })
-
-     test("standard openai models return custom efforts with reasoningSummary", () => {
-       const model = createMockModel({
-         id: "gpt-5",
-         providerID: "openai",
-         api: {
-           id: "gpt-5",
-           url: "https://api.openai.com",
-           npm: "@ai-sdk/openai",
-         },
-         release_date: "2024-06-01",
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
-       expect(result.low).toEqual({
-         reasoningEffort: "low",
-         reasoningSummary: "auto",
-         include: ["reasoning.encrypted_content"],
-       })
-     })
-
-     test("models after 2025-11-13 include 'none' effort", () => {
-       const model = createMockModel({
-         id: "gpt-5-nano",
-         providerID: "openai",
-         api: {
-           id: "gpt-5-nano",
-           url: "https://api.openai.com",
-           npm: "@ai-sdk/openai",
-         },
-         release_date: "2025-11-14",
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high"])
-     })
-
-     test("models after 2025-12-04 include 'xhigh' effort", () => {
-       const model = createMockModel({
-         id: "openai/gpt-5-chat",
-         providerID: "openai",
-         api: {
-           id: "gpt-5-chat",
-           url: "https://api.openai.com",
-           npm: "@ai-sdk/openai",
-         },
-         release_date: "2025-12-05",
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
-     })
-   })
-
-   describe("@ai-sdk/anthropic", () => {
-     test("returns high and max with thinking config", () => {
-       const model = createMockModel({
-         id: "anthropic/claude-4",
-         providerID: "anthropic",
-         api: {
-           id: "claude-4",
-           url: "https://api.anthropic.com",
-           npm: "@ai-sdk/anthropic",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["high", "max"])
-       expect(result.high).toEqual({
-         thinking: {
-           type: "enabled",
-           budgetTokens: 16000,
-         },
-       })
-       expect(result.max).toEqual({
-         thinking: {
-           type: "enabled",
-           budgetTokens: 31999,
-         },
-       })
-     })
-   })
-
-   describe("@ai-sdk/amazon-bedrock", () => {
-     test("returns WIDELY_SUPPORTED_EFFORTS with reasoningConfig", () => {
-       const model = createMockModel({
-         id: "bedrock/llama-4",
-         providerID: "bedrock",
-         api: {
-           id: "llama-4-sc",
-           url: "https://bedrock.amazonaws.com",
-           npm: "@ai-sdk/amazon-bedrock",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["low", "medium", "high"])
-       expect(result.low).toEqual({
-         reasoningConfig: {
-           type: "enabled",
-           maxReasoningEffort: "low",
-         },
-       })
-     })
-   })
-
-   describe("@ai-sdk/google", () => {
-     test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
-       const model = createMockModel({
-         id: "google/gemini-2.5-pro",
-         providerID: "google",
-         api: {
-           id: "gemini-2.5-pro",
-           url: "https://generativelanguage.googleapis.com",
-           npm: "@ai-sdk/google",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["high", "max"])
-       expect(result.high).toEqual({
-         thinkingConfig: {
-           includeThoughts: true,
-           thinkingBudget: 16000,
-         },
-       })
-       expect(result.max).toEqual({
-         thinkingConfig: {
-           includeThoughts: true,
-           thinkingBudget: 24576,
-         },
-       })
-     })
-
-     test("other gemini models return low and high with thinkingLevel", () => {
-       const model = createMockModel({
-         id: "google/gemini-2.0-pro",
-         providerID: "google",
-         api: {
-           id: "gemini-2.0-pro",
-           url: "https://generativelanguage.googleapis.com",
-           npm: "@ai-sdk/google",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["low", "high"])
-       expect(result.low).toEqual({
-         includeThoughts: true,
-         thinkingLevel: "low",
-       })
-       expect(result.high).toEqual({
-         includeThoughts: true,
-         thinkingLevel: "high",
-       })
-     })
-   })
-
-   describe("@ai-sdk/google-vertex", () => {
-     test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
-       const model = createMockModel({
-         id: "google-vertex/gemini-2.5-pro",
-         providerID: "google-vertex",
-         api: {
-           id: "gemini-2.5-pro",
-           url: "https://vertexai.googleapis.com",
-           npm: "@ai-sdk/google-vertex",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["high", "max"])
-     })
-
-     test("other vertex models return low and high with thinkingLevel", () => {
-       const model = createMockModel({
-         id: "google-vertex/gemini-2.0-pro",
-         providerID: "google-vertex",
-         api: {
-           id: "gemini-2.0-pro",
-           url: "https://vertexai.googleapis.com",
-           npm: "@ai-sdk/google-vertex",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["low", "high"])
-     })
-   })
-
-   describe("@ai-sdk/cohere", () => {
-     test("returns empty object", () => {
-       const model = createMockModel({
-         id: "cohere/command-r",
-         providerID: "cohere",
-         api: {
-           id: "command-r",
-           url: "https://api.cohere.com",
-           npm: "@ai-sdk/cohere",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(result).toEqual({})
-     })
-   })
-
-   describe("@ai-sdk/groq", () => {
-     test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
-       const model = createMockModel({
-         id: "groq/llama-4",
-         providerID: "groq",
-         api: {
-           id: "llama-4-sc",
-           url: "https://api.groq.com",
-           npm: "@ai-sdk/groq",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
-       expect(result.none).toEqual({
-         includeThoughts: true,
-         thinkingLevel: "none",
-       })
-       expect(result.low).toEqual({
-         includeThoughts: true,
-         thinkingLevel: "low",
-       })
-     })
-   })
-
-   describe("@ai-sdk/perplexity", () => {
-     test("returns empty object", () => {
-       const model = createMockModel({
-         id: "perplexity/sonar-plus",
-         providerID: "perplexity",
-         api: {
-           id: "sonar-plus",
-           url: "https://api.perplexity.ai",
-           npm: "@ai-sdk/perplexity",
-         },
-       })
-       const result = ProviderTransform.variants(model)
-       expect(result).toEqual({})
-     })
-   })
- })