nuwaxcode 1.1.34 → 1.1.44

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (398) hide show
  1. package/bin/nuwaxcode +19 -1
  2. package/package.json +15 -117
  3. package/{script/postinstall.mjs → postinstall.mjs} +18 -6
  4. package/AGENTS.md +0 -27
  5. package/Dockerfile +0 -18
  6. package/README.md +0 -15
  7. package/bunfig.toml +0 -7
  8. package/parsers-config.ts +0 -253
  9. package/script/build.ts +0 -172
  10. package/script/publish-registries.ts +0 -187
  11. package/script/publish.ts +0 -70
  12. package/script/schema.ts +0 -47
  13. package/src/acp/README.md +0 -164
  14. package/src/acp/agent.ts +0 -1280
  15. package/src/acp/session.ts +0 -111
  16. package/src/acp/types.ts +0 -24
  17. package/src/agent/agent.ts +0 -332
  18. package/src/agent/generate.txt +0 -75
  19. package/src/agent/prompt/compaction.txt +0 -12
  20. package/src/agent/prompt/explore.txt +0 -18
  21. package/src/agent/prompt/summary.txt +0 -11
  22. package/src/agent/prompt/title.txt +0 -43
  23. package/src/auth/index.ts +0 -73
  24. package/src/bun/index.ts +0 -134
  25. package/src/bus/bus-event.ts +0 -43
  26. package/src/bus/global.ts +0 -10
  27. package/src/bus/index.ts +0 -105
  28. package/src/cli/bootstrap.ts +0 -17
  29. package/src/cli/cmd/acp.ts +0 -69
  30. package/src/cli/cmd/agent.ts +0 -257
  31. package/src/cli/cmd/auth.ts +0 -400
  32. package/src/cli/cmd/cmd.ts +0 -7
  33. package/src/cli/cmd/debug/agent.ts +0 -166
  34. package/src/cli/cmd/debug/config.ts +0 -16
  35. package/src/cli/cmd/debug/file.ts +0 -97
  36. package/src/cli/cmd/debug/index.ts +0 -48
  37. package/src/cli/cmd/debug/lsp.ts +0 -52
  38. package/src/cli/cmd/debug/ripgrep.ts +0 -87
  39. package/src/cli/cmd/debug/scrap.ts +0 -16
  40. package/src/cli/cmd/debug/skill.ts +0 -16
  41. package/src/cli/cmd/debug/snapshot.ts +0 -52
  42. package/src/cli/cmd/export.ts +0 -88
  43. package/src/cli/cmd/generate.ts +0 -38
  44. package/src/cli/cmd/github.ts +0 -1548
  45. package/src/cli/cmd/import.ts +0 -98
  46. package/src/cli/cmd/mcp.ts +0 -755
  47. package/src/cli/cmd/models.ts +0 -77
  48. package/src/cli/cmd/pr.ts +0 -112
  49. package/src/cli/cmd/run.ts +0 -395
  50. package/src/cli/cmd/serve.ts +0 -20
  51. package/src/cli/cmd/session.ts +0 -135
  52. package/src/cli/cmd/stats.ts +0 -402
  53. package/src/cli/cmd/tui/app.tsx +0 -761
  54. package/src/cli/cmd/tui/attach.ts +0 -31
  55. package/src/cli/cmd/tui/component/border.tsx +0 -21
  56. package/src/cli/cmd/tui/component/dialog-agent.tsx +0 -31
  57. package/src/cli/cmd/tui/component/dialog-command.tsx +0 -148
  58. package/src/cli/cmd/tui/component/dialog-mcp.tsx +0 -86
  59. package/src/cli/cmd/tui/component/dialog-model.tsx +0 -234
  60. package/src/cli/cmd/tui/component/dialog-provider.tsx +0 -256
  61. package/src/cli/cmd/tui/component/dialog-session-list.tsx +0 -114
  62. package/src/cli/cmd/tui/component/dialog-session-rename.tsx +0 -31
  63. package/src/cli/cmd/tui/component/dialog-stash.tsx +0 -87
  64. package/src/cli/cmd/tui/component/dialog-status.tsx +0 -164
  65. package/src/cli/cmd/tui/component/dialog-tag.tsx +0 -44
  66. package/src/cli/cmd/tui/component/dialog-theme-list.tsx +0 -50
  67. package/src/cli/cmd/tui/component/logo.tsx +0 -88
  68. package/src/cli/cmd/tui/component/prompt/autocomplete.tsx +0 -632
  69. package/src/cli/cmd/tui/component/prompt/frecency.tsx +0 -89
  70. package/src/cli/cmd/tui/component/prompt/history.tsx +0 -108
  71. package/src/cli/cmd/tui/component/prompt/index.tsx +0 -1096
  72. package/src/cli/cmd/tui/component/prompt/stash.tsx +0 -101
  73. package/src/cli/cmd/tui/component/textarea-keybindings.ts +0 -73
  74. package/src/cli/cmd/tui/component/tips.tsx +0 -153
  75. package/src/cli/cmd/tui/component/todo-item.tsx +0 -32
  76. package/src/cli/cmd/tui/context/args.tsx +0 -14
  77. package/src/cli/cmd/tui/context/directory.ts +0 -13
  78. package/src/cli/cmd/tui/context/exit.tsx +0 -23
  79. package/src/cli/cmd/tui/context/helper.tsx +0 -25
  80. package/src/cli/cmd/tui/context/keybind.tsx +0 -101
  81. package/src/cli/cmd/tui/context/kv.tsx +0 -52
  82. package/src/cli/cmd/tui/context/local.tsx +0 -402
  83. package/src/cli/cmd/tui/context/prompt.tsx +0 -18
  84. package/src/cli/cmd/tui/context/route.tsx +0 -46
  85. package/src/cli/cmd/tui/context/sdk.tsx +0 -94
  86. package/src/cli/cmd/tui/context/sync.tsx +0 -427
  87. package/src/cli/cmd/tui/context/theme/aura.json +0 -69
  88. package/src/cli/cmd/tui/context/theme/ayu.json +0 -80
  89. package/src/cli/cmd/tui/context/theme/carbonfox.json +0 -248
  90. package/src/cli/cmd/tui/context/theme/catppuccin-frappe.json +0 -233
  91. package/src/cli/cmd/tui/context/theme/catppuccin-macchiato.json +0 -233
  92. package/src/cli/cmd/tui/context/theme/catppuccin.json +0 -112
  93. package/src/cli/cmd/tui/context/theme/cobalt2.json +0 -228
  94. package/src/cli/cmd/tui/context/theme/cursor.json +0 -249
  95. package/src/cli/cmd/tui/context/theme/dracula.json +0 -219
  96. package/src/cli/cmd/tui/context/theme/everforest.json +0 -241
  97. package/src/cli/cmd/tui/context/theme/flexoki.json +0 -237
  98. package/src/cli/cmd/tui/context/theme/github.json +0 -233
  99. package/src/cli/cmd/tui/context/theme/gruvbox.json +0 -95
  100. package/src/cli/cmd/tui/context/theme/kanagawa.json +0 -77
  101. package/src/cli/cmd/tui/context/theme/lucent-orng.json +0 -237
  102. package/src/cli/cmd/tui/context/theme/material.json +0 -235
  103. package/src/cli/cmd/tui/context/theme/matrix.json +0 -77
  104. package/src/cli/cmd/tui/context/theme/mercury.json +0 -252
  105. package/src/cli/cmd/tui/context/theme/monokai.json +0 -221
  106. package/src/cli/cmd/tui/context/theme/nightowl.json +0 -221
  107. package/src/cli/cmd/tui/context/theme/nord.json +0 -223
  108. package/src/cli/cmd/tui/context/theme/one-dark.json +0 -84
  109. package/src/cli/cmd/tui/context/theme/orng.json +0 -249
  110. package/src/cli/cmd/tui/context/theme/osaka-jade.json +0 -93
  111. package/src/cli/cmd/tui/context/theme/palenight.json +0 -222
  112. package/src/cli/cmd/tui/context/theme/rosepine.json +0 -234
  113. package/src/cli/cmd/tui/context/theme/solarized.json +0 -223
  114. package/src/cli/cmd/tui/context/theme/synthwave84.json +0 -226
  115. package/src/cli/cmd/tui/context/theme/tokyonight.json +0 -243
  116. package/src/cli/cmd/tui/context/theme/vercel.json +0 -245
  117. package/src/cli/cmd/tui/context/theme/vesper.json +0 -218
  118. package/src/cli/cmd/tui/context/theme/zenburn.json +0 -223
  119. package/src/cli/cmd/tui/context/theme.tsx +0 -1152
  120. package/src/cli/cmd/tui/event.ts +0 -48
  121. package/src/cli/cmd/tui/routes/home.tsx +0 -140
  122. package/src/cli/cmd/tui/routes/session/dialog-fork-from-timeline.tsx +0 -64
  123. package/src/cli/cmd/tui/routes/session/dialog-message.tsx +0 -109
  124. package/src/cli/cmd/tui/routes/session/dialog-subagent.tsx +0 -26
  125. package/src/cli/cmd/tui/routes/session/dialog-timeline.tsx +0 -47
  126. package/src/cli/cmd/tui/routes/session/footer.tsx +0 -91
  127. package/src/cli/cmd/tui/routes/session/header.tsx +0 -136
  128. package/src/cli/cmd/tui/routes/session/index.tsx +0 -2050
  129. package/src/cli/cmd/tui/routes/session/permission.tsx +0 -495
  130. package/src/cli/cmd/tui/routes/session/question.tsx +0 -435
  131. package/src/cli/cmd/tui/routes/session/sidebar.tsx +0 -312
  132. package/src/cli/cmd/tui/thread.ts +0 -165
  133. package/src/cli/cmd/tui/ui/dialog-alert.tsx +0 -57
  134. package/src/cli/cmd/tui/ui/dialog-confirm.tsx +0 -83
  135. package/src/cli/cmd/tui/ui/dialog-export-options.tsx +0 -204
  136. package/src/cli/cmd/tui/ui/dialog-help.tsx +0 -38
  137. package/src/cli/cmd/tui/ui/dialog-prompt.tsx +0 -77
  138. package/src/cli/cmd/tui/ui/dialog-select.tsx +0 -354
  139. package/src/cli/cmd/tui/ui/dialog.tsx +0 -167
  140. package/src/cli/cmd/tui/ui/link.tsx +0 -28
  141. package/src/cli/cmd/tui/ui/spinner.ts +0 -368
  142. package/src/cli/cmd/tui/ui/toast.tsx +0 -100
  143. package/src/cli/cmd/tui/util/clipboard.ts +0 -160
  144. package/src/cli/cmd/tui/util/editor.ts +0 -32
  145. package/src/cli/cmd/tui/util/signal.ts +0 -7
  146. package/src/cli/cmd/tui/util/terminal.ts +0 -114
  147. package/src/cli/cmd/tui/util/transcript.ts +0 -98
  148. package/src/cli/cmd/tui/worker.ts +0 -152
  149. package/src/cli/cmd/uninstall.ts +0 -357
  150. package/src/cli/cmd/upgrade.ts +0 -73
  151. package/src/cli/cmd/web.ts +0 -81
  152. package/src/cli/error.ts +0 -57
  153. package/src/cli/network.ts +0 -53
  154. package/src/cli/ui.ts +0 -84
  155. package/src/cli/upgrade.ts +0 -25
  156. package/src/command/index.ts +0 -131
  157. package/src/command/template/initialize.txt +0 -10
  158. package/src/command/template/review.txt +0 -99
  159. package/src/config/config.ts +0 -1255
  160. package/src/config/markdown.ts +0 -93
  161. package/src/env/index.ts +0 -26
  162. package/src/file/ignore.ts +0 -83
  163. package/src/file/index.ts +0 -411
  164. package/src/file/ripgrep.ts +0 -409
  165. package/src/file/time.ts +0 -64
  166. package/src/file/watcher.ts +0 -118
  167. package/src/flag/flag.ts +0 -54
  168. package/src/format/formatter.ts +0 -359
  169. package/src/format/index.ts +0 -137
  170. package/src/global/index.ts +0 -55
  171. package/src/id/id.ts +0 -83
  172. package/src/ide/index.ts +0 -76
  173. package/src/index.ts +0 -159
  174. package/src/installation/index.ts +0 -246
  175. package/src/lsp/client.ts +0 -252
  176. package/src/lsp/index.ts +0 -485
  177. package/src/lsp/language.ts +0 -119
  178. package/src/lsp/server.ts +0 -2046
  179. package/src/mcp/auth.ts +0 -135
  180. package/src/mcp/index.ts +0 -926
  181. package/src/mcp/oauth-callback.ts +0 -200
  182. package/src/mcp/oauth-provider.ts +0 -154
  183. package/src/patch/index.ts +0 -680
  184. package/src/permission/arity.ts +0 -163
  185. package/src/permission/index.ts +0 -210
  186. package/src/permission/next.ts +0 -269
  187. package/src/plugin/codex.ts +0 -493
  188. package/src/plugin/copilot.ts +0 -269
  189. package/src/plugin/index.ts +0 -135
  190. package/src/project/bootstrap.ts +0 -35
  191. package/src/project/instance.ts +0 -91
  192. package/src/project/project.ts +0 -320
  193. package/src/project/state.ts +0 -66
  194. package/src/project/vcs.ts +0 -76
  195. package/src/provider/auth.ts +0 -147
  196. package/src/provider/models-macro.ts +0 -11
  197. package/src/provider/models.ts +0 -112
  198. package/src/provider/provider.ts +0 -1219
  199. package/src/provider/sdk/openai-compatible/src/README.md +0 -5
  200. package/src/provider/sdk/openai-compatible/src/index.ts +0 -2
  201. package/src/provider/sdk/openai-compatible/src/openai-compatible-provider.ts +0 -100
  202. package/src/provider/sdk/openai-compatible/src/responses/convert-to-openai-responses-input.ts +0 -303
  203. package/src/provider/sdk/openai-compatible/src/responses/map-openai-responses-finish-reason.ts +0 -22
  204. package/src/provider/sdk/openai-compatible/src/responses/openai-config.ts +0 -18
  205. package/src/provider/sdk/openai-compatible/src/responses/openai-error.ts +0 -22
  206. package/src/provider/sdk/openai-compatible/src/responses/openai-responses-api-types.ts +0 -207
  207. package/src/provider/sdk/openai-compatible/src/responses/openai-responses-language-model.ts +0 -1732
  208. package/src/provider/sdk/openai-compatible/src/responses/openai-responses-prepare-tools.ts +0 -177
  209. package/src/provider/sdk/openai-compatible/src/responses/openai-responses-settings.ts +0 -1
  210. package/src/provider/sdk/openai-compatible/src/responses/tool/code-interpreter.ts +0 -88
  211. package/src/provider/sdk/openai-compatible/src/responses/tool/file-search.ts +0 -128
  212. package/src/provider/sdk/openai-compatible/src/responses/tool/image-generation.ts +0 -115
  213. package/src/provider/sdk/openai-compatible/src/responses/tool/local-shell.ts +0 -65
  214. package/src/provider/sdk/openai-compatible/src/responses/tool/web-search-preview.ts +0 -104
  215. package/src/provider/sdk/openai-compatible/src/responses/tool/web-search.ts +0 -103
  216. package/src/provider/transform.ts +0 -724
  217. package/src/pty/index.ts +0 -229
  218. package/src/question/index.ts +0 -171
  219. package/src/scheduler/index.ts +0 -61
  220. package/src/server/error.ts +0 -36
  221. package/src/server/mdns.ts +0 -59
  222. package/src/server/routes/config.ts +0 -92
  223. package/src/server/routes/experimental.ts +0 -157
  224. package/src/server/routes/file.ts +0 -197
  225. package/src/server/routes/global.ts +0 -135
  226. package/src/server/routes/mcp.ts +0 -225
  227. package/src/server/routes/permission.ts +0 -68
  228. package/src/server/routes/project.ts +0 -82
  229. package/src/server/routes/provider.ts +0 -165
  230. package/src/server/routes/pty.ts +0 -169
  231. package/src/server/routes/question.ts +0 -98
  232. package/src/server/routes/session.ts +0 -935
  233. package/src/server/routes/tui.ts +0 -379
  234. package/src/server/server.ts +0 -578
  235. package/src/session/compaction.ts +0 -225
  236. package/src/session/index.ts +0 -488
  237. package/src/session/llm.ts +0 -279
  238. package/src/session/message-v2.ts +0 -702
  239. package/src/session/message.ts +0 -189
  240. package/src/session/processor.ts +0 -406
  241. package/src/session/prompt/anthropic-20250930.txt +0 -166
  242. package/src/session/prompt/anthropic.txt +0 -105
  243. package/src/session/prompt/anthropic_spoof.txt +0 -1
  244. package/src/session/prompt/beast.txt +0 -147
  245. package/src/session/prompt/build-switch.txt +0 -5
  246. package/src/session/prompt/codex.txt +0 -73
  247. package/src/session/prompt/codex_header.txt +0 -72
  248. package/src/session/prompt/copilot-gpt-5.txt +0 -143
  249. package/src/session/prompt/gemini.txt +0 -155
  250. package/src/session/prompt/max-steps.txt +0 -16
  251. package/src/session/prompt/plan-reminder-anthropic.txt +0 -67
  252. package/src/session/prompt/plan.txt +0 -26
  253. package/src/session/prompt/qwen.txt +0 -109
  254. package/src/session/prompt.ts +0 -1805
  255. package/src/session/retry.ts +0 -90
  256. package/src/session/revert.ts +0 -108
  257. package/src/session/status.ts +0 -76
  258. package/src/session/summary.ts +0 -150
  259. package/src/session/system.ts +0 -138
  260. package/src/session/todo.ts +0 -37
  261. package/src/share/share-next.ts +0 -194
  262. package/src/share/share.ts +0 -87
  263. package/src/shell/shell.ts +0 -67
  264. package/src/skill/index.ts +0 -1
  265. package/src/skill/skill.ts +0 -136
  266. package/src/snapshot/index.ts +0 -236
  267. package/src/storage/storage.ts +0 -227
  268. package/src/tool/apply_patch.ts +0 -277
  269. package/src/tool/apply_patch.txt +0 -1
  270. package/src/tool/bash.ts +0 -258
  271. package/src/tool/bash.txt +0 -115
  272. package/src/tool/batch.ts +0 -175
  273. package/src/tool/batch.txt +0 -24
  274. package/src/tool/codesearch.ts +0 -132
  275. package/src/tool/codesearch.txt +0 -12
  276. package/src/tool/edit.ts +0 -645
  277. package/src/tool/edit.txt +0 -10
  278. package/src/tool/external-directory.ts +0 -32
  279. package/src/tool/glob.ts +0 -77
  280. package/src/tool/glob.txt +0 -6
  281. package/src/tool/grep.ts +0 -154
  282. package/src/tool/grep.txt +0 -8
  283. package/src/tool/invalid.ts +0 -17
  284. package/src/tool/ls.ts +0 -121
  285. package/src/tool/ls.txt +0 -1
  286. package/src/tool/lsp.ts +0 -96
  287. package/src/tool/lsp.txt +0 -19
  288. package/src/tool/multiedit.ts +0 -46
  289. package/src/tool/multiedit.txt +0 -41
  290. package/src/tool/plan-enter.txt +0 -14
  291. package/src/tool/plan-exit.txt +0 -13
  292. package/src/tool/plan.ts +0 -130
  293. package/src/tool/question.ts +0 -33
  294. package/src/tool/question.txt +0 -10
  295. package/src/tool/read.ts +0 -202
  296. package/src/tool/read.txt +0 -12
  297. package/src/tool/registry.ts +0 -158
  298. package/src/tool/skill.ts +0 -75
  299. package/src/tool/task.ts +0 -188
  300. package/src/tool/task.txt +0 -60
  301. package/src/tool/todo.ts +0 -53
  302. package/src/tool/todoread.txt +0 -14
  303. package/src/tool/todowrite.txt +0 -167
  304. package/src/tool/tool.ts +0 -88
  305. package/src/tool/truncation.ts +0 -106
  306. package/src/tool/webfetch.ts +0 -182
  307. package/src/tool/webfetch.txt +0 -13
  308. package/src/tool/websearch.ts +0 -150
  309. package/src/tool/websearch.txt +0 -14
  310. package/src/tool/write.ts +0 -80
  311. package/src/tool/write.txt +0 -8
  312. package/src/util/archive.ts +0 -16
  313. package/src/util/color.ts +0 -19
  314. package/src/util/context.ts +0 -25
  315. package/src/util/defer.ts +0 -12
  316. package/src/util/eventloop.ts +0 -20
  317. package/src/util/filesystem.ts +0 -93
  318. package/src/util/fn.ts +0 -11
  319. package/src/util/format.ts +0 -20
  320. package/src/util/iife.ts +0 -3
  321. package/src/util/keybind.ts +0 -103
  322. package/src/util/lazy.ts +0 -18
  323. package/src/util/locale.ts +0 -81
  324. package/src/util/lock.ts +0 -98
  325. package/src/util/log.ts +0 -180
  326. package/src/util/queue.ts +0 -32
  327. package/src/util/rpc.ts +0 -66
  328. package/src/util/scrap.ts +0 -10
  329. package/src/util/signal.ts +0 -12
  330. package/src/util/timeout.ts +0 -14
  331. package/src/util/token.ts +0 -7
  332. package/src/util/wildcard.ts +0 -56
  333. package/src/worktree/index.ts +0 -217
  334. package/sst-env.d.ts +0 -9
  335. package/test/acp/event-subscription.test.ts +0 -436
  336. package/test/acp/system-prompt.test.ts +0 -262
  337. package/test/agent/agent.test.ts +0 -638
  338. package/test/bun.test.ts +0 -53
  339. package/test/cli/github-action.test.ts +0 -129
  340. package/test/cli/github-remote.test.ts +0 -80
  341. package/test/cli/tui/transcript.test.ts +0 -297
  342. package/test/config/agent-color.test.ts +0 -66
  343. package/test/config/config.test.ts +0 -1414
  344. package/test/config/fixtures/empty-frontmatter.md +0 -4
  345. package/test/config/fixtures/frontmatter.md +0 -28
  346. package/test/config/fixtures/no-frontmatter.md +0 -1
  347. package/test/config/markdown.test.ts +0 -192
  348. package/test/file/ignore.test.ts +0 -10
  349. package/test/file/path-traversal.test.ts +0 -198
  350. package/test/fixture/fixture.ts +0 -45
  351. package/test/fixture/lsp/fake-lsp-server.js +0 -77
  352. package/test/ide/ide.test.ts +0 -82
  353. package/test/keybind.test.ts +0 -421
  354. package/test/lsp/client.test.ts +0 -95
  355. package/test/mcp/headers.test.ts +0 -153
  356. package/test/mcp/oauth-browser.test.ts +0 -261
  357. package/test/patch/patch.test.ts +0 -348
  358. package/test/permission/arity.test.ts +0 -33
  359. package/test/permission/next.test.ts +0 -652
  360. package/test/permission-task.test.ts +0 -319
  361. package/test/plugin/codex.test.ts +0 -123
  362. package/test/preload.ts +0 -65
  363. package/test/project/project.test.ts +0 -120
  364. package/test/provider/amazon-bedrock.test.ts +0 -268
  365. package/test/provider/gitlab-duo.test.ts +0 -286
  366. package/test/provider/provider.test.ts +0 -2149
  367. package/test/provider/transform.test.ts +0 -1596
  368. package/test/question/question.test.ts +0 -300
  369. package/test/scheduler.test.ts +0 -73
  370. package/test/server/session-list.test.ts +0 -39
  371. package/test/server/session-select.test.ts +0 -78
  372. package/test/session/compaction.test.ts +0 -293
  373. package/test/session/llm.test.ts +0 -90
  374. package/test/session/message-v2.test.ts +0 -662
  375. package/test/session/retry.test.ts +0 -131
  376. package/test/session/revert-compact.test.ts +0 -285
  377. package/test/session/session.test.ts +0 -71
  378. package/test/skill/skill.test.ts +0 -185
  379. package/test/snapshot/snapshot.test.ts +0 -939
  380. package/test/tool/__snapshots__/tool.test.ts.snap +0 -9
  381. package/test/tool/apply_patch.test.ts +0 -515
  382. package/test/tool/bash.test.ts +0 -320
  383. package/test/tool/external-directory.test.ts +0 -126
  384. package/test/tool/fixtures/large-image.png +0 -0
  385. package/test/tool/fixtures/models-api.json +0 -33453
  386. package/test/tool/grep.test.ts +0 -109
  387. package/test/tool/question.test.ts +0 -105
  388. package/test/tool/read.test.ts +0 -332
  389. package/test/tool/registry.test.ts +0 -76
  390. package/test/tool/truncation.test.ts +0 -159
  391. package/test/util/filesystem.test.ts +0 -39
  392. package/test/util/format.test.ts +0 -59
  393. package/test/util/iife.test.ts +0 -36
  394. package/test/util/lazy.test.ts +0 -50
  395. package/test/util/lock.test.ts +0 -72
  396. package/test/util/timeout.test.ts +0 -21
  397. package/test/util/wildcard.test.ts +0 -75
  398. package/tsconfig.json +0 -16
@@ -1,1732 +0,0 @@
1
- import {
2
- APICallError,
3
- type LanguageModelV2,
4
- type LanguageModelV2CallWarning,
5
- type LanguageModelV2Content,
6
- type LanguageModelV2FinishReason,
7
- type LanguageModelV2ProviderDefinedTool,
8
- type LanguageModelV2StreamPart,
9
- type LanguageModelV2Usage,
10
- type SharedV2ProviderMetadata,
11
- } from "@ai-sdk/provider"
12
- import {
13
- combineHeaders,
14
- createEventSourceResponseHandler,
15
- createJsonResponseHandler,
16
- generateId,
17
- parseProviderOptions,
18
- type ParseResult,
19
- postJsonToApi,
20
- } from "@ai-sdk/provider-utils"
21
- import { z } from "zod/v4"
22
- import type { OpenAIConfig } from "./openai-config"
23
- import { openaiFailedResponseHandler } from "./openai-error"
24
- import { codeInterpreterInputSchema, codeInterpreterOutputSchema } from "./tool/code-interpreter"
25
- import { fileSearchOutputSchema } from "./tool/file-search"
26
- import { imageGenerationOutputSchema } from "./tool/image-generation"
27
- import { convertToOpenAIResponsesInput } from "./convert-to-openai-responses-input"
28
- import { mapOpenAIResponseFinishReason } from "./map-openai-responses-finish-reason"
29
- import type { OpenAIResponsesIncludeOptions, OpenAIResponsesIncludeValue } from "./openai-responses-api-types"
30
- import { prepareResponsesTools } from "./openai-responses-prepare-tools"
31
- import type { OpenAIResponsesModelId } from "./openai-responses-settings"
32
- import { localShellInputSchema } from "./tool/local-shell"
33
-
34
- const webSearchCallItem = z.object({
35
- type: z.literal("web_search_call"),
36
- id: z.string(),
37
- status: z.string(),
38
- action: z
39
- .discriminatedUnion("type", [
40
- z.object({
41
- type: z.literal("search"),
42
- query: z.string().nullish(),
43
- }),
44
- z.object({
45
- type: z.literal("open_page"),
46
- url: z.string(),
47
- }),
48
- z.object({
49
- type: z.literal("find"),
50
- url: z.string(),
51
- pattern: z.string(),
52
- }),
53
- ])
54
- .nullish(),
55
- })
56
-
57
- const fileSearchCallItem = z.object({
58
- type: z.literal("file_search_call"),
59
- id: z.string(),
60
- queries: z.array(z.string()),
61
- results: z
62
- .array(
63
- z.object({
64
- attributes: z.record(z.string(), z.unknown()),
65
- file_id: z.string(),
66
- filename: z.string(),
67
- score: z.number(),
68
- text: z.string(),
69
- }),
70
- )
71
- .nullish(),
72
- })
73
-
74
- const codeInterpreterCallItem = z.object({
75
- type: z.literal("code_interpreter_call"),
76
- id: z.string(),
77
- code: z.string().nullable(),
78
- container_id: z.string(),
79
- outputs: z
80
- .array(
81
- z.discriminatedUnion("type", [
82
- z.object({ type: z.literal("logs"), logs: z.string() }),
83
- z.object({ type: z.literal("image"), url: z.string() }),
84
- ]),
85
- )
86
- .nullable(),
87
- })
88
-
89
- const localShellCallItem = z.object({
90
- type: z.literal("local_shell_call"),
91
- id: z.string(),
92
- call_id: z.string(),
93
- action: z.object({
94
- type: z.literal("exec"),
95
- command: z.array(z.string()),
96
- timeout_ms: z.number().optional(),
97
- user: z.string().optional(),
98
- working_directory: z.string().optional(),
99
- env: z.record(z.string(), z.string()).optional(),
100
- }),
101
- })
102
-
103
- const imageGenerationCallItem = z.object({
104
- type: z.literal("image_generation_call"),
105
- id: z.string(),
106
- result: z.string(),
107
- })
108
-
109
- /**
110
- * `top_logprobs` request body argument can be set to an integer between
111
- * 0 and 20 specifying the number of most likely tokens to return at each
112
- * token position, each with an associated log probability.
113
- *
114
- * @see https://platform.openai.com/docs/api-reference/responses/create#responses_create-top_logprobs
115
- */
116
- const TOP_LOGPROBS_MAX = 20
117
-
118
- const LOGPROBS_SCHEMA = z.array(
119
- z.object({
120
- token: z.string(),
121
- logprob: z.number(),
122
- top_logprobs: z.array(
123
- z.object({
124
- token: z.string(),
125
- logprob: z.number(),
126
- }),
127
- ),
128
- }),
129
- )
130
-
131
- export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
132
- readonly specificationVersion = "v2"
133
-
134
- readonly modelId: OpenAIResponsesModelId
135
-
136
- private readonly config: OpenAIConfig
137
-
138
- constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig) {
139
- this.modelId = modelId
140
- this.config = config
141
- }
142
-
143
- readonly supportedUrls: Record<string, RegExp[]> = {
144
- "image/*": [/^https?:\/\/.*$/],
145
- "application/pdf": [/^https?:\/\/.*$/],
146
- }
147
-
148
- get provider(): string {
149
- return this.config.provider
150
- }
151
-
152
- private async getArgs({
153
- maxOutputTokens,
154
- temperature,
155
- stopSequences,
156
- topP,
157
- topK,
158
- presencePenalty,
159
- frequencyPenalty,
160
- seed,
161
- prompt,
162
- providerOptions,
163
- tools,
164
- toolChoice,
165
- responseFormat,
166
- }: Parameters<LanguageModelV2["doGenerate"]>[0]) {
167
- const warnings: LanguageModelV2CallWarning[] = []
168
- const modelConfig = getResponsesModelConfig(this.modelId)
169
-
170
- if (topK != null) {
171
- warnings.push({ type: "unsupported-setting", setting: "topK" })
172
- }
173
-
174
- if (seed != null) {
175
- warnings.push({ type: "unsupported-setting", setting: "seed" })
176
- }
177
-
178
- if (presencePenalty != null) {
179
- warnings.push({
180
- type: "unsupported-setting",
181
- setting: "presencePenalty",
182
- })
183
- }
184
-
185
- if (frequencyPenalty != null) {
186
- warnings.push({
187
- type: "unsupported-setting",
188
- setting: "frequencyPenalty",
189
- })
190
- }
191
-
192
- if (stopSequences != null) {
193
- warnings.push({ type: "unsupported-setting", setting: "stopSequences" })
194
- }
195
-
196
- const openaiOptions = await parseProviderOptions({
197
- provider: "openai",
198
- providerOptions,
199
- schema: openaiResponsesProviderOptionsSchema,
200
- })
201
-
202
- const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
203
- prompt,
204
- systemMessageMode: modelConfig.systemMessageMode,
205
- fileIdPrefixes: this.config.fileIdPrefixes,
206
- store: openaiOptions?.store ?? true,
207
- hasLocalShellTool: hasOpenAITool("openai.local_shell"),
208
- })
209
-
210
- warnings.push(...inputWarnings)
211
-
212
- const strictJsonSchema = openaiOptions?.strictJsonSchema ?? false
213
-
214
- let include: OpenAIResponsesIncludeOptions = openaiOptions?.include
215
-
216
- function addInclude(key: OpenAIResponsesIncludeValue) {
217
- include = include != null ? [...include, key] : [key]
218
- }
219
-
220
- function hasOpenAITool(id: string) {
221
- return tools?.find((tool) => tool.type === "provider-defined" && tool.id === id) != null
222
- }
223
-
224
- // when logprobs are requested, automatically include them:
225
- const topLogprobs =
226
- typeof openaiOptions?.logprobs === "number"
227
- ? openaiOptions?.logprobs
228
- : openaiOptions?.logprobs === true
229
- ? TOP_LOGPROBS_MAX
230
- : undefined
231
-
232
- if (topLogprobs) {
233
- addInclude("message.output_text.logprobs")
234
- }
235
-
236
- // when a web search tool is present, automatically include the sources:
237
- const webSearchToolName = (
238
- tools?.find(
239
- (tool) =>
240
- tool.type === "provider-defined" &&
241
- (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview"),
242
- ) as LanguageModelV2ProviderDefinedTool | undefined
243
- )?.name
244
-
245
- if (webSearchToolName) {
246
- addInclude("web_search_call.action.sources")
247
- }
248
-
249
- // when a code interpreter tool is present, automatically include the outputs:
250
- if (hasOpenAITool("openai.code_interpreter")) {
251
- addInclude("code_interpreter_call.outputs")
252
- }
253
-
254
- const baseArgs = {
255
- model: this.modelId,
256
- input,
257
- temperature,
258
- top_p: topP,
259
- max_output_tokens: maxOutputTokens,
260
-
261
- ...((responseFormat?.type === "json" || openaiOptions?.textVerbosity) && {
262
- text: {
263
- ...(responseFormat?.type === "json" && {
264
- format:
265
- responseFormat.schema != null
266
- ? {
267
- type: "json_schema",
268
- strict: strictJsonSchema,
269
- name: responseFormat.name ?? "response",
270
- description: responseFormat.description,
271
- schema: responseFormat.schema,
272
- }
273
- : { type: "json_object" },
274
- }),
275
- ...(openaiOptions?.textVerbosity && {
276
- verbosity: openaiOptions.textVerbosity,
277
- }),
278
- },
279
- }),
280
-
281
- // provider options:
282
- max_tool_calls: openaiOptions?.maxToolCalls,
283
- metadata: openaiOptions?.metadata,
284
- parallel_tool_calls: openaiOptions?.parallelToolCalls,
285
- previous_response_id: openaiOptions?.previousResponseId,
286
- store: openaiOptions?.store,
287
- user: openaiOptions?.user,
288
- instructions: openaiOptions?.instructions,
289
- service_tier: openaiOptions?.serviceTier,
290
- include,
291
- prompt_cache_key: openaiOptions?.promptCacheKey,
292
- safety_identifier: openaiOptions?.safetyIdentifier,
293
- top_logprobs: topLogprobs,
294
-
295
- // model-specific settings:
296
- ...(modelConfig.isReasoningModel &&
297
- (openaiOptions?.reasoningEffort != null || openaiOptions?.reasoningSummary != null) && {
298
- reasoning: {
299
- ...(openaiOptions?.reasoningEffort != null && {
300
- effort: openaiOptions.reasoningEffort,
301
- }),
302
- ...(openaiOptions?.reasoningSummary != null && {
303
- summary: openaiOptions.reasoningSummary,
304
- }),
305
- },
306
- }),
307
- ...(modelConfig.requiredAutoTruncation && {
308
- truncation: "auto",
309
- }),
310
- }
311
-
312
- if (modelConfig.isReasoningModel) {
313
- // remove unsupported settings for reasoning models
314
- // see https://platform.openai.com/docs/guides/reasoning#limitations
315
- if (baseArgs.temperature != null) {
316
- baseArgs.temperature = undefined
317
- warnings.push({
318
- type: "unsupported-setting",
319
- setting: "temperature",
320
- details: "temperature is not supported for reasoning models",
321
- })
322
- }
323
-
324
- if (baseArgs.top_p != null) {
325
- baseArgs.top_p = undefined
326
- warnings.push({
327
- type: "unsupported-setting",
328
- setting: "topP",
329
- details: "topP is not supported for reasoning models",
330
- })
331
- }
332
- } else {
333
- if (openaiOptions?.reasoningEffort != null) {
334
- warnings.push({
335
- type: "unsupported-setting",
336
- setting: "reasoningEffort",
337
- details: "reasoningEffort is not supported for non-reasoning models",
338
- })
339
- }
340
-
341
- if (openaiOptions?.reasoningSummary != null) {
342
- warnings.push({
343
- type: "unsupported-setting",
344
- setting: "reasoningSummary",
345
- details: "reasoningSummary is not supported for non-reasoning models",
346
- })
347
- }
348
- }
349
-
350
- // Validate flex processing support
351
- if (openaiOptions?.serviceTier === "flex" && !modelConfig.supportsFlexProcessing) {
352
- warnings.push({
353
- type: "unsupported-setting",
354
- setting: "serviceTier",
355
- details: "flex processing is only available for o3, o4-mini, and gpt-5 models",
356
- })
357
- // Remove from args if not supported
358
- delete (baseArgs as any).service_tier
359
- }
360
-
361
- // Validate priority processing support
362
- if (openaiOptions?.serviceTier === "priority" && !modelConfig.supportsPriorityProcessing) {
363
- warnings.push({
364
- type: "unsupported-setting",
365
- setting: "serviceTier",
366
- details:
367
- "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported",
368
- })
369
- // Remove from args if not supported
370
- delete (baseArgs as any).service_tier
371
- }
372
-
373
- const {
374
- tools: openaiTools,
375
- toolChoice: openaiToolChoice,
376
- toolWarnings,
377
- } = prepareResponsesTools({
378
- tools,
379
- toolChoice,
380
- strictJsonSchema,
381
- })
382
-
383
- return {
384
- webSearchToolName,
385
- args: {
386
- ...baseArgs,
387
- tools: openaiTools,
388
- tool_choice: openaiToolChoice,
389
- },
390
- warnings: [...warnings, ...toolWarnings],
391
- }
392
- }
393
-
394
- async doGenerate(
395
- options: Parameters<LanguageModelV2["doGenerate"]>[0],
396
- ): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
397
- const { args: body, warnings, webSearchToolName } = await this.getArgs(options)
398
- const url = this.config.url({
399
- path: "/responses",
400
- modelId: this.modelId,
401
- })
402
-
403
- const {
404
- responseHeaders,
405
- value: response,
406
- rawValue: rawResponse,
407
- } = await postJsonToApi({
408
- url,
409
- headers: combineHeaders(this.config.headers(), options.headers),
410
- body,
411
- failedResponseHandler: openaiFailedResponseHandler,
412
- successfulResponseHandler: createJsonResponseHandler(
413
- z.object({
414
- id: z.string(),
415
- created_at: z.number(),
416
- error: z
417
- .object({
418
- code: z.string(),
419
- message: z.string(),
420
- })
421
- .nullish(),
422
- model: z.string(),
423
- output: z.array(
424
- z.discriminatedUnion("type", [
425
- z.object({
426
- type: z.literal("message"),
427
- role: z.literal("assistant"),
428
- id: z.string(),
429
- content: z.array(
430
- z.object({
431
- type: z.literal("output_text"),
432
- text: z.string(),
433
- logprobs: LOGPROBS_SCHEMA.nullish(),
434
- annotations: z.array(
435
- z.discriminatedUnion("type", [
436
- z.object({
437
- type: z.literal("url_citation"),
438
- start_index: z.number(),
439
- end_index: z.number(),
440
- url: z.string(),
441
- title: z.string(),
442
- }),
443
- z.object({
444
- type: z.literal("file_citation"),
445
- file_id: z.string(),
446
- filename: z.string().nullish(),
447
- index: z.number().nullish(),
448
- start_index: z.number().nullish(),
449
- end_index: z.number().nullish(),
450
- quote: z.string().nullish(),
451
- }),
452
- z.object({
453
- type: z.literal("container_file_citation"),
454
- }),
455
- ]),
456
- ),
457
- }),
458
- ),
459
- }),
460
- webSearchCallItem,
461
- fileSearchCallItem,
462
- codeInterpreterCallItem,
463
- imageGenerationCallItem,
464
- localShellCallItem,
465
- z.object({
466
- type: z.literal("function_call"),
467
- call_id: z.string(),
468
- name: z.string(),
469
- arguments: z.string(),
470
- id: z.string(),
471
- }),
472
- z.object({
473
- type: z.literal("computer_call"),
474
- id: z.string(),
475
- status: z.string().optional(),
476
- }),
477
- z.object({
478
- type: z.literal("reasoning"),
479
- id: z.string(),
480
- encrypted_content: z.string().nullish(),
481
- summary: z.array(
482
- z.object({
483
- type: z.literal("summary_text"),
484
- text: z.string(),
485
- }),
486
- ),
487
- }),
488
- ]),
489
- ),
490
- service_tier: z.string().nullish(),
491
- incomplete_details: z.object({ reason: z.string() }).nullish(),
492
- usage: usageSchema,
493
- }),
494
- ),
495
- abortSignal: options.abortSignal,
496
- fetch: this.config.fetch,
497
- })
498
-
499
- if (response.error) {
500
- throw new APICallError({
501
- message: response.error.message,
502
- url,
503
- requestBodyValues: body,
504
- statusCode: 400,
505
- responseHeaders,
506
- responseBody: rawResponse as string,
507
- isRetryable: false,
508
- })
509
- }
510
-
511
- const content: Array<LanguageModelV2Content> = []
512
- const logprobs: Array<z.infer<typeof LOGPROBS_SCHEMA>> = []
513
-
514
- // flag that checks if there have been client-side tool calls (not executed by openai)
515
- let hasFunctionCall = false
516
-
517
- // map response content to content array
518
- for (const part of response.output) {
519
- switch (part.type) {
520
- case "reasoning": {
521
- // when there are no summary parts, we need to add an empty reasoning part:
522
- if (part.summary.length === 0) {
523
- part.summary.push({ type: "summary_text", text: "" })
524
- }
525
-
526
- for (const summary of part.summary) {
527
- content.push({
528
- type: "reasoning" as const,
529
- text: summary.text,
530
- providerMetadata: {
531
- openai: {
532
- itemId: part.id,
533
- reasoningEncryptedContent: part.encrypted_content ?? null,
534
- },
535
- },
536
- })
537
- }
538
- break
539
- }
540
-
541
- case "image_generation_call": {
542
- content.push({
543
- type: "tool-call",
544
- toolCallId: part.id,
545
- toolName: "image_generation",
546
- input: "{}",
547
- providerExecuted: true,
548
- })
549
-
550
- content.push({
551
- type: "tool-result",
552
- toolCallId: part.id,
553
- toolName: "image_generation",
554
- result: {
555
- result: part.result,
556
- } satisfies z.infer<typeof imageGenerationOutputSchema>,
557
- providerExecuted: true,
558
- })
559
-
560
- break
561
- }
562
-
563
- case "local_shell_call": {
564
- content.push({
565
- type: "tool-call",
566
- toolCallId: part.call_id,
567
- toolName: "local_shell",
568
- input: JSON.stringify({ action: part.action } satisfies z.infer<typeof localShellInputSchema>),
569
- providerMetadata: {
570
- openai: {
571
- itemId: part.id,
572
- },
573
- },
574
- })
575
-
576
- break
577
- }
578
-
579
- case "message": {
580
- for (const contentPart of part.content) {
581
- if (options.providerOptions?.openai?.logprobs && contentPart.logprobs) {
582
- logprobs.push(contentPart.logprobs)
583
- }
584
-
585
- content.push({
586
- type: "text",
587
- text: contentPart.text,
588
- providerMetadata: {
589
- openai: {
590
- itemId: part.id,
591
- },
592
- },
593
- })
594
-
595
- for (const annotation of contentPart.annotations) {
596
- if (annotation.type === "url_citation") {
597
- content.push({
598
- type: "source",
599
- sourceType: "url",
600
- id: this.config.generateId?.() ?? generateId(),
601
- url: annotation.url,
602
- title: annotation.title,
603
- })
604
- } else if (annotation.type === "file_citation") {
605
- content.push({
606
- type: "source",
607
- sourceType: "document",
608
- id: this.config.generateId?.() ?? generateId(),
609
- mediaType: "text/plain",
610
- title: annotation.quote ?? annotation.filename ?? "Document",
611
- filename: annotation.filename ?? annotation.file_id,
612
- })
613
- }
614
- }
615
- }
616
-
617
- break
618
- }
619
-
620
- case "function_call": {
621
- hasFunctionCall = true
622
-
623
- content.push({
624
- type: "tool-call",
625
- toolCallId: part.call_id,
626
- toolName: part.name,
627
- input: part.arguments,
628
- providerMetadata: {
629
- openai: {
630
- itemId: part.id,
631
- },
632
- },
633
- })
634
- break
635
- }
636
-
637
- case "web_search_call": {
638
- content.push({
639
- type: "tool-call",
640
- toolCallId: part.id,
641
- toolName: webSearchToolName ?? "web_search",
642
- input: JSON.stringify({ action: part.action }),
643
- providerExecuted: true,
644
- })
645
-
646
- content.push({
647
- type: "tool-result",
648
- toolCallId: part.id,
649
- toolName: webSearchToolName ?? "web_search",
650
- result: { status: part.status },
651
- providerExecuted: true,
652
- })
653
-
654
- break
655
- }
656
-
657
- case "computer_call": {
658
- content.push({
659
- type: "tool-call",
660
- toolCallId: part.id,
661
- toolName: "computer_use",
662
- input: "",
663
- providerExecuted: true,
664
- })
665
-
666
- content.push({
667
- type: "tool-result",
668
- toolCallId: part.id,
669
- toolName: "computer_use",
670
- result: {
671
- type: "computer_use_tool_result",
672
- status: part.status || "completed",
673
- },
674
- providerExecuted: true,
675
- })
676
- break
677
- }
678
-
679
- case "file_search_call": {
680
- content.push({
681
- type: "tool-call",
682
- toolCallId: part.id,
683
- toolName: "file_search",
684
- input: "{}",
685
- providerExecuted: true,
686
- })
687
-
688
- content.push({
689
- type: "tool-result",
690
- toolCallId: part.id,
691
- toolName: "file_search",
692
- result: {
693
- queries: part.queries,
694
- results:
695
- part.results?.map((result) => ({
696
- attributes: result.attributes,
697
- fileId: result.file_id,
698
- filename: result.filename,
699
- score: result.score,
700
- text: result.text,
701
- })) ?? null,
702
- } satisfies z.infer<typeof fileSearchOutputSchema>,
703
- providerExecuted: true,
704
- })
705
- break
706
- }
707
-
708
- case "code_interpreter_call": {
709
- content.push({
710
- type: "tool-call",
711
- toolCallId: part.id,
712
- toolName: "code_interpreter",
713
- input: JSON.stringify({
714
- code: part.code,
715
- containerId: part.container_id,
716
- } satisfies z.infer<typeof codeInterpreterInputSchema>),
717
- providerExecuted: true,
718
- })
719
-
720
- content.push({
721
- type: "tool-result",
722
- toolCallId: part.id,
723
- toolName: "code_interpreter",
724
- result: {
725
- outputs: part.outputs,
726
- } satisfies z.infer<typeof codeInterpreterOutputSchema>,
727
- providerExecuted: true,
728
- })
729
- break
730
- }
731
- }
732
- }
733
-
734
- const providerMetadata: SharedV2ProviderMetadata = {
735
- openai: { responseId: response.id },
736
- }
737
-
738
- if (logprobs.length > 0) {
739
- providerMetadata.openai.logprobs = logprobs
740
- }
741
-
742
- if (typeof response.service_tier === "string") {
743
- providerMetadata.openai.serviceTier = response.service_tier
744
- }
745
-
746
- return {
747
- content,
748
- finishReason: mapOpenAIResponseFinishReason({
749
- finishReason: response.incomplete_details?.reason,
750
- hasFunctionCall,
751
- }),
752
- usage: {
753
- inputTokens: response.usage.input_tokens,
754
- outputTokens: response.usage.output_tokens,
755
- totalTokens: response.usage.input_tokens + response.usage.output_tokens,
756
- reasoningTokens: response.usage.output_tokens_details?.reasoning_tokens ?? undefined,
757
- cachedInputTokens: response.usage.input_tokens_details?.cached_tokens ?? undefined,
758
- },
759
- request: { body },
760
- response: {
761
- id: response.id,
762
- timestamp: new Date(response.created_at * 1000),
763
- modelId: response.model,
764
- headers: responseHeaders,
765
- body: rawResponse,
766
- },
767
- providerMetadata,
768
- warnings,
769
- }
770
- }
771
-
772
- async doStream(
773
- options: Parameters<LanguageModelV2["doStream"]>[0],
774
- ): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
775
- const { args: body, warnings, webSearchToolName } = await this.getArgs(options)
776
-
777
- const { responseHeaders, value: response } = await postJsonToApi({
778
- url: this.config.url({
779
- path: "/responses",
780
- modelId: this.modelId,
781
- }),
782
- headers: combineHeaders(this.config.headers(), options.headers),
783
- body: {
784
- ...body,
785
- stream: true,
786
- },
787
- failedResponseHandler: openaiFailedResponseHandler,
788
- successfulResponseHandler: createEventSourceResponseHandler(openaiResponsesChunkSchema),
789
- abortSignal: options.abortSignal,
790
- fetch: this.config.fetch,
791
- })
792
-
793
- const self = this
794
-
795
- let finishReason: LanguageModelV2FinishReason = "unknown"
796
- const usage: LanguageModelV2Usage = {
797
- inputTokens: undefined,
798
- outputTokens: undefined,
799
- totalTokens: undefined,
800
- }
801
- const logprobs: Array<z.infer<typeof LOGPROBS_SCHEMA>> = []
802
- let responseId: string | null = null
803
- const ongoingToolCalls: Record<
804
- number,
805
- | {
806
- toolName: string
807
- toolCallId: string
808
- codeInterpreter?: {
809
- containerId: string
810
- }
811
- }
812
- | undefined
813
- > = {}
814
-
815
- // flag that checks if there have been client-side tool calls (not executed by openai)
816
- let hasFunctionCall = false
817
-
818
- // Track reasoning by output_index instead of item_id
819
- // GitHub Copilot rotates encrypted item IDs on every event
820
- const activeReasoning: Record<
821
- number,
822
- {
823
- canonicalId: string // the item.id from output_item.added
824
- encryptedContent?: string | null
825
- summaryParts: number[]
826
- }
827
- > = {}
828
-
829
- // Track current active reasoning output_index for correlating summary events
830
- let currentReasoningOutputIndex: number | null = null
831
-
832
- // Track a stable text part id for the current assistant message.
833
- // Copilot may change item_id across text deltas; normalize to one id.
834
- let currentTextId: string | null = null
835
-
836
- let serviceTier: string | undefined
837
-
838
- return {
839
- stream: response.pipeThrough(
840
- new TransformStream<ParseResult<z.infer<typeof openaiResponsesChunkSchema>>, LanguageModelV2StreamPart>({
841
- start(controller) {
842
- controller.enqueue({ type: "stream-start", warnings })
843
- },
844
-
845
- transform(chunk, controller) {
846
- if (options.includeRawChunks) {
847
- controller.enqueue({ type: "raw", rawValue: chunk.rawValue })
848
- }
849
-
850
- // handle failed chunk parsing / validation:
851
- if (!chunk.success) {
852
- finishReason = "error"
853
- controller.enqueue({ type: "error", error: chunk.error })
854
- return
855
- }
856
-
857
- const value = chunk.value
858
-
859
- if (isResponseOutputItemAddedChunk(value)) {
860
- if (value.item.type === "function_call") {
861
- ongoingToolCalls[value.output_index] = {
862
- toolName: value.item.name,
863
- toolCallId: value.item.call_id,
864
- }
865
-
866
- controller.enqueue({
867
- type: "tool-input-start",
868
- id: value.item.call_id,
869
- toolName: value.item.name,
870
- })
871
- } else if (value.item.type === "web_search_call") {
872
- ongoingToolCalls[value.output_index] = {
873
- toolName: webSearchToolName ?? "web_search",
874
- toolCallId: value.item.id,
875
- }
876
-
877
- controller.enqueue({
878
- type: "tool-input-start",
879
- id: value.item.id,
880
- toolName: webSearchToolName ?? "web_search",
881
- })
882
- } else if (value.item.type === "computer_call") {
883
- ongoingToolCalls[value.output_index] = {
884
- toolName: "computer_use",
885
- toolCallId: value.item.id,
886
- }
887
-
888
- controller.enqueue({
889
- type: "tool-input-start",
890
- id: value.item.id,
891
- toolName: "computer_use",
892
- })
893
- } else if (value.item.type === "code_interpreter_call") {
894
- ongoingToolCalls[value.output_index] = {
895
- toolName: "code_interpreter",
896
- toolCallId: value.item.id,
897
- codeInterpreter: {
898
- containerId: value.item.container_id,
899
- },
900
- }
901
-
902
- controller.enqueue({
903
- type: "tool-input-start",
904
- id: value.item.id,
905
- toolName: "code_interpreter",
906
- })
907
-
908
- controller.enqueue({
909
- type: "tool-input-delta",
910
- id: value.item.id,
911
- delta: `{"containerId":"${value.item.container_id}","code":"`,
912
- })
913
- } else if (value.item.type === "file_search_call") {
914
- controller.enqueue({
915
- type: "tool-call",
916
- toolCallId: value.item.id,
917
- toolName: "file_search",
918
- input: "{}",
919
- providerExecuted: true,
920
- })
921
- } else if (value.item.type === "image_generation_call") {
922
- controller.enqueue({
923
- type: "tool-call",
924
- toolCallId: value.item.id,
925
- toolName: "image_generation",
926
- input: "{}",
927
- providerExecuted: true,
928
- })
929
- } else if (value.item.type === "message") {
930
- // Start a stable text part for this assistant message
931
- currentTextId = value.item.id
932
- controller.enqueue({
933
- type: "text-start",
934
- id: value.item.id,
935
- providerMetadata: {
936
- openai: {
937
- itemId: value.item.id,
938
- },
939
- },
940
- })
941
- } else if (isResponseOutputItemAddedReasoningChunk(value)) {
942
- activeReasoning[value.output_index] = {
943
- canonicalId: value.item.id,
944
- encryptedContent: value.item.encrypted_content,
945
- summaryParts: [0],
946
- }
947
- currentReasoningOutputIndex = value.output_index
948
-
949
- controller.enqueue({
950
- type: "reasoning-start",
951
- id: `${value.item.id}:0`,
952
- providerMetadata: {
953
- openai: {
954
- itemId: value.item.id,
955
- reasoningEncryptedContent: value.item.encrypted_content ?? null,
956
- },
957
- },
958
- })
959
- }
960
- } else if (isResponseOutputItemDoneChunk(value)) {
961
- if (value.item.type === "function_call") {
962
- ongoingToolCalls[value.output_index] = undefined
963
- hasFunctionCall = true
964
-
965
- controller.enqueue({
966
- type: "tool-input-end",
967
- id: value.item.call_id,
968
- })
969
-
970
- controller.enqueue({
971
- type: "tool-call",
972
- toolCallId: value.item.call_id,
973
- toolName: value.item.name,
974
- input: value.item.arguments,
975
- providerMetadata: {
976
- openai: {
977
- itemId: value.item.id,
978
- },
979
- },
980
- })
981
- } else if (value.item.type === "web_search_call") {
982
- ongoingToolCalls[value.output_index] = undefined
983
-
984
- controller.enqueue({
985
- type: "tool-input-end",
986
- id: value.item.id,
987
- })
988
-
989
- controller.enqueue({
990
- type: "tool-call",
991
- toolCallId: value.item.id,
992
- toolName: "web_search",
993
- input: JSON.stringify({ action: value.item.action }),
994
- providerExecuted: true,
995
- })
996
-
997
- controller.enqueue({
998
- type: "tool-result",
999
- toolCallId: value.item.id,
1000
- toolName: "web_search",
1001
- result: { status: value.item.status },
1002
- providerExecuted: true,
1003
- })
1004
- } else if (value.item.type === "computer_call") {
1005
- ongoingToolCalls[value.output_index] = undefined
1006
-
1007
- controller.enqueue({
1008
- type: "tool-input-end",
1009
- id: value.item.id,
1010
- })
1011
-
1012
- controller.enqueue({
1013
- type: "tool-call",
1014
- toolCallId: value.item.id,
1015
- toolName: "computer_use",
1016
- input: "",
1017
- providerExecuted: true,
1018
- })
1019
-
1020
- controller.enqueue({
1021
- type: "tool-result",
1022
- toolCallId: value.item.id,
1023
- toolName: "computer_use",
1024
- result: {
1025
- type: "computer_use_tool_result",
1026
- status: value.item.status || "completed",
1027
- },
1028
- providerExecuted: true,
1029
- })
1030
- } else if (value.item.type === "file_search_call") {
1031
- ongoingToolCalls[value.output_index] = undefined
1032
-
1033
- controller.enqueue({
1034
- type: "tool-result",
1035
- toolCallId: value.item.id,
1036
- toolName: "file_search",
1037
- result: {
1038
- queries: value.item.queries,
1039
- results:
1040
- value.item.results?.map((result) => ({
1041
- attributes: result.attributes,
1042
- fileId: result.file_id,
1043
- filename: result.filename,
1044
- score: result.score,
1045
- text: result.text,
1046
- })) ?? null,
1047
- } satisfies z.infer<typeof fileSearchOutputSchema>,
1048
- providerExecuted: true,
1049
- })
1050
- } else if (value.item.type === "code_interpreter_call") {
1051
- ongoingToolCalls[value.output_index] = undefined
1052
-
1053
- controller.enqueue({
1054
- type: "tool-result",
1055
- toolCallId: value.item.id,
1056
- toolName: "code_interpreter",
1057
- result: {
1058
- outputs: value.item.outputs,
1059
- } satisfies z.infer<typeof codeInterpreterOutputSchema>,
1060
- providerExecuted: true,
1061
- })
1062
- } else if (value.item.type === "image_generation_call") {
1063
- controller.enqueue({
1064
- type: "tool-result",
1065
- toolCallId: value.item.id,
1066
- toolName: "image_generation",
1067
- result: {
1068
- result: value.item.result,
1069
- } satisfies z.infer<typeof imageGenerationOutputSchema>,
1070
- providerExecuted: true,
1071
- })
1072
- } else if (value.item.type === "local_shell_call") {
1073
- ongoingToolCalls[value.output_index] = undefined
1074
-
1075
- controller.enqueue({
1076
- type: "tool-call",
1077
- toolCallId: value.item.call_id,
1078
- toolName: "local_shell",
1079
- input: JSON.stringify({
1080
- action: {
1081
- type: "exec",
1082
- command: value.item.action.command,
1083
- timeoutMs: value.item.action.timeout_ms,
1084
- user: value.item.action.user,
1085
- workingDirectory: value.item.action.working_directory,
1086
- env: value.item.action.env,
1087
- },
1088
- } satisfies z.infer<typeof localShellInputSchema>),
1089
- providerMetadata: {
1090
- openai: { itemId: value.item.id },
1091
- },
1092
- })
1093
- } else if (value.item.type === "message") {
1094
- if (currentTextId) {
1095
- controller.enqueue({
1096
- type: "text-end",
1097
- id: currentTextId,
1098
- })
1099
- currentTextId = null
1100
- }
1101
- } else if (isResponseOutputItemDoneReasoningChunk(value)) {
1102
- const activeReasoningPart = activeReasoning[value.output_index]
1103
- if (activeReasoningPart) {
1104
- for (const summaryIndex of activeReasoningPart.summaryParts) {
1105
- controller.enqueue({
1106
- type: "reasoning-end",
1107
- id: `${activeReasoningPart.canonicalId}:${summaryIndex}`,
1108
- providerMetadata: {
1109
- openai: {
1110
- itemId: activeReasoningPart.canonicalId,
1111
- reasoningEncryptedContent: value.item.encrypted_content ?? null,
1112
- },
1113
- },
1114
- })
1115
- }
1116
- delete activeReasoning[value.output_index]
1117
- if (currentReasoningOutputIndex === value.output_index) {
1118
- currentReasoningOutputIndex = null
1119
- }
1120
- }
1121
- }
1122
- } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
1123
- const toolCall = ongoingToolCalls[value.output_index]
1124
-
1125
- if (toolCall != null) {
1126
- controller.enqueue({
1127
- type: "tool-input-delta",
1128
- id: toolCall.toolCallId,
1129
- delta: value.delta,
1130
- })
1131
- }
1132
- } else if (isResponseImageGenerationCallPartialImageChunk(value)) {
1133
- controller.enqueue({
1134
- type: "tool-result",
1135
- toolCallId: value.item_id,
1136
- toolName: "image_generation",
1137
- result: {
1138
- result: value.partial_image_b64,
1139
- } satisfies z.infer<typeof imageGenerationOutputSchema>,
1140
- providerExecuted: true,
1141
- })
1142
- } else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
1143
- const toolCall = ongoingToolCalls[value.output_index]
1144
-
1145
- if (toolCall != null) {
1146
- controller.enqueue({
1147
- type: "tool-input-delta",
1148
- id: toolCall.toolCallId,
1149
- // The delta is code, which is embedding in a JSON string.
1150
- // To escape it, we use JSON.stringify and slice to remove the outer quotes.
1151
- delta: JSON.stringify(value.delta).slice(1, -1),
1152
- })
1153
- }
1154
- } else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) {
1155
- const toolCall = ongoingToolCalls[value.output_index]
1156
-
1157
- if (toolCall != null) {
1158
- controller.enqueue({
1159
- type: "tool-input-delta",
1160
- id: toolCall.toolCallId,
1161
- delta: '"}',
1162
- })
1163
-
1164
- controller.enqueue({
1165
- type: "tool-input-end",
1166
- id: toolCall.toolCallId,
1167
- })
1168
-
1169
- // immediately send the tool call after the input end:
1170
- controller.enqueue({
1171
- type: "tool-call",
1172
- toolCallId: toolCall.toolCallId,
1173
- toolName: "code_interpreter",
1174
- input: JSON.stringify({
1175
- code: value.code,
1176
- containerId: toolCall.codeInterpreter!.containerId,
1177
- } satisfies z.infer<typeof codeInterpreterInputSchema>),
1178
- providerExecuted: true,
1179
- })
1180
- }
1181
- } else if (isResponseCreatedChunk(value)) {
1182
- responseId = value.response.id
1183
- controller.enqueue({
1184
- type: "response-metadata",
1185
- id: value.response.id,
1186
- timestamp: new Date(value.response.created_at * 1000),
1187
- modelId: value.response.model,
1188
- })
1189
- } else if (isTextDeltaChunk(value)) {
1190
- // Ensure a text-start exists, and normalize deltas to a stable id
1191
- if (!currentTextId) {
1192
- currentTextId = value.item_id
1193
- controller.enqueue({
1194
- type: "text-start",
1195
- id: currentTextId,
1196
- providerMetadata: {
1197
- openai: { itemId: value.item_id },
1198
- },
1199
- })
1200
- }
1201
-
1202
- controller.enqueue({
1203
- type: "text-delta",
1204
- id: currentTextId,
1205
- delta: value.delta,
1206
- })
1207
-
1208
- if (options.providerOptions?.openai?.logprobs && value.logprobs) {
1209
- logprobs.push(value.logprobs)
1210
- }
1211
- } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
- const activeItem =
- currentReasoningOutputIndex !== null ? activeReasoning[currentReasoningOutputIndex] : null
-
- // the first reasoning start is pushed in isResponseOutputItemAddedReasoningChunk.
- if (activeItem && value.summary_index > 0) {
- activeItem.summaryParts.push(value.summary_index)
-
- controller.enqueue({
- type: "reasoning-start",
- id: `${activeItem.canonicalId}:${value.summary_index}`,
- providerMetadata: {
- openai: {
- itemId: activeItem.canonicalId,
- reasoningEncryptedContent: activeItem.encryptedContent ?? null,
- },
- },
- })
- }
- } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
- const activeItem =
- currentReasoningOutputIndex !== null ? activeReasoning[currentReasoningOutputIndex] : null
-
- if (activeItem) {
- controller.enqueue({
- type: "reasoning-delta",
- id: `${activeItem.canonicalId}:${value.summary_index}`,
- delta: value.delta,
- providerMetadata: {
- openai: {
- itemId: activeItem.canonicalId,
- },
- },
- })
- }
- } else if (isResponseFinishedChunk(value)) {
- finishReason = mapOpenAIResponseFinishReason({
- finishReason: value.response.incomplete_details?.reason,
- hasFunctionCall,
- })
- usage.inputTokens = value.response.usage.input_tokens
- usage.outputTokens = value.response.usage.output_tokens
- usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens
- usage.reasoningTokens = value.response.usage.output_tokens_details?.reasoning_tokens ?? undefined
- usage.cachedInputTokens = value.response.usage.input_tokens_details?.cached_tokens ?? undefined
- if (typeof value.response.service_tier === "string") {
- serviceTier = value.response.service_tier
- }
- } else if (isResponseAnnotationAddedChunk(value)) {
- if (value.annotation.type === "url_citation") {
- controller.enqueue({
- type: "source",
- sourceType: "url",
- id: self.config.generateId?.() ?? generateId(),
- url: value.annotation.url,
- title: value.annotation.title,
- })
- } else if (value.annotation.type === "file_citation") {
- controller.enqueue({
- type: "source",
- sourceType: "document",
- id: self.config.generateId?.() ?? generateId(),
- mediaType: "text/plain",
- title: value.annotation.quote ?? value.annotation.filename ?? "Document",
- filename: value.annotation.filename ?? value.annotation.file_id,
- })
- }
- } else if (isErrorChunk(value)) {
- controller.enqueue({ type: "error", error: value })
- }
- },
-
- flush(controller) {
- // Close any dangling text part
- if (currentTextId) {
- controller.enqueue({ type: "text-end", id: currentTextId })
- currentTextId = null
- }
-
- const providerMetadata: SharedV2ProviderMetadata = {
- openai: {
- responseId,
- },
- }
-
- if (logprobs.length > 0) {
- providerMetadata.openai.logprobs = logprobs
- }
-
- if (serviceTier !== undefined) {
- providerMetadata.openai.serviceTier = serviceTier
- }
-
- controller.enqueue({
- type: "finish",
- finishReason,
- usage,
- providerMetadata,
- })
- },
- }),
- ),
- request: { body },
- response: { headers: responseHeaders },
- }
- }
- }
-
- const usageSchema = z.object({
- input_tokens: z.number(),
- input_tokens_details: z.object({ cached_tokens: z.number().nullish() }).nullish(),
- output_tokens: z.number(),
- output_tokens_details: z.object({ reasoning_tokens: z.number().nullish() }).nullish(),
- })
-
- const textDeltaChunkSchema = z.object({
- type: z.literal("response.output_text.delta"),
- item_id: z.string(),
- delta: z.string(),
- logprobs: LOGPROBS_SCHEMA.nullish(),
- })
-
- const errorChunkSchema = z.object({
- type: z.literal("error"),
- code: z.string(),
- message: z.string(),
- param: z.string().nullish(),
- sequence_number: z.number(),
- })
-
- const responseFinishedChunkSchema = z.object({
- type: z.enum(["response.completed", "response.incomplete"]),
- response: z.object({
- incomplete_details: z.object({ reason: z.string() }).nullish(),
- usage: usageSchema,
- service_tier: z.string().nullish(),
- }),
- })
-
- const responseCreatedChunkSchema = z.object({
- type: z.literal("response.created"),
- response: z.object({
- id: z.string(),
- created_at: z.number(),
- model: z.string(),
- service_tier: z.string().nullish(),
- }),
- })
-
- const responseOutputItemAddedSchema = z.object({
- type: z.literal("response.output_item.added"),
- output_index: z.number(),
- item: z.discriminatedUnion("type", [
- z.object({
- type: z.literal("message"),
- id: z.string(),
- }),
- z.object({
- type: z.literal("reasoning"),
- id: z.string(),
- encrypted_content: z.string().nullish(),
- }),
- z.object({
- type: z.literal("function_call"),
- id: z.string(),
- call_id: z.string(),
- name: z.string(),
- arguments: z.string(),
- }),
- z.object({
- type: z.literal("web_search_call"),
- id: z.string(),
- status: z.string(),
- action: z
- .object({
- type: z.literal("search"),
- query: z.string().optional(),
- })
- .nullish(),
- }),
- z.object({
- type: z.literal("computer_call"),
- id: z.string(),
- status: z.string(),
- }),
- z.object({
- type: z.literal("file_search_call"),
- id: z.string(),
- }),
- z.object({
- type: z.literal("image_generation_call"),
- id: z.string(),
- }),
- z.object({
- type: z.literal("code_interpreter_call"),
- id: z.string(),
- container_id: z.string(),
- code: z.string().nullable(),
- outputs: z
- .array(
- z.discriminatedUnion("type", [
- z.object({ type: z.literal("logs"), logs: z.string() }),
- z.object({ type: z.literal("image"), url: z.string() }),
- ]),
- )
- .nullable(),
- status: z.string(),
- }),
- ]),
- })
-
- const responseOutputItemDoneSchema = z.object({
- type: z.literal("response.output_item.done"),
- output_index: z.number(),
- item: z.discriminatedUnion("type", [
- z.object({
- type: z.literal("message"),
- id: z.string(),
- }),
- z.object({
- type: z.literal("reasoning"),
- id: z.string(),
- encrypted_content: z.string().nullish(),
- }),
- z.object({
- type: z.literal("function_call"),
- id: z.string(),
- call_id: z.string(),
- name: z.string(),
- arguments: z.string(),
- status: z.literal("completed"),
- }),
- codeInterpreterCallItem,
- imageGenerationCallItem,
- webSearchCallItem,
- fileSearchCallItem,
- localShellCallItem,
- z.object({
- type: z.literal("computer_call"),
- id: z.string(),
- status: z.literal("completed"),
- }),
- ]),
- })
-
- const responseFunctionCallArgumentsDeltaSchema = z.object({
- type: z.literal("response.function_call_arguments.delta"),
- item_id: z.string(),
- output_index: z.number(),
- delta: z.string(),
- })
-
- const responseImageGenerationCallPartialImageSchema = z.object({
- type: z.literal("response.image_generation_call.partial_image"),
- item_id: z.string(),
- output_index: z.number(),
- partial_image_b64: z.string(),
- })
-
- const responseCodeInterpreterCallCodeDeltaSchema = z.object({
- type: z.literal("response.code_interpreter_call_code.delta"),
- item_id: z.string(),
- output_index: z.number(),
- delta: z.string(),
- })
-
- const responseCodeInterpreterCallCodeDoneSchema = z.object({
- type: z.literal("response.code_interpreter_call_code.done"),
- item_id: z.string(),
- output_index: z.number(),
- code: z.string(),
- })
-
- const responseAnnotationAddedSchema = z.object({
- type: z.literal("response.output_text.annotation.added"),
- annotation: z.discriminatedUnion("type", [
- z.object({
- type: z.literal("url_citation"),
- url: z.string(),
- title: z.string(),
- }),
- z.object({
- type: z.literal("file_citation"),
- file_id: z.string(),
- filename: z.string().nullish(),
- index: z.number().nullish(),
- start_index: z.number().nullish(),
- end_index: z.number().nullish(),
- quote: z.string().nullish(),
- }),
- ]),
- })
-
- const responseReasoningSummaryPartAddedSchema = z.object({
- type: z.literal("response.reasoning_summary_part.added"),
- item_id: z.string(),
- summary_index: z.number(),
- })
-
- const responseReasoningSummaryTextDeltaSchema = z.object({
- type: z.literal("response.reasoning_summary_text.delta"),
- item_id: z.string(),
- summary_index: z.number(),
- delta: z.string(),
- })
-
- const openaiResponsesChunkSchema = z.union([
- textDeltaChunkSchema,
- responseFinishedChunkSchema,
- responseCreatedChunkSchema,
- responseOutputItemAddedSchema,
- responseOutputItemDoneSchema,
- responseFunctionCallArgumentsDeltaSchema,
- responseImageGenerationCallPartialImageSchema,
- responseCodeInterpreterCallCodeDeltaSchema,
- responseCodeInterpreterCallCodeDoneSchema,
- responseAnnotationAddedSchema,
- responseReasoningSummaryPartAddedSchema,
- responseReasoningSummaryTextDeltaSchema,
- errorChunkSchema,
- z.object({ type: z.string() }).loose(), // fallback for unknown chunks
- ])
-
- type ExtractByType<T, K extends T extends { type: infer U } ? U : never> = T extends { type: K } ? T : never
-
- function isTextDeltaChunk(
- chunk: z.infer<typeof openaiResponsesChunkSchema>,
- ): chunk is z.infer<typeof textDeltaChunkSchema> {
- return chunk.type === "response.output_text.delta"
- }
-
- function isResponseOutputItemDoneChunk(
- chunk: z.infer<typeof openaiResponsesChunkSchema>,
- ): chunk is z.infer<typeof responseOutputItemDoneSchema> {
- return chunk.type === "response.output_item.done"
- }
-
- function isResponseOutputItemDoneReasoningChunk(chunk: z.infer<typeof openaiResponsesChunkSchema>): chunk is z.infer<
- typeof responseOutputItemDoneSchema
- > & {
- item: ExtractByType<z.infer<typeof responseOutputItemDoneSchema>["item"], "reasoning">
- } {
- return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning"
- }
-
- function isResponseFinishedChunk(
- chunk: z.infer<typeof openaiResponsesChunkSchema>,
- ): chunk is z.infer<typeof responseFinishedChunkSchema> {
- return chunk.type === "response.completed" || chunk.type === "response.incomplete"
- }
-
- function isResponseCreatedChunk(
- chunk: z.infer<typeof openaiResponsesChunkSchema>,
- ): chunk is z.infer<typeof responseCreatedChunkSchema> {
- return chunk.type === "response.created"
- }
-
- function isResponseFunctionCallArgumentsDeltaChunk(
- chunk: z.infer<typeof openaiResponsesChunkSchema>,
- ): chunk is z.infer<typeof responseFunctionCallArgumentsDeltaSchema> {
- return chunk.type === "response.function_call_arguments.delta"
- }
- function isResponseImageGenerationCallPartialImageChunk(
- chunk: z.infer<typeof openaiResponsesChunkSchema>,
- ): chunk is z.infer<typeof responseImageGenerationCallPartialImageSchema> {
- return chunk.type === "response.image_generation_call.partial_image"
- }
-
- function isResponseCodeInterpreterCallCodeDeltaChunk(
- chunk: z.infer<typeof openaiResponsesChunkSchema>,
- ): chunk is z.infer<typeof responseCodeInterpreterCallCodeDeltaSchema> {
- return chunk.type === "response.code_interpreter_call_code.delta"
- }
-
- function isResponseCodeInterpreterCallCodeDoneChunk(
- chunk: z.infer<typeof openaiResponsesChunkSchema>,
- ): chunk is z.infer<typeof responseCodeInterpreterCallCodeDoneSchema> {
- return chunk.type === "response.code_interpreter_call_code.done"
- }
-
- function isResponseOutputItemAddedChunk(
- chunk: z.infer<typeof openaiResponsesChunkSchema>,
- ): chunk is z.infer<typeof responseOutputItemAddedSchema> {
- return chunk.type === "response.output_item.added"
- }
-
- function isResponseOutputItemAddedReasoningChunk(chunk: z.infer<typeof openaiResponsesChunkSchema>): chunk is z.infer<
- typeof responseOutputItemAddedSchema
- > & {
- item: ExtractByType<z.infer<typeof responseOutputItemAddedSchema>["item"], "reasoning">
- } {
- return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning"
- }
-
- function isResponseAnnotationAddedChunk(
- chunk: z.infer<typeof openaiResponsesChunkSchema>,
- ): chunk is z.infer<typeof responseAnnotationAddedSchema> {
- return chunk.type === "response.output_text.annotation.added"
- }
-
- function isResponseReasoningSummaryPartAddedChunk(
- chunk: z.infer<typeof openaiResponsesChunkSchema>,
- ): chunk is z.infer<typeof responseReasoningSummaryPartAddedSchema> {
- return chunk.type === "response.reasoning_summary_part.added"
- }
-
- function isResponseReasoningSummaryTextDeltaChunk(
- chunk: z.infer<typeof openaiResponsesChunkSchema>,
- ): chunk is z.infer<typeof responseReasoningSummaryTextDeltaSchema> {
- return chunk.type === "response.reasoning_summary_text.delta"
- }
-
- function isErrorChunk(chunk: z.infer<typeof openaiResponsesChunkSchema>): chunk is z.infer<typeof errorChunkSchema> {
- return chunk.type === "error"
- }
-
- type ResponsesModelConfig = {
- isReasoningModel: boolean
- systemMessageMode: "remove" | "system" | "developer"
- requiredAutoTruncation: boolean
- supportsFlexProcessing: boolean
- supportsPriorityProcessing: boolean
- }
-
- function getResponsesModelConfig(modelId: string): ResponsesModelConfig {
- const supportsFlexProcessing =
- modelId.startsWith("o3") ||
- modelId.startsWith("o4-mini") ||
- (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat"))
- const supportsPriorityProcessing =
- modelId.startsWith("gpt-4") ||
- modelId.startsWith("gpt-5-mini") ||
- (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat")) ||
- modelId.startsWith("o3") ||
- modelId.startsWith("o4-mini")
- const defaults = {
- requiredAutoTruncation: false,
- systemMessageMode: "system" as const,
- supportsFlexProcessing,
- supportsPriorityProcessing,
- }
-
- // gpt-5-chat models are non-reasoning
- if (modelId.startsWith("gpt-5-chat")) {
- return {
- ...defaults,
- isReasoningModel: false,
- }
- }
-
- // o series reasoning models:
- if (
- modelId.startsWith("o") ||
- modelId.startsWith("gpt-5") ||
- modelId.startsWith("codex-") ||
- modelId.startsWith("computer-use")
- ) {
- if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
- return {
- ...defaults,
- isReasoningModel: true,
- systemMessageMode: "remove",
- }
- }
-
- return {
- ...defaults,
- isReasoningModel: true,
- systemMessageMode: "developer",
- }
- }
-
- // gpt models:
- return {
- ...defaults,
- isReasoningModel: false,
- }
- }
-
- // TODO AI SDK 6: use optional here instead of nullish
- const openaiResponsesProviderOptionsSchema = z.object({
- include: z
- .array(z.enum(["reasoning.encrypted_content", "file_search_call.results", "message.output_text.logprobs"]))
- .nullish(),
- instructions: z.string().nullish(),
-
- /**
- * Return the log probabilities of the tokens.
- *
- * Setting to true will return the log probabilities of the tokens that
- * were generated.
- *
- * Setting to a number will return the log probabilities of the top n
- * tokens that were generated.
- *
- * @see https://platform.openai.com/docs/api-reference/responses/create
- * @see https://cookbook.openai.com/examples/using_logprobs
- */
- logprobs: z.union([z.boolean(), z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
-
- /**
- * The maximum number of total calls to built-in tools that can be processed in a response.
- * This maximum number applies across all built-in tool calls, not per individual tool.
- * Any further attempts to call a tool by the model will be ignored.
- */
- maxToolCalls: z.number().nullish(),
-
- metadata: z.any().nullish(),
- parallelToolCalls: z.boolean().nullish(),
- previousResponseId: z.string().nullish(),
- promptCacheKey: z.string().nullish(),
- reasoningEffort: z.string().nullish(),
- reasoningSummary: z.string().nullish(),
- safetyIdentifier: z.string().nullish(),
- serviceTier: z.enum(["auto", "flex", "priority"]).nullish(),
- store: z.boolean().nullish(),
- strictJsonSchema: z.boolean().nullish(),
- textVerbosity: z.enum(["low", "medium", "high"]).nullish(),
- user: z.string().nullish(),
- })
-
- export type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>
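
The removed handler above streams code_interpreter code deltas as fragments of a JSON string: each raw delta is escaped with JSON.stringify and the outer quotes are sliced off, and a final '"}' delta closes the string and the object. A minimal standalone TypeScript sketch of that escaping trick follows; buildToolInput is an illustrative helper for this diff page only, not an export of the package.

// Sketch of the JSON-string escaping technique used for streamed code deltas.
// Each delta is JSON.stringify-ed and slice(1, -1) drops the surrounding
// quotes, leaving a correctly escaped fragment that can be appended to an
// in-progress JSON string.
function buildToolInput(codeDeltas: string[], containerId: string): string {
  let input = '{"code":"'
  for (const delta of codeDeltas) {
    input += JSON.stringify(delta).slice(1, -1)
  }
  // Close the string and the object, mirroring the '"}' closing delta above.
  input += '"}'
  // Re-parse to show the escaping round-trips, then attach the container id.
  const parsed = JSON.parse(input) as { code: string }
  return JSON.stringify({ code: parsed.code, containerId })
}

// Example: quotes and newlines inside the deltas survive the round trip.
console.log(buildToolInput(['print("hi")', "\nprint(1 + 1)"], "cntr_123"))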