ai 6.0.33 → 6.0.35

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (357)
  1. package/CHANGELOG.md +16 -0
  2. package/dist/index.d.mts +50 -21
  3. package/dist/index.d.ts +50 -21
  4. package/dist/index.js +348 -286
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +280 -219
  7. package/dist/index.mjs.map +1 -1
  8. package/dist/internal/index.js +1 -1
  9. package/dist/internal/index.mjs +1 -1
  10. package/docs/02-foundations/03-prompts.mdx +2 -2
  11. package/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx +1 -1
  12. package/docs/07-reference/01-ai-sdk-core/28-output.mdx +1 -1
  13. package/docs/07-reference/05-ai-sdk-errors/ai-ui-message-stream-error.mdx +67 -0
  14. package/package.json +6 -4
  15. package/src/agent/agent.ts +116 -0
  16. package/src/agent/create-agent-ui-stream-response.test.ts +258 -0
  17. package/src/agent/create-agent-ui-stream-response.ts +50 -0
  18. package/src/agent/create-agent-ui-stream.ts +73 -0
  19. package/src/agent/index.ts +33 -0
  20. package/src/agent/infer-agent-tools.ts +7 -0
  21. package/src/agent/infer-agent-ui-message.test-d.ts +54 -0
  22. package/src/agent/infer-agent-ui-message.ts +11 -0
  23. package/src/agent/pipe-agent-ui-stream-to-response.ts +52 -0
  24. package/src/agent/tool-loop-agent-on-finish-callback.ts +31 -0
  25. package/src/agent/tool-loop-agent-on-step-finish-callback.ts +11 -0
  26. package/src/agent/tool-loop-agent-settings.ts +182 -0
  27. package/src/agent/tool-loop-agent.test-d.ts +114 -0
  28. package/src/agent/tool-loop-agent.test.ts +442 -0
  29. package/src/agent/tool-loop-agent.ts +114 -0
  30. package/src/embed/__snapshots__/embed-many.test.ts.snap +191 -0
  31. package/src/embed/__snapshots__/embed.test.ts.snap +81 -0
  32. package/src/embed/embed-many-result.ts +53 -0
  33. package/src/embed/embed-many.test.ts +653 -0
  34. package/src/embed/embed-many.ts +378 -0
  35. package/src/embed/embed-result.ts +50 -0
  36. package/src/embed/embed.test.ts +298 -0
  37. package/src/embed/embed.ts +211 -0
  38. package/src/embed/index.ts +4 -0
  39. package/src/error/index.ts +35 -0
  40. package/src/error/invalid-argument-error.ts +34 -0
  41. package/src/error/invalid-stream-part-error.ts +28 -0
  42. package/src/error/invalid-tool-approval-error.ts +26 -0
  43. package/src/error/invalid-tool-input-error.ts +33 -0
  44. package/src/error/no-image-generated-error.ts +39 -0
  45. package/src/error/no-object-generated-error.ts +70 -0
  46. package/src/error/no-output-generated-error.ts +26 -0
  47. package/src/error/no-speech-generated-error.ts +18 -0
  48. package/src/error/no-such-tool-error.ts +35 -0
  49. package/src/error/no-transcript-generated-error.ts +20 -0
  50. package/src/error/tool-call-not-found-for-approval-error.ts +32 -0
  51. package/src/error/tool-call-repair-error.ts +30 -0
  52. package/src/error/ui-message-stream-error.ts +48 -0
  53. package/src/error/unsupported-model-version-error.ts +23 -0
  54. package/src/error/verify-no-object-generated-error.ts +27 -0
  55. package/src/generate-image/generate-image-result.ts +42 -0
  56. package/src/generate-image/generate-image.test.ts +1420 -0
  57. package/src/generate-image/generate-image.ts +360 -0
  58. package/src/generate-image/index.ts +18 -0
  59. package/src/generate-object/__snapshots__/generate-object.test.ts.snap +133 -0
  60. package/src/generate-object/__snapshots__/stream-object.test.ts.snap +297 -0
  61. package/src/generate-object/generate-object-result.ts +67 -0
  62. package/src/generate-object/generate-object.test-d.ts +49 -0
  63. package/src/generate-object/generate-object.test.ts +1191 -0
  64. package/src/generate-object/generate-object.ts +518 -0
  65. package/src/generate-object/index.ts +9 -0
  66. package/src/generate-object/inject-json-instruction.test.ts +181 -0
  67. package/src/generate-object/inject-json-instruction.ts +30 -0
  68. package/src/generate-object/output-strategy.ts +415 -0
  69. package/src/generate-object/parse-and-validate-object-result.ts +111 -0
  70. package/src/generate-object/repair-text.ts +12 -0
  71. package/src/generate-object/stream-object-result.ts +120 -0
  72. package/src/generate-object/stream-object.test-d.ts +74 -0
  73. package/src/generate-object/stream-object.test.ts +1950 -0
  74. package/src/generate-object/stream-object.ts +986 -0
  75. package/src/generate-object/validate-object-generation-input.ts +144 -0
  76. package/src/generate-speech/generate-speech-result.ts +30 -0
  77. package/src/generate-speech/generate-speech.test.ts +300 -0
  78. package/src/generate-speech/generate-speech.ts +190 -0
  79. package/src/generate-speech/generated-audio-file.ts +65 -0
  80. package/src/generate-speech/index.ts +3 -0
  81. package/src/generate-text/__snapshots__/generate-text.test.ts.snap +1872 -0
  82. package/src/generate-text/__snapshots__/stream-text.test.ts.snap +1255 -0
  83. package/src/generate-text/collect-tool-approvals.test.ts +553 -0
  84. package/src/generate-text/collect-tool-approvals.ts +116 -0
  85. package/src/generate-text/content-part.ts +25 -0
  86. package/src/generate-text/execute-tool-call.ts +129 -0
  87. package/src/generate-text/extract-reasoning-content.ts +17 -0
  88. package/src/generate-text/extract-text-content.ts +15 -0
  89. package/src/generate-text/generate-text-result.ts +168 -0
  90. package/src/generate-text/generate-text.test-d.ts +68 -0
  91. package/src/generate-text/generate-text.test.ts +7011 -0
  92. package/src/generate-text/generate-text.ts +1223 -0
  93. package/src/generate-text/generated-file.ts +70 -0
  94. package/src/generate-text/index.ts +57 -0
  95. package/src/generate-text/is-approval-needed.ts +29 -0
  96. package/src/generate-text/output-utils.ts +23 -0
  97. package/src/generate-text/output.test.ts +698 -0
  98. package/src/generate-text/output.ts +590 -0
  99. package/src/generate-text/parse-tool-call.test.ts +570 -0
  100. package/src/generate-text/parse-tool-call.ts +188 -0
  101. package/src/generate-text/prepare-step.ts +103 -0
  102. package/src/generate-text/prune-messages.test.ts +720 -0
  103. package/src/generate-text/prune-messages.ts +167 -0
  104. package/src/generate-text/reasoning-output.ts +20 -0
  105. package/src/generate-text/reasoning.ts +8 -0
  106. package/src/generate-text/response-message.ts +10 -0
  107. package/src/generate-text/run-tools-transformation.test.ts +1143 -0
  108. package/src/generate-text/run-tools-transformation.ts +420 -0
  109. package/src/generate-text/smooth-stream.test.ts +2101 -0
  110. package/src/generate-text/smooth-stream.ts +162 -0
  111. package/src/generate-text/step-result.ts +238 -0
  112. package/src/generate-text/stop-condition.ts +29 -0
  113. package/src/generate-text/stream-text-result.ts +463 -0
  114. package/src/generate-text/stream-text.test-d.ts +200 -0
  115. package/src/generate-text/stream-text.test.ts +19979 -0
  116. package/src/generate-text/stream-text.ts +2505 -0
  117. package/src/generate-text/to-response-messages.test.ts +922 -0
  118. package/src/generate-text/to-response-messages.ts +163 -0
  119. package/src/generate-text/tool-approval-request-output.ts +21 -0
  120. package/src/generate-text/tool-call-repair-function.ts +27 -0
  121. package/src/generate-text/tool-call.ts +47 -0
  122. package/src/generate-text/tool-error.ts +34 -0
  123. package/src/generate-text/tool-output-denied.ts +21 -0
  124. package/src/generate-text/tool-output.ts +7 -0
  125. package/src/generate-text/tool-result.ts +36 -0
  126. package/src/generate-text/tool-set.ts +14 -0
  127. package/src/global.ts +24 -0
  128. package/src/index.ts +50 -0
  129. package/src/logger/index.ts +6 -0
  130. package/src/logger/log-warnings.test.ts +351 -0
  131. package/src/logger/log-warnings.ts +119 -0
  132. package/src/middleware/__snapshots__/simulate-streaming-middleware.test.ts.snap +64 -0
  133. package/src/middleware/add-tool-input-examples-middleware.test.ts +476 -0
  134. package/src/middleware/add-tool-input-examples-middleware.ts +90 -0
  135. package/src/middleware/default-embedding-settings-middleware.test.ts +126 -0
  136. package/src/middleware/default-embedding-settings-middleware.ts +22 -0
  137. package/src/middleware/default-settings-middleware.test.ts +388 -0
  138. package/src/middleware/default-settings-middleware.ts +33 -0
  139. package/src/middleware/extract-json-middleware.test.ts +827 -0
  140. package/src/middleware/extract-json-middleware.ts +197 -0
  141. package/src/middleware/extract-reasoning-middleware.test.ts +1028 -0
  142. package/src/middleware/extract-reasoning-middleware.ts +238 -0
  143. package/src/middleware/index.ts +10 -0
  144. package/src/middleware/simulate-streaming-middleware.test.ts +911 -0
  145. package/src/middleware/simulate-streaming-middleware.ts +79 -0
  146. package/src/middleware/wrap-embedding-model.test.ts +358 -0
  147. package/src/middleware/wrap-embedding-model.ts +86 -0
  148. package/src/middleware/wrap-image-model.test.ts +423 -0
  149. package/src/middleware/wrap-image-model.ts +85 -0
  150. package/src/middleware/wrap-language-model.test.ts +518 -0
  151. package/src/middleware/wrap-language-model.ts +104 -0
  152. package/src/middleware/wrap-provider.test.ts +120 -0
  153. package/src/middleware/wrap-provider.ts +51 -0
  154. package/src/model/as-embedding-model-v3.test.ts +319 -0
  155. package/src/model/as-embedding-model-v3.ts +24 -0
  156. package/src/model/as-image-model-v3.test.ts +409 -0
  157. package/src/model/as-image-model-v3.ts +24 -0
  158. package/src/model/as-language-model-v3.test.ts +508 -0
  159. package/src/model/as-language-model-v3.ts +103 -0
  160. package/src/model/as-provider-v3.ts +36 -0
  161. package/src/model/as-speech-model-v3.test.ts +356 -0
  162. package/src/model/as-speech-model-v3.ts +24 -0
  163. package/src/model/as-transcription-model-v3.test.ts +529 -0
  164. package/src/model/as-transcription-model-v3.ts +24 -0
  165. package/src/model/resolve-model.test.ts +244 -0
  166. package/src/model/resolve-model.ts +126 -0
  167. package/src/prompt/call-settings.ts +148 -0
  168. package/src/prompt/content-part.ts +209 -0
  169. package/src/prompt/convert-to-language-model-prompt.test.ts +2018 -0
  170. package/src/prompt/convert-to-language-model-prompt.ts +442 -0
  171. package/src/prompt/create-tool-model-output.test.ts +508 -0
  172. package/src/prompt/create-tool-model-output.ts +34 -0
  173. package/src/prompt/data-content.test.ts +15 -0
  174. package/src/prompt/data-content.ts +134 -0
  175. package/src/prompt/index.ts +27 -0
  176. package/src/prompt/invalid-data-content-error.ts +29 -0
  177. package/src/prompt/invalid-message-role-error.ts +27 -0
  178. package/src/prompt/message-conversion-error.ts +28 -0
  179. package/src/prompt/message.ts +68 -0
  180. package/src/prompt/prepare-call-settings.test.ts +159 -0
  181. package/src/prompt/prepare-call-settings.ts +108 -0
  182. package/src/prompt/prepare-tools-and-tool-choice.test.ts +461 -0
  183. package/src/prompt/prepare-tools-and-tool-choice.ts +86 -0
  184. package/src/prompt/prompt.ts +43 -0
  185. package/src/prompt/split-data-url.ts +17 -0
  186. package/src/prompt/standardize-prompt.test.ts +82 -0
  187. package/src/prompt/standardize-prompt.ts +99 -0
  188. package/src/prompt/wrap-gateway-error.ts +29 -0
  189. package/src/registry/custom-provider.test.ts +211 -0
  190. package/src/registry/custom-provider.ts +155 -0
  191. package/src/registry/index.ts +7 -0
  192. package/src/registry/no-such-provider-error.ts +41 -0
  193. package/src/registry/provider-registry.test.ts +691 -0
  194. package/src/registry/provider-registry.ts +328 -0
  195. package/src/rerank/index.ts +2 -0
  196. package/src/rerank/rerank-result.ts +70 -0
  197. package/src/rerank/rerank.test.ts +516 -0
  198. package/src/rerank/rerank.ts +237 -0
  199. package/src/telemetry/assemble-operation-name.ts +21 -0
  200. package/src/telemetry/get-base-telemetry-attributes.ts +53 -0
  201. package/src/telemetry/get-tracer.ts +20 -0
  202. package/src/telemetry/noop-tracer.ts +69 -0
  203. package/src/telemetry/record-span.ts +63 -0
  204. package/src/telemetry/select-telemetry-attributes.ts +78 -0
  205. package/src/telemetry/select-temetry-attributes.test.ts +114 -0
  206. package/src/telemetry/stringify-for-telemetry.test.ts +114 -0
  207. package/src/telemetry/stringify-for-telemetry.ts +33 -0
  208. package/src/telemetry/telemetry-settings.ts +44 -0
  209. package/src/test/mock-embedding-model-v2.ts +35 -0
  210. package/src/test/mock-embedding-model-v3.ts +48 -0
  211. package/src/test/mock-image-model-v2.ts +28 -0
  212. package/src/test/mock-image-model-v3.ts +28 -0
  213. package/src/test/mock-language-model-v2.ts +72 -0
  214. package/src/test/mock-language-model-v3.ts +77 -0
  215. package/src/test/mock-provider-v2.ts +68 -0
  216. package/src/test/mock-provider-v3.ts +80 -0
  217. package/src/test/mock-reranking-model-v3.ts +25 -0
  218. package/src/test/mock-server-response.ts +69 -0
  219. package/src/test/mock-speech-model-v2.ts +24 -0
  220. package/src/test/mock-speech-model-v3.ts +24 -0
  221. package/src/test/mock-tracer.ts +156 -0
  222. package/src/test/mock-transcription-model-v2.ts +24 -0
  223. package/src/test/mock-transcription-model-v3.ts +24 -0
  224. package/src/test/mock-values.ts +4 -0
  225. package/src/test/not-implemented.ts +3 -0
  226. package/src/text-stream/create-text-stream-response.test.ts +38 -0
  227. package/src/text-stream/create-text-stream-response.ts +18 -0
  228. package/src/text-stream/index.ts +2 -0
  229. package/src/text-stream/pipe-text-stream-to-response.test.ts +38 -0
  230. package/src/text-stream/pipe-text-stream-to-response.ts +26 -0
  231. package/src/transcribe/index.ts +2 -0
  232. package/src/transcribe/transcribe-result.ts +60 -0
  233. package/src/transcribe/transcribe.test.ts +313 -0
  234. package/src/transcribe/transcribe.ts +173 -0
  235. package/src/types/embedding-model-middleware.ts +3 -0
  236. package/src/types/embedding-model.ts +18 -0
  237. package/src/types/image-model-middleware.ts +3 -0
  238. package/src/types/image-model-response-metadata.ts +16 -0
  239. package/src/types/image-model.ts +19 -0
  240. package/src/types/index.ts +29 -0
  241. package/src/types/json-value.ts +15 -0
  242. package/src/types/language-model-middleware.ts +3 -0
  243. package/src/types/language-model-request-metadata.ts +6 -0
  244. package/src/types/language-model-response-metadata.ts +21 -0
  245. package/src/types/language-model.ts +104 -0
  246. package/src/types/provider-metadata.ts +16 -0
  247. package/src/types/provider.ts +55 -0
  248. package/src/types/reranking-model.ts +6 -0
  249. package/src/types/speech-model-response-metadata.ts +21 -0
  250. package/src/types/speech-model.ts +6 -0
  251. package/src/types/transcription-model-response-metadata.ts +16 -0
  252. package/src/types/transcription-model.ts +9 -0
  253. package/src/types/usage.ts +200 -0
  254. package/src/types/warning.ts +7 -0
  255. package/src/ui/__snapshots__/append-response-messages.test.ts.snap +416 -0
  256. package/src/ui/__snapshots__/convert-to-model-messages.test.ts.snap +419 -0
  257. package/src/ui/__snapshots__/process-chat-text-response.test.ts.snap +142 -0
  258. package/src/ui/call-completion-api.ts +157 -0
  259. package/src/ui/chat-transport.ts +83 -0
  260. package/src/ui/chat.test-d.ts +233 -0
  261. package/src/ui/chat.test.ts +2695 -0
  262. package/src/ui/chat.ts +716 -0
  263. package/src/ui/convert-file-list-to-file-ui-parts.ts +36 -0
  264. package/src/ui/convert-to-model-messages.test.ts +2775 -0
  265. package/src/ui/convert-to-model-messages.ts +373 -0
  266. package/src/ui/default-chat-transport.ts +36 -0
  267. package/src/ui/direct-chat-transport.test.ts +446 -0
  268. package/src/ui/direct-chat-transport.ts +118 -0
  269. package/src/ui/http-chat-transport.test.ts +185 -0
  270. package/src/ui/http-chat-transport.ts +292 -0
  271. package/src/ui/index.ts +71 -0
  272. package/src/ui/last-assistant-message-is-complete-with-approval-responses.ts +44 -0
  273. package/src/ui/last-assistant-message-is-complete-with-tool-calls.test.ts +371 -0
  274. package/src/ui/last-assistant-message-is-complete-with-tool-calls.ts +39 -0
  275. package/src/ui/process-text-stream.test.ts +38 -0
  276. package/src/ui/process-text-stream.ts +16 -0
  277. package/src/ui/process-ui-message-stream.test.ts +8294 -0
  278. package/src/ui/process-ui-message-stream.ts +761 -0
  279. package/src/ui/text-stream-chat-transport.ts +23 -0
  280. package/src/ui/transform-text-to-ui-message-stream.test.ts +124 -0
  281. package/src/ui/transform-text-to-ui-message-stream.ts +27 -0
  282. package/src/ui/ui-messages.test.ts +48 -0
  283. package/src/ui/ui-messages.ts +534 -0
  284. package/src/ui/use-completion.ts +84 -0
  285. package/src/ui/validate-ui-messages.test.ts +1428 -0
  286. package/src/ui/validate-ui-messages.ts +476 -0
  287. package/src/ui-message-stream/create-ui-message-stream-response.test.ts +266 -0
  288. package/src/ui-message-stream/create-ui-message-stream-response.ts +32 -0
  289. package/src/ui-message-stream/create-ui-message-stream.test.ts +639 -0
  290. package/src/ui-message-stream/create-ui-message-stream.ts +124 -0
  291. package/src/ui-message-stream/get-response-ui-message-id.test.ts +55 -0
  292. package/src/ui-message-stream/get-response-ui-message-id.ts +24 -0
  293. package/src/ui-message-stream/handle-ui-message-stream-finish.test.ts +429 -0
  294. package/src/ui-message-stream/handle-ui-message-stream-finish.ts +135 -0
  295. package/src/ui-message-stream/index.ts +13 -0
  296. package/src/ui-message-stream/json-to-sse-transform-stream.ts +12 -0
  297. package/src/ui-message-stream/pipe-ui-message-stream-to-response.test.ts +90 -0
  298. package/src/ui-message-stream/pipe-ui-message-stream-to-response.ts +40 -0
  299. package/src/ui-message-stream/read-ui-message-stream.test.ts +122 -0
  300. package/src/ui-message-stream/read-ui-message-stream.ts +87 -0
  301. package/src/ui-message-stream/ui-message-chunks.test-d.ts +18 -0
  302. package/src/ui-message-stream/ui-message-chunks.ts +344 -0
  303. package/src/ui-message-stream/ui-message-stream-headers.ts +7 -0
  304. package/src/ui-message-stream/ui-message-stream-on-finish-callback.ts +32 -0
  305. package/src/ui-message-stream/ui-message-stream-response-init.ts +5 -0
  306. package/src/ui-message-stream/ui-message-stream-writer.ts +24 -0
  307. package/src/util/as-array.ts +3 -0
  308. package/src/util/async-iterable-stream.test.ts +241 -0
  309. package/src/util/async-iterable-stream.ts +94 -0
  310. package/src/util/consume-stream.ts +29 -0
  311. package/src/util/cosine-similarity.test.ts +57 -0
  312. package/src/util/cosine-similarity.ts +47 -0
  313. package/src/util/create-resolvable-promise.ts +30 -0
  314. package/src/util/create-stitchable-stream.test.ts +239 -0
  315. package/src/util/create-stitchable-stream.ts +112 -0
  316. package/src/util/data-url.ts +17 -0
  317. package/src/util/deep-partial.ts +84 -0
  318. package/src/util/detect-media-type.test.ts +670 -0
  319. package/src/util/detect-media-type.ts +184 -0
  320. package/src/util/download/download-function.ts +45 -0
  321. package/src/util/download/download.test.ts +69 -0
  322. package/src/util/download/download.ts +46 -0
  323. package/src/util/error-handler.ts +1 -0
  324. package/src/util/fix-json.test.ts +279 -0
  325. package/src/util/fix-json.ts +401 -0
  326. package/src/util/get-potential-start-index.test.ts +34 -0
  327. package/src/util/get-potential-start-index.ts +30 -0
  328. package/src/util/index.ts +11 -0
  329. package/src/util/is-deep-equal-data.test.ts +119 -0
  330. package/src/util/is-deep-equal-data.ts +48 -0
  331. package/src/util/is-non-empty-object.ts +5 -0
  332. package/src/util/job.ts +1 -0
  333. package/src/util/log-v2-compatibility-warning.ts +21 -0
  334. package/src/util/merge-abort-signals.test.ts +155 -0
  335. package/src/util/merge-abort-signals.ts +43 -0
  336. package/src/util/merge-objects.test.ts +118 -0
  337. package/src/util/merge-objects.ts +79 -0
  338. package/src/util/now.ts +4 -0
  339. package/src/util/parse-partial-json.test.ts +80 -0
  340. package/src/util/parse-partial-json.ts +30 -0
  341. package/src/util/prepare-headers.test.ts +51 -0
  342. package/src/util/prepare-headers.ts +14 -0
  343. package/src/util/prepare-retries.test.ts +10 -0
  344. package/src/util/prepare-retries.ts +47 -0
  345. package/src/util/retry-error.ts +41 -0
  346. package/src/util/retry-with-exponential-backoff.test.ts +446 -0
  347. package/src/util/retry-with-exponential-backoff.ts +154 -0
  348. package/src/util/serial-job-executor.test.ts +162 -0
  349. package/src/util/serial-job-executor.ts +36 -0
  350. package/src/util/simulate-readable-stream.test.ts +98 -0
  351. package/src/util/simulate-readable-stream.ts +39 -0
  352. package/src/util/split-array.test.ts +60 -0
  353. package/src/util/split-array.ts +20 -0
  354. package/src/util/value-of.ts +65 -0
  355. package/src/util/write-to-server-response.test.ts +266 -0
  356. package/src/util/write-to-server-response.ts +49 -0
  357. package/src/version.ts +5 -0
@@ -0,0 +1,986 @@
1
+ import {
2
+ JSONValue,
3
+ LanguageModelV3FinishReason,
4
+ LanguageModelV3StreamPart,
5
+ LanguageModelV3Usage,
6
+ SharedV3ProviderMetadata,
7
+ SharedV3Warning,
8
+ } from '@ai-sdk/provider';
9
+ import {
10
+ createIdGenerator,
11
+ DelayedPromise,
12
+ FlexibleSchema,
13
+ ProviderOptions,
14
+ type InferSchema,
15
+ } from '@ai-sdk/provider-utils';
16
+ import { ServerResponse } from 'http';
17
+ import { logWarnings } from '../logger/log-warnings';
18
+ import { resolveLanguageModel } from '../model/resolve-model';
19
+ import { CallSettings } from '../prompt/call-settings';
20
+ import { convertToLanguageModelPrompt } from '../prompt/convert-to-language-model-prompt';
21
+ import { prepareCallSettings } from '../prompt/prepare-call-settings';
22
+ import { Prompt } from '../prompt/prompt';
23
+ import { standardizePrompt } from '../prompt/standardize-prompt';
24
+ import { wrapGatewayError } from '../prompt/wrap-gateway-error';
25
+ import { assembleOperationName } from '../telemetry/assemble-operation-name';
26
+ import { getBaseTelemetryAttributes } from '../telemetry/get-base-telemetry-attributes';
27
+ import { getTracer } from '../telemetry/get-tracer';
28
+ import { recordSpan } from '../telemetry/record-span';
29
+ import { selectTelemetryAttributes } from '../telemetry/select-telemetry-attributes';
30
+ import { stringifyForTelemetry } from '../telemetry/stringify-for-telemetry';
31
+ import { TelemetrySettings } from '../telemetry/telemetry-settings';
32
+ import { createTextStreamResponse } from '../text-stream/create-text-stream-response';
33
+ import { pipeTextStreamToResponse } from '../text-stream/pipe-text-stream-to-response';
34
+ import {
35
+ CallWarning,
36
+ FinishReason,
37
+ LanguageModel,
38
+ } from '../types/language-model';
39
+ import { LanguageModelRequestMetadata } from '../types/language-model-request-metadata';
40
+ import { LanguageModelResponseMetadata } from '../types/language-model-response-metadata';
41
+ import { ProviderMetadata } from '../types/provider-metadata';
42
+ import {
43
+ asLanguageModelUsage,
44
+ createNullLanguageModelUsage,
45
+ LanguageModelUsage,
46
+ } from '../types/usage';
47
+ import { DeepPartial, isDeepEqualData, parsePartialJson } from '../util';
48
+ import {
49
+ AsyncIterableStream,
50
+ createAsyncIterableStream,
51
+ } from '../util/async-iterable-stream';
52
+ import { createStitchableStream } from '../util/create-stitchable-stream';
53
+ import { DownloadFunction } from '../util/download/download-function';
54
+ import { now as originalNow } from '../util/now';
55
+ import { prepareRetries } from '../util/prepare-retries';
56
+ import { getOutputStrategy, OutputStrategy } from './output-strategy';
57
+ import { parseAndValidateObjectResultWithRepair } from './parse-and-validate-object-result';
58
+ import { RepairTextFunction } from './repair-text';
59
+ import { ObjectStreamPart, StreamObjectResult } from './stream-object-result';
60
+ import { validateObjectGenerationInput } from './validate-object-generation-input';
61
+
62
// Default ID generator for streamObject results: ids prefixed 'aiobj', 24 chars.
// Overridable for tests via the `_internal.generateId` option.
const originalGenerateId = createIdGenerator({ prefix: 'aiobj', size: 24 });
63
+
64
+ /**
65
+ Callback that is set using the `onError` option.
66
+
67
+ @param event - The event that is passed to the callback.
68
+ */
69
+ export type StreamObjectOnErrorCallback = (event: {
70
+ error: unknown;
71
+ }) => Promise<void> | void;
72
+
73
/**
Callback that is set using the `onFinish` option.

Called when the LLM response and the final object validation are finished.

@param event - The event that is passed to the callback.
 */
export type StreamObjectOnFinishCallback<RESULT> = (event: {
  /**
The token usage of the generated response.
   */
  usage: LanguageModelUsage;

  /**
The generated object. Can be undefined if the final object does not match the schema.
   */
  object: RESULT | undefined;

  /**
Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
   */
  error: unknown | undefined;

  /**
Response metadata.
   */
  response: LanguageModelResponseMetadata;

  /**
Warnings from the model provider (e.g. unsupported settings).
   */
  warnings?: CallWarning[];

  /**
Additional provider-specific metadata. They are passed through
to the provider from the AI SDK and enable provider-specific
functionality that can be fully encapsulated in the provider.
   */
  providerMetadata: ProviderMetadata | undefined;
}) => Promise<void> | void;
111
+
112
+ /**
113
+ Generate a structured, typed object for a given prompt and schema using a language model.
114
+
115
+ This function streams the output. If you do not want to stream the output, use `generateObject` instead.
116
+
117
+ @param model - The language model to use.
118
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
119
+
120
+ @param system - A system message that will be part of the prompt.
121
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
122
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
123
+
124
+ @param maxOutputTokens - Maximum number of tokens to generate.
125
+ @param temperature - Temperature setting.
126
+ The value is passed through to the provider. The range depends on the provider and model.
127
+ It is recommended to set either `temperature` or `topP`, but not both.
128
+ @param topP - Nucleus sampling.
129
+ The value is passed through to the provider. The range depends on the provider and model.
130
+ It is recommended to set either `temperature` or `topP`, but not both.
131
+ @param topK - Only sample from the top K options for each subsequent token.
132
+ Used to remove "long tail" low probability responses.
133
+ Recommended for advanced use cases only. You usually only need to use temperature.
134
+ @param presencePenalty - Presence penalty setting.
135
+ It affects the likelihood of the model to repeat information that is already in the prompt.
136
+ The value is passed through to the provider. The range depends on the provider and model.
137
+ @param frequencyPenalty - Frequency penalty setting.
138
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
139
+ The value is passed through to the provider. The range depends on the provider and model.
140
+ @param stopSequences - Stop sequences.
141
+ If set, the model will stop generating text when one of the stop sequences is generated.
142
+ @param seed - The seed (integer) to use for random sampling.
143
+ If set and supported by the model, calls will generate deterministic results.
144
+
145
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
146
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
147
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
148
+
149
+ @param schema - The schema of the object that the model should generate.
150
+ @param schemaName - Optional name of the output that should be generated.
151
+ Used by some providers for additional LLM guidance, e.g.
152
+ via tool or schema name.
153
+ @param schemaDescription - Optional description of the output that should be generated.
154
+ Used by some providers for additional LLM guidance, e.g.
155
+ via tool or schema description.
156
+
157
+ @param output - The type of the output.
158
+
159
+ - 'object': The output is an object.
160
+ - 'array': The output is an array.
161
+ - 'enum': The output is an enum.
162
+ - 'no-schema': The output is not a schema.
163
+
164
+ @param experimental_telemetry - Optional telemetry configuration (experimental).
165
+
166
+ @param providerOptions - Additional provider-specific options. They are passed through
167
+ to the provider from the AI SDK and enable provider-specific
168
+ functionality that can be fully encapsulated in the provider.
169
+
170
+ @returns
171
+ A result object for accessing the partial object stream and additional information.
172
+
173
+ @deprecated Use `streamText` with an `output` setting instead.
174
+ */
175
+ export function streamObject<
176
+ SCHEMA extends FlexibleSchema<unknown> = FlexibleSchema<JSONValue>,
177
+ OUTPUT extends
178
+ | 'object'
179
+ | 'array'
180
+ | 'enum'
181
+ | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object',
182
+ RESULT = OUTPUT extends 'array'
183
+ ? Array<InferSchema<SCHEMA>>
184
+ : InferSchema<SCHEMA>,
185
+ >(
186
+ options: Omit<CallSettings, 'stopSequences'> &
187
+ Prompt &
188
+ (OUTPUT extends 'enum'
189
+ ? {
190
+ /**
191
+ The enum values that the model should use.
192
+ */
193
+ enum: Array<RESULT>;
194
+ output: 'enum';
195
+ }
196
+ : OUTPUT extends 'no-schema'
197
+ ? {}
198
+ : {
199
+ /**
200
+ The schema of the object that the model should generate.
201
+ */
202
+ schema: SCHEMA;
203
+
204
+ /**
205
+ Optional name of the output that should be generated.
206
+ Used by some providers for additional LLM guidance, e.g.
207
+ via tool or schema name.
208
+ */
209
+ schemaName?: string;
210
+
211
+ /**
212
+ Optional description of the output that should be generated.
213
+ Used by some providers for additional LLM guidance, e.g.
214
+ via tool or schema description.
215
+ */
216
+ schemaDescription?: string;
217
+ }) & {
218
+ output?: OUTPUT;
219
+
220
+ /**
221
+ The language model to use.
222
+ */
223
+ model: LanguageModel;
224
+
225
+ /**
226
+ A function that attempts to repair the raw output of the model
227
+ to enable JSON parsing.
228
+ */
229
+ experimental_repairText?: RepairTextFunction;
230
+
231
+ /**
232
+ Optional telemetry configuration (experimental).
233
+ */
234
+
235
+ experimental_telemetry?: TelemetrySettings;
236
+
237
+ /**
238
+ Custom download function to use for URLs.
239
+
240
+ By default, files are downloaded if the model does not support the URL for the given media type.
241
+ */
242
+ experimental_download?: DownloadFunction | undefined;
243
+
244
+ /**
245
+ Additional provider-specific options. They are passed through
246
+ to the provider from the AI SDK and enable provider-specific
247
+ functionality that can be fully encapsulated in the provider.
248
+ */
249
+ providerOptions?: ProviderOptions;
250
+
251
+ /**
252
+ Callback that is invoked when an error occurs during streaming.
253
+ You can use it to log errors.
254
+ The stream processing will pause until the callback promise is resolved.
255
+ */
256
+ onError?: StreamObjectOnErrorCallback;
257
+
258
+ /**
259
+ Callback that is called when the LLM response and the final object validation are finished.
260
+ */
261
+ onFinish?: StreamObjectOnFinishCallback<RESULT>;
262
+
263
+ /**
264
+ * Internal. For test use only. May change without notice.
265
+ */
266
+ _internal?: {
267
+ generateId?: () => string;
268
+ currentDate?: () => Date;
269
+ now?: () => number;
270
+ };
271
+ },
272
+ ): StreamObjectResult<
273
+ OUTPUT extends 'enum'
274
+ ? string
275
+ : OUTPUT extends 'array'
276
+ ? RESULT
277
+ : DeepPartial<RESULT>,
278
+ OUTPUT extends 'array' ? RESULT : RESULT,
279
+ OUTPUT extends 'array'
280
+ ? RESULT extends Array<infer U>
281
+ ? AsyncIterableStream<U>
282
+ : never
283
+ : never
284
+ > {
285
+ const {
286
+ model,
287
+ output = 'object',
288
+ system,
289
+ prompt,
290
+ messages,
291
+ maxRetries,
292
+ abortSignal,
293
+ headers,
294
+ experimental_repairText: repairText,
295
+ experimental_telemetry: telemetry,
296
+ experimental_download: download,
297
+ providerOptions,
298
+ onError = ({ error }: { error: unknown }) => {
299
+ console.error(error);
300
+ },
301
+ onFinish,
302
+ _internal: {
303
+ generateId = originalGenerateId,
304
+ currentDate = () => new Date(),
305
+ now = originalNow,
306
+ } = {},
307
+ ...settings
308
+ } = options;
309
+
310
+ const enumValues =
311
+ 'enum' in options && options.enum ? options.enum : undefined;
312
+
313
+ const {
314
+ schema: inputSchema,
315
+ schemaDescription,
316
+ schemaName,
317
+ } = 'schema' in options ? options : {};
318
+
319
+ validateObjectGenerationInput({
320
+ output,
321
+ schema: inputSchema,
322
+ schemaName,
323
+ schemaDescription,
324
+ enumValues,
325
+ });
326
+
327
+ const outputStrategy = getOutputStrategy({
328
+ output,
329
+ schema: inputSchema,
330
+ enumValues,
331
+ });
332
+
333
+ return new DefaultStreamObjectResult({
334
+ model,
335
+ telemetry,
336
+ headers,
337
+ settings,
338
+ maxRetries,
339
+ abortSignal,
340
+ outputStrategy,
341
+ system,
342
+ prompt,
343
+ messages,
344
+ schemaName,
345
+ schemaDescription,
346
+ providerOptions,
347
+ repairText,
348
+ onError,
349
+ onFinish,
350
+ download,
351
+ generateId,
352
+ currentDate,
353
+ now,
354
+ });
355
+ }
356
+
357
/**
 * Default implementation of `StreamObjectResult` that drives a single
 * `model.doStream` call and exposes the result as lazily-consumed streams
 * (`partialObjectStream`, `textStream`, `elementStream`, `fullStream`) plus
 * promise-based accessors (`object`, `usage`, `response`, …).
 *
 * All promise accessors are backed by `DelayedPromise`s that are resolved
 * while the stream is processed; consumers may await them before, during,
 * or after stream consumption.
 */
class DefaultStreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
  implements StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
{
  // Deferred results, resolved (or rejected) as the stream progresses:
  private readonly _object = new DelayedPromise<RESULT>();
  private readonly _usage = new DelayedPromise<LanguageModelUsage>();
  private readonly _providerMetadata = new DelayedPromise<
    ProviderMetadata | undefined
  >();
  private readonly _warnings = new DelayedPromise<CallWarning[] | undefined>();
  private readonly _request =
    new DelayedPromise<LanguageModelRequestMetadata>();
  private readonly _response =
    new DelayedPromise<LanguageModelResponseMetadata>();
  private readonly _finishReason = new DelayedPromise<FinishReason>();

  // Single underlying stream of object-stream parts; every public stream
  // getter is a view derived from it via pipeThrough.
  private readonly baseStream: ReadableStream<ObjectStreamPart<PARTIAL>>;

  // Strategy that validates partial results and creates the element stream
  // (object / array / enum / no-schema modes).
  private readonly outputStrategy: OutputStrategy<
    PARTIAL,
    RESULT,
    ELEMENT_STREAM
  >;

  /**
   * Kicks off the model call immediately. The constructor wires up:
   * 1. a stitchable stream that `baseStream` reads from,
   * 2. an outer telemetry span (`ai.streamObject`) and an inner per-call
   *    span (`ai.streamObject.doStream`, retried via `retry`),
   * 3. a transform pipeline that accumulates text deltas, incrementally
   *    parses partial JSON, validates partials via the output strategy,
   *    and resolves the deferred promises on `finish`.
   */
  constructor({
    model: modelArg,
    headers,
    telemetry,
    settings,
    maxRetries: maxRetriesArg,
    abortSignal,
    outputStrategy,
    system,
    prompt,
    messages,
    schemaName,
    schemaDescription,
    providerOptions,
    repairText,
    onError,
    onFinish,
    download,
    generateId,
    currentDate,
    now,
  }: {
    model: LanguageModel;
    telemetry: TelemetrySettings | undefined;
    headers: Record<string, string | undefined> | undefined;
    settings: Omit<CallSettings, 'abortSignal' | 'headers'>;
    maxRetries: number | undefined;
    abortSignal: AbortSignal | undefined;
    outputStrategy: OutputStrategy<PARTIAL, RESULT, ELEMENT_STREAM>;
    system: Prompt['system'];
    prompt: Prompt['prompt'];
    messages: Prompt['messages'];
    schemaName: string | undefined;
    schemaDescription: string | undefined;
    providerOptions: ProviderOptions | undefined;
    repairText: RepairTextFunction | undefined;
    onError: StreamObjectOnErrorCallback;
    onFinish: StreamObjectOnFinishCallback<RESULT> | undefined;
    download: DownloadFunction | undefined;
    generateId: () => string;
    currentDate: () => Date;
    now: () => number;
  }) {
    const model = resolveLanguageModel(modelArg);

    const { maxRetries, retry } = prepareRetries({
      maxRetries: maxRetriesArg,
      abortSignal,
    });

    const callSettings = prepareCallSettings(settings);

    const baseTelemetryAttributes = getBaseTelemetryAttributes({
      model,
      telemetry,
      headers,
      settings: { ...callSettings, maxRetries },
    });

    const tracer = getTracer(telemetry);
    // captured so the async span callbacks below can resolve the
    // instance's DelayedPromises without rebinding `this`:
    const self = this;

    const stitchableStream =
      createStitchableStream<ObjectStreamPart<PARTIAL>>();

    // Pass-through transform whose only job is to surface error chunks to
    // the onError callback (the chunk itself is still forwarded downstream).
    const eventProcessor = new TransformStream<
      ObjectStreamPart<PARTIAL>,
      ObjectStreamPart<PARTIAL>
    >({
      transform(chunk, controller) {
        controller.enqueue(chunk);

        if (chunk.type === 'error') {
          onError({ error: wrapGatewayError(chunk.error) });
        }
      },
    });

    this.baseStream = stitchableStream.stream.pipeThrough(eventProcessor);

    // Note: recordSpan's promise is intentionally not awaited here; the
    // constructor returns immediately and the stream fills in asynchronously.
    recordSpan({
      name: 'ai.streamObject',
      attributes: selectTelemetryAttributes({
        telemetry,
        attributes: {
          ...assembleOperationName({
            operationId: 'ai.streamObject',
            telemetry,
          }),
          ...baseTelemetryAttributes,
          // specific settings that only make sense on the outer level:
          'ai.prompt': {
            input: () => JSON.stringify({ system, prompt, messages }),
          },
          'ai.schema': {
            input: async () =>
              JSON.stringify(await outputStrategy.jsonSchema()),
          },
          'ai.schema.name': schemaName,
          'ai.schema.description': schemaDescription,
          'ai.settings.output': outputStrategy.type,
        },
      }),
      tracer,
      endWhenDone: false, // root span is ended manually in flush()
      fn: async rootSpan => {
        const standardizedPrompt = await standardizePrompt({
          system,
          prompt,
          messages,
        } as Prompt);

        const callOptions = {
          responseFormat: {
            type: 'json' as const,
            schema: await outputStrategy.jsonSchema(),
            name: schemaName,
            description: schemaDescription,
          },
          ...prepareCallSettings(settings),
          prompt: await convertToLanguageModelPrompt({
            prompt: standardizedPrompt,
            supportedUrls: await model.supportedUrls,
            download,
          }),
          providerOptions,
          abortSignal,
          headers,
          includeRawChunks: false,
        };

        // First stage: reduce provider stream parts to either raw text
        // deltas (as plain strings) or the metadata variants of
        // ObjectStreamInputPart. Unlisted chunk types are dropped.
        const transformer: Transformer<
          LanguageModelV3StreamPart,
          ObjectStreamInputPart
        > = {
          transform: (chunk, controller) => {
            switch (chunk.type) {
              case 'text-delta':
                controller.enqueue(chunk.delta);
                break;
              case 'response-metadata':
              case 'finish':
              case 'error':
              case 'stream-start':
                controller.enqueue(chunk);
                break;
            }
          },
        };

        const {
          result: { stream, response, request },
          doStreamSpan,
          startTimestampMs,
        } = await retry(() =>
          recordSpan({
            name: 'ai.streamObject.doStream',
            attributes: selectTelemetryAttributes({
              telemetry,
              attributes: {
                ...assembleOperationName({
                  operationId: 'ai.streamObject.doStream',
                  telemetry,
                }),
                ...baseTelemetryAttributes,
                'ai.prompt.messages': {
                  input: () => stringifyForTelemetry(callOptions.prompt),
                },

                // standardized gen-ai llm span attributes:
                'gen_ai.system': model.provider,
                'gen_ai.request.model': model.modelId,
                'gen_ai.request.frequency_penalty':
                  callSettings.frequencyPenalty,
                'gen_ai.request.max_tokens': callSettings.maxOutputTokens,
                'gen_ai.request.presence_penalty': callSettings.presencePenalty,
                'gen_ai.request.temperature': callSettings.temperature,
                'gen_ai.request.top_k': callSettings.topK,
                'gen_ai.request.top_p': callSettings.topP,
              },
            }),
            tracer,
            endWhenDone: false, // doStreamSpan is ended manually in flush()
            fn: async doStreamSpan => ({
              startTimestampMs: now(),
              doStreamSpan,
              result: await model.doStream(callOptions),
            }),
          }),
        );

        self._request.resolve(request ?? {});

        // store information for onFinish callback:
        let warnings: SharedV3Warning[] | undefined;
        let usage: LanguageModelUsage = createNullLanguageModelUsage();
        let finishReason: FinishReason | undefined;
        let providerMetadata: ProviderMetadata | undefined;
        let object: RESULT | undefined;
        let error: unknown | undefined;

        // pipe chunks through a transformation stream that extracts metadata:
        let accumulatedText = '';
        let textDelta = '';
        let fullResponse: {
          id: string;
          timestamp: Date;
          modelId: string;
        } = {
          id: generateId(),
          timestamp: currentDate(),
          modelId: model.modelId,
        };

        // Keep track of raw parse result before type validation, since e.g. Zod might
        // change the object by mapping properties.
        let latestObjectJson: JSONValue | undefined = undefined;
        let latestObject: PARTIAL | undefined = undefined;
        let isFirstChunk = true;
        let isFirstDelta = true;

        // Second stage: accumulate text, incrementally parse partial JSON,
        // and emit validated `object` / `text-delta` parts downstream.
        const transformedStream = stream
          .pipeThrough(new TransformStream(transformer))
          .pipeThrough(
            new TransformStream<
              string | ObjectStreamInputPart,
              ObjectStreamPart<PARTIAL>
            >({
              async transform(chunk, controller): Promise<void> {
                if (
                  typeof chunk === 'object' &&
                  chunk.type === 'stream-start'
                ) {
                  warnings = chunk.warnings;
                  return; // stream start chunks are sent immediately and do not count as first chunk
                }

                // Telemetry event for first chunk:
                if (isFirstChunk) {
                  const msToFirstChunk = now() - startTimestampMs;

                  isFirstChunk = false;

                  doStreamSpan.addEvent('ai.stream.firstChunk', {
                    'ai.stream.msToFirstChunk': msToFirstChunk,
                  });

                  doStreamSpan.setAttributes({
                    'ai.stream.msToFirstChunk': msToFirstChunk,
                  });
                }

                // process partial text chunks
                if (typeof chunk === 'string') {
                  accumulatedText += chunk;
                  textDelta += chunk;

                  const { value: currentObjectJson, state: parseState } =
                    await parsePartialJson(accumulatedText);

                  // only re-validate when the parsed JSON actually changed:
                  if (
                    currentObjectJson !== undefined &&
                    !isDeepEqualData(latestObjectJson, currentObjectJson)
                  ) {
                    const validationResult =
                      await outputStrategy.validatePartialResult({
                        value: currentObjectJson,
                        textDelta,
                        latestObject,
                        isFirstDelta,
                        isFinalDelta: parseState === 'successful-parse',
                      });

                    if (
                      validationResult.success &&
                      !isDeepEqualData(
                        latestObject,
                        validationResult.value.partial,
                      )
                    ) {
                      // inside inner check to correctly parse the final element in array mode:
                      latestObjectJson = currentObjectJson;
                      latestObject = validationResult.value.partial;

                      controller.enqueue({
                        type: 'object',
                        object: latestObject,
                      });

                      controller.enqueue({
                        type: 'text-delta',
                        textDelta: validationResult.value.textDelta,
                      });

                      // textDelta is only flushed on successful validation;
                      // otherwise it keeps accumulating for the next attempt:
                      textDelta = '';
                      isFirstDelta = false;
                    }
                  }

                  return;
                }

                switch (chunk.type) {
                  case 'response-metadata': {
                    fullResponse = {
                      id: chunk.id ?? fullResponse.id,
                      timestamp: chunk.timestamp ?? fullResponse.timestamp,
                      modelId: chunk.modelId ?? fullResponse.modelId,
                    };
                    break;
                  }

                  case 'finish': {
                    // send final text delta:
                    if (textDelta !== '') {
                      controller.enqueue({ type: 'text-delta', textDelta });
                    }

                    // store finish reason for telemetry:
                    finishReason = chunk.finishReason.unified;

                    // store usage and metadata for promises and onFinish callback:
                    usage = asLanguageModelUsage(chunk.usage);
                    providerMetadata = chunk.providerMetadata;

                    controller.enqueue({
                      ...chunk,
                      finishReason: chunk.finishReason.unified,
                      usage,
                      response: fullResponse,
                    });

                    // log warnings:
                    logWarnings({
                      warnings: warnings ?? [],
                      provider: model.provider,
                      model: model.modelId,
                    });

                    // resolve promises that can be resolved now:
                    self._usage.resolve(usage);
                    self._providerMetadata.resolve(providerMetadata);
                    self._warnings.resolve(warnings);
                    self._response.resolve({
                      ...fullResponse,
                      headers: response?.headers,
                    });
                    self._finishReason.resolve(finishReason ?? 'other');

                    // final parse + validation (with optional text repair);
                    // failure rejects the `object` promise but does not
                    // abort the stream itself:
                    try {
                      object = await parseAndValidateObjectResultWithRepair(
                        accumulatedText,
                        outputStrategy,
                        repairText,
                        {
                          response: fullResponse,
                          usage,
                          finishReason,
                        },
                      );
                      self._object.resolve(object);
                    } catch (e) {
                      error = e;
                      self._object.reject(e);
                    }
                    break;
                  }

                  default: {
                    // 'error' chunks (and any future pass-through types)
                    // are forwarded unchanged:
                    controller.enqueue(chunk);
                    break;
                  }
                }
              },

              // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
              async flush(controller) {
                try {
                  // NOTE(review): `usage` is initialized via
                  // createNullLanguageModelUsage() above and reassigned on
                  // 'finish', so this ?? fallback looks unreachable — and its
                  // field names (promptTokens/completionTokens) do not match
                  // the inputTokens/outputTokens fields read below. Presumably
                  // a leftover from an older usage shape — verify and remove.
                  const finalUsage = usage ?? {
                    promptTokens: NaN,
                    completionTokens: NaN,
                    totalTokens: NaN,
                  };

                  doStreamSpan.setAttributes(
                    await selectTelemetryAttributes({
                      telemetry,
                      attributes: {
                        'ai.response.finishReason': finishReason,
                        'ai.response.object': {
                          output: () => JSON.stringify(object),
                        },
                        'ai.response.id': fullResponse.id,
                        'ai.response.model': fullResponse.modelId,
                        'ai.response.timestamp':
                          fullResponse.timestamp.toISOString(),
                        'ai.response.providerMetadata':
                          JSON.stringify(providerMetadata),

                        'ai.usage.inputTokens': finalUsage.inputTokens,
                        'ai.usage.outputTokens': finalUsage.outputTokens,
                        'ai.usage.totalTokens': finalUsage.totalTokens,
                        'ai.usage.reasoningTokens': finalUsage.reasoningTokens,
                        'ai.usage.cachedInputTokens':
                          finalUsage.cachedInputTokens,

                        // standardized gen-ai llm span attributes:
                        'gen_ai.response.finish_reasons': [finishReason],
                        'gen_ai.response.id': fullResponse.id,
                        'gen_ai.response.model': fullResponse.modelId,
                        'gen_ai.usage.input_tokens': finalUsage.inputTokens,
                        'gen_ai.usage.output_tokens': finalUsage.outputTokens,
                      },
                    }),
                  );

                  // finish doStreamSpan before other operations for correct timing:
                  doStreamSpan.end();

                  // Add response information to the root span:
                  rootSpan.setAttributes(
                    await selectTelemetryAttributes({
                      telemetry,
                      attributes: {
                        'ai.usage.inputTokens': finalUsage.inputTokens,
                        'ai.usage.outputTokens': finalUsage.outputTokens,
                        'ai.usage.totalTokens': finalUsage.totalTokens,
                        'ai.usage.reasoningTokens': finalUsage.reasoningTokens,
                        'ai.usage.cachedInputTokens':
                          finalUsage.cachedInputTokens,
                        'ai.response.object': {
                          output: () => JSON.stringify(object),
                        },
                        'ai.response.providerMetadata':
                          JSON.stringify(providerMetadata),
                      },
                    }),
                  );

                  // call onFinish callback:
                  await onFinish?.({
                    usage: finalUsage,
                    object,
                    error,
                    response: {
                      ...fullResponse,
                      headers: response?.headers,
                    },
                    warnings,
                    providerMetadata,
                  });
                } catch (error) {
                  // telemetry/onFinish failures are surfaced as stream errors:
                  controller.enqueue({ type: 'error', error });
                } finally {
                  rootSpan.end();
                }
              },
            }),
          );

        stitchableStream.addStream(transformedStream);
      },
    })
      .catch(error => {
        // add an empty stream with an error to break the stream:
        stitchableStream.addStream(
          new ReadableStream({
            start(controller) {
              controller.enqueue({ type: 'error', error });
              controller.close();
            },
          }),
        );
      })
      .finally(() => {
        stitchableStream.close();
      });

    this.outputStrategy = outputStrategy;
  }

  /** Final, fully validated object. Rejects if parsing/validation fails. */
  get object() {
    return this._object.promise;
  }

  /** Token usage as reported by the provider's 'finish' chunk. */
  get usage() {
    return this._usage.promise;
  }

  /** Provider-specific metadata from the 'finish' chunk, if any. */
  get providerMetadata() {
    return this._providerMetadata.promise;
  }

  /** Warnings from the provider's 'stream-start' chunk, if any. */
  get warnings() {
    return this._warnings.promise;
  }

  /** Request metadata returned by the provider call. */
  get request() {
    return this._request.promise;
  }

  /** Response metadata (id, timestamp, modelId, headers). */
  get response() {
    return this._response.promise;
  }

  /** Unified finish reason; defaults to 'other' when none was reported. */
  get finishReason() {
    return this._finishReason.promise;
  }

  /**
   * Stream of successively more complete partial objects. Errors are
   * suppressed here by design — use the onError callback instead.
   */
  get partialObjectStream(): AsyncIterableStream<PARTIAL> {
    return createAsyncIterableStream(
      this.baseStream.pipeThrough(
        new TransformStream<ObjectStreamPart<PARTIAL>, PARTIAL>({
          transform(chunk, controller) {
            switch (chunk.type) {
              case 'object':
                controller.enqueue(chunk.object);
                break;

              case 'text-delta':
              case 'finish':
              case 'error': // suppress error (use onError instead)
                break;

              default: {
                const _exhaustiveCheck: never = chunk;
                throw new Error(`Unsupported chunk type: ${_exhaustiveCheck}`);
              }
            }
          },
        }),
      ),
    );
  }

  /** Per-element stream (array output mode); shape defined by the strategy. */
  get elementStream(): ELEMENT_STREAM {
    return this.outputStrategy.createElementStream(this.baseStream);
  }

  /**
   * Raw text deltas of the generated JSON. Errors are suppressed here by
   * design — use the onError callback instead.
   */
  get textStream(): AsyncIterableStream<string> {
    return createAsyncIterableStream(
      this.baseStream.pipeThrough(
        new TransformStream<ObjectStreamPart<PARTIAL>, string>({
          transform(chunk, controller) {
            switch (chunk.type) {
              case 'text-delta':
                controller.enqueue(chunk.textDelta);
                break;

              case 'object':
              case 'finish':
              case 'error': // suppress error (use onError instead)
                break;

              default: {
                const _exhaustiveCheck: never = chunk;
                throw new Error(`Unsupported chunk type: ${_exhaustiveCheck}`);
              }
            }
          },
        }),
      ),
    );
  }

  /** All object-stream parts, including errors and the finish part. */
  get fullStream(): AsyncIterableStream<ObjectStreamPart<PARTIAL>> {
    return createAsyncIterableStream(this.baseStream);
  }

  /** Pipes the text stream into a Node.js ServerResponse. */
  pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit) {
    pipeTextStreamToResponse({
      response,
      textStream: this.textStream,
      ...init,
    });
  }

  /** Creates a web Response that streams the generated text. */
  toTextStreamResponse(init?: ResponseInit): Response {
    return createTextStreamResponse({
      textStream: this.textStream,
      ...init,
    });
  }
}
964
+
965
/**
 * Union of the chunk types that feed the object-stream transformation in
 * `DefaultStreamObjectResult`: a plain `string` carries a raw text delta of
 * the generated JSON; the object variants carry stream lifecycle metadata.
 */
export type ObjectStreamInputPart =
  | string
  | {
      // emitted once at the beginning of the provider stream
      type: 'stream-start';
      warnings: SharedV3Warning[];
    }
  | {
      type: 'error';
      error: unknown;
    }
  | {
      // all fields optional; later chunks override earlier defaults
      type: 'response-metadata';
      id?: string;
      timestamp?: Date;
      modelId?: string;
    }
  | {
      // terminal chunk carrying finish reason, usage, and provider metadata
      type: 'finish';
      finishReason: LanguageModelV3FinishReason;
      usage: LanguageModelV3Usage;
      providerMetadata?: SharedV3ProviderMetadata;
    };