@ai-sdk/openai 3.0.14 → 3.0.15

This diff shows the published contents of the two package versions as they appear in their public registries, and is provided for informational purposes only.
Files changed (110)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/index.js +1 -1
  3. package/dist/index.mjs +1 -1
  4. package/package.json +6 -5
  5. package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +8 -0
  6. package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +88 -0
  7. package/src/chat/convert-openai-chat-usage.ts +57 -0
  8. package/src/chat/convert-to-openai-chat-messages.test.ts +516 -0
  9. package/src/chat/convert-to-openai-chat-messages.ts +225 -0
  10. package/src/chat/get-response-metadata.ts +15 -0
  11. package/src/chat/map-openai-finish-reason.ts +19 -0
  12. package/src/chat/openai-chat-api.ts +198 -0
  13. package/src/chat/openai-chat-language-model.test.ts +3496 -0
  14. package/src/chat/openai-chat-language-model.ts +700 -0
  15. package/src/chat/openai-chat-options.ts +186 -0
  16. package/src/chat/openai-chat-prepare-tools.test.ts +322 -0
  17. package/src/chat/openai-chat-prepare-tools.ts +84 -0
  18. package/src/chat/openai-chat-prompt.ts +70 -0
  19. package/src/completion/convert-openai-completion-usage.ts +46 -0
  20. package/src/completion/convert-to-openai-completion-prompt.ts +93 -0
  21. package/src/completion/get-response-metadata.ts +15 -0
  22. package/src/completion/map-openai-finish-reason.ts +19 -0
  23. package/src/completion/openai-completion-api.ts +81 -0
  24. package/src/completion/openai-completion-language-model.test.ts +752 -0
  25. package/src/completion/openai-completion-language-model.ts +336 -0
  26. package/src/completion/openai-completion-options.ts +58 -0
  27. package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +43 -0
  28. package/src/embedding/openai-embedding-api.ts +13 -0
  29. package/src/embedding/openai-embedding-model.test.ts +146 -0
  30. package/src/embedding/openai-embedding-model.ts +95 -0
  31. package/src/embedding/openai-embedding-options.ts +30 -0
  32. package/src/image/openai-image-api.ts +35 -0
  33. package/src/image/openai-image-model.test.ts +722 -0
  34. package/src/image/openai-image-model.ts +305 -0
  35. package/src/image/openai-image-options.ts +28 -0
  36. package/src/index.ts +9 -0
  37. package/src/internal/index.ts +19 -0
  38. package/src/openai-config.ts +18 -0
  39. package/src/openai-error.test.ts +34 -0
  40. package/src/openai-error.ts +22 -0
  41. package/src/openai-language-model-capabilities.test.ts +93 -0
  42. package/src/openai-language-model-capabilities.ts +54 -0
  43. package/src/openai-provider.test.ts +98 -0
  44. package/src/openai-provider.ts +270 -0
  45. package/src/openai-tools.ts +114 -0
  46. package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +5 -0
  47. package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +38 -0
  48. package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +69 -0
  49. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +393 -0
  50. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +137 -0
  51. package/src/responses/__fixtures__/openai-error.1.chunks.txt +4 -0
  52. package/src/responses/__fixtures__/openai-error.1.json +8 -0
  53. package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +94 -0
  54. package/src/responses/__fixtures__/openai-file-search-tool.1.json +89 -0
  55. package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +93 -0
  56. package/src/responses/__fixtures__/openai-file-search-tool.2.json +112 -0
  57. package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +16 -0
  58. package/src/responses/__fixtures__/openai-image-generation-tool.1.json +96 -0
  59. package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +7 -0
  60. package/src/responses/__fixtures__/openai-local-shell-tool.1.json +70 -0
  61. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +11 -0
  62. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +169 -0
  63. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +123 -0
  64. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +176 -0
  65. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +11 -0
  66. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +169 -0
  67. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +84 -0
  68. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +182 -0
  69. package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +373 -0
  70. package/src/responses/__fixtures__/openai-mcp-tool.1.json +159 -0
  71. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +110 -0
  72. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +117 -0
  73. package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +182 -0
  74. package/src/responses/__fixtures__/openai-shell-tool.1.json +73 -0
  75. package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +185 -0
  76. package/src/responses/__fixtures__/openai-web-search-tool.1.json +266 -0
  77. package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +10955 -0
  78. package/src/responses/convert-openai-responses-usage.ts +53 -0
  79. package/src/responses/convert-to-openai-responses-input.test.ts +2976 -0
  80. package/src/responses/convert-to-openai-responses-input.ts +578 -0
  81. package/src/responses/map-openai-responses-finish-reason.ts +22 -0
  82. package/src/responses/openai-responses-api.test.ts +89 -0
  83. package/src/responses/openai-responses-api.ts +1086 -0
  84. package/src/responses/openai-responses-language-model.test.ts +6927 -0
  85. package/src/responses/openai-responses-language-model.ts +1932 -0
  86. package/src/responses/openai-responses-options.ts +312 -0
  87. package/src/responses/openai-responses-prepare-tools.test.ts +924 -0
  88. package/src/responses/openai-responses-prepare-tools.ts +264 -0
  89. package/src/responses/openai-responses-provider-metadata.ts +39 -0
  90. package/src/speech/openai-speech-api.ts +38 -0
  91. package/src/speech/openai-speech-model.test.ts +202 -0
  92. package/src/speech/openai-speech-model.ts +137 -0
  93. package/src/speech/openai-speech-options.ts +22 -0
  94. package/src/tool/apply-patch.ts +141 -0
  95. package/src/tool/code-interpreter.ts +104 -0
  96. package/src/tool/file-search.ts +145 -0
  97. package/src/tool/image-generation.ts +126 -0
  98. package/src/tool/local-shell.test-d.ts +20 -0
  99. package/src/tool/local-shell.ts +72 -0
  100. package/src/tool/mcp.ts +125 -0
  101. package/src/tool/shell.ts +85 -0
  102. package/src/tool/web-search-preview.ts +139 -0
  103. package/src/tool/web-search.test-d.ts +13 -0
  104. package/src/tool/web-search.ts +179 -0
  105. package/src/transcription/openai-transcription-api.ts +37 -0
  106. package/src/transcription/openai-transcription-model.test.ts +507 -0
  107. package/src/transcription/openai-transcription-model.ts +232 -0
  108. package/src/transcription/openai-transcription-options.ts +50 -0
  109. package/src/transcription/transcription-test.mp3 +0 -0
  110. package/src/version.ts +6 -0
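The largest addition is the new chat language model (file 14, `package/src/chat/openai-chat-language-model.ts`); its full diff follows. As orientation, here is a minimal sketch of how that class gets wired up, based on the `OpenAIChatConfig` type and constructor visible in the diff. The import path assumes the class is re-exported through `src/internal/index.ts` (file 37 above); the base URL and auth header are illustrative assumptions, not values taken from this package.

```ts
// Minimal sketch, assuming OpenAIChatLanguageModel is re-exported via the
// internal entry point. Base URL and header values are illustrative.
import { OpenAIChatLanguageModel } from '@ai-sdk/openai/internal';

const model = new OpenAIChatLanguageModel('gpt-4o', {
  provider: 'openai.chat',
  // `url` receives { modelId, path } and returns the request URL:
  url: ({ path }) => `https://api.openai.com/v1${path}`,
  // `headers` is a thunk so credentials are resolved per request:
  headers: () => ({
    Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
  }),
});
```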
package/src/chat/openai-chat-language-model.ts
@@ -0,0 +1,700 @@
+import {
+  InvalidResponseDataError,
+  LanguageModelV3,
+  LanguageModelV3CallOptions,
+  LanguageModelV3Content,
+  LanguageModelV3FinishReason,
+  LanguageModelV3GenerateResult,
+  LanguageModelV3StreamPart,
+  LanguageModelV3StreamResult,
+  SharedV3ProviderMetadata,
+  SharedV3Warning,
+} from '@ai-sdk/provider';
+import {
+  FetchFunction,
+  ParseResult,
+  combineHeaders,
+  createEventSourceResponseHandler,
+  createJsonResponseHandler,
+  generateId,
+  isParsableJson,
+  parseProviderOptions,
+  postJsonToApi,
+} from '@ai-sdk/provider-utils';
+import { openaiFailedResponseHandler } from '../openai-error';
+import { getOpenAILanguageModelCapabilities } from '../openai-language-model-capabilities';
+import {
+  OpenAIChatUsage,
+  convertOpenAIChatUsage,
+} from './convert-openai-chat-usage';
+import { convertToOpenAIChatMessages } from './convert-to-openai-chat-messages';
+import { getResponseMetadata } from './get-response-metadata';
+import { mapOpenAIFinishReason } from './map-openai-finish-reason';
+import {
+  OpenAIChatChunk,
+  openaiChatChunkSchema,
+  openaiChatResponseSchema,
+} from './openai-chat-api';
+import {
+  OpenAIChatModelId,
+  openaiChatLanguageModelOptions,
+} from './openai-chat-options';
+import { prepareChatTools } from './openai-chat-prepare-tools';
+
+type OpenAIChatConfig = {
+  provider: string;
+  headers: () => Record<string, string | undefined>;
+  url: (options: { modelId: string; path: string }) => string;
+  fetch?: FetchFunction;
+};
+
+export class OpenAIChatLanguageModel implements LanguageModelV3 {
+  readonly specificationVersion = 'v3';
+
+  readonly modelId: OpenAIChatModelId;
+
+  readonly supportedUrls = {
+    'image/*': [/^https?:\/\/.*$/],
+  };
+
+  private readonly config: OpenAIChatConfig;
+
+  constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig) {
+    this.modelId = modelId;
+    this.config = config;
+  }
+
+  get provider(): string {
+    return this.config.provider;
+  }
+
+  private async getArgs({
+    prompt,
+    maxOutputTokens,
+    temperature,
+    topP,
+    topK,
+    frequencyPenalty,
+    presencePenalty,
+    stopSequences,
+    responseFormat,
+    seed,
+    tools,
+    toolChoice,
+    providerOptions,
+  }: LanguageModelV3CallOptions) {
+    const warnings: SharedV3Warning[] = [];
+
+    // Parse provider options
+    const openaiOptions =
+      (await parseProviderOptions({
+        provider: 'openai',
+        providerOptions,
+        schema: openaiChatLanguageModelOptions,
+      })) ?? {};
+
+    const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
+    const isReasoningModel =
+      openaiOptions.forceReasoning ?? modelCapabilities.isReasoningModel;
+
+    if (topK != null) {
+      warnings.push({ type: 'unsupported', feature: 'topK' });
+    }
+
+    const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
+      {
+        prompt,
+        systemMessageMode:
+          openaiOptions.systemMessageMode ??
+          (isReasoningModel
+            ? 'developer'
+            : modelCapabilities.systemMessageMode),
+      },
+    );
+
+    warnings.push(...messageWarnings);
+
+    const strictJsonSchema = openaiOptions.strictJsonSchema ?? true;
+
+    const baseArgs = {
+      // model id:
+      model: this.modelId,
+
+      // model specific settings:
+      logit_bias: openaiOptions.logitBias,
+      logprobs:
+        openaiOptions.logprobs === true ||
+        typeof openaiOptions.logprobs === 'number'
+          ? true
+          : undefined,
+      top_logprobs:
+        typeof openaiOptions.logprobs === 'number'
+          ? openaiOptions.logprobs
+          : typeof openaiOptions.logprobs === 'boolean'
+            ? openaiOptions.logprobs
+              ? 0
+              : undefined
+            : undefined,
+      user: openaiOptions.user,
+      parallel_tool_calls: openaiOptions.parallelToolCalls,
+
+      // standardized settings:
+      max_tokens: maxOutputTokens,
+      temperature,
+      top_p: topP,
+      frequency_penalty: frequencyPenalty,
+      presence_penalty: presencePenalty,
+      response_format:
+        responseFormat?.type === 'json'
+          ? responseFormat.schema != null
+            ? {
+                type: 'json_schema',
+                json_schema: {
+                  schema: responseFormat.schema,
+                  strict: strictJsonSchema,
+                  name: responseFormat.name ?? 'response',
+                  description: responseFormat.description,
+                },
+              }
+            : { type: 'json_object' }
+          : undefined,
+      stop: stopSequences,
+      seed,
+      verbosity: openaiOptions.textVerbosity,
+
+      // openai specific settings:
+      // TODO AI SDK 6: remove, we auto-map maxOutputTokens now
+      max_completion_tokens: openaiOptions.maxCompletionTokens,
+      store: openaiOptions.store,
+      metadata: openaiOptions.metadata,
+      prediction: openaiOptions.prediction,
+      reasoning_effort: openaiOptions.reasoningEffort,
+      service_tier: openaiOptions.serviceTier,
+      prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
+      safety_identifier: openaiOptions.safetyIdentifier,
+
+      // messages:
+      messages,
+    };
+
+    // remove unsupported settings for reasoning models
+    // see https://platform.openai.com/docs/guides/reasoning#limitations
+    if (isReasoningModel) {
+      // when reasoning effort is none, gpt-5.1 models allow temperature, topP, logprobs
+      // https://platform.openai.com/docs/guides/latest-model#gpt-5-1-parameter-compatibility
+      if (
+        openaiOptions.reasoningEffort !== 'none' ||
+        !modelCapabilities.supportsNonReasoningParameters
+      ) {
+        if (baseArgs.temperature != null) {
+          baseArgs.temperature = undefined;
+          warnings.push({
+            type: 'unsupported',
+            feature: 'temperature',
+            details: 'temperature is not supported for reasoning models',
+          });
+        }
+        if (baseArgs.top_p != null) {
+          baseArgs.top_p = undefined;
+          warnings.push({
+            type: 'unsupported',
+            feature: 'topP',
+            details: 'topP is not supported for reasoning models',
+          });
+        }
+        if (baseArgs.logprobs != null) {
+          baseArgs.logprobs = undefined;
+          warnings.push({
+            type: 'other',
+            message: 'logprobs is not supported for reasoning models',
+          });
+        }
+      }
+
+      if (baseArgs.frequency_penalty != null) {
+        baseArgs.frequency_penalty = undefined;
+        warnings.push({
+          type: 'unsupported',
+          feature: 'frequencyPenalty',
+          details: 'frequencyPenalty is not supported for reasoning models',
+        });
+      }
+      if (baseArgs.presence_penalty != null) {
+        baseArgs.presence_penalty = undefined;
+        warnings.push({
+          type: 'unsupported',
+          feature: 'presencePenalty',
+          details: 'presencePenalty is not supported for reasoning models',
+        });
+      }
+      if (baseArgs.logit_bias != null) {
+        baseArgs.logit_bias = undefined;
+        warnings.push({
+          type: 'other',
+          message: 'logitBias is not supported for reasoning models',
+        });
+      }
+
+      if (baseArgs.top_logprobs != null) {
+        baseArgs.top_logprobs = undefined;
+        warnings.push({
+          type: 'other',
+          message: 'topLogprobs is not supported for reasoning models',
+        });
+      }
+
+      // reasoning models use max_completion_tokens instead of max_tokens:
+      if (baseArgs.max_tokens != null) {
+        if (baseArgs.max_completion_tokens == null) {
+          baseArgs.max_completion_tokens = baseArgs.max_tokens;
+        }
+        baseArgs.max_tokens = undefined;
+      }
+    } else if (
+      this.modelId.startsWith('gpt-4o-search-preview') ||
+      this.modelId.startsWith('gpt-4o-mini-search-preview')
+    ) {
+      if (baseArgs.temperature != null) {
+        baseArgs.temperature = undefined;
+        warnings.push({
+          type: 'unsupported',
+          feature: 'temperature',
+          details:
+            'temperature is not supported for the search preview models and has been removed.',
+        });
+      }
+    }
+
+    // Validate flex processing support
+    if (
+      openaiOptions.serviceTier === 'flex' &&
+      !modelCapabilities.supportsFlexProcessing
+    ) {
+      warnings.push({
+        type: 'unsupported',
+        feature: 'serviceTier',
+        details:
+          'flex processing is only available for o3, o4-mini, and gpt-5 models',
+      });
+      baseArgs.service_tier = undefined;
+    }
+
+    // Validate priority processing support
+    if (
+      openaiOptions.serviceTier === 'priority' &&
+      !modelCapabilities.supportsPriorityProcessing
+    ) {
+      warnings.push({
+        type: 'unsupported',
+        feature: 'serviceTier',
+        details:
+          'priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported',
+      });
+      baseArgs.service_tier = undefined;
+    }
+
+    const {
+      tools: openaiTools,
+      toolChoice: openaiToolChoice,
+      toolWarnings,
+    } = prepareChatTools({
+      tools,
+      toolChoice,
+    });
+
+    return {
+      args: {
+        ...baseArgs,
+        tools: openaiTools,
+        tool_choice: openaiToolChoice,
+      },
+      warnings: [...warnings, ...toolWarnings],
+    };
+  }
+
+  async doGenerate(
+    options: LanguageModelV3CallOptions,
+  ): Promise<LanguageModelV3GenerateResult> {
+    const { args: body, warnings } = await this.getArgs(options);
+
+    const {
+      responseHeaders,
+      value: response,
+      rawValue: rawResponse,
+    } = await postJsonToApi({
+      url: this.config.url({
+        path: '/chat/completions',
+        modelId: this.modelId,
+      }),
+      headers: combineHeaders(this.config.headers(), options.headers),
+      body,
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: createJsonResponseHandler(
+        openaiChatResponseSchema,
+      ),
+      abortSignal: options.abortSignal,
+      fetch: this.config.fetch,
+    });
+
+    const choice = response.choices[0];
+    const content: Array<LanguageModelV3Content> = [];
+
+    // text content:
+    const text = choice.message.content;
+    if (text != null && text.length > 0) {
+      content.push({ type: 'text', text });
+    }
+
+    // tool calls:
+    for (const toolCall of choice.message.tool_calls ?? []) {
+      content.push({
+        type: 'tool-call' as const,
+        toolCallId: toolCall.id ?? generateId(),
+        toolName: toolCall.function.name,
+        input: toolCall.function.arguments!,
+      });
+    }
+
+    // annotations/citations:
+    for (const annotation of choice.message.annotations ?? []) {
+      content.push({
+        type: 'source',
+        sourceType: 'url',
+        id: generateId(),
+        url: annotation.url_citation.url,
+        title: annotation.url_citation.title,
+      });
+    }
+
+    // provider metadata:
+    const completionTokenDetails = response.usage?.completion_tokens_details;
+    const promptTokenDetails = response.usage?.prompt_tokens_details;
+    const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
+    if (completionTokenDetails?.accepted_prediction_tokens != null) {
+      providerMetadata.openai.acceptedPredictionTokens =
+        completionTokenDetails?.accepted_prediction_tokens;
+    }
+    if (completionTokenDetails?.rejected_prediction_tokens != null) {
+      providerMetadata.openai.rejectedPredictionTokens =
+        completionTokenDetails?.rejected_prediction_tokens;
+    }
+    if (choice.logprobs?.content != null) {
+      providerMetadata.openai.logprobs = choice.logprobs.content;
+    }
+
+    return {
+      content,
+      finishReason: {
+        unified: mapOpenAIFinishReason(choice.finish_reason),
+        raw: choice.finish_reason ?? undefined,
+      },
+      usage: convertOpenAIChatUsage(response.usage),
+      request: { body },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse,
+      },
+      warnings,
+      providerMetadata,
+    };
+  }
+
+  async doStream(
+    options: LanguageModelV3CallOptions,
+  ): Promise<LanguageModelV3StreamResult> {
+    const { args, warnings } = await this.getArgs(options);
+
+    const body = {
+      ...args,
+      stream: true,
+      stream_options: {
+        include_usage: true,
+      },
+    };
+
+    const { responseHeaders, value: response } = await postJsonToApi({
+      url: this.config.url({
+        path: '/chat/completions',
+        modelId: this.modelId,
+      }),
+      headers: combineHeaders(this.config.headers(), options.headers),
+      body,
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: createEventSourceResponseHandler(
+        openaiChatChunkSchema,
+      ),
+      abortSignal: options.abortSignal,
+      fetch: this.config.fetch,
+    });
+
+    const toolCalls: Array<{
+      id: string;
+      type: 'function';
+      function: {
+        name: string;
+        arguments: string;
+      };
+      hasFinished: boolean;
+    }> = [];
+
+    let finishReason: LanguageModelV3FinishReason = {
+      unified: 'other',
+      raw: undefined,
+    };
+    let usage: OpenAIChatUsage | undefined = undefined;
+    let metadataExtracted = false;
+    let isActiveText = false;
+
+    const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
+
+    return {
+      stream: response.pipeThrough(
+        new TransformStream<
+          ParseResult<OpenAIChatChunk>,
+          LanguageModelV3StreamPart
+        >({
+          start(controller) {
+            controller.enqueue({ type: 'stream-start', warnings });
+          },
+
+          transform(chunk, controller) {
+            if (options.includeRawChunks) {
+              controller.enqueue({ type: 'raw', rawValue: chunk.rawValue });
+            }
+
+            // handle failed chunk parsing / validation:
+            if (!chunk.success) {
+              finishReason = { unified: 'error', raw: undefined };
+              controller.enqueue({ type: 'error', error: chunk.error });
+              return;
+            }
+
+            const value = chunk.value;
+
+            // handle error chunks:
+            if ('error' in value) {
+              finishReason = { unified: 'error', raw: undefined };
+              controller.enqueue({ type: 'error', error: value.error });
+              return;
+            }
+
+            // extract and emit response metadata once. Usually it comes in the first chunk.
+            // Azure may prepend a chunk with a `"prompt_filter_results"` key which does not contain other metadata,
+            // https://learn.microsoft.com/en-us/azure/ai-foundry/openai/concepts/content-filter-annotations?tabs=powershell
+            if (!metadataExtracted) {
+              const metadata = getResponseMetadata(value);
+              if (Object.values(metadata).some(Boolean)) {
+                metadataExtracted = true;
+                controller.enqueue({
+                  type: 'response-metadata',
+                  ...getResponseMetadata(value),
+                });
+              }
+            }
+
+            if (value.usage != null) {
+              usage = value.usage;
+
+              if (
+                value.usage.completion_tokens_details
+                  ?.accepted_prediction_tokens != null
+              ) {
+                providerMetadata.openai.acceptedPredictionTokens =
+                  value.usage.completion_tokens_details?.accepted_prediction_tokens;
+              }
+              if (
+                value.usage.completion_tokens_details
+                  ?.rejected_prediction_tokens != null
+              ) {
+                providerMetadata.openai.rejectedPredictionTokens =
+                  value.usage.completion_tokens_details?.rejected_prediction_tokens;
+              }
+            }
+
+            const choice = value.choices[0];
+
+            if (choice?.finish_reason != null) {
+              finishReason = {
+                unified: mapOpenAIFinishReason(choice.finish_reason),
+                raw: choice.finish_reason,
+              };
+            }
+
+            if (choice?.logprobs?.content != null) {
+              providerMetadata.openai.logprobs = choice.logprobs.content;
+            }
+
+            if (choice?.delta == null) {
+              return;
+            }
+
+            const delta = choice.delta;
+
+            if (delta.content != null) {
+              if (!isActiveText) {
+                controller.enqueue({ type: 'text-start', id: '0' });
+                isActiveText = true;
+              }
+
+              controller.enqueue({
+                type: 'text-delta',
+                id: '0',
+                delta: delta.content,
+              });
+            }
+
+            if (delta.tool_calls != null) {
+              for (const toolCallDelta of delta.tool_calls) {
+                const index = toolCallDelta.index;
+
+                // Tool call start. OpenAI returns all information except the arguments in the first chunk.
+                if (toolCalls[index] == null) {
+                  if (toolCallDelta.type !== 'function') {
+                    throw new InvalidResponseDataError({
+                      data: toolCallDelta,
+                      message: `Expected 'function' type.`,
+                    });
+                  }
+
+                  if (toolCallDelta.id == null) {
+                    throw new InvalidResponseDataError({
+                      data: toolCallDelta,
+                      message: `Expected 'id' to be a string.`,
+                    });
+                  }
+
+                  if (toolCallDelta.function?.name == null) {
+                    throw new InvalidResponseDataError({
+                      data: toolCallDelta,
+                      message: `Expected 'function.name' to be a string.`,
+                    });
+                  }
+
+                  controller.enqueue({
+                    type: 'tool-input-start',
+                    id: toolCallDelta.id,
+                    toolName: toolCallDelta.function.name,
+                  });
+
+                  toolCalls[index] = {
+                    id: toolCallDelta.id,
+                    type: 'function',
+                    function: {
+                      name: toolCallDelta.function.name,
+                      arguments: toolCallDelta.function.arguments ?? '',
+                    },
+                    hasFinished: false,
+                  };
+
+                  const toolCall = toolCalls[index];
+
+                  if (
+                    toolCall.function?.name != null &&
+                    toolCall.function?.arguments != null
+                  ) {
+                    // send delta if the argument text has already started:
+                    if (toolCall.function.arguments.length > 0) {
+                      controller.enqueue({
+                        type: 'tool-input-delta',
+                        id: toolCall.id,
+                        delta: toolCall.function.arguments,
+                      });
+                    }
+
+                    // check if tool call is complete
+                    // (some providers send the full tool call in one chunk):
+                    if (isParsableJson(toolCall.function.arguments)) {
+                      controller.enqueue({
+                        type: 'tool-input-end',
+                        id: toolCall.id,
+                      });
+
+                      controller.enqueue({
+                        type: 'tool-call',
+                        toolCallId: toolCall.id ?? generateId(),
+                        toolName: toolCall.function.name,
+                        input: toolCall.function.arguments,
+                      });
+                      toolCall.hasFinished = true;
+                    }
+                  }
+
+                  continue;
+                }
+
+                // existing tool call, merge if not finished
+                const toolCall = toolCalls[index];
+
+                if (toolCall.hasFinished) {
+                  continue;
+                }
+
+                if (toolCallDelta.function?.arguments != null) {
+                  toolCall.function!.arguments +=
+                    toolCallDelta.function?.arguments ?? '';
+                }
+
+                // send delta
+                controller.enqueue({
+                  type: 'tool-input-delta',
+                  id: toolCall.id,
+                  delta: toolCallDelta.function.arguments ?? '',
+                });
+
+                // check if tool call is complete
+                if (
+                  toolCall.function?.name != null &&
+                  toolCall.function?.arguments != null &&
+                  isParsableJson(toolCall.function.arguments)
+                ) {
+                  controller.enqueue({
+                    type: 'tool-input-end',
+                    id: toolCall.id,
+                  });
+
+                  controller.enqueue({
+                    type: 'tool-call',
+                    toolCallId: toolCall.id ?? generateId(),
+                    toolName: toolCall.function.name,
+                    input: toolCall.function.arguments,
+                  });
+                  toolCall.hasFinished = true;
+                }
+              }
+            }
+
+            // annotations/citations:
+            if (delta.annotations != null) {
+              for (const annotation of delta.annotations) {
+                controller.enqueue({
+                  type: 'source',
+                  sourceType: 'url',
+                  id: generateId(),
+                  url: annotation.url_citation.url,
+                  title: annotation.url_citation.title,
+                });
+              }
+            }
+          },

+          flush(controller) {
+            if (isActiveText) {
+              controller.enqueue({ type: 'text-end', id: '0' });
+            }
+
+            controller.enqueue({
+              type: 'finish',
+              finishReason,
+              usage: convertOpenAIChatUsage(usage),
+              ...(providerMetadata != null ? { providerMetadata } : {}),
+            });
+          },
+        }),
+      ),
+      request: { body },
+      response: { headers: responseHeaders },
+    };
+  }
+}
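For reference, a hedged sketch of draining the stream that `doStream` returns, continuing from the construction sketch above. The part shapes handled here (`text-delta`, `tool-call`, `finish`) mirror what the `TransformStream` in the diff enqueues; the prompt literal and the reader loop are illustrative assumptions, not code from this package.

```ts
// Hedged sketch: consume the LanguageModelV3StreamPart stream from doStream.
const { stream } = await model.doStream({
  prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }],
});

const reader = stream.getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;

  switch (value.type) {
    case 'text-delta':
      process.stdout.write(value.delta); // incremental assistant text
      break;
    case 'tool-call':
      console.log(`\ntool call ${value.toolName}:`, value.input);
      break;
    case 'finish':
      console.log('\nfinish:', value.finishReason.unified); // e.g. 'stop'
      break;
  }
}
```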