ai 5.0.0-canary.1 → 5.0.0-canary.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/CHANGELOG.md +136 -0
  2. package/dist/index.d.mts +1449 -718
  3. package/dist/index.d.ts +1449 -718
  4. package/dist/index.js +2550 -760
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +2423 -670
  7. package/dist/index.mjs.map +1 -1
  8. package/dist/internal/index.d.mts +730 -0
  9. package/dist/internal/index.d.ts +730 -0
  10. package/dist/internal/index.js +1482 -0
  11. package/dist/internal/index.js.map +1 -0
  12. package/{rsc/dist/rsc-server.mjs → dist/internal/index.mjs} +855 -1555
  13. package/dist/internal/index.mjs.map +1 -0
  14. package/{mcp-stdio/dist → dist/mcp-stdio}/index.js +1 -1
  15. package/dist/mcp-stdio/index.js.map +1 -0
  16. package/{mcp-stdio/dist → dist/mcp-stdio}/index.mjs +1 -1
  17. package/dist/mcp-stdio/index.mjs.map +1 -0
  18. package/{test/dist → dist/test}/index.d.mts +18 -16
  19. package/{test/dist → dist/test}/index.d.ts +18 -16
  20. package/{test/dist → dist/test}/index.js +28 -8
  21. package/dist/test/index.js.map +1 -0
  22. package/{test/dist → dist/test}/index.mjs +27 -7
  23. package/dist/test/index.mjs.map +1 -0
  24. package/package.json +28 -47
  25. package/mcp-stdio/create-child-process.test.ts +0 -92
  26. package/mcp-stdio/create-child-process.ts +0 -21
  27. package/mcp-stdio/dist/index.js.map +0 -1
  28. package/mcp-stdio/dist/index.mjs.map +0 -1
  29. package/mcp-stdio/get-environment.ts +0 -43
  30. package/mcp-stdio/index.ts +0 -4
  31. package/mcp-stdio/mcp-stdio-transport.test.ts +0 -262
  32. package/mcp-stdio/mcp-stdio-transport.ts +0 -157
  33. package/rsc/dist/index.d.ts +0 -813
  34. package/rsc/dist/index.mjs +0 -18
  35. package/rsc/dist/rsc-client.d.mts +0 -1
  36. package/rsc/dist/rsc-client.mjs +0 -18
  37. package/rsc/dist/rsc-client.mjs.map +0 -1
  38. package/rsc/dist/rsc-server.d.mts +0 -748
  39. package/rsc/dist/rsc-server.mjs.map +0 -1
  40. package/rsc/dist/rsc-shared.d.mts +0 -101
  41. package/rsc/dist/rsc-shared.mjs +0 -308
  42. package/rsc/dist/rsc-shared.mjs.map +0 -1
  43. package/test/dist/index.js.map +0 -1
  44. package/test/dist/index.mjs.map +0 -1
  45. package/{mcp-stdio/dist → dist/mcp-stdio}/index.d.mts +6 -6
  46. package/{mcp-stdio/dist → dist/mcp-stdio}/index.d.ts +6 -6
package/dist/index.mjs CHANGED
@@ -6,17 +6,1483 @@ var __export = (target, all) => {
 
  // core/index.ts
  import { createIdGenerator as createIdGenerator5, generateId as generateId2 } from "@ai-sdk/provider-utils";
- import {
- formatDataStreamPart as formatDataStreamPart3,
- jsonSchema as jsonSchema2,
- parseDataStreamPart,
- processDataStream,
- processTextStream,
- zodSchema
- } from "@ai-sdk/ui-utils";
+
+ // core/util/index.ts
+ import { generateId } from "@ai-sdk/provider-utils";
+
+ // core/util/process-chat-response.ts
+ import { generateId as generateIdFunction } from "@ai-sdk/provider-utils";
+
+ // core/types/duplicated/usage.ts
+ function calculateLanguageModelUsage({
+ promptTokens,
+ completionTokens
+ }) {
+ return {
+ promptTokens,
+ completionTokens,
+ totalTokens: promptTokens + completionTokens
+ };
+ }
+
+ // core/util/parse-partial-json.ts
+ import { safeParseJSON } from "@ai-sdk/provider-utils";
+
+ // core/util/fix-json.ts
+ function fixJson(input) {
+ const stack = ["ROOT"];
+ let lastValidIndex = -1;
+ let literalStart = null;
+ function processValueStart(char, i, swapState) {
+ {
+ switch (char) {
+ case '"': {
+ lastValidIndex = i;
+ stack.pop();
+ stack.push(swapState);
+ stack.push("INSIDE_STRING");
+ break;
+ }
+ case "f":
+ case "t":
+ case "n": {
+ lastValidIndex = i;
+ literalStart = i;
+ stack.pop();
+ stack.push(swapState);
+ stack.push("INSIDE_LITERAL");
+ break;
+ }
+ case "-": {
+ stack.pop();
+ stack.push(swapState);
+ stack.push("INSIDE_NUMBER");
+ break;
+ }
+ case "0":
+ case "1":
+ case "2":
+ case "3":
+ case "4":
+ case "5":
+ case "6":
+ case "7":
+ case "8":
+ case "9": {
+ lastValidIndex = i;
+ stack.pop();
+ stack.push(swapState);
+ stack.push("INSIDE_NUMBER");
+ break;
+ }
+ case "{": {
+ lastValidIndex = i;
+ stack.pop();
+ stack.push(swapState);
+ stack.push("INSIDE_OBJECT_START");
+ break;
+ }
+ case "[": {
+ lastValidIndex = i;
+ stack.pop();
+ stack.push(swapState);
+ stack.push("INSIDE_ARRAY_START");
+ break;
+ }
+ }
+ }
+ }
+ function processAfterObjectValue(char, i) {
+ switch (char) {
+ case ",": {
+ stack.pop();
+ stack.push("INSIDE_OBJECT_AFTER_COMMA");
+ break;
+ }
+ case "}": {
+ lastValidIndex = i;
+ stack.pop();
+ break;
+ }
+ }
+ }
+ function processAfterArrayValue(char, i) {
+ switch (char) {
+ case ",": {
+ stack.pop();
+ stack.push("INSIDE_ARRAY_AFTER_COMMA");
+ break;
+ }
+ case "]": {
+ lastValidIndex = i;
+ stack.pop();
+ break;
+ }
+ }
+ }
+ for (let i = 0; i < input.length; i++) {
+ const char = input[i];
+ const currentState = stack[stack.length - 1];
+ switch (currentState) {
+ case "ROOT":
+ processValueStart(char, i, "FINISH");
+ break;
+ case "INSIDE_OBJECT_START": {
+ switch (char) {
+ case '"': {
+ stack.pop();
+ stack.push("INSIDE_OBJECT_KEY");
+ break;
+ }
+ case "}": {
+ lastValidIndex = i;
+ stack.pop();
+ break;
+ }
+ }
+ break;
+ }
+ case "INSIDE_OBJECT_AFTER_COMMA": {
+ switch (char) {
+ case '"': {
+ stack.pop();
+ stack.push("INSIDE_OBJECT_KEY");
+ break;
+ }
+ }
+ break;
+ }
+ case "INSIDE_OBJECT_KEY": {
+ switch (char) {
+ case '"': {
+ stack.pop();
+ stack.push("INSIDE_OBJECT_AFTER_KEY");
+ break;
+ }
+ }
+ break;
+ }
+ case "INSIDE_OBJECT_AFTER_KEY": {
+ switch (char) {
+ case ":": {
+ stack.pop();
+ stack.push("INSIDE_OBJECT_BEFORE_VALUE");
+ break;
+ }
+ }
+ break;
+ }
+ case "INSIDE_OBJECT_BEFORE_VALUE": {
+ processValueStart(char, i, "INSIDE_OBJECT_AFTER_VALUE");
+ break;
+ }
+ case "INSIDE_OBJECT_AFTER_VALUE": {
+ processAfterObjectValue(char, i);
+ break;
+ }
+ case "INSIDE_STRING": {
+ switch (char) {
+ case '"': {
+ stack.pop();
+ lastValidIndex = i;
+ break;
+ }
+ case "\\": {
+ stack.push("INSIDE_STRING_ESCAPE");
+ break;
+ }
+ default: {
+ lastValidIndex = i;
+ }
+ }
+ break;
+ }
+ case "INSIDE_ARRAY_START": {
+ switch (char) {
+ case "]": {
+ lastValidIndex = i;
+ stack.pop();
+ break;
+ }
+ default: {
+ lastValidIndex = i;
+ processValueStart(char, i, "INSIDE_ARRAY_AFTER_VALUE");
+ break;
+ }
+ }
+ break;
+ }
+ case "INSIDE_ARRAY_AFTER_VALUE": {
+ switch (char) {
+ case ",": {
+ stack.pop();
+ stack.push("INSIDE_ARRAY_AFTER_COMMA");
+ break;
+ }
+ case "]": {
+ lastValidIndex = i;
+ stack.pop();
+ break;
+ }
+ default: {
+ lastValidIndex = i;
+ break;
+ }
+ }
+ break;
+ }
+ case "INSIDE_ARRAY_AFTER_COMMA": {
+ processValueStart(char, i, "INSIDE_ARRAY_AFTER_VALUE");
+ break;
+ }
+ case "INSIDE_STRING_ESCAPE": {
+ stack.pop();
+ lastValidIndex = i;
+ break;
+ }
+ case "INSIDE_NUMBER": {
+ switch (char) {
+ case "0":
+ case "1":
+ case "2":
+ case "3":
+ case "4":
+ case "5":
+ case "6":
+ case "7":
+ case "8":
+ case "9": {
+ lastValidIndex = i;
+ break;
+ }
+ case "e":
+ case "E":
+ case "-":
+ case ".": {
+ break;
+ }
+ case ",": {
+ stack.pop();
+ if (stack[stack.length - 1] === "INSIDE_ARRAY_AFTER_VALUE") {
+ processAfterArrayValue(char, i);
+ }
+ if (stack[stack.length - 1] === "INSIDE_OBJECT_AFTER_VALUE") {
+ processAfterObjectValue(char, i);
+ }
+ break;
+ }
+ case "}": {
+ stack.pop();
+ if (stack[stack.length - 1] === "INSIDE_OBJECT_AFTER_VALUE") {
+ processAfterObjectValue(char, i);
+ }
+ break;
+ }
+ case "]": {
+ stack.pop();
+ if (stack[stack.length - 1] === "INSIDE_ARRAY_AFTER_VALUE") {
+ processAfterArrayValue(char, i);
+ }
+ break;
+ }
+ default: {
+ stack.pop();
+ break;
+ }
+ }
+ break;
+ }
+ case "INSIDE_LITERAL": {
+ const partialLiteral = input.substring(literalStart, i + 1);
+ if (!"false".startsWith(partialLiteral) && !"true".startsWith(partialLiteral) && !"null".startsWith(partialLiteral)) {
+ stack.pop();
+ if (stack[stack.length - 1] === "INSIDE_OBJECT_AFTER_VALUE") {
+ processAfterObjectValue(char, i);
+ } else if (stack[stack.length - 1] === "INSIDE_ARRAY_AFTER_VALUE") {
+ processAfterArrayValue(char, i);
+ }
+ } else {
+ lastValidIndex = i;
+ }
+ break;
+ }
+ }
+ }
+ let result = input.slice(0, lastValidIndex + 1);
+ for (let i = stack.length - 1; i >= 0; i--) {
+ const state = stack[i];
+ switch (state) {
+ case "INSIDE_STRING": {
+ result += '"';
+ break;
+ }
+ case "INSIDE_OBJECT_KEY":
+ case "INSIDE_OBJECT_AFTER_KEY":
+ case "INSIDE_OBJECT_AFTER_COMMA":
+ case "INSIDE_OBJECT_START":
+ case "INSIDE_OBJECT_BEFORE_VALUE":
+ case "INSIDE_OBJECT_AFTER_VALUE": {
+ result += "}";
+ break;
+ }
+ case "INSIDE_ARRAY_START":
+ case "INSIDE_ARRAY_AFTER_COMMA":
+ case "INSIDE_ARRAY_AFTER_VALUE": {
+ result += "]";
+ break;
+ }
+ case "INSIDE_LITERAL": {
+ const partialLiteral = input.substring(literalStart, input.length);
+ if ("true".startsWith(partialLiteral)) {
+ result += "true".slice(partialLiteral.length);
+ } else if ("false".startsWith(partialLiteral)) {
+ result += "false".slice(partialLiteral.length);
+ } else if ("null".startsWith(partialLiteral)) {
+ result += "null".slice(partialLiteral.length);
+ }
+ }
+ }
+ }
+ return result;
+ }
+
+ // core/util/parse-partial-json.ts
+ function parsePartialJson(jsonText) {
+ if (jsonText === void 0) {
+ return { value: void 0, state: "undefined-input" };
+ }
+ let result = safeParseJSON({ text: jsonText });
+ if (result.success) {
+ return { value: result.value, state: "successful-parse" };
+ }
+ result = safeParseJSON({ text: fixJson(jsonText) });
+ if (result.success) {
+ return { value: result.value, state: "repaired-parse" };
+ }
+ return { value: void 0, state: "failed-parse" };
+ }
+
+ // core/util/data-stream-parts.ts
+ var textStreamPart = {
+ code: "0",
+ name: "text",
+ parse: (value) => {
+ if (typeof value !== "string") {
+ throw new Error('"text" parts expect a string value.');
+ }
+ return { type: "text", value };
+ }
+ };
+ var dataStreamPart = {
+ code: "2",
+ name: "data",
+ parse: (value) => {
+ if (!Array.isArray(value)) {
+ throw new Error('"data" parts expect an array value.');
+ }
+ return { type: "data", value };
+ }
+ };
+ var errorStreamPart = {
+ code: "3",
+ name: "error",
+ parse: (value) => {
+ if (typeof value !== "string") {
+ throw new Error('"error" parts expect a string value.');
+ }
+ return { type: "error", value };
+ }
+ };
+ var messageAnnotationsStreamPart = {
+ code: "8",
+ name: "message_annotations",
+ parse: (value) => {
+ if (!Array.isArray(value)) {
+ throw new Error('"message_annotations" parts expect an array value.');
+ }
+ return { type: "message_annotations", value };
+ }
+ };
+ var toolCallStreamPart = {
+ code: "9",
+ name: "tool_call",
+ parse: (value) => {
+ if (value == null || typeof value !== "object" || !("toolCallId" in value) || typeof value.toolCallId !== "string" || !("toolName" in value) || typeof value.toolName !== "string" || !("args" in value) || typeof value.args !== "object") {
+ throw new Error(
+ '"tool_call" parts expect an object with a "toolCallId", "toolName", and "args" property.'
+ );
+ }
+ return {
+ type: "tool_call",
+ value
+ };
+ }
+ };
+ var toolResultStreamPart = {
+ code: "a",
+ name: "tool_result",
+ parse: (value) => {
+ if (value == null || typeof value !== "object" || !("toolCallId" in value) || typeof value.toolCallId !== "string" || !("result" in value)) {
+ throw new Error(
+ '"tool_result" parts expect an object with a "toolCallId" and a "result" property.'
+ );
+ }
+ return {
+ type: "tool_result",
+ value
+ };
+ }
+ };
+ var toolCallStreamingStartStreamPart = {
+ code: "b",
+ name: "tool_call_streaming_start",
+ parse: (value) => {
+ if (value == null || typeof value !== "object" || !("toolCallId" in value) || typeof value.toolCallId !== "string" || !("toolName" in value) || typeof value.toolName !== "string") {
+ throw new Error(
+ '"tool_call_streaming_start" parts expect an object with a "toolCallId" and "toolName" property.'
+ );
+ }
+ return {
+ type: "tool_call_streaming_start",
+ value
+ };
+ }
+ };
+ var toolCallDeltaStreamPart = {
+ code: "c",
+ name: "tool_call_delta",
+ parse: (value) => {
+ if (value == null || typeof value !== "object" || !("toolCallId" in value) || typeof value.toolCallId !== "string" || !("argsTextDelta" in value) || typeof value.argsTextDelta !== "string") {
+ throw new Error(
+ '"tool_call_delta" parts expect an object with a "toolCallId" and "argsTextDelta" property.'
+ );
+ }
+ return {
+ type: "tool_call_delta",
+ value
+ };
+ }
+ };
+ var finishMessageStreamPart = {
+ code: "d",
+ name: "finish_message",
+ parse: (value) => {
+ if (value == null || typeof value !== "object" || !("finishReason" in value) || typeof value.finishReason !== "string") {
+ throw new Error(
+ '"finish_message" parts expect an object with a "finishReason" property.'
+ );
+ }
+ const result = {
+ finishReason: value.finishReason
+ };
+ if ("usage" in value && value.usage != null && typeof value.usage === "object" && "promptTokens" in value.usage && "completionTokens" in value.usage) {
+ result.usage = {
+ promptTokens: typeof value.usage.promptTokens === "number" ? value.usage.promptTokens : Number.NaN,
+ completionTokens: typeof value.usage.completionTokens === "number" ? value.usage.completionTokens : Number.NaN
+ };
+ }
+ return {
+ type: "finish_message",
+ value: result
+ };
+ }
+ };
+ var finishStepStreamPart = {
+ code: "e",
+ name: "finish_step",
+ parse: (value) => {
+ if (value == null || typeof value !== "object" || !("finishReason" in value) || typeof value.finishReason !== "string") {
+ throw new Error(
+ '"finish_step" parts expect an object with a "finishReason" property.'
+ );
+ }
+ const result = {
+ finishReason: value.finishReason,
+ isContinued: false
+ };
+ if ("usage" in value && value.usage != null && typeof value.usage === "object" && "promptTokens" in value.usage && "completionTokens" in value.usage) {
+ result.usage = {
+ promptTokens: typeof value.usage.promptTokens === "number" ? value.usage.promptTokens : Number.NaN,
+ completionTokens: typeof value.usage.completionTokens === "number" ? value.usage.completionTokens : Number.NaN
+ };
+ }
+ if ("isContinued" in value && typeof value.isContinued === "boolean") {
+ result.isContinued = value.isContinued;
+ }
+ return {
+ type: "finish_step",
+ value: result
+ };
+ }
+ };
+ var startStepStreamPart = {
+ code: "f",
+ name: "start_step",
+ parse: (value) => {
+ if (value == null || typeof value !== "object" || !("messageId" in value) || typeof value.messageId !== "string") {
+ throw new Error(
+ '"start_step" parts expect an object with an "id" property.'
+ );
+ }
+ return {
+ type: "start_step",
+ value: {
+ messageId: value.messageId
+ }
+ };
+ }
+ };
+ var reasoningStreamPart = {
+ code: "g",
+ name: "reasoning",
+ parse: (value) => {
+ if (typeof value !== "string") {
+ throw new Error('"reasoning" parts expect a string value.');
+ }
+ return { type: "reasoning", value };
+ }
+ };
+ var sourcePart = {
+ code: "h",
+ name: "source",
+ parse: (value) => {
+ if (value == null || typeof value !== "object") {
+ throw new Error('"source" parts expect a Source object.');
+ }
+ return {
+ type: "source",
+ value
+ };
+ }
+ };
+ var redactedReasoningStreamPart = {
+ code: "i",
+ name: "redacted_reasoning",
+ parse: (value) => {
+ if (value == null || typeof value !== "object" || !("data" in value) || typeof value.data !== "string") {
+ throw new Error(
+ '"redacted_reasoning" parts expect an object with a "data" property.'
+ );
+ }
+ return { type: "redacted_reasoning", value: { data: value.data } };
+ }
+ };
+ var reasoningSignatureStreamPart = {
+ code: "j",
+ name: "reasoning_signature",
+ parse: (value) => {
+ if (value == null || typeof value !== "object" || !("signature" in value) || typeof value.signature !== "string") {
+ throw new Error(
+ '"reasoning_signature" parts expect an object with a "signature" property.'
+ );
+ }
+ return {
+ type: "reasoning_signature",
+ value: { signature: value.signature }
+ };
+ }
+ };
+ var fileStreamPart = {
+ code: "k",
+ name: "file",
+ parse: (value) => {
+ if (value == null || typeof value !== "object" || !("data" in value) || typeof value.data !== "string" || !("mimeType" in value) || typeof value.mimeType !== "string") {
+ throw new Error(
+ '"file" parts expect an object with a "data" and "mimeType" property.'
+ );
+ }
+ return { type: "file", value };
+ }
+ };
+ var dataStreamParts = [
+ textStreamPart,
+ dataStreamPart,
+ errorStreamPart,
+ messageAnnotationsStreamPart,
+ toolCallStreamPart,
+ toolResultStreamPart,
+ toolCallStreamingStartStreamPart,
+ toolCallDeltaStreamPart,
+ finishMessageStreamPart,
+ finishStepStreamPart,
+ startStepStreamPart,
+ reasoningStreamPart,
+ sourcePart,
+ redactedReasoningStreamPart,
+ reasoningSignatureStreamPart,
+ fileStreamPart
+ ];
+ var dataStreamPartsByCode = Object.fromEntries(
+ dataStreamParts.map((part) => [part.code, part])
+ );
+ var DataStreamStringPrefixes = Object.fromEntries(
+ dataStreamParts.map((part) => [part.name, part.code])
+ );
+ var validCodes = dataStreamParts.map((part) => part.code);
+ var parseDataStreamPart = (line) => {
+ const firstSeparatorIndex = line.indexOf(":");
+ if (firstSeparatorIndex === -1) {
+ throw new Error("Failed to parse stream string. No separator found.");
+ }
+ const prefix = line.slice(0, firstSeparatorIndex);
+ if (!validCodes.includes(prefix)) {
+ throw new Error(`Failed to parse stream string. Invalid code ${prefix}.`);
+ }
+ const code = prefix;
+ const textValue = line.slice(firstSeparatorIndex + 1);
+ const jsonValue = JSON.parse(textValue);
+ return dataStreamPartsByCode[code].parse(jsonValue);
+ };
+ function formatDataStreamPart(type, value) {
+ const streamPart = dataStreamParts.find((part) => part.name === type);
+ if (!streamPart) {
+ throw new Error(`Invalid stream part type: ${type}`);
+ }
+ return `${streamPart.code}:${JSON.stringify(value)}
+ `;
+ }
+
+ // core/util/process-data-stream.ts
+ var NEWLINE = "\n".charCodeAt(0);
+ function concatChunks(chunks, totalLength) {
+ const concatenatedChunks = new Uint8Array(totalLength);
+ let offset = 0;
+ for (const chunk of chunks) {
+ concatenatedChunks.set(chunk, offset);
+ offset += chunk.length;
+ }
+ chunks.length = 0;
+ return concatenatedChunks;
+ }
+ async function processDataStream({
+ stream,
+ onTextPart,
+ onReasoningPart,
+ onReasoningSignaturePart,
+ onRedactedReasoningPart,
+ onSourcePart,
+ onFilePart,
+ onDataPart,
+ onErrorPart,
+ onToolCallStreamingStartPart,
+ onToolCallDeltaPart,
+ onToolCallPart,
+ onToolResultPart,
+ onMessageAnnotationsPart,
+ onFinishMessagePart,
+ onFinishStepPart,
+ onStartStepPart
+ }) {
+ const reader = stream.getReader();
+ const decoder = new TextDecoder();
+ const chunks = [];
+ let totalLength = 0;
+ while (true) {
+ const { value } = await reader.read();
+ if (value) {
+ chunks.push(value);
+ totalLength += value.length;
+ if (value[value.length - 1] !== NEWLINE) {
+ continue;
+ }
+ }
+ if (chunks.length === 0) {
+ break;
+ }
+ const concatenatedChunks = concatChunks(chunks, totalLength);
+ totalLength = 0;
+ const streamParts = decoder.decode(concatenatedChunks, { stream: true }).split("\n").filter((line) => line !== "").map(parseDataStreamPart);
+ for (const { type, value: value2 } of streamParts) {
+ switch (type) {
+ case "text":
+ await (onTextPart == null ? void 0 : onTextPart(value2));
+ break;
+ case "reasoning":
+ await (onReasoningPart == null ? void 0 : onReasoningPart(value2));
+ break;
+ case "reasoning_signature":
+ await (onReasoningSignaturePart == null ? void 0 : onReasoningSignaturePart(value2));
+ break;
+ case "redacted_reasoning":
+ await (onRedactedReasoningPart == null ? void 0 : onRedactedReasoningPart(value2));
+ break;
+ case "file":
+ await (onFilePart == null ? void 0 : onFilePart(value2));
+ break;
+ case "source":
+ await (onSourcePart == null ? void 0 : onSourcePart(value2));
+ break;
+ case "data":
+ await (onDataPart == null ? void 0 : onDataPart(value2));
+ break;
+ case "error":
+ await (onErrorPart == null ? void 0 : onErrorPart(value2));
+ break;
+ case "message_annotations":
+ await (onMessageAnnotationsPart == null ? void 0 : onMessageAnnotationsPart(value2));
+ break;
+ case "tool_call_streaming_start":
+ await (onToolCallStreamingStartPart == null ? void 0 : onToolCallStreamingStartPart(value2));
+ break;
+ case "tool_call_delta":
+ await (onToolCallDeltaPart == null ? void 0 : onToolCallDeltaPart(value2));
+ break;
+ case "tool_call":
+ await (onToolCallPart == null ? void 0 : onToolCallPart(value2));
+ break;
+ case "tool_result":
+ await (onToolResultPart == null ? void 0 : onToolResultPart(value2));
+ break;
+ case "finish_message":
+ await (onFinishMessagePart == null ? void 0 : onFinishMessagePart(value2));
+ break;
+ case "finish_step":
+ await (onFinishStepPart == null ? void 0 : onFinishStepPart(value2));
+ break;
+ case "start_step":
+ await (onStartStepPart == null ? void 0 : onStartStepPart(value2));
+ break;
+ default: {
+ const exhaustiveCheck = type;
+ throw new Error(`Unknown stream part type: ${exhaustiveCheck}`);
+ }
+ }
+ }
+ }
+ }
+
+ // core/util/process-chat-response.ts
+ async function processChatResponse({
+ stream,
+ update,
+ onToolCall,
+ onFinish,
+ generateId: generateId3 = generateIdFunction,
+ getCurrentDate = () => /* @__PURE__ */ new Date(),
+ lastMessage
+ }) {
+ var _a17, _b;
+ const replaceLastMessage = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
+ let step = replaceLastMessage ? 1 + // find max step in existing tool invocations:
+ ((_b = (_a17 = lastMessage.toolInvocations) == null ? void 0 : _a17.reduce((max, toolInvocation) => {
+ var _a18;
+ return Math.max(max, (_a18 = toolInvocation.step) != null ? _a18 : 0);
+ }, 0)) != null ? _b : 0) : 0;
+ const message = replaceLastMessage ? structuredClone(lastMessage) : {
+ id: generateId3(),
+ createdAt: getCurrentDate(),
+ role: "assistant",
+ content: "",
+ parts: []
+ };
+ let currentTextPart = void 0;
+ let currentReasoningPart = void 0;
+ let currentReasoningTextDetail = void 0;
+ function updateToolInvocationPart(toolCallId, invocation) {
+ const part = message.parts.find(
+ (part2) => part2.type === "tool-invocation" && part2.toolInvocation.toolCallId === toolCallId
+ );
+ if (part != null) {
+ part.toolInvocation = invocation;
+ } else {
+ message.parts.push({
+ type: "tool-invocation",
+ toolInvocation: invocation
+ });
+ }
+ }
+ const data = [];
+ let messageAnnotations = replaceLastMessage ? lastMessage == null ? void 0 : lastMessage.annotations : void 0;
+ const partialToolCalls = {};
+ let usage = {
+ completionTokens: NaN,
+ promptTokens: NaN,
+ totalTokens: NaN
+ };
+ let finishReason = "unknown";
+ function execUpdate() {
+ const copiedData = [...data];
+ if (messageAnnotations == null ? void 0 : messageAnnotations.length) {
+ message.annotations = messageAnnotations;
+ }
+ const copiedMessage = {
+ // deep copy the message to ensure that deep changes (msg attachments) are updated
+ // with SolidJS. SolidJS uses referential integration of sub-objects to detect changes.
+ ...structuredClone(message),
+ // add a revision id to ensure that the message is updated with SWR. SWR uses a
+ // hashing approach by default to detect changes, but it only works for shallow
+ // changes. This is why we need to add a revision id to ensure that the message
+ // is updated with SWR (without it, the changes get stuck in SWR and are not
+ // forwarded to rendering):
+ revisionId: generateId3()
+ };
+ update({
+ message: copiedMessage,
+ data: copiedData,
+ replaceLastMessage
+ });
+ }
+ await processDataStream({
+ stream,
+ onTextPart(value) {
+ if (currentTextPart == null) {
+ currentTextPart = {
+ type: "text",
+ text: value
+ };
+ message.parts.push(currentTextPart);
+ } else {
+ currentTextPart.text += value;
+ }
+ message.content += value;
+ execUpdate();
+ },
+ onReasoningPart(value) {
+ var _a18;
+ if (currentReasoningTextDetail == null) {
+ currentReasoningTextDetail = { type: "text", text: value };
+ if (currentReasoningPart != null) {
+ currentReasoningPart.details.push(currentReasoningTextDetail);
+ }
+ } else {
+ currentReasoningTextDetail.text += value;
+ }
+ if (currentReasoningPart == null) {
+ currentReasoningPart = {
+ type: "reasoning",
+ reasoning: value,
+ details: [currentReasoningTextDetail]
+ };
+ message.parts.push(currentReasoningPart);
+ } else {
+ currentReasoningPart.reasoning += value;
+ }
+ message.reasoning = ((_a18 = message.reasoning) != null ? _a18 : "") + value;
+ execUpdate();
+ },
+ onReasoningSignaturePart(value) {
+ if (currentReasoningTextDetail != null) {
+ currentReasoningTextDetail.signature = value.signature;
+ }
+ },
+ onRedactedReasoningPart(value) {
+ if (currentReasoningPart == null) {
+ currentReasoningPart = {
+ type: "reasoning",
+ reasoning: "",
+ details: []
+ };
+ message.parts.push(currentReasoningPart);
+ }
+ currentReasoningPart.details.push({
+ type: "redacted",
+ data: value.data
+ });
+ currentReasoningTextDetail = void 0;
+ execUpdate();
+ },
+ onFilePart(value) {
+ message.parts.push({
+ type: "file",
+ mediaType: value.mimeType,
+ data: value.data
+ });
+ execUpdate();
+ },
+ onSourcePart(value) {
+ message.parts.push({
+ type: "source",
+ source: value
+ });
+ execUpdate();
+ },
+ onToolCallStreamingStartPart(value) {
+ if (message.toolInvocations == null) {
+ message.toolInvocations = [];
+ }
+ partialToolCalls[value.toolCallId] = {
+ text: "",
+ step,
+ toolName: value.toolName,
+ index: message.toolInvocations.length
+ };
+ const invocation = {
+ state: "partial-call",
+ step,
+ toolCallId: value.toolCallId,
+ toolName: value.toolName,
+ args: void 0
+ };
+ message.toolInvocations.push(invocation);
+ updateToolInvocationPart(value.toolCallId, invocation);
+ execUpdate();
+ },
+ onToolCallDeltaPart(value) {
+ const partialToolCall = partialToolCalls[value.toolCallId];
+ partialToolCall.text += value.argsTextDelta;
+ const { value: partialArgs } = parsePartialJson(partialToolCall.text);
+ const invocation = {
+ state: "partial-call",
+ step: partialToolCall.step,
+ toolCallId: value.toolCallId,
+ toolName: partialToolCall.toolName,
+ args: partialArgs
+ };
+ message.toolInvocations[partialToolCall.index] = invocation;
+ updateToolInvocationPart(value.toolCallId, invocation);
+ execUpdate();
+ },
+ async onToolCallPart(value) {
+ const invocation = {
+ state: "call",
+ step,
+ ...value
+ };
+ if (partialToolCalls[value.toolCallId] != null) {
+ message.toolInvocations[partialToolCalls[value.toolCallId].index] = invocation;
+ } else {
+ if (message.toolInvocations == null) {
+ message.toolInvocations = [];
+ }
+ message.toolInvocations.push(invocation);
+ }
+ updateToolInvocationPart(value.toolCallId, invocation);
+ execUpdate();
+ if (onToolCall) {
+ const result = await onToolCall({ toolCall: value });
+ if (result != null) {
+ const invocation2 = {
+ state: "result",
+ step,
+ ...value,
+ result
+ };
+ message.toolInvocations[message.toolInvocations.length - 1] = invocation2;
+ updateToolInvocationPart(value.toolCallId, invocation2);
+ execUpdate();
+ }
+ }
+ },
+ onToolResultPart(value) {
+ const toolInvocations = message.toolInvocations;
+ if (toolInvocations == null) {
+ throw new Error("tool_result must be preceded by a tool_call");
+ }
+ const toolInvocationIndex = toolInvocations.findIndex(
+ (invocation2) => invocation2.toolCallId === value.toolCallId
+ );
+ if (toolInvocationIndex === -1) {
+ throw new Error(
+ "tool_result must be preceded by a tool_call with the same toolCallId"
+ );
+ }
+ const invocation = {
+ ...toolInvocations[toolInvocationIndex],
+ state: "result",
+ ...value
+ };
+ toolInvocations[toolInvocationIndex] = invocation;
+ updateToolInvocationPart(value.toolCallId, invocation);
+ execUpdate();
+ },
+ onDataPart(value) {
+ data.push(...value);
+ execUpdate();
+ },
+ onMessageAnnotationsPart(value) {
+ if (messageAnnotations == null) {
+ messageAnnotations = [...value];
+ } else {
+ messageAnnotations.push(...value);
+ }
+ execUpdate();
+ },
+ onFinishStepPart(value) {
+ step += 1;
+ currentTextPart = value.isContinued ? currentTextPart : void 0;
+ currentReasoningPart = void 0;
+ currentReasoningTextDetail = void 0;
+ },
+ onStartStepPart(value) {
+ if (!replaceLastMessage) {
+ message.id = value.messageId;
+ }
+ message.parts.push({ type: "step-start" });
+ execUpdate();
+ },
+ onFinishMessagePart(value) {
+ finishReason = value.finishReason;
+ if (value.usage != null) {
+ usage = calculateLanguageModelUsage(value.usage);
+ }
+ },
+ onErrorPart(error) {
+ throw new Error(error);
+ }
+ });
+ onFinish == null ? void 0 : onFinish({ message, finishReason, usage });
+ }
+
+ // core/util/process-chat-text-response.ts
+ import { generateId as generateIdFunction2 } from "@ai-sdk/provider-utils";
+
+ // core/util/process-text-stream.ts
+ async function processTextStream({
+ stream,
+ onTextPart
+ }) {
+ const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) {
+ break;
+ }
+ await onTextPart(value);
+ }
+ }
+
+ // core/util/process-chat-text-response.ts
+ async function processChatTextResponse({
+ stream,
+ update,
+ onFinish,
+ getCurrentDate = () => /* @__PURE__ */ new Date(),
+ generateId: generateId3 = generateIdFunction2
+ }) {
+ const textPart = { type: "text", text: "" };
+ const resultMessage = {
+ id: generateId3(),
+ createdAt: getCurrentDate(),
+ role: "assistant",
+ content: "",
+ parts: [textPart]
+ };
+ await processTextStream({
+ stream,
+ onTextPart: (chunk) => {
+ resultMessage.content += chunk;
+ textPart.text += chunk;
+ update({
+ message: { ...resultMessage },
+ data: [],
+ replaceLastMessage: false
+ });
+ }
+ });
+ onFinish == null ? void 0 : onFinish(resultMessage, {
+ usage: { completionTokens: NaN, promptTokens: NaN, totalTokens: NaN },
+ finishReason: "unknown"
+ });
+ }
+
+ // core/util/call-chat-api.ts
+ var getOriginalFetch = () => fetch;
+ async function callChatApi({
+ api,
+ body,
+ streamProtocol = "data",
+ credentials,
+ headers,
+ abortController,
+ restoreMessagesOnFailure,
+ onResponse,
+ onUpdate,
+ onFinish,
+ onToolCall,
+ generateId: generateId3,
+ fetch: fetch2 = getOriginalFetch(),
+ lastMessage
+ }) {
+ var _a17, _b;
+ const response = await fetch2(api, {
+ method: "POST",
+ body: JSON.stringify(body),
+ headers: {
+ "Content-Type": "application/json",
+ ...headers
+ },
+ signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
+ credentials
+ }).catch((err) => {
+ restoreMessagesOnFailure();
+ throw err;
+ });
+ if (onResponse) {
+ try {
+ await onResponse(response);
+ } catch (err) {
+ throw err;
+ }
+ }
+ if (!response.ok) {
+ restoreMessagesOnFailure();
+ throw new Error(
+ (_b = await response.text()) != null ? _b : "Failed to fetch the chat response."
+ );
+ }
+ if (!response.body) {
+ throw new Error("The response body is empty.");
+ }
+ switch (streamProtocol) {
+ case "text": {
+ await processChatTextResponse({
+ stream: response.body,
+ update: onUpdate,
+ onFinish,
+ generateId: generateId3
+ });
+ return;
+ }
+ case "data": {
+ await processChatResponse({
+ stream: response.body,
+ update: onUpdate,
+ lastMessage,
+ onToolCall,
+ onFinish({ message, finishReason, usage }) {
+ if (onFinish && message != null) {
+ onFinish(message, { usage, finishReason });
+ }
+ },
+ generateId: generateId3
+ });
+ return;
+ }
+ default: {
+ const exhaustiveCheck = streamProtocol;
+ throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
+ }
+ }
+ }
+
+ // core/util/call-completion-api.ts
+ var getOriginalFetch2 = () => fetch;
+ async function callCompletionApi({
+ api,
+ prompt,
+ credentials,
+ headers,
+ body,
+ streamProtocol = "data",
+ setCompletion,
+ setLoading,
+ setError,
+ setAbortController,
+ onResponse,
+ onFinish,
+ onError,
+ onData,
+ fetch: fetch2 = getOriginalFetch2()
+ }) {
+ var _a17;
+ try {
+ setLoading(true);
+ setError(void 0);
+ const abortController = new AbortController();
+ setAbortController(abortController);
+ setCompletion("");
+ const response = await fetch2(api, {
+ method: "POST",
+ body: JSON.stringify({
+ prompt,
+ ...body
+ }),
+ credentials,
+ headers: {
+ "Content-Type": "application/json",
+ ...headers
+ },
+ signal: abortController.signal
+ }).catch((err) => {
+ throw err;
+ });
+ if (onResponse) {
+ try {
+ await onResponse(response);
+ } catch (err) {
+ throw err;
+ }
+ }
+ if (!response.ok) {
+ throw new Error(
+ (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
+ );
+ }
+ if (!response.body) {
+ throw new Error("The response body is empty.");
+ }
+ let result = "";
+ switch (streamProtocol) {
+ case "text": {
+ await processTextStream({
+ stream: response.body,
+ onTextPart: (chunk) => {
+ result += chunk;
+ setCompletion(result);
+ }
+ });
+ break;
+ }
+ case "data": {
+ await processDataStream({
+ stream: response.body,
+ onTextPart(value) {
+ result += value;
+ setCompletion(result);
+ },
+ onDataPart(value) {
+ onData == null ? void 0 : onData(value);
+ },
+ onErrorPart(value) {
+ throw new Error(value);
+ }
+ });
+ break;
+ }
+ default: {
+ const exhaustiveCheck = streamProtocol;
+ throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
+ }
+ }
+ if (onFinish) {
+ onFinish(prompt, result);
+ }
+ setAbortController(null);
+ return result;
+ } catch (err) {
+ if (err.name === "AbortError") {
+ setAbortController(null);
+ return null;
+ }
+ if (err instanceof Error) {
+ if (onError) {
+ onError(err);
+ }
+ }
+ setError(err);
+ } finally {
+ setLoading(false);
+ }
+ }
+
+ // core/util/data-url.ts
+ function getTextFromDataUrl(dataUrl) {
+ const [header, base64Content] = dataUrl.split(",");
+ const mediaType = header.split(";")[0].split(":")[1];
+ if (mediaType == null || base64Content == null) {
+ throw new Error("Invalid data URL format");
+ }
+ try {
+ return window.atob(base64Content);
+ } catch (error) {
+ throw new Error(`Error decoding data URL`);
+ }
+ }
+
+ // core/util/extract-max-tool-invocation-step.ts
+ function extractMaxToolInvocationStep(toolInvocations) {
+ return toolInvocations == null ? void 0 : toolInvocations.reduce((max, toolInvocation) => {
+ var _a17;
+ return Math.max(max, (_a17 = toolInvocation.step) != null ? _a17 : 0);
+ }, 0);
+ }
+
+ // core/util/get-message-parts.ts
+ function getMessageParts(message) {
+ var _a17;
+ return (_a17 = message.parts) != null ? _a17 : [
+ ...message.toolInvocations ? message.toolInvocations.map((toolInvocation) => ({
+ type: "tool-invocation",
+ toolInvocation
+ })) : [],
+ ...message.reasoning ? [
+ {
+ type: "reasoning",
+ reasoning: message.reasoning,
+ details: [{ type: "text", text: message.reasoning }]
+ }
+ ] : [],
+ ...message.content ? [{ type: "text", text: message.content }] : []
+ ];
+ }
+
+ // core/util/fill-message-parts.ts
+ function fillMessageParts(messages) {
+ return messages.map((message) => ({
+ ...message,
+ parts: getMessageParts(message)
+ }));
+ }
+
+ // core/util/is-deep-equal-data.ts
+ function isDeepEqualData(obj1, obj2) {
+ if (obj1 === obj2)
+ return true;
+ if (obj1 == null || obj2 == null)
+ return false;
+ if (typeof obj1 !== "object" && typeof obj2 !== "object")
+ return obj1 === obj2;
+ if (obj1.constructor !== obj2.constructor)
+ return false;
+ if (obj1 instanceof Date && obj2 instanceof Date) {
+ return obj1.getTime() === obj2.getTime();
+ }
+ if (Array.isArray(obj1)) {
+ if (obj1.length !== obj2.length)
+ return false;
+ for (let i = 0; i < obj1.length; i++) {
+ if (!isDeepEqualData(obj1[i], obj2[i]))
+ return false;
+ }
+ return true;
+ }
+ const keys1 = Object.keys(obj1);
+ const keys2 = Object.keys(obj2);
+ if (keys1.length !== keys2.length)
+ return false;
+ for (const key of keys1) {
+ if (!keys2.includes(key))
+ return false;
+ if (!isDeepEqualData(obj1[key], obj2[key]))
+ return false;
+ }
+ return true;
+ }
+
+ // core/util/prepare-attachments-for-request.ts
+ async function prepareAttachmentsForRequest(attachmentsFromOptions) {
+ if (!attachmentsFromOptions) {
+ return [];
+ }
+ if (attachmentsFromOptions instanceof FileList) {
+ return Promise.all(
+ Array.from(attachmentsFromOptions).map(async (attachment) => {
+ const { name: name17, type } = attachment;
+ const dataUrl = await new Promise((resolve, reject) => {
+ const reader = new FileReader();
+ reader.onload = (readerEvent) => {
+ var _a17;
+ resolve((_a17 = readerEvent.target) == null ? void 0 : _a17.result);
+ };
+ reader.onerror = (error) => reject(error);
+ reader.readAsDataURL(attachment);
+ });
+ return {
+ name: name17,
+ contentType: type,
+ url: dataUrl
+ };
+ })
+ );
+ }
+ if (Array.isArray(attachmentsFromOptions)) {
+ return attachmentsFromOptions;
+ }
+ throw new Error("Invalid attachments type");
+ }
+
+ // core/util/schema.ts
+ import { validatorSymbol } from "@ai-sdk/provider-utils";
+
+ // core/util/zod-schema.ts
+ import zodToJsonSchema from "zod-to-json-schema";
+ function zodSchema(zodSchema2, options) {
+ var _a17;
+ const useReferences = (_a17 = options == null ? void 0 : options.useReferences) != null ? _a17 : false;
+ return jsonSchema(
+ zodToJsonSchema(zodSchema2, {
+ $refStrategy: useReferences ? "root" : "none",
+ target: "jsonSchema7"
+ // note: openai mode breaks various gemini conversions
+ }),
+ {
+ validate: (value) => {
+ const result = zodSchema2.safeParse(value);
+ return result.success ? { success: true, value: result.data } : { success: false, error: result.error };
+ }
+ }
+ );
+ }
+
+ // core/util/schema.ts
+ var schemaSymbol = Symbol.for("vercel.ai.schema");
+ function jsonSchema(jsonSchema2, {
+ validate
+ } = {}) {
+ return {
+ [schemaSymbol]: true,
+ _type: void 0,
+ // should never be used directly
+ [validatorSymbol]: true,
+ jsonSchema: jsonSchema2,
+ validate
+ };
+ }
+ function isSchema(value) {
+ return typeof value === "object" && value !== null && schemaSymbol in value && value[schemaSymbol] === true && "jsonSchema" in value && "validate" in value;
+ }
+ function asSchema(schema) {
+ return schema == null ? jsonSchema({
+ properties: {},
+ additionalProperties: false
+ }) : isSchema(schema) ? schema : zodSchema(schema);
+ }
+
+ // core/util/should-resubmit-messages.ts
+ function shouldResubmitMessages({
+ originalMaxToolInvocationStep,
+ originalMessageCount,
+ maxSteps,
+ messages
+ }) {
+ var _a17;
+ const lastMessage = messages[messages.length - 1];
+ return (
+ // check if the feature is enabled:
+ maxSteps > 1 && // ensure there is a last message:
+ lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
+ (messages.length > originalMessageCount || extractMaxToolInvocationStep(lastMessage.toolInvocations) !== originalMaxToolInvocationStep) && // check that next step is possible:
+ isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
+ ((_a17 = extractMaxToolInvocationStep(lastMessage.toolInvocations)) != null ? _a17 : 0) < maxSteps
+ );
+ }
+ function isAssistantMessageWithCompletedToolCalls(message) {
+ if (message.role !== "assistant") {
+ return false;
+ }
+ const lastStepStartIndex = message.parts.reduce((lastIndex, part, index) => {
+ return part.type === "step-start" ? index : lastIndex;
+ }, -1);
+ const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
+ return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
+ }
+
+ // core/util/update-tool-call-result.ts
+ function updateToolCallResult({
+ messages,
+ toolCallId,
+ toolResult: result
+ }) {
+ var _a17;
+ const lastMessage = messages[messages.length - 1];
+ const invocationPart = lastMessage.parts.find(
+ (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
+ );
+ if (invocationPart == null) {
+ return;
+ }
+ const toolResult = {
+ ...invocationPart.toolInvocation,
+ state: "result",
+ result
+ };
+ invocationPart.toolInvocation = toolResult;
+ lastMessage.toolInvocations = (_a17 = lastMessage.toolInvocations) == null ? void 0 : _a17.map(
+ (toolInvocation) => toolInvocation.toolCallId === toolCallId ? toolResult : toolInvocation
+ );
+ }
 
  // core/data-stream/create-data-stream.ts
- import { formatDataStreamPart } from "@ai-sdk/ui-utils";
  function createDataStream({
  execute,
  onError = () => "An error occurred."
@@ -527,6 +1993,7 @@ function selectTelemetryAttributes({
  async function embed({
  model,
  value,
+ providerOptions,
  maxRetries: maxRetriesArg,
  abortSignal,
  headers,
@@ -552,7 +2019,7 @@ async function embed({
  }),
  tracer,
  fn: async (span) => {
- const { embedding, usage, rawResponse } = await retry(
+ const { embedding, usage, response } = await retry(
  () => (
  // nested spans to align with the embedMany telemetry data:
  recordSpan({
@@ -575,7 +2042,8 @@ async function embed({
  const modelResponse = await model.doEmbed({
  values: [value],
  abortSignal,
- headers
+ headers,
+ providerOptions
  });
  const embedding2 = modelResponse.embeddings[0];
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -595,7 +2063,7 @@ async function embed({
  return {
  embedding: embedding2,
  usage: usage2,
- rawResponse: modelResponse.rawResponse
+ response: modelResponse.response
  };
  }
  })
@@ -610,7 +2078,12 @@ async function embed({
  }
  })
  );
- return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
+ return new DefaultEmbedResult({
+ value,
+ embedding,
+ usage,
+ response
+ });
  }
  });
  }
@@ -619,7 +2092,7 @@ var DefaultEmbedResult = class {
  this.value = options.value;
  this.embedding = options.embedding;
  this.usage = options.usage;
- this.rawResponse = options.rawResponse;
+ this.response = options.response;
  }
  };
 
@@ -642,6 +2115,7 @@ async function embedMany({
  maxRetries: maxRetriesArg,
  abortSignal,
  headers,
+ providerOptions,
  experimental_telemetry: telemetry
  }) {
  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
@@ -669,7 +2143,7 @@ async function embedMany({
  fn: async (span) => {
  const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
  if (maxEmbeddingsPerCall == null) {
- const { embeddings: embeddings2, usage } = await retry(() => {
+ const { embeddings: embeddings2, usage, response } = await retry(() => {
  return recordSpan({
  name: "ai.embedMany.doEmbed",
  attributes: selectTelemetryAttributes({
@@ -692,7 +2166,8 @@ async function embedMany({
  const modelResponse = await model.doEmbed({
  values,
  abortSignal,
- headers
+ headers,
+ providerOptions
  });
  const embeddings3 = modelResponse.embeddings;
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -707,7 +2182,11 @@ async function embedMany({
  }
  })
  );
- return { embeddings: embeddings3, usage: usage2 };
+ return {
+ embeddings: embeddings3,
+ usage: usage2,
+ response: modelResponse.response
+ };
  }
  });
  });
@@ -722,13 +2201,23 @@ async function embedMany({
  }
  })
  );
- return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
+ return new DefaultEmbedManyResult({
+ values,
+ embeddings: embeddings2,
+ usage,
+ responses: [response]
+ });
  }
  const valueChunks = splitArray(values, maxEmbeddingsPerCall);
  const embeddings = [];
+ const responses = [];
  let tokens = 0;
  for (const chunk of valueChunks) {
- const { embeddings: responseEmbeddings, usage } = await retry(() => {
+ const {
+ embeddings: responseEmbeddings,
+ usage,
+ response
+ } = await retry(() => {
  return recordSpan({
  name: "ai.embedMany.doEmbed",
  attributes: selectTelemetryAttributes({
@@ -751,7 +2240,8 @@ async function embedMany({
  const modelResponse = await model.doEmbed({
  values: chunk,
  abortSignal,
- headers
+ headers,
+ providerOptions
  });
  const embeddings2 = modelResponse.embeddings;
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -766,11 +2256,16 @@ async function embedMany({
  }
  })
  );
- return { embeddings: embeddings2, usage: usage2 };
+ return {
+ embeddings: embeddings2,
+ usage: usage2,
+ response: modelResponse.response
+ };
  }
  });
  });
  embeddings.push(...responseEmbeddings);
+ responses.push(response);
  tokens += usage.tokens;
  }
  span.setAttributes(
@@ -787,7 +2282,8 @@ async function embedMany({
  return new DefaultEmbedManyResult({
  values,
  embeddings,
- usage: { tokens }
+ usage: { tokens },
+ responses
  });
  }
  });
@@ -797,6 +2293,7 @@ var DefaultEmbedManyResult = class {
  this.values = options.values;
  this.embeddings = options.embeddings;
  this.usage = options.usage;
+ this.responses = options.responses;
  }
  };
 
@@ -830,12 +2327,12 @@ import {
  var DefaultGeneratedFile = class {
  constructor({
  data,
- mimeType
+ mediaType
  }) {
  const isUint8Array = data instanceof Uint8Array;
  this.base64Data = isUint8Array ? void 0 : data;
  this.uint8ArrayData = isUint8Array ? data : void 0;
- this.mimeType = mimeType;
+ this.mediaType = mediaType;
  }
  // lazy conversion with caching to avoid unnecessary conversion overhead:
  get base64() {
@@ -859,45 +2356,46 @@ var DefaultGeneratedFileWithType = class extends DefaultGeneratedFile {
  }
  };
 
- // core/util/detect-image-mimetype.ts
- var mimeTypeSignatures = [
+ // core/util/detect-media-type.ts
+ import { convertBase64ToUint8Array as convertBase64ToUint8Array2 } from "@ai-sdk/provider-utils";
+ var imageMediaTypeSignatures = [
  {
- mimeType: "image/gif",
+ mediaType: "image/gif",
  bytesPrefix: [71, 73, 70],
  base64Prefix: "R0lG"
  },
  {
- mimeType: "image/png",
+ mediaType: "image/png",
  bytesPrefix: [137, 80, 78, 71],
  base64Prefix: "iVBORw"
  },
  {
- mimeType: "image/jpeg",
+ mediaType: "image/jpeg",
  bytesPrefix: [255, 216],
  base64Prefix: "/9j/"
  },
  {
- mimeType: "image/webp",
+ mediaType: "image/webp",
  bytesPrefix: [82, 73, 70, 70],
  base64Prefix: "UklGRg"
  },
  {
- mimeType: "image/bmp",
+ mediaType: "image/bmp",
  bytesPrefix: [66, 77],
  base64Prefix: "Qk"
  },
  {
- mimeType: "image/tiff",
+ mediaType: "image/tiff",
  bytesPrefix: [73, 73, 42, 0],
  base64Prefix: "SUkqAA"
  },
  {
- mimeType: "image/tiff",
+ mediaType: "image/tiff",
  bytesPrefix: [77, 77, 0, 42],
  base64Prefix: "TU0AKg"
  },
  {
- mimeType: "image/avif",
+ mediaType: "image/avif",
  bytesPrefix: [
  0,
  0,
@@ -915,7 +2413,7 @@ var mimeTypeSignatures = [
  base64Prefix: "AAAAIGZ0eXBhdmlm"
  },
  {
- mimeType: "image/heic",
+ mediaType: "image/heic",
  bytesPrefix: [
  0,
  0,
@@ -933,10 +2431,59 @@ var mimeTypeSignatures = [
  base64Prefix: "AAAAIGZ0eXBoZWlj"
  }
  ];
- function detectImageMimeType(image) {
- for (const signature of mimeTypeSignatures) {
- if (typeof image === "string" ? image.startsWith(signature.base64Prefix) : image.length >= signature.bytesPrefix.length && signature.bytesPrefix.every((byte, index) => image[index] === byte)) {
- return signature.mimeType;
+ var audioMediaTypeSignatures = [
+ {
+ mediaType: "audio/mpeg",
+ bytesPrefix: [255, 251],
+ base64Prefix: "//s="
+ },
+ {
+ mediaType: "audio/wav",
+ bytesPrefix: [82, 73, 70, 70],
+ base64Prefix: "UklGR"
+ },
+ {
+ mediaType: "audio/ogg",
+ bytesPrefix: [79, 103, 103, 83],
+ base64Prefix: "T2dnUw"
+ },
+ {
+ mediaType: "audio/flac",
+ bytesPrefix: [102, 76, 97, 67],
+ base64Prefix: "ZkxhQw"
+ },
+ {
+ mediaType: "audio/aac",
+ bytesPrefix: [64, 21, 0, 0],
+ base64Prefix: "QBUA"
+ },
+ {
+ mediaType: "audio/mp4",
+ bytesPrefix: [102, 116, 121, 112],
+ base64Prefix: "ZnR5cA"
+ }
+ ];
+ var stripID3 = (data) => {
+ const bytes = typeof data === "string" ? convertBase64ToUint8Array2(data) : data;
+ const id3Size = (bytes[6] & 127) << 21 | (bytes[7] & 127) << 14 | (bytes[8] & 127) << 7 | bytes[9] & 127;
+ return bytes.slice(id3Size + 10);
+ };
+ function stripID3TagsIfPresent(data) {
+ const hasId3 = typeof data === "string" && data.startsWith("SUQz") || typeof data !== "string" && data.length > 10 && data[0] === 73 && // 'I'
+ data[1] === 68 && // 'D'
+ data[2] === 51;
+ return hasId3 ? stripID3(data) : data;
+ }
+ function detectMediaType({
+ data,
+ signatures
+ }) {
+ const processedData = stripID3TagsIfPresent(data);
+ for (const signature of signatures) {
+ if (typeof processedData === "string" ? processedData.startsWith(signature.base64Prefix) : processedData.length >= signature.bytesPrefix.length && signature.bytesPrefix.every(
+ (byte, index) => processedData[index] === byte
+ )) {
+ return signature.mediaType;
  }
  }
  return void 0;
@@ -992,7 +2539,10 @@ async function generateImage({
992
2539
  var _a18;
993
2540
  return new DefaultGeneratedFile({
994
2541
  data: image,
995
- mimeType: (_a18 = detectImageMimeType(image)) != null ? _a18 : "image/png"
2542
+ mediaType: (_a18 = detectMediaType({
2543
+ data: image,
2544
+ signatures: imageMediaTypeSignatures
2545
+ })) != null ? _a18 : "image/png"
996
2546
  });
997
2547
  }
998
2548
  )
@@ -1021,7 +2571,7 @@ import {
1021
2571
  JSONParseError,
1022
2572
  TypeValidationError as TypeValidationError2
1023
2573
  } from "@ai-sdk/provider";
1024
- import { createIdGenerator, safeParseJSON } from "@ai-sdk/provider-utils";
2574
+ import { createIdGenerator, safeParseJSON as safeParseJSON2 } from "@ai-sdk/provider-utils";
1025
2575
 
1026
2576
  // errors/no-object-generated-error.ts
1027
2577
  import { AISDKError as AISDKError4 } from "@ai-sdk/provider";
@@ -1092,7 +2642,7 @@ async function download({ url }) {
1092
2642
  }
1093
2643
  return {
1094
2644
  data: new Uint8Array(await response.arrayBuffer()),
1095
- mimeType: (_a17 = response.headers.get("content-type")) != null ? _a17 : void 0
2645
+ mediaType: (_a17 = response.headers.get("content-type")) != null ? _a17 : void 0
1096
2646
  };
1097
2647
  } catch (error) {
1098
2648
  if (DownloadError.isInstance(error)) {
@@ -1103,10 +2653,12 @@ async function download({ url }) {
1103
2653
  }
1104
2654
 
1105
2655
  // core/prompt/data-content.ts
2656
+ import { AISDKError as AISDKError7 } from "@ai-sdk/provider";
1106
2657
  import {
1107
- convertBase64ToUint8Array as convertBase64ToUint8Array2,
2658
+ convertBase64ToUint8Array as convertBase64ToUint8Array3,
1108
2659
  convertUint8ArrayToBase64 as convertUint8ArrayToBase642
1109
2660
  } from "@ai-sdk/provider-utils";
2661
+ import { z } from "zod";
1110
2662
 
1111
2663
  // core/prompt/invalid-data-content-error.ts
1112
2664
  import { AISDKError as AISDKError6 } from "@ai-sdk/provider";
@@ -1130,8 +2682,23 @@ var InvalidDataContentError = class extends AISDKError6 {
1130
2682
  };
1131
2683
  _a6 = symbol6;
1132
2684
 
2685
+ // core/prompt/split-data-url.ts
2686
+ function splitDataUrl(dataUrl) {
2687
+ try {
2688
+ const [header, base64Content] = dataUrl.split(",");
2689
+ return {
2690
+ mediaType: header.split(";")[0].split(":")[1],
2691
+ base64Content
2692
+ };
2693
+ } catch (error) {
2694
+ return {
2695
+ mediaType: void 0,
2696
+ base64Content: void 0
2697
+ };
2698
+ }
2699
+ }
2700
+
1133
2701
  // core/prompt/data-content.ts
1134
- import { z } from "zod";
1135
2702
  var dataContentSchema = z.union([
1136
2703
  z.string(),
1137
2704
  z.instanceof(Uint8Array),
@@ -1145,6 +2712,33 @@ var dataContentSchema = z.union([
1145
2712
  { message: "Must be a Buffer" }
1146
2713
  )
1147
2714
  ]);
2715
+ function convertToLanguageModelV2DataContent(content) {
2716
+ if (content instanceof Uint8Array) {
2717
+ return { data: content, mediaType: void 0 };
2718
+ }
2719
+ if (content instanceof ArrayBuffer) {
2720
+ return { data: new Uint8Array(content), mediaType: void 0 };
2721
+ }
2722
+ if (typeof content === "string") {
2723
+ try {
2724
+ content = new URL(content);
2725
+ } catch (error) {
2726
+ }
2727
+ }
2728
+ if (content instanceof URL && content.protocol === "data:") {
2729
+ const { mediaType: dataUrlMediaType, base64Content } = splitDataUrl(
2730
+ content.toString()
2731
+ );
2732
+ if (dataUrlMediaType == null || base64Content == null) {
2733
+ throw new AISDKError7({
2734
+ name: "InvalidDataContentError",
2735
+ message: `Invalid data URL format in content ${content.toString()}`
2736
+ });
2737
+ }
2738
+ return { data: base64Content, mediaType: dataUrlMediaType };
2739
+ }
2740
+ return { data: content, mediaType: void 0 };
2741
+ }
1148
2742
  function convertDataContentToBase64String(content) {
1149
2743
  if (typeof content === "string") {
1150
2744
  return content;
@@ -1160,7 +2754,7 @@ function convertDataContentToUint8Array(content) {
1160
2754
  }
1161
2755
  if (typeof content === "string") {
1162
2756
  try {
1163
- return convertBase64ToUint8Array2(content);
2757
+ return convertBase64ToUint8Array3(content);
1164
2758
  } catch (error) {
1165
2759
  throw new InvalidDataContentError({
1166
2760
  message: "Invalid data content. Content string is not a base64-encoded media.",
@@ -1183,12 +2777,12 @@ function convertUint8ArrayToText(uint8Array) {
1183
2777
  }
1184
2778
 
1185
2779
  // core/prompt/invalid-message-role-error.ts
1186
- import { AISDKError as AISDKError7 } from "@ai-sdk/provider";
2780
+ import { AISDKError as AISDKError8 } from "@ai-sdk/provider";
1187
2781
  var name7 = "AI_InvalidMessageRoleError";
1188
2782
  var marker7 = `vercel.ai.error.${name7}`;
1189
2783
  var symbol7 = Symbol.for(marker7);
1190
2784
  var _a7;
1191
- var InvalidMessageRoleError = class extends AISDKError7 {
2785
+ var InvalidMessageRoleError = class extends AISDKError8 {
1192
2786
  constructor({
1193
2787
  role,
1194
2788
  message = `Invalid message role: '${role}'. Must be one of: "system", "user", "assistant", "tool".`
@@ -1198,27 +2792,11 @@ var InvalidMessageRoleError = class extends AISDKError7 {
1198
2792
  this.role = role;
1199
2793
  }
1200
2794
  static isInstance(error) {
1201
- return AISDKError7.hasMarker(error, marker7);
2795
+ return AISDKError8.hasMarker(error, marker7);
1202
2796
  }
1203
2797
  };
1204
2798
  _a7 = symbol7;
1205
2799
 
1206
- // core/prompt/split-data-url.ts
1207
- function splitDataUrl(dataUrl) {
1208
- try {
1209
- const [header, base64Content] = dataUrl.split(",");
1210
- return {
1211
- mimeType: header.split(";")[0].split(":")[1],
1212
- base64Content
1213
- };
1214
- } catch (error) {
1215
- return {
1216
- mimeType: void 0,
1217
- base64Content: void 0
1218
- };
1219
- }
1220
- }
1221
-
1222
2800
  // core/prompt/convert-to-language-model-prompt.ts
1223
2801
  async function convertToLanguageModelPrompt({
1224
2802
  prompt,
@@ -1240,14 +2818,13 @@ async function convertToLanguageModelPrompt({
1240
2818
  ];
1241
2819
  }
1242
2820
  function convertToLanguageModelMessage(message, downloadedAssets) {
1243
- var _a17, _b, _c, _d, _e, _f;
1244
2821
  const role = message.role;
1245
2822
  switch (role) {
1246
2823
  case "system": {
1247
2824
  return {
1248
2825
  role: "system",
1249
2826
  content: message.content,
1250
- providerMetadata: (_a17 = message.providerOptions) != null ? _a17 : message.experimental_providerMetadata
2827
+ providerOptions: message.providerOptions
1251
2828
  };
1252
2829
  }
1253
2830
  case "user": {
@@ -1255,13 +2832,13 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
1255
2832
  return {
1256
2833
  role: "user",
1257
2834
  content: [{ type: "text", text: message.content }],
1258
- providerMetadata: (_b = message.providerOptions) != null ? _b : message.experimental_providerMetadata
2835
+ providerOptions: message.providerOptions
1259
2836
  };
1260
2837
  }
1261
2838
  return {
1262
2839
  role: "user",
1263
2840
  content: message.content.map((part) => convertPartToLanguageModelPart(part, downloadedAssets)).filter((part) => part.type !== "text" || part.text !== ""),
1264
- providerMetadata: (_c = message.providerOptions) != null ? _c : message.experimental_providerMetadata
2841
+ providerOptions: message.providerOptions
1265
2842
  };
1266
2843
  }
1267
2844
  case "assistant": {
@@ -1269,7 +2846,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
1269
2846
  return {
1270
2847
  role: "assistant",
1271
2848
  content: [{ type: "text", text: message.content }],
1272
- providerMetadata: (_d = message.providerOptions) != null ? _d : message.experimental_providerMetadata
2849
+ providerOptions: message.providerOptions
1273
2850
  };
1274
2851
  }
1275
2852
  return {
@@ -1278,16 +2855,19 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
1278
2855
  // remove empty text parts:
1279
2856
  (part) => part.type !== "text" || part.text !== ""
1280
2857
  ).map((part) => {
1281
- var _a18;
1282
- const providerOptions = (_a18 = part.providerOptions) != null ? _a18 : part.experimental_providerMetadata;
2858
+ var _a17;
2859
+ const providerOptions = part.providerOptions;
1283
2860
  switch (part.type) {
1284
2861
  case "file": {
2862
+ const { data, mediaType } = convertToLanguageModelV2DataContent(
2863
+ part.data
2864
+ );
1285
2865
  return {
1286
2866
  type: "file",
1287
- data: part.data instanceof URL ? part.data : convertDataContentToBase64String(part.data),
2867
+ data,
1288
2868
  filename: part.filename,
1289
- mimeType: part.mimeType,
1290
- providerMetadata: providerOptions
2869
+ mediaType: (_a17 = mediaType != null ? mediaType : part.mediaType) != null ? _a17 : part.mimeType,
2870
+ providerOptions
1291
2871
  };
1292
2872
  }
1293
2873
  case "reasoning": {
@@ -1295,21 +2875,21 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
1295
2875
  type: "reasoning",
1296
2876
  text: part.text,
1297
2877
  signature: part.signature,
1298
- providerMetadata: providerOptions
2878
+ providerOptions
1299
2879
  };
1300
2880
  }
1301
2881
  case "redacted-reasoning": {
1302
2882
  return {
1303
2883
  type: "redacted-reasoning",
1304
2884
  data: part.data,
1305
- providerMetadata: providerOptions
2885
+ providerOptions
1306
2886
  };
1307
2887
  }
1308
2888
  case "text": {
1309
2889
  return {
1310
2890
  type: "text",
1311
2891
  text: part.text,
1312
- providerMetadata: providerOptions
2892
+ providerOptions
1313
2893
  };
1314
2894
  }
1315
2895
  case "tool-call": {
@@ -1318,30 +2898,27 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
1318
2898
  toolCallId: part.toolCallId,
1319
2899
  toolName: part.toolName,
1320
2900
  args: part.args,
1321
- providerMetadata: providerOptions
2901
+ providerOptions
1322
2902
  };
1323
2903
  }
1324
2904
  }
1325
2905
  }),
1326
- providerMetadata: (_e = message.providerOptions) != null ? _e : message.experimental_providerMetadata
2906
+ providerOptions: message.providerOptions
1327
2907
  };
1328
2908
  }
1329
2909
  case "tool": {
1330
2910
  return {
1331
2911
  role: "tool",
1332
- content: message.content.map((part) => {
1333
- var _a18;
1334
- return {
1335
- type: "tool-result",
1336
- toolCallId: part.toolCallId,
1337
- toolName: part.toolName,
1338
- result: part.result,
1339
- content: part.experimental_content,
1340
- isError: part.isError,
1341
- providerMetadata: (_a18 = part.providerOptions) != null ? _a18 : part.experimental_providerMetadata
1342
- };
1343
- }),
1344
- providerMetadata: (_f = message.providerOptions) != null ? _f : message.experimental_providerMetadata
2912
+ content: message.content.map((part) => ({
2913
+ type: "tool-result",
2914
+ toolCallId: part.toolCallId,
2915
+ toolName: part.toolName,
2916
+ result: part.result,
2917
+ content: part.experimental_content,
2918
+ isError: part.isError,
2919
+ providerOptions: part.providerOptions
2920
+ })),
2921
+ providerOptions: message.providerOptions
1345
2922
  };
1346
2923
  }
1347
2924
  default: {
@@ -1374,78 +2951,60 @@ async function downloadAssets(messages, downloadImplementation, modelSupportsIma
1374
2951
  );
1375
2952
  }
1376
2953
  function convertPartToLanguageModelPart(part, downloadedAssets) {
1377
- var _a17, _b, _c, _d;
2954
+ var _a17, _b, _c;
1378
2955
  if (part.type === "text") {
1379
2956
  return {
1380
2957
  type: "text",
1381
2958
  text: part.text,
1382
- providerMetadata: (_a17 = part.providerOptions) != null ? _a17 : part.experimental_providerMetadata
2959
+ providerOptions: part.providerOptions
1383
2960
  };
1384
2961
  }
1385
- let mimeType = part.mimeType;
1386
- let data;
1387
- let content;
1388
- let normalizedData;
2962
+ let originalData;
1389
2963
  const type = part.type;
1390
2964
  switch (type) {
1391
2965
  case "image":
1392
- data = part.image;
2966
+ originalData = part.image;
1393
2967
  break;
1394
2968
  case "file":
1395
- data = part.data;
2969
+ originalData = part.data;
1396
2970
  break;
1397
2971
  default:
1398
2972
  throw new Error(`Unsupported part type: ${type}`);
1399
2973
  }
1400
- try {
1401
- content = typeof data === "string" ? new URL(data) : data;
1402
- } catch (error) {
1403
- content = data;
1404
- }
1405
- if (content instanceof URL) {
1406
- if (content.protocol === "data:") {
1407
- const { mimeType: dataUrlMimeType, base64Content } = splitDataUrl(
1408
- content.toString()
1409
- );
1410
- if (dataUrlMimeType == null || base64Content == null) {
1411
- throw new Error(`Invalid data URL format in part ${type}`);
1412
- }
1413
- mimeType = dataUrlMimeType;
1414
- normalizedData = convertDataContentToUint8Array(base64Content);
1415
- } else {
1416
- const downloadedFile = downloadedAssets[content.toString()];
1417
- if (downloadedFile) {
1418
- normalizedData = downloadedFile.data;
1419
- mimeType != null ? mimeType : mimeType = downloadedFile.mimeType;
1420
- } else {
1421
- normalizedData = content;
1422
- }
2974
+ const { data: convertedData, mediaType: convertedMediaType } = convertToLanguageModelV2DataContent(originalData);
2975
+ let mediaType = (_a17 = convertedMediaType != null ? convertedMediaType : part.mediaType) != null ? _a17 : part.mimeType;
2976
+ let data = convertedData;
2977
+ if (data instanceof URL) {
2978
+ const downloadedFile = downloadedAssets[data.toString()];
2979
+ if (downloadedFile) {
2980
+ data = downloadedFile.data;
2981
+ mediaType = (_b = downloadedFile.mediaType) != null ? _b : mediaType;
1423
2982
  }
1424
- } else {
1425
- normalizedData = convertDataContentToUint8Array(content);
1426
2983
  }
1427
2984
  switch (type) {
1428
2985
  case "image": {
1429
- if (normalizedData instanceof Uint8Array) {
1430
- mimeType = (_b = detectImageMimeType(normalizedData)) != null ? _b : mimeType;
2986
+ if (data instanceof Uint8Array || typeof data === "string") {
2987
+ mediaType = (_c = detectMediaType({ data, signatures: imageMediaTypeSignatures })) != null ? _c : mediaType;
1431
2988
  }
1432
2989
  return {
1433
- type: "image",
1434
- image: normalizedData,
1435
- mimeType,
1436
- providerMetadata: (_c = part.providerOptions) != null ? _c : part.experimental_providerMetadata
2990
+ type: "file",
2991
+ mediaType: mediaType != null ? mediaType : "image/*",
2992
+ // any image
2993
+ filename: void 0,
2994
+ data,
2995
+ providerOptions: part.providerOptions
1437
2996
  };
1438
2997
  }
1439
2998
  case "file": {
1440
- if (mimeType == null) {
1441
- throw new Error(`Mime type is missing for file part`);
2999
+ if (mediaType == null) {
3000
+ throw new Error(`Media type is missing for file part`);
1442
3001
  }
1443
3002
  return {
1444
3003
  type: "file",
1445
- data: normalizedData instanceof Uint8Array ? convertDataContentToBase64String(normalizedData) : normalizedData,
3004
+ mediaType,
1446
3005
  filename: part.filename,
1447
- mimeType,
1448
- providerMetadata: (_d = part.providerOptions) != null ? _d : part.experimental_providerMetadata
3006
+ data,
3007
+ providerOptions: part.providerOptions
1449
3008
  };
1450
3009
  }
1451
3010
  }
@@ -1453,7 +3012,7 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {
1453
3012
 
1454
3013
  // core/prompt/prepare-call-settings.ts
1455
3014
  function prepareCallSettings({
1456
- maxTokens,
3015
+ maxOutputTokens,
1457
3016
  temperature,
1458
3017
  topP,
1459
3018
  topK,
@@ -1462,19 +3021,19 @@ function prepareCallSettings({
1462
3021
  stopSequences,
1463
3022
  seed
1464
3023
  }) {
1465
- if (maxTokens != null) {
1466
- if (!Number.isInteger(maxTokens)) {
3024
+ if (maxOutputTokens != null) {
3025
+ if (!Number.isInteger(maxOutputTokens)) {
1467
3026
  throw new InvalidArgumentError({
1468
- parameter: "maxTokens",
1469
- value: maxTokens,
1470
- message: "maxTokens must be an integer"
3027
+ parameter: "maxOutputTokens",
3028
+ value: maxOutputTokens,
3029
+ message: "maxOutputTokens must be an integer"
1471
3030
  });
1472
3031
  }
1473
- if (maxTokens < 1) {
3032
+ if (maxOutputTokens < 1) {
1474
3033
  throw new InvalidArgumentError({
1475
- parameter: "maxTokens",
1476
- value: maxTokens,
1477
- message: "maxTokens must be >= 1"
3034
+ parameter: "maxOutputTokens",
3035
+ value: maxOutputTokens,
3036
+ message: "maxOutputTokens must be >= 1"
1478
3037
  });
1479
3038
  }
1480
3039
  }
@@ -1533,7 +3092,7 @@ function prepareCallSettings({
1533
3092
  }
1534
3093
  }
1535
3094
  return {
1536
- maxTokens,
3095
+ maxOutputTokens,
1537
3096
  // TODO v5 remove default 0 for temperature
1538
3097
  temperature: temperature != null ? temperature : 0,
1539
3098
  topP,
@@ -1575,7 +3134,7 @@ function attachmentsToParts(attachments) {
1575
3134
  parts.push({
1576
3135
  type: "file",
1577
3136
  data: url,
1578
- mimeType: attachment.contentType
3137
+ mediaType: attachment.contentType
1579
3138
  });
1580
3139
  }
1581
3140
  break;
@@ -1583,14 +3142,14 @@ function attachmentsToParts(attachments) {
1583
3142
  case "data:": {
1584
3143
  let header;
1585
3144
  let base64Content;
1586
- let mimeType;
3145
+ let mediaType;
1587
3146
  try {
1588
3147
  [header, base64Content] = attachment.url.split(",");
1589
- mimeType = header.split(";")[0].split(":")[1];
3148
+ mediaType = header.split(";")[0].split(":")[1];
1590
3149
  } catch (error) {
1591
3150
  throw new Error(`Error processing data URL: ${attachment.url}`);
1592
3151
  }
1593
- if (mimeType == null || base64Content == null) {
3152
+ if (mediaType == null || base64Content == null) {
1594
3153
  throw new Error(`Invalid data URL format: ${attachment.url}`);
1595
3154
  }
1596
3155
  if ((_b = attachment.contentType) == null ? void 0 : _b.startsWith("image/")) {
@@ -1614,7 +3173,7 @@ function attachmentsToParts(attachments) {
1614
3173
  parts.push({
1615
3174
  type: "file",
1616
3175
  data: base64Content,
1617
- mimeType: attachment.contentType
3176
+ mediaType: attachment.contentType
1618
3177
  });
1619
3178
  }
1620
3179
  break;
@@ -1628,12 +3187,12 @@ function attachmentsToParts(attachments) {
1628
3187
  }
1629
3188
 
1630
3189
  // core/prompt/message-conversion-error.ts
1631
- import { AISDKError as AISDKError8 } from "@ai-sdk/provider";
3190
+ import { AISDKError as AISDKError9 } from "@ai-sdk/provider";
1632
3191
  var name8 = "AI_MessageConversionError";
1633
3192
  var marker8 = `vercel.ai.error.${name8}`;
1634
3193
  var symbol8 = Symbol.for(marker8);
1635
3194
  var _a8;
1636
- var MessageConversionError = class extends AISDKError8 {
3195
+ var MessageConversionError = class extends AISDKError9 {
1637
3196
  constructor({
1638
3197
  originalMessage,
1639
3198
  message
@@ -1643,7 +3202,7 @@ var MessageConversionError = class extends AISDKError8 {
1643
3202
  this.originalMessage = originalMessage;
1644
3203
  }
1645
3204
  static isInstance(error) {
1646
- return AISDKError8.hasMarker(error, marker8);
3205
+ return AISDKError9.hasMarker(error, marker8);
1647
3206
  }
1648
3207
  };
1649
3208
  _a8 = symbol8;
@@ -1689,14 +3248,23 @@ function convertToCoreMessages(messages, options) {
1689
3248
  case "assistant": {
1690
3249
  if (message.parts != null) {
1691
3250
  let processBlock2 = function() {
3251
+ var _a18;
1692
3252
  const content2 = [];
1693
3253
  for (const part of block) {
1694
3254
  switch (part.type) {
1695
- case "file":
1696
3255
  case "text": {
1697
3256
  content2.push(part);
1698
3257
  break;
1699
3258
  }
3259
+ case "file": {
3260
+ content2.push({
3261
+ type: "file",
3262
+ data: part.data,
3263
+ mediaType: (_a18 = part.mediaType) != null ? _a18 : part.mimeType
3264
+ // TODO migration, remove
3265
+ });
3266
+ break;
3267
+ }
1700
3268
  case "reasoning": {
1701
3269
  for (const detail of part.details) {
1702
3270
  switch (detail.type) {
@@ -1908,7 +3476,7 @@ function detectSingleMessageCharacteristics(message) {
1908
3476
  "experimental_attachments" in message)) {
1909
3477
  return "has-ui-specific-parts";
1910
3478
  } else if (typeof message === "object" && message !== null && "content" in message && (Array.isArray(message.content) || // Core messages can have array content
1911
- "experimental_providerMetadata" in message || "providerOptions" in message)) {
3479
+ "providerOptions" in message)) {
1912
3480
  return "has-core-specific-parts";
1913
3481
  } else if (typeof message === "object" && message !== null && "role" in message && "content" in message && typeof message.content === "string" && ["system", "user", "assistant", "tool"].includes(message.role)) {
1914
3482
  return "message";
@@ -1953,7 +3521,7 @@ var toolResultContentSchema = z4.array(
1953
3521
  z4.object({
1954
3522
  type: z4.literal("image"),
1955
3523
  data: z4.string(),
1956
- mimeType: z4.string().optional()
3524
+ mediaType: z4.string().optional()
1957
3525
  })
1958
3526
  ])
1959
3527
  );
@@ -1962,43 +3530,39 @@ var toolResultContentSchema = z4.array(
1962
3530
  var textPartSchema = z5.object({
1963
3531
  type: z5.literal("text"),
1964
3532
  text: z5.string(),
1965
- providerOptions: providerMetadataSchema.optional(),
1966
- experimental_providerMetadata: providerMetadataSchema.optional()
3533
+ providerOptions: providerMetadataSchema.optional()
1967
3534
  });
1968
3535
  var imagePartSchema = z5.object({
1969
3536
  type: z5.literal("image"),
1970
3537
  image: z5.union([dataContentSchema, z5.instanceof(URL)]),
3538
+ mediaType: z5.string().optional(),
1971
3539
  mimeType: z5.string().optional(),
1972
- providerOptions: providerMetadataSchema.optional(),
1973
- experimental_providerMetadata: providerMetadataSchema.optional()
3540
+ providerOptions: providerMetadataSchema.optional()
1974
3541
  });
1975
3542
  var filePartSchema = z5.object({
1976
3543
  type: z5.literal("file"),
1977
3544
  data: z5.union([dataContentSchema, z5.instanceof(URL)]),
1978
3545
  filename: z5.string().optional(),
1979
- mimeType: z5.string(),
1980
- providerOptions: providerMetadataSchema.optional(),
1981
- experimental_providerMetadata: providerMetadataSchema.optional()
3546
+ mediaType: z5.string(),
3547
+ mimeType: z5.string().optional(),
3548
+ providerOptions: providerMetadataSchema.optional()
1982
3549
  });
1983
3550
  var reasoningPartSchema = z5.object({
1984
3551
  type: z5.literal("reasoning"),
1985
3552
  text: z5.string(),
1986
- providerOptions: providerMetadataSchema.optional(),
1987
- experimental_providerMetadata: providerMetadataSchema.optional()
3553
+ providerOptions: providerMetadataSchema.optional()
1988
3554
  });
1989
3555
  var redactedReasoningPartSchema = z5.object({
1990
3556
  type: z5.literal("redacted-reasoning"),
1991
3557
  data: z5.string(),
1992
- providerOptions: providerMetadataSchema.optional(),
1993
- experimental_providerMetadata: providerMetadataSchema.optional()
3558
+ providerOptions: providerMetadataSchema.optional()
1994
3559
  });
1995
3560
  var toolCallPartSchema = z5.object({
1996
3561
  type: z5.literal("tool-call"),
1997
3562
  toolCallId: z5.string(),
1998
3563
  toolName: z5.string(),
1999
3564
  args: z5.unknown(),
2000
- providerOptions: providerMetadataSchema.optional(),
2001
- experimental_providerMetadata: providerMetadataSchema.optional()
3565
+ providerOptions: providerMetadataSchema.optional()
2002
3566
  });
2003
3567
  var toolResultPartSchema = z5.object({
2004
3568
  type: z5.literal("tool-result"),
@@ -2007,16 +3571,14 @@ var toolResultPartSchema = z5.object({
2007
3571
  result: z5.unknown(),
2008
3572
  content: toolResultContentSchema.optional(),
2009
3573
  isError: z5.boolean().optional(),
2010
- providerOptions: providerMetadataSchema.optional(),
2011
- experimental_providerMetadata: providerMetadataSchema.optional()
3574
+ providerOptions: providerMetadataSchema.optional()
2012
3575
  });
2013
3576
 
2014
3577
  // core/prompt/message.ts
2015
3578
  var coreSystemMessageSchema = z6.object({
2016
3579
  role: z6.literal("system"),
2017
3580
  content: z6.string(),
2018
- providerOptions: providerMetadataSchema.optional(),
2019
- experimental_providerMetadata: providerMetadataSchema.optional()
3581
+ providerOptions: providerMetadataSchema.optional()
2020
3582
  });
2021
3583
  var coreUserMessageSchema = z6.object({
2022
3584
  role: z6.literal("user"),
@@ -2024,8 +3586,7 @@ var coreUserMessageSchema = z6.object({
2024
3586
  z6.string(),
2025
3587
  z6.array(z6.union([textPartSchema, imagePartSchema, filePartSchema]))
2026
3588
  ]),
2027
- providerOptions: providerMetadataSchema.optional(),
2028
- experimental_providerMetadata: providerMetadataSchema.optional()
3589
+ providerOptions: providerMetadataSchema.optional()
2029
3590
  });
2030
3591
  var coreAssistantMessageSchema = z6.object({
2031
3592
  role: z6.literal("assistant"),
@@ -2041,14 +3602,12 @@ var coreAssistantMessageSchema = z6.object({
2041
3602
  ])
2042
3603
  )
2043
3604
  ]),
2044
- providerOptions: providerMetadataSchema.optional(),
2045
- experimental_providerMetadata: providerMetadataSchema.optional()
3605
+ providerOptions: providerMetadataSchema.optional()
2046
3606
  });
2047
3607
  var coreToolMessageSchema = z6.object({
2048
3608
  role: z6.literal("tool"),
2049
3609
  content: z6.array(toolResultPartSchema),
2050
- providerOptions: providerMetadataSchema.optional(),
2051
- experimental_providerMetadata: providerMetadataSchema.optional()
3610
+ providerOptions: providerMetadataSchema.optional()
2052
3611
  });
2053
3612
  var coreMessageSchema = z6.union([
2054
3613
  coreSystemMessageSchema,
@@ -2136,14 +3695,14 @@ function standardizePrompt({
2136
3695
  }
2137
3696
 
2138
3697
  // core/types/usage.ts
2139
- function calculateLanguageModelUsage({
2140
- promptTokens,
2141
- completionTokens
3698
+ function calculateLanguageModelUsage2({
3699
+ inputTokens,
3700
+ outputTokens
2142
3701
  }) {
2143
3702
  return {
2144
- promptTokens,
2145
- completionTokens,
2146
- totalTokens: promptTokens + completionTokens
3703
+ promptTokens: inputTokens != null ? inputTokens : NaN,
3704
+ completionTokens: outputTokens != null ? outputTokens : NaN,
3705
+ totalTokens: (inputTokens != null ? inputTokens : 0) + (outputTokens != null ? outputTokens : 0)
2147
3706
  };
2148
3707
  }
2149
3708
  function addLanguageModelUsage(usage1, usage2) {
@@ -2182,7 +3741,6 @@ import {
2182
3741
  UnsupportedFunctionalityError
2183
3742
  } from "@ai-sdk/provider";
2184
3743
  import { safeValidateTypes as safeValidateTypes2 } from "@ai-sdk/provider-utils";
2185
- import { asSchema } from "@ai-sdk/ui-utils";
2186
3744
 
2187
3745
  // core/util/async-iterable-stream.ts
2188
3746
  function createAsyncIterableStream(source) {
@@ -2550,6 +4108,17 @@ function validateObjectGenerationInput({
2550
4108
  }
2551
4109
  }
2552
4110
 
4111
+ // core/generate-text/extract-content-text.ts
4112
+ function extractContentText(content) {
4113
+ const parts = content.filter(
4114
+ (content2) => content2.type === "text"
4115
+ );
4116
+ if (parts.length === 0) {
4117
+ return void 0;
4118
+ }
4119
+ return parts.map((content2) => content2.text).join("");
4120
+ }
4121
+
2553
4122
  // core/generate-object/generate-object.ts
2554
4123
  var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
2555
4124
  async function generateObject({
@@ -2569,8 +4138,7 @@ async function generateObject({
2569
4138
  headers,
2570
4139
  experimental_repairText: repairText,
2571
4140
  experimental_telemetry: telemetry,
2572
- experimental_providerMetadata,
2573
- providerOptions = experimental_providerMetadata,
4141
+ providerOptions,
2574
4142
  _internal: {
2575
4143
  generateId: generateId3 = originalGenerateId,
2576
4144
  currentDate = () => /* @__PURE__ */ new Date()
@@ -2632,7 +4200,6 @@ async function generateObject({
2632
4200
  let finishReason;
2633
4201
  let usage;
2634
4202
  let warnings;
2635
- let rawResponse;
2636
4203
  let response;
2637
4204
  let request;
2638
4205
  let logprobs;
@@ -2678,7 +4245,7 @@ async function generateObject({
2678
4245
  "gen_ai.system": model.provider,
2679
4246
  "gen_ai.request.model": model.modelId,
2680
4247
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
2681
- "gen_ai.request.max_tokens": settings.maxTokens,
4248
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
2682
4249
  "gen_ai.request.presence_penalty": settings.presencePenalty,
2683
4250
  "gen_ai.request.temperature": settings.temperature,
2684
4251
  "gen_ai.request.top_k": settings.topK,
@@ -2687,10 +4254,10 @@ async function generateObject({
2687
4254
  }),
2688
4255
  tracer,
2689
4256
  fn: async (span2) => {
2690
- var _a18, _b2, _c2, _d2, _e, _f;
4257
+ var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
2691
4258
  const result2 = await model.doGenerate({
2692
- mode: {
2693
- type: "object-json",
4259
+ responseFormat: {
4260
+ type: "json",
2694
4261
  schema: outputStrategy.jsonSchema,
2695
4262
  name: schemaName,
2696
4263
  description: schemaDescription
@@ -2698,20 +4265,23 @@ async function generateObject({
2698
4265
  ...prepareCallSettings(settings),
2699
4266
  inputFormat: standardizedPrompt.type,
2700
4267
  prompt: promptMessages,
2701
- providerMetadata: providerOptions,
4268
+ providerOptions,
2702
4269
  abortSignal,
2703
4270
  headers
2704
4271
  });
2705
4272
  const responseData = {
2706
4273
  id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
2707
4274
  timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
2708
- modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
4275
+ modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
4276
+ headers: (_g = result2.response) == null ? void 0 : _g.headers,
4277
+ body: (_h = result2.response) == null ? void 0 : _h.body
2709
4278
  };
2710
- if (result2.text === void 0) {
4279
+ const text2 = extractContentText(result2.content);
4280
+ if (text2 === void 0) {
2711
4281
  throw new NoObjectGeneratedError({
2712
4282
  message: "No object generated: the model did not return a response.",
2713
4283
  response: responseData,
2714
- usage: calculateLanguageModelUsage(result2.usage),
4284
+ usage: calculateLanguageModelUsage2(result2.usage),
2715
4285
  finishReason: result2.finishReason
2716
4286
  });
2717
4287
  }
@@ -2720,22 +4290,23 @@ async function generateObject({
2720
4290
  telemetry,
2721
4291
  attributes: {
2722
4292
  "ai.response.finishReason": result2.finishReason,
2723
- "ai.response.object": { output: () => result2.text },
4293
+ "ai.response.object": { output: () => text2 },
2724
4294
  "ai.response.id": responseData.id,
2725
4295
  "ai.response.model": responseData.modelId,
2726
4296
  "ai.response.timestamp": responseData.timestamp.toISOString(),
2727
- "ai.usage.promptTokens": result2.usage.promptTokens,
2728
- "ai.usage.completionTokens": result2.usage.completionTokens,
4297
+ // TODO rename telemetry attributes to inputTokens and outputTokens
4298
+ "ai.usage.promptTokens": result2.usage.inputTokens,
4299
+ "ai.usage.completionTokens": result2.usage.outputTokens,
2729
4300
  // standardized gen-ai llm span attributes:
2730
4301
  "gen_ai.response.finish_reasons": [result2.finishReason],
2731
4302
  "gen_ai.response.id": responseData.id,
2732
4303
  "gen_ai.response.model": responseData.modelId,
2733
- "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
2734
- "gen_ai.usage.completion_tokens": result2.usage.completionTokens
4304
+ "gen_ai.usage.input_tokens": result2.usage.inputTokens,
4305
+ "gen_ai.usage.output_tokens": result2.usage.outputTokens
2735
4306
  }
2736
4307
  })
2737
4308
  );
2738
- return { ...result2, objectText: result2.text, responseData };
4309
+ return { ...result2, objectText: text2, responseData };
2739
4310
  }
2740
4311
  })
2741
4312
  );
@@ -2743,7 +4314,6 @@ async function generateObject({
2743
4314
  finishReason = generateResult.finishReason;
2744
4315
  usage = generateResult.usage;
2745
4316
  warnings = generateResult.warnings;
2746
- rawResponse = generateResult.rawResponse;
2747
4317
  logprobs = generateResult.logprobs;
2748
4318
  resultProviderMetadata = generateResult.providerMetadata;
2749
4319
  request = (_b = generateResult.request) != null ? _b : {};
@@ -2784,7 +4354,7 @@ async function generateObject({
2784
4354
  "gen_ai.system": model.provider,
2785
4355
  "gen_ai.request.model": model.modelId,
2786
4356
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
2787
- "gen_ai.request.max_tokens": settings.maxTokens,
4357
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
2788
4358
  "gen_ai.request.presence_penalty": settings.presencePenalty,
2789
4359
  "gen_ai.request.temperature": settings.temperature,
2790
4360
  "gen_ai.request.top_k": settings.topK,
@@ -2795,33 +4365,38 @@ async function generateObject({
2795
4365
  fn: async (span2) => {
2796
4366
  var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
2797
4367
  const result2 = await model.doGenerate({
2798
- mode: {
2799
- type: "object-tool",
2800
- tool: {
4368
+ tools: [
4369
+ {
2801
4370
  type: "function",
2802
4371
  name: schemaName != null ? schemaName : "json",
2803
4372
  description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
2804
4373
  parameters: outputStrategy.jsonSchema
2805
4374
  }
2806
- },
4375
+ ],
4376
+ toolChoice: { type: "required" },
2807
4377
  ...prepareCallSettings(settings),
2808
4378
  inputFormat,
2809
4379
  prompt: promptMessages,
2810
- providerMetadata: providerOptions,
4380
+ providerOptions,
2811
4381
  abortSignal,
2812
4382
  headers
2813
4383
  });
2814
- const objectText = (_b2 = (_a18 = result2.toolCalls) == null ? void 0 : _a18[0]) == null ? void 0 : _b2.args;
4384
+ const firstToolCall = result2.content.find(
4385
+ (content) => content.type === "tool-call"
4386
+ );
4387
+ const objectText = firstToolCall == null ? void 0 : firstToolCall.args;
2815
4388
  const responseData = {
2816
- id: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.id) != null ? _d2 : generateId3(),
2817
- timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
2818
- modelId: (_h = (_g = result2.response) == null ? void 0 : _g.modelId) != null ? _h : model.modelId
4389
+ id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
4390
+ timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
4391
+ modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
4392
+ headers: (_g = result2.response) == null ? void 0 : _g.headers,
4393
+ body: (_h = result2.response) == null ? void 0 : _h.body
2819
4394
  };
2820
4395
  if (objectText === void 0) {
2821
4396
  throw new NoObjectGeneratedError({
2822
4397
  message: "No object generated: the tool was not called.",
2823
4398
  response: responseData,
2824
- usage: calculateLanguageModelUsage(result2.usage),
4399
+ usage: calculateLanguageModelUsage2(result2.usage),
2825
4400
  finishReason: result2.finishReason
2826
4401
  });
2827
4402
  }
@@ -2834,14 +4409,15 @@ async function generateObject({
2834
4409
  "ai.response.id": responseData.id,
2835
4410
  "ai.response.model": responseData.modelId,
2836
4411
  "ai.response.timestamp": responseData.timestamp.toISOString(),
2837
- "ai.usage.promptTokens": result2.usage.promptTokens,
2838
- "ai.usage.completionTokens": result2.usage.completionTokens,
4412
+ // TODO rename telemetry attributes to inputTokens and outputTokens
4413
+ "ai.usage.promptTokens": result2.usage.inputTokens,
4414
+ "ai.usage.completionTokens": result2.usage.outputTokens,
2839
4415
  // standardized gen-ai llm span attributes:
2840
4416
  "gen_ai.response.finish_reasons": [result2.finishReason],
2841
4417
  "gen_ai.response.id": responseData.id,
2842
4418
  "gen_ai.response.model": responseData.modelId,
2843
- "gen_ai.usage.input_tokens": result2.usage.promptTokens,
2844
- "gen_ai.usage.output_tokens": result2.usage.completionTokens
4419
+ "gen_ai.usage.input_tokens": result2.usage.inputTokens,
4420
+ "gen_ai.usage.output_tokens": result2.usage.outputTokens
2845
4421
  }
2846
4422
  })
2847
4423
  );
@@ -2853,7 +4429,6 @@ async function generateObject({
2853
4429
  finishReason = generateResult.finishReason;
2854
4430
  usage = generateResult.usage;
2855
4431
  warnings = generateResult.warnings;
2856
- rawResponse = generateResult.rawResponse;
2857
4432
  logprobs = generateResult.logprobs;
2858
4433
  resultProviderMetadata = generateResult.providerMetadata;
2859
4434
  request = (_d = generateResult.request) != null ? _d : {};
@@ -2871,14 +4446,14 @@ async function generateObject({
2871
4446
  }
2872
4447
  }
2873
4448
  function processResult(result2) {
2874
- const parseResult = safeParseJSON({ text: result2 });
4449
+ const parseResult = safeParseJSON2({ text: result2 });
2875
4450
  if (!parseResult.success) {
2876
4451
  throw new NoObjectGeneratedError({
2877
4452
  message: "No object generated: could not parse the response.",
2878
4453
  cause: parseResult.error,
2879
4454
  text: result2,
2880
4455
  response,
2881
- usage: calculateLanguageModelUsage(usage),
4456
+ usage: calculateLanguageModelUsage2(usage),
2882
4457
  finishReason
2883
4458
  });
2884
4459
  }
@@ -2887,7 +4462,7 @@ async function generateObject({
2887
4462
  {
2888
4463
  text: result2,
2889
4464
  response,
2890
- usage: calculateLanguageModelUsage(usage)
4465
+ usage: calculateLanguageModelUsage2(usage)
2891
4466
  }
2892
4467
  );
2893
4468
  if (!validationResult.success) {
@@ -2896,7 +4471,7 @@ async function generateObject({
2896
4471
  cause: validationResult.error,
2897
4472
  text: result2,
2898
4473
  response,
2899
- usage: calculateLanguageModelUsage(usage),
4474
+ usage: calculateLanguageModelUsage2(usage),
2900
4475
  finishReason
2901
4476
  });
2902
4477
  }
@@ -2927,22 +4502,19 @@ async function generateObject({
2927
4502
  "ai.response.object": {
2928
4503
  output: () => JSON.stringify(object2)
2929
4504
  },
2930
- "ai.usage.promptTokens": usage.promptTokens,
2931
- "ai.usage.completionTokens": usage.completionTokens
4505
+ // TODO rename telemetry attributes to inputTokens and outputTokens
4506
+ "ai.usage.promptTokens": usage.inputTokens,
4507
+ "ai.usage.completionTokens": usage.outputTokens
2932
4508
  }
2933
4509
  })
2934
4510
  );
2935
4511
  return new DefaultGenerateObjectResult({
2936
4512
  object: object2,
2937
4513
  finishReason,
2938
- usage: calculateLanguageModelUsage(usage),
4514
+ usage: calculateLanguageModelUsage2(usage),
2939
4515
  warnings,
2940
4516
  request,
2941
- response: {
2942
- ...response,
2943
- headers: rawResponse == null ? void 0 : rawResponse.headers,
2944
- body: rawResponse == null ? void 0 : rawResponse.body
2945
- },
4517
+ response,
2946
4518
  logprobs,
2947
4519
  providerMetadata: resultProviderMetadata
2948
4520
  });
@@ -2956,7 +4528,6 @@ var DefaultGenerateObjectResult = class {
2956
4528
  this.usage = options.usage;
2957
4529
  this.warnings = options.warnings;
2958
4530
  this.providerMetadata = options.providerMetadata;
2959
- this.experimental_providerMetadata = options.providerMetadata;
2960
4531
  this.response = options.response;
2961
4532
  this.request = options.request;
2962
4533
  this.logprobs = options.logprobs;
@@ -2974,10 +4545,6 @@ var DefaultGenerateObjectResult = class {
2974
4545
 
2975
4546
  // core/generate-object/stream-object.ts
2976
4547
  import { createIdGenerator as createIdGenerator2 } from "@ai-sdk/provider-utils";
2977
- import {
2978
- isDeepEqualData,
2979
- parsePartialJson
2980
- } from "@ai-sdk/ui-utils";
2981
4548
 
2982
4549
  // util/delayed-promise.ts
2983
4550
  var DelayedPromise = class {
@@ -3136,8 +4703,7 @@ function streamObject({
3136
4703
  abortSignal,
3137
4704
  headers,
3138
4705
  experimental_telemetry: telemetry,
3139
- experimental_providerMetadata,
3140
- providerOptions = experimental_providerMetadata,
4706
+ providerOptions,
3141
4707
  onError,
3142
4708
  onFinish,
3143
4709
  _internal: {
@@ -3273,8 +4839,8 @@ var DefaultStreamObjectResult = class {
3273
4839
  tools: void 0
3274
4840
  });
3275
4841
  callOptions = {
3276
- mode: {
3277
- type: "object-json",
4842
+ responseFormat: {
4843
+ type: "json",
3278
4844
  schema: outputStrategy.jsonSchema,
3279
4845
  name: schemaName,
3280
4846
  description: schemaDescription
@@ -3287,15 +4853,15 @@ var DefaultStreamObjectResult = class {
3287
4853
  modelSupportsUrl: (_a17 = model.supportsUrl) == null ? void 0 : _a17.bind(model)
3288
4854
  // support 'this' context
3289
4855
  }),
3290
- providerMetadata: providerOptions,
4856
+ providerOptions,
3291
4857
  abortSignal,
3292
4858
  headers
3293
4859
  };
3294
4860
  transformer = {
3295
4861
  transform: (chunk, controller) => {
3296
4862
  switch (chunk.type) {
3297
- case "text-delta":
3298
- controller.enqueue(chunk.textDelta);
4863
+ case "text":
4864
+ controller.enqueue(chunk.text);
3299
4865
  break;
3300
4866
  case "response-metadata":
3301
4867
  case "finish":
@@ -3313,15 +4879,15 @@ var DefaultStreamObjectResult = class {
3313
4879
  tools: void 0
3314
4880
  });
3315
4881
  callOptions = {
3316
- mode: {
3317
- type: "object-tool",
3318
- tool: {
4882
+ tools: [
4883
+ {
3319
4884
  type: "function",
3320
4885
  name: schemaName != null ? schemaName : "json",
3321
4886
  description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
3322
4887
  parameters: outputStrategy.jsonSchema
3323
4888
  }
3324
- },
4889
+ ],
4890
+ toolChoice: { type: "required" },
3325
4891
  ...prepareCallSettings(settings),
3326
4892
  inputFormat: standardizedPrompt.type,
3327
4893
  prompt: await convertToLanguageModelPrompt({
@@ -3330,7 +4896,7 @@ var DefaultStreamObjectResult = class {
3330
4896
  modelSupportsUrl: (_b = model.supportsUrl) == null ? void 0 : _b.bind(model)
3331
4897
  // support 'this' context,
3332
4898
  }),
3333
- providerMetadata: providerOptions,
4899
+ providerOptions,
3334
4900
  abortSignal,
3335
4901
  headers
3336
4902
  };
@@ -3361,7 +4927,7 @@ var DefaultStreamObjectResult = class {
3361
4927
  }
3362
4928
  }
3363
4929
  const {
3364
- result: { stream, warnings, rawResponse, request },
4930
+ result: { stream, response, request },
3365
4931
  doStreamSpan,
3366
4932
  startTimestampMs
3367
4933
  } = await retry(
@@ -3386,7 +4952,7 @@ var DefaultStreamObjectResult = class {
3386
4952
  "gen_ai.system": model.provider,
3387
4953
  "gen_ai.request.model": model.modelId,
3388
4954
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
3389
- "gen_ai.request.max_tokens": settings.maxTokens,
4955
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
3390
4956
  "gen_ai.request.presence_penalty": settings.presencePenalty,
3391
4957
  "gen_ai.request.temperature": settings.temperature,
3392
4958
  "gen_ai.request.top_k": settings.topK,
@@ -3403,6 +4969,7 @@ var DefaultStreamObjectResult = class {
3403
4969
  })
3404
4970
  );
3405
4971
  self.requestPromise.resolve(request != null ? request : {});
4972
+ let warnings;
3406
4973
  let usage;
3407
4974
  let finishReason;
3408
4975
  let providerMetadata;
@@ -3410,7 +4977,7 @@ var DefaultStreamObjectResult = class {
3410
4977
  let error;
3411
4978
  let accumulatedText = "";
3412
4979
  let textDelta = "";
3413
- let response = {
4980
+ let fullResponse = {
3414
4981
  id: generateId3(),
3415
4982
  timestamp: currentDate(),
3416
4983
  modelId: model.modelId
@@ -3423,6 +4990,10 @@ var DefaultStreamObjectResult = class {
3423
4990
  new TransformStream({
3424
4991
  async transform(chunk, controller) {
3425
4992
  var _a18, _b2, _c;
4993
+ if (typeof chunk === "object" && chunk.type === "stream-start") {
4994
+ warnings = chunk.warnings;
4995
+ return;
4996
+ }
3426
4997
  if (isFirstChunk) {
3427
4998
  const msToFirstChunk = now2() - startTimestampMs;
3428
4999
  isFirstChunk = false;
@@ -3467,10 +5038,10 @@ var DefaultStreamObjectResult = class {
3467
5038
  }
3468
5039
  switch (chunk.type) {
3469
5040
  case "response-metadata": {
3470
- response = {
3471
- id: (_a18 = chunk.id) != null ? _a18 : response.id,
3472
- timestamp: (_b2 = chunk.timestamp) != null ? _b2 : response.timestamp,
3473
- modelId: (_c = chunk.modelId) != null ? _c : response.modelId
5041
+ fullResponse = {
5042
+ id: (_a18 = chunk.id) != null ? _a18 : fullResponse.id,
5043
+ timestamp: (_b2 = chunk.timestamp) != null ? _b2 : fullResponse.timestamp,
5044
+ modelId: (_c = chunk.modelId) != null ? _c : fullResponse.modelId
3474
5045
  };
3475
5046
  break;
3476
5047
  }
@@ -3479,20 +5050,24 @@ var DefaultStreamObjectResult = class {
3479
5050
  controller.enqueue({ type: "text-delta", textDelta });
3480
5051
  }
3481
5052
  finishReason = chunk.finishReason;
3482
- usage = calculateLanguageModelUsage(chunk.usage);
5053
+ usage = calculateLanguageModelUsage2(chunk.usage);
3483
5054
  providerMetadata = chunk.providerMetadata;
3484
- controller.enqueue({ ...chunk, usage, response });
5055
+ controller.enqueue({
5056
+ ...chunk,
5057
+ usage,
5058
+ response: fullResponse
5059
+ });
3485
5060
  self.usagePromise.resolve(usage);
3486
5061
  self.providerMetadataPromise.resolve(providerMetadata);
3487
5062
  self.responsePromise.resolve({
3488
- ...response,
3489
- headers: rawResponse == null ? void 0 : rawResponse.headers
5063
+ ...fullResponse,
5064
+ headers: response == null ? void 0 : response.headers
3490
5065
  });
3491
5066
  const validationResult = outputStrategy.validateFinalResult(
3492
5067
  latestObjectJson,
3493
5068
  {
3494
5069
  text: accumulatedText,
3495
- response,
5070
+ response: fullResponse,
3496
5071
  usage
3497
5072
  }
3498
5073
  );
@@ -3504,7 +5079,7 @@ var DefaultStreamObjectResult = class {
3504
5079
  message: "No object generated: response did not match schema.",
3505
5080
  cause: validationResult.error,
3506
5081
  text: accumulatedText,
3507
- response,
5082
+ response: fullResponse,
3508
5083
  usage,
3509
5084
  finishReason
3510
5085
  });
@@ -3534,15 +5109,15 @@ var DefaultStreamObjectResult = class {
3534
5109
  "ai.response.object": {
3535
5110
  output: () => JSON.stringify(object2)
3536
5111
  },
3537
- "ai.response.id": response.id,
3538
- "ai.response.model": response.modelId,
3539
- "ai.response.timestamp": response.timestamp.toISOString(),
5112
+ "ai.response.id": fullResponse.id,
5113
+ "ai.response.model": fullResponse.modelId,
5114
+ "ai.response.timestamp": fullResponse.timestamp.toISOString(),
3540
5115
  "ai.usage.promptTokens": finalUsage.promptTokens,
3541
5116
  "ai.usage.completionTokens": finalUsage.completionTokens,
3542
5117
  // standardized gen-ai llm span attributes:
3543
5118
  "gen_ai.response.finish_reasons": [finishReason],
3544
- "gen_ai.response.id": response.id,
3545
- "gen_ai.response.model": response.modelId,
5119
+ "gen_ai.response.id": fullResponse.id,
5120
+ "gen_ai.response.model": fullResponse.modelId,
3546
5121
  "gen_ai.usage.input_tokens": finalUsage.promptTokens,
3547
5122
  "gen_ai.usage.output_tokens": finalUsage.completionTokens
3548
5123
  }
@@ -3566,12 +5141,11 @@ var DefaultStreamObjectResult = class {
3566
5141
  object: object2,
3567
5142
  error,
3568
5143
  response: {
3569
- ...response,
3570
- headers: rawResponse == null ? void 0 : rawResponse.headers
5144
+ ...fullResponse,
5145
+ headers: response == null ? void 0 : response.headers
3571
5146
  },
3572
5147
  warnings,
3573
- providerMetadata,
3574
- experimental_providerMetadata: providerMetadata
5148
+ providerMetadata
3575
5149
  }));
3576
5150
  } catch (error2) {
3577
5151
  controller.enqueue({ type: "error", error: error2 });
@@ -3603,9 +5177,6 @@ var DefaultStreamObjectResult = class {
3603
5177
  get usage() {
3604
5178
  return this.usagePromise.value;
3605
5179
  }
3606
- get experimental_providerMetadata() {
3607
- return this.providerMetadataPromise.value;
3608
- }
3609
5180
  get providerMetadata() {
3610
5181
  return this.providerMetadataPromise.value;
3611
5182
  }
@@ -3696,30 +5267,30 @@ var DefaultStreamObjectResult = class {
3696
5267
  import { createIdGenerator as createIdGenerator3 } from "@ai-sdk/provider-utils";
3697
5268
 
3698
5269
  // errors/no-output-specified-error.ts
3699
- import { AISDKError as AISDKError9 } from "@ai-sdk/provider";
5270
+ import { AISDKError as AISDKError10 } from "@ai-sdk/provider";
3700
5271
  var name9 = "AI_NoOutputSpecifiedError";
3701
5272
  var marker9 = `vercel.ai.error.${name9}`;
3702
5273
  var symbol9 = Symbol.for(marker9);
3703
5274
  var _a9;
3704
- var NoOutputSpecifiedError = class extends AISDKError9 {
5275
+ var NoOutputSpecifiedError = class extends AISDKError10 {
3705
5276
  // used in isInstance
3706
5277
  constructor({ message = "No output specified." } = {}) {
3707
5278
  super({ name: name9, message });
3708
5279
  this[_a9] = true;
3709
5280
  }
3710
5281
  static isInstance(error) {
3711
- return AISDKError9.hasMarker(error, marker9);
5282
+ return AISDKError10.hasMarker(error, marker9);
3712
5283
  }
3713
5284
  };
3714
5285
  _a9 = symbol9;
3715
5286
 
3716
5287
  // errors/tool-execution-error.ts
3717
- import { AISDKError as AISDKError10, getErrorMessage as getErrorMessage2 } from "@ai-sdk/provider";
5288
+ import { AISDKError as AISDKError11, getErrorMessage as getErrorMessage2 } from "@ai-sdk/provider";
3718
5289
  var name10 = "AI_ToolExecutionError";
3719
5290
  var marker10 = `vercel.ai.error.${name10}`;
3720
5291
  var symbol10 = Symbol.for(marker10);
3721
5292
  var _a10;
3722
- var ToolExecutionError = class extends AISDKError10 {
5293
+ var ToolExecutionError = class extends AISDKError11 {
3723
5294
  constructor({
3724
5295
  toolArgs,
3725
5296
  toolName,
@@ -3734,14 +5305,11 @@ var ToolExecutionError = class extends AISDKError10 {
3734
5305
  this.toolCallId = toolCallId;
3735
5306
  }
3736
5307
  static isInstance(error) {
3737
- return AISDKError10.hasMarker(error, marker10);
5308
+ return AISDKError11.hasMarker(error, marker10);
3738
5309
  }
3739
5310
  };
3740
5311
  _a10 = symbol10;
3741
5312
 
3742
- // core/prompt/prepare-tools-and-tool-choice.ts
3743
- import { asSchema as asSchema2 } from "@ai-sdk/ui-utils";
3744
-
3745
5313
  // core/util/is-non-empty-object.ts
3746
5314
  function isNonEmptyObject(object2) {
3747
5315
  return object2 != null && Object.keys(object2).length > 0;
@@ -3772,7 +5340,7 @@ function prepareToolsAndToolChoice({
3772
5340
  type: "function",
3773
5341
  name: name17,
3774
5342
  description: tool2.description,
3775
- parameters: asSchema2(tool2.parameters).jsonSchema
5343
+ parameters: asSchema(tool2.parameters).jsonSchema
3776
5344
  };
3777
5345
  case "provider-defined":
3778
5346
  return {
@@ -3805,16 +5373,15 @@ function removeTextAfterLastWhitespace(text2) {
3805
5373
  }
3806
5374
 
3807
5375
  // core/generate-text/parse-tool-call.ts
3808
- import { safeParseJSON as safeParseJSON2, safeValidateTypes as safeValidateTypes3 } from "@ai-sdk/provider-utils";
3809
- import { asSchema as asSchema3 } from "@ai-sdk/ui-utils";
5376
+ import { safeParseJSON as safeParseJSON3, safeValidateTypes as safeValidateTypes3 } from "@ai-sdk/provider-utils";
3810
5377
 
3811
5378
  // errors/invalid-tool-arguments-error.ts
3812
- import { AISDKError as AISDKError11, getErrorMessage as getErrorMessage3 } from "@ai-sdk/provider";
5379
+ import { AISDKError as AISDKError12, getErrorMessage as getErrorMessage3 } from "@ai-sdk/provider";
3813
5380
  var name11 = "AI_InvalidToolArgumentsError";
3814
5381
  var marker11 = `vercel.ai.error.${name11}`;
3815
5382
  var symbol11 = Symbol.for(marker11);
3816
5383
  var _a11;
3817
- var InvalidToolArgumentsError = class extends AISDKError11 {
5384
+ var InvalidToolArgumentsError = class extends AISDKError12 {
3818
5385
  constructor({
3819
5386
  toolArgs,
3820
5387
  toolName,
@@ -3829,18 +5396,18 @@ var InvalidToolArgumentsError = class extends AISDKError11 {
3829
5396
  this.toolName = toolName;
3830
5397
  }
3831
5398
  static isInstance(error) {
3832
- return AISDKError11.hasMarker(error, marker11);
5399
+ return AISDKError12.hasMarker(error, marker11);
3833
5400
  }
3834
5401
  };
3835
5402
  _a11 = symbol11;
3836
5403
 
3837
5404
  // errors/no-such-tool-error.ts
3838
- import { AISDKError as AISDKError12 } from "@ai-sdk/provider";
5405
+ import { AISDKError as AISDKError13 } from "@ai-sdk/provider";
3839
5406
  var name12 = "AI_NoSuchToolError";
3840
5407
  var marker12 = `vercel.ai.error.${name12}`;
3841
5408
  var symbol12 = Symbol.for(marker12);
3842
5409
  var _a12;
3843
- var NoSuchToolError = class extends AISDKError12 {
5410
+ var NoSuchToolError = class extends AISDKError13 {
3844
5411
  constructor({
3845
5412
  toolName,
3846
5413
  availableTools = void 0,
@@ -3852,18 +5419,18 @@ var NoSuchToolError = class extends AISDKError12 {
3852
5419
  this.availableTools = availableTools;
3853
5420
  }
3854
5421
  static isInstance(error) {
3855
- return AISDKError12.hasMarker(error, marker12);
5422
+ return AISDKError13.hasMarker(error, marker12);
3856
5423
  }
3857
5424
  };
3858
5425
  _a12 = symbol12;
3859
5426
 
3860
5427
  // errors/tool-call-repair-error.ts
3861
- import { AISDKError as AISDKError13, getErrorMessage as getErrorMessage4 } from "@ai-sdk/provider";
5428
+ import { AISDKError as AISDKError14, getErrorMessage as getErrorMessage4 } from "@ai-sdk/provider";
3862
5429
  var name13 = "AI_ToolCallRepairError";
3863
5430
  var marker13 = `vercel.ai.error.${name13}`;
3864
5431
  var symbol13 = Symbol.for(marker13);
3865
5432
  var _a13;
3866
- var ToolCallRepairError = class extends AISDKError13 {
5433
+ var ToolCallRepairError = class extends AISDKError14 {
3867
5434
  constructor({
3868
5435
  cause,
3869
5436
  originalError,
@@ -3874,7 +5441,7 @@ var ToolCallRepairError = class extends AISDKError13 {
3874
5441
  this.originalError = originalError;
3875
5442
  }
3876
5443
  static isInstance(error) {
3877
- return AISDKError13.hasMarker(error, marker13);
5444
+ return AISDKError14.hasMarker(error, marker13);
3878
5445
  }
3879
5446
  };
3880
5447
  _a13 = symbol13;
@@ -3901,7 +5468,10 @@ async function parseToolCall({
3901
5468
  repairedToolCall = await repairToolCall({
3902
5469
  toolCall,
3903
5470
  tools,
3904
- parameterSchema: ({ toolName }) => asSchema3(tools[toolName].parameters).jsonSchema,
5471
+ parameterSchema: ({ toolName }) => {
5472
+ const { parameters } = tools[toolName];
5473
+ return asSchema(parameters).jsonSchema;
5474
+ },
3905
5475
  system,
3906
5476
  messages,
3907
5477
  error
@@ -3930,8 +5500,8 @@ async function doParseToolCall({
  availableTools: Object.keys(tools)
  });
  }
- const schema = asSchema3(tool2.parameters);
- const parseResult = toolCall.args.trim() === "" ? safeValidateTypes3({ value: {}, schema }) : safeParseJSON2({ text: toolCall.args, schema });
+ const schema = asSchema(tool2.parameters);
+ const parseResult = toolCall.args.trim() === "" ? safeValidateTypes3({ value: {}, schema }) : safeParseJSON3({ text: toolCall.args, schema });
  if (parseResult.success === false) {
  throw new InvalidToolArgumentsError({
  toolName,
@@ -3943,11 +5513,11 @@
  type: "tool-call",
  toolCallId: toolCall.toolCallId,
  toolName,
- args: parseResult.value
+ args: parseResult == null ? void 0 : parseResult.value
  };
  }

- // core/generate-text/reasoning-detail.ts
+ // core/generate-text/reasoning.ts
  function asReasoningText(reasoning) {
  const reasoningText = reasoning.filter((part) => part.type === "text").map((part) => part.text).join("");
  return reasoningText.length > 0 ? reasoningText : void 0;
@@ -3965,23 +5535,36 @@ function toResponseMessages({
  generateMessageId
  }) {
  const responseMessages = [];
- responseMessages.push({
- role: "assistant",
- content: [
+ const content = [];
+ if (reasoning.length > 0) {
+ content.push(
  ...reasoning.map(
  (part) => part.type === "text" ? { ...part, type: "reasoning" } : { ...part, type: "redacted-reasoning" }
- ),
- // TODO language model v2: switch to order response content (instead of type-based ordering)
+ )
+ );
+ }
+ if (files.length > 0) {
+ content.push(
  ...files.map((file) => ({
  type: "file",
  data: file.base64,
- mimeType: file.mimeType
- })),
- { type: "text", text: text2 },
- ...toolCalls
- ],
- id: messageId
- });
+ mediaType: file.mediaType
+ }))
+ );
+ }
+ if (text2.length > 0) {
+ content.push({ type: "text", text: text2 });
+ }
+ if (toolCalls.length > 0) {
+ content.push(...toolCalls);
+ }
+ if (content.length > 0) {
+ responseMessages.push({
+ role: "assistant",
+ content,
+ id: messageId
+ });
+ }
  if (toolResults.length > 0) {
  responseMessages.push({
  role: "tool",
@@ -4032,8 +5615,7 @@ async function generateText({
  experimental_output: output,
  experimental_continueSteps: continueSteps = false,
  experimental_telemetry: telemetry,
- experimental_providerMetadata,
- providerOptions = experimental_providerMetadata,
+ providerOptions,
  experimental_activeTools: activeTools,
  experimental_repairToolCall: repairToolCall,
  _internal: {
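// The `experimental_providerMetadata` alias is gone; callers pass
// `providerOptions` directly. A minimal sketch (the `openai` payload is
// illustrative):
import { generateText } from "ai";

const { text } = await generateText({
  model, // placeholder
  prompt: "Hello!",
  providerOptions: {
    openai: { user: "user-123" },
  },
});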
@@ -4086,9 +5668,8 @@
  }),
  tracer,
  fn: async (span) => {
- var _a18, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
- const mode = {
- type: "regular",
+ var _a18, _b, _c, _d;
+ const toolsAndToolChoice = {
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
  };
  const callSettings = prepareCallSettings(settings);
@@ -4142,17 +5723,17 @@
  // convert the language model level tools:
  input: () => {
  var _a19;
- return (_a19 = mode.tools) == null ? void 0 : _a19.map((tool2) => JSON.stringify(tool2));
+ return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map((tool2) => JSON.stringify(tool2));
  }
  },
  "ai.prompt.toolChoice": {
- input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
+ input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.system": model.provider,
  "gen_ai.request.model": model.modelId,
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
  "gen_ai.request.presence_penalty": settings.presencePenalty,
  "gen_ai.request.stop_sequences": settings.stopSequences,
  "gen_ai.request.temperature": settings.temperature,
@@ -4162,21 +5743,23 @@
  }),
  tracer,
  fn: async (span2) => {
- var _a19, _b2, _c2, _d2, _e2, _f2;
+ var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
  const result = await model.doGenerate({
- mode,
  ...callSettings,
+ ...toolsAndToolChoice,
  inputFormat: promptFormat,
  responseFormat: output == null ? void 0 : output.responseFormat({ model }),
  prompt: promptMessages,
- providerMetadata: providerOptions,
+ providerOptions,
  abortSignal,
  headers
  });
  const responseData = {
  id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
- modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
+ modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+ headers: (_g = result.response) == null ? void 0 : _g.headers,
+ body: (_h = result.response) == null ? void 0 : _h.body
  };
  span2.setAttributes(
  selectTelemetryAttributes({
@@ -4184,22 +5767,26 @@
  attributes: {
  "ai.response.finishReason": result.finishReason,
  "ai.response.text": {
- output: () => result.text
+ output: () => extractContentText(result.content)
  },
  "ai.response.toolCalls": {
- output: () => JSON.stringify(result.toolCalls)
+ output: () => {
+ const toolCalls = asToolCalls(result.content);
+ return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+ }
  },
  "ai.response.id": responseData.id,
  "ai.response.model": responseData.modelId,
  "ai.response.timestamp": responseData.timestamp.toISOString(),
- "ai.usage.promptTokens": result.usage.promptTokens,
- "ai.usage.completionTokens": result.usage.completionTokens,
+ // TODO rename telemetry attributes to inputTokens and outputTokens
+ "ai.usage.promptTokens": result.usage.inputTokens,
+ "ai.usage.completionTokens": result.usage.outputTokens,
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result.finishReason],
  "gen_ai.response.id": responseData.id,
  "gen_ai.response.model": responseData.modelId,
- "gen_ai.usage.input_tokens": result.usage.promptTokens,
- "gen_ai.usage.output_tokens": result.usage.completionTokens
+ "gen_ai.usage.input_tokens": result.usage.inputTokens,
+ "gen_ai.usage.output_tokens": result.usage.outputTokens
  }
  })
  );
@@ -4208,7 +5795,9 @@
  })
  );
  currentToolCalls = await Promise.all(
- ((_b = currentModelResponse.toolCalls) != null ? _b : []).map(
+ currentModelResponse.content.filter(
+ (part) => part.type === "tool-call"
+ ).map(
  (toolCall) => parseToolCall({
  toolCall,
  tools,
@@ -4226,7 +5815,7 @@
  messages: stepInputMessages,
  abortSignal
  });
- const currentUsage = calculateLanguageModelUsage(
+ const currentUsage = calculateLanguageModelUsage2(
  currentModelResponse.usage
  );
  usage = addLanguageModelUsage(usage, currentUsage);
@@ -4243,15 +5832,19 @@
  nextStepType = "tool-result";
  }
  }
- const originalText = (_c = currentModelResponse.text) != null ? _c : "";
+ const originalText = (_b = extractContentText(currentModelResponse.content)) != null ? _b : "";
  const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
  text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
  const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
  text2 = nextStepType === "continue" || stepType === "continue" ? text2 + stepText : stepText;
  currentReasoningDetails = asReasoningDetails(
- currentModelResponse.reasoning
+ currentModelResponse.content
+ );
+ sources.push(
+ ...currentModelResponse.content.filter(
+ (part) => part.type === "source"
+ )
  );
- sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
  if (stepType === "continue") {
  const lastMessage = responseMessages[responseMessages.length - 1];
  if (typeof lastMessage.content === "string") {
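// The v2 generate result is consumed as one ordered `content` array; the
// code above extracts text, tool calls, and sources by filtering on
// `part.type`. A sketch of the shape it expects (field values illustrative):
const content = [
  { type: "reasoning", reasoningType: "text", text: "Considering sources..." },
  { type: "text", text: "The answer is 42." },
  { type: "tool-call", toolCallType: "function", toolCallId: "call_1", toolName: "search", args: "{}" },
];
// mirrors what extractContentText does above:
const text = content.filter((part) => part.type === "text").map((part) => part.text).join("");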
@@ -4266,8 +5859,8 @@
  responseMessages.push(
  ...toResponseMessages({
  text: text2,
- files: asFiles(currentModelResponse.files),
- reasoning: asReasoningDetails(currentModelResponse.reasoning),
+ files: asFiles(currentModelResponse.content),
+ reasoning: asReasoningDetails(currentModelResponse.content),
  tools: tools != null ? tools : {},
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
@@ -4279,27 +5872,25 @@
  const currentStepResult = {
  stepType,
  text: stepText,
- // TODO v5: rename reasoning to reasoningText (and use reasoning for composite array)
- reasoning: asReasoningText(currentReasoningDetails),
- reasoningDetails: currentReasoningDetails,
- files: asFiles(currentModelResponse.files),
- sources: (_e = currentModelResponse.sources) != null ? _e : [],
+ reasoningText: asReasoningText(currentReasoningDetails),
+ reasoning: currentReasoningDetails,
+ files: asFiles(currentModelResponse.content),
+ sources: currentModelResponse.content.filter(
+ (part) => part.type === "source"
+ ),
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
  finishReason: currentModelResponse.finishReason,
  usage: currentUsage,
  warnings: currentModelResponse.warnings,
  logprobs: currentModelResponse.logprobs,
- request: (_f = currentModelResponse.request) != null ? _f : {},
+ request: (_c = currentModelResponse.request) != null ? _c : {},
  response: {
  ...currentModelResponse.response,
- headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers,
- body: (_h = currentModelResponse.rawResponse) == null ? void 0 : _h.body,
  // deep clone msgs to avoid mutating past messages in multi-step:
  messages: structuredClone(responseMessages)
  },
  providerMetadata: currentModelResponse.providerMetadata,
- experimental_providerMetadata: currentModelResponse.providerMetadata,
  isContinued: nextStepType === "continue"
  };
  steps.push(currentStepResult);
@@ -4312,19 +5903,23 @@
  attributes: {
  "ai.response.finishReason": currentModelResponse.finishReason,
  "ai.response.text": {
- output: () => currentModelResponse.text
+ output: () => extractContentText(currentModelResponse.content)
  },
  "ai.response.toolCalls": {
- output: () => JSON.stringify(currentModelResponse.toolCalls)
+ output: () => {
+ const toolCalls = asToolCalls(currentModelResponse.content);
+ return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+ }
  },
- "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
- "ai.usage.completionTokens": currentModelResponse.usage.completionTokens
+ // TODO rename telemetry attributes to inputTokens and outputTokens
+ "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
+ "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
  }
  })
  );
  return new DefaultGenerateTextResult({
  text: text2,
- files: asFiles(currentModelResponse.files),
+ files: asFiles(currentModelResponse.content),
  reasoning: asReasoningText(currentReasoningDetails),
  reasoningDetails: currentReasoningDetails,
  sources,
@@ -4346,11 +5941,9 @@
  finishReason: currentModelResponse.finishReason,
  usage,
  warnings: currentModelResponse.warnings,
- request: (_i = currentModelResponse.request) != null ? _i : {},
+ request: (_d = currentModelResponse.request) != null ? _d : {},
  response: {
  ...currentModelResponse.response,
- headers: (_j = currentModelResponse.rawResponse) == null ? void 0 : _j.headers,
- body: (_k = currentModelResponse.rawResponse) == null ? void 0 : _k.body,
  messages: responseMessages
  },
  logprobs: currentModelResponse.logprobs,
@@ -4439,8 +6032,8 @@ var DefaultGenerateTextResult = class {
  constructor(options) {
  this.text = options.text;
  this.files = options.files;
- this.reasoning = options.reasoning;
- this.reasoningDetails = options.reasoningDetails;
+ this.reasoningText = options.reasoning;
+ this.reasoning = options.reasoningDetails;
  this.toolCalls = options.toolCalls;
  this.toolResults = options.toolResults;
  this.finishReason = options.finishReason;
@@ -4449,7 +6042,6 @@
  this.request = options.request;
  this.response = options.response;
  this.steps = options.steps;
- this.experimental_providerMetadata = options.providerMetadata;
  this.providerMetadata = options.providerMetadata;
  this.logprobs = options.logprobs;
  this.outputResolver = options.outputResolver;
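// Result field renames visible in the constructor above: `reasoningText` is
// the plain string (formerly `reasoning`) and `reasoning` is the structured
// array (formerly `reasoningDetails`). Sketch:
import { generateText } from "ai";

const result = await generateText({ model /* placeholder */, prompt: "..." });
console.log(result.reasoningText); // string | undefined
console.log(result.reasoning);     // array of text/redacted reasoning parts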
@@ -4459,18 +6051,50 @@
  return this.outputResolver();
  }
  };
- function asReasoningDetails(reasoning) {
- if (reasoning == null) {
+ function asReasoningDetails(content) {
+ const reasoning = content.filter((part) => part.type === "reasoning");
+ if (reasoning.length === 0) {
  return [];
  }
- if (typeof reasoning === "string") {
- return [{ type: "text", text: reasoning }];
+ const result = [];
+ let activeReasoningText;
+ for (const part of reasoning) {
+ if (part.reasoningType === "text") {
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: part.text };
+ result.push(activeReasoningText);
+ } else {
+ activeReasoningText.text += part.text;
+ }
+ } else if (part.reasoningType === "signature") {
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: "" };
+ result.push(activeReasoningText);
+ }
+ activeReasoningText.signature = part.signature;
+ activeReasoningText = void 0;
+ } else if (part.reasoningType === "redacted") {
+ result.push({ type: "redacted", data: part.data });
+ }
  }
- return reasoning;
+ return result;
  }
- function asFiles(files) {
- var _a17;
- return (_a17 = files == null ? void 0 : files.map((file) => new DefaultGeneratedFile(file))) != null ? _a17 : [];
+ function asFiles(content) {
+ return content.filter((part) => part.type === "file").map((part) => new DefaultGeneratedFile(part));
+ }
+ function asToolCalls(content) {
+ const parts = content.filter(
+ (part) => part.type === "tool-call"
+ );
+ if (parts.length === 0) {
+ return void 0;
+ }
+ return parts.map((toolCall) => ({
+ toolCallType: toolCall.toolCallType,
+ toolCallId: toolCall.toolCallId,
+ toolName: toolCall.toolName,
+ args: toolCall.args
+ }));
  }

  // core/generate-text/output.ts
@@ -4479,15 +6103,11 @@ __export(output_exports, {
  object: () => object,
  text: () => text
  });
- import { safeParseJSON as safeParseJSON3, safeValidateTypes as safeValidateTypes4 } from "@ai-sdk/provider-utils";
- import {
- asSchema as asSchema4,
- parsePartialJson as parsePartialJson2
- } from "@ai-sdk/ui-utils";
+ import { safeParseJSON as safeParseJSON4, safeValidateTypes as safeValidateTypes4 } from "@ai-sdk/provider-utils";

  // errors/index.ts
  import {
- AISDKError as AISDKError16,
+ AISDKError as AISDKError17,
  APICallError as APICallError2,
  EmptyResponseBodyError,
  InvalidPromptError as InvalidPromptError2,
@@ -4501,12 +6121,12 @@ import {
  } from "@ai-sdk/provider";

  // errors/invalid-stream-part-error.ts
- import { AISDKError as AISDKError14 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError15 } from "@ai-sdk/provider";
  var name14 = "AI_InvalidStreamPartError";
  var marker14 = `vercel.ai.error.${name14}`;
  var symbol14 = Symbol.for(marker14);
  var _a14;
- var InvalidStreamPartError = class extends AISDKError14 {
+ var InvalidStreamPartError = class extends AISDKError15 {
  constructor({
  chunk,
  message
@@ -4516,18 +6136,18 @@ var InvalidStreamPartError = class extends AISDKError14 {
  this.chunk = chunk;
  }
  static isInstance(error) {
- return AISDKError14.hasMarker(error, marker14);
+ return AISDKError15.hasMarker(error, marker14);
  }
  };
  _a14 = symbol14;

  // errors/mcp-client-error.ts
- import { AISDKError as AISDKError15 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError16 } from "@ai-sdk/provider";
  var name15 = "AI_MCPClientError";
  var marker15 = `vercel.ai.error.${name15}`;
  var symbol15 = Symbol.for(marker15);
  var _a15;
- var MCPClientError = class extends AISDKError15 {
+ var MCPClientError = class extends AISDKError16 {
  constructor({
  name: name17 = "MCPClientError",
  message,
@@ -4537,7 +6157,7 @@ var MCPClientError = class extends AISDKError15 {
  this[_a15] = true;
  }
  static isInstance(error) {
- return AISDKError15.hasMarker(error, marker15);
+ return AISDKError16.hasMarker(error, marker15);
  }
  };
  _a15 = symbol15;
@@ -4559,7 +6179,7 @@ var text = () => ({
  var object = ({
  schema: inputSchema
  }) => {
- const schema = asSchema4(inputSchema);
+ const schema = asSchema(inputSchema);
  return {
  type: "object",
  responseFormat: ({ model }) => ({
@@ -4573,7 +6193,7 @@ var object = ({
  });
  },
  parsePartial({ text: text2 }) {
- const result = parsePartialJson2(text2);
+ const result = parsePartialJson(text2);
  switch (result.state) {
  case "failed-parse":
  case "undefined-input":
@@ -4591,7 +6211,7 @@
  }
  },
  parseOutput({ text: text2 }, context) {
- const parseResult = safeParseJSON3({ text: text2 });
+ const parseResult = safeParseJSON4({ text: text2 });
  if (!parseResult.success) {
  throw new NoObjectGeneratedError({
  message: "No object generated: could not parse the response.",
@@ -4670,18 +6290,18 @@ function smoothStream({
  let buffer = "";
  return new TransformStream({
  async transform(chunk, controller) {
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  if (buffer.length > 0) {
- controller.enqueue({ type: "text-delta", textDelta: buffer });
+ controller.enqueue({ type: "text", text: buffer });
  buffer = "";
  }
  controller.enqueue(chunk);
  return;
  }
- buffer += chunk.textDelta;
+ buffer += chunk.text;
  let match;
  while ((match = detectChunk(buffer)) != null) {
- controller.enqueue({ type: "text-delta", textDelta: match });
+ controller.enqueue({ type: "text", text: match });
  buffer = buffer.slice(match.length);
  await delay2(delayInMs);
  }
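// smoothStream now buffers `{ type: "text", text }` parts instead of
// `{ type: "text-delta", textDelta }`. Sketch of wiring it into streamText,
// assuming the `experimental_transform` option (not shown in this hunk):
import { streamText, smoothStream } from "ai";

const result = streamText({
  model, // placeholder
  prompt: "Tell me a story.",
  experimental_transform: smoothStream({ delayInMs: 10 }),
});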
@@ -4691,15 +6311,35 @@
  }

  // core/generate-text/stream-text.ts
- import { AISDKError as AISDKError17 } from "@ai-sdk/provider";
+ import {
+ AISDKError as AISDKError18
+ } from "@ai-sdk/provider";
  import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";
- import { formatDataStreamPart as formatDataStreamPart2 } from "@ai-sdk/ui-utils";

  // util/as-array.ts
  function asArray(value) {
  return value === void 0 ? [] : Array.isArray(value) ? value : [value];
  }

+ // util/consume-stream.ts
+ async function consumeStream({
+ stream,
+ onError
+ }) {
+ const reader = stream.getReader();
+ try {
+ while (true) {
+ const { done } = await reader.read();
+ if (done)
+ break;
+ }
+ } catch (error) {
+ onError == null ? void 0 : onError(error);
+ } finally {
+ reader.releaseLock();
+ }
+ }
+
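// The new consumeStream helper drains a ReadableStream without keeping the
// chunks and reports stream errors through an optional callback. Sketch:
await consumeStream({
  stream: new ReadableStream({
    start(controller) {
      controller.enqueue("chunk");
      controller.close();
    },
  }),
  onError: (error) => console.error("stream failed:", error),
});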
  // core/util/merge-streams.ts
  function mergeStreams(stream1, stream2) {
  const reader1 = stream1.getReader();
@@ -4789,7 +6429,6 @@ function mergeStreams(stream1, stream2) {
  }

  // core/generate-text/run-tools-transformation.ts
- import { generateId } from "@ai-sdk/ui-utils";
  function runToolsTransformation({
  tools,
  generatorStream,
@@ -4823,10 +6462,9 @@ function runToolsTransformation({
  async transform(chunk, controller) {
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta":
+ case "stream-start":
+ case "text":
  case "reasoning":
- case "reasoning-signature":
- case "redacted-reasoning":
  case "source":
  case "response-metadata":
  case "error": {
@@ -4834,12 +6472,13 @@ function runToolsTransformation({
  break;
  }
  case "file": {
- controller.enqueue(
- new DefaultGeneratedFileWithType({
+ controller.enqueue({
+ type: "file",
+ file: new DefaultGeneratedFileWithType({
  data: chunk.data,
- mimeType: chunk.mimeType
+ mediaType: chunk.mediaType
  })
- );
+ });
  break;
  }
  case "tool-call-delta": {
@@ -4948,8 +6587,8 @@ function runToolsTransformation({
  type: "finish",
  finishReason: chunk.finishReason,
  logprobs: chunk.logprobs,
- usage: calculateLanguageModelUsage(chunk.usage),
- experimental_providerMetadata: chunk.providerMetadata
+ usage: calculateLanguageModelUsage2(chunk.usage),
+ providerMetadata: chunk.providerMetadata
  };
  break;
  }
@@ -5015,8 +6654,7 @@ function streamText({
  experimental_output: output,
  experimental_continueSteps: continueSteps = false,
  experimental_telemetry: telemetry,
- experimental_providerMetadata,
- providerOptions = experimental_providerMetadata,
+ providerOptions,
  experimental_toolCallStreaming = false,
  toolCallStreaming = experimental_toolCallStreaming,
  experimental_activeTools: activeTools,
@@ -5079,7 +6717,7 @@ function createOutputTransformStream(output) {
  partialOutput = void 0
  }) {
  controller.enqueue({
- part: { type: "text-delta", textDelta: textChunk },
+ part: { type: "text", text: textChunk },
  partialOutput
  });
  textChunk = "";
@@ -5089,12 +6727,12 @@
  if (chunk.type === "step-finish") {
  publishTextChunk({ controller });
  }
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  controller.enqueue({ part: chunk, partialOutput: void 0 });
  return;
  }
- text2 += chunk.textDelta;
- textChunk += chunk.textDelta;
+ text2 += chunk.text;
+ textChunk += chunk.text;
  const result = output.parsePartial({ text: text2 });
  if (result != null) {
  const currentJson = JSON.stringify(result.partial);
@@ -5189,44 +6827,44 @@ var DefaultStreamTextResult = class {
  async transform(chunk, controller) {
  controller.enqueue(chunk);
  const { part } = chunk;
- if (part.type === "text-delta" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
+ if (part.type === "text" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
  await (onChunk == null ? void 0 : onChunk({ chunk: part }));
  }
  if (part.type === "error") {
  await (onError == null ? void 0 : onError({ error: part.error }));
  }
- if (part.type === "text-delta") {
- recordedStepText += part.textDelta;
- recordedContinuationText += part.textDelta;
- recordedFullText += part.textDelta;
+ if (part.type === "text") {
+ recordedStepText += part.text;
+ recordedContinuationText += part.text;
+ recordedFullText += part.text;
  }
  if (part.type === "reasoning") {
- if (activeReasoningText == null) {
- activeReasoningText = { type: "text", text: part.textDelta };
- stepReasoning.push(activeReasoningText);
- } else {
- activeReasoningText.text += part.textDelta;
- }
- }
- if (part.type === "reasoning-signature") {
- if (activeReasoningText == null) {
- throw new AISDKError17({
- name: "InvalidStreamPart",
- message: "reasoning-signature without reasoning"
- });
+ if (part.reasoningType === "text") {
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: part.text };
+ stepReasoning.push(activeReasoningText);
+ } else {
+ activeReasoningText.text += part.text;
+ }
+ } else if (part.reasoningType === "signature") {
+ if (activeReasoningText == null) {
+ throw new AISDKError18({
+ name: "InvalidStreamPart",
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText.signature = part.signature;
+ activeReasoningText = void 0;
+ } else if (part.reasoningType === "redacted") {
+ stepReasoning.push({ type: "redacted", data: part.data });
  }
- activeReasoningText.signature = part.signature;
- activeReasoningText = void 0;
- }
- if (part.type === "redacted-reasoning") {
- stepReasoning.push({ type: "redacted", data: part.data });
  }
  if (part.type === "file") {
- stepFiles.push(part);
+ stepFiles.push(part.file);
  }
  if (part.type === "source") {
- recordedSources.push(part.source);
- recordedStepSources.push(part.source);
+ recordedSources.push(part);
+ recordedStepSources.push(part);
  }
  if (part.type === "tool-call") {
  recordedToolCalls.push(part);
@@ -5262,8 +6900,8 @@
  const currentStepResult = {
  stepType,
  text: recordedStepText,
- reasoning: asReasoningText(stepReasoning),
- reasoningDetails: stepReasoning,
+ reasoningText: asReasoningText(stepReasoning),
+ reasoning: stepReasoning,
  files: stepFiles,
  sources: recordedStepSources,
  toolCalls: recordedToolCalls,
@@ -5277,8 +6915,7 @@
  ...part.response,
  messages: [...recordedResponse.messages, ...stepMessages]
  },
- providerMetadata: part.experimental_providerMetadata,
- experimental_providerMetadata: part.experimental_providerMetadata,
+ providerMetadata: part.providerMetadata,
  isContinued: part.isContinued
  };
  await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
@@ -5319,11 +6956,9 @@
  self.responsePromise.resolve(lastStep.response);
  self.toolCallsPromise.resolve(lastStep.toolCalls);
  self.toolResultsPromise.resolve(lastStep.toolResults);
- self.providerMetadataPromise.resolve(
- lastStep.experimental_providerMetadata
- );
- self.reasoningPromise.resolve(lastStep.reasoning);
- self.reasoningDetailsPromise.resolve(lastStep.reasoningDetails);
+ self.providerMetadataPromise.resolve(lastStep.providerMetadata);
+ self.reasoningPromise.resolve(lastStep.reasoningText);
+ self.reasoningDetailsPromise.resolve(lastStep.reasoning);
  const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
  const usage = recordedUsage != null ? recordedUsage : {
  completionTokens: NaN,
@@ -5341,8 +6976,8 @@
  logprobs: void 0,
  usage,
  text: recordedFullText,
+ reasoningText: lastStep.reasoningText,
  reasoning: lastStep.reasoning,
- reasoningDetails: lastStep.reasoningDetails,
  files: lastStep.files,
  sources: lastStep.sources,
  toolCalls: lastStep.toolCalls,
@@ -5351,7 +6986,6 @@
  response: lastStep.response,
  warnings: lastStep.warnings,
  providerMetadata: lastStep.providerMetadata,
- experimental_providerMetadata: lastStep.experimental_providerMetadata,
  steps: recordedSteps
  }));
  rootSpan.setAttributes(
@@ -5455,12 +7089,11 @@
  modelSupportsUrl: (_a18 = model.supportsUrl) == null ? void 0 : _a18.bind(model)
  // support 'this' context
  });
- const mode = {
- type: "regular",
+ const toolsAndToolChoice = {
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
  };
  const {
- result: { stream: stream2, warnings, rawResponse, request },
+ result: { stream: stream2, response, request },
  doStreamSpan,
  startTimestampMs
  } = await retry(
@@ -5484,17 +7117,19 @@
  // convert the language model level tools:
  input: () => {
  var _a19;
- return (_a19 = mode.tools) == null ? void 0 : _a19.map((tool2) => JSON.stringify(tool2));
+ return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map(
+ (tool2) => JSON.stringify(tool2)
+ );
  }
  },
  "ai.prompt.toolChoice": {
- input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
+ input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.system": model.provider,
  "gen_ai.request.model": model.modelId,
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
  "gen_ai.request.presence_penalty": settings.presencePenalty,
  "gen_ai.request.stop_sequences": settings.stopSequences,
  "gen_ai.request.temperature": settings.temperature,
@@ -5509,12 +7144,12 @@
  // get before the call
  doStreamSpan: doStreamSpan2,
  result: await model.doStream({
- mode,
  ...prepareCallSettings(settings),
+ ...toolsAndToolChoice,
  inputFormat: promptFormat,
  responseFormat: output == null ? void 0 : output.responseFormat({ model }),
  prompt: promptMessages,
- providerMetadata: providerOptions,
+ providerOptions,
  abortSignal,
  headers
  })
@@ -5535,6 +7170,7 @@
  const stepRequest = request != null ? request : {};
  const stepToolCalls = [];
  const stepToolResults = [];
+ let warnings;
  const stepReasoning2 = [];
  const stepFiles2 = [];
  let activeReasoningText2 = void 0;
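// Streams now begin with a `stream-start` part that carries the call
// warnings; the transform that follows captures it into `warnings` instead
// of receiving them on the doStream result. Sketch of the opening parts a
// v2 model stream emits (values illustrative):
const openingParts = [
  { type: "stream-start", warnings: [] },
  { type: "response-metadata", id: "resp_1", modelId: "model-1" },
];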
@@ -5563,16 +7199,20 @@
  chunk
  }) {
  controller.enqueue(chunk);
- stepText += chunk.textDelta;
- fullStepText += chunk.textDelta;
+ stepText += chunk.text;
+ fullStepText += chunk.text;
  chunkTextPublished = true;
- hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
+ hasWhitespaceSuffix = chunk.text.trimEnd() !== chunk.text;
  }
  self.addStream(
  transformedStream.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
  var _a19, _b, _c;
+ if (chunk.type === "stream-start") {
+ warnings = chunk.warnings;
+ return;
+ }
  if (stepFirstChunk) {
  const msToFirstChunk = now2() - startTimestampMs;
  stepFirstChunk = false;
@@ -5589,14 +7229,14 @@
  warnings: warnings != null ? warnings : []
  });
  }
- if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
+ if (chunk.type === "text" && chunk.text.length === 0) {
  return;
  }
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta": {
+ case "text": {
  if (continueSteps) {
- const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
+ const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.text.trimStart() : chunk.text;
  if (trimmedChunkText.length === 0) {
  break;
  }
@@ -5608,8 +7248,8 @@
  await publishTextChunk({
  controller,
  chunk: {
- type: "text-delta",
- textDelta: split.prefix + split.whitespace
+ type: "text",
+ text: split.prefix + split.whitespace
  }
  });
  }
@@ -5620,35 +7260,31 @@
  }
  case "reasoning": {
  controller.enqueue(chunk);
- if (activeReasoningText2 == null) {
- activeReasoningText2 = {
- type: "text",
- text: chunk.textDelta
- };
- stepReasoning2.push(activeReasoningText2);
- } else {
- activeReasoningText2.text += chunk.textDelta;
- }
- break;
- }
- case "reasoning-signature": {
- controller.enqueue(chunk);
- if (activeReasoningText2 == null) {
- throw new InvalidStreamPartError({
- chunk,
- message: "reasoning-signature without reasoning"
+ if (chunk.reasoningType === "text") {
+ if (activeReasoningText2 == null) {
+ activeReasoningText2 = {
+ type: "text",
+ text: chunk.text
+ };
+ stepReasoning2.push(activeReasoningText2);
+ } else {
+ activeReasoningText2.text += chunk.text;
+ }
+ } else if (chunk.reasoningType === "signature") {
+ if (activeReasoningText2 == null) {
+ throw new InvalidStreamPartError({
+ chunk,
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText2.signature = chunk.signature;
+ activeReasoningText2 = void 0;
+ } else if (chunk.reasoningType === "redacted") {
+ stepReasoning2.push({
+ type: "redacted",
+ data: chunk.data
  });
  }
- activeReasoningText2.signature = chunk.signature;
- activeReasoningText2 = void 0;
- break;
- }
- case "redacted-reasoning": {
- controller.enqueue(chunk);
- stepReasoning2.push({
- type: "redacted",
- data: chunk.data
- });
  break;
  }
  case "tool-call": {
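// The three reasoning chunk shapes handled above, now discriminated by a
// `reasoningType` field on a single `reasoning` part type (formerly the
// separate `reasoning`, `reasoning-signature`, and `redacted-reasoning`
// part types). Values illustrative:
const reasoningChunks = [
  { type: "reasoning", reasoningType: "text", text: "thinking..." },
  { type: "reasoning", reasoningType: "signature", signature: "sig_abc" },
  { type: "reasoning", reasoningType: "redacted", data: "opaque-data" },
];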
@@ -5672,7 +7308,7 @@
  case "finish": {
  stepUsage = chunk.usage;
  stepFinishReason = chunk.finishReason;
- stepProviderMetadata = chunk.experimental_providerMetadata;
+ stepProviderMetadata = chunk.providerMetadata;
  stepLogProbs = chunk.logprobs;
  const msToFinish = now2() - startTimestampMs;
  doStreamSpan.addEvent("ai.stream.finish");
@@ -5683,7 +7319,7 @@
  break;
  }
  case "file": {
- stepFiles2.push(chunk);
+ stepFiles2.push(chunk.file);
  controller.enqueue(chunk);
  break;
  }
@@ -5724,10 +7360,7 @@
  stepType2 === "continue" && !chunkTextPublished)) {
  await publishTextChunk({
  controller,
- chunk: {
- type: "text-delta",
- textDelta: chunkBuffer
- }
+ chunk: { type: "text", text: chunkBuffer }
  });
  chunkBuffer = "";
  }
@@ -5764,12 +7397,11 @@
  finishReason: stepFinishReason,
  usage: stepUsage,
  providerMetadata: stepProviderMetadata,
- experimental_providerMetadata: stepProviderMetadata,
  logprobs: stepLogProbs,
  request: stepRequest,
  response: {
  ...stepResponse,
- headers: rawResponse == null ? void 0 : rawResponse.headers
+ headers: response == null ? void 0 : response.headers
  },
  warnings,
  isContinued: nextStepType === "continue",
@@ -5782,11 +7414,10 @@
  finishReason: stepFinishReason,
  usage: combinedUsage,
  providerMetadata: stepProviderMetadata,
- experimental_providerMetadata: stepProviderMetadata,
  logprobs: stepLogProbs,
  response: {
  ...stepResponse,
- headers: rawResponse == null ? void 0 : rawResponse.headers
+ headers: response == null ? void 0 : response.headers
  }
  });
  self.closeStream();
@@ -5868,19 +7499,16 @@
  get finishReason() {
  return this.finishReasonPromise.value;
  }
- get experimental_providerMetadata() {
- return this.providerMetadataPromise.value;
- }
  get providerMetadata() {
  return this.providerMetadataPromise.value;
  }
  get text() {
  return this.textPromise.value;
  }
- get reasoning() {
+ get reasoningText() {
  return this.reasoningPromise.value;
  }
- get reasoningDetails() {
+ get reasoning() {
  return this.reasoningDetailsPromise.value;
  }
  get sources() {
@@ -5922,8 +7550,8 @@
  this.teeStream().pipeThrough(
  new TransformStream({
  transform({ part }, controller) {
- if (part.type === "text-delta") {
- controller.enqueue(part.textDelta);
+ if (part.type === "text") {
+ controller.enqueue(part.text);
  }
  }
  })
@@ -5941,9 +7569,15 @@
  )
  );
  }
- async consumeStream() {
- const stream = this.fullStream;
- for await (const part of stream) {
+ async consumeStream(options) {
+ var _a17;
+ try {
+ await consumeStream({
+ stream: this.fullStream,
+ onError: options == null ? void 0 : options.onError
+ });
+ } catch (error) {
+ (_a17 = options == null ? void 0 : options.onError) == null ? void 0 : _a17.call(options, error);
  }
  }
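// consumeStream() on the streamText result now accepts an error callback
// instead of silently iterating the full stream. Sketch, with `result`
// returned by streamText:
await result.consumeStream({
  onError: (error) => console.error("stream error:", error),
});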
  get experimental_partialOutputStream() {
@@ -5975,58 +7609,51 @@
  transform: async (chunk, controller) => {
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta": {
- controller.enqueue(formatDataStreamPart2("text", chunk.textDelta));
+ case "text": {
+ controller.enqueue(formatDataStreamPart("text", chunk.text));
  break;
  }
  case "reasoning": {
  if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart2("reasoning", chunk.textDelta)
- );
- }
- break;
- }
- case "redacted-reasoning": {
- if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart2("redacted_reasoning", {
- data: chunk.data
- })
- );
- }
- break;
- }
- case "reasoning-signature": {
- if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart2("reasoning_signature", {
- signature: chunk.signature
- })
- );
+ if (chunk.reasoningType === "text") {
+ controller.enqueue(
+ formatDataStreamPart("reasoning", chunk.text)
+ );
+ } else if (chunk.reasoningType === "signature") {
+ controller.enqueue(
+ formatDataStreamPart("reasoning_signature", {
+ signature: chunk.signature
+ })
+ );
+ } else if (chunk.reasoningType === "redacted") {
+ controller.enqueue(
+ formatDataStreamPart("redacted_reasoning", {
+ data: chunk.data
+ })
+ );
+ }
  }
  break;
  }
  case "file": {
  controller.enqueue(
- formatDataStreamPart2("file", {
- mimeType: chunk.mimeType,
- data: chunk.base64
+ // TODO update protocol to v2 or replace with event stream
+ formatDataStreamPart("file", {
+ mimeType: chunk.file.mediaType,
+ data: chunk.file.base64
  })
  );
  break;
  }
  case "source": {
  if (sendSources) {
- controller.enqueue(
- formatDataStreamPart2("source", chunk.source)
- );
+ controller.enqueue(formatDataStreamPart("source", chunk));
  }
  break;
  }
  case "tool-call-streaming-start": {
  controller.enqueue(
- formatDataStreamPart2("tool_call_streaming_start", {
+ formatDataStreamPart("tool_call_streaming_start", {
  toolCallId: chunk.toolCallId,
  toolName: chunk.toolName
  })
@@ -6035,7 +7662,7 @@
  }
  case "tool-call-delta": {
  controller.enqueue(
- formatDataStreamPart2("tool_call_delta", {
+ formatDataStreamPart("tool_call_delta", {
  toolCallId: chunk.toolCallId,
  argsTextDelta: chunk.argsTextDelta
  })
@@ -6044,7 +7671,7 @@
  }
  case "tool-call": {
  controller.enqueue(
- formatDataStreamPart2("tool_call", {
+ formatDataStreamPart("tool_call", {
  toolCallId: chunk.toolCallId,
  toolName: chunk.toolName,
  args: chunk.args
@@ -6054,7 +7681,7 @@
  }
  case "tool-result": {
  controller.enqueue(
- formatDataStreamPart2("tool_result", {
+ formatDataStreamPart("tool_result", {
  toolCallId: chunk.toolCallId,
  result: chunk.result
  })
@@ -6063,13 +7690,13 @@
  }
  case "error": {
  controller.enqueue(
- formatDataStreamPart2("error", getErrorMessage5(chunk.error))
+ formatDataStreamPart("error", getErrorMessage5(chunk.error))
  );
  break;
  }
  case "step-start": {
  controller.enqueue(
- formatDataStreamPart2("start_step", {
+ formatDataStreamPart("start_step", {
  messageId: chunk.messageId
  })
  );
@@ -6077,7 +7704,7 @@
  }
  case "step-finish": {
  controller.enqueue(
- formatDataStreamPart2("finish_step", {
+ formatDataStreamPart("finish_step", {
  finishReason: chunk.finishReason,
  usage: sendUsage ? {
  promptTokens: chunk.usage.promptTokens,
@@ -6091,7 +7718,7 @@
  case "finish": {
  if (experimental_sendFinish) {
  controller.enqueue(
- formatDataStreamPart2("finish_message", {
+ formatDataStreamPart("finish_message", {
  finishReason: chunk.finishReason,
  usage: sendUsage ? {
  promptTokens: chunk.usage.promptTokens,
@@ -6214,6 +7841,160 @@
  }
  };

+ // errors/no-speech-generated-error.ts
+ import { AISDKError as AISDKError19 } from "@ai-sdk/provider";
+ var NoSpeechGeneratedError = class extends AISDKError19 {
+ constructor(options) {
+ super({
+ name: "AI_NoSpeechGeneratedError",
+ message: "No speech audio generated."
+ });
+ this.responses = options.responses;
+ }
+ };
+
+ // core/generate-speech/generated-audio-file.ts
+ var DefaultGeneratedAudioFile = class extends DefaultGeneratedFile {
+ constructor({
+ data,
+ mediaType
+ }) {
+ super({ data, mediaType });
+ let format = "mp3";
+ if (mediaType) {
+ const mimeTypeParts = mediaType.split("/");
+ if (mimeTypeParts.length === 2) {
+ if (mediaType !== "audio/mpeg") {
+ format = mimeTypeParts[1];
+ }
+ }
+ }
+ if (!format) {
+ throw new Error(
+ "Audio format must be provided or determinable from mimeType"
+ );
+ }
+ this.format = format;
+ }
+ };
+
+ // core/generate-speech/generate-speech.ts
+ async function generateSpeech({
+ model,
+ text: text2,
+ voice,
+ outputFormat,
+ instructions,
+ speed,
+ providerOptions = {},
+ maxRetries: maxRetriesArg,
+ abortSignal,
+ headers
+ }) {
+ var _a17;
+ const { retry } = prepareRetries({ maxRetries: maxRetriesArg });
+ const result = await retry(
+ () => model.doGenerate({
+ text: text2,
+ voice,
+ outputFormat,
+ instructions,
+ speed,
+ abortSignal,
+ headers,
+ providerOptions
+ })
+ );
+ if (!result.audio || result.audio.length === 0) {
+ throw new NoSpeechGeneratedError({ responses: [result.response] });
+ }
+ return new DefaultSpeechResult({
+ audio: new DefaultGeneratedAudioFile({
+ data: result.audio,
+ mediaType: (_a17 = detectMediaType({
+ data: result.audio,
+ signatures: audioMediaTypeSignatures
+ })) != null ? _a17 : "audio/mp3"
+ }),
+ warnings: result.warnings,
+ responses: [result.response],
+ providerMetadata: result.providerMetadata
+ });
+ }
+ var DefaultSpeechResult = class {
+ constructor(options) {
+ var _a17;
+ this.audio = options.audio;
+ this.warnings = options.warnings;
+ this.responses = options.responses;
+ this.providerMetadata = (_a17 = options.providerMetadata) != null ? _a17 : {};
+ }
+ };
+
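// Usage sketch for the new generateSpeech defined above; the speech model
// instance and voice are placeholders, and the root export name for this
// function is not visible in this hunk:
const { audio, warnings } = await generateSpeech({
  model: speechModel, // placeholder: a provider speech model
  text: "Hello, world!",
  voice: "alloy",
});
console.log(audio.format, audio.mediaType); // e.g. "mp3", "audio/mpeg"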
+ // errors/no-transcript-generated-error.ts
+ import { AISDKError as AISDKError20 } from "@ai-sdk/provider";
+ var NoTranscriptGeneratedError = class extends AISDKError20 {
+ constructor(options) {
+ super({
+ name: "AI_NoTranscriptGeneratedError",
+ message: "No transcript generated."
+ });
+ this.responses = options.responses;
+ }
+ };
+
+ // core/transcribe/transcribe.ts
+ async function transcribe({
+ model,
+ audio,
+ providerOptions = {},
+ maxRetries: maxRetriesArg,
+ abortSignal,
+ headers
+ }) {
+ const { retry } = prepareRetries({ maxRetries: maxRetriesArg });
+ const audioData = audio instanceof URL ? (await download({ url: audio })).data : convertDataContentToUint8Array(audio);
+ const result = await retry(
+ () => {
+ var _a17;
+ return model.doGenerate({
+ audio: audioData,
+ abortSignal,
+ headers,
+ providerOptions,
+ mediaType: (_a17 = detectMediaType({
+ data: audioData,
+ signatures: audioMediaTypeSignatures
+ })) != null ? _a17 : "audio/wav"
+ });
+ }
+ );
+ if (!result.text) {
+ throw new NoTranscriptGeneratedError({ responses: [result.response] });
+ }
+ return new DefaultTranscriptionResult({
+ text: result.text,
+ segments: result.segments,
+ language: result.language,
+ durationInSeconds: result.durationInSeconds,
+ warnings: result.warnings,
+ responses: [result.response],
+ providerMetadata: result.providerMetadata
+ });
+ }
+ var DefaultTranscriptionResult = class {
+ constructor(options) {
+ var _a17;
+ this.text = options.text;
+ this.segments = options.segments;
+ this.language = options.language;
+ this.durationInSeconds = options.durationInSeconds;
+ this.warnings = options.warnings;
+ this.responses = options.responses;
+ this.providerMetadata = (_a17 = options.providerMetadata) != null ? _a17 : {};
+ }
+ };
+
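// Usage sketch for the new transcribe helper defined above; `audio` may be
// a URL (downloaded) or raw data (converted to a Uint8Array). The model
// instance is a placeholder:
const { text, segments, durationInSeconds } = await transcribe({
  model: transcriptionModel, // placeholder: a provider transcription model
  audio: new URL("https://example.com/recording.wav"),
});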
  // core/util/merge-objects.ts
  function mergeObjects(target, source) {
  if (target === void 0 && source === void 0) {
@@ -6258,9 +8039,9 @@ function defaultSettingsMiddleware({
  return {
  ...settings,
  ...params,
- providerMetadata: mergeObjects(
- settings.providerMetadata,
- params.providerMetadata
+ providerOptions: mergeObjects(
+ settings.providerOptions,
+ params.providerOptions
  ),
  // special case for temperature 0
  // TODO remove when temperature defaults to undefined
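// defaultSettingsMiddleware now merges `providerOptions` rather than
// `providerMetadata`. Sketch, using wrapLanguageModel from this bundle
// (the `openai` payload is illustrative):
const modelWithDefaults = wrapLanguageModel({
  model, // placeholder
  middleware: defaultSettingsMiddleware({
    settings: {
      temperature: 0.3,
      providerOptions: { openai: { store: false } },
    },
  }),
});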
@@ -6299,27 +8080,41 @@ function extractReasoningMiddleware({
  return {
  middlewareVersion: "v2",
  wrapGenerate: async ({ doGenerate }) => {
- const { text: rawText, ...rest } = await doGenerate();
- if (rawText == null) {
- return { text: rawText, ...rest };
- }
- const text2 = startWithReasoning ? openingTag + rawText : rawText;
- const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
- const matches = Array.from(text2.matchAll(regexp));
- if (!matches.length) {
- return { text: text2, ...rest };
- }
- const reasoning = matches.map((match) => match[1]).join(separator);
- let textWithoutReasoning = text2;
- for (let i = matches.length - 1; i >= 0; i--) {
- const match = matches[i];
- const beforeMatch = textWithoutReasoning.slice(0, match.index);
- const afterMatch = textWithoutReasoning.slice(
- match.index + match[0].length
- );
- textWithoutReasoning = beforeMatch + (beforeMatch.length > 0 && afterMatch.length > 0 ? separator : "") + afterMatch;
+ const { content, ...rest } = await doGenerate();
+ const transformedContent = [];
+ for (const part of content) {
+ if (part.type !== "text") {
+ transformedContent.push(part);
+ continue;
+ }
+ const text2 = startWithReasoning ? openingTag + part.text : part.text;
+ const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
+ const matches = Array.from(text2.matchAll(regexp));
+ if (!matches.length) {
+ transformedContent.push(part);
+ continue;
+ }
+ const reasoningText = matches.map((match) => match[1]).join(separator);
+ let textWithoutReasoning = text2;
+ for (let i = matches.length - 1; i >= 0; i--) {
+ const match = matches[i];
+ const beforeMatch = textWithoutReasoning.slice(0, match.index);
+ const afterMatch = textWithoutReasoning.slice(
+ match.index + match[0].length
+ );
+ textWithoutReasoning = beforeMatch + (beforeMatch.length > 0 && afterMatch.length > 0 ? separator : "") + afterMatch;
+ }
+ transformedContent.push({
+ type: "reasoning",
+ reasoningType: "text",
+ text: reasoningText
+ });
+ transformedContent.push({
+ type: "text",
+ text: textWithoutReasoning
+ });
  }
- return { ...rest, text: textWithoutReasoning, reasoning };
+ return { content: transformedContent, ...rest };
  },
  wrapStream: async ({ doStream }) => {
  const { stream, ...rest } = await doStream();
@@ -6332,18 +8127,24 @@ function extractReasoningMiddleware({
  stream: stream.pipeThrough(
  new TransformStream({
  transform: (chunk, controller) => {
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  controller.enqueue(chunk);
  return;
  }
- buffer += chunk.textDelta;
+ buffer += chunk.text;
  function publish(text2) {
  if (text2.length > 0) {
  const prefix = afterSwitch && (isReasoning ? !isFirstReasoning : !isFirstText) ? separator : "";
- controller.enqueue({
- type: isReasoning ? "reasoning" : "text-delta",
- textDelta: prefix + text2
- });
+ controller.enqueue(
+ isReasoning ? {
+ type: "reasoning",
+ reasoningType: "text",
+ text: prefix + text2
+ } : {
+ type: "text",
+ text: prefix + text2
+ }
+ );
  afterSwitch = false;
  if (isReasoning) {
  isFirstReasoning = false;
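// extractReasoningMiddleware still splits tag-delimited spans out of the
// text, but now emits structured `reasoning` content parts and stream
// chunks. Usage sketch, assuming the `tagName` option from which
// `openingTag`/`closingTag` above are derived:
const reasoningModel = wrapLanguageModel({
  model, // placeholder: a model that emits <think>...</think> spans
  middleware: extractReasoningMiddleware({ tagName: "think" }),
});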
@@ -6388,60 +8189,13 @@ function simulateStreamingMiddleware() {
  const result = await doGenerate();
  const simulatedStream = new ReadableStream({
  start(controller) {
+ controller.enqueue({
+ type: "stream-start",
+ warnings: result.warnings
+ });
  controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.reasoning) {
- if (typeof result.reasoning === "string") {
- controller.enqueue({
- type: "reasoning",
- textDelta: result.reasoning
- });
- } else {
- for (const reasoning of result.reasoning) {
- switch (reasoning.type) {
- case "text": {
- controller.enqueue({
- type: "reasoning",
- textDelta: reasoning.text
- });
- if (reasoning.signature != null) {
- controller.enqueue({
- type: "reasoning-signature",
- signature: reasoning.signature
- });
- }
- break;
- }
- case "redacted": {
- controller.enqueue({
- type: "redacted-reasoning",
- data: reasoning.data
- });
- break;
- }
- }
- }
- }
- }
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
+ for (const part of result.content) {
+ controller.enqueue(part);
  }
  controller.enqueue({
  type: "finish",
@@ -6455,9 +8209,8 @@
  });
  return {
  stream: simulatedStream,
- rawCall: result.rawCall,
- rawResponse: result.rawResponse,
- warnings: result.warnings
+ request: result.request,
+ response: result.response
  };
  }
  };
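// simulateStreamingMiddleware now opens the simulated stream with a
// `stream-start` part (carrying warnings) and replays the generate
// result's `content` parts verbatim. Usage sketch:
const streamingModel = wrapLanguageModel({
  model, // placeholder: a non-streaming model
  middleware: simulateStreamingMiddleware(),
});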
@@ -6514,7 +8267,6 @@ var doWrap = ({
     }
   };
 };
-var experimental_wrapLanguageModel = wrapLanguageModel;
 
 // core/prompt/append-client-message.ts
 function appendClientMessage({
@@ -6528,16 +8280,13 @@ function appendClientMessage({
 }
 
 // core/prompt/append-response-messages.ts
-import {
-  extractMaxToolInvocationStep
-} from "@ai-sdk/ui-utils";
-import { AISDKError as AISDKError18 } from "@ai-sdk/provider";
+import { AISDKError as AISDKError21 } from "@ai-sdk/provider";
 function appendResponseMessages({
   messages,
   responseMessages,
   _internal: { currentDate = () => /* @__PURE__ */ new Date() } = {}
 }) {
-  var _a17, _b, _c, _d;
+  var _a17, _b, _c, _d, _e;
   const clonedMessages = structuredClone(messages);
   for (const message of responseMessages) {
     const role = message.role;
@@ -6614,14 +8363,14 @@ function appendResponseMessages({
           break;
         case "file":
           if (part.data instanceof URL) {
-            throw new AISDKError18({
+            throw new AISDKError21({
               name: "InvalidAssistantFileData",
               message: "File data cannot be a URL"
             });
           }
           parts.push({
             type: "file",
-            mimeType: part.mimeType,
+            mediaType: (_a17 = part.mediaType) != null ? _a17 : part.mimeType,
             data: convertDataContentToBase64String(part.data)
           });
           break;
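Note: file parts produced by appendResponseMessages now carry mediaType, falling back to the legacy mimeType field, as compiled above. The uncompiled equivalent is roughly:

    // Sketch of the fallback (the type is illustrative, not the package's):
    type FileLikePart = { mediaType?: string; mimeType?: string };
    const resolveMediaType = (part: FileLikePart) => part.mediaType ?? part.mimeType;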
@@ -6632,12 +8381,12 @@ function appendResponseMessages({
         const maxStep = extractMaxToolInvocationStep(
           lastMessage.toolInvocations
         );
-        (_a17 = lastMessage.parts) != null ? _a17 : lastMessage.parts = [];
+        (_b = lastMessage.parts) != null ? _b : lastMessage.parts = [];
         lastMessage.content = textContent;
         lastMessage.reasoning = reasoningTextContent;
         lastMessage.parts.push(...parts);
         lastMessage.toolInvocations = [
-          ...(_b = lastMessage.toolInvocations) != null ? _b : [],
+          ...(_c = lastMessage.toolInvocations) != null ? _c : [],
           ...getToolInvocations2(maxStep === void 0 ? 0 : maxStep + 1)
         ];
         getToolInvocations2(maxStep === void 0 ? 0 : maxStep + 1).map((call) => ({
@@ -6667,13 +8416,13 @@ function appendResponseMessages({
           break;
         }
         case "tool": {
-          (_c = lastMessage.toolInvocations) != null ? _c : lastMessage.toolInvocations = [];
+          (_d = lastMessage.toolInvocations) != null ? _d : lastMessage.toolInvocations = [];
          if (lastMessage.role !== "assistant") {
            throw new Error(
              `Tool result must follow an assistant message: ${lastMessage.role}`
            );
          }
-          (_d = lastMessage.parts) != null ? _d : lastMessage.parts = [];
+          (_e = lastMessage.parts) != null ? _e : lastMessage.parts = [];
           for (const contentPart of message.content) {
             const toolCall = lastMessage.toolInvocations.find(
               (call) => call.toolCallId === contentPart.toolCallId
@@ -6748,7 +8497,7 @@ function customProvider({
 var experimental_customProvider = customProvider;
 
 // core/registry/no-such-provider-error.ts
-import { AISDKError as AISDKError19, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
+import { AISDKError as AISDKError22, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
 var name16 = "AI_NoSuchProviderError";
 var marker16 = `vercel.ai.error.${name16}`;
 var symbol16 = Symbol.for(marker16);
@@ -6767,7 +8516,7 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
     this.availableProviders = availableProviders;
   }
   static isInstance(error) {
-    return AISDKError19.hasMarker(error, marker16);
+    return AISDKError22.hasMarker(error, marker16);
   }
 };
 _a16 = symbol16;
@@ -6818,7 +8567,7 @@ var DefaultProviderRegistry = class {
         message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
       });
     }
-    return [id.slice(0, index), id.slice(index + 1)];
+    return [id.slice(0, index), id.slice(index + this.separator.length)];
   }
   languageModel(id) {
     var _a17, _b;
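Note: the registry id split now advances by this.separator.length instead of a hard-coded 1, so multi-character separators resolve the model id correctly. A standalone sketch of the corrected split (names are illustrative, not the package's internals):

    // Split "providerId<sep>modelId" for a separator of any length.
    function splitId(id: string, separator: string): [string, string] {
      const index = id.indexOf(separator);
      if (index === -1) throw new Error(`invalid id: ${id}`);
      return [id.slice(0, index), id.slice(index + separator.length)];
    }

    splitId('my-provider > my-model', ' > '); // ['my-provider', 'my-model']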
@@ -6854,9 +8603,6 @@ var DefaultProviderRegistry = class {
   }
 };
 
-// core/tool/mcp/mcp-client.ts
-import { jsonSchema } from "@ai-sdk/ui-utils";
-
 // core/tool/tool.ts
 function tool(tool2) {
   return tool2;
@@ -7341,6 +9087,7 @@ var MCPClient = class {
   async tools({
     schemas = "automatic"
   } = {}) {
+    var _a17;
     const tools = {};
     try {
       const listToolsResult = await this.listTools();
@@ -7348,14 +9095,18 @@ var MCPClient = class {
       if (schemas !== "automatic" && !(name17 in schemas)) {
         continue;
       }
-      const parameters = schemas === "automatic" ? jsonSchema(inputSchema) : schemas[name17].parameters;
+      const parameters = schemas === "automatic" ? jsonSchema({
+        ...inputSchema,
+        properties: (_a17 = inputSchema.properties) != null ? _a17 : {},
+        additionalProperties: false
+      }) : schemas[name17].parameters;
       const self = this;
       const toolWithExecute = tool({
         description,
         parameters,
         execute: async (args, options) => {
-          var _a17;
-          (_a17 = options == null ? void 0 : options.abortSignal) == null ? void 0 : _a17.throwIfAborted();
+          var _a18;
+          (_a18 = options == null ? void 0 : options.abortSignal) == null ? void 0 : _a18.throwIfAborted();
           return self.callTool({
             name: name17,
             args,
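Note: in "automatic" schema mode the MCP client now normalizes each tool's input schema, defaulting properties to {} and forcing additionalProperties: false, which suits providers that validate JSON schemas strictly. Call-site usage is unchanged; a hedged sketch (the transport configuration is a placeholder):

    import { experimental_createMCPClient } from 'ai';

    const client = await experimental_createMCPClient({
      transport: { type: 'sse', url: 'https://example.com/mcp' }, // placeholder endpoint
    });

    // "automatic" mode now yields normalized JSON schemas per tool.
    const tools = await client.tools({ schemas: 'automatic' });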
@@ -7408,7 +9159,7 @@ var MCPClient = class {
 };
 
 // core/util/cosine-similarity.ts
-function cosineSimilarity(vector1, vector2, options) {
+function cosineSimilarity(vector1, vector2) {
   if (vector1.length !== vector2.length) {
     throw new InvalidArgumentError({
       parameter: "vector1,vector2",
@@ -7418,13 +9169,6 @@ function cosineSimilarity(vector1, vector2, options) {
   }
   const n = vector1.length;
   if (n === 0) {
-    if (options == null ? void 0 : options.throwErrorForEmptyVectors) {
-      throw new InvalidArgumentError({
-        parameter: "vector1",
-        value: vector1,
-        message: "Vectors cannot be empty"
-      });
-    }
     return 0;
   }
   let magnitudeSquared1 = 0;
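Note: cosineSimilarity loses its third options argument; the throwErrorForEmptyVectors escape hatch is removed and empty vectors now always yield 0. For example:

    import { cosineSimilarity } from 'ai';

    cosineSimilarity([1, 2, 3], [1, 2, 3]); // 1
    cosineSimilarity([], []);               // 0 (the option to throw here is gone)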
@@ -7470,7 +9214,6 @@ __export(langchain_adapter_exports, {
   toDataStream: () => toDataStream,
   toDataStreamResponse: () => toDataStreamResponse
 });
-import { formatDataStreamPart as formatDataStreamPart4 } from "@ai-sdk/ui-utils";
 
 // streams/stream-callbacks.ts
 function createCallbacksTransformer(callbacks = {}) {
@@ -7526,7 +9269,7 @@ function toDataStreamInternal(stream, callbacks) {
   ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(new TextDecoderStream()).pipeThrough(
     new TransformStream({
       transform: async (chunk, controller) => {
-        controller.enqueue(formatDataStreamPart4("text", chunk));
+        controller.enqueue(formatDataStreamPart("text", chunk));
      }
    })
  );
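Note: the LangChain adapter (and the LlamaIndex adapter below) now uses the package-local formatDataStreamPart rather than the @ai-sdk/ui-utils import; the emitted data stream is unchanged. Typical usage, sketched with a placeholder stream source:

    import { LangChainAdapter } from 'ai';

    export async function POST(req: Request) {
      const stream = await getLangChainStream(req); // hypothetical helper returning a LangChain stream
      return LangChainAdapter.toDataStreamResponse(stream);
    }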
@@ -7578,7 +9321,6 @@ __export(llamaindex_adapter_exports, {
   toDataStreamResponse: () => toDataStreamResponse2
 });
 import { convertAsyncIteratorToReadableStream } from "@ai-sdk/provider-utils";
-import { formatDataStreamPart as formatDataStreamPart5 } from "@ai-sdk/ui-utils";
 function toDataStreamInternal2(stream, callbacks) {
   const trimStart = trimStartOfStream();
   return convertAsyncIteratorToReadableStream(stream[Symbol.asyncIterator]()).pipeThrough(
@@ -7590,7 +9332,7 @@ function toDataStreamInternal2(stream, callbacks) {
   ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(new TextDecoderStream()).pipeThrough(
     new TransformStream({
       transform: async (chunk, controller) => {
-        controller.enqueue(formatDataStreamPart5("text", chunk));
+        controller.enqueue(formatDataStreamPart("text", chunk));
      }
    })
  );
@@ -7631,9 +9373,6 @@ function trimStartOfStream() {
   };
 }
 
-// streams/stream-data.ts
-import { formatDataStreamPart as formatDataStreamPart6 } from "@ai-sdk/ui-utils";
-
 // util/constants.ts
 var HANGING_STREAM_WARNING_TIME_MS = 15 * 1e3;
 
@@ -7684,7 +9423,7 @@ var StreamData = class {
       throw new Error("Stream controller is not initialized.");
     }
     this.controller.enqueue(
-      this.encoder.encode(formatDataStreamPart6("data", [value]))
+      this.encoder.encode(formatDataStreamPart("data", [value]))
     );
   }
   appendMessageAnnotation(value) {
@@ -7695,12 +9434,12 @@ var StreamData = class {
       throw new Error("Stream controller is not initialized.");
    }
    this.controller.enqueue(
-      this.encoder.encode(formatDataStreamPart6("message_annotations", [value]))
+      this.encoder.encode(formatDataStreamPart("message_annotations", [value]))
    );
  }
};
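Note: StreamData's public surface is untouched; only the internal formatDataStreamPart reference changed. A minimal usage sketch:

    import { StreamData } from 'ai';

    const data = new StreamData();
    data.append({ status: 'indexing' });            // emits a "data" part
    data.appendMessageAnnotation({ source: 'kb' }); // emits "message_annotations"
    data.close(); // required so the data stream terminates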
 export {
-  AISDKError16 as AISDKError,
+  AISDKError17 as AISDKError,
   APICallError2 as APICallError,
   DownloadError,
   EmptyResponseBodyError,
@@ -7733,6 +9472,9 @@ export {
   UnsupportedFunctionalityError2 as UnsupportedFunctionalityError,
   appendClientMessage,
   appendResponseMessages,
+  asSchema,
+  callChatApi,
+  callCompletionApi,
   convertToCoreMessages,
   coreAssistantMessageSchema,
   coreMessageSchema,
@@ -7752,23 +9494,34 @@ export {
   experimental_createProviderRegistry,
   experimental_customProvider,
   generateImage as experimental_generateImage,
-  experimental_wrapLanguageModel,
+  generateSpeech as experimental_generateSpeech,
+  transcribe as experimental_transcribe,
+  extractMaxToolInvocationStep,
   extractReasoningMiddleware,
-  formatDataStreamPart3 as formatDataStreamPart,
+  fillMessageParts,
+  formatDataStreamPart,
   generateId2 as generateId,
   generateObject,
   generateText,
-  jsonSchema2 as jsonSchema,
+  getMessageParts,
+  getTextFromDataUrl,
+  isAssistantMessageWithCompletedToolCalls,
+  isDeepEqualData,
+  jsonSchema,
   parseDataStreamPart,
+  parsePartialJson,
   pipeDataStreamToResponse,
+  prepareAttachmentsForRequest,
   processDataStream,
   processTextStream,
+  shouldResubmitMessages,
   simulateReadableStream,
   simulateStreamingMiddleware,
   smoothStream,
   streamObject,
   streamText,
   tool,
+  updateToolCallResult,
   wrapLanguageModel,
   zodSchema
 };
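Net effect on the public surface: the experimental_wrapLanguageModel alias is gone (import wrapLanguageModel directly), speech and transcription arrive as experimental_generateSpeech / experimental_transcribe, and a batch of UI helpers (callChatApi, parsePartialJson, fillMessageParts, and friends) are now re-exported from the core package rather than @ai-sdk/ui-utils. For instance:

    // Before: import { experimental_wrapLanguageModel } from 'ai';
    import {
      wrapLanguageModel,
      experimental_generateSpeech as generateSpeech,
      experimental_transcribe as transcribe,
    } from 'ai';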