ai 3.3.19 → 3.3.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -9,7 +9,7 @@ import {
   formatStreamPart,
   parseStreamPart,
   readDataStream,
-  parseComplexResponse
+  processDataProtocolResponse
 } from "@ai-sdk/ui-utils";
 import { generateId as generateIdImpl } from "@ai-sdk/provider-utils";
 
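
Note: parseComplexResponse has been renamed to processDataProtocolResponse in @ai-sdk/ui-utils, and the re-export from ai changes with it (see the export list at the end of this diff). For consumers this is a one-line rename; a sketch, assuming the call site is otherwise unchanged:

// before (ai 3.3.19)
import { parseComplexResponse } from "ai";

// after (ai 3.3.21): same function under its new name
import { processDataProtocolResponse } from "ai";
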
@@ -2479,6 +2479,31 @@ function prepareToolsAndToolChoice({
   };
 }
 
+// core/generate-text/to-response-messages.ts
+function toResponseMessages({
+  text = "",
+  toolCalls,
+  toolResults
+}) {
+  const responseMessages = [];
+  responseMessages.push({
+    role: "assistant",
+    content: [{ type: "text", text }, ...toolCalls]
+  });
+  if (toolResults.length > 0) {
+    responseMessages.push({
+      role: "tool",
+      content: toolResults.map((result) => ({
+        type: "tool-result",
+        toolCallId: result.toolCallId,
+        toolName: result.toolName,
+        result: result.result
+      }))
+    });
+  }
+  return responseMessages;
+}
+
 // core/generate-text/tool-call.ts
 import { safeParseJSON as safeParseJSON2 } from "@ai-sdk/provider-utils";
 import { asSchema as asSchema3 } from "@ai-sdk/ui-utils";
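
toResponseMessages now lives in its own module and defaults text to "" (the copy it replaces, removed further down, required the caller to pass a string). It folds one generation step into chat messages: an assistant message carrying the text and tool calls, plus a tool message when tool results exist. A sketch of the returned shape, with invented inputs:

// Hypothetical inputs, for illustration only.
const messages = toResponseMessages({
  toolCalls: [
    { type: "tool-call", toolCallId: "call-1", toolName: "getWeather", args: { city: "Paris" } }
  ],
  toolResults: [
    { toolCallId: "call-1", toolName: "getWeather", result: { temperature: 18 } }
  ]
});
// messages[0]: { role: "assistant", content: [{ type: "text", text: "" }, ...tool calls] }
// messages[1]: { role: "tool", content: [{ type: "tool-result", toolCallId: "call-1", ... }] }
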
@@ -2647,7 +2672,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a12, _b, _c, _d;
+      var _a12, _b, _c;
       const retry = retryWithExponentialBackoff({ maxRetries });
       const validatedPrompt = validatePrompt({
         system,
@@ -2758,7 +2783,7 @@ async function generateText({
         logprobs: currentModelResponse.logprobs
       });
       const newResponseMessages = toResponseMessages({
-        text: (_c = currentModelResponse.text) != null ? _c : "",
+        text: currentModelResponse.text,
         toolCalls: currentToolCalls,
         toolResults: currentToolResults
       });
@@ -2794,7 +2819,7 @@ async function generateText({
       // Always return a string so that the caller doesn't have to check for undefined.
       // If they need to check if the model did not return any text,
       // they can check the length of the string:
-      text: (_d = currentModelResponse.text) != null ? _d : "",
+      text: (_c = currentModelResponse.text) != null ? _c : "",
       toolCalls: currentToolCalls,
       toolResults: currentToolResults,
       finishReason: currentModelResponse.finishReason,
@@ -2883,30 +2908,68 @@ var DefaultGenerateTextResult = class {
     this.experimental_providerMetadata = options.providerMetadata;
   }
 };
-function toResponseMessages({
-  text,
-  toolCalls,
-  toolResults
-}) {
-  const responseMessages = [];
-  responseMessages.push({
-    role: "assistant",
-    content: [{ type: "text", text }, ...toolCalls]
-  });
-  if (toolResults.length > 0) {
-    responseMessages.push({
-      role: "tool",
-      content: toolResults.map((result) => ({
-        type: "tool-result",
-        toolCallId: result.toolCallId,
-        toolName: result.toolName,
-        result: result.result
-      }))
-    });
-  }
-  return responseMessages;
+
+// core/util/create-stitchable-stream.ts
+function createStitchableStream() {
+  let innerStreamReaders = [];
+  let controller = null;
+  let isClosed = false;
+  const processPull = async () => {
+    if (isClosed && innerStreamReaders.length === 0) {
+      controller == null ? void 0 : controller.close();
+      return;
+    }
+    if (innerStreamReaders.length === 0) {
+      return;
+    }
+    try {
+      const { value, done } = await innerStreamReaders[0].read();
+      if (done) {
+        innerStreamReaders.shift();
+        if (innerStreamReaders.length > 0) {
+          await processPull();
+        } else if (isClosed) {
+          controller == null ? void 0 : controller.close();
+        }
+      } else {
+        controller == null ? void 0 : controller.enqueue(value);
+      }
+    } catch (error) {
+      controller == null ? void 0 : controller.error(error);
+      innerStreamReaders.shift();
+      if (isClosed && innerStreamReaders.length === 0) {
+        controller == null ? void 0 : controller.close();
+      }
+    }
+  };
+  return {
+    stream: new ReadableStream({
+      start(controllerParam) {
+        controller = controllerParam;
+      },
+      pull: processPull,
+      async cancel() {
+        for (const reader of innerStreamReaders) {
+          await reader.cancel();
+        }
+        innerStreamReaders = [];
+        isClosed = true;
+      }
+    }),
+    addStream: (innerStream) => {
+      if (isClosed) {
+        throw new Error("Cannot add inner stream: outer stream is closed");
+      }
+      innerStreamReaders.push(innerStream.getReader());
+    },
+    close: () => {
+      isClosed = true;
+      if (innerStreamReaders.length === 0) {
+        controller == null ? void 0 : controller.close();
+      }
+    }
+  };
 }
-var experimental_generateText = generateText;
 
 // core/util/merge-streams.ts
 function mergeStreams(stream1, stream2) {
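
createStitchableStream is the primitive behind the new multi-roundtrip streaming: one outer ReadableStream plus addStream/close controls, draining inner streams in the order they were added. The helper is internal to the bundle, so the following is only a sketch of its contract; it also assumes a runtime where ReadableStream is async-iterable and Response is global (e.g. Node 18+):

// Illustrative only: stitch two inner streams into one outer stream.
const { stream, addStream, close } = createStitchableStream();
addStream(new Response("first ").body);
addStream(new Response("second").body);
close(); // outer stream ends once both inner streams are drained

const decoder = new TextDecoder();
for await (const chunk of stream) {
  process.stdout.write(decoder.decode(chunk)); // prints "first second"
}
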
@@ -3195,6 +3258,7 @@ async function streamText({
   maxRetries,
   abortSignal,
   headers,
+  maxToolRoundtrips = 0,
   experimental_telemetry: telemetry,
   experimental_toolCallStreaming: toolCallStreaming = false,
   onChunk,
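
maxToolRoundtrips is the user-facing feature of this release: when a roundtrip finishes with tool calls and every call produced a result, streamText feeds the results back to the model and keeps streaming, up to the given limit (the default of 0 preserves the old single-call behavior). A usage sketch; the provider, model id, and tool are placeholders, not part of this diff:

import { streamText, tool } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";

const result = await streamText({
  model: openai("gpt-4o"), // placeholder model
  maxToolRoundtrips: 2,    // allow up to 2 automatic tool roundtrips
  tools: {
    getWeather: tool({
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, temperature: 18 })
    })
  },
  prompt: "What is the weather in Paris?"
});

for await (const delta of result.textStream) {
  process.stdout.write(delta);
}
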
@@ -3226,68 +3290,89 @@ async function streamText({
     endWhenDone: false,
     fn: async (rootSpan) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
-      const validatedPrompt = validatePrompt({ system, prompt, messages });
+      const startRoundtrip = async ({
+        promptMessages: promptMessages2,
+        promptType
+      }) => {
+        const {
+          result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2 },
+          doStreamSpan: doStreamSpan2,
+          startTimestamp: startTimestamp2
+        } = await retry(
+          () => recordSpan({
+            name: "ai.streamText.doStream",
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...assembleOperationName({
+                  operationId: "ai.streamText.doStream",
+                  telemetry
+                }),
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": {
+                  input: () => promptType
+                },
+                "ai.prompt.messages": {
+                  input: () => JSON.stringify(promptMessages2)
+                },
+                // standardized gen-ai llm span attributes:
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.system": model.provider,
+                "gen_ai.request.max_tokens": settings.maxTokens,
+                "gen_ai.request.temperature": settings.temperature,
+                "gen_ai.request.top_p": settings.topP
+              }
+            }),
+            tracer,
+            endWhenDone: false,
+            fn: async (doStreamSpan3) => ({
+              startTimestamp: performance.now(),
+              // get before the call
+              doStreamSpan: doStreamSpan3,
+              result: await model.doStream({
+                mode: {
+                  type: "regular",
+                  ...prepareToolsAndToolChoice({ tools, toolChoice })
+                },
+                ...prepareCallSettings(settings),
+                inputFormat: promptType,
+                prompt: promptMessages2,
+                abortSignal,
+                headers
+              })
+            })
+          })
+        );
+        return {
+          result: {
+            stream: runToolsTransformation({
+              tools,
+              generatorStream: stream2,
+              toolCallStreaming,
+              tracer,
+              telemetry
+            }),
+            warnings: warnings2,
+            rawResponse: rawResponse2
+          },
+          doStreamSpan: doStreamSpan2,
+          startTimestamp: startTimestamp2
+        };
+      };
       const promptMessages = await convertToLanguageModelPrompt({
-        prompt: validatedPrompt,
+        prompt: validatePrompt({ system, prompt, messages }),
         modelSupportsImageUrls: model.supportsImageUrls
       });
       const {
         result: { stream, warnings, rawResponse },
         doStreamSpan,
         startTimestamp
-      } = await retry(
-        () => recordSpan({
-          name: "ai.streamText.doStream",
-          attributes: selectTelemetryAttributes({
-            telemetry,
-            attributes: {
-              ...assembleOperationName({
-                operationId: "ai.streamText.doStream",
-                telemetry
-              }),
-              ...baseTelemetryAttributes,
-              "ai.prompt.format": {
-                input: () => validatedPrompt.type
-              },
-              "ai.prompt.messages": {
-                input: () => JSON.stringify(promptMessages)
-              },
-              // standardized gen-ai llm span attributes:
-              "gen_ai.request.model": model.modelId,
-              "gen_ai.system": model.provider,
-              "gen_ai.request.max_tokens": settings.maxTokens,
-              "gen_ai.request.temperature": settings.temperature,
-              "gen_ai.request.top_p": settings.topP
-            }
-          }),
-          tracer,
-          endWhenDone: false,
-          fn: async (doStreamSpan2) => ({
-            startTimestamp: performance.now(),
-            // get before the call
-            doStreamSpan: doStreamSpan2,
-            result: await model.doStream({
-              mode: {
-                type: "regular",
-                ...prepareToolsAndToolChoice({ tools, toolChoice })
-              },
-              ...prepareCallSettings(settings),
-              inputFormat: validatedPrompt.type,
-              prompt: promptMessages,
-              abortSignal,
-              headers
-            })
-          })
-        })
-      );
+      } = await startRoundtrip({
+        promptType: validatePrompt({ system, prompt, messages }).type,
+        promptMessages
+      });
       return new DefaultStreamTextResult({
-        stream: runToolsTransformation({
-          tools,
-          generatorStream: stream,
-          toolCallStreaming,
-          tracer,
-          telemetry
-        }),
+        stream,
         warnings,
         rawResponse,
         onChunk,
@@ -3295,7 +3380,10 @@ async function streamText({
       rootSpan,
       doStreamSpan,
      telemetry,
-      startTimestamp
+      startTimestamp,
+      maxToolRoundtrips,
+      startRoundtrip,
+      promptMessages
     });
   }
 });
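
The doStream call, its telemetry span, and the runToolsTransformation wrapper now live in the startRoundtrip closure, so later roundtrips can re-run them against the grown message history; DefaultStreamTextResult receives the closure, the roundtrip limit, and the mutable promptMessages for exactly that purpose. To a consumer, each roundtrip becomes a distinct segment of fullStream, separated by the roundtrip-finish chunks introduced below. A sketch of how the chunk types can be observed:

for await (const chunk of result.fullStream) {
  switch (chunk.type) {
    case "text-delta":       // streamed text of the current roundtrip
    case "tool-call":        // tool call requested by the model
    case "tool-result":      // result of server-side tool execution
      break;
    case "roundtrip-finish": // one model call done; per-roundtrip usage
      console.log("roundtrip usage", chunk.usage);
      break;
    case "finish":           // whole stream done; combined usage
      console.log("total usage", chunk.usage);
      break;
  }
}
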
@@ -3310,7 +3398,10 @@ var DefaultStreamTextResult = class {
     rootSpan,
     doStreamSpan,
     telemetry,
-    startTimestamp
+    startTimestamp,
+    maxToolRoundtrips,
+    startRoundtrip,
+    promptMessages
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -3329,129 +3420,223 @@ var DefaultStreamTextResult = class {
       promise: providerMetadataPromise
     } = createResolvablePromise();
     this.experimental_providerMetadata = providerMetadataPromise;
-    let finishReason;
-    let usage;
-    let providerMetadata;
-    let text = "";
-    const toolCalls = [];
-    const toolResults = [];
-    let firstChunk = true;
-    this.originalStream = stream.pipeThrough(
-      new TransformStream({
-        async transform(chunk, controller) {
-          if (firstChunk) {
-            const msToFirstChunk = performance.now() - startTimestamp;
-            firstChunk = false;
-            doStreamSpan.addEvent("ai.stream.firstChunk", {
-              "ai.stream.msToFirstChunk": msToFirstChunk
-            });
-            doStreamSpan.setAttributes({
-              "ai.stream.msToFirstChunk": msToFirstChunk
-            });
-          }
-          if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
-            return;
-          }
-          controller.enqueue(chunk);
-          const chunkType = chunk.type;
-          switch (chunkType) {
-            case "text-delta":
-              text += chunk.textDelta;
-              await (onChunk == null ? void 0 : onChunk({ chunk }));
-              break;
-            case "tool-call":
-              toolCalls.push(chunk);
-              await (onChunk == null ? void 0 : onChunk({ chunk }));
-              break;
-            case "tool-result":
-              toolResults.push(chunk);
-              await (onChunk == null ? void 0 : onChunk({ chunk }));
-              break;
-            case "finish":
-              usage = chunk.usage;
-              finishReason = chunk.finishReason;
-              providerMetadata = chunk.experimental_providerMetadata;
-              resolveUsage(usage);
-              resolveFinishReason(finishReason);
-              resolveText(text);
-              resolveToolCalls(toolCalls);
-              resolveProviderMetadata(providerMetadata);
-              break;
-            case "tool-call-streaming-start":
-            case "tool-call-delta": {
-              await (onChunk == null ? void 0 : onChunk({ chunk }));
-              break;
-            }
-            case "error":
-              break;
-            default: {
-              const exhaustiveCheck = chunkType;
-              throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
-            }
-          }
-        },
-        // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
-        async flush(controller) {
-          try {
-            const finalUsage = usage != null ? usage : {
-              promptTokens: NaN,
-              completionTokens: NaN,
-              totalTokens: NaN
-            };
-            const finalFinishReason = finishReason != null ? finishReason : "unknown";
-            const telemetryToolCalls = toolCalls.length > 0 ? JSON.stringify(toolCalls) : void 0;
-            doStreamSpan.setAttributes(
-              selectTelemetryAttributes({
-                telemetry,
-                attributes: {
-                  "ai.finishReason": finalFinishReason,
-                  "ai.usage.promptTokens": finalUsage.promptTokens,
-                  "ai.usage.completionTokens": finalUsage.completionTokens,
-                  "ai.result.text": { output: () => text },
-                  "ai.result.toolCalls": { output: () => telemetryToolCalls },
-                  // standardized gen-ai llm span attributes:
-                  "gen_ai.response.finish_reasons": [finalFinishReason],
-                  "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
-                  "gen_ai.usage.completion_tokens": finalUsage.completionTokens
+    const {
+      stream: stitchableStream,
+      addStream,
+      close: closeStitchableStream
+    } = createStitchableStream();
+    this.originalStream = stitchableStream;
+    const self = this;
+    function addRoundtripStream({
+      stream: stream2,
+      startTimestamp: startTimestamp2,
+      doStreamSpan: doStreamSpan2,
+      currentToolRoundtrip,
+      promptMessages: promptMessages2,
+      usage = {
+        promptTokens: 0,
+        completionTokens: 0,
+        totalTokens: 0
+      }
+    }) {
+      const roundtripToolCalls = [];
+      const roundtripToolResults = [];
+      let roundtripFinishReason = "unknown";
+      let roundtripUsage = {
+        promptTokens: 0,
+        completionTokens: 0,
+        totalTokens: 0
+      };
+      let roundtripProviderMetadata;
+      let roundtripFirstChunk = true;
+      let roundtripText = "";
+      let roundtripLogProbs;
+      addStream(
+        stream2.pipeThrough(
+          new TransformStream({
+            async transform(chunk, controller) {
+              if (roundtripFirstChunk) {
+                const msToFirstChunk = performance.now() - startTimestamp2;
+                roundtripFirstChunk = false;
+                doStreamSpan2.addEvent("ai.stream.firstChunk", {
+                  "ai.stream.msToFirstChunk": msToFirstChunk
+                });
+                doStreamSpan2.setAttributes({
+                  "ai.stream.msToFirstChunk": msToFirstChunk
+                });
+              }
+              if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
+                return;
+              }
+              const chunkType = chunk.type;
+              switch (chunkType) {
+                case "text-delta":
+                  controller.enqueue(chunk);
+                  roundtripText += chunk.textDelta;
+                  await (onChunk == null ? void 0 : onChunk({ chunk }));
+                  break;
+                case "tool-call":
+                  controller.enqueue(chunk);
+                  roundtripToolCalls.push(chunk);
+                  await (onChunk == null ? void 0 : onChunk({ chunk }));
+                  break;
+                case "tool-result":
+                  controller.enqueue(chunk);
+                  roundtripToolResults.push(chunk);
+                  await (onChunk == null ? void 0 : onChunk({ chunk }));
+                  break;
+                case "finish":
+                  roundtripUsage = chunk.usage;
+                  roundtripFinishReason = chunk.finishReason;
+                  roundtripProviderMetadata = chunk.experimental_providerMetadata;
+                  roundtripLogProbs = chunk.logprobs;
+                  controller.enqueue({
+                    type: "roundtrip-finish",
+                    finishReason: chunk.finishReason,
+                    usage: chunk.usage,
+                    experimental_providerMetadata: chunk.experimental_providerMetadata,
+                    logprobs: chunk.logprobs
+                  });
+                  break;
+                case "tool-call-streaming-start":
+                case "tool-call-delta": {
+                  controller.enqueue(chunk);
+                  await (onChunk == null ? void 0 : onChunk({ chunk }));
+                  break;
                 }
-              })
-            );
-            doStreamSpan.end();
-            rootSpan.setAttributes(
-              selectTelemetryAttributes({
-                telemetry,
-                attributes: {
-                  "ai.finishReason": finalFinishReason,
-                  "ai.usage.promptTokens": finalUsage.promptTokens,
-                  "ai.usage.completionTokens": finalUsage.completionTokens,
-                  "ai.result.text": { output: () => text },
-                  "ai.result.toolCalls": { output: () => telemetryToolCalls }
+                case "error":
+                  controller.enqueue(chunk);
+                  roundtripFinishReason = "error";
+                  break;
+                default: {
+                  const exhaustiveCheck = chunkType;
+                  throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
                 }
-              })
-            );
-            resolveToolResults(toolResults);
-            await (onFinish == null ? void 0 : onFinish({
-              finishReason: finalFinishReason,
-              usage: finalUsage,
-              text,
-              toolCalls,
-              // The tool results are inferred as a never[] type, because they are
-              // optional and the execute method with an inferred result type is
-              // optional as well. Therefore we need to cast the toolResults to any.
-              // The type exposed to the users will be correctly inferred.
-              toolResults,
-              rawResponse,
-              warnings,
-              experimental_providerMetadata: providerMetadata
-            }));
-          } catch (error) {
-            controller.error(error);
-          } finally {
-            rootSpan.end();
-          }
-        }
-      })
-    );
+              }
+            },
+            // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+            async flush(controller) {
+              const telemetryToolCalls = roundtripToolCalls.length > 0 ? JSON.stringify(roundtripToolCalls) : void 0;
+              try {
+                doStreamSpan2.setAttributes(
+                  selectTelemetryAttributes({
+                    telemetry,
+                    attributes: {
+                      "ai.finishReason": roundtripFinishReason,
+                      "ai.usage.promptTokens": roundtripUsage.promptTokens,
+                      "ai.usage.completionTokens": roundtripUsage.completionTokens,
+                      "ai.result.text": { output: () => roundtripText },
+                      "ai.result.toolCalls": {
+                        output: () => telemetryToolCalls
+                      },
+                      // standardized gen-ai llm span attributes:
+                      "gen_ai.response.finish_reasons": [roundtripFinishReason],
+                      "gen_ai.usage.prompt_tokens": roundtripUsage.promptTokens,
+                      "gen_ai.usage.completion_tokens": roundtripUsage.completionTokens
+                    }
+                  })
+                );
+              } catch (error) {
+              } finally {
+                doStreamSpan2.end();
+              }
+              const combinedUsage = {
+                promptTokens: usage.promptTokens + roundtripUsage.promptTokens,
+                completionTokens: usage.completionTokens + roundtripUsage.completionTokens,
+                totalTokens: usage.totalTokens + roundtripUsage.totalTokens
+              };
+              if (
+                // there are tool calls:
+                roundtripToolCalls.length > 0 && // all current tool calls have results:
+                roundtripToolResults.length === roundtripToolCalls.length && // the number of roundtrips is less than the maximum:
+                currentToolRoundtrip < maxToolRoundtrips
+              ) {
+                promptMessages2.push(
+                  ...toResponseMessages({
+                    text: roundtripText,
+                    toolCalls: roundtripToolCalls,
+                    toolResults: roundtripToolResults
+                  }).map(
+                    (message) => convertToLanguageModelMessage(message, null)
+                  )
+                );
+                const { result, doStreamSpan: doStreamSpan3, startTimestamp: startTimestamp3 } = await startRoundtrip({
+                  promptType: "messages",
+                  promptMessages: promptMessages2
+                });
+                self.warnings = result.warnings;
+                self.rawResponse = result.rawResponse;
+                addRoundtripStream({
+                  stream: result.stream,
+                  startTimestamp: startTimestamp3,
+                  doStreamSpan: doStreamSpan3,
+                  currentToolRoundtrip: currentToolRoundtrip + 1,
+                  promptMessages: promptMessages2,
+                  usage: combinedUsage
+                });
+                return;
+              }
+              try {
+                controller.enqueue({
+                  type: "finish",
+                  finishReason: roundtripFinishReason,
+                  usage: combinedUsage,
+                  experimental_providerMetadata: roundtripProviderMetadata,
+                  logprobs: roundtripLogProbs
+                });
+                closeStitchableStream();
+                rootSpan.setAttributes(
+                  selectTelemetryAttributes({
+                    telemetry,
+                    attributes: {
+                      "ai.finishReason": roundtripFinishReason,
+                      "ai.usage.promptTokens": combinedUsage.promptTokens,
+                      "ai.usage.completionTokens": combinedUsage.completionTokens,
+                      "ai.result.text": { output: () => roundtripText },
+                      "ai.result.toolCalls": {
+                        output: () => telemetryToolCalls
+                      }
+                    }
+                  })
+                );
+                resolveUsage(combinedUsage);
+                resolveFinishReason(roundtripFinishReason);
+                resolveText(roundtripText);
+                resolveToolCalls(roundtripToolCalls);
+                resolveProviderMetadata(roundtripProviderMetadata);
+                resolveToolResults(roundtripToolResults);
+                await (onFinish == null ? void 0 : onFinish({
+                  finishReason: roundtripFinishReason,
+                  usage: combinedUsage,
+                  text: roundtripText,
+                  toolCalls: roundtripToolCalls,
+                  // The tool results are inferred as a never[] type, because they are
+                  // optional and the execute method with an inferred result type is
+                  // optional as well. Therefore we need to cast the toolResults to any.
+                  // The type exposed to the users will be correctly inferred.
+                  toolResults: roundtripToolResults,
+                  rawResponse,
+                  warnings,
+                  experimental_providerMetadata: roundtripProviderMetadata
+                }));
+              } catch (error) {
+                controller.error(error);
+              } finally {
+                rootSpan.end();
+              }
+            }
+          })
+        )
+      );
+    }
+    addRoundtripStream({
+      stream,
+      startTimestamp,
+      doStreamSpan,
+      currentToolRoundtrip: 0,
+      promptMessages,
+      usage: void 0
+    });
   }
   /**
   Split out a new stream from the original stream.
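
Usage accounting is cumulative: each recursive addRoundtripStream call passes the running total in through its usage parameter, and combinedUsage adds the current roundtrip on top before either starting the next roundtrip or emitting the final finish chunk. With invented numbers:

// roundtrip 1 usage: { promptTokens: 100, completionTokens: 20, totalTokens: 120 }
// roundtrip 2 usage: { promptTokens: 150, completionTokens: 30, totalTokens: 180 }
// reported onFinish: { promptTokens: 250, completionTokens: 50, totalTokens: 300 }
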
@@ -3561,6 +3746,17 @@ var DefaultStreamTextResult = class {
             formatStreamPart("error", getErrorMessage4(chunk.error))
           );
           break;
+        case "roundtrip-finish":
+          controller.enqueue(
+            formatStreamPart("finish_roundtrip", {
+              finishReason: chunk.finishReason,
+              usage: {
+                promptTokens: chunk.usage.promptTokens,
+                completionTokens: chunk.usage.completionTokens
+              }
+            })
+          );
+          break;
         case "finish":
           controller.enqueue(
             formatStreamPart("finish_message", {
@@ -3662,7 +3858,6 @@ var DefaultStreamTextResult = class {
     });
   }
 };
-var experimental_streamText = streamText;
 
 // core/prompt/attachments-to-parts.ts
 function attachmentsToParts(attachments) {
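
The experimental_streamText alias is dropped here (experimental_generateText was dropped in the hunk at 2883 above), and both names disappear from the export list below. Migration is a straight rename, assuming nothing else at the call site depends on the experimental names:

// before
import { experimental_streamText } from "ai";

// after
import { streamText } from "ai";
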
@@ -5110,17 +5305,15 @@ export {
   experimental_createProviderRegistry,
   experimental_customProvider,
   experimental_generateObject,
-  experimental_generateText,
   experimental_streamObject,
-  experimental_streamText,
   formatStreamPart,
   generateId2 as generateId,
   generateObject,
   generateText,
   jsonSchema,
   nanoid,
-  parseComplexResponse,
   parseStreamPart,
+  processDataProtocolResponse,
   readDataStream,
   readableFromAsyncIterable,
   streamObject,