ai 3.3.19 → 3.3.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -73,17 +73,15 @@ __export(streams_exports, {
  experimental_createProviderRegistry: () => experimental_createProviderRegistry,
  experimental_customProvider: () => experimental_customProvider,
  experimental_generateObject: () => experimental_generateObject,
- experimental_generateText: () => experimental_generateText,
  experimental_streamObject: () => experimental_streamObject,
- experimental_streamText: () => experimental_streamText,
  formatStreamPart: () => import_ui_utils10.formatStreamPart,
  generateId: () => generateId2,
  generateObject: () => generateObject,
  generateText: () => generateText,
  jsonSchema: () => import_ui_utils6.jsonSchema,
  nanoid: () => nanoid,
- parseComplexResponse: () => import_ui_utils10.parseComplexResponse,
  parseStreamPart: () => import_ui_utils10.parseStreamPart,
+ processDataProtocolResponse: () => import_ui_utils10.processDataProtocolResponse,
  readDataStream: () => import_ui_utils10.readDataStream,
  readableFromAsyncIterable: () => readableFromAsyncIterable,
  streamObject: () => streamObject,
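This first hunk removes the deprecated experimental_generateText and experimental_streamText aliases and replaces the parseComplexResponse re-export with processDataProtocolResponse. A minimal migration sketch for code that still imports the removed aliases (CommonJS, matching this dist build):

// before (ai <= 3.3.19): deprecated aliases, removed in this release
// const { experimental_generateText, experimental_streamText } = require("ai");

// after: use the stable names the aliases already pointed to
const { generateText, streamText } = require("ai");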
@@ -2551,6 +2549,31 @@ function prepareToolsAndToolChoice({
  };
  }

+ // core/generate-text/to-response-messages.ts
+ function toResponseMessages({
+ text = "",
+ toolCalls,
+ toolResults
+ }) {
+ const responseMessages = [];
+ responseMessages.push({
+ role: "assistant",
+ content: [{ type: "text", text }, ...toolCalls]
+ });
+ if (toolResults.length > 0) {
+ responseMessages.push({
+ role: "tool",
+ content: toolResults.map((result) => ({
+ type: "tool-result",
+ toolCallId: result.toolCallId,
+ toolName: result.toolName,
+ result: result.result
+ }))
+ });
+ }
+ return responseMessages;
+ }
+
  // core/generate-text/tool-call.ts
  var import_provider_utils7 = require("@ai-sdk/provider-utils");
  var import_ui_utils4 = require("@ai-sdk/ui-utils");
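toResponseMessages moves into its own module (core/generate-text/to-response-messages.ts) so streamText can reuse it, and text now defaults to "" inside the function rather than at each call site. A sketch of its behavior with hypothetical tool-call values (shapes follow the function above):

const messages = toResponseMessages({
  toolCalls: [
    { type: "tool-call", toolCallId: "call-1", toolName: "weather", args: { city: "Berlin" } }
  ],
  toolResults: [
    { toolCallId: "call-1", toolName: "weather", result: { tempC: 18 } }
  ]
});
// messages[0] -> { role: "assistant", content: [{ type: "text", text: "" }, { type: "tool-call", ... }] }
// messages[1] -> { role: "tool", content: [{ type: "tool-result", toolCallId: "call-1", toolName: "weather", result: { tempC: 18 } }] }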
@@ -2719,7 +2742,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a12, _b, _c, _d;
+ var _a12, _b, _c;
  const retry = retryWithExponentialBackoff({ maxRetries });
  const validatedPrompt = validatePrompt({
  system,
@@ -2830,7 +2853,7 @@ async function generateText({
  logprobs: currentModelResponse.logprobs
  });
  const newResponseMessages = toResponseMessages({
- text: (_c = currentModelResponse.text) != null ? _c : "",
+ text: currentModelResponse.text,
  toolCalls: currentToolCalls,
  toolResults: currentToolResults
  });
@@ -2866,7 +2889,7 @@ async function generateText({
  // Always return a string so that the caller doesn't have to check for undefined.
  // If they need to check if the model did not return any text,
  // they can check the length of the string:
- text: (_d = currentModelResponse.text) != null ? _d : "",
+ text: (_c = currentModelResponse.text) != null ? _c : "",
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
  finishReason: currentModelResponse.finishReason,
@@ -2955,30 +2978,68 @@ var DefaultGenerateTextResult = class {
  this.experimental_providerMetadata = options.providerMetadata;
  }
  };
- function toResponseMessages({
- text,
- toolCalls,
- toolResults
- }) {
- const responseMessages = [];
- responseMessages.push({
- role: "assistant",
- content: [{ type: "text", text }, ...toolCalls]
- });
- if (toolResults.length > 0) {
- responseMessages.push({
- role: "tool",
- content: toolResults.map((result) => ({
- type: "tool-result",
- toolCallId: result.toolCallId,
- toolName: result.toolName,
- result: result.result
- }))
- });
- }
- return responseMessages;
+
+ // core/util/create-stitchable-stream.ts
+ function createStitchableStream() {
+ let innerStreamReaders = [];
+ let controller = null;
+ let isClosed = false;
+ const processPull = async () => {
+ if (isClosed && innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ return;
+ }
+ if (innerStreamReaders.length === 0) {
+ return;
+ }
+ try {
+ const { value, done } = await innerStreamReaders[0].read();
+ if (done) {
+ innerStreamReaders.shift();
+ if (innerStreamReaders.length > 0) {
+ await processPull();
+ } else if (isClosed) {
+ controller == null ? void 0 : controller.close();
+ }
+ } else {
+ controller == null ? void 0 : controller.enqueue(value);
+ }
+ } catch (error) {
+ controller == null ? void 0 : controller.error(error);
+ innerStreamReaders.shift();
+ if (isClosed && innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ }
+ }
+ };
+ return {
+ stream: new ReadableStream({
+ start(controllerParam) {
+ controller = controllerParam;
+ },
+ pull: processPull,
+ async cancel() {
+ for (const reader of innerStreamReaders) {
+ await reader.cancel();
+ }
+ innerStreamReaders = [];
+ isClosed = true;
+ }
+ }),
+ addStream: (innerStream) => {
+ if (isClosed) {
+ throw new Error("Cannot add inner stream: outer stream is closed");
+ }
+ innerStreamReaders.push(innerStream.getReader());
+ },
+ close: () => {
+ isClosed = true;
+ if (innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ }
+ }
+ };
  }
- var experimental_generateText = generateText;

  // core/util/merge-streams.ts
  function mergeStreams(stream1, stream2) {
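createStitchableStream is a new internal utility: it exposes one outer ReadableStream plus addStream/close controls, pulls inner streams in insertion order, and ends the outer stream once close() has been called and the last inner stream drains. It is what lets streamText chain one roundtrip's stream after another. A standalone sketch of the contract (using Response bodies as convenient ReadableStreams, Node >= 18):

const { stream, addStream, close } = createStitchableStream();
addStream(new Response("first ").body);
addStream(new Response("second").body);
close(); // no further inner streams; the outer stream ends after both drain

(async () => {
  // chunks arrive in insertion order: "first " then "second"
  const reader = stream.getReader();
  for (let r = await reader.read(); !r.done; r = await reader.read()) {
    process.stdout.write(r.value);
  }
})();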
@@ -3267,6 +3328,7 @@ async function streamText({
  maxRetries,
  abortSignal,
  headers,
+ maxToolRoundtrips = 0,
  experimental_telemetry: telemetry,
  experimental_toolCallStreaming: toolCallStreaming = false,
  onChunk,
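This is the headline change: streamText gains a maxToolRoundtrips option (default 0, which preserves the old single-call behavior). When a roundtrip finishes with tool calls that all have results, the SDK appends them to the prompt and streams a follow-up model call automatically. A usage sketch, assuming the @ai-sdk/openai provider and a hypothetical weather tool:

const { streamText } = require("ai");
const { openai } = require("@ai-sdk/openai");
const { z } = require("zod");

async function main() {
  const result = await streamText({
    model: openai("gpt-4o-mini"), // model id is an assumption for this sketch
    maxToolRoundtrips: 2, // allow up to 2 automatic follow-up calls after tool results
    tools: {
      weather: {
        description: "Get the weather for a city",
        parameters: z.object({ city: z.string() }),
        execute: async ({ city }) => ({ city, tempC: 18 }), // stubbed result
      },
    },
    prompt: "What is the weather in Berlin?",
  });
  // the text stream spans all roundtrips, including the answer after the tool result
  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }
}

main();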
@@ -3298,68 +3360,89 @@ async function streamText({
  endWhenDone: false,
  fn: async (rootSpan) => {
  const retry = retryWithExponentialBackoff({ maxRetries });
- const validatedPrompt = validatePrompt({ system, prompt, messages });
+ const startRoundtrip = async ({
+ promptMessages: promptMessages2,
+ promptType
+ }) => {
+ const {
+ result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2 },
+ doStreamSpan: doStreamSpan2,
+ startTimestamp: startTimestamp2
+ } = await retry(
+ () => recordSpan({
+ name: "ai.streamText.doStream",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.streamText.doStream",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ "ai.prompt.format": {
+ input: () => promptType
+ },
+ "ai.prompt.messages": {
+ input: () => JSON.stringify(promptMessages2)
+ },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.system": model.provider,
+ "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_p": settings.topP
+ }
+ }),
+ tracer,
+ endWhenDone: false,
+ fn: async (doStreamSpan3) => ({
+ startTimestamp: performance.now(),
+ // get before the call
+ doStreamSpan: doStreamSpan3,
+ result: await model.doStream({
+ mode: {
+ type: "regular",
+ ...prepareToolsAndToolChoice({ tools, toolChoice })
+ },
+ ...prepareCallSettings(settings),
+ inputFormat: promptType,
+ prompt: promptMessages2,
+ abortSignal,
+ headers
+ })
+ })
+ })
+ );
+ return {
+ result: {
+ stream: runToolsTransformation({
+ tools,
+ generatorStream: stream2,
+ toolCallStreaming,
+ tracer,
+ telemetry
+ }),
+ warnings: warnings2,
+ rawResponse: rawResponse2
+ },
+ doStreamSpan: doStreamSpan2,
+ startTimestamp: startTimestamp2
+ };
+ };
  const promptMessages = await convertToLanguageModelPrompt({
- prompt: validatedPrompt,
+ prompt: validatePrompt({ system, prompt, messages }),
  modelSupportsImageUrls: model.supportsImageUrls
  });
  const {
  result: { stream, warnings, rawResponse },
  doStreamSpan,
  startTimestamp
- } = await retry(
- () => recordSpan({
- name: "ai.streamText.doStream",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.streamText.doStream",
- telemetry
- }),
- ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => validatedPrompt.type
- },
- "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages)
- },
- // standardized gen-ai llm span attributes:
- "gen_ai.request.model": model.modelId,
- "gen_ai.system": model.provider,
- "gen_ai.request.max_tokens": settings.maxTokens,
- "gen_ai.request.temperature": settings.temperature,
- "gen_ai.request.top_p": settings.topP
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (doStreamSpan2) => ({
- startTimestamp: performance.now(),
- // get before the call
- doStreamSpan: doStreamSpan2,
- result: await model.doStream({
- mode: {
- type: "regular",
- ...prepareToolsAndToolChoice({ tools, toolChoice })
- },
- ...prepareCallSettings(settings),
- inputFormat: validatedPrompt.type,
- prompt: promptMessages,
- abortSignal,
- headers
- })
- })
- })
- );
+ } = await startRoundtrip({
+ promptType: validatePrompt({ system, prompt, messages }).type,
+ promptMessages
+ });
  return new DefaultStreamTextResult({
- stream: runToolsTransformation({
- tools,
- generatorStream: stream,
- toolCallStreaming,
- tracer,
- telemetry
- }),
+ stream,
  warnings,
  rawResponse,
  onChunk,
@@ -3367,7 +3450,10 @@ async function streamText({
  rootSpan,
  doStreamSpan,
  telemetry,
- startTimestamp
+ startTimestamp,
+ maxToolRoundtrips,
+ startRoundtrip,
+ promptMessages
  });
  }
  });
@@ -3382,7 +3468,10 @@ var DefaultStreamTextResult = class {
  rootSpan,
  doStreamSpan,
  telemetry,
- startTimestamp
+ startTimestamp,
+ maxToolRoundtrips,
+ startRoundtrip,
+ promptMessages
  }) {
  this.warnings = warnings;
  this.rawResponse = rawResponse;
@@ -3401,129 +3490,223 @@ var DefaultStreamTextResult = class {
  promise: providerMetadataPromise
  } = createResolvablePromise();
  this.experimental_providerMetadata = providerMetadataPromise;
- let finishReason;
- let usage;
- let providerMetadata;
- let text = "";
- const toolCalls = [];
- const toolResults = [];
- let firstChunk = true;
- this.originalStream = stream.pipeThrough(
- new TransformStream({
- async transform(chunk, controller) {
- if (firstChunk) {
- const msToFirstChunk = performance.now() - startTimestamp;
- firstChunk = false;
- doStreamSpan.addEvent("ai.stream.firstChunk", {
- "ai.stream.msToFirstChunk": msToFirstChunk
- });
- doStreamSpan.setAttributes({
- "ai.stream.msToFirstChunk": msToFirstChunk
- });
- }
- if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
- return;
- }
- controller.enqueue(chunk);
- const chunkType = chunk.type;
- switch (chunkType) {
- case "text-delta":
- text += chunk.textDelta;
- await (onChunk == null ? void 0 : onChunk({ chunk }));
- break;
- case "tool-call":
- toolCalls.push(chunk);
- await (onChunk == null ? void 0 : onChunk({ chunk }));
- break;
- case "tool-result":
- toolResults.push(chunk);
- await (onChunk == null ? void 0 : onChunk({ chunk }));
- break;
- case "finish":
- usage = chunk.usage;
- finishReason = chunk.finishReason;
- providerMetadata = chunk.experimental_providerMetadata;
- resolveUsage(usage);
- resolveFinishReason(finishReason);
- resolveText(text);
- resolveToolCalls(toolCalls);
- resolveProviderMetadata(providerMetadata);
- break;
- case "tool-call-streaming-start":
- case "tool-call-delta": {
- await (onChunk == null ? void 0 : onChunk({ chunk }));
- break;
- }
- case "error":
- break;
- default: {
- const exhaustiveCheck = chunkType;
- throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
- }
- }
- },
- // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
- async flush(controller) {
- try {
- const finalUsage = usage != null ? usage : {
- promptTokens: NaN,
- completionTokens: NaN,
- totalTokens: NaN
- };
- const finalFinishReason = finishReason != null ? finishReason : "unknown";
- const telemetryToolCalls = toolCalls.length > 0 ? JSON.stringify(toolCalls) : void 0;
- doStreamSpan.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.finishReason": finalFinishReason,
- "ai.usage.promptTokens": finalUsage.promptTokens,
- "ai.usage.completionTokens": finalUsage.completionTokens,
- "ai.result.text": { output: () => text },
- "ai.result.toolCalls": { output: () => telemetryToolCalls },
- // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [finalFinishReason],
- "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
- "gen_ai.usage.completion_tokens": finalUsage.completionTokens
+ const {
+ stream: stitchableStream,
+ addStream,
+ close: closeStitchableStream
+ } = createStitchableStream();
+ this.originalStream = stitchableStream;
+ const self = this;
+ function addRoundtripStream({
+ stream: stream2,
+ startTimestamp: startTimestamp2,
+ doStreamSpan: doStreamSpan2,
+ currentToolRoundtrip,
+ promptMessages: promptMessages2,
+ usage = {
+ promptTokens: 0,
+ completionTokens: 0,
+ totalTokens: 0
+ }
+ }) {
+ const roundtripToolCalls = [];
+ const roundtripToolResults = [];
+ let roundtripFinishReason = "unknown";
+ let roundtripUsage = {
+ promptTokens: 0,
+ completionTokens: 0,
+ totalTokens: 0
+ };
+ let roundtripProviderMetadata;
+ let roundtripFirstChunk = true;
+ let roundtripText = "";
+ let roundtripLogProbs;
+ addStream(
+ stream2.pipeThrough(
+ new TransformStream({
+ async transform(chunk, controller) {
+ if (roundtripFirstChunk) {
+ const msToFirstChunk = performance.now() - startTimestamp2;
+ roundtripFirstChunk = false;
+ doStreamSpan2.addEvent("ai.stream.firstChunk", {
+ "ai.stream.msToFirstChunk": msToFirstChunk
+ });
+ doStreamSpan2.setAttributes({
+ "ai.stream.msToFirstChunk": msToFirstChunk
+ });
+ }
+ if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
+ return;
+ }
+ const chunkType = chunk.type;
+ switch (chunkType) {
+ case "text-delta":
+ controller.enqueue(chunk);
+ roundtripText += chunk.textDelta;
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
+ break;
+ case "tool-call":
+ controller.enqueue(chunk);
+ roundtripToolCalls.push(chunk);
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
+ break;
+ case "tool-result":
+ controller.enqueue(chunk);
+ roundtripToolResults.push(chunk);
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
+ break;
+ case "finish":
+ roundtripUsage = chunk.usage;
+ roundtripFinishReason = chunk.finishReason;
+ roundtripProviderMetadata = chunk.experimental_providerMetadata;
+ roundtripLogProbs = chunk.logprobs;
+ controller.enqueue({
+ type: "roundtrip-finish",
+ finishReason: chunk.finishReason,
+ usage: chunk.usage,
+ experimental_providerMetadata: chunk.experimental_providerMetadata,
+ logprobs: chunk.logprobs
+ });
+ break;
+ case "tool-call-streaming-start":
+ case "tool-call-delta": {
+ controller.enqueue(chunk);
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
+ break;
  }
- })
- );
- doStreamSpan.end();
- rootSpan.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.finishReason": finalFinishReason,
- "ai.usage.promptTokens": finalUsage.promptTokens,
- "ai.usage.completionTokens": finalUsage.completionTokens,
- "ai.result.text": { output: () => text },
- "ai.result.toolCalls": { output: () => telemetryToolCalls }
+ case "error":
+ controller.enqueue(chunk);
+ roundtripFinishReason = "error";
+ break;
+ default: {
+ const exhaustiveCheck = chunkType;
+ throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
  }
- })
- );
- resolveToolResults(toolResults);
- await (onFinish == null ? void 0 : onFinish({
- finishReason: finalFinishReason,
- usage: finalUsage,
- text,
- toolCalls,
- // The tool results are inferred as a never[] type, because they are
- // optional and the execute method with an inferred result type is
- // optional as well. Therefore we need to cast the toolResults to any.
- // The type exposed to the users will be correctly inferred.
- toolResults,
- rawResponse,
- warnings,
- experimental_providerMetadata: providerMetadata
- }));
- } catch (error) {
- controller.error(error);
- } finally {
- rootSpan.end();
- }
- }
- })
- );
+ }
+ },
+ // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+ async flush(controller) {
+ const telemetryToolCalls = roundtripToolCalls.length > 0 ? JSON.stringify(roundtripToolCalls) : void 0;
+ try {
+ doStreamSpan2.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.finishReason": roundtripFinishReason,
+ "ai.usage.promptTokens": roundtripUsage.promptTokens,
+ "ai.usage.completionTokens": roundtripUsage.completionTokens,
+ "ai.result.text": { output: () => roundtripText },
+ "ai.result.toolCalls": {
+ output: () => telemetryToolCalls
+ },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [roundtripFinishReason],
+ "gen_ai.usage.prompt_tokens": roundtripUsage.promptTokens,
+ "gen_ai.usage.completion_tokens": roundtripUsage.completionTokens
+ }
+ })
+ );
+ } catch (error) {
+ } finally {
+ doStreamSpan2.end();
+ }
+ const combinedUsage = {
+ promptTokens: usage.promptTokens + roundtripUsage.promptTokens,
+ completionTokens: usage.completionTokens + roundtripUsage.completionTokens,
+ totalTokens: usage.totalTokens + roundtripUsage.totalTokens
+ };
+ if (
+ // there are tool calls:
+ roundtripToolCalls.length > 0 && // all current tool calls have results:
+ roundtripToolResults.length === roundtripToolCalls.length && // the number of roundtrips is less than the maximum:
+ currentToolRoundtrip < maxToolRoundtrips
+ ) {
+ promptMessages2.push(
+ ...toResponseMessages({
+ text: roundtripText,
+ toolCalls: roundtripToolCalls,
+ toolResults: roundtripToolResults
+ }).map(
+ (message) => convertToLanguageModelMessage(message, null)
+ )
+ );
+ const { result, doStreamSpan: doStreamSpan3, startTimestamp: startTimestamp3 } = await startRoundtrip({
+ promptType: "messages",
+ promptMessages: promptMessages2
+ });
+ self.warnings = result.warnings;
+ self.rawResponse = result.rawResponse;
+ addRoundtripStream({
+ stream: result.stream,
+ startTimestamp: startTimestamp3,
+ doStreamSpan: doStreamSpan3,
+ currentToolRoundtrip: currentToolRoundtrip + 1,
+ promptMessages: promptMessages2,
+ usage: combinedUsage
+ });
+ return;
+ }
+ try {
+ controller.enqueue({
+ type: "finish",
+ finishReason: roundtripFinishReason,
+ usage: combinedUsage,
+ experimental_providerMetadata: roundtripProviderMetadata,
+ logprobs: roundtripLogProbs
+ });
+ closeStitchableStream();
+ rootSpan.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.finishReason": roundtripFinishReason,
+ "ai.usage.promptTokens": combinedUsage.promptTokens,
+ "ai.usage.completionTokens": combinedUsage.completionTokens,
+ "ai.result.text": { output: () => roundtripText },
+ "ai.result.toolCalls": {
+ output: () => telemetryToolCalls
+ }
+ }
+ })
+ );
+ resolveUsage(combinedUsage);
+ resolveFinishReason(roundtripFinishReason);
+ resolveText(roundtripText);
+ resolveToolCalls(roundtripToolCalls);
+ resolveProviderMetadata(roundtripProviderMetadata);
+ resolveToolResults(roundtripToolResults);
+ await (onFinish == null ? void 0 : onFinish({
+ finishReason: roundtripFinishReason,
+ usage: combinedUsage,
+ text: roundtripText,
+ toolCalls: roundtripToolCalls,
+ // The tool results are inferred as a never[] type, because they are
+ // optional and the execute method with an inferred result type is
+ // optional as well. Therefore we need to cast the toolResults to any.
+ // The type exposed to the users will be correctly inferred.
+ toolResults: roundtripToolResults,
+ rawResponse,
+ warnings,
+ experimental_providerMetadata: roundtripProviderMetadata
+ }));
+ } catch (error) {
+ controller.error(error);
+ } finally {
+ rootSpan.end();
+ }
+ }
+ })
+ )
+ );
+ }
+ addRoundtripStream({
+ stream,
+ startTimestamp,
+ doStreamSpan,
+ currentToolRoundtrip: 0,
+ promptMessages,
+ usage: void 0
+ });
  }
  /**
  Split out a new stream from the original stream.
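The rewritten constructor replaces the single pipeThrough with a stitchable stream: each roundtrip's transformed stream is appended via addStream, its "finish" chunk is rewritten to "roundtrip-finish", and only the last roundtrip enqueues a real "finish" carrying the accumulated combinedUsage before closing the outer stream. From the consumer side (continuing the streamText sketch above, inside an async context), the full stream now looks roughly like this:

for await (const chunk of result.fullStream) {
  switch (chunk.type) {
    case "text-delta":
      process.stdout.write(chunk.textDelta);
      break;
    case "tool-call":
      console.log("\ncalling tool:", chunk.toolName);
      break;
    case "tool-result":
      console.log("tool result:", chunk.result);
      break;
    case "roundtrip-finish": // new: emitted at the end of every roundtrip
      console.log("roundtrip usage:", chunk.usage);
      break;
    case "finish": // emitted once, after the final roundtrip, with combined usage
      console.log("total usage:", chunk.usage);
      break;
  }
}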
@@ -3633,6 +3816,17 @@ var DefaultStreamTextResult = class {
  (0, import_ui_utils10.formatStreamPart)("error", getErrorMessage4(chunk.error))
  );
  break;
+ case "roundtrip-finish":
+ controller.enqueue(
+ (0, import_ui_utils10.formatStreamPart)("finish_roundtrip", {
+ finishReason: chunk.finishReason,
+ usage: {
+ promptTokens: chunk.usage.promptTokens,
+ completionTokens: chunk.usage.completionTokens
+ }
+ })
+ );
+ break;
  case "finish":
  controller.enqueue(
  (0, import_ui_utils10.formatStreamPart)("finish_message", {
@@ -3734,7 +3928,6 @@ var DefaultStreamTextResult = class {
  });
  }
  };
- var experimental_streamText = streamText;

  // core/prompt/attachments-to-parts.ts
  function attachmentsToParts(attachments) {
@@ -5164,17 +5357,15 @@ var nanoid = import_provider_utils8.generateId;
  experimental_createProviderRegistry,
  experimental_customProvider,
  experimental_generateObject,
- experimental_generateText,
  experimental_streamObject,
- experimental_streamText,
  formatStreamPart,
  generateId,
  generateObject,
  generateText,
  jsonSchema,
  nanoid,
- parseComplexResponse,
  parseStreamPart,
+ processDataProtocolResponse,
  readDataStream,
  readableFromAsyncIterable,
  streamObject,