ai 4.0.0-canary.10 → 4.0.0-canary.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -2338,21 +2338,6 @@ var DefaultGenerateObjectResult = class {
 var import_provider_utils6 = require("@ai-sdk/provider-utils");
 var import_ui_utils2 = require("@ai-sdk/ui-utils");
 
-// util/create-resolvable-promise.ts
-function createResolvablePromise() {
-  let resolve;
-  let reject;
-  const promise = new Promise((res, rej) => {
-    resolve = res;
-    reject = rej;
-  });
-  return {
-    promise,
-    resolve,
-    reject
-  };
-}
-
 // util/delayed-promise.ts
 var DelayedPromise = class {
   constructor() {
@@ -2444,9 +2429,91 @@ function writeToServerResponse({
   read();
 }
 
+// util/create-resolvable-promise.ts
+function createResolvablePromise() {
+  let resolve;
+  let reject;
+  const promise = new Promise((res, rej) => {
+    resolve = res;
+    reject = rej;
+  });
+  return {
+    promise,
+    resolve,
+    reject
+  };
+}
+
+// core/util/create-stitchable-stream.ts
+function createStitchableStream() {
+  let innerStreamReaders = [];
+  let controller = null;
+  let isClosed = false;
+  let waitForNewStream = createResolvablePromise();
+  const processPull = async () => {
+    if (isClosed && innerStreamReaders.length === 0) {
+      controller == null ? void 0 : controller.close();
+      return;
+    }
+    if (innerStreamReaders.length === 0) {
+      waitForNewStream = createResolvablePromise();
+      await waitForNewStream.promise;
+      return processPull();
+    }
+    try {
+      const { value, done } = await innerStreamReaders[0].read();
+      if (done) {
+        innerStreamReaders.shift();
+        if (innerStreamReaders.length > 0) {
+          await processPull();
+        } else if (isClosed) {
+          controller == null ? void 0 : controller.close();
+        }
+      } else {
+        controller == null ? void 0 : controller.enqueue(value);
+      }
+    } catch (error) {
+      controller == null ? void 0 : controller.error(error);
+      innerStreamReaders.shift();
+      if (isClosed && innerStreamReaders.length === 0) {
+        controller == null ? void 0 : controller.close();
+      }
+    }
+  };
+  return {
+    stream: new ReadableStream({
+      start(controllerParam) {
+        controller = controllerParam;
+      },
+      pull: processPull,
+      async cancel() {
+        for (const reader of innerStreamReaders) {
+          await reader.cancel();
+        }
+        innerStreamReaders = [];
+        isClosed = true;
+      }
+    }),
+    addStream: (innerStream) => {
+      if (isClosed) {
+        throw new Error("Cannot add inner stream: outer stream is closed");
+      }
+      innerStreamReaders.push(innerStream.getReader());
+      waitForNewStream.resolve();
+    },
+    close: () => {
+      isClosed = true;
+      waitForNewStream.resolve();
+      if (innerStreamReaders.length === 0) {
+        controller == null ? void 0 : controller.close();
+      }
+    }
+  };
+}
+
 // core/generate-object/stream-object.ts
 var originalGenerateId2 = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj", size: 24 });
-async function streamObject({
+function streamObject({
   model,
   schema: inputSchema,
   schemaName,
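
Note on the added `core/util/create-stitchable-stream.ts`: unlike the copy removed further down in this file, `processPull` now parks on a resolvable promise (`waitForNewStream`) when no inner stream is queued, so the outer stream can be consumed before any inner stream has been added. A minimal usage sketch (illustrative only, not part of the published code; the string chunk is an assumed placeholder):

const { stream, addStream, close } = createStitchableStream();

// A consumer may attach before any inner stream exists; reads simply
// wait until addStream() resolves waitForNewStream.
const reader = stream.getReader();

// Producer side: splice inner streams in as they become available.
addStream(
  new ReadableStream({
    start(controller) {
      controller.enqueue("chunk from step 1"); // assumed example chunk
      controller.close();
    }
  })
);

// After close(), the outer stream ends once all queued inner streams drain.
close();
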
@@ -2480,400 +2547,433 @@ async function streamObject({
   if (outputStrategy.type === "no-schema" && mode === void 0) {
     mode = "json";
   }
-  const baseTelemetryAttributes = getBaseTelemetryAttributes({
+  return new DefaultStreamObjectResult({
     model,
     telemetry,
     headers,
-    settings: { ...settings, maxRetries }
-  });
-  const tracer = getTracer(telemetry);
-  const retry = retryWithExponentialBackoff({ maxRetries });
-  return recordSpan({
-    name: "ai.streamObject",
-    attributes: selectTelemetryAttributes({
-      telemetry,
-      attributes: {
-        ...assembleOperationName({
-          operationId: "ai.streamObject",
-          telemetry
-        }),
-        ...baseTelemetryAttributes,
-        // specific settings that only make sense on the outer level:
-        "ai.prompt": {
-          input: () => JSON.stringify({ system, prompt, messages })
-        },
-        "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
-        "ai.schema.name": schemaName,
-        "ai.schema.description": schemaDescription,
-        "ai.settings.output": outputStrategy.type,
-        "ai.settings.mode": mode
-      }
-    }),
-    tracer,
-    endWhenDone: false,
-    fn: async (rootSpan) => {
-      if (mode === "auto" || mode == null) {
-        mode = model.defaultObjectGenerationMode;
-      }
-      let callOptions;
-      let transformer;
-      switch (mode) {
-        case "json": {
-          const standardizedPrompt = standardizePrompt({
-            prompt: {
-              system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
-                prompt: system,
-                schema: outputStrategy.jsonSchema
-              }),
-              prompt,
-              messages
-            },
-            tools: void 0
-          });
-          callOptions = {
-            mode: {
-              type: "object-json",
-              schema: outputStrategy.jsonSchema,
-              name: schemaName,
-              description: schemaDescription
-            },
-            ...prepareCallSettings(settings),
-            inputFormat: standardizedPrompt.type,
-            prompt: await convertToLanguageModelPrompt({
-              prompt: standardizedPrompt,
-              modelSupportsImageUrls: model.supportsImageUrls,
-              modelSupportsUrl: model.supportsUrl
-            }),
-            providerMetadata,
-            abortSignal,
-            headers
-          };
-          transformer = {
-            transform: (chunk, controller) => {
-              switch (chunk.type) {
-                case "text-delta":
-                  controller.enqueue(chunk.textDelta);
-                  break;
-                case "response-metadata":
-                case "finish":
-                case "error":
-                  controller.enqueue(chunk);
-                  break;
-              }
-            }
-          };
-          break;
-        }
-        case "tool": {
-          const standardizedPrompt = standardizePrompt({
-            prompt: { system, prompt, messages },
-            tools: void 0
-          });
-          callOptions = {
-            mode: {
-              type: "object-tool",
-              tool: {
-                type: "function",
-                name: schemaName != null ? schemaName : "json",
-                description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
-                parameters: outputStrategy.jsonSchema
-              }
-            },
-            ...prepareCallSettings(settings),
-            inputFormat: standardizedPrompt.type,
-            prompt: await convertToLanguageModelPrompt({
-              prompt: standardizedPrompt,
-              modelSupportsImageUrls: model.supportsImageUrls,
-              modelSupportsUrl: model.supportsUrl
-            }),
-            providerMetadata,
-            abortSignal,
-            headers
-          };
-          transformer = {
-            transform(chunk, controller) {
-              switch (chunk.type) {
-                case "tool-call-delta":
-                  controller.enqueue(chunk.argsTextDelta);
-                  break;
-                case "response-metadata":
-                case "finish":
-                case "error":
-                  controller.enqueue(chunk);
-                  break;
-              }
-            }
-          };
-          break;
-        }
-        case void 0: {
-          throw new Error(
-            "Model does not have a default object generation mode."
-          );
-        }
-        default: {
-          const _exhaustiveCheck = mode;
-          throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
-        }
-      }
-      const {
-        result: { stream, warnings, rawResponse, request },
-        doStreamSpan,
-        startTimestampMs
-      } = await retry(
-        () => recordSpan({
-          name: "ai.streamObject.doStream",
-          attributes: selectTelemetryAttributes({
-            telemetry,
-            attributes: {
-              ...assembleOperationName({
-                operationId: "ai.streamObject.doStream",
-                telemetry
-              }),
-              ...baseTelemetryAttributes,
-              "ai.prompt.format": {
-                input: () => callOptions.inputFormat
-              },
-              "ai.prompt.messages": {
-                input: () => JSON.stringify(callOptions.prompt)
-              },
-              "ai.settings.mode": mode,
-              // standardized gen-ai llm span attributes:
-              "gen_ai.system": model.provider,
-              "gen_ai.request.model": model.modelId,
-              "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-              "gen_ai.request.max_tokens": settings.maxTokens,
-              "gen_ai.request.presence_penalty": settings.presencePenalty,
-              "gen_ai.request.temperature": settings.temperature,
-              "gen_ai.request.top_k": settings.topK,
-              "gen_ai.request.top_p": settings.topP
-            }
-          }),
-          tracer,
-          endWhenDone: false,
-          fn: async (doStreamSpan2) => ({
-            startTimestampMs: now2(),
-            doStreamSpan: doStreamSpan2,
-            result: await model.doStream(callOptions)
-          })
-        })
-      );
-      return new DefaultStreamObjectResult({
-        outputStrategy,
-        stream: stream.pipeThrough(new TransformStream(transformer)),
-        warnings,
-        rawResponse,
-        request: request != null ? request : {},
-        onFinish,
-        rootSpan,
-        doStreamSpan,
-        telemetry,
-        startTimestampMs,
-        modelId: model.modelId,
-        now: now2,
-        currentDate,
-        generateId: generateId3
-      });
-    }
+    settings,
+    maxRetries,
+    abortSignal,
+    outputStrategy,
+    system,
+    prompt,
+    messages,
+    schemaName,
+    schemaDescription,
+    inputProviderMetadata: providerMetadata,
+    mode,
+    onFinish,
+    generateId: generateId3,
+    currentDate,
+    now: now2
   });
 }
 var DefaultStreamObjectResult = class {
   constructor({
-    stream,
-    warnings,
-    rawResponse,
-    request,
+    model,
+    headers,
+    telemetry,
+    settings,
+    maxRetries,
+    abortSignal,
     outputStrategy,
+    system,
+    prompt,
+    messages,
+    schemaName,
+    schemaDescription,
+    inputProviderMetadata,
+    mode,
     onFinish,
-    rootSpan,
-    doStreamSpan,
-    telemetry,
-    startTimestampMs,
-    modelId,
-    now: now2,
+    generateId: generateId3,
     currentDate,
-    generateId: generateId3
+    now: now2
   }) {
-    this.warnings = warnings;
-    this.outputStrategy = outputStrategy;
-    this.request = Promise.resolve(request);
     this.objectPromise = new DelayedPromise();
-    const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
-    this.usage = usagePromise;
-    const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
-    this.response = responsePromise;
-    const {
-      resolve: resolveProviderMetadata,
-      promise: providerMetadataPromise
-    } = createResolvablePromise();
-    this.experimental_providerMetadata = providerMetadataPromise;
-    let usage;
-    let finishReason;
-    let providerMetadata;
-    let object;
-    let error;
-    let accumulatedText = "";
-    let textDelta = "";
-    let response = {
-      id: generateId3(),
-      timestamp: currentDate(),
-      modelId
-    };
-    let latestObjectJson = void 0;
-    let latestObject = void 0;
-    let isFirstChunk = true;
-    let isFirstDelta = true;
+    this.usagePromise = new DelayedPromise();
+    this.providerMetadataPromise = new DelayedPromise();
+    this.warningsPromise = new DelayedPromise();
+    this.requestPromise = new DelayedPromise();
+    this.responsePromise = new DelayedPromise();
+    this.stitchableStream = createStitchableStream();
+    const baseTelemetryAttributes = getBaseTelemetryAttributes({
+      model,
+      telemetry,
+      headers,
+      settings: { ...settings, maxRetries }
+    });
+    const tracer = getTracer(telemetry);
+    const retry = retryWithExponentialBackoff({ maxRetries });
     const self = this;
-    this.originalStream = stream.pipeThrough(
-      new TransformStream({
-        async transform(chunk, controller) {
-          var _a11, _b, _c;
-          if (isFirstChunk) {
-            const msToFirstChunk = now2() - startTimestampMs;
-            isFirstChunk = false;
-            doStreamSpan.addEvent("ai.stream.firstChunk", {
-              "ai.stream.msToFirstChunk": msToFirstChunk
-            });
-            doStreamSpan.setAttributes({
-              "ai.stream.msToFirstChunk": msToFirstChunk
+    recordSpan({
+      name: "ai.streamObject",
+      attributes: selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          ...assembleOperationName({
+            operationId: "ai.streamObject",
+            telemetry
+          }),
+          ...baseTelemetryAttributes,
+          // specific settings that only make sense on the outer level:
+          "ai.prompt": {
+            input: () => JSON.stringify({ system, prompt, messages })
+          },
+          "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+          "ai.schema.name": schemaName,
+          "ai.schema.description": schemaDescription,
+          "ai.settings.output": outputStrategy.type,
+          "ai.settings.mode": mode
+        }
+      }),
+      tracer,
+      endWhenDone: false,
+      fn: async (rootSpan) => {
+        if (mode === "auto" || mode == null) {
+          mode = model.defaultObjectGenerationMode;
+        }
+        let callOptions;
+        let transformer;
+        switch (mode) {
+          case "json": {
+            const standardizedPrompt = standardizePrompt({
+              prompt: {
+                system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
+                  prompt: system,
+                  schema: outputStrategy.jsonSchema
+                }),
+                prompt,
+                messages
+              },
+              tools: void 0
             });
-          }
-          if (typeof chunk === "string") {
-            accumulatedText += chunk;
-            textDelta += chunk;
-            const { value: currentObjectJson, state: parseState } = (0, import_ui_utils2.parsePartialJson)(accumulatedText);
-            if (currentObjectJson !== void 0 && !(0, import_ui_utils2.isDeepEqualData)(latestObjectJson, currentObjectJson)) {
-              const validationResult = outputStrategy.validatePartialResult({
-                value: currentObjectJson,
-                textDelta,
-                latestObject,
-                isFirstDelta,
-                isFinalDelta: parseState === "successful-parse"
-              });
-              if (validationResult.success && !(0, import_ui_utils2.isDeepEqualData)(latestObject, validationResult.value.partial)) {
-                latestObjectJson = currentObjectJson;
-                latestObject = validationResult.value.partial;
-                controller.enqueue({
-                  type: "object",
-                  object: latestObject
-                });
-                controller.enqueue({
-                  type: "text-delta",
-                  textDelta: validationResult.value.textDelta
-                });
-                textDelta = "";
-                isFirstDelta = false;
-              }
-            }
-            return;
-          }
-          switch (chunk.type) {
-            case "response-metadata": {
-              response = {
-                id: (_a11 = chunk.id) != null ? _a11 : response.id,
-                timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
-                modelId: (_c = chunk.modelId) != null ? _c : response.modelId
-              };
-              break;
-            }
-            case "finish": {
-              if (textDelta !== "") {
-                controller.enqueue({ type: "text-delta", textDelta });
-              }
-              finishReason = chunk.finishReason;
-              usage = calculateLanguageModelUsage(chunk.usage);
-              providerMetadata = chunk.providerMetadata;
-              controller.enqueue({ ...chunk, usage, response });
-              resolveUsage(usage);
-              resolveProviderMetadata(providerMetadata);
-              resolveResponse({
-                ...response,
-                headers: rawResponse == null ? void 0 : rawResponse.headers
-              });
-              const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
-              if (validationResult.success) {
-                object = validationResult.value;
-                self.objectPromise.resolve(object);
-              } else {
-                error = validationResult.error;
-                self.objectPromise.reject(error);
+            callOptions = {
+              mode: {
+                type: "object-json",
+                schema: outputStrategy.jsonSchema,
+                name: schemaName,
+                description: schemaDescription
+              },
+              ...prepareCallSettings(settings),
+              inputFormat: standardizedPrompt.type,
+              prompt: await convertToLanguageModelPrompt({
+                prompt: standardizedPrompt,
+                modelSupportsImageUrls: model.supportsImageUrls,
+                modelSupportsUrl: model.supportsUrl
+              }),
+              providerMetadata: inputProviderMetadata,
+              abortSignal,
+              headers
+            };
+            transformer = {
+              transform: (chunk, controller) => {
+                switch (chunk.type) {
+                  case "text-delta":
+                    controller.enqueue(chunk.textDelta);
+                    break;
+                  case "response-metadata":
+                  case "finish":
+                  case "error":
+                    controller.enqueue(chunk);
+                    break;
+                }
               }
-              break;
-            }
-            default: {
-              controller.enqueue(chunk);
-              break;
-            }
+            };
+            break;
           }
-        },
-        // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
-        async flush(controller) {
-          try {
-            const finalUsage = usage != null ? usage : {
-              promptTokens: NaN,
-              completionTokens: NaN,
-              totalTokens: NaN
+          case "tool": {
+            const standardizedPrompt = standardizePrompt({
+              prompt: { system, prompt, messages },
+              tools: void 0
+            });
+            callOptions = {
+              mode: {
+                type: "object-tool",
+                tool: {
+                  type: "function",
+                  name: schemaName != null ? schemaName : "json",
+                  description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
+                  parameters: outputStrategy.jsonSchema
+                }
+              },
+              ...prepareCallSettings(settings),
+              inputFormat: standardizedPrompt.type,
+              prompt: await convertToLanguageModelPrompt({
+                prompt: standardizedPrompt,
+                modelSupportsImageUrls: model.supportsImageUrls,
+                modelSupportsUrl: model.supportsUrl
+              }),
+              providerMetadata: inputProviderMetadata,
+              abortSignal,
+              headers
             };
-          doStreamSpan.setAttributes(
-            selectTelemetryAttributes({
-              telemetry,
-              attributes: {
-                "ai.response.finishReason": finishReason,
-                "ai.response.object": {
-                  output: () => JSON.stringify(object)
-                },
-                "ai.response.id": response.id,
-                "ai.response.model": response.modelId,
-                "ai.response.timestamp": response.timestamp.toISOString(),
-                "ai.usage.promptTokens": finalUsage.promptTokens,
-                "ai.usage.completionTokens": finalUsage.completionTokens,
-                // standardized gen-ai llm span attributes:
-                "gen_ai.response.finish_reasons": [finishReason],
-                "gen_ai.response.id": response.id,
-                "gen_ai.response.model": response.modelId,
-                "gen_ai.usage.input_tokens": finalUsage.promptTokens,
-                "gen_ai.usage.output_tokens": finalUsage.completionTokens
+            transformer = {
+              transform(chunk, controller) {
+                switch (chunk.type) {
+                  case "tool-call-delta":
+                    controller.enqueue(chunk.argsTextDelta);
+                    break;
+                  case "response-metadata":
+                  case "finish":
+                  case "error":
+                    controller.enqueue(chunk);
+                    break;
                 }
-              })
-            );
-            doStreamSpan.end();
-            rootSpan.setAttributes(
-              selectTelemetryAttributes({
-                telemetry,
-                attributes: {
-                  "ai.usage.promptTokens": finalUsage.promptTokens,
-                  "ai.usage.completionTokens": finalUsage.completionTokens,
-                  "ai.response.object": {
-                    output: () => JSON.stringify(object)
-                  }
-                }
-              })
+              }
+            };
+            break;
+          }
+          case void 0: {
+            throw new Error(
+              "Model does not have a default object generation mode."
             );
-          await (onFinish == null ? void 0 : onFinish({
-            usage: finalUsage,
-            object,
-            error,
-            response: {
-              ...response,
-              headers: rawResponse == null ? void 0 : rawResponse.headers
-            },
-            warnings,
-            experimental_providerMetadata: providerMetadata
-          }));
-        } catch (error2) {
-          controller.error(error2);
-        } finally {
-          rootSpan.end();
+          }
+          default: {
+            const _exhaustiveCheck = mode;
+            throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
           }
         }
-      })
-    );
+        const {
+          result: { stream, warnings, rawResponse, request },
+          doStreamSpan,
+          startTimestampMs
+        } = await retry(
+          () => recordSpan({
+            name: "ai.streamObject.doStream",
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...assembleOperationName({
+                  operationId: "ai.streamObject.doStream",
+                  telemetry
+                }),
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": {
+                  input: () => callOptions.inputFormat
+                },
+                "ai.prompt.messages": {
+                  input: () => JSON.stringify(callOptions.prompt)
+                },
+                "ai.settings.mode": mode,
+                // standardized gen-ai llm span attributes:
+                "gen_ai.system": model.provider,
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+                "gen_ai.request.max_tokens": settings.maxTokens,
+                "gen_ai.request.presence_penalty": settings.presencePenalty,
+                "gen_ai.request.temperature": settings.temperature,
+                "gen_ai.request.top_k": settings.topK,
+                "gen_ai.request.top_p": settings.topP
+              }
+            }),
+            tracer,
+            endWhenDone: false,
+            fn: async (doStreamSpan2) => ({
+              startTimestampMs: now2(),
+              doStreamSpan: doStreamSpan2,
+              result: await model.doStream(callOptions)
+            })
+          })
+        );
+        self.requestPromise.resolve(request != null ? request : {});
+        let usage;
+        let finishReason;
+        let providerMetadata;
+        let object;
+        let error;
+        let accumulatedText = "";
+        let textDelta = "";
+        let response = {
+          id: generateId3(),
+          timestamp: currentDate(),
+          modelId: model.modelId
+        };
+        let latestObjectJson = void 0;
+        let latestObject = void 0;
+        let isFirstChunk = true;
+        let isFirstDelta = true;
+        const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
+          new TransformStream({
+            async transform(chunk, controller) {
+              var _a11, _b, _c;
+              if (isFirstChunk) {
+                const msToFirstChunk = now2() - startTimestampMs;
+                isFirstChunk = false;
+                doStreamSpan.addEvent("ai.stream.firstChunk", {
+                  "ai.stream.msToFirstChunk": msToFirstChunk
+                });
+                doStreamSpan.setAttributes({
+                  "ai.stream.msToFirstChunk": msToFirstChunk
+                });
+              }
+              if (typeof chunk === "string") {
+                accumulatedText += chunk;
+                textDelta += chunk;
+                const { value: currentObjectJson, state: parseState } = (0, import_ui_utils2.parsePartialJson)(accumulatedText);
+                if (currentObjectJson !== void 0 && !(0, import_ui_utils2.isDeepEqualData)(latestObjectJson, currentObjectJson)) {
+                  const validationResult = outputStrategy.validatePartialResult({
+                    value: currentObjectJson,
+                    textDelta,
+                    latestObject,
+                    isFirstDelta,
+                    isFinalDelta: parseState === "successful-parse"
+                  });
+                  if (validationResult.success && !(0, import_ui_utils2.isDeepEqualData)(
+                    latestObject,
+                    validationResult.value.partial
+                  )) {
+                    latestObjectJson = currentObjectJson;
+                    latestObject = validationResult.value.partial;
+                    controller.enqueue({
+                      type: "object",
+                      object: latestObject
+                    });
+                    controller.enqueue({
+                      type: "text-delta",
+                      textDelta: validationResult.value.textDelta
+                    });
+                    textDelta = "";
+                    isFirstDelta = false;
+                  }
+                }
+                return;
+              }
+              switch (chunk.type) {
+                case "response-metadata": {
+                  response = {
+                    id: (_a11 = chunk.id) != null ? _a11 : response.id,
+                    timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
+                    modelId: (_c = chunk.modelId) != null ? _c : response.modelId
+                  };
+                  break;
+                }
+                case "finish": {
+                  if (textDelta !== "") {
+                    controller.enqueue({ type: "text-delta", textDelta });
+                  }
+                  finishReason = chunk.finishReason;
+                  usage = calculateLanguageModelUsage(chunk.usage);
+                  providerMetadata = chunk.providerMetadata;
+                  controller.enqueue({ ...chunk, usage, response });
+                  self.usagePromise.resolve(usage);
+                  self.providerMetadataPromise.resolve(providerMetadata);
+                  self.responsePromise.resolve({
+                    ...response,
+                    headers: rawResponse == null ? void 0 : rawResponse.headers
+                  });
+                  const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
+                  if (validationResult.success) {
+                    object = validationResult.value;
+                    self.objectPromise.resolve(object);
+                  } else {
+                    error = validationResult.error;
+                    self.objectPromise.reject(error);
+                  }
+                  break;
+                }
+                default: {
+                  controller.enqueue(chunk);
+                  break;
+                }
+              }
+            },
+            // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+            async flush(controller) {
+              try {
+                const finalUsage = usage != null ? usage : {
+                  promptTokens: NaN,
+                  completionTokens: NaN,
+                  totalTokens: NaN
+                };
+                doStreamSpan.setAttributes(
+                  selectTelemetryAttributes({
+                    telemetry,
+                    attributes: {
+                      "ai.response.finishReason": finishReason,
+                      "ai.response.object": {
+                        output: () => JSON.stringify(object)
+                      },
+                      "ai.response.id": response.id,
+                      "ai.response.model": response.modelId,
+                      "ai.response.timestamp": response.timestamp.toISOString(),
+                      "ai.usage.promptTokens": finalUsage.promptTokens,
+                      "ai.usage.completionTokens": finalUsage.completionTokens,
+                      // standardized gen-ai llm span attributes:
+                      "gen_ai.response.finish_reasons": [finishReason],
+                      "gen_ai.response.id": response.id,
+                      "gen_ai.response.model": response.modelId,
+                      "gen_ai.usage.input_tokens": finalUsage.promptTokens,
+                      "gen_ai.usage.output_tokens": finalUsage.completionTokens
+                    }
+                  })
+                );
+                doStreamSpan.end();
+                rootSpan.setAttributes(
+                  selectTelemetryAttributes({
+                    telemetry,
+                    attributes: {
+                      "ai.usage.promptTokens": finalUsage.promptTokens,
+                      "ai.usage.completionTokens": finalUsage.completionTokens,
+                      "ai.response.object": {
+                        output: () => JSON.stringify(object)
+                      }
+                    }
+                  })
+                );
+                await (onFinish == null ? void 0 : onFinish({
+                  usage: finalUsage,
+                  object,
+                  error,
+                  response: {
+                    ...response,
+                    headers: rawResponse == null ? void 0 : rawResponse.headers
+                  },
+                  warnings,
+                  experimental_providerMetadata: providerMetadata
+                }));
+              } catch (error2) {
+                controller.error(error2);
+              } finally {
+                rootSpan.end();
+              }
+            }
+          })
+        );
+        self.stitchableStream.addStream(transformedStream);
+      }
+    }).catch((error) => {
+      self.stitchableStream.addStream(
+        new ReadableStream({
+          start(controller) {
+            controller.error(error);
+          }
+        })
+      );
+    }).finally(() => {
+      self.stitchableStream.close();
+    });
+    this.outputStrategy = outputStrategy;
   }
   get object() {
     return this.objectPromise.value;
   }
+  get usage() {
+    return this.usagePromise.value;
+  }
+  get experimental_providerMetadata() {
+    return this.providerMetadataPromise.value;
+  }
+  get warnings() {
+    return this.warningsPromise.value;
+  }
+  get request() {
+    return this.requestPromise.value;
+  }
+  get response() {
+    return this.responsePromise.value;
+  }
   get partialObjectStream() {
-    return createAsyncIterableStream(this.originalStream, {
+    return createAsyncIterableStream(this.stitchableStream.stream, {
       transform(chunk, controller) {
         switch (chunk.type) {
           case "object":
@@ -2894,10 +2994,12 @@ var DefaultStreamObjectResult = class {
     });
   }
   get elementStream() {
-    return this.outputStrategy.createElementStream(this.originalStream);
+    return this.outputStrategy.createElementStream(
+      this.stitchableStream.stream
+    );
   }
   get textStream() {
-    return createAsyncIterableStream(this.originalStream, {
+    return createAsyncIterableStream(this.stitchableStream.stream, {
       transform(chunk, controller) {
         switch (chunk.type) {
           case "text-delta":
@@ -2918,7 +3020,7 @@ var DefaultStreamObjectResult = class {
     });
   }
   get fullStream() {
-    return createAsyncIterableStream(this.originalStream, {
+    return createAsyncIterableStream(this.stitchableStream.stream, {
       transform(chunk, controller) {
         controller.enqueue(chunk);
       }
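
All of the new getters above return `.value` of a `DelayedPromise`. The `util/delayed-promise.ts` implementation is unchanged in this release and therefore not shown in the diff; its call sites (`resolve`/`reject` may fire before or after anyone first reads `.value`) imply roughly the following shape. This is a sketch assumed from usage, not copied from the package:

// Sketch only: a DelayedPromise whose resolution can be recorded before
// the promise is first requested via the .value getter.
var DelayedPromise = class {
  constructor() {
    this.status = { type: "pending" };
    this._resolve = void 0;
    this._reject = void 0;
  }
  get value() {
    // Create the promise lazily; settle it immediately if resolve()/reject()
    // was already called before the first read.
    if (this.promise) {
      return this.promise;
    }
    this.promise = new Promise((resolve, reject) => {
      if (this.status.type === "resolved") {
        resolve(this.status.value);
      } else if (this.status.type === "rejected") {
        reject(this.status.error);
      }
      this._resolve = resolve;
      this._reject = reject;
    });
    return this.promise;
  }
  resolve(value) {
    this.status = { type: "resolved", value };
    if (this.promise) {
      this._resolve(value);
    }
  }
  reject(error) {
    this.status = { type: "rejected", error };
    if (this.promise) {
      this._reject(error);
    }
  }
};
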
@@ -3509,68 +3611,6 @@ var DefaultGenerateTextResult = class {
 var import_provider_utils9 = require("@ai-sdk/provider-utils");
 var import_ui_utils6 = require("@ai-sdk/ui-utils");
 
-// core/util/create-stitchable-stream.ts
-function createStitchableStream() {
-  let innerStreamReaders = [];
-  let controller = null;
-  let isClosed = false;
-  const processPull = async () => {
-    if (isClosed && innerStreamReaders.length === 0) {
-      controller == null ? void 0 : controller.close();
-      return;
-    }
-    if (innerStreamReaders.length === 0) {
-      return;
-    }
-    try {
-      const { value, done } = await innerStreamReaders[0].read();
-      if (done) {
-        innerStreamReaders.shift();
-        if (innerStreamReaders.length > 0) {
-          await processPull();
-        } else if (isClosed) {
-          controller == null ? void 0 : controller.close();
-        }
-      } else {
-        controller == null ? void 0 : controller.enqueue(value);
-      }
-    } catch (error) {
-      controller == null ? void 0 : controller.error(error);
-      innerStreamReaders.shift();
-      if (isClosed && innerStreamReaders.length === 0) {
-        controller == null ? void 0 : controller.close();
-      }
-    }
-  };
-  return {
-    stream: new ReadableStream({
-      start(controllerParam) {
-        controller = controllerParam;
-      },
-      pull: processPull,
-      async cancel() {
-        for (const reader of innerStreamReaders) {
-          await reader.cancel();
-        }
-        innerStreamReaders = [];
-        isClosed = true;
-      }
-    }),
-    addStream: (innerStream) => {
-      if (isClosed) {
-        throw new Error("Cannot add inner stream: outer stream is closed");
-      }
-      innerStreamReaders.push(innerStream.getReader());
-    },
-    close: () => {
-      isClosed = true;
-      if (innerStreamReaders.length === 0) {
-        controller == null ? void 0 : controller.close();
-      }
-    }
-  };
-}
-
 
 // core/util/merge-streams.ts
 function mergeStreams(stream1, stream2) {
   const reader1 = stream1.getReader();
@@ -3854,7 +3894,7 @@ function runToolsTransformation({
 
 // core/generate-text/stream-text.ts
 var originalGenerateId4 = (0, import_provider_utils9.createIdGenerator)({ prefix: "aitxt", size: 24 });
-async function streamText({
+function streamText({
   model,
   tools,
   toolChoice,
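
As with `streamObject` above, dropping `async` makes `streamText` return its result synchronously; the model call now starts inside the `DefaultStreamTextResult` constructor, and errors surface through the stitchable stream rather than through a rejected outer promise. An illustrative call site (the `model` value is assumed setup, not part of the diff):

// No await on the call itself anymore:
const result = streamText({ model, prompt: "Hello" });

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}

// Promise-valued properties still resolve once streaming completes:
const usage = await result.usage;
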
@@ -3887,576 +3927,606 @@ async function streamText({
3887
3927
  message: "maxSteps must be at least 1"
3888
3928
  });
3889
3929
  }
3890
- const baseTelemetryAttributes = getBaseTelemetryAttributes({
3930
+ return new DefaultStreamTextResult({
3891
3931
  model,
3892
3932
  telemetry,
3893
3933
  headers,
3894
- settings: { ...settings, maxRetries }
3895
- });
3896
- const tracer = getTracer(telemetry);
3897
- const initialPrompt = standardizePrompt({
3898
- prompt: { system, prompt, messages },
3899
- tools
3934
+ settings,
3935
+ maxRetries,
3936
+ abortSignal,
3937
+ system,
3938
+ prompt,
3939
+ messages,
3940
+ tools,
3941
+ toolChoice,
3942
+ toolCallStreaming,
3943
+ activeTools,
3944
+ maxSteps,
3945
+ continueSteps,
3946
+ providerMetadata,
3947
+ onChunk,
3948
+ onFinish,
3949
+ onStepFinish,
3950
+ now: now2,
3951
+ currentDate,
3952
+ generateId: generateId3
3900
3953
  });
3901
- return recordSpan({
3902
- name: "ai.streamText",
3903
- attributes: selectTelemetryAttributes({
3954
+ }
3955
+ var DefaultStreamTextResult = class {
3956
+ constructor({
3957
+ model,
3958
+ telemetry,
3959
+ headers,
3960
+ settings,
3961
+ maxRetries,
3962
+ abortSignal,
3963
+ system,
3964
+ prompt,
3965
+ messages,
3966
+ tools,
3967
+ toolChoice,
3968
+ toolCallStreaming,
3969
+ activeTools,
3970
+ maxSteps,
3971
+ continueSteps,
3972
+ providerMetadata,
3973
+ onChunk,
3974
+ onFinish,
3975
+ onStepFinish,
3976
+ now: now2,
3977
+ currentDate,
3978
+ generateId: generateId3
3979
+ }) {
3980
+ this.warningsPromise = new DelayedPromise();
3981
+ this.usagePromise = new DelayedPromise();
3982
+ this.finishReasonPromise = new DelayedPromise();
3983
+ this.providerMetadataPromise = new DelayedPromise();
3984
+ this.textPromise = new DelayedPromise();
3985
+ this.toolCallsPromise = new DelayedPromise();
3986
+ this.toolResultsPromise = new DelayedPromise();
3987
+ this.requestPromise = new DelayedPromise();
3988
+ this.responsePromise = new DelayedPromise();
3989
+ this.stepsPromise = new DelayedPromise();
3990
+ this.stitchableStream = createStitchableStream();
3991
+ const tracer = getTracer(telemetry);
3992
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
3993
+ model,
3904
3994
  telemetry,
3905
- attributes: {
3906
- ...assembleOperationName({ operationId: "ai.streamText", telemetry }),
3907
- ...baseTelemetryAttributes,
3908
- // specific settings that only make sense on the outer level:
3909
- "ai.prompt": {
3910
- input: () => JSON.stringify({ system, prompt, messages })
3911
- },
3912
- "ai.settings.maxSteps": maxSteps
3913
- }
3914
- }),
3915
- tracer,
3916
- endWhenDone: false,
3917
- fn: async (rootSpan) => {
3918
- const retry = retryWithExponentialBackoff({ maxRetries });
3919
- const startStep = async ({
3920
- responseMessages
3921
- }) => {
3922
- const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
3923
- const promptMessages = await convertToLanguageModelPrompt({
3924
- prompt: {
3925
- type: promptFormat,
3926
- system: initialPrompt.system,
3927
- messages: [...initialPrompt.messages, ...responseMessages]
3995
+ headers,
3996
+ settings: { ...settings, maxRetries }
3997
+ });
3998
+ const initialPrompt = standardizePrompt({
3999
+ prompt: { system, prompt, messages },
4000
+ tools
4001
+ });
4002
+ const self = this;
4003
+ const stepResults = [];
4004
+ recordSpan({
4005
+ name: "ai.streamText",
4006
+ attributes: selectTelemetryAttributes({
4007
+ telemetry,
4008
+ attributes: {
4009
+ ...assembleOperationName({ operationId: "ai.streamText", telemetry }),
4010
+ ...baseTelemetryAttributes,
4011
+ // specific settings that only make sense on the outer level:
4012
+ "ai.prompt": {
4013
+ input: () => JSON.stringify({ system, prompt, messages })
3928
4014
  },
3929
- modelSupportsImageUrls: model.supportsImageUrls,
3930
- modelSupportsUrl: model.supportsUrl
3931
- });
3932
- const mode = {
3933
- type: "regular",
3934
- ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
3935
- };
3936
- const {
3937
- result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
3938
- doStreamSpan: doStreamSpan2,
3939
- startTimestampMs: startTimestampMs2
3940
- } = await retry(
3941
- () => recordSpan({
3942
- name: "ai.streamText.doStream",
3943
- attributes: selectTelemetryAttributes({
3944
- telemetry,
3945
- attributes: {
3946
- ...assembleOperationName({
3947
- operationId: "ai.streamText.doStream",
3948
- telemetry
3949
- }),
3950
- ...baseTelemetryAttributes,
3951
- "ai.prompt.format": {
3952
- input: () => promptFormat
3953
- },
3954
- "ai.prompt.messages": {
3955
- input: () => JSON.stringify(promptMessages)
3956
- },
3957
- "ai.prompt.tools": {
3958
- // convert the language model level tools:
3959
- input: () => {
3960
- var _a11;
3961
- return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
3962
- }
3963
- },
3964
- "ai.prompt.toolChoice": {
3965
- input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
3966
- },
3967
- // standardized gen-ai llm span attributes:
3968
- "gen_ai.system": model.provider,
3969
- "gen_ai.request.model": model.modelId,
3970
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
3971
- "gen_ai.request.max_tokens": settings.maxTokens,
3972
- "gen_ai.request.presence_penalty": settings.presencePenalty,
3973
- "gen_ai.request.stop_sequences": settings.stopSequences,
3974
- "gen_ai.request.temperature": settings.temperature,
3975
- "gen_ai.request.top_k": settings.topK,
3976
- "gen_ai.request.top_p": settings.topP
3977
- }
3978
- }),
3979
- tracer,
3980
- endWhenDone: false,
3981
- fn: async (doStreamSpan3) => ({
3982
- startTimestampMs: now2(),
3983
- // get before the call
3984
- doStreamSpan: doStreamSpan3,
3985
- result: await model.doStream({
3986
- mode,
3987
- ...prepareCallSettings(settings),
3988
- inputFormat: promptFormat,
3989
- prompt: promptMessages,
3990
- providerMetadata,
3991
- abortSignal,
3992
- headers
4015
+ "ai.settings.maxSteps": maxSteps
4016
+ }
4017
+ }),
4018
+ tracer,
4019
+ endWhenDone: false,
4020
+ fn: async (rootSpan) => {
4021
+ const retry = retryWithExponentialBackoff({ maxRetries });
4022
+ const startStep = async ({
4023
+ responseMessages
4024
+ }) => {
4025
+ const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
4026
+ const promptMessages = await convertToLanguageModelPrompt({
4027
+ prompt: {
4028
+ type: promptFormat,
4029
+ system: initialPrompt.system,
4030
+ messages: [...initialPrompt.messages, ...responseMessages]
4031
+ },
4032
+ modelSupportsImageUrls: model.supportsImageUrls,
4033
+ modelSupportsUrl: model.supportsUrl
4034
+ });
4035
+ const mode = {
4036
+ type: "regular",
4037
+ ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
4038
+ };
4039
+ const {
4040
+ result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
4041
+ doStreamSpan: doStreamSpan2,
4042
+ startTimestampMs: startTimestampMs2
4043
+ } = await retry(
4044
+ () => recordSpan({
4045
+ name: "ai.streamText.doStream",
4046
+ attributes: selectTelemetryAttributes({
4047
+ telemetry,
4048
+ attributes: {
4049
+ ...assembleOperationName({
4050
+ operationId: "ai.streamText.doStream",
4051
+ telemetry
4052
+ }),
4053
+ ...baseTelemetryAttributes,
4054
+ "ai.prompt.format": {
4055
+ input: () => promptFormat
4056
+ },
4057
+ "ai.prompt.messages": {
4058
+ input: () => JSON.stringify(promptMessages)
4059
+ },
4060
+ "ai.prompt.tools": {
4061
+ // convert the language model level tools:
4062
+ input: () => {
4063
+ var _a11;
4064
+ return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
4065
+ }
4066
+ },
4067
+ "ai.prompt.toolChoice": {
4068
+ input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
4069
+ },
4070
+ // standardized gen-ai llm span attributes:
4071
+ "gen_ai.system": model.provider,
4072
+ "gen_ai.request.model": model.modelId,
4073
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
4074
+ "gen_ai.request.max_tokens": settings.maxTokens,
4075
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
4076
+ "gen_ai.request.stop_sequences": settings.stopSequences,
4077
+ "gen_ai.request.temperature": settings.temperature,
4078
+ "gen_ai.request.top_k": settings.topK,
4079
+ "gen_ai.request.top_p": settings.topP
4080
+ }
4081
+ }),
4082
+ tracer,
4083
+ endWhenDone: false,
4084
+ fn: async (doStreamSpan3) => ({
4085
+ startTimestampMs: now2(),
4086
+ // get before the call
4087
+ doStreamSpan: doStreamSpan3,
4088
+ result: await model.doStream({
4089
+ mode,
4090
+ ...prepareCallSettings(settings),
4091
+ inputFormat: promptFormat,
4092
+ prompt: promptMessages,
4093
+ providerMetadata,
4094
+ abortSignal,
4095
+ headers
4096
+ })
3993
4097
  })
3994
4098
  })
3995
- })
3996
- );
3997
- return {
3998
- result: {
3999
- stream: runToolsTransformation({
4000
- tools,
4001
- generatorStream: stream2,
4002
- toolCallStreaming,
4003
- tracer,
4004
- telemetry,
4005
- abortSignal
4006
- }),
4007
- warnings: warnings2,
4008
- request: request2 != null ? request2 : {},
4009
- rawResponse: rawResponse2
4010
- },
4011
- doStreamSpan: doStreamSpan2,
4012
- startTimestampMs: startTimestampMs2
4099
+ );
4100
+ return {
4101
+ result: {
4102
+ stream: runToolsTransformation({
4103
+ tools,
4104
+ generatorStream: stream2,
4105
+ toolCallStreaming,
4106
+ tracer,
4107
+ telemetry,
4108
+ abortSignal
4109
+ }),
4110
+ warnings: warnings2,
4111
+ request: request2 != null ? request2 : {},
4112
+ rawResponse: rawResponse2
4113
+ },
4114
+ doStreamSpan: doStreamSpan2,
4115
+ startTimestampMs: startTimestampMs2
4116
+ };
4013
4117
  };
4014
- };
4015
- const {
4016
- result: { stream, warnings, rawResponse, request },
4017
- doStreamSpan,
4018
- startTimestampMs
4019
- } = await startStep({ responseMessages: [] });
4020
- return new DefaultStreamTextResult({
4021
- stream,
4022
- warnings,
4023
- rawResponse,
4024
- request,
4025
- onChunk,
4026
- onFinish,
4027
- onStepFinish,
4028
- rootSpan,
4029
- doStreamSpan,
4030
- telemetry,
4031
- startTimestampMs,
4032
- maxSteps,
4033
- continueSteps,
4034
- startStep,
4035
- modelId: model.modelId,
4036
- now: now2,
4037
- currentDate,
4038
- generateId: generateId3,
4039
- tools
4040
- });
4041
- }
4042
- });
4043
- }
4044
- var DefaultStreamTextResult = class {
4045
- constructor({
4046
- stream,
4047
- warnings,
4048
- rawResponse,
4049
- request,
4050
- onChunk,
4051
- onFinish,
4052
- onStepFinish,
4053
- rootSpan,
4054
- doStreamSpan,
4055
- telemetry,
4056
- startTimestampMs,
4057
- maxSteps,
4058
- continueSteps,
4059
- startStep,
4060
- modelId,
4061
- now: now2,
4062
- currentDate,
4063
- generateId: generateId3,
4064
- tools
4065
- }) {
4066
- this.rawWarnings = warnings;
4067
- this.rawResponse = rawResponse;
4068
- const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
4069
- this.usage = usagePromise;
4070
- const { resolve: resolveFinishReason, promise: finishReasonPromise } = createResolvablePromise();
4071
- this.finishReason = finishReasonPromise;
4072
- const { resolve: resolveText, promise: textPromise } = createResolvablePromise();
4073
- this.text = textPromise;
4074
- const { resolve: resolveToolCalls, promise: toolCallsPromise } = createResolvablePromise();
4075
- this.toolCalls = toolCallsPromise;
4076
- const { resolve: resolveToolResults, promise: toolResultsPromise } = createResolvablePromise();
4077
- this.toolResults = toolResultsPromise;
4078
- const { resolve: resolveSteps, promise: stepsPromise } = createResolvablePromise();
4079
- this.steps = stepsPromise;
4080
- const {
4081
- resolve: resolveProviderMetadata,
4082
- promise: providerMetadataPromise
4083
- } = createResolvablePromise();
4084
- this.experimental_providerMetadata = providerMetadataPromise;
4085
- const { resolve: resolveRequest, promise: requestPromise } = createResolvablePromise();
4086
- this.request = requestPromise;
4087
- const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
4088
- this.response = responsePromise;
4089
- const { resolve: resolveWarnings, promise: warningsPromise } = createResolvablePromise();
4090
- this.warnings = warningsPromise;
4091
- const {
4092
- stream: stitchableStream,
4093
- addStream,
4094
- close: closeStitchableStream
4095
- } = createStitchableStream();
4096
- this.originalStream = stitchableStream;
4097
- const stepResults = [];
4098
- const self = this;
4099
- function addStepStream({
4100
- stream: stream2,
4101
- startTimestamp,
4102
- doStreamSpan: doStreamSpan2,
4103
- currentStep,
4104
- responseMessages,
4105
- usage = {
4106
- promptTokens: 0,
4107
- completionTokens: 0,
4108
- totalTokens: 0
4109
- },
4110
- stepType,
4111
- previousStepText = "",
4112
- stepRequest,
4113
- hasLeadingWhitespace
4114
- }) {
4115
- const stepToolCalls = [];
4116
- const stepToolResults = [];
4117
- let stepFinishReason = "unknown";
4118
- let stepUsage = {
4119
- promptTokens: 0,
4120
- completionTokens: 0,
4121
- totalTokens: 0
4122
- };
4123
- let stepProviderMetadata;
4124
- let stepFirstChunk = true;
4125
- let stepText = "";
4126
- let fullStepText = stepType === "continue" ? previousStepText : "";
4127
- let stepLogProbs;
4128
- let stepResponse = {
4129
- id: generateId3(),
4130
- timestamp: currentDate(),
4131
- modelId
4132
- };
4133
- let chunkBuffer = "";
4134
- let chunkTextPublished = false;
4135
- let inWhitespacePrefix = true;
4136
- let hasWhitespaceSuffix = false;
4137
- async function publishTextChunk({
4138
- controller,
4139
- chunk
4140
- }) {
4141
- controller.enqueue(chunk);
4142
- stepText += chunk.textDelta;
4143
- fullStepText += chunk.textDelta;
4144
- chunkTextPublished = true;
4145
- hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
4146
- await (onChunk == null ? void 0 : onChunk({ chunk }));
4147
- }
4148
- addStream(
4149
- stream2.pipeThrough(
4150
- new TransformStream({
4151
- async transform(chunk, controller) {
4152
- var _a11, _b, _c;
4153
- if (stepFirstChunk) {
4154
- const msToFirstChunk = now2() - startTimestamp;
4155
- stepFirstChunk = false;
4156
- doStreamSpan2.addEvent("ai.stream.firstChunk", {
4157
- "ai.response.msToFirstChunk": msToFirstChunk
4158
- });
4159
- doStreamSpan2.setAttributes({
4160
- "ai.response.msToFirstChunk": msToFirstChunk
4161
- });
4162
- }
4163
- if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
4164
- return;
4165
- }
4166
- const chunkType = chunk.type;
4167
- switch (chunkType) {
4168
- case "text-delta": {
4169
- if (continueSteps) {
4170
- const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
4171
- if (trimmedChunkText.length === 0) {
4118
+ const {
4119
+ result: { stream, warnings, rawResponse, request },
4120
+ doStreamSpan,
4121
+ startTimestampMs
4122
+ } = await startStep({ responseMessages: [] });
4123
+ function addStepStream({
4124
+ stream: stream2,
4125
+ startTimestamp,
4126
+ doStreamSpan: doStreamSpan2,
4127
+ currentStep,
4128
+ responseMessages,
4129
+ usage = {
4130
+ promptTokens: 0,
4131
+ completionTokens: 0,
4132
+ totalTokens: 0
4133
+ },
4134
+ stepType,
4135
+ previousStepText = "",
4136
+ stepRequest,
4137
+ hasLeadingWhitespace,
4138
+ warnings: warnings2,
4139
+ response
4140
+ }) {
4141
+ const stepToolCalls = [];
4142
+ const stepToolResults = [];
4143
+ let stepFinishReason = "unknown";
4144
+ let stepUsage = {
4145
+ promptTokens: 0,
4146
+ completionTokens: 0,
4147
+ totalTokens: 0
4148
+ };
4149
+ let stepProviderMetadata;
4150
+ let stepFirstChunk = true;
4151
+ let stepText = "";
4152
+ let fullStepText = stepType === "continue" ? previousStepText : "";
4153
+ let stepLogProbs;
4154
+ let stepResponse = {
4155
+ id: generateId3(),
4156
+ timestamp: currentDate(),
4157
+ modelId: model.modelId
4158
+ };
4159
+ let chunkBuffer = "";
4160
+ let chunkTextPublished = false;
4161
+ let inWhitespacePrefix = true;
4162
+ let hasWhitespaceSuffix = false;
4163
+ async function publishTextChunk({
4164
+ controller,
4165
+ chunk
4166
+ }) {
4167
+ controller.enqueue(chunk);
4168
+ stepText += chunk.textDelta;
4169
+ fullStepText += chunk.textDelta;
4170
+ chunkTextPublished = true;
4171
+ hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
4172
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
4173
+ }
4174
+ self.stitchableStream.addStream(
4175
+ stream2.pipeThrough(
4176
+ new TransformStream({
4177
+ async transform(chunk, controller) {
4178
+ var _a11, _b, _c;
4179
+ if (stepFirstChunk) {
4180
+ const msToFirstChunk = now2() - startTimestamp;
4181
+ stepFirstChunk = false;
4182
+ doStreamSpan2.addEvent("ai.stream.firstChunk", {
4183
+ "ai.response.msToFirstChunk": msToFirstChunk
4184
+ });
4185
+ doStreamSpan2.setAttributes({
4186
+ "ai.response.msToFirstChunk": msToFirstChunk
4187
+ });
4188
+ }
4189
+ if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
4190
+ return;
4191
+ }
4192
+ const chunkType = chunk.type;
4193
+ switch (chunkType) {
4194
+ case "text-delta": {
4195
+ if (continueSteps) {
4196
+ const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
4197
+ if (trimmedChunkText.length === 0) {
4198
+ break;
4199
+ }
4200
+ inWhitespacePrefix = false;
4201
+ chunkBuffer += trimmedChunkText;
4202
+ const split = splitOnLastWhitespace(chunkBuffer);
4203
+ if (split != null) {
4204
+ chunkBuffer = split.suffix;
4205
+ await publishTextChunk({
4206
+ controller,
4207
+ chunk: {
4208
+ type: "text-delta",
4209
+ textDelta: split.prefix + split.whitespace
4210
+ }
4211
+ });
4212
+ }
4213
+ } else {
4214
+ await publishTextChunk({ controller, chunk });
4215
+ }
4172
4216
  break;
4173
4217
  }
4174
- inWhitespacePrefix = false;
4175
- chunkBuffer += trimmedChunkText;
4176
- const split = splitOnLastWhitespace(chunkBuffer);
4177
- if (split != null) {
4178
- chunkBuffer = split.suffix;
4179
- await publishTextChunk({
4180
- controller,
4181
- chunk: {
4182
- type: "text-delta",
4183
- textDelta: split.prefix + split.whitespace
4184
- }
4218
+ case "tool-call": {
4219
+ controller.enqueue(chunk);
4220
+ stepToolCalls.push(chunk);
4221
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
4222
+ break;
4223
+ }
4224
+ case "tool-result": {
4225
+ controller.enqueue(chunk);
4226
+ stepToolResults.push(chunk);
4227
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
4228
+ break;
4229
+ }
4230
+ case "response-metadata": {
4231
+ stepResponse = {
4232
+ id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
4233
+ timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
4234
+ modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
4235
+ };
4236
+ break;
4237
+ }
4238
+ case "finish": {
4239
+ stepUsage = chunk.usage;
4240
+ stepFinishReason = chunk.finishReason;
4241
+ stepProviderMetadata = chunk.experimental_providerMetadata;
4242
+ stepLogProbs = chunk.logprobs;
4243
+ const msToFinish = now2() - startTimestamp;
4244
+ doStreamSpan2.addEvent("ai.stream.finish");
4245
+ doStreamSpan2.setAttributes({
4246
+ "ai.response.msToFinish": msToFinish,
4247
+ "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
4185
4248
  });
4249
+ break;
4186
4250
  }
4187
- } else {
4188
- await publishTextChunk({ controller, chunk });
4189
- }
4190
- break;
4191
- }
4192
- case "tool-call": {
4193
- controller.enqueue(chunk);
4194
- stepToolCalls.push(chunk);
4195
- await (onChunk == null ? void 0 : onChunk({ chunk }));
4196
- break;
4197
- }
4198
- case "tool-result": {
4199
- controller.enqueue(chunk);
4200
- stepToolResults.push(chunk);
4201
- await (onChunk == null ? void 0 : onChunk({ chunk }));
4202
- break;
4203
- }
4204
- case "response-metadata": {
4205
- stepResponse = {
4206
- id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
4207
- timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
4208
- modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
4209
- };
4210
- break;
4211
- }
4212
- case "finish": {
4213
- stepUsage = chunk.usage;
4214
- stepFinishReason = chunk.finishReason;
4215
- stepProviderMetadata = chunk.experimental_providerMetadata;
4216
- stepLogProbs = chunk.logprobs;
4217
- const msToFinish = now2() - startTimestamp;
4218
- doStreamSpan2.addEvent("ai.stream.finish");
4219
- doStreamSpan2.setAttributes({
4220
- "ai.response.msToFinish": msToFinish,
4221
- "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
4222
- });
4223
- break;
4224
- }
4225
- case "tool-call-streaming-start":
4226
- case "tool-call-delta": {
4227
- controller.enqueue(chunk);
4228
- await (onChunk == null ? void 0 : onChunk({ chunk }));
4229
- break;
4230
- }
4231
- case "error": {
4232
- controller.enqueue(chunk);
4233
- stepFinishReason = "error";
4234
- break;
4235
- }
4236
- default: {
4237
- const exhaustiveCheck = chunkType;
4238
- throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
4239
- }
4240
- }
4241
- },
4242
- // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
4243
- async flush(controller) {
4244
- var _a11, _b;
4245
- const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
4246
- let nextStepType = "done";
4247
- if (currentStep + 1 < maxSteps) {
4248
- if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
4249
- stepToolCalls.length === 0) {
4250
- nextStepType = "continue";
4251
- } else if (
4252
- // there are tool calls:
4253
- stepToolCalls.length > 0 && // all current tool calls have results:
4254
- stepToolResults.length === stepToolCalls.length
4255
- ) {
4256
- nextStepType = "tool-result";
4257
- }
4258
- }
4259
- if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
4260
- stepType === "continue" && !chunkTextPublished)) {
4261
- await publishTextChunk({
4262
- controller,
4263
- chunk: {
4264
- type: "text-delta",
4265
- textDelta: chunkBuffer
4266
- }
4267
- });
4268
- chunkBuffer = "";
4269
- }
4270
- try {
4271
- doStreamSpan2.setAttributes(
4272
- selectTelemetryAttributes({
4273
- telemetry,
4274
- attributes: {
4275
- "ai.response.finishReason": stepFinishReason,
4276
- "ai.response.text": { output: () => stepText },
4277
- "ai.response.toolCalls": {
4278
- output: () => stepToolCallsJson
4279
- },
4280
- "ai.response.id": stepResponse.id,
4281
- "ai.response.model": stepResponse.modelId,
4282
- "ai.response.timestamp": stepResponse.timestamp.toISOString(),
4283
- "ai.usage.promptTokens": stepUsage.promptTokens,
4284
- "ai.usage.completionTokens": stepUsage.completionTokens,
4285
- // standardized gen-ai llm span attributes:
4286
- "gen_ai.response.finish_reasons": [stepFinishReason],
4287
- "gen_ai.response.id": stepResponse.id,
4288
- "gen_ai.response.model": stepResponse.modelId,
4289
- "gen_ai.usage.input_tokens": stepUsage.promptTokens,
4290
- "gen_ai.usage.output_tokens": stepUsage.completionTokens
4251
+ case "tool-call-streaming-start":
4252
+ case "tool-call-delta": {
4253
+ controller.enqueue(chunk);
4254
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
4255
+ break;
4291
4256
  }
4292
- })
4293
- );
4294
- } catch (error) {
4295
- } finally {
4296
- doStreamSpan2.end();
4297
- }
4298
- controller.enqueue({
4299
- type: "step-finish",
4300
- finishReason: stepFinishReason,
4301
- usage: stepUsage,
4302
- experimental_providerMetadata: stepProviderMetadata,
4303
- logprobs: stepLogProbs,
4304
- response: {
4305
- ...stepResponse
4257
+ case "error": {
4258
+ controller.enqueue(chunk);
4259
+ stepFinishReason = "error";
4260
+ break;
4261
+ }
4262
+ default: {
4263
+ const exhaustiveCheck = chunkType;
4264
+ throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
4265
+ }
4266
+ }
4306
4267
  },
- isContinued: nextStepType === "continue"
- });
- if (stepType === "continue") {
- const lastMessage = responseMessages[responseMessages.length - 1];
- if (typeof lastMessage.content === "string") {
- lastMessage.content += stepText;
- } else {
- lastMessage.content.push({
- text: stepText,
- type: "text"
+ // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+ async flush(controller) {
+ const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
+ let nextStepType = "done";
+ if (currentStep + 1 < maxSteps) {
+ if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
+ stepToolCalls.length === 0) {
+ nextStepType = "continue";
+ } else if (
+ // there are tool calls:
+ stepToolCalls.length > 0 && // all current tool calls have results:
+ stepToolResults.length === stepToolCalls.length
+ ) {
+ nextStepType = "tool-result";
+ }
+ }
+ if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
+ stepType === "continue" && !chunkTextPublished)) {
+ await publishTextChunk({
+ controller,
+ chunk: {
+ type: "text-delta",
+ textDelta: chunkBuffer
+ }
+ });
+ chunkBuffer = "";
+ }
+ try {
+ doStreamSpan2.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": stepFinishReason,
+ "ai.response.text": { output: () => stepText },
+ "ai.response.toolCalls": {
+ output: () => stepToolCallsJson
+ },
+ "ai.response.id": stepResponse.id,
+ "ai.response.model": stepResponse.modelId,
+ "ai.response.timestamp": stepResponse.timestamp.toISOString(),
+ "ai.usage.promptTokens": stepUsage.promptTokens,
+ "ai.usage.completionTokens": stepUsage.completionTokens,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [stepFinishReason],
+ "gen_ai.response.id": stepResponse.id,
+ "gen_ai.response.model": stepResponse.modelId,
+ "gen_ai.usage.input_tokens": stepUsage.promptTokens,
+ "gen_ai.usage.output_tokens": stepUsage.completionTokens
+ }
+ })
+ );
+ } catch (error) {
+ } finally {
+ doStreamSpan2.end();
+ }
+ controller.enqueue({
+ type: "step-finish",
+ finishReason: stepFinishReason,
+ usage: stepUsage,
+ experimental_providerMetadata: stepProviderMetadata,
+ logprobs: stepLogProbs,
+ response: {
+ ...stepResponse
+ },
+ isContinued: nextStepType === "continue"
  });
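The flush handler above decides how the multi-step loop proceeds: stop, continue a length-truncated text step, or run another step once every tool call has a result. A hedged sketch of that decision as a standalone function (the options shape is illustrative, not the SDK's actual signature):

type NextStepType = "done" | "continue" | "tool-result";

function decideNextStep(opts: {
  currentStep: number;
  maxSteps: number;
  continueSteps: boolean;
  finishReason: string;
  toolCallCount: number;
  toolResultCount: number;
}): NextStepType {
  // no budget left for another step:
  if (opts.currentStep + 1 >= opts.maxSteps) return "done";
  // continue only when the step was cut off by length and made no tool calls:
  if (opts.continueSteps && opts.finishReason === "length" && opts.toolCallCount === 0) {
    return "continue";
  }
  // run another step only when every tool call has a result:
  if (opts.toolCallCount > 0 && opts.toolResultCount === opts.toolCallCount) {
    return "tool-result";
  }
  return "done";
}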
- }
- } else {
- responseMessages.push(
- ...toResponseMessages({
+ if (stepType === "continue") {
+ const lastMessage = responseMessages[responseMessages.length - 1];
+ if (typeof lastMessage.content === "string") {
+ lastMessage.content += stepText;
+ } else {
+ lastMessage.content.push({
+ text: stepText,
+ type: "text"
+ });
+ }
+ } else {
+ responseMessages.push(
+ ...toResponseMessages({
+ text: stepText,
+ tools: tools != null ? tools : {},
+ toolCalls: stepToolCalls,
+ toolResults: stepToolResults
+ })
+ );
+ }
+ const currentStepResult = {
+ stepType,
  text: stepText,
- tools: tools != null ? tools : {},
  toolCalls: stepToolCalls,
- toolResults: stepToolResults
- })
- );
- }
- const currentStepResult = {
- stepType,
- text: stepText,
- toolCalls: stepToolCalls,
- toolResults: stepToolResults,
- finishReason: stepFinishReason,
- usage: stepUsage,
- warnings: self.rawWarnings,
- logprobs: stepLogProbs,
- request: stepRequest,
- response: {
- ...stepResponse,
- headers: (_a11 = self.rawResponse) == null ? void 0 : _a11.headers,
- // deep clone msgs to avoid mutating past messages in multi-step:
- messages: JSON.parse(JSON.stringify(responseMessages))
- },
- experimental_providerMetadata: stepProviderMetadata,
- isContinued: nextStepType === "continue"
- };
- stepResults.push(currentStepResult);
- await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
- const combinedUsage = {
- promptTokens: usage.promptTokens + stepUsage.promptTokens,
- completionTokens: usage.completionTokens + stepUsage.completionTokens,
- totalTokens: usage.totalTokens + stepUsage.totalTokens
- };
- if (nextStepType !== "done") {
- const {
- result,
- doStreamSpan: doStreamSpan3,
- startTimestampMs: startTimestamp2
- } = await startStep({ responseMessages });
- self.rawWarnings = result.warnings;
- self.rawResponse = result.rawResponse;
- addStepStream({
- stream: result.stream,
- startTimestamp: startTimestamp2,
- doStreamSpan: doStreamSpan3,
- currentStep: currentStep + 1,
- responseMessages,
- usage: combinedUsage,
- stepType: nextStepType,
- previousStepText: fullStepText,
- stepRequest: result.request,
- hasLeadingWhitespace: hasWhitespaceSuffix
- });
- return;
- }
- try {
- controller.enqueue({
- type: "finish",
- finishReason: stepFinishReason,
- usage: combinedUsage,
- experimental_providerMetadata: stepProviderMetadata,
- logprobs: stepLogProbs,
- response: {
- ...stepResponse
+ toolResults: stepToolResults,
+ finishReason: stepFinishReason,
+ usage: stepUsage,
+ warnings: warnings2,
+ logprobs: stepLogProbs,
+ request: stepRequest,
+ response: {
+ ...stepResponse,
+ headers: response == null ? void 0 : response.headers,
+ // deep clone msgs to avoid mutating past messages in multi-step:
+ messages: JSON.parse(JSON.stringify(responseMessages))
+ },
+ experimental_providerMetadata: stepProviderMetadata,
+ isContinued: nextStepType === "continue"
+ };
+ stepResults.push(currentStepResult);
+ await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
+ const combinedUsage = {
+ promptTokens: usage.promptTokens + stepUsage.promptTokens,
+ completionTokens: usage.completionTokens + stepUsage.completionTokens,
+ totalTokens: usage.totalTokens + stepUsage.totalTokens
+ };
+ if (nextStepType !== "done") {
+ const {
+ result,
+ doStreamSpan: doStreamSpan3,
+ startTimestampMs: startTimestamp2
+ } = await startStep({ responseMessages });
+ warnings2 = result.warnings;
+ response = result.rawResponse;
+ addStepStream({
+ stream: result.stream,
+ startTimestamp: startTimestamp2,
+ doStreamSpan: doStreamSpan3,
+ currentStep: currentStep + 1,
+ responseMessages,
+ usage: combinedUsage,
+ stepType: nextStepType,
+ previousStepText: fullStepText,
+ stepRequest: result.request,
+ hasLeadingWhitespace: hasWhitespaceSuffix,
+ warnings: warnings2,
+ response
+ });
+ return;
  }
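A next step other than "done" re-enters addStepStream with currentStep + 1 and the usage totals accumulated so far. A small sketch of that accumulation, mirroring the promptTokens/completionTokens/totalTokens arithmetic in combinedUsage (the Usage interface name is an assumption for illustration):

interface Usage {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
}

// Combine the running total with one step's usage, as the flush handler
// above does before kicking off the next step.
function addUsage(total: Usage, step: Usage): Usage {
  return {
    promptTokens: total.promptTokens + step.promptTokens,
    completionTokens: total.completionTokens + step.completionTokens,
    totalTokens: total.totalTokens + step.totalTokens,
  };
}

// e.g. steps of 10/20/30 and 5/15/20 tokens combine to 15/35/50.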
- });
- closeStitchableStream();
- rootSpan.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": stepFinishReason,
- "ai.response.text": { output: () => fullStepText },
- "ai.response.toolCalls": {
- output: () => stepToolCallsJson
+ try {
+ controller.enqueue({
+ type: "finish",
+ finishReason: stepFinishReason,
+ usage: combinedUsage,
+ experimental_providerMetadata: stepProviderMetadata,
+ logprobs: stepLogProbs,
+ response: {
+ ...stepResponse
+ }
+ });
+ self.stitchableStream.close();
+ rootSpan.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": stepFinishReason,
+ "ai.response.text": { output: () => fullStepText },
+ "ai.response.toolCalls": {
+ output: () => stepToolCallsJson
+ },
+ "ai.usage.promptTokens": combinedUsage.promptTokens,
+ "ai.usage.completionTokens": combinedUsage.completionTokens
+ }
+ })
+ );
+ self.usagePromise.resolve(combinedUsage);
+ self.finishReasonPromise.resolve(stepFinishReason);
+ self.textPromise.resolve(fullStepText);
+ self.toolCallsPromise.resolve(stepToolCalls);
+ self.providerMetadataPromise.resolve(stepProviderMetadata);
+ self.toolResultsPromise.resolve(stepToolResults);
+ self.requestPromise.resolve(stepRequest);
+ self.responsePromise.resolve({
+ ...stepResponse,
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
+ messages: responseMessages
+ });
+ self.stepsPromise.resolve(stepResults);
+ self.warningsPromise.resolve(warnings2 != null ? warnings2 : []);
+ await (onFinish == null ? void 0 : onFinish({
+ finishReason: stepFinishReason,
+ logprobs: stepLogProbs,
+ usage: combinedUsage,
+ text: fullStepText,
+ toolCalls: stepToolCalls,
+ // The tool results are inferred as a never[] type, because they are
+ // optional and the execute method with an inferred result type is
+ // optional as well. Therefore we need to cast the toolResults to any.
+ // The type exposed to the users will be correctly inferred.
+ toolResults: stepToolResults,
+ request: stepRequest,
+ response: {
+ ...stepResponse,
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
+ messages: responseMessages
  },
- "ai.usage.promptTokens": combinedUsage.promptTokens,
- "ai.usage.completionTokens": combinedUsage.completionTokens
- }
- })
- );
- resolveUsage(combinedUsage);
- resolveFinishReason(stepFinishReason);
- resolveText(fullStepText);
- resolveToolCalls(stepToolCalls);
- resolveProviderMetadata(stepProviderMetadata);
- resolveToolResults(stepToolResults);
- resolveRequest(stepRequest);
- resolveResponse({
- ...stepResponse,
- headers: rawResponse == null ? void 0 : rawResponse.headers,
- messages: responseMessages
- });
- resolveSteps(stepResults);
- resolveWarnings((_b = self.rawWarnings) != null ? _b : []);
- await (onFinish == null ? void 0 : onFinish({
- finishReason: stepFinishReason,
- logprobs: stepLogProbs,
- usage: combinedUsage,
- text: fullStepText,
- toolCalls: stepToolCalls,
- // The tool results are inferred as a never[] type, because they are
- // optional and the execute method with an inferred result type is
- // optional as well. Therefore we need to cast the toolResults to any.
- // The type exposed to the users will be correctly inferred.
- toolResults: stepToolResults,
- request: stepRequest,
- response: {
- ...stepResponse,
- headers: rawResponse == null ? void 0 : rawResponse.headers,
- messages: responseMessages
- },
- warnings,
- experimental_providerMetadata: stepProviderMetadata,
- steps: stepResults
- }));
- } catch (error) {
- controller.error(error);
- } finally {
- rootSpan.end();
- }
- }
- })
- )
+ warnings: warnings2,
+ experimental_providerMetadata: stepProviderMetadata,
+ steps: stepResults
+ }));
+ } catch (error) {
+ controller.error(error);
+ } finally {
+ rootSpan.end();
+ }
+ }
+ })
+ )
+ );
+ }
+ addStepStream({
+ stream,
+ startTimestamp: startTimestampMs,
+ doStreamSpan,
+ currentStep: 0,
+ responseMessages: [],
+ usage: void 0,
+ stepType: "initial",
+ stepRequest: request,
+ hasLeadingWhitespace: false,
+ warnings,
+ response: rawResponse
+ });
+ }
+ }).catch((error) => {
+ self.stitchableStream.addStream(
+ new ReadableStream({
+ start(controller) {
+ controller.error(error);
+ }
+ })
  );
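On setup failure, the .catch handler above surfaces the error through the stream itself: it appends a stream that errors immediately, so consumers reading the stitched output observe the failure through the normal reading path instead of an unhandled rejection. The Web Streams pattern in isolation:

// A stream whose only act is to error; adding it to a stitched/combined
// stream surfaces `error` to whoever is reading the combined output.
function erroringStream(error: unknown): ReadableStream<never> {
  return new ReadableStream({
    start(controller) {
      controller.error(error);
    },
  });
}

// Usage sketch: reading the stream rejects with the original error.
erroringStream(new Error("setup failed"))
  .getReader()
  .read()
  .catch((error) => console.error("propagated:", error));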
- }
- addStepStream({
- stream,
- startTimestamp: startTimestampMs,
- doStreamSpan,
- currentStep: 0,
- responseMessages: [],
- usage: void 0,
- stepType: "initial",
- stepRequest: request,
- hasLeadingWhitespace: false
+ self.stitchableStream.close();
  });
  }
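The result object now feeds every step through self.stitchableStream: each step's stream is appended with addStream, and close() ends the stitched sequence on both the success and error paths. As a rough intuition, reading the stitched stream yields the inner streams' chunks back to back; the naive fixed-list concatenation below sketches that reading order only (it is a simplified stand-in, not the SDK's implementation, which also accepts new streams while reading is already in progress):

// Read a fixed list of inner streams back to back into one output stream.
function concatStreams<T>(streams: ReadableStream<T>[]): ReadableStream<T> {
  return new ReadableStream<T>({
    async start(controller) {
      for (const inner of streams) {
        const reader = inner.getReader();
        for (;;) {
          const { value, done } = await reader.read();
          if (done) break;
          controller.enqueue(value);
        }
      }
      controller.close();
    },
  });
}

function streamOf(...chunks: string[]): ReadableStream<string> {
  return new ReadableStream({
    start(controller) {
      for (const chunk of chunks) controller.enqueue(chunk);
      controller.close();
    },
  });
}

// Reading `combined` yields "step 1 " then "step 2", in order.
const combined = concatStreams([streamOf("step 1 "), streamOf("step 2")]);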
+ get warnings() {
+ return this.warningsPromise.value;
+ }
+ get usage() {
+ return this.usagePromise.value;
+ }
+ get finishReason() {
+ return this.finishReasonPromise.value;
+ }
+ get experimental_providerMetadata() {
+ return this.providerMetadataPromise.value;
+ }
+ get text() {
+ return this.textPromise.value;
+ }
+ get toolCalls() {
+ return this.toolCallsPromise.value;
+ }
+ get toolResults() {
+ return this.toolResultsPromise.value;
+ }
+ get request() {
+ return this.requestPromise.value;
+ }
+ get response() {
+ return this.responsePromise.value;
+ }
+ get steps() {
+ return this.stepsPromise.value;
+ }
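The one-off resolveUsage/resolveText-style callbacks are gone; the class now resolves named promise fields (self.usagePromise and friends) in flush, and the getters above hand out each field's .value. A hedged sketch of such a promise holder, assuming .value exposes the underlying promise (the SDK's DelayedPromise may differ in details; PromiseHolder is an illustrative name):

// Minimal promise holder: consumers can read `.value` before or after
// `resolve`/`reject` is called; settlement applies retroactively.
class PromiseHolder<T> {
  private promise: Promise<T>;
  private resolveFn!: (value: T) => void;
  private rejectFn!: (reason: unknown) => void;

  constructor() {
    this.promise = new Promise<T>((resolve, reject) => {
      this.resolveFn = resolve;
      this.rejectFn = reject;
    });
  }

  get value(): Promise<T> {
    return this.promise;
  }

  resolve(value: T) {
    this.resolveFn(value);
  }

  reject(reason: unknown) {
    this.rejectFn(reason);
  }
}

// Usage mirroring the getters above:
class Result {
  private usagePromise = new PromiseHolder<{ totalTokens: number }>();
  get usage() {
    return this.usagePromise.value;
  }
  finish(totalTokens: number) {
    this.usagePromise.resolve({ totalTokens });
  }
}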
  /**
  Split out a new stream from the original stream.
  The original stream is replaced to allow for further splitting,
@@ -4466,8 +4536,8 @@ var DefaultStreamTextResult = class {
  However, the LLM results are expected to be small enough to not cause issues.
  */
  teeStream() {
- const [stream1, stream2] = this.originalStream.tee();
- this.originalStream = stream2;
+ const [stream1, stream2] = this.stitchableStream.stream.tee();
+ this.stitchableStream.stream = stream2;
  return stream1;
  }
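teeStream now splits this.stitchableStream.stream instead of the removed originalStream field, but the tee-and-replace pattern is unchanged: each consumer receives one branch of tee() and the retained branch goes back into the field so the stream can be split again. A standalone sketch (StreamSplitter is an illustrative name):

// Each call hands out an independent branch of the source stream while
// keeping the other branch for future splits, so several consumers
// (e.g. a text stream and a full stream) can read the same data.
class StreamSplitter<T> {
  constructor(private source: ReadableStream<T>) {}

  split(): ReadableStream<T> {
    const [consumerBranch, retainedBranch] = this.source.tee();
    this.source = retainedBranch; // replace to allow further splitting
    return consumerBranch;
  }
}

Note that tee() buffers chunks until the slower branch consumes them, which is why the surrounding doc comment flags memory use and relies on LLM results being small.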
  get textStream() {