ai 4.0.0-canary.10 → 4.0.0-canary.11

package/dist/index.mjs CHANGED
@@ -2293,21 +2293,6 @@ import {
  parsePartialJson
  } from "@ai-sdk/ui-utils";

- // util/create-resolvable-promise.ts
- function createResolvablePromise() {
- let resolve;
- let reject;
- const promise = new Promise((res, rej) => {
- resolve = res;
- reject = rej;
- });
- return {
- promise,
- resolve,
- reject
- };
- }
-
  // util/delayed-promise.ts
  var DelayedPromise = class {
  constructor() {
@@ -2399,9 +2384,91 @@ function writeToServerResponse({
  read();
  }

+ // util/create-resolvable-promise.ts
+ function createResolvablePromise() {
+ let resolve;
+ let reject;
+ const promise = new Promise((res, rej) => {
+ resolve = res;
+ reject = rej;
+ });
+ return {
+ promise,
+ resolve,
+ reject
+ };
+ }
+
+ // core/util/create-stitchable-stream.ts
+ function createStitchableStream() {
+ let innerStreamReaders = [];
+ let controller = null;
+ let isClosed = false;
+ let waitForNewStream = createResolvablePromise();
+ const processPull = async () => {
+ if (isClosed && innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ return;
+ }
+ if (innerStreamReaders.length === 0) {
+ waitForNewStream = createResolvablePromise();
+ await waitForNewStream.promise;
+ return processPull();
+ }
+ try {
+ const { value, done } = await innerStreamReaders[0].read();
+ if (done) {
+ innerStreamReaders.shift();
+ if (innerStreamReaders.length > 0) {
+ await processPull();
+ } else if (isClosed) {
+ controller == null ? void 0 : controller.close();
+ }
+ } else {
+ controller == null ? void 0 : controller.enqueue(value);
+ }
+ } catch (error) {
+ controller == null ? void 0 : controller.error(error);
+ innerStreamReaders.shift();
+ if (isClosed && innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ }
+ }
+ };
+ return {
+ stream: new ReadableStream({
+ start(controllerParam) {
+ controller = controllerParam;
+ },
+ pull: processPull,
+ async cancel() {
+ for (const reader of innerStreamReaders) {
+ await reader.cancel();
+ }
+ innerStreamReaders = [];
+ isClosed = true;
+ }
+ }),
+ addStream: (innerStream) => {
+ if (isClosed) {
+ throw new Error("Cannot add inner stream: outer stream is closed");
+ }
+ innerStreamReaders.push(innerStream.getReader());
+ waitForNewStream.resolve();
+ },
+ close: () => {
+ isClosed = true;
+ waitForNewStream.resolve();
+ if (innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ }
+ }
+ };
+ }
+
  // core/generate-object/stream-object.ts
  var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj", size: 24 });
- async function streamObject({
+ function streamObject({
  model,
  schema: inputSchema,
  schemaName,
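
The hunk above moves createResolvablePromise and createStitchableStream to this point in the bundle and adds a waitForNewStream resolvable promise: a pull that finds no inner stream now waits until addStream (or close) resolves that promise, instead of returning without enqueuing anything. A minimal usage sketch of the pattern, not part of the diff (the "hello" stream and the consumer loop are illustrative only):

// Inner streams may arrive after reading has started; pull parks on
// waitForNewStream until addStream or close resolves it.
const { stream, addStream, close } = createStitchableStream();

addStream(
  new ReadableStream({
    start(controller) {
      controller.enqueue("hello");
      controller.close();
    }
  })
);
close(); // the outer stream ends once all inner streams are drained

const reader = stream.getReader();
while (true) {
  const { value, done } = await reader.read();
  if (done) break;
  console.log(value); // "hello"
}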
@@ -2435,400 +2502,433 @@ async function streamObject({
  if (outputStrategy.type === "no-schema" && mode === void 0) {
  mode = "json";
  }
- const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ return new DefaultStreamObjectResult({
  model,
  telemetry,
  headers,
- settings: { ...settings, maxRetries }
- });
- const tracer = getTracer(telemetry);
- const retry = retryWithExponentialBackoff({ maxRetries });
- return recordSpan({
- name: "ai.streamObject",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.streamObject",
- telemetry
- }),
- ...baseTelemetryAttributes,
- // specific settings that only make sense on the outer level:
- "ai.prompt": {
- input: () => JSON.stringify({ system, prompt, messages })
- },
- "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
- "ai.schema.name": schemaName,
- "ai.schema.description": schemaDescription,
- "ai.settings.output": outputStrategy.type,
- "ai.settings.mode": mode
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (rootSpan) => {
- if (mode === "auto" || mode == null) {
- mode = model.defaultObjectGenerationMode;
- }
- let callOptions;
- let transformer;
- switch (mode) {
- case "json": {
- const standardizedPrompt = standardizePrompt({
- prompt: {
- system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
- prompt: system,
- schema: outputStrategy.jsonSchema
- }),
- prompt,
- messages
- },
- tools: void 0
- });
- callOptions = {
- mode: {
- type: "object-json",
- schema: outputStrategy.jsonSchema,
- name: schemaName,
- description: schemaDescription
- },
- ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
- prompt: await convertToLanguageModelPrompt({
- prompt: standardizedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
- }),
- providerMetadata,
- abortSignal,
- headers
- };
- transformer = {
- transform: (chunk, controller) => {
- switch (chunk.type) {
- case "text-delta":
- controller.enqueue(chunk.textDelta);
- break;
- case "response-metadata":
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
- }
- }
- };
- break;
- }
- case "tool": {
- const standardizedPrompt = standardizePrompt({
- prompt: { system, prompt, messages },
- tools: void 0
- });
- callOptions = {
- mode: {
- type: "object-tool",
- tool: {
- type: "function",
- name: schemaName != null ? schemaName : "json",
- description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
- parameters: outputStrategy.jsonSchema
- }
- },
- ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
- prompt: await convertToLanguageModelPrompt({
- prompt: standardizedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
- }),
- providerMetadata,
- abortSignal,
- headers
- };
- transformer = {
- transform(chunk, controller) {
- switch (chunk.type) {
- case "tool-call-delta":
- controller.enqueue(chunk.argsTextDelta);
- break;
- case "response-metadata":
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
- }
- }
- };
- break;
- }
- case void 0: {
- throw new Error(
- "Model does not have a default object generation mode."
- );
- }
- default: {
- const _exhaustiveCheck = mode;
- throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
- }
- }
- const {
- result: { stream, warnings, rawResponse, request },
- doStreamSpan,
- startTimestampMs
- } = await retry(
- () => recordSpan({
- name: "ai.streamObject.doStream",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.streamObject.doStream",
- telemetry
- }),
- ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => callOptions.inputFormat
- },
- "ai.prompt.messages": {
- input: () => JSON.stringify(callOptions.prompt)
- },
- "ai.settings.mode": mode,
- // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
- "gen_ai.request.presence_penalty": settings.presencePenalty,
- "gen_ai.request.temperature": settings.temperature,
- "gen_ai.request.top_k": settings.topK,
- "gen_ai.request.top_p": settings.topP
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (doStreamSpan2) => ({
- startTimestampMs: now2(),
- doStreamSpan: doStreamSpan2,
- result: await model.doStream(callOptions)
- })
- })
- );
- return new DefaultStreamObjectResult({
- outputStrategy,
- stream: stream.pipeThrough(new TransformStream(transformer)),
- warnings,
- rawResponse,
- request: request != null ? request : {},
- onFinish,
- rootSpan,
- doStreamSpan,
- telemetry,
- startTimestampMs,
- modelId: model.modelId,
- now: now2,
- currentDate,
- generateId: generateId3
- });
- }
+ settings,
+ maxRetries,
+ abortSignal,
+ outputStrategy,
+ system,
+ prompt,
+ messages,
+ schemaName,
+ schemaDescription,
+ inputProviderMetadata: providerMetadata,
+ mode,
+ onFinish,
+ generateId: generateId3,
+ currentDate,
+ now: now2
  });
  }
  var DefaultStreamObjectResult = class {
  constructor({
- stream,
- warnings,
- rawResponse,
- request,
+ model,
+ headers,
+ telemetry,
+ settings,
+ maxRetries,
+ abortSignal,
  outputStrategy,
+ system,
+ prompt,
+ messages,
+ schemaName,
+ schemaDescription,
+ inputProviderMetadata,
+ mode,
  onFinish,
- rootSpan,
- doStreamSpan,
- telemetry,
- startTimestampMs,
- modelId,
- now: now2,
+ generateId: generateId3,
  currentDate,
- generateId: generateId3
+ now: now2
  }) {
- this.warnings = warnings;
- this.outputStrategy = outputStrategy;
- this.request = Promise.resolve(request);
  this.objectPromise = new DelayedPromise();
- const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
- this.usage = usagePromise;
- const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
- this.response = responsePromise;
- const {
- resolve: resolveProviderMetadata,
- promise: providerMetadataPromise
- } = createResolvablePromise();
- this.experimental_providerMetadata = providerMetadataPromise;
- let usage;
- let finishReason;
- let providerMetadata;
- let object;
- let error;
- let accumulatedText = "";
- let textDelta = "";
- let response = {
- id: generateId3(),
- timestamp: currentDate(),
- modelId
- };
- let latestObjectJson = void 0;
- let latestObject = void 0;
- let isFirstChunk = true;
- let isFirstDelta = true;
+ this.usagePromise = new DelayedPromise();
+ this.providerMetadataPromise = new DelayedPromise();
+ this.warningsPromise = new DelayedPromise();
+ this.requestPromise = new DelayedPromise();
+ this.responsePromise = new DelayedPromise();
+ this.stitchableStream = createStitchableStream();
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ model,
+ telemetry,
+ headers,
+ settings: { ...settings, maxRetries }
+ });
+ const tracer = getTracer(telemetry);
+ const retry = retryWithExponentialBackoff({ maxRetries });
  const self = this;
- this.originalStream = stream.pipeThrough(
- new TransformStream({
- async transform(chunk, controller) {
- var _a11, _b, _c;
- if (isFirstChunk) {
- const msToFirstChunk = now2() - startTimestampMs;
- isFirstChunk = false;
- doStreamSpan.addEvent("ai.stream.firstChunk", {
- "ai.stream.msToFirstChunk": msToFirstChunk
- });
- doStreamSpan.setAttributes({
- "ai.stream.msToFirstChunk": msToFirstChunk
+ recordSpan({
+ name: "ai.streamObject",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.streamObject",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.prompt": {
+ input: () => JSON.stringify({ system, prompt, messages })
+ },
+ "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+ "ai.schema.name": schemaName,
+ "ai.schema.description": schemaDescription,
+ "ai.settings.output": outputStrategy.type,
+ "ai.settings.mode": mode
+ }
+ }),
+ tracer,
+ endWhenDone: false,
+ fn: async (rootSpan) => {
+ if (mode === "auto" || mode == null) {
+ mode = model.defaultObjectGenerationMode;
+ }
+ let callOptions;
+ let transformer;
+ switch (mode) {
+ case "json": {
+ const standardizedPrompt = standardizePrompt({
+ prompt: {
+ system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
+ prompt: system,
+ schema: outputStrategy.jsonSchema
+ }),
+ prompt,
+ messages
+ },
+ tools: void 0
  });
- }
- if (typeof chunk === "string") {
- accumulatedText += chunk;
- textDelta += chunk;
- const { value: currentObjectJson, state: parseState } = parsePartialJson(accumulatedText);
- if (currentObjectJson !== void 0 && !isDeepEqualData(latestObjectJson, currentObjectJson)) {
- const validationResult = outputStrategy.validatePartialResult({
- value: currentObjectJson,
- textDelta,
- latestObject,
- isFirstDelta,
- isFinalDelta: parseState === "successful-parse"
- });
- if (validationResult.success && !isDeepEqualData(latestObject, validationResult.value.partial)) {
- latestObjectJson = currentObjectJson;
- latestObject = validationResult.value.partial;
- controller.enqueue({
- type: "object",
- object: latestObject
- });
- controller.enqueue({
- type: "text-delta",
- textDelta: validationResult.value.textDelta
- });
- textDelta = "";
- isFirstDelta = false;
- }
- }
- return;
- }
- switch (chunk.type) {
- case "response-metadata": {
- response = {
- id: (_a11 = chunk.id) != null ? _a11 : response.id,
- timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
- modelId: (_c = chunk.modelId) != null ? _c : response.modelId
- };
- break;
- }
- case "finish": {
- if (textDelta !== "") {
- controller.enqueue({ type: "text-delta", textDelta });
- }
- finishReason = chunk.finishReason;
- usage = calculateLanguageModelUsage(chunk.usage);
- providerMetadata = chunk.providerMetadata;
- controller.enqueue({ ...chunk, usage, response });
- resolveUsage(usage);
- resolveProviderMetadata(providerMetadata);
- resolveResponse({
- ...response,
- headers: rawResponse == null ? void 0 : rawResponse.headers
- });
- const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
- if (validationResult.success) {
- object = validationResult.value;
- self.objectPromise.resolve(object);
- } else {
- error = validationResult.error;
- self.objectPromise.reject(error);
+ callOptions = {
+ mode: {
+ type: "object-json",
+ schema: outputStrategy.jsonSchema,
+ name: schemaName,
+ description: schemaDescription
+ },
+ ...prepareCallSettings(settings),
+ inputFormat: standardizedPrompt.type,
+ prompt: await convertToLanguageModelPrompt({
+ prompt: standardizedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
+ }),
+ providerMetadata: inputProviderMetadata,
+ abortSignal,
+ headers
+ };
+ transformer = {
+ transform: (chunk, controller) => {
+ switch (chunk.type) {
+ case "text-delta":
+ controller.enqueue(chunk.textDelta);
+ break;
+ case "response-metadata":
+ case "finish":
+ case "error":
+ controller.enqueue(chunk);
+ break;
+ }
  }
- break;
- }
- default: {
- controller.enqueue(chunk);
- break;
- }
+ };
+ break;
  }
- },
- // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
- async flush(controller) {
- try {
- const finalUsage = usage != null ? usage : {
- promptTokens: NaN,
- completionTokens: NaN,
- totalTokens: NaN
+ case "tool": {
+ const standardizedPrompt = standardizePrompt({
+ prompt: { system, prompt, messages },
+ tools: void 0
+ });
+ callOptions = {
+ mode: {
+ type: "object-tool",
+ tool: {
+ type: "function",
+ name: schemaName != null ? schemaName : "json",
+ description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
+ parameters: outputStrategy.jsonSchema
+ }
+ },
+ ...prepareCallSettings(settings),
+ inputFormat: standardizedPrompt.type,
+ prompt: await convertToLanguageModelPrompt({
+ prompt: standardizedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
+ }),
+ providerMetadata: inputProviderMetadata,
+ abortSignal,
+ headers
  };
- doStreamSpan.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": finishReason,
- "ai.response.object": {
- output: () => JSON.stringify(object)
- },
- "ai.response.id": response.id,
- "ai.response.model": response.modelId,
- "ai.response.timestamp": response.timestamp.toISOString(),
- "ai.usage.promptTokens": finalUsage.promptTokens,
- "ai.usage.completionTokens": finalUsage.completionTokens,
- // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [finishReason],
- "gen_ai.response.id": response.id,
- "gen_ai.response.model": response.modelId,
- "gen_ai.usage.input_tokens": finalUsage.promptTokens,
- "gen_ai.usage.output_tokens": finalUsage.completionTokens
+ transformer = {
+ transform(chunk, controller) {
+ switch (chunk.type) {
+ case "tool-call-delta":
+ controller.enqueue(chunk.argsTextDelta);
+ break;
+ case "response-metadata":
+ case "finish":
+ case "error":
+ controller.enqueue(chunk);
+ break;
  }
- })
- );
- doStreamSpan.end();
- rootSpan.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.usage.promptTokens": finalUsage.promptTokens,
- "ai.usage.completionTokens": finalUsage.completionTokens,
- "ai.response.object": {
- output: () => JSON.stringify(object)
- }
- }
- })
+ }
+ };
+ break;
+ }
+ case void 0: {
+ throw new Error(
+ "Model does not have a default object generation mode."
  );
- await (onFinish == null ? void 0 : onFinish({
- usage: finalUsage,
- object,
- error,
- response: {
- ...response,
- headers: rawResponse == null ? void 0 : rawResponse.headers
- },
- warnings,
- experimental_providerMetadata: providerMetadata
- }));
- } catch (error2) {
- controller.error(error2);
- } finally {
- rootSpan.end();
+ }
+ default: {
+ const _exhaustiveCheck = mode;
+ throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
  }
  }
- })
- );
+ const {
+ result: { stream, warnings, rawResponse, request },
+ doStreamSpan,
+ startTimestampMs
+ } = await retry(
+ () => recordSpan({
+ name: "ai.streamObject.doStream",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.streamObject.doStream",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ "ai.prompt.format": {
+ input: () => callOptions.inputFormat
+ },
+ "ai.prompt.messages": {
+ input: () => JSON.stringify(callOptions.prompt)
+ },
+ "ai.settings.mode": mode,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+ "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
+ "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_k": settings.topK,
+ "gen_ai.request.top_p": settings.topP
+ }
+ }),
+ tracer,
+ endWhenDone: false,
+ fn: async (doStreamSpan2) => ({
+ startTimestampMs: now2(),
+ doStreamSpan: doStreamSpan2,
+ result: await model.doStream(callOptions)
+ })
+ })
+ );
+ self.requestPromise.resolve(request != null ? request : {});
+ let usage;
+ let finishReason;
+ let providerMetadata;
+ let object;
+ let error;
+ let accumulatedText = "";
+ let textDelta = "";
+ let response = {
+ id: generateId3(),
+ timestamp: currentDate(),
+ modelId: model.modelId
+ };
+ let latestObjectJson = void 0;
+ let latestObject = void 0;
+ let isFirstChunk = true;
+ let isFirstDelta = true;
+ const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
+ new TransformStream({
+ async transform(chunk, controller) {
+ var _a11, _b, _c;
+ if (isFirstChunk) {
+ const msToFirstChunk = now2() - startTimestampMs;
+ isFirstChunk = false;
+ doStreamSpan.addEvent("ai.stream.firstChunk", {
+ "ai.stream.msToFirstChunk": msToFirstChunk
+ });
+ doStreamSpan.setAttributes({
+ "ai.stream.msToFirstChunk": msToFirstChunk
+ });
+ }
+ if (typeof chunk === "string") {
+ accumulatedText += chunk;
+ textDelta += chunk;
+ const { value: currentObjectJson, state: parseState } = parsePartialJson(accumulatedText);
+ if (currentObjectJson !== void 0 && !isDeepEqualData(latestObjectJson, currentObjectJson)) {
+ const validationResult = outputStrategy.validatePartialResult({
+ value: currentObjectJson,
+ textDelta,
+ latestObject,
+ isFirstDelta,
+ isFinalDelta: parseState === "successful-parse"
+ });
+ if (validationResult.success && !isDeepEqualData(
+ latestObject,
+ validationResult.value.partial
+ )) {
+ latestObjectJson = currentObjectJson;
+ latestObject = validationResult.value.partial;
+ controller.enqueue({
+ type: "object",
+ object: latestObject
+ });
+ controller.enqueue({
+ type: "text-delta",
+ textDelta: validationResult.value.textDelta
+ });
+ textDelta = "";
+ isFirstDelta = false;
+ }
+ }
+ return;
+ }
+ switch (chunk.type) {
+ case "response-metadata": {
+ response = {
+ id: (_a11 = chunk.id) != null ? _a11 : response.id,
+ timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
+ modelId: (_c = chunk.modelId) != null ? _c : response.modelId
+ };
+ break;
+ }
+ case "finish": {
+ if (textDelta !== "") {
+ controller.enqueue({ type: "text-delta", textDelta });
+ }
+ finishReason = chunk.finishReason;
+ usage = calculateLanguageModelUsage(chunk.usage);
+ providerMetadata = chunk.providerMetadata;
+ controller.enqueue({ ...chunk, usage, response });
+ self.usagePromise.resolve(usage);
+ self.providerMetadataPromise.resolve(providerMetadata);
+ self.responsePromise.resolve({
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ });
+ const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
+ if (validationResult.success) {
+ object = validationResult.value;
+ self.objectPromise.resolve(object);
+ } else {
+ error = validationResult.error;
+ self.objectPromise.reject(error);
+ }
+ break;
+ }
+ default: {
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ },
+ // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+ async flush(controller) {
+ try {
+ const finalUsage = usage != null ? usage : {
+ promptTokens: NaN,
+ completionTokens: NaN,
+ totalTokens: NaN
+ };
+ doStreamSpan.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": finishReason,
+ "ai.response.object": {
+ output: () => JSON.stringify(object)
+ },
+ "ai.response.id": response.id,
+ "ai.response.model": response.modelId,
+ "ai.response.timestamp": response.timestamp.toISOString(),
+ "ai.usage.promptTokens": finalUsage.promptTokens,
+ "ai.usage.completionTokens": finalUsage.completionTokens,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [finishReason],
+ "gen_ai.response.id": response.id,
+ "gen_ai.response.model": response.modelId,
+ "gen_ai.usage.input_tokens": finalUsage.promptTokens,
+ "gen_ai.usage.output_tokens": finalUsage.completionTokens
+ }
+ })
+ );
+ doStreamSpan.end();
+ rootSpan.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.usage.promptTokens": finalUsage.promptTokens,
+ "ai.usage.completionTokens": finalUsage.completionTokens,
+ "ai.response.object": {
+ output: () => JSON.stringify(object)
+ }
+ }
+ })
+ );
+ await (onFinish == null ? void 0 : onFinish({
+ usage: finalUsage,
+ object,
+ error,
+ response: {
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ },
+ warnings,
+ experimental_providerMetadata: providerMetadata
+ }));
+ } catch (error2) {
+ controller.error(error2);
+ } finally {
+ rootSpan.end();
+ }
+ }
+ })
+ );
+ self.stitchableStream.addStream(transformedStream);
+ }
+ }).catch((error) => {
+ self.stitchableStream.addStream(
+ new ReadableStream({
+ start(controller) {
+ controller.error(error);
+ }
+ })
+ );
+ }).finally(() => {
+ self.stitchableStream.close();
+ });
+ this.outputStrategy = outputStrategy;
  }
  get object() {
  return this.objectPromise.value;
  }
+ get usage() {
+ return this.usagePromise.value;
+ }
+ get experimental_providerMetadata() {
+ return this.providerMetadataPromise.value;
+ }
+ get warnings() {
+ return this.warningsPromise.value;
+ }
+ get request() {
+ return this.requestPromise.value;
+ }
+ get response() {
+ return this.responsePromise.value;
+ }
  get partialObjectStream() {
- return createAsyncIterableStream(this.originalStream, {
+ return createAsyncIterableStream(this.stitchableStream.stream, {
  transform(chunk, controller) {
  switch (chunk.type) {
  case "object":
@@ -2849,10 +2949,12 @@ var DefaultStreamObjectResult = class {
  });
  }
  get elementStream() {
- return this.outputStrategy.createElementStream(this.originalStream);
+ return this.outputStrategy.createElementStream(
+ this.stitchableStream.stream
+ );
  }
  get textStream() {
- return createAsyncIterableStream(this.originalStream, {
+ return createAsyncIterableStream(this.stitchableStream.stream, {
  transform(chunk, controller) {
  switch (chunk.type) {
  case "text-delta":
@@ -2873,7 +2975,7 @@ var DefaultStreamObjectResult = class {
  });
  }
  get fullStream() {
- return createAsyncIterableStream(this.originalStream, {
+ return createAsyncIterableStream(this.stitchableStream.stream, {
  transform(chunk, controller) {
  controller.enqueue(chunk);
  }
@@ -3476,68 +3578,6 @@ var DefaultGenerateTextResult = class {
  import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";
  import { formatStreamPart } from "@ai-sdk/ui-utils";

- // core/util/create-stitchable-stream.ts
- function createStitchableStream() {
- let innerStreamReaders = [];
- let controller = null;
- let isClosed = false;
- const processPull = async () => {
- if (isClosed && innerStreamReaders.length === 0) {
- controller == null ? void 0 : controller.close();
- return;
- }
- if (innerStreamReaders.length === 0) {
- return;
- }
- try {
- const { value, done } = await innerStreamReaders[0].read();
- if (done) {
- innerStreamReaders.shift();
- if (innerStreamReaders.length > 0) {
- await processPull();
- } else if (isClosed) {
- controller == null ? void 0 : controller.close();
- }
- } else {
- controller == null ? void 0 : controller.enqueue(value);
- }
- } catch (error) {
- controller == null ? void 0 : controller.error(error);
- innerStreamReaders.shift();
- if (isClosed && innerStreamReaders.length === 0) {
- controller == null ? void 0 : controller.close();
- }
- }
- };
- return {
- stream: new ReadableStream({
- start(controllerParam) {
- controller = controllerParam;
- },
- pull: processPull,
- async cancel() {
- for (const reader of innerStreamReaders) {
- await reader.cancel();
- }
- innerStreamReaders = [];
- isClosed = true;
- }
- }),
- addStream: (innerStream) => {
- if (isClosed) {
- throw new Error("Cannot add inner stream: outer stream is closed");
- }
- innerStreamReaders.push(innerStream.getReader());
- },
- close: () => {
- isClosed = true;
- if (innerStreamReaders.length === 0) {
- controller == null ? void 0 : controller.close();
- }
- }
- };
- }
-
  // core/util/merge-streams.ts
  function mergeStreams(stream1, stream2) {
  const reader1 = stream1.getReader();
@@ -3821,7 +3861,7 @@ function runToolsTransformation({

  // core/generate-text/stream-text.ts
  var originalGenerateId4 = createIdGenerator4({ prefix: "aitxt", size: 24 });
- async function streamText({
+ function streamText({
  model,
  tools,
  toolChoice,
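
The same async-function-to-function conversion is applied to streamText, and the hunk below moves the per-step orchestration into the DefaultStreamTextResult constructor, mirroring the streamObject change. A consumer sketch under the same caveats (textStream is the async-iterable getter the result class exposes elsewhere in this file):

const result = streamText({ model, prompt });
for await (const delta of result.textStream) {
  process.stdout.write(delta);
}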
@@ -3854,576 +3894,606 @@ async function streamText({
3854
3894
  message: "maxSteps must be at least 1"
3855
3895
  });
3856
3896
  }
3857
- const baseTelemetryAttributes = getBaseTelemetryAttributes({
3897
+ return new DefaultStreamTextResult({
3858
3898
  model,
3859
3899
  telemetry,
3860
3900
  headers,
3861
- settings: { ...settings, maxRetries }
3862
- });
3863
- const tracer = getTracer(telemetry);
3864
- const initialPrompt = standardizePrompt({
3865
- prompt: { system, prompt, messages },
3866
- tools
3901
+ settings,
3902
+ maxRetries,
3903
+ abortSignal,
3904
+ system,
3905
+ prompt,
3906
+ messages,
3907
+ tools,
3908
+ toolChoice,
3909
+ toolCallStreaming,
3910
+ activeTools,
3911
+ maxSteps,
3912
+ continueSteps,
3913
+ providerMetadata,
3914
+ onChunk,
3915
+ onFinish,
3916
+ onStepFinish,
3917
+ now: now2,
3918
+ currentDate,
3919
+ generateId: generateId3
3867
3920
  });
3868
- return recordSpan({
3869
- name: "ai.streamText",
3870
- attributes: selectTelemetryAttributes({
3921
+ }
3922
+ var DefaultStreamTextResult = class {
3923
+ constructor({
3924
+ model,
3925
+ telemetry,
3926
+ headers,
3927
+ settings,
3928
+ maxRetries,
3929
+ abortSignal,
3930
+ system,
3931
+ prompt,
3932
+ messages,
3933
+ tools,
3934
+ toolChoice,
3935
+ toolCallStreaming,
3936
+ activeTools,
3937
+ maxSteps,
3938
+ continueSteps,
3939
+ providerMetadata,
3940
+ onChunk,
3941
+ onFinish,
3942
+ onStepFinish,
3943
+ now: now2,
3944
+ currentDate,
3945
+ generateId: generateId3
3946
+ }) {
3947
+ this.warningsPromise = new DelayedPromise();
3948
+ this.usagePromise = new DelayedPromise();
3949
+ this.finishReasonPromise = new DelayedPromise();
3950
+ this.providerMetadataPromise = new DelayedPromise();
3951
+ this.textPromise = new DelayedPromise();
3952
+ this.toolCallsPromise = new DelayedPromise();
3953
+ this.toolResultsPromise = new DelayedPromise();
3954
+ this.requestPromise = new DelayedPromise();
3955
+ this.responsePromise = new DelayedPromise();
3956
+ this.stepsPromise = new DelayedPromise();
3957
+ this.stitchableStream = createStitchableStream();
3958
+ const tracer = getTracer(telemetry);
3959
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
3960
+ model,
3871
3961
  telemetry,
3872
- attributes: {
3873
- ...assembleOperationName({ operationId: "ai.streamText", telemetry }),
3874
- ...baseTelemetryAttributes,
3875
- // specific settings that only make sense on the outer level:
3876
- "ai.prompt": {
3877
- input: () => JSON.stringify({ system, prompt, messages })
3878
- },
3879
- "ai.settings.maxSteps": maxSteps
3880
- }
3881
- }),
3882
- tracer,
3883
- endWhenDone: false,
3884
- fn: async (rootSpan) => {
3885
- const retry = retryWithExponentialBackoff({ maxRetries });
3886
- const startStep = async ({
3887
- responseMessages
3888
- }) => {
3889
- const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
3890
- const promptMessages = await convertToLanguageModelPrompt({
3891
- prompt: {
3892
- type: promptFormat,
3893
- system: initialPrompt.system,
3894
- messages: [...initialPrompt.messages, ...responseMessages]
3962
+ headers,
3963
+ settings: { ...settings, maxRetries }
3964
+ });
3965
+ const initialPrompt = standardizePrompt({
3966
+ prompt: { system, prompt, messages },
3967
+ tools
3968
+ });
3969
+ const self = this;
3970
+ const stepResults = [];
3971
+ recordSpan({
3972
+ name: "ai.streamText",
3973
+ attributes: selectTelemetryAttributes({
3974
+ telemetry,
3975
+ attributes: {
3976
+ ...assembleOperationName({ operationId: "ai.streamText", telemetry }),
3977
+ ...baseTelemetryAttributes,
3978
+ // specific settings that only make sense on the outer level:
3979
+ "ai.prompt": {
3980
+ input: () => JSON.stringify({ system, prompt, messages })
3895
3981
  },
3896
- modelSupportsImageUrls: model.supportsImageUrls,
3897
- modelSupportsUrl: model.supportsUrl
3898
- });
3899
- const mode = {
3900
- type: "regular",
3901
- ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
3902
- };
3903
- const {
3904
- result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
3905
- doStreamSpan: doStreamSpan2,
3906
- startTimestampMs: startTimestampMs2
3907
- } = await retry(
3908
- () => recordSpan({
3909
- name: "ai.streamText.doStream",
3910
- attributes: selectTelemetryAttributes({
3911
- telemetry,
3912
- attributes: {
3913
- ...assembleOperationName({
3914
- operationId: "ai.streamText.doStream",
3915
- telemetry
3916
- }),
3917
- ...baseTelemetryAttributes,
3918
- "ai.prompt.format": {
3919
- input: () => promptFormat
3920
- },
3921
- "ai.prompt.messages": {
3922
- input: () => JSON.stringify(promptMessages)
3923
- },
3924
- "ai.prompt.tools": {
3925
- // convert the language model level tools:
3926
- input: () => {
3927
- var _a11;
3928
- return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
3929
- }
3930
- },
3931
- "ai.prompt.toolChoice": {
3932
- input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
3933
- },
3934
- // standardized gen-ai llm span attributes:
3935
- "gen_ai.system": model.provider,
3936
- "gen_ai.request.model": model.modelId,
3937
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
3938
- "gen_ai.request.max_tokens": settings.maxTokens,
3939
- "gen_ai.request.presence_penalty": settings.presencePenalty,
3940
- "gen_ai.request.stop_sequences": settings.stopSequences,
3941
- "gen_ai.request.temperature": settings.temperature,
3942
- "gen_ai.request.top_k": settings.topK,
3943
- "gen_ai.request.top_p": settings.topP
3944
- }
3945
- }),
3946
- tracer,
3947
- endWhenDone: false,
3948
- fn: async (doStreamSpan3) => ({
3949
- startTimestampMs: now2(),
3950
- // get before the call
3951
- doStreamSpan: doStreamSpan3,
3952
- result: await model.doStream({
3953
- mode,
3954
- ...prepareCallSettings(settings),
3955
- inputFormat: promptFormat,
3956
- prompt: promptMessages,
3957
- providerMetadata,
3958
- abortSignal,
3959
- headers
3982
+ "ai.settings.maxSteps": maxSteps
3983
+ }
3984
+ }),
3985
+ tracer,
3986
+ endWhenDone: false,
3987
+ fn: async (rootSpan) => {
3988
+ const retry = retryWithExponentialBackoff({ maxRetries });
3989
+ const startStep = async ({
3990
+ responseMessages
3991
+ }) => {
3992
+ const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
3993
+ const promptMessages = await convertToLanguageModelPrompt({
3994
+ prompt: {
3995
+ type: promptFormat,
3996
+ system: initialPrompt.system,
3997
+ messages: [...initialPrompt.messages, ...responseMessages]
3998
+ },
3999
+ modelSupportsImageUrls: model.supportsImageUrls,
4000
+ modelSupportsUrl: model.supportsUrl
4001
+ });
4002
+ const mode = {
4003
+ type: "regular",
4004
+ ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
4005
+ };
4006
+ const {
4007
+ result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
4008
+ doStreamSpan: doStreamSpan2,
4009
+ startTimestampMs: startTimestampMs2
4010
+ } = await retry(
4011
+ () => recordSpan({
4012
+ name: "ai.streamText.doStream",
4013
+ attributes: selectTelemetryAttributes({
4014
+ telemetry,
4015
+ attributes: {
4016
+ ...assembleOperationName({
4017
+ operationId: "ai.streamText.doStream",
4018
+ telemetry
4019
+ }),
4020
+ ...baseTelemetryAttributes,
4021
+ "ai.prompt.format": {
4022
+ input: () => promptFormat
4023
+ },
4024
+ "ai.prompt.messages": {
4025
+ input: () => JSON.stringify(promptMessages)
4026
+ },
4027
+ "ai.prompt.tools": {
4028
+ // convert the language model level tools:
4029
+ input: () => {
4030
+ var _a11;
4031
+ return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
4032
+ }
4033
+ },
4034
+ "ai.prompt.toolChoice": {
4035
+ input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
4036
+ },
4037
+ // standardized gen-ai llm span attributes:
4038
+ "gen_ai.system": model.provider,
4039
+ "gen_ai.request.model": model.modelId,
4040
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
4041
+ "gen_ai.request.max_tokens": settings.maxTokens,
4042
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
4043
+ "gen_ai.request.stop_sequences": settings.stopSequences,
4044
+ "gen_ai.request.temperature": settings.temperature,
4045
+ "gen_ai.request.top_k": settings.topK,
4046
+ "gen_ai.request.top_p": settings.topP
4047
+ }
4048
+ }),
4049
+ tracer,
4050
+ endWhenDone: false,
4051
+ fn: async (doStreamSpan3) => ({
4052
+ startTimestampMs: now2(),
4053
+ // get before the call
4054
+ doStreamSpan: doStreamSpan3,
4055
+ result: await model.doStream({
4056
+ mode,
4057
+ ...prepareCallSettings(settings),
4058
+ inputFormat: promptFormat,
4059
+ prompt: promptMessages,
4060
+ providerMetadata,
4061
+ abortSignal,
4062
+ headers
4063
+ })
3960
4064
  })
3961
4065
  })
3962
- })
3963
- );
3964
- return {
3965
- result: {
3966
- stream: runToolsTransformation({
3967
- tools,
3968
- generatorStream: stream2,
3969
- toolCallStreaming,
3970
- tracer,
3971
- telemetry,
3972
- abortSignal
3973
- }),
3974
- warnings: warnings2,
3975
- request: request2 != null ? request2 : {},
3976
- rawResponse: rawResponse2
3977
- },
3978
- doStreamSpan: doStreamSpan2,
3979
- startTimestampMs: startTimestampMs2
4066
+ );
4067
+ return {
4068
+ result: {
4069
+ stream: runToolsTransformation({
4070
+ tools,
4071
+ generatorStream: stream2,
4072
+ toolCallStreaming,
4073
+ tracer,
4074
+ telemetry,
4075
+ abortSignal
4076
+ }),
4077
+ warnings: warnings2,
4078
+ request: request2 != null ? request2 : {},
4079
+ rawResponse: rawResponse2
4080
+ },
4081
+ doStreamSpan: doStreamSpan2,
4082
+ startTimestampMs: startTimestampMs2
4083
+ };
3980
4084
  };
3981
- };
3982
- const {
3983
- result: { stream, warnings, rawResponse, request },
3984
- doStreamSpan,
3985
- startTimestampMs
3986
- } = await startStep({ responseMessages: [] });
3987
- return new DefaultStreamTextResult({
3988
- stream,
3989
- warnings,
3990
- rawResponse,
3991
- request,
3992
- onChunk,
3993
- onFinish,
3994
- onStepFinish,
3995
- rootSpan,
3996
- doStreamSpan,
3997
- telemetry,
3998
- startTimestampMs,
3999
- maxSteps,
4000
- continueSteps,
4001
- startStep,
4002
- modelId: model.modelId,
4003
- now: now2,
4004
- currentDate,
4005
- generateId: generateId3,
4006
- tools
4007
- });
4008
- }
4009
- });
4010
- }
4011
- var DefaultStreamTextResult = class {
4012
- constructor({
4013
- stream,
4014
- warnings,
4015
- rawResponse,
4016
- request,
4017
- onChunk,
4018
- onFinish,
4019
- onStepFinish,
4020
- rootSpan,
4021
- doStreamSpan,
4022
- telemetry,
4023
- startTimestampMs,
4024
- maxSteps,
4025
- continueSteps,
4026
- startStep,
4027
- modelId,
4028
- now: now2,
4029
- currentDate,
4030
- generateId: generateId3,
4031
- tools
4032
- }) {
4033
- this.rawWarnings = warnings;
4034
- this.rawResponse = rawResponse;
4035
- const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
4036
- this.usage = usagePromise;
4037
- const { resolve: resolveFinishReason, promise: finishReasonPromise } = createResolvablePromise();
4038
- this.finishReason = finishReasonPromise;
4039
- const { resolve: resolveText, promise: textPromise } = createResolvablePromise();
4040
- this.text = textPromise;
4041
- const { resolve: resolveToolCalls, promise: toolCallsPromise } = createResolvablePromise();
4042
- this.toolCalls = toolCallsPromise;
4043
- const { resolve: resolveToolResults, promise: toolResultsPromise } = createResolvablePromise();
4044
- this.toolResults = toolResultsPromise;
4045
- const { resolve: resolveSteps, promise: stepsPromise } = createResolvablePromise();
4046
- this.steps = stepsPromise;
4047
- const {
4048
- resolve: resolveProviderMetadata,
4049
- promise: providerMetadataPromise
4050
- } = createResolvablePromise();
4051
- this.experimental_providerMetadata = providerMetadataPromise;
4052
- const { resolve: resolveRequest, promise: requestPromise } = createResolvablePromise();
4053
- this.request = requestPromise;
4054
- const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
4055
- this.response = responsePromise;
4056
- const { resolve: resolveWarnings, promise: warningsPromise } = createResolvablePromise();
4057
- this.warnings = warningsPromise;
4058
- const {
4059
- stream: stitchableStream,
4060
- addStream,
4061
- close: closeStitchableStream
4062
- } = createStitchableStream();
4063
- this.originalStream = stitchableStream;
4064
- const stepResults = [];
4065
- const self = this;
4066
- function addStepStream({
4067
- stream: stream2,
4068
- startTimestamp,
4069
- doStreamSpan: doStreamSpan2,
4070
- currentStep,
4071
- responseMessages,
4072
- usage = {
4073
- promptTokens: 0,
4074
- completionTokens: 0,
4075
- totalTokens: 0
4076
- },
4077
- stepType,
4078
- previousStepText = "",
4079
- stepRequest,
4080
- hasLeadingWhitespace
4081
- }) {
4082
- const stepToolCalls = [];
4083
- const stepToolResults = [];
4084
- let stepFinishReason = "unknown";
4085
- let stepUsage = {
4086
- promptTokens: 0,
4087
- completionTokens: 0,
4088
- totalTokens: 0
4089
- };
4090
- let stepProviderMetadata;
4091
- let stepFirstChunk = true;
4092
- let stepText = "";
4093
- let fullStepText = stepType === "continue" ? previousStepText : "";
4094
- let stepLogProbs;
4095
- let stepResponse = {
4096
- id: generateId3(),
4097
- timestamp: currentDate(),
4098
- modelId
4099
- };
4100
- let chunkBuffer = "";
4101
- let chunkTextPublished = false;
4102
- let inWhitespacePrefix = true;
4103
- let hasWhitespaceSuffix = false;
4104
- async function publishTextChunk({
4105
- controller,
4106
- chunk
4107
- }) {
4108
- controller.enqueue(chunk);
4109
- stepText += chunk.textDelta;
4110
- fullStepText += chunk.textDelta;
4111
- chunkTextPublished = true;
4112
- hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
4113
- await (onChunk == null ? void 0 : onChunk({ chunk }));
4114
- }
4115
- addStream(
4116
- stream2.pipeThrough(
4117
- new TransformStream({
4118
- async transform(chunk, controller) {
4119
- var _a11, _b, _c;
4120
- if (stepFirstChunk) {
4121
- const msToFirstChunk = now2() - startTimestamp;
4122
- stepFirstChunk = false;
4123
- doStreamSpan2.addEvent("ai.stream.firstChunk", {
4124
- "ai.response.msToFirstChunk": msToFirstChunk
4125
- });
4126
- doStreamSpan2.setAttributes({
4127
- "ai.response.msToFirstChunk": msToFirstChunk
4128
- });
4129
- }
4130
- if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
4131
- return;
4132
- }
4133
- const chunkType = chunk.type;
4134
- switch (chunkType) {
4135
- case "text-delta": {
4136
- if (continueSteps) {
4137
- const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
4138
- if (trimmedChunkText.length === 0) {
4085
+ const {
4086
+ result: { stream, warnings, rawResponse, request },
4087
+ doStreamSpan,
4088
+ startTimestampMs
4089
+ } = await startStep({ responseMessages: [] });
4090
+ function addStepStream({
4091
+ stream: stream2,
4092
+ startTimestamp,
4093
+ doStreamSpan: doStreamSpan2,
4094
+ currentStep,
4095
+ responseMessages,
4096
+ usage = {
4097
+ promptTokens: 0,
4098
+ completionTokens: 0,
4099
+ totalTokens: 0
4100
+ },
4101
+ stepType,
4102
+ previousStepText = "",
4103
+ stepRequest,
4104
+ hasLeadingWhitespace,
4105
+ warnings: warnings2,
4106
+ response
4107
+ }) {
4108
+ const stepToolCalls = [];
4109
+ const stepToolResults = [];
4110
+ let stepFinishReason = "unknown";
4111
+ let stepUsage = {
4112
+ promptTokens: 0,
4113
+ completionTokens: 0,
4114
+ totalTokens: 0
4115
+ };
4116
+ let stepProviderMetadata;
4117
+ let stepFirstChunk = true;
4118
+ let stepText = "";
4119
+ let fullStepText = stepType === "continue" ? previousStepText : "";
4120
+ let stepLogProbs;
4121
+ let stepResponse = {
4122
+ id: generateId3(),
4123
+ timestamp: currentDate(),
4124
+ modelId: model.modelId
4125
+ };
4126
+ let chunkBuffer = "";
4127
+ let chunkTextPublished = false;
4128
+ let inWhitespacePrefix = true;
4129
+ let hasWhitespaceSuffix = false;
4130
+ async function publishTextChunk({
4131
+ controller,
4132
+ chunk
4133
+ }) {
4134
+ controller.enqueue(chunk);
4135
+ stepText += chunk.textDelta;
4136
+ fullStepText += chunk.textDelta;
4137
+ chunkTextPublished = true;
4138
+ hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
4139
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
4140
+ }
4141
+ self.stitchableStream.addStream(
4142
+ stream2.pipeThrough(
4143
+ new TransformStream({
4144
+ async transform(chunk, controller) {
4145
+ var _a11, _b, _c;
4146
+ if (stepFirstChunk) {
4147
+ const msToFirstChunk = now2() - startTimestamp;
4148
+ stepFirstChunk = false;
4149
+ doStreamSpan2.addEvent("ai.stream.firstChunk", {
4150
+ "ai.response.msToFirstChunk": msToFirstChunk
4151
+ });
4152
+ doStreamSpan2.setAttributes({
4153
+ "ai.response.msToFirstChunk": msToFirstChunk
4154
+ });
4155
+ }
4156
+ if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
4157
+ return;
4158
+ }
4159
+ const chunkType = chunk.type;
4160
+ switch (chunkType) {
4161
+ case "text-delta": {
4162
+ if (continueSteps) {
4163
+ const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
4164
+ if (trimmedChunkText.length === 0) {
4165
+ break;
4166
+ }
4167
+ inWhitespacePrefix = false;
4168
+ chunkBuffer += trimmedChunkText;
4169
+ const split = splitOnLastWhitespace(chunkBuffer);
4170
+ if (split != null) {
4171
+ chunkBuffer = split.suffix;
4172
+ await publishTextChunk({
4173
+ controller,
4174
+ chunk: {
4175
+ type: "text-delta",
4176
+ textDelta: split.prefix + split.whitespace
4177
+ }
4178
+ });
4179
+ }
4180
+ } else {
4181
+ await publishTextChunk({ controller, chunk });
4182
+ }
4139
4183
  break;
4140
4184
  }
4141
- inWhitespacePrefix = false;
4142
- chunkBuffer += trimmedChunkText;
4143
- const split = splitOnLastWhitespace(chunkBuffer);
4144
- if (split != null) {
4145
- chunkBuffer = split.suffix;
4146
- await publishTextChunk({
4147
- controller,
4148
- chunk: {
4149
- type: "text-delta",
4150
- textDelta: split.prefix + split.whitespace
4151
- }
4185
+ case "tool-call": {
4186
+ controller.enqueue(chunk);
4187
+ stepToolCalls.push(chunk);
4188
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
4189
+ break;
4190
+ }
4191
+ case "tool-result": {
4192
+ controller.enqueue(chunk);
4193
+ stepToolResults.push(chunk);
4194
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
4195
+ break;
4196
+ }
4197
+ case "response-metadata": {
4198
+ stepResponse = {
4199
+ id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
4200
+ timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
4201
+ modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
4202
+ };
4203
+ break;
4204
+ }
4205
+ case "finish": {
4206
+ stepUsage = chunk.usage;
4207
+ stepFinishReason = chunk.finishReason;
4208
+ stepProviderMetadata = chunk.experimental_providerMetadata;
4209
+ stepLogProbs = chunk.logprobs;
4210
+ const msToFinish = now2() - startTimestamp;
4211
+ doStreamSpan2.addEvent("ai.stream.finish");
4212
+ doStreamSpan2.setAttributes({
4213
+ "ai.response.msToFinish": msToFinish,
4214
+ "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
4152
4215
  });
4216
+ break;
4153
4217
  }
4154
- } else {
4155
- await publishTextChunk({ controller, chunk });
4156
- }
4157
- break;
4158
- }
4159
- case "tool-call": {
4160
- controller.enqueue(chunk);
4161
- stepToolCalls.push(chunk);
4162
- await (onChunk == null ? void 0 : onChunk({ chunk }));
4163
- break;
4164
- }
4165
- case "tool-result": {
4166
- controller.enqueue(chunk);
4167
- stepToolResults.push(chunk);
4168
- await (onChunk == null ? void 0 : onChunk({ chunk }));
4169
- break;
4170
- }
4171
- case "response-metadata": {
4172
- stepResponse = {
4173
- id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
4174
- timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
4175
- modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
4176
- };
4177
- break;
4178
- }
4179
- case "finish": {
4180
- stepUsage = chunk.usage;
4181
- stepFinishReason = chunk.finishReason;
4182
- stepProviderMetadata = chunk.experimental_providerMetadata;
4183
- stepLogProbs = chunk.logprobs;
4184
- const msToFinish = now2() - startTimestamp;
4185
- doStreamSpan2.addEvent("ai.stream.finish");
4186
- doStreamSpan2.setAttributes({
4187
- "ai.response.msToFinish": msToFinish,
4188
- "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
4189
- });
4190
- break;
4191
- }
4192
- case "tool-call-streaming-start":
4193
- case "tool-call-delta": {
4194
- controller.enqueue(chunk);
4195
- await (onChunk == null ? void 0 : onChunk({ chunk }));
4196
- break;
4197
- }
4198
- case "error": {
4199
- controller.enqueue(chunk);
4200
- stepFinishReason = "error";
4201
- break;
4202
- }
4203
- default: {
4204
- const exhaustiveCheck = chunkType;
4205
- throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
4206
- }
4207
- }
4208
- },
4209
- // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
4210
- async flush(controller) {
4211
- var _a11, _b;
4212
- const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
4213
- let nextStepType = "done";
4214
- if (currentStep + 1 < maxSteps) {
4215
- if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
4216
- stepToolCalls.length === 0) {
4217
- nextStepType = "continue";
4218
- } else if (
4219
- // there are tool calls:
4220
- stepToolCalls.length > 0 && // all current tool calls have results:
4221
- stepToolResults.length === stepToolCalls.length
4222
- ) {
4223
- nextStepType = "tool-result";
4224
- }
4225
- }
4226
- if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
4227
- stepType === "continue" && !chunkTextPublished)) {
4228
- await publishTextChunk({
4229
- controller,
4230
- chunk: {
4231
- type: "text-delta",
4232
- textDelta: chunkBuffer
4233
- }
4234
- });
4235
- chunkBuffer = "";
4236
- }
4237
- try {
4238
- doStreamSpan2.setAttributes(
4239
- selectTelemetryAttributes({
4240
- telemetry,
4241
- attributes: {
4242
- "ai.response.finishReason": stepFinishReason,
4243
- "ai.response.text": { output: () => stepText },
4244
- "ai.response.toolCalls": {
4245
- output: () => stepToolCallsJson
4246
- },
4247
- "ai.response.id": stepResponse.id,
4248
- "ai.response.model": stepResponse.modelId,
4249
- "ai.response.timestamp": stepResponse.timestamp.toISOString(),
4250
- "ai.usage.promptTokens": stepUsage.promptTokens,
4251
- "ai.usage.completionTokens": stepUsage.completionTokens,
4252
- // standardized gen-ai llm span attributes:
4253
- "gen_ai.response.finish_reasons": [stepFinishReason],
4254
- "gen_ai.response.id": stepResponse.id,
4255
- "gen_ai.response.model": stepResponse.modelId,
4256
- "gen_ai.usage.input_tokens": stepUsage.promptTokens,
4257
- "gen_ai.usage.output_tokens": stepUsage.completionTokens
4218
+ case "tool-call-streaming-start":
4219
+ case "tool-call-delta": {
4220
+ controller.enqueue(chunk);
4221
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
4222
+ break;
4258
4223
  }
4259
- })
4260
- );
4261
- } catch (error) {
4262
- } finally {
4263
- doStreamSpan2.end();
4264
- }
4265
- controller.enqueue({
4266
- type: "step-finish",
4267
- finishReason: stepFinishReason,
4268
- usage: stepUsage,
4269
- experimental_providerMetadata: stepProviderMetadata,
4270
- logprobs: stepLogProbs,
4271
- response: {
4272
- ...stepResponse
4224
+ case "error": {
4225
+ controller.enqueue(chunk);
4226
+ stepFinishReason = "error";
4227
+ break;
4228
+ }
4229
+ default: {
4230
+ const exhaustiveCheck = chunkType;
4231
+ throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
4232
+ }
4233
+ }
4273
4234
  },
4274
- isContinued: nextStepType === "continue"
4275
- });
4276
- if (stepType === "continue") {
4277
- const lastMessage = responseMessages[responseMessages.length - 1];
4278
- if (typeof lastMessage.content === "string") {
4279
- lastMessage.content += stepText;
4280
- } else {
4281
- lastMessage.content.push({
4282
- text: stepText,
4283
- type: "text"
4235
+ // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
4236
+ async flush(controller) {
4237
+ const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
4238
+ let nextStepType = "done";
4239
+ if (currentStep + 1 < maxSteps) {
4240
+ if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
4241
+ stepToolCalls.length === 0) {
4242
+ nextStepType = "continue";
4243
+ } else if (
4244
+ // there are tool calls:
4245
+ stepToolCalls.length > 0 && // all current tool calls have results:
4246
+ stepToolResults.length === stepToolCalls.length
4247
+ ) {
4248
+ nextStepType = "tool-result";
4249
+ }
4250
+ }
4251
+ if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
4252
+ stepType === "continue" && !chunkTextPublished)) {
4253
+ await publishTextChunk({
4254
+ controller,
4255
+ chunk: {
4256
+ type: "text-delta",
4257
+ textDelta: chunkBuffer
4258
+ }
4259
+ });
4260
+ chunkBuffer = "";
4261
+ }
4262
+ try {
4263
+ doStreamSpan2.setAttributes(
4264
+ selectTelemetryAttributes({
4265
+ telemetry,
4266
+ attributes: {
4267
+ "ai.response.finishReason": stepFinishReason,
4268
+ "ai.response.text": { output: () => stepText },
4269
+ "ai.response.toolCalls": {
4270
+ output: () => stepToolCallsJson
4271
+ },
4272
+ "ai.response.id": stepResponse.id,
4273
+ "ai.response.model": stepResponse.modelId,
4274
+ "ai.response.timestamp": stepResponse.timestamp.toISOString(),
4275
+ "ai.usage.promptTokens": stepUsage.promptTokens,
4276
+ "ai.usage.completionTokens": stepUsage.completionTokens,
4277
+ // standardized gen-ai llm span attributes:
4278
+ "gen_ai.response.finish_reasons": [stepFinishReason],
4279
+ "gen_ai.response.id": stepResponse.id,
4280
+ "gen_ai.response.model": stepResponse.modelId,
4281
+ "gen_ai.usage.input_tokens": stepUsage.promptTokens,
4282
+ "gen_ai.usage.output_tokens": stepUsage.completionTokens
4283
+ }
4284
+ })
4285
+ );
4286
+ } catch (error) {
4287
+ } finally {
4288
+ doStreamSpan2.end();
4289
+ }
4290
+ controller.enqueue({
4291
+ type: "step-finish",
4292
+ finishReason: stepFinishReason,
4293
+ usage: stepUsage,
4294
+ experimental_providerMetadata: stepProviderMetadata,
4295
+ logprobs: stepLogProbs,
4296
+ response: {
4297
+ ...stepResponse
4298
+ },
4299
+ isContinued: nextStepType === "continue"
4284
4300
  });
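
Note: the nextStepType logic in flush above decides how the multi-step loop proceeds: within the step budget, a step that stopped for "length" with no tool calls continues the text ("continue"), a step whose tool calls all have results triggers a follow-up generation ("tool-result"), and anything else ends the loop ("done"). The same decision restated as a standalone function (names mirror the locals above; this is a sketch, not an exported API):

type NextStepType = "done" | "continue" | "tool-result";

function decideNextStep(opts: {
  currentStep: number;
  maxSteps: number;
  continueSteps: boolean;
  stepFinishReason: string;
  toolCallCount: number;
  toolResultCount: number;
}): NextStepType {
  const { currentStep, maxSteps, continueSteps, stepFinishReason, toolCallCount, toolResultCount } = opts;
  if (currentStep + 1 >= maxSteps) return "done"; // step budget exhausted
  if (continueSteps && stepFinishReason === "length" && toolCallCount === 0) {
    return "continue"; // ran out of tokens mid-text: keep writing
  }
  if (toolCallCount > 0 && toolResultCount === toolCallCount) {
    return "tool-result"; // every tool call has a result: feed them back
  }
  return "done";
}
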
4285
- }
4286
- } else {
4287
- responseMessages.push(
4288
- ...toResponseMessages({
4301
+ if (stepType === "continue") {
4302
+ const lastMessage = responseMessages[responseMessages.length - 1];
4303
+ if (typeof lastMessage.content === "string") {
4304
+ lastMessage.content += stepText;
4305
+ } else {
4306
+ lastMessage.content.push({
4307
+ text: stepText,
4308
+ type: "text"
4309
+ });
4310
+ }
4311
+ } else {
4312
+ responseMessages.push(
4313
+ ...toResponseMessages({
4314
+ text: stepText,
4315
+ tools: tools != null ? tools : {},
4316
+ toolCalls: stepToolCalls,
4317
+ toolResults: stepToolResults
4318
+ })
4319
+ );
4320
+ }
4321
+ const currentStepResult = {
4322
+ stepType,
4289
4323
  text: stepText,
4290
- tools: tools != null ? tools : {},
4291
4324
  toolCalls: stepToolCalls,
4292
- toolResults: stepToolResults
4293
- })
4294
- );
4295
- }
4296
- const currentStepResult = {
4297
- stepType,
4298
- text: stepText,
4299
- toolCalls: stepToolCalls,
4300
- toolResults: stepToolResults,
4301
- finishReason: stepFinishReason,
4302
- usage: stepUsage,
4303
- warnings: self.rawWarnings,
4304
- logprobs: stepLogProbs,
4305
- request: stepRequest,
4306
- response: {
4307
- ...stepResponse,
4308
- headers: (_a11 = self.rawResponse) == null ? void 0 : _a11.headers,
4309
- // deep clone msgs to avoid mutating past messages in multi-step:
4310
- messages: JSON.parse(JSON.stringify(responseMessages))
4311
- },
4312
- experimental_providerMetadata: stepProviderMetadata,
4313
- isContinued: nextStepType === "continue"
4314
- };
4315
- stepResults.push(currentStepResult);
4316
- await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
4317
- const combinedUsage = {
4318
- promptTokens: usage.promptTokens + stepUsage.promptTokens,
4319
- completionTokens: usage.completionTokens + stepUsage.completionTokens,
4320
- totalTokens: usage.totalTokens + stepUsage.totalTokens
4321
- };
4322
- if (nextStepType !== "done") {
4323
- const {
4324
- result,
4325
- doStreamSpan: doStreamSpan3,
4326
- startTimestampMs: startTimestamp2
4327
- } = await startStep({ responseMessages });
4328
- self.rawWarnings = result.warnings;
4329
- self.rawResponse = result.rawResponse;
4330
- addStepStream({
4331
- stream: result.stream,
4332
- startTimestamp: startTimestamp2,
4333
- doStreamSpan: doStreamSpan3,
4334
- currentStep: currentStep + 1,
4335
- responseMessages,
4336
- usage: combinedUsage,
4337
- stepType: nextStepType,
4338
- previousStepText: fullStepText,
4339
- stepRequest: result.request,
4340
- hasLeadingWhitespace: hasWhitespaceSuffix
4341
- });
4342
- return;
4343
- }
4344
- try {
4345
- controller.enqueue({
4346
- type: "finish",
4347
- finishReason: stepFinishReason,
4348
- usage: combinedUsage,
4349
- experimental_providerMetadata: stepProviderMetadata,
4350
- logprobs: stepLogProbs,
4351
- response: {
4352
- ...stepResponse
4325
+ toolResults: stepToolResults,
4326
+ finishReason: stepFinishReason,
4327
+ usage: stepUsage,
4328
+ warnings: warnings2,
4329
+ logprobs: stepLogProbs,
4330
+ request: stepRequest,
4331
+ response: {
4332
+ ...stepResponse,
4333
+ headers: response == null ? void 0 : response.headers,
4334
+ // deep clone msgs to avoid mutating past messages in multi-step:
4335
+ messages: JSON.parse(JSON.stringify(responseMessages))
4336
+ },
4337
+ experimental_providerMetadata: stepProviderMetadata,
4338
+ isContinued: nextStepType === "continue"
4339
+ };
4340
+ stepResults.push(currentStepResult);
4341
+ await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
4342
+ const combinedUsage = {
4343
+ promptTokens: usage.promptTokens + stepUsage.promptTokens,
4344
+ completionTokens: usage.completionTokens + stepUsage.completionTokens,
4345
+ totalTokens: usage.totalTokens + stepUsage.totalTokens
4346
+ };
4347
+ if (nextStepType !== "done") {
4348
+ const {
4349
+ result,
4350
+ doStreamSpan: doStreamSpan3,
4351
+ startTimestampMs: startTimestamp2
4352
+ } = await startStep({ responseMessages });
4353
+ warnings2 = result.warnings;
4354
+ response = result.rawResponse;
4355
+ addStepStream({
4356
+ stream: result.stream,
4357
+ startTimestamp: startTimestamp2,
4358
+ doStreamSpan: doStreamSpan3,
4359
+ currentStep: currentStep + 1,
4360
+ responseMessages,
4361
+ usage: combinedUsage,
4362
+ stepType: nextStepType,
4363
+ previousStepText: fullStepText,
4364
+ stepRequest: result.request,
4365
+ hasLeadingWhitespace: hasWhitespaceSuffix,
4366
+ warnings: warnings2,
4367
+ response
4368
+ });
4369
+ return;
4353
4370
  }
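
Note: when nextStepType is not "done", the branch above starts the next model call and feeds its stream back through addStepStream, which appends it to the outer stitchable stream, so consumers read all steps as one uninterrupted stream. The contract relied on, in isolation (createStitchableStream is the internal helper introduced in this release; the string chunks and top-level await are illustrative):

const stitchable = createStitchableStream();

stitchable.addStream(
  new ReadableStream({
    start(c) {
      c.enqueue("step 1 ");
      c.close();
    },
  }),
);
stitchable.addStream(
  new ReadableStream({
    start(c) {
      c.enqueue("step 2");
      c.close();
    },
  }),
);
stitchable.close(); // no further inner streams

const reader = stitchable.stream.getReader();
for (;;) {
  const { value, done } = await reader.read();
  if (done) break;
  console.log(value); // "step 1 ", then "step 2"
}
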
4354
- });
4355
- closeStitchableStream();
4356
- rootSpan.setAttributes(
4357
- selectTelemetryAttributes({
4358
- telemetry,
4359
- attributes: {
4360
- "ai.response.finishReason": stepFinishReason,
4361
- "ai.response.text": { output: () => fullStepText },
4362
- "ai.response.toolCalls": {
4363
- output: () => stepToolCallsJson
4371
+ try {
4372
+ controller.enqueue({
4373
+ type: "finish",
4374
+ finishReason: stepFinishReason,
4375
+ usage: combinedUsage,
4376
+ experimental_providerMetadata: stepProviderMetadata,
4377
+ logprobs: stepLogProbs,
4378
+ response: {
4379
+ ...stepResponse
4380
+ }
4381
+ });
4382
+ self.stitchableStream.close();
4383
+ rootSpan.setAttributes(
4384
+ selectTelemetryAttributes({
4385
+ telemetry,
4386
+ attributes: {
4387
+ "ai.response.finishReason": stepFinishReason,
4388
+ "ai.response.text": { output: () => fullStepText },
4389
+ "ai.response.toolCalls": {
4390
+ output: () => stepToolCallsJson
4391
+ },
4392
+ "ai.usage.promptTokens": combinedUsage.promptTokens,
4393
+ "ai.usage.completionTokens": combinedUsage.completionTokens
4394
+ }
4395
+ })
4396
+ );
4397
+ self.usagePromise.resolve(combinedUsage);
4398
+ self.finishReasonPromise.resolve(stepFinishReason);
4399
+ self.textPromise.resolve(fullStepText);
4400
+ self.toolCallsPromise.resolve(stepToolCalls);
4401
+ self.providerMetadataPromise.resolve(stepProviderMetadata);
4402
+ self.toolResultsPromise.resolve(stepToolResults);
4403
+ self.requestPromise.resolve(stepRequest);
4404
+ self.responsePromise.resolve({
4405
+ ...stepResponse,
4406
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
4407
+ messages: responseMessages
4408
+ });
4409
+ self.stepsPromise.resolve(stepResults);
4410
+ self.warningsPromise.resolve(warnings2 != null ? warnings2 : []);
4411
+ await (onFinish == null ? void 0 : onFinish({
4412
+ finishReason: stepFinishReason,
4413
+ logprobs: stepLogProbs,
4414
+ usage: combinedUsage,
4415
+ text: fullStepText,
4416
+ toolCalls: stepToolCalls,
4417
+ // The tool results are inferred as a never[] type, because they are
4418
+ // optional and the execute method with an inferred result type is
4419
+ // optional as well. Therefore we need to cast the toolResults to any.
4420
+ // The type exposed to the users will be correctly inferred.
4421
+ toolResults: stepToolResults,
4422
+ request: stepRequest,
4423
+ response: {
4424
+ ...stepResponse,
4425
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
4426
+ messages: responseMessages
4364
4427
  },
4365
- "ai.usage.promptTokens": combinedUsage.promptTokens,
4366
- "ai.usage.completionTokens": combinedUsage.completionTokens
4367
- }
4368
- })
4369
- );
4370
- resolveUsage(combinedUsage);
4371
- resolveFinishReason(stepFinishReason);
4372
- resolveText(fullStepText);
4373
- resolveToolCalls(stepToolCalls);
4374
- resolveProviderMetadata(stepProviderMetadata);
4375
- resolveToolResults(stepToolResults);
4376
- resolveRequest(stepRequest);
4377
- resolveResponse({
4378
- ...stepResponse,
4379
- headers: rawResponse == null ? void 0 : rawResponse.headers,
4380
- messages: responseMessages
4381
- });
4382
- resolveSteps(stepResults);
4383
- resolveWarnings((_b = self.rawWarnings) != null ? _b : []);
4384
- await (onFinish == null ? void 0 : onFinish({
4385
- finishReason: stepFinishReason,
4386
- logprobs: stepLogProbs,
4387
- usage: combinedUsage,
4388
- text: fullStepText,
4389
- toolCalls: stepToolCalls,
4390
- // The tool results are inferred as a never[] type, because they are
4391
- // optional and the execute method with an inferred result type is
4392
- // optional as well. Therefore we need to cast the toolResults to any.
4393
- // The type exposed to the users will be correctly inferred.
4394
- toolResults: stepToolResults,
4395
- request: stepRequest,
4396
- response: {
4397
- ...stepResponse,
4398
- headers: rawResponse == null ? void 0 : rawResponse.headers,
4399
- messages: responseMessages
4400
- },
4401
- warnings,
4402
- experimental_providerMetadata: stepProviderMetadata,
4403
- steps: stepResults
4404
- }));
4405
- } catch (error) {
4406
- controller.error(error);
4407
- } finally {
4408
- rootSpan.end();
4409
- }
4410
- }
4411
- })
4412
- )
4428
+ warnings: warnings2,
4429
+ experimental_providerMetadata: stepProviderMetadata,
4430
+ steps: stepResults
4431
+ }));
4432
+ } catch (error) {
4433
+ controller.error(error);
4434
+ } finally {
4435
+ rootSpan.end();
4436
+ }
4437
+ }
4438
+ })
4439
+ )
4440
+ );
4441
+ }
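
Note: once the final step flushes, the code above enqueues the terminal "finish" part, closes the stitchable stream, resolves every deferred accessor, and invokes onFinish with the last step's finish reason and metadata, the across-steps combined usage, and the full concatenated text. From the consumer side that looks roughly like the following (model, prompt, and the openai provider import are placeholders; onFinish only fires once the stream has been consumed):

import { streamText } from "ai";
import { openai } from "@ai-sdk/openai"; // illustrative provider

const result = await streamText({
  model: openai("gpt-4o-mini"),
  prompt: "What is the weather in Berlin?",
  maxSteps: 3, // allow tool-result follow-up steps
  onFinish({ text, usage, finishReason, steps }) {
    // usage is summed across all steps; text is the full step text.
    console.log(finishReason, usage.totalTokens, steps.length);
  },
});

// Consuming the stream drives the transform/flush pipeline above.
for await (const delta of result.textStream) {
  process.stdout.write(delta);
}
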
4442
+ addStepStream({
4443
+ stream,
4444
+ startTimestamp: startTimestampMs,
4445
+ doStreamSpan,
4446
+ currentStep: 0,
4447
+ responseMessages: [],
4448
+ usage: void 0,
4449
+ stepType: "initial",
4450
+ stepRequest: request,
4451
+ hasLeadingWhitespace: false,
4452
+ warnings,
4453
+ response: rawResponse
4454
+ });
4455
+ }
4456
+ }).catch((error) => {
4457
+ self.stitchableStream.addStream(
4458
+ new ReadableStream({
4459
+ start(controller) {
4460
+ controller.error(error);
4461
+ }
4462
+ })
4413
4463
  );
4414
- }
4415
- addStepStream({
4416
- stream,
4417
- startTimestamp: startTimestampMs,
4418
- doStreamSpan,
4419
- currentStep: 0,
4420
- responseMessages: [],
4421
- usage: void 0,
4422
- stepType: "initial",
4423
- stepRequest: request,
4424
- hasLeadingWhitespace: false
4464
+ self.stitchableStream.close();
4425
4465
  });
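
Note: if the asynchronous setup rejects, the catch handler above converts the failure into stream semantics: it appends an inner stream whose only action is to error its controller, then closes the stitchable stream, so consumers observe a stream error rather than an unhandled rejection. The pattern in isolation:

// Turns a rejected setup into an erroring inner stream.
function errorToStream(error: unknown): ReadableStream<never> {
  return new ReadableStream({
    start(controller) {
      controller.error(error);
    },
  });
}

// e.g.: stitchable.addStream(errorToStream(new Error("model call failed")));
//       stitchable.close();
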
4426
4466
  }
4467
+ get warnings() {
4468
+ return this.warningsPromise.value;
4469
+ }
4470
+ get usage() {
4471
+ return this.usagePromise.value;
4472
+ }
4473
+ get finishReason() {
4474
+ return this.finishReasonPromise.value;
4475
+ }
4476
+ get experimental_providerMetadata() {
4477
+ return this.providerMetadataPromise.value;
4478
+ }
4479
+ get text() {
4480
+ return this.textPromise.value;
4481
+ }
4482
+ get toolCalls() {
4483
+ return this.toolCallsPromise.value;
4484
+ }
4485
+ get toolResults() {
4486
+ return this.toolResultsPromise.value;
4487
+ }
4488
+ get request() {
4489
+ return this.requestPromise.value;
4490
+ }
4491
+ get response() {
4492
+ return this.responsePromise.value;
4493
+ }
4494
+ get steps() {
4495
+ return this.stepsPromise.value;
4496
+ }
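
Note: the getters above replace the resolveUsage/resolveText-style closures from the removed code: each public field is now backed by a deferred promise on the instance, and the getter exposes that promise, so results can be awaited at any point and settle when flush resolves them. A stripped-down sketch of the pattern (Deferred is a stand-in for the package's internal DelayedPromise, not its actual implementation):

class Deferred<T> {
  readonly value: Promise<T>;
  resolve!: (v: T) => void;
  constructor() {
    this.value = new Promise<T>((res) => (this.resolve = res));
  }
}

class Result {
  private usagePromise = new Deferred<{ totalTokens: number }>();

  get usage() {
    // Awaitable immediately; settles when the stream's flush resolves it.
    return this.usagePromise.value;
  }

  onStreamFlush(usage: { totalTokens: number }) {
    this.usagePromise.resolve(usage);
  }
}
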
4427
4497
  /**
4428
4498
  Split out a new stream from the original stream.
4429
4499
  The original stream is replaced to allow for further splitting,
@@ -4433,8 +4503,8 @@ var DefaultStreamTextResult = class {
4433
4503
  However, the LLM results are expected to be small enough to not cause issues.
4434
4504
  */
4435
4505
  teeStream() {
4436
- const [stream1, stream2] = this.originalStream.tee();
4437
- this.originalStream = stream2;
4506
+ const [stream1, stream2] = this.stitchableStream.stream.tee();
4507
+ this.stitchableStream.stream = stream2;
4438
4508
  return stream1;
4439
4509
  }
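
Note: teeStream now splits off the stitchable stream instead of the removed originalStream field, but the mechanics are unchanged: tee() yields two branches, one is handed to the caller and the other replaces the stored stream so it can be split again; as the doc comment notes, buffering for the slower consumer is acceptable because LLM results are small. The tee-and-replace pattern in isolation:

let source = new ReadableStream<string>({
  start(c) {
    c.enqueue("hello");
    c.close();
  },
});

function splitOff(): ReadableStream<string> {
  const [branch, rest] = source.tee();
  source = rest; // keep a branch so the stream can be split again
  return branch;
}

// Each consumer gets an independent copy of all remaining chunks:
const a = splitOff();
const b = splitOff();
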
4440
4510
  get textStream() {