ai 4.0.0-canary.10 → 4.0.0-canary.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -53,14 +53,16 @@ __export(streams_exports, {
  experimental_createProviderRegistry: () => experimental_createProviderRegistry,
  experimental_customProvider: () => experimental_customProvider,
  experimental_wrapLanguageModel: () => experimental_wrapLanguageModel,
- formatStreamPart: () => import_ui_utils10.formatStreamPart,
+ formatAssistantStreamPart: () => import_ui_utils10.formatAssistantStreamPart,
+ formatDataStreamPart: () => import_ui_utils10.formatDataStreamPart,
  generateId: () => import_provider_utils11.generateId,
  generateObject: () => generateObject,
  generateText: () => generateText,
  jsonSchema: () => import_ui_utils7.jsonSchema,
- parseStreamPart: () => import_ui_utils10.parseStreamPart,
- processDataProtocolResponse: () => import_ui_utils10.processDataProtocolResponse,
- readDataStream: () => import_ui_utils10.readDataStream,
+ parseAssistantStreamPart: () => import_ui_utils10.parseAssistantStreamPart,
+ parseDataStreamPart: () => import_ui_utils10.parseDataStreamPart,
+ processDataStream: () => import_ui_utils10.processDataStream,
+ processTextStream: () => import_ui_utils10.processTextStream,
  streamObject: () => streamObject,
  streamText: () => streamText,
  tool: () => tool
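
Note: the stream part helpers exported above are split by protocol in this release: formatStreamPart/parseStreamPart become assistant-stream and data-stream variants, and processDataProtocolResponse/readDataStream give way to processDataStream/processTextStream. A minimal migration sketch, kept at the import level because the call signatures are not part of this diff:

  // before (4.0.0-canary.10):
  // import { formatStreamPart, parseStreamPart, readDataStream } from "ai";
  // after (4.0.0-canary.12), pick the variant for the protocol in use:
  import {
    formatAssistantStreamPart,
    formatDataStreamPart,
    parseAssistantStreamPart,
    parseDataStreamPart,
    processDataStream, // replaces processDataProtocolResponse/readDataStream
    processTextStream,
  } from "ai";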
@@ -2338,21 +2340,6 @@ var DefaultGenerateObjectResult = class {
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
  var import_ui_utils2 = require("@ai-sdk/ui-utils");

- // util/create-resolvable-promise.ts
- function createResolvablePromise() {
- let resolve;
- let reject;
- const promise = new Promise((res, rej) => {
- resolve = res;
- reject = rej;
- });
- return {
- promise,
- resolve,
- reject
- };
- }
-
  // util/delayed-promise.ts
  var DelayedPromise = class {
  constructor() {
@@ -2444,9 +2431,91 @@ function writeToServerResponse({
  read();
  }

+ // util/create-resolvable-promise.ts
+ function createResolvablePromise() {
+ let resolve;
+ let reject;
+ const promise = new Promise((res, rej) => {
+ resolve = res;
+ reject = rej;
+ });
+ return {
+ promise,
+ resolve,
+ reject
+ };
+ }
+
+ // core/util/create-stitchable-stream.ts
+ function createStitchableStream() {
+ let innerStreamReaders = [];
+ let controller = null;
+ let isClosed = false;
+ let waitForNewStream = createResolvablePromise();
+ const processPull = async () => {
+ if (isClosed && innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ return;
+ }
+ if (innerStreamReaders.length === 0) {
+ waitForNewStream = createResolvablePromise();
+ await waitForNewStream.promise;
+ return processPull();
+ }
+ try {
+ const { value, done } = await innerStreamReaders[0].read();
+ if (done) {
+ innerStreamReaders.shift();
+ if (innerStreamReaders.length > 0) {
+ await processPull();
+ } else if (isClosed) {
+ controller == null ? void 0 : controller.close();
+ }
+ } else {
+ controller == null ? void 0 : controller.enqueue(value);
+ }
+ } catch (error) {
+ controller == null ? void 0 : controller.error(error);
+ innerStreamReaders.shift();
+ if (isClosed && innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ }
+ }
+ };
+ return {
+ stream: new ReadableStream({
+ start(controllerParam) {
+ controller = controllerParam;
+ },
+ pull: processPull,
+ async cancel() {
+ for (const reader of innerStreamReaders) {
+ await reader.cancel();
+ }
+ innerStreamReaders = [];
+ isClosed = true;
+ }
+ }),
+ addStream: (innerStream) => {
+ if (isClosed) {
+ throw new Error("Cannot add inner stream: outer stream is closed");
+ }
+ innerStreamReaders.push(innerStream.getReader());
+ waitForNewStream.resolve();
+ },
+ close: () => {
+ isClosed = true;
+ waitForNewStream.resolve();
+ if (innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ }
+ }
+ };
+ }
+
  // core/generate-object/stream-object.ts
  var originalGenerateId2 = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj", size: 24 });
- async function streamObject({
+ function streamObject({
  model,
  schema: inputSchema,
  schemaName,
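
Note: this is not a pure move of the createStitchableStream deleted further down. The relocated copy adds the waitForNewStream resolvable promise, so pull now parks until addStream or close wakes it, instead of returning while no inner stream is queued. A minimal usage sketch of the helper exactly as defined above (assuming a runtime such as Node 18+ where ReadableStream is async-iterable):

  const { stream, addStream, close } = createStitchableStream();
  // inner streams are drained strictly in insertion order
  addStream(new ReadableStream({ start(c) { c.enqueue("a"); c.close(); } }));
  addStream(new ReadableStream({ start(c) { c.enqueue("b"); c.close(); } }));
  close(); // no further inner streams; the outer stream ends once both drain
  for await (const chunk of stream) {
    console.log(chunk); // "a", then "b"
  }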
@@ -2480,400 +2549,433 @@ async function streamObject({
  if (outputStrategy.type === "no-schema" && mode === void 0) {
  mode = "json";
  }
- const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ return new DefaultStreamObjectResult({
  model,
  telemetry,
  headers,
- settings: { ...settings, maxRetries }
- });
- const tracer = getTracer(telemetry);
- const retry = retryWithExponentialBackoff({ maxRetries });
- return recordSpan({
- name: "ai.streamObject",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.streamObject",
- telemetry
- }),
- ...baseTelemetryAttributes,
- // specific settings that only make sense on the outer level:
- "ai.prompt": {
- input: () => JSON.stringify({ system, prompt, messages })
- },
- "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
- "ai.schema.name": schemaName,
- "ai.schema.description": schemaDescription,
- "ai.settings.output": outputStrategy.type,
- "ai.settings.mode": mode
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (rootSpan) => {
- if (mode === "auto" || mode == null) {
- mode = model.defaultObjectGenerationMode;
- }
- let callOptions;
- let transformer;
- switch (mode) {
- case "json": {
- const standardizedPrompt = standardizePrompt({
- prompt: {
- system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
- prompt: system,
- schema: outputStrategy.jsonSchema
- }),
- prompt,
- messages
- },
- tools: void 0
- });
- callOptions = {
- mode: {
- type: "object-json",
- schema: outputStrategy.jsonSchema,
- name: schemaName,
- description: schemaDescription
- },
- ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
- prompt: await convertToLanguageModelPrompt({
- prompt: standardizedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
- }),
- providerMetadata,
- abortSignal,
- headers
- };
- transformer = {
- transform: (chunk, controller) => {
- switch (chunk.type) {
- case "text-delta":
- controller.enqueue(chunk.textDelta);
- break;
- case "response-metadata":
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
- }
- }
- };
- break;
- }
- case "tool": {
- const standardizedPrompt = standardizePrompt({
- prompt: { system, prompt, messages },
- tools: void 0
- });
- callOptions = {
- mode: {
- type: "object-tool",
- tool: {
- type: "function",
- name: schemaName != null ? schemaName : "json",
- description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
- parameters: outputStrategy.jsonSchema
- }
- },
- ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
- prompt: await convertToLanguageModelPrompt({
- prompt: standardizedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
- }),
- providerMetadata,
- abortSignal,
- headers
- };
- transformer = {
- transform(chunk, controller) {
- switch (chunk.type) {
- case "tool-call-delta":
- controller.enqueue(chunk.argsTextDelta);
- break;
- case "response-metadata":
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
- }
- }
- };
- break;
- }
- case void 0: {
- throw new Error(
- "Model does not have a default object generation mode."
- );
- }
- default: {
- const _exhaustiveCheck = mode;
- throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
- }
- }
- const {
- result: { stream, warnings, rawResponse, request },
- doStreamSpan,
- startTimestampMs
- } = await retry(
- () => recordSpan({
- name: "ai.streamObject.doStream",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.streamObject.doStream",
- telemetry
- }),
- ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => callOptions.inputFormat
- },
- "ai.prompt.messages": {
- input: () => JSON.stringify(callOptions.prompt)
- },
- "ai.settings.mode": mode,
- // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
- "gen_ai.request.presence_penalty": settings.presencePenalty,
- "gen_ai.request.temperature": settings.temperature,
- "gen_ai.request.top_k": settings.topK,
- "gen_ai.request.top_p": settings.topP
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (doStreamSpan2) => ({
- startTimestampMs: now2(),
- doStreamSpan: doStreamSpan2,
- result: await model.doStream(callOptions)
- })
- })
- );
- return new DefaultStreamObjectResult({
- outputStrategy,
- stream: stream.pipeThrough(new TransformStream(transformer)),
- warnings,
- rawResponse,
- request: request != null ? request : {},
- onFinish,
- rootSpan,
- doStreamSpan,
- telemetry,
- startTimestampMs,
- modelId: model.modelId,
- now: now2,
- currentDate,
- generateId: generateId3
- });
- }
+ settings,
+ maxRetries,
+ abortSignal,
+ outputStrategy,
+ system,
+ prompt,
+ messages,
+ schemaName,
+ schemaDescription,
+ inputProviderMetadata: providerMetadata,
+ mode,
+ onFinish,
+ generateId: generateId3,
+ currentDate,
+ now: now2
  });
  }
  var DefaultStreamObjectResult = class {
  constructor({
- stream,
- warnings,
- rawResponse,
- request,
+ model,
+ headers,
+ telemetry,
+ settings,
+ maxRetries,
+ abortSignal,
  outputStrategy,
+ system,
+ prompt,
+ messages,
+ schemaName,
+ schemaDescription,
+ inputProviderMetadata,
+ mode,
  onFinish,
- rootSpan,
- doStreamSpan,
- telemetry,
- startTimestampMs,
- modelId,
- now: now2,
+ generateId: generateId3,
  currentDate,
- generateId: generateId3
+ now: now2
  }) {
- this.warnings = warnings;
- this.outputStrategy = outputStrategy;
- this.request = Promise.resolve(request);
  this.objectPromise = new DelayedPromise();
- const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
- this.usage = usagePromise;
- const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
- this.response = responsePromise;
- const {
- resolve: resolveProviderMetadata,
- promise: providerMetadataPromise
- } = createResolvablePromise();
- this.experimental_providerMetadata = providerMetadataPromise;
- let usage;
- let finishReason;
- let providerMetadata;
- let object;
- let error;
- let accumulatedText = "";
- let textDelta = "";
- let response = {
- id: generateId3(),
- timestamp: currentDate(),
- modelId
- };
- let latestObjectJson = void 0;
- let latestObject = void 0;
- let isFirstChunk = true;
- let isFirstDelta = true;
+ this.usagePromise = new DelayedPromise();
+ this.providerMetadataPromise = new DelayedPromise();
+ this.warningsPromise = new DelayedPromise();
+ this.requestPromise = new DelayedPromise();
+ this.responsePromise = new DelayedPromise();
+ this.stitchableStream = createStitchableStream();
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ model,
+ telemetry,
+ headers,
+ settings: { ...settings, maxRetries }
+ });
+ const tracer = getTracer(telemetry);
+ const retry = retryWithExponentialBackoff({ maxRetries });
  const self = this;
- this.originalStream = stream.pipeThrough(
- new TransformStream({
- async transform(chunk, controller) {
- var _a11, _b, _c;
- if (isFirstChunk) {
- const msToFirstChunk = now2() - startTimestampMs;
- isFirstChunk = false;
- doStreamSpan.addEvent("ai.stream.firstChunk", {
- "ai.stream.msToFirstChunk": msToFirstChunk
+ recordSpan({
+ name: "ai.streamObject",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.streamObject",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.prompt": {
+ input: () => JSON.stringify({ system, prompt, messages })
+ },
+ "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+ "ai.schema.name": schemaName,
+ "ai.schema.description": schemaDescription,
+ "ai.settings.output": outputStrategy.type,
+ "ai.settings.mode": mode
+ }
+ }),
+ tracer,
+ endWhenDone: false,
+ fn: async (rootSpan) => {
+ if (mode === "auto" || mode == null) {
+ mode = model.defaultObjectGenerationMode;
+ }
+ let callOptions;
+ let transformer;
+ switch (mode) {
+ case "json": {
+ const standardizedPrompt = standardizePrompt({
+ prompt: {
+ system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
+ prompt: system,
+ schema: outputStrategy.jsonSchema
+ }),
+ prompt,
+ messages
+ },
+ tools: void 0
  });
- doStreamSpan.setAttributes({
- "ai.stream.msToFirstChunk": msToFirstChunk
+ callOptions = {
+ mode: {
+ type: "object-json",
+ schema: outputStrategy.jsonSchema,
+ name: schemaName,
+ description: schemaDescription
+ },
+ ...prepareCallSettings(settings),
+ inputFormat: standardizedPrompt.type,
+ prompt: await convertToLanguageModelPrompt({
+ prompt: standardizedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
+ }),
+ providerMetadata: inputProviderMetadata,
+ abortSignal,
+ headers
+ };
+ transformer = {
+ transform: (chunk, controller) => {
+ switch (chunk.type) {
+ case "text-delta":
+ controller.enqueue(chunk.textDelta);
+ break;
+ case "response-metadata":
+ case "finish":
+ case "error":
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ };
+ break;
+ }
+ case "tool": {
+ const standardizedPrompt = standardizePrompt({
+ prompt: { system, prompt, messages },
+ tools: void 0
  });
+ callOptions = {
+ mode: {
+ type: "object-tool",
+ tool: {
+ type: "function",
+ name: schemaName != null ? schemaName : "json",
+ description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
+ parameters: outputStrategy.jsonSchema
+ }
+ },
+ ...prepareCallSettings(settings),
+ inputFormat: standardizedPrompt.type,
+ prompt: await convertToLanguageModelPrompt({
+ prompt: standardizedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
+ }),
+ providerMetadata: inputProviderMetadata,
+ abortSignal,
+ headers
+ };
+ transformer = {
+ transform(chunk, controller) {
+ switch (chunk.type) {
+ case "tool-call-delta":
+ controller.enqueue(chunk.argsTextDelta);
+ break;
+ case "response-metadata":
+ case "finish":
+ case "error":
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ };
+ break;
  }
- if (typeof chunk === "string") {
- accumulatedText += chunk;
- textDelta += chunk;
- const { value: currentObjectJson, state: parseState } = (0, import_ui_utils2.parsePartialJson)(accumulatedText);
- if (currentObjectJson !== void 0 && !(0, import_ui_utils2.isDeepEqualData)(latestObjectJson, currentObjectJson)) {
- const validationResult = outputStrategy.validatePartialResult({
- value: currentObjectJson,
- textDelta,
- latestObject,
- isFirstDelta,
- isFinalDelta: parseState === "successful-parse"
- });
- if (validationResult.success && !(0, import_ui_utils2.isDeepEqualData)(latestObject, validationResult.value.partial)) {
- latestObjectJson = currentObjectJson;
- latestObject = validationResult.value.partial;
- controller.enqueue({
- type: "object",
- object: latestObject
+ case void 0: {
+ throw new Error(
+ "Model does not have a default object generation mode."
+ );
+ }
+ default: {
+ const _exhaustiveCheck = mode;
+ throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+ }
+ }
+ const {
+ result: { stream, warnings, rawResponse, request },
+ doStreamSpan,
+ startTimestampMs
+ } = await retry(
+ () => recordSpan({
+ name: "ai.streamObject.doStream",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.streamObject.doStream",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ "ai.prompt.format": {
+ input: () => callOptions.inputFormat
+ },
+ "ai.prompt.messages": {
+ input: () => JSON.stringify(callOptions.prompt)
+ },
+ "ai.settings.mode": mode,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+ "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
+ "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_k": settings.topK,
+ "gen_ai.request.top_p": settings.topP
+ }
+ }),
+ tracer,
+ endWhenDone: false,
+ fn: async (doStreamSpan2) => ({
+ startTimestampMs: now2(),
+ doStreamSpan: doStreamSpan2,
+ result: await model.doStream(callOptions)
+ })
+ })
+ );
+ self.requestPromise.resolve(request != null ? request : {});
+ let usage;
+ let finishReason;
+ let providerMetadata;
+ let object;
+ let error;
+ let accumulatedText = "";
+ let textDelta = "";
+ let response = {
+ id: generateId3(),
+ timestamp: currentDate(),
+ modelId: model.modelId
+ };
+ let latestObjectJson = void 0;
+ let latestObject = void 0;
+ let isFirstChunk = true;
+ let isFirstDelta = true;
+ const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
+ new TransformStream({
+ async transform(chunk, controller) {
+ var _a11, _b, _c;
+ if (isFirstChunk) {
+ const msToFirstChunk = now2() - startTimestampMs;
+ isFirstChunk = false;
+ doStreamSpan.addEvent("ai.stream.firstChunk", {
+ "ai.stream.msToFirstChunk": msToFirstChunk
  });
- controller.enqueue({
- type: "text-delta",
- textDelta: validationResult.value.textDelta
+ doStreamSpan.setAttributes({
+ "ai.stream.msToFirstChunk": msToFirstChunk
  });
- textDelta = "";
- isFirstDelta = false;
- }
- }
- return;
- }
- switch (chunk.type) {
- case "response-metadata": {
- response = {
- id: (_a11 = chunk.id) != null ? _a11 : response.id,
- timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
- modelId: (_c = chunk.modelId) != null ? _c : response.modelId
- };
- break;
- }
- case "finish": {
- if (textDelta !== "") {
- controller.enqueue({ type: "text-delta", textDelta });
  }
- finishReason = chunk.finishReason;
- usage = calculateLanguageModelUsage(chunk.usage);
- providerMetadata = chunk.providerMetadata;
- controller.enqueue({ ...chunk, usage, response });
- resolveUsage(usage);
- resolveProviderMetadata(providerMetadata);
- resolveResponse({
- ...response,
- headers: rawResponse == null ? void 0 : rawResponse.headers
- });
- const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
- if (validationResult.success) {
- object = validationResult.value;
- self.objectPromise.resolve(object);
- } else {
- error = validationResult.error;
- self.objectPromise.reject(error);
+ if (typeof chunk === "string") {
+ accumulatedText += chunk;
+ textDelta += chunk;
+ const { value: currentObjectJson, state: parseState } = (0, import_ui_utils2.parsePartialJson)(accumulatedText);
+ if (currentObjectJson !== void 0 && !(0, import_ui_utils2.isDeepEqualData)(latestObjectJson, currentObjectJson)) {
+ const validationResult = outputStrategy.validatePartialResult({
+ value: currentObjectJson,
+ textDelta,
+ latestObject,
+ isFirstDelta,
+ isFinalDelta: parseState === "successful-parse"
+ });
+ if (validationResult.success && !(0, import_ui_utils2.isDeepEqualData)(
+ latestObject,
+ validationResult.value.partial
+ )) {
+ latestObjectJson = currentObjectJson;
+ latestObject = validationResult.value.partial;
+ controller.enqueue({
+ type: "object",
+ object: latestObject
+ });
+ controller.enqueue({
+ type: "text-delta",
+ textDelta: validationResult.value.textDelta
+ });
+ textDelta = "";
+ isFirstDelta = false;
+ }
+ }
+ return;
  }
- break;
- }
- default: {
- controller.enqueue(chunk);
- break;
- }
- }
- },
- // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
- async flush(controller) {
- try {
- const finalUsage = usage != null ? usage : {
- promptTokens: NaN,
- completionTokens: NaN,
- totalTokens: NaN
- };
- doStreamSpan.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": finishReason,
- "ai.response.object": {
- output: () => JSON.stringify(object)
- },
- "ai.response.id": response.id,
- "ai.response.model": response.modelId,
- "ai.response.timestamp": response.timestamp.toISOString(),
- "ai.usage.promptTokens": finalUsage.promptTokens,
- "ai.usage.completionTokens": finalUsage.completionTokens,
- // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [finishReason],
- "gen_ai.response.id": response.id,
- "gen_ai.response.model": response.modelId,
- "gen_ai.usage.input_tokens": finalUsage.promptTokens,
- "gen_ai.usage.output_tokens": finalUsage.completionTokens
+ switch (chunk.type) {
+ case "response-metadata": {
+ response = {
+ id: (_a11 = chunk.id) != null ? _a11 : response.id,
+ timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
+ modelId: (_c = chunk.modelId) != null ? _c : response.modelId
+ };
+ break;
  }
- })
- );
- doStreamSpan.end();
- rootSpan.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.usage.promptTokens": finalUsage.promptTokens,
- "ai.usage.completionTokens": finalUsage.completionTokens,
- "ai.response.object": {
- output: () => JSON.stringify(object)
+ case "finish": {
+ if (textDelta !== "") {
+ controller.enqueue({ type: "text-delta", textDelta });
+ }
+ finishReason = chunk.finishReason;
+ usage = calculateLanguageModelUsage(chunk.usage);
+ providerMetadata = chunk.providerMetadata;
+ controller.enqueue({ ...chunk, usage, response });
+ self.usagePromise.resolve(usage);
+ self.providerMetadataPromise.resolve(providerMetadata);
+ self.responsePromise.resolve({
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ });
+ const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
+ if (validationResult.success) {
+ object = validationResult.value;
+ self.objectPromise.resolve(object);
+ } else {
+ error = validationResult.error;
+ self.objectPromise.reject(error);
  }
+ break;
  }
- })
- );
- await (onFinish == null ? void 0 : onFinish({
- usage: finalUsage,
- object,
- error,
- response: {
- ...response,
- headers: rawResponse == null ? void 0 : rawResponse.headers
- },
- warnings,
- experimental_providerMetadata: providerMetadata
- }));
- } catch (error2) {
- controller.error(error2);
- } finally {
- rootSpan.end();
+ default: {
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ },
+ // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+ async flush(controller) {
+ try {
+ const finalUsage = usage != null ? usage : {
+ promptTokens: NaN,
+ completionTokens: NaN,
+ totalTokens: NaN
+ };
+ doStreamSpan.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": finishReason,
+ "ai.response.object": {
+ output: () => JSON.stringify(object)
+ },
+ "ai.response.id": response.id,
+ "ai.response.model": response.modelId,
+ "ai.response.timestamp": response.timestamp.toISOString(),
+ "ai.usage.promptTokens": finalUsage.promptTokens,
+ "ai.usage.completionTokens": finalUsage.completionTokens,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [finishReason],
+ "gen_ai.response.id": response.id,
+ "gen_ai.response.model": response.modelId,
+ "gen_ai.usage.input_tokens": finalUsage.promptTokens,
+ "gen_ai.usage.output_tokens": finalUsage.completionTokens
+ }
+ })
+ );
+ doStreamSpan.end();
+ rootSpan.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.usage.promptTokens": finalUsage.promptTokens,
+ "ai.usage.completionTokens": finalUsage.completionTokens,
+ "ai.response.object": {
+ output: () => JSON.stringify(object)
+ }
+ }
+ })
+ );
+ await (onFinish == null ? void 0 : onFinish({
+ usage: finalUsage,
+ object,
+ error,
+ response: {
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ },
+ warnings,
+ experimental_providerMetadata: providerMetadata
+ }));
+ } catch (error2) {
+ controller.error(error2);
+ } finally {
+ rootSpan.end();
+ }
+ }
+ })
+ );
+ self.stitchableStream.addStream(transformedStream);
+ }
+ }).catch((error) => {
+ self.stitchableStream.addStream(
+ new ReadableStream({
+ start(controller) {
+ controller.error(error);
  }
- }
- })
- );
+ })
+ );
+ }).finally(() => {
+ self.stitchableStream.close();
+ });
+ this.outputStrategy = outputStrategy;
  }
  get object() {
  return this.objectPromise.value;
  }
+ get usage() {
+ return this.usagePromise.value;
+ }
+ get experimental_providerMetadata() {
+ return this.providerMetadataPromise.value;
+ }
+ get warnings() {
+ return this.warningsPromise.value;
+ }
+ get request() {
+ return this.requestPromise.value;
+ }
+ get response() {
+ return this.responsePromise.value;
+ }
  get partialObjectStream() {
- return createAsyncIterableStream(this.originalStream, {
+ return createAsyncIterableStream(this.stitchableStream.stream, {
  transform(chunk, controller) {
  switch (chunk.type) {
  case "object":
@@ -2894,10 +2996,12 @@ var DefaultStreamObjectResult = class {
  });
  }
  get elementStream() {
- return this.outputStrategy.createElementStream(this.originalStream);
+ return this.outputStrategy.createElementStream(
+ this.stitchableStream.stream
+ );
  }
  get textStream() {
- return createAsyncIterableStream(this.originalStream, {
+ return createAsyncIterableStream(this.stitchableStream.stream, {
  transform(chunk, controller) {
  switch (chunk.type) {
  case "text-delta":
@@ -2918,7 +3022,7 @@ var DefaultStreamObjectResult = class {
  });
  }
  get fullStream() {
- return createAsyncIterableStream(this.originalStream, {
+ return createAsyncIterableStream(this.stitchableStream.stream, {
  transform(chunk, controller) {
  controller.enqueue(chunk);
  }
@@ -3509,68 +3613,6 @@ var DefaultGenerateTextResult = class {
  var import_provider_utils9 = require("@ai-sdk/provider-utils");
  var import_ui_utils6 = require("@ai-sdk/ui-utils");

- // core/util/create-stitchable-stream.ts
- function createStitchableStream() {
- let innerStreamReaders = [];
- let controller = null;
- let isClosed = false;
- const processPull = async () => {
- if (isClosed && innerStreamReaders.length === 0) {
- controller == null ? void 0 : controller.close();
- return;
- }
- if (innerStreamReaders.length === 0) {
- return;
- }
- try {
- const { value, done } = await innerStreamReaders[0].read();
- if (done) {
- innerStreamReaders.shift();
- if (innerStreamReaders.length > 0) {
- await processPull();
- } else if (isClosed) {
- controller == null ? void 0 : controller.close();
- }
- } else {
- controller == null ? void 0 : controller.enqueue(value);
- }
- } catch (error) {
- controller == null ? void 0 : controller.error(error);
- innerStreamReaders.shift();
- if (isClosed && innerStreamReaders.length === 0) {
- controller == null ? void 0 : controller.close();
- }
- }
- };
- return {
- stream: new ReadableStream({
- start(controllerParam) {
- controller = controllerParam;
- },
- pull: processPull,
- async cancel() {
- for (const reader of innerStreamReaders) {
- await reader.cancel();
- }
- innerStreamReaders = [];
- isClosed = true;
- }
- }),
- addStream: (innerStream) => {
- if (isClosed) {
- throw new Error("Cannot add inner stream: outer stream is closed");
- }
- innerStreamReaders.push(innerStream.getReader());
- },
- close: () => {
- isClosed = true;
- if (innerStreamReaders.length === 0) {
- controller == null ? void 0 : controller.close();
- }
- }
- };
- }
-
  // core/util/merge-streams.ts
  function mergeStreams(stream1, stream2) {
  const reader1 = stream1.getReader();
@@ -3854,7 +3896,7 @@ function runToolsTransformation({

  // core/generate-text/stream-text.ts
  var originalGenerateId4 = (0, import_provider_utils9.createIdGenerator)({ prefix: "aitxt", size: 24 });
- async function streamText({
+ function streamText({
  model,
  tools,
  toolChoice,
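
Note: streamText receives the same treatment as streamObject above; dropping async means callers get the result object synchronously while each step streams in the background. A caller-side sketch under the same assumptions (a configured model; the onFinish event shape is not shown in this excerpt, so treat the destructured usage field as an assumption):

  const result = streamText({
    model,
    prompt: "...",
    onFinish({ usage }) {
      console.log(usage); // assumed field; invoked when the final step completes
    },
  });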
@@ -3870,593 +3912,582 @@ async function streamText({
  experimental_providerMetadata: providerMetadata,
  experimental_toolCallStreaming: toolCallStreaming = false,
  experimental_activeTools: activeTools,
- onChunk,
- onFinish,
- onStepFinish,
- _internal: {
- now: now2 = now,
- generateId: generateId3 = originalGenerateId4,
- currentDate = () => /* @__PURE__ */ new Date()
- } = {},
- ...settings
- }) {
- if (maxSteps < 1) {
- throw new InvalidArgumentError({
- parameter: "maxSteps",
- value: maxSteps,
- message: "maxSteps must be at least 1"
- });
- }
- const baseTelemetryAttributes = getBaseTelemetryAttributes({
- model,
- telemetry,
- headers,
- settings: { ...settings, maxRetries }
- });
- const tracer = getTracer(telemetry);
- const initialPrompt = standardizePrompt({
- prompt: { system, prompt, messages },
- tools
- });
- return recordSpan({
- name: "ai.streamText",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({ operationId: "ai.streamText", telemetry }),
- ...baseTelemetryAttributes,
- // specific settings that only make sense on the outer level:
- "ai.prompt": {
- input: () => JSON.stringify({ system, prompt, messages })
- },
- "ai.settings.maxSteps": maxSteps
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (rootSpan) => {
- const retry = retryWithExponentialBackoff({ maxRetries });
- const startStep = async ({
- responseMessages
- }) => {
- const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
- const promptMessages = await convertToLanguageModelPrompt({
- prompt: {
- type: promptFormat,
- system: initialPrompt.system,
- messages: [...initialPrompt.messages, ...responseMessages]
- },
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
- });
- const mode = {
- type: "regular",
- ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
- };
- const {
- result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
- doStreamSpan: doStreamSpan2,
- startTimestampMs: startTimestampMs2
- } = await retry(
- () => recordSpan({
- name: "ai.streamText.doStream",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.streamText.doStream",
- telemetry
- }),
- ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => promptFormat
- },
- "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages)
- },
- "ai.prompt.tools": {
- // convert the language model level tools:
- input: () => {
- var _a11;
- return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
- }
- },
- "ai.prompt.toolChoice": {
- input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
- },
- // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
- "gen_ai.request.presence_penalty": settings.presencePenalty,
- "gen_ai.request.stop_sequences": settings.stopSequences,
- "gen_ai.request.temperature": settings.temperature,
- "gen_ai.request.top_k": settings.topK,
- "gen_ai.request.top_p": settings.topP
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (doStreamSpan3) => ({
- startTimestampMs: now2(),
- // get before the call
- doStreamSpan: doStreamSpan3,
- result: await model.doStream({
- mode,
- ...prepareCallSettings(settings),
- inputFormat: promptFormat,
- prompt: promptMessages,
- providerMetadata,
- abortSignal,
- headers
- })
- })
- })
- );
- return {
- result: {
- stream: runToolsTransformation({
- tools,
- generatorStream: stream2,
- toolCallStreaming,
- tracer,
- telemetry,
- abortSignal
- }),
- warnings: warnings2,
- request: request2 != null ? request2 : {},
- rawResponse: rawResponse2
- },
- doStreamSpan: doStreamSpan2,
- startTimestampMs: startTimestampMs2
- };
- };
- const {
- result: { stream, warnings, rawResponse, request },
- doStreamSpan,
- startTimestampMs
- } = await startStep({ responseMessages: [] });
- return new DefaultStreamTextResult({
- stream,
- warnings,
- rawResponse,
- request,
- onChunk,
- onFinish,
- onStepFinish,
- rootSpan,
- doStreamSpan,
- telemetry,
- startTimestampMs,
- maxSteps,
- continueSteps,
- startStep,
- modelId: model.modelId,
- now: now2,
- currentDate,
- generateId: generateId3,
- tools
- });
- }
+ onChunk,
+ onFinish,
+ onStepFinish,
+ _internal: {
+ now: now2 = now,
+ generateId: generateId3 = originalGenerateId4,
+ currentDate = () => /* @__PURE__ */ new Date()
+ } = {},
+ ...settings
+ }) {
+ return new DefaultStreamTextResult({
+ model,
+ telemetry,
+ headers,
+ settings,
+ maxRetries,
+ abortSignal,
+ system,
+ prompt,
+ messages,
+ tools,
+ toolChoice,
+ toolCallStreaming,
+ activeTools,
+ maxSteps,
+ continueSteps,
+ providerMetadata,
+ onChunk,
+ onFinish,
+ onStepFinish,
+ now: now2,
+ currentDate,
+ generateId: generateId3
  });
  }
  var DefaultStreamTextResult = class {
  constructor({
- stream,
- warnings,
- rawResponse,
- request,
- onChunk,
- onFinish,
- onStepFinish,
- rootSpan,
- doStreamSpan,
+ model,
  telemetry,
- startTimestampMs,
+ headers,
+ settings,
+ maxRetries,
+ abortSignal,
+ system,
+ prompt,
+ messages,
+ tools,
+ toolChoice,
+ toolCallStreaming,
+ activeTools,
  maxSteps,
  continueSteps,
- startStep,
- modelId,
+ providerMetadata,
+ onChunk,
+ onFinish,
+ onStepFinish,
  now: now2,
  currentDate,
- generateId: generateId3,
- tools
+ generateId: generateId3
  }) {
- this.rawWarnings = warnings;
- this.rawResponse = rawResponse;
- const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
- this.usage = usagePromise;
- const { resolve: resolveFinishReason, promise: finishReasonPromise } = createResolvablePromise();
- this.finishReason = finishReasonPromise;
- const { resolve: resolveText, promise: textPromise } = createResolvablePromise();
- this.text = textPromise;
- const { resolve: resolveToolCalls, promise: toolCallsPromise } = createResolvablePromise();
- this.toolCalls = toolCallsPromise;
- const { resolve: resolveToolResults, promise: toolResultsPromise } = createResolvablePromise();
- this.toolResults = toolResultsPromise;
- const { resolve: resolveSteps, promise: stepsPromise } = createResolvablePromise();
- this.steps = stepsPromise;
- const {
- resolve: resolveProviderMetadata,
- promise: providerMetadataPromise
- } = createResolvablePromise();
- this.experimental_providerMetadata = providerMetadataPromise;
- const { resolve: resolveRequest, promise: requestPromise } = createResolvablePromise();
- this.request = requestPromise;
- const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
- this.response = responsePromise;
- const { resolve: resolveWarnings, promise: warningsPromise } = createResolvablePromise();
- this.warnings = warningsPromise;
- const {
- stream: stitchableStream,
- addStream,
- close: closeStitchableStream
- } = createStitchableStream();
- this.originalStream = stitchableStream;
- const stepResults = [];
+ this.warningsPromise = new DelayedPromise();
+ this.usagePromise = new DelayedPromise();
+ this.finishReasonPromise = new DelayedPromise();
+ this.providerMetadataPromise = new DelayedPromise();
+ this.textPromise = new DelayedPromise();
+ this.toolCallsPromise = new DelayedPromise();
+ this.toolResultsPromise = new DelayedPromise();
+ this.requestPromise = new DelayedPromise();
+ this.responsePromise = new DelayedPromise();
+ this.stepsPromise = new DelayedPromise();
+ this.stitchableStream = createStitchableStream();
+ if (maxSteps < 1) {
+ throw new InvalidArgumentError({
+ parameter: "maxSteps",
+ value: maxSteps,
+ message: "maxSteps must be at least 1"
+ });
+ }
+ const tracer = getTracer(telemetry);
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ model,
+ telemetry,
+ headers,
+ settings: { ...settings, maxRetries }
+ });
+ const initialPrompt = standardizePrompt({
+ prompt: { system, prompt, messages },
+ tools
+ });
  const self = this;
- function addStepStream({
- stream: stream2,
- startTimestamp,
- doStreamSpan: doStreamSpan2,
- currentStep,
- responseMessages,
- usage = {
- promptTokens: 0,
- completionTokens: 0,
- totalTokens: 0
- },
- stepType,
- previousStepText = "",
- stepRequest,
- hasLeadingWhitespace
- }) {
- const stepToolCalls = [];
- const stepToolResults = [];
- let stepFinishReason = "unknown";
- let stepUsage = {
- promptTokens: 0,
- completionTokens: 0,
- totalTokens: 0
- };
- let stepProviderMetadata;
- let stepFirstChunk = true;
- let stepText = "";
- let fullStepText = stepType === "continue" ? previousStepText : "";
- let stepLogProbs;
- let stepResponse = {
- id: generateId3(),
- timestamp: currentDate(),
- modelId
- };
- let chunkBuffer = "";
- let chunkTextPublished = false;
- let inWhitespacePrefix = true;
- let hasWhitespaceSuffix = false;
- async function publishTextChunk({
- controller,
- chunk
- }) {
- controller.enqueue(chunk);
- stepText += chunk.textDelta;
- fullStepText += chunk.textDelta;
- chunkTextPublished = true;
- hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
- await (onChunk == null ? void 0 : onChunk({ chunk }));
- }
- addStream(
- stream2.pipeThrough(
- new TransformStream({
- async transform(chunk, controller) {
- var _a11, _b, _c;
- if (stepFirstChunk) {
- const msToFirstChunk = now2() - startTimestamp;
- stepFirstChunk = false;
- doStreamSpan2.addEvent("ai.stream.firstChunk", {
- "ai.response.msToFirstChunk": msToFirstChunk
- });
- doStreamSpan2.setAttributes({
- "ai.response.msToFirstChunk": msToFirstChunk
- });
- }
- if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
- return;
- }
- const chunkType = chunk.type;
- switch (chunkType) {
- case "text-delta": {
- if (continueSteps) {
- const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
- if (trimmedChunkText.length === 0) {
- break;
+ recordSpan({
+ name: "ai.streamText",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({ operationId: "ai.streamText", telemetry }),
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.prompt": {
+ input: () => JSON.stringify({ system, prompt, messages })
+ },
+ "ai.settings.maxSteps": maxSteps
+ }
+ }),
+ tracer,
+ endWhenDone: false,
+ fn: async (rootSpan) => {
+ const retry = retryWithExponentialBackoff({ maxRetries });
+ const stepResults = [];
+ async function streamStep({
+ currentStep,
+ responseMessages,
+ usage,
+ stepType,
+ previousStepText,
+ hasLeadingWhitespace
+ }) {
+ const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: {
+ type: promptFormat,
+ system: initialPrompt.system,
+ messages: [...initialPrompt.messages, ...responseMessages]
+ },
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
+ });
+ const mode = {
+ type: "regular",
+ ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
+ };
+ const {
+ result: { stream, warnings, rawResponse, request },
+ doStreamSpan,
+ startTimestampMs
+ } = await retry(
+ () => recordSpan({
+ name: "ai.streamText.doStream",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.streamText.doStream",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ "ai.prompt.format": {
+ input: () => promptFormat
+ },
+ "ai.prompt.messages": {
+ input: () => JSON.stringify(promptMessages)
+ },
+ "ai.prompt.tools": {
+ // convert the language model level tools:
+ input: () => {
+ var _a11;
+ return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
  }
- inWhitespacePrefix = false;
- chunkBuffer += trimmedChunkText;
- const split = splitOnLastWhitespace(chunkBuffer);
- if (split != null) {
- chunkBuffer = split.suffix;
- await publishTextChunk({
- controller,
- chunk: {
- type: "text-delta",
- textDelta: split.prefix + split.whitespace
+ },
+ "ai.prompt.toolChoice": {
+ input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
+ },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+ "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
+ "gen_ai.request.stop_sequences": settings.stopSequences,
+ "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_k": settings.topK,
+ "gen_ai.request.top_p": settings.topP
+ }
+ }),
+ tracer,
+ endWhenDone: false,
+ fn: async (doStreamSpan2) => ({
+ startTimestampMs: now2(),
+ // get before the call
+ doStreamSpan: doStreamSpan2,
+ result: await model.doStream({
+ mode,
+ ...prepareCallSettings(settings),
+ inputFormat: promptFormat,
+ prompt: promptMessages,
+ providerMetadata,
+ abortSignal,
+ headers
+ })
+ })
+ })
+ );
+ const transformedStream = runToolsTransformation({
+ tools,
+ generatorStream: stream,
+ toolCallStreaming,
+ tracer,
+ telemetry,
+ abortSignal
+ });
+ const stepRequest = request != null ? request : {};
+ const stepToolCalls = [];
+ const stepToolResults = [];
+ let stepFinishReason = "unknown";
+ let stepUsage = {
+ promptTokens: 0,
+ completionTokens: 0,
+ totalTokens: 0
+ };
+ let stepProviderMetadata;
+ let stepFirstChunk = true;
+ let stepText = "";
+ let fullStepText = stepType === "continue" ? previousStepText : "";
+ let stepLogProbs;
+ let stepResponse = {
+ id: generateId3(),
+ timestamp: currentDate(),
+ modelId: model.modelId
+ };
+ let chunkBuffer = "";
+ let chunkTextPublished = false;
+ let inWhitespacePrefix = true;
+ let hasWhitespaceSuffix = false;
+ async function publishTextChunk({
+ controller,
+ chunk
+ }) {
+ controller.enqueue(chunk);
+ stepText += chunk.textDelta;
+ fullStepText += chunk.textDelta;
+ chunkTextPublished = true;
+ hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
+ }
+ self.stitchableStream.addStream(
+ transformedStream.pipeThrough(
+ new TransformStream({
+ async transform(chunk, controller) {
+ var _a11, _b, _c;
+ if (stepFirstChunk) {
+ const msToFirstChunk = now2() - startTimestampMs;
+ stepFirstChunk = false;
+ doStreamSpan.addEvent("ai.stream.firstChunk", {
+ "ai.response.msToFirstChunk": msToFirstChunk
+ });
+ doStreamSpan.setAttributes({
+ "ai.response.msToFirstChunk": msToFirstChunk
+ });
+ }
+ if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
+ return;
+ }
+ const chunkType = chunk.type;
+ switch (chunkType) {
+ case "text-delta": {
+ if (continueSteps) {
+ const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
+ if (trimmedChunkText.length === 0) {
+ break;
+ }
+ inWhitespacePrefix = false;
+ chunkBuffer += trimmedChunkText;
+ const split = splitOnLastWhitespace(chunkBuffer);
+ if (split != null) {
+ chunkBuffer = split.suffix;
+ await publishTextChunk({
+ controller,
+ chunk: {
+ type: "text-delta",
+ textDelta: split.prefix + split.whitespace
+ }
+ });
  }
+ } else {
+ await publishTextChunk({ controller, chunk });
+ }
+ break;
+ }
+ case "tool-call": {
+ controller.enqueue(chunk);
+ stepToolCalls.push(chunk);
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
+ break;
+ }
+ case "tool-result": {
+ controller.enqueue(chunk);
+ stepToolResults.push(chunk);
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
+ break;
+ }
+ case "response-metadata": {
+ stepResponse = {
+ id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
+ timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
+ modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
+ };
+ break;
+ }
+ case "finish": {
+ stepUsage = chunk.usage;
+ stepFinishReason = chunk.finishReason;
+ stepProviderMetadata = chunk.experimental_providerMetadata;
+ stepLogProbs = chunk.logprobs;
+ const msToFinish = now2() - startTimestampMs;
+ doStreamSpan.addEvent("ai.stream.finish");
+ doStreamSpan.setAttributes({
+ "ai.response.msToFinish": msToFinish,
+ "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
  });
+ break;
  }
- } else {
- await publishTextChunk({ controller, chunk });
- }
- break;
- }
- case "tool-call": {
- controller.enqueue(chunk);
- stepToolCalls.push(chunk);
- await (onChunk == null ? void 0 : onChunk({ chunk }));
- break;
- }
- case "tool-result": {
- controller.enqueue(chunk);
- stepToolResults.push(chunk);
- await (onChunk == null ? void 0 : onChunk({ chunk }));
- break;
- }
- case "response-metadata": {
- stepResponse = {
- id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
- timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
- modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
- };
- break;
- }
- case "finish": {
- stepUsage = chunk.usage;
- stepFinishReason = chunk.finishReason;
- stepProviderMetadata = chunk.experimental_providerMetadata;
- stepLogProbs = chunk.logprobs;
- const msToFinish = now2() - startTimestamp;
- doStreamSpan2.addEvent("ai.stream.finish");
- doStreamSpan2.setAttributes({
- "ai.response.msToFinish": msToFinish,
- "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
- });
- break;
- }
- case "tool-call-streaming-start":
- case "tool-call-delta": {
- controller.enqueue(chunk);
- await (onChunk == null ? void 0 : onChunk({ chunk }));
- break;
- }
- case "error": {
- controller.enqueue(chunk);
- stepFinishReason = "error";
- break;
- }
- default: {
- const exhaustiveCheck = chunkType;
- throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
- }
- }
- },
- // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
- async flush(controller) {
- var _a11, _b;
- const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
- let nextStepType = "done";
- if (currentStep + 1 < maxSteps) {
- if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
- stepToolCalls.length === 0) {
- nextStepType = "continue";
- } else if (
- // there are tool calls:
- stepToolCalls.length > 0 && // all current tool calls have results:
- stepToolResults.length === stepToolCalls.length
- ) {
- nextStepType = "tool-result";
- }
- }
- if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
- stepType === "continue" && !chunkTextPublished)) {
- await publishTextChunk({
- controller,
- chunk: {
- type: "text-delta",
- textDelta: chunkBuffer
- }
- });
- chunkBuffer = "";
- }
- try {
- doStreamSpan2.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": stepFinishReason,
- "ai.response.text": { output: () => stepText },
- "ai.response.toolCalls": {
- output: () => stepToolCallsJson
- },
- "ai.response.id": stepResponse.id,
- "ai.response.model": stepResponse.modelId,
- "ai.response.timestamp": stepResponse.timestamp.toISOString(),
- "ai.usage.promptTokens": stepUsage.promptTokens,
- "ai.usage.completionTokens": stepUsage.completionTokens,
- // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [stepFinishReason],
- "gen_ai.response.id": stepResponse.id,
- "gen_ai.response.model": stepResponse.modelId,
- "gen_ai.usage.input_tokens": stepUsage.promptTokens,
- "gen_ai.usage.output_tokens": stepUsage.completionTokens
+ case "tool-call-streaming-start":
+ case "tool-call-delta": {
+ controller.enqueue(chunk);
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
+ break;
  }
- })
- );
- } catch (error) {
- } finally {
- doStreamSpan2.end();
- }
- controller.enqueue({
- type: "step-finish",
- finishReason: stepFinishReason,
- usage: stepUsage,
- experimental_providerMetadata: stepProviderMetadata,
- logprobs: stepLogProbs,
- response: {
- ...stepResponse
+ case "error": {
+ controller.enqueue(chunk);
+ stepFinishReason = "error";
+ break;
+ }
+ default: {
+ const exhaustiveCheck = chunkType;
+ throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
+ }
+ }
  },
- isContinued: nextStepType === "continue"
- });
- if (stepType === "continue") {
- const lastMessage = responseMessages[responseMessages.length - 1];
- if (typeof lastMessage.content === "string") {
- lastMessage.content += stepText;
- } else {
- lastMessage.content.push({
- text: stepText,
- type: "text"
+ // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+ async flush(controller) {
+ const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
+ let nextStepType = "done";
+ if (currentStep + 1 < maxSteps) {
+ if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
+ stepToolCalls.length === 0) {
+ nextStepType = "continue";
+ } else if (
+ // there are tool calls:
+ stepToolCalls.length > 0 && // all current tool calls have results:
+ stepToolResults.length === stepToolCalls.length
+ ) {
+ nextStepType = "tool-result";
+ }
+ }
+ if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
+ stepType === "continue" && !chunkTextPublished)) {
+ await publishTextChunk({
+ controller,
+ chunk: {
+ type: "text-delta",
+ textDelta: chunkBuffer
+ }
+ });
+ chunkBuffer = "";
+ }
+ try {
+ doStreamSpan.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": stepFinishReason,
+ "ai.response.text": { output: () => stepText },
+ "ai.response.toolCalls": {
+ output: () => stepToolCallsJson
+ },
+ "ai.response.id": stepResponse.id,
+ "ai.response.model": stepResponse.modelId,
+ "ai.response.timestamp": stepResponse.timestamp.toISOString(),
+ "ai.usage.promptTokens": stepUsage.promptTokens,
+ "ai.usage.completionTokens": stepUsage.completionTokens,
+ // standardized gen-ai llm span attributes:
4286
+ "gen_ai.response.finish_reasons": [stepFinishReason],
4287
+ "gen_ai.response.id": stepResponse.id,
4288
+ "gen_ai.response.model": stepResponse.modelId,
4289
+ "gen_ai.usage.input_tokens": stepUsage.promptTokens,
4290
+ "gen_ai.usage.output_tokens": stepUsage.completionTokens
4291
+ }
4292
+ })
4293
+ );
4294
+ } catch (error) {
4295
+ } finally {
4296
+ doStreamSpan.end();
4297
+ }
4298
+ controller.enqueue({
4299
+ type: "step-finish",
4300
+ finishReason: stepFinishReason,
4301
+ usage: stepUsage,
4302
+ experimental_providerMetadata: stepProviderMetadata,
4303
+ logprobs: stepLogProbs,
4304
+ response: {
4305
+ ...stepResponse
4306
+ },
4307
+ isContinued: nextStepType === "continue"
4317
4308
  });
4318
- }
4319
- } else {
4320
- responseMessages.push(
4321
- ...toResponseMessages({
4309
+ if (stepType === "continue") {
4310
+ const lastMessage = responseMessages[responseMessages.length - 1];
4311
+ if (typeof lastMessage.content === "string") {
4312
+ lastMessage.content += stepText;
4313
+ } else {
4314
+ lastMessage.content.push({
4315
+ text: stepText,
4316
+ type: "text"
4317
+ });
4318
+ }
4319
+ } else {
4320
+ responseMessages.push(
4321
+ ...toResponseMessages({
4322
+ text: stepText,
4323
+ tools: tools != null ? tools : {},
4324
+ toolCalls: stepToolCalls,
4325
+ toolResults: stepToolResults
4326
+ })
4327
+ );
4328
+ }
4329
+ const currentStepResult = {
4330
+ stepType,
4322
4331
  text: stepText,
4323
- tools: tools != null ? tools : {},
4324
4332
  toolCalls: stepToolCalls,
4325
- toolResults: stepToolResults
4326
- })
4327
- );
4328
- }
4329
- const currentStepResult = {
4330
- stepType,
4331
- text: stepText,
4332
- toolCalls: stepToolCalls,
4333
- toolResults: stepToolResults,
4334
- finishReason: stepFinishReason,
4335
- usage: stepUsage,
4336
- warnings: self.rawWarnings,
4337
- logprobs: stepLogProbs,
4338
- request: stepRequest,
4339
- response: {
4340
- ...stepResponse,
4341
- headers: (_a11 = self.rawResponse) == null ? void 0 : _a11.headers,
4342
- // deep clone msgs to avoid mutating past messages in multi-step:
4343
- messages: JSON.parse(JSON.stringify(responseMessages))
4344
- },
4345
- experimental_providerMetadata: stepProviderMetadata,
4346
- isContinued: nextStepType === "continue"
4347
- };
4348
- stepResults.push(currentStepResult);
4349
- await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
4350
- const combinedUsage = {
4351
- promptTokens: usage.promptTokens + stepUsage.promptTokens,
4352
- completionTokens: usage.completionTokens + stepUsage.completionTokens,
4353
- totalTokens: usage.totalTokens + stepUsage.totalTokens
4354
- };
4355
- if (nextStepType !== "done") {
4356
- const {
4357
- result,
4358
- doStreamSpan: doStreamSpan3,
4359
- startTimestampMs: startTimestamp2
4360
- } = await startStep({ responseMessages });
4361
- self.rawWarnings = result.warnings;
4362
- self.rawResponse = result.rawResponse;
4363
- addStepStream({
4364
- stream: result.stream,
4365
- startTimestamp: startTimestamp2,
4366
- doStreamSpan: doStreamSpan3,
4367
- currentStep: currentStep + 1,
4368
- responseMessages,
4369
- usage: combinedUsage,
4370
- stepType: nextStepType,
4371
- previousStepText: fullStepText,
4372
- stepRequest: result.request,
4373
- hasLeadingWhitespace: hasWhitespaceSuffix
4374
- });
4375
- return;
4376
- }
4377
- try {
4378
- controller.enqueue({
4379
- type: "finish",
4380
- finishReason: stepFinishReason,
4381
- usage: combinedUsage,
4382
- experimental_providerMetadata: stepProviderMetadata,
4383
- logprobs: stepLogProbs,
4384
- response: {
4385
- ...stepResponse
4333
+ toolResults: stepToolResults,
4334
+ finishReason: stepFinishReason,
4335
+ usage: stepUsage,
4336
+ warnings,
4337
+ logprobs: stepLogProbs,
4338
+ request: stepRequest,
4339
+ response: {
4340
+ ...stepResponse,
4341
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
4342
+ // deep clone msgs to avoid mutating past messages in multi-step:
4343
+ messages: JSON.parse(JSON.stringify(responseMessages))
4344
+ },
4345
+ experimental_providerMetadata: stepProviderMetadata,
4346
+ isContinued: nextStepType === "continue"
4347
+ };
4348
+ stepResults.push(currentStepResult);
4349
+ await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
4350
+ const combinedUsage = {
4351
+ promptTokens: usage.promptTokens + stepUsage.promptTokens,
4352
+ completionTokens: usage.completionTokens + stepUsage.completionTokens,
4353
+ totalTokens: usage.totalTokens + stepUsage.totalTokens
4354
+ };
4355
+ if (nextStepType !== "done") {
4356
+ await streamStep({
4357
+ currentStep: currentStep + 1,
4358
+ responseMessages,
4359
+ usage: combinedUsage,
4360
+ stepType: nextStepType,
4361
+ previousStepText: fullStepText,
4362
+ hasLeadingWhitespace: hasWhitespaceSuffix
4363
+ });
4364
+ return;
4386
4365
  }
4387
- });
4388
- closeStitchableStream();
4389
- rootSpan.setAttributes(
4390
- selectTelemetryAttributes({
4391
- telemetry,
4392
- attributes: {
4393
- "ai.response.finishReason": stepFinishReason,
4394
- "ai.response.text": { output: () => fullStepText },
4395
- "ai.response.toolCalls": {
4396
- output: () => stepToolCallsJson
4366
+ try {
4367
+ controller.enqueue({
4368
+ type: "finish",
4369
+ finishReason: stepFinishReason,
4370
+ usage: combinedUsage,
4371
+ experimental_providerMetadata: stepProviderMetadata,
4372
+ logprobs: stepLogProbs,
4373
+ response: {
4374
+ ...stepResponse
4375
+ }
4376
+ });
4377
+ self.stitchableStream.close();
4378
+ rootSpan.setAttributes(
4379
+ selectTelemetryAttributes({
4380
+ telemetry,
4381
+ attributes: {
4382
+ "ai.response.finishReason": stepFinishReason,
4383
+ "ai.response.text": { output: () => fullStepText },
4384
+ "ai.response.toolCalls": {
4385
+ output: () => stepToolCallsJson
4386
+ },
4387
+ "ai.usage.promptTokens": combinedUsage.promptTokens,
4388
+ "ai.usage.completionTokens": combinedUsage.completionTokens
4389
+ }
4390
+ })
4391
+ );
4392
+ self.usagePromise.resolve(combinedUsage);
4393
+ self.finishReasonPromise.resolve(stepFinishReason);
4394
+ self.textPromise.resolve(fullStepText);
4395
+ self.toolCallsPromise.resolve(stepToolCalls);
4396
+ self.providerMetadataPromise.resolve(stepProviderMetadata);
4397
+ self.toolResultsPromise.resolve(stepToolResults);
4398
+ self.requestPromise.resolve(stepRequest);
4399
+ self.responsePromise.resolve({
4400
+ ...stepResponse,
4401
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
4402
+ messages: responseMessages
4403
+ });
4404
+ self.stepsPromise.resolve(stepResults);
4405
+ self.warningsPromise.resolve(warnings != null ? warnings : []);
4406
+ await (onFinish == null ? void 0 : onFinish({
4407
+ finishReason: stepFinishReason,
4408
+ logprobs: stepLogProbs,
4409
+ usage: combinedUsage,
4410
+ text: fullStepText,
4411
+ toolCalls: stepToolCalls,
4412
+ // The tool results are inferred as a never[] type, because they are
4413
+ // optional and the execute method with an inferred result type is
4414
+ // optional as well. Therefore we need to cast the toolResults to any.
4415
+ // The type exposed to the users will be correctly inferred.
4416
+ toolResults: stepToolResults,
4417
+ request: stepRequest,
4418
+ response: {
4419
+ ...stepResponse,
4420
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
4421
+ messages: responseMessages
4397
4422
  },
4398
- "ai.usage.promptTokens": combinedUsage.promptTokens,
4399
- "ai.usage.completionTokens": combinedUsage.completionTokens
4400
- }
4401
- })
4402
- );
4403
- resolveUsage(combinedUsage);
4404
- resolveFinishReason(stepFinishReason);
4405
- resolveText(fullStepText);
4406
- resolveToolCalls(stepToolCalls);
4407
- resolveProviderMetadata(stepProviderMetadata);
4408
- resolveToolResults(stepToolResults);
4409
- resolveRequest(stepRequest);
4410
- resolveResponse({
4411
- ...stepResponse,
4412
- headers: rawResponse == null ? void 0 : rawResponse.headers,
4413
- messages: responseMessages
4414
- });
4415
- resolveSteps(stepResults);
4416
- resolveWarnings((_b = self.rawWarnings) != null ? _b : []);
4417
- await (onFinish == null ? void 0 : onFinish({
4418
- finishReason: stepFinishReason,
4419
- logprobs: stepLogProbs,
4420
- usage: combinedUsage,
4421
- text: fullStepText,
4422
- toolCalls: stepToolCalls,
4423
- // The tool results are inferred as a never[] type, because they are
4424
- // optional and the execute method with an inferred result type is
4425
- // optional as well. Therefore we need to cast the toolResults to any.
4426
- // The type exposed to the users will be correctly inferred.
4427
- toolResults: stepToolResults,
4428
- request: stepRequest,
4429
- response: {
4430
- ...stepResponse,
4431
- headers: rawResponse == null ? void 0 : rawResponse.headers,
4432
- messages: responseMessages
4433
- },
4434
- warnings,
4435
- experimental_providerMetadata: stepProviderMetadata,
4436
- steps: stepResults
4437
- }));
4438
- } catch (error) {
4439
- controller.error(error);
4440
- } finally {
4441
- rootSpan.end();
4442
- }
4443
- }
4444
- })
4445
- )
4423
+ warnings,
4424
+ experimental_providerMetadata: stepProviderMetadata,
4425
+ steps: stepResults
4426
+ }));
4427
+ } catch (error) {
4428
+ controller.error(error);
4429
+ } finally {
4430
+ rootSpan.end();
4431
+ }
4432
+ }
4433
+ })
4434
+ )
4435
+ );
4436
+ }
4437
+ await streamStep({
4438
+ currentStep: 0,
4439
+ responseMessages: [],
4440
+ usage: {
4441
+ promptTokens: 0,
4442
+ completionTokens: 0,
4443
+ totalTokens: 0
4444
+ },
4445
+ previousStepText: "",
4446
+ stepType: "initial",
4447
+ hasLeadingWhitespace: false
4448
+ });
4449
+ }
4450
+ }).catch((error) => {
4451
+ self.stitchableStream.addStream(
4452
+ new ReadableStream({
4453
+ start(controller) {
4454
+ controller.error(error);
4455
+ }
4456
+ })
4446
4457
  );
4447
- }
4448
- addStepStream({
4449
- stream,
4450
- startTimestamp: startTimestampMs,
4451
- doStreamSpan,
4452
- currentStep: 0,
4453
- responseMessages: [],
4454
- usage: void 0,
4455
- stepType: "initial",
4456
- stepRequest: request,
4457
- hasLeadingWhitespace: false
4458
+ self.stitchableStream.close();
4458
4459
  });
4459
4460
  }
4461
+ get warnings() {
4462
+ return this.warningsPromise.value;
4463
+ }
4464
+ get usage() {
4465
+ return this.usagePromise.value;
4466
+ }
4467
+ get finishReason() {
4468
+ return this.finishReasonPromise.value;
4469
+ }
4470
+ get experimental_providerMetadata() {
4471
+ return this.providerMetadataPromise.value;
4472
+ }
4473
+ get text() {
4474
+ return this.textPromise.value;
4475
+ }
4476
+ get toolCalls() {
4477
+ return this.toolCallsPromise.value;
4478
+ }
4479
+ get toolResults() {
4480
+ return this.toolResultsPromise.value;
4481
+ }
4482
+ get request() {
4483
+ return this.requestPromise.value;
4484
+ }
4485
+ get response() {
4486
+ return this.responsePromise.value;
4487
+ }
4488
+ get steps() {
4489
+ return this.stepsPromise.value;
4490
+ }
4460
4491
  /**
4461
4492
  Split out a new stream from the original stream.
4462
4493
  The original stream is replaced to allow for further splitting,
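The refactor above replaces the `addStepStream`/`startStep` recursion with a single `streamStep` call, funnels errors into the stitchable stream instead of rejecting, and resolves named `DelayedPromise` fields (`usagePromise`, `finishReasonPromise`, and so on) that back the new getters. A minimal consumer sketch under those assumptions; the provider import and model id are illustrative:

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai"; // illustrative provider package

async function main() {
  // awaiting the call is safe whether streamText resolves a promise or,
  // as in this canary refactor, returns the result object directly:
  const result = await streamText({
    model: openai("gpt-4o-mini"), // assumed model id
    prompt: "Explain stitchable streams in one sentence.",
  });

  // textStream is teed off the internal stitchable stream:
  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }

  // these getters resolve once the final step's flush() runs:
  console.log(await result.finishReason); // e.g. "stop"
  console.log(await result.usage); // combined multi-step usage
}

main();
```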
@@ -4466,8 +4497,8 @@ var DefaultStreamTextResult = class {
  However, the LLM results are expected to be small enough to not cause issues.
  */
  teeStream() {
- const [stream1, stream2] = this.originalStream.tee();
- this.originalStream = stream2;
+ const [stream1, stream2] = this.stitchableStream.stream.tee();
+ this.stitchableStream.stream = stream2;
  return stream1;
  }
  get textStream() {
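`teeStream()` now splits the stitchable stream rather than a stored `originalStream`, but the pattern is unchanged: tee, keep one branch for the next consumer, hand out the other. A generic sketch of that pattern (the class and method names here are illustrative):

```ts
// Tee-and-replace: any number of consumers can each read the full
// sequence, at the cost of buffering until the slowest reader drains.
class TeeSource<T> {
  constructor(private stream: ReadableStream<T>) {}

  split(): ReadableStream<T> {
    const [branch, replacement] = this.stream.tee();
    this.stream = replacement; // keep one side for future splits
    return branch; // hand the other to the caller
  }
}

// usage: two independent readers of the same underlying stream
const source = new TeeSource(
  new ReadableStream<string>({
    start(controller) {
      controller.enqueue("hello");
      controller.close();
    },
  }),
);
const first = source.split();
const second = source.split();
```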
@@ -4507,12 +4538,12 @@ var DefaultStreamTextResult = class {
  const chunkType = chunk.type;
  switch (chunkType) {
  case "text-delta": {
- controller.enqueue((0, import_ui_utils6.formatStreamPart)("text", chunk.textDelta));
+ controller.enqueue((0, import_ui_utils6.formatDataStreamPart)("text", chunk.textDelta));
  break;
  }
  case "tool-call-streaming-start": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("tool_call_streaming_start", {
+ (0, import_ui_utils6.formatDataStreamPart)("tool_call_streaming_start", {
  toolCallId: chunk.toolCallId,
  toolName: chunk.toolName
  })
@@ -4521,7 +4552,7 @@ var DefaultStreamTextResult = class {
  }
  case "tool-call-delta": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("tool_call_delta", {
+ (0, import_ui_utils6.formatDataStreamPart)("tool_call_delta", {
  toolCallId: chunk.toolCallId,
  argsTextDelta: chunk.argsTextDelta
  })
@@ -4530,7 +4561,7 @@ var DefaultStreamTextResult = class {
  }
  case "tool-call": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("tool_call", {
+ (0, import_ui_utils6.formatDataStreamPart)("tool_call", {
  toolCallId: chunk.toolCallId,
  toolName: chunk.toolName,
  args: chunk.args
@@ -4540,7 +4571,7 @@ var DefaultStreamTextResult = class {
  }
  case "tool-result": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("tool_result", {
+ (0, import_ui_utils6.formatDataStreamPart)("tool_result", {
  toolCallId: chunk.toolCallId,
  result: chunk.result
  })
@@ -4549,13 +4580,13 @@ var DefaultStreamTextResult = class {
  }
  case "error": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("error", getErrorMessage3(chunk.error))
+ (0, import_ui_utils6.formatDataStreamPart)("error", getErrorMessage3(chunk.error))
  );
  break;
  }
  case "step-finish": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("finish_step", {
+ (0, import_ui_utils6.formatDataStreamPart)("finish_step", {
  finishReason: chunk.finishReason,
  usage: sendUsage ? {
  promptTokens: chunk.usage.promptTokens,
@@ -4568,7 +4599,7 @@ var DefaultStreamTextResult = class {
  }
  case "finish": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("finish_message", {
+ (0, import_ui_utils6.formatDataStreamPart)("finish_message", {
  finishReason: chunk.finishReason,
  usage: sendUsage ? {
  promptTokens: chunk.usage.promptTokens,
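The hunks above are a mechanical rename: the UI-facing data stream protocol now serializes through `formatDataStreamPart` instead of the shared `formatStreamPart`. Each part is one `code:JSON` line; a small sketch (the exact part codes are a protocol detail and shown only as expected output):

```ts
import { formatDataStreamPart } from "ai";

// A text delta becomes a single newline-terminated part:
formatDataStreamPart("text", "Hello");
// expected: '0:"Hello"\n'

// Structured parts take an object payload, mirroring the
// "finish_message" branch above:
formatDataStreamPart("finish_message", {
  finishReason: "stop",
  usage: { promptTokens: 10, completionTokens: 20 },
});
```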
@@ -4845,17 +4876,21 @@ function AssistantResponse({ threadId, messageId }, process2) {
  const textEncoder = new TextEncoder();
  const sendMessage = (message) => {
  controller.enqueue(
- textEncoder.encode((0, import_ui_utils8.formatStreamPart)("assistant_message", message))
+ textEncoder.encode(
+ (0, import_ui_utils8.formatAssistantStreamPart)("assistant_message", message)
+ )
  );
  };
  const sendDataMessage = (message) => {
  controller.enqueue(
- textEncoder.encode((0, import_ui_utils8.formatStreamPart)("data_message", message))
+ textEncoder.encode(
+ (0, import_ui_utils8.formatAssistantStreamPart)("data_message", message)
+ )
  );
  };
  const sendError = (errorMessage) => {
  controller.enqueue(
- textEncoder.encode((0, import_ui_utils8.formatStreamPart)("error", errorMessage))
+ textEncoder.encode((0, import_ui_utils8.formatAssistantStreamPart)("error", errorMessage))
  );
  };
  const forwardStream = async (stream2) => {
@@ -4866,7 +4901,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  case "thread.message.created": {
  controller.enqueue(
  textEncoder.encode(
- (0, import_ui_utils8.formatStreamPart)("assistant_message", {
+ (0, import_ui_utils8.formatAssistantStreamPart)("assistant_message", {
  id: value.data.id,
  role: "assistant",
  content: [{ type: "text", text: { value: "" } }]
@@ -4880,7 +4915,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
  controller.enqueue(
  textEncoder.encode(
- (0, import_ui_utils8.formatStreamPart)("text", content.text.value)
+ (0, import_ui_utils8.formatAssistantStreamPart)("text", content.text.value)
  )
  );
  }
@@ -4897,7 +4932,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  };
  controller.enqueue(
  textEncoder.encode(
- (0, import_ui_utils8.formatStreamPart)("assistant_control_data", {
+ (0, import_ui_utils8.formatAssistantStreamPart)("assistant_control_data", {
  threadId,
  messageId
  })
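`AssistantResponse` now serializes its parts with the assistant-specific `formatAssistantStreamPart`; the helpers it hands to the process callback are unchanged. A route-handler sketch, assuming the documented callback shape (IDs and payload are illustrative):

```ts
import { AssistantResponse } from "ai";

export async function POST(req: Request) {
  return AssistantResponse(
    { threadId: "thread_123", messageId: "msg_456" }, // illustrative IDs
    async ({ sendDataMessage, forwardStream }) => {
      // serialized via formatAssistantStreamPart("data_message", ...):
      sendDataMessage({
        role: "data",
        data: { status: "queued" },
      });
      // forwardStream(runStream) would relay an Assistants run stream,
      // emitting the "assistant_message" / "text" parts seen above.
    },
  );
}
```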
@@ -5014,7 +5049,7 @@ var StreamData = class {
  throw new Error("Stream controller is not initialized.");
  }
  this.controller.enqueue(
- this.encoder.encode((0, import_ui_utils9.formatStreamPart)("data", [value]))
+ this.encoder.encode((0, import_ui_utils9.formatDataStreamPart)("data", [value]))
  );
  }
  appendMessageAnnotation(value) {
@@ -5025,7 +5060,7 @@ var StreamData = class {
  throw new Error("Stream controller is not initialized.");
  }
  this.controller.enqueue(
- this.encoder.encode((0, import_ui_utils9.formatStreamPart)("message_annotations", [value]))
+ this.encoder.encode((0, import_ui_utils9.formatDataStreamPart)("message_annotations", [value]))
  );
  }
  };
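`StreamData` keeps its public surface; only the serializer changes. A minimal usage sketch (the `append` name for the first method shown above is taken from the SDK's documented API, since the diff only shows its body):

```ts
import { StreamData } from "ai";

const data = new StreamData();
// enqueued as a "data" part; note the diff wraps the value in an array:
data.append({ status: "retrieving", documents: 3 });
// enqueued as a "message_annotations" part:
data.appendMessageAnnotation({ source: "kb-v2" });
// close after the model stream finishes so all parts are flushed:
await data.close();
```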
@@ -5035,7 +5070,7 @@ function createStreamDataTransformer() {
  return new TransformStream({
  transform: async (chunk, controller) => {
  const message = decoder.decode(chunk);
- controller.enqueue(encoder.encode((0, import_ui_utils9.formatStreamPart)("text", message)));
+ controller.enqueue(encoder.encode((0, import_ui_utils9.formatDataStreamPart)("text", message)));
  }
  });
  }
@@ -5169,14 +5204,16 @@ function trimStartOfStream() {
  experimental_createProviderRegistry,
  experimental_customProvider,
  experimental_wrapLanguageModel,
- formatStreamPart,
+ formatAssistantStreamPart,
+ formatDataStreamPart,
  generateId,
  generateObject,
  generateText,
  jsonSchema,
- parseStreamPart,
- processDataProtocolResponse,
- readDataStream,
+ parseAssistantStreamPart,
+ parseDataStreamPart,
+ processDataStream,
+ processTextStream,
  streamObject,
  streamText,
  tool
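The export list summarizes the public rename: `formatStreamPart`/`parseStreamPart` split into assistant- and data-stream variants, and `readDataStream`/`processDataProtocolResponse` give way to `processDataStream`/`processTextStream`. A migration sketch for a consumer; the callback option names shown here are an assumption based on the new part-specific handlers:

```ts
// before (canary.10):
// import { formatStreamPart, parseStreamPart, readDataStream } from "ai";

// after (canary.12):
import { processDataStream } from "ai";

async function readChatResponse() {
  const response = await fetch("/api/chat", { method: "POST" });

  await processDataStream({
    stream: response.body!, // ReadableStream<Uint8Array>
    onTextPart: (text) => process.stdout.write(text),
    onErrorPart: (message) => {
      throw new Error(message);
    },
  });
}
```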