ai 4.0.0-canary.9 → 4.0.1

package/dist/index.js CHANGED
@@ -53,14 +53,16 @@ __export(streams_exports, {
  experimental_createProviderRegistry: () => experimental_createProviderRegistry,
  experimental_customProvider: () => experimental_customProvider,
  experimental_wrapLanguageModel: () => experimental_wrapLanguageModel,
- formatStreamPart: () => import_ui_utils10.formatStreamPart,
+ formatAssistantStreamPart: () => import_ui_utils10.formatAssistantStreamPart,
+ formatDataStreamPart: () => import_ui_utils10.formatDataStreamPart,
  generateId: () => import_provider_utils11.generateId,
  generateObject: () => generateObject,
  generateText: () => generateText,
  jsonSchema: () => import_ui_utils7.jsonSchema,
- parseStreamPart: () => import_ui_utils10.parseStreamPart,
- processDataProtocolResponse: () => import_ui_utils10.processDataProtocolResponse,
- readDataStream: () => import_ui_utils10.readDataStream,
+ parseAssistantStreamPart: () => import_ui_utils10.parseAssistantStreamPart,
+ parseDataStreamPart: () => import_ui_utils10.parseDataStreamPart,
+ processDataStream: () => import_ui_utils10.processDataStream,
+ processTextStream: () => import_ui_utils10.processTextStream,
  streamObject: () => streamObject,
  streamText: () => streamText,
  tool: () => tool
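Note: this hunk splits the old stream-part helpers into assistant-stream and data-stream variants (`formatStreamPart`/`parseStreamPart` become `formatAssistantStreamPart`/`formatDataStreamPart` and `parseAssistantStreamPart`/`parseDataStreamPart`), and replaces `processDataProtocolResponse`/`readDataStream` with `processDataStream`/`processTextStream`. A minimal migration sketch against the new data-stream exports (illustrative only; check the package types for exact signatures):

```ts
import { formatDataStreamPart, parseDataStreamPart } from "ai";

// before (4.0.0-canary.9): formatStreamPart("text", "Hello")
// after (4.0.1): the data-stream variant
const part = formatDataStreamPart("text", "Hello"); // '0:"Hello"\n'

// parseDataStreamPart is the inverse of formatDataStreamPart:
const parsed = parseDataStreamPart(part.trim()); // { type: "text", value: "Hello" }
```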
@@ -2338,21 +2340,6 @@ var DefaultGenerateObjectResult = class {
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
  var import_ui_utils2 = require("@ai-sdk/ui-utils");

- // util/create-resolvable-promise.ts
- function createResolvablePromise() {
- let resolve;
- let reject;
- const promise = new Promise((res, rej) => {
- resolve = res;
- reject = rej;
- });
- return {
- promise,
- resolve,
- reject
- };
- }
-
  // util/delayed-promise.ts
  var DelayedPromise = class {
  constructor() {
@@ -2444,9 +2431,91 @@ function writeToServerResponse({
  read();
  }

+ // util/create-resolvable-promise.ts
+ function createResolvablePromise() {
+ let resolve;
+ let reject;
+ const promise = new Promise((res, rej) => {
+ resolve = res;
+ reject = rej;
+ });
+ return {
+ promise,
+ resolve,
+ reject
+ };
+ }
+
+ // core/util/create-stitchable-stream.ts
+ function createStitchableStream() {
+ let innerStreamReaders = [];
+ let controller = null;
+ let isClosed = false;
+ let waitForNewStream = createResolvablePromise();
+ const processPull = async () => {
+ if (isClosed && innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ return;
+ }
+ if (innerStreamReaders.length === 0) {
+ waitForNewStream = createResolvablePromise();
+ await waitForNewStream.promise;
+ return processPull();
+ }
+ try {
+ const { value, done } = await innerStreamReaders[0].read();
+ if (done) {
+ innerStreamReaders.shift();
+ if (innerStreamReaders.length > 0) {
+ await processPull();
+ } else if (isClosed) {
+ controller == null ? void 0 : controller.close();
+ }
+ } else {
+ controller == null ? void 0 : controller.enqueue(value);
+ }
+ } catch (error) {
+ controller == null ? void 0 : controller.error(error);
+ innerStreamReaders.shift();
+ if (isClosed && innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ }
+ }
+ };
+ return {
+ stream: new ReadableStream({
+ start(controllerParam) {
+ controller = controllerParam;
+ },
+ pull: processPull,
+ async cancel() {
+ for (const reader of innerStreamReaders) {
+ await reader.cancel();
+ }
+ innerStreamReaders = [];
+ isClosed = true;
+ }
+ }),
+ addStream: (innerStream) => {
+ if (isClosed) {
+ throw new Error("Cannot add inner stream: outer stream is closed");
+ }
+ innerStreamReaders.push(innerStream.getReader());
+ waitForNewStream.resolve();
+ },
+ close: () => {
+ isClosed = true;
+ waitForNewStream.resolve();
+ if (innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ }
+ }
+ };
+ }
+
  // core/generate-object/stream-object.ts
  var originalGenerateId2 = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj", size: 24 });
- async function streamObject({
+ function streamObject({
  model,
  schema: inputSchema,
  schemaName,
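Note: `createStitchableStream` (moved here from `core/generate-text`, and now shared by `streamObject` and `streamText`) gains a `waitForNewStream` resolvable promise, so `pull` parks until `addStream()` or `close()` wakes it instead of returning without enqueueing anything. A standalone sketch of the same pattern (the helper above is internal, not public API):

```ts
// Concatenate inner streams into one outer ReadableStream; pull() waits
// on a promise that addStream()/close() resolve, mirroring waitForNewStream.
function createStitchable<T>() {
  const readers: ReadableStreamDefaultReader<T>[] = [];
  let wake = () => {};
  let closed = false;
  const stream = new ReadableStream<T>({
    async pull(controller) {
      while (true) {
        if (readers.length === 0) {
          if (closed) return controller.close();
          await new Promise<void>((resolve) => (wake = resolve)); // park
          continue;
        }
        const { value, done } = await readers[0].read();
        if (done) {
          readers.shift(); // move on to the next inner stream
          continue;
        }
        controller.enqueue(value as T);
        return;
      }
    },
  });
  return {
    stream,
    addStream: (inner: ReadableStream<T>) => {
      readers.push(inner.getReader());
      wake();
    },
    close: () => {
      closed = true;
      wake();
    },
  };
}
```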
@@ -2480,400 +2549,433 @@ async function streamObject({
  if (outputStrategy.type === "no-schema" && mode === void 0) {
  mode = "json";
  }
- const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ return new DefaultStreamObjectResult({
  model,
  telemetry,
  headers,
- settings: { ...settings, maxRetries }
+ settings,
+ maxRetries,
+ abortSignal,
+ outputStrategy,
+ system,
+ prompt,
+ messages,
+ schemaName,
+ schemaDescription,
+ inputProviderMetadata: providerMetadata,
+ mode,
+ onFinish,
+ generateId: generateId3,
+ currentDate,
+ now: now2
  });
- const tracer = getTracer(telemetry);
- const retry = retryWithExponentialBackoff({ maxRetries });
- return recordSpan({
- name: "ai.streamObject",
- attributes: selectTelemetryAttributes({
+ }
+ var DefaultStreamObjectResult = class {
+ constructor({
+ model,
+ headers,
+ telemetry,
+ settings,
+ maxRetries,
+ abortSignal,
+ outputStrategy,
+ system,
+ prompt,
+ messages,
+ schemaName,
+ schemaDescription,
+ inputProviderMetadata,
+ mode,
+ onFinish,
+ generateId: generateId3,
+ currentDate,
+ now: now2
+ }) {
+ this.objectPromise = new DelayedPromise();
+ this.usagePromise = new DelayedPromise();
+ this.providerMetadataPromise = new DelayedPromise();
+ this.warningsPromise = new DelayedPromise();
+ this.requestPromise = new DelayedPromise();
+ this.responsePromise = new DelayedPromise();
+ this.stitchableStream = createStitchableStream();
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ model,
  telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.streamObject",
- telemetry
- }),
- ...baseTelemetryAttributes,
- // specific settings that only make sense on the outer level:
- "ai.prompt": {
- input: () => JSON.stringify({ system, prompt, messages })
- },
- "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
- "ai.schema.name": schemaName,
- "ai.schema.description": schemaDescription,
- "ai.settings.output": outputStrategy.type,
- "ai.settings.mode": mode
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (rootSpan) => {
- if (mode === "auto" || mode == null) {
- mode = model.defaultObjectGenerationMode;
- }
- let callOptions;
- let transformer;
- switch (mode) {
- case "json": {
- const standardizedPrompt = standardizePrompt({
- prompt: {
- system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
- prompt: system,
- schema: outputStrategy.jsonSchema
+ headers,
+ settings: { ...settings, maxRetries }
+ });
+ const tracer = getTracer(telemetry);
+ const retry = retryWithExponentialBackoff({ maxRetries });
+ const self = this;
+ recordSpan({
+ name: "ai.streamObject",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.streamObject",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.prompt": {
+ input: () => JSON.stringify({ system, prompt, messages })
+ },
+ "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+ "ai.schema.name": schemaName,
+ "ai.schema.description": schemaDescription,
+ "ai.settings.output": outputStrategy.type,
+ "ai.settings.mode": mode
+ }
+ }),
+ tracer,
+ endWhenDone: false,
+ fn: async (rootSpan) => {
+ if (mode === "auto" || mode == null) {
+ mode = model.defaultObjectGenerationMode;
+ }
+ let callOptions;
+ let transformer;
+ switch (mode) {
+ case "json": {
+ const standardizedPrompt = standardizePrompt({
+ prompt: {
+ system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
+ prompt: system,
+ schema: outputStrategy.jsonSchema
+ }),
+ prompt,
+ messages
+ },
+ tools: void 0
+ });
+ callOptions = {
+ mode: {
+ type: "object-json",
+ schema: outputStrategy.jsonSchema,
+ name: schemaName,
+ description: schemaDescription
+ },
+ ...prepareCallSettings(settings),
+ inputFormat: standardizedPrompt.type,
+ prompt: await convertToLanguageModelPrompt({
+ prompt: standardizedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
  }),
- prompt,
- messages
- },
- tools: void 0
- });
- callOptions = {
- mode: {
- type: "object-json",
- schema: outputStrategy.jsonSchema,
- name: schemaName,
- description: schemaDescription
- },
- ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
- prompt: await convertToLanguageModelPrompt({
- prompt: standardizedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
- }),
- providerMetadata,
- abortSignal,
- headers
- };
- transformer = {
- transform: (chunk, controller) => {
- switch (chunk.type) {
- case "text-delta":
- controller.enqueue(chunk.textDelta);
- break;
- case "response-metadata":
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
+ providerMetadata: inputProviderMetadata,
+ abortSignal,
+ headers
+ };
+ transformer = {
+ transform: (chunk, controller) => {
+ switch (chunk.type) {
+ case "text-delta":
+ controller.enqueue(chunk.textDelta);
+ break;
+ case "response-metadata":
+ case "finish":
+ case "error":
+ controller.enqueue(chunk);
+ break;
+ }
  }
- }
- };
- break;
+ };
+ break;
+ }
+ case "tool": {
+ const standardizedPrompt = standardizePrompt({
+ prompt: { system, prompt, messages },
+ tools: void 0
+ });
+ callOptions = {
+ mode: {
+ type: "object-tool",
+ tool: {
+ type: "function",
+ name: schemaName != null ? schemaName : "json",
+ description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
+ parameters: outputStrategy.jsonSchema
+ }
+ },
+ ...prepareCallSettings(settings),
+ inputFormat: standardizedPrompt.type,
+ prompt: await convertToLanguageModelPrompt({
+ prompt: standardizedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
+ }),
+ providerMetadata: inputProviderMetadata,
+ abortSignal,
+ headers
+ };
+ transformer = {
+ transform(chunk, controller) {
+ switch (chunk.type) {
+ case "tool-call-delta":
+ controller.enqueue(chunk.argsTextDelta);
+ break;
+ case "response-metadata":
+ case "finish":
+ case "error":
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ };
+ break;
+ }
+ case void 0: {
+ throw new Error(
+ "Model does not have a default object generation mode."
+ );
+ }
+ default: {
+ const _exhaustiveCheck = mode;
+ throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+ }
  }
- case "tool": {
- const standardizedPrompt = standardizePrompt({
- prompt: { system, prompt, messages },
- tools: void 0
- });
- callOptions = {
- mode: {
- type: "object-tool",
- tool: {
- type: "function",
- name: schemaName != null ? schemaName : "json",
- description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
- parameters: outputStrategy.jsonSchema
+ const {
+ result: { stream, warnings, rawResponse, request },
+ doStreamSpan,
+ startTimestampMs
+ } = await retry(
+ () => recordSpan({
+ name: "ai.streamObject.doStream",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.streamObject.doStream",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ "ai.prompt.format": {
+ input: () => callOptions.inputFormat
+ },
+ "ai.prompt.messages": {
+ input: () => JSON.stringify(callOptions.prompt)
+ },
+ "ai.settings.mode": mode,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+ "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
+ "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_k": settings.topK,
+ "gen_ai.request.top_p": settings.topP
  }
- },
- ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
- prompt: await convertToLanguageModelPrompt({
- prompt: standardizedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
  }),
- providerMetadata,
- abortSignal,
- headers
- };
- transformer = {
- transform(chunk, controller) {
+ tracer,
+ endWhenDone: false,
+ fn: async (doStreamSpan2) => ({
+ startTimestampMs: now2(),
+ doStreamSpan: doStreamSpan2,
+ result: await model.doStream(callOptions)
+ })
+ })
+ );
+ self.requestPromise.resolve(request != null ? request : {});
+ let usage;
+ let finishReason;
+ let providerMetadata;
+ let object;
+ let error;
+ let accumulatedText = "";
+ let textDelta = "";
+ let response = {
+ id: generateId3(),
+ timestamp: currentDate(),
+ modelId: model.modelId
+ };
+ let latestObjectJson = void 0;
+ let latestObject = void 0;
+ let isFirstChunk = true;
+ let isFirstDelta = true;
+ const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
+ new TransformStream({
+ async transform(chunk, controller) {
+ var _a11, _b, _c;
+ if (isFirstChunk) {
+ const msToFirstChunk = now2() - startTimestampMs;
+ isFirstChunk = false;
+ doStreamSpan.addEvent("ai.stream.firstChunk", {
+ "ai.stream.msToFirstChunk": msToFirstChunk
+ });
+ doStreamSpan.setAttributes({
+ "ai.stream.msToFirstChunk": msToFirstChunk
+ });
+ }
+ if (typeof chunk === "string") {
+ accumulatedText += chunk;
+ textDelta += chunk;
+ const { value: currentObjectJson, state: parseState } = (0, import_ui_utils2.parsePartialJson)(accumulatedText);
+ if (currentObjectJson !== void 0 && !(0, import_ui_utils2.isDeepEqualData)(latestObjectJson, currentObjectJson)) {
+ const validationResult = outputStrategy.validatePartialResult({
+ value: currentObjectJson,
+ textDelta,
+ latestObject,
+ isFirstDelta,
+ isFinalDelta: parseState === "successful-parse"
+ });
+ if (validationResult.success && !(0, import_ui_utils2.isDeepEqualData)(
+ latestObject,
+ validationResult.value.partial
+ )) {
+ latestObjectJson = currentObjectJson;
+ latestObject = validationResult.value.partial;
+ controller.enqueue({
+ type: "object",
+ object: latestObject
+ });
+ controller.enqueue({
+ type: "text-delta",
+ textDelta: validationResult.value.textDelta
+ });
+ textDelta = "";
+ isFirstDelta = false;
+ }
+ }
+ return;
+ }
  switch (chunk.type) {
- case "tool-call-delta":
- controller.enqueue(chunk.argsTextDelta);
+ case "response-metadata": {
+ response = {
+ id: (_a11 = chunk.id) != null ? _a11 : response.id,
+ timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
+ modelId: (_c = chunk.modelId) != null ? _c : response.modelId
+ };
  break;
- case "response-metadata":
- case "finish":
- case "error":
+ }
+ case "finish": {
+ if (textDelta !== "") {
+ controller.enqueue({ type: "text-delta", textDelta });
+ }
+ finishReason = chunk.finishReason;
+ usage = calculateLanguageModelUsage(chunk.usage);
+ providerMetadata = chunk.providerMetadata;
+ controller.enqueue({ ...chunk, usage, response });
+ self.usagePromise.resolve(usage);
+ self.providerMetadataPromise.resolve(providerMetadata);
+ self.responsePromise.resolve({
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ });
+ const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
+ if (validationResult.success) {
+ object = validationResult.value;
+ self.objectPromise.resolve(object);
+ } else {
+ error = validationResult.error;
+ self.objectPromise.reject(error);
+ }
+ break;
+ }
+ default: {
  controller.enqueue(chunk);
  break;
+ }
+ }
+ },
+ // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+ async flush(controller) {
+ try {
+ const finalUsage = usage != null ? usage : {
+ promptTokens: NaN,
+ completionTokens: NaN,
+ totalTokens: NaN
+ };
+ doStreamSpan.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": finishReason,
+ "ai.response.object": {
+ output: () => JSON.stringify(object)
+ },
+ "ai.response.id": response.id,
+ "ai.response.model": response.modelId,
+ "ai.response.timestamp": response.timestamp.toISOString(),
+ "ai.usage.promptTokens": finalUsage.promptTokens,
+ "ai.usage.completionTokens": finalUsage.completionTokens,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [finishReason],
+ "gen_ai.response.id": response.id,
+ "gen_ai.response.model": response.modelId,
+ "gen_ai.usage.input_tokens": finalUsage.promptTokens,
+ "gen_ai.usage.output_tokens": finalUsage.completionTokens
+ }
+ })
+ );
+ doStreamSpan.end();
+ rootSpan.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.usage.promptTokens": finalUsage.promptTokens,
+ "ai.usage.completionTokens": finalUsage.completionTokens,
+ "ai.response.object": {
+ output: () => JSON.stringify(object)
+ }
+ }
+ })
+ );
+ await (onFinish == null ? void 0 : onFinish({
+ usage: finalUsage,
+ object,
+ error,
+ response: {
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ },
+ warnings,
+ experimental_providerMetadata: providerMetadata
+ }));
+ } catch (error2) {
+ controller.error(error2);
+ } finally {
+ rootSpan.end();
  }
  }
- };
- break;
- }
- case void 0: {
- throw new Error(
- "Model does not have a default object generation mode."
- );
- }
- default: {
- const _exhaustiveCheck = mode;
- throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
- }
- }
- const {
- result: { stream, warnings, rawResponse, request },
- doStreamSpan,
- startTimestampMs
- } = await retry(
- () => recordSpan({
- name: "ai.streamObject.doStream",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.streamObject.doStream",
- telemetry
- }),
- ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => callOptions.inputFormat
- },
- "ai.prompt.messages": {
- input: () => JSON.stringify(callOptions.prompt)
- },
- "ai.settings.mode": mode,
- // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
- "gen_ai.request.presence_penalty": settings.presencePenalty,
- "gen_ai.request.temperature": settings.temperature,
- "gen_ai.request.top_k": settings.topK,
- "gen_ai.request.top_p": settings.topP
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (doStreamSpan2) => ({
- startTimestampMs: now2(),
- doStreamSpan: doStreamSpan2,
- result: await model.doStream(callOptions)
  })
+ );
+ self.stitchableStream.addStream(transformedStream);
+ }
+ }).catch((error) => {
+ self.stitchableStream.addStream(
+ new ReadableStream({
+ start(controller) {
+ controller.error(error);
+ }
  })
  );
- return new DefaultStreamObjectResult({
- outputStrategy,
- stream: stream.pipeThrough(new TransformStream(transformer)),
- warnings,
- rawResponse,
- request: request != null ? request : {},
- onFinish,
- rootSpan,
- doStreamSpan,
- telemetry,
- startTimestampMs,
- modelId: model.modelId,
- now: now2,
- currentDate,
- generateId: generateId3
- });
- }
- });
- }
- var DefaultStreamObjectResult = class {
- constructor({
- stream,
- warnings,
- rawResponse,
- request,
- outputStrategy,
- onFinish,
- rootSpan,
- doStreamSpan,
- telemetry,
- startTimestampMs,
- modelId,
- now: now2,
- currentDate,
- generateId: generateId3
- }) {
- this.warnings = warnings;
+ }).finally(() => {
+ self.stitchableStream.close();
+ });
  this.outputStrategy = outputStrategy;
- this.request = Promise.resolve(request);
- this.objectPromise = new DelayedPromise();
- const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
- this.usage = usagePromise;
- const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
- this.response = responsePromise;
- const {
- resolve: resolveProviderMetadata,
- promise: providerMetadataPromise
- } = createResolvablePromise();
- this.experimental_providerMetadata = providerMetadataPromise;
- let usage;
- let finishReason;
- let providerMetadata;
- let object;
- let error;
- let accumulatedText = "";
- let textDelta = "";
- let response = {
- id: generateId3(),
- timestamp: currentDate(),
- modelId
- };
- let latestObjectJson = void 0;
- let latestObject = void 0;
- let isFirstChunk = true;
- let isFirstDelta = true;
- const self = this;
- this.originalStream = stream.pipeThrough(
- new TransformStream({
- async transform(chunk, controller) {
- var _a11, _b, _c;
- if (isFirstChunk) {
- const msToFirstChunk = now2() - startTimestampMs;
- isFirstChunk = false;
- doStreamSpan.addEvent("ai.stream.firstChunk", {
- "ai.stream.msToFirstChunk": msToFirstChunk
- });
- doStreamSpan.setAttributes({
- "ai.stream.msToFirstChunk": msToFirstChunk
- });
- }
- if (typeof chunk === "string") {
- accumulatedText += chunk;
- textDelta += chunk;
- const { value: currentObjectJson, state: parseState } = (0, import_ui_utils2.parsePartialJson)(accumulatedText);
- if (currentObjectJson !== void 0 && !(0, import_ui_utils2.isDeepEqualData)(latestObjectJson, currentObjectJson)) {
- const validationResult = outputStrategy.validatePartialResult({
- value: currentObjectJson,
- textDelta,
- latestObject,
- isFirstDelta,
- isFinalDelta: parseState === "successful-parse"
- });
- if (validationResult.success && !(0, import_ui_utils2.isDeepEqualData)(latestObject, validationResult.value.partial)) {
- latestObjectJson = currentObjectJson;
- latestObject = validationResult.value.partial;
- controller.enqueue({
- type: "object",
- object: latestObject
- });
- controller.enqueue({
- type: "text-delta",
- textDelta: validationResult.value.textDelta
- });
- textDelta = "";
- isFirstDelta = false;
- }
- }
- return;
- }
- switch (chunk.type) {
- case "response-metadata": {
- response = {
- id: (_a11 = chunk.id) != null ? _a11 : response.id,
- timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
- modelId: (_c = chunk.modelId) != null ? _c : response.modelId
- };
- break;
- }
- case "finish": {
- if (textDelta !== "") {
- controller.enqueue({ type: "text-delta", textDelta });
- }
- finishReason = chunk.finishReason;
- usage = calculateLanguageModelUsage(chunk.usage);
- providerMetadata = chunk.providerMetadata;
- controller.enqueue({ ...chunk, usage, response });
- resolveUsage(usage);
- resolveProviderMetadata(providerMetadata);
- resolveResponse({
- ...response,
- headers: rawResponse == null ? void 0 : rawResponse.headers
- });
- const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
- if (validationResult.success) {
- object = validationResult.value;
- self.objectPromise.resolve(object);
- } else {
- error = validationResult.error;
- self.objectPromise.reject(error);
- }
- break;
- }
- default: {
- controller.enqueue(chunk);
- break;
- }
- }
- },
- // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
- async flush(controller) {
- try {
- const finalUsage = usage != null ? usage : {
- promptTokens: NaN,
- completionTokens: NaN,
- totalTokens: NaN
- };
- doStreamSpan.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": finishReason,
- "ai.response.object": {
- output: () => JSON.stringify(object)
- },
- "ai.response.id": response.id,
- "ai.response.model": response.modelId,
- "ai.response.timestamp": response.timestamp.toISOString(),
- "ai.usage.promptTokens": finalUsage.promptTokens,
- "ai.usage.completionTokens": finalUsage.completionTokens,
- // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [finishReason],
- "gen_ai.response.id": response.id,
- "gen_ai.response.model": response.modelId,
- "gen_ai.usage.input_tokens": finalUsage.promptTokens,
- "gen_ai.usage.output_tokens": finalUsage.completionTokens
- }
- })
- );
- doStreamSpan.end();
- rootSpan.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.usage.promptTokens": finalUsage.promptTokens,
- "ai.usage.completionTokens": finalUsage.completionTokens,
- "ai.response.object": {
- output: () => JSON.stringify(object)
- }
- }
- })
- );
- await (onFinish == null ? void 0 : onFinish({
- usage: finalUsage,
- object,
- error,
- response: {
- ...response,
- headers: rawResponse == null ? void 0 : rawResponse.headers
- },
- warnings,
- experimental_providerMetadata: providerMetadata
- }));
- } catch (error2) {
- controller.error(error2);
- } finally {
- rootSpan.end();
- }
- }
- })
- );
  }
  get object() {
  return this.objectPromise.value;
  }
+ get usage() {
+ return this.usagePromise.value;
+ }
+ get experimental_providerMetadata() {
+ return this.providerMetadataPromise.value;
+ }
+ get warnings() {
+ return this.warningsPromise.value;
+ }
+ get request() {
+ return this.requestPromise.value;
+ }
+ get response() {
+ return this.responsePromise.value;
+ }
  get partialObjectStream() {
- return createAsyncIterableStream(this.originalStream, {
+ return createAsyncIterableStream(this.stitchableStream.stream, {
  transform(chunk, controller) {
  switch (chunk.type) {
  case "object":
@@ -2894,10 +2996,12 @@ var DefaultStreamObjectResult = class {
  });
  }
  get elementStream() {
- return this.outputStrategy.createElementStream(this.originalStream);
+ return this.outputStrategy.createElementStream(
+ this.stitchableStream.stream
+ );
  }
  get textStream() {
- return createAsyncIterableStream(this.originalStream, {
+ return createAsyncIterableStream(this.stitchableStream.stream, {
  transform(chunk, controller) {
  switch (chunk.type) {
  case "text-delta":
@@ -2918,7 +3022,7 @@ var DefaultStreamObjectResult = class {
  });
  }
  get fullStream() {
- return createAsyncIterableStream(this.originalStream, {
+ return createAsyncIterableStream(this.stitchableStream.stream, {
  transform(chunk, controller) {
  controller.enqueue(chunk);
  }
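Note: per the hunks above, `streamObject` is no longer `async`: it synchronously returns a `DefaultStreamObjectResult` whose promises and streams settle as the model responds, and errors now surface through the stitched stream rather than as a rejected call. A usage sketch (`model` is a placeholder for any configured provider model):

```ts
import { streamObject, type LanguageModel } from "ai";
import { z } from "zod";

declare const model: LanguageModel; // e.g. a model from a provider package

// 4.0.1: no `await` on the call itself anymore
const result = streamObject({
  model,
  schema: z.object({ recipe: z.string() }),
  prompt: "Generate a lasagna recipe.",
});

// the response-dependent parts are still async:
for await (const partialObject of result.partialObjectStream) {
  console.log(partialObject);
}
console.log(await result.object);
```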
@@ -3220,15 +3324,16 @@ async function generateText({
  };
  let stepType = "initial";
  do {
- if (stepCount === 1) {
- initialPrompt.type = "messages";
- }
  const promptFormat = stepCount === 0 ? initialPrompt.type : "messages";
+ const stepInputMessages = [
+ ...initialPrompt.messages,
+ ...responseMessages
+ ];
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
  type: promptFormat,
  system: initialPrompt.system,
- messages: [...initialPrompt.messages, ...responseMessages]
+ messages: stepInputMessages
  },
  modelSupportsImageUrls: model.supportsImageUrls,
  modelSupportsUrl: model.supportsUrl
@@ -3324,6 +3429,7 @@ async function generateText({
  tools,
  tracer,
  telemetry,
+ messages: stepInputMessages,
  abortSignal
  });
  const currentUsage = calculateLanguageModelUsage(
@@ -3434,6 +3540,7 @@ async function executeTools({
  tools,
  tracer,
  telemetry,
+ messages,
  abortSignal
  }) {
  const toolResults = await Promise.all(
@@ -3460,7 +3567,10 @@ async function executeTools({
  }),
  tracer,
  fn: async (span) => {
- const result2 = await tool2.execute(toolCall.args, { abortSignal });
+ const result2 = await tool2.execute(toolCall.args, {
+ messages,
+ abortSignal
+ });
  try {
  span.setAttributes(
  selectTelemetryAttributes({
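Note: `executeTools` (and `runToolsTransformation` below) now pass the step's input messages to each tool's `execute` function alongside `abortSignal` in its second argument. A hedged sketch of a tool reading them (the tool name and return shape are illustrative):

```ts
import { tool } from "ai";
import { z } from "zod";

const summarize = tool({
  description: "Summarize the conversation so far.",
  parameters: z.object({}),
  // the second argument now carries { messages, abortSignal }
  execute: async (_args, { messages, abortSignal }) => {
    // `messages` are the messages sent to the model for this step
    return { stepMessageCount: messages.length };
  },
});
```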
@@ -3509,68 +3619,6 @@ var DefaultGenerateTextResult = class {
  var import_provider_utils9 = require("@ai-sdk/provider-utils");
  var import_ui_utils6 = require("@ai-sdk/ui-utils");

- // core/util/create-stitchable-stream.ts
- function createStitchableStream() {
- let innerStreamReaders = [];
- let controller = null;
- let isClosed = false;
- const processPull = async () => {
- if (isClosed && innerStreamReaders.length === 0) {
- controller == null ? void 0 : controller.close();
- return;
- }
- if (innerStreamReaders.length === 0) {
- return;
- }
- try {
- const { value, done } = await innerStreamReaders[0].read();
- if (done) {
- innerStreamReaders.shift();
- if (innerStreamReaders.length > 0) {
- await processPull();
- } else if (isClosed) {
- controller == null ? void 0 : controller.close();
- }
- } else {
- controller == null ? void 0 : controller.enqueue(value);
- }
- } catch (error) {
- controller == null ? void 0 : controller.error(error);
- innerStreamReaders.shift();
- if (isClosed && innerStreamReaders.length === 0) {
- controller == null ? void 0 : controller.close();
- }
- }
- };
- return {
- stream: new ReadableStream({
- start(controllerParam) {
- controller = controllerParam;
- },
- pull: processPull,
- async cancel() {
- for (const reader of innerStreamReaders) {
- await reader.cancel();
- }
- innerStreamReaders = [];
- isClosed = true;
- }
- }),
- addStream: (innerStream) => {
- if (isClosed) {
- throw new Error("Cannot add inner stream: outer stream is closed");
- }
- innerStreamReaders.push(innerStream.getReader());
- },
- close: () => {
- isClosed = true;
- if (innerStreamReaders.length === 0) {
- controller == null ? void 0 : controller.close();
- }
- }
- };
- }
-
  // core/util/merge-streams.ts
  function mergeStreams(stream1, stream2) {
  const reader1 = stream1.getReader();
@@ -3667,6 +3715,7 @@ function runToolsTransformation({
  toolCallStreaming,
  tracer,
  telemetry,
+ messages,
  abortSignal
  }) {
  let toolResultsStreamController = null;
@@ -3762,7 +3811,10 @@ function runToolsTransformation({
  }
  }),
  tracer,
- fn: async (span) => tool2.execute(toolCall.args, { abortSignal }).then(
+ fn: async (span) => tool2.execute(toolCall.args, {
+ messages,
+ abortSignal
+ }).then(
  (result) => {
  toolResultsStreamController.enqueue({
  ...toolCall,
@@ -3854,7 +3906,7 @@ function runToolsTransformation({

  // core/generate-text/stream-text.ts
  var originalGenerateId4 = (0, import_provider_utils9.createIdGenerator)({ prefix: "aitxt", size: 24 });
- async function streamText({
+ function streamText({
  model,
  tools,
  toolChoice,
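Note: as with `streamObject`, `streamText` changes from `async function` to a plain function here; the model call is started in the background and failures are delivered through the result's streams. Usage sketch (`model` is again a placeholder):

```ts
import { streamText, type LanguageModel } from "ai";

declare const model: LanguageModel;

// 4.0.1: synchronous call, no top-level await needed
const result = streamText({
  model,
  prompt: "Write a haiku about package diffs.",
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
```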
@@ -3870,594 +3922,587 @@ async function streamText({
  experimental_providerMetadata: providerMetadata,
  experimental_toolCallStreaming: toolCallStreaming = false,
  experimental_activeTools: activeTools,
- onChunk,
- onFinish,
- onStepFinish,
- _internal: {
- now: now2 = now,
- generateId: generateId3 = originalGenerateId4,
- currentDate = () => /* @__PURE__ */ new Date()
- } = {},
- ...settings
- }) {
- if (maxSteps < 1) {
- throw new InvalidArgumentError({
- parameter: "maxSteps",
- value: maxSteps,
- message: "maxSteps must be at least 1"
- });
- }
- const baseTelemetryAttributes = getBaseTelemetryAttributes({
- model,
- telemetry,
- headers,
- settings: { ...settings, maxRetries }
- });
- const tracer = getTracer(telemetry);
- const initialPrompt = standardizePrompt({
- prompt: { system, prompt, messages },
- tools
- });
- return recordSpan({
- name: "ai.streamText",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({ operationId: "ai.streamText", telemetry }),
- ...baseTelemetryAttributes,
- // specific settings that only make sense on the outer level:
- "ai.prompt": {
- input: () => JSON.stringify({ system, prompt, messages })
- },
- "ai.settings.maxSteps": maxSteps
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (rootSpan) => {
- const retry = retryWithExponentialBackoff({ maxRetries });
- const startStep = async ({
- responseMessages
- }) => {
- const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
- const promptMessages = await convertToLanguageModelPrompt({
- prompt: {
- type: promptFormat,
- system: initialPrompt.system,
- messages: [...initialPrompt.messages, ...responseMessages]
- },
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
- });
- const mode = {
- type: "regular",
- ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
- };
- const {
- result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
- doStreamSpan: doStreamSpan2,
- startTimestampMs: startTimestampMs2
- } = await retry(
- () => recordSpan({
- name: "ai.streamText.doStream",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.streamText.doStream",
- telemetry
- }),
- ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => promptFormat
- },
- "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages)
- },
- "ai.prompt.tools": {
- // convert the language model level tools:
- input: () => {
- var _a11;
- return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
- }
- },
- "ai.prompt.toolChoice": {
- input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
- },
- // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
- "gen_ai.request.presence_penalty": settings.presencePenalty,
- "gen_ai.request.stop_sequences": settings.stopSequences,
- "gen_ai.request.temperature": settings.temperature,
- "gen_ai.request.top_k": settings.topK,
- "gen_ai.request.top_p": settings.topP
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (doStreamSpan3) => ({
- startTimestampMs: now2(),
- // get before the call
- doStreamSpan: doStreamSpan3,
- result: await model.doStream({
- mode,
- ...prepareCallSettings(settings),
- inputFormat: promptFormat,
- prompt: promptMessages,
- providerMetadata,
- abortSignal,
- headers
- })
- })
- })
- );
- return {
- result: {
- stream: runToolsTransformation({
- tools,
- generatorStream: stream2,
- toolCallStreaming,
- tracer,
- telemetry,
- abortSignal
- }),
- warnings: warnings2,
- request: request2 != null ? request2 : {},
- rawResponse: rawResponse2
- },
- doStreamSpan: doStreamSpan2,
- startTimestampMs: startTimestampMs2
- };
- };
- const {
- result: { stream, warnings, rawResponse, request },
- doStreamSpan,
- startTimestampMs
- } = await startStep({ responseMessages: [] });
- return new DefaultStreamTextResult({
- stream,
- warnings,
- rawResponse,
- request,
- onChunk,
- onFinish,
- onStepFinish,
- rootSpan,
- doStreamSpan,
- telemetry,
- startTimestampMs,
- maxSteps,
- continueSteps,
- startStep,
- modelId: model.modelId,
- now: now2,
- currentDate,
- generateId: generateId3,
- tools
- });
- }
+ onChunk,
+ onFinish,
+ onStepFinish,
+ _internal: {
+ now: now2 = now,
+ generateId: generateId3 = originalGenerateId4,
+ currentDate = () => /* @__PURE__ */ new Date()
+ } = {},
+ ...settings
+ }) {
+ return new DefaultStreamTextResult({
+ model,
+ telemetry,
+ headers,
+ settings,
+ maxRetries,
+ abortSignal,
+ system,
+ prompt,
+ messages,
+ tools,
+ toolChoice,
+ toolCallStreaming,
+ activeTools,
+ maxSteps,
+ continueSteps,
+ providerMetadata,
+ onChunk,
+ onFinish,
+ onStepFinish,
+ now: now2,
+ currentDate,
+ generateId: generateId3
  });
  }
  var DefaultStreamTextResult = class {
  constructor({
- stream,
- warnings,
- rawResponse,
- request,
- onChunk,
- onFinish,
- onStepFinish,
- rootSpan,
- doStreamSpan,
+ model,
  telemetry,
- startTimestampMs,
+ headers,
+ settings,
+ maxRetries,
+ abortSignal,
+ system,
+ prompt,
+ messages,
+ tools,
+ toolChoice,
+ toolCallStreaming,
+ activeTools,
  maxSteps,
  continueSteps,
- startStep,
- modelId,
+ providerMetadata,
+ onChunk,
+ onFinish,
+ onStepFinish,
  now: now2,
  currentDate,
- generateId: generateId3,
- tools
+ generateId: generateId3
  }) {
- this.rawWarnings = warnings;
- this.rawResponse = rawResponse;
- const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
- this.usage = usagePromise;
- const { resolve: resolveFinishReason, promise: finishReasonPromise } = createResolvablePromise();
- this.finishReason = finishReasonPromise;
- const { resolve: resolveText, promise: textPromise } = createResolvablePromise();
- this.text = textPromise;
- const { resolve: resolveToolCalls, promise: toolCallsPromise } = createResolvablePromise();
- this.toolCalls = toolCallsPromise;
- const { resolve: resolveToolResults, promise: toolResultsPromise } = createResolvablePromise();
- this.toolResults = toolResultsPromise;
- const { resolve: resolveSteps, promise: stepsPromise } = createResolvablePromise();
- this.steps = stepsPromise;
- const {
- resolve: resolveProviderMetadata,
- promise: providerMetadataPromise
- } = createResolvablePromise();
- this.experimental_providerMetadata = providerMetadataPromise;
- const { resolve: resolveRequest, promise: requestPromise } = createResolvablePromise();
- this.request = requestPromise;
- const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
- this.response = responsePromise;
- const { resolve: resolveWarnings, promise: warningsPromise } = createResolvablePromise();
- this.warnings = warningsPromise;
- const {
- stream: stitchableStream,
- addStream,
- close: closeStitchableStream
- } = createStitchableStream();
- this.originalStream = stitchableStream;
- const stepResults = [];
+ this.warningsPromise = new DelayedPromise();
+ this.usagePromise = new DelayedPromise();
+ this.finishReasonPromise = new DelayedPromise();
+ this.providerMetadataPromise = new DelayedPromise();
+ this.textPromise = new DelayedPromise();
+ this.toolCallsPromise = new DelayedPromise();
+ this.toolResultsPromise = new DelayedPromise();
+ this.requestPromise = new DelayedPromise();
+ this.responsePromise = new DelayedPromise();
+ this.stepsPromise = new DelayedPromise();
+ this.stitchableStream = createStitchableStream();
+ if (maxSteps < 1) {
+ throw new InvalidArgumentError({
+ parameter: "maxSteps",
+ value: maxSteps,
+ message: "maxSteps must be at least 1"
+ });
+ }
+ const tracer = getTracer(telemetry);
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ model,
+ telemetry,
+ headers,
+ settings: { ...settings, maxRetries }
+ });
+ const initialPrompt = standardizePrompt({
+ prompt: { system, prompt, messages },
+ tools
+ });
  const self = this;
- function addStepStream({
- stream: stream2,
- startTimestamp,
- doStreamSpan: doStreamSpan2,
- currentStep,
- responseMessages,
- usage = {
- promptTokens: 0,
- completionTokens: 0,
- totalTokens: 0
- },
- stepType,
- previousStepText = "",
- stepRequest,
- hasLeadingWhitespace
- }) {
- const stepToolCalls = [];
- const stepToolResults = [];
- let stepFinishReason = "unknown";
- let stepUsage = {
- promptTokens: 0,
- completionTokens: 0,
- totalTokens: 0
- };
- let stepProviderMetadata;
- let stepFirstChunk = true;
- let stepText = "";
- let fullStepText = stepType === "continue" ? previousStepText : "";
- let stepLogProbs;
- let stepResponse = {
- id: generateId3(),
- timestamp: currentDate(),
- modelId
- };
- let chunkBuffer = "";
- let chunkTextPublished = false;
- let inWhitespacePrefix = true;
- let hasWhitespaceSuffix = false;
- async function publishTextChunk({
- controller,
- chunk
- }) {
- controller.enqueue(chunk);
- stepText += chunk.textDelta;
- fullStepText += chunk.textDelta;
- chunkTextPublished = true;
- hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
- await (onChunk == null ? void 0 : onChunk({ chunk }));
- }
- addStream(
- stream2.pipeThrough(
- new TransformStream({
- async transform(chunk, controller) {
- var _a11, _b, _c;
- if (stepFirstChunk) {
- const msToFirstChunk = now2() - startTimestamp;
- stepFirstChunk = false;
- doStreamSpan2.addEvent("ai.stream.firstChunk", {
- "ai.response.msToFirstChunk": msToFirstChunk
- });
- doStreamSpan2.setAttributes({
- "ai.response.msToFirstChunk": msToFirstChunk
- });
- }
- if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
- return;
- }
- const chunkType = chunk.type;
- switch (chunkType) {
- case "text-delta": {
- if (continueSteps) {
- const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
- if (trimmedChunkText.length === 0) {
- break;
+ recordSpan({
+ name: "ai.streamText",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({ operationId: "ai.streamText", telemetry }),
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.prompt": {
+ input: () => JSON.stringify({ system, prompt, messages })
+ },
+ "ai.settings.maxSteps": maxSteps
+ }
+ }),
+ tracer,
+ endWhenDone: false,
+ fn: async (rootSpan) => {
+ const retry = retryWithExponentialBackoff({ maxRetries });
+ const stepResults = [];
+ async function streamStep({
+ currentStep,
+ responseMessages,
+ usage,
+ stepType,
+ previousStepText,
+ hasLeadingWhitespace
+ }) {
+ const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
+ const stepInputMessages = [
+ ...initialPrompt.messages,
+ ...responseMessages
+ ];
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: {
+ type: promptFormat,
+ system: initialPrompt.system,
+ messages: stepInputMessages
+ },
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
+ });
+ const mode = {
+ type: "regular",
+ ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
+ };
+ const {
+ result: { stream, warnings, rawResponse, request },
+ doStreamSpan,
+ startTimestampMs
+ } = await retry(
+ () => recordSpan({
+ name: "ai.streamText.doStream",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.streamText.doStream",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ "ai.prompt.format": {
+ input: () => promptFormat
+ },
+ "ai.prompt.messages": {
+ input: () => JSON.stringify(promptMessages)
+ },
+ "ai.prompt.tools": {
+ // convert the language model level tools:
+ input: () => {
+ var _a11;
+ return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
  }
- inWhitespacePrefix = false;
- chunkBuffer += trimmedChunkText;
- const split = splitOnLastWhitespace(chunkBuffer);
- if (split != null) {
- chunkBuffer = split.suffix;
- await publishTextChunk({
- controller,
- chunk: {
- type: "text-delta",
- textDelta: split.prefix + split.whitespace
+ },
+ "ai.prompt.toolChoice": {
+ input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
+ },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+ "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
+ "gen_ai.request.stop_sequences": settings.stopSequences,
+ "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_k": settings.topK,
+ "gen_ai.request.top_p": settings.topP
+ }
+ }),
+ tracer,
+ endWhenDone: false,
+ fn: async (doStreamSpan2) => ({
+ startTimestampMs: now2(),
+ // get before the call
+ doStreamSpan: doStreamSpan2,
+ result: await model.doStream({
+ mode,
+ ...prepareCallSettings(settings),
+ inputFormat: promptFormat,
+ prompt: promptMessages,
+ providerMetadata,
+ abortSignal,
+ headers
+ })
+ })
+ })
+ );
+ const transformedStream = runToolsTransformation({
+ tools,
+ generatorStream: stream,
+ toolCallStreaming,
+ tracer,
+ telemetry,
+ messages: stepInputMessages,
+ abortSignal
+ });
+ const stepRequest = request != null ? request : {};
+ const stepToolCalls = [];
+ const stepToolResults = [];
+ let stepFinishReason = "unknown";
+ let stepUsage = {
+ promptTokens: 0,
+ completionTokens: 0,
+ totalTokens: 0
+ };
+ let stepProviderMetadata;
+ let stepFirstChunk = true;
+ let stepText = "";
+ let fullStepText = stepType === "continue" ? previousStepText : "";
+ let stepLogProbs;
+ let stepResponse = {
+ id: generateId3(),
+ timestamp: currentDate(),
+ modelId: model.modelId
+ };
+ let chunkBuffer = "";
+ let chunkTextPublished = false;
+ let inWhitespacePrefix = true;
+ let hasWhitespaceSuffix = false;
+ async function publishTextChunk({
+ controller,
+ chunk
+ }) {
+ controller.enqueue(chunk);
+ stepText += chunk.textDelta;
+ fullStepText += chunk.textDelta;
+ chunkTextPublished = true;
+ hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
+ }
+ self.stitchableStream.addStream(
+ transformedStream.pipeThrough(
+ new TransformStream({
+ async transform(chunk, controller) {
+ var _a11, _b, _c;
+ if (stepFirstChunk) {
+ const msToFirstChunk = now2() - startTimestampMs;
+ stepFirstChunk = false;
+ doStreamSpan.addEvent("ai.stream.firstChunk", {
+ "ai.response.msToFirstChunk": msToFirstChunk
+ });
+ doStreamSpan.setAttributes({
+ "ai.response.msToFirstChunk": msToFirstChunk
+ });
+ }
+ if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
+ return;
+ }
+ const chunkType = chunk.type;
+ switch (chunkType) {
+ case "text-delta": {
+ if (continueSteps) {
+ const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
+ if (trimmedChunkText.length === 0) {
+ break;
+ }
+ inWhitespacePrefix = false;
+ chunkBuffer += trimmedChunkText;
+ const split = splitOnLastWhitespace(chunkBuffer);
+ if (split != null) {
+ chunkBuffer = split.suffix;
+ await publishTextChunk({
+ controller,
+ chunk: {
+ type: "text-delta",
+ textDelta: split.prefix + split.whitespace
+ }
+ });
  }
+ } else {
+ await publishTextChunk({ controller, chunk });
+ }
+ break;
+ }
+ case "tool-call": {
+ controller.enqueue(chunk);
+ stepToolCalls.push(chunk);
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
+ break;
+ }
+ case "tool-result": {
+ controller.enqueue(chunk);
+ stepToolResults.push(chunk);
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
+ break;
+ }
+ case "response-metadata": {
+ stepResponse = {
+ id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
+ timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
+ modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
+ };
+ break;
+ }
+ case "finish": {
+ stepUsage = chunk.usage;
+ stepFinishReason = chunk.finishReason;
+ stepProviderMetadata = chunk.experimental_providerMetadata;
+ stepLogProbs = chunk.logprobs;
+ const msToFinish = now2() - startTimestampMs;
+ doStreamSpan.addEvent("ai.stream.finish");
+ doStreamSpan.setAttributes({
+ "ai.response.msToFinish": msToFinish,
+ "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
  });
+ break;
  }
- } else {
- await publishTextChunk({ controller, chunk });
- }
- break;
- }
- case "tool-call": {
- controller.enqueue(chunk);
- stepToolCalls.push(chunk);
- await (onChunk == null ? void 0 : onChunk({ chunk }));
- break;
- }
- case "tool-result": {
- controller.enqueue(chunk);
- stepToolResults.push(chunk);
- await (onChunk == null ? void 0 : onChunk({ chunk }));
- break;
- }
- case "response-metadata": {
- stepResponse = {
- id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
- timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
- modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
- };
- break;
- }
- case "finish": {
- stepUsage = chunk.usage;
- stepFinishReason = chunk.finishReason;
- stepProviderMetadata = chunk.experimental_providerMetadata;
- stepLogProbs = chunk.logprobs;
- const msToFinish = now2() - startTimestamp;
- doStreamSpan2.addEvent("ai.stream.finish");
- doStreamSpan2.setAttributes({
- "ai.response.msToFinish": msToFinish,
- "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
- });
- break;
- }
- case "tool-call-streaming-start":
- case "tool-call-delta": {
- controller.enqueue(chunk);
- await (onChunk == null ? void 0 : onChunk({ chunk }));
- break;
- }
- case "error": {
- controller.enqueue(chunk);
- stepFinishReason = "error";
- break;
- }
- default: {
- const exhaustiveCheck = chunkType;
- throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
- }
- }
- },
- // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
- async flush(controller) {
- var _a11, _b;
- const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
- let nextStepType = "done";
- if (currentStep + 1 < maxSteps) {
- if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
- stepToolCalls.length === 0) {
- nextStepType = "continue";
- } else if (
- // there are tool calls:
- stepToolCalls.length > 0 && // all current tool calls have results:
- stepToolResults.length === stepToolCalls.length
- ) {
- nextStepType = "tool-result";
- }
- }
- if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
- stepType === "continue" && !chunkTextPublished)) {
- await publishTextChunk({
- controller,
- chunk: {
- type: "text-delta",
- textDelta: chunkBuffer
- }
- });
- chunkBuffer = "";
- }
- try {
- doStreamSpan2.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": stepFinishReason,
- "ai.response.text": { output: () => stepText },
- "ai.response.toolCalls": {
- output: () => stepToolCallsJson
- },
- "ai.response.id": stepResponse.id,
- "ai.response.model": stepResponse.modelId,
- "ai.response.timestamp": stepResponse.timestamp.toISOString(),
- "ai.usage.promptTokens": stepUsage.promptTokens,
- "ai.usage.completionTokens": stepUsage.completionTokens,
- // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [stepFinishReason],
- "gen_ai.response.id": stepResponse.id,
- "gen_ai.response.model": stepResponse.modelId,
- "gen_ai.usage.input_tokens": stepUsage.promptTokens,
- "gen_ai.usage.output_tokens": stepUsage.completionTokens
+ case "tool-call-streaming-start":
+ case "tool-call-delta": {
+ controller.enqueue(chunk);
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
+ break;
  }
- })
- );
- } catch (error) {
- } finally {
- doStreamSpan2.end();
- }
- controller.enqueue({
- type: "step-finish",
- finishReason: stepFinishReason,
- usage: stepUsage,
- experimental_providerMetadata: stepProviderMetadata,
4303
- logprobs: stepLogProbs,
4304
- response: {
4305
- ...stepResponse
4247
+ case "error": {
4248
+ controller.enqueue(chunk);
4249
+ stepFinishReason = "error";
4250
+ break;
4251
+ }
4252
+ default: {
4253
+ const exhaustiveCheck = chunkType;
4254
+ throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
4255
+ }
4256
+ }
4306
4257
  },
4307
- isContinued: nextStepType === "continue"
4308
- });
4309
- if (stepType === "continue") {
4310
- const lastMessage = responseMessages[responseMessages.length - 1];
4311
- if (typeof lastMessage.content === "string") {
4312
- lastMessage.content += stepText;
4313
- } else {
4314
- lastMessage.content.push({
4315
- text: stepText,
4316
- type: "text"
4258
+ // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
4259
+ async flush(controller) {
4260
+ const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
4261
+ let nextStepType = "done";
4262
+ if (currentStep + 1 < maxSteps) {
4263
+ if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
4264
+ stepToolCalls.length === 0) {
4265
+ nextStepType = "continue";
4266
+ } else if (
4267
+ // there are tool calls:
4268
+ stepToolCalls.length > 0 && // all current tool calls have results:
4269
+ stepToolResults.length === stepToolCalls.length
4270
+ ) {
4271
+ nextStepType = "tool-result";
4272
+ }
4273
+ }
4274
+ if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
4275
+ stepType === "continue" && !chunkTextPublished)) {
4276
+ await publishTextChunk({
4277
+ controller,
4278
+ chunk: {
4279
+ type: "text-delta",
4280
+ textDelta: chunkBuffer
4281
+ }
4282
+ });
4283
+ chunkBuffer = "";
4284
+ }
4285
+ try {
4286
+ doStreamSpan.setAttributes(
4287
+ selectTelemetryAttributes({
4288
+ telemetry,
4289
+ attributes: {
4290
+ "ai.response.finishReason": stepFinishReason,
4291
+ "ai.response.text": { output: () => stepText },
4292
+ "ai.response.toolCalls": {
4293
+ output: () => stepToolCallsJson
4294
+ },
4295
+ "ai.response.id": stepResponse.id,
4296
+ "ai.response.model": stepResponse.modelId,
4297
+ "ai.response.timestamp": stepResponse.timestamp.toISOString(),
4298
+ "ai.usage.promptTokens": stepUsage.promptTokens,
4299
+ "ai.usage.completionTokens": stepUsage.completionTokens,
4300
+ // standardized gen-ai llm span attributes:
4301
+ "gen_ai.response.finish_reasons": [stepFinishReason],
4302
+ "gen_ai.response.id": stepResponse.id,
4303
+ "gen_ai.response.model": stepResponse.modelId,
4304
+ "gen_ai.usage.input_tokens": stepUsage.promptTokens,
4305
+ "gen_ai.usage.output_tokens": stepUsage.completionTokens
4306
+ }
4307
+ })
4308
+ );
4309
+ } catch (error) {
4310
+ } finally {
4311
+ doStreamSpan.end();
4312
+ }
4313
+ controller.enqueue({
4314
+ type: "step-finish",
4315
+ finishReason: stepFinishReason,
4316
+ usage: stepUsage,
4317
+ experimental_providerMetadata: stepProviderMetadata,
4318
+ logprobs: stepLogProbs,
4319
+ response: {
4320
+ ...stepResponse
4321
+ },
4322
+ isContinued: nextStepType === "continue"
4317
4323
  });
4318
- }
4319
- } else {
4320
- responseMessages.push(
4321
- ...toResponseMessages({
4324
+ if (stepType === "continue") {
4325
+ const lastMessage = responseMessages[responseMessages.length - 1];
4326
+ if (typeof lastMessage.content === "string") {
4327
+ lastMessage.content += stepText;
4328
+ } else {
4329
+ lastMessage.content.push({
4330
+ text: stepText,
4331
+ type: "text"
4332
+ });
4333
+ }
4334
+ } else {
4335
+ responseMessages.push(
4336
+ ...toResponseMessages({
4337
+ text: stepText,
4338
+ tools: tools != null ? tools : {},
4339
+ toolCalls: stepToolCalls,
4340
+ toolResults: stepToolResults
4341
+ })
4342
+ );
4343
+ }
4344
+ const currentStepResult = {
4345
+ stepType,
4322
4346
  text: stepText,
4323
- tools: tools != null ? tools : {},
4324
4347
  toolCalls: stepToolCalls,
4325
- toolResults: stepToolResults
4326
- })
4327
- );
4328
- }
4329
- const currentStepResult = {
4330
- stepType,
4331
- text: stepText,
4332
- toolCalls: stepToolCalls,
4333
- toolResults: stepToolResults,
4334
- finishReason: stepFinishReason,
4335
- usage: stepUsage,
4336
- warnings: self.rawWarnings,
4337
- logprobs: stepLogProbs,
4338
- request: stepRequest,
4339
- response: {
4340
- ...stepResponse,
4341
- headers: (_a11 = self.rawResponse) == null ? void 0 : _a11.headers,
4342
- // deep clone msgs to avoid mutating past messages in multi-step:
4343
- messages: JSON.parse(JSON.stringify(responseMessages))
4344
- },
4345
- experimental_providerMetadata: stepProviderMetadata,
4346
- isContinued: nextStepType === "continue"
4347
- };
4348
- stepResults.push(currentStepResult);
4349
- await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
4350
- const combinedUsage = {
4351
- promptTokens: usage.promptTokens + stepUsage.promptTokens,
4352
- completionTokens: usage.completionTokens + stepUsage.completionTokens,
4353
- totalTokens: usage.totalTokens + stepUsage.totalTokens
4354
- };
4355
- if (nextStepType !== "done") {
4356
- const {
4357
- result,
4358
- doStreamSpan: doStreamSpan3,
4359
- startTimestampMs: startTimestamp2
4360
- } = await startStep({ responseMessages });
4361
- self.rawWarnings = result.warnings;
4362
- self.rawResponse = result.rawResponse;
4363
- addStepStream({
4364
- stream: result.stream,
4365
- startTimestamp: startTimestamp2,
4366
- doStreamSpan: doStreamSpan3,
4367
- currentStep: currentStep + 1,
4368
- responseMessages,
4369
- usage: combinedUsage,
4370
- stepType: nextStepType,
4371
- previousStepText: fullStepText,
4372
- stepRequest: result.request,
4373
- hasLeadingWhitespace: hasWhitespaceSuffix
4374
- });
4375
- return;
4376
- }
4377
- try {
4378
- controller.enqueue({
4379
- type: "finish",
4380
- finishReason: stepFinishReason,
4381
- usage: combinedUsage,
4382
- experimental_providerMetadata: stepProviderMetadata,
4383
- logprobs: stepLogProbs,
4384
- response: {
4385
- ...stepResponse
4348
+ toolResults: stepToolResults,
4349
+ finishReason: stepFinishReason,
4350
+ usage: stepUsage,
4351
+ warnings,
4352
+ logprobs: stepLogProbs,
4353
+ request: stepRequest,
4354
+ response: {
4355
+ ...stepResponse,
4356
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
4357
+ // deep clone msgs to avoid mutating past messages in multi-step:
4358
+ messages: JSON.parse(JSON.stringify(responseMessages))
4359
+ },
4360
+ experimental_providerMetadata: stepProviderMetadata,
4361
+ isContinued: nextStepType === "continue"
4362
+ };
4363
+ stepResults.push(currentStepResult);
4364
+ await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
4365
+ const combinedUsage = {
4366
+ promptTokens: usage.promptTokens + stepUsage.promptTokens,
4367
+ completionTokens: usage.completionTokens + stepUsage.completionTokens,
4368
+ totalTokens: usage.totalTokens + stepUsage.totalTokens
4369
+ };
4370
+ if (nextStepType !== "done") {
4371
+ await streamStep({
4372
+ currentStep: currentStep + 1,
4373
+ responseMessages,
4374
+ usage: combinedUsage,
4375
+ stepType: nextStepType,
4376
+ previousStepText: fullStepText,
4377
+ hasLeadingWhitespace: hasWhitespaceSuffix
4378
+ });
4379
+ return;
4386
4380
  }
4387
- });
4388
- closeStitchableStream();
4389
- rootSpan.setAttributes(
4390
- selectTelemetryAttributes({
4391
- telemetry,
4392
- attributes: {
4393
- "ai.response.finishReason": stepFinishReason,
4394
- "ai.response.text": { output: () => fullStepText },
4395
- "ai.response.toolCalls": {
4396
- output: () => stepToolCallsJson
4381
+ try {
4382
+ controller.enqueue({
4383
+ type: "finish",
4384
+ finishReason: stepFinishReason,
4385
+ usage: combinedUsage,
4386
+ experimental_providerMetadata: stepProviderMetadata,
4387
+ logprobs: stepLogProbs,
4388
+ response: {
4389
+ ...stepResponse
4390
+ }
4391
+ });
4392
+ self.stitchableStream.close();
4393
+ rootSpan.setAttributes(
4394
+ selectTelemetryAttributes({
4395
+ telemetry,
4396
+ attributes: {
4397
+ "ai.response.finishReason": stepFinishReason,
4398
+ "ai.response.text": { output: () => fullStepText },
4399
+ "ai.response.toolCalls": {
4400
+ output: () => stepToolCallsJson
4401
+ },
4402
+ "ai.usage.promptTokens": combinedUsage.promptTokens,
4403
+ "ai.usage.completionTokens": combinedUsage.completionTokens
4404
+ }
4405
+ })
4406
+ );
4407
+ self.usagePromise.resolve(combinedUsage);
4408
+ self.finishReasonPromise.resolve(stepFinishReason);
4409
+ self.textPromise.resolve(fullStepText);
4410
+ self.toolCallsPromise.resolve(stepToolCalls);
4411
+ self.providerMetadataPromise.resolve(stepProviderMetadata);
4412
+ self.toolResultsPromise.resolve(stepToolResults);
4413
+ self.requestPromise.resolve(stepRequest);
4414
+ self.responsePromise.resolve({
4415
+ ...stepResponse,
4416
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
4417
+ messages: responseMessages
4418
+ });
4419
+ self.stepsPromise.resolve(stepResults);
4420
+ self.warningsPromise.resolve(warnings != null ? warnings : []);
4421
+ await (onFinish == null ? void 0 : onFinish({
4422
+ finishReason: stepFinishReason,
4423
+ logprobs: stepLogProbs,
4424
+ usage: combinedUsage,
4425
+ text: fullStepText,
4426
+ toolCalls: stepToolCalls,
4427
+ // The tool results are inferred as a never[] type, because they are
4428
+ // optional and the execute method with an inferred result type is
4429
+ // optional as well. Therefore we need to cast the toolResults to any.
4430
+ // The type exposed to the users will be correctly inferred.
4431
+ toolResults: stepToolResults,
4432
+ request: stepRequest,
4433
+ response: {
4434
+ ...stepResponse,
4435
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
4436
+ messages: responseMessages
4397
4437
  },
4398
- "ai.usage.promptTokens": combinedUsage.promptTokens,
4399
- "ai.usage.completionTokens": combinedUsage.completionTokens
4400
- }
4401
- })
4402
- );
4403
- resolveUsage(combinedUsage);
4404
- resolveFinishReason(stepFinishReason);
4405
- resolveText(fullStepText);
4406
- resolveToolCalls(stepToolCalls);
4407
- resolveProviderMetadata(stepProviderMetadata);
4408
- resolveToolResults(stepToolResults);
4409
- resolveRequest(stepRequest);
4410
- resolveResponse({
4411
- ...stepResponse,
4412
- headers: rawResponse == null ? void 0 : rawResponse.headers,
4413
- messages: responseMessages
4414
- });
4415
- resolveSteps(stepResults);
4416
- resolveWarnings((_b = self.rawWarnings) != null ? _b : []);
4417
- await (onFinish == null ? void 0 : onFinish({
4418
- finishReason: stepFinishReason,
4419
- logprobs: stepLogProbs,
4420
- usage: combinedUsage,
4421
- text: fullStepText,
4422
- toolCalls: stepToolCalls,
4423
- // The tool results are inferred as a never[] type, because they are
4424
- // optional and the execute method with an inferred result type is
4425
- // optional as well. Therefore we need to cast the toolResults to any.
4426
- // The type exposed to the users will be correctly inferred.
4427
- toolResults: stepToolResults,
4428
- request: stepRequest,
4429
- response: {
4430
- ...stepResponse,
4431
- headers: rawResponse == null ? void 0 : rawResponse.headers,
4432
- messages: responseMessages
4433
- },
4434
- warnings,
4435
- experimental_providerMetadata: stepProviderMetadata,
4436
- steps: stepResults,
4437
- responseMessages
4438
- }));
4439
- } catch (error) {
4440
- controller.error(error);
4441
- } finally {
4442
- rootSpan.end();
4443
- }
4444
- }
4445
- })
4446
- )
4438
+ warnings,
4439
+ experimental_providerMetadata: stepProviderMetadata,
4440
+ steps: stepResults
4441
+ }));
4442
+ } catch (error) {
4443
+ controller.error(error);
4444
+ } finally {
4445
+ rootSpan.end();
4446
+ }
4447
+ }
4448
+ })
4449
+ )
4450
+ );
4451
+ }
4452
+ await streamStep({
4453
+ currentStep: 0,
4454
+ responseMessages: [],
4455
+ usage: {
4456
+ promptTokens: 0,
4457
+ completionTokens: 0,
4458
+ totalTokens: 0
4459
+ },
4460
+ previousStepText: "",
4461
+ stepType: "initial",
4462
+ hasLeadingWhitespace: false
4463
+ });
4464
+ }
4465
+ }).catch((error) => {
4466
+ self.stitchableStream.addStream(
4467
+ new ReadableStream({
4468
+ start(controller) {
4469
+ controller.error(error);
4470
+ }
4471
+ })
4447
4472
  );
4448
- }
4449
- addStepStream({
4450
- stream,
4451
- startTimestamp: startTimestampMs,
4452
- doStreamSpan,
4453
- currentStep: 0,
4454
- responseMessages: [],
4455
- usage: void 0,
4456
- stepType: "initial",
4457
- stepRequest: request,
4458
- hasLeadingWhitespace: false
4473
+ self.stitchableStream.close();
4459
4474
  });
4460
4475
  }
4476
+ get warnings() {
4477
+ return this.warningsPromise.value;
4478
+ }
4479
+ get usage() {
4480
+ return this.usagePromise.value;
4481
+ }
4482
+ get finishReason() {
4483
+ return this.finishReasonPromise.value;
4484
+ }
4485
+ get experimental_providerMetadata() {
4486
+ return this.providerMetadataPromise.value;
4487
+ }
4488
+ get text() {
4489
+ return this.textPromise.value;
4490
+ }
4491
+ get toolCalls() {
4492
+ return this.toolCallsPromise.value;
4493
+ }
4494
+ get toolResults() {
4495
+ return this.toolResultsPromise.value;
4496
+ }
4497
+ get request() {
4498
+ return this.requestPromise.value;
4499
+ }
4500
+ get response() {
4501
+ return this.responsePromise.value;
4502
+ }
4503
+ get steps() {
4504
+ return this.stepsPromise.value;
4505
+ }
4461
4506
  /**
4462
4507
  Split out a new stream from the original stream.
4463
4508
  The original stream is replaced to allow for further splitting,
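The hunk above replaces the callback-based resolvers (resolveUsage, resolveText, and so on) with promise-backed fields (usagePromise, textPromise, ...) that the getters just above expose through a .value accessor, and it routes all step output through self.stitchableStream. Below is a minimal sketch of that promise-backed accessor pattern, assuming only what the diff shows (a resolve() method and a .value getter); it is illustrative, not the package's internal DelayedPromise implementation.

// Illustrative sketch only -- not the package's actual implementation.
class ResolvablePromise {
  constructor() {
    this.promise = new Promise((resolve, reject) => {
      this._resolve = resolve;
      this._reject = reject;
    });
  }
  // Consumers get the pending promise; it settles once resolve() is called.
  get value() {
    return this.promise;
  }
  resolve(value) {
    this._resolve(value);
  }
  reject(error) {
    this._reject(error);
  }
}

// Usage mirroring the diff: resolved once the final step finishes,
// awaitable from a getter at any earlier point.
const textPromise = new ResolvablePromise();
textPromise.resolve("full step text");
textPromise.value.then(console.log); // "full step text"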
@@ -4467,8 +4512,8 @@ var DefaultStreamTextResult = class {
  However, the LLM results are expected to be small enough to not cause issues.
  */
  teeStream() {
- const [stream1, stream2] = this.originalStream.tee();
- this.originalStream = stream2;
+ const [stream1, stream2] = this.stitchableStream.stream.tee();
+ this.stitchableStream.stream = stream2;
  return stream1;
  }
  get textStream() {
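teeStream() now tees the stitchable stream rather than a stored originalStream, but the pattern is unchanged: split off one branch for the caller and retain the other so the stream can be split again. A standalone sketch of that tee-and-replace pattern on a plain ReadableStream:

// Each call splits the current stream and keeps one branch for future
// consumers, so several readers (textStream, fullStream, ...) can each
// receive every chunk independently.
let current = new ReadableStream({
  start(controller) {
    controller.enqueue("chunk");
    controller.close();
  }
});

function splitStream() {
  const [consumerBranch, retainedBranch] = current.tee();
  current = retainedBranch; // replace so the stream can be split again
  return consumerBranch;
}

const a = splitStream();
const b = splitStream();
// both a and b now observe every chunk

As the comment above the method notes, tee() buffers per branch until the slowest consumer catches up, which is acceptable because LLM results are expected to stay small.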
@@ -4508,12 +4553,12 @@ var DefaultStreamTextResult = class {
  const chunkType = chunk.type;
  switch (chunkType) {
  case "text-delta": {
- controller.enqueue((0, import_ui_utils6.formatStreamPart)("text", chunk.textDelta));
+ controller.enqueue((0, import_ui_utils6.formatDataStreamPart)("text", chunk.textDelta));
  break;
  }
  case "tool-call-streaming-start": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("tool_call_streaming_start", {
+ (0, import_ui_utils6.formatDataStreamPart)("tool_call_streaming_start", {
  toolCallId: chunk.toolCallId,
  toolName: chunk.toolName
  })
@@ -4522,7 +4567,7 @@ var DefaultStreamTextResult = class {
  }
  case "tool-call-delta": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("tool_call_delta", {
+ (0, import_ui_utils6.formatDataStreamPart)("tool_call_delta", {
  toolCallId: chunk.toolCallId,
  argsTextDelta: chunk.argsTextDelta
  })
@@ -4531,7 +4576,7 @@ var DefaultStreamTextResult = class {
  }
  case "tool-call": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("tool_call", {
+ (0, import_ui_utils6.formatDataStreamPart)("tool_call", {
  toolCallId: chunk.toolCallId,
  toolName: chunk.toolName,
  args: chunk.args
@@ -4541,7 +4586,7 @@ var DefaultStreamTextResult = class {
  }
  case "tool-result": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("tool_result", {
+ (0, import_ui_utils6.formatDataStreamPart)("tool_result", {
  toolCallId: chunk.toolCallId,
  result: chunk.result
  })
@@ -4550,13 +4595,13 @@ var DefaultStreamTextResult = class {
  }
  case "error": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("error", getErrorMessage3(chunk.error))
+ (0, import_ui_utils6.formatDataStreamPart)("error", getErrorMessage3(chunk.error))
  );
  break;
  }
  case "step-finish": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("finish_step", {
+ (0, import_ui_utils6.formatDataStreamPart)("finish_step", {
  finishReason: chunk.finishReason,
  usage: sendUsage ? {
  promptTokens: chunk.usage.promptTokens,
@@ -4569,7 +4614,7 @@ var DefaultStreamTextResult = class {
  }
  case "finish": {
  controller.enqueue(
- (0, import_ui_utils6.formatStreamPart)("finish_message", {
+ (0, import_ui_utils6.formatDataStreamPart)("finish_message", {
  finishReason: chunk.finishReason,
  usage: sendUsage ? {
  promptTokens: chunk.usage.promptTokens,
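These hunks are a mechanical rename: the data-stream half of the old formatStreamPart is now formatDataStreamPart, with the part names (text, tool_call, finish_step, ...) unchanged. For orientation, a hedged sketch of the wire format it produces, one part-code-prefixed JSON payload per line; the codes shown follow the AI SDK data stream protocol and are worth verifying against @ai-sdk/ui-utils.

// Hedged sketch: expected output shape of formatDataStreamPart
// (part code, colon, JSON payload, trailing newline).
const { formatDataStreamPart } = require("ai");

formatDataStreamPart("text", "Hello");
// -> '0:"Hello"\n'

formatDataStreamPart("tool_call", {
  toolCallId: "call_1",
  toolName: "weather",
  args: { city: "Berlin" }
});
// -> '9:{"toolCallId":"call_1","toolName":"weather","args":{"city":"Berlin"}}\n'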
@@ -4846,17 +4891,21 @@ function AssistantResponse({ threadId, messageId }, process2) {
  const textEncoder = new TextEncoder();
  const sendMessage = (message) => {
  controller.enqueue(
- textEncoder.encode((0, import_ui_utils8.formatStreamPart)("assistant_message", message))
+ textEncoder.encode(
+ (0, import_ui_utils8.formatAssistantStreamPart)("assistant_message", message)
+ )
  );
  };
  const sendDataMessage = (message) => {
  controller.enqueue(
- textEncoder.encode((0, import_ui_utils8.formatStreamPart)("data_message", message))
+ textEncoder.encode(
+ (0, import_ui_utils8.formatAssistantStreamPart)("data_message", message)
+ )
  );
  };
  const sendError = (errorMessage) => {
  controller.enqueue(
- textEncoder.encode((0, import_ui_utils8.formatStreamPart)("error", errorMessage))
+ textEncoder.encode((0, import_ui_utils8.formatAssistantStreamPart)("error", errorMessage))
  );
  };
  const forwardStream = async (stream2) => {
@@ -4867,7 +4916,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  case "thread.message.created": {
  controller.enqueue(
  textEncoder.encode(
- (0, import_ui_utils8.formatStreamPart)("assistant_message", {
+ (0, import_ui_utils8.formatAssistantStreamPart)("assistant_message", {
  id: value.data.id,
  role: "assistant",
  content: [{ type: "text", text: { value: "" } }]
@@ -4881,7 +4930,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
  controller.enqueue(
  textEncoder.encode(
- (0, import_ui_utils8.formatStreamPart)("text", content.text.value)
+ (0, import_ui_utils8.formatAssistantStreamPart)("text", content.text.value)
  )
  );
  }
@@ -4898,7 +4947,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  };
  controller.enqueue(
  textEncoder.encode(
- (0, import_ui_utils8.formatStreamPart)("assistant_control_data", {
+ (0, import_ui_utils8.formatAssistantStreamPart)("assistant_control_data", {
  threadId,
  messageId
  })
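AssistantResponse, by contrast, switches to the assistant-stream half of the split, formatAssistantStreamPart, for its assistant_message, data_message, and assistant_control_data parts. A hedged sketch of the expected output; the framing matches the data stream (code, colon, JSON, newline), and the '5' code for assistant_control_data is carried over from the pre-split protocol as an assumption, not verified here.

// Hedged sketch: formatAssistantStreamPart uses the same CODE:JSON framing.
const { formatAssistantStreamPart } = require("ai");

formatAssistantStreamPart("assistant_control_data", {
  threadId: "thread_123",
  messageId: "msg_456"
});
// -> '5:{"threadId":"thread_123","messageId":"msg_456"}\n' (assumed code)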
@@ -5015,7 +5064,7 @@ var StreamData = class {
  throw new Error("Stream controller is not initialized.");
  }
  this.controller.enqueue(
- this.encoder.encode((0, import_ui_utils9.formatStreamPart)("data", [value]))
+ this.encoder.encode((0, import_ui_utils9.formatDataStreamPart)("data", [value]))
  );
  }
  appendMessageAnnotation(value) {
@@ -5026,7 +5075,7 @@ var StreamData = class {
  throw new Error("Stream controller is not initialized.");
  }
  this.controller.enqueue(
- this.encoder.encode((0, import_ui_utils9.formatStreamPart)("message_annotations", [value]))
+ this.encoder.encode((0, import_ui_utils9.formatDataStreamPart)("message_annotations", [value]))
  );
  }
  };
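StreamData keeps its public API and only swaps the serializer: append() emits a "data" part and appendMessageAnnotation() a "message_annotations" part, both framed by formatDataStreamPart as shown above. A short usage sketch under that assumption:

const { StreamData } = require("ai");

const data = new StreamData();
data.append({ status: "indexing" });                  // emitted as a data part
data.appendMessageAnnotation({ sources: ["a.md"] });  // emitted as an annotation part
data.close(); // close once the accompanying response stream has finished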
@@ -5036,7 +5085,7 @@ function createStreamDataTransformer() {
  return new TransformStream({
  transform: async (chunk, controller) => {
  const message = decoder.decode(chunk);
- controller.enqueue(encoder.encode((0, import_ui_utils9.formatStreamPart)("text", message)));
+ controller.enqueue(encoder.encode((0, import_ui_utils9.formatDataStreamPart)("text", message)));
  }
  });
  }
@@ -5170,14 +5219,16 @@ function trimStartOfStream() {
  experimental_createProviderRegistry,
  experimental_customProvider,
  experimental_wrapLanguageModel,
- formatStreamPart,
+ formatAssistantStreamPart,
+ formatDataStreamPart,
  generateId,
  generateObject,
  generateText,
  jsonSchema,
- parseStreamPart,
- processDataProtocolResponse,
- readDataStream,
+ parseAssistantStreamPart,
+ parseDataStreamPart,
+ processDataStream,
+ processTextStream,
  streamObject,
  streamText,
  tool
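Finally, the export surface replaces parseStreamPart, processDataProtocolResponse, and readDataStream with the split helpers parseAssistantStreamPart, parseDataStreamPart, processDataStream, and processTextStream. A hedged consumer sketch for the new processDataStream; the onXxxPart callback names follow the @ai-sdk/ui-utils convention and are assumptions here, not verified signatures.

const { processDataStream } = require("ai");

async function readResponse(response) {
  await processDataStream({
    stream: response.body, // ReadableStream<Uint8Array> of data stream parts
    onTextPart: (text) => process.stdout.write(text),
    onErrorPart: (message) => {
      throw new Error(message);
    }
  });
}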