ai 4.0.0-canary.1 → 4.0.0-canary.11

This diff compares publicly available package versions as released to their public registries. It is provided for informational purposes only and reflects the changes between those versions exactly as they appear in the registries.
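The headline change visible below: streamText and streamObject are no longer declared as async function; each now returns its result object synchronously, with promise-valued properties backed by DelayedPromise instances that resolve while the stream runs. The deprecated maxAutomaticRoundtrips/maxToolRoundtrips options are removed in favor of maxSteps, and the deprecated isXxxError/toJSON helpers are dropped from the error classes. A minimal before/after sketch of a caller, assuming a provider such as @ai-sdk/openai and a hypothetical model id (neither is part of this diff):

import { streamText } from "ai";
import { openai } from "@ai-sdk/openai"; // assumed provider, for illustration only

async function main() {
  // 4.0.0-canary.1: const result = await streamText({ ..., maxToolRoundtrips: 2 });
  // 4.0.0-canary.11: the call returns synchronously; its promise-valued
  // properties (usage, finishReason, response, ...) resolve as the stream runs.
  const result = streamText({
    model: openai("gpt-4o-mini"), // hypothetical model id
    prompt: "Summarize the latest changes.",
    maxSteps: 3, // replaces the removed maxToolRoundtrips option
  });

  for await (const textDelta of result.textStream) {
    process.stdout.write(textDelta);
  }
}

main();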
package/dist/index.mjs CHANGED
@@ -6,12 +6,12 @@ var __export = (target, all) => {

  // streams/index.ts
  import {
- formatStreamPart,
+ formatStreamPart as formatStreamPart4,
  parseStreamPart,
  readDataStream,
  processDataProtocolResponse
  } from "@ai-sdk/ui-utils";
- import { generateId as generateIdImpl } from "@ai-sdk/provider-utils";
+ import { generateId as generateId2 } from "@ai-sdk/provider-utils";

  // core/index.ts
  import { jsonSchema } from "@ai-sdk/ui-utils";
@@ -46,24 +46,6 @@ var RetryError = class extends AISDKError {
  static isInstance(error) {
  return AISDKError.hasMarker(error, marker);
  }
- /**
- * @deprecated use `isInstance` instead
- */
- static isRetryError(error) {
- return error instanceof Error && error.name === name && typeof error.reason === "string" && Array.isArray(error.errors);
- }
- /**
- * @deprecated Do not use this method. It will be removed in the next major version.
- */
- toJSON() {
- return {
- name: this.name,
- message: this.message,
- reason: this.reason,
- lastError: this.lastError,
- errors: this.errors
- };
- }
  };
  _a = symbol;

@@ -101,7 +83,7 @@ async function _retryWithExponentialBackoff(f, {
  errors: newErrors
  });
  }
- if (error instanceof Error && APICallError.isAPICallError(error) && error.isRetryable === true && tryNumber <= maxRetries) {
+ if (error instanceof Error && APICallError.isInstance(error) && error.isRetryable === true && tryNumber <= maxRetries) {
  await delay(delayInMs);
  return _retryWithExponentialBackoff(
  f,
@@ -616,25 +598,6 @@ var DownloadError = class extends AISDKError2 {
  static isInstance(error) {
  return AISDKError2.hasMarker(error, marker2);
  }
- /**
- * @deprecated use `isInstance` instead
- */
- static isDownloadError(error) {
- return error instanceof Error && error.name === name2 && typeof error.url === "string" && (error.statusCode == null || typeof error.statusCode === "number") && (error.statusText == null || typeof error.statusText === "string");
- }
- /**
- * @deprecated Do not use this method. It will be removed in the next major version.
- */
- toJSON() {
- return {
- name: this.name,
- message: this.message,
- url: this.url,
- statusCode: this.statusCode,
- statusText: this.statusText,
- cause: this.cause
- };
- }
  };
  _a2 = symbol2;

@@ -707,24 +670,6 @@ var InvalidDataContentError = class extends AISDKError3 {
  static isInstance(error) {
  return AISDKError3.hasMarker(error, marker3);
  }
- /**
- * @deprecated use `isInstance` instead
- */
- static isInvalidDataContentError(error) {
- return error instanceof Error && error.name === name3 && error.content != null;
- }
- /**
- * @deprecated Do not use this method. It will be removed in the next major version.
- */
- toJSON() {
- return {
- name: this.name,
- message: this.message,
- stack: this.stack,
- cause: this.cause,
- content: this.content
- };
- }
  };
  _a3 = symbol3;

@@ -798,23 +743,6 @@ var InvalidMessageRoleError = class extends AISDKError4 {
  static isInstance(error) {
  return AISDKError4.hasMarker(error, marker4);
  }
- /**
- * @deprecated use `isInstance` instead
- */
- static isInvalidMessageRoleError(error) {
- return error instanceof Error && error.name === name4 && typeof error.role === "string";
- }
- /**
- * @deprecated Do not use this method. It will be removed in the next major version.
- */
- toJSON() {
- return {
- name: this.name,
- message: this.message,
- stack: this.stack,
- role: this.role
- };
- }
  };
  _a4 = symbol4;

@@ -1042,21 +970,6 @@ var InvalidArgumentError = class extends AISDKError5 {
  static isInstance(error) {
  return AISDKError5.hasMarker(error, marker5);
  }
- /**
- * @deprecated use `isInstance` instead
- */
- static isInvalidArgumentError(error) {
- return error instanceof Error && error.name === name5 && typeof error.parameter === "string" && typeof error.value === "string";
- }
- toJSON() {
- return {
- name: this.name,
- message: this.message,
- stack: this.stack,
- parameter: this.parameter,
- value: this.value
- };
- }
  };
  _a5 = symbol5;

@@ -1489,9 +1402,7 @@ function convertToCoreMessages(messages, options) {
  });
  break;
  }
- case "function":
- case "data":
- case "tool": {
+ case "data": {
  break;
  }
  default: {
@@ -1579,28 +1490,30 @@ function standardizePrompt({
  }

  // core/types/usage.ts
- function calculateLanguageModelUsage(usage) {
+ function calculateLanguageModelUsage({
+ promptTokens,
+ completionTokens
+ }) {
  return {
- promptTokens: usage.promptTokens,
- completionTokens: usage.completionTokens,
- totalTokens: usage.promptTokens + usage.completionTokens
+ promptTokens,
+ completionTokens,
+ totalTokens: promptTokens + completionTokens
  };
  }

  // core/util/prepare-response-headers.ts
- function prepareResponseHeaders(init, {
+ function prepareResponseHeaders(headers, {
  contentType,
  dataStreamVersion
  }) {
- var _a11;
- const headers = new Headers((_a11 = init == null ? void 0 : init.headers) != null ? _a11 : {});
- if (!headers.has("Content-Type")) {
- headers.set("Content-Type", contentType);
+ const responseHeaders = new Headers(headers != null ? headers : {});
+ if (!responseHeaders.has("Content-Type")) {
+ responseHeaders.set("Content-Type", contentType);
  }
  if (dataStreamVersion !== void 0) {
- headers.set("X-Vercel-AI-Data-Stream", dataStreamVersion);
+ responseHeaders.set("X-Vercel-AI-Data-Stream", dataStreamVersion);
  }
- return headers;
+ return responseHeaders;
  }

  // core/generate-object/inject-json-instruction.ts
@@ -1638,23 +1551,6 @@ var NoObjectGeneratedError = class extends AISDKError7 {
  static isInstance(error) {
  return AISDKError7.hasMarker(error, marker7);
  }
- /**
- * @deprecated Use isInstance instead.
- */
- static isNoObjectGeneratedError(error) {
- return error instanceof Error && error.name === name7;
- }
- /**
- * @deprecated Do not use this method. It will be removed in the next major version.
- */
- toJSON() {
- return {
- name: this.name,
- cause: this.cause,
- message: this.message,
- stack: this.stack
- };
- }
  };
  _a7 = symbol7;

@@ -2191,9 +2087,6 @@ async function generateObject({
  "ai.response.timestamp": responseData.timestamp.toISOString(),
  "ai.usage.promptTokens": result2.usage.promptTokens,
  "ai.usage.completionTokens": result2.usage.completionTokens,
- // deprecated:
- "ai.finishReason": result2.finishReason,
- "ai.result.object": { output: () => result2.text },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result2.finishReason],
  "gen_ai.response.id": responseData.id,
@@ -2298,9 +2191,6 @@ async function generateObject({
  "ai.response.timestamp": responseData.timestamp.toISOString(),
  "ai.usage.promptTokens": result2.usage.promptTokens,
  "ai.usage.completionTokens": result2.usage.completionTokens,
- // deprecated:
- "ai.finishReason": result2.finishReason,
- "ai.result.object": { output: () => objectText },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result2.finishReason],
  "gen_ai.response.id": responseData.id,
@@ -2354,12 +2244,7 @@ async function generateObject({
  output: () => JSON.stringify(validationResult.value)
  },
  "ai.usage.promptTokens": usage.promptTokens,
- "ai.usage.completionTokens": usage.completionTokens,
- // deprecated:
- "ai.finishReason": finishReason,
- "ai.result.object": {
- output: () => JSON.stringify(validationResult.value)
- }
+ "ai.usage.completionTokens": usage.completionTokens
  }
  })
  );
@@ -2388,22 +2273,18 @@ var DefaultGenerateObjectResult = class {
  this.experimental_providerMetadata = options.providerMetadata;
  this.response = options.response;
  this.request = options.request;
- this.rawResponse = {
- headers: options.response.headers
- };
  this.logprobs = options.logprobs;
  }
  toJsonResponse(init) {
  var _a11;
  return new Response(JSON.stringify(this.object), {
  status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
- headers: prepareResponseHeaders(init, {
+ headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
  contentType: "application/json; charset=utf-8"
  })
  });
  }
  };
- var experimental_generateObject = generateObject;

  // core/generate-object/stream-object.ts
  import { createIdGenerator as createIdGenerator2 } from "@ai-sdk/provider-utils";
@@ -2412,21 +2293,6 @@ import {
  parsePartialJson
  } from "@ai-sdk/ui-utils";

- // util/create-resolvable-promise.ts
- function createResolvablePromise() {
- let resolve;
- let reject;
- const promise = new Promise((res, rej) => {
- resolve = res;
- reject = rej;
- });
- return {
- promise,
- resolve,
- reject
- };
- }
-
  // util/delayed-promise.ts
  var DelayedPromise = class {
  constructor() {
@@ -2472,23 +2338,23 @@ function now() {
  }

  // core/util/prepare-outgoing-http-headers.ts
- function prepareOutgoingHttpHeaders(init, {
+ function prepareOutgoingHttpHeaders(headers, {
  contentType,
  dataStreamVersion
  }) {
- const headers = {};
- if ((init == null ? void 0 : init.headers) != null) {
- for (const [key, value] of Object.entries(init.headers)) {
- headers[key] = value;
+ const outgoingHeaders = {};
+ if (headers != null) {
+ for (const [key, value] of Object.entries(headers)) {
+ outgoingHeaders[key] = value;
  }
  }
- if (headers["Content-Type"] == null) {
- headers["Content-Type"] = contentType;
+ if (outgoingHeaders["Content-Type"] == null) {
+ outgoingHeaders["Content-Type"] = contentType;
  }
  if (dataStreamVersion !== void 0) {
- headers["X-Vercel-AI-Data-Stream"] = dataStreamVersion;
+ outgoingHeaders["X-Vercel-AI-Data-Stream"] = dataStreamVersion;
  }
- return headers;
+ return outgoingHeaders;
  }

  // core/util/write-to-server-response.ts
@@ -2518,9 +2384,91 @@ function writeToServerResponse({
  read();
  }

+ // util/create-resolvable-promise.ts
+ function createResolvablePromise() {
+ let resolve;
+ let reject;
+ const promise = new Promise((res, rej) => {
+ resolve = res;
+ reject = rej;
+ });
+ return {
+ promise,
+ resolve,
+ reject
+ };
+ }
+
+ // core/util/create-stitchable-stream.ts
+ function createStitchableStream() {
+ let innerStreamReaders = [];
+ let controller = null;
+ let isClosed = false;
+ let waitForNewStream = createResolvablePromise();
+ const processPull = async () => {
+ if (isClosed && innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ return;
+ }
+ if (innerStreamReaders.length === 0) {
+ waitForNewStream = createResolvablePromise();
+ await waitForNewStream.promise;
+ return processPull();
+ }
+ try {
+ const { value, done } = await innerStreamReaders[0].read();
+ if (done) {
+ innerStreamReaders.shift();
+ if (innerStreamReaders.length > 0) {
+ await processPull();
+ } else if (isClosed) {
+ controller == null ? void 0 : controller.close();
+ }
+ } else {
+ controller == null ? void 0 : controller.enqueue(value);
+ }
+ } catch (error) {
+ controller == null ? void 0 : controller.error(error);
+ innerStreamReaders.shift();
+ if (isClosed && innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ }
+ }
+ };
+ return {
+ stream: new ReadableStream({
+ start(controllerParam) {
+ controller = controllerParam;
+ },
+ pull: processPull,
+ async cancel() {
+ for (const reader of innerStreamReaders) {
+ await reader.cancel();
+ }
+ innerStreamReaders = [];
+ isClosed = true;
+ }
+ }),
+ addStream: (innerStream) => {
+ if (isClosed) {
+ throw new Error("Cannot add inner stream: outer stream is closed");
+ }
+ innerStreamReaders.push(innerStream.getReader());
+ waitForNewStream.resolve();
+ },
+ close: () => {
+ isClosed = true;
+ waitForNewStream.resolve();
+ if (innerStreamReaders.length === 0) {
+ controller == null ? void 0 : controller.close();
+ }
+ }
+ };
+ }
+
  // core/generate-object/stream-object.ts
  var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj", size: 24 });
- async function streamObject({
+ function streamObject({
  model,
  schema: inputSchema,
  schemaName,
@@ -2554,407 +2502,433 @@ async function streamObject({
  if (outputStrategy.type === "no-schema" && mode === void 0) {
  mode = "json";
  }
- const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ return new DefaultStreamObjectResult({
  model,
  telemetry,
  headers,
- settings: { ...settings, maxRetries }
- });
- const tracer = getTracer(telemetry);
- const retry = retryWithExponentialBackoff({ maxRetries });
- return recordSpan({
- name: "ai.streamObject",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.streamObject",
- telemetry
- }),
- ...baseTelemetryAttributes,
- // specific settings that only make sense on the outer level:
- "ai.prompt": {
- input: () => JSON.stringify({ system, prompt, messages })
- },
- "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
- "ai.schema.name": schemaName,
- "ai.schema.description": schemaDescription,
- "ai.settings.output": outputStrategy.type,
- "ai.settings.mode": mode
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (rootSpan) => {
- if (mode === "auto" || mode == null) {
- mode = model.defaultObjectGenerationMode;
- }
- let callOptions;
- let transformer;
- switch (mode) {
- case "json": {
- const standardizedPrompt = standardizePrompt({
- prompt: {
- system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
- prompt: system,
- schema: outputStrategy.jsonSchema
- }),
- prompt,
- messages
- },
- tools: void 0
- });
- callOptions = {
- mode: {
- type: "object-json",
- schema: outputStrategy.jsonSchema,
- name: schemaName,
- description: schemaDescription
- },
- ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
- prompt: await convertToLanguageModelPrompt({
- prompt: standardizedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
- }),
- providerMetadata,
- abortSignal,
- headers
- };
- transformer = {
- transform: (chunk, controller) => {
- switch (chunk.type) {
- case "text-delta":
- controller.enqueue(chunk.textDelta);
- break;
- case "response-metadata":
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
- }
- }
- };
- break;
- }
- case "tool": {
- const standardizedPrompt = standardizePrompt({
- prompt: { system, prompt, messages },
- tools: void 0
- });
- callOptions = {
- mode: {
- type: "object-tool",
- tool: {
- type: "function",
- name: schemaName != null ? schemaName : "json",
- description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
- parameters: outputStrategy.jsonSchema
- }
- },
- ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
- prompt: await convertToLanguageModelPrompt({
- prompt: standardizedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
- }),
- providerMetadata,
- abortSignal,
- headers
- };
- transformer = {
- transform(chunk, controller) {
- switch (chunk.type) {
- case "tool-call-delta":
- controller.enqueue(chunk.argsTextDelta);
- break;
- case "response-metadata":
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
- }
- }
- };
- break;
- }
- case void 0: {
- throw new Error(
- "Model does not have a default object generation mode."
- );
- }
- default: {
- const _exhaustiveCheck = mode;
- throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
- }
- }
- const {
- result: { stream, warnings, rawResponse, request },
- doStreamSpan,
- startTimestampMs
- } = await retry(
- () => recordSpan({
- name: "ai.streamObject.doStream",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.streamObject.doStream",
- telemetry
- }),
- ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => callOptions.inputFormat
- },
- "ai.prompt.messages": {
- input: () => JSON.stringify(callOptions.prompt)
- },
- "ai.settings.mode": mode,
- // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
- "gen_ai.request.presence_penalty": settings.presencePenalty,
- "gen_ai.request.temperature": settings.temperature,
- "gen_ai.request.top_k": settings.topK,
- "gen_ai.request.top_p": settings.topP
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (doStreamSpan2) => ({
- startTimestampMs: now2(),
- doStreamSpan: doStreamSpan2,
- result: await model.doStream(callOptions)
- })
- })
- );
- return new DefaultStreamObjectResult({
- outputStrategy,
- stream: stream.pipeThrough(new TransformStream(transformer)),
- warnings,
- rawResponse,
- request: request != null ? request : {},
- onFinish,
- rootSpan,
- doStreamSpan,
- telemetry,
- startTimestampMs,
- modelId: model.modelId,
- now: now2,
- currentDate,
- generateId: generateId3
- });
- }
+ settings,
+ maxRetries,
+ abortSignal,
+ outputStrategy,
+ system,
+ prompt,
+ messages,
+ schemaName,
+ schemaDescription,
+ inputProviderMetadata: providerMetadata,
+ mode,
+ onFinish,
+ generateId: generateId3,
+ currentDate,
+ now: now2
  });
  }
  var DefaultStreamObjectResult = class {
  constructor({
- stream,
- warnings,
- rawResponse,
- request,
+ model,
+ headers,
+ telemetry,
+ settings,
+ maxRetries,
+ abortSignal,
  outputStrategy,
+ system,
+ prompt,
+ messages,
+ schemaName,
+ schemaDescription,
+ inputProviderMetadata,
+ mode,
  onFinish,
- rootSpan,
- doStreamSpan,
- telemetry,
- startTimestampMs,
- modelId,
- now: now2,
+ generateId: generateId3,
  currentDate,
- generateId: generateId3
+ now: now2
  }) {
- this.warnings = warnings;
- this.rawResponse = rawResponse;
- this.outputStrategy = outputStrategy;
- this.request = Promise.resolve(request);
  this.objectPromise = new DelayedPromise();
- const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
- this.usage = usagePromise;
- const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
- this.response = responsePromise;
- const {
- resolve: resolveProviderMetadata,
- promise: providerMetadataPromise
- } = createResolvablePromise();
- this.experimental_providerMetadata = providerMetadataPromise;
- let usage;
- let finishReason;
- let providerMetadata;
- let object;
- let error;
- let accumulatedText = "";
- let textDelta = "";
- let response = {
- id: generateId3(),
- timestamp: currentDate(),
- modelId
- };
- let latestObjectJson = void 0;
- let latestObject = void 0;
- let isFirstChunk = true;
- let isFirstDelta = true;
+ this.usagePromise = new DelayedPromise();
+ this.providerMetadataPromise = new DelayedPromise();
+ this.warningsPromise = new DelayedPromise();
+ this.requestPromise = new DelayedPromise();
+ this.responsePromise = new DelayedPromise();
+ this.stitchableStream = createStitchableStream();
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ model,
+ telemetry,
+ headers,
+ settings: { ...settings, maxRetries }
+ });
+ const tracer = getTracer(telemetry);
+ const retry = retryWithExponentialBackoff({ maxRetries });
  const self = this;
- this.originalStream = stream.pipeThrough(
- new TransformStream({
- async transform(chunk, controller) {
- var _a11, _b, _c;
- if (isFirstChunk) {
- const msToFirstChunk = now2() - startTimestampMs;
- isFirstChunk = false;
- doStreamSpan.addEvent("ai.stream.firstChunk", {
- "ai.stream.msToFirstChunk": msToFirstChunk
+ recordSpan({
+ name: "ai.streamObject",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.streamObject",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.prompt": {
+ input: () => JSON.stringify({ system, prompt, messages })
+ },
+ "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+ "ai.schema.name": schemaName,
+ "ai.schema.description": schemaDescription,
+ "ai.settings.output": outputStrategy.type,
+ "ai.settings.mode": mode
+ }
+ }),
+ tracer,
+ endWhenDone: false,
+ fn: async (rootSpan) => {
+ if (mode === "auto" || mode == null) {
+ mode = model.defaultObjectGenerationMode;
+ }
+ let callOptions;
+ let transformer;
+ switch (mode) {
+ case "json": {
+ const standardizedPrompt = standardizePrompt({
+ prompt: {
+ system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
+ prompt: system,
+ schema: outputStrategy.jsonSchema
+ }),
+ prompt,
+ messages
+ },
+ tools: void 0
  });
- doStreamSpan.setAttributes({
- "ai.stream.msToFirstChunk": msToFirstChunk
+ callOptions = {
+ mode: {
+ type: "object-json",
+ schema: outputStrategy.jsonSchema,
+ name: schemaName,
+ description: schemaDescription
+ },
+ ...prepareCallSettings(settings),
+ inputFormat: standardizedPrompt.type,
+ prompt: await convertToLanguageModelPrompt({
+ prompt: standardizedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
+ }),
+ providerMetadata: inputProviderMetadata,
+ abortSignal,
+ headers
+ };
+ transformer = {
+ transform: (chunk, controller) => {
+ switch (chunk.type) {
+ case "text-delta":
+ controller.enqueue(chunk.textDelta);
+ break;
+ case "response-metadata":
+ case "finish":
+ case "error":
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ };
+ break;
+ }
+ case "tool": {
+ const standardizedPrompt = standardizePrompt({
+ prompt: { system, prompt, messages },
+ tools: void 0
  });
+ callOptions = {
+ mode: {
+ type: "object-tool",
+ tool: {
+ type: "function",
+ name: schemaName != null ? schemaName : "json",
+ description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
+ parameters: outputStrategy.jsonSchema
+ }
+ },
+ ...prepareCallSettings(settings),
+ inputFormat: standardizedPrompt.type,
+ prompt: await convertToLanguageModelPrompt({
+ prompt: standardizedPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
+ }),
+ providerMetadata: inputProviderMetadata,
+ abortSignal,
+ headers
+ };
+ transformer = {
+ transform(chunk, controller) {
+ switch (chunk.type) {
+ case "tool-call-delta":
+ controller.enqueue(chunk.argsTextDelta);
+ break;
+ case "response-metadata":
+ case "finish":
+ case "error":
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ };
+ break;
  }
- if (typeof chunk === "string") {
- accumulatedText += chunk;
- textDelta += chunk;
- const { value: currentObjectJson, state: parseState } = parsePartialJson(accumulatedText);
- if (currentObjectJson !== void 0 && !isDeepEqualData(latestObjectJson, currentObjectJson)) {
- const validationResult = outputStrategy.validatePartialResult({
- value: currentObjectJson,
- textDelta,
- latestObject,
- isFirstDelta,
- isFinalDelta: parseState === "successful-parse"
- });
- if (validationResult.success && !isDeepEqualData(latestObject, validationResult.value.partial)) {
- latestObjectJson = currentObjectJson;
- latestObject = validationResult.value.partial;
- controller.enqueue({
- type: "object",
- object: latestObject
+ case void 0: {
+ throw new Error(
+ "Model does not have a default object generation mode."
+ );
+ }
+ default: {
+ const _exhaustiveCheck = mode;
+ throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+ }
+ }
+ const {
+ result: { stream, warnings, rawResponse, request },
+ doStreamSpan,
+ startTimestampMs
+ } = await retry(
+ () => recordSpan({
+ name: "ai.streamObject.doStream",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.streamObject.doStream",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ "ai.prompt.format": {
+ input: () => callOptions.inputFormat
+ },
+ "ai.prompt.messages": {
+ input: () => JSON.stringify(callOptions.prompt)
+ },
+ "ai.settings.mode": mode,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+ "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
+ "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_k": settings.topK,
+ "gen_ai.request.top_p": settings.topP
+ }
+ }),
+ tracer,
+ endWhenDone: false,
+ fn: async (doStreamSpan2) => ({
+ startTimestampMs: now2(),
+ doStreamSpan: doStreamSpan2,
+ result: await model.doStream(callOptions)
+ })
+ })
+ );
+ self.requestPromise.resolve(request != null ? request : {});
+ let usage;
+ let finishReason;
+ let providerMetadata;
+ let object;
+ let error;
+ let accumulatedText = "";
+ let textDelta = "";
+ let response = {
+ id: generateId3(),
+ timestamp: currentDate(),
+ modelId: model.modelId
+ };
+ let latestObjectJson = void 0;
+ let latestObject = void 0;
+ let isFirstChunk = true;
+ let isFirstDelta = true;
+ const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
+ new TransformStream({
+ async transform(chunk, controller) {
+ var _a11, _b, _c;
+ if (isFirstChunk) {
+ const msToFirstChunk = now2() - startTimestampMs;
+ isFirstChunk = false;
+ doStreamSpan.addEvent("ai.stream.firstChunk", {
+ "ai.stream.msToFirstChunk": msToFirstChunk
  });
- controller.enqueue({
- type: "text-delta",
- textDelta: validationResult.value.textDelta
+ doStreamSpan.setAttributes({
+ "ai.stream.msToFirstChunk": msToFirstChunk
  });
- textDelta = "";
- isFirstDelta = false;
  }
- }
- return;
- }
- switch (chunk.type) {
- case "response-metadata": {
- response = {
- id: (_a11 = chunk.id) != null ? _a11 : response.id,
- timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
- modelId: (_c = chunk.modelId) != null ? _c : response.modelId
- };
- break;
- }
- case "finish": {
- if (textDelta !== "") {
- controller.enqueue({ type: "text-delta", textDelta });
- }
- finishReason = chunk.finishReason;
- usage = calculateLanguageModelUsage(chunk.usage);
- providerMetadata = chunk.providerMetadata;
- controller.enqueue({ ...chunk, usage, response });
- resolveUsage(usage);
- resolveProviderMetadata(providerMetadata);
- resolveResponse({
- ...response,
- headers: rawResponse == null ? void 0 : rawResponse.headers
- });
- const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
- if (validationResult.success) {
- object = validationResult.value;
- self.objectPromise.resolve(object);
- } else {
- error = validationResult.error;
- self.objectPromise.reject(error);
+ if (typeof chunk === "string") {
+ accumulatedText += chunk;
+ textDelta += chunk;
+ const { value: currentObjectJson, state: parseState } = parsePartialJson(accumulatedText);
+ if (currentObjectJson !== void 0 && !isDeepEqualData(latestObjectJson, currentObjectJson)) {
+ const validationResult = outputStrategy.validatePartialResult({
+ value: currentObjectJson,
+ textDelta,
+ latestObject,
+ isFirstDelta,
+ isFinalDelta: parseState === "successful-parse"
+ });
+ if (validationResult.success && !isDeepEqualData(
+ latestObject,
+ validationResult.value.partial
+ )) {
+ latestObjectJson = currentObjectJson;
+ latestObject = validationResult.value.partial;
+ controller.enqueue({
+ type: "object",
+ object: latestObject
+ });
+ controller.enqueue({
+ type: "text-delta",
+ textDelta: validationResult.value.textDelta
+ });
+ textDelta = "";
+ isFirstDelta = false;
+ }
+ }
+ return;
  }
- break;
- }
- default: {
- controller.enqueue(chunk);
- break;
- }
- }
- },
- // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
- async flush(controller) {
- try {
- const finalUsage = usage != null ? usage : {
- promptTokens: NaN,
- completionTokens: NaN,
- totalTokens: NaN
- };
- doStreamSpan.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": finishReason,
- "ai.response.object": {
- output: () => JSON.stringify(object)
- },
- "ai.response.id": response.id,
- "ai.response.model": response.modelId,
- "ai.response.timestamp": response.timestamp.toISOString(),
- "ai.usage.promptTokens": finalUsage.promptTokens,
- "ai.usage.completionTokens": finalUsage.completionTokens,
- // deprecated
- "ai.finishReason": finishReason,
- "ai.result.object": { output: () => JSON.stringify(object) },
- // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [finishReason],
- "gen_ai.response.id": response.id,
- "gen_ai.response.model": response.modelId,
- "gen_ai.usage.input_tokens": finalUsage.promptTokens,
- "gen_ai.usage.output_tokens": finalUsage.completionTokens
+ switch (chunk.type) {
+ case "response-metadata": {
+ response = {
+ id: (_a11 = chunk.id) != null ? _a11 : response.id,
+ timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
+ modelId: (_c = chunk.modelId) != null ? _c : response.modelId
+ };
+ break;
  }
- })
- );
- doStreamSpan.end();
- rootSpan.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.usage.promptTokens": finalUsage.promptTokens,
- "ai.usage.completionTokens": finalUsage.completionTokens,
- "ai.response.object": {
- output: () => JSON.stringify(object)
- },
- // deprecated
- "ai.result.object": { output: () => JSON.stringify(object) }
+ case "finish": {
+ if (textDelta !== "") {
+ controller.enqueue({ type: "text-delta", textDelta });
+ }
+ finishReason = chunk.finishReason;
+ usage = calculateLanguageModelUsage(chunk.usage);
+ providerMetadata = chunk.providerMetadata;
+ controller.enqueue({ ...chunk, usage, response });
+ self.usagePromise.resolve(usage);
+ self.providerMetadataPromise.resolve(providerMetadata);
+ self.responsePromise.resolve({
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ });
+ const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
+ if (validationResult.success) {
+ object = validationResult.value;
+ self.objectPromise.resolve(object);
+ } else {
+ error = validationResult.error;
+ self.objectPromise.reject(error);
+ }
+ break;
  }
- })
- );
- await (onFinish == null ? void 0 : onFinish({
- usage: finalUsage,
- object,
- error,
- rawResponse,
- response: {
- ...response,
- headers: rawResponse == null ? void 0 : rawResponse.headers
- },
- warnings,
- experimental_providerMetadata: providerMetadata
- }));
- } catch (error2) {
- controller.error(error2);
- } finally {
- rootSpan.end();
+ default: {
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ },
+ // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+ async flush(controller) {
+ try {
+ const finalUsage = usage != null ? usage : {
+ promptTokens: NaN,
+ completionTokens: NaN,
+ totalTokens: NaN
+ };
+ doStreamSpan.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": finishReason,
+ "ai.response.object": {
+ output: () => JSON.stringify(object)
+ },
+ "ai.response.id": response.id,
+ "ai.response.model": response.modelId,
+ "ai.response.timestamp": response.timestamp.toISOString(),
+ "ai.usage.promptTokens": finalUsage.promptTokens,
+ "ai.usage.completionTokens": finalUsage.completionTokens,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [finishReason],
+ "gen_ai.response.id": response.id,
+ "gen_ai.response.model": response.modelId,
+ "gen_ai.usage.input_tokens": finalUsage.promptTokens,
+ "gen_ai.usage.output_tokens": finalUsage.completionTokens
+ }
+ })
+ );
+ doStreamSpan.end();
+ rootSpan.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.usage.promptTokens": finalUsage.promptTokens,
+ "ai.usage.completionTokens": finalUsage.completionTokens,
+ "ai.response.object": {
+ output: () => JSON.stringify(object)
+ }
+ }
+ })
+ );
+ await (onFinish == null ? void 0 : onFinish({
+ usage: finalUsage,
+ object,
+ error,
+ response: {
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ },
+ warnings,
+ experimental_providerMetadata: providerMetadata
+ }));
+ } catch (error2) {
+ controller.error(error2);
+ } finally {
+ rootSpan.end();
+ }
+ }
+ })
+ );
+ self.stitchableStream.addStream(transformedStream);
+ }
+ }).catch((error) => {
+ self.stitchableStream.addStream(
+ new ReadableStream({
+ start(controller) {
+ controller.error(error);
  }
- }
- })
- );
+ })
+ );
+ }).finally(() => {
+ self.stitchableStream.close();
+ });
+ this.outputStrategy = outputStrategy;
  }
  get object() {
  return this.objectPromise.value;
  }
+ get usage() {
+ return this.usagePromise.value;
+ }
+ get experimental_providerMetadata() {
+ return this.providerMetadataPromise.value;
+ }
+ get warnings() {
+ return this.warningsPromise.value;
+ }
+ get request() {
+ return this.requestPromise.value;
+ }
+ get response() {
+ return this.responsePromise.value;
+ }
  get partialObjectStream() {
- return createAsyncIterableStream(this.originalStream, {
+ return createAsyncIterableStream(this.stitchableStream.stream, {
  transform(chunk, controller) {
  switch (chunk.type) {
  case "object":
@@ -2975,10 +2949,12 @@ var DefaultStreamObjectResult = class {
  });
  }
  get elementStream() {
- return this.outputStrategy.createElementStream(this.originalStream);
+ return this.outputStrategy.createElementStream(
+ this.stitchableStream.stream
+ );
  }
  get textStream() {
- return createAsyncIterableStream(this.originalStream, {
+ return createAsyncIterableStream(this.stitchableStream.stream, {
  transform(chunk, controller) {
  switch (chunk.type) {
  case "text-delta":
@@ -2999,7 +2975,7 @@ var DefaultStreamObjectResult = class {
  });
  }
  get fullStream() {
- return createAsyncIterableStream(this.originalStream, {
+ return createAsyncIterableStream(this.stitchableStream.stream, {
  transform(chunk, controller) {
  controller.enqueue(chunk);
  }
@@ -3010,7 +2986,7 @@ var DefaultStreamObjectResult = class {
  response,
  status: init == null ? void 0 : init.status,
  statusText: init == null ? void 0 : init.statusText,
- headers: prepareOutgoingHttpHeaders(init, {
+ headers: prepareOutgoingHttpHeaders(init == null ? void 0 : init.headers, {
  contentType: "text/plain; charset=utf-8"
  }),
  stream: this.textStream.pipeThrough(new TextEncoderStream())
@@ -3020,13 +2996,12 @@ var DefaultStreamObjectResult = class {
  var _a11;
  return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
  status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
- headers: prepareResponseHeaders(init, {
+ headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
  contentType: "text/plain; charset=utf-8"
  })
  });
  }
  };
- var experimental_streamObject = streamObject;

  // core/generate-text/generate-text.ts
  import { createIdGenerator as createIdGenerator3 } from "@ai-sdk/provider-utils";
@@ -3069,25 +3044,6 @@ var InvalidToolArgumentsError = class extends AISDKError8 {
  static isInstance(error) {
  return AISDKError8.hasMarker(error, marker8);
  }
- /**
- * @deprecated use `isInstance` instead
- */
- static isInvalidToolArgumentsError(error) {
- return error instanceof Error && error.name === name8 && typeof error.toolName === "string" && typeof error.toolArgs === "string";
- }
- /**
- * @deprecated Do not use this method. It will be removed in the next major version.
- */
- toJSON() {
- return {
- name: this.name,
- message: this.message,
- cause: this.cause,
- stack: this.stack,
- toolName: this.toolName,
- toolArgs: this.toolArgs
- };
- }
  };
  _a8 = symbol8;

@@ -3111,24 +3067,6 @@ var NoSuchToolError = class extends AISDKError9 {
  static isInstance(error) {
  return AISDKError9.hasMarker(error, marker9);
  }
- /**
- * @deprecated use `isInstance` instead
- */
- static isNoSuchToolError(error) {
- return error instanceof Error && error.name === name9 && "toolName" in error && error.toolName != void 0 && typeof error.name === "string";
- }
- /**
- * @deprecated Do not use this method. It will be removed in the next major version.
- */
- toJSON() {
- return {
- name: this.name,
- message: this.message,
- stack: this.stack,
- toolName: this.toolName,
- availableTools: this.availableTools
- };
- }
  };
  _a9 = symbol9;

@@ -3281,11 +3219,8 @@ async function generateText({
  maxRetries,
  abortSignal,
  headers,
- maxAutomaticRoundtrips = 0,
- maxToolRoundtrips = maxAutomaticRoundtrips,
- maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
- experimental_continuationSteps,
- experimental_continueSteps: continueSteps = experimental_continuationSteps != null ? experimental_continuationSteps : false,
+ maxSteps = 1,
+ experimental_continueSteps: continueSteps = false,
  experimental_telemetry: telemetry,
  experimental_providerMetadata: providerMetadata,
  experimental_activeTools: activeTools,
@@ -3437,14 +3372,6 @@ async function generateText({
  "ai.response.timestamp": responseData.timestamp.toISOString(),
  "ai.usage.promptTokens": result.usage.promptTokens,
  "ai.usage.completionTokens": result.usage.completionTokens,
- // deprecated:
- "ai.finishReason": result.finishReason,
- "ai.result.text": {
- output: () => result.text
- },
- "ai.result.toolCalls": {
- output: () => JSON.stringify(result.toolCalls)
- },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result.finishReason],
  "gen_ai.response.id": responseData.id,
@@ -3547,15 +3474,7 @@ async function generateText({
  output: () => JSON.stringify(currentModelResponse.toolCalls)
  },
  "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
- "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
- // deprecated:
- "ai.finishReason": currentModelResponse.finishReason,
- "ai.result.text": {
- output: () => currentModelResponse.text
- },
- "ai.result.toolCalls": {
- output: () => JSON.stringify(currentModelResponse.toolCalls)
- }
+ "ai.usage.completionTokens": currentModelResponse.usage.completionTokens
  }
  })
  );
@@ -3573,7 +3492,6 @@ async function generateText({
  messages: responseMessages
  },
  logprobs: currentModelResponse.logprobs,
- responseMessages,
  steps,
  providerMetadata: currentModelResponse.providerMetadata
  });
@@ -3650,121 +3568,54 @@ var DefaultGenerateTextResult = class {
  this.warnings = options.warnings;
  this.request = options.request;
  this.response = options.response;
- this.responseMessages = options.responseMessages;
- this.roundtrips = options.steps;
  this.steps = options.steps;
  this.experimental_providerMetadata = options.providerMetadata;
- this.rawResponse = {
- headers: options.response.headers
- };
  this.logprobs = options.logprobs;
  }
  };
- var experimental_generateText = generateText;

  // core/generate-text/stream-text.ts
  import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";
+ import { formatStreamPart } from "@ai-sdk/ui-utils";

- // core/util/create-stitchable-stream.ts
- function createStitchableStream() {
- let innerStreamReaders = [];
- let controller = null;
- let isClosed = false;
- const processPull = async () => {
- if (isClosed && innerStreamReaders.length === 0) {
- controller == null ? void 0 : controller.close();
- return;
- }
- if (innerStreamReaders.length === 0) {
- return;
- }
+ // core/util/merge-streams.ts
+ function mergeStreams(stream1, stream2) {
+ const reader1 = stream1.getReader();
+ const reader2 = stream2.getReader();
+ let lastRead1 = void 0;
+ let lastRead2 = void 0;
+ let stream1Done = false;
+ let stream2Done = false;
+ async function readStream1(controller) {
  try {
- const { value, done } = await innerStreamReaders[0].read();
- if (done) {
- innerStreamReaders.shift();
- if (innerStreamReaders.length > 0) {
- await processPull();
- } else if (isClosed) {
- controller == null ? void 0 : controller.close();
- }
+ if (lastRead1 == null) {
+ lastRead1 = reader1.read();
+ }
+ const result = await lastRead1;
+ lastRead1 = void 0;
+ if (!result.done) {
+ controller.enqueue(result.value);
  } else {
- controller == null ? void 0 : controller.enqueue(value);
+ controller.close();
  }
  } catch (error) {
- controller == null ? void 0 : controller.error(error);
- innerStreamReaders.shift();
- if (isClosed && innerStreamReaders.length === 0) {
- controller == null ? void 0 : controller.close();
+ controller.error(error);
+ }
+ }
+ async function readStream2(controller) {
+ try {
+ if (lastRead2 == null) {
+ lastRead2 = reader2.read();
+ }
+ const result = await lastRead2;
+ lastRead2 = void 0;
+ if (!result.done) {
+ controller.enqueue(result.value);
+ } else {
+ controller.close();
  }
- }
- };
- return {
- stream: new ReadableStream({
- start(controllerParam) {
- controller = controllerParam;
- },
- pull: processPull,
- async cancel() {
- for (const reader of innerStreamReaders) {
- await reader.cancel();
- }
- innerStreamReaders = [];
- isClosed = true;
- }
- }),
- addStream: (innerStream) => {
- if (isClosed) {
- throw new Error("Cannot add inner stream: outer stream is closed");
- }
- innerStreamReaders.push(innerStream.getReader());
- },
- close: () => {
- isClosed = true;
- if (innerStreamReaders.length === 0) {
- controller == null ? void 0 : controller.close();
- }
- }
- };
- }
-
- // core/util/merge-streams.ts
- function mergeStreams(stream1, stream2) {
- const reader1 = stream1.getReader();
- const reader2 = stream2.getReader();
- let lastRead1 = void 0;
- let lastRead2 = void 0;
- let stream1Done = false;
- let stream2Done = false;
- async function readStream1(controller) {
- try {
- if (lastRead1 == null) {
- lastRead1 = reader1.read();
- }
- const result = await lastRead1;
- lastRead1 = void 0;
- if (!result.done) {
- controller.enqueue(result.value);
- } else {
- controller.close();
- }
- } catch (error) {
- controller.error(error);
- }
- }
- async function readStream2(controller) {
- try {
- if (lastRead2 == null) {
- lastRead2 = reader2.read();
- }
- const result = await lastRead2;
- lastRead2 = void 0;
- if (!result.done) {
- controller.enqueue(result.value);
- } else {
- controller.close();
- }
- } catch (error) {
- controller.error(error);
+ } catch (error) {
+ controller.error(error);
  }
  }
  return new ReadableStream({
@@ -4010,7 +3861,7 @@ function runToolsTransformation({

  // core/generate-text/stream-text.ts
  var originalGenerateId4 = createIdGenerator4({ prefix: "aitxt", size: 24 });
- async function streamText({
+ function streamText({
  model,
  tools,
  toolChoice,
@@ -4020,8 +3871,7 @@ async function streamText({
  maxRetries,
  abortSignal,
  headers,
- maxToolRoundtrips = 0,
- maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
+ maxSteps = 1,
  experimental_continueSteps: continueSteps = false,
  experimental_telemetry: telemetry,
  experimental_providerMetadata: providerMetadata,
@@ -4044,598 +3894,606 @@ async function streamText({
  message: "maxSteps must be at least 1"
  });
  }
- const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ return new DefaultStreamTextResult({
  model,
  telemetry,
  headers,
- settings: { ...settings, maxRetries }
- });
- const tracer = getTracer(telemetry);
- const initialPrompt = standardizePrompt({
- prompt: { system, prompt, messages },
- tools
- });
- return recordSpan({
- name: "ai.streamText",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({ operationId: "ai.streamText", telemetry }),
- ...baseTelemetryAttributes,
- // specific settings that only make sense on the outer level:
- "ai.prompt": {
- input: () => JSON.stringify({ system, prompt, messages })
- },
- "ai.settings.maxSteps": maxSteps
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (rootSpan) => {
- const retry = retryWithExponentialBackoff({ maxRetries });
- const startStep = async ({
- responseMessages
- }) => {
- const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
- const promptMessages = await convertToLanguageModelPrompt({
- prompt: {
- type: promptFormat,
- system: initialPrompt.system,
- messages: [...initialPrompt.messages, ...responseMessages]
- },
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: model.supportsUrl
- });
- const mode = {
- type: "regular",
- ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
- };
- const {
- result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
- doStreamSpan: doStreamSpan2,
- startTimestampMs: startTimestampMs2
- } = await retry(
- () => recordSpan({
- name: "ai.streamText.doStream",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.streamText.doStream",
- telemetry
- }),
- ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => promptFormat
- },
- "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages)
- },
- "ai.prompt.tools": {
- // convert the language model level tools:
- input: () => {
- var _a11;
- return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
- }
- },
- "ai.prompt.toolChoice": {
- input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
- },
- // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
- "gen_ai.request.presence_penalty": settings.presencePenalty,
- "gen_ai.request.stop_sequences": settings.stopSequences,
- "gen_ai.request.temperature": settings.temperature,
- "gen_ai.request.top_k": settings.topK,
- "gen_ai.request.top_p": settings.topP
- }
- }),
- tracer,
- endWhenDone: false,
- fn: async (doStreamSpan3) => ({
- startTimestampMs: now2(),
- // get before the call
- doStreamSpan: doStreamSpan3,
- result: await model.doStream({
- mode,
- ...prepareCallSettings(settings),
- inputFormat: promptFormat,
- prompt: promptMessages,
- providerMetadata,
- abortSignal,
- headers
- })
- })
- })
- );
- return {
- result: {
- stream: runToolsTransformation({
- tools,
- generatorStream: stream2,
- toolCallStreaming,
- tracer,
- telemetry,
- abortSignal
- }),
- warnings: warnings2,
- request: request2 != null ? request2 : {},
- rawResponse: rawResponse2
- },
- doStreamSpan: doStreamSpan2,
- startTimestampMs: startTimestampMs2
- };
- };
- const {
- result: { stream, warnings, rawResponse, request },
- doStreamSpan,
- startTimestampMs
- } = await startStep({ responseMessages: [] });
- return new DefaultStreamTextResult({
- stream,
- warnings,
- rawResponse,
- request,
- onChunk,
- onFinish,
- onStepFinish,
- rootSpan,
- doStreamSpan,
- telemetry,
- startTimestampMs,
- maxSteps,
- continueSteps,
- startStep,
- modelId: model.modelId,
- now: now2,
- currentDate,
- generateId: generateId3,
- tools
- });
- }
+ settings,
+ maxRetries,
+ abortSignal,
+ system,
+ prompt,
+ messages,
+ tools,
+ toolChoice,
+ toolCallStreaming,
+ activeTools,
+ maxSteps,
+ continueSteps,
+ providerMetadata,
+ onChunk,
+ onFinish,
+ onStepFinish,
+ now: now2,
+ currentDate,
+ generateId: generateId3
  });
  }
  var DefaultStreamTextResult = class {
  constructor({
- stream,
- warnings,
- rawResponse,
- request,
- onChunk,
- onFinish,
- onStepFinish,
- rootSpan,
- doStreamSpan,
+ model,
  telemetry,
- startTimestampMs,
+ headers,
+ settings,
+ maxRetries,
+ abortSignal,
+ system,
+ prompt,
+ messages,
+ tools,
+ toolChoice,
+ toolCallStreaming,
+ activeTools,
  maxSteps,
  continueSteps,
- startStep,
- modelId,
+ providerMetadata,
+ onChunk,
+ onFinish,
+ onStepFinish,
  now: now2,
  currentDate,
- generateId: generateId3,
- tools
+ generateId: generateId3
  }) {
- this.warnings = warnings;
- this.rawResponse = rawResponse;
- const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
- this.usage = usagePromise;
- const { resolve: resolveFinishReason, promise: finishReasonPromise } = createResolvablePromise();
- this.finishReason = finishReasonPromise;
- const { resolve: resolveText, promise: textPromise } = createResolvablePromise();
- this.text = textPromise;
- const { resolve: resolveToolCalls, promise: toolCallsPromise } = createResolvablePromise();
- this.toolCalls = toolCallsPromise;
- const { resolve: resolveToolResults, promise: toolResultsPromise } = createResolvablePromise();
- this.toolResults = toolResultsPromise;
- const { resolve: resolveSteps, promise: stepsPromise } = createResolvablePromise();
- this.steps = stepsPromise;
- const {
- resolve: resolveProviderMetadata,
- promise: providerMetadataPromise
- } = createResolvablePromise();
- this.experimental_providerMetadata = providerMetadataPromise;
- const { resolve: resolveRequest, promise: requestPromise } = createResolvablePromise();
- this.request = requestPromise;
- const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
- this.response = responsePromise;
- const {
- resolve: resolveResponseMessages,
- promise: responseMessagesPromise
- } = createResolvablePromise();
- this.responseMessages = responseMessagesPromise;
- const {
- stream: stitchableStream,
- addStream,
- close: closeStitchableStream
- } = createStitchableStream();
- this.originalStream = stitchableStream;
- const stepResults = [];
+ this.warningsPromise = new DelayedPromise();
+ this.usagePromise = new DelayedPromise();
+ this.finishReasonPromise = new DelayedPromise();
+ this.providerMetadataPromise = new DelayedPromise();
+ this.textPromise = new DelayedPromise();
+ this.toolCallsPromise = new DelayedPromise();
+ this.toolResultsPromise = new DelayedPromise();
+ this.requestPromise = new DelayedPromise();
+ this.responsePromise = new DelayedPromise();
3956
+ this.stepsPromise = new DelayedPromise();
3957
+ this.stitchableStream = createStitchableStream();
3958
+ const tracer = getTracer(telemetry);
3959
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
3960
+ model,
3961
+ telemetry,
3962
+ headers,
3963
+ settings: { ...settings, maxRetries }
3964
+ });
3965
+ const initialPrompt = standardizePrompt({
3966
+ prompt: { system, prompt, messages },
3967
+ tools
3968
+ });
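Note: the constructor now fronts each public field with a DelayedPromise instead of the createResolvablePromise pairs the old code threaded through closures, and standardizePrompt validates the system/prompt/messages input up front. Below is a minimal sketch of the DelayedPromise shape these call sites imply — resolve()/reject() settle it, value is what the getters further down return. This is hypothetical: the real class is not shown in this diff and may, for example, defer creating the promise until value is first read.

class DelayedPromise {
  constructor() {
    this._promise = new Promise((resolve, reject) => {
      this._resolve = resolve;
      this._reject = reject;
    });
  }
  // read side: what the `get warnings()` style getters below hand out
  get value() {
    return this._promise;
  }
  resolve(value) {
    this._resolve(value); // settled once the final step finishes
  }
  reject(error) {
    this._reject(error);
  }
}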
4258
3969
  const self = this;
4259
- function addStepStream({
4260
- stream: stream2,
4261
- startTimestamp,
4262
- doStreamSpan: doStreamSpan2,
4263
- currentStep,
4264
- responseMessages,
4265
- usage = {
4266
- promptTokens: 0,
4267
- completionTokens: 0,
4268
- totalTokens: 0
4269
- },
4270
- stepType,
4271
- previousStepText = "",
4272
- stepRequest,
4273
- hasLeadingWhitespace
4274
- }) {
4275
- const stepToolCalls = [];
4276
- const stepToolResults = [];
4277
- let stepFinishReason = "unknown";
4278
- let stepUsage = {
4279
- promptTokens: 0,
4280
- completionTokens: 0,
4281
- totalTokens: 0
4282
- };
4283
- let stepProviderMetadata;
4284
- let stepFirstChunk = true;
4285
- let stepText = "";
4286
- let fullStepText = stepType === "continue" ? previousStepText : "";
4287
- let stepLogProbs;
4288
- let stepResponse = {
4289
- id: generateId3(),
4290
- timestamp: currentDate(),
4291
- modelId
4292
- };
4293
- let chunkBuffer = "";
4294
- let chunkTextPublished = false;
4295
- let inWhitespacePrefix = true;
4296
- let hasWhitespaceSuffix = false;
4297
- async function publishTextChunk({
4298
- controller,
4299
- chunk
4300
- }) {
4301
- controller.enqueue(chunk);
4302
- stepText += chunk.textDelta;
4303
- fullStepText += chunk.textDelta;
4304
- chunkTextPublished = true;
4305
- hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
4306
- await (onChunk == null ? void 0 : onChunk({ chunk }));
4307
- }
4308
- addStream(
4309
- stream2.pipeThrough(
4310
- new TransformStream({
4311
- async transform(chunk, controller) {
4312
- var _a11, _b, _c;
4313
- if (stepFirstChunk) {
4314
- const msToFirstChunk = now2() - startTimestamp;
4315
- stepFirstChunk = false;
4316
- doStreamSpan2.addEvent("ai.stream.firstChunk", {
4317
- "ai.response.msToFirstChunk": msToFirstChunk,
4318
- // deprecated:
4319
- "ai.stream.msToFirstChunk": msToFirstChunk
4320
- });
4321
- doStreamSpan2.setAttributes({
4322
- "ai.response.msToFirstChunk": msToFirstChunk,
4323
- // deprecated:
4324
- "ai.stream.msToFirstChunk": msToFirstChunk
4325
- });
4326
- }
4327
- if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
4328
- return;
4329
- }
4330
- const chunkType = chunk.type;
4331
- switch (chunkType) {
4332
- case "text-delta": {
4333
- if (continueSteps) {
4334
- const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
4335
- if (trimmedChunkText.length === 0) {
4336
- break;
4337
- }
4338
- inWhitespacePrefix = false;
4339
- chunkBuffer += trimmedChunkText;
4340
- const split = splitOnLastWhitespace(chunkBuffer);
4341
- if (split != null) {
4342
- chunkBuffer = split.suffix;
4343
- await publishTextChunk({
4344
- controller,
4345
- chunk: {
4346
- type: "text-delta",
4347
- textDelta: split.prefix + split.whitespace
4348
- }
4349
- });
3970
+ const stepResults = [];
3971
+ recordSpan({
3972
+ name: "ai.streamText",
3973
+ attributes: selectTelemetryAttributes({
3974
+ telemetry,
3975
+ attributes: {
3976
+ ...assembleOperationName({ operationId: "ai.streamText", telemetry }),
3977
+ ...baseTelemetryAttributes,
3978
+ // specific settings that only make sense on the outer level:
3979
+ "ai.prompt": {
3980
+ input: () => JSON.stringify({ system, prompt, messages })
3981
+ },
3982
+ "ai.settings.maxSteps": maxSteps
3983
+ }
3984
+ }),
3985
+ tracer,
3986
+ endWhenDone: false,
3987
+ fn: async (rootSpan) => {
3988
+ const retry = retryWithExponentialBackoff({ maxRetries });
3989
+ const startStep = async ({
3990
+ responseMessages
3991
+ }) => {
3992
+ const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
3993
+ const promptMessages = await convertToLanguageModelPrompt({
3994
+ prompt: {
3995
+ type: promptFormat,
3996
+ system: initialPrompt.system,
3997
+ messages: [...initialPrompt.messages, ...responseMessages]
3998
+ },
3999
+ modelSupportsImageUrls: model.supportsImageUrls,
4000
+ modelSupportsUrl: model.supportsUrl
4001
+ });
4002
+ const mode = {
4003
+ type: "regular",
4004
+ ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
4005
+ };
4006
+ const {
4007
+ result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
4008
+ doStreamSpan: doStreamSpan2,
4009
+ startTimestampMs: startTimestampMs2
4010
+ } = await retry(
4011
+ () => recordSpan({
4012
+ name: "ai.streamText.doStream",
4013
+ attributes: selectTelemetryAttributes({
4014
+ telemetry,
4015
+ attributes: {
4016
+ ...assembleOperationName({
4017
+ operationId: "ai.streamText.doStream",
4018
+ telemetry
4019
+ }),
4020
+ ...baseTelemetryAttributes,
4021
+ "ai.prompt.format": {
4022
+ input: () => promptFormat
4023
+ },
4024
+ "ai.prompt.messages": {
4025
+ input: () => JSON.stringify(promptMessages)
4026
+ },
4027
+ "ai.prompt.tools": {
4028
+ // convert the language model level tools:
4029
+ input: () => {
4030
+ var _a11;
4031
+ return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
4350
4032
  }
4351
- } else {
4352
- await publishTextChunk({ controller, chunk });
4353
- }
4354
- break;
4355
- }
4356
- case "tool-call": {
4357
- controller.enqueue(chunk);
4358
- stepToolCalls.push(chunk);
4359
- await (onChunk == null ? void 0 : onChunk({ chunk }));
4360
- break;
4361
- }
4362
- case "tool-result": {
4363
- controller.enqueue(chunk);
4364
- stepToolResults.push(chunk);
4365
- await (onChunk == null ? void 0 : onChunk({ chunk }));
4366
- break;
4367
- }
4368
- case "response-metadata": {
4369
- stepResponse = {
4370
- id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
4371
- timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
4372
- modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
4373
- };
4374
- break;
4375
- }
4376
- case "finish": {
4377
- stepUsage = chunk.usage;
4378
- stepFinishReason = chunk.finishReason;
4379
- stepProviderMetadata = chunk.experimental_providerMetadata;
4380
- stepLogProbs = chunk.logprobs;
4381
- const msToFinish = now2() - startTimestamp;
4382
- doStreamSpan2.addEvent("ai.stream.finish");
4383
- doStreamSpan2.setAttributes({
4384
- "ai.response.msToFinish": msToFinish,
4385
- "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
4386
- });
4387
- break;
4388
- }
4389
- case "tool-call-streaming-start":
4390
- case "tool-call-delta": {
4391
- controller.enqueue(chunk);
4392
- await (onChunk == null ? void 0 : onChunk({ chunk }));
4393
- break;
4394
- }
4395
- case "error": {
4396
- controller.enqueue(chunk);
4397
- stepFinishReason = "error";
4398
- break;
4399
- }
4400
- default: {
4401
- const exhaustiveCheck = chunkType;
4402
- throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
4033
+ },
4034
+ "ai.prompt.toolChoice": {
4035
+ input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
4036
+ },
4037
+ // standardized gen-ai llm span attributes:
4038
+ "gen_ai.system": model.provider,
4039
+ "gen_ai.request.model": model.modelId,
4040
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
4041
+ "gen_ai.request.max_tokens": settings.maxTokens,
4042
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
4043
+ "gen_ai.request.stop_sequences": settings.stopSequences,
4044
+ "gen_ai.request.temperature": settings.temperature,
4045
+ "gen_ai.request.top_k": settings.topK,
4046
+ "gen_ai.request.top_p": settings.topP
4403
4047
  }
4404
- }
4048
+ }),
4049
+ tracer,
4050
+ endWhenDone: false,
4051
+ fn: async (doStreamSpan3) => ({
4052
+ startTimestampMs: now2(),
4053
+ // get before the call
4054
+ doStreamSpan: doStreamSpan3,
4055
+ result: await model.doStream({
4056
+ mode,
4057
+ ...prepareCallSettings(settings),
4058
+ inputFormat: promptFormat,
4059
+ prompt: promptMessages,
4060
+ providerMetadata,
4061
+ abortSignal,
4062
+ headers
4063
+ })
4064
+ })
4065
+ })
4066
+ );
4067
+ return {
4068
+ result: {
4069
+ stream: runToolsTransformation({
4070
+ tools,
4071
+ generatorStream: stream2,
4072
+ toolCallStreaming,
4073
+ tracer,
4074
+ telemetry,
4075
+ abortSignal
4076
+ }),
4077
+ warnings: warnings2,
4078
+ request: request2 != null ? request2 : {},
4079
+ rawResponse: rawResponse2
4405
4080
  },
4406
- // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
4407
- async flush(controller) {
4408
- var _a11;
4409
- const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
4410
- let nextStepType = "done";
4411
- if (currentStep + 1 < maxSteps) {
4412
- if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
4413
- stepToolCalls.length === 0) {
4414
- nextStepType = "continue";
4415
- } else if (
4416
- // there are tool calls:
4417
- stepToolCalls.length > 0 && // all current tool calls have results:
4418
- stepToolResults.length === stepToolCalls.length
4419
- ) {
4420
- nextStepType = "tool-result";
4421
- }
4422
- }
4423
- if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
4424
- stepType === "continue" && !chunkTextPublished)) {
4425
- await publishTextChunk({
4426
- controller,
4427
- chunk: {
4428
- type: "text-delta",
4429
- textDelta: chunkBuffer
4430
- }
4431
- });
4432
- chunkBuffer = "";
4433
- }
4434
- try {
4435
- doStreamSpan2.setAttributes(
4436
- selectTelemetryAttributes({
4437
- telemetry,
4438
- attributes: {
4439
- "ai.response.finishReason": stepFinishReason,
4440
- "ai.response.text": { output: () => stepText },
4441
- "ai.response.toolCalls": {
4442
- output: () => stepToolCallsJson
4443
- },
4444
- "ai.response.id": stepResponse.id,
4445
- "ai.response.model": stepResponse.modelId,
4446
- "ai.response.timestamp": stepResponse.timestamp.toISOString(),
4447
- "ai.usage.promptTokens": stepUsage.promptTokens,
4448
- "ai.usage.completionTokens": stepUsage.completionTokens,
4449
- // deprecated
4450
- "ai.finishReason": stepFinishReason,
4451
- "ai.result.text": { output: () => stepText },
4452
- "ai.result.toolCalls": {
4453
- output: () => stepToolCallsJson
4454
- },
4455
- // standardized gen-ai llm span attributes:
4456
- "gen_ai.response.finish_reasons": [stepFinishReason],
4457
- "gen_ai.response.id": stepResponse.id,
4458
- "gen_ai.response.model": stepResponse.modelId,
4459
- "gen_ai.usage.input_tokens": stepUsage.promptTokens,
4460
- "gen_ai.usage.output_tokens": stepUsage.completionTokens
4081
+ doStreamSpan: doStreamSpan2,
4082
+ startTimestampMs: startTimestampMs2
4083
+ };
4084
+ };
4085
+ const {
4086
+ result: { stream, warnings, rawResponse, request },
4087
+ doStreamSpan,
4088
+ startTimestampMs
4089
+ } = await startStep({ responseMessages: [] });
4090
+ function addStepStream({
4091
+ stream: stream2,
4092
+ startTimestamp,
4093
+ doStreamSpan: doStreamSpan2,
4094
+ currentStep,
4095
+ responseMessages,
4096
+ usage = {
4097
+ promptTokens: 0,
4098
+ completionTokens: 0,
4099
+ totalTokens: 0
4100
+ },
4101
+ stepType,
4102
+ previousStepText = "",
4103
+ stepRequest,
4104
+ hasLeadingWhitespace,
4105
+ warnings: warnings2,
4106
+ response
4107
+ }) {
4108
+ const stepToolCalls = [];
4109
+ const stepToolResults = [];
4110
+ let stepFinishReason = "unknown";
4111
+ let stepUsage = {
4112
+ promptTokens: 0,
4113
+ completionTokens: 0,
4114
+ totalTokens: 0
4115
+ };
4116
+ let stepProviderMetadata;
4117
+ let stepFirstChunk = true;
4118
+ let stepText = "";
4119
+ let fullStepText = stepType === "continue" ? previousStepText : "";
4120
+ let stepLogProbs;
4121
+ let stepResponse = {
4122
+ id: generateId3(),
4123
+ timestamp: currentDate(),
4124
+ modelId: model.modelId
4125
+ };
4126
+ let chunkBuffer = "";
4127
+ let chunkTextPublished = false;
4128
+ let inWhitespacePrefix = true;
4129
+ let hasWhitespaceSuffix = false;
4130
+ async function publishTextChunk({
4131
+ controller,
4132
+ chunk
4133
+ }) {
4134
+ controller.enqueue(chunk);
4135
+ stepText += chunk.textDelta;
4136
+ fullStepText += chunk.textDelta;
4137
+ chunkTextPublished = true;
4138
+ hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
4139
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
4140
+ }
4141
+ self.stitchableStream.addStream(
4142
+ stream2.pipeThrough(
4143
+ new TransformStream({
4144
+ async transform(chunk, controller) {
4145
+ var _a11, _b, _c;
4146
+ if (stepFirstChunk) {
4147
+ const msToFirstChunk = now2() - startTimestamp;
4148
+ stepFirstChunk = false;
4149
+ doStreamSpan2.addEvent("ai.stream.firstChunk", {
4150
+ "ai.response.msToFirstChunk": msToFirstChunk
4151
+ });
4152
+ doStreamSpan2.setAttributes({
4153
+ "ai.response.msToFirstChunk": msToFirstChunk
4154
+ });
4155
+ }
4156
+ if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
4157
+ return;
4158
+ }
4159
+ const chunkType = chunk.type;
4160
+ switch (chunkType) {
4161
+ case "text-delta": {
4162
+ if (continueSteps) {
4163
+ const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
4164
+ if (trimmedChunkText.length === 0) {
4165
+ break;
4166
+ }
4167
+ inWhitespacePrefix = false;
4168
+ chunkBuffer += trimmedChunkText;
4169
+ const split = splitOnLastWhitespace(chunkBuffer);
4170
+ if (split != null) {
4171
+ chunkBuffer = split.suffix;
4172
+ await publishTextChunk({
4173
+ controller,
4174
+ chunk: {
4175
+ type: "text-delta",
4176
+ textDelta: split.prefix + split.whitespace
4177
+ }
4178
+ });
4179
+ }
4180
+ } else {
4181
+ await publishTextChunk({ controller, chunk });
4182
+ }
4183
+ break;
4461
4184
  }
4462
- })
4463
- );
4464
- } catch (error) {
4465
- } finally {
4466
- doStreamSpan2.end();
4467
- }
4468
- controller.enqueue({
4469
- type: "step-finish",
4470
- finishReason: stepFinishReason,
4471
- usage: stepUsage,
4472
- experimental_providerMetadata: stepProviderMetadata,
4473
- logprobs: stepLogProbs,
4474
- response: {
4475
- ...stepResponse
4185
+ case "tool-call": {
4186
+ controller.enqueue(chunk);
4187
+ stepToolCalls.push(chunk);
4188
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
4189
+ break;
4190
+ }
4191
+ case "tool-result": {
4192
+ controller.enqueue(chunk);
4193
+ stepToolResults.push(chunk);
4194
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
4195
+ break;
4196
+ }
4197
+ case "response-metadata": {
4198
+ stepResponse = {
4199
+ id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
4200
+ timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
4201
+ modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
4202
+ };
4203
+ break;
4204
+ }
4205
+ case "finish": {
4206
+ stepUsage = chunk.usage;
4207
+ stepFinishReason = chunk.finishReason;
4208
+ stepProviderMetadata = chunk.experimental_providerMetadata;
4209
+ stepLogProbs = chunk.logprobs;
4210
+ const msToFinish = now2() - startTimestamp;
4211
+ doStreamSpan2.addEvent("ai.stream.finish");
4212
+ doStreamSpan2.setAttributes({
4213
+ "ai.response.msToFinish": msToFinish,
4214
+ "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
4215
+ });
4216
+ break;
4217
+ }
4218
+ case "tool-call-streaming-start":
4219
+ case "tool-call-delta": {
4220
+ controller.enqueue(chunk);
4221
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
4222
+ break;
4223
+ }
4224
+ case "error": {
4225
+ controller.enqueue(chunk);
4226
+ stepFinishReason = "error";
4227
+ break;
4228
+ }
4229
+ default: {
4230
+ const exhaustiveCheck = chunkType;
4231
+ throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
4232
+ }
4233
+ }
4476
4234
  },
4477
- isContinued: nextStepType === "continue"
4478
- });
4479
- if (stepType === "continue") {
4480
- const lastMessage = responseMessages[responseMessages.length - 1];
4481
- if (typeof lastMessage.content === "string") {
4482
- lastMessage.content += stepText;
4483
- } else {
4484
- lastMessage.content.push({
4485
- text: stepText,
4486
- type: "text"
4235
+ // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
4236
+ async flush(controller) {
4237
+ const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
4238
+ let nextStepType = "done";
4239
+ if (currentStep + 1 < maxSteps) {
4240
+ if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
4241
+ stepToolCalls.length === 0) {
4242
+ nextStepType = "continue";
4243
+ } else if (
4244
+ // there are tool calls:
4245
+ stepToolCalls.length > 0 && // all current tool calls have results:
4246
+ stepToolResults.length === stepToolCalls.length
4247
+ ) {
4248
+ nextStepType = "tool-result";
4249
+ }
4250
+ }
4251
+ if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
4252
+ stepType === "continue" && !chunkTextPublished)) {
4253
+ await publishTextChunk({
4254
+ controller,
4255
+ chunk: {
4256
+ type: "text-delta",
4257
+ textDelta: chunkBuffer
4258
+ }
4259
+ });
4260
+ chunkBuffer = "";
4261
+ }
4262
+ try {
4263
+ doStreamSpan2.setAttributes(
4264
+ selectTelemetryAttributes({
4265
+ telemetry,
4266
+ attributes: {
4267
+ "ai.response.finishReason": stepFinishReason,
4268
+ "ai.response.text": { output: () => stepText },
4269
+ "ai.response.toolCalls": {
4270
+ output: () => stepToolCallsJson
4271
+ },
4272
+ "ai.response.id": stepResponse.id,
4273
+ "ai.response.model": stepResponse.modelId,
4274
+ "ai.response.timestamp": stepResponse.timestamp.toISOString(),
4275
+ "ai.usage.promptTokens": stepUsage.promptTokens,
4276
+ "ai.usage.completionTokens": stepUsage.completionTokens,
4277
+ // standardized gen-ai llm span attributes:
4278
+ "gen_ai.response.finish_reasons": [stepFinishReason],
4279
+ "gen_ai.response.id": stepResponse.id,
4280
+ "gen_ai.response.model": stepResponse.modelId,
4281
+ "gen_ai.usage.input_tokens": stepUsage.promptTokens,
4282
+ "gen_ai.usage.output_tokens": stepUsage.completionTokens
4283
+ }
4284
+ })
4285
+ );
4286
+ } catch (error) {
4287
+ } finally {
4288
+ doStreamSpan2.end();
4289
+ }
4290
+ controller.enqueue({
4291
+ type: "step-finish",
4292
+ finishReason: stepFinishReason,
4293
+ usage: stepUsage,
4294
+ experimental_providerMetadata: stepProviderMetadata,
4295
+ logprobs: stepLogProbs,
4296
+ response: {
4297
+ ...stepResponse
4298
+ },
4299
+ isContinued: nextStepType === "continue"
4487
4300
  });
4488
- }
4489
- } else {
4490
- responseMessages.push(
4491
- ...toResponseMessages({
4301
+ if (stepType === "continue") {
4302
+ const lastMessage = responseMessages[responseMessages.length - 1];
4303
+ if (typeof lastMessage.content === "string") {
4304
+ lastMessage.content += stepText;
4305
+ } else {
4306
+ lastMessage.content.push({
4307
+ text: stepText,
4308
+ type: "text"
4309
+ });
4310
+ }
4311
+ } else {
4312
+ responseMessages.push(
4313
+ ...toResponseMessages({
4314
+ text: stepText,
4315
+ tools: tools != null ? tools : {},
4316
+ toolCalls: stepToolCalls,
4317
+ toolResults: stepToolResults
4318
+ })
4319
+ );
4320
+ }
4321
+ const currentStepResult = {
4322
+ stepType,
4492
4323
  text: stepText,
4493
- tools: tools != null ? tools : {},
4494
4324
  toolCalls: stepToolCalls,
4495
- toolResults: stepToolResults
4496
- })
4497
- );
4498
- }
4499
- const currentStepResult = {
4500
- stepType,
4501
- text: stepText,
4502
- toolCalls: stepToolCalls,
4503
- toolResults: stepToolResults,
4504
- finishReason: stepFinishReason,
4505
- usage: stepUsage,
4506
- warnings: self.warnings,
4507
- logprobs: stepLogProbs,
4508
- request: stepRequest,
4509
- rawResponse: self.rawResponse,
4510
- response: {
4511
- ...stepResponse,
4512
- headers: (_a11 = self.rawResponse) == null ? void 0 : _a11.headers,
4513
- // deep clone msgs to avoid mutating past messages in multi-step:
4514
- messages: JSON.parse(JSON.stringify(responseMessages))
4515
- },
4516
- experimental_providerMetadata: stepProviderMetadata,
4517
- isContinued: nextStepType === "continue"
4518
- };
4519
- stepResults.push(currentStepResult);
4520
- await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
4521
- const combinedUsage = {
4522
- promptTokens: usage.promptTokens + stepUsage.promptTokens,
4523
- completionTokens: usage.completionTokens + stepUsage.completionTokens,
4524
- totalTokens: usage.totalTokens + stepUsage.totalTokens
4525
- };
4526
- if (nextStepType !== "done") {
4527
- const {
4528
- result,
4529
- doStreamSpan: doStreamSpan3,
4530
- startTimestampMs: startTimestamp2
4531
- } = await startStep({ responseMessages });
4532
- self.warnings = result.warnings;
4533
- self.rawResponse = result.rawResponse;
4534
- addStepStream({
4535
- stream: result.stream,
4536
- startTimestamp: startTimestamp2,
4537
- doStreamSpan: doStreamSpan3,
4538
- currentStep: currentStep + 1,
4539
- responseMessages,
4540
- usage: combinedUsage,
4541
- stepType: nextStepType,
4542
- previousStepText: fullStepText,
4543
- stepRequest: result.request,
4544
- hasLeadingWhitespace: hasWhitespaceSuffix
4545
- });
4546
- return;
4547
- }
4548
- try {
4549
- controller.enqueue({
4550
- type: "finish",
4551
- finishReason: stepFinishReason,
4552
- usage: combinedUsage,
4553
- experimental_providerMetadata: stepProviderMetadata,
4554
- logprobs: stepLogProbs,
4555
- response: {
4556
- ...stepResponse
4325
+ toolResults: stepToolResults,
4326
+ finishReason: stepFinishReason,
4327
+ usage: stepUsage,
4328
+ warnings: warnings2,
4329
+ logprobs: stepLogProbs,
4330
+ request: stepRequest,
4331
+ response: {
4332
+ ...stepResponse,
4333
+ headers: response == null ? void 0 : response.headers,
4334
+ // deep clone msgs to avoid mutating past messages in multi-step:
4335
+ messages: JSON.parse(JSON.stringify(responseMessages))
4336
+ },
4337
+ experimental_providerMetadata: stepProviderMetadata,
4338
+ isContinued: nextStepType === "continue"
4339
+ };
4340
+ stepResults.push(currentStepResult);
4341
+ await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
4342
+ const combinedUsage = {
4343
+ promptTokens: usage.promptTokens + stepUsage.promptTokens,
4344
+ completionTokens: usage.completionTokens + stepUsage.completionTokens,
4345
+ totalTokens: usage.totalTokens + stepUsage.totalTokens
4346
+ };
4347
+ if (nextStepType !== "done") {
4348
+ const {
4349
+ result,
4350
+ doStreamSpan: doStreamSpan3,
4351
+ startTimestampMs: startTimestamp2
4352
+ } = await startStep({ responseMessages });
4353
+ warnings2 = result.warnings;
4354
+ response = result.rawResponse;
4355
+ addStepStream({
4356
+ stream: result.stream,
4357
+ startTimestamp: startTimestamp2,
4358
+ doStreamSpan: doStreamSpan3,
4359
+ currentStep: currentStep + 1,
4360
+ responseMessages,
4361
+ usage: combinedUsage,
4362
+ stepType: nextStepType,
4363
+ previousStepText: fullStepText,
4364
+ stepRequest: result.request,
4365
+ hasLeadingWhitespace: hasWhitespaceSuffix,
4366
+ warnings: warnings2,
4367
+ response
4368
+ });
4369
+ return;
4557
4370
  }
4558
- });
4559
- closeStitchableStream();
4560
- rootSpan.setAttributes(
4561
- selectTelemetryAttributes({
4562
- telemetry,
4563
- attributes: {
4564
- "ai.response.finishReason": stepFinishReason,
4565
- "ai.response.text": { output: () => fullStepText },
4566
- "ai.response.toolCalls": {
4567
- output: () => stepToolCallsJson
4568
- },
4569
- "ai.usage.promptTokens": combinedUsage.promptTokens,
4570
- "ai.usage.completionTokens": combinedUsage.completionTokens,
4571
- // deprecated
4572
- "ai.finishReason": stepFinishReason,
4573
- "ai.result.text": { output: () => fullStepText },
4574
- "ai.result.toolCalls": {
4575
- output: () => stepToolCallsJson
4371
+ try {
4372
+ controller.enqueue({
4373
+ type: "finish",
4374
+ finishReason: stepFinishReason,
4375
+ usage: combinedUsage,
4376
+ experimental_providerMetadata: stepProviderMetadata,
4377
+ logprobs: stepLogProbs,
4378
+ response: {
4379
+ ...stepResponse
4576
4380
  }
4577
- }
4578
- })
4579
- );
4580
- resolveUsage(combinedUsage);
4581
- resolveFinishReason(stepFinishReason);
4582
- resolveText(fullStepText);
4583
- resolveToolCalls(stepToolCalls);
4584
- resolveProviderMetadata(stepProviderMetadata);
4585
- resolveToolResults(stepToolResults);
4586
- resolveRequest(stepRequest);
4587
- resolveResponse({
4588
- ...stepResponse,
4589
- headers: rawResponse == null ? void 0 : rawResponse.headers,
4590
- messages: responseMessages
4591
- });
4592
- resolveSteps(stepResults);
4593
- resolveResponseMessages(responseMessages);
4594
- await (onFinish == null ? void 0 : onFinish({
4595
- finishReason: stepFinishReason,
4596
- logprobs: stepLogProbs,
4597
- usage: combinedUsage,
4598
- text: fullStepText,
4599
- toolCalls: stepToolCalls,
4600
- // The tool results are inferred as a never[] type, because they are
4601
- // optional and the execute method with an inferred result type is
4602
- // optional as well. Therefore we need to cast the toolResults to any.
4603
- // The type exposed to the users will be correctly inferred.
4604
- toolResults: stepToolResults,
4605
- request: stepRequest,
4606
- rawResponse,
4607
- response: {
4608
- ...stepResponse,
4609
- headers: rawResponse == null ? void 0 : rawResponse.headers,
4610
- messages: responseMessages
4611
- },
4612
- warnings,
4613
- experimental_providerMetadata: stepProviderMetadata,
4614
- steps: stepResults,
4615
- responseMessages
4616
- }));
4617
- } catch (error) {
4618
- controller.error(error);
4619
- } finally {
4620
- rootSpan.end();
4621
- }
4622
- }
4623
- })
4624
- )
4381
+ });
4382
+ self.stitchableStream.close();
4383
+ rootSpan.setAttributes(
4384
+ selectTelemetryAttributes({
4385
+ telemetry,
4386
+ attributes: {
4387
+ "ai.response.finishReason": stepFinishReason,
4388
+ "ai.response.text": { output: () => fullStepText },
4389
+ "ai.response.toolCalls": {
4390
+ output: () => stepToolCallsJson
4391
+ },
4392
+ "ai.usage.promptTokens": combinedUsage.promptTokens,
4393
+ "ai.usage.completionTokens": combinedUsage.completionTokens
4394
+ }
4395
+ })
4396
+ );
4397
+ self.usagePromise.resolve(combinedUsage);
4398
+ self.finishReasonPromise.resolve(stepFinishReason);
4399
+ self.textPromise.resolve(fullStepText);
4400
+ self.toolCallsPromise.resolve(stepToolCalls);
4401
+ self.providerMetadataPromise.resolve(stepProviderMetadata);
4402
+ self.toolResultsPromise.resolve(stepToolResults);
4403
+ self.requestPromise.resolve(stepRequest);
4404
+ self.responsePromise.resolve({
4405
+ ...stepResponse,
4406
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
4407
+ messages: responseMessages
4408
+ });
4409
+ self.stepsPromise.resolve(stepResults);
4410
+ self.warningsPromise.resolve(warnings2 != null ? warnings2 : []);
4411
+ await (onFinish == null ? void 0 : onFinish({
4412
+ finishReason: stepFinishReason,
4413
+ logprobs: stepLogProbs,
4414
+ usage: combinedUsage,
4415
+ text: fullStepText,
4416
+ toolCalls: stepToolCalls,
4417
+ // The tool results are inferred as a never[] type, because they are
4418
+ // optional and the execute method with an inferred result type is
4419
+ // optional as well. Therefore we need to cast the toolResults to any.
4420
+ // The type exposed to the users will be correctly inferred.
4421
+ toolResults: stepToolResults,
4422
+ request: stepRequest,
4423
+ response: {
4424
+ ...stepResponse,
4425
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
4426
+ messages: responseMessages
4427
+ },
4428
+ warnings: warnings2,
4429
+ experimental_providerMetadata: stepProviderMetadata,
4430
+ steps: stepResults
4431
+ }));
4432
+ } catch (error) {
4433
+ controller.error(error);
4434
+ } finally {
4435
+ rootSpan.end();
4436
+ }
4437
+ }
4438
+ })
4439
+ )
4440
+ );
4441
+ }
4442
+ addStepStream({
4443
+ stream,
4444
+ startTimestamp: startTimestampMs,
4445
+ doStreamSpan,
4446
+ currentStep: 0,
4447
+ responseMessages: [],
4448
+ usage: void 0,
4449
+ stepType: "initial",
4450
+ stepRequest: request,
4451
+ hasLeadingWhitespace: false,
4452
+ warnings,
4453
+ response: rawResponse
4454
+ });
4455
+ }
4456
+ }).catch((error) => {
4457
+ self.stitchableStream.addStream(
4458
+ new ReadableStream({
4459
+ start(controller) {
4460
+ controller.error(error);
4461
+ }
4462
+ })
4625
4463
  );
4626
- }
4627
- addStepStream({
4628
- stream,
4629
- startTimestamp: startTimestampMs,
4630
- doStreamSpan,
4631
- currentStep: 0,
4632
- responseMessages: [],
4633
- usage: void 0,
4634
- stepType: "initial",
4635
- stepRequest: request,
4636
- hasLeadingWhitespace: false
4464
+ self.stitchableStream.close();
4637
4465
  });
4638
4466
  }
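Note: since the whole recordSpan chain above now runs un-awaited inside the constructor, a failure no longer rejects anything the caller awaited; the .catch funnels the error into the stitchable stream as an immediately-erroring ReadableStream, so it surfaces when the stream is read. Consumer-side sketch (handler names are hypothetical):

try {
  for await (const part of result.fullStream) {
    handlePart(part); // hypothetical per-part handler
  }
} catch (error) {
  // setup and streaming errors land here, not at the streamText call site
  reportStreamError(error); // hypothetical
}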
4467
+ get warnings() {
4468
+ return this.warningsPromise.value;
4469
+ }
4470
+ get usage() {
4471
+ return this.usagePromise.value;
4472
+ }
4473
+ get finishReason() {
4474
+ return this.finishReasonPromise.value;
4475
+ }
4476
+ get experimental_providerMetadata() {
4477
+ return this.providerMetadataPromise.value;
4478
+ }
4479
+ get text() {
4480
+ return this.textPromise.value;
4481
+ }
4482
+ get toolCalls() {
4483
+ return this.toolCallsPromise.value;
4484
+ }
4485
+ get toolResults() {
4486
+ return this.toolResultsPromise.value;
4487
+ }
4488
+ get request() {
4489
+ return this.requestPromise.value;
4490
+ }
4491
+ get response() {
4492
+ return this.responsePromise.value;
4493
+ }
4494
+ get steps() {
4495
+ return this.stepsPromise.value;
4496
+ }
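Note: the result fields the old constructor assigned directly are now getters over the delayed promises, so the awaited consumer API is unchanged; response messages are reached via (await result.response).messages rather than a separate responseMessages promise. Usage sketch, assuming a configured model and the canary behavior where streamText returns its result synchronously:

const result = streamText({ model, prompt: "Write a haiku." });
for await (const textDelta of result.textStream) {
  process.stdout.write(textDelta);
}
console.log(await result.finishReason, await result.usage);
console.log((await result.response).messages.length);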
4639
4497
  /**
4640
4498
  Split out a new stream from the original stream.
4641
4499
  The original stream is replaced to allow for further splitting,
@@ -4645,8 +4503,8 @@ var DefaultStreamTextResult = class {
4645
4503
  However, the LLM results are expected to be small enough to not cause issues.
4646
4504
  */
4647
4505
  teeStream() {
4648
- const [stream1, stream2] = this.originalStream.tee();
4649
- this.originalStream = stream2;
4506
+ const [stream1, stream2] = this.stitchableStream.stream.tee();
4507
+ this.stitchableStream.stream = stream2;
4650
4508
  return stream1;
4651
4509
  }
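Note: teeStream is unchanged apart from its backing field moving from originalStream to stitchableStream.stream. The pattern it relies on, in a self-contained miniature:

const current = new ReadableStream({
  start(controller) {
    controller.enqueue("chunk");
    controller.close();
  },
});
// each accessor takes one branch; the other half stays splittable
const [branchForCaller, remainder] = current.tee();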
4652
4510
  get textStream() {
@@ -4667,37 +4525,18 @@ var DefaultStreamTextResult = class {
4667
4525
  }
4668
4526
  });
4669
4527
  }
4670
- toAIStream(callbacks = {}) {
4671
- return this.toDataStreamInternal({ callbacks });
4672
- }
4673
4528
  toDataStreamInternal({
4674
- callbacks = {},
4675
4529
  getErrorMessage: getErrorMessage3 = () => "",
4676
4530
  // mask error messages for safety by default
4677
4531
  sendUsage = true
4678
4532
  } = {}) {
4679
4533
  let aggregatedResponse = "";
4680
4534
  const callbackTransformer = new TransformStream({
4681
- async start() {
4682
- if (callbacks.onStart)
4683
- await callbacks.onStart();
4684
- },
4685
4535
  async transform(chunk, controller) {
4686
4536
  controller.enqueue(chunk);
4687
4537
  if (chunk.type === "text-delta") {
4688
- const textDelta = chunk.textDelta;
4689
- aggregatedResponse += textDelta;
4690
- if (callbacks.onToken)
4691
- await callbacks.onToken(textDelta);
4692
- if (callbacks.onText)
4693
- await callbacks.onText(textDelta);
4538
+ aggregatedResponse += chunk.textDelta;
4694
4539
  }
4695
- },
4696
- async flush() {
4697
- if (callbacks.onCompletion)
4698
- await callbacks.onCompletion(aggregatedResponse);
4699
- if (callbacks.onFinal)
4700
- await callbacks.onFinal(aggregatedResponse);
4701
4540
  }
4702
4541
  });
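Note: the AIStream-era callbacks (onStart, onToken, onText, onCompletion, onFinal) are dropped from this transformer, which now only aggregates text for the stream parts below; per-chunk and completion hooks live on streamText itself via the onChunk/onFinish options threaded through earlier in this diff. Hedged migration sketch (logToken and saveCompletion are hypothetical):

const result = streamText({
  model, // assumed configured
  prompt: "Summarize the report.",
  onChunk({ chunk }) {
    if (chunk.type === "text-delta") logToken(chunk.textDelta);
  },
  onFinish({ text, usage }) {
    saveCompletion(text, usage);
  },
});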
4703
4542
  const streamPartsTransformer = new TransformStream({
@@ -4785,23 +4624,19 @@ var DefaultStreamTextResult = class {
4785
4624
  });
4786
4625
  return this.fullStream.pipeThrough(callbackTransformer).pipeThrough(streamPartsTransformer).pipeThrough(new TextEncoderStream());
4787
4626
  }
4788
- pipeAIStreamToResponse(response, init) {
4789
- return this.pipeDataStreamToResponse(response, init);
4790
- }
4791
- pipeDataStreamToResponse(response, options) {
4792
- const init = options == null ? void 0 : "init" in options ? options.init : {
4793
- headers: "headers" in options ? options.headers : void 0,
4794
- status: "status" in options ? options.status : void 0,
4795
- statusText: "statusText" in options ? options.statusText : void 0
4796
- };
4797
- const data = options == null ? void 0 : "data" in options ? options.data : void 0;
4798
- const getErrorMessage3 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
4799
- const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
4627
+ pipeDataStreamToResponse(response, {
4628
+ status,
4629
+ statusText,
4630
+ headers,
4631
+ data,
4632
+ getErrorMessage: getErrorMessage3,
4633
+ sendUsage
4634
+ } = {}) {
4800
4635
  writeToServerResponse({
4801
4636
  response,
4802
- status: init == null ? void 0 : init.status,
4803
- statusText: init == null ? void 0 : init.statusText,
4804
- headers: prepareOutgoingHttpHeaders(init, {
4637
+ status,
4638
+ statusText,
4639
+ headers: prepareOutgoingHttpHeaders(headers, {
4805
4640
  contentType: "text/plain; charset=utf-8",
4806
4641
  dataStreamVersion: "v1"
4807
4642
  }),
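Note: pipeDataStreamToResponse replaces the old init-or-bare-fields option union with one flat object; status, statusText and headers are forwarded directly, and only headers pass through prepareOutgoingHttpHeaders. Node.js sketch (model assumed configured; header values illustrative):

import { createServer } from "node:http";

createServer((req, res) => {
  const result = streamText({ model, prompt: "Tell me a story." });
  result.pipeDataStreamToResponse(res, {
    status: 200,
    headers: { "Cache-Control": "no-store" },
    sendUsage: true,
  });
}).listen(3000);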
@@ -4813,15 +4648,12 @@ var DefaultStreamTextResult = class {
4813
4648
  response,
4814
4649
  status: init == null ? void 0 : init.status,
4815
4650
  statusText: init == null ? void 0 : init.statusText,
4816
- headers: prepareOutgoingHttpHeaders(init, {
4651
+ headers: prepareOutgoingHttpHeaders(init == null ? void 0 : init.headers, {
4817
4652
  contentType: "text/plain; charset=utf-8"
4818
4653
  }),
4819
4654
  stream: this.textStream.pipeThrough(new TextEncoderStream())
4820
4655
  });
4821
4656
  }
4822
- toAIStreamResponse(options) {
4823
- return this.toDataStreamResponse(options);
4824
- }
4825
4657
  toDataStream(options) {
4826
4658
  const stream = this.toDataStreamInternal({
4827
4659
  getErrorMessage: options == null ? void 0 : options.getErrorMessage,
@@ -4829,22 +4661,20 @@ var DefaultStreamTextResult = class {
4829
4661
  });
4830
4662
  return (options == null ? void 0 : options.data) ? mergeStreams(options == null ? void 0 : options.data.stream, stream) : stream;
4831
4663
  }
4832
- toDataStreamResponse(options) {
4833
- var _a11;
4834
- const init = options == null ? void 0 : "init" in options ? options.init : {
4835
- headers: "headers" in options ? options.headers : void 0,
4836
- status: "status" in options ? options.status : void 0,
4837
- statusText: "statusText" in options ? options.statusText : void 0
4838
- };
4839
- const data = options == null ? void 0 : "data" in options ? options.data : void 0;
4840
- const getErrorMessage3 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
4841
- const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
4664
+ toDataStreamResponse({
4665
+ headers,
4666
+ status,
4667
+ statusText,
4668
+ data,
4669
+ getErrorMessage: getErrorMessage3,
4670
+ sendUsage
4671
+ } = {}) {
4842
4672
  return new Response(
4843
4673
  this.toDataStream({ data, getErrorMessage: getErrorMessage3, sendUsage }),
4844
4674
  {
4845
- status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
4846
- statusText: init == null ? void 0 : init.statusText,
4847
- headers: prepareResponseHeaders(init, {
4675
+ status,
4676
+ statusText,
4677
+ headers: prepareResponseHeaders(headers, {
4848
4678
  contentType: "text/plain; charset=utf-8",
4849
4679
  dataStreamVersion: "v1"
4850
4680
  })
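Note: toDataStreamResponse gets the same flattening: headers, status, statusText, data, getErrorMessage and sendUsage all sit at the top level instead of under init. Fetch-style route handler sketch (model assumed configured elsewhere):

export async function POST(request) {
  const { prompt } = await request.json();
  const result = streamText({ model, prompt });
  return result.toDataStreamResponse({ sendUsage: false });
}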
@@ -4855,13 +4685,12 @@ var DefaultStreamTextResult = class {
4855
4685
  var _a11;
4856
4686
  return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
4857
4687
  status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
4858
- headers: prepareResponseHeaders(init, {
4688
+ headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
4859
4689
  contentType: "text/plain; charset=utf-8"
4860
4690
  })
4861
4691
  });
4862
4692
  }
4863
4693
  };
4864
- var experimental_streamText = streamText;
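Note: the experimental_streamText alias is removed outright rather than deprecated; migration is a rename at the import site:

import { streamText } from "ai"; // was: import { experimental_streamText } from "ai";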
4865
4694
 
4866
4695
  // core/middleware/wrap-language-model.ts
4867
4696
  var experimental_wrapLanguageModel = ({
@@ -4948,26 +4777,6 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
4948
4777
  static isInstance(error) {
4949
4778
  return AISDKError11.hasMarker(error, marker10);
4950
4779
  }
4951
- /**
4952
- * @deprecated use `isInstance` instead
4953
- */
4954
- static isNoSuchProviderError(error) {
4955
- return error instanceof Error && error.name === name10 && typeof error.providerId === "string" && Array.isArray(error.availableProviders);
4956
- }
4957
- /**
4958
- * @deprecated Do not use this method. It will be removed in the next major version.
4959
- */
4960
- toJSON() {
4961
- return {
4962
- name: this.name,
4963
- message: this.message,
4964
- stack: this.stack,
4965
- modelId: this.modelId,
4966
- modelType: this.modelType,
4967
- providerId: this.providerId,
4968
- availableProviders: this.availableProviders
4969
- };
4970
- }
4971
4780
  };
4972
4781
  _a10 = symbol10;
4973
4782
 
@@ -4980,15 +4789,11 @@ function experimental_createProviderRegistry(providers) {
4980
4789
  }
4981
4790
  return registry;
4982
4791
  }
4983
- var experimental_createModelRegistry = experimental_createProviderRegistry;
4984
4792
  var DefaultProviderRegistry = class {
4985
4793
  constructor() {
4986
4794
  this.providers = {};
4987
4795
  }
4988
- registerProvider({
4989
- id,
4990
- provider
4991
- }) {
4796
+ registerProvider({ id, provider }) {
4992
4797
  this.providers[id] = provider;
4993
4798
  }
4994
4799
  getProvider(id) {
@@ -5024,10 +4829,10 @@ var DefaultProviderRegistry = class {
5024
4829
  return model;
5025
4830
  }
5026
4831
  textEmbeddingModel(id) {
5027
- var _a11, _b, _c;
4832
+ var _a11;
5028
4833
  const [providerId, modelId] = this.splitId(id, "textEmbeddingModel");
5029
4834
  const provider = this.getProvider(providerId);
5030
- const model = (_c = (_a11 = provider.textEmbeddingModel) == null ? void 0 : _a11.call(provider, modelId)) != null ? _c : "textEmbedding" in provider ? (_b = provider.textEmbedding) == null ? void 0 : _b.call(provider, modelId) : void 0;
4835
+ const model = (_a11 = provider.textEmbeddingModel) == null ? void 0 : _a11.call(provider, modelId);
5031
4836
  if (model == null) {
5032
4837
  throw new NoSuchModelError4({
5033
4838
  modelId: id,
@@ -5068,123 +4873,6 @@ function magnitude(vector) {
5068
4873
  return Math.sqrt(dotProduct(vector, vector));
5069
4874
  }
5070
4875
 
5071
- // streams/ai-stream.ts
5072
- import {
5073
- createParser
5074
- } from "eventsource-parser";
5075
- function createEventStreamTransformer(customParser) {
5076
- const textDecoder = new TextDecoder();
5077
- let eventSourceParser;
5078
- return new TransformStream({
5079
- async start(controller) {
5080
- eventSourceParser = createParser(
5081
- (event) => {
5082
- if ("data" in event && event.type === "event" && event.data === "[DONE]" || // Replicate doesn't send [DONE] but does send a 'done' event
5083
- // @see https://replicate.com/docs/streaming
5084
- event.event === "done") {
5085
- controller.terminate();
5086
- return;
5087
- }
5088
- if ("data" in event) {
5089
- const parsedMessage = customParser ? customParser(event.data, {
5090
- event: event.event
5091
- }) : event.data;
5092
- if (parsedMessage)
5093
- controller.enqueue(parsedMessage);
5094
- }
5095
- }
5096
- );
5097
- },
5098
- transform(chunk) {
5099
- eventSourceParser.feed(textDecoder.decode(chunk));
5100
- }
5101
- });
5102
- }
5103
- function createCallbacksTransformer(cb) {
5104
- const textEncoder = new TextEncoder();
5105
- let aggregatedResponse = "";
5106
- const callbacks = cb || {};
5107
- return new TransformStream({
5108
- async start() {
5109
- if (callbacks.onStart)
5110
- await callbacks.onStart();
5111
- },
5112
- async transform(message, controller) {
5113
- const content = typeof message === "string" ? message : message.content;
5114
- controller.enqueue(textEncoder.encode(content));
5115
- aggregatedResponse += content;
5116
- if (callbacks.onToken)
5117
- await callbacks.onToken(content);
5118
- if (callbacks.onText && typeof message === "string") {
5119
- await callbacks.onText(message);
5120
- }
5121
- },
5122
- async flush() {
5123
- if (callbacks.onCompletion) {
5124
- await callbacks.onCompletion(aggregatedResponse);
5125
- }
5126
- }
5127
- });
5128
- }
5129
- function trimStartOfStreamHelper() {
5130
- let isStreamStart = true;
5131
- return (text) => {
5132
- if (isStreamStart) {
5133
- text = text.trimStart();
5134
- if (text)
5135
- isStreamStart = false;
5136
- }
5137
- return text;
5138
- };
5139
- }
5140
- function AIStream(response, customParser, callbacks) {
5141
- if (!response.ok) {
5142
- if (response.body) {
5143
- const reader = response.body.getReader();
5144
- return new ReadableStream({
5145
- async start(controller) {
5146
- const { done, value } = await reader.read();
5147
- if (!done) {
5148
- const errorText = new TextDecoder().decode(value);
5149
- controller.error(new Error(`Response error: ${errorText}`));
5150
- }
5151
- }
5152
- });
5153
- } else {
5154
- return new ReadableStream({
5155
- start(controller) {
5156
- controller.error(new Error("Response error: No response body"));
5157
- }
5158
- });
5159
- }
5160
- }
5161
- const responseBodyStream = response.body || createEmptyReadableStream();
5162
- return responseBodyStream.pipeThrough(createEventStreamTransformer(customParser)).pipeThrough(createCallbacksTransformer(callbacks));
5163
- }
5164
- function createEmptyReadableStream() {
5165
- return new ReadableStream({
5166
- start(controller) {
5167
- controller.close();
5168
- }
5169
- });
5170
- }
5171
- function readableFromAsyncIterable(iterable) {
5172
- let it = iterable[Symbol.asyncIterator]();
5173
- return new ReadableStream({
5174
- async pull(controller) {
5175
- const { done, value } = await it.next();
5176
- if (done)
5177
- controller.close();
5178
- else
5179
- controller.enqueue(value);
5180
- },
5181
- async cancel(reason) {
5182
- var _a11;
5183
- await ((_a11 = it.return) == null ? void 0 : _a11.call(it, reason));
5184
- }
5185
- });
5186
- }
5187
-
5188
4876
  // streams/assistant-response.ts
5189
4877
  import {
5190
4878
  formatStreamPart as formatStreamPart2
@@ -5256,8 +4944,6 @@ function AssistantResponse({ threadId, messageId }, process2) {
5256
4944
  );
5257
4945
  try {
5258
4946
  await process2({
5259
- threadId,
5260
- messageId,
5261
4947
  sendMessage,
5262
4948
  sendDataMessage,
5263
4949
  forwardStream
@@ -5280,16 +4966,40 @@ function AssistantResponse({ threadId, messageId }, process2) {
5280
4966
  }
5281
4967
  });
5282
4968
  }
5283
- var experimental_AssistantResponse = AssistantResponse;
5284
4969
 
5285
4970
  // streams/langchain-adapter.ts
5286
4971
  var langchain_adapter_exports = {};
5287
4972
  __export(langchain_adapter_exports, {
5288
- toAIStream: () => toAIStream,
5289
4973
  toDataStream: () => toDataStream,
5290
4974
  toDataStreamResponse: () => toDataStreamResponse
5291
4975
  });
5292
4976
 
4977
+ // streams/stream-callbacks.ts
4978
+ function createCallbacksTransformer(callbacks = {}) {
4979
+ const textEncoder = new TextEncoder();
4980
+ let aggregatedResponse = "";
4981
+ return new TransformStream({
4982
+ async start() {
4983
+ if (callbacks.onStart)
4984
+ await callbacks.onStart();
4985
+ },
4986
+ async transform(message, controller) {
4987
+ controller.enqueue(textEncoder.encode(message));
4988
+ aggregatedResponse += message;
4989
+ if (callbacks.onToken)
4990
+ await callbacks.onToken(message);
4991
+ if (callbacks.onText && typeof message === "string") {
4992
+ await callbacks.onText(message);
4993
+ }
4994
+ },
4995
+ async flush() {
4996
+ if (callbacks.onCompletion) {
4997
+ await callbacks.onCompletion(aggregatedResponse);
4998
+ }
4999
+ }
5000
+ });
5001
+ }
5002
+
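Note: the callbacks transformer moves out of the deleted streams/ai-stream.ts into this dedicated module and now assumes plain string chunks (the old typeof message === "string" ? message : message.content branch is gone). It is no longer exported, but the adapters below still pipe through it; sketch with the LangChain adapter from this bundle (lcStream stands in for a LangChain string stream):

import { LangChainAdapter } from "ai";

const dataStream = LangChainAdapter.toDataStream(lcStream, {
  onStart: () => console.log("stream started"),
  onCompletion: (fullText) => console.log("done:", fullText.length, "chars"),
});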
5293
5003
  // streams/stream-data.ts
5294
5004
  import { formatStreamPart as formatStreamPart3 } from "@ai-sdk/ui-utils";
5295
5005
 
@@ -5297,7 +5007,7 @@ import { formatStreamPart as formatStreamPart3 } from "@ai-sdk/ui-utils";
5297
5007
  var HANGING_STREAM_WARNING_TIME_MS = 15 * 1e3;
5298
5008
 
5299
5009
  // streams/stream-data.ts
5300
- var StreamData2 = class {
5010
+ var StreamData = class {
5301
5011
  constructor() {
5302
5012
  this.encoder = new TextEncoder();
5303
5013
  this.controller = null;
@@ -5368,13 +5078,8 @@ function createStreamDataTransformer() {
5368
5078
  }
5369
5079
  });
5370
5080
  }
5371
- var experimental_StreamData = class extends StreamData2 {
5372
- };
5373
5081
 
5374
5082
  // streams/langchain-adapter.ts
5375
- function toAIStream(stream, callbacks) {
5376
- return toDataStream(stream, callbacks);
5377
- }
5378
5083
  function toDataStream(stream, callbacks) {
5379
5084
  return stream.pipeThrough(
5380
5085
  new TransformStream({
@@ -5407,7 +5112,7 @@ function toDataStreamResponse(stream, options) {
5407
5112
  return new Response(responseStream, {
5408
5113
  status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
5409
5114
  statusText: init == null ? void 0 : init.statusText,
5410
- headers: prepareResponseHeaders(init, {
5115
+ headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
5411
5116
  contentType: "text/plain; charset=utf-8",
5412
5117
  dataStreamVersion: "v1"
5413
5118
  })
@@ -5432,8 +5137,16 @@ __export(llamaindex_adapter_exports, {
5432
5137
  toDataStream: () => toDataStream2,
5433
5138
  toDataStreamResponse: () => toDataStreamResponse2
5434
5139
  });
5140
+ import { convertAsyncIteratorToReadableStream } from "@ai-sdk/provider-utils";
5435
5141
  function toDataStream2(stream, callbacks) {
5436
- return toReadableStream(stream).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(createStreamDataTransformer());
5142
+ const trimStart = trimStartOfStream();
5143
+ return convertAsyncIteratorToReadableStream(stream[Symbol.asyncIterator]()).pipeThrough(
5144
+ new TransformStream({
5145
+ async transform(message, controller) {
5146
+ controller.enqueue(trimStart(message.delta));
5147
+ }
5148
+ })
5149
+ ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(createStreamDataTransformer());
5437
5150
  }
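Note: the LlamaIndex toDataStream now builds on convertAsyncIteratorToReadableStream from @ai-sdk/provider-utils instead of the hand-rolled toReadableStream removed further down, and it expects chunks carrying a string delta field, trimmed at stream start. Endpoint sketch (chatEngine is a hypothetical LlamaIndex chat engine; the adapter is exported from this bundle as LlamaIndexAdapter):

import { LlamaIndexAdapter } from "ai";

export async function POST() {
  const stream = await chatEngine.chat({ message: "Hello", stream: true });
  return LlamaIndexAdapter.toDataStreamResponse(stream);
}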
5438
5151
  function toDataStreamResponse2(stream, options = {}) {
5439
5152
  var _a11;
@@ -5443,78 +5156,25 @@ function toDataStreamResponse2(stream, options = {}) {
5443
5156
  return new Response(responseStream, {
5444
5157
  status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
5445
5158
  statusText: init == null ? void 0 : init.statusText,
5446
- headers: prepareResponseHeaders(init, {
5159
+ headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
5447
5160
  contentType: "text/plain; charset=utf-8",
5448
5161
  dataStreamVersion: "v1"
5449
5162
  })
5450
5163
  });
5451
5164
  }
5452
- function toReadableStream(res) {
5453
- const it = res[Symbol.asyncIterator]();
5454
- const trimStartOfStream = trimStartOfStreamHelper();
5455
- return new ReadableStream({
5456
- async pull(controller) {
5457
- var _a11;
5458
- const { value, done } = await it.next();
5459
- if (done) {
5460
- controller.close();
5461
- return;
5462
- }
5463
- const text = trimStartOfStream((_a11 = value.delta) != null ? _a11 : "");
5464
- if (text) {
5465
- controller.enqueue(text);
5466
- }
5165
+ function trimStartOfStream() {
5166
+ let isStreamStart = true;
5167
+ return (text) => {
5168
+ if (isStreamStart) {
5169
+ text = text.trimStart();
5170
+ if (text)
5171
+ isStreamStart = false;
5467
5172
  }
5468
- });
5469
- }
5470
-
5471
- // streams/stream-to-response.ts
5472
- function streamToResponse(res, response, init, data) {
5473
- var _a11;
5474
- response.writeHead((_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200, {
5475
- "Content-Type": "text/plain; charset=utf-8",
5476
- ...init == null ? void 0 : init.headers
5477
- });
5478
- let processedStream = res;
5479
- if (data) {
5480
- processedStream = mergeStreams(data.stream, res);
5481
- }
5482
- const reader = processedStream.getReader();
5483
- function read() {
5484
- reader.read().then(({ done, value }) => {
5485
- if (done) {
5486
- response.end();
5487
- return;
5488
- }
5489
- response.write(value);
5490
- read();
5491
- });
5492
- }
5493
- read();
5173
+ return text;
5174
+ };
5494
5175
  }
5495
-
5496
- // streams/streaming-text-response.ts
5497
- var StreamingTextResponse = class extends Response {
5498
- constructor(res, init, data) {
5499
- let processedStream = res;
5500
- if (data) {
5501
- processedStream = mergeStreams(data.stream, res);
5502
- }
5503
- super(processedStream, {
5504
- ...init,
5505
- status: 200,
5506
- headers: prepareResponseHeaders(init, {
5507
- contentType: "text/plain; charset=utf-8"
5508
- })
5509
- });
5510
- }
5511
- };
5512
-
5513
- // streams/index.ts
5514
- var generateId2 = generateIdImpl;
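Note: streams/stream-to-response.ts and streams/streaming-text-response.ts are deleted wholesale, along with the generateId re-export shim, so Response production now lives entirely on the result object. Hedged migration sketch:

function handleWebRequest(result) {
  // replaces: new StreamingTextResponse(result.toAIStream())
  return result.toTextStreamResponse();
}

function handleNodeResponse(result, serverResponse) {
  // replaces: streamToResponse(result.toAIStream(), serverResponse)
  result.pipeTextStreamToResponse(serverResponse);
}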
5515
5176
  export {
5516
5177
  AISDKError10 as AISDKError,
5517
- AIStream,
5518
5178
  APICallError2 as APICallError,
5519
5179
  AssistantResponse,
5520
5180
  DownloadError,
@@ -5536,28 +5196,18 @@ export {
5536
5196
  NoSuchProviderError,
5537
5197
  NoSuchToolError,
5538
5198
  RetryError,
5539
- StreamData2 as StreamData,
5540
- StreamingTextResponse,
5199
+ StreamData,
5541
5200
  TypeValidationError2 as TypeValidationError,
5542
5201
  UnsupportedFunctionalityError2 as UnsupportedFunctionalityError,
5543
5202
  convertToCoreMessages,
5544
5203
  cosineSimilarity,
5545
- createCallbacksTransformer,
5546
- createEventStreamTransformer,
5547
5204
  createStreamDataTransformer,
5548
5205
  embed,
5549
5206
  embedMany,
5550
- experimental_AssistantResponse,
5551
- experimental_StreamData,
5552
- experimental_createModelRegistry,
5553
5207
  experimental_createProviderRegistry,
5554
5208
  experimental_customProvider,
5555
- experimental_generateObject,
5556
- experimental_generateText,
5557
- experimental_streamObject,
5558
- experimental_streamText,
5559
5209
  experimental_wrapLanguageModel,
5560
- formatStreamPart,
5210
+ formatStreamPart4 as formatStreamPart,
5561
5211
  generateId2 as generateId,
5562
5212
  generateObject,
5563
5213
  generateText,
@@ -5565,11 +5215,8 @@ export {
5565
5215
  parseStreamPart,
5566
5216
  processDataProtocolResponse,
5567
5217
  readDataStream,
5568
- readableFromAsyncIterable,
5569
5218
  streamObject,
5570
5219
  streamText,
5571
- streamToResponse,
5572
- tool,
5573
- trimStartOfStreamHelper
5220
+ tool
5574
5221
  };
5575
5222
  //# sourceMappingURL=index.mjs.map
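Note: read against the export list above, this canary range removes AIStream, StreamingTextResponse, streamToResponse, readableFromAsyncIterable, trimStartOfStreamHelper, createEventStreamTransformer, createCallbacksTransformer and the experimental_ aliases of stabilized APIs (experimental_streamText, experimental_generateText, experimental_AssistantResponse, experimental_StreamData, experimental_createModelRegistry, and friends), while experimental_createProviderRegistry, experimental_customProvider and experimental_wrapLanguageModel stay. A sketch of imports that still resolve against the list above:

import {
  streamText,
  generateText,
  generateObject,
  streamObject,
  StreamData,
  AssistantResponse,
  formatStreamPart,
  generateId,
  tool,
} from "ai";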