ai 3.3.25 → 3.3.27

This diff compares the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -73,7 +73,9 @@ __export(streams_exports, {
  experimental_createProviderRegistry: () => experimental_createProviderRegistry,
  experimental_customProvider: () => experimental_customProvider,
  experimental_generateObject: () => experimental_generateObject,
+ experimental_generateText: () => experimental_generateText,
  experimental_streamObject: () => experimental_streamObject,
+ experimental_streamText: () => experimental_streamText,
  formatStreamPart: () => import_ui_utils10.formatStreamPart,
  generateId: () => generateId2,
  generateObject: () => generateObject,
@@ -92,7 +94,7 @@ __export(streams_exports, {
  });
  module.exports = __toCommonJS(streams_exports);
  var import_ui_utils10 = require("@ai-sdk/ui-utils");
- var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");

  // core/index.ts
  var import_ui_utils6 = require("@ai-sdk/ui-utils");
@@ -1052,7 +1054,13 @@ function convertToLanguageModelMessage(message, downloadedImages) {
  content: message.content.filter(
  // remove empty text parts:
  (part) => part.type !== "text" || part.text !== ""
- ),
+ ).map((part) => {
+ const { experimental_providerMetadata, ...rest } = part;
+ return {
+ ...rest,
+ providerMetadata: experimental_providerMetadata
+ };
+ }),
  providerMetadata: message.experimental_providerMetadata
  };
  }
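
Note: with this change, an `experimental_providerMetadata` field on an individual message part is renamed to `providerMetadata` when the message is converted for the provider, so per-part metadata now reaches the model call. A minimal sketch, assuming a provider that reads per-part metadata (the metadata key and shape below are illustrative, not from this diff):

    const result = await generateText({
      model,
      messages: [{
        role: "user",
        content: [{
          type: "text",
          text: "Hello",
          // forwarded to the provider as `providerMetadata` by the mapping above
          experimental_providerMetadata: { example: { hint: "cache" } }
        }]
      }]
    });
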
@@ -1385,8 +1393,8 @@ function validatePrompt(prompt) {
  throw new Error("unreachable");
  }

- // core/types/token-usage.ts
- function calculateCompletionTokenUsage(usage) {
+ // core/types/usage.ts
+ function calculateLanguageModelUsage(usage) {
  return {
  promptTokens: usage.promptTokens,
  completionTokens: usage.completionTokens,
@@ -1716,6 +1724,7 @@ function validateObjectGenerationInput({
  }

  // core/generate-object/generate-object.ts
+ var originalGenerateId = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj-", length: 24 });
  async function generateObject({
  model,
  schema: inputSchema,
@@ -1730,6 +1739,10 @@ async function generateObject({
  abortSignal,
  headers,
  experimental_telemetry: telemetry,
+ _internal: {
+ generateId: generateId3 = originalGenerateId,
+ currentDate = () => /* @__PURE__ */ new Date()
+ } = {},
  ...settings
  }) {
  var _a11;
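
Note: `generateObject` (and, further down, `streamObject`, `generateText`, and `streamText`) gains an `_internal` parameter that lets the id generator and clock used for response metadata be overridden. It is an internal hook rather than public API; a minimal sketch of pinning both in a test, assuming a configured `model` and `schema`:

    const result = await generateObject({
      model,
      schema,
      prompt: "Generate one user record.",
      // _internal is an internal seam and may change without notice
      _internal: {
        generateId: () => "aiobj-test-0000000000000000",
        currentDate: () => new Date("2024-01-01T00:00:00Z")
      }
    });
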
@@ -1783,6 +1796,7 @@ async function generateObject({
  let usage;
  let warnings;
  let rawResponse;
+ let response;
  let logprobs;
  let providerMetadata;
  switch (mode) {
@@ -1831,6 +1845,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
+ var _a12, _b, _c, _d, _e, _f;
  const result2 = await model.doGenerate({
  mode: {
  type: "object-json",
@@ -1847,12 +1862,20 @@ async function generateObject({
  if (result2.text === void 0) {
  throw new NoObjectGeneratedError();
  }
+ const responseData = {
+ id: (_b = (_a12 = result2.response) == null ? void 0 : _a12.id) != null ? _b : generateId3(),
+ timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+ modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
+ };
  span2.setAttributes(
  selectTelemetryAttributes({
  telemetry,
  attributes: {
  "ai.response.finishReason": result2.finishReason,
  "ai.response.object": { output: () => result2.text },
+ "ai.response.id": responseData.id,
+ "ai.response.model": responseData.modelId,
+ "ai.response.timestamp": responseData.timestamp.toISOString(),
  "ai.usage.promptTokens": result2.usage.promptTokens,
  "ai.usage.completionTokens": result2.usage.completionTokens,
  // deprecated:
@@ -1860,12 +1883,14 @@ async function generateObject({
  "ai.result.object": { output: () => result2.text },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result2.finishReason],
+ "gen_ai.response.id": responseData.id,
+ "gen_ai.response.model": responseData.modelId,
  "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
  "gen_ai.usage.completion_tokens": result2.usage.completionTokens
  }
  })
  );
- return { ...result2, objectText: result2.text };
+ return { ...result2, objectText: result2.text, responseData };
  }
  })
  );
@@ -1876,6 +1901,7 @@ async function generateObject({
  rawResponse = generateResult.rawResponse;
  logprobs = generateResult.logprobs;
  providerMetadata = generateResult.providerMetadata;
+ response = generateResult.responseData;
  break;
  }
  case "tool": {
@@ -1920,7 +1946,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a12, _b;
+ var _a12, _b, _c, _d, _e, _f, _g, _h;
  const result2 = await model.doGenerate({
  mode: {
  type: "object-tool",
@@ -1941,12 +1967,20 @@ async function generateObject({
  if (objectText === void 0) {
  throw new NoObjectGeneratedError();
  }
+ const responseData = {
+ id: (_d = (_c = result2.response) == null ? void 0 : _c.id) != null ? _d : generateId3(),
+ timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
+ modelId: (_h = (_g = result2.response) == null ? void 0 : _g.modelId) != null ? _h : model.modelId
+ };
  span2.setAttributes(
  selectTelemetryAttributes({
  telemetry,
  attributes: {
  "ai.response.finishReason": result2.finishReason,
  "ai.response.object": { output: () => objectText },
+ "ai.response.id": responseData.id,
+ "ai.response.model": responseData.modelId,
+ "ai.response.timestamp": responseData.timestamp.toISOString(),
  "ai.usage.promptTokens": result2.usage.promptTokens,
  "ai.usage.completionTokens": result2.usage.completionTokens,
  // deprecated:
@@ -1954,12 +1988,14 @@ async function generateObject({
  "ai.result.object": { output: () => objectText },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result2.finishReason],
+ "gen_ai.response.id": responseData.id,
+ "gen_ai.response.model": responseData.modelId,
  "gen_ai.usage.input_tokens": result2.usage.promptTokens,
  "gen_ai.usage.output_tokens": result2.usage.completionTokens
  }
  })
  );
- return { ...result2, objectText };
+ return { ...result2, objectText, responseData };
  }
  })
  );
@@ -1970,6 +2006,7 @@ async function generateObject({
  rawResponse = generateResult.rawResponse;
  logprobs = generateResult.logprobs;
  providerMetadata = generateResult.providerMetadata;
+ response = generateResult.responseData;
  break;
  }
  case void 0: {
@@ -2013,9 +2050,12 @@ async function generateObject({
  return new DefaultGenerateObjectResult({
  object: validationResult.value,
  finishReason,
- usage: calculateCompletionTokenUsage(usage),
+ usage: calculateLanguageModelUsage(usage),
  warnings,
- rawResponse,
+ response: {
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ },
  logprobs,
  providerMetadata
  });
@@ -2028,9 +2068,12 @@ var DefaultGenerateObjectResult = class {
  this.finishReason = options.finishReason;
  this.usage = options.usage;
  this.warnings = options.warnings;
- this.rawResponse = options.rawResponse;
- this.logprobs = options.logprobs;
  this.experimental_providerMetadata = options.providerMetadata;
+ this.response = options.response;
+ this.rawResponse = {
+ headers: options.response.headers
+ };
+ this.logprobs = options.logprobs;
  }
  toJsonResponse(init) {
  var _a11;
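
Note: `generateObject` results now expose `response` metadata ({ id, timestamp, modelId, headers }); `rawResponse` is kept as a headers-only shim for backwards compatibility. A minimal sketch of reading it, assuming a configured `model` and `schema`:

    const { object, response } = await generateObject({
      model,
      schema,
      prompt: "Generate one user record."
    });
    console.log(response.id);        // provider id, or a generated "aiobj-" fallback
    console.log(response.modelId);   // provider modelId, or model.modelId
    console.log(response.timestamp); // provider timestamp, or currentDate()
    console.log(response.headers);   // HTTP headers from rawResponse, if any
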
@@ -2107,6 +2150,8 @@ function now() {
  }

  // core/generate-object/stream-object.ts
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var originalGenerateId2 = (0, import_provider_utils7.createIdGenerator)({ prefix: "aiobj-", length: 24 });
  async function streamObject({
  model,
  schema: inputSchema,
@@ -2122,7 +2167,11 @@ async function streamObject({
  headers,
  experimental_telemetry: telemetry,
  onFinish,
- _internal: { now: now2 = now } = {},
+ _internal: {
+ generateId: generateId3 = originalGenerateId2,
+ currentDate = () => /* @__PURE__ */ new Date(),
+ now: now2 = now
+ } = {},
  ...settings
  }) {
  var _a11;
@@ -2206,6 +2255,7 @@ async function streamObject({
  case "text-delta":
  controller.enqueue(chunk.textDelta);
  break;
+ case "response-metadata":
  case "finish":
  case "error":
  controller.enqueue(chunk);
@@ -2246,6 +2296,7 @@ async function streamObject({
  case "tool-call-delta":
  controller.enqueue(chunk.argsTextDelta);
  break;
+ case "response-metadata":
  case "finish":
  case "error":
  controller.enqueue(chunk);
@@ -2317,7 +2368,10 @@ async function streamObject({
  doStreamSpan,
  telemetry,
  startTimestampMs,
- now: now2
+ modelId: model.modelId,
+ now: now2,
+ currentDate,
+ generateId: generateId3
  });
  }
  });
@@ -2333,7 +2387,10 @@ var DefaultStreamObjectResult = class {
  doStreamSpan,
  telemetry,
  startTimestampMs,
- now: now2
+ modelId,
+ now: now2,
+ currentDate,
+ generateId: generateId3
  }) {
  this.warnings = warnings;
  this.rawResponse = rawResponse;
@@ -2341,6 +2398,8 @@ var DefaultStreamObjectResult = class {
  this.objectPromise = new DelayedPromise();
  const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
  this.usage = usagePromise;
+ const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
+ this.response = responsePromise;
  const {
  resolve: resolveProviderMetadata,
  promise: providerMetadataPromise
@@ -2353,6 +2412,11 @@ var DefaultStreamObjectResult = class {
  let error;
  let accumulatedText = "";
  let textDelta = "";
+ let response = {
+ id: generateId3(),
+ timestamp: currentDate(),
+ modelId
+ };
  let latestObjectJson = void 0;
  let latestObject = void 0;
  let isFirstChunk = true;
@@ -2361,6 +2425,7 @@ var DefaultStreamObjectResult = class {
  this.originalStream = stream.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
+ var _a11, _b, _c;
  if (isFirstChunk) {
  const msToFirstChunk = now2() - startTimestampMs;
  isFirstChunk = false;
@@ -2401,16 +2466,28 @@ var DefaultStreamObjectResult = class {
  return;
  }
  switch (chunk.type) {
+ case "response-metadata": {
+ response = {
+ id: (_a11 = chunk.id) != null ? _a11 : response.id,
+ timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
+ modelId: (_c = chunk.modelId) != null ? _c : response.modelId
+ };
+ break;
+ }
  case "finish": {
  if (textDelta !== "") {
  controller.enqueue({ type: "text-delta", textDelta });
  }
  finishReason = chunk.finishReason;
- usage = calculateCompletionTokenUsage(chunk.usage);
+ usage = calculateLanguageModelUsage(chunk.usage);
  providerMetadata = chunk.providerMetadata;
- controller.enqueue({ ...chunk, usage });
+ controller.enqueue({ ...chunk, usage, response });
  resolveUsage(usage);
  resolveProviderMetadata(providerMetadata);
+ resolveResponse({
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ });
  const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
  if (validationResult.success) {
  object = validationResult.value;
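
Note: the stream handler above introduces a "response-metadata" chunk type whose fields, when present, override the locally generated defaults. A hedged sketch of such a chunk as a provider stream might emit it (shape inferred from the handler above):

    controller.enqueue({
      type: "response-metadata",
      id: "resp_abc123",          // optional; falls back to generateId()
      timestamp: new Date(),      // optional; falls back to currentDate()
      modelId: "example-model-1"  // optional; falls back to the configured model id
    });
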
@@ -2443,15 +2520,20 @@ var DefaultStreamObjectResult = class {
  "ai.response.object": {
  output: () => JSON.stringify(object)
  },
+ "ai.response.id": response.id,
+ "ai.response.model": response.modelId,
+ "ai.response.timestamp": response.timestamp.toISOString(),
  "ai.usage.promptTokens": finalUsage.promptTokens,
  "ai.usage.completionTokens": finalUsage.completionTokens,
  // deprecated
  "ai.finishReason": finishReason,
  "ai.result.object": { output: () => JSON.stringify(object) },
  // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [finishReason],
+ "gen_ai.response.id": response.id,
+ "gen_ai.response.model": response.modelId,
  "gen_ai.usage.input_tokens": finalUsage.promptTokens,
- "gen_ai.usage.output_tokens": finalUsage.completionTokens,
- "gen_ai.response.finish_reasons": [finishReason]
+ "gen_ai.usage.output_tokens": finalUsage.completionTokens
  }
  })
  );
@@ -2475,6 +2557,10 @@ var DefaultStreamObjectResult = class {
  object,
  error,
  rawResponse,
+ response: {
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ },
  warnings,
  experimental_providerMetadata: providerMetadata
  }));
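
Note: `streamObject` results now expose a `response` promise that resolves with the merged metadata once the "finish" chunk arrives; the same object is passed to `onFinish`. A minimal sketch, assuming a configured `model` and `schema`:

    const result = await streamObject({ model, schema, prompt: "Generate one user record." });
    for await (const partialObject of result.partialObjectStream) {
      // consume partial objects as they arrive
    }
    const response = await result.response; // { id, timestamp, modelId, headers }
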
@@ -2577,6 +2663,9 @@ var DefaultStreamObjectResult = class {
  };
  var experimental_streamObject = streamObject;

+ // core/generate-text/generate-text.ts
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
+
  // core/prompt/prepare-tools-and-tool-choice.ts
  var import_ui_utils3 = require("@ai-sdk/ui-utils");

@@ -2633,7 +2722,7 @@ function toResponseMessages({
  }

  // core/generate-text/tool-call.ts
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
  var import_ui_utils4 = require("@ai-sdk/ui-utils");

  // errors/invalid-tool-arguments-error.ts
@@ -2738,7 +2827,7 @@ function parseToolCall({
  availableTools: Object.keys(tools)
  });
  }
- const parseResult = (0, import_provider_utils7.safeParseJSON)({
+ const parseResult = (0, import_provider_utils8.safeParseJSON)({
  text: toolCall.args,
  schema: (0, import_ui_utils4.asSchema)(tool2.parameters)
  });
@@ -2758,6 +2847,7 @@ function parseToolCall({
  }

  // core/generate-text/generate-text.ts
+ var originalGenerateId3 = (0, import_provider_utils9.createIdGenerator)({ prefix: "aitxt-", length: 24 });
  async function generateText({
  model,
  tools,
@@ -2771,6 +2861,10 @@ async function generateText({
  maxAutomaticRoundtrips = 0,
  maxToolRoundtrips = maxAutomaticRoundtrips,
  experimental_telemetry: telemetry,
+ _internal: {
+ generateId: generateId3 = originalGenerateId3,
+ currentDate = () => /* @__PURE__ */ new Date()
+ } = {},
  ...settings
  }) {
  var _a11;
@@ -2800,7 +2894,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a12, _b, _c;
+ var _a12, _b, _c, _d, _e;
  const retry = retryWithExponentialBackoff({ maxRetries });
  const validatedPrompt = validatePrompt({
  system,
@@ -2858,6 +2952,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span2) => {
+ var _a13, _b2, _c2, _d2, _e2, _f;
  const result = await model.doGenerate({
  mode,
  ...callSettings,
@@ -2866,6 +2961,11 @@ async function generateText({
  abortSignal,
  headers
  });
+ const responseData = {
+ id: (_b2 = (_a13 = result.response) == null ? void 0 : _a13.id) != null ? _b2 : generateId3(),
+ timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+ modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId
+ };
  span2.setAttributes(
  selectTelemetryAttributes({
  telemetry,
@@ -2877,6 +2977,9 @@ async function generateText({
  "ai.response.toolCalls": {
  output: () => JSON.stringify(result.toolCalls)
  },
+ "ai.response.id": responseData.id,
+ "ai.response.model": responseData.modelId,
+ "ai.response.timestamp": responseData.timestamp.toISOString(),
  "ai.usage.promptTokens": result.usage.promptTokens,
  "ai.usage.completionTokens": result.usage.completionTokens,
  // deprecated:
@@ -2889,12 +2992,14 @@ async function generateText({
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result.finishReason],
+ "gen_ai.response.id": responseData.id,
+ "gen_ai.response.model": responseData.modelId,
  "gen_ai.usage.input_tokens": result.usage.promptTokens,
  "gen_ai.usage.output_tokens": result.usage.completionTokens
  }
  })
  );
- return result;
+ return { ...result, response: responseData };
  }
  })
  );
@@ -2907,7 +3012,7 @@ async function generateText({
  tracer,
  telemetry
  });
- const currentUsage = calculateCompletionTokenUsage(
+ const currentUsage = calculateLanguageModelUsage(
  currentModelResponse.usage
  );
  usage.completionTokens += currentUsage.completionTokens;
@@ -2920,7 +3025,11 @@ async function generateText({
  finishReason: currentModelResponse.finishReason,
  usage: currentUsage,
  warnings: currentModelResponse.warnings,
- logprobs: currentModelResponse.logprobs
+ logprobs: currentModelResponse.logprobs,
+ response: {
+ ...currentModelResponse.response,
+ headers: (_c = currentModelResponse.rawResponse) == null ? void 0 : _c.headers
+ }
  });
  const newResponseMessages = toResponseMessages({
  text: currentModelResponse.text,
@@ -2967,13 +3076,16 @@ async function generateText({
  // Always return a string so that the caller doesn't have to check for undefined.
  // If they need to check if the model did not return any text,
  // they can check the length of the string:
- text: (_c = currentModelResponse.text) != null ? _c : "",
+ text: (_d = currentModelResponse.text) != null ? _d : "",
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
  finishReason: currentModelResponse.finishReason,
  usage,
  warnings: currentModelResponse.warnings,
- rawResponse: currentModelResponse.rawResponse,
+ response: {
+ ...currentModelResponse.response,
+ headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
+ },
  logprobs: currentModelResponse.logprobs,
  responseMessages,
  roundtrips,
@@ -3049,13 +3161,20 @@ var DefaultGenerateTextResult = class {
  this.finishReason = options.finishReason;
  this.usage = options.usage;
  this.warnings = options.warnings;
- this.rawResponse = options.rawResponse;
- this.logprobs = options.logprobs;
+ this.response = options.response;
  this.responseMessages = options.responseMessages;
  this.roundtrips = options.roundtrips;
  this.experimental_providerMetadata = options.providerMetadata;
+ this.rawResponse = {
+ headers: options.response.headers
+ };
+ this.logprobs = options.logprobs;
  }
  };
+ var experimental_generateText = generateText;
+
+ // core/generate-text/stream-text.ts
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");

  // core/util/create-stitchable-stream.ts
  function createStitchableStream() {
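
Note: `generateText` results (including each entry in `roundtrips`) now carry the same `response` metadata, with `rawResponse` reduced to a headers-only shim, and `experimental_generateText` is exported as an alias. A minimal sketch, assuming a configured `model`:

    const result = await generateText({ model, prompt: "Say hello." });
    console.log(result.response.id, result.response.modelId);
    for (const roundtrip of result.roundtrips) {
      console.log(roundtrip.response.id); // per-roundtrip response metadata
    }
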
@@ -3230,6 +3349,7 @@ function runToolsTransformation({
  const chunkType = chunk.type;
  switch (chunkType) {
  case "text-delta":
+ case "response-metadata":
  case "error": {
  controller.enqueue(chunk);
  break;
@@ -3350,7 +3470,7 @@ function runToolsTransformation({
  type: "finish",
  finishReason: chunk.finishReason,
  logprobs: chunk.logprobs,
- usage: calculateCompletionTokenUsage(chunk.usage),
+ usage: calculateLanguageModelUsage(chunk.usage),
  experimental_providerMetadata: chunk.providerMetadata
  });
  break;
@@ -3396,6 +3516,7 @@ function runToolsTransformation({
  }

  // core/generate-text/stream-text.ts
+ var originalGenerateId4 = (0, import_provider_utils10.createIdGenerator)({ prefix: "aitxt-", length: 24 });
  async function streamText({
  model,
  tools,
@@ -3411,7 +3532,11 @@ async function streamText({
  experimental_toolCallStreaming: toolCallStreaming = false,
  onChunk,
  onFinish,
- _internal: { now: now2 = now } = {},
+ _internal: {
+ now: now2 = now,
+ generateId: generateId3 = originalGenerateId4,
+ currentDate = () => /* @__PURE__ */ new Date()
+ } = {},
  ...settings
  }) {
  var _a11;
@@ -3537,7 +3662,10 @@ async function streamText({
  maxToolRoundtrips,
  startRoundtrip,
  promptMessages,
- now: now2
+ modelId: model.modelId,
+ now: now2,
+ currentDate,
+ generateId: generateId3
  });
  }
  });
@@ -3556,7 +3684,10 @@ var DefaultStreamTextResult = class {
  maxToolRoundtrips,
  startRoundtrip,
  promptMessages,
- now: now2
+ modelId,
+ now: now2,
+ currentDate,
+ generateId: generateId3
  }) {
  this.warnings = warnings;
  this.rawResponse = rawResponse;
@@ -3575,6 +3706,8 @@ var DefaultStreamTextResult = class {
  promise: providerMetadataPromise
  } = createResolvablePromise();
  this.experimental_providerMetadata = providerMetadataPromise;
+ const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
+ this.response = responsePromise;
  const {
  stream: stitchableStream,
  addStream,
@@ -3606,10 +3739,16 @@ var DefaultStreamTextResult = class {
  let roundtripFirstChunk = true;
  let roundtripText = "";
  let roundtripLogProbs;
+ let roundtripResponse = {
+ id: generateId3(),
+ timestamp: currentDate(),
+ modelId
+ };
  addStream(
  stream2.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
+ var _a11, _b, _c;
  if (roundtripFirstChunk) {
  const msToFirstChunk = now2() - startTimestamp;
  roundtripFirstChunk = false;
@@ -3629,22 +3768,33 @@ var DefaultStreamTextResult = class {
  }
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta":
+ case "text-delta": {
  controller.enqueue(chunk);
  roundtripText += chunk.textDelta;
  await (onChunk == null ? void 0 : onChunk({ chunk }));
  break;
- case "tool-call":
+ }
+ case "tool-call": {
  controller.enqueue(chunk);
  roundtripToolCalls.push(chunk);
  await (onChunk == null ? void 0 : onChunk({ chunk }));
  break;
- case "tool-result":
+ }
+ case "tool-result": {
  controller.enqueue(chunk);
  roundtripToolResults.push(chunk);
  await (onChunk == null ? void 0 : onChunk({ chunk }));
  break;
- case "finish":
+ }
+ case "response-metadata": {
+ roundtripResponse = {
+ id: (_a11 = chunk.id) != null ? _a11 : roundtripResponse.id,
+ timestamp: (_b = chunk.timestamp) != null ? _b : roundtripResponse.timestamp,
+ modelId: (_c = chunk.modelId) != null ? _c : roundtripResponse.modelId
+ };
+ break;
+ }
+ case "finish": {
  roundtripUsage = chunk.usage;
  roundtripFinishReason = chunk.finishReason;
  roundtripProviderMetadata = chunk.experimental_providerMetadata;
@@ -3656,16 +3806,18 @@ var DefaultStreamTextResult = class {
  "ai.response.avgCompletionTokensPerSecond": 1e3 * roundtripUsage.completionTokens / msToFinish
  });
  break;
+ }
  case "tool-call-streaming-start":
  case "tool-call-delta": {
  controller.enqueue(chunk);
  await (onChunk == null ? void 0 : onChunk({ chunk }));
  break;
  }
- case "error":
+ case "error": {
  controller.enqueue(chunk);
  roundtripFinishReason = "error";
  break;
+ }
  default: {
  const exhaustiveCheck = chunkType;
  throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
@@ -3679,7 +3831,8 @@ var DefaultStreamTextResult = class {
  finishReason: roundtripFinishReason,
  usage: roundtripUsage,
  experimental_providerMetadata: roundtripProviderMetadata,
- logprobs: roundtripLogProbs
+ logprobs: roundtripLogProbs,
+ response: roundtripResponse
  });
  const telemetryToolCalls = roundtripToolCalls.length > 0 ? JSON.stringify(roundtripToolCalls) : void 0;
  try {
@@ -3692,6 +3845,9 @@ var DefaultStreamTextResult = class {
  "ai.response.toolCalls": {
  output: () => telemetryToolCalls
  },
+ "ai.response.id": roundtripResponse.id,
+ "ai.response.model": roundtripResponse.modelId,
+ "ai.response.timestamp": roundtripResponse.timestamp.toISOString(),
  "ai.usage.promptTokens": roundtripUsage.promptTokens,
  "ai.usage.completionTokens": roundtripUsage.completionTokens,
  // deprecated
@@ -3702,6 +3858,8 @@ var DefaultStreamTextResult = class {
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [roundtripFinishReason],
+ "gen_ai.response.id": roundtripResponse.id,
+ "gen_ai.response.model": roundtripResponse.modelId,
  "gen_ai.usage.input_tokens": roundtripUsage.promptTokens,
  "gen_ai.usage.output_tokens": roundtripUsage.completionTokens
  }
@@ -3757,7 +3915,8 @@ var DefaultStreamTextResult = class {
  finishReason: roundtripFinishReason,
  usage: combinedUsage,
  experimental_providerMetadata: roundtripProviderMetadata,
- logprobs: roundtripLogProbs
+ logprobs: roundtripLogProbs,
+ response: roundtripResponse
  });
  closeStitchableStream();
  rootSpan.setAttributes(
@@ -3786,6 +3945,10 @@ var DefaultStreamTextResult = class {
  resolveToolCalls(roundtripToolCalls);
  resolveProviderMetadata(roundtripProviderMetadata);
  resolveToolResults(roundtripToolResults);
+ resolveResponse({
+ ...roundtripResponse,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ });
  await (onFinish == null ? void 0 : onFinish({
  finishReason: roundtripFinishReason,
  usage: combinedUsage,
@@ -3797,6 +3960,10 @@ var DefaultStreamTextResult = class {
  // The type exposed to the users will be correctly inferred.
  toolResults: roundtripToolResults,
  rawResponse,
+ response: {
+ ...roundtripResponse,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ },
  warnings,
  experimental_providerMetadata: roundtripProviderMetadata
  }));
@@ -4039,6 +4206,7 @@ var DefaultStreamTextResult = class {
  });
  }
  };
+ var experimental_streamText = streamText;

  // core/prompt/attachments-to-parts.ts
  function attachmentsToParts(attachments) {
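
Note: `streamText` likewise resolves a `response` promise after the final roundtrip and passes the merged metadata to `onFinish`, and `experimental_streamText` is exported as an alias. A minimal sketch, assuming a configured `model`:

    const result = await streamText({ model, prompt: "Stream a short greeting." });
    for await (const textPart of result.textStream) {
      process.stdout.write(textPart);
    }
    const response = await result.response; // { id, timestamp, modelId, headers }
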
@@ -5411,8 +5579,8 @@ var StreamingTextResponse = class extends Response {
  };

  // streams/index.ts
- var generateId2 = import_provider_utils8.generateId;
- var nanoid = import_provider_utils8.generateId;
+ var generateId2 = import_provider_utils11.generateId;
+ var nanoid = import_provider_utils11.generateId;
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
  AISDKError,
@@ -5468,7 +5636,9 @@ var nanoid = import_provider_utils8.generateId;
  experimental_createProviderRegistry,
  experimental_customProvider,
  experimental_generateObject,
+ experimental_generateText,
  experimental_streamObject,
+ experimental_streamText,
  formatStreamPart,
  generateId,
  generateObject,