ai 3.3.26 → 3.3.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -73,7 +73,10 @@ __export(streams_exports, {
  experimental_createProviderRegistry: () => experimental_createProviderRegistry,
  experimental_customProvider: () => experimental_customProvider,
  experimental_generateObject: () => experimental_generateObject,
+ experimental_generateText: () => experimental_generateText,
  experimental_streamObject: () => experimental_streamObject,
+ experimental_streamText: () => experimental_streamText,
+ experimental_wrapLanguageModel: () => experimental_wrapLanguageModel,
  formatStreamPart: () => import_ui_utils10.formatStreamPart,
  generateId: () => generateId2,
  generateObject: () => generateObject,
@@ -92,7 +95,7 @@ __export(streams_exports, {
  });
  module.exports = __toCommonJS(streams_exports);
  var import_ui_utils10 = require("@ai-sdk/ui-utils");
- var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");

  // core/index.ts
  var import_ui_utils6 = require("@ai-sdk/ui-utils");
@@ -1052,7 +1055,13 @@ function convertToLanguageModelMessage(message, downloadedImages) {
  content: message.content.filter(
  // remove empty text parts:
  (part) => part.type !== "text" || part.text !== ""
- ),
+ ).map((part) => {
+ const { experimental_providerMetadata, ...rest } = part;
+ return {
+ ...rest,
+ providerMetadata: experimental_providerMetadata
+ };
+ }),
  providerMetadata: message.experimental_providerMetadata
  };
  }
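
Note: the added .map() renames the part-level experimental_providerMetadata key to providerMetadata when the prompt is converted into a language-model message. De-minified, the new block is equivalent to this sketch:

    // un-minified equivalent of the added .map() call
    content.map(({ experimental_providerMetadata, ...rest }) => ({
      ...rest,
      providerMetadata: experimental_providerMetadata
    }))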
@@ -1385,8 +1394,8 @@ function validatePrompt(prompt) {
  throw new Error("unreachable");
  }

- // core/types/token-usage.ts
- function calculateCompletionTokenUsage(usage) {
+ // core/types/usage.ts
+ function calculateLanguageModelUsage(usage) {
  return {
  promptTokens: usage.promptTokens,
  completionTokens: usage.completionTokens,
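
Note: this hunk is a pure rename (core/types/token-usage.ts becomes core/types/usage.ts, calculateCompletionTokenUsage becomes calculateLanguageModelUsage); the function body is unchanged. The normalized usage shape on results is unaffected, as in this minimal sketch (any configured provider model works):

    const { generateText } = require("ai");

    async function showUsage(model) {
      // usage flows through calculateLanguageModelUsage
      const { usage } = await generateText({ model, prompt: "Hi" });
      console.log(usage.promptTokens, usage.completionTokens);
    }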
@@ -1716,6 +1725,7 @@ function validateObjectGenerationInput({
  }

  // core/generate-object/generate-object.ts
+ var originalGenerateId = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj-", length: 24 });
  async function generateObject({
  model,
  schema: inputSchema,
@@ -1730,6 +1740,10 @@ async function generateObject({
  abortSignal,
  headers,
  experimental_telemetry: telemetry,
+ _internal: {
+ generateId: generateId3 = originalGenerateId,
+ currentDate = () => /* @__PURE__ */ new Date()
+ } = {},
  ...settings
  }) {
  var _a11;
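
Note: generateObject (and, below, streamObject, generateText, and streamText) now accepts an undocumented _internal bag so the generated response id and timestamp can be pinned; the defaults are a createIdGenerator with the "aiobj-" prefix and new Date(). A hedged sketch of injecting deterministic values in a test (the mock model and schema are placeholders):

    const { generateObject } = require("ai");

    async function testGenerateObject(mockModel, schema) {
      return generateObject({
        model: mockModel,
        schema,
        prompt: "generate an object",
        _internal: {
          generateId: () => "aiobj-fixed-id", // replaces the "aiobj-" generator
          currentDate: () => new Date(0) // pins response.timestamp
        }
      });
    }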
@@ -1783,6 +1797,7 @@ async function generateObject({
  let usage;
  let warnings;
  let rawResponse;
+ let response;
  let logprobs;
  let providerMetadata;
  switch (mode) {
@@ -1831,6 +1846,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
+ var _a12, _b, _c, _d, _e, _f;
  const result2 = await model.doGenerate({
  mode: {
  type: "object-json",
@@ -1847,12 +1863,20 @@ async function generateObject({
  if (result2.text === void 0) {
  throw new NoObjectGeneratedError();
  }
+ const responseData = {
+ id: (_b = (_a12 = result2.response) == null ? void 0 : _a12.id) != null ? _b : generateId3(),
+ timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+ modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
+ };
  span2.setAttributes(
  selectTelemetryAttributes({
  telemetry,
  attributes: {
  "ai.response.finishReason": result2.finishReason,
  "ai.response.object": { output: () => result2.text },
+ "ai.response.id": responseData.id,
+ "ai.response.model": responseData.modelId,
+ "ai.response.timestamp": responseData.timestamp.toISOString(),
  "ai.usage.promptTokens": result2.usage.promptTokens,
  "ai.usage.completionTokens": result2.usage.completionTokens,
  // deprecated:
@@ -1860,12 +1884,14 @@ async function generateObject({
  "ai.result.object": { output: () => result2.text },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result2.finishReason],
+ "gen_ai.response.id": responseData.id,
+ "gen_ai.response.model": responseData.modelId,
  "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
  "gen_ai.usage.completion_tokens": result2.usage.completionTokens
  }
  })
  );
- return { ...result2, objectText: result2.text };
+ return { ...result2, objectText: result2.text, responseData };
  }
  })
  );
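
Note: the _a12/_b chains in the added responseData block are minified nullish fallbacks. In source form the block is equivalent to:

    // source-level equivalent of the minified responseData block:
    // prefer provider-reported metadata, fall back to generated values
    const responseData = {
      id: result2.response?.id ?? generateId3(),
      timestamp: result2.response?.timestamp ?? currentDate(),
      modelId: result2.response?.modelId ?? model.modelId
    };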
@@ -1876,6 +1902,7 @@ async function generateObject({
  rawResponse = generateResult.rawResponse;
  logprobs = generateResult.logprobs;
  providerMetadata = generateResult.providerMetadata;
+ response = generateResult.responseData;
  break;
  }
  case "tool": {
@@ -1920,7 +1947,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a12, _b;
+ var _a12, _b, _c, _d, _e, _f, _g, _h;
  const result2 = await model.doGenerate({
  mode: {
  type: "object-tool",
@@ -1941,12 +1968,20 @@ async function generateObject({
  if (objectText === void 0) {
  throw new NoObjectGeneratedError();
  }
+ const responseData = {
+ id: (_d = (_c = result2.response) == null ? void 0 : _c.id) != null ? _d : generateId3(),
+ timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
+ modelId: (_h = (_g = result2.response) == null ? void 0 : _g.modelId) != null ? _h : model.modelId
+ };
  span2.setAttributes(
  selectTelemetryAttributes({
  telemetry,
  attributes: {
  "ai.response.finishReason": result2.finishReason,
  "ai.response.object": { output: () => objectText },
+ "ai.response.id": responseData.id,
+ "ai.response.model": responseData.modelId,
+ "ai.response.timestamp": responseData.timestamp.toISOString(),
  "ai.usage.promptTokens": result2.usage.promptTokens,
  "ai.usage.completionTokens": result2.usage.completionTokens,
  // deprecated:
@@ -1954,12 +1989,14 @@ async function generateObject({
  "ai.result.object": { output: () => objectText },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result2.finishReason],
+ "gen_ai.response.id": responseData.id,
+ "gen_ai.response.model": responseData.modelId,
  "gen_ai.usage.input_tokens": result2.usage.promptTokens,
  "gen_ai.usage.output_tokens": result2.usage.completionTokens
  }
  })
  );
- return { ...result2, objectText };
+ return { ...result2, objectText, responseData };
  }
  })
  );
@@ -1970,6 +2007,7 @@ async function generateObject({
  rawResponse = generateResult.rawResponse;
  logprobs = generateResult.logprobs;
  providerMetadata = generateResult.providerMetadata;
+ response = generateResult.responseData;
  break;
  }
  case void 0: {
@@ -2013,9 +2051,12 @@ async function generateObject({
  return new DefaultGenerateObjectResult({
  object: validationResult.value,
  finishReason,
- usage: calculateCompletionTokenUsage(usage),
+ usage: calculateLanguageModelUsage(usage),
  warnings,
- rawResponse,
+ response: {
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ },
  logprobs,
  providerMetadata
  });
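
Note: generateObject results now carry a response object (id, timestamp, modelId, plus the raw HTTP headers) in place of the old rawResponse field. A minimal consumer sketch, assuming a configured model and schema:

    const { generateObject } = require("ai");

    async function run(model, schema) {
      const { object, response } = await generateObject({ model, schema, prompt: "..." });
      console.log(response.id, response.modelId, response.timestamp, response.headers);
      return object;
    }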
@@ -2028,9 +2069,12 @@ var DefaultGenerateObjectResult = class {
  this.finishReason = options.finishReason;
  this.usage = options.usage;
  this.warnings = options.warnings;
- this.rawResponse = options.rawResponse;
- this.logprobs = options.logprobs;
  this.experimental_providerMetadata = options.providerMetadata;
+ this.response = options.response;
+ this.rawResponse = {
+ headers: options.response.headers
+ };
+ this.logprobs = options.logprobs;
  }
  toJsonResponse(init) {
  var _a11;
@@ -2107,6 +2151,8 @@ function now() {
  }

  // core/generate-object/stream-object.ts
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var originalGenerateId2 = (0, import_provider_utils7.createIdGenerator)({ prefix: "aiobj-", length: 24 });
  async function streamObject({
  model,
  schema: inputSchema,
@@ -2122,7 +2168,11 @@ async function streamObject({
  headers,
  experimental_telemetry: telemetry,
  onFinish,
- _internal: { now: now2 = now } = {},
+ _internal: {
+ generateId: generateId3 = originalGenerateId2,
+ currentDate = () => /* @__PURE__ */ new Date(),
+ now: now2 = now
+ } = {},
  ...settings
  }) {
  var _a11;
@@ -2206,6 +2256,7 @@ async function streamObject({
  case "text-delta":
  controller.enqueue(chunk.textDelta);
  break;
+ case "response-metadata":
  case "finish":
  case "error":
  controller.enqueue(chunk);
@@ -2246,6 +2297,7 @@ async function streamObject({
  case "tool-call-delta":
  controller.enqueue(chunk.argsTextDelta);
  break;
+ case "response-metadata":
  case "finish":
  case "error":
  controller.enqueue(chunk);
@@ -2317,7 +2369,10 @@ async function streamObject({
  doStreamSpan,
  telemetry,
  startTimestampMs,
- now: now2
+ modelId: model.modelId,
+ now: now2,
+ currentDate,
+ generateId: generateId3
  });
  }
  });
@@ -2333,7 +2388,10 @@ var DefaultStreamObjectResult = class {
  doStreamSpan,
  telemetry,
  startTimestampMs,
- now: now2
+ modelId,
+ now: now2,
+ currentDate,
+ generateId: generateId3
  }) {
  this.warnings = warnings;
  this.rawResponse = rawResponse;
@@ -2341,6 +2399,8 @@ var DefaultStreamObjectResult = class {
  this.objectPromise = new DelayedPromise();
  const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
  this.usage = usagePromise;
+ const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
+ this.response = responsePromise;
  const {
  resolve: resolveProviderMetadata,
  promise: providerMetadataPromise
@@ -2353,6 +2413,11 @@ var DefaultStreamObjectResult = class {
  let error;
  let accumulatedText = "";
  let textDelta = "";
+ let response = {
+ id: generateId3(),
+ timestamp: currentDate(),
+ modelId
+ };
  let latestObjectJson = void 0;
  let latestObject = void 0;
  let isFirstChunk = true;
@@ -2361,6 +2426,7 @@ var DefaultStreamObjectResult = class {
  this.originalStream = stream.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
+ var _a11, _b, _c;
  if (isFirstChunk) {
  const msToFirstChunk = now2() - startTimestampMs;
  isFirstChunk = false;
@@ -2401,16 +2467,28 @@ var DefaultStreamObjectResult = class {
  return;
  }
  switch (chunk.type) {
+ case "response-metadata": {
+ response = {
+ id: (_a11 = chunk.id) != null ? _a11 : response.id,
+ timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
+ modelId: (_c = chunk.modelId) != null ? _c : response.modelId
+ };
+ break;
+ }
  case "finish": {
  if (textDelta !== "") {
  controller.enqueue({ type: "text-delta", textDelta });
  }
  finishReason = chunk.finishReason;
- usage = calculateCompletionTokenUsage(chunk.usage);
+ usage = calculateLanguageModelUsage(chunk.usage);
  providerMetadata = chunk.providerMetadata;
- controller.enqueue({ ...chunk, usage });
+ controller.enqueue({ ...chunk, usage, response });
  resolveUsage(usage);
  resolveProviderMetadata(providerMetadata);
+ resolveResponse({
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ });
  const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
  if (validationResult.success) {
  object = validationResult.value;
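
Note: the new "response-metadata" stream part lets a provider overwrite the locally generated id, timestamp, and modelId mid-stream; any field the chunk omits keeps its fallback. An illustrative chunk as consumed by the case above (all values are made up):

    const exampleChunk = {
      type: "response-metadata",
      id: "chatcmpl-123", // provider response id
      modelId: "gpt-4o-2024-08-06", // concrete model version
      timestamp: new Date("2024-09-01T00:00:00Z") // provider-reported time
    };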
@@ -2443,15 +2521,20 @@ var DefaultStreamObjectResult = class {
  "ai.response.object": {
  output: () => JSON.stringify(object)
  },
+ "ai.response.id": response.id,
+ "ai.response.model": response.modelId,
+ "ai.response.timestamp": response.timestamp.toISOString(),
  "ai.usage.promptTokens": finalUsage.promptTokens,
  "ai.usage.completionTokens": finalUsage.completionTokens,
  // deprecated
  "ai.finishReason": finishReason,
  "ai.result.object": { output: () => JSON.stringify(object) },
  // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [finishReason],
+ "gen_ai.response.id": response.id,
+ "gen_ai.response.model": response.modelId,
  "gen_ai.usage.input_tokens": finalUsage.promptTokens,
- "gen_ai.usage.output_tokens": finalUsage.completionTokens,
- "gen_ai.response.finish_reasons": [finishReason]
+ "gen_ai.usage.output_tokens": finalUsage.completionTokens
  }
  })
  );
@@ -2475,6 +2558,10 @@ var DefaultStreamObjectResult = class {
  object,
  error,
  rawResponse,
+ response: {
+ ...response,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ },
  warnings,
  experimental_providerMetadata: providerMetadata
  }));
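
Note: streamObject's onFinish callback keeps rawResponse for backwards compatibility and additionally receives the new response object; the result also exposes a response promise, resolved in the "finish" branch above. A hedged consumer sketch:

    const { streamObject } = require("ai");

    async function run(model, schema) {
      const result = await streamObject({
        model,
        schema,
        prompt: "...",
        onFinish({ response }) {
          console.log(response.id, response.modelId);
        }
      });
      for await (const part of result.partialObjectStream) {
        // consume partial objects
      }
      console.log(await result.response); // { id, timestamp, modelId, headers }
    }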
@@ -2577,6 +2664,9 @@ var DefaultStreamObjectResult = class {
  };
  var experimental_streamObject = streamObject;

+ // core/generate-text/generate-text.ts
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
+
  // core/prompt/prepare-tools-and-tool-choice.ts
  var import_ui_utils3 = require("@ai-sdk/ui-utils");

@@ -2633,7 +2723,7 @@ function toResponseMessages({
  }

  // core/generate-text/tool-call.ts
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
  var import_ui_utils4 = require("@ai-sdk/ui-utils");

  // errors/invalid-tool-arguments-error.ts
@@ -2738,7 +2828,7 @@ function parseToolCall({
  availableTools: Object.keys(tools)
  });
  }
- const parseResult = (0, import_provider_utils7.safeParseJSON)({
+ const parseResult = (0, import_provider_utils8.safeParseJSON)({
  text: toolCall.args,
  schema: (0, import_ui_utils4.asSchema)(tool2.parameters)
  });
@@ -2758,6 +2848,7 @@ function parseToolCall({
  }

  // core/generate-text/generate-text.ts
+ var originalGenerateId3 = (0, import_provider_utils9.createIdGenerator)({ prefix: "aitxt-", length: 24 });
  async function generateText({
  model,
  tools,
@@ -2771,6 +2862,10 @@ async function generateText({
  maxAutomaticRoundtrips = 0,
  maxToolRoundtrips = maxAutomaticRoundtrips,
  experimental_telemetry: telemetry,
+ _internal: {
+ generateId: generateId3 = originalGenerateId3,
+ currentDate = () => /* @__PURE__ */ new Date()
+ } = {},
  ...settings
  }) {
  var _a11;
@@ -2800,7 +2895,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a12, _b, _c;
+ var _a12, _b, _c, _d, _e;
  const retry = retryWithExponentialBackoff({ maxRetries });
  const validatedPrompt = validatePrompt({
  system,
@@ -2858,6 +2953,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span2) => {
+ var _a13, _b2, _c2, _d2, _e2, _f;
  const result = await model.doGenerate({
  mode,
  ...callSettings,
@@ -2866,6 +2962,11 @@ async function generateText({
  abortSignal,
  headers
  });
+ const responseData = {
+ id: (_b2 = (_a13 = result.response) == null ? void 0 : _a13.id) != null ? _b2 : generateId3(),
+ timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+ modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId
+ };
  span2.setAttributes(
  selectTelemetryAttributes({
  telemetry,
@@ -2877,6 +2978,9 @@ async function generateText({
  "ai.response.toolCalls": {
  output: () => JSON.stringify(result.toolCalls)
  },
+ "ai.response.id": responseData.id,
+ "ai.response.model": responseData.modelId,
+ "ai.response.timestamp": responseData.timestamp.toISOString(),
  "ai.usage.promptTokens": result.usage.promptTokens,
  "ai.usage.completionTokens": result.usage.completionTokens,
  // deprecated:
@@ -2889,12 +2993,14 @@ async function generateText({
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result.finishReason],
+ "gen_ai.response.id": responseData.id,
+ "gen_ai.response.model": responseData.modelId,
  "gen_ai.usage.input_tokens": result.usage.promptTokens,
  "gen_ai.usage.output_tokens": result.usage.completionTokens
  }
  })
  );
- return result;
+ return { ...result, response: responseData };
  }
  })
  );
@@ -2907,7 +3013,7 @@ async function generateText({
  tracer,
  telemetry
  });
- const currentUsage = calculateCompletionTokenUsage(
+ const currentUsage = calculateLanguageModelUsage(
  currentModelResponse.usage
  );
  usage.completionTokens += currentUsage.completionTokens;
@@ -2920,7 +3026,11 @@ async function generateText({
  finishReason: currentModelResponse.finishReason,
  usage: currentUsage,
  warnings: currentModelResponse.warnings,
- logprobs: currentModelResponse.logprobs
+ logprobs: currentModelResponse.logprobs,
+ response: {
+ ...currentModelResponse.response,
+ headers: (_c = currentModelResponse.rawResponse) == null ? void 0 : _c.headers
+ }
  });
  const newResponseMessages = toResponseMessages({
  text: currentModelResponse.text,
@@ -2967,13 +3077,16 @@ async function generateText({
  // Always return a string so that the caller doesn't have to check for undefined.
  // If they need to check if the model did not return any text,
  // they can check the length of the string:
- text: (_c = currentModelResponse.text) != null ? _c : "",
+ text: (_d = currentModelResponse.text) != null ? _d : "",
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
  finishReason: currentModelResponse.finishReason,
  usage,
  warnings: currentModelResponse.warnings,
- rawResponse: currentModelResponse.rawResponse,
+ response: {
+ ...currentModelResponse.response,
+ headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
+ },
  logprobs: currentModelResponse.logprobs,
  responseMessages,
  roundtrips,
@@ -3049,13 +3162,20 @@ var DefaultGenerateTextResult = class {
  this.finishReason = options.finishReason;
  this.usage = options.usage;
  this.warnings = options.warnings;
- this.rawResponse = options.rawResponse;
- this.logprobs = options.logprobs;
+ this.response = options.response;
  this.responseMessages = options.responseMessages;
  this.roundtrips = options.roundtrips;
  this.experimental_providerMetadata = options.providerMetadata;
+ this.rawResponse = {
+ headers: options.response.headers
+ };
+ this.logprobs = options.logprobs;
  }
  };
+ var experimental_generateText = generateText;
+
+ // core/generate-text/stream-text.ts
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");

  // core/util/create-stitchable-stream.ts
  function createStitchableStream() {
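
Note: generateText results gain the same response object, with rawResponse rebuilt from response.headers for compatibility, and generateText is re-exported under its former experimental_ name (streamText gets the matching alias further down). Both spellings resolve to the same function:

    const { generateText, experimental_generateText } = require("ai");
    console.log(generateText === experimental_generateText); // true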
@@ -3230,6 +3350,7 @@ function runToolsTransformation({
  const chunkType = chunk.type;
  switch (chunkType) {
  case "text-delta":
+ case "response-metadata":
  case "error": {
  controller.enqueue(chunk);
  break;
@@ -3350,7 +3471,7 @@ function runToolsTransformation({
  type: "finish",
  finishReason: chunk.finishReason,
  logprobs: chunk.logprobs,
- usage: calculateCompletionTokenUsage(chunk.usage),
+ usage: calculateLanguageModelUsage(chunk.usage),
  experimental_providerMetadata: chunk.providerMetadata
  });
  break;
@@ -3396,6 +3517,7 @@ function runToolsTransformation({
  }

  // core/generate-text/stream-text.ts
+ var originalGenerateId4 = (0, import_provider_utils10.createIdGenerator)({ prefix: "aitxt-", length: 24 });
  async function streamText({
  model,
  tools,
@@ -3411,7 +3533,11 @@ async function streamText({
  experimental_toolCallStreaming: toolCallStreaming = false,
  onChunk,
  onFinish,
- _internal: { now: now2 = now } = {},
+ _internal: {
+ now: now2 = now,
+ generateId: generateId3 = originalGenerateId4,
+ currentDate = () => /* @__PURE__ */ new Date()
+ } = {},
  ...settings
  }) {
  var _a11;
@@ -3537,7 +3663,10 @@ async function streamText({
  maxToolRoundtrips,
  startRoundtrip,
  promptMessages,
- now: now2
+ modelId: model.modelId,
+ now: now2,
+ currentDate,
+ generateId: generateId3
  });
  }
  });
@@ -3556,7 +3685,10 @@ var DefaultStreamTextResult = class {
  maxToolRoundtrips,
  startRoundtrip,
  promptMessages,
- now: now2
+ modelId,
+ now: now2,
+ currentDate,
+ generateId: generateId3
  }) {
  this.warnings = warnings;
  this.rawResponse = rawResponse;
@@ -3575,6 +3707,8 @@ var DefaultStreamTextResult = class {
  promise: providerMetadataPromise
  } = createResolvablePromise();
  this.experimental_providerMetadata = providerMetadataPromise;
+ const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
+ this.response = responsePromise;
  const {
  stream: stitchableStream,
  addStream,
@@ -3606,10 +3740,16 @@ var DefaultStreamTextResult = class {
  let roundtripFirstChunk = true;
  let roundtripText = "";
  let roundtripLogProbs;
+ let roundtripResponse = {
+ id: generateId3(),
+ timestamp: currentDate(),
+ modelId
+ };
  addStream(
  stream2.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
+ var _a11, _b, _c;
  if (roundtripFirstChunk) {
  const msToFirstChunk = now2() - startTimestamp;
  roundtripFirstChunk = false;
@@ -3629,22 +3769,33 @@ var DefaultStreamTextResult = class {
  }
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta":
+ case "text-delta": {
  controller.enqueue(chunk);
  roundtripText += chunk.textDelta;
  await (onChunk == null ? void 0 : onChunk({ chunk }));
  break;
- case "tool-call":
+ }
+ case "tool-call": {
  controller.enqueue(chunk);
  roundtripToolCalls.push(chunk);
  await (onChunk == null ? void 0 : onChunk({ chunk }));
  break;
- case "tool-result":
+ }
+ case "tool-result": {
  controller.enqueue(chunk);
  roundtripToolResults.push(chunk);
  await (onChunk == null ? void 0 : onChunk({ chunk }));
  break;
- case "finish":
+ }
+ case "response-metadata": {
+ roundtripResponse = {
+ id: (_a11 = chunk.id) != null ? _a11 : roundtripResponse.id,
+ timestamp: (_b = chunk.timestamp) != null ? _b : roundtripResponse.timestamp,
+ modelId: (_c = chunk.modelId) != null ? _c : roundtripResponse.modelId
+ };
+ break;
+ }
+ case "finish": {
  roundtripUsage = chunk.usage;
  roundtripFinishReason = chunk.finishReason;
  roundtripProviderMetadata = chunk.experimental_providerMetadata;
@@ -3656,16 +3807,18 @@ var DefaultStreamTextResult = class {
  "ai.response.avgCompletionTokensPerSecond": 1e3 * roundtripUsage.completionTokens / msToFinish
  });
  break;
+ }
  case "tool-call-streaming-start":
  case "tool-call-delta": {
  controller.enqueue(chunk);
  await (onChunk == null ? void 0 : onChunk({ chunk }));
  break;
  }
- case "error":
+ case "error": {
  controller.enqueue(chunk);
  roundtripFinishReason = "error";
  break;
+ }
  default: {
  const exhaustiveCheck = chunkType;
  throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
@@ -3679,7 +3832,8 @@ var DefaultStreamTextResult = class {
  finishReason: roundtripFinishReason,
  usage: roundtripUsage,
  experimental_providerMetadata: roundtripProviderMetadata,
- logprobs: roundtripLogProbs
+ logprobs: roundtripLogProbs,
+ response: roundtripResponse
  });
  const telemetryToolCalls = roundtripToolCalls.length > 0 ? JSON.stringify(roundtripToolCalls) : void 0;
  try {
@@ -3692,6 +3846,9 @@ var DefaultStreamTextResult = class {
  "ai.response.toolCalls": {
  output: () => telemetryToolCalls
  },
+ "ai.response.id": roundtripResponse.id,
+ "ai.response.model": roundtripResponse.modelId,
+ "ai.response.timestamp": roundtripResponse.timestamp.toISOString(),
  "ai.usage.promptTokens": roundtripUsage.promptTokens,
  "ai.usage.completionTokens": roundtripUsage.completionTokens,
  // deprecated
@@ -3702,6 +3859,8 @@ var DefaultStreamTextResult = class {
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [roundtripFinishReason],
+ "gen_ai.response.id": roundtripResponse.id,
+ "gen_ai.response.model": roundtripResponse.modelId,
  "gen_ai.usage.input_tokens": roundtripUsage.promptTokens,
  "gen_ai.usage.output_tokens": roundtripUsage.completionTokens
  }
@@ -3757,7 +3916,8 @@ var DefaultStreamTextResult = class {
  finishReason: roundtripFinishReason,
  usage: combinedUsage,
  experimental_providerMetadata: roundtripProviderMetadata,
- logprobs: roundtripLogProbs
+ logprobs: roundtripLogProbs,
+ response: roundtripResponse
  });
  closeStitchableStream();
  rootSpan.setAttributes(
@@ -3786,6 +3946,10 @@ var DefaultStreamTextResult = class {
  resolveToolCalls(roundtripToolCalls);
  resolveProviderMetadata(roundtripProviderMetadata);
  resolveToolResults(roundtripToolResults);
+ resolveResponse({
+ ...roundtripResponse,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ });
  await (onFinish == null ? void 0 : onFinish({
  finishReason: roundtripFinishReason,
  usage: combinedUsage,
@@ -3797,6 +3961,10 @@ var DefaultStreamTextResult = class {
  // The type exposed to the users will be correctly inferred.
  toolResults: roundtripToolResults,
  rawResponse,
+ response: {
+ ...roundtripResponse,
+ headers: rawResponse == null ? void 0 : rawResponse.headers
+ },
  warnings,
  experimental_providerMetadata: roundtripProviderMetadata
  }));
@@ -4039,6 +4207,40 @@ var DefaultStreamTextResult = class {
  });
  }
  };
+ var experimental_streamText = streamText;
+
+ // core/middleware/wrap-language-model.ts
+ var experimental_wrapLanguageModel = ({
+ model,
+ middleware: { transformParams, wrapGenerate, wrapStream },
+ modelId,
+ providerId
+ }) => {
+ async function doTransform({
+ params,
+ type
+ }) {
+ return transformParams ? await transformParams({ params, type }) : params;
+ }
+ return {
+ specificationVersion: "v1",
+ provider: providerId != null ? providerId : model.provider,
+ modelId: modelId != null ? modelId : model.modelId,
+ defaultObjectGenerationMode: model.defaultObjectGenerationMode,
+ supportsImageUrls: model.supportsImageUrls,
+ supportsStructuredOutputs: model.supportsStructuredOutputs,
+ async doGenerate(params) {
+ const transformedParams = await doTransform({ params, type: "generate" });
+ const doGenerate = async () => model.doGenerate(transformedParams);
+ return wrapGenerate ? wrapGenerate({ doGenerate, params: transformedParams, model }) : doGenerate();
+ },
+ async doStream(params) {
+ const transformedParams = await doTransform({ params, type: "stream" });
+ const doStream = async () => model.doStream(transformedParams);
+ return wrapStream ? wrapStream({ doStream, params: transformedParams, model }) : doStream();
+ }
+ };
+ };

  // core/prompt/attachments-to-parts.ts
  function attachmentsToParts(attachments) {
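
Note: experimental_wrapLanguageModel is the headline addition of this release: it decorates any specification-v1 language model with optional transformParams, wrapGenerate, and wrapStream middleware hooks. A hedged usage sketch (the timing middleware and the OpenAI provider are illustrative, not part of this diff):

    const { experimental_wrapLanguageModel, generateText } = require("ai");
    const { openai } = require("@ai-sdk/openai");

    const wrappedModel = experimental_wrapLanguageModel({
      model: openai("gpt-4o-mini"),
      middleware: {
        // runs before every call; a pass-through here
        transformParams: async ({ params }) => params,
        // wraps non-streaming calls; times doGenerate
        wrapGenerate: async ({ doGenerate }) => {
          const start = Date.now();
          const result = await doGenerate();
          console.log(`doGenerate took ${Date.now() - start}ms`);
          return result;
        }
      }
    });

    async function run() {
      const { text } = await generateText({ model: wrappedModel, prompt: "Hello" });
      console.log(text);
    }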
@@ -5411,8 +5613,8 @@ var StreamingTextResponse = class extends Response {
  };

  // streams/index.ts
- var generateId2 = import_provider_utils8.generateId;
- var nanoid = import_provider_utils8.generateId;
+ var generateId2 = import_provider_utils11.generateId;
+ var nanoid = import_provider_utils11.generateId;
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
  AISDKError,
@@ -5468,7 +5670,10 @@ var nanoid = import_provider_utils8.generateId;
  experimental_createProviderRegistry,
  experimental_customProvider,
  experimental_generateObject,
+ experimental_generateText,
  experimental_streamObject,
+ experimental_streamText,
+ experimental_wrapLanguageModel,
  formatStreamPart,
  generateId,
  generateObject,