ai 3.3.24 → 3.3.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,12 @@
  # ai
 
+ ## 3.3.25
+
+ ### Patch Changes
+
+ - 4f1530f: feat (ai/core): add OpenTelemetry Semantic Conventions for GenAI operations to v1.27.0 of standard
+ - dad775f: feat (ai/core): add finish event and avg output tokens per second (telemetry)
+
  ## 3.3.24
 
  ### Patch Changes
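
The two changelog entries above land mostly in the telemetry spans shown in the package/dist/index.js diff further down. A minimal sketch of how they might surface, assuming OpenTelemetry tracing is already configured in the host application; the provider, model id, and functionId are illustrative:

```ts
// Sketch only: not part of this package diff. Assumes an existing OpenTelemetry
// setup (e.g. a NodeSDK or @vercel/otel registration) in the application.
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";

const result = await streamText({
  model: openai("gpt-4o-mini"), // illustrative model id
  prompt: "Write a haiku about telemetry.",
  // Opt in to the experimental telemetry; spans from 3.3.25 should then carry
  // the gen_ai.request.*/gen_ai.usage.* names and the new ai.response.* attributes.
  experimental_telemetry: {
    isEnabled: true,
    functionId: "haiku-demo",
  },
});

for await (const delta of result.textStream) {
  process.stdout.write(delta);
}
```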
package/dist/index.d.mts CHANGED
@@ -924,6 +924,12 @@ Optional telemetry configuration (experimental).
  Callback that is called when the LLM response and the final object validation are finished.
  */
  onFinish?: OnFinishCallback<OBJECT>;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ now?: () => number;
+ };
  }): Promise<StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>>;
  /**
  Generate an array with structured, typed elements for a given prompt and element schema using a language model.
@@ -977,6 +983,12 @@ Optional telemetry configuration (experimental).
  Callback that is called when the LLM response and the final object validation are finished.
  */
  onFinish?: OnFinishCallback<Array<ELEMENT>>;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ now?: () => number;
+ };
  }): Promise<StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>>;
  /**
  Generate JSON with any schema for a given prompt using a language model.
@@ -1004,6 +1016,12 @@ Optional telemetry configuration (experimental).
  Callback that is called when the LLM response and the final object validation are finished.
  */
  onFinish?: OnFinishCallback<JSONValue>;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ now?: () => number;
+ };
  }): Promise<StreamObjectResult<JSONValue, JSONValue, never>>;
  /**
  * @deprecated Use `streamObject` instead.
@@ -1562,7 +1580,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1649,6 +1667,12 @@ Callback that is called when the LLM response and all request tool executions
  */
  readonly experimental_providerMetadata: ProviderMetadata | undefined;
  }) => Promise<void> | void;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ now?: () => number;
+ };
  }): Promise<StreamTextResult<TOOLS>>;
 
  /**
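
The typings above add an `_internal.now` hook to the streamObject and streamText options. It is documented as test-only and may change without notice; a minimal sketch of its shape, assuming an illustrative provider and prompt, is:

```ts
// Sketch only: `_internal.now` is marked internal/test-only in the typings above,
// so this shows the option's shape rather than a recommended usage pattern.
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";

// A deterministic clock: each call advances by 10 "ms", so duration-derived
// span attributes become reproducible.
let tick = 0;
const fakeNow = () => (tick += 10);

const result = await streamText({
  model: openai("gpt-4o-mini"), // illustrative
  prompt: "Say hello.",
  experimental_telemetry: { isEnabled: true },
  _internal: { now: fakeNow },
});

console.log(await result.text);
```

Injecting a deterministic clock this way makes attributes such as `ai.response.msToFirstChunk` and `ai.response.msToFinish` reproducible in tests, which appears to be why `now` is threaded through the stream implementations in the index.js diff below.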
package/dist/index.d.ts CHANGED
@@ -924,6 +924,12 @@ Optional telemetry configuration (experimental).
  Callback that is called when the LLM response and the final object validation are finished.
  */
  onFinish?: OnFinishCallback<OBJECT>;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ now?: () => number;
+ };
  }): Promise<StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>>;
  /**
  Generate an array with structured, typed elements for a given prompt and element schema using a language model.
@@ -977,6 +983,12 @@ Optional telemetry configuration (experimental).
  Callback that is called when the LLM response and the final object validation are finished.
  */
  onFinish?: OnFinishCallback<Array<ELEMENT>>;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ now?: () => number;
+ };
  }): Promise<StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>>;
  /**
  Generate JSON with any schema for a given prompt using a language model.
@@ -1004,6 +1016,12 @@ Optional telemetry configuration (experimental).
  Callback that is called when the LLM response and the final object validation are finished.
  */
  onFinish?: OnFinishCallback<JSONValue>;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ now?: () => number;
+ };
  }): Promise<StreamObjectResult<JSONValue, JSONValue, never>>;
  /**
  * @deprecated Use `streamObject` instead.
@@ -1562,7 +1580,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1649,6 +1667,12 @@ Callback that is called when the LLM response and all request tool executions
  */
  readonly experimental_providerMetadata: ProviderMetadata | undefined;
  }) => Promise<void> | void;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ now?: () => number;
+ };
  }): Promise<StreamTextResult<TOOLS>>;
 
  /**
package/dist/index.js CHANGED
@@ -1819,10 +1819,13 @@ async function generateObject({
  },
  "ai.settings.mode": mode,
  // standardized gen-ai llm span attributes:
- "gen_ai.request.model": model.modelId,
  "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
  "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
  "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_k": settings.topK,
  "gen_ai.request.top_p": settings.topP
  }
  }),
@@ -1848,9 +1851,12 @@ async function generateObject({
  selectTelemetryAttributes({
  telemetry,
  attributes: {
- "ai.finishReason": result2.finishReason,
+ "ai.response.finishReason": result2.finishReason,
+ "ai.response.object": { output: () => result2.text },
  "ai.usage.promptTokens": result2.usage.promptTokens,
  "ai.usage.completionTokens": result2.usage.completionTokens,
+ // deprecated:
+ "ai.finishReason": result2.finishReason,
  "ai.result.object": { output: () => result2.text },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result2.finishReason],
@@ -1902,10 +1908,13 @@ async function generateObject({
  },
  "ai.settings.mode": mode,
  // standardized gen-ai llm span attributes:
- "gen_ai.request.model": model.modelId,
  "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
  "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
  "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_k": settings.topK,
  "gen_ai.request.top_p": settings.topP
  }
  }),
@@ -1936,14 +1945,17 @@ async function generateObject({
  selectTelemetryAttributes({
  telemetry,
  attributes: {
- "ai.finishReason": result2.finishReason,
+ "ai.response.finishReason": result2.finishReason,
+ "ai.response.object": { output: () => objectText },
  "ai.usage.promptTokens": result2.usage.promptTokens,
  "ai.usage.completionTokens": result2.usage.completionTokens,
+ // deprecated:
+ "ai.finishReason": result2.finishReason,
  "ai.result.object": { output: () => objectText },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result2.finishReason],
- "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
- "gen_ai.usage.completion_tokens": result2.usage.completionTokens
+ "gen_ai.usage.input_tokens": result2.usage.promptTokens,
+ "gen_ai.usage.output_tokens": result2.usage.completionTokens
  }
  })
  );
@@ -1984,9 +1996,14 @@ async function generateObject({
  selectTelemetryAttributes({
  telemetry,
  attributes: {
- "ai.finishReason": finishReason,
+ "ai.response.finishReason": finishReason,
+ "ai.response.object": {
+ output: () => JSON.stringify(validationResult.value)
+ },
  "ai.usage.promptTokens": usage.promptTokens,
  "ai.usage.completionTokens": usage.completionTokens,
+ // deprecated:
+ "ai.finishReason": finishReason,
  "ai.result.object": {
  output: () => JSON.stringify(validationResult.value)
  }
@@ -2105,6 +2122,7 @@ async function streamObject({
  headers,
  experimental_telemetry: telemetry,
  onFinish,
+ _internal: { now: now2 = now } = {},
  ...settings
  }) {
  var _a11;
@@ -2270,17 +2288,20 @@ async function streamObject({
  },
  "ai.settings.mode": mode,
  // standardized gen-ai llm span attributes:
- "gen_ai.request.model": model.modelId,
  "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
  "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
  "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_k": settings.topK,
  "gen_ai.request.top_p": settings.topP
  }
  }),
  tracer,
  endWhenDone: false,
  fn: async (doStreamSpan2) => ({
- startTimestampMs: now(),
+ startTimestampMs: now2(),
  doStreamSpan: doStreamSpan2,
  result: await model.doStream(callOptions)
  })
@@ -2295,7 +2316,8 @@ async function streamObject({
  rootSpan,
  doStreamSpan,
  telemetry,
- startTimestampMs
+ startTimestampMs,
+ now: now2
  });
  }
  });
@@ -2310,7 +2332,8 @@ var DefaultStreamObjectResult = class {
  rootSpan,
  doStreamSpan,
  telemetry,
- startTimestampMs
+ startTimestampMs,
+ now: now2
  }) {
  this.warnings = warnings;
  this.rawResponse = rawResponse;
@@ -2339,7 +2362,7 @@ var DefaultStreamObjectResult = class {
  new TransformStream({
  async transform(chunk, controller) {
  if (isFirstChunk) {
- const msToFirstChunk = now() - startTimestampMs;
+ const msToFirstChunk = now2() - startTimestampMs;
  isFirstChunk = false;
  doStreamSpan.addEvent("ai.stream.firstChunk", {
  "ai.stream.msToFirstChunk": msToFirstChunk
@@ -2416,15 +2439,18 @@ var DefaultStreamObjectResult = class {
  selectTelemetryAttributes({
  telemetry,
  attributes: {
- "ai.finishReason": finishReason,
- "ai.usage.promptTokens": finalUsage.promptTokens,
- "ai.usage.completionTokens": finalUsage.completionTokens,
- "ai.result.object": {
+ "ai.response.finishReason": finishReason,
+ "ai.response.object": {
  output: () => JSON.stringify(object)
  },
+ "ai.usage.promptTokens": finalUsage.promptTokens,
+ "ai.usage.completionTokens": finalUsage.completionTokens,
+ // deprecated
+ "ai.finishReason": finishReason,
+ "ai.result.object": { output: () => JSON.stringify(object) },
  // standardized gen-ai llm span attributes:
- "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
- "gen_ai.usage.completion_tokens": finalUsage.completionTokens,
+ "gen_ai.usage.input_tokens": finalUsage.promptTokens,
+ "gen_ai.usage.output_tokens": finalUsage.completionTokens,
  "gen_ai.response.finish_reasons": [finishReason]
  }
  })
@@ -2436,9 +2462,11 @@ var DefaultStreamObjectResult = class {
  attributes: {
  "ai.usage.promptTokens": finalUsage.promptTokens,
  "ai.usage.completionTokens": finalUsage.completionTokens,
- "ai.result.object": {
+ "ai.response.object": {
  output: () => JSON.stringify(object)
- }
+ },
+ // deprecated
+ "ai.result.object": { output: () => JSON.stringify(object) }
  }
  })
  );
@@ -2817,10 +2845,14 @@ async function generateText({
  input: () => JSON.stringify(promptMessages)
  },
  // standardized gen-ai llm span attributes:
- "gen_ai.request.model": model.modelId,
  "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
  "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
+ "gen_ai.request.stop_sequences": settings.stopSequences,
  "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_k": settings.topK,
  "gen_ai.request.top_p": settings.topP
  }
  }),
@@ -2838,9 +2870,17 @@ async function generateText({
  selectTelemetryAttributes({
  telemetry,
  attributes: {
- "ai.finishReason": result.finishReason,
+ "ai.response.finishReason": result.finishReason,
+ "ai.response.text": {
+ output: () => result.text
+ },
+ "ai.response.toolCalls": {
+ output: () => JSON.stringify(result.toolCalls)
+ },
  "ai.usage.promptTokens": result.usage.promptTokens,
  "ai.usage.completionTokens": result.usage.completionTokens,
+ // deprecated:
+ "ai.finishReason": result.finishReason,
  "ai.result.text": {
  output: () => result.text
  },
@@ -2849,8 +2889,8 @@ async function generateText({
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result.finishReason],
- "gen_ai.usage.prompt_tokens": result.usage.promptTokens,
- "gen_ai.usage.completion_tokens": result.usage.completionTokens
+ "gen_ai.usage.input_tokens": result.usage.promptTokens,
+ "gen_ai.usage.output_tokens": result.usage.completionTokens
  }
  })
  );
@@ -2903,9 +2943,17 @@ async function generateText({
  selectTelemetryAttributes({
  telemetry,
  attributes: {
- "ai.finishReason": currentModelResponse.finishReason,
+ "ai.response.finishReason": currentModelResponse.finishReason,
+ "ai.response.text": {
+ output: () => currentModelResponse.text
+ },
+ "ai.response.toolCalls": {
+ output: () => JSON.stringify(currentModelResponse.toolCalls)
+ },
  "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
  "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
+ // deprecated:
+ "ai.finishReason": currentModelResponse.finishReason,
  "ai.result.text": {
  output: () => currentModelResponse.text
  },
@@ -3363,6 +3411,7 @@ async function streamText({
  experimental_toolCallStreaming: toolCallStreaming = false,
  onChunk,
  onFinish,
+ _internal: { now: now2 = now } = {},
  ...settings
  }) {
  var _a11;
@@ -3416,17 +3465,21 @@ async function streamText({
  input: () => JSON.stringify(promptMessages2)
  },
  // standardized gen-ai llm span attributes:
- "gen_ai.request.model": model.modelId,
  "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
  "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
+ "gen_ai.request.stop_sequences": settings.stopSequences,
  "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_k": settings.topK,
  "gen_ai.request.top_p": settings.topP
  }
  }),
  tracer,
  endWhenDone: false,
  fn: async (doStreamSpan3) => ({
- startTimestampMs: now(),
+ startTimestampMs: now2(),
  // get before the call
  doStreamSpan: doStreamSpan3,
  result: await model.doStream({
@@ -3483,7 +3536,8 @@ async function streamText({
  startTimestampMs,
  maxToolRoundtrips,
  startRoundtrip,
- promptMessages
+ promptMessages,
+ now: now2
  });
  }
  });
@@ -3501,7 +3555,8 @@ var DefaultStreamTextResult = class {
  startTimestampMs,
  maxToolRoundtrips,
  startRoundtrip,
- promptMessages
+ promptMessages,
+ now: now2
  }) {
  this.warnings = warnings;
  this.rawResponse = rawResponse;
@@ -3556,12 +3611,16 @@ var DefaultStreamTextResult = class {
  new TransformStream({
  async transform(chunk, controller) {
  if (roundtripFirstChunk) {
- const msToFirstChunk = now() - startTimestamp;
+ const msToFirstChunk = now2() - startTimestamp;
  roundtripFirstChunk = false;
  doStreamSpan2.addEvent("ai.stream.firstChunk", {
+ "ai.response.msToFirstChunk": msToFirstChunk,
+ // deprecated:
  "ai.stream.msToFirstChunk": msToFirstChunk
  });
  doStreamSpan2.setAttributes({
+ "ai.response.msToFirstChunk": msToFirstChunk,
+ // deprecated:
  "ai.stream.msToFirstChunk": msToFirstChunk
  });
  }
@@ -3590,6 +3649,12 @@ var DefaultStreamTextResult = class {
  roundtripFinishReason = chunk.finishReason;
  roundtripProviderMetadata = chunk.experimental_providerMetadata;
  roundtripLogProbs = chunk.logprobs;
+ const msToFinish = now2() - startTimestamp;
+ doStreamSpan2.addEvent("ai.stream.finish");
+ doStreamSpan2.setAttributes({
+ "ai.response.msToFinish": msToFinish,
+ "ai.response.avgCompletionTokensPerSecond": 1e3 * roundtripUsage.completionTokens / msToFinish
+ });
  break;
  case "tool-call-streaming-start":
  case "tool-call-delta": {
@@ -3622,17 +3687,23 @@ var DefaultStreamTextResult = class {
  selectTelemetryAttributes({
  telemetry,
  attributes: {
- "ai.finishReason": roundtripFinishReason,
+ "ai.response.finishReason": roundtripFinishReason,
+ "ai.response.text": { output: () => roundtripText },
+ "ai.response.toolCalls": {
+ output: () => telemetryToolCalls
+ },
  "ai.usage.promptTokens": roundtripUsage.promptTokens,
  "ai.usage.completionTokens": roundtripUsage.completionTokens,
+ // deprecated
+ "ai.finishReason": roundtripFinishReason,
  "ai.result.text": { output: () => roundtripText },
  "ai.result.toolCalls": {
  output: () => telemetryToolCalls
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [roundtripFinishReason],
- "gen_ai.usage.prompt_tokens": roundtripUsage.promptTokens,
- "gen_ai.usage.completion_tokens": roundtripUsage.completionTokens
+ "gen_ai.usage.input_tokens": roundtripUsage.promptTokens,
+ "gen_ai.usage.output_tokens": roundtripUsage.completionTokens
  }
  })
  );
@@ -3693,9 +3764,15 @@ var DefaultStreamTextResult = class {
  selectTelemetryAttributes({
  telemetry,
  attributes: {
- "ai.finishReason": roundtripFinishReason,
+ "ai.response.finishReason": roundtripFinishReason,
+ "ai.response.text": { output: () => roundtripText },
+ "ai.response.toolCalls": {
+ output: () => telemetryToolCalls
+ },
  "ai.usage.promptTokens": combinedUsage.promptTokens,
  "ai.usage.completionTokens": combinedUsage.completionTokens,
+ // deprecated
+ "ai.finishReason": roundtripFinishReason,
  "ai.result.text": { output: () => roundtripText },
  "ai.result.toolCalls": {
  output: () => telemetryToolCalls