ai 3.2.40 → 3.2.41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -258,6 +258,33 @@ function recordSpan({
   });
 }
 
+// core/telemetry/select-telemetry-attributes.ts
+function selectTelemetryAttributes({
+  telemetry,
+  attributes
+}) {
+  return Object.entries(attributes).reduce((attributes2, [key, value]) => {
+    if (value === void 0) {
+      return attributes2;
+    }
+    if (typeof value === "object" && "input" in value && typeof value.input === "function") {
+      if ((telemetry == null ? void 0 : telemetry.recordInputs) === false) {
+        return attributes2;
+      }
+      const result = value.input();
+      return result === void 0 ? attributes2 : { ...attributes2, [key]: result };
+    }
+    if (typeof value === "object" && "output" in value && typeof value.output === "function") {
+      if ((telemetry == null ? void 0 : telemetry.recordOutputs) === false) {
+        return attributes2;
+      }
+      const result = value.output();
+      return result === void 0 ? attributes2 : { ...attributes2, [key]: result };
+    }
+    return { ...attributes2, [key]: value };
+  }, {});
+}
+
 // core/util/retry-with-exponential-backoff.ts
 var import_provider = require("@ai-sdk/provider");
 var import_provider_utils = require("@ai-sdk/provider-utils");
@@ -340,11 +367,13 @@ async function embed({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.embed",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.value": JSON.stringify(value)
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        "ai.value": { input: () => JSON.stringify(value) }
+      }
+    }),
     tracer,
     fn: async (span) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
@@ -353,11 +382,14 @@ async function embed({
           // nested spans to align with the embedMany telemetry data:
           recordSpan({
             name: "ai.embed.doEmbed",
-            attributes: {
-              ...baseTelemetryAttributes,
-              // specific settings that only make sense on the outer level:
-              "ai.values": [JSON.stringify(value)]
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                // specific settings that only make sense on the outer level:
+                "ai.values": { input: () => [JSON.stringify(value)] }
+              }
+            }),
             tracer,
             fn: async (doEmbedSpan) => {
               var _a2;
@@ -368,12 +400,19 @@ async function embed({
               });
               const embedding2 = modelResponse.embeddings[0];
               const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
-              doEmbedSpan.setAttributes({
-                "ai.embeddings": modelResponse.embeddings.map(
-                  (embedding3) => JSON.stringify(embedding3)
-                ),
-                "ai.usage.tokens": usage2.tokens
-              });
+              doEmbedSpan.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.embeddings": {
+                      output: () => modelResponse.embeddings.map(
+                        (embedding3) => JSON.stringify(embedding3)
+                      )
+                    },
+                    "ai.usage.tokens": usage2.tokens
+                  }
+                })
+              );
               return {
                 embedding: embedding2,
                 usage: usage2,
@@ -383,10 +422,15 @@ async function embed({
           })
         )
       );
-      span.setAttributes({
-        "ai.embedding": JSON.stringify(embedding),
-        "ai.usage.tokens": usage.tokens
-      });
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.embedding": { output: () => JSON.stringify(embedding) },
+            "ai.usage.tokens": usage.tokens
+          }
+        })
+      );
       return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
     }
   });
@@ -432,11 +476,16 @@ async function embedMany({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.embedMany",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.values": values.map((value) => JSON.stringify(value))
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.values": {
+          input: () => values.map((value) => JSON.stringify(value))
+        }
+      }
+    }),
     tracer,
     fn: async (span) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
@@ -445,11 +494,16 @@ async function embedMany({
         const { embeddings: embeddings2, usage } = await retry(() => {
           return recordSpan({
             name: "ai.embedMany.doEmbed",
-            attributes: {
-              ...baseTelemetryAttributes,
-              // specific settings that only make sense on the outer level:
-              "ai.values": values.map((value) => JSON.stringify(value))
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                // specific settings that only make sense on the outer level:
+                "ai.values": {
+                  input: () => values.map((value) => JSON.stringify(value))
+                }
+              }
+            }),
             tracer,
             fn: async (doEmbedSpan) => {
               var _a2;
@@ -460,22 +514,32 @@ async function embedMany({
               });
               const embeddings3 = modelResponse.embeddings;
               const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
-              doEmbedSpan.setAttributes({
-                "ai.embeddings": embeddings3.map(
-                  (embedding) => JSON.stringify(embedding)
-                ),
-                "ai.usage.tokens": usage2.tokens
-              });
+              doEmbedSpan.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.embeddings": {
+                      output: () => embeddings3.map((embedding) => JSON.stringify(embedding))
+                    },
+                    "ai.usage.tokens": usage2.tokens
+                  }
+                })
+              );
               return { embeddings: embeddings3, usage: usage2 };
             }
           });
         });
-        span.setAttributes({
-          "ai.embeddings": embeddings2.map(
-            (embedding) => JSON.stringify(embedding)
-          ),
-          "ai.usage.tokens": usage.tokens
-        });
+        span.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.embeddings": {
+                output: () => embeddings2.map((embedding) => JSON.stringify(embedding))
+              },
+              "ai.usage.tokens": usage.tokens
+            }
+          })
+        );
         return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
       }
       const valueChunks = splitArray(values, maxEmbeddingsPerCall);
@@ -485,11 +549,16 @@ async function embedMany({
         const { embeddings: responseEmbeddings, usage } = await retry(() => {
           return recordSpan({
             name: "ai.embedMany.doEmbed",
-            attributes: {
-              ...baseTelemetryAttributes,
-              // specific settings that only make sense on the outer level:
-              "ai.values": chunk.map((value) => JSON.stringify(value))
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                // specific settings that only make sense on the outer level:
+                "ai.values": {
+                  input: () => chunk.map((value) => JSON.stringify(value))
+                }
+              }
+            }),
             tracer,
             fn: async (doEmbedSpan) => {
               var _a2;
@@ -500,12 +569,17 @@ async function embedMany({
               });
               const embeddings2 = modelResponse.embeddings;
               const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
-              doEmbedSpan.setAttributes({
-                "ai.embeddings": embeddings2.map(
-                  (embedding) => JSON.stringify(embedding)
-                ),
-                "ai.usage.tokens": usage2.tokens
-              });
+              doEmbedSpan.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.embeddings": {
+                      output: () => embeddings2.map((embedding) => JSON.stringify(embedding))
+                    },
+                    "ai.usage.tokens": usage2.tokens
+                  }
+                })
+              );
               return { embeddings: embeddings2, usage: usage2 };
             }
           });
@@ -513,10 +587,17 @@ async function embedMany({
         embeddings.push(...responseEmbeddings);
         tokens += usage.tokens;
       }
-      span.setAttributes({
-        "ai.embeddings": embeddings.map((embedding) => JSON.stringify(embedding)),
-        "ai.usage.tokens": tokens
-      });
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.embeddings": {
+              output: () => embeddings.map((embedding) => JSON.stringify(embedding))
+            },
+            "ai.usage.tokens": tokens
+          }
+        })
+      );
       return new DefaultEmbedManyResult({
         values,
         embeddings,
@@ -1073,13 +1154,20 @@ async function generateObject({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.generateObject",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.prompt": JSON.stringify({ system, prompt, messages }),
-      "ai.schema": JSON.stringify(schema.jsonSchema),
-      "ai.settings.mode": mode
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        },
+        "ai.schema": {
+          input: () => JSON.stringify(schema.jsonSchema)
+        },
+        "ai.settings.mode": mode
+      }
+    }),
     tracer,
     fn: async (span) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
@@ -1110,12 +1198,19 @@ async function generateObject({
         const generateResult = await retry(
           () => recordSpan({
             name: "ai.generateObject.doGenerate",
-            attributes: {
-              ...baseTelemetryAttributes,
-              "ai.prompt.format": inputFormat,
-              "ai.prompt.messages": JSON.stringify(promptMessages),
-              "ai.settings.mode": mode
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": {
+                  input: () => inputFormat
+                },
+                "ai.prompt.messages": {
+                  input: () => JSON.stringify(promptMessages)
+                },
+                "ai.settings.mode": mode
+              }
+            }),
             tracer,
             fn: async (span2) => {
               const result2 = await model.doGenerate({
@@ -1129,12 +1224,17 @@ async function generateObject({
              if (result2.text === void 0) {
                throw new import_provider6.NoObjectGeneratedError();
              }
-              span2.setAttributes({
-                "ai.finishReason": result2.finishReason,
-                "ai.usage.promptTokens": result2.usage.promptTokens,
-                "ai.usage.completionTokens": result2.usage.completionTokens,
-                "ai.result.object": result2.text
-              });
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.finishReason": result2.finishReason,
+                    "ai.usage.promptTokens": result2.usage.promptTokens,
+                    "ai.usage.completionTokens": result2.usage.completionTokens,
+                    "ai.result.object": { output: () => result2.text }
+                  }
+                })
+              );
              return { ...result2, objectText: result2.text };
            }
          })
@@ -1161,12 +1261,19 @@ async function generateObject({
         const generateResult = await retry(
           () => recordSpan({
             name: "ai.generateObject.doGenerate",
-            attributes: {
-              ...baseTelemetryAttributes,
-              "ai.prompt.format": inputFormat,
-              "ai.prompt.messages": JSON.stringify(promptMessages),
-              "ai.settings.mode": mode
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": {
+                  input: () => inputFormat
+                },
+                "ai.prompt.messages": {
+                  input: () => JSON.stringify(promptMessages)
+                },
+                "ai.settings.mode": mode
+              }
+            }),
             tracer,
             fn: async (span2) => {
               var _a2, _b;
@@ -1190,12 +1297,17 @@ async function generateObject({
              if (objectText === void 0) {
                throw new import_provider6.NoObjectGeneratedError();
              }
-              span2.setAttributes({
-                "ai.finishReason": result2.finishReason,
-                "ai.usage.promptTokens": result2.usage.promptTokens,
-                "ai.usage.completionTokens": result2.usage.completionTokens,
-                "ai.result.object": objectText
-              });
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.finishReason": result2.finishReason,
+                    "ai.usage.promptTokens": result2.usage.promptTokens,
+                    "ai.usage.completionTokens": result2.usage.completionTokens,
+                    "ai.result.object": { output: () => objectText }
+                  }
+                })
+              );
              return { ...result2, objectText };
            }
          })
@@ -1222,12 +1334,19 @@ async function generateObject({
      if (!parseResult.success) {
        throw parseResult.error;
      }
-      span.setAttributes({
-        "ai.finishReason": finishReason,
-        "ai.usage.promptTokens": usage.promptTokens,
-        "ai.usage.completionTokens": usage.completionTokens,
-        "ai.result.object": JSON.stringify(parseResult.value)
-      });
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.finishReason": finishReason,
+            "ai.usage.promptTokens": usage.promptTokens,
+            "ai.usage.completionTokens": usage.completionTokens,
+            "ai.result.object": {
+              output: () => JSON.stringify(parseResult.value)
+            }
+          }
+        })
+      );
      return new DefaultGenerateObjectResult({
        object: parseResult.value,
        finishReason,
@@ -1347,13 +1466,18 @@ async function streamObject({
   const schema = asSchema(inputSchema);
   return recordSpan({
     name: "ai.streamObject",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.prompt": JSON.stringify({ system, prompt, messages }),
-      "ai.schema": JSON.stringify(schema.jsonSchema),
-      "ai.settings.mode": mode
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        },
+        "ai.schema": { input: () => JSON.stringify(schema.jsonSchema) },
+        "ai.settings.mode": mode
+      }
+    }),
     tracer,
     endWhenDone: false,
     fn: async (rootSpan) => {
@@ -1454,20 +1578,25 @@ async function streamObject({
      } = await retry(
        () => recordSpan({
          name: "ai.streamObject.doStream",
-          attributes: {
-            ...baseTelemetryAttributes,
-            "ai.prompt.format": callOptions.inputFormat,
-            "ai.prompt.messages": JSON.stringify(callOptions.prompt),
-            "ai.settings.mode": mode
-          },
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": {
+                input: () => callOptions.inputFormat
+              },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(callOptions.prompt)
+              },
+              "ai.settings.mode": mode
+            }
+          }),
          tracer,
          endWhenDone: false,
-          fn: async (doStreamSpan2) => {
-            return {
-              result: await model.doStream(callOptions),
-              doStreamSpan: doStreamSpan2
-            };
-          }
+          fn: async (doStreamSpan2) => ({
+            result: await model.doStream(callOptions),
+            doStreamSpan: doStreamSpan2
+          })
        })
      );
      return new DefaultStreamObjectResult({
@@ -1477,7 +1606,8 @@ async function streamObject({
        schema,
        onFinish,
        rootSpan,
-        doStreamSpan
+        doStreamSpan,
+        telemetry
      });
    }
  });
@@ -1490,7 +1620,8 @@ var DefaultStreamObjectResult = class {
    schema,
    onFinish,
    rootSpan,
-    doStreamSpan
+    doStreamSpan,
+    telemetry
  }) {
    this.warnings = warnings;
    this.rawResponse = rawResponse;
@@ -1572,17 +1703,31 @@ var DefaultStreamObjectResult = class {
          completionTokens: NaN,
          totalTokens: NaN
        };
-        doStreamSpan.setAttributes({
-          "ai.usage.promptTokens": finalUsage.promptTokens,
-          "ai.usage.completionTokens": finalUsage.completionTokens,
-          "ai.result.object": JSON.stringify(object)
-        });
+        doStreamSpan.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.object": {
+                output: () => JSON.stringify(object)
+              }
+            }
+          })
+        );
        doStreamSpan.end();
-        rootSpan.setAttributes({
-          "ai.usage.promptTokens": finalUsage.promptTokens,
-          "ai.usage.completionTokens": finalUsage.completionTokens,
-          "ai.result.object": JSON.stringify(object)
-        });
+        rootSpan.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.object": {
+                output: () => JSON.stringify(object)
+              }
+            }
+          })
+        );
        await (onFinish == null ? void 0 : onFinish({
          usage: finalUsage,
          object,
@@ -1777,12 +1922,17 @@ async function generateText({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.generateText",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.prompt": JSON.stringify({ system, prompt, messages }),
-      "ai.settings.maxToolRoundtrips": maxToolRoundtrips
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        },
+        "ai.settings.maxToolRoundtrips": maxToolRoundtrips
+      }
+    }),
     tracer,
     fn: async (span) => {
       var _a2, _b, _c, _d;
@@ -1817,11 +1967,16 @@ async function generateText({
        currentModelResponse = await retry(
          () => recordSpan({
            name: "ai.generateText.doGenerate",
-            attributes: {
-              ...baseTelemetryAttributes,
-              "ai.prompt.format": currentInputFormat,
-              "ai.prompt.messages": JSON.stringify(promptMessages)
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": { input: () => currentInputFormat },
+                "ai.prompt.messages": {
+                  input: () => JSON.stringify(promptMessages)
+                }
+              }
+            }),
            tracer,
            fn: async (span2) => {
              const result = await model.doGenerate({
@@ -1832,13 +1987,22 @@ async function generateText({
                abortSignal,
                headers
              });
-              span2.setAttributes({
-                "ai.finishReason": result.finishReason,
-                "ai.usage.promptTokens": result.usage.promptTokens,
-                "ai.usage.completionTokens": result.usage.completionTokens,
-                "ai.result.text": result.text,
-                "ai.result.toolCalls": JSON.stringify(result.toolCalls)
-              });
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.finishReason": result.finishReason,
+                    "ai.usage.promptTokens": result.usage.promptTokens,
+                    "ai.usage.completionTokens": result.usage.completionTokens,
+                    "ai.result.text": {
+                      output: () => result.text
+                    },
+                    "ai.result.toolCalls": {
+                      output: () => JSON.stringify(result.toolCalls)
+                    }
+                  }
+                })
+              );
              return result;
            }
          })
@@ -1849,7 +2013,8 @@ async function generateText({
        currentToolResults = tools == null ? [] : await executeTools({
          toolCalls: currentToolCalls,
          tools,
-          tracer
+          tracer,
+          telemetry
        });
        const currentUsage = calculateCompletionTokenUsage(
          currentModelResponse.usage
@@ -1883,13 +2048,22 @@ async function generateText({
        currentToolResults.length === currentToolCalls.length && // the number of roundtrips is less than the maximum:
        roundtripCount++ < maxToolRoundtrips
      );
-      span.setAttributes({
-        "ai.finishReason": currentModelResponse.finishReason,
-        "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
-        "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
-        "ai.result.text": currentModelResponse.text,
-        "ai.result.toolCalls": JSON.stringify(currentModelResponse.toolCalls)
-      });
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.finishReason": currentModelResponse.finishReason,
+            "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
+            "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
+            "ai.result.text": {
+              output: () => currentModelResponse.text
+            },
+            "ai.result.toolCalls": {
+              output: () => JSON.stringify(currentModelResponse.toolCalls)
+            }
+          }
+        })
+      );
      return new DefaultGenerateTextResult({
        // Always return a string so that the caller doesn't have to check for undefined.
        // If they need to check if the model did not return any text,
@@ -1911,7 +2085,8 @@ async function generateText({
async function executeTools({
  toolCalls,
  tools,
-  tracer
+  tracer,
+  telemetry
}) {
  const toolResults = await Promise.all(
    toolCalls.map(async (toolCall) => {
@@ -1921,18 +2096,30 @@ async function executeTools({
      }
      const result = await recordSpan({
        name: "ai.toolCall",
-        attributes: {
-          "ai.toolCall.name": toolCall.toolName,
-          "ai.toolCall.id": toolCall.toolCallId,
-          "ai.toolCall.args": JSON.stringify(toolCall.args)
-        },
+        attributes: selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.toolCall.name": toolCall.toolName,
+            "ai.toolCall.id": toolCall.toolCallId,
+            "ai.toolCall.args": {
+              output: () => JSON.stringify(toolCall.args)
+            }
+          }
+        }),
        tracer,
        fn: async (span) => {
          const result2 = await tool2.execute(toolCall.args);
          try {
-            span.setAttributes({
-              "ai.toolCall.result": JSON.stringify(result2)
-            });
+            span.setAttributes(
+              selectTelemetryAttributes({
+                telemetry,
+                attributes: {
+                  "ai.toolCall.result": {
+                    output: () => JSON.stringify(result2)
+                  }
+                }
+              })
+            );
          } catch (ignored) {
          }
          return result2;
@@ -2084,7 +2271,8 @@ function runToolsTransformation({
  tools,
  generatorStream,
  toolCallStreaming,
-  tracer
+  tracer,
+  telemetry
}) {
  let canClose = false;
  const outstandingToolCalls = /* @__PURE__ */ new Set();
@@ -2154,11 +2342,16 @@ function runToolsTransformation({
          outstandingToolCalls.add(toolExecutionId);
          recordSpan({
            name: "ai.toolCall",
-            attributes: {
-              "ai.toolCall.name": toolCall.toolName,
-              "ai.toolCall.id": toolCall.toolCallId,
-              "ai.toolCall.args": JSON.stringify(toolCall.args)
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                "ai.toolCall.name": toolCall.toolName,
+                "ai.toolCall.id": toolCall.toolCallId,
+                "ai.toolCall.args": {
+                  output: () => JSON.stringify(toolCall.args)
+                }
+              }
+            }),
            tracer,
            fn: async (span) => tool2.execute(toolCall.args).then(
              (result) => {
@@ -2172,9 +2365,16 @@ function runToolsTransformation({
                  toolResultsStreamController.close();
                }
                try {
-                  span.setAttributes({
-                    "ai.toolCall.result": JSON.stringify(result)
-                  });
+                  span.setAttributes(
+                    selectTelemetryAttributes({
+                      telemetry,
+                      attributes: {
+                        "ai.toolCall.result": {
+                          output: () => JSON.stringify(result)
+                        }
+                      }
+                    })
+                  );
                } catch (ignored) {
                }
              },
  },
@@ -2275,11 +2475,16 @@ async function streamText({
2275
2475
  const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
2276
2476
  return recordSpan({
2277
2477
  name: "ai.streamText",
2278
- attributes: {
2279
- ...baseTelemetryAttributes,
2280
- // specific settings that only make sense on the outer level:
2281
- "ai.prompt": JSON.stringify({ system, prompt, messages })
2282
- },
2478
+ attributes: selectTelemetryAttributes({
2479
+ telemetry,
2480
+ attributes: {
2481
+ ...baseTelemetryAttributes,
2482
+ // specific settings that only make sense on the outer level:
2483
+ "ai.prompt": {
2484
+ input: () => JSON.stringify({ system, prompt, messages })
2485
+ }
2486
+ }
2487
+ }),
2283
2488
  tracer,
2284
2489
  endWhenDone: false,
2285
2490
  fn: async (rootSpan) => {
@@ -2295,11 +2500,18 @@ async function streamText({
      } = await retry(
        () => recordSpan({
          name: "ai.streamText.doStream",
-          attributes: {
-            ...baseTelemetryAttributes,
-            "ai.prompt.format": validatedPrompt.type,
-            "ai.prompt.messages": JSON.stringify(promptMessages)
-          },
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": {
+                input: () => validatedPrompt.type
+              },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(promptMessages)
+              }
+            }
+          }),
          tracer,
          endWhenDone: false,
          fn: async (doStreamSpan2) => {
@@ -2325,13 +2537,15 @@ async function streamText({
          tools,
          generatorStream: stream,
          toolCallStreaming,
-          tracer
+          tracer,
+          telemetry
        }),
        warnings,
        rawResponse,
        onFinish,
        rootSpan,
-        doStreamSpan
+        doStreamSpan,
+        telemetry
      });
    }
  });
@@ -2343,7 +2557,8 @@ var DefaultStreamTextResult = class {
    rawResponse,
    onFinish,
    rootSpan,
-    doStreamSpan
+    doStreamSpan,
+    telemetry
  }) {
    this.warnings = warnings;
    this.rawResponse = rawResponse;
@@ -2423,21 +2638,31 @@ var DefaultStreamTextResult = class {
        };
        const finalFinishReason = finishReason != null ? finishReason : "unknown";
        const telemetryToolCalls = toolCalls.length > 0 ? JSON.stringify(toolCalls) : void 0;
-        doStreamSpan.setAttributes({
-          "ai.finishReason": finalFinishReason,
-          "ai.usage.promptTokens": finalUsage.promptTokens,
-          "ai.usage.completionTokens": finalUsage.completionTokens,
-          "ai.result.text": text,
-          "ai.result.toolCalls": telemetryToolCalls
-        });
+        doStreamSpan.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.finishReason": finalFinishReason,
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.text": { output: () => text },
+              "ai.result.toolCalls": { output: () => telemetryToolCalls }
+            }
+          })
+        );
        doStreamSpan.end();
-        rootSpan.setAttributes({
-          "ai.finishReason": finalFinishReason,
-          "ai.usage.promptTokens": finalUsage.promptTokens,
-          "ai.usage.completionTokens": finalUsage.completionTokens,
-          "ai.result.text": text,
-          "ai.result.toolCalls": telemetryToolCalls
-        });
+        rootSpan.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.finishReason": finalFinishReason,
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.text": { output: () => text },
+              "ai.result.toolCalls": { output: () => telemetryToolCalls }
+            }
+          })
+        );
        resolveToolResults(toolResults);
        await ((_a = self.onFinish) == null ? void 0 : _a.call(self, {
          finishReason: finalFinishReason,
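
Taken together, these hunks wrap every prompt- and result-carrying span attribute in { input: () => ... } or { output: () => ... } and route it through the new selectTelemetryAttributes helper from the first hunk, which drops input-wrapped entries when telemetry.recordInputs === false, drops output-wrapped entries when telemetry.recordOutputs === false, and otherwise evaluates them lazily. A minimal sketch of that behavior; the helper is taken verbatim from the diff, while the telemetry flags and attribute values below are invented for illustration:

// Hypothetical call, mirroring how the wrapped attributes above are resolved:
const attributes = selectTelemetryAttributes({
  telemetry: { isEnabled: true, recordInputs: false, recordOutputs: true },
  attributes: {
    // plain values pass through unchanged:
    "ai.settings.mode": "json",
    // input-wrapped values are dropped because recordInputs === false:
    "ai.prompt": { input: () => JSON.stringify({ prompt: "Hello" }) },
    // output-wrapped values are kept and only evaluated here, so the
    // JSON.stringify cost is avoided entirely when recording is disabled:
    "ai.result.text": { output: () => "Hi!" }
  }
});
// attributes => { "ai.settings.mode": "json", "ai.result.text": "Hi!" }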