ai 3.2.40 → 3.2.41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -164,6 +164,33 @@ function recordSpan({
   });
 }
 
+// core/telemetry/select-telemetry-attributes.ts
+function selectTelemetryAttributes({
+  telemetry,
+  attributes
+}) {
+  return Object.entries(attributes).reduce((attributes2, [key, value]) => {
+    if (value === void 0) {
+      return attributes2;
+    }
+    if (typeof value === "object" && "input" in value && typeof value.input === "function") {
+      if ((telemetry == null ? void 0 : telemetry.recordInputs) === false) {
+        return attributes2;
+      }
+      const result = value.input();
+      return result === void 0 ? attributes2 : { ...attributes2, [key]: result };
+    }
+    if (typeof value === "object" && "output" in value && typeof value.output === "function") {
+      if ((telemetry == null ? void 0 : telemetry.recordOutputs) === false) {
+        return attributes2;
+      }
+      const result = value.output();
+      return result === void 0 ? attributes2 : { ...attributes2, [key]: result };
+    }
+    return { ...attributes2, [key]: value };
+  }, {});
+}
+
 // core/util/retry-with-exponential-backoff.ts
 import { APICallError, RetryError } from "@ai-sdk/provider";
 import { getErrorMessage, isAbortError } from "@ai-sdk/provider-utils";
@@ -246,11 +273,13 @@ async function embed({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.embed",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.value": JSON.stringify(value)
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        "ai.value": { input: () => JSON.stringify(value) }
+      }
+    }),
     tracer,
     fn: async (span) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
@@ -259,11 +288,14 @@ async function embed({
           // nested spans to align with the embedMany telemetry data:
           recordSpan({
             name: "ai.embed.doEmbed",
-            attributes: {
-              ...baseTelemetryAttributes,
-              // specific settings that only make sense on the outer level:
-              "ai.values": [JSON.stringify(value)]
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                // specific settings that only make sense on the outer level:
+                "ai.values": { input: () => [JSON.stringify(value)] }
+              }
+            }),
             tracer,
             fn: async (doEmbedSpan) => {
               var _a2;
@@ -274,12 +306,19 @@ async function embed({
               });
               const embedding2 = modelResponse.embeddings[0];
               const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
-              doEmbedSpan.setAttributes({
-                "ai.embeddings": modelResponse.embeddings.map(
-                  (embedding3) => JSON.stringify(embedding3)
-                ),
-                "ai.usage.tokens": usage2.tokens
-              });
+              doEmbedSpan.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.embeddings": {
+                      output: () => modelResponse.embeddings.map(
+                        (embedding3) => JSON.stringify(embedding3)
+                      )
+                    },
+                    "ai.usage.tokens": usage2.tokens
+                  }
+                })
+              );
               return {
                 embedding: embedding2,
                 usage: usage2,
@@ -289,10 +328,15 @@ async function embed({
           })
         )
       );
-      span.setAttributes({
-        "ai.embedding": JSON.stringify(embedding),
-        "ai.usage.tokens": usage.tokens
-      });
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.embedding": { output: () => JSON.stringify(embedding) },
+            "ai.usage.tokens": usage.tokens
+          }
+        })
+      );
       return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
     }
   });
@@ -338,11 +382,16 @@ async function embedMany({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.embedMany",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.values": values.map((value) => JSON.stringify(value))
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.values": {
+          input: () => values.map((value) => JSON.stringify(value))
+        }
+      }
+    }),
     tracer,
     fn: async (span) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
@@ -351,11 +400,16 @@ async function embedMany({
         const { embeddings: embeddings2, usage } = await retry(() => {
           return recordSpan({
             name: "ai.embedMany.doEmbed",
-            attributes: {
-              ...baseTelemetryAttributes,
-              // specific settings that only make sense on the outer level:
-              "ai.values": values.map((value) => JSON.stringify(value))
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                // specific settings that only make sense on the outer level:
+                "ai.values": {
+                  input: () => values.map((value) => JSON.stringify(value))
+                }
+              }
+            }),
             tracer,
             fn: async (doEmbedSpan) => {
               var _a2;
@@ -366,22 +420,32 @@ async function embedMany({
               });
               const embeddings3 = modelResponse.embeddings;
               const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
-              doEmbedSpan.setAttributes({
-                "ai.embeddings": embeddings3.map(
-                  (embedding) => JSON.stringify(embedding)
-                ),
-                "ai.usage.tokens": usage2.tokens
-              });
+              doEmbedSpan.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.embeddings": {
+                      output: () => embeddings3.map((embedding) => JSON.stringify(embedding))
+                    },
+                    "ai.usage.tokens": usage2.tokens
+                  }
+                })
+              );
               return { embeddings: embeddings3, usage: usage2 };
             }
           });
         });
-        span.setAttributes({
-          "ai.embeddings": embeddings2.map(
-            (embedding) => JSON.stringify(embedding)
-          ),
-          "ai.usage.tokens": usage.tokens
-        });
+        span.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.embeddings": {
+                output: () => embeddings2.map((embedding) => JSON.stringify(embedding))
+              },
+              "ai.usage.tokens": usage.tokens
+            }
+          })
+        );
         return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
       }
       const valueChunks = splitArray(values, maxEmbeddingsPerCall);
@@ -391,11 +455,16 @@ async function embedMany({
         const { embeddings: responseEmbeddings, usage } = await retry(() => {
           return recordSpan({
             name: "ai.embedMany.doEmbed",
-            attributes: {
-              ...baseTelemetryAttributes,
-              // specific settings that only make sense on the outer level:
-              "ai.values": chunk.map((value) => JSON.stringify(value))
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                // specific settings that only make sense on the outer level:
+                "ai.values": {
+                  input: () => chunk.map((value) => JSON.stringify(value))
+                }
+              }
+            }),
             tracer,
             fn: async (doEmbedSpan) => {
              var _a2;
@@ -406,12 +475,17 @@ async function embedMany({
               });
               const embeddings2 = modelResponse.embeddings;
               const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
-              doEmbedSpan.setAttributes({
-                "ai.embeddings": embeddings2.map(
-                  (embedding) => JSON.stringify(embedding)
-                ),
-                "ai.usage.tokens": usage2.tokens
-              });
+              doEmbedSpan.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.embeddings": {
+                      output: () => embeddings2.map((embedding) => JSON.stringify(embedding))
+                    },
+                    "ai.usage.tokens": usage2.tokens
+                  }
+                })
+              );
               return { embeddings: embeddings2, usage: usage2 };
             }
           });
@@ -419,10 +493,17 @@ async function embedMany({
         embeddings.push(...responseEmbeddings);
         tokens += usage.tokens;
       }
-      span.setAttributes({
-        "ai.embeddings": embeddings.map((embedding) => JSON.stringify(embedding)),
-        "ai.usage.tokens": tokens
-      });
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.embeddings": {
+              output: () => embeddings.map((embedding) => JSON.stringify(embedding))
+            },
+            "ai.usage.tokens": tokens
+          }
+        })
+      );
       return new DefaultEmbedManyResult({
         values,
         embeddings,
@@ -982,13 +1063,20 @@ async function generateObject({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.generateObject",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.prompt": JSON.stringify({ system, prompt, messages }),
-      "ai.schema": JSON.stringify(schema.jsonSchema),
-      "ai.settings.mode": mode
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        },
+        "ai.schema": {
+          input: () => JSON.stringify(schema.jsonSchema)
+        },
+        "ai.settings.mode": mode
+      }
+    }),
     tracer,
     fn: async (span) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
@@ -1019,12 +1107,19 @@ async function generateObject({
       const generateResult = await retry(
         () => recordSpan({
           name: "ai.generateObject.doGenerate",
-          attributes: {
-            ...baseTelemetryAttributes,
-            "ai.prompt.format": inputFormat,
-            "ai.prompt.messages": JSON.stringify(promptMessages),
-            "ai.settings.mode": mode
-          },
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": {
+                input: () => inputFormat
+              },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(promptMessages)
+              },
+              "ai.settings.mode": mode
+            }
+          }),
           tracer,
           fn: async (span2) => {
             const result2 = await model.doGenerate({
@@ -1038,12 +1133,17 @@ async function generateObject({
             if (result2.text === void 0) {
               throw new NoObjectGeneratedError();
             }
-            span2.setAttributes({
-              "ai.finishReason": result2.finishReason,
-              "ai.usage.promptTokens": result2.usage.promptTokens,
-              "ai.usage.completionTokens": result2.usage.completionTokens,
-              "ai.result.object": result2.text
-            });
+            span2.setAttributes(
+              selectTelemetryAttributes({
+                telemetry,
+                attributes: {
+                  "ai.finishReason": result2.finishReason,
+                  "ai.usage.promptTokens": result2.usage.promptTokens,
+                  "ai.usage.completionTokens": result2.usage.completionTokens,
+                  "ai.result.object": { output: () => result2.text }
+                }
+              })
+            );
             return { ...result2, objectText: result2.text };
           }
         })
@@ -1070,12 +1170,19 @@ async function generateObject({
       const generateResult = await retry(
         () => recordSpan({
           name: "ai.generateObject.doGenerate",
-          attributes: {
-            ...baseTelemetryAttributes,
-            "ai.prompt.format": inputFormat,
-            "ai.prompt.messages": JSON.stringify(promptMessages),
-            "ai.settings.mode": mode
-          },
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": {
+                input: () => inputFormat
+              },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(promptMessages)
+              },
+              "ai.settings.mode": mode
+            }
+          }),
           tracer,
           fn: async (span2) => {
             var _a2, _b;
@@ -1099,12 +1206,17 @@ async function generateObject({
             if (objectText === void 0) {
               throw new NoObjectGeneratedError();
             }
-            span2.setAttributes({
-              "ai.finishReason": result2.finishReason,
-              "ai.usage.promptTokens": result2.usage.promptTokens,
-              "ai.usage.completionTokens": result2.usage.completionTokens,
-              "ai.result.object": objectText
-            });
+            span2.setAttributes(
+              selectTelemetryAttributes({
+                telemetry,
+                attributes: {
+                  "ai.finishReason": result2.finishReason,
+                  "ai.usage.promptTokens": result2.usage.promptTokens,
+                  "ai.usage.completionTokens": result2.usage.completionTokens,
+                  "ai.result.object": { output: () => objectText }
+                }
+              })
+            );
             return { ...result2, objectText };
           }
         })
@@ -1131,12 +1243,19 @@ async function generateObject({
       if (!parseResult.success) {
        throw parseResult.error;
       }
-      span.setAttributes({
-        "ai.finishReason": finishReason,
-        "ai.usage.promptTokens": usage.promptTokens,
-        "ai.usage.completionTokens": usage.completionTokens,
-        "ai.result.object": JSON.stringify(parseResult.value)
-      });
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.finishReason": finishReason,
+            "ai.usage.promptTokens": usage.promptTokens,
+            "ai.usage.completionTokens": usage.completionTokens,
+            "ai.result.object": {
+              output: () => JSON.stringify(parseResult.value)
+            }
+          }
+        })
+      );
       return new DefaultGenerateObjectResult({
         object: parseResult.value,
         finishReason,
@@ -1259,13 +1378,18 @@ async function streamObject({
   const schema = asSchema(inputSchema);
   return recordSpan({
     name: "ai.streamObject",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.prompt": JSON.stringify({ system, prompt, messages }),
-      "ai.schema": JSON.stringify(schema.jsonSchema),
-      "ai.settings.mode": mode
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        },
+        "ai.schema": { input: () => JSON.stringify(schema.jsonSchema) },
+        "ai.settings.mode": mode
+      }
+    }),
     tracer,
     endWhenDone: false,
     fn: async (rootSpan) => {
@@ -1366,20 +1490,25 @@ async function streamObject({
       } = await retry(
         () => recordSpan({
           name: "ai.streamObject.doStream",
-          attributes: {
-            ...baseTelemetryAttributes,
-            "ai.prompt.format": callOptions.inputFormat,
-            "ai.prompt.messages": JSON.stringify(callOptions.prompt),
-            "ai.settings.mode": mode
-          },
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": {
+                input: () => callOptions.inputFormat
+              },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(callOptions.prompt)
+              },
+              "ai.settings.mode": mode
+            }
+          }),
           tracer,
           endWhenDone: false,
-          fn: async (doStreamSpan2) => {
-            return {
-              result: await model.doStream(callOptions),
-              doStreamSpan: doStreamSpan2
-            };
-          }
+          fn: async (doStreamSpan2) => ({
+            result: await model.doStream(callOptions),
+            doStreamSpan: doStreamSpan2
+          })
         })
       );
       return new DefaultStreamObjectResult({
@@ -1389,7 +1518,8 @@ async function streamObject({
         schema,
         onFinish,
         rootSpan,
-        doStreamSpan
+        doStreamSpan,
+        telemetry
       });
     }
   });
@@ -1402,7 +1532,8 @@ var DefaultStreamObjectResult = class {
     schema,
     onFinish,
     rootSpan,
-    doStreamSpan
+    doStreamSpan,
+    telemetry
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -1484,17 +1615,31 @@ var DefaultStreamObjectResult = class {
           completionTokens: NaN,
           totalTokens: NaN
         };
-        doStreamSpan.setAttributes({
-          "ai.usage.promptTokens": finalUsage.promptTokens,
-          "ai.usage.completionTokens": finalUsage.completionTokens,
-          "ai.result.object": JSON.stringify(object)
-        });
+        doStreamSpan.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.object": {
+                output: () => JSON.stringify(object)
+              }
+            }
+          })
+        );
         doStreamSpan.end();
-        rootSpan.setAttributes({
-          "ai.usage.promptTokens": finalUsage.promptTokens,
-          "ai.usage.completionTokens": finalUsage.completionTokens,
-          "ai.result.object": JSON.stringify(object)
-        });
+        rootSpan.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.object": {
+                output: () => JSON.stringify(object)
+              }
+            }
+          })
+        );
         await (onFinish == null ? void 0 : onFinish({
           usage: finalUsage,
           object,
@@ -1692,12 +1837,17 @@ async function generateText({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.generateText",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.prompt": JSON.stringify({ system, prompt, messages }),
-      "ai.settings.maxToolRoundtrips": maxToolRoundtrips
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        },
+        "ai.settings.maxToolRoundtrips": maxToolRoundtrips
+      }
+    }),
     tracer,
     fn: async (span) => {
       var _a2, _b, _c, _d;
@@ -1732,11 +1882,16 @@ async function generateText({
         currentModelResponse = await retry(
           () => recordSpan({
             name: "ai.generateText.doGenerate",
-            attributes: {
-              ...baseTelemetryAttributes,
-              "ai.prompt.format": currentInputFormat,
-              "ai.prompt.messages": JSON.stringify(promptMessages)
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": { input: () => currentInputFormat },
+                "ai.prompt.messages": {
+                  input: () => JSON.stringify(promptMessages)
+                }
+              }
+            }),
             tracer,
             fn: async (span2) => {
               const result = await model.doGenerate({
@@ -1747,13 +1902,22 @@ async function generateText({
                 abortSignal,
                 headers
               });
-              span2.setAttributes({
-                "ai.finishReason": result.finishReason,
-                "ai.usage.promptTokens": result.usage.promptTokens,
-                "ai.usage.completionTokens": result.usage.completionTokens,
-                "ai.result.text": result.text,
-                "ai.result.toolCalls": JSON.stringify(result.toolCalls)
-              });
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.finishReason": result.finishReason,
+                    "ai.usage.promptTokens": result.usage.promptTokens,
+                    "ai.usage.completionTokens": result.usage.completionTokens,
+                    "ai.result.text": {
+                      output: () => result.text
+                    },
+                    "ai.result.toolCalls": {
+                      output: () => JSON.stringify(result.toolCalls)
+                    }
+                  }
+                })
+              );
               return result;
             }
           })
@@ -1764,7 +1928,8 @@ async function generateText({
         currentToolResults = tools == null ? [] : await executeTools({
           toolCalls: currentToolCalls,
           tools,
-          tracer
+          tracer,
+          telemetry
         });
         const currentUsage = calculateCompletionTokenUsage(
           currentModelResponse.usage
@@ -1798,13 +1963,22 @@ async function generateText({
         currentToolResults.length === currentToolCalls.length && // the number of roundtrips is less than the maximum:
         roundtripCount++ < maxToolRoundtrips
       );
-      span.setAttributes({
-        "ai.finishReason": currentModelResponse.finishReason,
-        "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
-        "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
-        "ai.result.text": currentModelResponse.text,
-        "ai.result.toolCalls": JSON.stringify(currentModelResponse.toolCalls)
-      });
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.finishReason": currentModelResponse.finishReason,
+            "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
+            "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
+            "ai.result.text": {
+              output: () => currentModelResponse.text
+            },
+            "ai.result.toolCalls": {
+              output: () => JSON.stringify(currentModelResponse.toolCalls)
+            }
+          }
+        })
+      );
       return new DefaultGenerateTextResult({
         // Always return a string so that the caller doesn't have to check for undefined.
         // If they need to check if the model did not return any text,
@@ -1826,7 +2000,8 @@ async function generateText({
 async function executeTools({
   toolCalls,
   tools,
-  tracer
+  tracer,
+  telemetry
 }) {
   const toolResults = await Promise.all(
     toolCalls.map(async (toolCall) => {
@@ -1836,18 +2011,30 @@ async function executeTools({
       }
       const result = await recordSpan({
         name: "ai.toolCall",
-        attributes: {
-          "ai.toolCall.name": toolCall.toolName,
-          "ai.toolCall.id": toolCall.toolCallId,
-          "ai.toolCall.args": JSON.stringify(toolCall.args)
-        },
+        attributes: selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.toolCall.name": toolCall.toolName,
+            "ai.toolCall.id": toolCall.toolCallId,
+            "ai.toolCall.args": {
+              output: () => JSON.stringify(toolCall.args)
+            }
+          }
+        }),
         tracer,
         fn: async (span) => {
          const result2 = await tool2.execute(toolCall.args);
          try {
-            span.setAttributes({
-              "ai.toolCall.result": JSON.stringify(result2)
-            });
+            span.setAttributes(
+              selectTelemetryAttributes({
+                telemetry,
+                attributes: {
+                  "ai.toolCall.result": {
+                    output: () => JSON.stringify(result2)
+                  }
+                }
+              })
+            );
          } catch (ignored) {
          }
          return result2;
@@ -1999,7 +2186,8 @@ function runToolsTransformation({
   tools,
   generatorStream,
   toolCallStreaming,
-  tracer
+  tracer,
+  telemetry
 }) {
   let canClose = false;
   const outstandingToolCalls = /* @__PURE__ */ new Set();
@@ -2069,11 +2257,16 @@ function runToolsTransformation({
         outstandingToolCalls.add(toolExecutionId);
         recordSpan({
           name: "ai.toolCall",
-          attributes: {
-            "ai.toolCall.name": toolCall.toolName,
-            "ai.toolCall.id": toolCall.toolCallId,
-            "ai.toolCall.args": JSON.stringify(toolCall.args)
-          },
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.toolCall.name": toolCall.toolName,
+              "ai.toolCall.id": toolCall.toolCallId,
+              "ai.toolCall.args": {
+                output: () => JSON.stringify(toolCall.args)
+              }
+            }
+          }),
           tracer,
           fn: async (span) => tool2.execute(toolCall.args).then(
             (result) => {
@@ -2087,9 +2280,16 @@ function runToolsTransformation({
                 toolResultsStreamController.close();
               }
               try {
-                span.setAttributes({
-                  "ai.toolCall.result": JSON.stringify(result)
-                });
+                span.setAttributes(
+                  selectTelemetryAttributes({
+                    telemetry,
+                    attributes: {
+                      "ai.toolCall.result": {
+                        output: () => JSON.stringify(result)
+                      }
+                    }
+                  })
+                );
               } catch (ignored) {
               }
             },
@@ -2190,11 +2390,16 @@ async function streamText({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.streamText",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.prompt": JSON.stringify({ system, prompt, messages })
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        }
+      }
+    }),
     tracer,
     endWhenDone: false,
     fn: async (rootSpan) => {
@@ -2210,11 +2415,18 @@ async function streamText({
       } = await retry(
         () => recordSpan({
           name: "ai.streamText.doStream",
-          attributes: {
-            ...baseTelemetryAttributes,
-            "ai.prompt.format": validatedPrompt.type,
-            "ai.prompt.messages": JSON.stringify(promptMessages)
-          },
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": {
+                input: () => validatedPrompt.type
+              },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(promptMessages)
+              }
+            }
+          }),
           tracer,
           endWhenDone: false,
           fn: async (doStreamSpan2) => {
@@ -2240,13 +2452,15 @@ async function streamText({
           tools,
           generatorStream: stream,
           toolCallStreaming,
-          tracer
+          tracer,
+          telemetry
         }),
         warnings,
         rawResponse,
         onFinish,
         rootSpan,
-        doStreamSpan
+        doStreamSpan,
+        telemetry
       });
     }
   });
@@ -2258,7 +2472,8 @@ var DefaultStreamTextResult = class {
     rawResponse,
     onFinish,
     rootSpan,
-    doStreamSpan
+    doStreamSpan,
+    telemetry
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -2338,21 +2553,31 @@ var DefaultStreamTextResult = class {
         };
         const finalFinishReason = finishReason != null ? finishReason : "unknown";
         const telemetryToolCalls = toolCalls.length > 0 ? JSON.stringify(toolCalls) : void 0;
-        doStreamSpan.setAttributes({
-          "ai.finishReason": finalFinishReason,
-          "ai.usage.promptTokens": finalUsage.promptTokens,
-          "ai.usage.completionTokens": finalUsage.completionTokens,
-          "ai.result.text": text,
-          "ai.result.toolCalls": telemetryToolCalls
-        });
+        doStreamSpan.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.finishReason": finalFinishReason,
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.text": { output: () => text },
+              "ai.result.toolCalls": { output: () => telemetryToolCalls }
+            }
+          })
+        );
         doStreamSpan.end();
-        rootSpan.setAttributes({
-          "ai.finishReason": finalFinishReason,
-          "ai.usage.promptTokens": finalUsage.promptTokens,
-          "ai.usage.completionTokens": finalUsage.completionTokens,
-          "ai.result.text": text,
-          "ai.result.toolCalls": telemetryToolCalls
-        });
+        rootSpan.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.finishReason": finalFinishReason,
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.text": { output: () => text },
+              "ai.result.toolCalls": { output: () => telemetryToolCalls }
+            }
+          })
+        );
         resolveToolResults(toolResults);
         await ((_a = self.onFinish) == null ? void 0 : _a.call(self, {
           finishReason: finalFinishReason,
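
What changed, in short: 3.2.41 introduces a selectTelemetryAttributes helper (first hunk above) and threads the telemetry settings object into every span and tool call. Attribute values that used to be recorded unconditionally are now wrapped as { input: () => ... } or { output: () => ... }: prompts, input values, and schemas are skipped when telemetry.recordInputs === false, and generated text, objects, embeddings, and tool results are skipped when telemetry.recordOutputs === false. Because the values are thunks, they are also only serialized when they are actually recorded. A minimal sketch of the helper's behavior (the call below is illustrative and not part of the package; selectTelemetryAttributes itself appears in full in the first hunk):

const attributes = selectTelemetryAttributes({
  telemetry: { recordInputs: false }, // the new opt-out flags
  attributes: {
    // plain values always pass through unchanged:
    "ai.usage.tokens": 42,
    // input-wrapped values are dropped because recordInputs === false:
    "ai.prompt": { input: () => JSON.stringify({ prompt: "hi" }) },
    // output-wrapped values are kept (recordOutputs is not false), and the
    // thunk is only evaluated here, at record time:
    "ai.result.text": { output: () => "hello" }
  }
});
// => { "ai.usage.tokens": 42, "ai.result.text": "hello" }

In the public API, these settings ride along on the experimental_telemetry option of generateText, streamText, generateObject, streamObject, embed, and embedMany at this version.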