ai 3.2.40 → 3.2.42

package/dist/index.mjs CHANGED
@@ -164,6 +164,33 @@ function recordSpan({
   });
 }
 
+// core/telemetry/select-telemetry-attributes.ts
+function selectTelemetryAttributes({
+  telemetry,
+  attributes
+}) {
+  return Object.entries(attributes).reduce((attributes2, [key, value]) => {
+    if (value === void 0) {
+      return attributes2;
+    }
+    if (typeof value === "object" && "input" in value && typeof value.input === "function") {
+      if ((telemetry == null ? void 0 : telemetry.recordInputs) === false) {
+        return attributes2;
+      }
+      const result = value.input();
+      return result === void 0 ? attributes2 : { ...attributes2, [key]: result };
+    }
+    if (typeof value === "object" && "output" in value && typeof value.output === "function") {
+      if ((telemetry == null ? void 0 : telemetry.recordOutputs) === false) {
+        return attributes2;
+      }
+      const result = value.output();
+      return result === void 0 ? attributes2 : { ...attributes2, [key]: result };
+    }
+    return { ...attributes2, [key]: value };
+  }, {});
+}
+
 // core/util/retry-with-exponential-backoff.ts
 import { APICallError, RetryError } from "@ai-sdk/provider";
 import { getErrorMessage, isAbortError } from "@ai-sdk/provider-utils";
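The helper above is the core of this release: telemetry attribute values may now be plain values or `{ input: () => ... }` / `{ output: () => ... }` thunks, which are evaluated lazily and dropped entirely when `telemetry.recordInputs` / `telemetry.recordOutputs` is `false`. A minimal sketch of the resulting behavior; the `TelemetrySettings` type here is illustrative, inferred from the checks in the function, not an export of this file:

```ts
// Illustrative only: shape inferred from the recordInputs/recordOutputs checks above.
type TelemetrySettings = {
  isEnabled?: boolean;
  recordInputs?: boolean;
  recordOutputs?: boolean;
};

const telemetry: TelemetrySettings = { isEnabled: true, recordInputs: false };

const attributes = selectTelemetryAttributes({
  telemetry,
  attributes: {
    "ai.model.id": "some-model",                        // plain value: always kept
    "ai.prompt": { input: () => JSON.stringify("hi") }, // dropped: recordInputs === false
    "ai.result.text": { output: () => "hello" },        // kept: recordOutputs not disabled
    "ai.optional": undefined                            // dropped: undefined is filtered out
  }
});
// => { "ai.model.id": "some-model", "ai.result.text": "hello" }
```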
@@ -246,11 +273,13 @@ async function embed({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.embed",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.value": JSON.stringify(value)
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        "ai.value": { input: () => JSON.stringify(value) }
+      }
+    }),
     tracer,
     fn: async (span) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
@@ -259,11 +288,14 @@ async function embed({
           // nested spans to align with the embedMany telemetry data:
           recordSpan({
             name: "ai.embed.doEmbed",
-            attributes: {
-              ...baseTelemetryAttributes,
-              // specific settings that only make sense on the outer level:
-              "ai.values": [JSON.stringify(value)]
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                // specific settings that only make sense on the outer level:
+                "ai.values": { input: () => [JSON.stringify(value)] }
+              }
+            }),
             tracer,
             fn: async (doEmbedSpan) => {
               var _a2;
@@ -274,12 +306,19 @@ async function embed({
               });
               const embedding2 = modelResponse.embeddings[0];
               const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
-              doEmbedSpan.setAttributes({
-                "ai.embeddings": modelResponse.embeddings.map(
-                  (embedding3) => JSON.stringify(embedding3)
-                ),
-                "ai.usage.tokens": usage2.tokens
-              });
+              doEmbedSpan.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.embeddings": {
+                      output: () => modelResponse.embeddings.map(
+                        (embedding3) => JSON.stringify(embedding3)
+                      )
+                    },
+                    "ai.usage.tokens": usage2.tokens
+                  }
+                })
+              );
               return {
                 embedding: embedding2,
                 usage: usage2,
@@ -289,10 +328,15 @@ async function embed({
           })
         )
       );
-      span.setAttributes({
-        "ai.embedding": JSON.stringify(embedding),
-        "ai.usage.tokens": usage.tokens
-      });
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.embedding": { output: () => JSON.stringify(embedding) },
+            "ai.usage.tokens": usage.tokens
+          }
+        })
+      );
       return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
     }
   });
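All of the `embed` spans now build their attributes through `selectTelemetryAttributes`, so callers can keep tracing enabled while withholding recorded values. A hedged usage sketch; the provider and model are illustrative choices, not part of this diff:

```ts
import { embed } from "ai";
import { openai } from "@ai-sdk/openai"; // illustrative provider

const { embedding, usage } = await embed({
  model: openai.embedding("text-embedding-3-small"), // illustrative model
  value: "sunny day at the beach",
  experimental_telemetry: {
    isEnabled: true,
    // With recordInputs disabled, the "ai.value"/"ai.values" input
    // attributes above are omitted from the ai.embed spans.
    recordInputs: false
  }
});
```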
@@ -338,11 +382,16 @@ async function embedMany({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.embedMany",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.values": values.map((value) => JSON.stringify(value))
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.values": {
+          input: () => values.map((value) => JSON.stringify(value))
+        }
+      }
+    }),
     tracer,
     fn: async (span) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
@@ -351,11 +400,16 @@ async function embedMany({
         const { embeddings: embeddings2, usage } = await retry(() => {
           return recordSpan({
             name: "ai.embedMany.doEmbed",
-            attributes: {
-              ...baseTelemetryAttributes,
-              // specific settings that only make sense on the outer level:
-              "ai.values": values.map((value) => JSON.stringify(value))
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                // specific settings that only make sense on the outer level:
+                "ai.values": {
+                  input: () => values.map((value) => JSON.stringify(value))
+                }
+              }
+            }),
             tracer,
             fn: async (doEmbedSpan) => {
               var _a2;
@@ -366,22 +420,32 @@ async function embedMany({
               });
               const embeddings3 = modelResponse.embeddings;
               const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
-              doEmbedSpan.setAttributes({
-                "ai.embeddings": embeddings3.map(
-                  (embedding) => JSON.stringify(embedding)
-                ),
-                "ai.usage.tokens": usage2.tokens
-              });
+              doEmbedSpan.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.embeddings": {
+                      output: () => embeddings3.map((embedding) => JSON.stringify(embedding))
+                    },
+                    "ai.usage.tokens": usage2.tokens
+                  }
+                })
+              );
               return { embeddings: embeddings3, usage: usage2 };
             }
           });
         });
-        span.setAttributes({
-          "ai.embeddings": embeddings2.map(
-            (embedding) => JSON.stringify(embedding)
-          ),
-          "ai.usage.tokens": usage.tokens
-        });
+        span.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.embeddings": {
+                output: () => embeddings2.map((embedding) => JSON.stringify(embedding))
+              },
+              "ai.usage.tokens": usage.tokens
+            }
+          })
+        );
         return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
       }
       const valueChunks = splitArray(values, maxEmbeddingsPerCall);
@@ -391,11 +455,16 @@ async function embedMany({
         const { embeddings: responseEmbeddings, usage } = await retry(() => {
           return recordSpan({
             name: "ai.embedMany.doEmbed",
-            attributes: {
-              ...baseTelemetryAttributes,
-              // specific settings that only make sense on the outer level:
-              "ai.values": chunk.map((value) => JSON.stringify(value))
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                // specific settings that only make sense on the outer level:
+                "ai.values": {
+                  input: () => chunk.map((value) => JSON.stringify(value))
+                }
+              }
+            }),
             tracer,
             fn: async (doEmbedSpan) => {
               var _a2;
@@ -406,12 +475,17 @@ async function embedMany({
               });
               const embeddings2 = modelResponse.embeddings;
               const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
-              doEmbedSpan.setAttributes({
-                "ai.embeddings": embeddings2.map(
-                  (embedding) => JSON.stringify(embedding)
-                ),
-                "ai.usage.tokens": usage2.tokens
-              });
+              doEmbedSpan.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.embeddings": {
+                      output: () => embeddings2.map((embedding) => JSON.stringify(embedding))
+                    },
+                    "ai.usage.tokens": usage2.tokens
+                  }
+                })
+              );
               return { embeddings: embeddings2, usage: usage2 };
             }
           });
@@ -419,10 +493,17 @@ async function embedMany({
         embeddings.push(...responseEmbeddings);
         tokens += usage.tokens;
       }
-      span.setAttributes({
-        "ai.embeddings": embeddings.map((embedding) => JSON.stringify(embedding)),
-        "ai.usage.tokens": tokens
-      });
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.embeddings": {
+              output: () => embeddings.map((embedding) => JSON.stringify(embedding))
+            },
+            "ai.usage.tokens": tokens
+          }
+        })
+      );
       return new DefaultEmbedManyResult({
         values,
         embeddings,
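`embedMany` follows the same pattern on both the single-call and chunked paths. Because `"ai.values"` and `"ai.embeddings"` are now thunks, the `JSON.stringify` serialization work is skipped outright when recording is disabled, which matters for large batches. A reduced model of that laziness, standalone and not code from the package:

```ts
// Reduced model: the serializer only runs if the value is actually recorded.
function lazyAttribute<T>(thunk: () => T, record: boolean | undefined): T | undefined {
  return record === false ? undefined : thunk();
}

const values = [["sunny", "day"], ["rainy", "night"]];
const attr = lazyAttribute(() => values.map((v) => JSON.stringify(v)), false);
// attr === undefined, and JSON.stringify never executed
```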
@@ -982,13 +1063,20 @@ async function generateObject({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.generateObject",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.prompt": JSON.stringify({ system, prompt, messages }),
-      "ai.schema": JSON.stringify(schema.jsonSchema),
-      "ai.settings.mode": mode
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        },
+        "ai.schema": {
+          input: () => JSON.stringify(schema.jsonSchema)
+        },
+        "ai.settings.mode": mode
+      }
+    }),
     tracer,
     fn: async (span) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
@@ -1019,12 +1107,25 @@ async function generateObject({
         const generateResult = await retry(
           () => recordSpan({
             name: "ai.generateObject.doGenerate",
-            attributes: {
-              ...baseTelemetryAttributes,
-              "ai.prompt.format": inputFormat,
-              "ai.prompt.messages": JSON.stringify(promptMessages),
-              "ai.settings.mode": mode
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": {
+                  input: () => inputFormat
+                },
+                "ai.prompt.messages": {
+                  input: () => JSON.stringify(promptMessages)
+                },
+                "ai.settings.mode": mode,
+                // standardized gen-ai llm span attributes:
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.system": model.provider,
+                "gen_ai.request.max_tokens": settings.maxTokens,
+                "gen_ai.request.temperature": settings.temperature,
+                "gen_ai.request.top_p": settings.topP
+              }
+            }),
             tracer,
             fn: async (span2) => {
               const result2 = await model.doGenerate({
@@ -1038,12 +1139,21 @@ async function generateObject({
               if (result2.text === void 0) {
                 throw new NoObjectGeneratedError();
               }
-              span2.setAttributes({
-                "ai.finishReason": result2.finishReason,
-                "ai.usage.promptTokens": result2.usage.promptTokens,
-                "ai.usage.completionTokens": result2.usage.completionTokens,
-                "ai.result.object": result2.text
-              });
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.finishReason": result2.finishReason,
+                    "ai.usage.promptTokens": result2.usage.promptTokens,
+                    "ai.usage.completionTokens": result2.usage.completionTokens,
+                    "ai.result.object": { output: () => result2.text },
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.response.finish_reasons": [result2.finishReason],
+                    "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
+                    "gen_ai.usage.completion_tokens": result2.usage.completionTokens
+                  }
+                })
+              );
               return { ...result2, objectText: result2.text };
             }
           })
@@ -1070,12 +1180,25 @@ async function generateObject({
         const generateResult = await retry(
           () => recordSpan({
             name: "ai.generateObject.doGenerate",
-            attributes: {
-              ...baseTelemetryAttributes,
-              "ai.prompt.format": inputFormat,
-              "ai.prompt.messages": JSON.stringify(promptMessages),
-              "ai.settings.mode": mode
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": {
+                  input: () => inputFormat
+                },
+                "ai.prompt.messages": {
+                  input: () => JSON.stringify(promptMessages)
+                },
+                "ai.settings.mode": mode,
+                // standardized gen-ai llm span attributes:
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.system": model.provider,
+                "gen_ai.request.max_tokens": settings.maxTokens,
+                "gen_ai.request.temperature": settings.temperature,
+                "gen_ai.request.top_p": settings.topP
+              }
+            }),
             tracer,
             fn: async (span2) => {
               var _a2, _b;
@@ -1099,12 +1222,21 @@ async function generateObject({
               if (objectText === void 0) {
                 throw new NoObjectGeneratedError();
               }
-              span2.setAttributes({
-                "ai.finishReason": result2.finishReason,
-                "ai.usage.promptTokens": result2.usage.promptTokens,
-                "ai.usage.completionTokens": result2.usage.completionTokens,
-                "ai.result.object": objectText
-              });
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.finishReason": result2.finishReason,
+                    "ai.usage.promptTokens": result2.usage.promptTokens,
+                    "ai.usage.completionTokens": result2.usage.completionTokens,
+                    "ai.result.object": { output: () => objectText },
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.response.finish_reasons": [result2.finishReason],
+                    "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
+                    "gen_ai.usage.completion_tokens": result2.usage.completionTokens
+                  }
+                })
+              );
               return { ...result2, objectText };
             }
           })
@@ -1131,12 +1263,19 @@ async function generateObject({
       if (!parseResult.success) {
         throw parseResult.error;
       }
-      span.setAttributes({
-        "ai.finishReason": finishReason,
-        "ai.usage.promptTokens": usage.promptTokens,
-        "ai.usage.completionTokens": usage.completionTokens,
-        "ai.result.object": JSON.stringify(parseResult.value)
-      });
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.finishReason": finishReason,
+            "ai.usage.promptTokens": usage.promptTokens,
+            "ai.usage.completionTokens": usage.completionTokens,
+            "ai.result.object": {
+              output: () => JSON.stringify(parseResult.value)
+            }
+          }
+        })
+      );
       return new DefaultGenerateObjectResult({
         object: parseResult.value,
         finishReason,
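Beyond the lazy input/output wrappers, the `doGenerate` spans now also emit the standardized `gen_ai.*` attributes from the OpenTelemetry generative-AI semantic conventions next to the vendor-prefixed `ai.*` attributes. A sketch of how those spans can be observed, assuming the OpenTelemetry JS SDK 1.x API and illustrative provider/schema choices:

```ts
import {
  ConsoleSpanExporter,
  NodeTracerProvider,
  SimpleSpanProcessor
} from "@opentelemetry/sdk-trace-node";
import { generateObject } from "ai";
import { openai } from "@ai-sdk/openai"; // illustrative provider
import { z } from "zod";

// Register a global tracer provider; getTracer() in this bundle resolves
// it through @opentelemetry/api.
const provider = new NodeTracerProvider();
provider.addSpanProcessor(new SimpleSpanProcessor(new ConsoleSpanExporter()));
provider.register();

const { object } = await generateObject({
  model: openai("gpt-4o"), // illustrative model
  schema: z.object({ city: z.string() }),
  prompt: "Name a city.",
  experimental_telemetry: { isEnabled: true }
});
// The exported ai.generateObject.doGenerate span now carries e.g.
// gen_ai.request.model, gen_ai.system, gen_ai.response.finish_reasons,
// gen_ai.usage.prompt_tokens and gen_ai.usage.completion_tokens.
```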
@@ -1259,13 +1398,18 @@ async function streamObject({
   const schema = asSchema(inputSchema);
   return recordSpan({
     name: "ai.streamObject",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.prompt": JSON.stringify({ system, prompt, messages }),
-      "ai.schema": JSON.stringify(schema.jsonSchema),
-      "ai.settings.mode": mode
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        },
+        "ai.schema": { input: () => JSON.stringify(schema.jsonSchema) },
+        "ai.settings.mode": mode
+      }
+    }),
     tracer,
     endWhenDone: false,
     fn: async (rootSpan) => {
@@ -1366,20 +1510,31 @@ async function streamObject({
       } = await retry(
         () => recordSpan({
           name: "ai.streamObject.doStream",
-          attributes: {
-            ...baseTelemetryAttributes,
-            "ai.prompt.format": callOptions.inputFormat,
-            "ai.prompt.messages": JSON.stringify(callOptions.prompt),
-            "ai.settings.mode": mode
-          },
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": {
+                input: () => callOptions.inputFormat
+              },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(callOptions.prompt)
+              },
+              "ai.settings.mode": mode,
+              // standardized gen-ai llm span attributes:
+              "gen_ai.request.model": model.modelId,
+              "gen_ai.system": model.provider,
+              "gen_ai.request.max_tokens": settings.maxTokens,
+              "gen_ai.request.temperature": settings.temperature,
+              "gen_ai.request.top_p": settings.topP
+            }
+          }),
           tracer,
           endWhenDone: false,
-          fn: async (doStreamSpan2) => {
-            return {
-              result: await model.doStream(callOptions),
-              doStreamSpan: doStreamSpan2
-            };
-          }
+          fn: async (doStreamSpan2) => ({
+            result: await model.doStream(callOptions),
+            doStreamSpan: doStreamSpan2
+          })
         })
       );
       return new DefaultStreamObjectResult({
@@ -1389,7 +1544,8 @@ async function streamObject({
         schema,
         onFinish,
         rootSpan,
-        doStreamSpan
+        doStreamSpan,
+        telemetry
       });
     }
   });
@@ -1402,7 +1558,8 @@ var DefaultStreamObjectResult = class {
     schema,
     onFinish,
     rootSpan,
-    doStreamSpan
+    doStreamSpan,
+    telemetry
   }) {
     this.warnings = warnings;
    this.rawResponse = rawResponse;
@@ -1412,6 +1569,7 @@ var DefaultStreamObjectResult = class {
       resolveUsage = resolve;
     });
     let usage;
+    let finishReason;
     let object;
     let error;
     let accumulatedText = "";
@@ -1454,6 +1612,7 @@ var DefaultStreamObjectResult = class {
              textDelta: delta
            });
          }
+          finishReason = chunk.finishReason;
          usage = calculateCompletionTokenUsage(chunk.usage);
          controller.enqueue({ ...chunk, usage });
          resolveUsage(usage);
@@ -1484,17 +1643,36 @@ var DefaultStreamObjectResult = class {
            completionTokens: NaN,
            totalTokens: NaN
          };
-          doStreamSpan.setAttributes({
-            "ai.usage.promptTokens": finalUsage.promptTokens,
-            "ai.usage.completionTokens": finalUsage.completionTokens,
-            "ai.result.object": JSON.stringify(object)
-          });
+          doStreamSpan.setAttributes(
+            selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                "ai.finishReason": finishReason,
+                "ai.usage.promptTokens": finalUsage.promptTokens,
+                "ai.usage.completionTokens": finalUsage.completionTokens,
+                "ai.result.object": {
+                  output: () => JSON.stringify(object)
+                },
+                // standardized gen-ai llm span attributes:
+                "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
+                "gen_ai.usage.completion_tokens": finalUsage.completionTokens,
+                "gen_ai.response.finish_reasons": [finishReason]
+              }
+            })
+          );
          doStreamSpan.end();
-          rootSpan.setAttributes({
-            "ai.usage.promptTokens": finalUsage.promptTokens,
-            "ai.usage.completionTokens": finalUsage.completionTokens,
-            "ai.result.object": JSON.stringify(object)
-          });
+          rootSpan.setAttributes(
+            selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                "ai.usage.promptTokens": finalUsage.promptTokens,
+                "ai.usage.completionTokens": finalUsage.completionTokens,
+                "ai.result.object": {
+                  output: () => JSON.stringify(object)
+                }
+              }
+            })
+          );
          await (onFinish == null ? void 0 : onFinish({
            usage: finalUsage,
            object,
@@ -1692,12 +1870,17 @@ async function generateText({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.generateText",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.prompt": JSON.stringify({ system, prompt, messages }),
-      "ai.settings.maxToolRoundtrips": maxToolRoundtrips
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        },
+        "ai.settings.maxToolRoundtrips": maxToolRoundtrips
+      }
+    }),
     tracer,
     fn: async (span) => {
       var _a2, _b, _c, _d;
@@ -1732,11 +1915,22 @@ async function generateText({
         currentModelResponse = await retry(
           () => recordSpan({
             name: "ai.generateText.doGenerate",
-            attributes: {
-              ...baseTelemetryAttributes,
-              "ai.prompt.format": currentInputFormat,
-              "ai.prompt.messages": JSON.stringify(promptMessages)
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": { input: () => currentInputFormat },
+                "ai.prompt.messages": {
+                  input: () => JSON.stringify(promptMessages)
+                },
+                // standardized gen-ai llm span attributes:
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.system": model.provider,
+                "gen_ai.request.max_tokens": settings.maxTokens,
+                "gen_ai.request.temperature": settings.temperature,
+                "gen_ai.request.top_p": settings.topP
+              }
+            }),
             tracer,
             fn: async (span2) => {
              const result = await model.doGenerate({
@@ -1747,13 +1941,26 @@ async function generateText({
                abortSignal,
                headers
              });
-              span2.setAttributes({
-                "ai.finishReason": result.finishReason,
-                "ai.usage.promptTokens": result.usage.promptTokens,
-                "ai.usage.completionTokens": result.usage.completionTokens,
-                "ai.result.text": result.text,
-                "ai.result.toolCalls": JSON.stringify(result.toolCalls)
-              });
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.finishReason": result.finishReason,
+                    "ai.usage.promptTokens": result.usage.promptTokens,
+                    "ai.usage.completionTokens": result.usage.completionTokens,
+                    "ai.result.text": {
+                      output: () => result.text
+                    },
+                    "ai.result.toolCalls": {
+                      output: () => JSON.stringify(result.toolCalls)
+                    },
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.response.finish_reasons": [result.finishReason],
+                    "gen_ai.usage.prompt_tokens": result.usage.promptTokens,
+                    "gen_ai.usage.completion_tokens": result.usage.completionTokens
+                  }
+                })
+              );
              return result;
            }
          })
@@ -1764,7 +1971,8 @@ async function generateText({
        currentToolResults = tools == null ? [] : await executeTools({
          toolCalls: currentToolCalls,
          tools,
-          tracer
+          tracer,
+          telemetry
        });
        const currentUsage = calculateCompletionTokenUsage(
          currentModelResponse.usage
@@ -1798,13 +2006,22 @@ async function generateText({
        currentToolResults.length === currentToolCalls.length && // the number of roundtrips is less than the maximum:
        roundtripCount++ < maxToolRoundtrips
      );
-      span.setAttributes({
-        "ai.finishReason": currentModelResponse.finishReason,
-        "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
-        "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
-        "ai.result.text": currentModelResponse.text,
-        "ai.result.toolCalls": JSON.stringify(currentModelResponse.toolCalls)
-      });
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.finishReason": currentModelResponse.finishReason,
+            "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
+            "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
+            "ai.result.text": {
+              output: () => currentModelResponse.text
+            },
+            "ai.result.toolCalls": {
+              output: () => JSON.stringify(currentModelResponse.toolCalls)
+            }
+          }
+        })
+      );
      return new DefaultGenerateTextResult({
        // Always return a string so that the caller doesn't have to check for undefined.
        // If they need to check if the model did not return any text,
@@ -1826,7 +2043,8 @@ async function generateText({
 async function executeTools({
   toolCalls,
   tools,
-  tracer
+  tracer,
+  telemetry
 }) {
   const toolResults = await Promise.all(
     toolCalls.map(async (toolCall) => {
@@ -1836,18 +2054,30 @@ async function executeTools({
      }
      const result = await recordSpan({
        name: "ai.toolCall",
-        attributes: {
-          "ai.toolCall.name": toolCall.toolName,
-          "ai.toolCall.id": toolCall.toolCallId,
-          "ai.toolCall.args": JSON.stringify(toolCall.args)
-        },
+        attributes: selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.toolCall.name": toolCall.toolName,
+            "ai.toolCall.id": toolCall.toolCallId,
+            "ai.toolCall.args": {
+              output: () => JSON.stringify(toolCall.args)
+            }
+          }
+        }),
        tracer,
        fn: async (span) => {
          const result2 = await tool2.execute(toolCall.args);
          try {
-            span.setAttributes({
-              "ai.toolCall.result": JSON.stringify(result2)
-            });
+            span.setAttributes(
+              selectTelemetryAttributes({
+                telemetry,
+                attributes: {
+                  "ai.toolCall.result": {
+                    output: () => JSON.stringify(result2)
+                  }
+                }
+              })
+            );
          } catch (ignored) {
          }
          return result2;
@@ -1999,7 +2229,8 @@ function runToolsTransformation({
   tools,
   generatorStream,
   toolCallStreaming,
-  tracer
+  tracer,
+  telemetry
 }) {
   let canClose = false;
   const outstandingToolCalls = /* @__PURE__ */ new Set();
@@ -2069,11 +2300,16 @@ function runToolsTransformation({
          outstandingToolCalls.add(toolExecutionId);
          recordSpan({
            name: "ai.toolCall",
-            attributes: {
-              "ai.toolCall.name": toolCall.toolName,
-              "ai.toolCall.id": toolCall.toolCallId,
-              "ai.toolCall.args": JSON.stringify(toolCall.args)
-            },
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                "ai.toolCall.name": toolCall.toolName,
+                "ai.toolCall.id": toolCall.toolCallId,
+                "ai.toolCall.args": {
+                  output: () => JSON.stringify(toolCall.args)
+                }
+              }
+            }),
            tracer,
            fn: async (span) => tool2.execute(toolCall.args).then(
              (result) => {
@@ -2087,9 +2323,16 @@ function runToolsTransformation({
                  toolResultsStreamController.close();
                }
                try {
-                  span.setAttributes({
-                    "ai.toolCall.result": JSON.stringify(result)
-                  });
+                  span.setAttributes(
+                    selectTelemetryAttributes({
+                      telemetry,
+                      attributes: {
+                        "ai.toolCall.result": {
+                          output: () => JSON.stringify(result)
+                        }
+                      }
+                    })
+                  );
                } catch (ignored) {
                }
              },
@@ -2190,11 +2433,16 @@ async function streamText({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.streamText",
-    attributes: {
-      ...baseTelemetryAttributes,
-      // specific settings that only make sense on the outer level:
-      "ai.prompt": JSON.stringify({ system, prompt, messages })
-    },
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        }
+      }
+    }),
     tracer,
     endWhenDone: false,
     fn: async (rootSpan) => {
@@ -2210,11 +2458,24 @@ async function streamText({
      } = await retry(
        () => recordSpan({
          name: "ai.streamText.doStream",
-          attributes: {
-            ...baseTelemetryAttributes,
-            "ai.prompt.format": validatedPrompt.type,
-            "ai.prompt.messages": JSON.stringify(promptMessages)
-          },
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": {
+                input: () => validatedPrompt.type
+              },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(promptMessages)
+              },
+              // standardized gen-ai llm span attributes:
+              "gen_ai.request.model": model.modelId,
+              "gen_ai.system": model.provider,
+              "gen_ai.request.max_tokens": settings.maxTokens,
+              "gen_ai.request.temperature": settings.temperature,
+              "gen_ai.request.top_p": settings.topP
+            }
+          }),
          tracer,
          endWhenDone: false,
          fn: async (doStreamSpan2) => {
@@ -2240,13 +2501,15 @@ async function streamText({
          tools,
          generatorStream: stream,
          toolCallStreaming,
-          tracer
+          tracer,
+          telemetry
        }),
        warnings,
        rawResponse,
        onFinish,
        rootSpan,
-        doStreamSpan
+        doStreamSpan,
+        telemetry
      });
    }
  });
@@ -2258,7 +2521,8 @@ var DefaultStreamTextResult = class {
    rawResponse,
    onFinish,
    rootSpan,
-    doStreamSpan
+    doStreamSpan,
+    telemetry
  }) {
    this.warnings = warnings;
    this.rawResponse = rawResponse;
@@ -2338,21 +2602,35 @@ var DefaultStreamTextResult = class {
          };
          const finalFinishReason = finishReason != null ? finishReason : "unknown";
          const telemetryToolCalls = toolCalls.length > 0 ? JSON.stringify(toolCalls) : void 0;
-          doStreamSpan.setAttributes({
-            "ai.finishReason": finalFinishReason,
-            "ai.usage.promptTokens": finalUsage.promptTokens,
-            "ai.usage.completionTokens": finalUsage.completionTokens,
-            "ai.result.text": text,
-            "ai.result.toolCalls": telemetryToolCalls
-          });
+          doStreamSpan.setAttributes(
+            selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                "ai.finishReason": finalFinishReason,
+                "ai.usage.promptTokens": finalUsage.promptTokens,
+                "ai.usage.completionTokens": finalUsage.completionTokens,
+                "ai.result.text": { output: () => text },
+                "ai.result.toolCalls": { output: () => telemetryToolCalls },
+                // standardized gen-ai llm span attributes:
+                "gen_ai.response.finish_reasons": [finalFinishReason],
+                "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
+                "gen_ai.usage.completion_tokens": finalUsage.completionTokens
+              }
+            })
+          );
          doStreamSpan.end();
-          rootSpan.setAttributes({
-            "ai.finishReason": finalFinishReason,
-            "ai.usage.promptTokens": finalUsage.promptTokens,
-            "ai.usage.completionTokens": finalUsage.completionTokens,
-            "ai.result.text": text,
-            "ai.result.toolCalls": telemetryToolCalls
-          });
+          rootSpan.setAttributes(
+            selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                "ai.finishReason": finalFinishReason,
+                "ai.usage.promptTokens": finalUsage.promptTokens,
+                "ai.usage.completionTokens": finalUsage.completionTokens,
+                "ai.result.text": { output: () => text },
+                "ai.result.toolCalls": { output: () => telemetryToolCalls }
+              }
+            })
+          );
          resolveToolResults(toolResults);
          await ((_a = self.onFinish) == null ? void 0 : _a.call(self, {
            finishReason: finalFinishReason,
@@ -2476,8 +2754,6 @@ var DefaultStreamTextResult = class {
            controller.enqueue(
              formatStreamPart("tool_result", {
                toolCallId: chunk.toolCallId,
-                toolName: chunk.toolName,
-                args: chunk.args,
                result: chunk.result
              })
            );
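The last hunk is a wire-format change rather than a telemetry one: `tool_result` stream parts no longer repeat `toolName` and `args`, leaving only `toolCallId` and `result`. Consumers that displayed those fields should join against the earlier `tool_call` part by `toolCallId`; a hedged client-side sketch, with part shapes inferred from this diff rather than imported types:

```ts
// Shapes inferred from the diff: tool_call parts still carry toolName/args,
// tool_result parts now carry only { toolCallId, result }.
type ToolCallPart = { toolCallId: string; toolName: string; args: unknown };
type ToolResultPart = { toolCallId: string; result: unknown };

const pendingCalls = new Map<string, ToolCallPart>();

function onToolCall(part: ToolCallPart): void {
  pendingCalls.set(part.toolCallId, part);
}

function onToolResult(part: ToolResultPart): void {
  const call = pendingCalls.get(part.toolCallId);
  console.log(`${call?.toolName ?? "unknown tool"} ->`, part.result);
}
```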