ai 3.2.40 → 3.2.42
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +18 -2
- package/dist/index.d.ts +18 -2
- package/dist/index.js +474 -198
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +474 -198
- package/dist/index.mjs.map +1 -1
- package/package.json +6 -6
package/dist/index.js
CHANGED
@@ -258,6 +258,33 @@ function recordSpan({
   });
 }
 
+// core/telemetry/select-telemetry-attributes.ts
+function selectTelemetryAttributes({
+  telemetry,
+  attributes
+}) {
+  return Object.entries(attributes).reduce((attributes2, [key, value]) => {
+    if (value === void 0) {
+      return attributes2;
+    }
+    if (typeof value === "object" && "input" in value && typeof value.input === "function") {
+      if ((telemetry == null ? void 0 : telemetry.recordInputs) === false) {
+        return attributes2;
+      }
+      const result = value.input();
+      return result === void 0 ? attributes2 : { ...attributes2, [key]: result };
+    }
+    if (typeof value === "object" && "output" in value && typeof value.output === "function") {
+      if ((telemetry == null ? void 0 : telemetry.recordOutputs) === false) {
+        return attributes2;
+      }
+      const result = value.output();
+      return result === void 0 ? attributes2 : { ...attributes2, [key]: result };
+    }
+    return { ...attributes2, [key]: value };
+  }, {});
+}
+
 // core/util/retry-with-exponential-backoff.ts
 var import_provider = require("@ai-sdk/provider");
 var import_provider_utils = require("@ai-sdk/provider-utils");
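
Note: the selectTelemetryAttributes helper added above filters a span-attribute map. Plain values are always kept, { input: () => ... } entries are resolved only while telemetry.recordInputs is not false, { output: () => ... } entries only while telemetry.recordOutputs is not false, and closures that return undefined are dropped. A minimal sketch of the behavior (the attribute values below are made up for illustration):

const attributes = selectTelemetryAttributes({
  telemetry: { recordInputs: false },
  attributes: {
    "ai.usage.tokens": 42,                                   // plain value: always kept
    "ai.value": { input: () => JSON.stringify("hello") },    // dropped: recordInputs is false
    "ai.embedding": { output: () => JSON.stringify([0.1]) }  // kept: recordOutputs not disabled
  }
});
// => { "ai.usage.tokens": 42, "ai.embedding": "[0.1]" }
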
@@ -340,11 +367,13 @@ async function embed({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.embed",
-    attributes: {
-
-
-
-
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        "ai.value": { input: () => JSON.stringify(value) }
+      }
+    }),
     tracer,
     fn: async (span) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
@@ -353,11 +382,14 @@ async function embed({
       // nested spans to align with the embedMany telemetry data:
       recordSpan({
         name: "ai.embed.doEmbed",
-        attributes: {
-
-
-
-
+        attributes: selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            ...baseTelemetryAttributes,
+            // specific settings that only make sense on the outer level:
+            "ai.values": { input: () => [JSON.stringify(value)] }
+          }
+        }),
         tracer,
         fn: async (doEmbedSpan) => {
           var _a2;
@@ -368,12 +400,19 @@ async function embed({
           });
           const embedding2 = modelResponse.embeddings[0];
           const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
-          doEmbedSpan.setAttributes(
-
-
-
-
-
+          doEmbedSpan.setAttributes(
+            selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                "ai.embeddings": {
+                  output: () => modelResponse.embeddings.map(
+                    (embedding3) => JSON.stringify(embedding3)
+                  )
+                },
+                "ai.usage.tokens": usage2.tokens
+              }
+            })
+          );
           return {
             embedding: embedding2,
             usage: usage2,
@@ -383,10 +422,15 @@ async function embed({
         })
       )
       );
-      span.setAttributes(
-
-
-
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.embedding": { output: () => JSON.stringify(embedding) },
+            "ai.usage.tokens": usage.tokens
+          }
+        })
+      );
       return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
     }
   });
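
Note: in the embed hunks above, attribute values such as "ai.value" are now deferred behind input/output closures, so JSON.stringify only runs when the attribute will actually be recorded. A caller-side sketch, assuming the experimental_telemetry option of ai@3.2.x (model is a placeholder for an embedding model instance):

const { embedding, usage } = await embed({
  model, // placeholder: any embedding model
  value: "sunny day at the beach",
  experimental_telemetry: { isEnabled: true, recordInputs: false }
});
// spans omit the "ai.value" input attribute; token usage is still recorded
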
@@ -432,11 +476,16 @@ async function embedMany({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.embedMany",
-    attributes: {
-
-
-
-
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.values": {
+          input: () => values.map((value) => JSON.stringify(value))
+        }
+      }
+    }),
     tracer,
     fn: async (span) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
@@ -445,11 +494,16 @@ async function embedMany({
       const { embeddings: embeddings2, usage } = await retry(() => {
         return recordSpan({
           name: "ai.embedMany.doEmbed",
-          attributes: {
-
-
-
-
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              // specific settings that only make sense on the outer level:
+              "ai.values": {
+                input: () => values.map((value) => JSON.stringify(value))
+              }
+            }
+          }),
           tracer,
           fn: async (doEmbedSpan) => {
             var _a2;
@@ -460,22 +514,32 @@ async function embedMany({
             });
             const embeddings3 = modelResponse.embeddings;
             const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
-            doEmbedSpan.setAttributes(
-
-
-
-
-
+            doEmbedSpan.setAttributes(
+              selectTelemetryAttributes({
+                telemetry,
+                attributes: {
+                  "ai.embeddings": {
+                    output: () => embeddings3.map((embedding) => JSON.stringify(embedding))
+                  },
+                  "ai.usage.tokens": usage2.tokens
+                }
+              })
+            );
             return { embeddings: embeddings3, usage: usage2 };
           }
         });
       });
-      span.setAttributes(
-
-
-
-
-
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.embeddings": {
+              output: () => embeddings2.map((embedding) => JSON.stringify(embedding))
+            },
+            "ai.usage.tokens": usage.tokens
+          }
+        })
+      );
       return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
     }
     const valueChunks = splitArray(values, maxEmbeddingsPerCall);
@@ -485,11 +549,16 @@ async function embedMany({
       const { embeddings: responseEmbeddings, usage } = await retry(() => {
         return recordSpan({
           name: "ai.embedMany.doEmbed",
-          attributes: {
-
-
-
-
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              // specific settings that only make sense on the outer level:
+              "ai.values": {
+                input: () => chunk.map((value) => JSON.stringify(value))
+              }
+            }
+          }),
           tracer,
           fn: async (doEmbedSpan) => {
             var _a2;
@@ -500,12 +569,17 @@ async function embedMany({
             });
             const embeddings2 = modelResponse.embeddings;
             const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
-            doEmbedSpan.setAttributes(
-
-
-
-
-
+            doEmbedSpan.setAttributes(
+              selectTelemetryAttributes({
+                telemetry,
+                attributes: {
+                  "ai.embeddings": {
+                    output: () => embeddings2.map((embedding) => JSON.stringify(embedding))
+                  },
+                  "ai.usage.tokens": usage2.tokens
+                }
+              })
+            );
             return { embeddings: embeddings2, usage: usage2 };
           }
         });
@@ -513,10 +587,17 @@ async function embedMany({
       embeddings.push(...responseEmbeddings);
       tokens += usage.tokens;
     }
-    span.setAttributes(
-
-
-
+    span.setAttributes(
+      selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          "ai.embeddings": {
+            output: () => embeddings.map((embedding) => JSON.stringify(embedding))
+          },
+          "ai.usage.tokens": tokens
+        }
+      })
+    );
     return new DefaultEmbedManyResult({
       values,
       embeddings,
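
Note: embedMany records one outer ai.embedMany span plus one ai.embedMany.doEmbed span per chunk; each doEmbed span records only its own chunk's values, while the outer span records the concatenated embeddings and the summed token count. A sketch of the chunking, using the internal splitArray helper that appears in the diff (shown only to illustrate the span fan-out):

// with maxEmbeddingsPerCall = 2:
const valueChunks = splitArray(["a", "b", "c", "d", "e"], 2);
// => [["a", "b"], ["c", "d"], ["e"]]  -> three doEmbed spans under one embedMany span
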
@@ -1073,13 +1154,20 @@ async function generateObject({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.generateObject",
-    attributes: {
-
-
-
-
-
-
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        },
+        "ai.schema": {
+          input: () => JSON.stringify(schema.jsonSchema)
+        },
+        "ai.settings.mode": mode
+      }
+    }),
     tracer,
     fn: async (span) => {
       const retry = retryWithExponentialBackoff({ maxRetries });
@@ -1110,12 +1198,25 @@ async function generateObject({
       const generateResult = await retry(
         () => recordSpan({
           name: "ai.generateObject.doGenerate",
-          attributes: {
-
-
-
-
-
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": {
+                input: () => inputFormat
+              },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(promptMessages)
+              },
+              "ai.settings.mode": mode,
+              // standardized gen-ai llm span attributes:
+              "gen_ai.request.model": model.modelId,
+              "gen_ai.system": model.provider,
+              "gen_ai.request.max_tokens": settings.maxTokens,
+              "gen_ai.request.temperature": settings.temperature,
+              "gen_ai.request.top_p": settings.topP
+            }
+          }),
           tracer,
           fn: async (span2) => {
             const result2 = await model.doGenerate({
@@ -1129,12 +1230,21 @@ async function generateObject({
             if (result2.text === void 0) {
               throw new import_provider6.NoObjectGeneratedError();
             }
-            span2.setAttributes(
-
-
-
-
-
+            span2.setAttributes(
+              selectTelemetryAttributes({
+                telemetry,
+                attributes: {
+                  "ai.finishReason": result2.finishReason,
+                  "ai.usage.promptTokens": result2.usage.promptTokens,
+                  "ai.usage.completionTokens": result2.usage.completionTokens,
+                  "ai.result.object": { output: () => result2.text },
+                  // standardized gen-ai llm span attributes:
+                  "gen_ai.response.finish_reasons": [result2.finishReason],
+                  "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
+                  "gen_ai.usage.completion_tokens": result2.usage.completionTokens
+                }
+              })
+            );
             return { ...result2, objectText: result2.text };
           }
         })
@@ -1161,12 +1271,25 @@ async function generateObject({
       const generateResult = await retry(
         () => recordSpan({
           name: "ai.generateObject.doGenerate",
-          attributes: {
-
-
-
-
-
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": {
+                input: () => inputFormat
+              },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(promptMessages)
+              },
+              "ai.settings.mode": mode,
+              // standardized gen-ai llm span attributes:
+              "gen_ai.request.model": model.modelId,
+              "gen_ai.system": model.provider,
+              "gen_ai.request.max_tokens": settings.maxTokens,
+              "gen_ai.request.temperature": settings.temperature,
+              "gen_ai.request.top_p": settings.topP
+            }
+          }),
           tracer,
           fn: async (span2) => {
             var _a2, _b;
@@ -1190,12 +1313,21 @@ async function generateObject({
             if (objectText === void 0) {
               throw new import_provider6.NoObjectGeneratedError();
             }
-            span2.setAttributes(
-
-
-
-
-
+            span2.setAttributes(
+              selectTelemetryAttributes({
+                telemetry,
+                attributes: {
+                  "ai.finishReason": result2.finishReason,
+                  "ai.usage.promptTokens": result2.usage.promptTokens,
+                  "ai.usage.completionTokens": result2.usage.completionTokens,
+                  "ai.result.object": { output: () => objectText },
+                  // standardized gen-ai llm span attributes:
+                  "gen_ai.response.finish_reasons": [result2.finishReason],
+                  "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
+                  "gen_ai.usage.completion_tokens": result2.usage.completionTokens
+                }
+              })
+            );
             return { ...result2, objectText };
           }
         })
@@ -1222,12 +1354,19 @@ async function generateObject({
       if (!parseResult.success) {
        throw parseResult.error;
      }
-      span.setAttributes(
-
-
-
-
-
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.finishReason": finishReason,
+            "ai.usage.promptTokens": usage.promptTokens,
+            "ai.usage.completionTokens": usage.completionTokens,
+            "ai.result.object": {
+              output: () => JSON.stringify(parseResult.value)
+            }
+          }
+        })
+      );
       return new DefaultGenerateObjectResult({
         object: parseResult.value,
         finishReason,
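
Note: the doGenerate spans now emit the standardized gen_ai.* attributes (request model, system, sampling settings, finish reasons, token usage) alongside the ai.* attributes. A sketch of a call that would produce these spans, assuming the experimental_telemetry option of ai@3.2.x and a zod schema (model is a placeholder):

const { z } = require("zod");
const { object } = await generateObject({
  model, // placeholder: any language model
  schema: z.object({ city: z.string() }),
  prompt: "Name a city.",
  experimental_telemetry: { isEnabled: true }
});
// emits "ai.generateObject" wrapping "ai.generateObject.doGenerate";
// the inner span carries the gen_ai.request.* and gen_ai.usage.* attributes
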
@@ -1347,13 +1486,18 @@ async function streamObject({
   const schema = asSchema(inputSchema);
   return recordSpan({
     name: "ai.streamObject",
-    attributes: {
-
-
-
-
-
-
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        },
+        "ai.schema": { input: () => JSON.stringify(schema.jsonSchema) },
+        "ai.settings.mode": mode
+      }
+    }),
     tracer,
     endWhenDone: false,
     fn: async (rootSpan) => {
@@ -1454,20 +1598,31 @@ async function streamObject({
       } = await retry(
         () => recordSpan({
           name: "ai.streamObject.doStream",
-          attributes: {
-
-
-
-
-
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": {
+                input: () => callOptions.inputFormat
+              },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(callOptions.prompt)
+              },
+              "ai.settings.mode": mode,
+              // standardized gen-ai llm span attributes:
+              "gen_ai.request.model": model.modelId,
+              "gen_ai.system": model.provider,
+              "gen_ai.request.max_tokens": settings.maxTokens,
+              "gen_ai.request.temperature": settings.temperature,
+              "gen_ai.request.top_p": settings.topP
+            }
+          }),
           tracer,
           endWhenDone: false,
-          fn: async (doStreamSpan2) => {
-
-
-
-          };
-          }
+          fn: async (doStreamSpan2) => ({
+            result: await model.doStream(callOptions),
+            doStreamSpan: doStreamSpan2
+          })
         })
       );
       return new DefaultStreamObjectResult({
@@ -1477,7 +1632,8 @@ async function streamObject({
         schema,
         onFinish,
         rootSpan,
-        doStreamSpan
+        doStreamSpan,
+        telemetry
       });
     }
   });
@@ -1490,7 +1646,8 @@ var DefaultStreamObjectResult = class {
     schema,
     onFinish,
     rootSpan,
-    doStreamSpan
+    doStreamSpan,
+    telemetry
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -1500,6 +1657,7 @@ var DefaultStreamObjectResult = class {
       resolveUsage = resolve;
     });
     let usage;
+    let finishReason;
     let object;
     let error;
     let accumulatedText = "";
@@ -1542,6 +1700,7 @@ var DefaultStreamObjectResult = class {
             textDelta: delta
           });
         }
+        finishReason = chunk.finishReason;
         usage = calculateCompletionTokenUsage(chunk.usage);
         controller.enqueue({ ...chunk, usage });
         resolveUsage(usage);
@@ -1572,17 +1731,36 @@ var DefaultStreamObjectResult = class {
           completionTokens: NaN,
           totalTokens: NaN
         };
-        doStreamSpan.setAttributes(
-
-
-
-
+        doStreamSpan.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.finishReason": finishReason,
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.object": {
+                output: () => JSON.stringify(object)
+              },
+              // standardized gen-ai llm span attributes:
+              "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
+              "gen_ai.usage.completion_tokens": finalUsage.completionTokens,
+              "gen_ai.response.finish_reasons": [finishReason]
+            }
+          })
+        );
         doStreamSpan.end();
-        rootSpan.setAttributes(
-
-
-
-
+        rootSpan.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.object": {
+                output: () => JSON.stringify(object)
+              }
+            }
+          })
+        );
         await (onFinish == null ? void 0 : onFinish({
           usage: finalUsage,
           object,
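
Note: streamObject now captures finishReason from the finish chunk and records it on the doStream span (the root span keeps only token usage and the final object). A sketch of inspecting the recorded attributes with an in-memory exporter; the class names are from @opentelemetry/sdk-trace-base, and this assumes the SDK picks up the registered global tracer provider:

const {
  BasicTracerProvider,
  InMemorySpanExporter,
  SimpleSpanProcessor
} = require("@opentelemetry/sdk-trace-base");
const exporter = new InMemorySpanExporter();
const provider = new BasicTracerProvider();
provider.addSpanProcessor(new SimpleSpanProcessor(exporter));
provider.register();
// ... run a streamObject call with telemetry enabled, then:
const doStream = exporter
  .getFinishedSpans()
  .find((span) => span.name === "ai.streamObject.doStream");
console.log(doStream == null ? void 0 : doStream.attributes["ai.finishReason"]);
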
@@ -1777,12 +1955,17 @@ async function generateText({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.generateText",
-    attributes: {
-
-
-
-
-
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        },
+        "ai.settings.maxToolRoundtrips": maxToolRoundtrips
+      }
+    }),
     tracer,
     fn: async (span) => {
       var _a2, _b, _c, _d;
@@ -1817,11 +2000,22 @@ async function generateText({
       currentModelResponse = await retry(
         () => recordSpan({
           name: "ai.generateText.doGenerate",
-          attributes: {
-
-
-
-
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": { input: () => currentInputFormat },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(promptMessages)
+              },
+              // standardized gen-ai llm span attributes:
+              "gen_ai.request.model": model.modelId,
+              "gen_ai.system": model.provider,
+              "gen_ai.request.max_tokens": settings.maxTokens,
+              "gen_ai.request.temperature": settings.temperature,
+              "gen_ai.request.top_p": settings.topP
+            }
+          }),
           tracer,
           fn: async (span2) => {
             const result = await model.doGenerate({
@@ -1832,13 +2026,26 @@ async function generateText({
              abortSignal,
              headers
            });
-            span2.setAttributes(
-
-
-
-
-
-
+            span2.setAttributes(
+              selectTelemetryAttributes({
+                telemetry,
+                attributes: {
+                  "ai.finishReason": result.finishReason,
+                  "ai.usage.promptTokens": result.usage.promptTokens,
+                  "ai.usage.completionTokens": result.usage.completionTokens,
+                  "ai.result.text": {
+                    output: () => result.text
+                  },
+                  "ai.result.toolCalls": {
+                    output: () => JSON.stringify(result.toolCalls)
+                  },
+                  // standardized gen-ai llm span attributes:
+                  "gen_ai.response.finish_reasons": [result.finishReason],
+                  "gen_ai.usage.prompt_tokens": result.usage.promptTokens,
+                  "gen_ai.usage.completion_tokens": result.usage.completionTokens
+                }
+              })
+            );
             return result;
           }
         })
@@ -1849,7 +2056,8 @@ async function generateText({
       currentToolResults = tools == null ? [] : await executeTools({
         toolCalls: currentToolCalls,
         tools,
-        tracer
+        tracer,
+        telemetry
       });
       const currentUsage = calculateCompletionTokenUsage(
         currentModelResponse.usage
@@ -1883,13 +2091,22 @@ async function generateText({
        currentToolResults.length === currentToolCalls.length && // the number of roundtrips is less than the maximum:
        roundtripCount++ < maxToolRoundtrips
      );
-      span.setAttributes(
-
-
-
-
-
-
+      span.setAttributes(
+        selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.finishReason": currentModelResponse.finishReason,
+            "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
+            "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
+            "ai.result.text": {
+              output: () => currentModelResponse.text
+            },
+            "ai.result.toolCalls": {
+              output: () => JSON.stringify(currentModelResponse.toolCalls)
+            }
+          }
+        })
+      );
       return new DefaultGenerateTextResult({
         // Always return a string so that the caller doesn't have to check for undefined.
         // If they need to check if the model did not return any text,
@@ -1911,7 +2128,8 @@ async function generateText({
 async function executeTools({
   toolCalls,
   tools,
-  tracer
+  tracer,
+  telemetry
 }) {
   const toolResults = await Promise.all(
     toolCalls.map(async (toolCall) => {
@@ -1921,18 +2139,30 @@ async function executeTools({
       }
       const result = await recordSpan({
         name: "ai.toolCall",
-        attributes: {
-
-
-
-
+        attributes: selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.toolCall.name": toolCall.toolName,
+            "ai.toolCall.id": toolCall.toolCallId,
+            "ai.toolCall.args": {
+              output: () => JSON.stringify(toolCall.args)
+            }
+          }
+        }),
         tracer,
         fn: async (span) => {
           const result2 = await tool2.execute(toolCall.args);
           try {
-            span.setAttributes(
-
-
+            span.setAttributes(
+              selectTelemetryAttributes({
+                telemetry,
+                attributes: {
+                  "ai.toolCall.result": {
+                    output: () => JSON.stringify(result2)
+                  }
+                }
+              })
+            );
           } catch (ignored) {
           }
           return result2;
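
Note: each tool execution now gets its own ai.toolCall span carrying the tool name, call id, and serialized arguments, with the serialized result attached after execution. As the diff shows, ai.toolCall.args and ai.toolCall.result are both wrapped as output closures, so both are gated by recordOutputs rather than recordInputs. A sketch of a tool whose execution would be traced this way (model is a placeholder; tools shape as in ai@3.2.x):

const { z } = require("zod");
const result = await generateText({
  model, // placeholder: any language model
  tools: {
    weather: {
      description: "Get the weather for a city",
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, tempF: 72 })
    }
  },
  prompt: "What is the weather in Berlin?",
  experimental_telemetry: { isEnabled: true }
});
// each executed tool call produces an "ai.toolCall" span
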
@@ -2084,7 +2314,8 @@ function runToolsTransformation({
   tools,
   generatorStream,
   toolCallStreaming,
-  tracer
+  tracer,
+  telemetry
 }) {
   let canClose = false;
   const outstandingToolCalls = /* @__PURE__ */ new Set();
@@ -2154,11 +2385,16 @@ function runToolsTransformation({
         outstandingToolCalls.add(toolExecutionId);
         recordSpan({
           name: "ai.toolCall",
-          attributes: {
-
-
-
-
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.toolCall.name": toolCall.toolName,
+              "ai.toolCall.id": toolCall.toolCallId,
+              "ai.toolCall.args": {
+                output: () => JSON.stringify(toolCall.args)
+              }
+            }
+          }),
           tracer,
           fn: async (span) => tool2.execute(toolCall.args).then(
             (result) => {
@@ -2172,9 +2408,16 @@ function runToolsTransformation({
                 toolResultsStreamController.close();
               }
               try {
-                span.setAttributes(
-
-
+                span.setAttributes(
+                  selectTelemetryAttributes({
+                    telemetry,
+                    attributes: {
+                      "ai.toolCall.result": {
+                        output: () => JSON.stringify(result)
+                      }
+                    }
+                  })
+                );
               } catch (ignored) {
               }
             },
@@ -2275,11 +2518,16 @@ async function streamText({
   const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
   return recordSpan({
     name: "ai.streamText",
-    attributes: {
-
-
-
-
+    attributes: selectTelemetryAttributes({
+      telemetry,
+      attributes: {
+        ...baseTelemetryAttributes,
+        // specific settings that only make sense on the outer level:
+        "ai.prompt": {
+          input: () => JSON.stringify({ system, prompt, messages })
+        }
+      }
+    }),
     tracer,
     endWhenDone: false,
     fn: async (rootSpan) => {
@@ -2295,11 +2543,24 @@ async function streamText({
       } = await retry(
         () => recordSpan({
           name: "ai.streamText.doStream",
-          attributes: {
-
-
-
-
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": {
+                input: () => validatedPrompt.type
+              },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(promptMessages)
+              },
+              // standardized gen-ai llm span attributes:
+              "gen_ai.request.model": model.modelId,
+              "gen_ai.system": model.provider,
+              "gen_ai.request.max_tokens": settings.maxTokens,
+              "gen_ai.request.temperature": settings.temperature,
+              "gen_ai.request.top_p": settings.topP
+            }
+          }),
           tracer,
           endWhenDone: false,
           fn: async (doStreamSpan2) => {
@@ -2325,13 +2586,15 @@ async function streamText({
           tools,
           generatorStream: stream,
           toolCallStreaming,
-          tracer
+          tracer,
+          telemetry
         }),
         warnings,
         rawResponse,
         onFinish,
         rootSpan,
-        doStreamSpan
+        doStreamSpan,
+        telemetry
       });
     }
   });
@@ -2343,7 +2606,8 @@ var DefaultStreamTextResult = class {
     rawResponse,
     onFinish,
     rootSpan,
-    doStreamSpan
+    doStreamSpan,
+    telemetry
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -2423,21 +2687,35 @@ var DefaultStreamTextResult = class {
         };
         const finalFinishReason = finishReason != null ? finishReason : "unknown";
         const telemetryToolCalls = toolCalls.length > 0 ? JSON.stringify(toolCalls) : void 0;
-        doStreamSpan.setAttributes(
-
-
-
-
-
-
+        doStreamSpan.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.finishReason": finalFinishReason,
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.text": { output: () => text },
+              "ai.result.toolCalls": { output: () => telemetryToolCalls },
+              // standardized gen-ai llm span attributes:
+              "gen_ai.response.finish_reasons": [finalFinishReason],
+              "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
+              "gen_ai.usage.completion_tokens": finalUsage.completionTokens
+            }
+          })
+        );
         doStreamSpan.end();
-        rootSpan.setAttributes(
-
-
-
-
-
-
+        rootSpan.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.finishReason": finalFinishReason,
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.text": { output: () => text },
+              "ai.result.toolCalls": { output: () => telemetryToolCalls }
+            }
+          })
+        );
         resolveToolResults(toolResults);
         await ((_a = self.onFinish) == null ? void 0 : _a.call(self, {
           finishReason: finalFinishReason,
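
Note: because selectTelemetryAttributes drops closures that return undefined, "ai.result.toolCalls" disappears from the span entirely when no tool calls occurred (telemetryToolCalls is void 0 in that case):

selectTelemetryAttributes({
  telemetry: {},
  attributes: { "ai.result.toolCalls": { output: () => void 0 } }
});
// => {}
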
@@ -2561,8 +2839,6 @@ var DefaultStreamTextResult = class {
           controller.enqueue(
             (0, import_ui_utils6.formatStreamPart)("tool_result", {
               toolCallId: chunk.toolCallId,
-              toolName: chunk.toolName,
-              args: chunk.args,
               result: chunk.result
             })
           );
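
Note: the final hunk slims the tool_result stream part from { toolCallId, toolName, args, result } to { toolCallId, result }. Clients that still need the tool name or arguments can correlate via toolCallId with the earlier tool_call part of the same stream.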