ai 3.2.19 → 3.2.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
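The diff below adds experimental OpenTelemetry tracing to `generateText` and `streamText` behind a new `experimental_telemetry` option: the option's `isEnabled` flag gates `getTracer`, and its `functionId` and `metadata` fields become span attributes. As a minimal usage sketch, assuming the host app has already registered an OpenTelemetry tracer provider (the provider import and model id here are illustrative placeholders, not part of this diff):

// sketch.mjs — a hedged usage example of the new experimental_telemetry option.
// Spans only reach a backend if an OTel tracer provider is registered;
// otherwise trace.getTracer("ai") yields a no-op tracer.
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai"; // placeholder provider choice

const { text } = await generateText({
  model: openai("gpt-4o"), // placeholder model id
  prompt: "Why is the sky blue?",
  experimental_telemetry: {
    isEnabled: true, // without this, the no-op tracer is used
    functionId: "sky-question", // recorded as resource.name / ai.telemetry.functionId
    metadata: { userId: "user-123" } // recorded as ai.telemetry.metadata.* attributes
  }
});
console.log(text);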
package/dist/index.mjs CHANGED
@@ -1113,6 +1113,157 @@ function prepareToolsAndToolChoice({
   };
 }
 
+// core/telemetry/get-base-telemetry-attributes.ts
+function getBaseTelemetryAttributes({
+  operationName,
+  model,
+  settings,
+  telemetry,
+  headers
+}) {
+  var _a;
+  return {
+    "ai.model.provider": model.provider,
+    "ai.model.id": model.modelId,
+    // settings:
+    ...Object.entries(settings).reduce((attributes, [key, value]) => {
+      attributes[`ai.settings.${key}`] = value;
+      return attributes;
+    }, {}),
+    // special telemetry information
+    "operation.name": operationName,
+    "resource.name": telemetry == null ? void 0 : telemetry.functionId,
+    "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
+    // add metadata as attributes:
+    ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
+      (attributes, [key, value]) => {
+        attributes[`ai.telemetry.metadata.${key}`] = value;
+        return attributes;
+      },
+      {}
+    ),
+    // request headers
+    ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
+      if (value !== void 0) {
+        attributes[`ai.request.headers.${key}`] = value;
+      }
+      return attributes;
+    }, {})
+  };
+}
+
+// core/telemetry/get-tracer.ts
+import { trace } from "@opentelemetry/api";
+
+// core/telemetry/noop-tracer.ts
+var noopTracer = {
+  startSpan() {
+    return noopSpan;
+  },
+  startActiveSpan(name, arg1, arg2, arg3) {
+    if (typeof arg1 === "function") {
+      return arg1(noopSpan);
+    }
+    if (typeof arg2 === "function") {
+      return arg2(noopSpan);
+    }
+    if (typeof arg3 === "function") {
+      return arg3(noopSpan);
+    }
+  }
+};
+var noopSpan = {
+  spanContext() {
+    return noopSpanContext;
+  },
+  setAttribute() {
+    return this;
+  },
+  setAttributes() {
+    return this;
+  },
+  addEvent() {
+    return this;
+  },
+  addLink() {
+    return this;
+  },
+  addLinks() {
+    return this;
+  },
+  setStatus() {
+    return this;
+  },
+  updateName() {
+    return this;
+  },
+  end() {
+    return this;
+  },
+  isRecording() {
+    return false;
+  },
+  recordException() {
+    return this;
+  }
+};
+var noopSpanContext = {
+  traceId: "",
+  spanId: "",
+  traceFlags: 0
+};
+
+// core/telemetry/get-tracer.ts
+var testTracer = void 0;
+function getTracer({ isEnabled }) {
+  if (!isEnabled) {
+    return noopTracer;
+  }
+  if (testTracer) {
+    return testTracer;
+  }
+  return trace.getTracer("ai");
+}
+
+// core/telemetry/record-span.ts
+import { SpanStatusCode } from "@opentelemetry/api";
+function recordSpan({
+  name,
+  tracer,
+  attributes,
+  fn,
+  endWhenDone = true
+}) {
+  return tracer.startActiveSpan(name, { attributes }, async (span) => {
+    try {
+      const result = await fn(span);
+      if (endWhenDone) {
+        span.end();
+      }
+      return result;
+    } catch (error) {
+      try {
+        if (error instanceof Error) {
+          span.recordException({
+            name: error.name,
+            message: error.message,
+            stack: error.stack
+          });
+          span.setStatus({
+            code: SpanStatusCode.ERROR,
+            message: error.message
+          });
+        } else {
+          span.setStatus({ code: SpanStatusCode.ERROR });
+        }
+      } finally {
+        span.end();
+      }
+      throw error;
+    }
+  });
+}
+
 // core/generate-text/tool-call.ts
 import {
   InvalidToolArgumentsError,
@@ -1166,71 +1317,128 @@ async function generateText({
   headers,
   maxAutomaticRoundtrips = 0,
   maxToolRoundtrips = maxAutomaticRoundtrips,
+  experimental_telemetry: telemetry,
   ...settings
 }) {
-  var _a, _b, _c;
-  const retry = retryWithExponentialBackoff({ maxRetries });
-  const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
-  const mode = {
-    type: "regular",
-    ...prepareToolsAndToolChoice({ tools, toolChoice })
-  };
-  const callSettings = prepareCallSettings(settings);
-  const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
-  let currentModelResponse;
-  let currentToolCalls = [];
-  let currentToolResults = [];
-  let roundtrips = 0;
-  const responseMessages = [];
-  do {
-    currentModelResponse = await retry(() => {
-      return model.doGenerate({
-        mode,
-        ...callSettings,
-        // once we have a roundtrip, we need to switch to messages format:
-        inputFormat: roundtrips === 0 ? validatedPrompt.type : "messages",
-        prompt: promptMessages,
-        abortSignal,
-        headers
+  var _a;
+  const baseTelemetryAttributes = getBaseTelemetryAttributes({
+    operationName: "ai.generateText",
+    model,
+    telemetry,
+    headers,
+    settings: { ...settings, maxRetries }
+  });
+  const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+  return recordSpan({
+    name: "ai.generateText",
+    attributes: {
+      ...baseTelemetryAttributes,
+      // specific settings that only make sense on the outer level:
+      "ai.prompt": JSON.stringify({ system, prompt, messages }),
+      "ai.settings.maxToolRoundtrips": maxToolRoundtrips
+    },
+    tracer,
+    fn: async (span) => {
+      var _a2, _b, _c;
+      const retry = retryWithExponentialBackoff({ maxRetries });
+      const validatedPrompt = getValidatedPrompt({
+        system,
+        prompt,
+        messages
       });
-    });
-    currentToolCalls = ((_a = currentModelResponse.toolCalls) != null ? _a : []).map(
-      (modelToolCall) => parseToolCall({ toolCall: modelToolCall, tools })
-    );
-    currentToolResults = tools == null ? [] : await executeTools({ toolCalls: currentToolCalls, tools });
-    const newResponseMessages = toResponseMessages({
-      text: (_b = currentModelResponse.text) != null ? _b : "",
-      toolCalls: currentToolCalls,
-      toolResults: currentToolResults
-    });
-    responseMessages.push(...newResponseMessages);
-    promptMessages.push(
-      ...newResponseMessages.map(convertToLanguageModelMessage)
-    );
-  } while (
-    // there are tool calls:
-    currentToolCalls.length > 0 && // all current tool calls have results:
-    currentToolResults.length === currentToolCalls.length && // the number of roundtrips is less than the maximum:
-    roundtrips++ < maxToolRoundtrips
-  );
-  return new GenerateTextResult({
-    // Always return a string so that the caller doesn't have to check for undefined.
-    // If they need to check if the model did not return any text,
-    // they can check the length of the string:
-    text: (_c = currentModelResponse.text) != null ? _c : "",
-    toolCalls: currentToolCalls,
-    toolResults: currentToolResults,
-    finishReason: currentModelResponse.finishReason,
-    usage: calculateCompletionTokenUsage(currentModelResponse.usage),
-    warnings: currentModelResponse.warnings,
-    rawResponse: currentModelResponse.rawResponse,
-    logprobs: currentModelResponse.logprobs,
-    responseMessages
+      const mode = {
+        type: "regular",
+        ...prepareToolsAndToolChoice({ tools, toolChoice })
+      };
+      const callSettings = prepareCallSettings(settings);
+      const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+      let currentModelResponse;
+      let currentToolCalls = [];
+      let currentToolResults = [];
+      let roundtrips = 0;
+      const responseMessages = [];
+      do {
+        const currentInputFormat = roundtrips === 0 ? validatedPrompt.type : "messages";
+        currentModelResponse = await retry(
+          () => recordSpan({
+            name: "ai.generateText.doGenerate",
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": currentInputFormat,
+              "ai.prompt.messages": JSON.stringify(promptMessages)
+            },
+            tracer,
+            fn: async (span2) => {
+              const result = await model.doGenerate({
+                mode,
+                ...callSettings,
+                inputFormat: currentInputFormat,
+                prompt: promptMessages,
+                abortSignal,
+                headers
+              });
+              span2.setAttributes({
+                "ai.finishReason": result.finishReason,
+                "ai.usage.promptTokens": result.usage.promptTokens,
+                "ai.usage.completionTokens": result.usage.completionTokens,
+                "ai.result.text": result.text,
+                "ai.result.toolCalls": JSON.stringify(result.toolCalls)
+              });
+              return result;
+            }
+          })
+        );
+        currentToolCalls = ((_a2 = currentModelResponse.toolCalls) != null ? _a2 : []).map(
+          (modelToolCall) => parseToolCall({ toolCall: modelToolCall, tools })
+        );
+        currentToolResults = tools == null ? [] : await executeTools({
+          toolCalls: currentToolCalls,
+          tools,
+          tracer
+        });
+        const newResponseMessages = toResponseMessages({
+          text: (_b = currentModelResponse.text) != null ? _b : "",
+          toolCalls: currentToolCalls,
+          toolResults: currentToolResults
+        });
+        responseMessages.push(...newResponseMessages);
+        promptMessages.push(
+          ...newResponseMessages.map(convertToLanguageModelMessage)
+        );
+      } while (
+        // there are tool calls:
+        currentToolCalls.length > 0 && // all current tool calls have results:
+        currentToolResults.length === currentToolCalls.length && // the number of roundtrips is less than the maximum:
+        roundtrips++ < maxToolRoundtrips
+      );
+      span.setAttributes({
+        "ai.finishReason": currentModelResponse.finishReason,
+        "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
+        "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
+        "ai.result.text": currentModelResponse.text,
+        "ai.result.toolCalls": JSON.stringify(currentModelResponse.toolCalls)
+      });
+      return new GenerateTextResult({
+        // Always return a string so that the caller doesn't have to check for undefined.
+        // If they need to check if the model did not return any text,
+        // they can check the length of the string:
+        text: (_c = currentModelResponse.text) != null ? _c : "",
+        toolCalls: currentToolCalls,
+        toolResults: currentToolResults,
+        finishReason: currentModelResponse.finishReason,
+        usage: calculateCompletionTokenUsage(currentModelResponse.usage),
+        warnings: currentModelResponse.warnings,
+        rawResponse: currentModelResponse.rawResponse,
+        logprobs: currentModelResponse.logprobs,
+        responseMessages
+      });
+    }
   });
 }
 async function executeTools({
   toolCalls,
-  tools
+  tools,
+  tracer
 }) {
   const toolResults = await Promise.all(
     toolCalls.map(async (toolCall) => {
@@ -1238,7 +1446,25 @@ async function executeTools({
       if ((tool2 == null ? void 0 : tool2.execute) == null) {
        return void 0;
      }
-      const result = await tool2.execute(toolCall.args);
+      const result = await recordSpan({
+        name: "ai.toolCall",
+        attributes: {
+          "ai.toolCall.name": toolCall.toolName,
+          "ai.toolCall.id": toolCall.toolCallId,
+          "ai.toolCall.args": JSON.stringify(toolCall.args)
+        },
+        tracer,
+        fn: async (span) => {
+          const result2 = await tool2.execute(toolCall.args);
+          try {
+            span.setAttributes({
+              "ai.toolCall.result": JSON.stringify(result2)
+            });
+          } catch (ignored) {
+          }
+          return result2;
+        }
+      });
       return {
         toolCallId: toolCall.toolCallId,
         toolName: toolCall.toolName,
@@ -1294,7 +1520,8 @@ import { NoSuchToolError as NoSuchToolError2 } from "@ai-sdk/provider";
 import { generateId } from "@ai-sdk/ui-utils";
 function runToolsTransformation({
   tools,
-  generatorStream
+  generatorStream,
+  tracer
 }) {
   let canClose = false;
   const outstandingToolCalls = /* @__PURE__ */ new Set();
@@ -1342,29 +1569,44 @@ function runToolsTransformation({
         if (tool2.execute != null) {
           const toolExecutionId = generateId();
           outstandingToolCalls.add(toolExecutionId);
-          tool2.execute(toolCall.args).then(
-            (result) => {
-              toolResultsStreamController.enqueue({
-                ...toolCall,
-                type: "tool-result",
-                result
-              });
-              outstandingToolCalls.delete(toolExecutionId);
-              if (canClose && outstandingToolCalls.size === 0) {
-                toolResultsStreamController.close();
-              }
+          recordSpan({
+            name: "ai.toolCall",
+            attributes: {
+              "ai.toolCall.name": toolCall.toolName,
+              "ai.toolCall.id": toolCall.toolCallId,
+              "ai.toolCall.args": JSON.stringify(toolCall.args)
             },
-            (error) => {
-              toolResultsStreamController.enqueue({
-                type: "error",
-                error
-              });
-              outstandingToolCalls.delete(toolExecutionId);
-              if (canClose && outstandingToolCalls.size === 0) {
-                toolResultsStreamController.close();
+            tracer,
+            fn: async (span) => tool2.execute(toolCall.args).then(
+              (result) => {
+                toolResultsStreamController.enqueue({
+                  ...toolCall,
+                  type: "tool-result",
+                  result
+                });
+                outstandingToolCalls.delete(toolExecutionId);
+                if (canClose && outstandingToolCalls.size === 0) {
+                  toolResultsStreamController.close();
+                }
+                try {
+                  span.setAttributes({
+                    "ai.toolCall.result": JSON.stringify(result)
+                  });
+                } catch (ignored) {
+                }
+              },
+              (error) => {
+                toolResultsStreamController.enqueue({
+                  type: "error",
+                  error
+                });
+                outstandingToolCalls.delete(toolExecutionId);
+                if (canClose && outstandingToolCalls.size === 0) {
+                  toolResultsStreamController.close();
+                }
               }
-            }
-          );
+            )
+          });
         }
       } catch (error) {
         toolResultsStreamController.enqueue({
@@ -1437,32 +1679,76 @@ async function streamText({
   maxRetries,
   abortSignal,
   headers,
+  experimental_telemetry: telemetry,
   onFinish,
   ...settings
 }) {
-  const retry = retryWithExponentialBackoff({ maxRetries });
-  const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
-  const { stream, warnings, rawResponse } = await retry(
-    () => model.doStream({
-      mode: {
-        type: "regular",
-        ...prepareToolsAndToolChoice({ tools, toolChoice })
-      },
-      ...prepareCallSettings(settings),
-      inputFormat: validatedPrompt.type,
-      prompt: convertToLanguageModelPrompt(validatedPrompt),
-      abortSignal,
-      headers
-    })
-  );
-  return new StreamTextResult({
-    stream: runToolsTransformation({
-      tools,
-      generatorStream: stream
-    }),
-    warnings,
-    rawResponse,
-    onFinish
+  var _a;
+  const baseTelemetryAttributes = getBaseTelemetryAttributes({
+    operationName: "ai.streamText",
+    model,
+    telemetry,
+    headers,
+    settings: { ...settings, maxRetries }
+  });
+  const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+  return recordSpan({
+    name: "ai.streamText",
+    attributes: {
+      ...baseTelemetryAttributes,
+      // specific settings that only make sense on the outer level:
+      "ai.prompt": JSON.stringify({ system, prompt, messages })
+    },
+    tracer,
+    endWhenDone: false,
+    fn: async (rootSpan) => {
+      const retry = retryWithExponentialBackoff({ maxRetries });
+      const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
+      const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+      const {
+        result: { stream, warnings, rawResponse },
+        doStreamSpan
+      } = await retry(
+        () => recordSpan({
+          name: "ai.streamText.doStream",
+          attributes: {
+            ...baseTelemetryAttributes,
+            "ai.prompt.format": validatedPrompt.type,
+            "ai.prompt.messages": JSON.stringify(promptMessages)
+          },
+          tracer,
+          endWhenDone: false,
+          fn: async (doStreamSpan2) => {
+            return {
+              result: await model.doStream({
+                mode: {
+                  type: "regular",
+                  ...prepareToolsAndToolChoice({ tools, toolChoice })
+                },
+                ...prepareCallSettings(settings),
+                inputFormat: validatedPrompt.type,
+                prompt: promptMessages,
+                abortSignal,
+                headers
+              }),
+              doStreamSpan: doStreamSpan2
+            };
+          }
+        })
+      );
+      return new StreamTextResult({
+        stream: runToolsTransformation({
+          tools,
+          generatorStream: stream,
+          tracer
+        }),
+        warnings,
+        rawResponse,
+        onFinish,
+        rootSpan,
+        doStreamSpan
+      });
+    }
   });
 }
 var StreamTextResult = class {
@@ -1470,7 +1756,9 @@ var StreamTextResult = class {
     stream,
     warnings,
     rawResponse,
-    onFinish
+    onFinish,
+    rootSpan,
+    doStreamSpan
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -1500,41 +1788,73 @@ var StreamTextResult = class {
     let text = "";
     const toolCalls = [];
     const toolResults = [];
+    let firstChunk = true;
     const self = this;
     this.originalStream = stream.pipeThrough(
       new TransformStream({
         async transform(chunk, controller) {
           controller.enqueue(chunk);
-          if (chunk.type === "text-delta") {
-            text += chunk.textDelta;
-          }
-          if (chunk.type === "tool-call") {
-            toolCalls.push(chunk);
+          if (firstChunk) {
+            firstChunk = false;
+            doStreamSpan.addEvent("ai.stream.firstChunk");
           }
-          if (chunk.type === "tool-result") {
-            toolResults.push(chunk);
-          }
-          if (chunk.type === "finish") {
-            usage = chunk.usage;
-            finishReason = chunk.finishReason;
-            resolveUsage(usage);
-            resolveFinishReason(finishReason);
-            resolveText(text);
-            resolveToolCalls(toolCalls);
+          const chunkType = chunk.type;
+          switch (chunkType) {
+            case "text-delta":
+              text += chunk.textDelta;
+              break;
+            case "tool-call":
+              toolCalls.push(chunk);
+              break;
+            case "tool-result":
+              toolResults.push(chunk);
+              break;
+            case "finish":
+              usage = chunk.usage;
+              finishReason = chunk.finishReason;
+              resolveUsage(usage);
+              resolveFinishReason(finishReason);
+              resolveText(text);
+              resolveToolCalls(toolCalls);
+              break;
+            case "error":
+              break;
+            default: {
+              const exhaustiveCheck = chunkType;
+              throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
+            }
           }
         },
         // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
         async flush(controller) {
           var _a;
           try {
+            const finalUsage = usage != null ? usage : {
+              promptTokens: NaN,
+              completionTokens: NaN,
+              totalTokens: NaN
+            };
+            const finalFinishReason = finishReason != null ? finishReason : "unknown";
+            const telemetryToolCalls = toolCalls.length > 0 ? JSON.stringify(toolCalls) : void 0;
+            doStreamSpan.setAttributes({
+              "ai.finishReason": finalFinishReason,
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.text": text,
+              "ai.result.toolCalls": telemetryToolCalls
+            });
+            doStreamSpan.end();
+            rootSpan.setAttributes({
+              "ai.finishReason": finalFinishReason,
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.text": text,
+              "ai.result.toolCalls": telemetryToolCalls
+            });
             resolveToolResults(toolResults);
             await ((_a = self.onFinish) == null ? void 0 : _a.call(self, {
-              finishReason: finishReason != null ? finishReason : "unknown",
-              usage: usage != null ? usage : {
-                promptTokens: NaN,
-                completionTokens: NaN,
-                totalTokens: NaN
-              },
+              finishReason: finalFinishReason,
+              usage: finalUsage,
               text,
               toolCalls,
               // The tool results are inferred as a never[] type, because they are
@@ -1547,6 +1867,8 @@ var StreamTextResult = class {
             }));
           } catch (error) {
             controller.error(error);
+          } finally {
+            rootSpan.end();
           }
         }
       })