ai 3.2.19 → 3.2.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1205,6 +1205,157 @@ function prepareToolsAndToolChoice({
   };
 }
 
+// core/telemetry/get-base-telemetry-attributes.ts
+function getBaseTelemetryAttributes({
+  operationName,
+  model,
+  settings,
+  telemetry,
+  headers
+}) {
+  var _a;
+  return {
+    "ai.model.provider": model.provider,
+    "ai.model.id": model.modelId,
+    // settings:
+    ...Object.entries(settings).reduce((attributes, [key, value]) => {
+      attributes[`ai.settings.${key}`] = value;
+      return attributes;
+    }, {}),
+    // special telemetry information
+    "operation.name": operationName,
+    "resource.name": telemetry == null ? void 0 : telemetry.functionId,
+    "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
+    // add metadata as attributes:
+    ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
+      (attributes, [key, value]) => {
+        attributes[`ai.telemetry.metadata.${key}`] = value;
+        return attributes;
+      },
+      {}
+    ),
+    // request headers
+    ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
+      if (value !== void 0) {
+        attributes[`ai.request.headers.${key}`] = value;
+      }
+      return attributes;
+    }, {})
+  };
+}
+
+// core/telemetry/get-tracer.ts
+var import_api = require("@opentelemetry/api");
+
+// core/telemetry/noop-tracer.ts
+var noopTracer = {
+  startSpan() {
+    return noopSpan;
+  },
+  startActiveSpan(name, arg1, arg2, arg3) {
+    if (typeof arg1 === "function") {
+      return arg1(noopSpan);
+    }
+    if (typeof arg2 === "function") {
+      return arg2(noopSpan);
+    }
+    if (typeof arg3 === "function") {
+      return arg3(noopSpan);
+    }
+  }
+};
+var noopSpan = {
+  spanContext() {
+    return noopSpanContext;
+  },
+  setAttribute() {
+    return this;
+  },
+  setAttributes() {
+    return this;
+  },
+  addEvent() {
+    return this;
+  },
+  addLink() {
+    return this;
+  },
+  addLinks() {
+    return this;
+  },
+  setStatus() {
+    return this;
+  },
+  updateName() {
+    return this;
+  },
+  end() {
+    return this;
+  },
+  isRecording() {
+    return false;
+  },
+  recordException() {
+    return this;
+  }
+};
+var noopSpanContext = {
+  traceId: "",
+  spanId: "",
+  traceFlags: 0
+};
+
+// core/telemetry/get-tracer.ts
+var testTracer = void 0;
+function getTracer({ isEnabled }) {
+  if (!isEnabled) {
+    return noopTracer;
+  }
+  if (testTracer) {
+    return testTracer;
+  }
+  return import_api.trace.getTracer("ai");
+}
+
+// core/telemetry/record-span.ts
+var import_api2 = require("@opentelemetry/api");
+function recordSpan({
+  name,
+  tracer,
+  attributes,
+  fn,
+  endWhenDone = true
+}) {
+  return tracer.startActiveSpan(name, { attributes }, async (span) => {
+    try {
+      const result = await fn(span);
+      if (endWhenDone) {
+        span.end();
+      }
+      return result;
+    } catch (error) {
+      try {
+        if (error instanceof Error) {
+          span.recordException({
+            name: error.name,
+            message: error.message,
+            stack: error.stack
+          });
+          span.setStatus({
+            code: import_api2.SpanStatusCode.ERROR,
+            message: error.message
+          });
+        } else {
+          span.setStatus({ code: import_api2.SpanStatusCode.ERROR });
+        }
+      } finally {
+        span.end();
+      }
+      throw error;
+    }
+  });
+}
+
 // core/generate-text/tool-call.ts
 var import_provider6 = require("@ai-sdk/provider");
 var import_provider_utils6 = require("@ai-sdk/provider-utils");
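The three helpers added above are the entire telemetry core: getBaseTelemetryAttributes flattens settings, telemetry metadata, and request headers into span attributes; getTracer returns a shared no-op tracer unless telemetry is explicitly enabled; and recordSpan runs an async function inside an active span, recording exceptions before re-throwing. A minimal sketch of the recordSpan contract follows; note these helpers are internal to dist/index.js and not exported, so this assumes code running in the same module scope:

// Hedged sketch, not public API: getTracer and recordSpan are module-internal.
const tracer = getTracer({ isEnabled: false }); // noopTracer, so every span call below is free

recordSpan({
  name: "example.task", // hypothetical span and attribute names
  attributes: { "example.input": "42" },
  tracer,
  fn: async (span) => {
    span.setAttributes({ "example.phase": "running" });
    throw new Error("boom");
  }
}).catch((error) => {
  // recordSpan records the exception, marks the span as ERROR, ends it,
  // and re-throws; it never swallows the error:
  console.log(error.message); // "boom"
});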
@@ -1255,71 +1406,128 @@ async function generateText({
   headers,
   maxAutomaticRoundtrips = 0,
   maxToolRoundtrips = maxAutomaticRoundtrips,
+  experimental_telemetry: telemetry,
   ...settings
 }) {
-  var _a, _b, _c;
-  const retry = retryWithExponentialBackoff({ maxRetries });
-  const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
-  const mode = {
-    type: "regular",
-    ...prepareToolsAndToolChoice({ tools, toolChoice })
-  };
-  const callSettings = prepareCallSettings(settings);
-  const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
-  let currentModelResponse;
-  let currentToolCalls = [];
-  let currentToolResults = [];
-  let roundtrips = 0;
-  const responseMessages = [];
-  do {
-    currentModelResponse = await retry(() => {
-      return model.doGenerate({
-        mode,
-        ...callSettings,
-        // once we have a roundtrip, we need to switch to messages format:
-        inputFormat: roundtrips === 0 ? validatedPrompt.type : "messages",
-        prompt: promptMessages,
-        abortSignal,
-        headers
+  var _a;
+  const baseTelemetryAttributes = getBaseTelemetryAttributes({
+    operationName: "ai.generateText",
+    model,
+    telemetry,
+    headers,
+    settings: { ...settings, maxRetries }
+  });
+  const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+  return recordSpan({
+    name: "ai.generateText",
+    attributes: {
+      ...baseTelemetryAttributes,
+      // specific settings that only make sense on the outer level:
+      "ai.prompt": JSON.stringify({ system, prompt, messages }),
+      "ai.settings.maxToolRoundtrips": maxToolRoundtrips
+    },
+    tracer,
+    fn: async (span) => {
+      var _a2, _b, _c;
+      const retry = retryWithExponentialBackoff({ maxRetries });
+      const validatedPrompt = getValidatedPrompt({
+        system,
+        prompt,
+        messages
       });
-    });
-    currentToolCalls = ((_a = currentModelResponse.toolCalls) != null ? _a : []).map(
-      (modelToolCall) => parseToolCall({ toolCall: modelToolCall, tools })
-    );
-    currentToolResults = tools == null ? [] : await executeTools({ toolCalls: currentToolCalls, tools });
-    const newResponseMessages = toResponseMessages({
-      text: (_b = currentModelResponse.text) != null ? _b : "",
-      toolCalls: currentToolCalls,
-      toolResults: currentToolResults
-    });
-    responseMessages.push(...newResponseMessages);
-    promptMessages.push(
-      ...newResponseMessages.map(convertToLanguageModelMessage)
-    );
-  } while (
-    // there are tool calls:
-    currentToolCalls.length > 0 && // all current tool calls have results:
-    currentToolResults.length === currentToolCalls.length && // the number of roundtrips is less than the maximum:
-    roundtrips++ < maxToolRoundtrips
-  );
-  return new GenerateTextResult({
-    // Always return a string so that the caller doesn't have to check for undefined.
-    // If they need to check if the model did not return any text,
-    // they can check the length of the string:
-    text: (_c = currentModelResponse.text) != null ? _c : "",
-    toolCalls: currentToolCalls,
-    toolResults: currentToolResults,
-    finishReason: currentModelResponse.finishReason,
-    usage: calculateCompletionTokenUsage(currentModelResponse.usage),
-    warnings: currentModelResponse.warnings,
-    rawResponse: currentModelResponse.rawResponse,
-    logprobs: currentModelResponse.logprobs,
-    responseMessages
+      const mode = {
+        type: "regular",
+        ...prepareToolsAndToolChoice({ tools, toolChoice })
+      };
+      const callSettings = prepareCallSettings(settings);
+      const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+      let currentModelResponse;
+      let currentToolCalls = [];
+      let currentToolResults = [];
+      let roundtrips = 0;
+      const responseMessages = [];
+      do {
+        const currentInputFormat = roundtrips === 0 ? validatedPrompt.type : "messages";
+        currentModelResponse = await retry(
+          () => recordSpan({
+            name: "ai.generateText.doGenerate",
+            attributes: {
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": currentInputFormat,
+              "ai.prompt.messages": JSON.stringify(promptMessages)
+            },
+            tracer,
+            fn: async (span2) => {
+              const result = await model.doGenerate({
+                mode,
+                ...callSettings,
+                inputFormat: currentInputFormat,
+                prompt: promptMessages,
+                abortSignal,
+                headers
+              });
+              span2.setAttributes({
+                "ai.finishReason": result.finishReason,
+                "ai.usage.promptTokens": result.usage.promptTokens,
+                "ai.usage.completionTokens": result.usage.completionTokens,
+                "ai.result.text": result.text,
+                "ai.result.toolCalls": JSON.stringify(result.toolCalls)
+              });
+              return result;
+            }
+          })
+        );
+        currentToolCalls = ((_a2 = currentModelResponse.toolCalls) != null ? _a2 : []).map(
+          (modelToolCall) => parseToolCall({ toolCall: modelToolCall, tools })
+        );
+        currentToolResults = tools == null ? [] : await executeTools({
+          toolCalls: currentToolCalls,
+          tools,
+          tracer
+        });
+        const newResponseMessages = toResponseMessages({
+          text: (_b = currentModelResponse.text) != null ? _b : "",
+          toolCalls: currentToolCalls,
+          toolResults: currentToolResults
+        });
+        responseMessages.push(...newResponseMessages);
+        promptMessages.push(
+          ...newResponseMessages.map(convertToLanguageModelMessage)
+        );
+      } while (
+        // there are tool calls:
+        currentToolCalls.length > 0 && // all current tool calls have results:
+        currentToolResults.length === currentToolCalls.length && // the number of roundtrips is less than the maximum:
+        roundtrips++ < maxToolRoundtrips
+      );
+      span.setAttributes({
+        "ai.finishReason": currentModelResponse.finishReason,
+        "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
+        "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
+        "ai.result.text": currentModelResponse.text,
+        "ai.result.toolCalls": JSON.stringify(currentModelResponse.toolCalls)
+      });
+      return new GenerateTextResult({
+        // Always return a string so that the caller doesn't have to check for undefined.
+        // If they need to check if the model did not return any text,
+        // they can check the length of the string:
+        text: (_c = currentModelResponse.text) != null ? _c : "",
+        toolCalls: currentToolCalls,
+        toolResults: currentToolResults,
+        finishReason: currentModelResponse.finishReason,
+        usage: calculateCompletionTokenUsage(currentModelResponse.usage),
+        warnings: currentModelResponse.warnings,
+        rawResponse: currentModelResponse.rawResponse,
+        logprobs: currentModelResponse.logprobs,
+        responseMessages
+      });
+    }
   });
 }
 async function executeTools({
   toolCalls,
-  tools
+  tools,
+  tracer
 }) {
   const toolResults = await Promise.all(
     toolCalls.map(async (toolCall) => {
@@ -1327,7 +1535,25 @@ async function executeTools({
       if ((tool2 == null ? void 0 : tool2.execute) == null) {
         return void 0;
       }
-      const result = await tool2.execute(toolCall.args);
+      const result = await recordSpan({
+        name: "ai.toolCall",
+        attributes: {
+          "ai.toolCall.name": toolCall.toolName,
+          "ai.toolCall.id": toolCall.toolCallId,
+          "ai.toolCall.args": JSON.stringify(toolCall.args)
+        },
+        tracer,
+        fn: async (span) => {
+          const result2 = await tool2.execute(toolCall.args);
+          try {
+            span.setAttributes({
+              "ai.toolCall.result": JSON.stringify(result2)
+            });
+          } catch (ignored) {
+          }
+          return result2;
+        }
+      });
       return {
         toolCallId: toolCall.toolCallId,
         toolName: toolCall.toolName,
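Caller-facing, the change in these generateText hunks is the new experimental_telemetry option; everything else is internal span wiring around doGenerate and tool execution. A hedged usage sketch follows; the provider, model id, and prompt are invented placeholders, not taken from this diff:

// Assumes the @ai-sdk/openai provider package; any LanguageModel works.
const { generateText } = require("ai");
const { openai } = require("@ai-sdk/openai");

async function main() {
  const { text } = await generateText({
    model: openai("gpt-4o"), // placeholder model
    prompt: "Invent a new holiday and describe it.",
    experimental_telemetry: {
      isEnabled: true, // defaults to false, which routes all spans to the no-op tracer
      functionId: "holiday-demo", // recorded as resource.name and ai.telemetry.functionId
      metadata: { userId: "user-123" } // recorded as ai.telemetry.metadata.* attributes
    }
  });
  console.log(text);
}

main();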
@@ -1383,7 +1609,8 @@ var import_provider7 = require("@ai-sdk/provider");
 var import_ui_utils2 = require("@ai-sdk/ui-utils");
 function runToolsTransformation({
   tools,
-  generatorStream
+  generatorStream,
+  tracer
 }) {
   let canClose = false;
   const outstandingToolCalls = /* @__PURE__ */ new Set();
@@ -1431,29 +1658,44 @@ function runToolsTransformation({
         if (tool2.execute != null) {
           const toolExecutionId = (0, import_ui_utils2.generateId)();
           outstandingToolCalls.add(toolExecutionId);
-          tool2.execute(toolCall.args).then(
-            (result) => {
-              toolResultsStreamController.enqueue({
-                ...toolCall,
-                type: "tool-result",
-                result
-              });
-              outstandingToolCalls.delete(toolExecutionId);
-              if (canClose && outstandingToolCalls.size === 0) {
-                toolResultsStreamController.close();
-              }
+          recordSpan({
+            name: "ai.toolCall",
+            attributes: {
+              "ai.toolCall.name": toolCall.toolName,
+              "ai.toolCall.id": toolCall.toolCallId,
+              "ai.toolCall.args": JSON.stringify(toolCall.args)
             },
-            (error) => {
-              toolResultsStreamController.enqueue({
-                type: "error",
-                error
-              });
-              outstandingToolCalls.delete(toolExecutionId);
-              if (canClose && outstandingToolCalls.size === 0) {
-                toolResultsStreamController.close();
+            tracer,
+            fn: async (span) => tool2.execute(toolCall.args).then(
+              (result) => {
+                toolResultsStreamController.enqueue({
+                  ...toolCall,
+                  type: "tool-result",
+                  result
+                });
+                outstandingToolCalls.delete(toolExecutionId);
+                if (canClose && outstandingToolCalls.size === 0) {
+                  toolResultsStreamController.close();
+                }
+                try {
+                  span.setAttributes({
+                    "ai.toolCall.result": JSON.stringify(result)
+                  });
+                } catch (ignored) {
+                }
+              },
+              (error) => {
+                toolResultsStreamController.enqueue({
+                  type: "error",
+                  error
+                });
+                outstandingToolCalls.delete(toolExecutionId);
+                if (canClose && outstandingToolCalls.size === 0) {
+                  toolResultsStreamController.close();
+                }
               }
-            }
-          );
+            )
+          });
         }
       } catch (error) {
         toolResultsStreamController.enqueue({
@@ -1526,32 +1768,76 @@ async function streamText({
   maxRetries,
   abortSignal,
   headers,
+  experimental_telemetry: telemetry,
   onFinish,
   ...settings
 }) {
-  const retry = retryWithExponentialBackoff({ maxRetries });
-  const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
-  const { stream, warnings, rawResponse } = await retry(
-    () => model.doStream({
-      mode: {
-        type: "regular",
-        ...prepareToolsAndToolChoice({ tools, toolChoice })
-      },
-      ...prepareCallSettings(settings),
-      inputFormat: validatedPrompt.type,
-      prompt: convertToLanguageModelPrompt(validatedPrompt),
-      abortSignal,
-      headers
-    })
-  );
-  return new StreamTextResult({
-    stream: runToolsTransformation({
-      tools,
-      generatorStream: stream
-    }),
-    warnings,
-    rawResponse,
-    onFinish
+  var _a;
+  const baseTelemetryAttributes = getBaseTelemetryAttributes({
+    operationName: "ai.streamText",
+    model,
+    telemetry,
+    headers,
+    settings: { ...settings, maxRetries }
+  });
+  const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+  return recordSpan({
+    name: "ai.streamText",
+    attributes: {
+      ...baseTelemetryAttributes,
+      // specific settings that only make sense on the outer level:
+      "ai.prompt": JSON.stringify({ system, prompt, messages })
+    },
+    tracer,
+    endWhenDone: false,
+    fn: async (rootSpan) => {
+      const retry = retryWithExponentialBackoff({ maxRetries });
+      const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
+      const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+      const {
+        result: { stream, warnings, rawResponse },
+        doStreamSpan
+      } = await retry(
+        () => recordSpan({
+          name: "ai.streamText.doStream",
+          attributes: {
+            ...baseTelemetryAttributes,
+            "ai.prompt.format": validatedPrompt.type,
+            "ai.prompt.messages": JSON.stringify(promptMessages)
+          },
+          tracer,
+          endWhenDone: false,
+          fn: async (doStreamSpan2) => {
+            return {
+              result: await model.doStream({
+                mode: {
+                  type: "regular",
+                  ...prepareToolsAndToolChoice({ tools, toolChoice })
+                },
+                ...prepareCallSettings(settings),
+                inputFormat: validatedPrompt.type,
+                prompt: promptMessages,
+                abortSignal,
+                headers
+              }),
+              doStreamSpan: doStreamSpan2
+            };
+          }
+        })
+      );
+      return new StreamTextResult({
+        stream: runToolsTransformation({
+          tools,
+          generatorStream: stream,
+          tracer
+        }),
+        warnings,
+        rawResponse,
+        onFinish,
+        rootSpan,
+        doStreamSpan
+      });
+    }
   });
 }
 var StreamTextResult = class {
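streamText gains the same experimental_telemetry option, but both the ai.streamText root span and the ai.streamText.doStream span are opened with endWhenDone: false: they must outlive the awaited call and are ended later by the result stream (see the StreamTextResult hunks below). A hedged usage sketch with the same placeholder model as before:

const { streamText } = require("ai");
const { openai } = require("@ai-sdk/openai");

async function main() {
  const result = await streamText({
    model: openai("gpt-4o"), // placeholder model
    prompt: "Stream a limerick about tracing.",
    experimental_telemetry: { isEnabled: true, functionId: "limerick-demo" }
  });
  // At this point the root and doStream spans are still open; they are
  // ended in the flush() handler once the stream finishes.
  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }
}

main();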
@@ -1559,7 +1845,9 @@ var StreamTextResult = class {
     stream,
     warnings,
     rawResponse,
-    onFinish
+    onFinish,
+    rootSpan,
+    doStreamSpan
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -1589,41 +1877,73 @@ var StreamTextResult = class {
     let text = "";
     const toolCalls = [];
     const toolResults = [];
+    let firstChunk = true;
     const self = this;
     this.originalStream = stream.pipeThrough(
       new TransformStream({
         async transform(chunk, controller) {
           controller.enqueue(chunk);
-          if (chunk.type === "text-delta") {
-            text += chunk.textDelta;
-          }
-          if (chunk.type === "tool-call") {
-            toolCalls.push(chunk);
+          if (firstChunk) {
+            firstChunk = false;
+            doStreamSpan.addEvent("ai.stream.firstChunk");
           }
-          if (chunk.type === "tool-result") {
-            toolResults.push(chunk);
-          }
-          if (chunk.type === "finish") {
-            usage = chunk.usage;
-            finishReason = chunk.finishReason;
-            resolveUsage(usage);
-            resolveFinishReason(finishReason);
-            resolveText(text);
-            resolveToolCalls(toolCalls);
+          const chunkType = chunk.type;
+          switch (chunkType) {
+            case "text-delta":
+              text += chunk.textDelta;
+              break;
+            case "tool-call":
+              toolCalls.push(chunk);
+              break;
+            case "tool-result":
+              toolResults.push(chunk);
+              break;
+            case "finish":
+              usage = chunk.usage;
+              finishReason = chunk.finishReason;
+              resolveUsage(usage);
+              resolveFinishReason(finishReason);
+              resolveText(text);
+              resolveToolCalls(toolCalls);
+              break;
+            case "error":
+              break;
+            default: {
+              const exhaustiveCheck = chunkType;
+              throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
+            }
           }
         },
         // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
         async flush(controller) {
           var _a;
           try {
+            const finalUsage = usage != null ? usage : {
+              promptTokens: NaN,
+              completionTokens: NaN,
+              totalTokens: NaN
+            };
+            const finalFinishReason = finishReason != null ? finishReason : "unknown";
+            const telemetryToolCalls = toolCalls.length > 0 ? JSON.stringify(toolCalls) : void 0;
+            doStreamSpan.setAttributes({
+              "ai.finishReason": finalFinishReason,
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.text": text,
+              "ai.result.toolCalls": telemetryToolCalls
+            });
+            doStreamSpan.end();
+            rootSpan.setAttributes({
+              "ai.finishReason": finalFinishReason,
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              "ai.result.text": text,
+              "ai.result.toolCalls": telemetryToolCalls
+            });
             resolveToolResults(toolResults);
             await ((_a = self.onFinish) == null ? void 0 : _a.call(self, {
-              finishReason: finishReason != null ? finishReason : "unknown",
-              usage: usage != null ? usage : {
-                promptTokens: NaN,
-                completionTokens: NaN,
-                totalTokens: NaN
-              },
+              finishReason: finalFinishReason,
+              usage: finalUsage,
               text,
               toolCalls,
               // The tool results are inferred as a never[] type, because they are
@@ -1636,6 +1956,8 @@ var StreamTextResult = class {
             }));
           } catch (error) {
             controller.error(error);
+          } finally {
+            rootSpan.end();
           }
         }
       })
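
None of these spans leave the process unless a global OpenTelemetry tracer provider is registered, since getTracer resolves trace.getTracer("ai") from @opentelemetry/api. A hedged sketch of standard OpenTelemetry SDK wiring; these packages are peer tooling the consumer installs, not dependencies added by this diff:

const { NodeTracerProvider } = require("@opentelemetry/sdk-trace-node");
const {
  SimpleSpanProcessor,
  ConsoleSpanExporter
} = require("@opentelemetry/sdk-trace-base");

// Print every finished span to stdout; swap in an OTLP exporter for real backends.
const provider = new NodeTracerProvider();
provider.addSpanProcessor(new SimpleSpanProcessor(new ConsoleSpanExporter()));
provider.register(); // makes trace.getTracer("ai") return a real tracer

// From here on, generateText/streamText calls made with
// experimental_telemetry: { isEnabled: true } emit ai.generateText,
// ai.streamText, ai.*.doGenerate/doStream, and ai.toolCall spans.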