@fallom/trace 0.1.11 → 0.1.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -214,12 +214,20 @@ declare function wrapGoogleAI<T extends {
  * }); // Automatically traced!
  * ```
  */
+/** Options for wrapAISDK */
+interface WrapAISDKOptions {
+    /**
+     * Enable debug logging to see the raw Vercel AI SDK response structure.
+     * Useful for debugging missing usage/token data.
+     */
+    debug?: boolean;
+}
 declare function wrapAISDK<T extends {
     generateText: (...args: any[]) => Promise<any>;
     streamText: (...args: any[]) => any;
     generateObject?: (...args: any[]) => Promise<any>;
     streamObject?: (...args: any[]) => any;
-}>(ai: T): {
+}>(ai: T, options?: WrapAISDKOptions): {
     generateText: T["generateText"];
     streamText: T["streamText"];
     generateObject: T["generateObject"];
package/dist/index.d.ts CHANGED
@@ -214,12 +214,20 @@ declare function wrapGoogleAI<T extends {
  * }); // Automatically traced!
  * ```
  */
+/** Options for wrapAISDK */
+interface WrapAISDKOptions {
+    /**
+     * Enable debug logging to see the raw Vercel AI SDK response structure.
+     * Useful for debugging missing usage/token data.
+     */
+    debug?: boolean;
+}
 declare function wrapAISDK<T extends {
     generateText: (...args: any[]) => Promise<any>;
     streamText: (...args: any[]) => any;
     generateObject?: (...args: any[]) => Promise<any>;
     streamObject?: (...args: any[]) => any;
-}>(ai: T): {
+}>(ai: T, options?: WrapAISDKOptions): {
     generateText: T["generateText"];
     streamText: T["streamText"];
     generateObject: T["generateObject"];
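
The only API change in the declarations above is the new optional second parameter, so existing wrapAISDK(ai) calls compile unchanged. A minimal usage sketch, assuming the namespace import of the ai package and the usual Fallom setup around it:

import * as ai from "ai";
import { wrapAISDK } from "@fallom/trace";

// Opt in to the new debug logging; omit the options object
// (or pass { debug: false }) to keep the pre-0.1.12 behavior.
const { generateText, streamText } = wrapAISDK(ai, { debug: true });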
package/dist/index.js CHANGED
@@ -1194,7 +1194,13 @@ function wrapOpenAI(client) {
         response?.choices?.[0]?.message,
         response?.model || params?.model,
         response?.id
-      ) : void 0;
+      ) : {};
+      if (response?.usage) {
+        attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
+      }
+      if (response?.choices?.[0]?.finish_reason) {
+        attributes["gen_ai.response.finish_reason"] = response.choices[0].finish_reason;
+      }
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
@@ -1212,7 +1218,7 @@ function wrapOpenAI(client) {
         prompt_tokens: response?.usage?.prompt_tokens,
         completion_tokens: response?.usage?.completion_tokens,
         total_tokens: response?.usage?.total_tokens,
-        attributes,
+        attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
         prompt_ab_test_key: promptCtx?.abTestKey,
@@ -1285,10 +1291,16 @@ function wrapAnthropic(client) {
         { role: "assistant", content: response?.content?.[0]?.text || "" },
         response?.model || params?.model,
         response?.id
-      ) : void 0;
-      if (attributes && params?.system) {
+      ) : {};
+      if (params?.system) {
         attributes["gen_ai.system_prompt"] = params.system;
       }
+      if (response?.usage) {
+        attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
+      }
+      if (response?.stop_reason) {
+        attributes["gen_ai.response.finish_reason"] = response.stop_reason;
+      }
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
@@ -1306,7 +1318,7 @@ function wrapAnthropic(client) {
         prompt_tokens: response?.usage?.input_tokens,
         completion_tokens: response?.usage?.output_tokens,
         total_tokens: (response?.usage?.input_tokens || 0) + (response?.usage?.output_tokens || 0),
-        attributes,
+        attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
         prompt_ab_test_key: promptCtx?.abTestKey,
@@ -1399,6 +1411,13 @@ function wrapGoogleAI(model) {
           attributes["gen_ai.completion.0.content"] = outputText;
         }
       }
+      if (usage) {
+        attributes["fallom.raw.usage"] = JSON.stringify(usage);
+      }
+      const candidate = result?.candidates?.[0];
+      if (candidate?.finishReason) {
+        attributes["gen_ai.response.finish_reason"] = candidate.finishReason;
+      }
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
@@ -1416,7 +1435,7 @@ function wrapGoogleAI(model) {
         prompt_tokens: usage?.promptTokenCount,
         completion_tokens: usage?.candidatesTokenCount,
         total_tokens: usage?.totalTokenCount,
-        attributes: captureContent ? attributes : void 0,
+        attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
         prompt_ab_test_key: promptCtx?.abTestKey,
@@ -1464,8 +1483,37 @@ function wrapGoogleAI(model) {
   };
   return model;
 }
-function wrapAISDK(ai) {
+var aiSdkDebug = false;
+function extractUsageFromResult(result, directUsage) {
+  let usage = directUsage ?? result?.usage;
+  const isValidNumber = (v) => v !== null && v !== void 0 && !Number.isNaN(v);
+  let promptTokens = isValidNumber(usage?.promptTokens) ? usage.promptTokens : void 0;
+  let completionTokens = isValidNumber(usage?.completionTokens) ? usage.completionTokens : void 0;
+  let totalTokens = isValidNumber(usage?.totalTokens) ? usage.totalTokens : void 0;
+  let cost;
+  const orUsage = result?.experimental_providerMetadata?.openrouter?.usage;
+  if (orUsage) {
+    if (promptTokens === void 0 && isValidNumber(orUsage.promptTokens)) {
+      promptTokens = orUsage.promptTokens;
+    }
+    if (completionTokens === void 0 && isValidNumber(orUsage.completionTokens)) {
+      completionTokens = orUsage.completionTokens;
+    }
+    if (totalTokens === void 0 && isValidNumber(orUsage.totalTokens)) {
+      totalTokens = orUsage.totalTokens;
+    }
+    if (isValidNumber(orUsage.cost)) {
+      cost = orUsage.cost;
+    }
+  }
+  if (totalTokens === void 0 && (promptTokens !== void 0 || completionTokens !== void 0)) {
+    totalTokens = (promptTokens ?? 0) + (completionTokens ?? 0);
+  }
+  return { promptTokens, completionTokens, totalTokens, cost };
+}
+function wrapAISDK(ai, options) {
   const aiModule = ai;
+  aiSdkDebug = options?.debug ?? false;
   return {
     generateText: createGenerateTextWrapper(aiModule),
     streamText: createStreamTextWrapper(aiModule),
@@ -1494,6 +1542,28 @@ function createGenerateTextWrapper(aiModule) {
     try {
       const result = await aiModule.generateText(...args);
       const endTime = Date.now();
+      if (aiSdkDebug) {
+        console.log(
+          "\n\u{1F50D} [Fallom Debug] generateText result keys:",
+          Object.keys(result || {})
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.usage:",
+          JSON.stringify(result?.usage, null, 2)
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.response keys:",
+          Object.keys(result?.response || {})
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.response.usage:",
+          JSON.stringify(result?.response?.usage, null, 2)
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.experimental_providerMetadata:",
+          JSON.stringify(result?.experimental_providerMetadata, null, 2)
+        );
+      }
       const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
       const attributes = {};
       if (captureContent) {
@@ -1517,6 +1587,18 @@ function createGenerateTextWrapper(aiModule) {
           attributes["gen_ai.response.id"] = result.response.id;
         }
       }
+      if (result?.usage) {
+        attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
+      }
+      if (result?.experimental_providerMetadata) {
+        attributes["fallom.raw.providerMetadata"] = JSON.stringify(
+          result.experimental_providerMetadata
+        );
+      }
+      if (result?.finishReason) {
+        attributes["gen_ai.response.finish_reason"] = result.finishReason;
+      }
+      const usage = extractUsageFromResult(result);
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
@@ -1531,9 +1613,9 @@ function createGenerateTextWrapper(aiModule) {
         end_time: new Date(endTime).toISOString(),
         duration_ms: endTime - startTime,
         status: "OK",
-        prompt_tokens: result?.usage?.promptTokens,
-        completion_tokens: result?.usage?.completionTokens,
-        total_tokens: result?.usage?.totalTokens,
+        prompt_tokens: usage.promptTokens,
+        completion_tokens: usage.completionTokens,
+        total_tokens: usage.totalTokens,
         attributes: captureContent ? attributes : void 0,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
@@ -1592,9 +1674,31 @@ function createStreamTextWrapper(aiModule) {
       } catch {
       }
       if (result?.usage) {
-        result.usage.then((usage) => {
+        result.usage.then(async (rawUsage) => {
           const endTime = Date.now();
-          log2("\u{1F4CA} streamText usage:", JSON.stringify(usage, null, 2));
+          if (aiSdkDebug) {
+            console.log(
+              "\n\u{1F50D} [Fallom Debug] streamText usage:",
+              JSON.stringify(rawUsage, null, 2)
+            );
+            console.log(
+              "\u{1F50D} [Fallom Debug] streamText result keys:",
+              Object.keys(result || {})
+            );
+          }
+          log2("\u{1F4CA} streamText usage:", JSON.stringify(rawUsage, null, 2));
+          let providerMetadata = result?.experimental_providerMetadata;
+          if (providerMetadata && typeof providerMetadata.then === "function") {
+            try {
+              providerMetadata = await providerMetadata;
+            } catch {
+              providerMetadata = void 0;
+            }
+          }
+          const usage = extractUsageFromResult(
+            { experimental_providerMetadata: providerMetadata },
+            rawUsage
+          );
           const attributes = {};
           if (captureContent) {
             attributes["gen_ai.request.model"] = modelId;
@@ -1606,6 +1710,12 @@ function createStreamTextWrapper(aiModule) {
           if (firstTokenTime) {
            attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
           }
+          if (rawUsage) {
+            attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
+          }
+          if (providerMetadata) {
+            attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
+          }
           const tracePayload = {
             config_key: ctx.configKey,
             session_id: ctx.sessionId,
@@ -1620,9 +1730,9 @@ function createStreamTextWrapper(aiModule) {
             end_time: new Date(endTime).toISOString(),
             duration_ms: endTime - startTime,
             status: "OK",
-            prompt_tokens: usage?.promptTokens,
-            completion_tokens: usage?.completionTokens,
-            total_tokens: usage?.totalTokens,
+            prompt_tokens: usage.promptTokens,
+            completion_tokens: usage.completionTokens,
+            total_tokens: usage.totalTokens,
             time_to_first_token_ms: firstTokenTime ? firstTokenTime - startTime : void 0,
             attributes: captureContent ? attributes : void 0,
             prompt_key: promptCtx?.promptKey,
@@ -1702,6 +1812,24 @@ function createGenerateObjectWrapper(aiModule) {
     try {
       const result = await aiModule.generateObject(...args);
       const endTime = Date.now();
+      if (aiSdkDebug) {
+        console.log(
+          "\n\u{1F50D} [Fallom Debug] generateObject result keys:",
+          Object.keys(result || {})
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.usage:",
+          JSON.stringify(result?.usage, null, 2)
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.response keys:",
+          Object.keys(result?.response || {})
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.response.usage:",
+          JSON.stringify(result?.response?.usage, null, 2)
+        );
+      }
       const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
       const attributes = {};
       if (captureContent) {
@@ -1714,6 +1842,18 @@ function createGenerateObjectWrapper(aiModule) {
          );
        }
      }
+      if (result?.usage) {
+        attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
+      }
+      if (result?.experimental_providerMetadata) {
+        attributes["fallom.raw.providerMetadata"] = JSON.stringify(
+          result.experimental_providerMetadata
+        );
+      }
+      if (result?.finishReason) {
+        attributes["gen_ai.response.finish_reason"] = result.finishReason;
+      }
+      const usage = extractUsageFromResult(result);
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
@@ -1728,9 +1868,9 @@ function createGenerateObjectWrapper(aiModule) {
         end_time: new Date(endTime).toISOString(),
         duration_ms: endTime - startTime,
         status: "OK",
-        prompt_tokens: result?.usage?.promptTokens,
-        completion_tokens: result?.usage?.completionTokens,
-        total_tokens: result?.usage?.totalTokens,
+        prompt_tokens: usage.promptTokens,
+        completion_tokens: usage.completionTokens,
+        total_tokens: usage.totalTokens,
         attributes: captureContent ? attributes : void 0,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
@@ -1790,9 +1930,31 @@ function createStreamObjectWrapper(aiModule) {
       } catch {
       }
       if (result?.usage) {
-        result.usage.then((usage) => {
+        result.usage.then(async (rawUsage) => {
           const endTime = Date.now();
-          log2("\u{1F4CA} streamObject usage:", JSON.stringify(usage, null, 2));
+          if (aiSdkDebug) {
+            console.log(
+              "\n\u{1F50D} [Fallom Debug] streamObject usage:",
+              JSON.stringify(rawUsage, null, 2)
+            );
+            console.log(
+              "\u{1F50D} [Fallom Debug] streamObject result keys:",
+              Object.keys(result || {})
+            );
+          }
+          log2("\u{1F4CA} streamObject usage:", JSON.stringify(rawUsage, null, 2));
+          let providerMetadata = result?.experimental_providerMetadata;
+          if (providerMetadata && typeof providerMetadata.then === "function") {
+            try {
+              providerMetadata = await providerMetadata;
+            } catch {
+              providerMetadata = void 0;
+            }
+          }
+          const usage = extractUsageFromResult(
+            { experimental_providerMetadata: providerMetadata },
+            rawUsage
+          );
           const attributes = {};
           if (captureContent) {
             attributes["gen_ai.request.model"] = modelId;
@@ -1800,6 +1962,12 @@ function createStreamObjectWrapper(aiModule) {
           if (firstTokenTime) {
             attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
           }
+          if (rawUsage) {
+            attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
+          }
+          if (providerMetadata) {
+            attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
+          }
           sendTrace({
             config_key: ctx.configKey,
             session_id: ctx.sessionId,
@@ -1814,9 +1982,9 @@ function createStreamObjectWrapper(aiModule) {
             end_time: new Date(endTime).toISOString(),
             duration_ms: endTime - startTime,
             status: "OK",
-            prompt_tokens: usage?.promptTokens,
-            completion_tokens: usage?.completionTokens,
-            total_tokens: usage?.totalTokens,
+            prompt_tokens: usage.promptTokens,
+            completion_tokens: usage.completionTokens,
+            total_tokens: usage.totalTokens,
             attributes: captureContent ? attributes : void 0,
             prompt_key: promptCtx?.promptKey,
             prompt_version: promptCtx?.promptVersion,
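
The new extractUsageFromResult helper drives the token accounting above: values from the AI SDK's own usage object win when they are real numbers, OpenRouter's experimental_providerMetadata fills in whatever is missing, and cost only ever comes from OpenRouter. A hand-traced example of that fallback, with an input shape assumed from the code above rather than from any provider's documented schema:

// promptTokens and totalTokens are NaN in the SDK result, so the helper
// falls back to the OpenRouter metadata for promptTokens and recomputes
// totalTokens from the two parts.
const result = {
  usage: { promptTokens: NaN, completionTokens: 42, totalTokens: NaN },
  experimental_providerMetadata: {
    openrouter: { usage: { promptTokens: 17, cost: 0.00021 } },
  },
};
extractUsageFromResult(result);
// => { promptTokens: 17, completionTokens: 42, totalTokens: 59, cost: 0.00021 }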
package/dist/index.mjs CHANGED
@@ -917,7 +917,13 @@ function wrapOpenAI(client) {
         response?.choices?.[0]?.message,
         response?.model || params?.model,
         response?.id
-      ) : void 0;
+      ) : {};
+      if (response?.usage) {
+        attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
+      }
+      if (response?.choices?.[0]?.finish_reason) {
+        attributes["gen_ai.response.finish_reason"] = response.choices[0].finish_reason;
+      }
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
@@ -935,7 +941,7 @@ function wrapOpenAI(client) {
         prompt_tokens: response?.usage?.prompt_tokens,
         completion_tokens: response?.usage?.completion_tokens,
         total_tokens: response?.usage?.total_tokens,
-        attributes,
+        attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
         prompt_ab_test_key: promptCtx?.abTestKey,
@@ -1008,10 +1014,16 @@ function wrapAnthropic(client) {
         { role: "assistant", content: response?.content?.[0]?.text || "" },
         response?.model || params?.model,
         response?.id
-      ) : void 0;
-      if (attributes && params?.system) {
+      ) : {};
+      if (params?.system) {
         attributes["gen_ai.system_prompt"] = params.system;
       }
+      if (response?.usage) {
+        attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
+      }
+      if (response?.stop_reason) {
+        attributes["gen_ai.response.finish_reason"] = response.stop_reason;
+      }
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
@@ -1029,7 +1041,7 @@ function wrapAnthropic(client) {
         prompt_tokens: response?.usage?.input_tokens,
         completion_tokens: response?.usage?.output_tokens,
         total_tokens: (response?.usage?.input_tokens || 0) + (response?.usage?.output_tokens || 0),
-        attributes,
+        attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
         prompt_ab_test_key: promptCtx?.abTestKey,
@@ -1122,6 +1134,13 @@ function wrapGoogleAI(model) {
           attributes["gen_ai.completion.0.content"] = outputText;
         }
       }
+      if (usage) {
+        attributes["fallom.raw.usage"] = JSON.stringify(usage);
+      }
+      const candidate = result?.candidates?.[0];
+      if (candidate?.finishReason) {
+        attributes["gen_ai.response.finish_reason"] = candidate.finishReason;
+      }
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
@@ -1139,7 +1158,7 @@ function wrapGoogleAI(model) {
         prompt_tokens: usage?.promptTokenCount,
         completion_tokens: usage?.candidatesTokenCount,
         total_tokens: usage?.totalTokenCount,
-        attributes: captureContent ? attributes : void 0,
+        attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
         prompt_ab_test_key: promptCtx?.abTestKey,
@@ -1187,8 +1206,37 @@ function wrapGoogleAI(model) {
   };
   return model;
 }
-function wrapAISDK(ai) {
+var aiSdkDebug = false;
+function extractUsageFromResult(result, directUsage) {
+  let usage = directUsage ?? result?.usage;
+  const isValidNumber = (v) => v !== null && v !== void 0 && !Number.isNaN(v);
+  let promptTokens = isValidNumber(usage?.promptTokens) ? usage.promptTokens : void 0;
+  let completionTokens = isValidNumber(usage?.completionTokens) ? usage.completionTokens : void 0;
+  let totalTokens = isValidNumber(usage?.totalTokens) ? usage.totalTokens : void 0;
+  let cost;
+  const orUsage = result?.experimental_providerMetadata?.openrouter?.usage;
+  if (orUsage) {
+    if (promptTokens === void 0 && isValidNumber(orUsage.promptTokens)) {
+      promptTokens = orUsage.promptTokens;
+    }
+    if (completionTokens === void 0 && isValidNumber(orUsage.completionTokens)) {
+      completionTokens = orUsage.completionTokens;
+    }
+    if (totalTokens === void 0 && isValidNumber(orUsage.totalTokens)) {
+      totalTokens = orUsage.totalTokens;
+    }
+    if (isValidNumber(orUsage.cost)) {
+      cost = orUsage.cost;
+    }
+  }
+  if (totalTokens === void 0 && (promptTokens !== void 0 || completionTokens !== void 0)) {
+    totalTokens = (promptTokens ?? 0) + (completionTokens ?? 0);
+  }
+  return { promptTokens, completionTokens, totalTokens, cost };
+}
+function wrapAISDK(ai, options) {
   const aiModule = ai;
+  aiSdkDebug = options?.debug ?? false;
   return {
     generateText: createGenerateTextWrapper(aiModule),
     streamText: createStreamTextWrapper(aiModule),
@@ -1217,6 +1265,28 @@ function createGenerateTextWrapper(aiModule) {
     try {
       const result = await aiModule.generateText(...args);
       const endTime = Date.now();
+      if (aiSdkDebug) {
+        console.log(
+          "\n\u{1F50D} [Fallom Debug] generateText result keys:",
+          Object.keys(result || {})
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.usage:",
+          JSON.stringify(result?.usage, null, 2)
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.response keys:",
+          Object.keys(result?.response || {})
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.response.usage:",
+          JSON.stringify(result?.response?.usage, null, 2)
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.experimental_providerMetadata:",
+          JSON.stringify(result?.experimental_providerMetadata, null, 2)
+        );
+      }
       const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
       const attributes = {};
       if (captureContent) {
@@ -1240,6 +1310,18 @@ function createGenerateTextWrapper(aiModule) {
           attributes["gen_ai.response.id"] = result.response.id;
         }
       }
+      if (result?.usage) {
+        attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
+      }
+      if (result?.experimental_providerMetadata) {
+        attributes["fallom.raw.providerMetadata"] = JSON.stringify(
+          result.experimental_providerMetadata
+        );
+      }
+      if (result?.finishReason) {
+        attributes["gen_ai.response.finish_reason"] = result.finishReason;
+      }
+      const usage = extractUsageFromResult(result);
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
@@ -1254,9 +1336,9 @@ function createGenerateTextWrapper(aiModule) {
         end_time: new Date(endTime).toISOString(),
         duration_ms: endTime - startTime,
         status: "OK",
-        prompt_tokens: result?.usage?.promptTokens,
-        completion_tokens: result?.usage?.completionTokens,
-        total_tokens: result?.usage?.totalTokens,
+        prompt_tokens: usage.promptTokens,
+        completion_tokens: usage.completionTokens,
+        total_tokens: usage.totalTokens,
         attributes: captureContent ? attributes : void 0,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
@@ -1315,9 +1397,31 @@ function createStreamTextWrapper(aiModule) {
       } catch {
      }
       if (result?.usage) {
-        result.usage.then((usage) => {
+        result.usage.then(async (rawUsage) => {
           const endTime = Date.now();
-          log("\u{1F4CA} streamText usage:", JSON.stringify(usage, null, 2));
+          if (aiSdkDebug) {
+            console.log(
+              "\n\u{1F50D} [Fallom Debug] streamText usage:",
+              JSON.stringify(rawUsage, null, 2)
+            );
+            console.log(
+              "\u{1F50D} [Fallom Debug] streamText result keys:",
+              Object.keys(result || {})
+            );
+          }
+          log("\u{1F4CA} streamText usage:", JSON.stringify(rawUsage, null, 2));
+          let providerMetadata = result?.experimental_providerMetadata;
+          if (providerMetadata && typeof providerMetadata.then === "function") {
+            try {
+              providerMetadata = await providerMetadata;
+            } catch {
+              providerMetadata = void 0;
+            }
+          }
+          const usage = extractUsageFromResult(
+            { experimental_providerMetadata: providerMetadata },
+            rawUsage
+          );
           const attributes = {};
           if (captureContent) {
             attributes["gen_ai.request.model"] = modelId;
@@ -1329,6 +1433,12 @@ function createStreamTextWrapper(aiModule) {
           if (firstTokenTime) {
             attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
           }
+          if (rawUsage) {
+            attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
+          }
+          if (providerMetadata) {
+            attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
+          }
           const tracePayload = {
             config_key: ctx.configKey,
             session_id: ctx.sessionId,
@@ -1343,9 +1453,9 @@ function createStreamTextWrapper(aiModule) {
             end_time: new Date(endTime).toISOString(),
             duration_ms: endTime - startTime,
             status: "OK",
-            prompt_tokens: usage?.promptTokens,
-            completion_tokens: usage?.completionTokens,
-            total_tokens: usage?.totalTokens,
+            prompt_tokens: usage.promptTokens,
+            completion_tokens: usage.completionTokens,
+            total_tokens: usage.totalTokens,
             time_to_first_token_ms: firstTokenTime ? firstTokenTime - startTime : void 0,
             attributes: captureContent ? attributes : void 0,
             prompt_key: promptCtx?.promptKey,
@@ -1425,6 +1535,24 @@ function createGenerateObjectWrapper(aiModule) {
     try {
       const result = await aiModule.generateObject(...args);
       const endTime = Date.now();
+      if (aiSdkDebug) {
+        console.log(
+          "\n\u{1F50D} [Fallom Debug] generateObject result keys:",
+          Object.keys(result || {})
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.usage:",
+          JSON.stringify(result?.usage, null, 2)
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.response keys:",
+          Object.keys(result?.response || {})
+        );
+        console.log(
+          "\u{1F50D} [Fallom Debug] result.response.usage:",
+          JSON.stringify(result?.response?.usage, null, 2)
+        );
+      }
       const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
       const attributes = {};
       if (captureContent) {
@@ -1437,6 +1565,18 @@ function createGenerateObjectWrapper(aiModule) {
          );
        }
      }
+      if (result?.usage) {
+        attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
+      }
+      if (result?.experimental_providerMetadata) {
+        attributes["fallom.raw.providerMetadata"] = JSON.stringify(
+          result.experimental_providerMetadata
+        );
+      }
+      if (result?.finishReason) {
+        attributes["gen_ai.response.finish_reason"] = result.finishReason;
+      }
+      const usage = extractUsageFromResult(result);
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
@@ -1451,9 +1591,9 @@ function createGenerateObjectWrapper(aiModule) {
         end_time: new Date(endTime).toISOString(),
         duration_ms: endTime - startTime,
         status: "OK",
-        prompt_tokens: result?.usage?.promptTokens,
-        completion_tokens: result?.usage?.completionTokens,
-        total_tokens: result?.usage?.totalTokens,
+        prompt_tokens: usage.promptTokens,
+        completion_tokens: usage.completionTokens,
+        total_tokens: usage.totalTokens,
         attributes: captureContent ? attributes : void 0,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
@@ -1513,9 +1653,31 @@ function createStreamObjectWrapper(aiModule) {
       } catch {
      }
       if (result?.usage) {
-        result.usage.then((usage) => {
+        result.usage.then(async (rawUsage) => {
           const endTime = Date.now();
-          log("\u{1F4CA} streamObject usage:", JSON.stringify(usage, null, 2));
+          if (aiSdkDebug) {
+            console.log(
+              "\n\u{1F50D} [Fallom Debug] streamObject usage:",
+              JSON.stringify(rawUsage, null, 2)
+            );
+            console.log(
+              "\u{1F50D} [Fallom Debug] streamObject result keys:",
+              Object.keys(result || {})
+            );
+          }
+          log("\u{1F4CA} streamObject usage:", JSON.stringify(rawUsage, null, 2));
+          let providerMetadata = result?.experimental_providerMetadata;
+          if (providerMetadata && typeof providerMetadata.then === "function") {
+            try {
+              providerMetadata = await providerMetadata;
+            } catch {
+              providerMetadata = void 0;
+            }
+          }
+          const usage = extractUsageFromResult(
+            { experimental_providerMetadata: providerMetadata },
+            rawUsage
+          );
           const attributes = {};
           if (captureContent) {
             attributes["gen_ai.request.model"] = modelId;
@@ -1523,6 +1685,12 @@ function createStreamObjectWrapper(aiModule) {
           if (firstTokenTime) {
             attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
           }
+          if (rawUsage) {
+            attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
+          }
+          if (providerMetadata) {
+            attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
+          }
           sendTrace({
             config_key: ctx.configKey,
             session_id: ctx.sessionId,
@@ -1537,9 +1705,9 @@ function createStreamObjectWrapper(aiModule) {
             end_time: new Date(endTime).toISOString(),
             duration_ms: endTime - startTime,
             status: "OK",
-            prompt_tokens: usage?.promptTokens,
-            completion_tokens: usage?.completionTokens,
-            total_tokens: usage?.totalTokens,
+            prompt_tokens: usage.promptTokens,
+            completion_tokens: usage.completionTokens,
+            total_tokens: usage.totalTokens,
             attributes: captureContent ? attributes : void 0,
             prompt_key: promptCtx?.promptKey,
             prompt_version: promptCtx?.promptVersion,
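
One subtlety in both builds: on streaming results, experimental_providerMetadata may be a promise that only resolves once the stream finishes, which is why the stream wrappers check for a .then method and await it inside a try/catch before reading OpenRouter usage from it. The same pattern in isolation (resolveProviderMetadata is a hypothetical name for illustration, not an export of this package):

// Await thenables, pass plain objects through, and swallow rejections
// so a metadata failure can never break trace reporting.
async function resolveProviderMetadata(maybeMetadata) {
  if (maybeMetadata && typeof maybeMetadata.then === "function") {
    try {
      return await maybeMetadata;
    } catch {
      return undefined;
    }
  }
  return maybeMetadata;
}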
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@fallom/trace",
-  "version": "0.1.11",
+  "version": "0.1.12",
   "description": "Model A/B testing and tracing for LLM applications. Zero latency, production-ready.",
   "main": "./dist/index.js",
   "module": "./dist/index.mjs",