@fallom/trace 0.1.10 → 0.1.12

This diff shows the changes between publicly released versions of this package, as they appear in the supported public registries, and is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -17,6 +17,7 @@ __export(trace_exports, {
  wrapAISDK: () => wrapAISDK,
  wrapAnthropic: () => wrapAnthropic,
  wrapGoogleAI: () => wrapGoogleAI,
+ wrapMastraAgent: () => wrapMastraAgent,
  wrapOpenAI: () => wrapOpenAI
  });
  import { AsyncLocalStorage } from "async_hooks";
@@ -26,7 +27,7 @@ import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";
  // node_modules/@opentelemetry/resources/build/esm/Resource.js
  import { diag } from "@opentelemetry/api";

- // node_modules/@opentelemetry/semantic-conventions/build/esm/resource/SemanticResourceAttributes.js
+ // node_modules/@opentelemetry/resources/node_modules/@opentelemetry/semantic-conventions/build/esm/resource/SemanticResourceAttributes.js
  var SemanticResourceAttributes = {
  /**
  * Name of the cloud provider.
@@ -406,35 +407,9 @@ var SemanticResourceAttributes = {
  */
  WEBENGINE_DESCRIPTION: "webengine.description"
  };
- var TelemetrySdkLanguageValues = {
- /** cpp. */
- CPP: "cpp",
- /** dotnet. */
- DOTNET: "dotnet",
- /** erlang. */
- ERLANG: "erlang",
- /** go. */
- GO: "go",
- /** java. */
- JAVA: "java",
- /** nodejs. */
- NODEJS: "nodejs",
- /** php. */
- PHP: "php",
- /** python. */
- PYTHON: "python",
- /** ruby. */
- RUBY: "ruby",
- /** webjs. */
- WEBJS: "webjs"
- };
-
- // node_modules/@opentelemetry/core/build/esm/version.js
- var VERSION = "1.19.0";

- // node_modules/@opentelemetry/core/build/esm/platform/node/sdk-info.js
- var _a;
- var SDK_INFO = (_a = {}, _a[SemanticResourceAttributes.TELEMETRY_SDK_NAME] = "opentelemetry", _a[SemanticResourceAttributes.PROCESS_RUNTIME_NAME] = "node", _a[SemanticResourceAttributes.TELEMETRY_SDK_LANGUAGE] = TelemetrySdkLanguageValues.NODEJS, _a[SemanticResourceAttributes.TELEMETRY_SDK_VERSION] = VERSION, _a);
+ // node_modules/@opentelemetry/resources/build/esm/Resource.js
+ import { SDK_INFO } from "@opentelemetry/core";

  // node_modules/@opentelemetry/resources/build/esm/platform/node/default-service-name.js
  function defaultServiceName() {
@@ -571,10 +546,10 @@ var Resource = (
  (function() {
  function Resource2(attributes, asyncAttributesPromise) {
  var _this = this;
- var _a2;
+ var _a;
  this._attributes = attributes;
  this.asyncAttributesPending = asyncAttributesPromise != null;
- this._syncAttributes = (_a2 = this._attributes) !== null && _a2 !== void 0 ? _a2 : {};
+ this._syncAttributes = (_a = this._attributes) !== null && _a !== void 0 ? _a : {};
  this._asyncAttributesPromise = asyncAttributesPromise === null || asyncAttributesPromise === void 0 ? void 0 : asyncAttributesPromise.then(function(asyncAttributes) {
  _this._attributes = Object.assign({}, _this._attributes, asyncAttributes);
  _this.asyncAttributesPending = false;
@@ -589,30 +564,30 @@ var Resource = (
  return Resource2.EMPTY;
  };
  Resource2.default = function() {
- var _a2;
- return new Resource2((_a2 = {}, _a2[SemanticResourceAttributes.SERVICE_NAME] = defaultServiceName(), _a2[SemanticResourceAttributes.TELEMETRY_SDK_LANGUAGE] = SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_LANGUAGE], _a2[SemanticResourceAttributes.TELEMETRY_SDK_NAME] = SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_NAME], _a2[SemanticResourceAttributes.TELEMETRY_SDK_VERSION] = SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_VERSION], _a2));
+ var _a;
+ return new Resource2((_a = {}, _a[SemanticResourceAttributes.SERVICE_NAME] = defaultServiceName(), _a[SemanticResourceAttributes.TELEMETRY_SDK_LANGUAGE] = SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_LANGUAGE], _a[SemanticResourceAttributes.TELEMETRY_SDK_NAME] = SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_NAME], _a[SemanticResourceAttributes.TELEMETRY_SDK_VERSION] = SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_VERSION], _a));
  };
  Object.defineProperty(Resource2.prototype, "attributes", {
  get: function() {
- var _a2;
+ var _a;
  if (this.asyncAttributesPending) {
  diag.error("Accessing resource attributes before async attributes settled");
  }
- return (_a2 = this._attributes) !== null && _a2 !== void 0 ? _a2 : {};
+ return (_a = this._attributes) !== null && _a !== void 0 ? _a : {};
  },
  enumerable: false,
  configurable: true
  });
  Resource2.prototype.waitForAsyncAttributes = function() {
  return __awaiter(this, void 0, void 0, function() {
- return __generator(this, function(_a2) {
- switch (_a2.label) {
+ return __generator(this, function(_a) {
+ switch (_a.label) {
  case 0:
  if (!this.asyncAttributesPending) return [3, 2];
  return [4, this._asyncAttributesPromise];
  case 1:
- _a2.sent();
- _a2.label = 2;
+ _a.sent();
+ _a.label = 2;
  case 2:
  return [
  2
@@ -624,19 +599,19 @@ var Resource = (
  };
  Resource2.prototype.merge = function(other) {
  var _this = this;
- var _a2;
+ var _a;
  if (!other)
  return this;
- var mergedSyncAttributes = __assign(__assign({}, this._syncAttributes), (_a2 = other._syncAttributes) !== null && _a2 !== void 0 ? _a2 : other.attributes);
+ var mergedSyncAttributes = __assign(__assign({}, this._syncAttributes), (_a = other._syncAttributes) !== null && _a !== void 0 ? _a : other.attributes);
  if (!this._asyncAttributesPromise && !other._asyncAttributesPromise) {
  return new Resource2(mergedSyncAttributes);
  }
  var mergedAttributesPromise = Promise.all([
  this._asyncAttributesPromise,
  other._asyncAttributesPromise
- ]).then(function(_a3) {
+ ]).then(function(_a2) {
  var _b;
- var _c = __read(_a3, 2), thisAsyncAttributes = _c[0], otherAsyncAttributes = _c[1];
+ var _c = __read(_a2, 2), thisAsyncAttributes = _c[0], otherAsyncAttributes = _c[1];
  return __assign(__assign(__assign(__assign({}, _this._syncAttributes), thisAsyncAttributes), (_b = other._syncAttributes) !== null && _b !== void 0 ? _b : other.attributes), otherAsyncAttributes);
  });
  return new Resource2(mergedSyncAttributes, mergedAttributesPromise);
@@ -942,7 +917,13 @@ function wrapOpenAI(client) {
  response?.choices?.[0]?.message,
  response?.model || params?.model,
  response?.id
- ) : void 0;
+ ) : {};
+ if (response?.usage) {
+ attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
+ }
+ if (response?.choices?.[0]?.finish_reason) {
+ attributes["gen_ai.response.finish_reason"] = response.choices[0].finish_reason;
+ }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -960,7 +941,7 @@ function wrapOpenAI(client) {
  prompt_tokens: response?.usage?.prompt_tokens,
  completion_tokens: response?.usage?.completion_tokens,
  total_tokens: response?.usage?.total_tokens,
- attributes,
+ attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
  prompt_key: promptCtx?.promptKey,
  prompt_version: promptCtx?.promptVersion,
  prompt_ab_test_key: promptCtx?.abTestKey,
@@ -1033,10 +1014,16 @@ function wrapAnthropic(client) {
  { role: "assistant", content: response?.content?.[0]?.text || "" },
  response?.model || params?.model,
  response?.id
- ) : void 0;
- if (attributes && params?.system) {
+ ) : {};
+ if (params?.system) {
  attributes["gen_ai.system_prompt"] = params.system;
  }
+ if (response?.usage) {
+ attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
+ }
+ if (response?.stop_reason) {
+ attributes["gen_ai.response.finish_reason"] = response.stop_reason;
+ }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1054,7 +1041,7 @@ function wrapAnthropic(client) {
  prompt_tokens: response?.usage?.input_tokens,
  completion_tokens: response?.usage?.output_tokens,
  total_tokens: (response?.usage?.input_tokens || 0) + (response?.usage?.output_tokens || 0),
- attributes,
+ attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
  prompt_key: promptCtx?.promptKey,
  prompt_version: promptCtx?.promptVersion,
  prompt_ab_test_key: promptCtx?.abTestKey,
@@ -1147,6 +1134,13 @@ function wrapGoogleAI(model) {
  attributes["gen_ai.completion.0.content"] = outputText;
  }
  }
+ if (usage) {
+ attributes["fallom.raw.usage"] = JSON.stringify(usage);
+ }
+ const candidate = result?.candidates?.[0];
+ if (candidate?.finishReason) {
+ attributes["gen_ai.response.finish_reason"] = candidate.finishReason;
+ }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1164,7 +1158,7 @@ function wrapGoogleAI(model) {
  prompt_tokens: usage?.promptTokenCount,
  completion_tokens: usage?.candidatesTokenCount,
  total_tokens: usage?.totalTokenCount,
- attributes: captureContent ? attributes : void 0,
+ attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
  prompt_key: promptCtx?.promptKey,
  prompt_version: promptCtx?.promptVersion,
  prompt_ab_test_key: promptCtx?.abTestKey,
@@ -1212,8 +1206,37 @@ function wrapGoogleAI(model) {
  };
  return model;
  }
- function wrapAISDK(ai) {
+ var aiSdkDebug = false;
+ function extractUsageFromResult(result, directUsage) {
+ let usage = directUsage ?? result?.usage;
+ const isValidNumber = (v) => v !== null && v !== void 0 && !Number.isNaN(v);
+ let promptTokens = isValidNumber(usage?.promptTokens) ? usage.promptTokens : void 0;
+ let completionTokens = isValidNumber(usage?.completionTokens) ? usage.completionTokens : void 0;
+ let totalTokens = isValidNumber(usage?.totalTokens) ? usage.totalTokens : void 0;
+ let cost;
+ const orUsage = result?.experimental_providerMetadata?.openrouter?.usage;
+ if (orUsage) {
+ if (promptTokens === void 0 && isValidNumber(orUsage.promptTokens)) {
+ promptTokens = orUsage.promptTokens;
+ }
+ if (completionTokens === void 0 && isValidNumber(orUsage.completionTokens)) {
+ completionTokens = orUsage.completionTokens;
+ }
+ if (totalTokens === void 0 && isValidNumber(orUsage.totalTokens)) {
+ totalTokens = orUsage.totalTokens;
+ }
+ if (isValidNumber(orUsage.cost)) {
+ cost = orUsage.cost;
+ }
+ }
+ if (totalTokens === void 0 && (promptTokens !== void 0 || completionTokens !== void 0)) {
+ totalTokens = (promptTokens ?? 0) + (completionTokens ?? 0);
+ }
+ return { promptTokens, completionTokens, totalTokens, cost };
+ }
+ function wrapAISDK(ai, options) {
  const aiModule = ai;
+ aiSdkDebug = options?.debug ?? false;
  return {
  generateText: createGenerateTextWrapper(aiModule),
  streamText: createStreamTextWrapper(aiModule),
@@ -1242,6 +1265,28 @@ function createGenerateTextWrapper(aiModule) {
  try {
  const result = await aiModule.generateText(...args);
  const endTime = Date.now();
+ if (aiSdkDebug) {
+ console.log(
+ "\n\u{1F50D} [Fallom Debug] generateText result keys:",
+ Object.keys(result || {})
+ );
+ console.log(
+ "\u{1F50D} [Fallom Debug] result.usage:",
+ JSON.stringify(result?.usage, null, 2)
+ );
+ console.log(
+ "\u{1F50D} [Fallom Debug] result.response keys:",
+ Object.keys(result?.response || {})
+ );
+ console.log(
+ "\u{1F50D} [Fallom Debug] result.response.usage:",
+ JSON.stringify(result?.response?.usage, null, 2)
+ );
+ console.log(
+ "\u{1F50D} [Fallom Debug] result.experimental_providerMetadata:",
+ JSON.stringify(result?.experimental_providerMetadata, null, 2)
+ );
+ }
  const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
  const attributes = {};
  if (captureContent) {
@@ -1265,6 +1310,18 @@ function createGenerateTextWrapper(aiModule) {
  attributes["gen_ai.response.id"] = result.response.id;
  }
  }
+ if (result?.usage) {
+ attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
+ }
+ if (result?.experimental_providerMetadata) {
+ attributes["fallom.raw.providerMetadata"] = JSON.stringify(
+ result.experimental_providerMetadata
+ );
+ }
+ if (result?.finishReason) {
+ attributes["gen_ai.response.finish_reason"] = result.finishReason;
+ }
+ const usage = extractUsageFromResult(result);
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1279,9 +1336,9 @@ function createGenerateTextWrapper(aiModule) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: result?.usage?.promptTokens,
- completion_tokens: result?.usage?.completionTokens,
- total_tokens: result?.usage?.totalTokens,
+ prompt_tokens: usage.promptTokens,
+ completion_tokens: usage.completionTokens,
+ total_tokens: usage.totalTokens,
  attributes: captureContent ? attributes : void 0,
  prompt_key: promptCtx?.promptKey,
  prompt_version: promptCtx?.promptVersion,
@@ -1340,9 +1397,31 @@ function createStreamTextWrapper(aiModule) {
  } catch {
  }
  if (result?.usage) {
- result.usage.then((usage) => {
+ result.usage.then(async (rawUsage) => {
  const endTime = Date.now();
- log("\u{1F4CA} streamText usage:", JSON.stringify(usage, null, 2));
+ if (aiSdkDebug) {
+ console.log(
+ "\n\u{1F50D} [Fallom Debug] streamText usage:",
+ JSON.stringify(rawUsage, null, 2)
+ );
+ console.log(
+ "\u{1F50D} [Fallom Debug] streamText result keys:",
+ Object.keys(result || {})
+ );
+ }
+ log("\u{1F4CA} streamText usage:", JSON.stringify(rawUsage, null, 2));
+ let providerMetadata = result?.experimental_providerMetadata;
+ if (providerMetadata && typeof providerMetadata.then === "function") {
+ try {
+ providerMetadata = await providerMetadata;
+ } catch {
+ providerMetadata = void 0;
+ }
+ }
+ const usage = extractUsageFromResult(
+ { experimental_providerMetadata: providerMetadata },
+ rawUsage
+ );
  const attributes = {};
  if (captureContent) {
  attributes["gen_ai.request.model"] = modelId;
@@ -1354,6 +1433,12 @@ function createStreamTextWrapper(aiModule) {
  if (firstTokenTime) {
  attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
  }
+ if (rawUsage) {
+ attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
+ }
+ if (providerMetadata) {
+ attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
+ }
  const tracePayload = {
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1368,9 +1453,9 @@ function createStreamTextWrapper(aiModule) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage?.promptTokens,
- completion_tokens: usage?.completionTokens,
- total_tokens: usage?.totalTokens,
+ prompt_tokens: usage.promptTokens,
+ completion_tokens: usage.completionTokens,
+ total_tokens: usage.totalTokens,
  time_to_first_token_ms: firstTokenTime ? firstTokenTime - startTime : void 0,
  attributes: captureContent ? attributes : void 0,
  prompt_key: promptCtx?.promptKey,
@@ -1450,6 +1535,24 @@ function createGenerateObjectWrapper(aiModule) {
  try {
  const result = await aiModule.generateObject(...args);
  const endTime = Date.now();
+ if (aiSdkDebug) {
+ console.log(
+ "\n\u{1F50D} [Fallom Debug] generateObject result keys:",
+ Object.keys(result || {})
+ );
+ console.log(
+ "\u{1F50D} [Fallom Debug] result.usage:",
+ JSON.stringify(result?.usage, null, 2)
+ );
+ console.log(
+ "\u{1F50D} [Fallom Debug] result.response keys:",
+ Object.keys(result?.response || {})
+ );
+ console.log(
+ "\u{1F50D} [Fallom Debug] result.response.usage:",
+ JSON.stringify(result?.response?.usage, null, 2)
+ );
+ }
  const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
  const attributes = {};
  if (captureContent) {
@@ -1462,6 +1565,18 @@ function createGenerateObjectWrapper(aiModule) {
  );
  }
  }
+ if (result?.usage) {
+ attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
+ }
+ if (result?.experimental_providerMetadata) {
+ attributes["fallom.raw.providerMetadata"] = JSON.stringify(
+ result.experimental_providerMetadata
+ );
+ }
+ if (result?.finishReason) {
+ attributes["gen_ai.response.finish_reason"] = result.finishReason;
+ }
+ const usage = extractUsageFromResult(result);
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1476,9 +1591,9 @@ function createGenerateObjectWrapper(aiModule) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: result?.usage?.promptTokens,
- completion_tokens: result?.usage?.completionTokens,
- total_tokens: result?.usage?.totalTokens,
+ prompt_tokens: usage.promptTokens,
+ completion_tokens: usage.completionTokens,
+ total_tokens: usage.totalTokens,
  attributes: captureContent ? attributes : void 0,
  prompt_key: promptCtx?.promptKey,
  prompt_version: promptCtx?.promptVersion,
@@ -1538,9 +1653,31 @@ function createStreamObjectWrapper(aiModule) {
  } catch {
  }
  if (result?.usage) {
- result.usage.then((usage) => {
+ result.usage.then(async (rawUsage) => {
  const endTime = Date.now();
- log("\u{1F4CA} streamObject usage:", JSON.stringify(usage, null, 2));
+ if (aiSdkDebug) {
+ console.log(
+ "\n\u{1F50D} [Fallom Debug] streamObject usage:",
+ JSON.stringify(rawUsage, null, 2)
+ );
+ console.log(
+ "\u{1F50D} [Fallom Debug] streamObject result keys:",
+ Object.keys(result || {})
+ );
+ }
+ log("\u{1F4CA} streamObject usage:", JSON.stringify(rawUsage, null, 2));
+ let providerMetadata = result?.experimental_providerMetadata;
+ if (providerMetadata && typeof providerMetadata.then === "function") {
+ try {
+ providerMetadata = await providerMetadata;
+ } catch {
+ providerMetadata = void 0;
+ }
+ }
+ const usage = extractUsageFromResult(
+ { experimental_providerMetadata: providerMetadata },
+ rawUsage
+ );
  const attributes = {};
  if (captureContent) {
  attributes["gen_ai.request.model"] = modelId;
@@ -1548,6 +1685,12 @@ function createStreamObjectWrapper(aiModule) {
  if (firstTokenTime) {
  attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
  }
+ if (rawUsage) {
+ attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
+ }
+ if (providerMetadata) {
+ attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
+ }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1562,9 +1705,9 @@ function createStreamObjectWrapper(aiModule) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage?.promptTokens,
- completion_tokens: usage?.completionTokens,
- total_tokens: usage?.totalTokens,
+ prompt_tokens: usage.promptTokens,
+ completion_tokens: usage.completionTokens,
+ total_tokens: usage.totalTokens,
  attributes: captureContent ? attributes : void 0,
  prompt_key: promptCtx?.promptKey,
  prompt_version: promptCtx?.promptVersion,
@@ -1620,6 +1763,127 @@ function createStreamObjectWrapper(aiModule) {
  return result;
  };
  }
+ function wrapMastraAgent(agent) {
+ const originalGenerate = agent.generate.bind(agent);
+ const agentName = agent.name || "MastraAgent";
+ agent.generate = async function(...args) {
+ const ctx = sessionStorage.getStore() || fallbackSession;
+ if (!ctx || !initialized) {
+ return originalGenerate(...args);
+ }
+ let promptCtx = null;
+ try {
+ const { getPromptContext } = await import("./prompts-VAN5E3L4.mjs");
+ promptCtx = getPromptContext();
+ } catch {
+ }
+ const traceId = generateHexId(32);
+ const spanId = generateHexId(16);
+ const startTime = Date.now();
+ const messages = args[0] || [];
+ try {
+ const result = await originalGenerate(...args);
+ const endTime = Date.now();
+ const model = result?.model?.modelId || "unknown";
+ const toolCalls = [];
+ if (result?.steps?.length) {
+ for (const step of result.steps) {
+ if (step.toolCalls?.length) {
+ for (let i = 0; i < step.toolCalls.length; i++) {
+ const tc = step.toolCalls[i];
+ const tr = step.toolResults?.[i];
+ toolCalls.push({
+ name: tc.toolName,
+ arguments: tc.args,
+ result: tr?.result
+ });
+ }
+ }
+ }
+ }
+ const attributes = {
+ "gen_ai.system": "Mastra",
+ "gen_ai.request.model": model,
+ "gen_ai.response.model": model,
+ "fallom.source": "mastra-agent",
+ "llm.request.type": "chat"
+ };
+ if (Array.isArray(messages)) {
+ messages.forEach((msg, i) => {
+ attributes[`gen_ai.prompt.${i}.role`] = msg.role || "user";
+ attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
+ });
+ }
+ if (result?.text) {
+ attributes["gen_ai.completion.0.role"] = "assistant";
+ attributes["gen_ai.completion.0.content"] = result.text;
+ attributes["gen_ai.completion.0.finish_reason"] = "stop";
+ }
+ if (toolCalls.length > 0) {
+ attributes["fallom.tool_calls"] = JSON.stringify(toolCalls);
+ toolCalls.forEach((tc, i) => {
+ attributes[`gen_ai.completion.0.tool_calls.${i}.name`] = tc.name;
+ attributes[`gen_ai.completion.0.tool_calls.${i}.type`] = "function";
+ attributes[`gen_ai.completion.0.tool_calls.${i}.arguments`] = JSON.stringify(tc.arguments);
+ });
+ }
+ if (result?.usage) {
+ attributes["gen_ai.usage.prompt_tokens"] = result.usage.promptTokens;
+ attributes["gen_ai.usage.completion_tokens"] = result.usage.completionTokens;
+ attributes["llm.usage.total_tokens"] = result.usage.totalTokens;
+ }
+ const traceData = {
+ config_key: ctx.configKey,
+ session_id: ctx.sessionId,
+ customer_id: ctx.customerId,
+ trace_id: traceId,
+ span_id: spanId,
+ name: `mastra.${agentName}.generate`,
+ kind: "client",
+ model,
+ start_time: new Date(startTime).toISOString(),
+ end_time: new Date(endTime).toISOString(),
+ duration_ms: endTime - startTime,
+ status: "OK",
+ prompt_tokens: result?.usage?.promptTokens,
+ completion_tokens: result?.usage?.completionTokens,
+ total_tokens: result?.usage?.totalTokens,
+ attributes,
+ prompt_key: promptCtx?.promptKey,
+ prompt_version: promptCtx?.promptVersion,
+ prompt_ab_test_key: promptCtx?.abTestKey,
+ prompt_variant_index: promptCtx?.variantIndex
+ };
+ sendTrace(traceData).catch(() => {
+ });
+ return result;
+ } catch (error) {
+ const endTime = Date.now();
+ const traceData = {
+ config_key: ctx.configKey,
+ session_id: ctx.sessionId,
+ customer_id: ctx.customerId,
+ trace_id: traceId,
+ span_id: spanId,
+ name: `mastra.${agentName}.generate`,
+ kind: "client",
+ start_time: new Date(startTime).toISOString(),
+ end_time: new Date(endTime).toISOString(),
+ duration_ms: endTime - startTime,
+ status: "ERROR",
+ error_message: error instanceof Error ? error.message : String(error),
+ prompt_key: promptCtx?.promptKey,
+ prompt_version: promptCtx?.promptVersion,
+ prompt_ab_test_key: promptCtx?.abTestKey,
+ prompt_variant_index: promptCtx?.variantIndex
+ };
+ sendTrace(traceData).catch(() => {
+ });
+ throw error;
+ }
+ };
+ return agent;
+ }

  // src/models.ts
  var models_exports = {};
@@ -1885,6 +2149,241 @@ async function init4(options = {}) {
  });
  }

+ // src/mastra.ts
+ import { ExportResultCode } from "@opentelemetry/core";
+ var promptContext = {};
+ function setMastraPrompt(promptKey, version) {
+ promptContext = {
+ promptKey,
+ promptVersion: version,
+ promptAbTestKey: void 0,
+ promptVariantIndex: void 0
+ };
+ }
+ function setMastraPromptAB(abTestKey, variantIndex) {
+ promptContext = {
+ promptKey: void 0,
+ promptVersion: void 0,
+ promptAbTestKey: abTestKey,
+ promptVariantIndex: variantIndex
+ };
+ }
+ function clearMastraPrompt() {
+ promptContext = {};
+ }
+ var FallomExporter = class {
+ constructor(options = {}) {
+ this.pendingExports = [];
+ this.apiKey = options.apiKey ?? process.env.FALLOM_API_KEY ?? "";
+ this.baseUrl = options.baseUrl ?? "https://traces.fallom.com";
+ this.debug = options.debug ?? false;
+ console.log("[FallomExporter] Constructor called, debug:", this.debug);
+ console.log("[FallomExporter] API key present:", !!this.apiKey);
+ console.log("[FallomExporter] Base URL:", this.baseUrl);
+ if (!this.apiKey) {
+ console.warn(
+ "[FallomExporter] No API key provided. Set FALLOM_API_KEY env var or pass apiKey option."
+ );
+ }
+ }
+ log(...args) {
+ if (this.debug) {
+ console.log("[FallomExporter]", ...args);
+ }
+ }
+ /**
+ * Export spans to Fallom.
+ */
+ export(spans, resultCallback) {
+ if (spans.length === 0) {
+ resultCallback({ code: ExportResultCode.SUCCESS });
+ return;
+ }
+ this.log(`Exporting ${spans.length} spans...`);
+ if (this.debug) {
+ for (const span2 of spans) {
+ this.log(` - ${span2.name}`, {
+ attributes: Object.fromEntries(
+ Object.entries(span2.attributes).filter(
+ ([k]) => k.startsWith("gen_ai") || k.startsWith("llm")
+ )
+ )
+ });
+ }
+ }
+ const exportPromise = this.sendSpans(spans).then(() => {
+ this.log("Export successful");
+ resultCallback({ code: ExportResultCode.SUCCESS });
+ }).catch((error) => {
+ console.error("[FallomExporter] Export failed:", error);
+ resultCallback({
+ code: ExportResultCode.FAILED,
+ error: error instanceof Error ? error : new Error(String(error))
+ });
+ });
+ this.pendingExports.push(exportPromise);
+ }
+ /**
+ * Shutdown the exporter, waiting for pending exports.
+ */
+ async shutdown() {
+ await Promise.all(this.pendingExports);
+ this.pendingExports = [];
+ }
+ /**
+ * Force flush pending exports.
+ */
+ async forceFlush() {
+ await Promise.all(this.pendingExports);
+ }
+ /**
+ * Send spans to Fallom's OTLP endpoint.
+ */
+ async sendSpans(spans) {
+ const session = getSession();
+ const resourceSpans = this.spansToOtlpJson(spans);
+ const headers = {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${this.apiKey}`
+ };
+ if (session?.configKey) {
+ headers["X-Fallom-Config-Key"] = session.configKey;
+ }
+ if (session?.sessionId) {
+ headers["X-Fallom-Session-Id"] = session.sessionId;
+ }
+ if (session?.customerId) {
+ headers["X-Fallom-Customer-Id"] = session.customerId;
+ }
+ if (promptContext.promptKey) {
+ headers["X-Fallom-Prompt-Key"] = promptContext.promptKey;
+ }
+ if (promptContext.promptVersion !== void 0) {
+ headers["X-Fallom-Prompt-Version"] = String(promptContext.promptVersion);
+ }
+ if (promptContext.promptAbTestKey) {
+ headers["X-Fallom-Prompt-AB-Test"] = promptContext.promptAbTestKey;
+ }
+ if (promptContext.promptVariantIndex !== void 0) {
+ headers["X-Fallom-Prompt-Variant"] = String(
+ promptContext.promptVariantIndex
+ );
+ }
+ const endpoint = `${this.baseUrl}/v1/traces`;
+ this.log("Sending to", endpoint);
+ this.log("Headers:", {
+ ...headers,
+ Authorization: "Bearer ***"
+ });
+ const response = await fetch(endpoint, {
+ method: "POST",
+ headers,
+ body: JSON.stringify({ resourceSpans })
+ });
+ if (!response.ok) {
+ const text = await response.text();
+ throw new Error(`Failed to export: ${response.status} ${text}`);
+ }
+ }
+ /**
+ * Convert OpenTelemetry spans to OTLP JSON format.
+ */
+ spansToOtlpJson(spans) {
+ const resourceMap = /* @__PURE__ */ new Map();
+ for (const span2 of spans) {
+ const resourceKey = JSON.stringify(span2.resource.attributes);
+ if (!resourceMap.has(resourceKey)) {
+ resourceMap.set(resourceKey, []);
+ }
+ resourceMap.get(resourceKey).push(span2);
+ }
+ const resourceSpans = [];
+ for (const [_resourceKey, resourceSpanList] of resourceMap) {
+ const firstSpan = resourceSpanList[0];
+ resourceSpans.push({
+ resource: {
+ attributes: this.attributesToOtlp(firstSpan.resource.attributes)
+ },
+ scopeSpans: [
+ {
+ scope: {
+ name: firstSpan.instrumentationLibrary.name,
+ version: firstSpan.instrumentationLibrary.version
+ },
+ spans: resourceSpanList.map((span2) => this.spanToOtlp(span2))
+ }
+ ]
+ });
+ }
+ return resourceSpans;
+ }
+ /**
+ * Convert a single span to OTLP format.
+ */
+ spanToOtlp(span2) {
+ return {
+ traceId: span2.spanContext().traceId,
+ spanId: span2.spanContext().spanId,
+ parentSpanId: span2.parentSpanId,
+ name: span2.name,
+ kind: span2.kind,
+ startTimeUnixNano: this.hrTimeToNanos(span2.startTime),
+ endTimeUnixNano: this.hrTimeToNanos(span2.endTime),
+ attributes: this.attributesToOtlp(span2.attributes),
+ status: {
+ code: span2.status.code,
+ message: span2.status.message
+ },
+ events: span2.events.map((event) => ({
+ timeUnixNano: this.hrTimeToNanos(event.time),
+ name: event.name,
+ attributes: this.attributesToOtlp(event.attributes || {})
+ }))
+ };
+ }
+ /**
+ * Convert attributes to OTLP format.
+ */
+ attributesToOtlp(attrs) {
+ return Object.entries(attrs).map(([key, value]) => ({
+ key,
+ value: this.valueToOtlp(value)
+ }));
+ }
+ /**
+ * Convert a value to OTLP AnyValue format.
+ */
+ valueToOtlp(value) {
+ if (typeof value === "string") {
+ return { stringValue: value };
+ }
+ if (typeof value === "number") {
+ if (Number.isInteger(value)) {
+ return { intValue: value };
+ }
+ return { doubleValue: value };
+ }
+ if (typeof value === "boolean") {
+ return { boolValue: value };
+ }
+ if (Array.isArray(value)) {
+ return {
+ arrayValue: {
+ values: value.map((v) => this.valueToOtlp(v))
+ }
+ };
+ }
+ return { stringValue: String(value) };
+ }
+ /**
+ * Convert HrTime to nanoseconds string.
+ */
+ hrTimeToNanos(hrTime) {
+ const [seconds, nanos] = hrTime;
+ return String(BigInt(seconds) * BigInt(1e9) + BigInt(nanos));
+ }
+ };
+
  // src/index.ts
  var index_default = {
  init: init4,
@@ -1893,9 +2392,13 @@ var index_default = {
  prompts: prompts_exports
  };
  export {
+ FallomExporter,
+ clearMastraPrompt,
  index_default as default,
  init4 as init,
  models_exports as models,
  prompts_exports as prompts,
+ setMastraPrompt,
+ setMastraPromptAB,
  trace_exports as trace
  };
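
Taken together, the new public surface in 0.1.12 is the Mastra integration (wrapMastraAgent, FallomExporter, setMastraPrompt, setMastraPromptAB, clearMastraPrompt) plus an optional options bag on wrapAISDK. A minimal usage sketch based only on the signatures visible in this diff; the init options object, the myAgent import, and the prompt key are illustrative assumptions, not something this diff confirms:

import { init, trace, FallomExporter, setMastraPrompt } from "@fallom/trace";
import * as ai from "ai";
import { myAgent } from "./agents.mjs"; // hypothetical Mastra Agent instance

await init({}); // init(options = {}) — option names are not shown in this diff

// wrapAISDK now accepts a second argument; { debug: true } enables the
// "[Fallom Debug]" logging of raw usage and experimental_providerMetadata.
const { generateText, streamText } = trace.wrapAISDK(ai, { debug: true });

// wrapMastraAgent patches agent.generate in place and returns the agent;
// successful calls emit "mastra.<agentName>.generate" spans carrying
// messages, tool calls, token usage, and prompt context.
const agent = trace.wrapMastraAgent(myAgent);

// FallomExporter is an OTLP-JSON span exporter (export/shutdown/forceFlush)
// intended for Mastra's telemetry pipeline; setMastraPrompt attaches prompt
// metadata to subsequent exports via X-Fallom-* headers.
const exporter = new FallomExporter({ debug: true });
setMastraPrompt("my-prompt-key", 2);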