@mastra/observability 1.0.0-beta.3 → 1.0.0-beta.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
1
1
  # @mastra/observability
2
2
 
3
+ ## 1.0.0-beta.4
4
+
5
+ ### Patch Changes
6
+
7
+ - Fixed CachedToken tracking in all Observability Exporters. Also fixed TimeToFirstToken in Langfuse, Braintrust, PostHog exporters. Fixed trace formatting in PostHog exporter. ([#11029](https://github.com/mastra-ai/mastra/pull/11029))
8
+
9
+ - Updated dependencies [[`edb07e4`](https://github.com/mastra-ai/mastra/commit/edb07e49283e0c28bd094a60e03439bf6ecf0221), [`b7e17d3`](https://github.com/mastra-ai/mastra/commit/b7e17d3f5390bb5a71efc112204413656fcdc18d), [`261473a`](https://github.com/mastra-ai/mastra/commit/261473ac637e633064a22076671e2e02b002214d), [`5d7000f`](https://github.com/mastra-ai/mastra/commit/5d7000f757cd65ea9dc5b05e662fd83dfd44e932), [`4f0331a`](https://github.com/mastra-ai/mastra/commit/4f0331a79bf6eb5ee598a5086e55de4b5a0ada03), [`8a000da`](https://github.com/mastra-ai/mastra/commit/8a000da0c09c679a2312f6b3aa05b2ca78ca7393)]:
10
+ - @mastra/core@1.0.0-beta.10
11
+
3
12
  ## 1.0.0-beta.3
4
13
 
5
14
  ### Patch Changes
package/dist/index.cjs CHANGED
@@ -948,6 +948,58 @@ var TestExporter = class extends BaseExporter {
948
948
  this.logger.info("TestExporter shutdown");
949
949
  }
950
950
  };
951
+
952
+ // src/usage.ts
953
+ function extractUsageMetrics(usage, providerMetadata) {
954
+ if (!usage) {
955
+ return {};
956
+ }
957
+ const inputDetails = {};
958
+ const outputDetails = {};
959
+ let inputTokens = usage.inputTokens;
960
+ const outputTokens = usage.outputTokens;
961
+ if (usage.cachedInputTokens) {
962
+ inputDetails.cacheRead = usage.cachedInputTokens;
963
+ }
964
+ if (usage.reasoningTokens) {
965
+ outputDetails.reasoning = usage.reasoningTokens;
966
+ }
967
+ const anthropic = providerMetadata?.anthropic;
968
+ if (anthropic) {
969
+ if (anthropic.cacheReadInputTokens) {
970
+ inputDetails.cacheRead = anthropic.cacheReadInputTokens;
971
+ }
972
+ if (anthropic.cacheCreationInputTokens) {
973
+ inputDetails.cacheWrite = anthropic.cacheCreationInputTokens;
974
+ }
975
+ if (anthropic.cacheReadInputTokens || anthropic.cacheCreationInputTokens) {
976
+ inputDetails.text = usage.inputTokens;
977
+ inputTokens = (usage.inputTokens ?? 0) + (anthropic.cacheReadInputTokens ?? 0) + (anthropic.cacheCreationInputTokens ?? 0);
978
+ }
979
+ }
980
+ const google = providerMetadata?.google;
981
+ if (google?.usageMetadata) {
982
+ if (google.usageMetadata.cachedContentTokenCount) {
983
+ inputDetails.cacheRead = google.usageMetadata.cachedContentTokenCount;
984
+ }
985
+ if (google.usageMetadata.thoughtsTokenCount) {
986
+ outputDetails.reasoning = google.usageMetadata.thoughtsTokenCount;
987
+ }
988
+ }
989
+ const result = {
990
+ inputTokens,
991
+ outputTokens
992
+ };
993
+ if (Object.keys(inputDetails).length > 0) {
994
+ result.inputDetails = inputDetails;
995
+ }
996
+ if (Object.keys(outputDetails).length > 0) {
997
+ result.outputDetails = outputDetails;
998
+ }
999
+ return result;
1000
+ }
1001
+
1002
+ // src/model-tracing.ts
951
1003
  var ModelSpanTracker = class {
952
1004
  #modelSpan;
953
1005
  #currentStepSpan;
@@ -955,8 +1007,7 @@ var ModelSpanTracker = class {
955
1007
  #accumulator = {};
956
1008
  #stepIndex = 0;
957
1009
  #chunkSequence = 0;
958
- /** Tracks whether completionStartTime has been captured for this generation */
959
- #completionStartTimeCaptured = false;
1010
+ #completionStartTime;
960
1011
  /** Tracks tool output accumulators by toolCallId for consolidating sub-agent streams */
961
1012
  #toolOutputAccumulators = /* @__PURE__ */ new Map();
962
1013
  /** Tracks toolCallIds that had streaming output (to skip redundant tool-result spans) */
@@ -966,18 +1017,12 @@ var ModelSpanTracker = class {
966
1017
  }
967
1018
  /**
968
1019
  * Capture the completion start time (time to first token) when the first content chunk arrives.
969
- * This is used by observability providers like Langfuse to calculate TTFT metrics.
970
1020
  */
971
1021
  #captureCompletionStartTime() {
972
- if (this.#completionStartTimeCaptured || !this.#modelSpan) {
1022
+ if (this.#completionStartTime) {
973
1023
  return;
974
1024
  }
975
- this.#completionStartTimeCaptured = true;
976
- this.#modelSpan.update({
977
- attributes: {
978
- completionStartTime: /* @__PURE__ */ new Date()
979
- }
980
- });
1025
+ this.#completionStartTime = /* @__PURE__ */ new Date();
981
1026
  }
982
1027
  /**
983
1028
  * Get the tracing context for creating child spans.
@@ -995,10 +1040,16 @@ var ModelSpanTracker = class {
995
1040
  this.#modelSpan?.error(options);
996
1041
  }
997
1042
  /**
998
- * End the generation span
1043
+ * End the generation span with optional raw usage data.
1044
+ * If usage is provided, it will be converted to UsageStats with cache token details.
999
1045
  */
1000
1046
  endGeneration(options) {
1001
- this.#modelSpan?.end(options);
1047
+ const { usage, providerMetadata, ...spanOptions } = options ?? {};
1048
+ if (spanOptions.attributes) {
1049
+ spanOptions.attributes.completionStartTime = this.#completionStartTime;
1050
+ spanOptions.attributes.usage = extractUsageMetrics(usage, providerMetadata);
1051
+ }
1052
+ this.#modelSpan?.end(spanOptions);
1002
1053
  }
1003
1054
  /**
1004
1055
  * Update the generation span
@@ -1028,9 +1079,10 @@ var ModelSpanTracker = class {
1028
1079
  #endStepSpan(payload) {
1029
1080
  if (!this.#currentStepSpan) return;
1030
1081
  const output = payload.output;
1031
- const { usage, ...otherOutput } = output;
1082
+ const { usage: rawUsage, ...otherOutput } = output;
1032
1083
  const stepResult = payload.stepResult;
1033
1084
  const metadata = payload.metadata;
1085
+ const usage = extractUsageMetrics(rawUsage, metadata?.providerMetadata);
1034
1086
  const cleanMetadata = metadata ? { ...metadata } : void 0;
1035
1087
  if (cleanMetadata?.request) {
1036
1088
  delete cleanMetadata.request;
@@ -1281,13 +1333,15 @@ var ModelSpanTracker = class {
1281
1333
  * create MODEL_STEP and MODEL_CHUNK spans for each semantic unit in the stream.
1282
1334
  */
1283
1335
  wrapStream(stream) {
1284
- let captureCompletionStartTime = false;
1285
1336
  return stream.pipeThrough(
1286
1337
  new web.TransformStream({
1287
1338
  transform: (chunk, controller) => {
1288
- if (!captureCompletionStartTime) {
1289
- captureCompletionStartTime = true;
1290
- this.#captureCompletionStartTime();
1339
+ switch (chunk.type) {
1340
+ case "text-delta":
1341
+ case "tool-call-delta":
1342
+ case "reasoning-delta":
1343
+ this.#captureCompletionStartTime();
1344
+ break;
1291
1345
  }
1292
1346
  controller.enqueue(chunk);
1293
1347
  switch (chunk.type) {
@@ -1546,6 +1600,9 @@ function deepClean(value, options = {}, _seen = /* @__PURE__ */ new WeakSet(), _
1546
1600
  return "[Circular]";
1547
1601
  }
1548
1602
  _seen.add(value);
1603
+ if (value instanceof Date) {
1604
+ return value;
1605
+ }
1549
1606
  if (Array.isArray(value)) {
1550
1607
  return value.map((item) => deepClean(item, options, _seen, _depth + 1));
1551
1608
  }