@mastra/memory 1.14.0-alpha.1 → 1.14.0-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,40 @@
1
1
  # @mastra/memory
2
2
 
3
+ ## 1.14.0-alpha.2
4
+
5
+ ### Minor Changes
6
+
7
+ - Added tracing support to Memory operations (recall, save, delete, update working memory). When an `observabilityContext` is provided, Memory creates `MEMORY_OPERATION` spans that capture operation type, message counts, embedding token usage, and vector result counts. Tracing is fully opt-in — existing usage without `observabilityContext` is unaffected. ([#14305](https://github.com/mastra-ai/mastra/pull/14305))
8
+
9
+ **Example usage:**
10
+
11
+ ```typescript
12
+ import { Memory } from '@mastra/memory';
13
+ import { InMemoryStore } from '@mastra/core/storage';
14
+
15
+ const memory = new Memory({ storage: new InMemoryStore() });
16
+
17
+ // Pass observabilityContext to create observable spans
18
+ await memory.recall({
19
+ threadId: 'thread-1',
20
+ observabilityContext: { tracingContext: { currentSpan: parentSpan } },
21
+ });
22
+
23
+ await memory.saveMessages({
24
+ messages: [userMessage, assistantMessage],
25
+ observabilityContext: { tracingContext: { currentSpan: parentSpan } },
26
+ });
27
+ ```
28
+
29
+ - Added per-record config overrides for observation and reflection thresholds in Observational Memory. Each thread can now have its own `messageTokens` and `observationTokens` thresholds that override the instance-level defaults, without requiring a process restart or cache invalidation. If no per-record override is set, the instance-level config is used as before. ([#15102](https://github.com/mastra-ai/mastra/pull/15102))
30
+
31
+ ### Patch Changes
32
+
33
+ - Fixed `recall()` to hide dynamic system reminder messages by default, with `includeSystemReminders` available when callers need raw reminder history. ([#15100](https://github.com/mastra-ai/mastra/pull/15100))
34
+
35
+ - Updated dependencies [[`ac7baf6`](https://github.com/mastra-ai/mastra/commit/ac7baf66ef1db15e03975ef4ebb02724f015a391), [`0df8321`](https://github.com/mastra-ai/mastra/commit/0df832196eeb2450ab77ce887e8553abdd44c5a6), [`61109b3`](https://github.com/mastra-ai/mastra/commit/61109b34feb0e38d54bee4b8ca83eb7345b1d557), [`33f1ead`](https://github.com/mastra-ai/mastra/commit/33f1eadfa19c86953f593478e5fa371093b33779)]:
36
+ - @mastra/core@1.23.0-alpha.8
37
+
3
38
  ## 1.14.0-alpha.1
4
39
 
5
40
  ### Minor Changes
@@ -6258,6 +6258,47 @@ Async buffering is enabled by default — this opt-out is only needed when us
6258
6258
  }
6259
6259
  }
6260
6260
  }
6261
+ /**
6262
+ * Resolve the effective messageTokens for a record.
6263
+ * Only explicit per-record overrides (stored under `_overrides`) win;
6264
+ * the initial config snapshot written by getOrCreateRecord() is ignored
6265
+ * so that later instance-level changes still take effect.
6266
+ *
6267
+ * Overrides that fall below the instance-level buffering floor
6268
+ * (bufferTokens / absolute bufferActivation) are clamped to the
6269
+ * instance threshold to preserve buffering invariants.
6270
+ */
6271
+ getEffectiveMessageTokens(record) {
6272
+ const overrides = record.config?._overrides;
6273
+ const recordTokens = overrides?.observation?.messageTokens;
6274
+ if (recordTokens) {
6275
+ const maxOverride = getMaxThreshold(recordTokens);
6276
+ const bufferTokens = this.observationConfig.bufferTokens;
6277
+ if (bufferTokens && maxOverride <= bufferTokens) {
6278
+ return this.observationConfig.messageTokens;
6279
+ }
6280
+ const bufferActivation = this.observationConfig.bufferActivation;
6281
+ if (bufferActivation && bufferActivation >= 1e3 && maxOverride <= bufferActivation) {
6282
+ return this.observationConfig.messageTokens;
6283
+ }
6284
+ return recordTokens;
6285
+ }
6286
+ return this.observationConfig.messageTokens;
6287
+ }
6288
+ /**
6289
+ * Resolve the effective reflection observationTokens for a record.
6290
+ * Only explicit per-record overrides (stored under `_overrides`) win;
6291
+ * the initial config snapshot is ignored so instance-level changes
6292
+ * still take effect for existing records.
6293
+ */
6294
+ getEffectiveReflectionTokens(record) {
6295
+ const overrides = record.config?._overrides;
6296
+ const recordTokens = overrides?.reflection?.observationTokens;
6297
+ if (recordTokens) {
6298
+ return recordTokens;
6299
+ }
6300
+ return this.reflectionConfig.observationTokens;
6301
+ }
6261
6302
  /**
6262
6303
  * Check whether the unobserved message tokens meet the observation threshold.
6263
6304
  */
@@ -6265,7 +6306,7 @@ Async buffering is enabled by default \u2014 this opt-out is only needed when us
6265
6306
  const { record, unobservedTokens, extraTokens = 0 } = opts;
6266
6307
  const pendingTokens = (record.pendingMessageTokens ?? 0) + unobservedTokens + extraTokens;
6267
6308
  const currentObservationTokens = record.observationTokenCount ?? 0;
6268
- const threshold = calculateDynamicThreshold(this.observationConfig.messageTokens, currentObservationTokens);
6309
+ const threshold = calculateDynamicThreshold(this.getEffectiveMessageTokens(record), currentObservationTokens);
6269
6310
  return pendingTokens >= threshold;
6270
6311
  }
6271
6312
  /**
@@ -7414,7 +7455,7 @@ ${grouped}` : grouped;
7414
7455
  const projectedMessageRemoval = calculateProjectedMessageRemoval(
7415
7456
  bufferedChunks,
7416
7457
  this.observationConfig.bufferActivation ?? 1,
7417
- getMaxThreshold(this.observationConfig.messageTokens),
7458
+ getMaxThreshold(this.getEffectiveMessageTokens(record)),
7418
7459
  pendingTokens
7419
7460
  );
7420
7461
  let obsBufferStatus = "idle";
@@ -7503,7 +7544,7 @@ ${grouped}` : grouped;
7503
7544
  otherThreadTokens = otherContext ? this.tokenCounter.countString(otherContext) : 0;
7504
7545
  }
7505
7546
  const pendingTokens = Math.max(0, contextWindowTokens + otherThreadTokens);
7506
- const threshold = calculateDynamicThreshold(this.observationConfig.messageTokens, currentObservationTokens);
7547
+ const threshold = calculateDynamicThreshold(this.getEffectiveMessageTokens(record), currentObservationTokens);
7507
7548
  const bufferedChunks = getBufferedChunks(record);
7508
7549
  const bufferedChunkCount = bufferedChunks.length;
7509
7550
  const bufferedChunkTokens = bufferedChunks.reduce((sum, chunk) => sum + (chunk.messageTokens ?? 0), 0);
@@ -7520,11 +7561,12 @@ ${grouped}` : grouped;
7520
7561
  );
7521
7562
  }
7522
7563
  const shouldObserve = pendingTokens >= threshold;
7523
- const reflectThreshold = getMaxThreshold(this.reflectionConfig.observationTokens);
7564
+ const reflectThreshold = getMaxThreshold(this.getEffectiveReflectionTokens(record));
7524
7565
  const shouldReflect = currentObservationTokens >= reflectThreshold;
7525
7566
  const canActivate = bufferedChunkCount > 0;
7526
- const isSharedBudget = typeof this.observationConfig.messageTokens !== "number";
7527
- const totalBudget = isSharedBudget ? this.observationConfig.messageTokens.max : 0;
7567
+ const effectiveMessageTokens = this.getEffectiveMessageTokens(record);
7568
+ const isSharedBudget = typeof effectiveMessageTokens !== "number";
7569
+ const totalBudget = isSharedBudget ? effectiveMessageTokens.max : 0;
7528
7570
  const effectiveObservationTokensThreshold = isSharedBudget ? Math.max(totalBudget - threshold, 1e3) : reflectThreshold;
7529
7571
  const unbufferedPendingTokens = Math.max(0, pendingTokens - bufferedChunkTokens);
7530
7572
  return {
@@ -7839,7 +7881,7 @@ ${grouped}` : grouped;
7839
7881
  if (!freshChunks.length) {
7840
7882
  return { activated: false, record };
7841
7883
  }
7842
- const messageTokensThreshold = getMaxThreshold(this.observationConfig.messageTokens);
7884
+ const messageTokensThreshold = getMaxThreshold(this.getEffectiveMessageTokens(freshRecord));
7843
7885
  const bufferActivation = this.observationConfig.bufferActivation ?? 0.7;
7844
7886
  const activationRatio = resolveActivationRatio(bufferActivation, messageTokensThreshold);
7845
7887
  const totalChunkMessageTokens = freshChunks.reduce((sum, c) => sum + (c.messageTokens ?? 0), 0);
@@ -7988,7 +8030,7 @@ ${grouped}` : grouped;
7988
8030
  await this.storage.setReflectingFlag(record.id, true);
7989
8031
  registerOp(record.id, "reflecting");
7990
8032
  try {
7991
- const reflectThreshold = getMaxThreshold(this.reflectionConfig.observationTokens);
8033
+ const reflectThreshold = getMaxThreshold(this.getEffectiveReflectionTokens(record));
7992
8034
  const reflectResult = await this.reflector.call(
7993
8035
  record.activeObservations,
7994
8036
  prompt,
@@ -8033,6 +8075,34 @@ ${grouped}` : grouped;
8033
8075
  const ids = this.getStorageIds(threadId, resourceId);
8034
8076
  return this.storage.getObservationalMemory(ids.threadId, ids.resourceId);
8035
8077
  }
8078
+ /**
8079
+ * Update per-record config overrides for observation and/or reflection thresholds.
8080
+ * The provided config is deep-merged into the record's `_overrides` key,
8081
+ * so you only need to specify the fields you want to change.
8082
+ *
8083
+ * Overrides that violate buffering invariants (e.g. messageTokens below
8084
+ * bufferTokens) are silently ignored at read time — the helpers fall back
8085
+ * to the instance-level config.
8086
+ *
8087
+ * @example
8088
+ * ```ts
8089
+ * await om.updateRecordConfig('thread-1', undefined, {
8090
+ * observation: { messageTokens: 2000 },
8091
+ * reflection: { observationTokens: 8000 },
8092
+ * });
8093
+ * ```
8094
+ */
8095
+ async updateRecordConfig(threadId, resourceId, config) {
8096
+ const ids = this.getStorageIds(threadId, resourceId);
8097
+ const record = await this.storage.getObservationalMemory(ids.threadId, ids.resourceId);
8098
+ if (!record) {
8099
+ throw new Error(`No observational memory record found for thread ${ids.threadId}`);
8100
+ }
8101
+ await this.storage.updateObservationalMemoryConfig({
8102
+ id: record.id,
8103
+ config: { _overrides: config }
8104
+ });
8105
+ }
8036
8106
  /**
8037
8107
  * Get observation history (previous generations)
8038
8108
  */
@@ -8584,5 +8654,5 @@ function getObservationsAsOf(activeObservations, asOf) {
8584
8654
  }
8585
8655
 
8586
8656
  export { ModelByInputTokens, OBSERVER_SYSTEM_PROMPT, ObservationalMemory, ObservationalMemoryProcessor, TokenCounter, buildObserverPrompt, buildObserverSystemPrompt, combineObservationGroupRanges, deriveObservationGroupProvenance, extractCurrentTask, formatMessagesForObserver, formatToolResultForObserver, getObservationsAsOf, hasCurrentTaskSection, injectAnchorIds, optimizeObservationsForContext, parseAnchorId, parseObservationGroups, parseObserverOutput, reconcileObservationGroupsFromReflection, renderObservationGroupsForReflection, resolveToolResultValue, stripEphemeralAnchorIds, stripObservationGroups, truncateStringByTokens, wrapInObservationGroup };
8587
- //# sourceMappingURL=chunk-FQGF36BE.js.map
8588
- //# sourceMappingURL=chunk-FQGF36BE.js.map
8657
+ //# sourceMappingURL=chunk-GXDPND6K.js.map
8658
+ //# sourceMappingURL=chunk-GXDPND6K.js.map