@mastra/memory 1.13.2-alpha.0 → 1.14.0-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/CHANGELOG.md +73 -0
  2. package/dist/{chunk-C7PARRAD.js → chunk-GXDPND6K.js} +117 -26
  3. package/dist/chunk-GXDPND6K.js.map +1 -0
  4. package/dist/{chunk-4FMHSWZD.cjs → chunk-ZVRO2GUN.cjs} +117 -26
  5. package/dist/chunk-ZVRO2GUN.cjs.map +1 -0
  6. package/dist/docs/SKILL.md +1 -1
  7. package/dist/docs/assets/SOURCE_MAP.json +29 -29
  8. package/dist/index.cjs +307 -183
  9. package/dist/index.cjs.map +1 -1
  10. package/dist/index.d.ts +29 -3
  11. package/dist/index.d.ts.map +1 -1
  12. package/dist/index.js +300 -176
  13. package/dist/index.js.map +1 -1
  14. package/dist/{observational-memory-5YDQLKHE.cjs → observational-memory-IRCDSDUB.cjs} +26 -26
  15. package/dist/{observational-memory-5YDQLKHE.cjs.map → observational-memory-IRCDSDUB.cjs.map} +1 -1
  16. package/dist/{observational-memory-B7AUSTEY.js → observational-memory-OVRHDQRG.js} +3 -3
  17. package/dist/{observational-memory-B7AUSTEY.js.map → observational-memory-OVRHDQRG.js.map} +1 -1
  18. package/dist/processors/index.cjs +24 -24
  19. package/dist/processors/index.js +1 -1
  20. package/dist/processors/observational-memory/index.d.ts +1 -1
  21. package/dist/processors/observational-memory/index.d.ts.map +1 -1
  22. package/dist/processors/observational-memory/observation-strategies/base.d.ts +3 -3
  23. package/dist/processors/observational-memory/observation-strategies/base.d.ts.map +1 -1
  24. package/dist/processors/observational-memory/observation-strategies/types.d.ts +9 -0
  25. package/dist/processors/observational-memory/observation-strategies/types.d.ts.map +1 -1
  26. package/dist/processors/observational-memory/observational-memory.d.ts +40 -3
  27. package/dist/processors/observational-memory/observational-memory.d.ts.map +1 -1
  28. package/dist/processors/observational-memory/processor.d.ts.map +1 -1
  29. package/dist/processors/observational-memory/reflector-runner.d.ts.map +1 -1
  30. package/dist/processors/observational-memory/types.d.ts +13 -2
  31. package/dist/processors/observational-memory/types.d.ts.map +1 -1
  32. package/package.json +4 -4
  33. package/dist/chunk-4FMHSWZD.cjs.map +0 -1
  34. package/dist/chunk-C7PARRAD.js.map +0 -1
package/CHANGELOG.md CHANGED
@@ -1,5 +1,78 @@
1
1
  # @mastra/memory
2
2
 
3
+ ## 1.14.0-alpha.2
4
+
5
+ ### Minor Changes
6
+
7
+ - Added tracing support to Memory operations (recall, save, delete, update working memory). When an `observabilityContext` is provided, Memory creates `MEMORY_OPERATION` spans that capture operation type, message counts, embedding token usage, and vector result counts. Tracing is fully opt-in — existing usage without `observabilityContext` is unaffected. ([#14305](https://github.com/mastra-ai/mastra/pull/14305))
8
+
9
+ **Example usage:**
10
+
11
+ ```typescript
12
+ import { Memory } from '@mastra/memory';
13
+ import { InMemoryStore } from '@mastra/core/storage';
14
+
15
+ const memory = new Memory({ storage: new InMemoryStore() });
16
+
17
+ // Pass observabilityContext to create observable spans
18
+ await memory.recall({
19
+ threadId: 'thread-1',
20
+ observabilityContext: { tracingContext: { currentSpan: parentSpan } },
21
+ });
22
+
23
+ await memory.saveMessages({
24
+ messages: [userMessage, assistantMessage],
25
+ observabilityContext: { tracingContext: { currentSpan: parentSpan } },
26
+ });
27
+ ```
28
+
29
+ - Added per-record config overrides for observation and reflection thresholds in Observational Memory. Each thread can now have its own `messageTokens` and `observationTokens` thresholds that override the instance-level defaults, without requiring a process restart or cache invalidation. If no per-record override is set, the instance-level config is used as before. ([#15102](https://github.com/mastra-ai/mastra/pull/15102))
30
+
31
+ ### Patch Changes
32
+
33
+ - Fixed recall() to hide dynamic system reminder messages by default, with includeSystemReminders available when callers need raw reminder history. ([#15100](https://github.com/mastra-ai/mastra/pull/15100))
34
+
35
+ - Updated dependencies [[`ac7baf6`](https://github.com/mastra-ai/mastra/commit/ac7baf66ef1db15e03975ef4ebb02724f015a391), [`0df8321`](https://github.com/mastra-ai/mastra/commit/0df832196eeb2450ab77ce887e8553abdd44c5a6), [`61109b3`](https://github.com/mastra-ai/mastra/commit/61109b34feb0e38d54bee4b8ca83eb7345b1d557), [`33f1ead`](https://github.com/mastra-ai/mastra/commit/33f1eadfa19c86953f593478e5fa371093b33779)]:
36
+ - @mastra/core@1.23.0-alpha.8
37
+
38
+ ## 1.14.0-alpha.1
39
+
40
+ ### Minor Changes
41
+
42
+ - Added usage data to ObserveHooks callbacks and standalone reflect() return. ([#15047](https://github.com/mastra-ai/mastra/pull/15047))
43
+
44
+ **ObserveHooks:** `onObservationEnd` and `onReflectionEnd` now receive a result object containing token usage from the underlying LLM call. This enables reliable usage tracking across all observation and reflection paths (sync, async buffered, and resource-scoped).
45
+
46
+ **Standalone reflect():** `reflect()` now returns `{ reflected, record, usage? }` so callers can capture token usage without hooks.
47
+
48
+ **Examples**
49
+
50
+ ```ts
51
+ // Via hooks
52
+ await memory.observe({
53
+ threadId,
54
+ messages,
55
+ hooks: {
56
+ onObservationEnd: ({ usage }) => {
57
+ // usage: { inputTokens, outputTokens, totalTokens }
58
+ },
59
+ onReflectionEnd: ({ usage }) => {
60
+ // usage: { inputTokens, outputTokens, totalTokens }
61
+ },
62
+ },
63
+ });
64
+
65
+ // Via standalone reflect()
66
+ const { reflected, usage } = await memory.reflect(threadId, resourceId);
67
+ ```
68
+
69
+ Existing callbacks that accept no arguments continue to work without changes.
70
+
71
+ ### Patch Changes
72
+
73
+ - Updated dependencies [[`fff91cf`](https://github.com/mastra-ai/mastra/commit/fff91cf914de0e731578aacebffdeebef82f0440)]:
74
+ - @mastra/core@1.23.0-alpha.4
75
+
3
76
  ## 1.13.2-alpha.0
4
77
 
5
78
  ### Patch Changes
@@ -945,7 +945,7 @@ var ObservationStrategy = class _ObservationStrategy {
945
945
  static create;
946
946
  /**
947
947
  * Run the full observation lifecycle.
948
- * @returns `true` if a full observation cycle completed; `false` if skipped (stale lock) or async-buffer failure was swallowed.
948
+ * @returns Result with `observed` flag and optional `usage` from the observer LLM call.
949
949
  * @throws On sync/resource-scoped observer failure after failed markers (same as pre–Option-A contract).
950
950
  */
951
951
  async run() {
@@ -955,7 +955,7 @@ var ObservationStrategy = class _ObservationStrategy {
955
955
  if (this.needsLock) {
956
956
  const fresh = await this.storage.getObservationalMemory(record.threadId, record.resourceId);
957
957
  if (fresh?.lastObservedAt && record.lastObservedAt && fresh.lastObservedAt > record.lastObservedAt) {
958
- return false;
958
+ return { observed: false };
959
959
  }
960
960
  }
961
961
  const { messages, existingObservations } = await this.prepare();
@@ -976,7 +976,7 @@ var ObservationStrategy = class _ObservationStrategy {
976
976
  observabilityContext: this.opts.observabilityContext
977
977
  });
978
978
  }
979
- return true;
979
+ return { observed: true, usage: output.usage };
980
980
  } catch (error) {
981
981
  await this.emitFailedMarkers(cycleId, error);
982
982
  if (!this.rethrowOnFailure) {
@@ -995,7 +995,7 @@ var ObservationStrategy = class _ObservationStrategy {
995
995
  });
996
996
  if (abortSignal?.aborted) throw error;
997
997
  omError("[OM] Observation failed", error);
998
- return false;
998
+ return { observed: false };
999
999
  }
1000
1000
  omError("[OM] Observation failed", error);
1001
1001
  throw error;
@@ -4193,7 +4193,7 @@ var ReflectorRunner = class {
4193
4193
  /**
4194
4194
  * Start an async buffered reflection in the background.
4195
4195
  */
4196
- startAsyncBufferedReflection(record, observationTokens, lockKey, writer, requestContext, observabilityContext) {
4196
+ startAsyncBufferedReflection(record, observationTokens, lockKey, writer, requestContext, observabilityContext, reflectionHooks) {
4197
4197
  const bufferKey = this.buffering.getReflectionBufferKey(lockKey);
4198
4198
  if (this.buffering.isAsyncBufferingInProgress(bufferKey)) {
4199
4199
  return;
@@ -4203,7 +4203,10 @@ var ReflectorRunner = class {
4203
4203
  this.storage.setBufferingReflectionFlag(record.id, true).catch((err) => {
4204
4204
  omError("[OM] Failed to set buffering reflection flag", err);
4205
4205
  });
4206
- const asyncOp = this.doAsyncBufferedReflection(record, bufferKey, writer, requestContext, observabilityContext).catch(async (error) => {
4206
+ reflectionHooks?.onReflectionStart?.();
4207
+ const asyncOp = this.doAsyncBufferedReflection(record, bufferKey, writer, requestContext, observabilityContext).then((usage) => {
4208
+ reflectionHooks?.onReflectionEnd?.({ usage });
4209
+ }).catch(async (error) => {
4207
4210
  if (writer) {
4208
4211
  const failedMarker = createBufferingFailedMarker({
4209
4212
  cycleId: `reflect-buf-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`,
@@ -4219,6 +4222,10 @@ var ReflectorRunner = class {
4219
4222
  await this.persistMarkerToStorage(failedMarker, record.threadId ?? "", record.resourceId ?? void 0);
4220
4223
  }
4221
4224
  omError("[OM] Async buffered reflection failed", error);
4225
+ reflectionHooks?.onReflectionEnd?.({
4226
+ usage: void 0,
4227
+ error: error instanceof Error ? error : new Error(String(error))
4228
+ });
4222
4229
  BufferingCoordinator.lastBufferedBoundary.delete(bufferKey);
4223
4230
  }).finally(() => {
4224
4231
  BufferingCoordinator.asyncBufferingOps.delete(bufferKey);
@@ -4312,6 +4319,7 @@ var ReflectorRunner = class {
4312
4319
  });
4313
4320
  await this.persistMarkerToStorage(endMarker, currentRecord.threadId ?? "", currentRecord.resourceId ?? void 0);
4314
4321
  }
4322
+ return reflectResult.usage;
4315
4323
  }
4316
4324
  /**
4317
4325
  * Try to activate buffered reflection when threshold is reached.
@@ -4432,7 +4440,8 @@ ${unreflectedContent}` : freshRecord.bufferedReflection;
4432
4440
  lockKey,
4433
4441
  writer,
4434
4442
  requestContext,
4435
- observabilityContext
4443
+ observabilityContext,
4444
+ reflectionHooks
4436
4445
  );
4437
4446
  }
4438
4447
  }
@@ -4466,7 +4475,8 @@ ${unreflectedContent}` : freshRecord.bufferedReflection;
4466
4475
  lockKey,
4467
4476
  writer,
4468
4477
  requestContext,
4469
- observabilityContext
4478
+ observabilityContext,
4479
+ reflectionHooks
4470
4480
  );
4471
4481
  return;
4472
4482
  }
@@ -4505,6 +4515,8 @@ ${unreflectedContent}` : freshRecord.bufferedReflection;
4505
4515
  recordId: record.id,
4506
4516
  threadId
4507
4517
  } : void 0;
4518
+ let reflectionUsage;
4519
+ let reflectionError;
4508
4520
  try {
4509
4521
  const compressionStartLevel = await this.getCompressionStartLevel(requestContext);
4510
4522
  const reflectResult = await this.call(
@@ -4518,6 +4530,7 @@ ${unreflectedContent}` : freshRecord.bufferedReflection;
4518
4530
  requestContext,
4519
4531
  observabilityContext
4520
4532
  );
4533
+ reflectionUsage = reflectResult.usage;
4521
4534
  const reflectionTokenCount = this.tokenCounter.countObservations(reflectResult.observations);
4522
4535
  await this.storage.createReflectionGeneration({
4523
4536
  currentRecord: record,
@@ -4562,13 +4575,14 @@ ${unreflectedContent}` : freshRecord.bufferedReflection;
4562
4575
  await writer.custom(failedMarker).catch(() => {
4563
4576
  });
4564
4577
  }
4578
+ reflectionError = error instanceof Error ? error : new Error(String(error));
4565
4579
  if (abortSignal?.aborted) {
4566
4580
  throw error;
4567
4581
  }
4568
4582
  omError("[OM] Reflection failed", error);
4569
4583
  } finally {
4570
4584
  await this.storage.setReflectingFlag(record.id, false);
4571
- reflectionHooks?.onReflectionEnd?.();
4585
+ reflectionHooks?.onReflectionEnd?.({ usage: reflectionUsage, error: reflectionError });
4572
4586
  unregisterOp(record.id, "reflecting");
4573
4587
  }
4574
4588
  }
@@ -6244,6 +6258,47 @@ Async buffering is enabled by default \u2014 this opt-out is only needed when us
6244
6258
  }
6245
6259
  }
6246
6260
  }
6261
+ /**
6262
+ * Resolve the effective messageTokens for a record.
6263
+ * Only explicit per-record overrides (stored under `_overrides`) win;
6264
+ * the initial config snapshot written by getOrCreateRecord() is ignored
6265
+ * so that later instance-level changes still take effect.
6266
+ *
6267
+ * Overrides that fall below the instance-level buffering floor
6268
+ * (bufferTokens / absolute bufferActivation) are clamped to the
6269
+ * instance threshold to preserve buffering invariants.
6270
+ */
6271
+ getEffectiveMessageTokens(record) {
6272
+ const overrides = record.config?._overrides;
6273
+ const recordTokens = overrides?.observation?.messageTokens;
6274
+ if (recordTokens) {
6275
+ const maxOverride = getMaxThreshold(recordTokens);
6276
+ const bufferTokens = this.observationConfig.bufferTokens;
6277
+ if (bufferTokens && maxOverride <= bufferTokens) {
6278
+ return this.observationConfig.messageTokens;
6279
+ }
6280
+ const bufferActivation = this.observationConfig.bufferActivation;
6281
+ if (bufferActivation && bufferActivation >= 1e3 && maxOverride <= bufferActivation) {
6282
+ return this.observationConfig.messageTokens;
6283
+ }
6284
+ return recordTokens;
6285
+ }
6286
+ return this.observationConfig.messageTokens;
6287
+ }
6288
+ /**
6289
+ * Resolve the effective reflection observationTokens for a record.
6290
+ * Only explicit per-record overrides (stored under `_overrides`) win;
6291
+ * the initial config snapshot is ignored so instance-level changes
6292
+ * still take effect for existing records.
6293
+ */
6294
+ getEffectiveReflectionTokens(record) {
6295
+ const overrides = record.config?._overrides;
6296
+ const recordTokens = overrides?.reflection?.observationTokens;
6297
+ if (recordTokens) {
6298
+ return recordTokens;
6299
+ }
6300
+ return this.reflectionConfig.observationTokens;
6301
+ }
6247
6302
  /**
6248
6303
  * Check whether the unobserved message tokens meet the observation threshold.
6249
6304
  */
@@ -6251,7 +6306,7 @@ Async buffering is enabled by default \u2014 this opt-out is only needed when us
6251
6306
  const { record, unobservedTokens, extraTokens = 0 } = opts;
6252
6307
  const pendingTokens = (record.pendingMessageTokens ?? 0) + unobservedTokens + extraTokens;
6253
6308
  const currentObservationTokens = record.observationTokenCount ?? 0;
6254
- const threshold = calculateDynamicThreshold(this.observationConfig.messageTokens, currentObservationTokens);
6309
+ const threshold = calculateDynamicThreshold(this.getEffectiveMessageTokens(record), currentObservationTokens);
6255
6310
  return pendingTokens >= threshold;
6256
6311
  }
6257
6312
  /**
@@ -7400,7 +7455,7 @@ ${grouped}` : grouped;
7400
7455
  const projectedMessageRemoval = calculateProjectedMessageRemoval(
7401
7456
  bufferedChunks,
7402
7457
  this.observationConfig.bufferActivation ?? 1,
7403
- getMaxThreshold(this.observationConfig.messageTokens),
7458
+ getMaxThreshold(this.getEffectiveMessageTokens(record)),
7404
7459
  pendingTokens
7405
7460
  );
7406
7461
  let obsBufferStatus = "idle";
@@ -7489,7 +7544,7 @@ ${grouped}` : grouped;
7489
7544
  otherThreadTokens = otherContext ? this.tokenCounter.countString(otherContext) : 0;
7490
7545
  }
7491
7546
  const pendingTokens = Math.max(0, contextWindowTokens + otherThreadTokens);
7492
- const threshold = calculateDynamicThreshold(this.observationConfig.messageTokens, currentObservationTokens);
7547
+ const threshold = calculateDynamicThreshold(this.getEffectiveMessageTokens(record), currentObservationTokens);
7493
7548
  const bufferedChunks = getBufferedChunks(record);
7494
7549
  const bufferedChunkCount = bufferedChunks.length;
7495
7550
  const bufferedChunkTokens = bufferedChunks.reduce((sum, chunk) => sum + (chunk.messageTokens ?? 0), 0);
@@ -7506,11 +7561,12 @@ ${grouped}` : grouped;
7506
7561
  );
7507
7562
  }
7508
7563
  const shouldObserve = pendingTokens >= threshold;
7509
- const reflectThreshold = getMaxThreshold(this.reflectionConfig.observationTokens);
7564
+ const reflectThreshold = getMaxThreshold(this.getEffectiveReflectionTokens(record));
7510
7565
  const shouldReflect = currentObservationTokens >= reflectThreshold;
7511
7566
  const canActivate = bufferedChunkCount > 0;
7512
- const isSharedBudget = typeof this.observationConfig.messageTokens !== "number";
7513
- const totalBudget = isSharedBudget ? this.observationConfig.messageTokens.max : 0;
7567
+ const effectiveMessageTokens = this.getEffectiveMessageTokens(record);
7568
+ const isSharedBudget = typeof effectiveMessageTokens !== "number";
7569
+ const totalBudget = isSharedBudget ? effectiveMessageTokens.max : 0;
7514
7570
  const effectiveObservationTokensThreshold = isSharedBudget ? Math.max(totalBudget - threshold, 1e3) : reflectThreshold;
7515
7571
  const unbufferedPendingTokens = Math.max(0, pendingTokens - bufferedChunkTokens);
7516
7572
  return {
@@ -7825,7 +7881,7 @@ ${grouped}` : grouped;
7825
7881
  if (!freshChunks.length) {
7826
7882
  return { activated: false, record };
7827
7883
  }
7828
- const messageTokensThreshold = getMaxThreshold(this.observationConfig.messageTokens);
7884
+ const messageTokensThreshold = getMaxThreshold(this.getEffectiveMessageTokens(freshRecord));
7829
7885
  const bufferActivation = this.observationConfig.bufferActivation ?? 0.7;
7830
7886
  const activationRatio = resolveActivationRatio(bufferActivation, messageTokensThreshold);
7831
7887
  const totalChunkMessageTokens = freshChunks.reduce((sum, c) => sum + (c.messageTokens ?? 0), 0);
@@ -7913,6 +7969,7 @@ ${grouped}` : grouped;
7913
7969
  const lockKey = this.buffering.getLockKey(threadId, resourceId);
7914
7970
  const reflectionHooks = hooks ? { onReflectionStart: hooks.onReflectionStart, onReflectionEnd: hooks.onReflectionEnd } : void 0;
7915
7971
  let observed = false;
7972
+ let observationUsage;
7916
7973
  let generationBefore = -1;
7917
7974
  await this.withLock(lockKey, async () => {
7918
7975
  const freshRecord = await this.getOrCreateRecord(threadId, resourceId);
@@ -7929,8 +7986,9 @@ ${grouped}` : grouped;
7929
7986
  return;
7930
7987
  }
7931
7988
  hooks?.onObservationStart?.();
7989
+ let observationError;
7932
7990
  try {
7933
- observed = await ObservationStrategy.create(this, {
7991
+ const result = await ObservationStrategy.create(this, {
7934
7992
  record: freshRecord,
7935
7993
  threadId,
7936
7994
  resourceId,
@@ -7940,8 +7998,13 @@ ${grouped}` : grouped;
7940
7998
  writer: opts.writer,
7941
7999
  observabilityContext: opts.observabilityContext
7942
8000
  }).run();
8001
+ observed = result.observed;
8002
+ observationUsage = result.usage;
8003
+ } catch (error) {
8004
+ observationError = error instanceof Error ? error : new Error(String(error));
8005
+ throw error;
7943
8006
  } finally {
7944
- hooks?.onObservationEnd?.();
8007
+ hooks?.onObservationEnd?.({ usage: observationUsage, error: observationError });
7945
8008
  }
7946
8009
  });
7947
8010
  const record = await this.getOrCreateRecord(threadId, resourceId);
@@ -7962,12 +8025,12 @@ ${grouped}` : grouped;
7962
8025
  async reflect(threadId, resourceId, prompt, requestContext, observabilityContext) {
7963
8026
  const record = await this.getOrCreateRecord(threadId, resourceId);
7964
8027
  if (!record.activeObservations) {
7965
- return { reflected: false, record };
8028
+ return { reflected: false, record, usage: void 0 };
7966
8029
  }
7967
8030
  await this.storage.setReflectingFlag(record.id, true);
7968
8031
  registerOp(record.id, "reflecting");
7969
8032
  try {
7970
- const reflectThreshold = getMaxThreshold(this.reflectionConfig.observationTokens);
8033
+ const reflectThreshold = getMaxThreshold(this.getEffectiveReflectionTokens(record));
7971
8034
  const reflectResult = await this.reflector.call(
7972
8035
  record.activeObservations,
7973
8036
  prompt,
@@ -7987,11 +8050,11 @@ ${grouped}` : grouped;
7987
8050
  tokenCount: reflectionTokenCount
7988
8051
  });
7989
8052
  const updatedRecord = await this.getOrCreateRecord(threadId, resourceId);
7990
- return { reflected: true, record: updatedRecord };
8053
+ return { reflected: true, record: updatedRecord, usage: reflectResult.usage };
7991
8054
  } catch (error) {
7992
8055
  omError("[OM] reflect() failed", error);
7993
8056
  const latestRecord = await this.getOrCreateRecord(threadId, resourceId);
7994
- return { reflected: false, record: latestRecord };
8057
+ return { reflected: false, record: latestRecord, usage: void 0 };
7995
8058
  } finally {
7996
8059
  await this.storage.setReflectingFlag(record.id, false);
7997
8060
  unregisterOp(record.id, "reflecting");
@@ -8012,12 +8075,40 @@ ${grouped}` : grouped;
8012
8075
  const ids = this.getStorageIds(threadId, resourceId);
8013
8076
  return this.storage.getObservationalMemory(ids.threadId, ids.resourceId);
8014
8077
  }
8078
+ /**
8079
+ * Update per-record config overrides for observation and/or reflection thresholds.
8080
+ * The provided config is deep-merged into the record's `_overrides` key,
8081
+ * so you only need to specify the fields you want to change.
8082
+ *
8083
+ * Overrides that violate buffering invariants (e.g. messageTokens below
8084
+ * bufferTokens) are silently ignored at read time — the helpers fall back
8085
+ * to the instance-level config.
8086
+ *
8087
+ * @example
8088
+ * ```ts
8089
+ * await om.updateRecordConfig('thread-1', undefined, {
8090
+ * observation: { messageTokens: 2000 },
8091
+ * reflection: { observationTokens: 8000 },
8092
+ * });
8093
+ * ```
8094
+ */
8095
+ async updateRecordConfig(threadId, resourceId, config) {
8096
+ const ids = this.getStorageIds(threadId, resourceId);
8097
+ const record = await this.storage.getObservationalMemory(ids.threadId, ids.resourceId);
8098
+ if (!record) {
8099
+ throw new Error(`No observational memory record found for thread ${ids.threadId}`);
8100
+ }
8101
+ await this.storage.updateObservationalMemoryConfig({
8102
+ id: record.id,
8103
+ config: { _overrides: config }
8104
+ });
8105
+ }
8015
8106
  /**
8016
8107
  * Get observation history (previous generations)
8017
8108
  */
8018
- async getHistory(threadId, resourceId, limit) {
8109
+ async getHistory(threadId, resourceId, limit, options) {
8019
8110
  const ids = this.getStorageIds(threadId, resourceId);
8020
- return this.storage.getObservationalMemoryHistory(ids.threadId, ids.resourceId, limit);
8111
+ return this.storage.getObservationalMemoryHistory(ids.threadId, ids.resourceId, limit, options);
8021
8112
  }
8022
8113
  /**
8023
8114
  * Clear all memory for a specific thread/resource
@@ -8563,5 +8654,5 @@ function getObservationsAsOf(activeObservations, asOf) {
8563
8654
  }
8564
8655
 
8565
8656
  export { ModelByInputTokens, OBSERVER_SYSTEM_PROMPT, ObservationalMemory, ObservationalMemoryProcessor, TokenCounter, buildObserverPrompt, buildObserverSystemPrompt, combineObservationGroupRanges, deriveObservationGroupProvenance, extractCurrentTask, formatMessagesForObserver, formatToolResultForObserver, getObservationsAsOf, hasCurrentTaskSection, injectAnchorIds, optimizeObservationsForContext, parseAnchorId, parseObservationGroups, parseObserverOutput, reconcileObservationGroupsFromReflection, renderObservationGroupsForReflection, resolveToolResultValue, stripEphemeralAnchorIds, stripObservationGroups, truncateStringByTokens, wrapInObservationGroup };
8566
- //# sourceMappingURL=chunk-C7PARRAD.js.map
8567
- //# sourceMappingURL=chunk-C7PARRAD.js.map
8657
+ //# sourceMappingURL=chunk-GXDPND6K.js.map
8658
+ //# sourceMappingURL=chunk-GXDPND6K.js.map