@mastra/memory 1.16.0-alpha.0 → 1.16.0-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/CHANGELOG.md +42 -0
  2. package/dist/{chunk-3NECGYWZ.cjs → chunk-EOHXGI4J.cjs} +149 -14
  3. package/dist/chunk-EOHXGI4J.cjs.map +1 -0
  4. package/dist/{chunk-HB6AYAFD.js → chunk-MGHUIRKN.js} +149 -14
  5. package/dist/chunk-MGHUIRKN.js.map +1 -0
  6. package/dist/docs/SKILL.md +1 -1
  7. package/dist/docs/assets/SOURCE_MAP.json +47 -47
  8. package/dist/docs/references/docs-memory-observational-memory.md +4 -0
  9. package/dist/index.cjs +15 -14
  10. package/dist/index.cjs.map +1 -1
  11. package/dist/index.d.ts +1 -0
  12. package/dist/index.d.ts.map +1 -1
  13. package/dist/index.js +6 -5
  14. package/dist/index.js.map +1 -1
  15. package/dist/{observational-memory-X4N2R4CA.cjs → observational-memory-FCDIQ3SX.cjs} +26 -26
  16. package/dist/{observational-memory-X4N2R4CA.cjs.map → observational-memory-FCDIQ3SX.cjs.map} +1 -1
  17. package/dist/{observational-memory-WWAB2MMI.js → observational-memory-WJ4BDRUP.js} +3 -3
  18. package/dist/{observational-memory-WWAB2MMI.js.map → observational-memory-WJ4BDRUP.js.map} +1 -1
  19. package/dist/processors/index.cjs +24 -24
  20. package/dist/processors/index.js +1 -1
  21. package/dist/processors/observational-memory/markers.d.ts +3 -1
  22. package/dist/processors/observational-memory/markers.d.ts.map +1 -1
  23. package/dist/processors/observational-memory/model-context.d.ts +10 -0
  24. package/dist/processors/observational-memory/model-context.d.ts.map +1 -0
  25. package/dist/processors/observational-memory/observation-turn/step.d.ts.map +1 -1
  26. package/dist/processors/observational-memory/observation-turn/turn.d.ts +3 -0
  27. package/dist/processors/observational-memory/observation-turn/turn.d.ts.map +1 -1
  28. package/dist/processors/observational-memory/observational-memory.d.ts +9 -1
  29. package/dist/processors/observational-memory/observational-memory.d.ts.map +1 -1
  30. package/dist/processors/observational-memory/processor.d.ts.map +1 -1
  31. package/dist/processors/observational-memory/reflector-runner.d.ts +5 -2
  32. package/dist/processors/observational-memory/reflector-runner.d.ts.map +1 -1
  33. package/dist/processors/observational-memory/types.d.ts +19 -2
  34. package/dist/processors/observational-memory/types.d.ts.map +1 -1
  35. package/package.json +4 -4
  36. package/dist/chunk-3NECGYWZ.cjs.map +0 -1
  37. package/dist/chunk-HB6AYAFD.js.map +0 -1
package/CHANGELOG.md CHANGED
@@ -1,5 +1,47 @@
1
1
  # @mastra/memory
2
2
 
3
+ ## 1.16.0-alpha.2
4
+
5
+ ### Minor Changes
6
+
7
+ - Added `activateOnProviderChange` so observational memory can activate buffered observations and reflections before switching to a different provider or model. ([#15420](https://github.com/mastra-ai/mastra/pull/15420))
8
+
9
+ ```ts
10
+ const memory = new Memory({
11
+ options: {
12
+ observationalMemory: {
13
+ model: 'google/gemini-2.5-flash',
14
+ activateOnProviderChange: true,
15
+ },
16
+ },
17
+ });
18
+ ```
19
+
20
+ This helps keep prompt-cache savings when the next step cannot reuse the previous provider's cache.
21
+
22
+ ### Patch Changes
23
+
24
+ - Fixed early observational memory activations so buffered reflections are only activated when they still leave a healthy active observation set. ([#15462](https://github.com/mastra-ai/mastra/pull/15462))
25
+
26
+ Before this change, idle-timeout (`activateAfterIdle`) and model/provider-change (`activateOnProviderChange`) activations could swap in a buffered reflection too early. In bad cases, that replaced a large raw observation tail with a much smaller mostly-compressed result, which hurt reflection quality.
27
+
28
+ Early activations now stay buffered unless both of these checks pass:
29
+ - The unreflected observation tail is at least as large as the buffered reflection, so the activated result is not dominated by compressed content.
30
+ - The combined post-activation size is at least 75% of what a normal threshold activation would produce, so early activations do not cliff far below the regular target.
31
+
32
+ This update also fixes false `provider_change` activations when older persisted messages only contain a bare model id like `gpt-5.4` while newer turns use the fully qualified `provider/modelId` form.
33
+
34
+ - Updated dependencies [[`0474c2b`](https://github.com/mastra-ai/mastra/commit/0474c2b2e7c7e1ad8691dca031284841391ff1ef), [`f607106`](https://github.com/mastra-ai/mastra/commit/f607106854c6416c4a07d4082604b9f66d047221), [`62919a6`](https://github.com/mastra-ai/mastra/commit/62919a6ee0fbf3779ad21a97b1ec6696515d5104), [`0fd90a2`](https://github.com/mastra-ai/mastra/commit/0fd90a215caf5fca8099c15a67ca03e4427747a3)]:
35
+ - @mastra/core@1.26.0-alpha.4
36
+
37
+ ## 1.16.0-alpha.1
38
+
39
+ ### Patch Changes
40
+
41
+ - Updated dependencies [[`fdd54cf`](https://github.com/mastra-ai/mastra/commit/fdd54cf612a9af876e9fdd85e534454f6e7dd518), [`7db42a9`](https://github.com/mastra-ai/mastra/commit/7db42a9cccd3b29c44fb0731f792c51575e8421c), [`30456b6`](https://github.com/mastra-ai/mastra/commit/30456b6b08c8fd17e109dd093b73d93b65e83bc5), [`9d11a8c`](https://github.com/mastra-ai/mastra/commit/9d11a8c1c8924eb975a245a5884d40ca1b7e0491), [`d246696`](https://github.com/mastra-ai/mastra/commit/d246696139a3144a5b21b042d41c532688e957e1), [`354f9ce`](https://github.com/mastra-ai/mastra/commit/354f9ce1ca6af2074b6a196a23f8ec30012dccca), [`e9837b5`](https://github.com/mastra-ai/mastra/commit/e9837b53699e18711b09e0ca010a4106376f2653)]:
42
+ - @mastra/core@1.26.0-alpha.3
43
+ - @mastra/schema-compat@1.2.9-alpha.1
44
+
3
45
  ## 1.16.0-alpha.0
4
46
 
5
47
  ### Minor Changes
@@ -208,6 +208,19 @@ var BufferingCoordinator = class _BufferingCoordinator {
208
208
  }
209
209
  };
210
210
 
211
+ // src/processors/observational-memory/model-context.ts
212
+ function didProviderChange(actorModel, lastModel) {
213
+ if (actorModel === void 0 || lastModel === void 0) return false;
214
+ const actorHasSlash = actorModel.includes("/");
215
+ const lastHasSlash = lastModel.includes("/");
216
+ if (actorHasSlash && lastHasSlash) {
217
+ return actorModel !== lastModel;
218
+ }
219
+ const actorModelId = actorHasSlash ? actorModel.slice(actorModel.indexOf("/") + 1) : actorModel;
220
+ const lastModelId = lastHasSlash ? lastModel.slice(lastModel.indexOf("/") + 1) : lastModel;
221
+ return actorModelId !== lastModelId;
222
+ }
223
+
211
224
  // src/processors/observational-memory/date-utils.ts
212
225
  function formatRelativeTime(date, currentDate) {
213
226
  const diffMs = currentDate.getTime() - date.getTime();
@@ -494,7 +507,9 @@ function createActivationMarker(params) {
494
507
  observations: params.observations,
495
508
  triggeredBy: params.triggeredBy,
496
509
  lastActivityAt: params.lastActivityAt,
497
- ttlExpiredMs: params.ttlExpiredMs
510
+ ttlExpiredMs: params.ttlExpiredMs,
511
+ previousModel: params.previousModel,
512
+ currentModel: params.currentModel
498
513
  }
499
514
  };
500
515
  }
@@ -1989,6 +2004,7 @@ var ObservationStep = class {
1989
2004
  resourceId,
1990
2005
  checkThreshold: true,
1991
2006
  messages: step0Messages,
2007
+ currentModel: this.turn.actorModelContext,
1992
2008
  writer: this.turn.writer,
1993
2009
  messageList
1994
2010
  });
@@ -2012,6 +2028,8 @@ var ObservationStep = class {
2012
2028
  observationTokens: obsTokens,
2013
2029
  threadId,
2014
2030
  writer: this.turn.writer,
2031
+ messageList,
2032
+ currentModel: this.turn.actorModelContext,
2015
2033
  requestContext: this.turn.requestContext,
2016
2034
  observabilityContext: this.turn.observabilityContext,
2017
2035
  lastActivityAt: getLastActivityFromMessages(messageList.get.all.db())
@@ -2178,6 +2196,7 @@ var ObservationStep = class {
2178
2196
  threadId,
2179
2197
  resourceId,
2180
2198
  messages: messageList.get.all.db(),
2199
+ currentModel: this.turn.actorModelContext,
2181
2200
  writer: this.turn.writer,
2182
2201
  messageList
2183
2202
  });
@@ -2189,6 +2208,7 @@ var ObservationStep = class {
2189
2208
  threadId,
2190
2209
  writer: this.turn.writer,
2191
2210
  messageList,
2211
+ currentModel: this.turn.actorModelContext,
2192
2212
  requestContext: this.turn.requestContext,
2193
2213
  observabilityContext: this.turn.observabilityContext,
2194
2214
  lastActivityAt: getLastActivityFromMessages(messageList.get.all.db())
@@ -2233,6 +2253,8 @@ var ObservationTurn = class {
2233
2253
  requestContext;
2234
2254
  /** Optional observability context for nested OM spans. */
2235
2255
  observabilityContext;
2256
+ /** Current actor model for this step. Updated by the processor before prepare(). */
2257
+ actorModelContext;
2236
2258
  /** Optional processor-provided hooks for turn/step lifecycle integration. */
2237
2259
  hooks;
2238
2260
  constructor(opts) {
@@ -4101,12 +4123,44 @@ function validateCompression(reflectedTokens, targetThreshold) {
4101
4123
  }
4102
4124
 
4103
4125
  // src/processors/observational-memory/reflector-runner.ts
4126
+ function formatModelContext(provider, modelId) {
4127
+ if (provider && modelId) {
4128
+ return `${provider}/${modelId}`;
4129
+ }
4130
+ return modelId;
4131
+ }
4132
+ function getCurrentModel(model) {
4133
+ return formatModelContext(model?.provider, model?.modelId);
4134
+ }
4135
+ function getLastModelFromMessageList(messageList) {
4136
+ const messages = messageList?.get.all.db();
4137
+ if (!messages) return void 0;
4138
+ for (let i = messages.length - 1; i >= 0; i--) {
4139
+ const message = messages[i];
4140
+ if (!message || message.role !== "assistant" || !message.content || typeof message.content === "string") {
4141
+ continue;
4142
+ }
4143
+ for (let j = message.content.parts.length - 1; j >= 0; j--) {
4144
+ const part = message.content.parts[j];
4145
+ if (part?.type === "step-start" && typeof part.model === "string" && part.model.length > 0) {
4146
+ return part.model;
4147
+ }
4148
+ }
4149
+ const metadata = message.content.metadata;
4150
+ const model = formatModelContext(metadata?.provider, metadata?.modelId);
4151
+ if (model) {
4152
+ return model;
4153
+ }
4154
+ }
4155
+ return void 0;
4156
+ }
4104
4157
  async function withAbortCheck(fn, abortSignal) {
4105
4158
  if (abortSignal?.aborted) throw new Error("The operation was aborted.");
4106
4159
  const result = await fn();
4107
4160
  if (abortSignal?.aborted) throw new Error("The operation was aborted.");
4108
4161
  return result;
4109
4162
  }
4163
+ var EARLY_ACTIVATION_SIZE_FLOOR_RATIO = 0.75;
4110
4164
  var ReflectorRunner = class {
4111
4165
  reflectionConfig;
4112
4166
  observationConfig;
@@ -4431,7 +4485,9 @@ var ReflectorRunner = class {
4431
4485
  }
4432
4486
  /**
4433
4487
  * Try to activate buffered reflection when threshold is reached.
4434
- * Returns true if activation succeeded.
4488
+ * Returns a discriminated result so the caller can distinguish between
4489
+ * "activated", "no buffer present", and "suppressed by overshoot guard"
4490
+ * without re-deriving that state.
4435
4491
  */
4436
4492
  async tryActivateBufferedReflection(record, lockKey, writer, messageList, activationMetadata) {
4437
4493
  const bufferKey = this.buffering.getReflectionBufferKey(lockKey);
@@ -4454,8 +4510,8 @@ var ReflectorRunner = class {
4454
4510
  `[OM:reflect] tryActivateBufferedReflection: freshRecord.id=${freshRecord?.id}, freshBufferedReflection=${freshRecord?.bufferedReflection ? "present (" + freshRecord.bufferedReflection.length + " chars)" : "empty"}, freshObsTokens=${freshRecord?.observationTokenCount}`
4455
4511
  );
4456
4512
  if (!freshRecord?.bufferedReflection) {
4457
- omDebug(`[OM:reflect] tryActivateBufferedReflection: no buffered reflection after re-fetch, returning false`);
4458
- return false;
4513
+ omDebug(`[OM:reflect] tryActivateBufferedReflection: no buffered reflection after re-fetch`);
4514
+ return { status: "no-buffer" };
4459
4515
  }
4460
4516
  const beforeTokens = freshRecord.observationTokenCount ?? 0;
4461
4517
  const reflectedLineCount = freshRecord.reflectedObservationLineCount ?? 0;
@@ -4467,6 +4523,26 @@ var ReflectorRunner = class {
4467
4523
 
4468
4524
  ${unreflectedContent}` : freshRecord.bufferedReflection;
4469
4525
  const combinedTokenCount = this.tokenCounter.countObservations(combinedObservations);
4526
+ if (activationMetadata?.triggeredBy === "ttl" || activationMetadata?.triggeredBy === "provider_change") {
4527
+ const unreflectedTailTokens = unreflectedContent ? this.tokenCounter.countObservations(unreflectedContent) : 0;
4528
+ const bufferedReflectionTokens = freshRecord.bufferedReflectionTokens ?? 0;
4529
+ if (unreflectedTailTokens < bufferedReflectionTokens) {
4530
+ omDebug(
4531
+ `[OM:reflect] tryActivateBufferedReflection: suppressing early ${activationMetadata.triggeredBy} activation \u2014 unreflectedTailTokens=${unreflectedTailTokens} < bufferedReflectionTokens=${bufferedReflectionTokens}; keeping buffer for threshold activation`
4532
+ );
4533
+ return { status: "suppressed", reason: "composition" };
4534
+ }
4535
+ const bufferActivation = this.reflectionConfig.bufferActivation;
4536
+ const reflectThreshold = getMaxThreshold(this.getEffectiveReflectionTokens(freshRecord));
4537
+ const regularActivationTarget = reflectThreshold * (1 - bufferActivation);
4538
+ const minCombinedTokens = Math.round(regularActivationTarget * EARLY_ACTIVATION_SIZE_FLOOR_RATIO);
4539
+ if (combinedTokenCount < minCombinedTokens) {
4540
+ omDebug(
4541
+ `[OM:reflect] tryActivateBufferedReflection: suppressing early ${activationMetadata.triggeredBy} activation \u2014 combinedTokenCount=${combinedTokenCount} < minCombinedTokens=${minCombinedTokens} (${EARLY_ACTIVATION_SIZE_FLOOR_RATIO * 100}% of regular activation target ${Math.round(regularActivationTarget)}, threshold=${reflectThreshold}, bufferActivation=${bufferActivation}); keeping buffer for threshold activation`
4542
+ );
4543
+ return { status: "suppressed", reason: "size" };
4544
+ }
4545
+ }
4470
4546
  omDebug(
4471
4547
  `[OM:reflect] tryActivateBufferedReflection: activating, beforeTokens=${beforeTokens}, combinedTokenCount=${combinedTokenCount}, reflectedLineCount=${reflectedLineCount}, unreflectedLines=${unreflectedLines.length}`
4472
4548
  );
@@ -4496,6 +4572,8 @@ ${unreflectedContent}` : freshRecord.bufferedReflection;
4496
4572
  triggeredBy: activationMetadata?.triggeredBy,
4497
4573
  lastActivityAt: activationMetadata?.lastActivityAt,
4498
4574
  ttlExpiredMs: activationMetadata?.ttlExpiredMs,
4575
+ previousModel: activationMetadata?.previousModel,
4576
+ currentModel: activationMetadata?.currentModel,
4499
4577
  config: this.getObservationMarkerConfig(freshRecord)
4500
4578
  });
4501
4579
  void writer.custom(activationMarker).catch(() => {
@@ -4508,7 +4586,7 @@ ${unreflectedContent}` : freshRecord.bufferedReflection;
4508
4586
  );
4509
4587
  }
4510
4588
  BufferingCoordinator.reflectionBufferCycleIds.delete(bufferKey);
4511
- return true;
4589
+ return { status: "activated" };
4512
4590
  }
4513
4591
  /**
4514
4592
  * Check if reflection needed and trigger if so.
@@ -4522,6 +4600,7 @@ ${unreflectedContent}` : freshRecord.bufferedReflection;
4522
4600
  writer,
4523
4601
  abortSignal,
4524
4602
  messageList,
4603
+ currentModel,
4525
4604
  reflectionHooks,
4526
4605
  requestContext,
4527
4606
  observabilityContext,
@@ -4561,13 +4640,19 @@ ${unreflectedContent}` : freshRecord.bufferedReflection;
4561
4640
  const activateAfterIdle = this.reflectionConfig.activateAfterIdle;
4562
4641
  const ttlExpiredMs = activateAfterIdle !== void 0 && lastActivityAt !== void 0 ? Date.now() - lastActivityAt : void 0;
4563
4642
  const ttlExpired = ttlExpiredMs !== void 0 && activateAfterIdle !== void 0 && ttlExpiredMs >= activateAfterIdle;
4564
- if (observationTokens < reflectThreshold && !ttlExpired) {
4643
+ const actorModel = getCurrentModel(currentModel);
4644
+ const lastModel = getLastModelFromMessageList(messageList);
4645
+ const providerChanged = this.reflectionConfig.activateOnProviderChange === true && didProviderChange(actorModel, lastModel);
4646
+ if (observationTokens < reflectThreshold && !ttlExpired && !providerChanged) {
4565
4647
  return;
4566
4648
  }
4649
+ const activationTriggeredBy = observationTokens >= reflectThreshold ? "threshold" : providerChanged ? "provider_change" : "ttl";
4567
4650
  const activationMetadata = {
4568
- triggeredBy: ttlExpired ? "ttl" : "threshold",
4569
- lastActivityAt: ttlExpired ? lastActivityAt : void 0,
4570
- ttlExpiredMs: ttlExpired ? ttlExpiredMs : void 0
4651
+ triggeredBy: activationTriggeredBy,
4652
+ lastActivityAt: activationTriggeredBy === "ttl" ? lastActivityAt : void 0,
4653
+ ttlExpiredMs: activationTriggeredBy === "ttl" ? ttlExpiredMs : void 0,
4654
+ previousModel: activationTriggeredBy === "provider_change" ? lastModel : void 0,
4655
+ currentModel: activationTriggeredBy === "provider_change" ? actorModel : void 0
4571
4656
  };
4572
4657
  if (record.isReflecting) {
4573
4658
  if (isOpActiveInProcess(record.id, "reflecting")) {
@@ -4578,14 +4663,20 @@ ${unreflectedContent}` : freshRecord.bufferedReflection;
4578
4663
  await this.storage.setReflectingFlag(record.id, false);
4579
4664
  }
4580
4665
  if (this.buffering.isAsyncReflectionEnabled()) {
4581
- const activationSuccess = await this.tryActivateBufferedReflection(
4666
+ const activationResult = await this.tryActivateBufferedReflection(
4582
4667
  record,
4583
4668
  lockKey,
4584
4669
  writer,
4585
4670
  messageList,
4586
4671
  activationMetadata
4587
4672
  );
4588
- if (activationSuccess) {
4673
+ if (activationResult.status === "activated") {
4674
+ return;
4675
+ }
4676
+ if (activationResult.status === "suppressed") {
4677
+ omDebug(
4678
+ `[OM:reflect] skipping sync fallback / re-buffer after suppressed early ${activationMetadata.triggeredBy} activation (reason=${activationResult.reason})`
4679
+ );
4589
4680
  return;
4590
4681
  }
4591
4682
  if (this.reflectionConfig.blockAfter && observationTokens >= this.reflectionConfig.blockAfter) {
@@ -6286,6 +6377,36 @@ function getLastActivityFromMessages(messages) {
6286
6377
  }
6287
6378
  return void 0;
6288
6379
  }
6380
+ function formatModelContext2(provider, modelId) {
6381
+ if (provider && modelId) {
6382
+ return `${provider}/${modelId}`;
6383
+ }
6384
+ return modelId;
6385
+ }
6386
+ function getLastModelFromMessages(messages) {
6387
+ if (!messages) return void 0;
6388
+ for (let i = messages.length - 1; i >= 0; i--) {
6389
+ const message = messages[i];
6390
+ if (!message || message.role !== "assistant" || !message.content || typeof message.content === "string") {
6391
+ continue;
6392
+ }
6393
+ for (let j = message.content.parts.length - 1; j >= 0; j--) {
6394
+ const part = message.content.parts[j];
6395
+ if (part?.type === "step-start" && typeof part.model === "string" && part.model.length > 0) {
6396
+ return part.model;
6397
+ }
6398
+ }
6399
+ const metadata = message.content.metadata;
6400
+ const model = formatModelContext2(metadata?.provider, metadata?.modelId);
6401
+ if (model) {
6402
+ return model;
6403
+ }
6404
+ }
6405
+ return void 0;
6406
+ }
6407
+ function getCurrentModel2(model) {
6408
+ return formatModelContext2(model?.provider, model?.modelId);
6409
+ }
6289
6410
  function parseActivationTTL(value, fieldPath) {
6290
6411
  if (value === void 0) {
6291
6412
  return void 0;
@@ -6462,6 +6583,7 @@ Async buffering is enabled by default \u2014 this opt-out is only needed when us
6462
6583
  ),
6463
6584
  bufferActivation: asyncBufferingDisabled ? void 0 : config.observation?.bufferActivation ?? chunkD4J4XPGM_cjs.OBSERVATIONAL_MEMORY_DEFAULTS.observation.bufferActivation,
6464
6585
  activateAfterIdle: parseActivationTTL(config.activateAfterIdle, "activateAfterIdle"),
6586
+ activateOnProviderChange: config.activateOnProviderChange ?? false,
6465
6587
  blockAfter: asyncBufferingDisabled ? void 0 : resolveBlockAfter(
6466
6588
  config.observation?.blockAfter ?? (config.observation?.bufferTokens ?? chunkD4J4XPGM_cjs.OBSERVATIONAL_MEMORY_DEFAULTS.observation.bufferTokens ? 1.2 : void 0),
6467
6589
  config.observation?.messageTokens ?? chunkD4J4XPGM_cjs.OBSERVATIONAL_MEMORY_DEFAULTS.observation.messageTokens
@@ -6481,6 +6603,7 @@ Async buffering is enabled by default \u2014 this opt-out is only needed when us
6481
6603
  providerOptions: config.reflection?.providerOptions ?? chunkD4J4XPGM_cjs.OBSERVATIONAL_MEMORY_DEFAULTS.reflection.providerOptions,
6482
6604
  bufferActivation: asyncBufferingDisabled ? void 0 : config?.reflection?.bufferActivation ?? chunkD4J4XPGM_cjs.OBSERVATIONAL_MEMORY_DEFAULTS.reflection.bufferActivation,
6483
6605
  activateAfterIdle: parseActivationTTL(config.activateAfterIdle, "activateAfterIdle"),
6606
+ activateOnProviderChange: config.activateOnProviderChange ?? false,
6484
6607
  blockAfter: asyncBufferingDisabled ? void 0 : resolveBlockAfter(
6485
6608
  config.reflection?.blockAfter ?? (config.reflection?.bufferActivation ?? chunkD4J4XPGM_cjs.OBSERVATIONAL_MEMORY_DEFAULTS.reflection.bufferActivation ? 1.2 : void 0),
6486
6609
  config.reflection?.observationTokens ?? chunkD4J4XPGM_cjs.OBSERVATIONAL_MEMORY_DEFAULTS.reflection.observationTokens
@@ -8377,6 +8500,8 @@ ${grouped}` : grouped;
8377
8500
  let activationTriggeredBy = "threshold";
8378
8501
  let activationLastActivityAt;
8379
8502
  let activateAfterIdleExpiredMs;
8503
+ let previousModel;
8504
+ let currentModel;
8380
8505
  if (opts.checkThreshold) {
8381
8506
  const thresholdMessages = opts.messages ?? await this.loadMessagesFromStorage(
8382
8507
  threadId,
@@ -8387,7 +8512,14 @@ ${grouped}` : grouped;
8387
8512
  const lastActivityAt = getLastActivityFromMessages(thresholdMessages);
8388
8513
  const ttlExpiredMs = activateAfterIdle !== void 0 && lastActivityAt !== void 0 ? Date.now() - lastActivityAt : void 0;
8389
8514
  const ttlExpired = ttlExpiredMs !== void 0 && activateAfterIdle !== void 0 && ttlExpiredMs >= activateAfterIdle;
8390
- if (ttlExpired) {
8515
+ const actorModel = getCurrentModel2(opts.currentModel);
8516
+ const lastModel = getLastModelFromMessages(thresholdMessages);
8517
+ const providerChanged = this.observationConfig.activateOnProviderChange === true && didProviderChange(actorModel, lastModel);
8518
+ if (providerChanged) {
8519
+ activationTriggeredBy = "provider_change";
8520
+ previousModel = lastModel;
8521
+ currentModel = actorModel;
8522
+ } else if (ttlExpired) {
8391
8523
  activationTriggeredBy = "ttl";
8392
8524
  activationLastActivityAt = lastActivityAt;
8393
8525
  activateAfterIdleExpiredMs = ttlExpiredMs;
@@ -8456,6 +8588,8 @@ ${grouped}` : grouped;
8456
8588
  triggeredBy: activationTriggeredBy,
8457
8589
  lastActivityAt: activationLastActivityAt,
8458
8590
  ttlExpiredMs: activateAfterIdleExpiredMs,
8591
+ previousModel,
8592
+ currentModel,
8459
8593
  config: this.getObservationMarkerConfig()
8460
8594
  });
8461
8595
  void opts.writer.custom(activationMarker).catch(() => {
@@ -8813,6 +8947,7 @@ var ObservationalMemoryProcessor = class {
8813
8947
  const observabilityContext = getOmObservabilityContext(args);
8814
8948
  state.__omObservabilityContext = observabilityContext;
8815
8949
  this.turn.observabilityContext = observabilityContext;
8950
+ this.turn.actorModelContext = actorModelContext;
8816
8951
  {
8817
8952
  const step = this.turn.step(stepNumber);
8818
8953
  let ctx;
@@ -8971,5 +9106,5 @@ exports.stripEphemeralAnchorIds = stripEphemeralAnchorIds;
8971
9106
  exports.stripObservationGroups = stripObservationGroups;
8972
9107
  exports.truncateStringByTokens = truncateStringByTokens;
8973
9108
  exports.wrapInObservationGroup = wrapInObservationGroup;
8974
- //# sourceMappingURL=chunk-3NECGYWZ.cjs.map
8975
- //# sourceMappingURL=chunk-3NECGYWZ.cjs.map
9109
+ //# sourceMappingURL=chunk-EOHXGI4J.cjs.map
9110
+ //# sourceMappingURL=chunk-EOHXGI4J.cjs.map