@mastra/memory 1.0.1 → 1.1.0-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/CHANGELOG.md +60 -0
  2. package/dist/chunk-6TXUWFIU.js +3188 -0
  3. package/dist/chunk-6TXUWFIU.js.map +1 -0
  4. package/dist/chunk-FQJWVCDF.cjs +3205 -0
  5. package/dist/chunk-FQJWVCDF.cjs.map +1 -0
  6. package/dist/docs/README.md +1 -1
  7. package/dist/docs/SKILL.md +12 -1
  8. package/dist/docs/SOURCE_MAP.json +62 -2
  9. package/dist/docs/memory/02-storage.md +10 -0
  10. package/dist/index.cjs +108 -5
  11. package/dist/index.cjs.map +1 -1
  12. package/dist/index.d.ts +62 -1
  13. package/dist/index.d.ts.map +1 -1
  14. package/dist/index.js +108 -5
  15. package/dist/index.js.map +1 -1
  16. package/dist/observational-memory-3Q42SITP.cjs +52 -0
  17. package/dist/observational-memory-3Q42SITP.cjs.map +1 -0
  18. package/dist/observational-memory-VXLHOSDZ.js +3 -0
  19. package/dist/observational-memory-VXLHOSDZ.js.map +1 -0
  20. package/dist/processors/index.cjs +52 -0
  21. package/dist/processors/index.cjs.map +1 -0
  22. package/dist/processors/index.d.ts +2 -0
  23. package/dist/processors/index.d.ts.map +1 -0
  24. package/dist/processors/index.js +3 -0
  25. package/dist/processors/index.js.map +1 -0
  26. package/dist/processors/observational-memory/index.d.ts +18 -0
  27. package/dist/processors/observational-memory/index.d.ts.map +1 -0
  28. package/dist/processors/observational-memory/observational-memory.d.ts +579 -0
  29. package/dist/processors/observational-memory/observational-memory.d.ts.map +1 -0
  30. package/dist/processors/observational-memory/observer-agent.d.ts +117 -0
  31. package/dist/processors/observational-memory/observer-agent.d.ts.map +1 -0
  32. package/dist/processors/observational-memory/reflector-agent.d.ts +46 -0
  33. package/dist/processors/observational-memory/reflector-agent.d.ts.map +1 -0
  34. package/dist/processors/observational-memory/token-counter.d.ts +30 -0
  35. package/dist/processors/observational-memory/token-counter.d.ts.map +1 -0
  36. package/dist/processors/observational-memory/types.d.ts +288 -0
  37. package/dist/processors/observational-memory/types.d.ts.map +1 -0
  38. package/package.json +15 -5
@@ -33,4 +33,4 @@ docs/
33
33
  ## Version
34
34
 
35
35
  Package: @mastra/memory
36
- Version: 1.0.1
36
+ Version: 1.1.0-alpha.1
@@ -5,7 +5,7 @@ description: Documentation for @mastra/memory. Includes links to type definition
5
5
 
6
6
  # @mastra/memory Documentation
7
7
 
8
- > **Version**: 1.0.1
8
+ > **Version**: 1.1.0-alpha.1
9
9
  > **Package**: @mastra/memory
10
10
 
11
11
  ## Quick Navigation
@@ -23,6 +23,17 @@ Each export maps to:
23
23
 
24
24
  ## Top Exports
25
25
 
26
+ - OBSERVATIONAL_MEMORY_DEFAULTS: dist/processors/index.d.ts
27
+ - OBSERVER_SYSTEM_PROMPT: dist/processors/index.d.ts
28
+ - ObservationalMemory: dist/processors/index.d.ts
29
+ - TokenCounter: dist/processors/index.d.ts
30
+ - buildObserverPrompt: dist/processors/index.d.ts
31
+ - buildObserverSystemPrompt: dist/processors/index.d.ts
32
+ - extractCurrentTask: dist/processors/index.d.ts
33
+ - formatMessagesForObserver: dist/processors/index.d.ts
34
+ - hasCurrentTaskSection: dist/processors/index.d.ts
35
+ - optimizeObservationsForContext: dist/processors/index.d.ts
36
+ - parseObserverOutput: dist/processors/index.d.ts
26
37
  - extractWorkingMemoryContent: dist/index.d.ts
27
38
  - extractWorkingMemoryTags: dist/index.d.ts
28
39
  - removeWorkingMemoryTags: dist/index.d.ts
@@ -1,7 +1,60 @@
1
1
  {
2
- "version": "1.0.1",
2
+ "version": "1.1.0-alpha.1",
3
3
  "package": "@mastra/memory",
4
4
  "exports": {
5
+ "OBSERVATIONAL_MEMORY_DEFAULTS": {
6
+ "types": "dist/processors/index.d.ts",
7
+ "implementation": "dist/chunk-6TXUWFIU.js"
8
+ },
9
+ "OBSERVER_SYSTEM_PROMPT": {
10
+ "types": "dist/processors/index.d.ts",
11
+ "implementation": "dist/chunk-6TXUWFIU.js"
12
+ },
13
+ "ObservationalMemory": {
14
+ "types": "dist/processors/index.d.ts",
15
+ "implementation": "dist/chunk-6TXUWFIU.js",
16
+ "line": 1206
17
+ },
18
+ "TokenCounter": {
19
+ "types": "dist/processors/index.d.ts",
20
+ "implementation": "dist/chunk-6TXUWFIU.js",
21
+ "line": 925
22
+ },
23
+ "buildObserverPrompt": {
24
+ "types": "dist/processors/index.d.ts",
25
+ "implementation": "dist/chunk-6TXUWFIU.js",
26
+ "line": 644
27
+ },
28
+ "buildObserverSystemPrompt": {
29
+ "types": "dist/processors/index.d.ts",
30
+ "implementation": "dist/chunk-6TXUWFIU.js",
31
+ "line": 403
32
+ },
33
+ "extractCurrentTask": {
34
+ "types": "dist/processors/index.d.ts",
35
+ "implementation": "dist/chunk-6TXUWFIU.js",
36
+ "line": 725
37
+ },
38
+ "formatMessagesForObserver": {
39
+ "types": "dist/processors/index.d.ts",
40
+ "implementation": "dist/chunk-6TXUWFIU.js",
41
+ "line": 490
42
+ },
43
+ "hasCurrentTaskSection": {
44
+ "types": "dist/processors/index.d.ts",
45
+ "implementation": "dist/chunk-6TXUWFIU.js",
46
+ "line": 713
47
+ },
48
+ "optimizeObservationsForContext": {
49
+ "types": "dist/processors/index.d.ts",
50
+ "implementation": "dist/chunk-6TXUWFIU.js",
51
+ "line": 736
52
+ },
53
+ "parseObserverOutput": {
54
+ "types": "dist/processors/index.d.ts",
55
+ "implementation": "dist/chunk-6TXUWFIU.js",
56
+ "line": 670
57
+ },
5
58
  "extractWorkingMemoryContent": {
6
59
  "types": "dist/index.d.ts",
7
60
  "implementation": "dist/memory"
@@ -27,5 +80,12 @@
27
80
  "implementation": "dist/processors"
28
81
  }
29
82
  },
30
- "modules": {}
83
+ "modules": {
84
+ "processors": {
85
+ "index": "dist/processors/index.js",
86
+ "chunks": [
87
+ "chunk-6TXUWFIU.js"
88
+ ]
89
+ }
90
+ }
31
91
  }
@@ -15,6 +15,16 @@ export const mastra = new Mastra({
15
15
  }),
16
16
  });
17
17
  ```
18
+
19
+ > **Sharing the database with Mastra Studio**
20
+ When running `mastra dev` alongside your application (e.g., Next.js), use an absolute path to ensure both processes access the same database:
21
+
22
+ ```typescript
23
+ url: "file:/absolute/path/to/your/project/mastra.db"
24
+ ```
25
+
26
+ Relative paths like `file:./mastra.db` resolve based on each process's working directory, which may differ.
27
+
18
28
  This configures instance-level storage, which all agents share by default. You can also configure [agent-level storage](#agent-level-storage) for isolated data boundaries.
19
29
 
20
30
  Mastra automatically creates the necessary tables on first interaction. See the [core schema](https://mastra.ai/reference/storage/overview#core-schema) for details on what gets created, including tables for messages, threads, resources, workflows, traces, and evaluation datasets.
package/dist/index.cjs CHANGED
@@ -6,6 +6,7 @@ var zod = require('zod');
6
6
  var z4 = require('zod/v4');
7
7
  var v3 = require('zod/v3');
8
8
  var agent = require('@mastra/core/agent');
9
+ var features = require('@mastra/core/features');
9
10
  var memory = require('@mastra/core/memory');
10
11
  var utils = require('@mastra/core/utils');
11
12
  var zodToJson = require('@mastra/schema-compat/zod-to-json');
@@ -14599,6 +14600,12 @@ var __experimental_updateWorkingMemoryToolVNext = (config) => {
14599
14600
  }
14600
14601
  });
14601
14602
  };
14603
+ function normalizeObservationalMemoryConfig(config) {
14604
+ if (config === true) return {};
14605
+ if (config === false || config === void 0) return void 0;
14606
+ if (typeof config === "object" && config.enabled === false) return void 0;
14607
+ return config;
14608
+ }
14602
14609
  var CHARS_PER_TOKEN = 4;
14603
14610
  var DEFAULT_MESSAGE_RANGE = { before: 1, after: 1 };
14604
14611
  var DEFAULT_TOP_K = 4;
@@ -14613,7 +14620,8 @@ var Memory = class extends memory.MastraMemory {
14613
14620
  // and someone bumps @mastra/memory without bumping @mastra/core the defaults wouldn't exist yet
14614
14621
  enabled: false,
14615
14622
  template: this.defaultWorkingMemoryTemplate
14616
- }
14623
+ },
14624
+ observationalMemory: config.options?.observationalMemory
14617
14625
  });
14618
14626
  this.threadConfig = mergedConfig;
14619
14627
  }
@@ -14627,6 +14635,10 @@ var Memory = class extends memory.MastraMemory {
14627
14635
  }
14628
14636
  return store;
14629
14637
  }
14638
+ async listMessagesByResourceId(args) {
14639
+ const memoryStore = await this.getMemoryStore();
14640
+ return memoryStore.listMessagesByResourceId(args);
14641
+ }
14630
14642
  async validateThreadIsOwnedByResource(threadId, resourceId, config) {
14631
14643
  const resourceScope = typeof config?.semanticRecall === "object" && config?.semanticRecall?.scope !== `thread` || config.semanticRecall === true;
14632
14644
  const thread = await this.getThreadById({ threadId });
@@ -14671,8 +14683,11 @@ var Memory = class extends memory.MastraMemory {
14671
14683
  `Memory error: Resource-scoped semantic recall is enabled but no resourceId was provided. Either provide a resourceId or explicitly set semanticRecall.scope to 'thread'.`
14672
14684
  );
14673
14685
  }
14686
+ let usage;
14674
14687
  if (config?.semanticRecall && vectorSearchString && this.vector) {
14675
- const { embeddings, dimension } = await this.embedMessageContent(vectorSearchString);
14688
+ const result = await this.embedMessageContent(vectorSearchString);
14689
+ usage = result.usage;
14690
+ const { embeddings, dimension } = result;
14676
14691
  const { indexName } = await this.createEmbeddingIndex(dimension, config);
14677
14692
  await Promise.all(
14678
14693
  embeddings.map(async (embedding) => {
@@ -14716,7 +14731,7 @@ var Memory = class extends memory.MastraMemory {
14716
14731
  const rawMessages = shouldGetNewestAndReverse ? paginatedResult.messages.reverse() : paginatedResult.messages;
14717
14732
  const list = new agent.MessageList({ threadId, resourceId }).add(rawMessages, "memory");
14718
14733
  const messages = list.get.all.db();
14719
- return { messages };
14734
+ return { messages, usage };
14720
14735
  }
14721
14736
  async getThreadById({ threadId }) {
14722
14737
  const memoryStore = await this.getMemoryStore();
@@ -14963,10 +14978,11 @@ ${workingMemory}`;
14963
14978
  ...this.embedderOptions || {}
14964
14979
  });
14965
14980
  if (isFastEmbed && !this.firstEmbed) this.firstEmbed = promise;
14966
- const { embeddings } = await promise;
14981
+ const { embeddings, usage } = await promise;
14967
14982
  const result = {
14968
14983
  embeddings,
14969
14984
  chunks,
14985
+ usage,
14970
14986
  dimension: embeddings[0]?.length
14971
14987
  };
14972
14988
  this.embeddingCache.set(key, result);
@@ -14987,6 +15003,7 @@ ${workingMemory}`;
14987
15003
  const result = await memoryStore.saveMessages({
14988
15004
  messages: dbMessages
14989
15005
  });
15006
+ let totalTokens = 0;
14990
15007
  if (this.vector && config.semanticRecall) {
14991
15008
  const embeddingData = [];
14992
15009
  let dimension;
@@ -15002,6 +15019,9 @@ ${workingMemory}`;
15002
15019
  if (!textForEmbedding) return;
15003
15020
  const result2 = await this.embedMessageContent(textForEmbedding);
15004
15021
  dimension = result2.dimension;
15022
+ if (result2.usage?.tokens) {
15023
+ totalTokens += result2.usage.tokens;
15024
+ }
15005
15025
  embeddingData.push({
15006
15026
  embeddings: result2.embeddings,
15007
15027
  metadata: result2.chunks.map(() => ({
@@ -15030,7 +15050,7 @@ ${workingMemory}`;
15030
15050
  });
15031
15051
  }
15032
15052
  }
15033
- return result;
15053
+ return { ...result, usage: totalTokens > 0 ? { tokens: totalTokens } : void 0 };
15034
15054
  }
15035
15055
  updateMessageToHideWorkingMemoryV2(message) {
15036
15056
  const newMessage = { ...message };
@@ -15636,6 +15656,89 @@ Notes:
15636
15656
  }
15637
15657
  return history;
15638
15658
  }
15659
+ /**
15660
+ * Get input processors for this memory instance.
15661
+ * Extends the base implementation to add ObservationalMemory processor when configured.
15662
+ *
15663
+ * @param configuredProcessors - Processors already configured by the user (for deduplication)
15664
+ * @param context - Request context for runtime configuration
15665
+ * @returns Array of input processors configured for this memory instance
15666
+ */
15667
+ async getInputProcessors(configuredProcessors = [], context) {
15668
+ const processors = await super.getInputProcessors(configuredProcessors, context);
15669
+ const om = await this.createOMProcessor(configuredProcessors, context);
15670
+ if (om) {
15671
+ processors.push(om);
15672
+ }
15673
+ return processors;
15674
+ }
15675
+ /**
15676
+ * Extends the base implementation to add ObservationalMemory as an output processor.
15677
+ * OM needs processOutputResult to save messages at the end of the agent turn,
15678
+ * even when the observation threshold was never reached during the loop.
15679
+ */
15680
+ async getOutputProcessors(configuredProcessors = [], context) {
15681
+ const processors = await super.getOutputProcessors(configuredProcessors, context);
15682
+ const om = await this.createOMProcessor(configuredProcessors, context);
15683
+ if (om) {
15684
+ processors.push(om);
15685
+ }
15686
+ return processors;
15687
+ }
15688
+ /**
15689
+ * Creates an ObservationalMemory processor instance if configured and not already present.
15690
+ * A new instance is created per call — processorStates (e.g., sealedIds) are shared
15691
+ * via the ProcessorRunner's state map keyed by processor ID, not by instance identity.
15692
+ */
15693
+ async createOMProcessor(configuredProcessors = [], context) {
15694
+ const hasObservationalMemory = configuredProcessors.some(
15695
+ (p) => !("workflow" in p) && p.id === "observational-memory"
15696
+ );
15697
+ const memoryContext = context?.get("MastraMemory");
15698
+ const runtimeMemoryConfig = memoryContext?.memoryConfig;
15699
+ const effectiveConfig = runtimeMemoryConfig ? this.getMergedThreadConfig(runtimeMemoryConfig) : this.threadConfig;
15700
+ const omConfig = normalizeObservationalMemoryConfig(effectiveConfig.observationalMemory);
15701
+ if (!omConfig || hasObservationalMemory) {
15702
+ return null;
15703
+ }
15704
+ const coreSupportsOM = features.coreFeatures.has("observationalMemory");
15705
+ if (!coreSupportsOM) {
15706
+ throw new Error(
15707
+ "Observational memory is enabled but the installed version of @mastra/core does not support it. Please upgrade @mastra/core to a version that includes observational memory support."
15708
+ );
15709
+ }
15710
+ const memoryStore = await this.storage.getStore("memory");
15711
+ if (!memoryStore) {
15712
+ throw new Error(
15713
+ "Using Mastra Memory observational memory requires a storage adapter but no attached adapter was detected."
15714
+ );
15715
+ }
15716
+ if (!memoryStore.supportsObservationalMemory) {
15717
+ throw new Error(
15718
+ `Observational memory is enabled but the storage adapter (${memoryStore.constructor.name}) does not support it. If you're using @mastra/libsql, @mastra/pg, or @mastra/mongodb, upgrade to the latest version. Otherwise, use one of those adapters or disable observational memory.`
15719
+ );
15720
+ }
15721
+ const { ObservationalMemory } = await import('./observational-memory-3Q42SITP.cjs');
15722
+ return new ObservationalMemory({
15723
+ storage: memoryStore,
15724
+ scope: omConfig.scope,
15725
+ shareTokenBudget: omConfig.shareTokenBudget,
15726
+ model: omConfig.model,
15727
+ observation: omConfig.observation ? {
15728
+ model: omConfig.observation.model,
15729
+ messageTokens: omConfig.observation.messageTokens,
15730
+ modelSettings: omConfig.observation.modelSettings,
15731
+ maxTokensPerBatch: omConfig.observation.maxTokensPerBatch,
15732
+ providerOptions: omConfig.observation.providerOptions
15733
+ } : void 0,
15734
+ reflection: omConfig.reflection ? {
15735
+ model: omConfig.reflection.model,
15736
+ observationTokens: omConfig.reflection.observationTokens,
15737
+ modelSettings: omConfig.reflection.modelSettings,
15738
+ providerOptions: omConfig.reflection.providerOptions
15739
+ } : void 0
15740
+ });
15741
+ }
15639
15742
  };
15640
15743
 
15641
15744
  Object.defineProperty(exports, "extractWorkingMemoryContent", {