@elizaos/plugin-memory 1.0.5 → 2.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/README.md +339 -207
  2. package/dist/browser/index.browser.js +348 -151
  3. package/dist/browser/index.browser.js.map +23 -13
  4. package/dist/cjs/index.node.cjs +2200 -1047
  5. package/dist/cjs/index.node.js.map +23 -13
  6. package/dist/evaluators/consolidation.d.ts +19 -0
  7. package/dist/evaluators/summarization.d.ts +5 -24
  8. package/dist/index.d.ts +152 -30
  9. package/dist/node/index.node.js +2242 -1084
  10. package/dist/node/index.node.js.map +23 -13
  11. package/dist/prompts/consolidation.d.ts +35 -0
  12. package/dist/prompts/summarization.d.ts +25 -0
  13. package/dist/providers/action-results.d.ts +2 -0
  14. package/dist/providers/long-term-memory.d.ts +18 -11
  15. package/dist/providers/recent-conversation-summary.d.ts +2 -0
  16. package/dist/repositories/conversation-summary.d.ts +33 -0
  17. package/dist/repositories/index.d.ts +17 -0
  18. package/dist/repositories/long-term-memory.d.ts +53 -0
  19. package/dist/schemas/conversation-summaries.d.ts +494 -0
  20. package/dist/schemas/index.d.ts +16 -6
  21. package/dist/schemas/long-term-memories.d.ts +308 -70
  22. package/dist/services/memory-service.d.ts +95 -51
  23. package/dist/types/index.d.ts +299 -55
  24. package/dist/utils/db-mapping.d.ts +20 -0
  25. package/dist/utils/decay-scoring.d.ts +41 -0
  26. package/dist/utils/embedding.d.ts +21 -0
  27. package/dist/utils/formatting.d.ts +17 -0
  28. package/dist/utils/index.d.ts +17 -0
  29. package/dist/utils/search-merging.d.ts +18 -0
  30. package/dist/utils/token-counter.d.ts +53 -0
  31. package/package.json +83 -1
  32. package/dist/actions/remember.d.ts +0 -11
  33. package/dist/evaluators/long-term-extraction.d.ts +0 -8
  34. package/dist/providers/short-term-memory.d.ts +0 -19
  35. package/dist/schemas/memory-access-logs.d.ts +0 -154
  36. package/dist/schemas/session-summaries.d.ts +0 -283
@@ -0,0 +1,19 @@
1
+ import { type Evaluator } from '@elizaos/core';
2
+ /**
3
+ * Memory Consolidation Evaluator
4
+ *
5
+ * Research: Section 2.3 "Consolidation Pipeline"
6
+ *
7
+ * This evaluator implements the full consolidation flow:
8
+ * 1. Buffers conversation messages
9
+ * 2. Validates when threshold is reached
10
+ * 3. Extracts persistent facts using LLM
11
+ * 4. Stores memories via service
12
+ *
13
+ * Architecture Design:
14
+ * - Evaluator owns the consolidation logic and prompts
15
+ * - Service handles storage/retrieval only
16
+ * - System prompt is temporarily swapped during consolidation
17
+ * - Non-blocking: Doesn't wait for consolidation to complete
18
+ */
19
+ export declare const consolidationEvaluator: Evaluator;
@@ -1,30 +1,11 @@
1
1
  import { type Evaluator } from '@elizaos/core';
2
2
  /**
3
- * Short-term Memory Summarization Evaluator
3
+ * Hierarchical Summarization Evaluator
4
4
  *
5
- * Automatically generates and updates conversation summaries when conversations
6
- * exceed the configured threshold (default: 16 messages).
5
+ * Research: Section 5.1.2 "Hierarchical Episodic Summarization"
7
6
  *
8
- * BEHAVIOR:
9
- * - Monitors message count per room
10
- * - Creates initial summary when count >= threshold (e.g., 16 messages)
11
- * - Updates summary at regular intervals (e.g., every 10 new messages)
12
- * - Condenses existing summary with new messages to stay under token limit
13
- * - Tracks offset to avoid re-processing messages
14
- * - Caps new messages per update to prevent context bloat (default: 20)
15
- *
16
- * OPTIMIZATION:
17
- * - Only triggers LLM when crossing threshold or interval boundaries
18
- * - Processes only NEW messages since last update
19
- * - Maintains rolling summary (fixed size, not ever-growing)
20
- * - LLM is instructed to merge and condense, keeping under 2500 tokens
21
- *
22
- * INTEGRATION:
23
- * Works with shortTermMemoryProvider which:
24
- * - Shows full conversation when < threshold (no summarization needed)
25
- * - Shows summaries + recent messages when >= threshold (optimized context)
26
- *
27
- * This creates an adaptive system that starts with full context and seamlessly
28
- * transitions to efficient summarization as conversations grow.
7
+ * Implements recursive, multi-level summarization for token-efficient conversation history:
8
+ * - Level 1: Summarizes 50-100 messages into narrative paragraphs
9
+ * - Level 2+: Summarizes lower-level summaries into higher-level abstractions
29
10
  */
30
11
  export declare const summarizationEvaluator: Evaluator;
package/dist/index.d.ts CHANGED
@@ -1,38 +1,160 @@
1
1
  import type { Plugin } from '@elizaos/core';
2
+ import { longTermMemoryProvider } from './providers/long-term-memory';
3
+ import { recentContextProvider } from './providers/recent-conversation-summary';
4
+ import { actionResultsProvider } from './providers/action-results';
2
5
  export * from './types/index';
3
6
  export * from './schemas/index';
7
+ export * from './prompts/consolidation';
8
+ export * from './prompts/summarization';
9
+ export * from './utils/index';
10
+ export * from './repositories/index';
4
11
  export { MemoryService } from './services/memory-service';
12
+ export { longTermMemoryProvider, recentContextProvider, actionResultsProvider };
5
13
  /**
6
- * Memory Plugin
7
- *
8
- * Advanced memory management plugin that provides:
9
- *
10
- * **Short-term Memory (Conversation Summarization)**:
11
- * - Automatically summarizes long conversations to reduce context size
12
- * - Retains recent messages while archiving older ones as summaries
13
- * - Configurable thresholds for when to summarize
14
- *
15
- * **Long-term Memory (Persistent Facts)**:
16
- * - Extracts and stores persistent facts about users
17
- * - Categorizes information (identity, expertise, preferences, etc.)
18
- * - Provides context-aware user profiles across all conversations
19
- *
20
- * **Components**:
21
- * - `MemoryService`: Manages all memory operations
22
- * - Evaluators: Process conversations to create summaries and extract facts
23
- * - Providers: Inject memory context into conversations
24
- * - Actions: Allow manual memory storage via user commands
25
- *
26
- * **Configuration** (via environment variables):
27
- * - `MEMORY_SUMMARIZATION_THRESHOLD`: Messages before summarization (default: 50)
28
- * - `MEMORY_RETAIN_RECENT`: Recent messages to keep (default: 10)
29
- * - `MEMORY_LONG_TERM_ENABLED`: Enable long-term extraction (default: true)
30
- * - `MEMORY_CONFIDENCE_THRESHOLD`: Minimum confidence to store (default: 0.7)
31
- *
32
- * **Database Tables**:
33
- * - `long_term_memories`: Persistent user facts
34
- * - `session_summaries`: Conversation summaries
35
- * - `memory_access_logs`: Optional usage tracking
14
+ * Memory Plugin - State-of-the-Art Cognitive Memory System
15
+ *
16
+ * Based on comprehensive research analysis (refactor.md), this plugin implements
17
+ * a sophisticated memory architecture that mirrors human cognitive processes.
18
+ *
19
+ * ## Core Concepts (Research-Based)
20
+ *
21
+ * ### 1. Three-Tier Memory Taxonomy
22
+ * Research: Section 1.1 "The Cognitive Hierarchy"
23
+ * - **EPISODIC**: Specific events anchored in time and place (conversation logs)
24
+ * - **SEMANTIC**: Facts and knowledge detached from episodes (user preferences, identity)
25
+ * - **PROCEDURAL**: Skills and successful tool execution patterns ("know-how")
26
+ *
27
+ * ### 2. Memory Consolidation Pipeline
28
+ * Research: Section 2 "Architecture for Extraction & Taxonomy"
29
+ * - Buffers conversation messages asynchronously
30
+ * - Triggers consolidation every N messages (configurable)
31
+ * - Extracts persistent facts using LLM with specialized prompts
32
+ * - Filters transient intents (e.g., "draw a cat") from persistent facts (e.g., "user likes cats")
33
+ *
34
+ * ### 3. Hybrid Retrieval (Vector + BM25)
35
+ * Research: Section 3 "Retrieval Augmented Generation Strategy"
36
+ * - **Vector Search**: Semantic similarity using embeddings
37
+ * - **BM25**: Keyword-based search for exact matches
38
+ * - Combined with exponential decay for time-weighted scoring
39
+ *
40
+ * ### 4. Exponential Decay & Forgetting
41
+ * Research: Section 4.2 "Mathematical Model for Memory Decay"
42
+ * - Memories fade over time using the Ebbinghaus forgetting curve
43
+ * - Reinforcement: Frequently accessed memories decay slower
44
+ * - Configurable decay rates per memory type
45
+ *
46
+ * ### 5. Contextual Embeddings
47
+ * Research: Section 3.2.1 "Solving the Pronoun Problem"
48
+ * - Enriches content with context before embedding
49
+ * - Example: "It was terrible" → "[User movie preference]: The movie Inception was terrible"
50
+ * - Enables superior retrieval accuracy
51
+ *
52
+ * ### 6. Contradiction Detection & Resolution
53
+ * Research: Section 4.3 "Handling Contradictions"
54
+ * - Detects when new facts contradict existing ones
55
+ * - Uses soft-delete (isActive flag) to supersede old memories
56
+ * - Maintains provenance chain (supersedesId)
57
+ *
58
+ * ## Components
59
+ *
60
+ * - **MemoryService**: Core service managing all memory operations
61
+ * - **consolidationEvaluator**: Buffers messages and triggers fact extraction
62
+ * - **summarizationEvaluator**: Creates hierarchical conversation summaries
63
+ * - **longTermMemoryProvider** (LONG_TERM_MEMORY): User knowledge and facts
64
+ * - **recentContextProvider** (RECENT_CONVERSATION_SUMMARY): Conversational history with summaries
65
+ * - **actionResultsProvider** (ACTION_RESULTS): Recent action executions and tool memory
66
+ *
67
+ * ## Configuration (Environment Variables)
68
+ *
69
+ * All settings use `runtime.getSetting()` pattern:
70
+ *
71
+ * - `MEMORY_CONSOLIDATION_THRESHOLD`: Messages before consolidation (default: 12)
72
+ * - `MEMORY_MIN_CONFIDENCE`: Minimum confidence to store (default: 0.7)
73
+ * - `MEMORY_ENABLE_VECTOR_SEARCH`: Enable semantic search (default: true)
74
+ * - `MEMORY_ENABLE_BM25`: Enable keyword search (default: true)
75
+ * - `MEMORY_RETRIEVAL_LIMIT`: Max memories to retrieve (default: 5)
76
+ * - `MEMORY_TOKEN_BUDGET`: Token budget for memory context (default: 1000)
77
+ *
78
+ * ### Hierarchical Summarization
79
+ *
80
+ * - `MEMORY_SUMMARY_ENABLED`: Enable hierarchical summarization (default: true)
81
+ * - `MEMORY_MESSAGES_PER_SUMMARY`: Messages per Level 1 summary (default: 7)
82
+ * - `MEMORY_SUMMARIES_PER_LEVEL`: Summaries before next level (default: 5)
83
+ * - `MEMORY_SUMMARY_MAX_DEPTH`: Maximum hierarchy depth (default: 3)
84
+ * - `MEMORY_SUMMARY_TOKEN_BUDGET`: Token budget for summaries (default: 500)
85
+ * - `CONTEXT_OVERLAP_USER_MESSAGES`: User messages overlap after summary (default: 2)
86
+ *
87
+ * ## Database Tables
88
+ *
89
+ * - `long_term_memories`: Unified storage for all memory types
90
+ * - `conversation_summaries`: Hierarchical conversation summaries
91
+ *
92
+ * ## Design Decisions
93
+ *
94
+ * ### Why Single Unified Table?
95
+ * Research: Section 4.1 "Data Structure & Schema"
96
+ * - Easier cross-type queries
97
+ * - Unified retrieval logic
98
+ * - Flexible type reassignment (episodic can become semantic over time)
99
+ * - Simpler codebase
100
+ *
101
+ * ### Why Async Consolidation?
102
+ * Research: Section 2.3 "Consolidation Pipeline"
103
+ * - Doesn't block conversation flow
104
+ * - Allows for deeper analysis without latency
105
+ * - Mirrors biological "sleep" consolidation
106
+ *
107
+ * ### Why Contextual Embeddings?
108
+ * Research: Section 3.2.1 "Contextual Retrieval"
109
+ * - Solves pronoun ambiguity ("it", "that")
110
+ * - Reduces retrieval failure rate by 49% (Anthropic contextual retrieval research)
111
+ * - Self-contained memory chunks
112
+ *
113
+ * ## Usage Examples
114
+ *
115
+ * ### Accessing the Service
116
+ * ```typescript
117
+ * const memoryService = runtime.getService<MemoryService>('memory');
118
+ * ```
119
+ *
120
+ * ### Searching Memories
121
+ * ```typescript
122
+ * const memories = await memoryService.searchLongTermMemories({
123
+ * entityId: userId,
124
+ * query: "What does the user like?",
125
+ * type: MemoryType.SEMANTIC,
126
+ * limit: 5
127
+ * });
128
+ * ```
129
+ *
130
+ * ### Storing a Memory Manually
131
+ * ```typescript
132
+ * await memoryService.storeLongTermMemory({
133
+ * agentId: runtime.agentId,
134
+ * entityId: userId,
135
+ * type: MemoryType.SEMANTIC,
136
+ * content: "User is allergic to peanuts",
137
+ * embeddingContext: "[User health information]: User is allergic to peanuts",
138
+ * confidence: 1.0,
139
+ * decayRate: 0.0, // Core fact, never decays
140
+ * decayFunction: DecayFunction.NONE,
141
+ * source: { authorId: userId },
142
+ * metadata: { category: "health" }
143
+ * });
144
+ * ```
145
+ *
146
+ * ## Research References
147
+ *
148
+ * This implementation is based on the comprehensive research document (refactor.md)
149
+ * which analyzed 40+ academic papers and industry implementations to identify
150
+ * state-of-the-art techniques for agentic memory systems.
151
+ *
152
+ * Key research areas:
153
+ * - Cognitive science (memory hierarchies, consolidation)
154
+ * - RAG optimization (vector search, BM25, contextual retrieval)
155
+ * - Forgetting curves (Ebbinghaus, exponential decay)
156
+ * - LLM prompting (Chain-of-Thought, extraction patterns)
157
+ *
36
158
  */
37
159
  export declare const memoryPlugin: Plugin;
38
160
  export default memoryPlugin;