@elizaos/plugin-memory 1.0.5 → 1.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +58 -34
- package/dist/browser/index.browser.js +133 -75
- package/dist/browser/index.browser.js.map +10 -9
- package/dist/cjs/index.node.cjs +379 -332
- package/dist/cjs/index.node.js.map +10 -9
- package/dist/index.d.ts +3 -0
- package/dist/node/index.node.js +390 -340
- package/dist/node/index.node.js.map +10 -9
- package/dist/providers/context-summary.d.ts +12 -0
- package/dist/providers/recent-messages.d.ts +15 -0
- package/dist/types/index.d.ts +9 -10
- package/package.json +1 -1
- package/dist/providers/short-term-memory.d.ts +0 -19
package/README.md
CHANGED
@@ -14,21 +14,29 @@ Advanced memory management plugin for ElizaOS that provides intelligent conversa
 ### 🧠 Long-term Memory (Persistent Facts)
 
 - **Intelligent Extraction**: Automatically learns facts about users from conversations
-- **
+- **Cognitive Science Based**: Organizes information into 3 core memory types (episodic, semantic, procedural)
+- **Strict Criteria**: Only extracts truly significant, persistent information
 - **Confidence Scoring**: Tracks reliability of stored information
-- **Cross-session Persistence**: Remembers user
+- **Cross-session Persistence**: Remembers user context across all interactions
 
-### 📊 Memory Categories
+### 📊 Memory Categories (Based on Cognitive Science)
 
-
-
-
-
-
-
-
-
-
+The plugin uses the three fundamental types of long-term memory from cognitive science:
+
+1. **Episodic Memory**: Personal experiences and specific events
+   - Example: "User completed migration from MongoDB to PostgreSQL in Q2 2024"
+   - Contains: WHO did WHAT, WHEN/WHERE
+   - Use for: Significant project milestones, important incidents, formative experiences
+
+2. **Semantic Memory**: General facts, concepts, and knowledge
+   - Example: "User is a senior TypeScript developer with 8 years experience"
+   - Contains: Factual, timeless information
+   - Use for: Professional identity, core expertise, established facts about work context
+
+3. **Procedural Memory**: Skills, workflows, and how-to knowledge
+   - Example: "User follows TDD workflow: writes tests first, then implementation"
+   - Contains: HOW user does things
+   - Use for: Consistent workflows, methodologies, debugging processes
 
 ## Installation
 
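For reference, each extracted fact is persisted as one row in the `long_term_memories` table. The following row shape is a sketch reconstructed from the Drizzle schema in the bundled `dist` output; the interface name is illustrative, not a public export, and the nullability of optional fields is inferred.

```typescript
// Row shape of long_term_memories, per the Drizzle schema in dist (sketch).
interface LongTermMemoryRow {
  id: string;                  // varchar(36), primary key
  agentId: string;             // varchar(36)
  entityId: string;            // varchar(36): the user the fact is about
  category: 'episodic' | 'semantic' | 'procedural';
  content: string;             // the stored fact
  metadata: Record<string, unknown> | null;
  embedding: number[] | null;  // optional vector for similarity search
  confidence: number;          // defaults to 1; extraction stores >= 0.85 only
  source: string | null;       // e.g. 'conversation' or 'manual'
  createdAt: Date;
  updatedAt: Date;
  lastAccessedAt: Date | null;
  accessCount: number;         // defaults to 0
}
```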
@@ -65,9 +73,9 @@ MEMORY_MAX_NEW_MESSAGES=20 # Max new messages in summary update (defaul
 
 # Long-term Memory Settings
 MEMORY_LONG_TERM_ENABLED=true # Enable long-term extraction (default: true)
-MEMORY_EXTRACTION_THRESHOLD=
-MEMORY_EXTRACTION_INTERVAL=
-MEMORY_CONFIDENCE_THRESHOLD=0.
+MEMORY_EXTRACTION_THRESHOLD=30 # Min messages before extraction starts (default: 30)
+MEMORY_EXTRACTION_INTERVAL=10 # Run extraction every N messages (default: 10)
+MEMORY_CONFIDENCE_THRESHOLD=0.85 # Minimum confidence to store (default: 0.85)
 ```
 
 ### Manual Memory Storage
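As a cross-check against the bundled code later in this diff, 1.0.7 hardens the long-term defaults relative to 1.0.5. A minimal sketch of the shipped defaults and the env-override pattern, reconstructed from `MemoryService.initialize()` in the minified dist; the object name is illustrative:

```typescript
// Long-term extraction defaults as shipped in 1.0.7 (1.0.5 values for contrast).
const longTermDefaults = {
  longTermExtractionEnabled: true,
  longTermExtractionThreshold: 30,   // 1.0.5 default: 20
  longTermExtractionInterval: 10,    // 1.0.5 default: 5
  longTermConfidenceThreshold: 0.85, // 1.0.5 default: 0.7
};

// Each env var is read as a string and parsed, e.g.:
// const raw = runtime.getSetting('MEMORY_EXTRACTION_THRESHOLD');
// if (raw) config.longTermExtractionThreshold = parseInt(raw, 10);
```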
@@ -76,13 +84,13 @@ Users can explicitly ask the agent to remember information:
 
 ```
 User: "Remember that I prefer TypeScript over JavaScript"
-Agent: I've made a note of that in my
+Agent: I've made a note of that in my Semantic memory: "User prefers TypeScript over JavaScript"
 
 User: "Keep in mind I'm working on a startup project"
-Agent: I've made a note of that in my
+Agent: I've made a note of that in my Episodic memory: "User is working on a startup project"
 
-User: "Don't forget I use
-Agent: I've made a note of that in my
+User: "Don't forget I always use TDD"
+Agent: I've made a note of that in my Procedural memory: "User follows TDD (Test-Driven Development) methodology"
 ```
 
 ### Accessing the Memory Service
@@ -97,7 +105,7 @@ const memoryService = runtime.getService('memory') as MemoryService;
 await memoryService.storeLongTermMemory({
   agentId: runtime.agentId,
   entityId: userId,
-  category: LongTermMemoryCategory.
+  category: LongTermMemoryCategory.SEMANTIC,
   content: 'User prefers concise responses',
   confidence: 0.9,
   source: 'manual',
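For the read path, the bundled `MemoryService` also exposes retrieval methods. A minimal sketch, assuming the same `runtime` and `userId` as the snippet above; the signatures (`getLongTermMemories(entityId, category?, limit = 10)` and `getFormattedLongTermMemories(entityId)`) are read from the minified dist code below, so treat them as inferred rather than documented API:

```typescript
const memoryService = runtime.getService('memory') as MemoryService;

// Up to 10 semantic facts for this user, ordered by confidence, then recency:
const facts = await memoryService.getLongTermMemories(
  userId,
  LongTermMemoryCategory.SEMANTIC,
  10
);

// A category-grouped markdown block, ready for prompt injection:
const formatted = await memoryService.getFormattedLongTermMemories(userId);
```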
@@ -174,16 +182,29 @@ No manual migration is required - the schema is handled automatically by the run
 
 ### Long-term Memory Flow
 
-1. **Warm-up Period**: Extraction waits until
-2. **Monitoring**: longTermExtractionEvaluator runs periodically (every
+1. **Warm-up Period**: Extraction waits until 30+ messages (configurable) to ensure meaningful patterns
+2. **Monitoring**: longTermExtractionEvaluator runs periodically (every 10 messages after threshold)
 3. **Analysis**: LLM analyzes conversation for **persistent, important** facts worth remembering
-4. **Strict Filtering**:
-5. **Storage**: High-confidence facts stored in long_term_memories table
+4. **Strict Filtering**: Applies cognitive science principles to extract only truly significant information
+5. **Storage**: High-confidence facts (≥0.85) stored in long_term_memories table
 6. **Retrieval**: longTermMemoryProvider injects relevant facts in all future conversations
 
-**Extraction Criteria**: The evaluator uses
-
--
+**Ultra-Strict Extraction Criteria**: The evaluator uses stringent criteria to prevent memory pollution:
+
+- ✅ **DO Extract:**
+  - **Episodic**: Significant milestones, important incidents, major decisions with lasting impact
+  - **Semantic**: Professional identity, core expertise, established facts (explicitly stated or conclusively demonstrated)
+  - **Procedural**: Consistent workflows (3+ occurrences or explicitly stated), standard practices, methodologies
+
+- ❌ **NEVER Extract:**
+  - One-time requests or tasks
+  - Casual conversations without lasting significance
+  - Exploratory questions or testing
+  - Temporary context or situational information
+  - Preferences from single occurrence
+  - Social pleasantries
+  - Common patterns everyone has
+  - General knowledge not specific to user
 
 ### Manual Memory Flow
 
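The warm-up and interval gating in steps 1 and 2 reduces to a small checkpoint comparison. A sketch of the logic visible (minified) in the bundle's `shouldRunExtraction`; parameter names are simplified:

```typescript
function shouldRunExtraction(
  messageCount: number,   // dialogue messages seen so far in the room
  threshold: number,      // warm-up period, default 30
  interval: number,       // default 10
  lastCheckpoint: number, // message count recorded at the last extraction
): boolean {
  if (messageCount < threshold) return false; // still warming up
  // Snap to the latest interval boundary; extract at most once per boundary.
  const currentCheckpoint = Math.floor(messageCount / interval) * interval;
  return currentCheckpoint > lastCheckpoint;
}

// With the defaults this fires at message 30, then 40, 50, ...; the checkpoint
// is persisted via the runtime cache, so restarts do not re-trigger extraction.
```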
@@ -236,12 +257,13 @@ No manual migration is required - the schema is handled automatically by the run
 - **Adjust retention**: Keep more recent messages for immediate context
 
 **Long-term Memory:**
-- **
-- **
-- **
-- **
-- **
-- **
+- **Conservative extraction**: Keep threshold at 30+ messages for better pattern recognition (default)
+- **Aggressive extraction**: Lower threshold to 20 messages if needed (may reduce quality)
+- **Balanced approach**: Default 0.85 confidence threshold ensures high-quality extractions
+- **More permissive**: Lower confidence to 0.80 for more extractions (risk of lower quality)
+- **Most strict**: Raise confidence to 0.90 for only the most certain facts
+- **Frequent updates**: Lower extraction interval to 5-8 messages for faster learning
+- **Conservative updates**: Keep default 10+ message interval to prevent over-extraction
 
 ## Advanced Features
 
@@ -263,7 +285,7 @@ Use the `memory_access_logs` table to:
 
 ### Custom Categories
 
-
+The plugin uses three scientifically-grounded memory types from cognitive science. If you need additional categories for domain-specific use cases, you can extend the enum:
 
 ```typescript
 export enum CustomMemoryCategory {
@@ -273,6 +295,8 @@ export enum CustomMemoryCategory {
 }
 ```
 
+**Note**: Consider carefully whether your custom category truly represents a different type of memory, or if it can be classified under episodic (events), semantic (facts), or procedural (how-to) memory.
+
 ## Testing
 
 Run the test suite:
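The built-in enum backing the Custom Categories section above is visible in the bundle below. A sketch of it, together with a hypothetical extension (`MEDICAL_HISTORY` is an invented example, not part of the package); note that the bundled XML parser validates LLM-extracted categories against the three built-in values only, so custom categories are practical mainly for manually stored memories:

```typescript
// Built-in categories as defined in the 1.0.7 bundle:
export enum LongTermMemoryCategory {
  EPISODIC = 'episodic',
  SEMANTIC = 'semantic',
  PROCEDURAL = 'procedural',
}

// Hypothetical domain-specific extension of the kind the README describes:
export enum CustomMemoryCategory {
  MEDICAL_HISTORY = 'medical_history', // invented example
}
```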
@@ -1,8 +1,8 @@
-
var DJ=Object.defineProperty;var zJ=(K,W)=>{for(var Q in W)DJ(K,Q,{get:W[Q],enumerable:!0,configurable:!0,set:(Y)=>W[Q]=()=>Y})};import{Service as MJ,logger as H}from"@elizaos/core";import{eq as E,and as I,desc as y,sql as UJ,cosineDistance as yJ,gte as lJ}from"drizzle-orm";var JJ={};zJ(JJ,{sessionSummaries:()=>k,memoryAccessLogs:()=>jJ,longTermMemories:()=>X});import{sql as $J}from"drizzle-orm";import{pgTable as RJ,text as m,integer as bJ,jsonb as fJ,real as AJ,index as u,varchar as t,timestamp as s}from"drizzle-orm/pg-core";var X=RJ("long_term_memories",{id:t("id",{length:36}).primaryKey(),agentId:t("agent_id",{length:36}).notNull(),entityId:t("entity_id",{length:36}).notNull(),category:m("category").notNull(),content:m("content").notNull(),metadata:fJ("metadata"),embedding:AJ("embedding").array(),confidence:AJ("confidence").default(1),source:m("source"),createdAt:s("created_at").default($J`now()`).notNull(),updatedAt:s("updated_at").default($J`now()`).notNull(),lastAccessedAt:s("last_accessed_at"),accessCount:bJ("access_count").default(0)},(K)=>({agentEntityIdx:u("long_term_memories_agent_entity_idx").on(K.agentId,K.entityId),categoryIdx:u("long_term_memories_category_idx").on(K.category),confidenceIdx:u("long_term_memories_confidence_idx").on(K.confidence),createdAtIdx:u("long_term_memories_created_at_idx").on(K.createdAt)}));import{sql as VJ}from"drizzle-orm";import{pgTable as CJ,text as vJ,integer as GJ,jsonb as XJ,real as TJ,index as r,varchar as g,timestamp as i}from"drizzle-orm/pg-core";var k=CJ("session_summaries",{id:g("id",{length:36}).primaryKey(),agentId:g("agent_id",{length:36}).notNull(),roomId:g("room_id",{length:36}).notNull(),entityId:g("entity_id",{length:36}),summary:vJ("summary").notNull(),messageCount:GJ("message_count").notNull(),lastMessageOffset:GJ("last_message_offset").notNull().default(0),startTime:i("start_time").notNull(),endTime:i("end_time").notNull(),topics:XJ("topics"),metadata:XJ("metadata"),embedding:TJ("embedding").array(),createdAt:i("created_at").default(VJ`now()`).notNull(),updatedAt:i("updated_at").default(VJ`now()`).notNull()},(K)=>({agentRoomIdx:r("session_summaries_agent_room_idx").on(K.agentId,K.roomId),entityIdx:r("session_summaries_entity_idx").on(K.entityId),startTimeIdx:r("session_summaries_start_time_idx").on(K.startTime)}));import{sql as hJ}from"drizzle-orm";import{pgTable as SJ,text as xJ,integer as IJ,real as pJ,index as e,varchar as n,timestamp as wJ}from"drizzle-orm/pg-core";var jJ=SJ("memory_access_logs",{id:n("id",{length:36}).primaryKey(),agentId:n("agent_id",{length:36}).notNull(),memoryId:n("memory_id",{length:36}).notNull(),memoryType:xJ("memory_type").notNull(),accessedAt:wJ("accessed_at").default(hJ`now()`).notNull(),roomId:n("room_id",{length:36}),relevanceScore:pJ("relevance_score"),wasUseful:IJ("was_useful")},(K)=>({memoryIdx:e("memory_access_logs_memory_idx").on(K.memoryId),agentIdx:e("memory_access_logs_agent_idx").on(K.agentId),accessedAtIdx:e("memory_access_logs_accessed_at_idx").on(K.accessedAt)}));class l extends MJ{static serviceType="memory";sessionMessageCounts;memoryConfig;lastExtractionCheckpoints;capabilityDescription="Advanced memory management with short-term summarization and long-term persistent facts";constructor(K){super(K);this.sessionMessageCounts=new Map,this.lastExtractionCheckpoints=new 
Map,this.memoryConfig={shortTermSummarizationThreshold:16,shortTermRetainRecent:10,shortTermSummarizationInterval:10,longTermExtractionEnabled:!0,longTermVectorSearchEnabled:!1,longTermConfidenceThreshold:0.7,longTermExtractionThreshold:20,longTermExtractionInterval:5,summaryModelType:"TEXT_LARGE",summaryMaxTokens:2500,summaryMaxNewMessages:20}}static async start(K){let W=new l(K);return await W.initialize(K),W}async stop(){H.info("MemoryService stopped")}async initialize(K){this.runtime=K;let W=K.getSetting("MEMORY_SUMMARIZATION_THRESHOLD");if(W)this.memoryConfig.shortTermSummarizationThreshold=parseInt(W,10);let Q=K.getSetting("MEMORY_RETAIN_RECENT");if(Q)this.memoryConfig.shortTermRetainRecent=parseInt(Q,10);let Y=K.getSetting("MEMORY_SUMMARIZATION_INTERVAL");if(Y)this.memoryConfig.shortTermSummarizationInterval=parseInt(Y,10);let J=K.getSetting("MEMORY_MAX_NEW_MESSAGES");if(J)this.memoryConfig.summaryMaxNewMessages=parseInt(J,10);let Z=K.getSetting("MEMORY_LONG_TERM_ENABLED");if(Z==="false")this.memoryConfig.longTermExtractionEnabled=!1;else if(Z==="true")this.memoryConfig.longTermExtractionEnabled=!0;let _=K.getSetting("MEMORY_CONFIDENCE_THRESHOLD");if(_)this.memoryConfig.longTermConfidenceThreshold=parseFloat(_);let A=K.getSetting("MEMORY_EXTRACTION_THRESHOLD");if(A)this.memoryConfig.longTermExtractionThreshold=parseInt(A,10);let V=K.getSetting("MEMORY_EXTRACTION_INTERVAL");if(V)this.memoryConfig.longTermExtractionInterval=parseInt(V,10);H.info({summarizationThreshold:this.memoryConfig.shortTermSummarizationThreshold,summarizationInterval:this.memoryConfig.shortTermSummarizationInterval,maxNewMessages:this.memoryConfig.summaryMaxNewMessages,retainRecent:this.memoryConfig.shortTermRetainRecent,longTermEnabled:this.memoryConfig.longTermExtractionEnabled,extractionThreshold:this.memoryConfig.longTermExtractionThreshold,extractionInterval:this.memoryConfig.longTermExtractionInterval,confidenceThreshold:this.memoryConfig.longTermConfidenceThreshold},"MemoryService initialized")}getDb(){let K=this.runtime.db;if(!K)throw Error("Database not available");return K}getConfig(){return{...this.memoryConfig}}updateConfig(K){this.memoryConfig={...this.memoryConfig,...K}}incrementMessageCount(K){let Q=(this.sessionMessageCounts.get(K)||0)+1;return this.sessionMessageCounts.set(K,Q),Q}resetMessageCount(K){this.sessionMessageCounts.set(K,0)}async shouldSummarize(K){return await this.runtime.countMemories(K,!1,"messages")>=this.memoryConfig.shortTermSummarizationThreshold}getExtractionKey(K,W){return`memory:extraction:${K}:${W}`}async getLastExtractionCheckpoint(K,W){let Q=this.getExtractionKey(K,W),Y=this.lastExtractionCheckpoints.get(Q);if(Y!==void 0)return Y;try{let Z=await this.runtime.getCache(Q)??0;return this.lastExtractionCheckpoints.set(Q,Z),Z}catch(J){return H.warn({error:J},"Failed to get extraction checkpoint from cache"),0}}async setLastExtractionCheckpoint(K,W,Q){let Y=this.getExtractionKey(K,W);this.lastExtractionCheckpoints.set(Y,Q);try{await this.runtime.setCache(Y,Q),H.debug(`Set extraction checkpoint for ${K} in room ${W} at message count ${Q}`)}catch(J){H.error({error:J},"Failed to persist extraction checkpoint to cache")}}async shouldRunExtraction(K,W,Q){let Y=this.memoryConfig.longTermExtractionThreshold,J=this.memoryConfig.longTermExtractionInterval;if(Q<Y)return H.debug({entityId:K,roomId:W,currentMessageCount:Q,threshold:Y,shouldRun:!1},"Extraction check: below threshold"),!1;let Z=await this.getLastExtractionCheckpoint(K,W),_=Math.floor(Q/J)*J,A=Q>=Y&&_>Z;return 
H.debug({entityId:K,roomId:W,currentMessageCount:Q,threshold:Y,interval:J,lastCheckpoint:Z,currentCheckpoint:_,shouldRun:A},"Extraction check"),A}async storeLongTermMemory(K){let W=this.getDb(),Q=crypto.randomUUID(),Y=new Date,J={id:Q,createdAt:Y,updatedAt:Y,accessCount:0,...K};try{await W.insert(X).values({id:J.id,agentId:J.agentId,entityId:J.entityId,category:J.category,content:J.content,metadata:J.metadata||{},embedding:J.embedding,confidence:J.confidence,source:J.source,accessCount:J.accessCount,createdAt:Y,updatedAt:Y,lastAccessedAt:J.lastAccessedAt})}catch(Z){throw H.error({error:Z},"Failed to store long-term memory"),Z}return H.info(`Stored long-term memory: ${J.category} for entity ${J.entityId}`),J}async getLongTermMemories(K,W,Q=10){let Y=this.getDb(),J=[E(X.agentId,this.runtime.agentId),E(X.entityId,K)];if(W)J.push(E(X.category,W));return(await Y.select().from(X).where(I(...J)).orderBy(y(X.confidence),y(X.updatedAt)).limit(Q)).map((_)=>({id:_.id,agentId:_.agentId,entityId:_.entityId,category:_.category,content:_.content,metadata:_.metadata,embedding:_.embedding,confidence:_.confidence,source:_.source,createdAt:_.createdAt,updatedAt:_.updatedAt,lastAccessedAt:_.lastAccessedAt,accessCount:_.accessCount}))}async updateLongTermMemory(K,W,Q){let Y=this.getDb(),J={updatedAt:new Date};if(Q.content!==void 0)J.content=Q.content;if(Q.metadata!==void 0)J.metadata=Q.metadata;if(Q.confidence!==void 0)J.confidence=Q.confidence;if(Q.embedding!==void 0)J.embedding=Q.embedding;if(Q.lastAccessedAt!==void 0)J.lastAccessedAt=Q.lastAccessedAt;if(Q.accessCount!==void 0)J.accessCount=Q.accessCount;await Y.update(X).set(J).where(I(E(X.id,K),E(X.agentId,this.runtime.agentId),E(X.entityId,W))),H.info(`Updated long-term memory: ${K} for entity ${W}`)}async deleteLongTermMemory(K,W){await this.getDb().delete(X).where(I(E(X.id,K),E(X.agentId,this.runtime.agentId),E(X.entityId,W))),H.info(`Deleted long-term memory: ${K} for entity ${W}`)}async getCurrentSessionSummary(K){let Q=await this.getDb().select().from(k).where(I(E(k.agentId,this.runtime.agentId),E(k.roomId,K))).orderBy(y(k.updatedAt)).limit(1);if(Q.length===0)return null;let Y=Q[0];return{id:Y.id,agentId:Y.agentId,roomId:Y.roomId,entityId:Y.entityId,summary:Y.summary,messageCount:Y.messageCount,lastMessageOffset:Y.lastMessageOffset,startTime:Y.startTime,endTime:Y.endTime,topics:Y.topics||[],metadata:Y.metadata,embedding:Y.embedding,createdAt:Y.createdAt,updatedAt:Y.updatedAt}}async storeSessionSummary(K){let W=this.getDb(),Q=crypto.randomUUID(),Y=new Date,J={id:Q,createdAt:Y,updatedAt:Y,...K};return await W.insert(k).values({id:J.id,agentId:J.agentId,roomId:J.roomId,entityId:J.entityId||null,summary:J.summary,messageCount:J.messageCount,lastMessageOffset:J.lastMessageOffset,startTime:J.startTime,endTime:J.endTime,topics:J.topics||[],metadata:J.metadata||{},embedding:J.embedding,createdAt:Y,updatedAt:Y}),H.info(`Stored session summary for room ${J.roomId}`),J}async updateSessionSummary(K,W,Q){let Y=this.getDb(),J={updatedAt:new Date};if(Q.summary!==void 0)J.summary=Q.summary;if(Q.messageCount!==void 0)J.messageCount=Q.messageCount;if(Q.lastMessageOffset!==void 0)J.lastMessageOffset=Q.lastMessageOffset;if(Q.endTime!==void 0)J.endTime=Q.endTime;if(Q.topics!==void 0)J.topics=Q.topics;if(Q.metadata!==void 0)J.metadata=Q.metadata;if(Q.embedding!==void 0)J.embedding=Q.embedding;await Y.update(k).set(J).where(I(E(k.id,K),E(k.agentId,this.runtime.agentId),E(k.roomId,W))),H.info(`Updated session summary: ${K} for room ${W}`)}async 
getSessionSummaries(K,W=5){return(await this.getDb().select().from(k).where(I(E(k.agentId,this.runtime.agentId),E(k.roomId,K))).orderBy(y(k.updatedAt)).limit(W)).map((J)=>({id:J.id,agentId:J.agentId,roomId:J.roomId,entityId:J.entityId,summary:J.summary,messageCount:J.messageCount,lastMessageOffset:J.lastMessageOffset,startTime:J.startTime,endTime:J.endTime,topics:J.topics||[],metadata:J.metadata,embedding:J.embedding,createdAt:J.createdAt,updatedAt:J.updatedAt}))}async searchLongTermMemories(K,W,Q=5,Y=0.7){if(!this.memoryConfig.longTermVectorSearchEnabled)return H.warn("Vector search is not enabled, falling back to recent memories"),this.getLongTermMemories(K,void 0,Q);let J=this.getDb();try{let Z=W.map(($)=>Number.isFinite($)?Number($.toFixed(6)):0),_=UJ`1 - (${yJ(X.embedding,Z)})`,A=[E(X.agentId,this.runtime.agentId),E(X.entityId,K),UJ`${X.embedding} IS NOT NULL`];if(Y>0)A.push(lJ(_,Y));return(await J.select({memory:X,similarity:_}).from(X).where(I(...A)).orderBy(y(_)).limit(Q)).map(($)=>({id:$.memory.id,agentId:$.memory.agentId,entityId:$.memory.entityId,category:$.memory.category,content:$.memory.content,metadata:$.memory.metadata,embedding:$.memory.embedding,confidence:$.memory.confidence,source:$.memory.source,createdAt:$.memory.createdAt,updatedAt:$.memory.updatedAt,lastAccessedAt:$.memory.lastAccessedAt,accessCount:$.memory.accessCount,similarity:$.similarity}))}catch(Z){return H.warn({error:Z},"Vector search failed, falling back to recent memories"),this.getLongTermMemories(K,void 0,Q)}}async getFormattedLongTermMemories(K){let W=await this.getLongTermMemories(K,void 0,20);if(W.length===0)return"";let Q=new Map;for(let J of W){if(!Q.has(J.category))Q.set(J.category,[]);Q.get(J.category)?.push(J)}let Y=[];for(let[J,Z]of Q.entries()){let _=J.split("_").map((V)=>V.charAt(0).toUpperCase()+V.slice(1)).join(" "),A=Z.map((V)=>`- ${V.content}`).join(`
-`);
-${
+
var EB=Object.defineProperty;var NB=(A,Q)=>{for(var J in Q)EB(A,J,{get:Q[J],enumerable:!0,configurable:!0,set:(K)=>Q[J]=()=>K})};import{Service as vB,logger as O}from"@elizaos/core";import{eq as W,and as b,desc as S,sql as ZB,cosineDistance as xB,gte as hB}from"drizzle-orm";var s={};NB(s,{sessionSummaries:()=>E,memoryAccessLogs:()=>YB,longTermMemories:()=>j});import{sql as BB}from"drizzle-orm";import{pgTable as OB,text as g,integer as PB,jsonb as LB,real as AB,index as w,varchar as u,timestamp as i}from"drizzle-orm/pg-core";var j=OB("long_term_memories",{id:u("id",{length:36}).primaryKey(),agentId:u("agent_id",{length:36}).notNull(),entityId:u("entity_id",{length:36}).notNull(),category:g("category").notNull(),content:g("content").notNull(),metadata:LB("metadata"),embedding:AB("embedding").array(),confidence:AB("confidence").default(1),source:g("source"),createdAt:i("created_at").default(BB`now()`).notNull(),updatedAt:i("updated_at").default(BB`now()`).notNull(),lastAccessedAt:i("last_accessed_at"),accessCount:PB("access_count").default(0)},(A)=>({agentEntityIdx:w("long_term_memories_agent_entity_idx").on(A.agentId,A.entityId),categoryIdx:w("long_term_memories_category_idx").on(A.category),confidenceIdx:w("long_term_memories_confidence_idx").on(A.confidence),createdAtIdx:w("long_term_memories_created_at_idx").on(A.createdAt)}));import{sql as JB}from"drizzle-orm";import{pgTable as kB,text as RB,integer as KB,jsonb as QB,real as zB,index as n,varchar as y,timestamp as c}from"drizzle-orm/pg-core";var E=kB("session_summaries",{id:y("id",{length:36}).primaryKey(),agentId:y("agent_id",{length:36}).notNull(),roomId:y("room_id",{length:36}).notNull(),entityId:y("entity_id",{length:36}),summary:RB("summary").notNull(),messageCount:KB("message_count").notNull(),lastMessageOffset:KB("last_message_offset").notNull().default(0),startTime:c("start_time").notNull(),endTime:c("end_time").notNull(),topics:QB("topics"),metadata:QB("metadata"),embedding:zB("embedding").array(),createdAt:c("created_at").default(JB`now()`).notNull(),updatedAt:c("updated_at").default(JB`now()`).notNull()},(A)=>({agentRoomIdx:n("session_summaries_agent_room_idx").on(A.agentId,A.roomId),entityIdx:n("session_summaries_entity_idx").on(A.entityId),startTimeIdx:n("session_summaries_start_time_idx").on(A.startTime)}));import{sql as qB}from"drizzle-orm";import{pgTable as fB,text as bB,integer as DB,real as CB,index as a,varchar as l,timestamp as TB}from"drizzle-orm/pg-core";var YB=fB("memory_access_logs",{id:l("id",{length:36}).primaryKey(),agentId:l("agent_id",{length:36}).notNull(),memoryId:l("memory_id",{length:36}).notNull(),memoryType:bB("memory_type").notNull(),accessedAt:TB("accessed_at").default(qB`now()`).notNull(),roomId:l("room_id",{length:36}),relevanceScore:CB("relevance_score"),wasUseful:DB("was_useful")},(A)=>({memoryIdx:a("memory_access_logs_memory_idx").on(A.memoryId),agentIdx:a("memory_access_logs_agent_idx").on(A.agentId),accessedAtIdx:a("memory_access_logs_accessed_at_idx").on(A.accessedAt)}));class I extends vB{static serviceType="memory";sessionMessageCounts;memoryConfig;lastExtractionCheckpoints;capabilityDescription="Advanced memory management with short-term summarization and long-term persistent facts";constructor(A){super(A);this.sessionMessageCounts=new Map,this.lastExtractionCheckpoints=new 
Map,this.memoryConfig={shortTermSummarizationThreshold:16,shortTermRetainRecent:6,shortTermSummarizationInterval:10,longTermExtractionEnabled:!0,longTermVectorSearchEnabled:!1,longTermConfidenceThreshold:0.85,longTermExtractionThreshold:30,longTermExtractionInterval:10,summaryModelType:"TEXT_LARGE",summaryMaxTokens:2500,summaryMaxNewMessages:20}}static async start(A){let Q=new I(A);return await Q.initialize(A),Q}async stop(){O.info("MemoryService stopped")}async initialize(A){this.runtime=A;let Q=A.getSetting("MEMORY_SUMMARIZATION_THRESHOLD");if(Q)this.memoryConfig.shortTermSummarizationThreshold=parseInt(Q,10);let J=A.getSetting("MEMORY_RETAIN_RECENT");if(J)this.memoryConfig.shortTermRetainRecent=parseInt(J,10);let K=A.getSetting("MEMORY_SUMMARIZATION_INTERVAL");if(K)this.memoryConfig.shortTermSummarizationInterval=parseInt(K,10);let B=A.getSetting("MEMORY_MAX_NEW_MESSAGES");if(B)this.memoryConfig.summaryMaxNewMessages=parseInt(B,10);let Y=A.getSetting("MEMORY_LONG_TERM_ENABLED");if(Y==="false")this.memoryConfig.longTermExtractionEnabled=!1;else if(Y==="true")this.memoryConfig.longTermExtractionEnabled=!0;let Z=A.getSetting("MEMORY_CONFIDENCE_THRESHOLD");if(Z)this.memoryConfig.longTermConfidenceThreshold=parseFloat(Z);let $=A.getSetting("MEMORY_EXTRACTION_THRESHOLD");if($)this.memoryConfig.longTermExtractionThreshold=parseInt($,10);let G=A.getSetting("MEMORY_EXTRACTION_INTERVAL");if(G)this.memoryConfig.longTermExtractionInterval=parseInt(G,10);O.debug({summarizationThreshold:this.memoryConfig.shortTermSummarizationThreshold,summarizationInterval:this.memoryConfig.shortTermSummarizationInterval,maxNewMessages:this.memoryConfig.summaryMaxNewMessages,retainRecent:this.memoryConfig.shortTermRetainRecent,longTermEnabled:this.memoryConfig.longTermExtractionEnabled,extractionThreshold:this.memoryConfig.longTermExtractionThreshold,extractionInterval:this.memoryConfig.longTermExtractionInterval,confidenceThreshold:this.memoryConfig.longTermConfidenceThreshold},"MemoryService initialized")}getDb(){let A=this.runtime.db;if(!A)throw Error("Database not available");return A}getConfig(){return{...this.memoryConfig}}updateConfig(A){this.memoryConfig={...this.memoryConfig,...A}}incrementMessageCount(A){let J=(this.sessionMessageCounts.get(A)||0)+1;return this.sessionMessageCounts.set(A,J),J}resetMessageCount(A){this.sessionMessageCounts.set(A,0)}async shouldSummarize(A){return await this.runtime.countMemories(A,!1,"messages")>=this.memoryConfig.shortTermSummarizationThreshold}getExtractionKey(A,Q){return`memory:extraction:${A}:${Q}`}async getLastExtractionCheckpoint(A,Q){let J=this.getExtractionKey(A,Q),K=this.lastExtractionCheckpoints.get(J);if(K!==void 0)return K;try{let Y=await this.runtime.getCache(J)??0;return this.lastExtractionCheckpoints.set(J,Y),Y}catch(B){return O.warn({error:B},"Failed to get extraction checkpoint from cache"),0}}async setLastExtractionCheckpoint(A,Q,J){let K=this.getExtractionKey(A,Q);this.lastExtractionCheckpoints.set(K,J);try{await this.runtime.setCache(K,J),O.debug(`Set extraction checkpoint for ${A} in room ${Q} at message count ${J}`)}catch(B){O.error({error:B},"Failed to persist extraction checkpoint to cache")}}async shouldRunExtraction(A,Q,J){let K=this.memoryConfig.longTermExtractionThreshold,B=this.memoryConfig.longTermExtractionInterval;if(J<K)return O.debug({entityId:A,roomId:Q,currentMessageCount:J,threshold:K,shouldRun:!1},"Extraction check: below threshold"),!1;let Y=await this.getLastExtractionCheckpoint(A,Q),Z=Math.floor(J/B)*B,$=J>=K&&Z>Y;return 
O.debug({entityId:A,roomId:Q,currentMessageCount:J,threshold:K,interval:B,lastCheckpoint:Y,currentCheckpoint:Z,shouldRun:$},"Extraction check"),$}async storeLongTermMemory(A){let Q=this.getDb(),J=crypto.randomUUID(),K=new Date,B={id:J,createdAt:K,updatedAt:K,accessCount:0,...A};try{await Q.insert(j).values({id:B.id,agentId:B.agentId,entityId:B.entityId,category:B.category,content:B.content,metadata:B.metadata||{},embedding:B.embedding,confidence:B.confidence,source:B.source,accessCount:B.accessCount,createdAt:K,updatedAt:K,lastAccessedAt:B.lastAccessedAt})}catch(Y){throw O.error({error:Y},"Failed to store long-term memory"),Y}return O.info(`Stored long-term memory: ${B.category} for entity ${B.entityId}`),B}async getLongTermMemories(A,Q,J=10){let K=this.getDb(),B=[W(j.agentId,this.runtime.agentId),W(j.entityId,A)];if(Q)B.push(W(j.category,Q));return(await K.select().from(j).where(b(...B)).orderBy(S(j.confidence),S(j.updatedAt)).limit(J)).map((Z)=>({id:Z.id,agentId:Z.agentId,entityId:Z.entityId,category:Z.category,content:Z.content,metadata:Z.metadata,embedding:Z.embedding,confidence:Z.confidence,source:Z.source,createdAt:Z.createdAt,updatedAt:Z.updatedAt,lastAccessedAt:Z.lastAccessedAt,accessCount:Z.accessCount}))}async updateLongTermMemory(A,Q,J){let K=this.getDb(),B={updatedAt:new Date};if(J.content!==void 0)B.content=J.content;if(J.metadata!==void 0)B.metadata=J.metadata;if(J.confidence!==void 0)B.confidence=J.confidence;if(J.embedding!==void 0)B.embedding=J.embedding;if(J.lastAccessedAt!==void 0)B.lastAccessedAt=J.lastAccessedAt;if(J.accessCount!==void 0)B.accessCount=J.accessCount;await K.update(j).set(B).where(b(W(j.id,A),W(j.agentId,this.runtime.agentId),W(j.entityId,Q))),O.info(`Updated long-term memory: ${A} for entity ${Q}`)}async deleteLongTermMemory(A,Q){await this.getDb().delete(j).where(b(W(j.id,A),W(j.agentId,this.runtime.agentId),W(j.entityId,Q))),O.info(`Deleted long-term memory: ${A} for entity ${Q}`)}async getCurrentSessionSummary(A){let J=await this.getDb().select().from(E).where(b(W(E.agentId,this.runtime.agentId),W(E.roomId,A))).orderBy(S(E.updatedAt)).limit(1);if(J.length===0)return null;let K=J[0];return{id:K.id,agentId:K.agentId,roomId:K.roomId,entityId:K.entityId,summary:K.summary,messageCount:K.messageCount,lastMessageOffset:K.lastMessageOffset,startTime:K.startTime,endTime:K.endTime,topics:K.topics||[],metadata:K.metadata,embedding:K.embedding,createdAt:K.createdAt,updatedAt:K.updatedAt}}async storeSessionSummary(A){let Q=this.getDb(),J=crypto.randomUUID(),K=new Date,B={id:J,createdAt:K,updatedAt:K,...A};return await Q.insert(E).values({id:B.id,agentId:B.agentId,roomId:B.roomId,entityId:B.entityId||null,summary:B.summary,messageCount:B.messageCount,lastMessageOffset:B.lastMessageOffset,startTime:B.startTime,endTime:B.endTime,topics:B.topics||[],metadata:B.metadata||{},embedding:B.embedding,createdAt:K,updatedAt:K}),O.info(`Stored session summary for room ${B.roomId}`),B}async updateSessionSummary(A,Q,J){let K=this.getDb(),B={updatedAt:new Date};if(J.summary!==void 0)B.summary=J.summary;if(J.messageCount!==void 0)B.messageCount=J.messageCount;if(J.lastMessageOffset!==void 0)B.lastMessageOffset=J.lastMessageOffset;if(J.endTime!==void 0)B.endTime=J.endTime;if(J.topics!==void 0)B.topics=J.topics;if(J.metadata!==void 0)B.metadata=J.metadata;if(J.embedding!==void 0)B.embedding=J.embedding;await K.update(E).set(B).where(b(W(E.id,A),W(E.agentId,this.runtime.agentId),W(E.roomId,Q))),O.info(`Updated session summary: ${A} for room ${Q}`)}async 
getSessionSummaries(A,Q=5){return(await this.getDb().select().from(E).where(b(W(E.agentId,this.runtime.agentId),W(E.roomId,A))).orderBy(S(E.updatedAt)).limit(Q)).map((B)=>({id:B.id,agentId:B.agentId,roomId:B.roomId,entityId:B.entityId,summary:B.summary,messageCount:B.messageCount,lastMessageOffset:B.lastMessageOffset,startTime:B.startTime,endTime:B.endTime,topics:B.topics||[],metadata:B.metadata,embedding:B.embedding,createdAt:B.createdAt,updatedAt:B.updatedAt}))}async searchLongTermMemories(A,Q,J=5,K=0.7){if(!this.memoryConfig.longTermVectorSearchEnabled)return O.warn("Vector search is not enabled, falling back to recent memories"),this.getLongTermMemories(A,void 0,J);let B=this.getDb();try{let Y=Q.map((_)=>Number.isFinite(_)?Number(_.toFixed(6)):0),Z=ZB`1 - (${xB(j.embedding,Y)})`,$=[W(j.agentId,this.runtime.agentId),W(j.entityId,A),ZB`${j.embedding} IS NOT NULL`];if(K>0)$.push(hB(Z,K));return(await B.select({memory:j,similarity:Z}).from(j).where(b(...$)).orderBy(S(Z)).limit(J)).map((_)=>({id:_.memory.id,agentId:_.memory.agentId,entityId:_.memory.entityId,category:_.memory.category,content:_.memory.content,metadata:_.memory.metadata,embedding:_.memory.embedding,confidence:_.memory.confidence,source:_.memory.source,createdAt:_.memory.createdAt,updatedAt:_.memory.updatedAt,lastAccessedAt:_.memory.lastAccessedAt,accessCount:_.memory.accessCount,similarity:_.similarity}))}catch(Y){return O.warn({error:Y},"Vector search failed, falling back to recent memories"),this.getLongTermMemories(A,void 0,J)}}async getFormattedLongTermMemories(A){let Q=await this.getLongTermMemories(A,void 0,20);if(Q.length===0)return"";let J=new Map;for(let B of Q){if(!J.has(B.category))J.set(B.category,[]);J.get(B.category)?.push(B)}let K=[];for(let[B,Y]of J.entries()){let Z=B.split("_").map((G)=>G.charAt(0).toUpperCase()+G.slice(1)).join(" "),$=Y.map((G)=>`- ${G.content}`).join(`
+`);K.push(`**${Z}**:
+${$}`)}return K.join(`
 
-`)}}import{logger as
+
`)}}import{logger as L,ModelType as SB,composePromptFromState as _B}from"@elizaos/core";async function IB(A,Q){return(await A.getMemories({tableName:"messages",roomId:Q,count:100,unique:!1})).filter((B)=>!(B.content?.type==="action_result"&&B.metadata?.type==="action_result")&&(B.metadata?.type==="agent_response_message"||B.metadata?.type==="user_message")).length}var MB=`# Task: Summarize Conversation
 
 You are analyzing a conversation to create a concise summary that captures the key points, topics, and important details.
 
@@ -31,7 +31,7 @@ Respond in this XML format:
 <point>First key point</point>
 <point>Second key point</point>
 </keyPoints>
-</summary>`,
+</summary>`,pB=`# Task: Update and Condense Conversation Summary
 
 You are updating an existing conversation summary with new messages, while keeping the total summary concise.
 
@@ -62,10 +62,11 @@ Respond in this XML format:
 <point>First key point</point>
 <point>Second key point</point>
 </keyPoints>
-</summary>`;function
-`),
+
</summary>`;function wB(A){let Q=A.match(/<text>([\s\S]*?)<\/text>/),J=A.match(/<topics>([\s\S]*?)<\/topics>/),K=A.matchAll(/<point>([\s\S]*?)<\/point>/g),B=Q?Q[1].trim():"Summary not available",Y=J?J[1].split(",").map(($)=>$.trim()).filter(Boolean):[],Z=Array.from(K).map(($)=>$[1].trim());return{summary:B,topics:Y,keyPoints:Z}}var $B={name:"MEMORY_SUMMARIZATION",description:"Automatically summarizes conversations to optimize context usage",similes:["CONVERSATION_SUMMARY","CONTEXT_COMPRESSION","MEMORY_OPTIMIZATION"],alwaysRun:!0,validate:async(A,Q)=>{if(!Q.content?.text)return L.debug("Skipping summarization: no message text"),!1;let J=A.getService("memory");if(!J)return L.debug("Skipping summarization: memory service not available"),!1;let K=J.getConfig(),B=await IB(A,Q.roomId),Y=await J.getCurrentSessionSummary(Q.roomId);if(!Y){let Z=B>=K.shortTermSummarizationThreshold;return L.debug({roomId:Q.roomId,currentDialogueCount:B,threshold:K.shortTermSummarizationThreshold,shouldSummarize:Z,reason:"initial_summary_check"},"Summarization validation check"),Z}else{let Z=B-Y.lastMessageOffset,$=Z>=K.shortTermSummarizationInterval;return L.debug({roomId:Q.roomId,currentDialogueCount:B,lastOffset:Y.lastMessageOffset,newDialogueCount:Z,interval:K.shortTermSummarizationInterval,shouldUpdate:$,reason:"summary_update_check"},"Summarization validation check"),$}},handler:async(A,Q)=>{let J=A.getService("memory");if(!J){L.error("MemoryService not found");return}let K=J.getConfig(),{roomId:B}=Q;try{L.info(`Starting summarization for room ${B}`);let Y=await J.getCurrentSessionSummary(B),Z=Y?.lastMessageOffset||0,G=(await A.getMemories({tableName:"messages",roomId:B,count:1000,unique:!1})).filter((X)=>!(X.content?.type==="action_result"&&X.metadata?.type==="action_result")&&(X.metadata?.type==="agent_response_message"||X.metadata?.type==="user_message")),_=G.length,H=_-Z;if(H===0){L.debug("No new dialogue messages to summarize");return}let F=K.summaryMaxNewMessages||50,T=Math.min(H,F);if(H>F)L.warn(`Capping new dialogue messages at ${F} (${H} available). Oldest messages will be skipped.`);let q=G.sort((X,z)=>(X.createdAt||0)-(z.createdAt||0)),U=q.slice(Z,Z+T);if(U.length===0){L.debug("No new dialogue messages retrieved after filtering");return}let V=U.map((X)=>{return`${X.entityId===A.agentId?A.character.name:"User"}: ${X.content.text||"[non-text message]"}`}).join(`
+
`),R=await A.composeState(Q),v,D;if(Y)D=pB,v=_B({state:{...R,existingSummary:Y.summary,existingTopics:Y.topics?.join(", ")||"None",newMessages:V},template:D});else{let X=q.map((z)=>{return`${z.entityId===A.agentId?A.character.name:"User"}: ${z.content.text||"[non-text message]"}`}).join(`
+
`);D=MB,v=_B({state:{...R,recentMessages:X},template:D})}let d=await A.useModel(SB.TEXT_LARGE,{prompt:v,maxTokens:K.summaryMaxTokens||2500}),k=wB(d);L.info(`${Y?"Updated":"Generated"} summary: ${k.summary.substring(0,100)}...`);let x=Z+U.length,C=U[0],f=U[U.length-1],h=Y?Y.startTime:C?.createdAt&&C.createdAt>0?new Date(C.createdAt):new Date,p=f?.createdAt&&f.createdAt>0?new Date(f.createdAt):new Date;if(Y)await J.updateSessionSummary(Y.id,B,{summary:k.summary,messageCount:Y.messageCount+U.length,lastMessageOffset:x,endTime:p,topics:k.topics,metadata:{keyPoints:k.keyPoints}}),L.info(`Updated summary for room ${B}: ${U.length} new dialogue messages processed (offset: ${Z} → ${x})`);else await J.storeSessionSummary({agentId:A.agentId,roomId:B,entityId:Q.entityId!==A.agentId?Q.entityId:void 0,summary:k.summary,messageCount:_,lastMessageOffset:_,startTime:h,endTime:p,topics:k.topics,metadata:{keyPoints:k.keyPoints}}),L.info(`Created new summary for room ${B}: ${_} dialogue messages summarized (offset: 0 → ${_})`)}catch(Y){L.error({error:Y},"Error during summarization:")}},examples:[]};import{logger as P,ModelType as yB,composePromptFromState as cB}from"@elizaos/core";var m;((K)=>{K.EPISODIC="episodic";K.SEMANTIC="semantic";K.PROCEDURAL="procedural"})(m||={});var lB=`# Task: Extract Long-Term Memory (Strict Criteria)
 
-You are analyzing a conversation to extract ONLY the most
+You are analyzing a conversation to extract ONLY the most critical, persistent information about the user using cognitive science memory categories.
 
 # Recent Messages
 {{recentMessages}}
@@ -73,79 +74,136 @@ You are analyzing a conversation to extract ONLY the most important, persistent
 # Current Long-Term Memories
 {{existingMemories}}
 
-# Memory Categories
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
--
--
--
--
-
-**
--
--
--
--
-
-
-
-
-
-
--
--
-
-
+# Memory Categories (Based on Cognitive Science)
+
+## 1. EPISODIC Memory
+Personal experiences and specific events with temporal/spatial context.
+**Examples:**
+- "User completed migration project from MongoDB to PostgreSQL in Q2 2024"
+- "User encountered authentication bug in production on March 15th"
+- "User had a negative experience with Docker networking in previous job"
+
+**Requirements:**
+- Must include WHO did WHAT, WHEN/WHERE
+- Must be a specific, concrete event (not a pattern)
+- Must have significant impact or relevance to future work
+
+## 2. SEMANTIC Memory
+General facts, concepts, knowledge, and established truths about the user.
+**Examples:**
+- "User is a senior backend engineer with 8 years experience"
+- "User specializes in distributed systems and microservices architecture"
+- "User's primary programming language is TypeScript"
+- "User works at Acme Corp as technical lead"
+
+**Requirements:**
+- Must be factual, timeless information
+- Must be explicitly stated or demonstrated conclusively
+- No speculation or inference from single instances
+- Core identity, expertise, or knowledge only
+
+## 3. PROCEDURAL Memory
+Skills, workflows, methodologies, and how-to knowledge.
+**Examples:**
+- "User follows strict TDD workflow: write tests first, then implementation"
+- "User prefers git rebase over merge to maintain linear history"
+- "User's debugging process: check logs → reproduce locally → binary search"
+- "User always writes JSDoc comments before implementing functions"
+
+**Requirements:**
+- Must describe HOW user does something
+- Must be a repeated, consistent pattern (seen 3+ times or explicitly stated as standard practice)
+- Must be a workflow, methodology, or skill application
+- Not one-off preferences
+
+# ULTRA-STRICT EXTRACTION CRITERIA
+
+## ✅ DO EXTRACT (Only These):
+
+**EPISODIC:**
+- Significant completed projects or milestones
+- Important bugs, incidents, or problems encountered
+- Major decisions made with lasting impact
+- Formative experiences that shape future work
+
+**SEMANTIC:**
+- Professional identity (role, title, company)
+- Core expertise and specializations (stated explicitly or demonstrated conclusively)
+- Primary languages, frameworks, or tools (not exploratory use)
+- Established facts about their work context
+
+**PROCEDURAL:**
+- Consistent workflows demonstrated 3+ times or explicitly stated
+- Standard practices user always follows
+- Methodology preferences with clear rationale
+- Debugging, testing, or development processes
+
+## ❌ NEVER EXTRACT:
+
+- **One-time requests or tasks** (e.g., "can you generate an image", "help me debug this")
+- **Casual conversations** without lasting significance
+- **Exploratory questions** (e.g., "how does X work?")
+- **Temporary context** (current bug, today's task)
+- **Preferences from single occurrence** (e.g., user asked for code once)
+- **Social pleasantries** (thank you, greetings)
+- **Testing or experimentation** (trying out a feature)
+- **Common patterns everyone has** (likes clear explanations)
+- **Situational information** (working on feature X today)
+- **Opinions without persistence** (single complaint, isolated praise)
+- **General knowledge** (not specific to user)
+
+# Quality Gates (ALL Must Pass)
+
+1. **Significance Test**: Will this matter in 3+ months?
+2. **Specificity Test**: Is this concrete and actionable?
+3. **Evidence Test**: Is there strong evidence (3+ instances OR explicit self-identification)?
+4. **Uniqueness Test**: Is this specific to THIS user (not generic)?
+5. **Confidence Test**: Confidence must be >= 0.85 (be VERY conservative)
+6. **Non-Redundancy Test**: Does this add NEW information not in existing memories?
+
+# Confidence Scoring (Be Conservative)
+
+- **0.95-1.0**: User explicitly stated as core identity/practice AND demonstrated multiple times
+- **0.85-0.94**: User explicitly stated OR consistently demonstrated 5+ times
+- **0.75-0.84**: Strong pattern (3-4 instances) with supporting context
+- **Below 0.75**: DO NOT EXTRACT (insufficient evidence)
+
+# Critical Instructions
+
+1. **Default to NOT extracting** - When in doubt, skip it
+2. **Require overwhelming evidence** - One or two mentions is NOT enough
+3. **Focus on what's PERSISTENT** - Not what's temporary or situational
+4. **Verify against existing memories** - Don't duplicate or contradict
+5. **Maximum 2-3 extractions per run** - Quality over quantity
+
+**If there are no qualifying facts (which is common), respond with <memories></memories>**
+
+# Response Format
 
-# Instructions
-Extract ONLY truly important NEW information that meets the strict criteria above. For each item:
-- Determine which category it belongs to
-- Write a clear, factual statement
-- Assess confidence (0.0 to 1.0) - BE CONSERVATIVE
-- Require strong evidence before extraction
-
-**When in doubt, DO NOT extract.** It's better to miss temporary information than to clutter long-term memory.
-
-If there are no new long-term facts to extract, respond with <memories></memories>
-
-Respond in this XML format:
 <memories>
 <memory>
-<category>
-<content>User is a
+<category>semantic</category>
+<content>User is a senior TypeScript developer with 8 years of backend experience</content>
 <confidence>0.95</confidence>
 </memory>
 <memory>
-<category>
-<content>
-<confidence>0.
+<category>procedural</category>
+<content>User follows TDD workflow: writes tests before implementation, runs tests after each change</content>
+<confidence>0.88</confidence>
 </memory>
-
-
-
-
-
-
-`)
-
-
-
-
+<memory>
+<category>episodic</category>
+<content>User led database migration from MongoDB to PostgreSQL for payment system in Q2 2024</content>
+<confidence>0.92</confidence>
+</memory>
+
</memories>`;function dB(A){let Q=A.matchAll(/<memory>[\s\S]*?<category>(.*?)<\/category>[\s\S]*?<content>(.*?)<\/content>[\s\S]*?<confidence>(.*?)<\/confidence>[\s\S]*?<\/memory>/g),J=[];for(let K of Q){let B=K[1].trim(),Y=K[2].trim(),Z=parseFloat(K[3].trim());if(!Object.values(m).includes(B)){P.warn(`Invalid memory category: ${B}`);continue}if(Y&&!isNaN(Z))J.push({category:B,content:Y,confidence:Z})}return J}var VB={name:"LONG_TERM_MEMORY_EXTRACTION",description:"Extracts long-term facts about users from conversations",similes:["MEMORY_EXTRACTION","FACT_LEARNING","USER_PROFILING"],alwaysRun:!0,validate:async(A,Q)=>{if(P.debug(`Validating long-term memory extraction for message: ${Q.content?.text}`),Q.entityId===A.agentId)return P.debug("Skipping long-term memory extraction for agent's own message"),!1;if(!Q.content?.text)return P.debug("Skipping long-term memory extraction for message without text"),!1;let J=A.getService("memory");if(!J)return P.debug("MemoryService not found"),!1;if(!J.getConfig().longTermExtractionEnabled)return P.debug("Long-term memory extraction is disabled"),!1;let B=await A.countMemories(Q.roomId,!1,"messages"),Y=await J.shouldRunExtraction(Q.entityId,Q.roomId,B);return P.debug(`Should run extraction: ${Y}`),Y},handler:async(A,Q)=>{let J=A.getService("memory");if(!J){P.error("MemoryService not found");return}let K=J.getConfig(),{entityId:B,roomId:Y}=Q;try{P.info(`Extracting long-term memories for entity ${B}`);let $=(await A.getMemories({tableName:"messages",roomId:Y,count:20,unique:!1})).sort((V,R)=>(V.createdAt||0)-(R.createdAt||0)).map((V)=>{return`${V.entityId===A.agentId?A.character.name:"User"}: ${V.content.text||"[non-text message]"}`}).join(`
+
`),G=await J.getLongTermMemories(B,void 0,30),_=G.length>0?G.map((V)=>`[${V.category}] ${V.content} (confidence: ${V.confidence})`).join(`
+
`):"None yet",H=await A.composeState(Q),F=cB({state:{...H,recentMessages:$,existingMemories:_},template:lB}),T=await A.useModel(yB.TEXT_LARGE,{prompt:F}),q=dB(T);P.info(`Extracted ${q.length} long-term memories`);for(let V of q)if(V.confidence>=Math.max(K.longTermConfidenceThreshold,0.85))await J.storeLongTermMemory({agentId:A.agentId,entityId:B,category:V.category,content:V.content,confidence:V.confidence,source:"conversation",metadata:{roomId:Y,extractedAt:new Date().toISOString()}}),P.info(`Stored long-term memory: [${V.category}] ${V.content.substring(0,50)}...`);else P.debug(`Skipped low-confidence memory: ${V.content} (confidence: ${V.confidence}, threshold: ${Math.max(K.longTermConfidenceThreshold,0.85)})`);let U=await A.countMemories(Y,!1,"messages");await J.setLastExtractionCheckpoint(B,Y,U),P.debug(`Updated extraction checkpoint to ${U} for entity ${B} in room ${Y}`)}catch(Z){P.error({error:Z},"Error during long-term memory extraction:")}},examples:[]};import{logger as gB,addHeader as uB}from"@elizaos/core";var o={name:"LONG_TERM_MEMORY",description:"Persistent facts and preferences about the user",position:50,get:async(A,Q,J)=>{try{let K=A.getService("memory");if(!K)return{data:{memories:[]},values:{longTermMemories:""},text:""};let{entityId:B}=Q;if(B===A.agentId)return{data:{memories:[]},values:{longTermMemories:""},text:""};let Y=await K.getLongTermMemories(B,void 0,25);if(Y.length===0)return{data:{memories:[]},values:{longTermMemories:""},text:""};let Z=await K.getFormattedLongTermMemories(B),$=uB("# What I Know About You",Z),G=new Map;for(let H of Y){let F=G.get(H.category)||0;G.set(H.category,F+1)}let _=Array.from(G.entries()).map(([H,F])=>`${H}: ${F}`).join(", ");return{data:{memories:Y,categoryCounts:Object.fromEntries(G)},values:{longTermMemories:$,memoryCategories:_},text:$}}catch(K){return gB.error({error:K},"Error in longTermMemoryProvider:"),{data:{memories:[]},values:{longTermMemories:""},text:""}}}};import{addHeader as GB,logger as iB}from"@elizaos/core";var t={name:"SUMMARIZED_CONTEXT",description:"Provides summarized context from previous conversations",position:96,get:async(A,Q,J)=>{try{let K=A.getService("memory"),{roomId:B}=Q;if(!K)return{data:{summary:null},values:{sessionSummaries:"",sessionSummariesWithTopics:""},text:""};let Y=await K.getCurrentSessionSummary(B);if(!Y)return{data:{summary:null},values:{sessionSummaries:"",sessionSummariesWithTopics:""},text:""};let Z=`${Y.messageCount} messages`,$=new Date(Y.startTime).toLocaleDateString(),G=`**Previous Conversation** (${Z}, ${$})
+
`;G+=Y.summary;let _=G;if(Y.topics&&Y.topics.length>0)_+=`
+
*Topics: ${Y.topics.join(", ")}*`;let H=GB("# Conversation Summary",G),F=GB("# Conversation Summary",_);return{data:{summary:Y},values:{sessionSummaries:H,sessionSummariesWithTopics:F},text:F}}catch(K){return iB.error({error:K},"Error in contextSummaryProvider:"),{data:{summary:null},values:{sessionSummaries:"",sessionSummariesWithTopics:""},text:""}}}};import{addHeader as M,ChannelType as XB,formatMessages as nB,formatPosts as aB,getEntityDetails as sB,logger as mB}from"@elizaos/core";var r={name:"RECENT_MESSAGES",description:"Provides recent conversation messages with detailed context",position:94,get:async(A,Q,J)=>{try{let K=A.getService("memory"),{roomId:B}=Q,Y=K?.getConfig()||{shortTermSummarizationThreshold:16,shortTermRetainRecent:6},Z=A.getConversationLength(),$=Y.shortTermRetainRecent,G=0,_=!1;if(K){let X=await K.getCurrentSessionSummary(B);if(X)_=!0,G=X.lastMessageOffset||0}if(!_){if((await A.getMemories({tableName:"messages",roomId:B,count:Z,unique:!1})).filter((N)=>!(N.content?.type==="action_result"&&N.metadata?.type==="action_result")&&(N.metadata?.type==="agent_response_message"||N.metadata?.type==="user_message")).length<Y.shortTermSummarizationThreshold)$=Z}let[H,F,T]=await Promise.all([sB({runtime:A,roomId:B}),A.getRoom(B),A.getMemories({tableName:"messages",roomId:B,count:$,unique:!1,start:G})]),q=F?.type?F.type===XB.FEED||F.type===XB.THREAD:!1,U=T.filter((X)=>!(X.content?.type==="action_result"&&X.metadata?.type==="action_result")&&(X.metadata?.type==="agent_response_message"||X.metadata?.type==="user_message")),V="";if(U.length>0){if(q)V=aB({messages:U,entities:H,conversationHeader:!1});else V=nB({messages:U,entities:H});if(V)V=M("# Recent Messages",V)}let R=(X,z)=>{return X.sort((N,e)=>(N.createdAt||0)-(e.createdAt||0)).map((N)=>{let jB=H.find((FB)=>FB.id===N.entityId)?.names[0]||(N.entityId===A.agentId?A.character.name:"Unknown"),HB=N.createdAt?new Date(N.createdAt).toLocaleString():"Unknown time",UB=N.content.text||"",WB=z&&N.content.internalMonologue?`
+
[Internal thought: ${N.content.internalMonologue}]`:"";return`[${HB}] ${jB}: ${UB}${WB}`}).join(`
+
`)},v=M("# Conversation Messages",R(U,!1)),D=M("# Conversation Messages",R(U,!0)),d=Q.metadata,k=H.find((X)=>X.id===Q.entityId)?.names[0]||d?.entityName||"Unknown User",x=Q.content.text,C=!!x?.trim(),f=C?M("# Received Message",`${k}: ${x}`):"",h=C?M("# Focus your response",`You are replying to the above message from **${k}**. Keep your answer relevant to that message.`):"",p=[V,f,h].filter(Boolean).join(`
 
-`);return{data:{
+
`);return{data:{messages:U},values:{recentMessages:V,conversationLog:v,conversationLogWithAgentThoughts:D,...f&&{receivedMessageHeader:f},...h&&{focusHeader:h}},text:p}}catch(K){return mB.error({error:K},"Error in recentMessagesProvider:"),{data:{messages:[]},values:{recentMessages:"",conversationLog:"",conversationLogWithAgentThoughts:"",receivedMessageHeader:"",focusHeader:""},text:""}}}};var oB={name:"memory",description:"Advanced memory management with conversation summarization and long-term persistent memory",services:[I],evaluators:[$B,VB],providers:[o,t,r],schema:s},tB=oB;export{E as sessionSummaries,r as recentMessagesProvider,oB as memoryPlugin,YB as memoryAccessLogs,o as longTermMemoryProvider,j as longTermMemories,tB as default,t as contextSummaryProvider,I as MemoryService,m as LongTermMemoryCategory};
 
-//# debugId=
+
//# debugId=0DCBA5EC5774080B64756E2164756E21
|