@elizaos/plugin-memory 1.0.4 → 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +72 -31
- package/dist/browser/index.browser.js +148 -48
- package/dist/browser/index.browser.js.map +7 -7
- package/dist/cjs/index.node.cjs +493 -108
- package/dist/cjs/index.node.js.map +7 -7
- package/dist/evaluators/summarization.d.ts +13 -6
- package/dist/node/index.node.js +493 -108
- package/dist/node/index.node.js.map +7 -7
- package/dist/providers/short-term-memory.d.ts +8 -6
- package/dist/services/memory-service.d.ts +6 -3
- package/dist/types/index.d.ts +12 -10
- package/package.json +1 -1
package/README.md
CHANGED
@@ -14,21 +14,29 @@ Advanced memory management plugin for ElizaOS that provides intelligent conversa
 ### 🧠 Long-term Memory (Persistent Facts)
 
 - **Intelligent Extraction**: Automatically learns facts about users from conversations
-- **…
+- **Cognitive Science Based**: Organizes information into 3 core memory types (episodic, semantic, procedural)
+- **Strict Criteria**: Only extracts truly significant, persistent information
 - **Confidence Scoring**: Tracks reliability of stored information
-- **Cross-session Persistence**: Remembers user…
+- **Cross-session Persistence**: Remembers user context across all interactions
 
-### 📊 Memory Categories
+### 📊 Memory Categories (Based on Cognitive Science)
 
-… (old lines 23-31 not captured in this view)
+The plugin uses the three fundamental types of long-term memory from cognitive science:
+
+1. **Episodic Memory**: Personal experiences and specific events
+   - Example: "User completed migration from MongoDB to PostgreSQL in Q2 2024"
+   - Contains: WHO did WHAT, WHEN/WHERE
+   - Use for: Significant project milestones, important incidents, formative experiences
+
+2. **Semantic Memory**: General facts, concepts, and knowledge
+   - Example: "User is a senior TypeScript developer with 8 years experience"
+   - Contains: Factual, timeless information
+   - Use for: Professional identity, core expertise, established facts about work context
+
+3. **Procedural Memory**: Skills, workflows, and how-to knowledge
+   - Example: "User follows TDD workflow: writes tests first, then implementation"
+   - Contains: HOW user does things
+   - Use for: Consistent workflows, methodologies, debugging processes
 
 ## Installation
 
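The three category names above ship as string values in the compiled bundle's `LongTermMemoryCategory` enum; reconstructed from the minified dist output further down, the source enum corresponds to:

```typescript
// Reconstructed from the minified dist, where the compiled enum assigns
// EPISODIC = "episodic", SEMANTIC = "semantic", PROCEDURAL = "procedural".
export enum LongTermMemoryCategory {
  EPISODIC = 'episodic',
  SEMANTIC = 'semantic',
  PROCEDURAL = 'procedural',
}
```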
@@ -58,12 +66,16 @@ Configure the plugin via environment variables in your `.env` file:
 
 ```env
 # Short-term Memory Settings
-MEMORY_SUMMARIZATION_THRESHOLD=…
-…
+MEMORY_SUMMARIZATION_THRESHOLD=16 # Messages before summarization starts (default: 16)
+MEMORY_SUMMARIZATION_INTERVAL=10 # Update summary every N messages (default: 10)
+MEMORY_RETAIN_RECENT=10 # Recent messages to keep (default: 10)
+MEMORY_MAX_NEW_MESSAGES=20 # Max new messages in summary update (default: 20)
 
 # Long-term Memory Settings
-MEMORY_LONG_TERM_ENABLED=true
-…
+MEMORY_LONG_TERM_ENABLED=true # Enable long-term extraction (default: true)
+MEMORY_EXTRACTION_THRESHOLD=30 # Min messages before extraction starts (default: 30)
+MEMORY_EXTRACTION_INTERVAL=10 # Run extraction every N messages (default: 10)
+MEMORY_CONFIDENCE_THRESHOLD=0.85 # Minimum confidence to store (default: 0.85)
 ```
 
 ### Manual Memory Storage
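For reference, each variable is read with `runtime.getSetting` and overrides a built-in default when present; a paraphrased sketch of the bundled `initialize` logic (the config shape and local names here are illustrative, the behavior mirrors the minified dist):

```typescript
import type { IAgentRuntime } from '@elizaos/core';

// Illustrative shape; the real config type lives in the plugin's dist typings.
interface MemoryConfigSketch {
  shortTermSummarizationThreshold: number;
  longTermExtractionEnabled: boolean;
  longTermConfidenceThreshold: number;
}

// Paraphrase of the bundled MemoryService.initialize: integer settings go
// through parseInt, the confidence threshold through parseFloat, and the
// boolean flag only changes on an explicit "true"/"false" string.
function applyEnvSettings(runtime: IAgentRuntime, config: MemoryConfigSketch): void {
  const threshold = runtime.getSetting('MEMORY_SUMMARIZATION_THRESHOLD');
  if (threshold) config.shortTermSummarizationThreshold = parseInt(threshold, 10);

  const enabled = runtime.getSetting('MEMORY_LONG_TERM_ENABLED');
  if (enabled === 'false') config.longTermExtractionEnabled = false;
  else if (enabled === 'true') config.longTermExtractionEnabled = true;

  const confidence = runtime.getSetting('MEMORY_CONFIDENCE_THRESHOLD');
  if (confidence) config.longTermConfidenceThreshold = parseFloat(confidence);
}
```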
@@ -72,13 +84,13 @@ Users can explicitly ask the agent to remember information:
 
 ```
 User: "Remember that I prefer TypeScript over JavaScript"
-Agent: I've made a note of that in my…
+Agent: I've made a note of that in my Semantic memory: "User prefers TypeScript over JavaScript"
 
 User: "Keep in mind I'm working on a startup project"
-Agent: I've made a note of that in my…
+Agent: I've made a note of that in my Episodic memory: "User is working on a startup project"
 
-User: "Don't forget I use…
-Agent: I've made a note of that in my…
+User: "Don't forget I always use TDD"
+Agent: I've made a note of that in my Procedural memory: "User follows TDD (Test-Driven Development) methodology"
 ```
 
 ### Accessing the Memory Service
@@ -93,7 +105,7 @@ const memoryService = runtime.getService('memory') as MemoryService;
 await memoryService.storeLongTermMemory({
   agentId: runtime.agentId,
   entityId: userId,
-  category: LongTermMemoryCategory.…
+  category: LongTermMemoryCategory.SEMANTIC,
   content: 'User prefers concise responses',
   confidence: 0.9,
   source: 'manual',
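Reading memories back uses the same service. The signatures below are visible in the bundled dist (`getLongTermMemories(entityId, category?, limit = 10)` and `getFormattedLongTermMemories(entityId)`), though the surrounding snippet is ours and continues the example above:

```typescript
// Results are ordered by confidence, then recency, per the bundled query.
const semanticFacts = await memoryService.getLongTermMemories(
  userId,
  LongTermMemoryCategory.SEMANTIC,
  10, // limit
);

// Markdown digest grouped by category; the LONG_TERM_MEMORY provider injects
// this under a "# What I Know About You" header in future prompts.
const digest = await memoryService.getFormattedLongTermMemories(userId);
```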
@@ -170,11 +182,29 @@ No manual migration is required - the schema is handled automatically by the run
 
 ### Long-term Memory Flow
 
-1. **…
-2. **…
-3. **…
-4. **…
-5. **…
+1. **Warm-up Period**: Extraction waits until 30+ messages (configurable) to ensure meaningful patterns
+2. **Monitoring**: longTermExtractionEvaluator runs periodically (every 10 messages after threshold)
+3. **Analysis**: LLM analyzes conversation for **persistent, important** facts worth remembering
+4. **Strict Filtering**: Applies cognitive science principles to extract only truly significant information
+5. **Storage**: High-confidence facts (≥0.85) stored in long_term_memories table
+6. **Retrieval**: longTermMemoryProvider injects relevant facts in all future conversations
+
+**Ultra-Strict Extraction Criteria**: The evaluator uses stringent criteria to prevent memory pollution:
+
+- ✅ **DO Extract:**
+  - **Episodic**: Significant milestones, important incidents, major decisions with lasting impact
+  - **Semantic**: Professional identity, core expertise, established facts (explicitly stated or conclusively demonstrated)
+  - **Procedural**: Consistent workflows (3+ occurrences or explicitly stated), standard practices, methodologies
+
+- ❌ **NEVER Extract:**
+  - One-time requests or tasks
+  - Casual conversations without lasting significance
+  - Exploratory questions or testing
+  - Temporary context or situational information
+  - Preferences from single occurrence
+  - Social pleasantries
+  - Common patterns everyone has
+  - General knowledge not specific to user
 
 ### Manual Memory Flow
 
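Steps 1-2 above are gated by a checkpoint in the bundled `shouldRunExtraction`; a paraphrased sketch of that logic (names are ours, the arithmetic matches the minified dist):

```typescript
// Extraction fires at most once per interval-sized bucket of messages,
// and never before the warm-up threshold is reached.
function shouldRunExtraction(
  messageCount: number,
  threshold: number,      // MEMORY_EXTRACTION_THRESHOLD, default 30
  interval: number,       // MEMORY_EXTRACTION_INTERVAL, default 10
  lastCheckpoint: number, // message count recorded at the previous run
): boolean {
  if (messageCount < threshold) return false;
  const bucket = Math.floor(messageCount / interval) * interval;
  return bucket > lastCheckpoint;
}
```

With the defaults this fires around message counts 30, 40, 50, and so on, at most once per 10-message bucket; after each run the checkpoint is advanced to the current message count.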
@@ -221,10 +251,19 @@ No manual migration is required - the schema is handled automatically by the run
 
 ### Configuration Tips
 
-…
-- **…
-- **…
-- **…
+**Short-term Memory:**
+- **High-frequency chatbots**: Lower summarization threshold (10-15 messages)
+- **Long-form conversations**: Higher threshold (20-30 messages)
+- **Adjust retention**: Keep more recent messages for immediate context
+
+**Long-term Memory:**
+- **Conservative extraction**: Keep threshold at 30+ messages for better pattern recognition (default)
+- **Aggressive extraction**: Lower threshold to 20 messages if needed (may reduce quality)
+- **Balanced approach**: Default 0.85 confidence threshold ensures high-quality extractions
+- **More permissive**: Lower confidence to 0.80 for more extractions (risk of lower quality)
+- **Most strict**: Raise confidence to 0.90 for only the most certain facts
+- **Frequent updates**: Lower extraction interval to 5-8 messages for faster learning
+- **Conservative updates**: Keep default 10+ message interval to prevent over-extraction
 
 ## Advanced Features
 
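Combining several of these tips, an illustrative `.env` for a high-frequency chatbot might look like this (example values chosen from the ranges above, not shipped defaults):

```env
# Illustrative high-frequency chatbot profile
MEMORY_SUMMARIZATION_THRESHOLD=12
MEMORY_SUMMARIZATION_INTERVAL=8
MEMORY_RETAIN_RECENT=10
MEMORY_EXTRACTION_THRESHOLD=30
MEMORY_EXTRACTION_INTERVAL=8
MEMORY_CONFIDENCE_THRESHOLD=0.85
```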
@@ -246,7 +285,7 @@ Use the `memory_access_logs` table to:
 
 ### Custom Categories
 
-…
+The plugin uses three scientifically-grounded memory types from cognitive science. If you need additional categories for domain-specific use cases, you can extend the enum:
 
 ```typescript
 export enum CustomMemoryCategory {
@@ -256,6 +295,8 @@ export enum CustomMemoryCategory {
 }
 ```
 
+**Note**: Consider carefully whether your custom category truly represents a different type of memory, or if it can be classified under episodic (events), semantic (facts), or procedural (how-to) memory.
+
 ## Testing
 
 Run the test suite:
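Since the `long_term_memories.category` column is free-form text in the bundled schema, a custom value passes through manual storage unchanged (the automatic extractor still emits only the three built-in categories); a hypothetical sketch, which may need a cast depending on the published typings:

```typescript
// Hypothetical domain-specific category; the schema stores category as plain
// text, so the value is persisted as-is.
export enum CustomMemoryCategory {
  DIETARY = 'dietary',
}

await memoryService.storeLongTermMemory({
  agentId: runtime.agentId,
  entityId: userId,
  // Cast only if the typings constrain category to the built-in enum.
  category: CustomMemoryCategory.DIETARY as unknown as LongTermMemoryCategory,
  content: 'User is allergic to peanuts',
  confidence: 0.9,
  source: 'manual',
});
```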
package/dist/browser/index.browser.js
CHANGED
@@ -1,8 +1,8 @@
-
var zJ=Object.defineProperty;var DJ=(J,Q)=>{for(var Y in Q)zJ(J,Y,{get:Q[Y],enumerable:!0,configurable:!0,set:(W)=>Q[Y]=()=>W})};import{Service as MJ,logger as H}from"@elizaos/core";import{eq as D,and as g,desc as p,sql as jJ,cosineDistance as yJ,gte as lJ}from"drizzle-orm";var JJ={};DJ(JJ,{sessionSummaries:()=>q,memoryAccessLogs:()=>XJ,longTermMemories:()=>X});import{sql as $J}from"drizzle-orm";import{pgTable as RJ,text as m,integer as bJ,jsonb as CJ,real as AJ,index as l,varchar as s,timestamp as t}from"drizzle-orm/pg-core";var X=RJ("long_term_memories",{id:s("id",{length:36}).primaryKey(),agentId:s("agent_id",{length:36}).notNull(),entityId:s("entity_id",{length:36}).notNull(),category:m("category").notNull(),content:m("content").notNull(),metadata:CJ("metadata"),embedding:AJ("embedding").array(),confidence:AJ("confidence").default(1),source:m("source"),createdAt:t("created_at").default($J`now()`).notNull(),updatedAt:t("updated_at").default($J`now()`).notNull(),lastAccessedAt:t("last_accessed_at"),accessCount:bJ("access_count").default(0)},(J)=>({agentEntityIdx:l("long_term_memories_agent_entity_idx").on(J.agentId,J.entityId),categoryIdx:l("long_term_memories_category_idx").on(J.category),confidenceIdx:l("long_term_memories_confidence_idx").on(J.confidence),createdAtIdx:l("long_term_memories_created_at_idx").on(J.createdAt)}));import{sql as UJ}from"drizzle-orm";import{pgTable as fJ,text as hJ,integer as VJ,jsonb as GJ,real as TJ,index as r,varchar as c,timestamp as d}from"drizzle-orm/pg-core";var q=fJ("session_summaries",{id:c("id",{length:36}).primaryKey(),agentId:c("agent_id",{length:36}).notNull(),roomId:c("room_id",{length:36}).notNull(),entityId:c("entity_id",{length:36}),summary:hJ("summary").notNull(),messageCount:VJ("message_count").notNull(),lastMessageOffset:VJ("last_message_offset").notNull().default(0),startTime:d("start_time").notNull(),endTime:d("end_time").notNull(),topics:GJ("topics"),metadata:GJ("metadata"),embedding:TJ("embedding").array(),createdAt:d("created_at").default(UJ`now()`).notNull(),updatedAt:d("updated_at").default(UJ`now()`).notNull()},(J)=>({agentRoomIdx:r("session_summaries_agent_room_idx").on(J.agentId,J.roomId),entityIdx:r("session_summaries_entity_idx").on(J.entityId),startTimeIdx:r("session_summaries_start_time_idx").on(J.startTime)}));import{sql as vJ}from"drizzle-orm";import{pgTable as IJ,text as SJ,integer as xJ,real as pJ,index as e,varchar as u,timestamp as wJ}from"drizzle-orm/pg-core";var XJ=IJ("memory_access_logs",{id:u("id",{length:36}).primaryKey(),agentId:u("agent_id",{length:36}).notNull(),memoryId:u("memory_id",{length:36}).notNull(),memoryType:SJ("memory_type").notNull(),accessedAt:wJ("accessed_at").default(vJ`now()`).notNull(),roomId:u("room_id",{length:36}),relevanceScore:pJ("relevance_score"),wasUseful:xJ("was_useful")},(J)=>({memoryIdx:e("memory_access_logs_memory_idx").on(J.memoryId),agentIdx:e("memory_access_logs_agent_idx").on(J.agentId),accessedAtIdx:e("memory_access_logs_accessed_at_idx").on(J.accessedAt)}));class w extends MJ{static serviceType="memory";sessionMessageCounts;memoryConfig;lastExtractionCheckpoints;capabilityDescription="Advanced memory management with short-term summarization and long-term persistent facts";constructor(J){super(J);this.sessionMessageCounts=new Map,this.lastExtractionCheckpoints=new 
Map,this.memoryConfig={shortTermSummarizationThreshold:5,shortTermRetainRecent:10,longTermExtractionEnabled:!0,longTermVectorSearchEnabled:!1,longTermConfidenceThreshold:0.7,longTermExtractionInterval:5,summaryModelType:"TEXT_LARGE",summaryMaxTokens:2500}}static async start(J){let Q=new w(J);return await Q.initialize(J),Q}async stop(){H.info("MemoryService stopped")}async initialize(J){this.runtime=J;let Q=J.getSetting("MEMORY_SUMMARIZATION_THRESHOLD");if(Q)this.memoryConfig.shortTermSummarizationThreshold=parseInt(Q,10);let Y=J.getSetting("MEMORY_RETAIN_RECENT");if(Y)this.memoryConfig.shortTermRetainRecent=parseInt(Y,10);let W=J.getSetting("MEMORY_LONG_TERM_ENABLED");if(W==="false")this.memoryConfig.longTermExtractionEnabled=!1;else if(W==="true")this.memoryConfig.longTermExtractionEnabled=!0;let K=J.getSetting("MEMORY_CONFIDENCE_THRESHOLD");if(K)this.memoryConfig.longTermConfidenceThreshold=parseFloat(K);H.info({summarizationThreshold:this.memoryConfig.shortTermSummarizationThreshold,retainRecent:this.memoryConfig.shortTermRetainRecent,longTermEnabled:this.memoryConfig.longTermExtractionEnabled,extractionInterval:this.memoryConfig.longTermExtractionInterval,confidenceThreshold:this.memoryConfig.longTermConfidenceThreshold},"MemoryService initialized")}getDb(){let J=this.runtime.db;if(!J)throw Error("Database not available");return J}getConfig(){return{...this.memoryConfig}}updateConfig(J){this.memoryConfig={...this.memoryConfig,...J}}incrementMessageCount(J){let Y=(this.sessionMessageCounts.get(J)||0)+1;return this.sessionMessageCounts.set(J,Y),Y}resetMessageCount(J){this.sessionMessageCounts.set(J,0)}async shouldSummarize(J){return await this.runtime.countMemories(J,!1,"messages")>=this.memoryConfig.shortTermSummarizationThreshold}getExtractionKey(J,Q){return`memory:extraction:${J}:${Q}`}async getLastExtractionCheckpoint(J,Q){let Y=this.getExtractionKey(J,Q),W=this.lastExtractionCheckpoints.get(Y);if(W!==void 0)return W;try{let Z=await this.runtime.getCache(Y)??0;return this.lastExtractionCheckpoints.set(Y,Z),Z}catch(K){return H.warn({error:K},"Failed to get extraction checkpoint from cache"),0}}async setLastExtractionCheckpoint(J,Q,Y){let W=this.getExtractionKey(J,Q);this.lastExtractionCheckpoints.set(W,Y);try{await this.runtime.setCache(W,Y),H.debug(`Set extraction checkpoint for ${J} in room ${Q} at message count ${Y}`)}catch(K){H.error({error:K},"Failed to persist extraction checkpoint to cache")}}async shouldRunExtraction(J,Q,Y){let W=this.memoryConfig.longTermExtractionInterval,K=await this.getLastExtractionCheckpoint(J,Q),Z=Math.floor(Y/W)*W,$=Y>=W&&Z>K;return H.debug({entityId:J,roomId:Q,currentMessageCount:Y,interval:W,lastCheckpoint:K,currentCheckpoint:Z,shouldRun:$},"Extraction check"),$}async storeLongTermMemory(J){let Q=this.getDb(),Y=crypto.randomUUID(),W=new Date,K={id:Y,createdAt:W,updatedAt:W,accessCount:0,...J};try{await Q.insert(X).values({id:K.id,agentId:K.agentId,entityId:K.entityId,category:K.category,content:K.content,metadata:K.metadata||{},embedding:K.embedding,confidence:K.confidence,source:K.source,accessCount:K.accessCount,createdAt:W,updatedAt:W,lastAccessedAt:K.lastAccessedAt})}catch(Z){throw H.error({error:Z},"Failed to store long-term memory"),Z}return H.info(`Stored long-term memory: ${K.category} for entity ${K.entityId}`),K}async getLongTermMemories(J,Q,Y=10){let W=this.getDb(),K=[D(X.agentId,this.runtime.agentId),D(X.entityId,J)];if(Q)K.push(D(X.category,Q));return(await 
W.select().from(X).where(g(...K)).orderBy(p(X.confidence),p(X.updatedAt)).limit(Y)).map(($)=>({id:$.id,agentId:$.agentId,entityId:$.entityId,category:$.category,content:$.content,metadata:$.metadata,embedding:$.embedding,confidence:$.confidence,source:$.source,createdAt:$.createdAt,updatedAt:$.updatedAt,lastAccessedAt:$.lastAccessedAt,accessCount:$.accessCount}))}async updateLongTermMemory(J,Q){let Y=this.getDb(),W={updatedAt:new Date};if(Q.content!==void 0)W.content=Q.content;if(Q.metadata!==void 0)W.metadata=Q.metadata;if(Q.confidence!==void 0)W.confidence=Q.confidence;if(Q.embedding!==void 0)W.embedding=Q.embedding;if(Q.lastAccessedAt!==void 0)W.lastAccessedAt=Q.lastAccessedAt;if(Q.accessCount!==void 0)W.accessCount=Q.accessCount;await Y.update(X).set(W).where(D(X.id,J)),H.info(`Updated long-term memory: ${J}`)}async deleteLongTermMemory(J){await this.getDb().delete(X).where(D(X.id,J)),H.info(`Deleted long-term memory: ${J}`)}async getCurrentSessionSummary(J){let Y=await this.getDb().select().from(q).where(g(D(q.agentId,this.runtime.agentId),D(q.roomId,J))).orderBy(p(q.updatedAt)).limit(1);if(Y.length===0)return null;let W=Y[0];return{id:W.id,agentId:W.agentId,roomId:W.roomId,entityId:W.entityId,summary:W.summary,messageCount:W.messageCount,lastMessageOffset:W.lastMessageOffset,startTime:W.startTime,endTime:W.endTime,topics:W.topics||[],metadata:W.metadata,embedding:W.embedding,createdAt:W.createdAt,updatedAt:W.updatedAt}}async storeSessionSummary(J){let Q=this.getDb(),Y=crypto.randomUUID(),W=new Date,K={id:Y,createdAt:W,updatedAt:W,...J};return await Q.insert(q).values({id:K.id,agentId:K.agentId,roomId:K.roomId,entityId:K.entityId||null,summary:K.summary,messageCount:K.messageCount,lastMessageOffset:K.lastMessageOffset,startTime:K.startTime,endTime:K.endTime,topics:K.topics||[],metadata:K.metadata||{},embedding:K.embedding,createdAt:W,updatedAt:W}),H.info(`Stored session summary for room ${K.roomId}`),K}async updateSessionSummary(J,Q){let Y=this.getDb(),W={updatedAt:new Date};if(Q.summary!==void 0)W.summary=Q.summary;if(Q.messageCount!==void 0)W.messageCount=Q.messageCount;if(Q.lastMessageOffset!==void 0)W.lastMessageOffset=Q.lastMessageOffset;if(Q.endTime!==void 0)W.endTime=Q.endTime;if(Q.topics!==void 0)W.topics=Q.topics;if(Q.metadata!==void 0)W.metadata=Q.metadata;if(Q.embedding!==void 0)W.embedding=Q.embedding;await Y.update(q).set(W).where(D(q.id,J)),H.info(`Updated session summary: ${J}`)}async getSessionSummaries(J,Q=5){return(await this.getDb().select().from(q).where(g(D(q.agentId,this.runtime.agentId),D(q.roomId,J))).orderBy(p(q.updatedAt)).limit(Q)).map((K)=>({id:K.id,agentId:K.agentId,roomId:K.roomId,entityId:K.entityId,summary:K.summary,messageCount:K.messageCount,lastMessageOffset:K.lastMessageOffset,startTime:K.startTime,endTime:K.endTime,topics:K.topics||[],metadata:K.metadata,embedding:K.embedding,createdAt:K.createdAt,updatedAt:K.updatedAt}))}async searchLongTermMemories(J,Q,Y=5,W=0.7){if(!this.memoryConfig.longTermVectorSearchEnabled)return H.warn("Vector search is not enabled, falling back to recent memories"),this.getLongTermMemories(J,void 0,Y);let K=this.getDb();try{let Z=Q.map((_)=>Number.isFinite(_)?Number(_.toFixed(6)):0),$=jJ`1 - (${yJ(X.embedding,Z)})`,U=[D(X.agentId,this.runtime.agentId),D(X.entityId,J),jJ`${X.embedding} IS NOT NULL`];if(W>0)U.push(lJ($,W));return(await 
K.select({memory:X,similarity:$}).from(X).where(g(...U)).orderBy(p($)).limit(Y)).map((_)=>({id:_.memory.id,agentId:_.memory.agentId,entityId:_.memory.entityId,category:_.memory.category,content:_.memory.content,metadata:_.memory.metadata,embedding:_.memory.embedding,confidence:_.memory.confidence,source:_.memory.source,createdAt:_.memory.createdAt,updatedAt:_.memory.updatedAt,lastAccessedAt:_.memory.lastAccessedAt,accessCount:_.memory.accessCount,similarity:_.similarity}))}catch(Z){return H.warn({error:Z},"Vector search failed, falling back to recent memories"),this.getLongTermMemories(J,void 0,Y)}}async getFormattedLongTermMemories(J){let Q=await this.getLongTermMemories(J,void 0,20);if(Q.length===0)return"";let Y=new Map;for(let K of Q){if(!Y.has(K.category))Y.set(K.category,[]);Y.get(K.category)?.push(K)}let W=[];for(let[K,Z]of Y.entries()){let $=K.split("_").map((V)=>V.charAt(0).toUpperCase()+V.slice(1)).join(" "),U=Z.map((V)=>`- ${V.content}`).join(`
-`);…
-${U}`)}return…
+
var MJ=Object.defineProperty;var wJ=(J,Y)=>{for(var Q in Y)MJ(J,Q,{get:Y[Q],enumerable:!0,configurable:!0,set:(Z)=>Y[Q]=()=>Z})};import{Service as rJ,logger as f}from"@elizaos/core";import{eq as k,and as i,desc as JJ,sql as CJ,cosineDistance as tJ,gte as eJ}from"drizzle-orm";var kJ={};wJ(kJ,{sessionSummaries:()=>D,memoryAccessLogs:()=>bJ,longTermMemories:()=>H});import{sql as zJ}from"drizzle-orm";import{pgTable as yJ,text as OJ,integer as lJ,jsonb as cJ,real as qJ,index as VJ,varchar as HJ,timestamp as PJ}from"drizzle-orm/pg-core";var H=yJ("long_term_memories",{id:HJ("id",{length:36}).primaryKey(),agentId:HJ("agent_id",{length:36}).notNull(),entityId:HJ("entity_id",{length:36}).notNull(),category:OJ("category").notNull(),content:OJ("content").notNull(),metadata:cJ("metadata"),embedding:qJ("embedding").array(),confidence:qJ("confidence").default(1),source:OJ("source"),createdAt:PJ("created_at").default(zJ`now()`).notNull(),updatedAt:PJ("updated_at").default(zJ`now()`).notNull(),lastAccessedAt:PJ("last_accessed_at"),accessCount:lJ("access_count").default(0)},(J)=>({agentEntityIdx:VJ("long_term_memories_agent_entity_idx").on(J.agentId,J.entityId),categoryIdx:VJ("long_term_memories_category_idx").on(J.category),confidenceIdx:VJ("long_term_memories_confidence_idx").on(J.confidence),createdAtIdx:VJ("long_term_memories_created_at_idx").on(J.createdAt)}));import{sql as DJ}from"drizzle-orm";import{pgTable as dJ,text as uJ,integer as RJ,jsonb as fJ,real as gJ,index as LJ,varchar as WJ,timestamp as AJ}from"drizzle-orm/pg-core";var D=dJ("session_summaries",{id:WJ("id",{length:36}).primaryKey(),agentId:WJ("agent_id",{length:36}).notNull(),roomId:WJ("room_id",{length:36}).notNull(),entityId:WJ("entity_id",{length:36}),summary:uJ("summary").notNull(),messageCount:RJ("message_count").notNull(),lastMessageOffset:RJ("last_message_offset").notNull().default(0),startTime:AJ("start_time").notNull(),endTime:AJ("end_time").notNull(),topics:fJ("topics"),metadata:fJ("metadata"),embedding:gJ("embedding").array(),createdAt:AJ("created_at").default(DJ`now()`).notNull(),updatedAt:AJ("updated_at").default(DJ`now()`).notNull()},(J)=>({agentRoomIdx:LJ("session_summaries_agent_room_idx").on(J.agentId,J.roomId),entityIdx:LJ("session_summaries_entity_idx").on(J.entityId),startTimeIdx:LJ("session_summaries_start_time_idx").on(J.startTime)}));import{sql as iJ}from"drizzle-orm";import{pgTable as nJ,text as aJ,integer as mJ,real as oJ,index as NJ,varchar as GJ,timestamp as sJ}from"drizzle-orm/pg-core";var bJ=nJ("memory_access_logs",{id:GJ("id",{length:36}).primaryKey(),agentId:GJ("agent_id",{length:36}).notNull(),memoryId:GJ("memory_id",{length:36}).notNull(),memoryType:aJ("memory_type").notNull(),accessedAt:sJ("accessed_at").default(iJ`now()`).notNull(),roomId:GJ("room_id",{length:36}),relevanceScore:oJ("relevance_score"),wasUseful:mJ("was_useful")},(J)=>({memoryIdx:NJ("memory_access_logs_memory_idx").on(J.memoryId),agentIdx:NJ("memory_access_logs_agent_idx").on(J.agentId),accessedAtIdx:NJ("memory_access_logs_accessed_at_idx").on(J.accessedAt)}));class KJ extends rJ{static serviceType="memory";sessionMessageCounts;memoryConfig;lastExtractionCheckpoints;capabilityDescription="Advanced memory management with short-term summarization and long-term persistent facts";constructor(J){super(J);this.sessionMessageCounts=new Map,this.lastExtractionCheckpoints=new 
Map,this.memoryConfig={shortTermSummarizationThreshold:16,shortTermRetainRecent:6,shortTermSummarizationInterval:10,longTermExtractionEnabled:!0,longTermVectorSearchEnabled:!1,longTermConfidenceThreshold:0.85,longTermExtractionThreshold:30,longTermExtractionInterval:10,summaryModelType:"TEXT_LARGE",summaryMaxTokens:2500,summaryMaxNewMessages:20}}static async start(J){let Y=new KJ(J);return await Y.initialize(J),Y}async stop(){f.info("MemoryService stopped")}async initialize(J){this.runtime=J;let Y=J.getSetting("MEMORY_SUMMARIZATION_THRESHOLD");if(Y)this.memoryConfig.shortTermSummarizationThreshold=parseInt(Y,10);let Q=J.getSetting("MEMORY_RETAIN_RECENT");if(Q)this.memoryConfig.shortTermRetainRecent=parseInt(Q,10);let Z=J.getSetting("MEMORY_SUMMARIZATION_INTERVAL");if(Z)this.memoryConfig.shortTermSummarizationInterval=parseInt(Z,10);let K=J.getSetting("MEMORY_MAX_NEW_MESSAGES");if(K)this.memoryConfig.summaryMaxNewMessages=parseInt(K,10);let _=J.getSetting("MEMORY_LONG_TERM_ENABLED");if(_==="false")this.memoryConfig.longTermExtractionEnabled=!1;else if(_==="true")this.memoryConfig.longTermExtractionEnabled=!0;let $=J.getSetting("MEMORY_CONFIDENCE_THRESHOLD");if($)this.memoryConfig.longTermConfidenceThreshold=parseFloat($);let U=J.getSetting("MEMORY_EXTRACTION_THRESHOLD");if(U)this.memoryConfig.longTermExtractionThreshold=parseInt(U,10);let P=J.getSetting("MEMORY_EXTRACTION_INTERVAL");if(P)this.memoryConfig.longTermExtractionInterval=parseInt(P,10);f.info({summarizationThreshold:this.memoryConfig.shortTermSummarizationThreshold,summarizationInterval:this.memoryConfig.shortTermSummarizationInterval,maxNewMessages:this.memoryConfig.summaryMaxNewMessages,retainRecent:this.memoryConfig.shortTermRetainRecent,longTermEnabled:this.memoryConfig.longTermExtractionEnabled,extractionThreshold:this.memoryConfig.longTermExtractionThreshold,extractionInterval:this.memoryConfig.longTermExtractionInterval,confidenceThreshold:this.memoryConfig.longTermConfidenceThreshold},"MemoryService initialized")}getDb(){let J=this.runtime.db;if(!J)throw Error("Database not available");return J}getConfig(){return{...this.memoryConfig}}updateConfig(J){this.memoryConfig={...this.memoryConfig,...J}}incrementMessageCount(J){let Q=(this.sessionMessageCounts.get(J)||0)+1;return this.sessionMessageCounts.set(J,Q),Q}resetMessageCount(J){this.sessionMessageCounts.set(J,0)}async shouldSummarize(J){return await this.runtime.countMemories(J,!1,"messages")>=this.memoryConfig.shortTermSummarizationThreshold}getExtractionKey(J,Y){return`memory:extraction:${J}:${Y}`}async getLastExtractionCheckpoint(J,Y){let Q=this.getExtractionKey(J,Y),Z=this.lastExtractionCheckpoints.get(Q);if(Z!==void 0)return Z;try{let _=await this.runtime.getCache(Q)??0;return this.lastExtractionCheckpoints.set(Q,_),_}catch(K){return f.warn({error:K},"Failed to get extraction checkpoint from cache"),0}}async setLastExtractionCheckpoint(J,Y,Q){let Z=this.getExtractionKey(J,Y);this.lastExtractionCheckpoints.set(Z,Q);try{await this.runtime.setCache(Z,Q),f.debug(`Set extraction checkpoint for ${J} in room ${Y} at message count ${Q}`)}catch(K){f.error({error:K},"Failed to persist extraction checkpoint to cache")}}async shouldRunExtraction(J,Y,Q){let Z=this.memoryConfig.longTermExtractionThreshold,K=this.memoryConfig.longTermExtractionInterval;if(Q<Z)return f.debug({entityId:J,roomId:Y,currentMessageCount:Q,threshold:Z,shouldRun:!1},"Extraction check: below threshold"),!1;let _=await this.getLastExtractionCheckpoint(J,Y),$=Math.floor(Q/K)*K,U=Q>=Z&&$>_;return 
f.debug({entityId:J,roomId:Y,currentMessageCount:Q,threshold:Z,interval:K,lastCheckpoint:_,currentCheckpoint:$,shouldRun:U},"Extraction check"),U}async storeLongTermMemory(J){let Y=this.getDb(),Q=crypto.randomUUID(),Z=new Date,K={id:Q,createdAt:Z,updatedAt:Z,accessCount:0,...J};try{await Y.insert(H).values({id:K.id,agentId:K.agentId,entityId:K.entityId,category:K.category,content:K.content,metadata:K.metadata||{},embedding:K.embedding,confidence:K.confidence,source:K.source,accessCount:K.accessCount,createdAt:Z,updatedAt:Z,lastAccessedAt:K.lastAccessedAt})}catch(_){throw f.error({error:_},"Failed to store long-term memory"),_}return f.info(`Stored long-term memory: ${K.category} for entity ${K.entityId}`),K}async getLongTermMemories(J,Y,Q=10){let Z=this.getDb(),K=[k(H.agentId,this.runtime.agentId),k(H.entityId,J)];if(Y)K.push(k(H.category,Y));return(await Z.select().from(H).where(i(...K)).orderBy(JJ(H.confidence),JJ(H.updatedAt)).limit(Q)).map(($)=>({id:$.id,agentId:$.agentId,entityId:$.entityId,category:$.category,content:$.content,metadata:$.metadata,embedding:$.embedding,confidence:$.confidence,source:$.source,createdAt:$.createdAt,updatedAt:$.updatedAt,lastAccessedAt:$.lastAccessedAt,accessCount:$.accessCount}))}async updateLongTermMemory(J,Y,Q){let Z=this.getDb(),K={updatedAt:new Date};if(Q.content!==void 0)K.content=Q.content;if(Q.metadata!==void 0)K.metadata=Q.metadata;if(Q.confidence!==void 0)K.confidence=Q.confidence;if(Q.embedding!==void 0)K.embedding=Q.embedding;if(Q.lastAccessedAt!==void 0)K.lastAccessedAt=Q.lastAccessedAt;if(Q.accessCount!==void 0)K.accessCount=Q.accessCount;await Z.update(H).set(K).where(i(k(H.id,J),k(H.agentId,this.runtime.agentId),k(H.entityId,Y))),f.info(`Updated long-term memory: ${J} for entity ${Y}`)}async deleteLongTermMemory(J,Y){await this.getDb().delete(H).where(i(k(H.id,J),k(H.agentId,this.runtime.agentId),k(H.entityId,Y))),f.info(`Deleted long-term memory: ${J} for entity ${Y}`)}async getCurrentSessionSummary(J){let Q=await this.getDb().select().from(D).where(i(k(D.agentId,this.runtime.agentId),k(D.roomId,J))).orderBy(JJ(D.updatedAt)).limit(1);if(Q.length===0)return null;let Z=Q[0];return{id:Z.id,agentId:Z.agentId,roomId:Z.roomId,entityId:Z.entityId,summary:Z.summary,messageCount:Z.messageCount,lastMessageOffset:Z.lastMessageOffset,startTime:Z.startTime,endTime:Z.endTime,topics:Z.topics||[],metadata:Z.metadata,embedding:Z.embedding,createdAt:Z.createdAt,updatedAt:Z.updatedAt}}async storeSessionSummary(J){let Y=this.getDb(),Q=crypto.randomUUID(),Z=new Date,K={id:Q,createdAt:Z,updatedAt:Z,...J};return await Y.insert(D).values({id:K.id,agentId:K.agentId,roomId:K.roomId,entityId:K.entityId||null,summary:K.summary,messageCount:K.messageCount,lastMessageOffset:K.lastMessageOffset,startTime:K.startTime,endTime:K.endTime,topics:K.topics||[],metadata:K.metadata||{},embedding:K.embedding,createdAt:Z,updatedAt:Z}),f.info(`Stored session summary for room ${K.roomId}`),K}async updateSessionSummary(J,Y,Q){let Z=this.getDb(),K={updatedAt:new Date};if(Q.summary!==void 0)K.summary=Q.summary;if(Q.messageCount!==void 0)K.messageCount=Q.messageCount;if(Q.lastMessageOffset!==void 0)K.lastMessageOffset=Q.lastMessageOffset;if(Q.endTime!==void 0)K.endTime=Q.endTime;if(Q.topics!==void 0)K.topics=Q.topics;if(Q.metadata!==void 0)K.metadata=Q.metadata;if(Q.embedding!==void 0)K.embedding=Q.embedding;await Z.update(D).set(K).where(i(k(D.id,J),k(D.agentId,this.runtime.agentId),k(D.roomId,Y))),f.info(`Updated session summary: ${J} for room ${Y}`)}async 
getSessionSummaries(J,Y=5){return(await this.getDb().select().from(D).where(i(k(D.agentId,this.runtime.agentId),k(D.roomId,J))).orderBy(JJ(D.updatedAt)).limit(Y)).map((K)=>({id:K.id,agentId:K.agentId,roomId:K.roomId,entityId:K.entityId,summary:K.summary,messageCount:K.messageCount,lastMessageOffset:K.lastMessageOffset,startTime:K.startTime,endTime:K.endTime,topics:K.topics||[],metadata:K.metadata,embedding:K.embedding,createdAt:K.createdAt,updatedAt:K.updatedAt}))}async searchLongTermMemories(J,Y,Q=5,Z=0.7){if(!this.memoryConfig.longTermVectorSearchEnabled)return f.warn("Vector search is not enabled, falling back to recent memories"),this.getLongTermMemories(J,void 0,Q);let K=this.getDb();try{let _=Y.map((W)=>Number.isFinite(W)?Number(W.toFixed(6)):0),$=CJ`1 - (${tJ(H.embedding,_)})`,U=[k(H.agentId,this.runtime.agentId),k(H.entityId,J),CJ`${H.embedding} IS NOT NULL`];if(Z>0)U.push(eJ($,Z));return(await K.select({memory:H,similarity:$}).from(H).where(i(...U)).orderBy(JJ($)).limit(Q)).map((W)=>({id:W.memory.id,agentId:W.memory.agentId,entityId:W.memory.entityId,category:W.memory.category,content:W.memory.content,metadata:W.memory.metadata,embedding:W.memory.embedding,confidence:W.memory.confidence,source:W.memory.source,createdAt:W.memory.createdAt,updatedAt:W.memory.updatedAt,lastAccessedAt:W.memory.lastAccessedAt,accessCount:W.memory.accessCount,similarity:W.similarity}))}catch(_){return f.warn({error:_},"Vector search failed, falling back to recent memories"),this.getLongTermMemories(J,void 0,Q)}}async getFormattedLongTermMemories(J){let Y=await this.getLongTermMemories(J,void 0,20);if(Y.length===0)return"";let Q=new Map;for(let K of Y){if(!Q.has(K.category))Q.set(K.category,[]);Q.get(K.category)?.push(K)}let Z=[];for(let[K,_]of Q.entries()){let $=K.split("_").map((P)=>P.charAt(0).toUpperCase()+P.slice(1)).join(" "),U=_.map((P)=>`- ${P.content}`).join(`
+`);Z.push(`**${$}**:
+${U}`)}return Z.join(`
 
-`)}}import{logger as…
+
`)}}import{logger as x,ModelType as JK,composePromptFromState as vJ}from"@elizaos/core";var KK=`# Task: Summarize Conversation
 
 You are analyzing a conversation to create a concise summary that captures the key points, topics, and important details.
 
@@ -31,7 +31,7 @@ Respond in this XML format:
 <point>First key point</point>
 <point>Second key point</point>
 </keyPoints>
-</summary>`,…
+</summary>`,QK=`# Task: Update and Condense Conversation Summary
 
 You are updating an existing conversation summary with new messages, while keeping the total summary concise.
 
@@ -62,10 +62,10 @@ Respond in this XML format:
 <point>First key point</point>
 <point>Second key point</point>
 </keyPoints>
-</summary>`;function…
-`),…
+
</summary>`;function YK(J){let Y=J.match(/<text>([\s\S]*?)<\/text>/),Q=J.match(/<topics>([\s\S]*?)<\/topics>/),Z=J.matchAll(/<point>([\s\S]*?)<\/point>/g),K=Y?Y[1].trim():"Summary not available",_=Q?Q[1].split(",").map((U)=>U.trim()).filter(Boolean):[],$=Array.from(Z).map((U)=>U[1].trim());return{summary:K,topics:_,keyPoints:$}}var TJ={name:"MEMORY_SUMMARIZATION",description:"Automatically summarizes conversations to optimize context usage",similes:["CONVERSATION_SUMMARY","CONTEXT_COMPRESSION","MEMORY_OPTIMIZATION"],alwaysRun:!0,validate:async(J,Y)=>{if(!Y.content?.text)return x.debug("Skipping summarization: no message text"),!1;let Q=J.getService("memory");if(!Q)return x.debug("Skipping summarization: memory service not available"),!1;let Z=Q.getConfig(),K=await J.countMemories(Y.roomId,!1,"messages"),_=await Q.getCurrentSessionSummary(Y.roomId);if(!_){let $=K>=Z.shortTermSummarizationThreshold;return x.debug({roomId:Y.roomId,currentMessageCount:K,threshold:Z.shortTermSummarizationThreshold,shouldSummarize:$,reason:"initial_summary_check"},"Summarization validation check"),$}else{let $=K-_.lastMessageOffset,U=$>=Z.shortTermSummarizationInterval;return x.debug({roomId:Y.roomId,currentMessageCount:K,lastOffset:_.lastMessageOffset,newMessageCount:$,interval:Z.shortTermSummarizationInterval,shouldUpdate:U,reason:"summary_update_check"},"Summarization validation check"),U}},handler:async(J,Y)=>{let Q=J.getService("memory");if(!Q){x.error("MemoryService not found");return}let Z=Q.getConfig(),{roomId:K}=Y;try{x.info(`Starting summarization for room ${K}`);let _=await Q.getCurrentSessionSummary(K),$=_?.lastMessageOffset||0,U=await J.countMemories(K,!1,"messages"),P=U-$,W=Z.summaryMaxNewMessages||50,R=Math.min(P,W);if(R===0){x.debug("No new messages to summarize");return}if(P>W)x.warn(`Capping new messages at ${W} (${P} available). Oldest messages will be skipped.`);let T=await J.getMemories({tableName:"messages",roomId:K,count:R,unique:!1,start:$});if(T.length===0){x.debug("No new messages retrieved");return}let O=T.sort((c,h)=>(c.createdAt||0)-(h.createdAt||0)),S=O.map((c)=>{return`${c.entityId===J.agentId?J.character.name:"User"}: ${c.content.text||"[non-text message]"}`}).join(`
+
`),E=await J.composeState(Y),B,b;if(_)b=QK,B=vJ({state:{...E,existingSummary:_.summary,existingTopics:_.topics?.join(", ")||"None",newMessages:S},template:b});else b=KK,B=vJ({state:{...E,recentMessages:S},template:b});let l=await J.useModel(JK.TEXT_LARGE,{prompt:B,maxTokens:Z.summaryMaxTokens||2500}),N=YK(l);x.info(`${_?"Updated":"Generated"} summary: ${N.summary.substring(0,100)}...`);let I=U,C=O[0],p=O[O.length-1],M=_?_.startTime:C?.createdAt&&C.createdAt>0?new Date(C.createdAt):new Date,w=p?.createdAt&&p.createdAt>0?new Date(p.createdAt):new Date;if(_)await Q.updateSessionSummary(_.id,K,{summary:N.summary,messageCount:_.messageCount+O.length,lastMessageOffset:I,endTime:w,topics:N.topics,metadata:{keyPoints:N.keyPoints}}),x.info(`Updated summary for room ${K}: ${O.length} new messages processed (offset: ${$} → ${I})`);else await Q.storeSessionSummary({agentId:J.agentId,roomId:K,entityId:Y.entityId!==J.agentId?Y.entityId:void 0,summary:N.summary,messageCount:O.length,lastMessageOffset:I,startTime:M,endTime:w,topics:N.topics,metadata:{keyPoints:N.keyPoints}}),x.info(`Created new summary for room ${K}: ${O.length} messages summarized (offset: 0 → ${I})`)}catch(_){x.error({error:_},"Error during summarization:")}},examples:[]};import{logger as v,ModelType as ZK,composePromptFromState as _K}from"@elizaos/core";var EJ;((Z)=>{Z.EPISODIC="episodic";Z.SEMANTIC="semantic";Z.PROCEDURAL="procedural"})(EJ||={});var $K=`# Task: Extract Long-Term Memory (Strict Criteria)
 
-You are analyzing a conversation to extract…
+You are analyzing a conversation to extract ONLY the most critical, persistent information about the user using cognitive science memory categories.
 
 # Recent Messages
 {{recentMessages}}
@@ -73,50 +73,150 @@ You are analyzing a conversation to extract facts that should be remembered long
 # Current Long-Term Memories
 {{existingMemories}}
 
-# Memory Categories
-… (old lines 77-85 not captured in this view)
+# Memory Categories (Based on Cognitive Science)
+
+## 1. EPISODIC Memory
+Personal experiences and specific events with temporal/spatial context.
+**Examples:**
+- "User completed migration project from MongoDB to PostgreSQL in Q2 2024"
+- "User encountered authentication bug in production on March 15th"
+- "User had a negative experience with Docker networking in previous job"
+
+**Requirements:**
+- Must include WHO did WHAT, WHEN/WHERE
+- Must be a specific, concrete event (not a pattern)
+- Must have significant impact or relevance to future work
+
+## 2. SEMANTIC Memory
+General facts, concepts, knowledge, and established truths about the user.
+**Examples:**
+- "User is a senior backend engineer with 8 years experience"
+- "User specializes in distributed systems and microservices architecture"
+- "User's primary programming language is TypeScript"
+- "User works at Acme Corp as technical lead"
+
+**Requirements:**
+- Must be factual, timeless information
+- Must be explicitly stated or demonstrated conclusively
+- No speculation or inference from single instances
+- Core identity, expertise, or knowledge only
+
+## 3. PROCEDURAL Memory
+Skills, workflows, methodologies, and how-to knowledge.
+**Examples:**
+- "User follows strict TDD workflow: write tests first, then implementation"
+- "User prefers git rebase over merge to maintain linear history"
+- "User's debugging process: check logs → reproduce locally → binary search"
+- "User always writes JSDoc comments before implementing functions"
+
+**Requirements:**
+- Must describe HOW user does something
+- Must be a repeated, consistent pattern (seen 3+ times or explicitly stated as standard practice)
+- Must be a workflow, methodology, or skill application
+- Not one-off preferences
+
+# ULTRA-STRICT EXTRACTION CRITERIA
+
+## ✅ DO EXTRACT (Only These):
+
+**EPISODIC:**
+- Significant completed projects or milestones
+- Important bugs, incidents, or problems encountered
+- Major decisions made with lasting impact
+- Formative experiences that shape future work
+
+**SEMANTIC:**
+- Professional identity (role, title, company)
+- Core expertise and specializations (stated explicitly or demonstrated conclusively)
+- Primary languages, frameworks, or tools (not exploratory use)
+- Established facts about their work context
+
+**PROCEDURAL:**
+- Consistent workflows demonstrated 3+ times or explicitly stated
+- Standard practices user always follows
+- Methodology preferences with clear rationale
+- Debugging, testing, or development processes
+
+## ❌ NEVER EXTRACT:
+
+- **One-time requests or tasks** (e.g., "can you generate an image", "help me debug this")
+- **Casual conversations** without lasting significance
+- **Exploratory questions** (e.g., "how does X work?")
+- **Temporary context** (current bug, today's task)
+- **Preferences from single occurrence** (e.g., user asked for code once)
+- **Social pleasantries** (thank you, greetings)
+- **Testing or experimentation** (trying out a feature)
+- **Common patterns everyone has** (likes clear explanations)
+- **Situational information** (working on feature X today)
+- **Opinions without persistence** (single complaint, isolated praise)
+- **General knowledge** (not specific to user)
+
+# Quality Gates (ALL Must Pass)
+
+1. **Significance Test**: Will this matter in 3+ months?
+2. **Specificity Test**: Is this concrete and actionable?
+3. **Evidence Test**: Is there strong evidence (3+ instances OR explicit self-identification)?
+4. **Uniqueness Test**: Is this specific to THIS user (not generic)?
+5. **Confidence Test**: Confidence must be >= 0.85 (be VERY conservative)
+6. **Non-Redundancy Test**: Does this add NEW information not in existing memories?
+
+# Confidence Scoring (Be Conservative)
+
+- **0.95-1.0**: User explicitly stated as core identity/practice AND demonstrated multiple times
+- **0.85-0.94**: User explicitly stated OR consistently demonstrated 5+ times
+- **0.75-0.84**: Strong pattern (3-4 instances) with supporting context
+- **Below 0.75**: DO NOT EXTRACT (insufficient evidence)
+
+# Critical Instructions
+
+1. **Default to NOT extracting** - When in doubt, skip it
+2. **Require overwhelming evidence** - One or two mentions is NOT enough
+3. **Focus on what's PERSISTENT** - Not what's temporary or situational
+4. **Verify against existing memories** - Don't duplicate or contradict
+5. **Maximum 2-3 extractions per run** - Quality over quantity
+
+**If there are no qualifying facts (which is common), respond with <memories></memories>**
+
+# Response Format
 
-# Instructions
-Extract any NEW information that should be remembered long-term. For each item:
-- Determine which category it belongs to
-- Write a clear, factual statement
-- Assess confidence (0.0 to 1.0)
-- Only include information explicitly stated or strongly implied
-
-If there are no new long-term facts to extract, respond with <memories></memories>
-
-Respond in this XML format:
 <memories>
 <memory>
-<category>…
-<content>User is a…
+<category>semantic</category>
+<content>User is a senior TypeScript developer with 8 years of backend experience</content>
 <confidence>0.95</confidence>
 </memory>
 <memory>
-<category>…
-<content>…
-<confidence>0.…
+<category>procedural</category>
+<content>User follows TDD workflow: writes tests before implementation, runs tests after each change</content>
+<confidence>0.88</confidence>
 </memory>
-… (old lines 108-113 not captured in this view)
-`)…
-… (old lines 115-122 not captured in this view)
+<memory>
+<category>episodic</category>
+<content>User led database migration from MongoDB to PostgreSQL for payment system in Q2 2024</content>
+<confidence>0.92</confidence>
+</memory>
+
</memories>`;function VK(J){let Y=J.matchAll(/<memory>[\s\S]*?<category>(.*?)<\/category>[\s\S]*?<content>(.*?)<\/content>[\s\S]*?<confidence>(.*?)<\/confidence>[\s\S]*?<\/memory>/g),Q=[];for(let Z of Y){let K=Z[1].trim(),_=Z[2].trim(),$=parseFloat(Z[3].trim());if(!Object.values(EJ).includes(K)){v.warn(`Invalid memory category: ${K}`);continue}if(_&&!isNaN($))Q.push({category:K,content:_,confidence:$})}return Q}var SJ={name:"LONG_TERM_MEMORY_EXTRACTION",description:"Extracts long-term facts about users from conversations",similes:["MEMORY_EXTRACTION","FACT_LEARNING","USER_PROFILING"],alwaysRun:!0,validate:async(J,Y)=>{if(v.debug(`Validating long-term memory extraction for message: ${Y.content?.text}`),Y.entityId===J.agentId)return v.debug("Skipping long-term memory extraction for agent's own message"),!1;if(!Y.content?.text)return v.debug("Skipping long-term memory extraction for message without text"),!1;let Q=J.getService("memory");if(!Q)return v.debug("MemoryService not found"),!1;if(!Q.getConfig().longTermExtractionEnabled)return v.debug("Long-term memory extraction is disabled"),!1;let K=await J.countMemories(Y.roomId,!1,"messages"),_=await Q.shouldRunExtraction(Y.entityId,Y.roomId,K);return v.debug(`Should run extraction: ${_}`),_},handler:async(J,Y)=>{let Q=J.getService("memory");if(!Q){v.error("MemoryService not found");return}let Z=Q.getConfig(),{entityId:K,roomId:_}=Y;try{v.info(`Extracting long-term memories for entity ${K}`);let U=(await J.getMemories({tableName:"messages",roomId:_,count:20,unique:!1})).sort((B,b)=>(B.createdAt||0)-(b.createdAt||0)).map((B)=>{return`${B.entityId===J.agentId?J.character.name:"User"}: ${B.content.text||"[non-text message]"}`}).join(`
+
`),P=await Q.getLongTermMemories(K,void 0,30),W=P.length>0?P.map((B)=>`[${B.category}] ${B.content} (confidence: ${B.confidence})`).join(`
+
`):"None yet",R=await J.composeState(Y),T=_K({state:{...R,recentMessages:U,existingMemories:W},template:$K}),O=await J.useModel(ZK.TEXT_LARGE,{prompt:T}),S=VK(O);v.info(`Extracted ${S.length} long-term memories`);for(let B of S)if(B.confidence>=Math.max(Z.longTermConfidenceThreshold,0.85))await Q.storeLongTermMemory({agentId:J.agentId,entityId:K,category:B.category,content:B.content,confidence:B.confidence,source:"conversation",metadata:{roomId:_,extractedAt:new Date().toISOString()}}),v.info(`Stored long-term memory: [${B.category}] ${B.content.substring(0,50)}...`);else v.debug(`Skipped low-confidence memory: ${B.content} (confidence: ${B.confidence}, threshold: ${Math.max(Z.longTermConfidenceThreshold,0.85)})`);let E=await J.countMemories(_,!1,"messages");await Q.setLastExtractionCheckpoint(K,_,E),v.debug(`Updated extraction checkpoint to ${E} for entity ${K} in room ${_}`)}catch($){v.error({error:$},"Error during long-term memory extraction:")}},examples:[]};import{addHeader as y,ChannelType as XJ,formatMessages as jJ,formatPosts as BJ,getEntityDetails as hJ,logger as WK}from"@elizaos/core";var xJ=async(J,Y,Q,Z)=>{let K=await J.getRoomsForParticipants([Y,Q]);return J.getMemoriesByRoomIds({tableName:"messages",roomIds:K.filter((_)=>_!==Z),limit:20})},IJ={name:"SHORT_TERM_MEMORY",description:"Unified conversation context with smart summarization and interactions",position:95,get:async(J,Y,Q)=>{try{let Z=J.getService("memory"),{roomId:K}=Y,_=16,$=Z?.getConfig()||{shortTermSummarizationThreshold:16,shortTermRetainRecent:6},U=await J.countMemories(K,!1,"messages");if(!(Z&&U>=$.shortTermSummarizationThreshold)){let[W,R,T,O]=await Promise.all([hJ({runtime:J,roomId:K}),J.getRoom(K),J.getMemories({tableName:"messages",roomId:K,count:16,unique:!1}),Y.entityId!==J.agentId?xJ(J,Y.entityId,J.agentId,K):Promise.resolve([])]),S=T.filter((X)=>X.content?.type==="action_result"&&X.metadata?.type==="action_result"),E=T.filter((X)=>!(X.content?.type==="action_result"&&X.metadata?.type==="action_result")),B=R?.type?R.type===XJ.FEED||R.type===XJ.THREAD:!1,[b,l]=await Promise.all([jJ({messages:E,entities:W}),BJ({messages:E,entities:W,conversationHeader:!1})]),N=(X,A)=>{return X.sort((V,G)=>(V.createdAt||0)-(G.createdAt||0)).map((V)=>{let j=W.find((u)=>u.id===V.entityId)?.names[0]||(V.entityId===J.agentId?J.character.name:"Unknown"),L=V.createdAt?new Date(V.createdAt).toLocaleString():"Unknown time",z=V.content.text||"",q=A&&V.content.internalMonologue?`
+[Internal thought: ${V.content.internalMonologue}]`:"";return`[${L}] ${j}: ${z}${q}`}).join(`
+
`)},I=N(E,!1),C=N(E,!0),p="";if(S.length>0){let X=new Map;for(let V of S){let G=String(V.content?.runId||"unknown");if(!X.has(G))X.set(G,[]);let j=X.get(G);if(j)j.push(V)}let A=Array.from(X.entries()).slice(-3).map(([V,G])=>{let j=G.sort((q,u)=>(q.createdAt||0)-(u.createdAt||0)),L=j[0]?.content?.planThought||"",z=j.map((q)=>{let u=q.content?.actionName||"Unknown",_J=q.content?.actionStatus||"unknown",m=q.content?.planStep||"",o=q.content?.text||"",g=q.content?.error||"",$J=` - ${u} (${_J})`;if(m)$J+=` [${m}]`;if(g)$J+=`: Error - ${g}`;else if(o&&o!==`Executed action: ${u}`)$J+=`: ${o}`;return $J}).join(`
+`);return`**Action Run ${V.slice(0,8)}**${L?` - "${L}"`:""}
+${z}`}).join(`
+
+
`);p=A?y("# Recent Action Executions",A):""}let M=l&&l.length>0?y("# Posts in Thread",l):"",w=b&&b.length>0?y("# Conversation Messages",b):"";if(!M&&!w&&E.length===0&&!Y.content.text)return{data:{summaries:[],recentMessages:[],recentInteractions:[],actionResults:[],mode:"full_conversation"},values:{recentPosts:"",recentMessages:"",recentMessageInteractions:"",recentPostInteractions:"",recentInteractions:"",recentActionResults:"",recentMessage:"No recent message available.",conversationLog:"",conversationLogWithAgentThoughts:""},text:"No recent messages available"};let c="No recent message available.";if(E.length>0){let X=[...E].sort((V,G)=>(G.createdAt||0)-(V.createdAt||0))[0],A=jJ({messages:[X],entities:W});if(A)c=A}let h=Y.metadata,QJ=W.find((X)=>X.id===Y.entityId)?.names[0]||h?.entityName||"Unknown User",s=Y.content.text,r=!!s?.trim(),n=r?y("# Received Message",`${QJ}: ${s}`):"",t=r?y("# Focus your response",`You are replying to the above message from **${QJ}**. Keep your answer relevant to that message. Do not repeat earlier replies unless the sender asks again.`):"",d=new Map;if(O.length>0){let X=[...new Set(O.map((j)=>j.entityId).filter((j)=>j!==J.agentId))],A=new Set(X),V=new Set;W.forEach((j)=>{if(A.has(j.id))d.set(j.id,j),V.add(j.id)});let G=X.filter((j)=>!V.has(j));if(G.length>0)(await Promise.all(G.map((L)=>J.getEntityById(L)))).forEach((L,z)=>{if(L)d.set(G[z],L)})}let a=async(X)=>{return X.map((V)=>{let G=V.entityId===J.agentId,j;if(G)j=J.character.name;else j=d.get(V.entityId)?.metadata?.userName||"unknown";return`${j}: ${V.content.text}`}).join(`
+
`)},UJ=async(X,A)=>{let V=[...A],G=new Set(A.map((L)=>L.id));for(let[L,z]of d.entries())if(!G.has(L))V.push(z);return BJ({messages:X,entities:V,conversationHeader:!0})},[YJ,e]=await Promise.all([a(O),UJ(O,W)]),ZJ={summaries:[],recentMessages:E,recentInteractions:O,actionResults:S,mode:"full_conversation"},FJ={recentPosts:M,recentMessages:w,recentMessageInteractions:YJ,recentPostInteractions:e,recentInteractions:B?e:YJ,recentActionResults:p,recentMessage:c,conversationLog:I,conversationLogWithAgentThoughts:C},F=[B?M:w,p,w||M||Y.content.text?n:"",w||M||Y.content.text?t:""].filter(Boolean).join(`
+
+
`);return{data:ZJ,values:FJ,text:F}}else{let W=await Z.getCurrentSessionSummary(K),R=W?.lastMessageOffset||0,T=$.shortTermRetainRecent,[O,S,E,B]=await Promise.all([hJ({runtime:J,roomId:K}),J.getRoom(K),J.getMemories({tableName:"messages",roomId:K,count:T,unique:!1,start:R}),Y.entityId!==J.agentId?xJ(J,Y.entityId,J.agentId,K):Promise.resolve([])]),b=S?.type?S.type===XJ.FEED||S.type===XJ.THREAD:!1,l=E.filter((F)=>F.content?.type==="action_result"&&F.metadata?.type==="action_result"),N=E.filter((F)=>!(F.content?.type==="action_result"&&F.metadata?.type==="action_result")),I="";if(l.length>0){let F=new Map;for(let A of l){let V=String(A.content?.runId||"unknown");if(!F.has(V))F.set(V,[]);F.get(V)?.push(A)}let X=Array.from(F.entries()).slice(-3).map(([A,V])=>{let G=V.sort((z,q)=>(z.createdAt||0)-(q.createdAt||0)),j=G[0]?.content?.planThought||"",L=G.map((z)=>{let q=z.content?.actionName||"Unknown",u=z.content?.actionStatus||"unknown",_J=z.content?.planStep||"",m=z.content?.text||"",o=z.content?.error||"",g=` - ${q} (${u})`;if(_J)g+=` [${_J}]`;if(o)g+=`: Error - ${o}`;else if(m&&m!==`Executed action: ${q}`)g+=`: ${m}`;return g}).join(`
+`);return`**Action Run ${A.slice(0,8)}**${j?` - "${j}"`:""}
+${L}`}).join(`
+
+
`);I=X?y("# Recent Action Executions",X):""}let C="";if(N.length>0){if(b)C=BJ({messages:N,entities:O,conversationHeader:!1});else C=jJ({messages:N,entities:O});if(C)C=y("# Recent Messages",C)}let p="No recent message available.";if(N.length>0){let F=[...N].sort((A,V)=>(V.createdAt||0)-(A.createdAt||0))[0],X=jJ({messages:[F],entities:O});if(X)p=X}let M=(F,X)=>{return F.sort((A,V)=>(A.createdAt||0)-(V.createdAt||0)).map((A)=>{let G=O.find((q)=>q.id===A.entityId)?.names[0]||(A.entityId===J.agentId?J.character.name:"Unknown"),j=A.createdAt?new Date(A.createdAt).toLocaleString():"Unknown time",L=A.content.text||"",z=X&&A.content.internalMonologue?`
+[Internal thought: ${A.content.internalMonologue}]`:"";return`[${j}] ${G}: ${L}${z}`}).join(`
+
`)},w=M(N,!1),c=M(N,!0),h="";if(W){let F=`${W.messageCount} messages`,X=new Date(W.startTime).toLocaleDateString();if(h=`**Previous Conversation** (${F}, ${X})
+`,h+=W.summary,W.topics&&W.topics.length>0)h+=`
+
*Topics: ${W.topics.join(", ")}*`;h=y("# Conversation Summary",h)}let QJ=Y.metadata,s=O.find((F)=>F.id===Y.entityId)?.names[0]||QJ?.entityName||"Unknown User",r=Y.content.text,n=!!r?.trim(),t=n?y("# Received Message",`${s}: ${r}`):"",d=n?y("# Focus your response",`You are replying to the above message from **${s}**. Keep your answer relevant to that message.`):"",a=new Map;if(B.length>0){let F=[...new Set(B.map((G)=>G.entityId).filter((G)=>G!==J.agentId))],X=new Set(F),A=new Set;O.forEach((G)=>{if(X.has(G.id))a.set(G.id,G),A.add(G.id)});let V=F.filter((G)=>!A.has(G));if(V.length>0)(await Promise.all(V.map((j)=>J.getEntityById(j)))).forEach((j,L)=>{if(j)a.set(V[L],j)})}let UJ=async(F)=>{return F.map((A)=>{let V=A.entityId===J.agentId,G;if(V)G=J.character.name;else G=a.get(A.entityId)?.metadata?.userName||"unknown";return`${G}: ${A.content.text}`}).join(`
+
`)},YJ=async(F,X)=>{let A=[...X],V=new Set(X.map((j)=>j.id));for(let[j,L]of a.entries())if(!V.has(j))A.push(L);return BJ({messages:F,entities:A,conversationHeader:!0})},[e,ZJ]=await Promise.all([UJ(B),YJ(B,O)]),FJ=[h,C,I,n?t:"",n?d:""].filter(Boolean).join(`
+
+
`);return{data:{summaries:W?[W]:[],recentMessages:N,recentInteractions:B,actionResults:l,mode:"summarized"},values:{...h&&{sessionSummaries:h},...C&&{recentMessages:C},recentMessageInteractions:e,recentPostInteractions:ZJ,recentInteractions:b?ZJ:e,...I&&{recentActionResults:I},recentMessage:p,conversationLog:w,conversationLogWithAgentThoughts:c,...t&&{receivedMessageHeader:t},...d&&{focusHeader:d}},text:FJ}}}catch(Z){return WK.error({error:Z},"Error in shortTermMemoryProvider:"),{data:{summaries:[],recentMessages:[],recentInteractions:[],actionResults:[],mode:"error"},values:{recentPosts:"",recentMessages:"",recentMessageInteractions:"",recentPostInteractions:"",recentInteractions:"",recentActionResults:"",conversationLog:"",conversationLogWithAgentThoughts:""},text:"Error retrieving conversation context."}}}};import{logger as AK,addHeader as GK}from"@elizaos/core";var pJ={name:"LONG_TERM_MEMORY",description:"Persistent facts and preferences about the user",position:50,get:async(J,Y,Q)=>{try{let Z=J.getService("memory");if(!Z)return{data:{memories:[]},values:{longTermMemories:""},text:""};let{entityId:K}=Y;if(K===J.agentId)return{data:{memories:[]},values:{longTermMemories:""},text:""};let _=await Z.getLongTermMemories(K,void 0,25);if(_.length===0)return{data:{memories:[]},values:{longTermMemories:""},text:""};let $=await Z.getFormattedLongTermMemories(K),U=GK("# What I Know About You",$),P=new Map;for(let R of _){let T=P.get(R.category)||0;P.set(R.category,T+1)}let W=Array.from(P.entries()).map(([R,T])=>`${R}: ${T}`).join(", ");return{data:{memories:_,categoryCounts:Object.fromEntries(P)},values:{longTermMemories:U,memoryCategories:W},text:U}}catch(Z){return AK.error({error:Z},"Error in longTermMemoryProvider:"),{data:{memories:[]},values:{longTermMemories:""},text:""}}}};var XK={name:"memory",description:"Advanced memory management with conversation summarization and long-term persistent memory",services:[KJ],evaluators:[TJ,SJ],providers:[pJ,IJ],schema:kJ},jK=XK;export{D as sessionSummaries,XK as memoryPlugin,bJ as memoryAccessLogs,H as longTermMemories,jK as default,KJ as MemoryService,EJ as LongTermMemoryCategory};
+
+//# debugId=F6EA319C101EB9A964756E2164756E21