@elizaos/plugin-memory 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +294 -0
- package/dist/actions/remember.d.ts +11 -0
- package/dist/browser/index.browser.js +116 -0
- package/dist/browser/index.browser.js.map +19 -0
- package/dist/browser/index.d.ts +2 -0
- package/dist/cjs/index.d.ts +2 -0
- package/dist/cjs/index.node.cjs +1010 -0
- package/dist/cjs/index.node.js.map +19 -0
- package/dist/evaluators/long-term-extraction.d.ts +8 -0
- package/dist/evaluators/summarization.d.ts +8 -0
- package/dist/index.browser.d.ts +2 -0
- package/dist/index.d.ts +38 -0
- package/dist/index.node.d.ts +2 -0
- package/dist/node/index.d.ts +2 -0
- package/dist/node/index.node.js +1024 -0
- package/dist/node/index.node.js.map +19 -0
- package/dist/providers/long-term-memory.d.ts +17 -0
- package/dist/providers/short-term-memory.d.ts +13 -0
- package/dist/schemas/index.d.ts +9 -0
- package/dist/schemas/long-term-memories.d.ts +264 -0
- package/dist/schemas/memory-access-logs.d.ts +154 -0
- package/dist/schemas/session-summaries.d.ts +283 -0
- package/dist/services/memory-service.d.ts +99 -0
- package/dist/types/index.d.ts +83 -0
- package/package.json +70 -0
package/README.md
ADDED
@@ -0,0 +1,294 @@

# @elizaos/plugin-memory

Advanced memory management plugin for ElizaOS that provides intelligent conversation summarization and persistent long-term memory storage.

## Features

### 🔄 Short-term Memory (Conversation Summarization)

- **Automatic Summarization**: Compresses long conversations when they exceed configurable thresholds
- **Context Preservation**: Maintains conversation flow while dramatically reducing token usage
- **Recent Message Retention**: Keeps the most recent messages for immediate context
- **Topic Extraction**: Identifies and tracks main topics discussed in each session

### 🧠 Long-term Memory (Persistent Facts)

- **Intelligent Extraction**: Automatically learns facts about users from conversations
- **Categorized Storage**: Organizes information into 9 semantic categories
- **Confidence Scoring**: Tracks the reliability of stored information
- **Cross-session Persistence**: Remembers user preferences and context across all interactions

### 📊 Memory Categories

1. **Identity**: User's name, role, profession (e.g., "I'm a data scientist")
2. **Expertise**: Domain knowledge, skills, familiarity with topics
3. **Projects**: Ongoing work, past interactions, recurring topics
4. **Preferences**: Communication style, format preferences, verbosity
5. **Data Sources**: Frequently used files, databases, APIs
6. **Goals**: Broader intentions and objectives
7. **Constraints**: User-defined rules or limitations
8. **Definitions**: Custom terms, acronyms, glossaries
9. **Behavioral Patterns**: Interaction styles and tendencies

## Installation

```bash
bun add @elizaos/plugin-memory
```
## Usage

### Basic Setup

```typescript
import { memoryPlugin } from '@elizaos/plugin-memory';

const agent = new Agent({
  name: 'MyAgent',
  plugins: [
    memoryPlugin,
    // ... other plugins
  ],
});
```

### Configuration

Configure the plugin via environment variables in your `.env` file:

```env
# Short-term Memory Settings
MEMORY_SUMMARIZATION_THRESHOLD=50 # Messages before summarization (default: 5)
MEMORY_RETAIN_RECENT=10           # Recent messages to keep (default: 10)

# Long-term Memory Settings
MEMORY_LONG_TERM_ENABLED=true     # Enable long-term extraction (default: true)
MEMORY_CONFIDENCE_THRESHOLD=0.7   # Minimum confidence to store (default: 0.7)
```
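
The same settings can also be inspected or adjusted at runtime through the service's `getConfig()` and `updateConfig()` methods. A minimal sketch, assuming the plugin is loaded and `runtime` is your agent's `IAgentRuntime`:

```typescript
import { MemoryService } from '@elizaos/plugin-memory';

// Assumes `runtime` is an initialized IAgentRuntime with the plugin loaded
const memoryService = runtime.getService('memory') as MemoryService;

// Inspect the effective configuration (env overrides are applied at startup)
const config = memoryService.getConfig();
console.log(config.shortTermSummarizationThreshold, config.longTermConfidenceThreshold);

// Adjust settings on the fly, e.g. for a long-form deployment
memoryService.updateConfig({ shortTermSummarizationThreshold: 80 });
```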
### Manual Memory Storage

Users can explicitly ask the agent to remember information:

```
User: "Remember that I prefer TypeScript over JavaScript"
Agent: I've made a note of that in my Preferences memory: "User prefers TypeScript over JavaScript"

User: "Keep in mind I'm working on a startup project"
Agent: I've made a note of that in my Projects memory: "User is working on a startup project"

User: "Don't forget I use Python 3.11"
Agent: I've made a note of that in my Data Sources memory: "User uses Python 3.11"
```

### Accessing the Memory Service

```typescript
import { MemoryService, LongTermMemoryCategory } from '@elizaos/plugin-memory';

// Get the service from runtime
const memoryService = runtime.getService('memory') as MemoryService;

// Store a long-term memory manually
await memoryService.storeLongTermMemory({
  agentId: runtime.agentId,
  entityId: userId,
  category: LongTermMemoryCategory.PREFERENCES,
  content: 'User prefers concise responses',
  confidence: 0.9,
  source: 'manual',
});

// Retrieve memories
const memories = await memoryService.getLongTermMemories(userId);

// Get session summaries
const summaries = await memoryService.getSessionSummaries(roomId);
```
## Database Setup

The plugin uses ElizaOS's dynamic migration system. Database tables are automatically created when the plugin is loaded. The plugin defines three tables:

- **`long_term_memories`**: Stores persistent facts about users
- **`session_summaries`**: Stores conversation summaries
- **`memory_access_logs`**: Optional usage tracking for analytics

No manual migration is required; the schema is handled automatically by the runtime.
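
The table definitions are also exported as Drizzle objects (`longTermMemories`, `sessionSummaries`, and `memoryAccessLogs`), so you can query them directly when the service API is not enough. A minimal sketch, assuming `runtime.db` exposes the runtime's Drizzle instance (this is how the service itself obtains it):

```typescript
import { eq } from 'drizzle-orm';
import { longTermMemories } from '@elizaos/plugin-memory';

// `runtime.db` is the Drizzle instance the runtime migrated the tables into
const db = runtime.db;
const rows = await db
  .select()
  .from(longTermMemories)
  .where(eq(longTermMemories.entityId, userId));
```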
## Architecture

### Components

#### Services

- **MemoryService**: Core service managing all memory operations
  - Tracks message counts for summarization triggers
  - Stores and retrieves long-term memories
  - Manages session summaries
  - Provides formatted memory context

#### Evaluators

- **summarizationEvaluator**: Runs once a conversation reaches the threshold
  - Generates comprehensive summaries using the LLM
  - Extracts topics and key points
  - Folds older messages into a rolling summary via a message offset
- **longTermExtractionEvaluator**: Periodically analyzes conversations
  - Identifies facts worth remembering long-term
  - Categorizes information semantically
  - Assigns confidence scores
  - Stores high-confidence memories

#### Providers

- **longTermMemoryProvider**: Injects persistent user facts into context
  - Runs early (position: 50) to establish user context
  - Formats memories by category
  - Provides "What I Know About You" context (example below)
- **shortTermMemoryProvider**: Provides conversation summaries
  - Runs before recentMessages (position: 95)
  - Includes recent session summaries
  - Shows topics and message counts
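
For reference, the context injected by longTermMemoryProvider looks roughly like this (contents illustrative):

```
# What I Know About You

**Identity**:
- User is a software engineer specializing in backend development

**Preferences**:
- Prefers code examples over lengthy explanations
```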
#### Actions

- **rememberAction**: Handles explicit memory requests
  - Triggers on keywords like "remember", "keep in mind", etc.
  - Uses the LLM to extract what to remember
  - Categorizes and stores the fact, then confirms

## How It Works

### Short-term Memory Flow

1. **Tracking**: MemoryService tracks the message count per room
2. **Trigger**: When the count reaches the threshold (default: 5), summarizationEvaluator activates (see the sketch below)
3. **Summarization**: The LLM generates a comprehensive summary of the conversation
4. **Archival**: The summary is stored and a message offset marks what has already been summarized; recent messages remain available
5. **Context Injection**: shortTermMemoryProvider injects summaries into future conversations
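
A simplified sketch of the trigger check from step 2 (the shipped `MemoryService.shouldSummarize()` does essentially this):

```typescript
import type { IAgentRuntime, UUID } from '@elizaos/core';

// Summarization fires once a room's stored message count
// reaches the configured threshold.
async function shouldSummarize(
  runtime: IAgentRuntime,
  roomId: UUID,
  threshold: number
): Promise<boolean> {
  const count = await runtime.countMemories(roomId, false, 'messages');
  return count >= threshold;
}
```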
### Long-term Memory Flow

1. **Monitoring**: longTermExtractionEvaluator runs periodically (every `longTermExtractionInterval` messages, 5 by default; see the sketch below)
2. **Analysis**: The LLM analyzes the conversation for facts worth remembering
3. **Extraction**: Identifies facts, categorizes them, and assigns confidence
4. **Storage**: High-confidence facts are stored in the long_term_memories table
5. **Retrieval**: longTermMemoryProvider injects relevant facts in all future conversations
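
The cadence from step 1 is enforced with a per-entity checkpoint so that each interval boundary is processed at most once. Simplified from the shipped `shouldRunExtraction()`:

```typescript
// With interval = 5, extraction can fire at message counts 5, 10, 15, ...
// and each checkpoint is processed only once per entity-room pair.
function shouldRunExtraction(
  currentMessageCount: number,
  interval: number,
  lastCheckpoint: number
): boolean {
  const currentCheckpoint = Math.floor(currentMessageCount / interval) * interval;
  return currentMessageCount >= interval && currentCheckpoint > lastCheckpoint;
}
```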
### Manual Memory Flow

1. **Detection**: User says "remember that..." or a similar trigger phrase
2. **Validation**: rememberAction validates the request
3. **Extraction**: The LLM extracts what to remember and categorizes it
4. **Storage**: The fact is stored with a 'manual' source and high confidence
5. **Confirmation**: The agent confirms what was stored

## Performance Optimization

### Context Reduction

- Without the plugin: 1000 messages = ~200,000 tokens
- With the plugin: 1000 messages = ~20 summaries + 10 recent messages = ~25,000 tokens
- **Savings**: ~85% reduction in context size
### Token Efficiency

- Summaries are roughly 1/10th the size of the original conversations
- Long-term memories provide rich context in minimal tokens
- Recent messages are still available for immediate context

### Database Optimization

- Indexed queries for fast retrieval
- Separate tables for different memory types
- Optional vector search for semantic similarity (requires pgvector)

## Best Practices

### For Users

- Use explicit commands: "Remember that...", "Keep in mind...", "Don't forget..."
- Provide clear, factual information for better storage
- Verify that important memories were stored correctly

### For Developers

- Adjust thresholds based on your use case
- Monitor summarization quality with test conversations
- Use confidence thresholds to filter low-quality extractions
- Consider enabling vector search for large-scale deployments

### Configuration Tips

- **High-frequency chatbots**: Lower threshold (30-40 messages)
- **Long-form conversations**: Higher threshold (60-100 messages)
- **Critical applications**: Higher confidence threshold (0.8-0.9)
- **Exploratory use**: Lower confidence threshold (0.6-0.7)
## Advanced Features

### Vector Search (Optional)

Enable semantic search for memories (usage sketch below) by:

1. Installing the pgvector extension
2. Enabling the `longTermVectorSearchEnabled` flag via `MemoryService.updateConfig()` (the shipped service does not read a `MEMORY_VECTOR_SEARCH_ENABLED` environment variable)
3. Generating embeddings for stored memories
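
Once enabled, `MemoryService.searchLongTermMemories()` ranks memories by cosine similarity and falls back to recency-ordered retrieval if vector search is disabled or fails. A usage sketch; producing the query embedding is left to whichever embedding model your runtime provides, so `getQueryEmbedding` below is a hypothetical helper:

```typescript
// The query vector must match the dimensionality of the stored embeddings.
// `getQueryEmbedding` is a hypothetical helper for your embedding model.
const queryEmbedding: number[] = await getQueryEmbedding('preferred programming language');

const matches = await memoryService.searchLongTermMemories(
  userId,         // entityId whose memories to search
  queryEmbedding, // query vector
  5,              // limit (default: 5)
  0.7             // minimum cosine similarity (default: 0.7)
);
```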
### Memory Analytics

Use the `memory_access_logs` table to:

- Track which memories are most frequently accessed
- Identify useful vs. unused memories
- Optimize extraction strategies
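
The plugin does not ship a reporting API for this table, but it is exported as a Drizzle object, so a minimal access-count query might look like this (assuming direct access to the runtime's Drizzle instance, as in the Database Setup sketch above):

```typescript
import { desc, sql } from 'drizzle-orm';
import { memoryAccessLogs } from '@elizaos/plugin-memory';

// Count accesses per memory, most-accessed first
const topMemories = await db
  .select({
    memoryId: memoryAccessLogs.memoryId,
    accesses: sql<number>`count(*)`,
  })
  .from(memoryAccessLogs)
  .groupBy(memoryAccessLogs.memoryId)
  .orderBy(desc(sql`count(*)`));
```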
### Custom Categories

Extend `LongTermMemoryCategory` with domain-specific categories. TypeScript enums cannot be spread, so merge the enum's values into a const object instead:

```typescript
import { LongTermMemoryCategory } from '@elizaos/plugin-memory';

export const CustomMemoryCategory = {
  ...LongTermMemoryCategory,
  MEDICAL_HISTORY: 'medical_history',
  FINANCIAL_DATA: 'financial_data',
} as const;

export type CustomMemoryCategory =
  (typeof CustomMemoryCategory)[keyof typeof CustomMemoryCategory];
```
## Testing

Run the test suite:

```bash
cd packages/plugin-memory
bun test
```

## Troubleshooting

### Summaries not generating

- Check that the message threshold has been reached
- Verify that MemoryService is registered
- Check that an LLM provider is configured

### Long-term memories not stored

- Verify `MEMORY_LONG_TERM_ENABLED=true`
- Check that the confidence threshold isn't set too high
- Ensure facts are being extracted (check the logs)

### High token usage

- Lower the summarization threshold
- Reduce the number of retained recent messages
- Limit the number of long-term memories retrieved

## License

MIT

## Contributing

Contributions welcome! Please see the main ElizaOS contributing guide.
package/dist/actions/remember.d.ts
ADDED

@@ -0,0 +1,11 @@
import { type Action } from '@elizaos/core';
/**
 * Remember Action
 *
 * Allows users to explicitly ask the agent to remember information.
 * Examples:
 * - "Remember that I prefer Python over JavaScript"
 * - "Please remember I'm working on a startup project"
 * - "Keep in mind that I don't like verbose explanations"
 */
export declare const rememberAction: Action;
package/dist/browser/index.browser.js
ADDED

@@ -0,0 +1,116 @@
var BA=Object.defineProperty;var JA=(A,J)=>{for(var Q in J)BA(A,Q,{get:J[Q],enumerable:!0,configurable:!0,set:(K)=>J[Q]=()=>K})};import{Service as jA,logger as H}from"@elizaos/core";import{eq as j,and as T,desc as k,sql as s,cosineDistance as GA,gte as FA}from"drizzle-orm";var w={};JA(w,{sessionSummaries:()=>V,memoryAccessLogs:()=>a,longTermMemories:()=>N});import{sql as d}from"drizzle-orm";import{pgTable as KA,text as x,integer as QA,jsonb as WA,real as g,index as z,varchar as I,timestamp as S}from"drizzle-orm/pg-core";var N=KA("long_term_memories",{id:I("id",{length:36}).primaryKey(),agentId:I("agent_id",{length:36}).notNull(),entityId:I("entity_id",{length:36}).notNull(),category:x("category").notNull(),content:x("content").notNull(),metadata:WA("metadata"),embedding:g("embedding").array(),confidence:g("confidence").default(1),source:x("source"),createdAt:S("created_at").default(d`now()`).notNull(),updatedAt:S("updated_at").default(d`now()`).notNull(),lastAccessedAt:S("last_accessed_at"),accessCount:QA("access_count").default(0)},(A)=>({agentEntityIdx:z("long_term_memories_agent_entity_idx").on(A.agentId,A.entityId),categoryIdx:z("long_term_memories_category_idx").on(A.category),confidenceIdx:z("long_term_memories_confidence_idx").on(A.confidence),createdAtIdx:z("long_term_memories_created_at_idx").on(A.createdAt)}));import{sql as u}from"drizzle-orm";import{pgTable as YA,text as ZA,integer as n,jsonb as i,real as _A,index as M,varchar as f,timestamp as C}from"drizzle-orm/pg-core";var V=YA("session_summaries",{id:f("id",{length:36}).primaryKey(),agentId:f("agent_id",{length:36}).notNull(),roomId:f("room_id",{length:36}).notNull(),entityId:f("entity_id",{length:36}),summary:ZA("summary").notNull(),messageCount:n("message_count").notNull(),lastMessageOffset:n("last_message_offset").notNull().default(0),startTime:C("start_time").notNull(),endTime:C("end_time").notNull(),topics:i("topics"),metadata:i("metadata"),embedding:_A("embedding").array(),createdAt:C("created_at").default(u`now()`).notNull(),updatedAt:C("updated_at").default(u`now()`).notNull()},(A)=>({agentRoomIdx:M("session_summaries_agent_room_idx").on(A.agentId,A.roomId),entityIdx:M("session_summaries_entity_idx").on(A.entityId),startTimeIdx:M("session_summaries_start_time_idx").on(A.startTime)}));import{sql as $A}from"drizzle-orm";import{pgTable as NA,text as UA,integer as VA,real as HA,index as p,varchar as b,timestamp as XA}from"drizzle-orm/pg-core";var a=NA("memory_access_logs",{id:b("id",{length:36}).primaryKey(),agentId:b("agent_id",{length:36}).notNull(),memoryId:b("memory_id",{length:36}).notNull(),memoryType:UA("memory_type").notNull(),accessedAt:XA("accessed_at").default($A`now()`).notNull(),roomId:b("room_id",{length:36}),relevanceScore:HA("relevance_score"),wasUseful:VA("was_useful")},(A)=>({memoryIdx:p("memory_access_logs_memory_idx").on(A.memoryId),agentIdx:p("memory_access_logs_agent_idx").on(A.agentId),accessedAtIdx:p("memory_access_logs_accessed_at_idx").on(A.accessedAt)}));class D extends jA{static serviceType="memory";sessionMessageCounts;memoryConfig;lastExtractionCheckpoints;capabilityDescription="Advanced memory management with short-term summarization and long-term persistent facts";constructor(A){super(A);this.sessionMessageCounts=new Map,this.lastExtractionCheckpoints=new 
Map,this.memoryConfig={shortTermSummarizationThreshold:5,shortTermRetainRecent:10,longTermExtractionEnabled:!0,longTermVectorSearchEnabled:!1,longTermConfidenceThreshold:0.7,longTermExtractionInterval:5,summaryModelType:"TEXT_LARGE",summaryMaxTokens:2500}}static async start(A){let J=new D(A);return await J.initialize(A),J}async stop(){H.info("MemoryService stopped")}async initialize(A){this.runtime=A;let J=A.getSetting("MEMORY_SUMMARIZATION_THRESHOLD");if(J)this.memoryConfig.shortTermSummarizationThreshold=parseInt(J,10);let Q=A.getSetting("MEMORY_RETAIN_RECENT");if(Q)this.memoryConfig.shortTermRetainRecent=parseInt(Q,10);let K=A.getSetting("MEMORY_LONG_TERM_ENABLED");if(K==="false")this.memoryConfig.longTermExtractionEnabled=!1;else if(K==="true")this.memoryConfig.longTermExtractionEnabled=!0;let B=A.getSetting("MEMORY_CONFIDENCE_THRESHOLD");if(B)this.memoryConfig.longTermConfidenceThreshold=parseFloat(B);H.info({summarizationThreshold:this.memoryConfig.shortTermSummarizationThreshold,retainRecent:this.memoryConfig.shortTermRetainRecent,longTermEnabled:this.memoryConfig.longTermExtractionEnabled,extractionInterval:this.memoryConfig.longTermExtractionInterval,confidenceThreshold:this.memoryConfig.longTermConfidenceThreshold},"MemoryService initialized")}getDb(){let A=this.runtime.db;if(!A)throw Error("Database not available");return A}getConfig(){return{...this.memoryConfig}}updateConfig(A){this.memoryConfig={...this.memoryConfig,...A}}incrementMessageCount(A){let Q=(this.sessionMessageCounts.get(A)||0)+1;return this.sessionMessageCounts.set(A,Q),Q}resetMessageCount(A){this.sessionMessageCounts.set(A,0)}async shouldSummarize(A){return await this.runtime.countMemories(A,!1,"messages")>=this.memoryConfig.shortTermSummarizationThreshold}getExtractionKey(A,J){return`memory:extraction:${A}:${J}`}async getLastExtractionCheckpoint(A,J){let Q=this.getExtractionKey(A,J),K=this.lastExtractionCheckpoints.get(Q);if(K!==void 0)return K;try{let W=await this.runtime.getCache(Q)??0;return this.lastExtractionCheckpoints.set(Q,W),W}catch(B){return H.warn({error:B},"Failed to get extraction checkpoint from cache"),0}}async setLastExtractionCheckpoint(A,J,Q){let K=this.getExtractionKey(A,J);this.lastExtractionCheckpoints.set(K,Q);try{await this.runtime.setCache(K,Q),H.debug(`Set extraction checkpoint for ${A} in room ${J} at message count ${Q}`)}catch(B){H.error({error:B},"Failed to persist extraction checkpoint to cache")}}async shouldRunExtraction(A,J,Q){let K=this.memoryConfig.longTermExtractionInterval,B=await this.getLastExtractionCheckpoint(A,J),W=Math.floor(Q/K)*K,Z=Q>=K&&W>B;return H.debug({entityId:A,roomId:J,currentMessageCount:Q,interval:K,lastCheckpoint:B,currentCheckpoint:W,shouldRun:Z},"Extraction check"),Z}async storeLongTermMemory(A){let J=this.getDb(),Q=crypto.randomUUID(),K=new Date,B={id:Q,createdAt:K,updatedAt:K,accessCount:0,...A};try{await J.insert(N).values({id:B.id,agentId:B.agentId,entityId:B.entityId,category:B.category,content:B.content,metadata:B.metadata||{},embedding:B.embedding,confidence:B.confidence,source:B.source,accessCount:B.accessCount,createdAt:K,updatedAt:K,lastAccessedAt:B.lastAccessedAt})}catch(W){throw H.error({error:W},"Failed to store long-term memory"),W}return H.info(`Stored long-term memory: ${B.category} for entity ${B.entityId}`),B}async getLongTermMemories(A,J,Q=10){let K=this.getDb(),B=[j(N.agentId,this.runtime.agentId),j(N.entityId,A)];if(J)B.push(j(N.category,J));return(await 
K.select().from(N).where(T(...B)).orderBy(k(N.confidence),k(N.updatedAt)).limit(Q)).map((Z)=>({id:Z.id,agentId:Z.agentId,entityId:Z.entityId,category:Z.category,content:Z.content,metadata:Z.metadata,embedding:Z.embedding,confidence:Z.confidence,source:Z.source,createdAt:Z.createdAt,updatedAt:Z.updatedAt,lastAccessedAt:Z.lastAccessedAt,accessCount:Z.accessCount}))}async updateLongTermMemory(A,J){let Q=this.getDb(),K={updatedAt:new Date};if(J.content!==void 0)K.content=J.content;if(J.metadata!==void 0)K.metadata=J.metadata;if(J.confidence!==void 0)K.confidence=J.confidence;if(J.embedding!==void 0)K.embedding=J.embedding;if(J.lastAccessedAt!==void 0)K.lastAccessedAt=J.lastAccessedAt;if(J.accessCount!==void 0)K.accessCount=J.accessCount;await Q.update(N).set(K).where(j(N.id,A)),H.info(`Updated long-term memory: ${A}`)}async deleteLongTermMemory(A){await this.getDb().delete(N).where(j(N.id,A)),H.info(`Deleted long-term memory: ${A}`)}async getCurrentSessionSummary(A){let Q=await this.getDb().select().from(V).where(T(j(V.agentId,this.runtime.agentId),j(V.roomId,A))).orderBy(k(V.updatedAt)).limit(1);if(Q.length===0)return null;let K=Q[0];return{id:K.id,agentId:K.agentId,roomId:K.roomId,entityId:K.entityId,summary:K.summary,messageCount:K.messageCount,lastMessageOffset:K.lastMessageOffset,startTime:K.startTime,endTime:K.endTime,topics:K.topics||[],metadata:K.metadata,embedding:K.embedding,createdAt:K.createdAt,updatedAt:K.updatedAt}}async storeSessionSummary(A){let J=this.getDb(),Q=crypto.randomUUID(),K=new Date,B={id:Q,createdAt:K,updatedAt:K,...A};return await J.insert(V).values({id:B.id,agentId:B.agentId,roomId:B.roomId,entityId:B.entityId||null,summary:B.summary,messageCount:B.messageCount,lastMessageOffset:B.lastMessageOffset,startTime:B.startTime,endTime:B.endTime,topics:B.topics||[],metadata:B.metadata||{},embedding:B.embedding,createdAt:K,updatedAt:K}),H.info(`Stored session summary for room ${B.roomId}`),B}async updateSessionSummary(A,J){let Q=this.getDb(),K={updatedAt:new Date};if(J.summary!==void 0)K.summary=J.summary;if(J.messageCount!==void 0)K.messageCount=J.messageCount;if(J.lastMessageOffset!==void 0)K.lastMessageOffset=J.lastMessageOffset;if(J.endTime!==void 0)K.endTime=J.endTime;if(J.topics!==void 0)K.topics=J.topics;if(J.metadata!==void 0)K.metadata=J.metadata;if(J.embedding!==void 0)K.embedding=J.embedding;await Q.update(V).set(K).where(j(V.id,A)),H.info(`Updated session summary: ${A}`)}async getSessionSummaries(A,J=5){return(await this.getDb().select().from(V).where(T(j(V.agentId,this.runtime.agentId),j(V.roomId,A))).orderBy(k(V.updatedAt)).limit(J)).map((B)=>({id:B.id,agentId:B.agentId,roomId:B.roomId,entityId:B.entityId,summary:B.summary,messageCount:B.messageCount,lastMessageOffset:B.lastMessageOffset,startTime:B.startTime,endTime:B.endTime,topics:B.topics||[],metadata:B.metadata,embedding:B.embedding,createdAt:B.createdAt,updatedAt:B.updatedAt}))}async searchLongTermMemories(A,J,Q=5,K=0.7){if(!this.memoryConfig.longTermVectorSearchEnabled)return H.warn("Vector search is not enabled, falling back to recent memories"),this.getLongTermMemories(A,void 0,Q);let B=this.getDb();try{let W=J.map((Y)=>Number.isFinite(Y)?Number(Y.toFixed(6)):0),Z=s`1 - (${GA(N.embedding,W)})`,U=[j(N.agentId,this.runtime.agentId),j(N.entityId,A),s`${N.embedding} IS NOT NULL`];if(K>0)U.push(FA(Z,K));return(await 
B.select({memory:N,similarity:Z}).from(N).where(T(...U)).orderBy(k(Z)).limit(Q)).map((Y)=>({id:Y.memory.id,agentId:Y.memory.agentId,entityId:Y.memory.entityId,category:Y.memory.category,content:Y.memory.content,metadata:Y.memory.metadata,embedding:Y.memory.embedding,confidence:Y.memory.confidence,source:Y.memory.source,createdAt:Y.memory.createdAt,updatedAt:Y.memory.updatedAt,lastAccessedAt:Y.memory.lastAccessedAt,accessCount:Y.memory.accessCount,similarity:Y.similarity}))}catch(W){return H.warn({error:W},"Vector search failed, falling back to recent memories"),this.getLongTermMemories(A,void 0,Q)}}async getFormattedLongTermMemories(A){let J=await this.getLongTermMemories(A,void 0,20);if(J.length===0)return"";let Q=new Map;for(let B of J){if(!Q.has(B.category))Q.set(B.category,[]);Q.get(B.category)?.push(B)}let K=[];for(let[B,W]of Q.entries()){let Z=B.split("_").map(($)=>$.charAt(0).toUpperCase()+$.slice(1)).join(" "),U=W.map(($)=>`- ${$.content}`).join(`
`);K.push(`**${Z}**:
${U}`)}return K.join(`

`)}}import{logger as P,ModelType as OA,composePromptFromState as m}from"@elizaos/core";var PA=`# Task: Summarize Conversation

You are analyzing a conversation to create a concise summary that captures the key points, topics, and important details.

# Recent Messages
{{recentMessages}}

# Instructions
Generate a summary that:
1. Captures the main topics discussed
2. Highlights key information shared
3. Notes any decisions made or questions asked
4. Maintains context for future reference
5. Is concise but comprehensive

**IMPORTANT**: Keep the summary under 2500 tokens. Be comprehensive but concise.

Also extract:
- **Topics**: List of main topics discussed (comma-separated)
- **Key Points**: Important facts or decisions (bullet points)

Respond in this XML format:
<summary>
<text>Your comprehensive summary here</text>
<topics>topic1, topic2, topic3</topics>
<keyPoints>
<point>First key point</point>
<point>Second key point</point>
</keyPoints>
</summary>`,EA=`# Task: Update and Condense Conversation Summary

You are updating an existing conversation summary with new messages, while keeping the total summary concise.

# Existing Summary
{{existingSummary}}

# Existing Topics
{{existingTopics}}

# New Messages Since Last Summary
{{newMessages}}

# Instructions
Update the summary by:
1. Merging the existing summary with insights from the new messages
2. Removing redundant or less important details to stay under the token limit
3. Keeping the most important context and decisions
4. Adding new topics if they emerge
5. **CRITICAL**: Keep the ENTIRE updated summary under 2500 tokens

The goal is a rolling summary that captures the essence of the conversation without growing indefinitely.

Respond in this XML format:
<summary>
<text>Your updated and condensed summary here</text>
<topics>topic1, topic2, topic3</topics>
<keyPoints>
<point>First key point</point>
<point>Second key point</point>
</keyPoints>
</summary>`;function LA(A){let J=A.match(/<text>([\s\S]*?)<\/text>/),Q=A.match(/<topics>([\s\S]*?)<\/topics>/),K=A.matchAll(/<point>([\s\S]*?)<\/point>/g),B=J?J[1].trim():"Summary not available",W=Q?Q[1].split(",").map((U)=>U.trim()).filter(Boolean):[],Z=Array.from(K).map((U)=>U[1].trim());return{summary:B,topics:W,keyPoints:Z}}var o={name:"MEMORY_SUMMARIZATION",description:"Summarizes conversations to optimize short-term memory",similes:["CONVERSATION_SUMMARY","CONTEXT_COMPRESSION","MEMORY_OPTIMIZATION"],alwaysRun:!0,validate:async(A,J)=>{if(P.debug(`Validating summarization for message: ${J.content?.text}`),!J.content?.text)return!1;let Q=A.getService("memory");if(!Q)return!1;let K=Q.getConfig(),B=await A.countMemories(J.roomId,!1,"messages"),W=B>=K.shortTermSummarizationThreshold;return P.debug({roomId:J.roomId,currentMessageCount:B,threshold:K.shortTermSummarizationThreshold,shouldSummarize:W},"Summarization check"),W},handler:async(A,J)=>{let Q=A.getService("memory");if(!Q){P.error("MemoryService not found");return}let K=Q.getConfig(),{roomId:B}=J;try{P.info(`Starting summarization for room ${B}`);let W=await Q.getCurrentSessionSummary(B),Z=W?.lastMessageOffset||0,U=await A.countMemories(B,!1,"messages"),$=await A.getMemories({tableName:"messages",roomId:B,count:K.shortTermSummarizationThreshold,unique:!1,start:Z});if($.length===0){P.debug("No new messages to summarize");return}let Y=$.sort((q,y)=>(q.createdAt||0)-(y.createdAt||0)),G=Y.map((q)=>{return`${q.entityId===A.agentId?A.character.name:"User"}: ${q.content.text||"[non-text message]"}`}).join(`
`),F=await A.composeState(J),O,E;if(W)E=EA,O=m({state:{...F,existingSummary:W.summary,existingTopics:W.topics?.join(", ")||"None",newMessages:G},template:E});else E=PA,O=m({state:{...F,recentMessages:G},template:E});let R=await A.useModel(OA.TEXT_LARGE,{prompt:O,maxTokens:K.summaryMaxTokens||2500}),_=LA(R);P.info(`${W?"Updated":"Generated"} summary: ${_.summary.substring(0,100)}...`);let L=U,v=Y[0],h=Y[Y.length-1],AA=W?W.startTime:v?.createdAt&&v.createdAt>0?new Date(v.createdAt):new Date,l=h?.createdAt&&h.createdAt>0?new Date(h.createdAt):new Date;if(W)await Q.updateSessionSummary(W.id,{summary:_.summary,messageCount:W.messageCount+Y.length,lastMessageOffset:L,endTime:l,topics:_.topics,metadata:{keyPoints:_.keyPoints}}),P.info(`Updated summary for room ${B}: ${Y.length} new messages processed (offset: ${Z} → ${L})`);else await Q.storeSessionSummary({agentId:A.agentId,roomId:B,entityId:J.entityId!==A.agentId?J.entityId:void 0,summary:_.summary,messageCount:Y.length,lastMessageOffset:L,startTime:AA,endTime:l,topics:_.topics,metadata:{keyPoints:_.keyPoints}}),P.info(`Created new summary for room ${B}: ${Y.length} messages summarized (offset: 0 → ${L})`)}catch(W){P.error({error:W},"Error during summarization:")}},examples:[]};import{logger as X,ModelType as kA,composePromptFromState as DA}from"@elizaos/core";var c;((Y)=>{Y.IDENTITY="identity";Y.EXPERTISE="expertise";Y.PROJECTS="projects";Y.PREFERENCES="preferences";Y.DATA_SOURCES="data_sources";Y.GOALS="goals";Y.CONSTRAINTS="constraints";Y.DEFINITIONS="definitions";Y.BEHAVIORAL_PATTERNS="behavioral_patterns"})(c||={});var RA=`# Task: Extract Long-Term Memory

You are analyzing a conversation to extract facts that should be remembered long-term about the user.

# Recent Messages
{{recentMessages}}

# Current Long-Term Memories
{{existingMemories}}

# Memory Categories
1. **identity**: User's name, role, identity (e.g., "I'm a data scientist")
2. **expertise**: User's skills, knowledge domains, or unfamiliarity with topics
3. **projects**: Ongoing projects, past interactions, recurring topics
4. **preferences**: Communication style, format preferences, verbosity, etc.
5. **data_sources**: Frequently used files, databases, APIs
6. **goals**: Broader intentions (e.g., "preparing for interview")
7. **constraints**: User-defined rules or limitations
8. **definitions**: Custom terms, acronyms, glossaries
9. **behavioral_patterns**: How the user tends to interact

# Instructions
Extract any NEW information that should be remembered long-term. For each item:
- Determine which category it belongs to
- Write a clear, factual statement
- Assess confidence (0.0 to 1.0)
- Only include information explicitly stated or strongly implied

If there are no new long-term facts to extract, respond with <memories></memories>

Respond in this XML format:
<memories>
<memory>
<category>identity</category>
<content>User is a software engineer specializing in backend development</content>
<confidence>0.95</confidence>
</memory>
<memory>
<category>preferences</category>
<content>Prefers code examples over lengthy explanations</content>
<confidence>0.85</confidence>
</memory>
</memories>`;function qA(A){let J=A.matchAll(/<memory>[\s\S]*?<category>(.*?)<\/category>[\s\S]*?<content>(.*?)<\/content>[\s\S]*?<confidence>(.*?)<\/confidence>[\s\S]*?<\/memory>/g),Q=[];for(let K of J){let B=K[1].trim(),W=K[2].trim(),Z=parseFloat(K[3].trim());if(!Object.values(c).includes(B)){X.warn(`Invalid memory category: ${B}`);continue}if(W&&!isNaN(Z))Q.push({category:B,content:W,confidence:Z})}return Q}var t={name:"LONG_TERM_MEMORY_EXTRACTION",description:"Extracts long-term facts about users from conversations",similes:["MEMORY_EXTRACTION","FACT_LEARNING","USER_PROFILING"],alwaysRun:!0,validate:async(A,J)=>{if(X.debug(`Validating long-term memory extraction for message: ${J.content?.text}`),J.entityId===A.agentId)return X.debug("Skipping long-term memory extraction for agent's own message"),!1;if(!J.content?.text)return X.debug("Skipping long-term memory extraction for message without text"),!1;let Q=A.getService("memory");if(!Q)return X.debug("MemoryService not found"),!1;if(!Q.getConfig().longTermExtractionEnabled)return X.debug("Long-term memory extraction is disabled"),!1;let B=await A.countMemories(J.roomId,!1,"messages"),W=await Q.shouldRunExtraction(J.entityId,J.roomId,B);return X.debug(`Should run extraction: ${W}`),W},handler:async(A,J)=>{let Q=A.getService("memory");if(!Q){X.error("MemoryService not found");return}let K=Q.getConfig(),{entityId:B,roomId:W}=J;try{X.info(`Extracting long-term memories for entity ${B}`);let U=(await A.getMemories({tableName:"messages",roomId:W,count:20,unique:!1})).sort((_,L)=>(_.createdAt||0)-(L.createdAt||0)).map((_)=>{return`${_.entityId===A.agentId?A.character.name:"User"}: ${_.content.text||"[non-text message]"}`}).join(`
`),$=await Q.getLongTermMemories(B,void 0,30),Y=$.length>0?$.map((_)=>`[${_.category}] ${_.content} (confidence: ${_.confidence})`).join(`
`):"None yet",G=await A.composeState(J),F=DA({state:{...G,recentMessages:U,existingMemories:Y},template:RA}),O=await A.useModel(kA.TEXT_LARGE,{prompt:F}),E=qA(O);X.info(`Extracted ${E.length} long-term memories`);for(let _ of E)if(_.confidence>=K.longTermConfidenceThreshold)await Q.storeLongTermMemory({agentId:A.agentId,entityId:B,category:_.category,content:_.content,confidence:_.confidence,source:"conversation",metadata:{roomId:W,extractedAt:new Date().toISOString()}}),X.info(`Stored long-term memory: [${_.category}] ${_.content.substring(0,50)}...`);else X.debug(`Skipped low-confidence memory: ${_.content} (confidence: ${_.confidence})`);let R=await A.countMemories(W,!1,"messages");await Q.setLastExtractionCheckpoint(B,W,R),X.debug(`Updated extraction checkpoint to ${R} for entity ${B} in room ${W}`)}catch(Z){X.error({error:Z},"Error during long-term memory extraction:")}},examples:[]};import{logger as zA,addHeader as fA}from"@elizaos/core";var r={name:"SHORT_TERM_MEMORY",description:"Recent conversation summaries to maintain context efficiently",position:95,get:async(A,J,Q)=>{try{let K=A.getService("memory");if(!K)return{data:{summaries:[]},values:{sessionSummaries:""},text:""};let{roomId:B}=J,W=await K.getSessionSummaries(B,3);if(W.length===0)return{data:{summaries:[]},values:{sessionSummaries:""},text:""};let Z=W.reverse().map(($,Y)=>{let G=`${$.messageCount} messages`,F=new Date($.startTime).toLocaleDateString(),O=`**Session ${Y+1}** (${G}, ${F})
`;if(O+=$.summary,$.topics&&$.topics.length>0)O+=`
*Topics: ${$.topics.join(", ")}*`;return O}).join(`
`),U=fA("# Previous Conversation Context",Z);return{data:{summaries:W},values:{sessionSummaries:U},text:U}}catch(K){return zA.error({error:K},"Error in shortTermMemoryProvider:"),{data:{summaries:[]},values:{sessionSummaries:""},text:""}}}};import{logger as CA,addHeader as bA}from"@elizaos/core";var e={name:"LONG_TERM_MEMORY",description:"Persistent facts and preferences about the user",position:50,get:async(A,J,Q)=>{try{let K=A.getService("memory");if(!K)return{data:{memories:[]},values:{longTermMemories:""},text:""};let{entityId:B}=J;if(B===A.agentId)return{data:{memories:[]},values:{longTermMemories:""},text:""};let W=await K.getLongTermMemories(B,void 0,25);if(W.length===0)return{data:{memories:[]},values:{longTermMemories:""},text:""};let Z=await K.getFormattedLongTermMemories(B),U=bA("# What I Know About You",Z),$=new Map;for(let G of W){let F=$.get(G.category)||0;$.set(G.category,F+1)}let Y=Array.from($.entries()).map(([G,F])=>`${G}: ${F}`).join(", ");return{data:{memories:W,categoryCounts:Object.fromEntries($)},values:{longTermMemories:U,memoryCategories:Y},text:U}}catch(K){return CA.error({error:K},"Error in longTermMemoryProvider:"),{data:{memories:[]},values:{longTermMemories:""},text:""}}}};var TA={name:"memory",description:"Advanced memory management with conversation summarization and long-term persistent memory",services:[D],evaluators:[o,t],providers:[e,r],schema:w},vA=TA;export{V as sessionSummaries,TA as memoryPlugin,a as memoryAccessLogs,N as longTermMemories,vA as default,D as MemoryService,c as LongTermMemoryCategory};
//# debugId=30473D3367DF880764756E2164756E21
package/dist/browser/index.browser.js.map
ADDED

@@ -0,0 +1,19 @@
{
"version": 3,
"sources": ["../../src/services/memory-service.ts", "../../src/schemas/long-term-memories.ts", "../../src/schemas/session-summaries.ts", "../../src/schemas/memory-access-logs.ts", "../../src/evaluators/summarization.ts", "../../src/evaluators/long-term-extraction.ts", "../../src/types/index.ts", "../../src/providers/short-term-memory.ts", "../../src/providers/long-term-memory.ts", "../../src/index.ts"],
"sourcesContent": [
"import {\n type IAgentRuntime,\n Service,\n type UUID,\n logger,\n type ServiceTypeName,\n} from '@elizaos/core';\nimport { eq, and, desc, sql, cosineDistance, gte } from 'drizzle-orm';\nimport {\n type LongTermMemory,\n type SessionSummary,\n type MemoryConfig,\n LongTermMemoryCategory,\n} from '../types/index';\nimport { longTermMemories, sessionSummaries } from '../schemas/index';\n\n/**\n * Memory Service\n * Manages both short-term (session summaries) and long-term (persistent facts) memory\n */\nexport class MemoryService extends Service {\n static serviceType: ServiceTypeName = 'memory' as ServiceTypeName;\n\n private sessionMessageCounts: Map<UUID, number>;\n private memoryConfig: MemoryConfig;\n private lastExtractionCheckpoints: Map<string, number>; // Track last extraction per entity-room pair\n\n capabilityDescription =\n 'Advanced memory management with short-term summarization and long-term persistent facts';\n\n constructor(runtime?: IAgentRuntime) {\n super(runtime);\n this.sessionMessageCounts = new Map();\n this.lastExtractionCheckpoints = new Map();\n this.memoryConfig = {\n shortTermSummarizationThreshold: 5,\n shortTermRetainRecent: 10,\n longTermExtractionEnabled: true,\n longTermVectorSearchEnabled: false,\n longTermConfidenceThreshold: 0.7,\n longTermExtractionInterval: 5, // Run extraction every N messages\n summaryModelType: 'TEXT_LARGE',\n summaryMaxTokens: 2500,\n };\n }\n\n static async start(runtime: IAgentRuntime): Promise<Service> {\n const service = new MemoryService(runtime);\n await service.initialize(runtime);\n return service;\n }\n\n async stop(): Promise<void> {\n // No cleanup needed for this service\n logger.info('MemoryService stopped');\n }\n\n async initialize(runtime: IAgentRuntime): Promise<void> {\n this.runtime = runtime;\n\n // Load configuration from runtime settings\n const threshold = runtime.getSetting('MEMORY_SUMMARIZATION_THRESHOLD');\n if (threshold) {\n this.memoryConfig.shortTermSummarizationThreshold = parseInt(threshold, 10);\n }\n\n const retainRecent = runtime.getSetting('MEMORY_RETAIN_RECENT');\n if (retainRecent) {\n this.memoryConfig.shortTermRetainRecent = parseInt(retainRecent, 10);\n }\n\n const longTermEnabled = runtime.getSetting('MEMORY_LONG_TERM_ENABLED');\n // Only override default if explicitly set to 'false'\n if (longTermEnabled === 'false') {\n this.memoryConfig.longTermExtractionEnabled = false;\n } else if (longTermEnabled === 'true') {\n this.memoryConfig.longTermExtractionEnabled = true;\n }\n // Otherwise keep the default value (true)\n\n const confidenceThreshold = runtime.getSetting('MEMORY_CONFIDENCE_THRESHOLD');\n if (confidenceThreshold) {\n this.memoryConfig.longTermConfidenceThreshold = parseFloat(confidenceThreshold);\n }\n\n logger.info(\n {\n summarizationThreshold: this.memoryConfig.shortTermSummarizationThreshold,\n retainRecent: this.memoryConfig.shortTermRetainRecent,\n longTermEnabled: this.memoryConfig.longTermExtractionEnabled,\n extractionInterval: this.memoryConfig.longTermExtractionInterval,\n confidenceThreshold: this.memoryConfig.longTermConfidenceThreshold,\n },\n 'MemoryService initialized'\n );\n }\n\n /**\n * Get the Drizzle database instance\n */\n private getDb(): any {\n const db = (this.runtime as any).db;\n if (!db) {\n throw new Error('Database not available');\n }\n return db;\n }\n\n /**\n * Get configuration\n */\n getConfig(): MemoryConfig {\n return { ...this.memoryConfig };\n }\n\n /**\n * Update configuration\n */\n updateConfig(updates: Partial<MemoryConfig>): void {\n 
this.memoryConfig = { ...this.memoryConfig, ...updates };\n }\n\n /**\n * Track message count for a room\n */\n incrementMessageCount(roomId: UUID): number {\n const current = this.sessionMessageCounts.get(roomId) || 0;\n const newCount = current + 1;\n this.sessionMessageCounts.set(roomId, newCount);\n return newCount;\n }\n\n /**\n * Reset message count for a room\n */\n resetMessageCount(roomId: UUID): void {\n this.sessionMessageCounts.set(roomId, 0);\n }\n\n /**\n * Check if summarization is needed for a room\n */\n async shouldSummarize(roomId: UUID): Promise<boolean> {\n const count = await this.runtime.countMemories(roomId, false, 'messages');\n return count >= this.memoryConfig.shortTermSummarizationThreshold;\n }\n\n /**\n * Generate cache key for tracking extraction checkpoints per entity-room pair\n */\n private getExtractionKey(entityId: UUID, roomId: UUID): string {\n return `memory:extraction:${entityId}:${roomId}`;\n }\n\n /**\n * Get the last extraction checkpoint for an entity in a room\n * Uses the cache table via adapter\n */\n async getLastExtractionCheckpoint(entityId: UUID, roomId: UUID): Promise<number> {\n const key = this.getExtractionKey(entityId, roomId);\n\n // Check in-memory cache first\n const cached = this.lastExtractionCheckpoints.get(key);\n if (cached !== undefined) {\n return cached;\n }\n\n // Check database cache table via adapter\n try {\n const checkpoint = await this.runtime.getCache<number>(key);\n const messageCount = checkpoint ?? 0;\n\n // Cache it in memory for faster access\n this.lastExtractionCheckpoints.set(key, messageCount);\n\n return messageCount;\n } catch (error) {\n logger.warn({ error }, 'Failed to get extraction checkpoint from cache');\n return 0;\n }\n }\n\n /**\n * Set the last extraction checkpoint for an entity in a room\n * Uses the cache table via adapter\n */\n async setLastExtractionCheckpoint(\n entityId: UUID,\n roomId: UUID,\n messageCount: number\n ): Promise<void> {\n const key = this.getExtractionKey(entityId, roomId);\n\n // Update in-memory cache\n this.lastExtractionCheckpoints.set(key, messageCount);\n\n // Persist to database cache table via adapter\n try {\n await this.runtime.setCache(key, messageCount);\n logger.debug(\n `Set extraction checkpoint for ${entityId} in room ${roomId} at message count ${messageCount}`\n );\n } catch (error) {\n logger.error({ error }, 'Failed to persist extraction checkpoint to cache');\n }\n }\n\n /**\n * Check if long-term extraction should run based on message count and interval\n */\n async shouldRunExtraction(\n entityId: UUID,\n roomId: UUID,\n currentMessageCount: number\n ): Promise<boolean> {\n const interval = this.memoryConfig.longTermExtractionInterval;\n const lastCheckpoint = await this.getLastExtractionCheckpoint(entityId, roomId);\n\n // Calculate the current checkpoint (e.g., if interval=5: 5, 10, 15, 20...)\n const currentCheckpoint = Math.floor(currentMessageCount / interval) * interval;\n\n // Run if we're at or past a checkpoint and haven't processed this checkpoint yet\n const shouldRun = currentMessageCount >= interval && currentCheckpoint > lastCheckpoint;\n\n logger.debug(\n {\n entityId,\n roomId,\n currentMessageCount,\n interval,\n lastCheckpoint,\n currentCheckpoint,\n shouldRun,\n },\n 'Extraction check'\n );\n\n return shouldRun;\n }\n\n /**\n * Store a long-term memory\n */\n async storeLongTermMemory(\n memory: Omit<LongTermMemory, 'id' | 'createdAt' | 'updatedAt'>\n ): Promise<LongTermMemory> {\n const db = this.getDb();\n\n const id = 
crypto.randomUUID() as UUID;\n const now = new Date();\n\n const newMemory: LongTermMemory = {\n id,\n createdAt: now,\n updatedAt: now,\n accessCount: 0,\n ...memory,\n };\n\n try {\n await db.insert(longTermMemories).values({\n id: newMemory.id,\n agentId: newMemory.agentId,\n entityId: newMemory.entityId,\n category: newMemory.category,\n content: newMemory.content,\n metadata: newMemory.metadata || {},\n embedding: newMemory.embedding,\n confidence: newMemory.confidence,\n source: newMemory.source,\n accessCount: newMemory.accessCount,\n createdAt: now,\n updatedAt: now,\n lastAccessedAt: newMemory.lastAccessedAt,\n });\n } catch (error) {\n logger.error({ error }, 'Failed to store long-term memory');\n throw error;\n }\n\n logger.info(`Stored long-term memory: ${newMemory.category} for entity ${newMemory.entityId}`);\n return newMemory;\n }\n\n /**\n * Retrieve long-term memories for an entity\n */\n async getLongTermMemories(\n entityId: UUID,\n category?: LongTermMemoryCategory,\n limit: number = 10\n ): Promise<LongTermMemory[]> {\n const db = this.getDb();\n\n const conditions = [\n eq(longTermMemories.agentId, this.runtime.agentId),\n eq(longTermMemories.entityId, entityId),\n ];\n\n if (category) {\n conditions.push(eq(longTermMemories.category, category));\n }\n\n const results = await db\n .select()\n .from(longTermMemories)\n .where(and(...conditions))\n .orderBy(desc(longTermMemories.confidence), desc(longTermMemories.updatedAt))\n .limit(limit);\n\n return results.map((row) => ({\n id: row.id as UUID,\n agentId: row.agentId as UUID,\n entityId: row.entityId as UUID,\n category: row.category as LongTermMemoryCategory,\n content: row.content,\n metadata: row.metadata as Record<string, unknown>,\n embedding: row.embedding as number[],\n confidence: row.confidence as number,\n source: row.source as string,\n createdAt: row.createdAt,\n updatedAt: row.updatedAt,\n lastAccessedAt: row.lastAccessedAt,\n accessCount: row.accessCount as number,\n }));\n }\n\n /**\n * Update a long-term memory\n */\n async updateLongTermMemory(\n id: UUID,\n updates: Partial<Omit<LongTermMemory, 'id' | 'agentId' | 'createdAt'>>\n ): Promise<void> {\n const db = this.getDb();\n\n const updateData: any = {\n updatedAt: new Date(),\n };\n\n if (updates.content !== undefined) {\n updateData.content = updates.content;\n }\n\n if (updates.metadata !== undefined) {\n updateData.metadata = updates.metadata;\n }\n\n if (updates.confidence !== undefined) {\n updateData.confidence = updates.confidence;\n }\n\n if (updates.embedding !== undefined) {\n updateData.embedding = updates.embedding;\n }\n\n if (updates.lastAccessedAt !== undefined) {\n updateData.lastAccessedAt = updates.lastAccessedAt;\n }\n\n if (updates.accessCount !== undefined) {\n updateData.accessCount = updates.accessCount;\n }\n\n await db.update(longTermMemories).set(updateData).where(eq(longTermMemories.id, id));\n\n logger.info(`Updated long-term memory: ${id}`);\n }\n\n /**\n * Delete a long-term memory\n */\n async deleteLongTermMemory(id: UUID): Promise<void> {\n const db = this.getDb();\n\n await db.delete(longTermMemories).where(eq(longTermMemories.id, id));\n\n logger.info(`Deleted long-term memory: ${id}`);\n }\n\n /**\n * Get the current session summary for a room (latest one)\n */\n async getCurrentSessionSummary(roomId: UUID): Promise<SessionSummary | null> {\n const db = this.getDb();\n\n const results = await db\n .select()\n .from(sessionSummaries)\n .where(\n and(eq(sessionSummaries.agentId, this.runtime.agentId), 
eq(sessionSummaries.roomId, roomId))\n )\n .orderBy(desc(sessionSummaries.updatedAt))\n .limit(1);\n\n if (results.length === 0) {\n return null;\n }\n\n const row = results[0];\n return {\n id: row.id as UUID,\n agentId: row.agentId as UUID,\n roomId: row.roomId as UUID,\n entityId: row.entityId as UUID | undefined,\n summary: row.summary,\n messageCount: row.messageCount,\n lastMessageOffset: row.lastMessageOffset,\n startTime: row.startTime,\n endTime: row.endTime,\n topics: (row.topics as string[]) || [],\n metadata: row.metadata as Record<string, unknown>,\n embedding: row.embedding as number[],\n createdAt: row.createdAt,\n updatedAt: row.updatedAt,\n };\n }\n\n /**\n * Store a session summary (initial creation)\n */\n async storeSessionSummary(\n summary: Omit<SessionSummary, 'id' | 'createdAt' | 'updatedAt'>\n ): Promise<SessionSummary> {\n const db = this.getDb();\n\n const id = crypto.randomUUID() as UUID;\n const now = new Date();\n\n const newSummary: SessionSummary = {\n id,\n createdAt: now,\n updatedAt: now,\n ...summary,\n };\n\n await db.insert(sessionSummaries).values({\n id: newSummary.id,\n agentId: newSummary.agentId,\n roomId: newSummary.roomId,\n entityId: newSummary.entityId || null,\n summary: newSummary.summary,\n messageCount: newSummary.messageCount,\n lastMessageOffset: newSummary.lastMessageOffset,\n startTime: newSummary.startTime,\n endTime: newSummary.endTime,\n topics: newSummary.topics || [],\n metadata: newSummary.metadata || {},\n embedding: newSummary.embedding,\n createdAt: now,\n updatedAt: now,\n });\n\n logger.info(`Stored session summary for room ${newSummary.roomId}`);\n return newSummary;\n }\n\n /**\n * Update an existing session summary\n */\n async updateSessionSummary(\n id: UUID,\n updates: Partial<Omit<SessionSummary, 'id' | 'agentId' | 'roomId' | 'createdAt' | 'updatedAt'>>\n ): Promise<void> {\n const db = this.getDb();\n\n const updateData: any = {\n updatedAt: new Date(),\n };\n\n if (updates.summary !== undefined) {\n updateData.summary = updates.summary;\n }\n\n if (updates.messageCount !== undefined) {\n updateData.messageCount = updates.messageCount;\n }\n\n if (updates.lastMessageOffset !== undefined) {\n updateData.lastMessageOffset = updates.lastMessageOffset;\n }\n\n if (updates.endTime !== undefined) {\n updateData.endTime = updates.endTime;\n }\n\n if (updates.topics !== undefined) {\n updateData.topics = updates.topics;\n }\n\n if (updates.metadata !== undefined) {\n updateData.metadata = updates.metadata;\n }\n\n if (updates.embedding !== undefined) {\n updateData.embedding = updates.embedding;\n }\n\n await db.update(sessionSummaries).set(updateData).where(eq(sessionSummaries.id, id));\n\n logger.info(`Updated session summary: ${id}`);\n }\n\n /**\n * Get session summaries for a room\n */\n async getSessionSummaries(roomId: UUID, limit: number = 5): Promise<SessionSummary[]> {\n const db = this.getDb();\n\n const results = await db\n .select()\n .from(sessionSummaries)\n .where(\n and(eq(sessionSummaries.agentId, this.runtime.agentId), eq(sessionSummaries.roomId, roomId))\n )\n .orderBy(desc(sessionSummaries.updatedAt))\n .limit(limit);\n\n return results.map((row) => ({\n id: row.id as UUID,\n agentId: row.agentId as UUID,\n roomId: row.roomId as UUID,\n entityId: row.entityId as UUID | undefined,\n summary: row.summary,\n messageCount: row.messageCount,\n lastMessageOffset: row.lastMessageOffset,\n startTime: row.startTime,\n endTime: row.endTime,\n topics: (row.topics as string[]) || [],\n metadata: row.metadata as 
Record<string, unknown>,\n embedding: row.embedding as number[],\n createdAt: row.createdAt,\n updatedAt: row.updatedAt,\n }));\n }\n\n /**\n * Search long-term memories by semantic similarity (if embeddings are available)\n */\n async searchLongTermMemories(\n entityId: UUID,\n queryEmbedding: number[],\n limit: number = 5,\n matchThreshold: number = 0.7\n ): Promise<LongTermMemory[]> {\n if (!this.memoryConfig.longTermVectorSearchEnabled) {\n logger.warn('Vector search is not enabled, falling back to recent memories');\n return this.getLongTermMemories(entityId, undefined, limit);\n }\n\n const db = this.getDb();\n\n try {\n // Clean the vector to ensure all numbers are finite and properly formatted\n const cleanVector = queryEmbedding.map((n) =>\n Number.isFinite(n) ? Number(n.toFixed(6)) : 0\n );\n\n // Calculate similarity using Drizzle's cosineDistance\n const similarity = sql<number>`1 - (${cosineDistance(\n longTermMemories.embedding,\n cleanVector\n )})`;\n\n const conditions = [\n eq(longTermMemories.agentId, this.runtime.agentId),\n eq(longTermMemories.entityId, entityId),\n sql`${longTermMemories.embedding} IS NOT NULL`,\n ];\n\n // Add similarity threshold if specified\n if (matchThreshold > 0) {\n conditions.push(gte(similarity, matchThreshold));\n }\n\n const results = await db\n .select({\n memory: longTermMemories,\n similarity,\n })\n .from(longTermMemories)\n .where(and(...conditions))\n .orderBy(desc(similarity))\n .limit(limit);\n\n return results.map((row) => ({\n id: row.memory.id as UUID,\n agentId: row.memory.agentId as UUID,\n entityId: row.memory.entityId as UUID,\n category: row.memory.category as LongTermMemoryCategory,\n content: row.memory.content,\n metadata: row.memory.metadata as Record<string, unknown>,\n embedding: row.memory.embedding as number[],\n confidence: row.memory.confidence as number,\n source: row.memory.source as string,\n createdAt: row.memory.createdAt,\n updatedAt: row.memory.updatedAt,\n lastAccessedAt: row.memory.lastAccessedAt,\n accessCount: row.memory.accessCount as number,\n similarity: row.similarity,\n }));\n } catch (error) {\n logger.warn({ error }, 'Vector search failed, falling back to recent memories');\n return this.getLongTermMemories(entityId, undefined, limit);\n }\n }\n\n /**\n * Get all long-term memories formatted for context\n */\n async getFormattedLongTermMemories(entityId: UUID): Promise<string> {\n const memories = await this.getLongTermMemories(entityId, undefined, 20);\n\n if (memories.length === 0) {\n return '';\n }\n\n // Group by category\n const grouped = new Map<LongTermMemoryCategory, LongTermMemory[]>();\n\n for (const memory of memories) {\n if (!grouped.has(memory.category)) {\n grouped.set(memory.category, []);\n }\n grouped.get(memory.category)?.push(memory);\n }\n\n // Format each category\n const sections: string[] = [];\n\n for (const [category, categoryMemories] of grouped.entries()) {\n const categoryName = category\n .split('_')\n .map((word) => word.charAt(0).toUpperCase() + word.slice(1))\n .join(' ');\n\n const items = categoryMemories.map((m) => `- ${m.content}`).join('\\n');\n sections.push(`**${categoryName}**:\\n${items}`);\n }\n\n return sections.join('\\n\\n');\n }\n}\n",
"import { sql } from 'drizzle-orm';\nimport {\n pgTable,\n text,\n integer,\n jsonb,\n real,\n index,\n varchar,\n timestamp,\n} from 'drizzle-orm/pg-core';\n\n/**\n * Long-term memory storage table\n * Stores persistent facts about users across all conversations\n */\nexport const longTermMemories = pgTable(\n 'long_term_memories',\n {\n id: varchar('id', { length: 36 }).primaryKey(),\n agentId: varchar('agent_id', { length: 36 }).notNull(),\n entityId: varchar('entity_id', { length: 36 }).notNull(),\n category: text('category').notNull(),\n content: text('content').notNull(),\n metadata: jsonb('metadata'),\n embedding: real('embedding').array(),\n confidence: real('confidence').default(1.0),\n source: text('source'),\n createdAt: timestamp('created_at')\n .default(sql`now()`)\n .notNull(),\n updatedAt: timestamp('updated_at')\n .default(sql`now()`)\n .notNull(),\n lastAccessedAt: timestamp('last_accessed_at'),\n accessCount: integer('access_count').default(0),\n },\n (table) => ({\n agentEntityIdx: index('long_term_memories_agent_entity_idx').on(table.agentId, table.entityId),\n categoryIdx: index('long_term_memories_category_idx').on(table.category),\n confidenceIdx: index('long_term_memories_confidence_idx').on(table.confidence),\n createdAtIdx: index('long_term_memories_created_at_idx').on(table.createdAt),\n })\n);\n",
"import { sql } from 'drizzle-orm';\nimport {\n pgTable,\n text,\n integer,\n jsonb,\n real,\n index,\n varchar,\n timestamp,\n} from 'drizzle-orm/pg-core';\n\n/**\n * Session summaries table\n * Stores condensed summaries of conversation sessions\n */\nexport const sessionSummaries = pgTable(\n 'session_summaries',\n {\n id: varchar('id', { length: 36 }).primaryKey(),\n agentId: varchar('agent_id', { length: 36 }).notNull(),\n roomId: varchar('room_id', { length: 36 }).notNull(),\n entityId: varchar('entity_id', { length: 36 }),\n summary: text('summary').notNull(),\n messageCount: integer('message_count').notNull(),\n lastMessageOffset: integer('last_message_offset').notNull().default(0),\n startTime: timestamp('start_time').notNull(),\n endTime: timestamp('end_time').notNull(),\n topics: jsonb('topics'),\n metadata: jsonb('metadata'),\n embedding: real('embedding').array(),\n createdAt: timestamp('created_at')\n .default(sql`now()`)\n .notNull(),\n updatedAt: timestamp('updated_at')\n .default(sql`now()`)\n .notNull(),\n },\n (table) => ({\n agentRoomIdx: index('session_summaries_agent_room_idx').on(table.agentId, table.roomId),\n entityIdx: index('session_summaries_entity_idx').on(table.entityId),\n startTimeIdx: index('session_summaries_start_time_idx').on(table.startTime),\n })\n);\n",
"import { sql } from 'drizzle-orm';\nimport { pgTable, text, integer, real, index, varchar, timestamp } from 'drizzle-orm/pg-core';\n\n/**\n * Memory access logs (optional - for tracking and improving memory retrieval)\n */\nexport const memoryAccessLogs = pgTable(\n 'memory_access_logs',\n {\n id: varchar('id', { length: 36 }).primaryKey(),\n agentId: varchar('agent_id', { length: 36 }).notNull(),\n memoryId: varchar('memory_id', { length: 36 }).notNull(),\n memoryType: text('memory_type').notNull(), // 'long_term' or 'session_summary'\n accessedAt: timestamp('accessed_at')\n .default(sql`now()`)\n .notNull(),\n roomId: varchar('room_id', { length: 36 }),\n relevanceScore: real('relevance_score'),\n wasUseful: integer('was_useful'), // 1 = useful, 0 = not useful, null = unknown\n },\n (table) => ({\n memoryIdx: index('memory_access_logs_memory_idx').on(table.memoryId),\n agentIdx: index('memory_access_logs_agent_idx').on(table.agentId),\n accessedAtIdx: index('memory_access_logs_accessed_at_idx').on(table.accessedAt),\n })\n);\n",
"import {\n type IAgentRuntime,\n type Memory,\n type Evaluator,\n logger,\n ModelType,\n composePromptFromState,\n} from '@elizaos/core';\nimport { MemoryService } from '../services/memory-service';\nimport type { SummaryResult } from '../types/index';\n\n/**\n * Template for generating initial conversation summary\n */\nconst initialSummarizationTemplate = `# Task: Summarize Conversation\n\nYou are analyzing a conversation to create a concise summary that captures the key points, topics, and important details.\n\n# Recent Messages\n{{recentMessages}}\n\n# Instructions\nGenerate a summary that:\n1. Captures the main topics discussed\n2. Highlights key information shared\n3. Notes any decisions made or questions asked\n4. Maintains context for future reference\n5. Is concise but comprehensive\n\n**IMPORTANT**: Keep the summary under 2500 tokens. Be comprehensive but concise.\n\nAlso extract:\n- **Topics**: List of main topics discussed (comma-separated)\n- **Key Points**: Important facts or decisions (bullet points)\n\nRespond in this XML format:\n<summary>\n <text>Your comprehensive summary here</text>\n <topics>topic1, topic2, topic3</topics>\n <keyPoints>\n <point>First key point</point>\n <point>Second key point</point>\n </keyPoints>\n</summary>`;\n\n/**\n * Template for updating/condensing an existing summary\n */\nconst updateSummarizationTemplate = `# Task: Update and Condense Conversation Summary\n\nYou are updating an existing conversation summary with new messages, while keeping the total summary concise.\n\n# Existing Summary\n{{existingSummary}}\n\n# Existing Topics\n{{existingTopics}}\n\n# New Messages Since Last Summary\n{{newMessages}}\n\n# Instructions\nUpdate the summary by:\n1. Merging the existing summary with insights from the new messages\n2. Removing redundant or less important details to stay under the token limit\n3. Keeping the most important context and decisions\n4. Adding new topics if they emerge\n5. **CRITICAL**: Keep the ENTIRE updated summary under 2500 tokens\n\nThe goal is a rolling summary that captures the essence of the conversation without growing indefinitely.\n\nRespond in this XML format:\n<summary>\n <text>Your updated and condensed summary here</text>\n <topics>topic1, topic2, topic3</topics>\n <keyPoints>\n <point>First key point</point>\n <point>Second key point</point>\n </keyPoints>\n</summary>`;\n\n/**\n * Parse XML summary response\n */\nfunction parseSummaryXML(xml: string): SummaryResult {\n const summaryMatch = xml.match(/<text>([\\s\\S]*?)<\\/text>/);\n const topicsMatch = xml.match(/<topics>([\\s\\S]*?)<\\/topics>/);\n const keyPointsMatches = xml.matchAll(/<point>([\\s\\S]*?)<\\/point>/g);\n\n const summary = summaryMatch ? summaryMatch[1].trim() : 'Summary not available';\n const topics = topicsMatch\n ? 
topicsMatch[1]\n .split(',')\n .map((t) => t.trim())\n .filter(Boolean)\n : [];\n const keyPoints = Array.from(keyPointsMatches).map((match) => match[1].trim());\n\n return { summary, topics, keyPoints };\n}\n\n/**\n * Short-term Memory Summarization Evaluator\n *\n * Monitors conversation length and generates summaries when threshold is reached.\n * Summaries replace older messages to reduce context size while preserving information.\n */\nexport const summarizationEvaluator: Evaluator = {\n name: 'MEMORY_SUMMARIZATION',\n description: 'Summarizes conversations to optimize short-term memory',\n similes: ['CONVERSATION_SUMMARY', 'CONTEXT_COMPRESSION', 'MEMORY_OPTIMIZATION'],\n alwaysRun: true,\n\n validate: async (runtime: IAgentRuntime, message: Memory): Promise<boolean> => {\n logger.debug(`Validating summarization for message: ${message.content?.text}`);\n // Only run after messages (not on agent's own messages during generation)\n if (!message.content?.text) {\n return false;\n }\n\n const memoryService = runtime.getService('memory') as MemoryService | null;\n if (!memoryService) {\n return false;\n }\n\n const config = memoryService.getConfig();\n const currentMessageCount = await runtime.countMemories(message.roomId, false, 'messages');\n const shouldSummarize = currentMessageCount >= config.shortTermSummarizationThreshold;\n\n logger.debug(\n {\n roomId: message.roomId,\n currentMessageCount,\n threshold: config.shortTermSummarizationThreshold,\n shouldSummarize,\n },\n 'Summarization check'\n );\n\n return shouldSummarize;\n },\n\n handler: async (runtime: IAgentRuntime, message: Memory): Promise<void> => {\n const memoryService = runtime.getService('memory') as MemoryService;\n if (!memoryService) {\n logger.error('MemoryService not found');\n return;\n }\n\n const config = memoryService.getConfig();\n const { roomId } = message;\n\n try {\n logger.info(`Starting summarization for room ${roomId}`);\n\n // Get the current summary (if any)\n const existingSummary = await memoryService.getCurrentSessionSummary(roomId);\n const lastOffset = existingSummary?.lastMessageOffset || 0;\n\n // Get total message count\n const totalMessageCount = await runtime.countMemories(roomId, false, 'messages');\n\n // Get new messages since last offset\n const newMessages = await runtime.getMemories({\n tableName: 'messages',\n roomId,\n count: config.shortTermSummarizationThreshold,\n unique: false,\n start: lastOffset,\n });\n\n if (newMessages.length === 0) {\n logger.debug('No new messages to summarize');\n return;\n }\n\n // Sort by timestamp\n const sortedMessages = newMessages.sort((a, b) => (a.createdAt || 0) - (b.createdAt || 0));\n\n // Format messages for summarization\n const formattedMessages = sortedMessages\n .map((msg) => {\n const sender = msg.entityId === runtime.agentId ? 
runtime.character.name : 'User';\n return `${sender}: ${msg.content.text || '[non-text message]'}`;\n })\n .join('\\n');\n\n // Generate or update summary using LLM\n const state = await runtime.composeState(message);\n let prompt: string;\n let template: string;\n\n if (existingSummary) {\n // Update existing summary\n template = updateSummarizationTemplate;\n prompt = composePromptFromState({\n state: {\n ...state,\n existingSummary: existingSummary.summary,\n existingTopics: existingSummary.topics?.join(', ') || 'None',\n newMessages: formattedMessages,\n },\n template,\n });\n } else {\n // Create initial summary\n template = initialSummarizationTemplate;\n prompt = composePromptFromState({\n state: {\n ...state,\n recentMessages: formattedMessages,\n },\n template,\n });\n }\n\n const response = await runtime.useModel(ModelType.TEXT_LARGE, {\n prompt,\n maxTokens: config.summaryMaxTokens || 2500,\n });\n\n const summaryResult = parseSummaryXML(response);\n\n logger.info(\n `${existingSummary ? 'Updated' : 'Generated'} summary: ${summaryResult.summary.substring(0, 100)}...`\n );\n\n // Calculate new offset (current total)\n const newOffset = totalMessageCount;\n\n // Get timing info\n const firstMessage = sortedMessages[0];\n const lastMessage = sortedMessages[sortedMessages.length - 1];\n\n const startTime = existingSummary\n ? existingSummary.startTime\n : firstMessage?.createdAt && firstMessage.createdAt > 0\n ? new Date(firstMessage.createdAt)\n : new Date();\n const endTime =\n lastMessage?.createdAt && lastMessage.createdAt > 0\n ? new Date(lastMessage.createdAt)\n : new Date();\n\n if (existingSummary) {\n // Update existing summary\n await memoryService.updateSessionSummary(existingSummary.id, {\n summary: summaryResult.summary,\n messageCount: existingSummary.messageCount + sortedMessages.length,\n lastMessageOffset: newOffset,\n endTime,\n topics: summaryResult.topics,\n metadata: {\n keyPoints: summaryResult.keyPoints,\n },\n });\n\n logger.info(\n `Updated summary for room ${roomId}: ${sortedMessages.length} new messages processed (offset: ${lastOffset} → ${newOffset})`\n );\n } else {\n // Create new summary\n await memoryService.storeSessionSummary({\n agentId: runtime.agentId,\n roomId,\n entityId: message.entityId !== runtime.agentId ? message.entityId : undefined,\n summary: summaryResult.summary,\n messageCount: sortedMessages.length,\n lastMessageOffset: newOffset,\n startTime,\n endTime,\n topics: summaryResult.topics,\n metadata: {\n keyPoints: summaryResult.keyPoints,\n },\n });\n\n logger.info(\n `Created new summary for room ${roomId}: ${sortedMessages.length} messages summarized (offset: 0 → ${newOffset})`\n );\n }\n\n // Note: We do NOT delete messages - they stay in the database\n // The offset tracks what's been summarized\n } catch (error) {\n logger.error({ error }, 'Error during summarization:');\n }\n },\n\n examples: [],\n};\n",
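The evaluator above never deletes messages; `lastMessageOffset` records how far the rolling summary has advanced. A simplified standalone model of that bookkeeping (our own function, mirroring the `validate`/`handler` flow; the real handler additionally caps one batch at the threshold via `count`):

```ts
const threshold = 50; // config.shortTermSummarizationThreshold
let lastMessageOffset = 0;

// Returns the [start, end) message range the next pass would summarize,
// or null when nothing should happen yet.
function summarizableRange(totalMessages: number): [number, number] | null {
  if (totalMessages < threshold) return null; // validate(): below threshold
  if (totalMessages <= lastMessageOffset) return null; // handler(): no new messages
  const range: [number, number] = [lastMessageOffset, totalMessages];
  lastMessageOffset = totalMessages; // checkpoint advances after summarizing
  return range;
}

console.log(summarizableRange(40)); // null - room still under the threshold
console.log(summarizableRange(50)); // [0, 50] - initial summary created
console.log(summarizableRange(57)); // [50, 57] - rolling update folds in 7 messages
```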
"import {\n type IAgentRuntime,\n type Memory,\n type Evaluator,\n logger,\n ModelType,\n composePromptFromState,\n} from '@elizaos/core';\nimport { MemoryService } from '../services/memory-service';\nimport { LongTermMemoryCategory, type MemoryExtraction } from '../types/index';\n\n/**\n * Template for extracting long-term memories\n */\nconst extractionTemplate = `# Task: Extract Long-Term Memory\n\nYou are analyzing a conversation to extract facts that should be remembered long-term about the user.\n\n# Recent Messages\n{{recentMessages}}\n\n# Current Long-Term Memories\n{{existingMemories}}\n\n# Memory Categories\n1. **identity**: User's name, role, identity (e.g., \"I'm a data scientist\")\n2. **expertise**: User's skills, knowledge domains, or unfamiliarity with topics\n3. **projects**: Ongoing projects, past interactions, recurring topics\n4. **preferences**: Communication style, format preferences, verbosity, etc.\n5. **data_sources**: Frequently used files, databases, APIs\n6. **goals**: Broader intentions (e.g., \"preparing for interview\")\n7. **constraints**: User-defined rules or limitations\n8. **definitions**: Custom terms, acronyms, glossaries\n9. **behavioral_patterns**: How the user tends to interact\n\n# Instructions\nExtract any NEW information that should be remembered long-term. For each item:\n- Determine which category it belongs to\n- Write a clear, factual statement\n- Assess confidence (0.0 to 1.0)\n- Only include information explicitly stated or strongly implied\n\nIf there are no new long-term facts to extract, respond with <memories></memories>\n\nRespond in this XML format:\n<memories>\n <memory>\n <category>identity</category>\n <content>User is a software engineer specializing in backend development</content>\n <confidence>0.95</confidence>\n </memory>\n <memory>\n <category>preferences</category>\n <content>Prefers code examples over lengthy explanations</content>\n <confidence>0.85</confidence>\n </memory>\n</memories>`;\n\n/**\n * Parse XML memory extraction response\n */\nfunction parseMemoryExtractionXML(xml: string): MemoryExtraction[] {\n const memoryMatches = xml.matchAll(\n /<memory>[\\s\\S]*?<category>(.*?)<\\/category>[\\s\\S]*?<content>(.*?)<\\/content>[\\s\\S]*?<confidence>(.*?)<\\/confidence>[\\s\\S]*?<\\/memory>/g\n );\n\n const extractions: MemoryExtraction[] = [];\n\n for (const match of memoryMatches) {\n const category = match[1].trim() as LongTermMemoryCategory;\n const content = match[2].trim();\n const confidence = parseFloat(match[3].trim());\n\n // Validate category\n if (!Object.values(LongTermMemoryCategory).includes(category)) {\n logger.warn(`Invalid memory category: ${category}`);\n continue;\n }\n\n if (content && !isNaN(confidence)) {\n extractions.push({ category, content, confidence });\n }\n }\n\n return extractions;\n}\n\n/**\n * Long-term Memory Extraction Evaluator\n *\n * Analyzes conversations to extract persistent facts about users that should be remembered\n * across all future conversations.\n */\nexport const longTermExtractionEvaluator: Evaluator = {\n name: 'LONG_TERM_MEMORY_EXTRACTION',\n description: 'Extracts long-term facts about users from conversations',\n similes: ['MEMORY_EXTRACTION', 'FACT_LEARNING', 'USER_PROFILING'],\n alwaysRun: true,\n\n validate: async (runtime: IAgentRuntime, message: Memory): Promise<boolean> => {\n logger.debug(`Validating long-term memory extraction for message: ${message.content?.text}`);\n // Only run on user messages (not agent's own)\n if (message.entityId === 
runtime.agentId) {\n logger.debug(\"Skipping long-term memory extraction for agent's own message\");\n return false;\n }\n\n if (!message.content?.text) {\n logger.debug('Skipping long-term memory extraction for message without text');\n return false;\n }\n\n const memoryService = runtime.getService('memory') as MemoryService | null;\n if (!memoryService) {\n logger.debug('MemoryService not found');\n return false;\n }\n\n const config = memoryService.getConfig();\n if (!config.longTermExtractionEnabled) {\n logger.debug('Long-term memory extraction is disabled');\n return false;\n }\n\n // Count total messages from this entity in this room\n const currentMessageCount = await runtime.countMemories(message.roomId, false, 'messages');\n\n const shouldRun = await memoryService.shouldRunExtraction(\n message.entityId,\n message.roomId,\n currentMessageCount\n );\n logger.debug(`Should run extraction: ${shouldRun}`);\n return shouldRun;\n },\n\n handler: async (runtime: IAgentRuntime, message: Memory): Promise<void> => {\n const memoryService = runtime.getService('memory') as MemoryService;\n if (!memoryService) {\n logger.error('MemoryService not found');\n return;\n }\n\n const config = memoryService.getConfig();\n const { entityId, roomId } = message;\n\n try {\n logger.info(`Extracting long-term memories for entity ${entityId}`);\n\n // Get recent conversation context\n const recentMessages = await runtime.getMemories({\n tableName: 'messages',\n roomId,\n count: 20,\n unique: false,\n });\n\n const formattedMessages = recentMessages\n .sort((a, b) => (a.createdAt || 0) - (b.createdAt || 0))\n .map((msg) => {\n const sender = msg.entityId === runtime.agentId ? runtime.character.name : 'User';\n return `${sender}: ${msg.content.text || '[non-text message]'}`;\n })\n .join('\\n');\n\n // Get existing long-term memories\n const existingMemories = await memoryService.getLongTermMemories(entityId, undefined, 30);\n const formattedExisting =\n existingMemories.length > 0\n ? 
existingMemories\n .map((m) => `[${m.category}] ${m.content} (confidence: ${m.confidence})`)\n .join('\\n')\n : 'None yet';\n\n // Generate extraction using LLM\n const state = await runtime.composeState(message);\n const prompt = composePromptFromState({\n state: {\n ...state,\n recentMessages: formattedMessages,\n existingMemories: formattedExisting,\n },\n template: extractionTemplate,\n });\n\n const response = await runtime.useModel(ModelType.TEXT_LARGE, {\n prompt,\n });\n\n const extractions = parseMemoryExtractionXML(response);\n\n logger.info(`Extracted ${extractions.length} long-term memories`);\n\n // Store each extracted memory\n for (const extraction of extractions) {\n if (extraction.confidence >= config.longTermConfidenceThreshold) {\n await memoryService.storeLongTermMemory({\n agentId: runtime.agentId,\n entityId,\n category: extraction.category,\n content: extraction.content,\n confidence: extraction.confidence,\n source: 'conversation',\n metadata: {\n roomId,\n extractedAt: new Date().toISOString(),\n },\n });\n\n logger.info(\n `Stored long-term memory: [${extraction.category}] ${extraction.content.substring(0, 50)}...`\n );\n } else {\n logger.debug(\n `Skipped low-confidence memory: ${extraction.content} (confidence: ${extraction.confidence})`\n );\n }\n }\n\n // Update the extraction checkpoint after successful extraction\n const currentMessageCount = await runtime.countMemories(roomId, false, 'messages');\n await memoryService.setLastExtractionCheckpoint(entityId, roomId, currentMessageCount);\n logger.debug(\n `Updated extraction checkpoint to ${currentMessageCount} for entity ${entityId} in room ${roomId}`\n );\n } catch (error) {\n logger.error({ error }, 'Error during long-term memory extraction:');\n }\n },\n\n examples: [],\n};\n",
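To make the extraction contract concrete, here is what the module-private `parseMemoryExtractionXML` above would return for a hand-written model response (sample text invented; assumes the function is in scope):

```ts
const sample = `<memories>
  <memory>
    <category>identity</category>
    <content>User is a data engineer at a logistics company</content>
    <confidence>0.9</confidence>
  </memory>
  <memory>
    <category>mood</category>
    <content>Seems to be in a hurry today</content>
    <confidence>0.6</confidence>
  </memory>
</memories>`;

console.log(parseMemoryExtractionXML(sample));
// [{ category: 'identity',
//    content: 'User is a data engineer at a logistics company',
//    confidence: 0.9 }]
// The second <memory> is dropped with a warning because "mood" is not a
// LongTermMemoryCategory value.
```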
"import type { UUID } from '@elizaos/core';\n\n/**\n * Categories of long-term memory\n */\nexport enum LongTermMemoryCategory {\n IDENTITY = 'identity', // User identity, name, roles\n EXPERTISE = 'expertise', // Domain knowledge and familiarity\n PROJECTS = 'projects', // Past interactions and recurring topics\n PREFERENCES = 'preferences', // User preferences for interaction style\n DATA_SOURCES = 'data_sources', // Frequently used files, databases, APIs\n GOALS = 'goals', // User's broader intentions and objectives\n CONSTRAINTS = 'constraints', // User-defined rules and limitations\n DEFINITIONS = 'definitions', // Custom terms, acronyms, glossaries\n BEHAVIORAL_PATTERNS = 'behavioral_patterns', // User interaction patterns\n}\n\n/**\n * Long-term memory entry\n */\nexport interface LongTermMemory {\n id: UUID;\n agentId: UUID;\n entityId: UUID; // The user/entity this memory is about\n category: LongTermMemoryCategory;\n content: string; // The actual memory content\n metadata?: Record<string, unknown>; // Additional structured data\n embedding?: number[]; // Vector embedding for semantic search\n confidence?: number; // Confidence score (0-1)\n source?: string; // Where this memory came from (conversation, manual, etc.)\n createdAt: Date;\n updatedAt: Date;\n lastAccessedAt?: Date;\n accessCount?: number;\n similarity?: number; // Optional similarity score from vector search\n}\n\n/**\n * Short-term memory session summary\n */\nexport interface SessionSummary {\n id: UUID;\n agentId: UUID;\n roomId: UUID;\n entityId?: UUID; // Optional: specific user in the session\n summary: string; // The summarized conversation\n messageCount: number; // Number of messages summarized\n lastMessageOffset: number; // Index of last summarized message (for pagination)\n startTime: Date; // Timestamp of first message\n endTime: Date; // Timestamp of last message\n topics?: string[]; // Main topics discussed\n metadata?: Record<string, unknown>;\n embedding?: number[]; // Vector embedding of the summary\n createdAt: Date;\n updatedAt: Date; // Track when summary was last updated\n}\n\n/**\n * Configuration for memory plugin\n */\nexport interface MemoryConfig {\n // Short-term memory settings\n shortTermSummarizationThreshold: number; // Messages count before summarization\n shortTermRetainRecent: number; // Number of recent messages to keep after summarization\n\n // Long-term memory settings\n longTermExtractionEnabled: boolean;\n longTermVectorSearchEnabled: boolean;\n longTermConfidenceThreshold: number; // Minimum confidence to store\n longTermExtractionInterval: number; // Run extraction every N messages (e.g., 5, 10, 15...)\n\n // Summarization settings\n summaryModelType?: string;\n summaryMaxTokens?: number;\n}\n\n/**\n * Memory extraction result from evaluator\n */\nexport interface MemoryExtraction {\n category: LongTermMemoryCategory;\n content: string;\n confidence: number;\n metadata?: Record<string, unknown>;\n}\n\n/**\n * Summary generation result\n */\nexport interface SummaryResult {\n summary: string;\n topics: string[];\n keyPoints: string[];\n}\n",
"import {\n type IAgentRuntime,\n type Memory,\n type Provider,\n type State,\n logger,\n addHeader,\n} from '@elizaos/core';\nimport { MemoryService } from '../services/memory-service';\n\n/**\n * Short-term Memory Provider\n *\n * Provides conversation context by combining:\n * 1. Recent session summaries (for older conversations)\n * 2. Recent unsummarized messages (most recent activity)\n *\n * This provider works alongside recentMessagesProvider to optimize context usage.\n * When conversations get long, older messages are summarized and this provider\n * injects those summaries instead of full message history.\n */\nexport const shortTermMemoryProvider: Provider = {\n name: 'SHORT_TERM_MEMORY',\n description: 'Recent conversation summaries to maintain context efficiently',\n position: 95, // Run before recentMessagesProvider (100) to provide summary context first\n\n get: async (runtime: IAgentRuntime, message: Memory, _state: State) => {\n try {\n const memoryService = runtime.getService('memory') as MemoryService | null;\n if (!memoryService) {\n return {\n data: { summaries: [] },\n values: { sessionSummaries: '' },\n text: '',\n };\n }\n\n const { roomId } = message;\n\n // Get recent session summaries for this room\n const summaries = await memoryService.getSessionSummaries(roomId, 3);\n\n if (summaries.length === 0) {\n return {\n data: { summaries: [] },\n values: { sessionSummaries: '' },\n text: '',\n };\n }\n\n // Format summaries for context\n const formattedSummaries = summaries\n .reverse() // Show oldest to newest\n .map((summary, index) => {\n const messageRange = `${summary.messageCount} messages`;\n const timeRange = new Date(summary.startTime).toLocaleDateString();\n\n let text = `**Session ${index + 1}** (${messageRange}, ${timeRange})\\n`;\n text += summary.summary;\n\n if (summary.topics && summary.topics.length > 0) {\n text += `\\n*Topics: ${summary.topics.join(', ')}*`;\n }\n\n return text;\n })\n .join('\\n\\n');\n\n const text = addHeader('# Previous Conversation Context', formattedSummaries);\n\n return {\n data: { summaries },\n values: { sessionSummaries: text },\n text,\n };\n } catch (error) {\n logger.error({ error }, 'Error in shortTermMemoryProvider:');\n return {\n data: { summaries: [] },\n values: { sessionSummaries: '' },\n text: '',\n };\n }\n },\n};\n",
"import {\n type IAgentRuntime,\n type Memory,\n type Provider,\n type State,\n logger,\n addHeader,\n} from '@elizaos/core';\nimport { MemoryService } from '../services/memory-service';\n\n/**\n * Long-term Memory Provider\n *\n * Provides persistent facts about the user that have been learned across\n * all conversations. This includes:\n * - User identity and roles\n * - Domain expertise\n * - Preferences\n * - Goals and projects\n * - Custom definitions\n * - Behavioral patterns\n *\n * This provider enriches the context with relevant long-term information\n * to make the agent's responses more personalized and contextually aware.\n */\nexport const longTermMemoryProvider: Provider = {\n name: 'LONG_TERM_MEMORY',\n description: 'Persistent facts and preferences about the user',\n position: 50, // Run early to establish user context\n\n get: async (runtime: IAgentRuntime, message: Memory, _state: State) => {\n try {\n const memoryService = runtime.getService('memory') as MemoryService | null;\n if (!memoryService) {\n return {\n data: { memories: [] },\n values: { longTermMemories: '' },\n text: '',\n };\n }\n\n const { entityId } = message;\n\n // Skip for agent's own messages\n if (entityId === runtime.agentId) {\n return {\n data: { memories: [] },\n values: { longTermMemories: '' },\n text: '',\n };\n }\n\n // Get long-term memories for this entity\n const memories = await memoryService.getLongTermMemories(entityId, undefined, 25);\n\n if (memories.length === 0) {\n return {\n data: { memories: [] },\n values: { longTermMemories: '' },\n text: '',\n };\n }\n\n // Format memories using the service's built-in formatter\n const formattedMemories = await memoryService.getFormattedLongTermMemories(entityId);\n\n const text = addHeader('# What I Know About You', formattedMemories);\n\n // Create a summary of memory categories for quick reference\n const categoryCounts = new Map<string, number>();\n for (const memory of memories) {\n const count = categoryCounts.get(memory.category) || 0;\n categoryCounts.set(memory.category, count + 1);\n }\n\n const categoryList = Array.from(categoryCounts.entries())\n .map(([cat, count]) => `${cat}: ${count}`)\n .join(', ');\n\n return {\n data: {\n memories,\n categoryCounts: Object.fromEntries(categoryCounts),\n },\n values: {\n longTermMemories: text,\n memoryCategories: categoryList,\n },\n text,\n };\n } catch (error) {\n logger.error({ error }, 'Error in longTermMemoryProvider:');\n return {\n data: { memories: [] },\n values: { longTermMemories: '' },\n text: '',\n };\n }\n },\n};\n",
"import type { Plugin } from '@elizaos/core';\nimport { MemoryService } from './services/memory-service';\nimport { summarizationEvaluator } from './evaluators/summarization';\nimport { longTermExtractionEvaluator } from './evaluators/long-term-extraction';\nimport { shortTermMemoryProvider } from './providers/short-term-memory';\nimport { longTermMemoryProvider } from './providers/long-term-memory';\n// import { rememberAction } from './actions/remember';\nimport * as schema from './schemas/index';\n\nexport * from './types/index';\nexport * from './schemas/index';\nexport { MemoryService } from './services/memory-service';\n\n/**\n * Memory Plugin\n *\n * Advanced memory management plugin that provides:\n *\n * **Short-term Memory (Conversation Summarization)**:\n * - Automatically summarizes long conversations to reduce context size\n * - Retains recent messages while archiving older ones as summaries\n * - Configurable thresholds for when to summarize\n *\n * **Long-term Memory (Persistent Facts)**:\n * - Extracts and stores persistent facts about users\n * - Categorizes information (identity, expertise, preferences, etc.)\n * - Provides context-aware user profiles across all conversations\n *\n * **Components**:\n * - `MemoryService`: Manages all memory operations\n * - Evaluators: Process conversations to create summaries and extract facts\n * - Providers: Inject memory context into conversations\n * - Actions: Allow manual memory storage via user commands\n *\n * **Configuration** (via environment variables):\n * - `MEMORY_SUMMARIZATION_THRESHOLD`: Messages before summarization (default: 50)\n * - `MEMORY_RETAIN_RECENT`: Recent messages to keep (default: 10)\n * - `MEMORY_LONG_TERM_ENABLED`: Enable long-term extraction (default: true)\n * - `MEMORY_CONFIDENCE_THRESHOLD`: Minimum confidence to store (default: 0.7)\n *\n * **Database Tables**:\n * - `long_term_memories`: Persistent user facts\n * - `session_summaries`: Conversation summaries\n * - `memory_access_logs`: Optional usage tracking\n */\nexport const memoryPlugin: Plugin = {\n name: 'memory',\n description:\n 'Advanced memory management with conversation summarization and long-term persistent memory',\n\n services: [MemoryService],\n\n evaluators: [summarizationEvaluator, longTermExtractionEvaluator],\n\n providers: [longTermMemoryProvider, shortTermMemoryProvider],\n\n // actions: [rememberAction],\n\n // Export schema for dynamic migrations\n schema,\n};\n\nexport default memoryPlugin;\n"
],
"mappings": "iIAAA,kBAEE,aAEA,sBAGF,aAAS,SAAI,UAAK,SAAM,oBAAK,UAAgB,2GCP7C,cAAS,oBACT,kBACE,WACA,aACA,YACA,WACA,WACA,aACA,eACA,4BAOK,IAAM,EAAmB,GAC9B,qBACA,CACE,GAAI,EAAQ,KAAM,CAAE,OAAQ,EAAG,CAAC,EAAE,WAAW,EAC7C,QAAS,EAAQ,WAAY,CAAE,OAAQ,EAAG,CAAC,EAAE,QAAQ,EACrD,SAAU,EAAQ,YAAa,CAAE,OAAQ,EAAG,CAAC,EAAE,QAAQ,EACvD,SAAU,EAAK,UAAU,EAAE,QAAQ,EACnC,QAAS,EAAK,SAAS,EAAE,QAAQ,EACjC,SAAU,GAAM,UAAU,EAC1B,UAAW,EAAK,WAAW,EAAE,MAAM,EACnC,WAAY,EAAK,YAAY,EAAE,QAAQ,CAAG,EAC1C,OAAQ,EAAK,QAAQ,EACrB,UAAW,EAAU,YAAY,EAC9B,QAAQ,QAAU,EAClB,QAAQ,EACX,UAAW,EAAU,YAAY,EAC9B,QAAQ,QAAU,EAClB,QAAQ,EACX,eAAgB,EAAU,kBAAkB,EAC5C,YAAa,GAAQ,cAAc,EAAE,QAAQ,CAAC,CAChD,EACA,CAAC,KAAW,CACV,eAAgB,EAAM,qCAAqC,EAAE,GAAG,EAAM,QAAS,EAAM,QAAQ,EAC7F,YAAa,EAAM,iCAAiC,EAAE,GAAG,EAAM,QAAQ,EACvE,cAAe,EAAM,mCAAmC,EAAE,GAAG,EAAM,UAAU,EAC7E,aAAc,EAAM,mCAAmC,EAAE,GAAG,EAAM,SAAS,CAC7E,EACF,EC3CA,cAAS,oBACT,kBACE,WACA,cACA,WACA,UACA,YACA,aACA,eACA,4BAOK,IAAM,EAAmB,GAC9B,oBACA,CACE,GAAI,EAAQ,KAAM,CAAE,OAAQ,EAAG,CAAC,EAAE,WAAW,EAC7C,QAAS,EAAQ,WAAY,CAAE,OAAQ,EAAG,CAAC,EAAE,QAAQ,EACrD,OAAQ,EAAQ,UAAW,CAAE,OAAQ,EAAG,CAAC,EAAE,QAAQ,EACnD,SAAU,EAAQ,YAAa,CAAE,OAAQ,EAAG,CAAC,EAC7C,QAAS,GAAK,SAAS,EAAE,QAAQ,EACjC,aAAc,EAAQ,eAAe,EAAE,QAAQ,EAC/C,kBAAmB,EAAQ,qBAAqB,EAAE,QAAQ,EAAE,QAAQ,CAAC,EACrE,UAAW,EAAU,YAAY,EAAE,QAAQ,EAC3C,QAAS,EAAU,UAAU,EAAE,QAAQ,EACvC,OAAQ,EAAM,QAAQ,EACtB,SAAU,EAAM,UAAU,EAC1B,UAAW,GAAK,WAAW,EAAE,MAAM,EACnC,UAAW,EAAU,YAAY,EAC9B,QAAQ,QAAU,EAClB,QAAQ,EACX,UAAW,EAAU,YAAY,EAC9B,QAAQ,QAAU,EAClB,QAAQ,CACb,EACA,CAAC,KAAW,CACV,aAAc,EAAM,kCAAkC,EAAE,GAAG,EAAM,QAAS,EAAM,MAAM,EACtF,UAAW,EAAM,8BAA8B,EAAE,GAAG,EAAM,QAAQ,EAClE,aAAc,EAAM,kCAAkC,EAAE,GAAG,EAAM,SAAS,CAC5E,EACF,EC3CA,cAAS,qBACT,kBAAS,WAAS,cAAM,WAAS,YAAM,aAAO,eAAS,6BAKhD,IAAM,EAAmB,GAC9B,qBACA,CACE,GAAI,EAAQ,KAAM,CAAE,OAAQ,EAAG,CAAC,EAAE,WAAW,EAC7C,QAAS,EAAQ,WAAY,CAAE,OAAQ,EAAG,CAAC,EAAE,QAAQ,EACrD,SAAU,EAAQ,YAAa,CAAE,OAAQ,EAAG,CAAC,EAAE,QAAQ,EACvD,WAAY,GAAK,aAAa,EAAE,QAAQ,EACxC,WAAY,GAAU,aAAa,EAChC,QAAQ,SAAU,EAClB,QAAQ,EACX,OAAQ,EAAQ,UAAW,CAAE,OAAQ,EAAG,CAAC,EACzC,eAAgB,GAAK,iBAAiB,EACtC,UAAW,GAAQ,YAAY,CACjC,EACA,CAAC,KAAW,CACV,UAAW,EAAM,+BAA+B,EAAE,GAAG,EAAM,QAAQ,EACnE,SAAU,EAAM,8BAA8B,EAAE,GAAG,EAAM,OAAO,EAChE,cAAe,EAAM,oCAAoC,EAAE,GAAG,EAAM,UAAU,CAChF,EACF,EHLO,MAAM,UAAsB,EAAQ,OAClC,aAA+B,SAE9B,qBACA,aACA,0BAER,sBACE,0FAEF,WAAW,CAAC,EAAyB,CACnC,MAAM,CAAO,EACb,KAAK,qBAAuB,IAAI,IAChC,KAAK,0BAA4B,IAAI,IACrC,KAAK,aAAe,CAClB,gCAAiC,EACjC,sBAAuB,GACvB,0BAA2B,GAC3B,4BAA6B,GAC7B,4BAA6B,IAC7B,2BAA4B,EAC5B,iBAAkB,aAClB,iBAAkB,IACpB,cAGW,MAAK,CAAC,EAA0C,CAC3D,IAAM,EAAU,IAAI,EAAc,CAAO,EAEzC,OADA,MAAM,EAAQ,WAAW,CAAO,EACzB,OAGH,KAAI,EAAkB,CAE1B,EAAO,KAAK,uBAAuB,OAG/B,WAAU,CAAC,EAAuC,CACtD,KAAK,QAAU,EAGf,IAAM,EAAY,EAAQ,WAAW,gCAAgC,EACrE,GAAI,EACF,KAAK,aAAa,gCAAkC,SAAS,EAAW,EAAE,EAG5E,IAAM,EAAe,EAAQ,WAAW,sBAAsB,EAC9D,GAAI,EACF,KAAK,aAAa,sBAAwB,SAAS,EAAc,EAAE,EAGrE,IAAM,EAAkB,EAAQ,WAAW,0BAA0B,EAErE,GAAI,IAAoB,QACtB,KAAK,aAAa,0BAA4B,GACzC,QAAI,IAAoB,OAC7B,KAAK,aAAa,0BAA4B,GAIhD,IAAM,EAAsB,EAAQ,WAAW,6BAA6B,EAC5E,GAAI,EACF,KAAK,aAAa,4BAA8B,WAAW,CAAmB,EAGhF,EAAO,KACL,CACE,uBAAwB,KAAK,aAAa,gCAC1C,aAAc,KAAK,aAAa,sBAChC,gBAAiB,KAAK,aAAa,0BACnC,mBAAoB,KAAK,aAAa,2BACtC,oBAAqB,KAAK,aAAa,2BACzC,EACA,2BACF,EAMM,KAAK,EAAQ,CACnB,IAAM,EAAM,KAAK,QAAgB,GACjC,GAAI,CAAC,EACH,MAAU,MAAM,wBAAwB,EAE1C,OAAO,EAMT,SAAS,EAAiB,CACxB,MAAO,IAAK,KAAK,YAAa,EAMhC,YAAY,CAAC,EAAsC,CACjD,KAAK,aAAe,IAAK,KAAK,gBAAiB,CAAQ,EAMzD,qBAAqB,CAAC,EAAsB,CAE1C,IAAM,GADU,KAAK,qBAAqB,IAAI,CAAM,GAAK,GAC9B,EAE3B,OADA,KAAK,qBAAqB,IAAI,EAAQ,CAAQ,EACvC,EAMT,iBAAiB,CAAC,EAAoB,CACpC,KAAK,qBAAqB,IAAI,EAAQ,CAAC,OAMnC,gBAAe,CAAC,EAAgC,CAEpD,OADc,MAAM,KAAK,QAAQ,cAA
c,EAAQ,GAAO,UAAU,GACxD,KAAK,aAAa,gCAM5B,gBAAgB,CAAC,EAAgB,EAAsB,CAC7D,MAAO,qBAAqB,KAAY,SAOpC,4BAA2B,CAAC,EAAgB,EAA+B,CAC/E,IAAM,EAAM,KAAK,iBAAiB,EAAU,CAAM,EAG5C,EAAS,KAAK,0BAA0B,IAAI,CAAG,EACrD,GAAI,IAAW,OACb,OAAO,EAIT,GAAI,CAEF,IAAM,EADa,MAAM,KAAK,QAAQ,SAAiB,CAAG,GACvB,EAKnC,OAFA,KAAK,0BAA0B,IAAI,EAAK,CAAY,EAE7C,EACP,MAAO,EAAO,CAEd,OADA,EAAO,KAAK,CAAE,OAAM,EAAG,gDAAgD,EAChE,QAQL,4BAA2B,CAC/B,EACA,EACA,EACe,CACf,IAAM,EAAM,KAAK,iBAAiB,EAAU,CAAM,EAGlD,KAAK,0BAA0B,IAAI,EAAK,CAAY,EAGpD,GAAI,CACF,MAAM,KAAK,QAAQ,SAAS,EAAK,CAAY,EAC7C,EAAO,MACL,iCAAiC,aAAoB,sBAA2B,GAClF,EACA,MAAO,EAAO,CACd,EAAO,MAAM,CAAE,OAAM,EAAG,kDAAkD,QAOxE,oBAAmB,CACvB,EACA,EACA,EACkB,CAClB,IAAM,EAAW,KAAK,aAAa,2BAC7B,EAAiB,MAAM,KAAK,4BAA4B,EAAU,CAAM,EAGxE,EAAoB,KAAK,MAAM,EAAsB,CAAQ,EAAI,EAGjE,EAAY,GAAuB,GAAY,EAAoB,EAezE,OAbA,EAAO,MACL,CACE,WACA,SACA,sBACA,WACA,iBACA,oBACA,WACF,EACA,kBACF,EAEO,OAMH,oBAAmB,CACvB,EACyB,CACzB,IAAM,EAAK,KAAK,MAAM,EAEhB,EAAK,OAAO,WAAW,EACvB,EAAM,IAAI,KAEV,EAA4B,CAChC,KACA,UAAW,EACX,UAAW,EACX,YAAa,KACV,CACL,EAEA,GAAI,CACF,MAAM,EAAG,OAAO,CAAgB,EAAE,OAAO,CACvC,GAAI,EAAU,GACd,QAAS,EAAU,QACnB,SAAU,EAAU,SACpB,SAAU,EAAU,SACpB,QAAS,EAAU,QACnB,SAAU,EAAU,UAAY,CAAC,EACjC,UAAW,EAAU,UACrB,WAAY,EAAU,WACtB,OAAQ,EAAU,OAClB,YAAa,EAAU,YACvB,UAAW,EACX,UAAW,EACX,eAAgB,EAAU,cAC5B,CAAC,EACD,MAAO,EAAO,CAEd,MADA,EAAO,MAAM,CAAE,OAAM,EAAG,kCAAkC,EACpD,EAIR,OADA,EAAO,KAAK,4BAA4B,EAAU,uBAAuB,EAAU,UAAU,EACtF,OAMH,oBAAmB,CACvB,EACA,EACA,EAAgB,GACW,CAC3B,IAAM,EAAK,KAAK,MAAM,EAEhB,EAAa,CACjB,EAAG,EAAiB,QAAS,KAAK,QAAQ,OAAO,EACjD,EAAG,EAAiB,SAAU,CAAQ,CACxC,EAEA,GAAI,EACF,EAAW,KAAK,EAAG,EAAiB,SAAU,CAAQ,CAAC,EAUzD,OAPgB,MAAM,EACnB,OAAO,EACP,KAAK,CAAgB,EACrB,MAAM,EAAI,GAAG,CAAU,CAAC,EACxB,QAAQ,EAAK,EAAiB,UAAU,EAAG,EAAK,EAAiB,SAAS,CAAC,EAC3E,MAAM,CAAK,GAEC,IAAI,CAAC,KAAS,CAC3B,GAAI,EAAI,GACR,QAAS,EAAI,QACb,SAAU,EAAI,SACd,SAAU,EAAI,SACd,QAAS,EAAI,QACb,SAAU,EAAI,SACd,UAAW,EAAI,UACf,WAAY,EAAI,WAChB,OAAQ,EAAI,OACZ,UAAW,EAAI,UACf,UAAW,EAAI,UACf,eAAgB,EAAI,eACpB,YAAa,EAAI,WACnB,EAAE,OAME,qBAAoB,CACxB,EACA,EACe,CACf,IAAM,EAAK,KAAK,MAAM,EAEhB,EAAkB,CACtB,UAAW,IAAI,IACjB,EAEA,GAAI,EAAQ,UAAY,OACtB,EAAW,QAAU,EAAQ,QAG/B,GAAI,EAAQ,WAAa,OACvB,EAAW,SAAW,EAAQ,SAGhC,GAAI,EAAQ,aAAe,OACzB,EAAW,WAAa,EAAQ,WAGlC,GAAI,EAAQ,YAAc,OACxB,EAAW,UAAY,EAAQ,UAGjC,GAAI,EAAQ,iBAAmB,OAC7B,EAAW,eAAiB,EAAQ,eAGtC,GAAI,EAAQ,cAAgB,OAC1B,EAAW,YAAc,EAAQ,YAGnC,MAAM,EAAG,OAAO,CAAgB,EAAE,IAAI,CAAU,EAAE,MAAM,EAAG,EAAiB,GAAI,CAAE,CAAC,EAEnF,EAAO,KAAK,6BAA6B,GAAI,OAMzC,qBAAoB,CAAC,EAAyB,CAGlD,MAFW,KAAK,MAAM,EAEb,OAAO,CAAgB,EAAE,MAAM,EAAG,EAAiB,GAAI,CAAE,CAAC,EAEnE,EAAO,KAAK,6BAA6B,GAAI,OAMzC,yBAAwB,CAAC,EAA8C,CAG3E,IAAM,EAAU,MAFL,KAAK,MAAM,EAGnB,OAAO,EACP,KAAK,CAAgB,EACrB,MACC,EAAI,EAAG,EAAiB,QAAS,KAAK,QAAQ,OAAO,EAAG,EAAG,EAAiB,OAAQ,CAAM,CAAC,CAC7F,EACC,QAAQ,EAAK,EAAiB,SAAS,CAAC,EACxC,MAAM,CAAC,EAEV,GAAI,EAAQ,SAAW,EACrB,OAAO,KAGT,IAAM,EAAM,EAAQ,GACpB,MAAO,CACL,GAAI,EAAI,GACR,QAAS,EAAI,QACb,OAAQ,EAAI,OACZ,SAAU,EAAI,SACd,QAAS,EAAI,QACb,aAAc,EAAI,aAClB,kBAAmB,EAAI,kBACvB,UAAW,EAAI,UACf,QAAS,EAAI,QACb,OAAS,EAAI,QAAuB,CAAC,EACrC,SAAU,EAAI,SACd,UAAW,EAAI,UACf,UAAW,EAAI,UACf,UAAW,EAAI,SACjB,OAMI,oBAAmB,CACvB,EACyB,CACzB,IAAM,EAAK,KAAK,MAAM,EAEhB,EAAK,OAAO,WAAW,EACvB,EAAM,IAAI,KAEV,EAA6B,CACjC,KACA,UAAW,EACX,UAAW,KACR,CACL,EAoBA,OAlBA,MAAM,EAAG,OAAO,CAAgB,EAAE,OAAO,CACvC,GAAI,EAAW,GACf,QAAS,EAAW,QACpB,OAAQ,EAAW,OACnB,SAAU,EAAW,UAAY,KACjC,QAAS,EAAW,QACpB,aAAc,EAAW,aACzB,kBAAmB,EAAW,kBAC9B,UAAW,EAAW,UACtB,QAAS,EAAW,QACpB,OAAQ,EAAW,QAAU,CAAC,EAC9B,SAAU,EAAW,UAAY,CAAC,EAClC,UAAW,EAAW,UACtB,UAAW,EACX,UAAW,CACb,CAAC,EAED,EAAO,KAAK,mCAAmC,EAAW,QAAQ,EAC3D,OAMH,qBAAoB,CACxB,EACA,EACe,CACf,IAAM,EAAK,KA
AK,MAAM,EAEhB,EAAkB,CACtB,UAAW,IAAI,IACjB,EAEA,GAAI,EAAQ,UAAY,OACtB,EAAW,QAAU,EAAQ,QAG/B,GAAI,EAAQ,eAAiB,OAC3B,EAAW,aAAe,EAAQ,aAGpC,GAAI,EAAQ,oBAAsB,OAChC,EAAW,kBAAoB,EAAQ,kBAGzC,GAAI,EAAQ,UAAY,OACtB,EAAW,QAAU,EAAQ,QAG/B,GAAI,EAAQ,SAAW,OACrB,EAAW,OAAS,EAAQ,OAG9B,GAAI,EAAQ,WAAa,OACvB,EAAW,SAAW,EAAQ,SAGhC,GAAI,EAAQ,YAAc,OACxB,EAAW,UAAY,EAAQ,UAGjC,MAAM,EAAG,OAAO,CAAgB,EAAE,IAAI,CAAU,EAAE,MAAM,EAAG,EAAiB,GAAI,CAAE,CAAC,EAEnF,EAAO,KAAK,4BAA4B,GAAI,OAMxC,oBAAmB,CAAC,EAAc,EAAgB,EAA8B,CAYpF,OATgB,MAFL,KAAK,MAAM,EAGnB,OAAO,EACP,KAAK,CAAgB,EACrB,MACC,EAAI,EAAG,EAAiB,QAAS,KAAK,QAAQ,OAAO,EAAG,EAAG,EAAiB,OAAQ,CAAM,CAAC,CAC7F,EACC,QAAQ,EAAK,EAAiB,SAAS,CAAC,EACxC,MAAM,CAAK,GAEC,IAAI,CAAC,KAAS,CAC3B,GAAI,EAAI,GACR,QAAS,EAAI,QACb,OAAQ,EAAI,OACZ,SAAU,EAAI,SACd,QAAS,EAAI,QACb,aAAc,EAAI,aAClB,kBAAmB,EAAI,kBACvB,UAAW,EAAI,UACf,QAAS,EAAI,QACb,OAAS,EAAI,QAAuB,CAAC,EACrC,SAAU,EAAI,SACd,UAAW,EAAI,UACf,UAAW,EAAI,UACf,UAAW,EAAI,SACjB,EAAE,OAME,uBAAsB,CAC1B,EACA,EACA,EAAgB,EAChB,EAAyB,IACE,CAC3B,GAAI,CAAC,KAAK,aAAa,4BAErB,OADA,EAAO,KAAK,+DAA+D,EACpE,KAAK,oBAAoB,EAAU,OAAW,CAAK,EAG5D,IAAM,EAAK,KAAK,MAAM,EAEtB,GAAI,CAEF,IAAM,EAAc,EAAe,IAAI,CAAC,IACtC,OAAO,SAAS,CAAC,EAAI,OAAO,EAAE,QAAQ,CAAC,CAAC,EAAI,CAC9C,EAGM,EAAa,SAAmB,GACpC,EAAiB,UACjB,CACF,KAEM,EAAa,CACjB,EAAG,EAAiB,QAAS,KAAK,QAAQ,OAAO,EACjD,EAAG,EAAiB,SAAU,CAAQ,EACtC,IAAM,EAAiB,uBACzB,EAGA,GAAI,EAAiB,EACnB,EAAW,KAAK,GAAI,EAAY,CAAc,CAAC,EAajD,OAVgB,MAAM,EACnB,OAAO,CACN,OAAQ,EACR,YACF,CAAC,EACA,KAAK,CAAgB,EACrB,MAAM,EAAI,GAAG,CAAU,CAAC,EACxB,QAAQ,EAAK,CAAU,CAAC,EACxB,MAAM,CAAK,GAEC,IAAI,CAAC,KAAS,CAC3B,GAAI,EAAI,OAAO,GACf,QAAS,EAAI,OAAO,QACpB,SAAU,EAAI,OAAO,SACrB,SAAU,EAAI,OAAO,SACrB,QAAS,EAAI,OAAO,QACpB,SAAU,EAAI,OAAO,SACrB,UAAW,EAAI,OAAO,UACtB,WAAY,EAAI,OAAO,WACvB,OAAQ,EAAI,OAAO,OACnB,UAAW,EAAI,OAAO,UACtB,UAAW,EAAI,OAAO,UACtB,eAAgB,EAAI,OAAO,eAC3B,YAAa,EAAI,OAAO,YACxB,WAAY,EAAI,UAClB,EAAE,EACF,MAAO,EAAO,CAEd,OADA,EAAO,KAAK,CAAE,OAAM,EAAG,uDAAuD,EACvE,KAAK,oBAAoB,EAAU,OAAW,CAAK,QAOxD,6BAA4B,CAAC,EAAiC,CAClE,IAAM,EAAW,MAAM,KAAK,oBAAoB,EAAU,OAAW,EAAE,EAEvE,GAAI,EAAS,SAAW,EACtB,MAAO,GAIT,IAAM,EAAU,IAAI,IAEpB,QAAW,KAAU,EAAU,CAC7B,GAAI,CAAC,EAAQ,IAAI,EAAO,QAAQ,EAC9B,EAAQ,IAAI,EAAO,SAAU,CAAC,CAAC,EAEjC,EAAQ,IAAI,EAAO,QAAQ,GAAG,KAAK,CAAM,EAI3C,IAAM,EAAqB,CAAC,EAE5B,QAAY,EAAU,KAAqB,EAAQ,QAAQ,EAAG,CAC5D,IAAM,EAAe,EAClB,MAAM,GAAG,EACT,IAAI,CAAC,IAAS,EAAK,OAAO,CAAC,EAAE,YAAY,EAAI,EAAK,MAAM,CAAC,CAAC,EAC1D,KAAK,GAAG,EAEL,EAAQ,EAAiB,IAAI,CAAC,IAAM,KAAK,EAAE,SAAS,EAAE,KAAK;AAAA,CAAI,EACrE,EAAS,KAAK,KAAK;AAAA,EAAoB,GAAO,EAGhD,OAAO,EAAS,KAAK;AAAA;AAAA,CAAM,EAE/B,CIloBA,iBAIE,eACA,6BACA,sBAQF,IAAM,GAA+B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,YAkC/B,GAA8B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,YAoCpC,SAAS,EAAe,CAAC,EAA4B,CACnD,IAAM,EAAe,EAAI,MAAM,0BAA0B,EACnD,EAAc,EAAI,MAAM,8BAA8B,EACtD,EAAmB,EAAI,SAAS,6BAA6B,EAE7D,EAAU,EAAe,EAAa,GAAG,KAAK,EAAI,wBAClD,EAAS,EACX,EAAY,GACT,MAAM,GAAG,EACT,IAAI,CAAC,IAAM,EAAE,KAAK,CAAC,EACnB,OAAO,OAAO,EACjB,CAAC,EACC,EAAY,MAAM,KAAK,CAAgB,EAAE,IAAI,CAAC,IAAU,EAAM,GAAG,KAAK,CAAC,EAE7E,MAAO,CAAE,UAAS,SAAQ,WAAU,EAS/B,IAAM,EAAoC,CAC/C,KAAM,uBACN,YAAa,yDACb,QAAS,CAAC,uBAAwB,sBAAuB,qBAAqB,EAC9E,UAAW,GAEX,SAAU,MAAO,EAAwB,IAAsC,CAG7E,GAFA,EAAO,MAAM,yCAAyC,EAAQ,SAAS,MAAM,EAEzE,CAAC,EAAQ,SAAS,KACpB,MAAO,GAGT,IAAM,EAAgB,EAAQ,WAAW,QAAQ,EACjD,GAAI,CAAC,EACH,MAAO,GAGT,IAAM,EAAS,EAAc,UAAU,EACjC,EAAsB,MAAM,EAAQ,cAAc,EAAQ,OAAQ,GAAO,UAAU,EACnF,EAAkB,GAAuB,
EAAO,gCAYtD,OAVA,EAAO,MACL,CACE,OAAQ,EAAQ,OAChB,sBACA,UAAW,EAAO,gCAClB,iBACF,EACA,qBACF,EAEO,GAGT,QAAS,MAAO,EAAwB,IAAmC,CACzE,IAAM,EAAgB,EAAQ,WAAW,QAAQ,EACjD,GAAI,CAAC,EAAe,CAClB,EAAO,MAAM,yBAAyB,EACtC,OAGF,IAAM,EAAS,EAAc,UAAU,GAC/B,UAAW,EAEnB,GAAI,CACF,EAAO,KAAK,mCAAmC,GAAQ,EAGvD,IAAM,EAAkB,MAAM,EAAc,yBAAyB,CAAM,EACrE,EAAa,GAAiB,mBAAqB,EAGnD,EAAoB,MAAM,EAAQ,cAAc,EAAQ,GAAO,UAAU,EAGzE,EAAc,MAAM,EAAQ,YAAY,CAC5C,UAAW,WACX,SACA,MAAO,EAAO,gCACd,OAAQ,GACR,MAAO,CACT,CAAC,EAED,GAAI,EAAY,SAAW,EAAG,CAC5B,EAAO,MAAM,8BAA8B,EAC3C,OAIF,IAAM,EAAiB,EAAY,KAAK,CAAC,EAAG,KAAO,EAAE,WAAa,IAAM,EAAE,WAAa,EAAE,EAGnF,EAAoB,EACvB,IAAI,CAAC,IAAQ,CAEZ,MAAO,GADQ,EAAI,WAAa,EAAQ,QAAU,EAAQ,UAAU,KAAO,WACtD,EAAI,QAAQ,MAAQ,uBAC1C,EACA,KAAK;AAAA,CAAI,EAGN,EAAQ,MAAM,EAAQ,aAAa,CAAO,EAC5C,EACA,EAEJ,GAAI,EAEF,EAAW,GACX,EAAS,EAAuB,CAC9B,MAAO,IACF,EACH,gBAAiB,EAAgB,QACjC,eAAgB,EAAgB,QAAQ,KAAK,IAAI,GAAK,OACtD,YAAa,CACf,EACA,UACF,CAAC,EAGD,OAAW,GACX,EAAS,EAAuB,CAC9B,MAAO,IACF,EACH,eAAgB,CAClB,EACA,UACF,CAAC,EAGH,IAAM,EAAW,MAAM,EAAQ,SAAS,GAAU,WAAY,CAC5D,SACA,UAAW,EAAO,kBAAoB,IACxC,CAAC,EAEK,EAAgB,GAAgB,CAAQ,EAE9C,EAAO,KACL,GAAG,EAAkB,UAAY,wBAAwB,EAAc,QAAQ,UAAU,EAAG,GAAG,MACjG,EAGA,IAAM,EAAY,EAGZ,EAAe,EAAe,GAC9B,EAAc,EAAe,EAAe,OAAS,GAErD,GAAY,EACd,EAAgB,UAChB,GAAc,WAAa,EAAa,UAAY,EAClD,IAAI,KAAK,EAAa,SAAS,EAC/B,IAAI,KACJ,EACJ,GAAa,WAAa,EAAY,UAAY,EAC9C,IAAI,KAAK,EAAY,SAAS,EAC9B,IAAI,KAEV,GAAI,EAEF,MAAM,EAAc,qBAAqB,EAAgB,GAAI,CAC3D,QAAS,EAAc,QACvB,aAAc,EAAgB,aAAe,EAAe,OAC5D,kBAAmB,EACnB,UACA,OAAQ,EAAc,OACtB,SAAU,CACR,UAAW,EAAc,SAC3B,CACF,CAAC,EAED,EAAO,KACL,4BAA4B,MAAW,EAAe,0CAA0C,OAAe,IACjH,EAGA,WAAM,EAAc,oBAAoB,CACtC,QAAS,EAAQ,QACjB,SACA,SAAU,EAAQ,WAAa,EAAQ,QAAU,EAAQ,SAAW,OACpE,QAAS,EAAc,QACvB,aAAc,EAAe,OAC7B,kBAAmB,EACnB,aACA,UACA,OAAQ,EAAc,OACtB,SAAU,CACR,UAAW,EAAc,SAC3B,CACF,CAAC,EAED,EAAO,KACL,gCAAgC,MAAW,EAAe,2CAA0C,IACtG,EAKF,MAAO,EAAO,CACd,EAAO,MAAM,CAAE,OAAM,EAAG,6BAA6B,IAIzD,SAAU,CAAC,CACb,EClSA,iBAIE,eACA,6BACA,uBCDK,IAAK,GAAL,CAAK,IAAL,CACL,WAAW,WACX,YAAY,YACZ,WAAW,WACX,cAAc,cACd,eAAe,eACf,QAAQ,QACR,cAAc,cACd,cAAc,cACd,sBAAsB,wBATZ,QDSZ,IAAM,GAAqB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,aA+C3B,SAAS,EAAwB,CAAC,EAAiC,CACjE,IAAM,EAAgB,EAAI,SACxB,wIACF,EAEM,EAAkC,CAAC,EAEzC,QAAW,KAAS,EAAe,CACjC,IAAM,EAAW,EAAM,GAAG,KAAK,EACzB,EAAU,EAAM,GAAG,KAAK,EACxB,EAAa,WAAW,EAAM,GAAG,KAAK,CAAC,EAG7C,GAAI,CAAC,OAAO,OAAO,CAAsB,EAAE,SAAS,CAAQ,EAAG,CAC7D,EAAO,KAAK,4BAA4B,GAAU,EAClD,SAGF,GAAI,GAAW,CAAC,MAAM,CAAU,EAC9B,EAAY,KAAK,CAAE,WAAU,UAAS,YAAW,CAAC,EAItD,OAAO,EASF,IAAM,EAAyC,CACpD,KAAM,8BACN,YAAa,0DACb,QAAS,CAAC,oBAAqB,gBAAiB,gBAAgB,EAChE,UAAW,GAEX,SAAU,MAAO,EAAwB,IAAsC,CAG7E,GAFA,EAAO,MAAM,uDAAuD,EAAQ,SAAS,MAAM,EAEvF,EAAQ,WAAa,EAAQ,QAE/B,OADA,EAAO,MAAM,8DAA8D,EACpE,GAGT,GAAI,CAAC,EAAQ,SAAS,KAEpB,OADA,EAAO,MAAM,+DAA+D,EACrE,GAGT,IAAM,EAAgB,EAAQ,WAAW,QAAQ,EACjD,GAAI,CAAC,EAEH,OADA,EAAO,MAAM,yBAAyB,EAC/B,GAIT,GAAI,CADW,EAAc,UAAU,EAC3B,0BAEV,OADA,EAAO,MAAM,yCAAyC,EAC/C,GAIT,IAAM,EAAsB,MAAM,EAAQ,cAAc,EAAQ,OAAQ,GAAO,UAAU,EAEnF,EAAY,MAAM,EAAc,oBACpC,EAAQ,SACR,EAAQ,OACR,CACF,EAEA,OADA,EAAO,MAAM,0BAA0B,GAAW,EAC3C,GAGT,QAAS,MAAO,EAAwB,IAAmC,CACzE,IAAM,EAAgB,EAAQ,WAAW,QAAQ,EACjD,GAAI,CAAC,EAAe,CAClB,EAAO,MAAM,yBAAyB,EACtC,OAGF,IAAM,EAAS,EAAc,UAAU,GAC/B,WAAU,UAAW,EAE7B,GAAI,CACF,EAAO,KAAK,4CAA4C,GAAU,EAUlE,IAAM,GAPiB,MAAM,EAAQ,YAAY,CAC/C,UAAW,WACX,SACA,MAAO,GACP,OAAQ,EACV,CAAC,GAGE,KAAK,CAAC,EAAG,KAAO,EAAE,WAAa,IAAM,EAAE,WAAa,EAAE,EACtD,IAAI,CAAC,IAAQ,CAEZ,MAAO,GADQ,EAAI,WAAa,EAAQ,QAAU,EA
AQ,UAAU,KAAO,WACtD,EAAI,QAAQ,MAAQ,uBAC1C,EACA,KAAK;AAAA,CAAI,EAGN,EAAmB,MAAM,EAAc,oBAAoB,EAAU,OAAW,EAAE,EAClF,EACJ,EAAiB,OAAS,EACtB,EACG,IAAI,CAAC,IAAM,IAAI,EAAE,aAAa,EAAE,wBAAwB,EAAE,aAAa,EACvE,KAAK;AAAA,CAAI,EACZ,WAGA,EAAQ,MAAM,EAAQ,aAAa,CAAO,EAC1C,EAAS,GAAuB,CACpC,MAAO,IACF,EACH,eAAgB,EAChB,iBAAkB,CACpB,EACA,SAAU,EACZ,CAAC,EAEK,EAAW,MAAM,EAAQ,SAAS,GAAU,WAAY,CAC5D,QACF,CAAC,EAEK,EAAc,GAAyB,CAAQ,EAErD,EAAO,KAAK,aAAa,EAAY,2BAA2B,EAGhE,QAAW,KAAc,EACvB,GAAI,EAAW,YAAc,EAAO,4BAClC,MAAM,EAAc,oBAAoB,CACtC,QAAS,EAAQ,QACjB,WACA,SAAU,EAAW,SACrB,QAAS,EAAW,QACpB,WAAY,EAAW,WACvB,OAAQ,eACR,SAAU,CACR,SACA,YAAa,IAAI,KAAK,EAAE,YAAY,CACtC,CACF,CAAC,EAED,EAAO,KACL,6BAA6B,EAAW,aAAa,EAAW,QAAQ,UAAU,EAAG,EAAE,MACzF,EAEA,OAAO,MACL,kCAAkC,EAAW,wBAAwB,EAAW,aAClF,EAKJ,IAAM,EAAsB,MAAM,EAAQ,cAAc,EAAQ,GAAO,UAAU,EACjF,MAAM,EAAc,4BAA4B,EAAU,EAAQ,CAAmB,EACrF,EAAO,MACL,oCAAoC,gBAAkC,aAAoB,GAC5F,EACA,MAAO,EAAO,CACd,EAAO,MAAM,CAAE,OAAM,EAAG,2CAA2C,IAIvE,SAAU,CAAC,CACb,EEvOA,iBAKE,gBACA,uBAeK,IAAM,EAAoC,CAC/C,KAAM,oBACN,YAAa,gEACb,SAAU,GAEV,IAAK,MAAO,EAAwB,EAAiB,IAAkB,CACrE,GAAI,CACF,IAAM,EAAgB,EAAQ,WAAW,QAAQ,EACjD,GAAI,CAAC,EACH,MAAO,CACL,KAAM,CAAE,UAAW,CAAC,CAAE,EACtB,OAAQ,CAAE,iBAAkB,EAAG,EAC/B,KAAM,EACR,EAGF,IAAQ,UAAW,EAGb,EAAY,MAAM,EAAc,oBAAoB,EAAQ,CAAC,EAEnE,GAAI,EAAU,SAAW,EACvB,MAAO,CACL,KAAM,CAAE,UAAW,CAAC,CAAE,EACtB,OAAQ,CAAE,iBAAkB,EAAG,EAC/B,KAAM,EACR,EAIF,IAAM,EAAqB,EACxB,QAAQ,EACR,IAAI,CAAC,EAAS,IAAU,CACvB,IAAM,EAAe,GAAG,EAAQ,wBAC1B,EAAY,IAAI,KAAK,EAAQ,SAAS,EAAE,mBAAmB,EAE7D,EAAO,aAAa,EAAQ,QAAQ,MAAiB;AAAA,EAGzD,GAFA,GAAQ,EAAQ,QAEZ,EAAQ,QAAU,EAAQ,OAAO,OAAS,EAC5C,GAAQ;AAAA,WAAc,EAAQ,OAAO,KAAK,IAAI,KAGhD,OAAO,EACR,EACA,KAAK;AAAA;AAAA,CAAM,EAER,EAAO,GAAU,kCAAmC,CAAkB,EAE5E,MAAO,CACL,KAAM,CAAE,WAAU,EAClB,OAAQ,CAAE,iBAAkB,CAAK,EACjC,MACF,EACA,MAAO,EAAO,CAEd,OADA,GAAO,MAAM,CAAE,OAAM,EAAG,mCAAmC,EACpD,CACL,KAAM,CAAE,UAAW,CAAC,CAAE,EACtB,OAAQ,CAAE,iBAAkB,EAAG,EAC/B,KAAM,EACR,GAGN,ECpFA,iBAKE,gBACA,uBAmBK,IAAM,EAAmC,CAC9C,KAAM,mBACN,YAAa,kDACb,SAAU,GAEV,IAAK,MAAO,EAAwB,EAAiB,IAAkB,CACrE,GAAI,CACF,IAAM,EAAgB,EAAQ,WAAW,QAAQ,EACjD,GAAI,CAAC,EACH,MAAO,CACL,KAAM,CAAE,SAAU,CAAC,CAAE,EACrB,OAAQ,CAAE,iBAAkB,EAAG,EAC/B,KAAM,EACR,EAGF,IAAQ,YAAa,EAGrB,GAAI,IAAa,EAAQ,QACvB,MAAO,CACL,KAAM,CAAE,SAAU,CAAC,CAAE,EACrB,OAAQ,CAAE,iBAAkB,EAAG,EAC/B,KAAM,EACR,EAIF,IAAM,EAAW,MAAM,EAAc,oBAAoB,EAAU,OAAW,EAAE,EAEhF,GAAI,EAAS,SAAW,EACtB,MAAO,CACL,KAAM,CAAE,SAAU,CAAC,CAAE,EACrB,OAAQ,CAAE,iBAAkB,EAAG,EAC/B,KAAM,EACR,EAIF,IAAM,EAAoB,MAAM,EAAc,6BAA6B,CAAQ,EAE7E,EAAO,GAAU,0BAA2B,CAAiB,EAG7D,EAAiB,IAAI,IAC3B,QAAW,KAAU,EAAU,CAC7B,IAAM,EAAQ,EAAe,IAAI,EAAO,QAAQ,GAAK,EACrD,EAAe,IAAI,EAAO,SAAU,EAAQ,CAAC,EAG/C,IAAM,EAAe,MAAM,KAAK,EAAe,QAAQ,CAAC,EACrD,IAAI,EAAE,EAAK,KAAW,GAAG,MAAQ,GAAO,EACxC,KAAK,IAAI,EAEZ,MAAO,CACL,KAAM,CACJ,WACA,eAAgB,OAAO,YAAY,CAAc,CACnD,EACA,OAAQ,CACN,iBAAkB,EAClB,iBAAkB,CACpB,EACA,MACF,EACA,MAAO,EAAO,CAEd,OADA,GAAO,MAAM,CAAE,OAAM,EAAG,kCAAkC,EACnD,CACL,KAAM,CAAE,SAAU,CAAC,CAAE,EACrB,OAAQ,CAAE,iBAAkB,EAAG,EAC/B,KAAM,EACR,GAGN,ECtDO,IAAM,GAAuB,CAClC,KAAM,SACN,YACE,6FAEF,SAAU,CAAC,CAAa,EAExB,WAAY,CAAC,EAAwB,CAA2B,EAEhE,UAAW,CAAC,EAAwB,CAAuB,EAK3D,QACF,EAEe",
"debugId": "30473D3367DF880764756E2164756E21",
"names": []
}
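To close, a hedged sketch of tuning the plugin through the environment variables documented in its entry point (values illustrative; how settings reach `runtime.getSetting` depends on the host project, and they must be set before the agent runtime boots):

```ts
import { memoryPlugin } from '@elizaos/plugin-memory';

// String-valued settings, parsed by MemoryService at startup; anything
// unset falls back to the documented defaults.
process.env.MEMORY_SUMMARIZATION_THRESHOLD = '30';
process.env.MEMORY_RETAIN_RECENT = '10';
process.env.MEMORY_LONG_TERM_ENABLED = 'true';
process.env.MEMORY_CONFIDENCE_THRESHOLD = '0.8';

console.log(memoryPlugin.name); // "memory"
console.log(memoryPlugin.evaluators?.map((e) => e.name));
// ["MEMORY_SUMMARIZATION", "LONG_TERM_MEMORY_EXTRACTION"]
```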
|