webagents-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webagents/__init__.py +18 -0
- webagents/__main__.py +55 -0
- webagents/agents/__init__.py +13 -0
- webagents/agents/core/__init__.py +19 -0
- webagents/agents/core/base_agent.py +1834 -0
- webagents/agents/core/handoffs.py +293 -0
- webagents/agents/handoffs/__init__.py +0 -0
- webagents/agents/interfaces/__init__.py +0 -0
- webagents/agents/lifecycle/__init__.py +0 -0
- webagents/agents/skills/__init__.py +109 -0
- webagents/agents/skills/base.py +136 -0
- webagents/agents/skills/core/__init__.py +8 -0
- webagents/agents/skills/core/guardrails/__init__.py +0 -0
- webagents/agents/skills/core/llm/__init__.py +0 -0
- webagents/agents/skills/core/llm/anthropic/__init__.py +1 -0
- webagents/agents/skills/core/llm/litellm/__init__.py +10 -0
- webagents/agents/skills/core/llm/litellm/skill.py +538 -0
- webagents/agents/skills/core/llm/openai/__init__.py +1 -0
- webagents/agents/skills/core/llm/xai/__init__.py +1 -0
- webagents/agents/skills/core/mcp/README.md +375 -0
- webagents/agents/skills/core/mcp/__init__.py +15 -0
- webagents/agents/skills/core/mcp/skill.py +731 -0
- webagents/agents/skills/core/memory/__init__.py +11 -0
- webagents/agents/skills/core/memory/long_term_memory/__init__.py +10 -0
- webagents/agents/skills/core/memory/long_term_memory/memory_skill.py +639 -0
- webagents/agents/skills/core/memory/short_term_memory/__init__.py +9 -0
- webagents/agents/skills/core/memory/short_term_memory/skill.py +341 -0
- webagents/agents/skills/core/memory/vector_memory/skill.py +447 -0
- webagents/agents/skills/core/planning/__init__.py +9 -0
- webagents/agents/skills/core/planning/planner.py +343 -0
- webagents/agents/skills/ecosystem/__init__.py +0 -0
- webagents/agents/skills/ecosystem/crewai/__init__.py +1 -0
- webagents/agents/skills/ecosystem/database/__init__.py +1 -0
- webagents/agents/skills/ecosystem/filesystem/__init__.py +0 -0
- webagents/agents/skills/ecosystem/google/__init__.py +0 -0
- webagents/agents/skills/ecosystem/google/calendar/__init__.py +6 -0
- webagents/agents/skills/ecosystem/google/calendar/skill.py +306 -0
- webagents/agents/skills/ecosystem/n8n/__init__.py +0 -0
- webagents/agents/skills/ecosystem/openai_agents/__init__.py +0 -0
- webagents/agents/skills/ecosystem/web/__init__.py +0 -0
- webagents/agents/skills/ecosystem/zapier/__init__.py +0 -0
- webagents/agents/skills/robutler/__init__.py +11 -0
- webagents/agents/skills/robutler/auth/README.md +63 -0
- webagents/agents/skills/robutler/auth/__init__.py +17 -0
- webagents/agents/skills/robutler/auth/skill.py +354 -0
- webagents/agents/skills/robutler/crm/__init__.py +18 -0
- webagents/agents/skills/robutler/crm/skill.py +368 -0
- webagents/agents/skills/robutler/discovery/README.md +281 -0
- webagents/agents/skills/robutler/discovery/__init__.py +16 -0
- webagents/agents/skills/robutler/discovery/skill.py +230 -0
- webagents/agents/skills/robutler/kv/__init__.py +6 -0
- webagents/agents/skills/robutler/kv/skill.py +80 -0
- webagents/agents/skills/robutler/message_history/__init__.py +9 -0
- webagents/agents/skills/robutler/message_history/skill.py +270 -0
- webagents/agents/skills/robutler/messages/__init__.py +0 -0
- webagents/agents/skills/robutler/nli/__init__.py +13 -0
- webagents/agents/skills/robutler/nli/skill.py +687 -0
- webagents/agents/skills/robutler/notifications/__init__.py +5 -0
- webagents/agents/skills/robutler/notifications/skill.py +141 -0
- webagents/agents/skills/robutler/payments/__init__.py +41 -0
- webagents/agents/skills/robutler/payments/exceptions.py +255 -0
- webagents/agents/skills/robutler/payments/skill.py +610 -0
- webagents/agents/skills/robutler/storage/__init__.py +10 -0
- webagents/agents/skills/robutler/storage/files/__init__.py +9 -0
- webagents/agents/skills/robutler/storage/files/skill.py +445 -0
- webagents/agents/skills/robutler/storage/json/__init__.py +9 -0
- webagents/agents/skills/robutler/storage/json/skill.py +336 -0
- webagents/agents/skills/robutler/storage/kv/skill.py +88 -0
- webagents/agents/skills/robutler/storage.py +389 -0
- webagents/agents/tools/__init__.py +0 -0
- webagents/agents/tools/decorators.py +426 -0
- webagents/agents/tracing/__init__.py +0 -0
- webagents/agents/workflows/__init__.py +0 -0
- webagents/scripts/__init__.py +0 -0
- webagents/server/__init__.py +28 -0
- webagents/server/context/__init__.py +0 -0
- webagents/server/context/context_vars.py +121 -0
- webagents/server/core/__init__.py +0 -0
- webagents/server/core/app.py +843 -0
- webagents/server/core/middleware.py +69 -0
- webagents/server/core/models.py +98 -0
- webagents/server/core/monitoring.py +59 -0
- webagents/server/endpoints/__init__.py +0 -0
- webagents/server/interfaces/__init__.py +0 -0
- webagents/server/middleware.py +330 -0
- webagents/server/models.py +92 -0
- webagents/server/monitoring.py +659 -0
- webagents/utils/__init__.py +0 -0
- webagents/utils/logging.py +359 -0
- webagents-0.1.0.dist-info/METADATA +230 -0
- webagents-0.1.0.dist-info/RECORD +94 -0
- webagents-0.1.0.dist-info/WHEEL +4 -0
- webagents-0.1.0.dist-info/entry_points.txt +2 -0
- webagents-0.1.0.dist-info/licenses/LICENSE +20 -0
webagents/agents/skills/core/memory/__init__.py
@@ -0,0 +1,11 @@
+"""
+Core Memory Skills Package
+
+Provides essential memory capabilities for agents including short-term
+and long-term memory management.
+"""
+
+from .short_term_memory import ShortTermMemorySkill
+from .long_term_memory import LongTermMemorySkill, MemoryItem
+
+__all__ = ["ShortTermMemorySkill", "LongTermMemorySkill", "MemoryItem"]
webagents/agents/skills/core/memory/long_term_memory/__init__.py
@@ -0,0 +1,10 @@
+"""
+Long Term Memory Skills Package
+
+Provides intelligent long-term memory management and context preservation
+across conversations and sessions.
+"""
+
+from .memory_skill import LongTermMemorySkill, MemoryItem
+
+__all__ = ['LongTermMemorySkill', 'MemoryItem']
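Both `__init__` hunks above are pure re-export shims: they lift the skill classes to shorter import paths. A quick sketch of the equivalent imports, assuming the wheel is installed and exposes the `webagents` top-level package:

# Either path resolves to the same classes once the package is installed.
from webagents.agents.skills.core.memory import LongTermMemorySkill, MemoryItem
from webagents.agents.skills.core.memory.long_term_memory import LongTermMemorySkill, MemoryItem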
webagents/agents/skills/core/memory/long_term_memory/memory_skill.py
@@ -0,0 +1,639 @@
+"""
+LongTermMemorySkill - Persistent Memory Management
+
+Automatically extracts and stores key facts, preferences, and context
+from conversations for future reference using WebAgents portal storage
+via dependencies.
+"""
+
+import json
+import uuid
+import os
+from typing import Dict, List, Any, Optional
+from datetime import datetime
+from dataclasses import dataclass, asdict
+
+from ....base import Skill
+from .....tools.decorators import tool, prompt
+
+
+@dataclass
+class MemoryItem:
+    """Represents a single memory item"""
+    id: str
+    content: str  # The actual memory content
+    category: str  # Type of memory (preference, fact, context, etc.)
+    importance: int  # 1-10 importance score
+    source: str  # Where this memory came from
+    tags: List[str]  # Keywords for searching
+    created_at: str  # ISO timestamp
+    last_accessed: Optional[str] = None  # When last used
+    access_count: int = 0  # How often accessed
+
+    def __post_init__(self):
+        if self.created_at is None:
+            self.created_at = datetime.utcnow().isoformat()
+
+
+class LongTermMemorySkill(Skill):
+    """
+    Long-term memory management skill with webagents portal integration via dependencies.
+
+    Features:
+    - Automatic memory extraction from conversations
+    - Categorized memory storage (preferences, facts, context)
+    - Importance scoring and prioritization
+    - Searchable memory retrieval
+    - Memory cleanup and maintenance
+    - Integration with webagents portal storage via dependencies
+    """
+
+    def __init__(self, config: Optional[Dict[str, Any]] = None):
+        super().__init__(config)
+        self.max_memories = config.get('max_memories', 100) if config else 100
+        self.auto_extract = config.get('auto_extract', True) if config else True
+        self.use_webagents_storage = config.get('use_webagents_storage', True) if config else True
+        self.fallback_file = config.get('fallback_file', '/tmp/agent_memory.json') if config else '/tmp/agent_memory.json'
+        self.memories: Dict[str, MemoryItem] = {}
+        self.agent_name = config.get('agent_name', 'default_agent') if config else 'default_agent'
+
+        # Dependencies
+        self.dependencies = config.get('dependencies', {}) if config else {}
+        self.storage = None
+
+    async def initialize(self, agent_reference):
+        """Initialize with agent reference and dependencies"""
+        await super().initialize(agent_reference)
+        self.agent = agent_reference
+
+        # Extract agent name from agent reference if available
+        if hasattr(agent_reference, 'name'):
+            self.agent_name = agent_reference.name
+
+        # Get JSON storage from dependencies
+        if 'webagents.json_storage' in self.dependencies:
+            self.storage = self.dependencies['webagents.json_storage']
+        elif hasattr(agent_reference, 'skills') and 'json_storage' in agent_reference.skills:
+            # Fallback to agent skills
+            self.storage = agent_reference.skills['json_storage']
+        elif hasattr(agent_reference, 'skills') and 'storage' in agent_reference.skills:
+            # Legacy fallback for old storage skill
+            self.storage = agent_reference.skills['storage']
+
+        # Load existing memories
+        await self._load_memories()
+
+    @prompt(priority=15, scope="all")
+    def memory_extraction_guidance(self, context) -> str:
+        """Guide the LLM on when and how to extract memories"""
+        return """
+LONG-TERM MEMORY EXTRACTION GUIDANCE:
+
+Automatically extract and store important information from conversations for future reference:
+
+WHAT TO MEMORIZE:
+1. **User Preferences** - Coding style, frameworks, tools they prefer
+2. **Project Context** - Current projects, technologies being used
+3. **Key Facts** - Important decisions, requirements, constraints
+4. **Work Patterns** - How they like to organize code, test preferences
+5. **Domain Knowledge** - Specific business rules, technical details
+6. **Recurring Themes** - Common problems, frequent requests
+
+WHEN TO EXTRACT MEMORIES:
+- User mentions preferences ("I prefer pytest over unittest")
+- Important project decisions are made
+- Technical requirements are established
+- User provides context about their workflow
+- Key facts emerge that would be useful later
+
+HOW TO EXTRACT:
+Use extract_key_memories() when you notice:
+- Statements about preferences or requirements
+- Important project context or decisions
+- Technical specifications or constraints
+- Workflow patterns or methodologies
+- Domain-specific knowledge
+
+MEMORY CATEGORIES:
+- "preference" - User likes/dislikes, preferred tools/methods
+- "project" - Current work, technologies, requirements
+- "fact" - Important decisions, specifications, constraints
+- "workflow" - How user organizes work, testing approaches
+- "domain" - Business rules, technical knowledge, context
+
+EXAMPLE TRIGGERS:
+- "I always use pytest for testing" → extract_key_memories()
+- "This project uses React and TypeScript" → extract_key_memories()
+- "We need to support Python 3.8+" → extract_key_memories()
+- "I organize tests in a dedicated tests/ folder" → extract_key_memories()
+"""
+
+    @tool
+    async def extract_key_memories(
+        self,
+        conversation_context: str,
+        focus_area: Optional[str] = None
+    ) -> str:
+        """
+        Extract key memories from conversation context.
+
+        Args:
+            conversation_context: Recent conversation or context to analyze
+            focus_area: Optional area to focus on (preferences, project, workflow)
+
+        Returns:
+            JSON string with extracted memories
+        """
+        try:
+            # Analyze the conversation context and extract key information
+            extracted_memories = self._analyze_and_extract(conversation_context, focus_area)
+
+            saved_memories = []
+            for memory_data in extracted_memories:
+                memory_id = await self._save_memory_item(
+                    content=memory_data['content'],
+                    category=memory_data['category'],
+                    importance=memory_data['importance'],
+                    source="conversation_extraction",
+                    tags=memory_data.get('tags', [])
+                )
+                saved_memories.append({
+                    "id": memory_id,
+                    "content": memory_data['content'],
+                    "category": memory_data['category']
+                })
+
+            # Save to persistent storage
+            await self._save_memories()
+
+            return json.dumps({
+                "extracted_count": len(saved_memories),
+                "memories": saved_memories,
+                "status": "success"
+            }, indent=2)
+
+        except Exception as e:
+            return json.dumps({
+                "error": f"Failed to extract memories: {str(e)}",
+                "extracted_count": 0
+            })
+
+    @tool
+    async def save_memory(
+        self,
+        content: str,
+        category: str = "fact",
+        importance: int = 5,
+        tags: Optional[List[str]] = None
+    ) -> str:
+        """
+        Manually save a specific memory.
+
+        Args:
+            content: The memory content to store
+            category: Memory category (preference, project, fact, workflow, domain)
+            importance: Importance score 1-10
+            tags: Optional tags for searching
+
+        Returns:
+            JSON string with memory details
+        """
+        try:
+            memory_id = await self._save_memory_item(
+                content=content,
+                category=category,
+                importance=importance,
+                source="manual_entry",
+                tags=tags or []
+            )
+
+            # Save to persistent storage
+            await self._save_memories()
+
+            return json.dumps({
+                "memory_id": memory_id,
+                "content": content,
+                "category": category,
+                "importance": importance,
+                "status": "saved"
+            }, indent=2)
+
+        except Exception as e:
+            return json.dumps({
+                "error": f"Failed to save memory: {str(e)}",
+                "content": content
+            })
+
+    @tool
+    async def list_memories(
+        self,
+        category: Optional[str] = None,
+        min_importance: int = 1,
+        search_tags: Optional[List[str]] = None
+    ) -> str:
+        """
+        List stored memories with optional filtering.
+
+        Args:
+            category: Filter by category
+            min_importance: Minimum importance score
+            search_tags: Filter by tags
+
+        Returns:
+            JSON string with memory list
+        """
+        try:
+            filtered_memories = []
+
+            for memory in self.memories.values():
+                # Apply filters
+                if category and memory.category != category:
+                    continue
+                if memory.importance < min_importance:
+                    continue
+                if search_tags and not any(tag in memory.tags for tag in search_tags):
+                    continue
+
+                filtered_memories.append({
+                    "id": memory.id,
+                    "content": memory.content,
+                    "category": memory.category,
+                    "importance": memory.importance,
+                    "tags": memory.tags,
+                    "created_at": memory.created_at,
+                    "access_count": memory.access_count
+                })
+
+            # Sort by importance and access count
+            filtered_memories.sort(key=lambda x: (x['importance'], x['access_count']), reverse=True)
+
+            storage_location = "webagents_json_storage" if self.storage else "local_file"
+
+            return json.dumps({
+                "total_memories": len(self.memories),
+                "filtered_count": len(filtered_memories),
+                "memories": filtered_memories,
+                "categories": list(set(m.category for m in self.memories.values())),
+                "storage_location": storage_location
+            }, indent=2)
+
+        except Exception as e:
+            return json.dumps({
+                "error": f"Failed to list memories: {str(e)}",
+                "total_memories": len(self.memories)
+            })
+
+    @tool
+    async def search_memories(self, query: str, max_results: int = 10) -> str:
+        """
+        Search memories by content, tags, or category.
+
+        Args:
+            query: Search query
+            max_results: Maximum number of results
+
+        Returns:
+            JSON string with search results
+        """
+        try:
+            query_lower = query.lower()
+            matches = []
+
+            for memory in self.memories.values():
+                score = 0
+
+                # Content matching
+                if query_lower in memory.content.lower():
+                    score += 3
+
+                # Tag matching
+                if any(query_lower in tag.lower() for tag in memory.tags):
+                    score += 2
+
+                # Category matching
+                if query_lower in memory.category.lower():
+                    score += 1
+
+                if score > 0:
+                    # Update access tracking
+                    memory.last_accessed = datetime.utcnow().isoformat()
+                    memory.access_count += 1
+
+                    matches.append({
+                        "id": memory.id,
+                        "content": memory.content,
+                        "category": memory.category,
+                        "importance": memory.importance,
+                        "tags": memory.tags,
+                        "score": score
+                    })
+
+            # Sort by score and importance
+            matches.sort(key=lambda x: (x['score'], x['importance']), reverse=True)
+            matches = matches[:max_results]
+
+            # Save updated access counts
+            await self._save_memories()
+
+            return json.dumps({
+                "query": query,
+                "total_matches": len(matches),
+                "memories": matches
+            }, indent=2)
+
+        except Exception as e:
+            return json.dumps({
+                "error": f"Failed to search memories: {str(e)}",
+                "query": query
+            })
+
+    @tool
+    async def delete_memory(self, memory_id: str) -> str:
+        """
+        Delete a specific memory.
+
+        Args:
+            memory_id: ID of memory to delete
+
+        Returns:
+            JSON string with deletion result
+        """
+        try:
+            if memory_id not in self.memories:
+                return json.dumps({
+                    "error": f"Memory {memory_id} not found",
+                    "memory_id": memory_id
+                })
+
+            memory = self.memories[memory_id]
+            del self.memories[memory_id]
+
+            # Save to persistent storage
+            await self._save_memories()
+
+            return json.dumps({
+                "memory_id": memory_id,
+                "deleted_content": memory.content,
+                "status": "deleted"
+            })
+
+        except Exception as e:
+            return json.dumps({
+                "error": f"Failed to delete memory: {str(e)}",
+                "memory_id": memory_id
+            })
+
+    @tool
+    async def get_memory_stats(self) -> str:
+        """
+        Get statistics about stored memories.
+
+        Returns:
+            JSON string with memory statistics
+        """
+        try:
+            storage_location = "webagents_json_storage" if self.storage else "local_file"
+
+            if not self.memories:
+                return json.dumps({
+                    "total_memories": 0,
+                    "categories": {},
+                    "importance_distribution": {},
+                    "storage_location": storage_location,
+                    "agent_name": self.agent_name
+                })
+
+            # Category breakdown
+            categories = {}
+            importance_dist = {}
+
+            for memory in self.memories.values():
+                categories[memory.category] = categories.get(memory.category, 0) + 1
+                importance_dist[memory.importance] = importance_dist.get(memory.importance, 0) + 1
+
+            # Most accessed memories
+            top_memories = sorted(
+                self.memories.values(),
+                key=lambda x: x.access_count,
+                reverse=True
+            )[:5]
+
+            return json.dumps({
+                "total_memories": len(self.memories),
+                "categories": categories,
+                "importance_distribution": importance_dist,
+                "most_accessed": [
+                    {
+                        "content": m.content[:100] + "..." if len(m.content) > 100 else m.content,
+                        "category": m.category,
+                        "access_count": m.access_count
+                    }
+                    for m in top_memories
+                ],
+                "storage_location": storage_location,
+                "agent_name": self.agent_name,
+                "max_memories": self.max_memories
+            }, indent=2)
+
+        except Exception as e:
+            return json.dumps({
+                "error": f"Failed to get memory stats: {str(e)}",
+                "total_memories": len(self.memories)
+            })
+
+    def _analyze_and_extract(self, context: str, focus_area: Optional[str] = None) -> List[Dict[str, Any]]:
+        """Analyze conversation context and extract key memories"""
+        memories = []
+
+        # Simple extraction patterns for now (could be enhanced with LLM)
+        context_lower = context.lower()
+
+        # Extract preferences
+        if "prefer" in context_lower or "like to" in context_lower or "always use" in context_lower:
+            memories.append({
+                "content": self._extract_preference_from_context(context),
+                "category": "preference",
+                "importance": 7,
+                "tags": ["preference", "workflow"]
+            })
+
+        # Extract project context
+        if any(word in context_lower for word in ["project", "using", "building", "working on"]):
+            memories.append({
+                "content": self._extract_project_context(context),
+                "category": "project",
+                "importance": 6,
+                "tags": ["project", "technology"]
+            })
+
+        # Extract technical requirements
+        if any(word in context_lower for word in ["requirement", "must", "need to", "should"]):
+            memories.append({
+                "content": self._extract_requirements(context),
+                "category": "fact",
+                "importance": 8,
+                "tags": ["requirement", "constraint"]
+            })
+
+        return [m for m in memories if m["content"]]  # Filter out empty content
+
+    def _extract_preference_from_context(self, context: str) -> str:
+        """Extract preference statements from context"""
+        sentences = context.split('.')
+        for sentence in sentences:
+            if any(word in sentence.lower() for word in ["prefer", "like to", "always use", "usually"]):
+                return sentence.strip()
+        return ""
+
+    def _extract_project_context(self, context: str) -> str:
+        """Extract project-related information"""
+        sentences = context.split('.')
+        for sentence in sentences:
+            if any(word in sentence.lower() for word in ["project", "using", "building", "working on"]):
+                return sentence.strip()
+        return ""
+
+    def _extract_requirements(self, context: str) -> str:
+        """Extract requirement statements"""
+        sentences = context.split('.')
+        for sentence in sentences:
+            if any(word in sentence.lower() for word in ["requirement", "must", "need to", "should"]):
+                return sentence.strip()
+        return ""
+
+    async def _save_memory_item(
+        self,
+        content: str,
+        category: str,
+        importance: int,
+        source: str,
+        tags: List[str]
+    ) -> str:
+        """Save a memory item and return its ID"""
+        memory_id = str(uuid.uuid4())
+
+        memory = MemoryItem(
+            id=memory_id,
+            content=content,
+            category=category,
+            importance=max(1, min(10, importance)),  # Clamp to 1-10
+            source=source,
+            tags=tags,
+            created_at=datetime.utcnow().isoformat()
+        )
+
+        self.memories[memory_id] = memory
+
+        # Clean up if we exceed max memories
+        if len(self.memories) > self.max_memories:
+            self._cleanup_old_memories()
+
+        return memory_id
+
+    def _cleanup_old_memories(self):
+        """Remove least important/accessed memories when limit is exceeded"""
+        # Sort by importance and access count (ascending)
+        sorted_memories = sorted(
+            self.memories.items(),
+            key=lambda x: (x[1].importance, x[1].access_count)
+        )
+
+        # Remove the least important ones
+        to_remove = len(self.memories) - self.max_memories + 10  # Remove a few extra
+        for i in range(to_remove):
+            if i < len(sorted_memories):
+                memory_id = sorted_memories[i][0]
+                del self.memories[memory_id]
+
+    async def _load_memories(self):
+        """Load memories from storage"""
+        try:
+            if self.use_webagents_storage and self.storage:
+                # Try to load from webagents JSON storage
+                result = await self.storage.retrieve_json_data(f"{self.agent_name}_memory.json")
+                result_data = json.loads(result)
+
+                if result_data.get("success") and "data" in result_data:
+                    data = result_data["data"]
+                    for memory_data in data.get('memories', []):
+                        memory = MemoryItem(**memory_data)
+                        self.memories[memory.id] = memory
+                    return
+
+            # Fallback to local file
+            if os.path.exists(self.fallback_file):
+                with open(self.fallback_file, 'r') as f:
+                    data = json.load(f)
+                    for memory_data in data.get('memories', []):
+                        memory = MemoryItem(**memory_data)
+                        self.memories[memory.id] = memory
+        except Exception as e:
+            # If loading fails, start with empty memories
+            self.memories = {}
+
+    async def _save_memories(self):
+        """Save memories to storage"""
+        try:
+            data = {
+                "memories": [asdict(memory) for memory in self.memories.values()],
+                "metadata": {
+                    "total_count": len(self.memories),
+                    "last_updated": datetime.utcnow().isoformat(),
+                    "max_memories": self.max_memories,
+                    "agent_name": self.agent_name
+                }
+            }
+
+            if self.use_webagents_storage and self.storage:
+                # Try to save to webagents JSON storage
+                result = await self.storage.store_json_data(
+                    f"{self.agent_name}_memory.json",
+                    data,
+                    f"Long-term memory storage for {self.agent_name}"
+                )
+                result_data = json.loads(result)
+                if result_data.get("success"):
+                    return
+
+            # Fallback to local file
+            os.makedirs(os.path.dirname(self.fallback_file), exist_ok=True)
+            with open(self.fallback_file, 'w') as f:
+                json.dump(data, f, indent=2)
+
+        except Exception as e:
+            # Log error but don't fail the operation
+            pass
+
+    def get_skill_info(self) -> Dict[str, Any]:
+        """Get comprehensive skill information"""
+        return {
+            "name": "LongTermMemorySkill",
+            "description": "Persistent long-term memory with webagents JSON storage via dependencies",
+            "version": "3.0.0",
+            "capabilities": [
+                "Automatic memory extraction from conversations",
+                "Categorized memory storage (preferences, facts, etc.)",
+                "Searchable memory retrieval",
+                "Importance-based prioritization",
+                "Memory cleanup and maintenance",
+                "WebAgents JSON storage integration via dependencies"
+            ],
+            "tools": [
+                "extract_key_memories",
+                "save_memory",
+                "list_memories",
+                "search_memories",
+                "delete_memory",
+                "get_memory_stats"
+            ],
+            "total_memories": len(self.memories),
+            "categories": list(set(m.category for m in self.memories.values())) if self.memories else [],
+            "config": {
+                "use_webagents_storage": self.use_webagents_storage,
+                "agent_name": self.agent_name,
+                "max_memories": self.max_memories,
+                "auto_extract": self.auto_extract,
+                "storage_available": self.storage is not None,
+                "dependencies": list(self.dependencies.keys())
+            }
+        }
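The skill above follows a two-phase lifecycle: construct with a config dict, then `initialize()` with an agent reference before calling any `@tool` coroutine. The following is a minimal usage sketch, not a verified example: it assumes the `@tool` decorator leaves the methods directly awaitable, that the base `Skill.initialize` tolerates a bare stand-in agent, and that no `webagents.json_storage` dependency is wired in, so persistence falls back to the local JSON file.

import asyncio

from webagents.agents.skills.core.memory import LongTermMemorySkill


class StubAgent:
    """Hypothetical stand-in; a real agent would come from webagents.agents.core."""
    name = "demo_agent"
    skills = {}  # no 'json_storage' skill, so the fallback file is used


async def main():
    skill = LongTermMemorySkill({
        "max_memories": 50,
        "fallback_file": "/tmp/demo_agent_memory.json",
    })
    await skill.initialize(StubAgent())  # loads any previously persisted memories

    # Manual save: importance is clamped to 1-10 by _save_memory_item
    print(await skill.save_memory(
        "User prefers pytest over unittest",
        category="preference",
        importance=7,
        tags=["testing", "pytest"],
    ))

    # Pattern-based extraction: "always use" triggers the preference branch
    print(await skill.extract_key_memories("I always use pytest for testing."))

    # A content match scores 3 and a tag match adds 2; results sort by (score, importance)
    print(await skill.search_memories("pytest"))


asyncio.run(main())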