claude-mpm 3.3.0__py3-none-any.whl → 3.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/agents/templates/data_engineer.json +1 -1
- claude_mpm/agents/templates/documentation.json +1 -1
- claude_mpm/agents/templates/engineer.json +1 -1
- claude_mpm/agents/templates/ops.json +1 -1
- claude_mpm/agents/templates/pm.json +1 -1
- claude_mpm/agents/templates/qa.json +1 -1
- claude_mpm/agents/templates/research.json +1 -1
- claude_mpm/agents/templates/security.json +1 -1
- claude_mpm/agents/templates/test_integration.json +112 -0
- claude_mpm/agents/templates/version_control.json +1 -1
- claude_mpm/cli/commands/memory.py +749 -26
- claude_mpm/cli/commands/run.py +115 -14
- claude_mpm/cli/parser.py +89 -1
- claude_mpm/constants.py +6 -0
- claude_mpm/core/claude_runner.py +74 -11
- claude_mpm/core/config.py +1 -1
- claude_mpm/core/session_manager.py +46 -0
- claude_mpm/core/simple_runner.py +74 -11
- claude_mpm/hooks/builtin/mpm_command_hook.py +5 -5
- claude_mpm/hooks/claude_hooks/hook_handler.py +213 -30
- claude_mpm/hooks/claude_hooks/hook_wrapper.sh +9 -2
- claude_mpm/hooks/memory_integration_hook.py +51 -5
- claude_mpm/services/__init__.py +23 -5
- claude_mpm/services/agent_memory_manager.py +800 -71
- claude_mpm/services/memory_builder.py +823 -0
- claude_mpm/services/memory_optimizer.py +619 -0
- claude_mpm/services/memory_router.py +445 -0
- claude_mpm/services/project_analyzer.py +771 -0
- claude_mpm/services/socketio_server.py +649 -45
- claude_mpm/services/version_control/git_operations.py +26 -0
- claude_mpm-3.4.0.dist-info/METADATA +183 -0
- {claude_mpm-3.3.0.dist-info → claude_mpm-3.4.0.dist-info}/RECORD +36 -52
- claude_mpm/agents/agent-template.yaml +0 -83
- claude_mpm/agents/templates/test-integration-agent.md +0 -34
- claude_mpm/agents/test_fix_deployment/.claude-pm/config/project.json +0 -6
- claude_mpm/cli/README.md +0 -109
- claude_mpm/cli_module/refactoring_guide.md +0 -253
- claude_mpm/core/agent_registry.py.bak +0 -312
- claude_mpm/core/base_service.py.bak +0 -406
- claude_mpm/core/websocket_handler.py +0 -233
- claude_mpm/hooks/README.md +0 -97
- claude_mpm/orchestration/SUBPROCESS_DESIGN.md +0 -66
- claude_mpm/schemas/README_SECURITY.md +0 -92
- claude_mpm/schemas/agent_schema.json +0 -395
- claude_mpm/schemas/agent_schema_documentation.md +0 -181
- claude_mpm/schemas/agent_schema_security_notes.md +0 -165
- claude_mpm/schemas/examples/standard_workflow.json +0 -505
- claude_mpm/schemas/ticket_workflow_documentation.md +0 -482
- claude_mpm/schemas/ticket_workflow_schema.json +0 -590
- claude_mpm/services/framework_claude_md_generator/README.md +0 -92
- claude_mpm/services/parent_directory_manager/README.md +0 -83
- claude_mpm/services/version_control/VERSION +0 -1
- claude_mpm/services/websocket_server.py +0 -376
- claude_mpm-3.3.0.dist-info/METADATA +0 -432
- {claude_mpm-3.3.0.dist-info → claude_mpm-3.4.0.dist-info}/WHEEL +0 -0
- {claude_mpm-3.3.0.dist-info → claude_mpm-3.4.0.dist-info}/entry_points.txt +0 -0
- {claude_mpm-3.3.0.dist-info → claude_mpm-3.4.0.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-3.3.0.dist-info → claude_mpm-3.4.0.dist-info}/top_level.txt +0 -0
|
@@ -24,13 +24,14 @@ from datetime import datetime
|
|
|
24
24
|
import re
|
|
25
25
|
import logging
|
|
26
26
|
|
|
27
|
-
from claude_mpm.core import LoggerMixin
|
|
28
27
|
from claude_mpm.core.config import Config
|
|
28
|
+
from claude_mpm.core.mixins import LoggerMixin
|
|
29
29
|
from claude_mpm.utils.paths import PathResolver
|
|
30
|
-
from claude_mpm.services.
|
|
30
|
+
from claude_mpm.services.project_analyzer import ProjectAnalyzer
|
|
31
|
+
# Socket.IO notifications are optional - we'll skip them if server is not available
|
|
31
32
|
|
|
32
33
|
|
|
33
|
-
class AgentMemoryManager
|
|
34
|
+
class AgentMemoryManager:
|
|
34
35
|
"""Manages agent memory files with size limits and validation.
|
|
35
36
|
|
|
36
37
|
WHY: Agents need to accumulate project-specific knowledge over time to become
|
|
@@ -60,22 +61,49 @@ class AgentMemoryManager(LoggerMixin):
|
|
|
60
61
|
'Current Technical Context'
|
|
61
62
|
]
|
|
62
63
|
|
|
63
|
-
def __init__(self, config: Optional[Config] = None):
|
|
64
|
+
def __init__(self, config: Optional[Config] = None, working_directory: Optional[Path] = None):
|
|
64
65
|
"""Initialize the memory manager.
|
|
65
66
|
|
|
66
67
|
Sets up the memories directory and ensures it exists with proper README.
|
|
67
68
|
|
|
68
69
|
Args:
|
|
69
70
|
config: Optional Config object. If not provided, will create default Config.
|
|
71
|
+
working_directory: Optional working directory. If not provided, uses project root.
|
|
70
72
|
"""
|
|
71
|
-
|
|
73
|
+
# Initialize logger using the same pattern as LoggerMixin
|
|
74
|
+
self._logger_instance = None
|
|
75
|
+
self._logger_name = None
|
|
76
|
+
|
|
72
77
|
self.config = config or Config()
|
|
73
78
|
self.project_root = PathResolver.get_project_root()
|
|
79
|
+
self.working_directory = working_directory or self.project_root
|
|
74
80
|
self.memories_dir = self.project_root / ".claude-mpm" / "memories"
|
|
75
81
|
self._ensure_memories_directory()
|
|
76
82
|
|
|
77
83
|
# Initialize memory limits from configuration
|
|
78
84
|
self._init_memory_limits()
|
|
85
|
+
|
|
86
|
+
# Initialize project analyzer for context-aware memory creation
|
|
87
|
+
self.project_analyzer = ProjectAnalyzer(self.config, self.working_directory)
|
|
88
|
+
|
|
89
|
+
@property
|
|
90
|
+
def logger(self):
|
|
91
|
+
"""Get or create the logger instance (like LoggerMixin)."""
|
|
92
|
+
if self._logger_instance is None:
|
|
93
|
+
if self._logger_name:
|
|
94
|
+
logger_name = self._logger_name
|
|
95
|
+
else:
|
|
96
|
+
module = self.__class__.__module__
|
|
97
|
+
class_name = self.__class__.__name__
|
|
98
|
+
|
|
99
|
+
if module and module != "__main__":
|
|
100
|
+
logger_name = f"{module}.{class_name}"
|
|
101
|
+
else:
|
|
102
|
+
logger_name = class_name
|
|
103
|
+
|
|
104
|
+
self._logger_instance = logging.getLogger(logger_name)
|
|
105
|
+
|
|
106
|
+
return self._logger_instance
|
|
79
107
|
|
|
80
108
|
def _init_memory_limits(self):
|
|
81
109
|
"""Initialize memory limits from configuration.
|
|
@@ -85,7 +113,7 @@ class AgentMemoryManager(LoggerMixin):
|
|
|
85
113
|
"""
|
|
86
114
|
# Check if memory system is enabled
|
|
87
115
|
self.memory_enabled = self.config.get('memory.enabled', True)
|
|
88
|
-
self.auto_learning = self.config.get('memory.auto_learning',
|
|
116
|
+
self.auto_learning = self.config.get('memory.auto_learning', True) # Changed default to True
|
|
89
117
|
|
|
90
118
|
# Load default limits from configuration
|
|
91
119
|
config_limits = self.config.get('memory.limits', {})
|
|
@@ -164,15 +192,7 @@ class AgentMemoryManager(LoggerMixin):
|
|
|
164
192
|
try:
|
|
165
193
|
content = memory_file.read_text(encoding='utf-8')
|
|
166
194
|
|
|
167
|
-
#
|
|
168
|
-
try:
|
|
169
|
-
ws_server = get_websocket_server()
|
|
170
|
-
file_size = len(content.encode('utf-8'))
|
|
171
|
-
# Count sections by looking for lines starting with ##
|
|
172
|
-
sections_count = sum(1 for line in content.split('\n') if line.startswith('## '))
|
|
173
|
-
ws_server.memory_loaded(agent_id, file_size, sections_count)
|
|
174
|
-
except Exception as ws_error:
|
|
175
|
-
self.logger.debug(f"WebSocket notification failed: {ws_error}")
|
|
195
|
+
# Socket.IO notifications removed - memory manager works independently
|
|
176
196
|
|
|
177
197
|
return self._validate_and_repair(content, agent_id)
|
|
178
198
|
except Exception as e:
|
|
@@ -241,79 +261,85 @@ class AgentMemoryManager(LoggerMixin):
|
|
|
241
261
|
section = section_mapping.get(learning_type, 'Recent Learnings')
|
|
242
262
|
success = self.update_agent_memory(agent_id, section, content)
|
|
243
263
|
|
|
244
|
-
#
|
|
245
|
-
if success:
|
|
246
|
-
try:
|
|
247
|
-
ws_server = get_websocket_server()
|
|
248
|
-
ws_server.memory_updated(agent_id, learning_type, content, section)
|
|
249
|
-
except Exception as ws_error:
|
|
250
|
-
self.logger.debug(f"WebSocket notification failed: {ws_error}")
|
|
264
|
+
# Socket.IO notifications removed - memory manager works independently
|
|
251
265
|
|
|
252
266
|
return success
|
|
253
267
|
|
|
254
268
|
def _create_default_memory(self, agent_id: str) -> str:
|
|
255
|
-
"""Create default memory file for agent.
|
|
269
|
+
"""Create project-specific default memory file for agent.
|
|
256
270
|
|
|
257
|
-
WHY:
|
|
258
|
-
|
|
271
|
+
WHY: Instead of generic templates, agents need project-specific knowledge
|
|
272
|
+
from the start. This analyzes the current project and creates contextual
|
|
273
|
+
memories with actual project characteristics.
|
|
259
274
|
|
|
260
275
|
Args:
|
|
261
276
|
agent_id: The agent identifier
|
|
262
277
|
|
|
263
278
|
Returns:
|
|
264
|
-
str: The
|
|
279
|
+
str: The project-specific memory template content
|
|
265
280
|
"""
|
|
266
281
|
# Convert agent_id to proper name, handling cases like "test_agent" -> "Test"
|
|
267
282
|
agent_name = agent_id.replace('_agent', '').replace('_', ' ').title()
|
|
268
|
-
project_name = self.project_root.name
|
|
269
283
|
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
|
270
284
|
|
|
271
285
|
# Get limits for this agent
|
|
272
286
|
limits = self._get_agent_limits(agent_id)
|
|
273
287
|
|
|
274
|
-
|
|
288
|
+
# Analyze the project for context-specific content
|
|
289
|
+
try:
|
|
290
|
+
project_characteristics = self.project_analyzer.analyze_project()
|
|
291
|
+
project_context = self.project_analyzer.get_project_context_summary()
|
|
292
|
+
important_files = self.project_analyzer.get_important_files_for_context()
|
|
293
|
+
|
|
294
|
+
self.logger.info(f"Creating project-specific memory for {agent_id} using analyzed project context")
|
|
295
|
+
except Exception as e:
|
|
296
|
+
self.logger.warning(f"Error analyzing project for {agent_id}, falling back to basic template: {e}")
|
|
297
|
+
return self._create_basic_memory_template(agent_id)
|
|
298
|
+
|
|
299
|
+
# Create project-specific sections
|
|
300
|
+
architecture_items = self._generate_architecture_section(project_characteristics)
|
|
301
|
+
coding_patterns = self._generate_coding_patterns_section(project_characteristics)
|
|
302
|
+
implementation_guidelines = self._generate_implementation_guidelines(project_characteristics)
|
|
303
|
+
tech_context = self._generate_technical_context(project_characteristics)
|
|
304
|
+
integration_points = self._generate_integration_points(project_characteristics)
|
|
305
|
+
|
|
306
|
+
template = f"""# {agent_name} Agent Memory - {project_characteristics.project_name}
|
|
275
307
|
|
|
276
308
|
<!-- MEMORY LIMITS: {limits['max_file_size_kb']}KB max | {limits['max_sections']} sections max | {limits['max_items_per_section']} items per section -->
|
|
277
309
|
<!-- Last Updated: {timestamp} | Auto-updated by: {agent_id} -->
|
|
278
310
|
|
|
279
|
-
## Project
|
|
280
|
-
|
|
281
|
-
- Three-tier agent hierarchy: project → user → system
|
|
282
|
-
- Agent definitions use standardized JSON schema validation
|
|
311
|
+
## Project Context
|
|
312
|
+
{project_context}
|
|
283
313
|
|
|
284
|
-
##
|
|
285
|
-
|
|
286
|
-
- SubprocessRunner utility for external command execution
|
|
287
|
-
- LoggerMixin provides consistent logging across all services
|
|
314
|
+
## Project Architecture
|
|
315
|
+
{self._format_section_items(architecture_items)}
|
|
288
316
|
|
|
289
|
-
##
|
|
290
|
-
|
|
291
|
-
- Follow existing import patterns: from claude_mpm.module import Class
|
|
292
|
-
- Use existing utilities instead of reimplementing functionality
|
|
317
|
+
## Coding Patterns Learned
|
|
318
|
+
{self._format_section_items(coding_patterns)}
|
|
293
319
|
|
|
294
|
-
##
|
|
295
|
-
|
|
320
|
+
## Implementation Guidelines
|
|
321
|
+
{self._format_section_items(implementation_guidelines)}
|
|
322
|
+
|
|
323
|
+
## Domain-Specific Knowledge
|
|
324
|
+
<!-- Agent-specific knowledge for {project_characteristics.project_name} domain -->
|
|
325
|
+
{self._generate_domain_knowledge_starters(project_characteristics, agent_id)}
|
|
296
326
|
|
|
297
|
-
## Effective Strategies
|
|
327
|
+
## Effective Strategies
|
|
298
328
|
<!-- Successful approaches discovered through experience -->
|
|
299
329
|
|
|
300
|
-
## Common Mistakes to Avoid
|
|
301
|
-
|
|
302
|
-
- Avoid duplicating code - check utils/ for existing implementations
|
|
303
|
-
- Never hardcode file paths, use PathResolver utilities
|
|
330
|
+
## Common Mistakes to Avoid
|
|
331
|
+
{self._format_section_items(self._generate_common_mistakes(project_characteristics))}
|
|
304
332
|
|
|
305
|
-
## Integration Points
|
|
306
|
-
|
|
333
|
+
## Integration Points
|
|
334
|
+
{self._format_section_items(integration_points)}
|
|
307
335
|
|
|
308
|
-
## Performance Considerations
|
|
309
|
-
|
|
336
|
+
## Performance Considerations
|
|
337
|
+
{self._format_section_items(self._generate_performance_considerations(project_characteristics))}
|
|
310
338
|
|
|
311
|
-
## Current Technical Context
|
|
312
|
-
|
|
313
|
-
- Target: 80% test coverage (current: 23.6%)
|
|
314
|
-
- Integration with Claude Code 1.0.60+ native agent framework
|
|
339
|
+
## Current Technical Context
|
|
340
|
+
{self._format_section_items(tech_context)}
|
|
315
341
|
|
|
316
|
-
## Recent Learnings
|
|
342
|
+
## Recent Learnings
|
|
317
343
|
<!-- Most recent discoveries and insights -->
|
|
318
344
|
"""
|
|
319
345
|
|
|
@@ -321,19 +347,313 @@ class AgentMemoryManager(LoggerMixin):
|
|
|
321
347
|
try:
|
|
322
348
|
memory_file = self.memories_dir / f"{agent_id}_agent.md"
|
|
323
349
|
memory_file.write_text(template, encoding='utf-8')
|
|
324
|
-
self.logger.info(f"Created
|
|
325
|
-
|
|
326
|
-
# Emit WebSocket event for memory created
|
|
327
|
-
try:
|
|
328
|
-
ws_server = get_websocket_server()
|
|
329
|
-
ws_server.memory_created(agent_id, "default")
|
|
330
|
-
except Exception as ws_error:
|
|
331
|
-
self.logger.debug(f"WebSocket notification failed: {ws_error}")
|
|
350
|
+
self.logger.info(f"Created project-specific memory file for {agent_id}")
|
|
351
|
+
|
|
332
352
|
except Exception as e:
|
|
333
353
|
self.logger.error(f"Error saving default memory for {agent_id}: {e}")
|
|
334
354
|
|
|
335
355
|
return template
|
|
336
356
|
|
|
357
|
+
def _create_basic_memory_template(self, agent_id: str) -> str:
|
|
358
|
+
"""Create basic memory template when project analysis fails.
|
|
359
|
+
|
|
360
|
+
WHY: Fallback template ensures agents always get some memory structure
|
|
361
|
+
even if project analysis encounters errors.
|
|
362
|
+
|
|
363
|
+
Args:
|
|
364
|
+
agent_id: The agent identifier
|
|
365
|
+
|
|
366
|
+
Returns:
|
|
367
|
+
str: Basic memory template
|
|
368
|
+
"""
|
|
369
|
+
agent_name = agent_id.replace('_agent', '').replace('_', ' ').title()
|
|
370
|
+
project_name = self.project_root.name
|
|
371
|
+
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
|
372
|
+
limits = self._get_agent_limits(agent_id)
|
|
373
|
+
|
|
374
|
+
return f"""# {agent_name} Agent Memory - {project_name}
|
|
375
|
+
|
|
376
|
+
<!-- MEMORY LIMITS: {limits['max_file_size_kb']}KB max | {limits['max_sections']} sections max | {limits['max_items_per_section']} items per section -->
|
|
377
|
+
<!-- Last Updated: {timestamp} | Auto-updated by: {agent_id} -->
|
|
378
|
+
|
|
379
|
+
## Project Context
|
|
380
|
+
{project_name}: Software project requiring analysis
|
|
381
|
+
|
|
382
|
+
## Project Architecture
|
|
383
|
+
- Analyze project structure to understand architecture patterns
|
|
384
|
+
|
|
385
|
+
## Coding Patterns Learned
|
|
386
|
+
- Observe codebase patterns and conventions during tasks
|
|
387
|
+
|
|
388
|
+
## Implementation Guidelines
|
|
389
|
+
- Extract implementation guidelines from project documentation
|
|
390
|
+
|
|
391
|
+
## Domain-Specific Knowledge
|
|
392
|
+
<!-- Agent-specific knowledge accumulates here -->
|
|
393
|
+
|
|
394
|
+
## Effective Strategies
|
|
395
|
+
<!-- Successful approaches discovered through experience -->
|
|
396
|
+
|
|
397
|
+
## Common Mistakes to Avoid
|
|
398
|
+
- Learn from errors encountered during project work
|
|
399
|
+
|
|
400
|
+
## Integration Points
|
|
401
|
+
<!-- Key interfaces and integration patterns -->
|
|
402
|
+
|
|
403
|
+
## Performance Considerations
|
|
404
|
+
<!-- Performance insights and optimization patterns -->
|
|
405
|
+
|
|
406
|
+
## Current Technical Context
|
|
407
|
+
- Project analysis pending - gather context during tasks
|
|
408
|
+
|
|
409
|
+
## Recent Learnings
|
|
410
|
+
<!-- Most recent discoveries and insights -->
|
|
411
|
+
"""
|
|
412
|
+
|
|
413
|
+
def _generate_architecture_section(self, characteristics) -> List[str]:
|
|
414
|
+
"""Generate architecture section items based on project analysis."""
|
|
415
|
+
items = []
|
|
416
|
+
|
|
417
|
+
# Architecture type
|
|
418
|
+
items.append(f"{characteristics.architecture_type} with {characteristics.primary_language or 'mixed'} implementation")
|
|
419
|
+
|
|
420
|
+
# Key directories structure
|
|
421
|
+
if characteristics.key_directories:
|
|
422
|
+
key_dirs = ", ".join(characteristics.key_directories[:5])
|
|
423
|
+
items.append(f"Main directories: {key_dirs}")
|
|
424
|
+
|
|
425
|
+
# Main modules
|
|
426
|
+
if characteristics.main_modules:
|
|
427
|
+
modules = ", ".join(characteristics.main_modules[:4])
|
|
428
|
+
items.append(f"Core modules: {modules}")
|
|
429
|
+
|
|
430
|
+
# Entry points
|
|
431
|
+
if characteristics.entry_points:
|
|
432
|
+
entries = ", ".join(characteristics.entry_points[:3])
|
|
433
|
+
items.append(f"Entry points: {entries}")
|
|
434
|
+
|
|
435
|
+
# Frameworks affecting architecture
|
|
436
|
+
if characteristics.web_frameworks:
|
|
437
|
+
frameworks = ", ".join(characteristics.web_frameworks[:3])
|
|
438
|
+
items.append(f"Web framework stack: {frameworks}")
|
|
439
|
+
|
|
440
|
+
return items[:8] # Limit to prevent overwhelming
|
|
441
|
+
|
|
442
|
+
def _generate_coding_patterns_section(self, characteristics) -> List[str]:
|
|
443
|
+
"""Generate coding patterns section based on project analysis."""
|
|
444
|
+
items = []
|
|
445
|
+
|
|
446
|
+
# Language-specific patterns
|
|
447
|
+
if characteristics.primary_language == 'python':
|
|
448
|
+
items.append("Python project: use type hints, follow PEP 8 conventions")
|
|
449
|
+
if 'django' in [fw.lower() for fw in characteristics.web_frameworks]:
|
|
450
|
+
items.append("Django patterns: models, views, templates separation")
|
|
451
|
+
elif 'flask' in [fw.lower() for fw in characteristics.web_frameworks]:
|
|
452
|
+
items.append("Flask patterns: blueprint organization, app factory pattern")
|
|
453
|
+
elif characteristics.primary_language == 'node_js':
|
|
454
|
+
items.append("Node.js project: use async/await, ES6+ features")
|
|
455
|
+
if 'express' in [fw.lower() for fw in characteristics.web_frameworks]:
|
|
456
|
+
items.append("Express patterns: middleware usage, route organization")
|
|
457
|
+
|
|
458
|
+
# Framework-specific patterns
|
|
459
|
+
for framework in characteristics.frameworks[:3]:
|
|
460
|
+
if 'react' in framework.lower():
|
|
461
|
+
items.append("React patterns: component composition, hooks usage")
|
|
462
|
+
elif 'vue' in framework.lower():
|
|
463
|
+
items.append("Vue patterns: single file components, composition API")
|
|
464
|
+
|
|
465
|
+
# Code conventions found
|
|
466
|
+
for convention in characteristics.code_conventions[:3]:
|
|
467
|
+
items.append(f"Project uses: {convention}")
|
|
468
|
+
|
|
469
|
+
return items[:8]
|
|
470
|
+
|
|
471
|
+
def _generate_implementation_guidelines(self, characteristics) -> List[str]:
|
|
472
|
+
"""Generate implementation guidelines based on project analysis."""
|
|
473
|
+
items = []
|
|
474
|
+
|
|
475
|
+
# Package manager guidance
|
|
476
|
+
if characteristics.package_manager:
|
|
477
|
+
items.append(f"Use {characteristics.package_manager} for dependency management")
|
|
478
|
+
|
|
479
|
+
# Testing guidelines
|
|
480
|
+
if characteristics.testing_framework:
|
|
481
|
+
items.append(f"Write tests using {characteristics.testing_framework}")
|
|
482
|
+
|
|
483
|
+
# Test patterns
|
|
484
|
+
for pattern in characteristics.test_patterns[:2]:
|
|
485
|
+
items.append(f"Follow {pattern.lower()}")
|
|
486
|
+
|
|
487
|
+
# Build tools
|
|
488
|
+
if characteristics.build_tools:
|
|
489
|
+
tools = ", ".join(characteristics.build_tools[:2])
|
|
490
|
+
items.append(f"Use build tools: {tools}")
|
|
491
|
+
|
|
492
|
+
# Configuration patterns
|
|
493
|
+
for config_pattern in characteristics.configuration_patterns[:2]:
|
|
494
|
+
items.append(f"Configuration: {config_pattern}")
|
|
495
|
+
|
|
496
|
+
# Important files to reference
|
|
497
|
+
important_configs = characteristics.important_configs[:3]
|
|
498
|
+
if important_configs:
|
|
499
|
+
configs = ", ".join(important_configs)
|
|
500
|
+
items.append(f"Key config files: {configs}")
|
|
501
|
+
|
|
502
|
+
return items[:8]
|
|
503
|
+
|
|
504
|
+
def _generate_technical_context(self, characteristics) -> List[str]:
|
|
505
|
+
"""Generate current technical context based on project analysis."""
|
|
506
|
+
items = []
|
|
507
|
+
|
|
508
|
+
# Technology stack summary
|
|
509
|
+
tech_stack = []
|
|
510
|
+
if characteristics.primary_language:
|
|
511
|
+
tech_stack.append(characteristics.primary_language)
|
|
512
|
+
tech_stack.extend(characteristics.frameworks[:2])
|
|
513
|
+
if tech_stack:
|
|
514
|
+
items.append(f"Tech stack: {', '.join(tech_stack)}")
|
|
515
|
+
|
|
516
|
+
# Databases in use
|
|
517
|
+
if characteristics.databases:
|
|
518
|
+
dbs = ", ".join(characteristics.databases[:3])
|
|
519
|
+
items.append(f"Data storage: {dbs}")
|
|
520
|
+
|
|
521
|
+
# API patterns
|
|
522
|
+
if characteristics.api_patterns:
|
|
523
|
+
apis = ", ".join(characteristics.api_patterns[:2])
|
|
524
|
+
items.append(f"API patterns: {apis}")
|
|
525
|
+
|
|
526
|
+
# Key dependencies
|
|
527
|
+
if characteristics.key_dependencies:
|
|
528
|
+
deps = ", ".join(characteristics.key_dependencies[:4])
|
|
529
|
+
items.append(f"Key dependencies: {deps}")
|
|
530
|
+
|
|
531
|
+
# Documentation available
|
|
532
|
+
if characteristics.documentation_files:
|
|
533
|
+
docs = ", ".join(characteristics.documentation_files[:3])
|
|
534
|
+
items.append(f"Documentation: {docs}")
|
|
535
|
+
|
|
536
|
+
return items[:8]
|
|
537
|
+
|
|
538
|
+
def _generate_integration_points(self, characteristics) -> List[str]:
|
|
539
|
+
"""Generate integration points based on project analysis."""
|
|
540
|
+
items = []
|
|
541
|
+
|
|
542
|
+
# Database integrations
|
|
543
|
+
for db in characteristics.databases[:3]:
|
|
544
|
+
items.append(f"{db.title()} database integration")
|
|
545
|
+
|
|
546
|
+
# Web framework integrations
|
|
547
|
+
for framework in characteristics.web_frameworks[:2]:
|
|
548
|
+
items.append(f"{framework} web framework integration")
|
|
549
|
+
|
|
550
|
+
# API integrations
|
|
551
|
+
for api_pattern in characteristics.api_patterns[:2]:
|
|
552
|
+
items.append(f"{api_pattern} integration pattern")
|
|
553
|
+
|
|
554
|
+
# Common integration patterns based on dependencies
|
|
555
|
+
integration_deps = [dep for dep in characteristics.key_dependencies
|
|
556
|
+
if any(keyword in dep.lower() for keyword in ['redis', 'rabbit', 'celery', 'kafka', 'docker'])]
|
|
557
|
+
for dep in integration_deps[:3]:
|
|
558
|
+
items.append(f"{dep} integration")
|
|
559
|
+
|
|
560
|
+
return items[:6]
|
|
561
|
+
|
|
562
|
+
def _generate_common_mistakes(self, characteristics) -> List[str]:
|
|
563
|
+
"""Generate common mistakes based on project type and stack."""
|
|
564
|
+
items = []
|
|
565
|
+
|
|
566
|
+
# Language-specific mistakes
|
|
567
|
+
if characteristics.primary_language == 'python':
|
|
568
|
+
items.append("Avoid circular imports - use late imports when needed")
|
|
569
|
+
items.append("Don't ignore virtual environment - always activate before work")
|
|
570
|
+
elif characteristics.primary_language == 'node_js':
|
|
571
|
+
items.append("Avoid callback hell - use async/await consistently")
|
|
572
|
+
items.append("Don't commit node_modules - ensure .gitignore is correct")
|
|
573
|
+
|
|
574
|
+
# Framework-specific mistakes
|
|
575
|
+
if 'django' in [fw.lower() for fw in characteristics.web_frameworks]:
|
|
576
|
+
items.append("Don't skip migrations - always create and apply them")
|
|
577
|
+
elif 'flask' in [fw.lower() for fw in characteristics.web_frameworks]:
|
|
578
|
+
items.append("Avoid app context issues - use proper application factory")
|
|
579
|
+
|
|
580
|
+
# Database-specific mistakes
|
|
581
|
+
if characteristics.databases:
|
|
582
|
+
items.append("Don't ignore database transactions in multi-step operations")
|
|
583
|
+
items.append("Avoid N+1 queries - use proper joins or prefetching")
|
|
584
|
+
|
|
585
|
+
# Testing mistakes
|
|
586
|
+
if characteristics.testing_framework:
|
|
587
|
+
items.append("Don't skip test isolation - ensure tests can run independently")
|
|
588
|
+
|
|
589
|
+
return items[:8]
|
|
590
|
+
|
|
591
|
+
def _generate_performance_considerations(self, characteristics) -> List[str]:
|
|
592
|
+
"""Generate performance considerations based on project stack."""
|
|
593
|
+
items = []
|
|
594
|
+
|
|
595
|
+
# Language-specific performance
|
|
596
|
+
if characteristics.primary_language == 'python':
|
|
597
|
+
items.append("Use list comprehensions over loops where appropriate")
|
|
598
|
+
items.append("Consider caching for expensive operations")
|
|
599
|
+
elif characteristics.primary_language == 'node_js':
|
|
600
|
+
items.append("Leverage event loop - avoid blocking operations")
|
|
601
|
+
items.append("Use streams for large data processing")
|
|
602
|
+
|
|
603
|
+
# Database performance
|
|
604
|
+
if characteristics.databases:
|
|
605
|
+
items.append("Index frequently queried columns")
|
|
606
|
+
items.append("Use connection pooling for database connections")
|
|
607
|
+
|
|
608
|
+
# Web framework performance
|
|
609
|
+
if characteristics.web_frameworks:
|
|
610
|
+
items.append("Implement appropriate caching strategies")
|
|
611
|
+
items.append("Optimize static asset delivery")
|
|
612
|
+
|
|
613
|
+
# Framework-specific performance
|
|
614
|
+
if 'react' in [fw.lower() for fw in characteristics.frameworks]:
|
|
615
|
+
items.append("Use React.memo for expensive component renders")
|
|
616
|
+
|
|
617
|
+
return items[:6]
|
|
618
|
+
|
|
619
|
+
def _generate_domain_knowledge_starters(self, characteristics, agent_id: str) -> str:
|
|
620
|
+
"""Generate domain-specific knowledge starters based on project and agent type."""
|
|
621
|
+
items = []
|
|
622
|
+
|
|
623
|
+
# Project terminology
|
|
624
|
+
if characteristics.project_terminology:
|
|
625
|
+
terms = ", ".join(characteristics.project_terminology[:4])
|
|
626
|
+
items.append(f"- Key project terms: {terms}")
|
|
627
|
+
|
|
628
|
+
# Agent-specific starters
|
|
629
|
+
if 'research' in agent_id.lower():
|
|
630
|
+
items.append("- Focus on code analysis, pattern discovery, and architectural insights")
|
|
631
|
+
if characteristics.documentation_files:
|
|
632
|
+
items.append("- Prioritize documentation analysis for comprehensive understanding")
|
|
633
|
+
elif 'engineer' in agent_id.lower():
|
|
634
|
+
items.append("- Focus on implementation patterns, coding standards, and best practices")
|
|
635
|
+
if characteristics.testing_framework:
|
|
636
|
+
items.append(f"- Ensure test coverage using {characteristics.testing_framework}")
|
|
637
|
+
elif 'pm' in agent_id.lower() or 'manager' in agent_id.lower():
|
|
638
|
+
items.append("- Focus on project coordination, task delegation, and progress tracking")
|
|
639
|
+
items.append("- Monitor integration points and cross-component dependencies")
|
|
640
|
+
|
|
641
|
+
return '\n'.join(items) if items else "<!-- Domain knowledge will accumulate here -->"
|
|
642
|
+
|
|
643
|
+
def _format_section_items(self, items: List[str]) -> str:
|
|
644
|
+
"""Format list of items as markdown bullet points."""
|
|
645
|
+
if not items:
|
|
646
|
+
return "<!-- Items will be added as knowledge accumulates -->"
|
|
647
|
+
|
|
648
|
+
formatted_items = []
|
|
649
|
+
for item in items:
|
|
650
|
+
# Ensure each item starts with a dash and is properly formatted
|
|
651
|
+
if not item.startswith('- '):
|
|
652
|
+
item = f"- {item}"
|
|
653
|
+
formatted_items.append(item)
|
|
654
|
+
|
|
655
|
+
return '\n'.join(formatted_items)
|
|
656
|
+
|
|
337
657
|
def _add_item_to_section(self, content: str, section: str, new_item: str) -> str:
|
|
338
658
|
"""Add item to specified section, respecting limits.
|
|
339
659
|
|
|
@@ -392,9 +712,10 @@ class AgentMemoryManager(LoggerMixin):
|
|
|
392
712
|
):
|
|
393
713
|
insert_point += 1
|
|
394
714
|
|
|
395
|
-
# Ensure line length limit
|
|
396
|
-
|
|
397
|
-
|
|
715
|
+
# Ensure line length limit (account for "- " prefix)
|
|
716
|
+
max_item_length = self.memory_limits['max_line_length'] - 2 # Subtract 2 for "- " prefix
|
|
717
|
+
if len(new_item) > max_item_length:
|
|
718
|
+
new_item = new_item[:max_item_length - 3] + '...'
|
|
398
719
|
|
|
399
720
|
lines.insert(insert_point, f"- {new_item}")
|
|
400
721
|
|
|
@@ -437,7 +758,7 @@ class AgentMemoryManager(LoggerMixin):
|
|
|
437
758
|
# Insert new section
|
|
438
759
|
new_section = [
|
|
439
760
|
'',
|
|
440
|
-
f'## {section}
|
|
761
|
+
f'## {section}',
|
|
441
762
|
f'- {new_item}',
|
|
442
763
|
''
|
|
443
764
|
]
|
|
@@ -577,7 +898,7 @@ class AgentMemoryManager(LoggerMixin):
|
|
|
577
898
|
for section in missing_sections:
|
|
578
899
|
section_content = [
|
|
579
900
|
'',
|
|
580
|
-
f'## {section}
|
|
901
|
+
f'## {section}',
|
|
581
902
|
'<!-- Section added by repair -->',
|
|
582
903
|
''
|
|
583
904
|
]
|
|
@@ -609,6 +930,413 @@ class AgentMemoryManager(LoggerMixin):
|
|
|
609
930
|
self.logger.error(f"Error saving memory for {agent_id}: {e}")
|
|
610
931
|
return False
|
|
611
932
|
|
|
933
|
+
def optimize_memory(self, agent_id: Optional[str] = None) -> Dict[str, Any]:
|
|
934
|
+
"""Optimize agent memory by consolidating/cleaning memories.
|
|
935
|
+
|
|
936
|
+
WHY: Over time, memory files accumulate redundant or outdated information.
|
|
937
|
+
This method delegates to the memory optimizer service to clean up and
|
|
938
|
+
consolidate memories while preserving important information.
|
|
939
|
+
|
|
940
|
+
Args:
|
|
941
|
+
agent_id: Optional specific agent ID. If None, optimizes all agents.
|
|
942
|
+
|
|
943
|
+
Returns:
|
|
944
|
+
Dict containing optimization results and statistics
|
|
945
|
+
"""
|
|
946
|
+
try:
|
|
947
|
+
from claude_mpm.services.memory_optimizer import MemoryOptimizer
|
|
948
|
+
optimizer = MemoryOptimizer(self.config)
|
|
949
|
+
|
|
950
|
+
if agent_id:
|
|
951
|
+
result = optimizer.optimize_agent_memory(agent_id)
|
|
952
|
+
self.logger.info(f"Optimized memory for agent: {agent_id}")
|
|
953
|
+
else:
|
|
954
|
+
result = optimizer.optimize_all_memories()
|
|
955
|
+
self.logger.info("Optimized all agent memories")
|
|
956
|
+
|
|
957
|
+
return result
|
|
958
|
+
except Exception as e:
|
|
959
|
+
self.logger.error(f"Error optimizing memory: {e}")
|
|
960
|
+
return {"success": False, "error": str(e)}
|
|
961
|
+
|
|
962
|
+
def build_memories_from_docs(self, force_rebuild: bool = False) -> Dict[str, Any]:
    """Extract knowledge from project documentation into agent memories.

    WHY: Project docs hold valuable context that agents should know about;
    the MemoryBuilder service owns the scanning/assignment logic and is
    simply invoked from here.

    Args:
        force_rebuild: When True, rebuild even if the docs are unchanged.

    Returns:
        Dict with build results and statistics, or
        {"success": False, "error": ...} on failure.
    """
    try:
        # Lazy import keeps the builder service off the module import path.
        from claude_mpm.services.memory_builder import MemoryBuilder

        outcome = MemoryBuilder(self.config).build_from_documentation(force_rebuild)
        self.logger.info("Built memories from documentation")
        return outcome
    except Exception as e:
        self.logger.error(f"Error building memories from docs: {e}")
        return {"success": False, "error": str(e)}
|
|
985
|
+
|
|
986
|
+
def route_memory_command(self, content: str, context: Optional[Dict] = None) -> Dict[str, Any]:
    """Decide which agent should store a "remember this" style request.

    WHY: Memory commands must be analyzed to pick the agent that should hold
    the information; the MemoryRouter service performs that analysis so the
    PM agent can delegate accordingly.

    Args:
        content: The content to be remembered.
        context: Optional context that can influence the routing decision.

    Returns:
        Dict describing the routing decision and reasoning, or
        {"success": False, "error": ...} on failure.
    """
    try:
        # Lazy import: the router is only needed when a memory command arrives.
        from claude_mpm.services.memory_router import MemoryRouter

        decision = MemoryRouter(self.config).analyze_and_route(content, context)
        self.logger.debug(f"Routed memory command: {decision['target_agent']}")
        return decision
    except Exception as e:
        self.logger.error(f"Error routing memory command: {e}")
        return {"success": False, "error": str(e)}
|
|
1011
|
+
|
|
1012
|
+
def get_memory_status(self) -> Dict[str, Any]:
    """Summarize memory-system health: sizes, limits, and per-agent stats.

    WHY: Gives operators a single snapshot for monitoring and maintenance,
    including which agents are approaching their configured limits.

    Returns:
        Dict with system flags, per-agent statistics, optimization hints,
        and an overall health verdict; on unexpected failure returns
        {"success": False, "error": ...} instead.
    """
    try:
        report: Dict[str, Any] = {
            "system_enabled": self.memory_enabled,
            "auto_learning": self.auto_learning,
            "memory_directory": str(self.memories_dir),
            "total_agents": 0,
            "total_size_kb": 0,
            "agents": {},
            "optimization_opportunities": [],
            "system_health": "healthy"
        }

        # Without a memory directory there is nothing else to report.
        if not self.memories_dir.exists():
            report["system_health"] = "no_memory_dir"
            return report

        files = list(self.memories_dir.glob("*_agent.md"))
        report["total_agents"] = len(files)

        bytes_total = 0
        for memory_file in files:
            info = memory_file.stat()
            kb_size = info.st_size / 1024
            bytes_total += info.st_size

            agent_id = memory_file.stem.replace('_agent', '')
            limits = self._get_agent_limits(agent_id)

            # Analyze file content; a per-agent failure is recorded rather
            # than aborting the whole status report.
            try:
                rows = memory_file.read_text().splitlines()
                sections = sum(1 for row in rows if row.startswith('## '))
                items = sum(1 for row in rows if row.strip().startswith('- '))

                report["agents"][agent_id] = {
                    "size_kb": round(kb_size, 2),
                    "size_limit_kb": limits['max_file_size_kb'],
                    "size_utilization": min(100, round((kb_size / limits['max_file_size_kb']) * 100, 1)),
                    "sections": sections,
                    "items": items,
                    "last_modified": datetime.fromtimestamp(info.st_mtime).isoformat(),
                    "auto_learning": self._get_agent_auto_learning(agent_id)
                }

                # Flag agents running close (>80%) to their configured ceilings.
                if kb_size > limits['max_file_size_kb'] * 0.8:
                    report["optimization_opportunities"].append(f"{agent_id}: High memory usage ({kb_size:.1f}KB)")
                if sections > limits['max_sections'] * 0.8:
                    report["optimization_opportunities"].append(f"{agent_id}: Many sections ({sections})")

            except Exception as e:
                report["agents"][agent_id] = {"error": str(e)}

        report["total_size_kb"] = round(bytes_total / 1024, 2)

        # Overall verdict: many flagged agents outweighs raw size as a signal.
        if len(report["optimization_opportunities"]) > 3:
            report["system_health"] = "needs_optimization"
        elif report["total_size_kb"] > 100:  # More than 100KB total
            report["system_health"] = "high_usage"

        return report

    except Exception as e:
        self.logger.error(f"Error getting memory status: {e}")
        return {"success": False, "error": str(e)}
|
|
1091
|
+
|
|
1092
|
+
def cross_reference_memories(self, query: Optional[str] = None) -> Dict[str, Any]:
    """Find common patterns and cross-references across agent memories.

    WHY: Different agents may have learned similar or related information.
    Cross-referencing helps identify knowledge gaps, redundancies, and
    opportunities for knowledge sharing between agents.

    NOTE(review): "knowledge_gaps" and "redundancies" are initialized below
    but never populated in this implementation — presumably placeholders
    for future analysis; confirm before relying on them.

    Args:
        query: Optional query to filter cross-references (case-insensitive
            substring match against each memory line)

    Returns:
        Dict containing cross-reference analysis results; on unexpected
        failure returns {"success": False, "error": ...} instead.
    """
    try:
        # "query_matches" is a list only when a query was supplied;
        # None signals "not requested" to callers.
        cross_refs = {
            "common_patterns": [],
            "knowledge_gaps": [],
            "redundancies": [],
            "agent_correlations": {},
            "query_matches": [] if query else None
        }

        # No memory directory -> nothing to analyze; return the empty shell.
        if not self.memories_dir.exists():
            return cross_refs

        memory_files = list(self.memories_dir.glob("*_agent.md"))
        agent_memories = {}

        # Load all agent memories (skip unreadable files rather than failing).
        for file_path in memory_files:
            agent_id = file_path.stem.replace('_agent', '')
            try:
                content = file_path.read_text()
                agent_memories[agent_id] = content
            except Exception as e:
                self.logger.warning(f"Error reading memory for {agent_id}: {e}")
                continue

        # Collect every bullet item, remembering which agent it came from.
        all_lines = []
        agent_lines = {}

        for agent_id, content in agent_memories.items():
            lines = [line.strip() for line in content.splitlines()
                     if line.strip().startswith('- ')]
            agent_lines[agent_id] = lines
            all_lines.extend([(line, agent_id) for line in lines])

        # Look for similar content (basic similarity check):
        # map normalized line text -> list of agent ids containing it.
        line_counts = {}
        for line, agent_id in all_lines:
            # Normalize line for comparison (lowercase, bullet prefix dropped).
            normalized = line.lower().replace('- ', '').strip()
            if len(normalized) > 20:  # Only check substantial lines
                if normalized not in line_counts:
                    line_counts[normalized] = []
                line_counts[normalized].append(agent_id)

        # Find patterns appearing in multiple agents
        for line, agents in line_counts.items():
            if len(set(agents)) > 1:  # Appears in multiple agents
                cross_refs["common_patterns"].append({
                    # Truncate long patterns for readable reporting.
                    "pattern": line[:100] + "..." if len(line) > 100 else line,
                    "agents": list(set(agents)),
                    # NOTE: "count" includes duplicates within a single agent.
                    "count": len(agents)
                })

        # Query-specific matches
        if query:
            query_lower = query.lower()
            for agent_id, content in agent_memories.items():
                matches = []
                for line in content.splitlines():
                    if query_lower in line.lower():
                        matches.append(line.strip())

                if matches:
                    cross_refs["query_matches"].append({
                        "agent": agent_id,
                        "matches": matches[:5]  # Limit to first 5 matches
                    })

        # Calculate agent correlations (agents with similar knowledge domains)
        for agent_a in agent_memories:
            for agent_b in agent_memories:
                if agent_a < agent_b:  # Avoid duplicates
                    # NOTE: each value in line_counts is a *list of agent ids*
                    # (the loop variable name "line" is misleading); this
                    # counts normalized lines shared by both agents.
                    common_count = len([
                        line for line in line_counts.values()
                        if agent_a in line and agent_b in line
                    ])

                    if common_count > 0:
                        correlation_key = f"{agent_a}+{agent_b}"
                        cross_refs["agent_correlations"][correlation_key] = common_count

        return cross_refs

    except Exception as e:
        self.logger.error(f"Error cross-referencing memories: {e}")
        return {"success": False, "error": str(e)}
|
|
1192
|
+
|
|
1193
|
+
def get_all_memories_raw(self) -> Dict[str, Any]:
    """Return every agent's memory in structured, JSON-ready form.

    WHY: External tools, scripts, or APIs need programmatic access to the
    complete memory state; this bundles parsed content plus per-file
    metadata so callers can both read memories and analyze system usage.

    Fix: the no-directory early return previously duplicated the whole
    result literal (and the per-agent success/fallback entries duplicated
    five metadata keys); both are now built once so the shapes cannot drift.

    Returns:
        Dict with overall stats and a per-agent mapping containing file
        metadata, parsed sections, and raw content. On unexpected failure,
        returns {"success": False, "error": ..., "timestamp": ...}.
    """
    try:
        result = {
            "success": True,
            "timestamp": datetime.now().isoformat(),
            "total_agents": 0,
            "total_size_bytes": 0,
            "agents": {}
        }

        # No directory yet means no memories; report an empty but successful
        # state (note: this branch intentionally omits "total_size_kb").
        if not self.memories_dir.exists():
            result["message"] = "No memory directory found"
            return result

        memory_files = list(self.memories_dir.glob("*_agent.md"))
        result["total_agents"] = len(memory_files)

        # Sorted for deterministic output ordering across calls.
        for file_path in sorted(memory_files):
            agent_id = file_path.stem.replace('_agent', '')

            try:
                stat = file_path.stat()
                file_size = stat.st_size
                result["total_size_bytes"] += file_size

                memory_content = self.load_agent_memory(agent_id)

                # Metadata shared by the success and fallback entries below.
                entry = {
                    "agent_id": agent_id,
                    "file_path": str(file_path),
                    "file_size_bytes": file_size,
                    "file_size_kb": round(file_size / 1024, 2),
                    "last_modified": datetime.fromtimestamp(stat.st_mtime).isoformat(),
                }

                if memory_content:
                    sections = self._parse_memory_content_to_dict(memory_content)
                    entry.update({
                        "sections_count": len(sections),
                        # Total items across all sections.
                        "total_items": sum(len(items) for items in sections.values()),
                        "auto_learning": self._get_agent_auto_learning(agent_id),
                        "size_limits": self._get_agent_limits(agent_id),
                        "sections": sections,
                        "raw_content": memory_content,
                    })
                else:
                    entry.update({
                        "error": "Could not load memory content",
                        "sections": {},
                        "raw_content": "",
                    })
                result["agents"][agent_id] = entry

            except Exception as e:
                # Record the failure per-agent instead of aborting the dump.
                self.logger.error(f"Error processing memory for agent {agent_id}: {e}")
                result["agents"][agent_id] = {
                    "agent_id": agent_id,
                    "file_path": str(file_path),
                    "error": str(e),
                    "sections": {},
                    "raw_content": ""
                }

        result["total_size_kb"] = round(result["total_size_bytes"] / 1024, 2)
        return result

    except Exception as e:
        self.logger.error(f"Error getting all memories raw: {e}")
        return {
            "success": False,
            "error": str(e),
            "timestamp": datetime.now().isoformat()
        }
|
|
1295
|
+
|
|
1296
|
+
def _parse_memory_content_to_dict(self, content: str) -> Dict[str, List[str]]:
|
|
1297
|
+
"""Parse memory content into structured dictionary format.
|
|
1298
|
+
|
|
1299
|
+
WHY: Provides consistent parsing of memory content into sections and items
|
|
1300
|
+
for both display and programmatic access. This ensures the same parsing
|
|
1301
|
+
logic is used across the system.
|
|
1302
|
+
|
|
1303
|
+
Args:
|
|
1304
|
+
content: Raw memory file content
|
|
1305
|
+
|
|
1306
|
+
Returns:
|
|
1307
|
+
Dict mapping section names to lists of items
|
|
1308
|
+
"""
|
|
1309
|
+
sections = {}
|
|
1310
|
+
current_section = None
|
|
1311
|
+
current_items = []
|
|
1312
|
+
|
|
1313
|
+
for line in content.split('\n'):
|
|
1314
|
+
line = line.strip()
|
|
1315
|
+
|
|
1316
|
+
# Skip empty lines and header information
|
|
1317
|
+
if not line or line.startswith('#') and 'Memory Usage' in line:
|
|
1318
|
+
continue
|
|
1319
|
+
|
|
1320
|
+
if line.startswith('## ') and not line.startswith('## Memory Usage'):
|
|
1321
|
+
# New section found
|
|
1322
|
+
if current_section and current_items:
|
|
1323
|
+
sections[current_section] = current_items.copy()
|
|
1324
|
+
|
|
1325
|
+
current_section = line[3:].strip()
|
|
1326
|
+
current_items = []
|
|
1327
|
+
|
|
1328
|
+
elif line.startswith('- ') and current_section:
|
|
1329
|
+
# Item in current section
|
|
1330
|
+
item = line[2:].strip()
|
|
1331
|
+
if item and len(item) > 3: # Filter out very short items
|
|
1332
|
+
current_items.append(item)
|
|
1333
|
+
|
|
1334
|
+
# Add final section
|
|
1335
|
+
if current_section and current_items:
|
|
1336
|
+
sections[current_section] = current_items
|
|
1337
|
+
|
|
1338
|
+
return sections
|
|
1339
|
+
|
|
612
1340
|
def _ensure_memories_directory(self):
|
|
613
1341
|
"""Ensure memories directory exists with README.
|
|
614
1342
|
|
|
@@ -667,7 +1395,7 @@ Standard markdown with structured sections. Agents expect:
|
|
|
667
1395
|
|
|
668
1396
|
|
|
669
1397
|
# Convenience functions for external use
|
|
670
|
-
def get_memory_manager(config: Optional[Config] = None) -> AgentMemoryManager:
|
|
1398
|
+
def get_memory_manager(config: Optional[Config] = None, working_directory: Optional[Path] = None) -> AgentMemoryManager:
    """Return the process-wide AgentMemoryManager singleton.

    WHY: The memory manager must be shared across the application so every
    caller sees one consistent view of agent memory state. The instance is
    cached as an attribute on this function itself.

    Args:
        config: Optional Config object. Only used on first instantiation.
        working_directory: Optional working directory. Only used on first instantiation.

    Returns:
        AgentMemoryManager: The memory manager instance
    """
    # EAFP: return the cached instance, creating it on first access.
    try:
        return get_memory_manager._instance
    except AttributeError:
        get_memory_manager._instance = AgentMemoryManager(config, working_directory)
        return get_memory_manager._instance
|