claude-mpm 3.3.2__py3-none-any.whl → 3.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/cli/commands/memory.py +192 -14
- claude_mpm/cli/parser.py +13 -1
- claude_mpm/constants.py +1 -0
- claude_mpm/core/claude_runner.py +61 -0
- claude_mpm/core/config.py +161 -1
- claude_mpm/core/simple_runner.py +61 -0
- claude_mpm/hooks/builtin/mpm_command_hook.py +5 -5
- claude_mpm/hooks/claude_hooks/hook_handler.py +211 -4
- claude_mpm/hooks/claude_hooks/hook_wrapper.sh +10 -3
- claude_mpm/hooks/memory_integration_hook.py +51 -5
- claude_mpm/scripts/socketio_daemon.py +49 -9
- claude_mpm/scripts/socketio_server_manager.py +370 -45
- claude_mpm/services/__init__.py +41 -5
- claude_mpm/services/agent_memory_manager.py +541 -51
- claude_mpm/services/exceptions.py +677 -0
- claude_mpm/services/health_monitor.py +892 -0
- claude_mpm/services/memory_builder.py +341 -7
- claude_mpm/services/memory_optimizer.py +6 -2
- claude_mpm/services/project_analyzer.py +771 -0
- claude_mpm/services/recovery_manager.py +670 -0
- claude_mpm/services/socketio_server.py +653 -36
- claude_mpm/services/standalone_socketio_server.py +703 -34
- claude_mpm/services/version_control/git_operations.py +26 -0
- {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.2.dist-info}/METADATA +34 -10
- {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.2.dist-info}/RECORD +30 -44
- claude_mpm/agents/agent-template.yaml +0 -83
- claude_mpm/agents/test_fix_deployment/.claude-pm/config/project.json +0 -6
- claude_mpm/cli/README.md +0 -109
- claude_mpm/cli_module/refactoring_guide.md +0 -253
- claude_mpm/core/agent_registry.py.bak +0 -312
- claude_mpm/core/base_service.py.bak +0 -406
- claude_mpm/hooks/README.md +0 -97
- claude_mpm/orchestration/SUBPROCESS_DESIGN.md +0 -66
- claude_mpm/schemas/README_SECURITY.md +0 -92
- claude_mpm/schemas/agent_schema.json +0 -395
- claude_mpm/schemas/agent_schema_documentation.md +0 -181
- claude_mpm/schemas/agent_schema_security_notes.md +0 -165
- claude_mpm/schemas/examples/standard_workflow.json +0 -505
- claude_mpm/schemas/ticket_workflow_documentation.md +0 -482
- claude_mpm/schemas/ticket_workflow_schema.json +0 -590
- claude_mpm/services/framework_claude_md_generator/README.md +0 -92
- claude_mpm/services/parent_directory_manager/README.md +0 -83
- claude_mpm/services/version_control/VERSION +0 -1
- /claude_mpm/{web → dashboard}/open_dashboard.py +0 -0
- {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.2.dist-info}/WHEEL +0 -0
- {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.2.dist-info}/entry_points.txt +0 -0
- {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.2.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.2.dist-info}/top_level.txt +0 -0
The diff below covers claude_mpm/services/agent_memory_manager.py:

@@ -23,14 +23,16 @@ from typing import Dict, List, Optional, Any
 from datetime import datetime
 import re
 import logging
+import os
 
-from claude_mpm.core import LoggerMixin
 from claude_mpm.core.config import Config
+from claude_mpm.core.mixins import LoggerMixin
 from claude_mpm.utils.paths import PathResolver
+from claude_mpm.services.project_analyzer import ProjectAnalyzer
 # Socket.IO notifications are optional - we'll skip them if server is not available
 
 
-class AgentMemoryManager(LoggerMixin):
+class AgentMemoryManager:
     """Manages agent memory files with size limits and validation.
 
     WHY: Agents need to accumulate project-specific knowledge over time to become
@@ -60,22 +62,50 @@ class AgentMemoryManager(LoggerMixin):
         'Current Technical Context'
     ]
 
-    def __init__(self, config: Optional[Config] = None):
+    def __init__(self, config: Optional[Config] = None, working_directory: Optional[Path] = None):
         """Initialize the memory manager.
 
         Sets up the memories directory and ensures it exists with proper README.
 
         Args:
             config: Optional Config object. If not provided, will create default Config.
+            working_directory: Optional working directory. If not provided, uses current working directory.
         """
-
+        # Initialize logger using the same pattern as LoggerMixin
+        self._logger_instance = None
+        self._logger_name = None
+
         self.config = config or Config()
         self.project_root = PathResolver.get_project_root()
-
+        # Use current working directory by default, not project root
+        self.working_directory = working_directory or Path(os.getcwd())
+        self.memories_dir = self.working_directory / ".claude-mpm" / "memories"
         self._ensure_memories_directory()
 
         # Initialize memory limits from configuration
         self._init_memory_limits()
+
+        # Initialize project analyzer for context-aware memory creation
+        self.project_analyzer = ProjectAnalyzer(self.config, self.working_directory)
+
+    @property
+    def logger(self):
+        """Get or create the logger instance (like LoggerMixin)."""
+        if self._logger_instance is None:
+            if self._logger_name:
+                logger_name = self._logger_name
+            else:
+                module = self.__class__.__module__
+                class_name = self.__class__.__name__
+
+                if module and module != "__main__":
+                    logger_name = f"{module}.{class_name}"
+                else:
+                    logger_name = class_name
+
+            self._logger_instance = logging.getLogger(logger_name)
+
+        return self._logger_instance
 
     def _init_memory_limits(self):
         """Initialize memory limits from configuration.
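
A minimal sketch of the new constructor behavior, based only on what this hunk shows (the `working_directory` parameter, the `.claude-mpm/memories` layout, and the lazy `logger` property); the `/tmp/demo-project` path is purely illustrative:

```python
from pathlib import Path

from claude_mpm.core.config import Config
from claude_mpm.services.agent_memory_manager import AgentMemoryManager

# Memories are now rooted in the working directory (defaulting to os.getcwd()),
# no longer in the project root resolved by PathResolver.
manager = AgentMemoryManager(Config(), working_directory=Path("/tmp/demo-project"))

print(manager.memories_dir)
# /tmp/demo-project/.claude-mpm/memories

# The logger property lazily builds a module-qualified logger, mirroring LoggerMixin,
# e.g. "claude_mpm.services.agent_memory_manager.AgentMemoryManager".
print(manager.logger.name)
```
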
@@ -85,7 +115,7 @@ class AgentMemoryManager(LoggerMixin):
         """
         # Check if memory system is enabled
         self.memory_enabled = self.config.get('memory.enabled', True)
-        self.auto_learning = self.config.get('memory.auto_learning',
+        self.auto_learning = self.config.get('memory.auto_learning', True) # Changed default to True
 
         # Load default limits from configuration
         config_limits = self.config.get('memory.limits', {})
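
For reference, a small sketch of how these settings are read; the dotted keys and defaults come straight from the hunk above, while the values behind them in any given deployment are configuration-specific:

```python
from claude_mpm.core.config import Config

config = Config()

# Keys consumed by _init_memory_limits() in 3.4.2; note auto_learning now defaults to True.
memory_enabled = config.get('memory.enabled', True)
auto_learning = config.get('memory.auto_learning', True)
config_limits = config.get('memory.limits', {})

print(memory_enabled, auto_learning, config_limits)
```
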
@@ -238,68 +268,80 @@ class AgentMemoryManager(LoggerMixin):
         return success
 
     def _create_default_memory(self, agent_id: str) -> str:
-        """Create default memory file for agent.
+        """Create project-specific default memory file for agent.
 
-        WHY:
-
+        WHY: Instead of generic templates, agents need project-specific knowledge
+        from the start. This analyzes the current project and creates contextual
+        memories with actual project characteristics.
 
         Args:
             agent_id: The agent identifier
 
         Returns:
-            str: The
+            str: The project-specific memory template content
         """
         # Convert agent_id to proper name, handling cases like "test_agent" -> "Test"
         agent_name = agent_id.replace('_agent', '').replace('_', ' ').title()
-        project_name = self.project_root.name
         timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
 
         # Get limits for this agent
         limits = self._get_agent_limits(agent_id)
 
-
+        # Analyze the project for context-specific content
+        try:
+            project_characteristics = self.project_analyzer.analyze_project()
+            project_context = self.project_analyzer.get_project_context_summary()
+            important_files = self.project_analyzer.get_important_files_for_context()
+
+            self.logger.info(f"Creating project-specific memory for {agent_id} using analyzed project context")
+        except Exception as e:
+            self.logger.warning(f"Error analyzing project for {agent_id}, falling back to basic template: {e}")
+            return self._create_basic_memory_template(agent_id)
+
+        # Create project-specific sections
+        architecture_items = self._generate_architecture_section(project_characteristics)
+        coding_patterns = self._generate_coding_patterns_section(project_characteristics)
+        implementation_guidelines = self._generate_implementation_guidelines(project_characteristics)
+        tech_context = self._generate_technical_context(project_characteristics)
+        integration_points = self._generate_integration_points(project_characteristics)
+
+        template = f"""# {agent_name} Agent Memory - {project_characteristics.project_name}
 
 <!-- MEMORY LIMITS: {limits['max_file_size_kb']}KB max | {limits['max_sections']} sections max | {limits['max_items_per_section']} items per section -->
 <!-- Last Updated: {timestamp} | Auto-updated by: {agent_id} -->
 
-## Project
-
-- Three-tier agent hierarchy: project → user → system
-- Agent definitions use standardized JSON schema validation
+## Project Context
+{project_context}
 
-##
-
-- SubprocessRunner utility for external command execution
-- LoggerMixin provides consistent logging across all services
+## Project Architecture
+{self._format_section_items(architecture_items)}
 
-##
-
-- Follow existing import patterns: from claude_mpm.module import Class
-- Use existing utilities instead of reimplementing functionality
+## Coding Patterns Learned
+{self._format_section_items(coding_patterns)}
 
-##
-
+## Implementation Guidelines
+{self._format_section_items(implementation_guidelines)}
 
-##
+## Domain-Specific Knowledge
+<!-- Agent-specific knowledge for {project_characteristics.project_name} domain -->
+{self._generate_domain_knowledge_starters(project_characteristics, agent_id)}
+
+## Effective Strategies
 <!-- Successful approaches discovered through experience -->
 
-## Common Mistakes to Avoid
-
-- Avoid duplicating code - check utils/ for existing implementations
-- Never hardcode file paths, use PathResolver utilities
+## Common Mistakes to Avoid
+{self._format_section_items(self._generate_common_mistakes(project_characteristics))}
 
-## Integration Points
-
+## Integration Points
+{self._format_section_items(integration_points)}
 
-## Performance Considerations
-
+## Performance Considerations
+{self._format_section_items(self._generate_performance_considerations(project_characteristics))}
 
-## Current Technical Context
-
-- Target: 80% test coverage (current: 23.6%)
-- Integration with Claude Code 1.0.60+ native agent framework
+## Current Technical Context
+{self._format_section_items(tech_context)}
 
-## Recent Learnings
+## Recent Learnings
 <!-- Most recent discoveries and insights -->
 """
 
@@ -307,14 +349,313 @@ class AgentMemoryManager(LoggerMixin):
         try:
             memory_file = self.memories_dir / f"{agent_id}_agent.md"
             memory_file.write_text(template, encoding='utf-8')
-            self.logger.info(f"Created
+            self.logger.info(f"Created project-specific memory file for {agent_id}")
 
-            # Socket.IO notifications removed - memory manager works independently
         except Exception as e:
             self.logger.error(f"Error saving default memory for {agent_id}: {e}")
 
         return template
 
+    def _create_basic_memory_template(self, agent_id: str) -> str:
+        """Create basic memory template when project analysis fails.
+
+        WHY: Fallback template ensures agents always get some memory structure
+        even if project analysis encounters errors.
+
+        Args:
+            agent_id: The agent identifier
+
+        Returns:
+            str: Basic memory template
+        """
+        agent_name = agent_id.replace('_agent', '').replace('_', ' ').title()
+        project_name = self.project_root.name
+        timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+        limits = self._get_agent_limits(agent_id)
+
+        return f"""# {agent_name} Agent Memory - {project_name}
+
+<!-- MEMORY LIMITS: {limits['max_file_size_kb']}KB max | {limits['max_sections']} sections max | {limits['max_items_per_section']} items per section -->
+<!-- Last Updated: {timestamp} | Auto-updated by: {agent_id} -->
+
+## Project Context
+{project_name}: Software project requiring analysis
+
+## Project Architecture
+- Analyze project structure to understand architecture patterns
+
+## Coding Patterns Learned
+- Observe codebase patterns and conventions during tasks
+
+## Implementation Guidelines
+- Extract implementation guidelines from project documentation
+
+## Domain-Specific Knowledge
+<!-- Agent-specific knowledge accumulates here -->
+
+## Effective Strategies
+<!-- Successful approaches discovered through experience -->
+
+## Common Mistakes to Avoid
+- Learn from errors encountered during project work
+
+## Integration Points
+<!-- Key interfaces and integration patterns -->
+
+## Performance Considerations
+<!-- Performance insights and optimization patterns -->
+
+## Current Technical Context
+- Project analysis pending - gather context during tasks
+
+## Recent Learnings
+<!-- Most recent discoveries and insights -->
+"""
+
+    def _generate_architecture_section(self, characteristics) -> List[str]:
+        """Generate architecture section items based on project analysis."""
+        items = []
+
+        # Architecture type
+        items.append(f"{characteristics.architecture_type} with {characteristics.primary_language or 'mixed'} implementation")
+
+        # Key directories structure
+        if characteristics.key_directories:
+            key_dirs = ", ".join(characteristics.key_directories[:5])
+            items.append(f"Main directories: {key_dirs}")
+
+        # Main modules
+        if characteristics.main_modules:
+            modules = ", ".join(characteristics.main_modules[:4])
+            items.append(f"Core modules: {modules}")
+
+        # Entry points
+        if characteristics.entry_points:
+            entries = ", ".join(characteristics.entry_points[:3])
+            items.append(f"Entry points: {entries}")
+
+        # Frameworks affecting architecture
+        if characteristics.web_frameworks:
+            frameworks = ", ".join(characteristics.web_frameworks[:3])
+            items.append(f"Web framework stack: {frameworks}")
+
+        return items[:8] # Limit to prevent overwhelming
+
+    def _generate_coding_patterns_section(self, characteristics) -> List[str]:
+        """Generate coding patterns section based on project analysis."""
+        items = []
+
+        # Language-specific patterns
+        if characteristics.primary_language == 'python':
+            items.append("Python project: use type hints, follow PEP 8 conventions")
+            if 'django' in [fw.lower() for fw in characteristics.web_frameworks]:
+                items.append("Django patterns: models, views, templates separation")
+            elif 'flask' in [fw.lower() for fw in characteristics.web_frameworks]:
+                items.append("Flask patterns: blueprint organization, app factory pattern")
+        elif characteristics.primary_language == 'node_js':
+            items.append("Node.js project: use async/await, ES6+ features")
+            if 'express' in [fw.lower() for fw in characteristics.web_frameworks]:
+                items.append("Express patterns: middleware usage, route organization")
+
+        # Framework-specific patterns
+        for framework in characteristics.frameworks[:3]:
+            if 'react' in framework.lower():
+                items.append("React patterns: component composition, hooks usage")
+            elif 'vue' in framework.lower():
+                items.append("Vue patterns: single file components, composition API")
+
+        # Code conventions found
+        for convention in characteristics.code_conventions[:3]:
+            items.append(f"Project uses: {convention}")
+
+        return items[:8]
+
+    def _generate_implementation_guidelines(self, characteristics) -> List[str]:
+        """Generate implementation guidelines based on project analysis."""
+        items = []
+
+        # Package manager guidance
+        if characteristics.package_manager:
+            items.append(f"Use {characteristics.package_manager} for dependency management")
+
+        # Testing guidelines
+        if characteristics.testing_framework:
+            items.append(f"Write tests using {characteristics.testing_framework}")
+
+        # Test patterns
+        for pattern in characteristics.test_patterns[:2]:
+            items.append(f"Follow {pattern.lower()}")
+
+        # Build tools
+        if characteristics.build_tools:
+            tools = ", ".join(characteristics.build_tools[:2])
+            items.append(f"Use build tools: {tools}")
+
+        # Configuration patterns
+        for config_pattern in characteristics.configuration_patterns[:2]:
+            items.append(f"Configuration: {config_pattern}")
+
+        # Important files to reference
+        important_configs = characteristics.important_configs[:3]
+        if important_configs:
+            configs = ", ".join(important_configs)
+            items.append(f"Key config files: {configs}")
+
+        return items[:8]
+
+    def _generate_technical_context(self, characteristics) -> List[str]:
+        """Generate current technical context based on project analysis."""
+        items = []
+
+        # Technology stack summary
+        tech_stack = []
+        if characteristics.primary_language:
+            tech_stack.append(characteristics.primary_language)
+        tech_stack.extend(characteristics.frameworks[:2])
+        if tech_stack:
+            items.append(f"Tech stack: {', '.join(tech_stack)}")
+
+        # Databases in use
+        if characteristics.databases:
+            dbs = ", ".join(characteristics.databases[:3])
+            items.append(f"Data storage: {dbs}")
+
+        # API patterns
+        if characteristics.api_patterns:
+            apis = ", ".join(characteristics.api_patterns[:2])
+            items.append(f"API patterns: {apis}")
+
+        # Key dependencies
+        if characteristics.key_dependencies:
+            deps = ", ".join(characteristics.key_dependencies[:4])
+            items.append(f"Key dependencies: {deps}")
+
+        # Documentation available
+        if characteristics.documentation_files:
+            docs = ", ".join(characteristics.documentation_files[:3])
+            items.append(f"Documentation: {docs}")
+
+        return items[:8]
+
+    def _generate_integration_points(self, characteristics) -> List[str]:
+        """Generate integration points based on project analysis."""
+        items = []
+
+        # Database integrations
+        for db in characteristics.databases[:3]:
+            items.append(f"{db.title()} database integration")
+
+        # Web framework integrations
+        for framework in characteristics.web_frameworks[:2]:
+            items.append(f"{framework} web framework integration")
+
+        # API integrations
+        for api_pattern in characteristics.api_patterns[:2]:
+            items.append(f"{api_pattern} integration pattern")
+
+        # Common integration patterns based on dependencies
+        integration_deps = [dep for dep in characteristics.key_dependencies
+                            if any(keyword in dep.lower() for keyword in ['redis', 'rabbit', 'celery', 'kafka', 'docker'])]
+        for dep in integration_deps[:3]:
+            items.append(f"{dep} integration")
+
+        return items[:6]
+
+    def _generate_common_mistakes(self, characteristics) -> List[str]:
+        """Generate common mistakes based on project type and stack."""
+        items = []
+
+        # Language-specific mistakes
+        if characteristics.primary_language == 'python':
+            items.append("Avoid circular imports - use late imports when needed")
+            items.append("Don't ignore virtual environment - always activate before work")
+        elif characteristics.primary_language == 'node_js':
+            items.append("Avoid callback hell - use async/await consistently")
+            items.append("Don't commit node_modules - ensure .gitignore is correct")
+
+        # Framework-specific mistakes
+        if 'django' in [fw.lower() for fw in characteristics.web_frameworks]:
+            items.append("Don't skip migrations - always create and apply them")
+        elif 'flask' in [fw.lower() for fw in characteristics.web_frameworks]:
+            items.append("Avoid app context issues - use proper application factory")
+
+        # Database-specific mistakes
+        if characteristics.databases:
+            items.append("Don't ignore database transactions in multi-step operations")
+            items.append("Avoid N+1 queries - use proper joins or prefetching")
+
+        # Testing mistakes
+        if characteristics.testing_framework:
+            items.append("Don't skip test isolation - ensure tests can run independently")
+
+        return items[:8]
+
+    def _generate_performance_considerations(self, characteristics) -> List[str]:
+        """Generate performance considerations based on project stack."""
+        items = []
+
+        # Language-specific performance
+        if characteristics.primary_language == 'python':
+            items.append("Use list comprehensions over loops where appropriate")
+            items.append("Consider caching for expensive operations")
+        elif characteristics.primary_language == 'node_js':
+            items.append("Leverage event loop - avoid blocking operations")
+            items.append("Use streams for large data processing")
+
+        # Database performance
+        if characteristics.databases:
+            items.append("Index frequently queried columns")
+            items.append("Use connection pooling for database connections")
+
+        # Web framework performance
+        if characteristics.web_frameworks:
+            items.append("Implement appropriate caching strategies")
+            items.append("Optimize static asset delivery")
+
+        # Framework-specific performance
+        if 'react' in [fw.lower() for fw in characteristics.frameworks]:
+            items.append("Use React.memo for expensive component renders")
+
+        return items[:6]
+
+    def _generate_domain_knowledge_starters(self, characteristics, agent_id: str) -> str:
+        """Generate domain-specific knowledge starters based on project and agent type."""
+        items = []
+
+        # Project terminology
+        if characteristics.project_terminology:
+            terms = ", ".join(characteristics.project_terminology[:4])
+            items.append(f"- Key project terms: {terms}")
+
+        # Agent-specific starters
+        if 'research' in agent_id.lower():
+            items.append("- Focus on code analysis, pattern discovery, and architectural insights")
+            if characteristics.documentation_files:
+                items.append("- Prioritize documentation analysis for comprehensive understanding")
+        elif 'engineer' in agent_id.lower():
+            items.append("- Focus on implementation patterns, coding standards, and best practices")
+            if characteristics.testing_framework:
+                items.append(f"- Ensure test coverage using {characteristics.testing_framework}")
+        elif 'pm' in agent_id.lower() or 'manager' in agent_id.lower():
+            items.append("- Focus on project coordination, task delegation, and progress tracking")
+            items.append("- Monitor integration points and cross-component dependencies")
+
+        return '\n'.join(items) if items else "<!-- Domain knowledge will accumulate here -->"
+
+    def _format_section_items(self, items: List[str]) -> str:
+        """Format list of items as markdown bullet points."""
+        if not items:
+            return "<!-- Items will be added as knowledge accumulates -->"
+
+        formatted_items = []
+        for item in items:
+            # Ensure each item starts with a dash and is properly formatted
+            if not item.startswith('- '):
+                item = f"- {item}"
+            formatted_items.append(item)
+
+        return '\n'.join(formatted_items)
+
     def _add_item_to_section(self, content: str, section: str, new_item: str) -> str:
         """Add item to specified section, respecting limits.
 
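
A small illustration (not part of the package) of the formatting helper added above; it relies only on `_format_section_items` as defined in this hunk, and the sample strings are invented:

```python
from claude_mpm.services.agent_memory_manager import AgentMemoryManager

manager = AgentMemoryManager()

# Items are normalized to "- " bullets regardless of how the generators return them.
print(manager._format_section_items([
    "Tech stack: python, flask",
    "- Key config files: pyproject.toml",
]))
# - Tech stack: python, flask
# - Key config files: pyproject.toml

# An empty list falls back to the placeholder comment used in generated memory files.
print(manager._format_section_items([]))
# <!-- Items will be added as knowledge accumulates -->
```
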
@@ -373,9 +714,10 @@ class AgentMemoryManager(LoggerMixin):
         ):
             insert_point += 1
 
-        # Ensure line length limit
-
-
+        # Ensure line length limit (account for "- " prefix)
+        max_item_length = self.memory_limits['max_line_length'] - 2 # Subtract 2 for "- " prefix
+        if len(new_item) > max_item_length:
+            new_item = new_item[:max_item_length - 3] + '...'
 
         lines.insert(insert_point, f"- {new_item}")
 
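
A worked example of the new truncation arithmetic, assuming a configured `max_line_length` of 120 (the real value comes from the `memory.limits` configuration):

```python
# Mirrors the logic in _add_item_to_section: reserve 2 characters for the "- "
# prefix, then cap over-long items with a trailing "...".
max_line_length = 120                      # assumed limit for this example
max_item_length = max_line_length - 2      # 118 characters available for the item text

new_item = "x" * 150
if len(new_item) > max_item_length:
    new_item = new_item[:max_item_length - 3] + '...'

line = f"- {new_item}"
print(len(line))                           # 120 = 2 ("- ") + 115 ("x") + 3 ("...")
```
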
@@ -418,7 +760,7 @@ class AgentMemoryManager(LoggerMixin):
         # Insert new section
         new_section = [
             '',
-            f'## {section}
+            f'## {section}',
             f'- {new_item}',
             ''
         ]
@@ -558,7 +900,7 @@ class AgentMemoryManager(LoggerMixin):
         for section in missing_sections:
             section_content = [
                 '',
-                f'## {section}
+                f'## {section}',
                 '<!-- Section added by repair -->',
                 ''
             ]
@@ -605,7 +947,7 @@ class AgentMemoryManager(LoggerMixin):
         """
         try:
             from claude_mpm.services.memory_optimizer import MemoryOptimizer
-            optimizer = MemoryOptimizer(self.config)
+            optimizer = MemoryOptimizer(self.config, self.working_directory)
 
             if agent_id:
                 result = optimizer.optimize_agent_memory(agent_id)
@@ -633,7 +975,7 @@ class AgentMemoryManager(LoggerMixin):
         """
         try:
             from claude_mpm.services.memory_builder import MemoryBuilder
-            builder = MemoryBuilder(self.config)
+            builder = MemoryBuilder(self.config, self.working_directory)
 
             result = builder.build_from_documentation(force_rebuild)
             self.logger.info("Built memories from documentation")
@@ -850,6 +1192,153 @@ class AgentMemoryManager(LoggerMixin):
             self.logger.error(f"Error cross-referencing memories: {e}")
             return {"success": False, "error": str(e)}
 
+    def get_all_memories_raw(self) -> Dict[str, Any]:
+        """Get all agent memories in structured JSON format.
+
+        WHY: This provides programmatic access to all agent memories, allowing
+        external tools, scripts, or APIs to retrieve and process the complete
+        memory state of the system.
+
+        DESIGN DECISION: Returns structured data with metadata for each agent
+        including file stats, sections, and parsed content. This enables both
+        content access and system analysis.
+
+        Returns:
+            Dict containing structured memory data for all agents
+        """
+        try:
+            result = {
+                "success": True,
+                "timestamp": datetime.now().isoformat(),
+                "total_agents": 0,
+                "total_size_bytes": 0,
+                "agents": {}
+            }
+
+            # Ensure directory exists
+            if not self.memories_dir.exists():
+                return {
+                    "success": True,
+                    "timestamp": datetime.now().isoformat(),
+                    "total_agents": 0,
+                    "total_size_bytes": 0,
+                    "agents": {},
+                    "message": "No memory directory found"
+                }
+
+            # Find all agent memory files
+            memory_files = list(self.memories_dir.glob("*_agent.md"))
+            result["total_agents"] = len(memory_files)
+
+            # Process each agent memory file
+            for file_path in sorted(memory_files):
+                agent_id = file_path.stem.replace('_agent', '')
+
+                try:
+                    # Get file stats
+                    stat = file_path.stat()
+                    file_size = stat.st_size
+                    result["total_size_bytes"] += file_size
+
+                    # Load and parse memory content
+                    memory_content = self.load_agent_memory(agent_id)
+
+                    if memory_content:
+                        sections = self._parse_memory_content_to_dict(memory_content)
+
+                        # Count total items across all sections
+                        total_items = sum(len(items) for items in sections.values())
+
+                        result["agents"][agent_id] = {
+                            "agent_id": agent_id,
+                            "file_path": str(file_path),
+                            "file_size_bytes": file_size,
+                            "file_size_kb": round(file_size / 1024, 2),
+                            "last_modified": datetime.fromtimestamp(stat.st_mtime).isoformat(),
+                            "sections_count": len(sections),
+                            "total_items": total_items,
+                            "auto_learning": self._get_agent_auto_learning(agent_id),
+                            "size_limits": self._get_agent_limits(agent_id),
+                            "sections": sections,
+                            "raw_content": memory_content
+                        }
+                    else:
+                        result["agents"][agent_id] = {
+                            "agent_id": agent_id,
+                            "file_path": str(file_path),
+                            "file_size_bytes": file_size,
+                            "file_size_kb": round(file_size / 1024, 2),
+                            "last_modified": datetime.fromtimestamp(stat.st_mtime).isoformat(),
+                            "error": "Could not load memory content",
+                            "sections": {},
+                            "raw_content": ""
+                        }
+
+                except Exception as e:
+                    self.logger.error(f"Error processing memory for agent {agent_id}: {e}")
+                    result["agents"][agent_id] = {
+                        "agent_id": agent_id,
+                        "file_path": str(file_path),
+                        "error": str(e),
+                        "sections": {},
+                        "raw_content": ""
+                    }
+
+            result["total_size_kb"] = round(result["total_size_bytes"] / 1024, 2)
+            return result
+
+        except Exception as e:
+            self.logger.error(f"Error getting all memories raw: {e}")
+            return {
+                "success": False,
+                "error": str(e),
+                "timestamp": datetime.now().isoformat()
+            }
+
+    def _parse_memory_content_to_dict(self, content: str) -> Dict[str, List[str]]:
+        """Parse memory content into structured dictionary format.
+
+        WHY: Provides consistent parsing of memory content into sections and items
+        for both display and programmatic access. This ensures the same parsing
+        logic is used across the system.
+
+        Args:
+            content: Raw memory file content
+
+        Returns:
+            Dict mapping section names to lists of items
+        """
+        sections = {}
+        current_section = None
+        current_items = []
+
+        for line in content.split('\n'):
+            line = line.strip()
+
+            # Skip empty lines and header information
+            if not line or line.startswith('#') and 'Memory Usage' in line:
+                continue
+
+            if line.startswith('## ') and not line.startswith('## Memory Usage'):
+                # New section found
+                if current_section and current_items:
+                    sections[current_section] = current_items.copy()
+
+                current_section = line[3:].strip()
+                current_items = []
+
+            elif line.startswith('- ') and current_section:
+                # Item in current section
+                item = line[2:].strip()
+                if item and len(item) > 3: # Filter out very short items
+                    current_items.append(item)
+
+        # Add final section
+        if current_section and current_items:
+            sections[current_section] = current_items
+
+        return sections
+
     def _ensure_memories_directory(self):
         """Ensure memories directory exists with README.
 
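
A short sketch of consuming the new `get_all_memories_raw()` payload, using only keys that appear in the hunk above:

```python
from claude_mpm.services.agent_memory_manager import AgentMemoryManager

manager = AgentMemoryManager()
snapshot = manager.get_all_memories_raw()

if snapshot["success"]:
    print(f"{snapshot['total_agents']} agents, {snapshot.get('total_size_kb', 0)} KB total")
    for agent_id, info in snapshot["agents"].items():
        if "error" in info:
            print(f"  {agent_id}: error - {info['error']}")
        else:
            print(f"  {agent_id}: {info['sections_count']} sections, {info['total_items']} items")
else:
    print(f"failed: {snapshot['error']}")
```
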
@@ -908,7 +1397,7 @@ Standard markdown with structured sections. Agents expect:
 
 
 # Convenience functions for external use
-def get_memory_manager(config: Optional[Config] = None) -> AgentMemoryManager:
+def get_memory_manager(config: Optional[Config] = None, working_directory: Optional[Path] = None) -> AgentMemoryManager:
     """Get a singleton instance of the memory manager.
 
     WHY: The memory manager should be shared across the application to ensure
@@ -916,10 +1405,11 @@ def get_memory_manager(config: Optional[Config] = None) -> AgentMemoryManager:
 
     Args:
         config: Optional Config object. Only used on first instantiation.
+        working_directory: Optional working directory. Only used on first instantiation.
 
     Returns:
         AgentMemoryManager: The memory manager instance
     """
     if not hasattr(get_memory_manager, '_instance'):
-        get_memory_manager._instance = AgentMemoryManager(config)
+        get_memory_manager._instance = AgentMemoryManager(config, working_directory)
     return get_memory_manager._instance
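
And a closing sketch of the updated module-level singleton; the path is illustrative, and as the docstring notes, both arguments are only honored on the first call:

```python
from pathlib import Path

from claude_mpm.services.agent_memory_manager import get_memory_manager

# First call creates the shared AgentMemoryManager with the given working directory.
manager = get_memory_manager(working_directory=Path("/tmp/demo-project"))

# Subsequent calls return the same instance, regardless of arguments.
assert get_memory_manager() is manager
print(manager.memories_dir)
```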