claude-mpm 3.3.2-py3-none-any.whl → 3.4.0-py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
- claude_mpm/cli/commands/memory.py +186 -13
- claude_mpm/cli/parser.py +13 -1
- claude_mpm/constants.py +1 -0
- claude_mpm/core/claude_runner.py +61 -0
- claude_mpm/core/config.py +1 -1
- claude_mpm/core/simple_runner.py +61 -0
- claude_mpm/hooks/builtin/mpm_command_hook.py +5 -5
- claude_mpm/hooks/claude_hooks/hook_handler.py +211 -4
- claude_mpm/hooks/claude_hooks/hook_wrapper.sh +9 -2
- claude_mpm/hooks/memory_integration_hook.py +51 -5
- claude_mpm/services/__init__.py +23 -5
- claude_mpm/services/agent_memory_manager.py +536 -48
- claude_mpm/services/memory_builder.py +338 -6
- claude_mpm/services/project_analyzer.py +771 -0
- claude_mpm/services/socketio_server.py +473 -33
- claude_mpm/services/version_control/git_operations.py +26 -0
- {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.0.dist-info}/METADATA +34 -10
- {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.0.dist-info}/RECORD +22 -39
- claude_mpm/agents/agent-template.yaml +0 -83
- claude_mpm/agents/test_fix_deployment/.claude-pm/config/project.json +0 -6
- claude_mpm/cli/README.md +0 -109
- claude_mpm/cli_module/refactoring_guide.md +0 -253
- claude_mpm/core/agent_registry.py.bak +0 -312
- claude_mpm/core/base_service.py.bak +0 -406
- claude_mpm/hooks/README.md +0 -97
- claude_mpm/orchestration/SUBPROCESS_DESIGN.md +0 -66
- claude_mpm/schemas/README_SECURITY.md +0 -92
- claude_mpm/schemas/agent_schema.json +0 -395
- claude_mpm/schemas/agent_schema_documentation.md +0 -181
- claude_mpm/schemas/agent_schema_security_notes.md +0 -165
- claude_mpm/schemas/examples/standard_workflow.json +0 -505
- claude_mpm/schemas/ticket_workflow_documentation.md +0 -482
- claude_mpm/schemas/ticket_workflow_schema.json +0 -590
- claude_mpm/services/framework_claude_md_generator/README.md +0 -92
- claude_mpm/services/parent_directory_manager/README.md +0 -83
- claude_mpm/services/version_control/VERSION +0 -1
- {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.0.dist-info}/WHEEL +0 -0
- {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.0.dist-info}/entry_points.txt +0 -0
- {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.0.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.0.dist-info}/top_level.txt +0 -0
claude_mpm/services/agent_memory_manager.py

@@ -24,13 +24,14 @@ from datetime import datetime
 import re
 import logging
 
-from claude_mpm.core import LoggerMixin
 from claude_mpm.core.config import Config
+from claude_mpm.core.mixins import LoggerMixin
 from claude_mpm.utils.paths import PathResolver
+from claude_mpm.services.project_analyzer import ProjectAnalyzer
 # Socket.IO notifications are optional - we'll skip them if server is not available
 
 
-class AgentMemoryManager(LoggerMixin):
+class AgentMemoryManager:
     """Manages agent memory files with size limits and validation.
 
     WHY: Agents need to accumulate project-specific knowledge over time to become
@@ -60,22 +61,49 @@ class AgentMemoryManager(LoggerMixin):
         'Current Technical Context'
     ]
 
-    def __init__(self, config: Optional[Config] = None):
+    def __init__(self, config: Optional[Config] = None, working_directory: Optional[Path] = None):
         """Initialize the memory manager.
 
         Sets up the memories directory and ensures it exists with proper README.
 
         Args:
             config: Optional Config object. If not provided, will create default Config.
+            working_directory: Optional working directory. If not provided, uses project root.
         """
-
+        # Initialize logger using the same pattern as LoggerMixin
+        self._logger_instance = None
+        self._logger_name = None
+
         self.config = config or Config()
         self.project_root = PathResolver.get_project_root()
+        self.working_directory = working_directory or self.project_root
         self.memories_dir = self.project_root / ".claude-mpm" / "memories"
         self._ensure_memories_directory()
 
         # Initialize memory limits from configuration
         self._init_memory_limits()
+
+        # Initialize project analyzer for context-aware memory creation
+        self.project_analyzer = ProjectAnalyzer(self.config, self.working_directory)
+
+    @property
+    def logger(self):
+        """Get or create the logger instance (like LoggerMixin)."""
+        if self._logger_instance is None:
+            if self._logger_name:
+                logger_name = self._logger_name
+            else:
+                module = self.__class__.__module__
+                class_name = self.__class__.__name__
+
+                if module and module != "__main__":
+                    logger_name = f"{module}.{class_name}"
+                else:
+                    logger_name = class_name
+
+            self._logger_instance = logging.getLogger(logger_name)
+
+        return self._logger_instance
 
     def _init_memory_limits(self):
         """Initialize memory limits from configuration.
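For orientation, a minimal usage sketch of the changed constructor and the new lazy logger property (the path below is illustrative, not part of the package):

    from pathlib import Path

    from claude_mpm.core.config import Config
    from claude_mpm.services.agent_memory_manager import AgentMemoryManager

    # Memory files still live under the detected project root; working_directory
    # only steers ProjectAnalyzer, which builds the project-specific templates.
    manager = AgentMemoryManager(Config(), working_directory=Path("/path/to/project"))
    manager.logger.info("memory manager ready")  # logger is created lazily on first access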
@@ -85,7 +113,7 @@ class AgentMemoryManager(LoggerMixin):
         """
         # Check if memory system is enabled
         self.memory_enabled = self.config.get('memory.enabled', True)
-        self.auto_learning = self.config.get('memory.auto_learning', False)
+        self.auto_learning = self.config.get('memory.auto_learning', True)  # Changed default to True
 
         # Load default limits from configuration
         config_limits = self.config.get('memory.limits', {})
@@ -238,68 +266,80 @@ class AgentMemoryManager(LoggerMixin):
         return success
 
     def _create_default_memory(self, agent_id: str) -> str:
-        """Create default memory file for agent.
+        """Create project-specific default memory file for agent.
 
-        WHY:
-
+        WHY: Instead of generic templates, agents need project-specific knowledge
+        from the start. This analyzes the current project and creates contextual
+        memories with actual project characteristics.
 
         Args:
             agent_id: The agent identifier
 
         Returns:
-            str: The
+            str: The project-specific memory template content
         """
         # Convert agent_id to proper name, handling cases like "test_agent" -> "Test"
         agent_name = agent_id.replace('_agent', '').replace('_', ' ').title()
-        project_name = self.project_root.name
         timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
 
         # Get limits for this agent
         limits = self._get_agent_limits(agent_id)
 
-
+        # Analyze the project for context-specific content
+        try:
+            project_characteristics = self.project_analyzer.analyze_project()
+            project_context = self.project_analyzer.get_project_context_summary()
+            important_files = self.project_analyzer.get_important_files_for_context()
+
+            self.logger.info(f"Creating project-specific memory for {agent_id} using analyzed project context")
+        except Exception as e:
+            self.logger.warning(f"Error analyzing project for {agent_id}, falling back to basic template: {e}")
+            return self._create_basic_memory_template(agent_id)
+
+        # Create project-specific sections
+        architecture_items = self._generate_architecture_section(project_characteristics)
+        coding_patterns = self._generate_coding_patterns_section(project_characteristics)
+        implementation_guidelines = self._generate_implementation_guidelines(project_characteristics)
+        tech_context = self._generate_technical_context(project_characteristics)
+        integration_points = self._generate_integration_points(project_characteristics)
+
+        template = f"""# {agent_name} Agent Memory - {project_characteristics.project_name}
 
 <!-- MEMORY LIMITS: {limits['max_file_size_kb']}KB max | {limits['max_sections']} sections max | {limits['max_items_per_section']} items per section -->
 <!-- Last Updated: {timestamp} | Auto-updated by: {agent_id} -->
 
-## Project
-
-- Three-tier agent hierarchy: project → user → system
-- Agent definitions use standardized JSON schema validation
+## Project Context
+{project_context}
 
-##
-
-- SubprocessRunner utility for external command execution
-- LoggerMixin provides consistent logging across all services
+## Project Architecture
+{self._format_section_items(architecture_items)}
 
-##
-
-- Follow existing import patterns: from claude_mpm.module import Class
-- Use existing utilities instead of reimplementing functionality
+## Coding Patterns Learned
+{self._format_section_items(coding_patterns)}
 
-##
-
+## Implementation Guidelines
+{self._format_section_items(implementation_guidelines)}
 
-##
+## Domain-Specific Knowledge
+<!-- Agent-specific knowledge for {project_characteristics.project_name} domain -->
+{self._generate_domain_knowledge_starters(project_characteristics, agent_id)}
+
+## Effective Strategies
 <!-- Successful approaches discovered through experience -->
 
-## Common Mistakes to Avoid
-
-- Avoid duplicating code - check utils/ for existing implementations
-- Never hardcode file paths, use PathResolver utilities
+## Common Mistakes to Avoid
+{self._format_section_items(self._generate_common_mistakes(project_characteristics))}
 
-## Integration Points
-
+## Integration Points
+{self._format_section_items(integration_points)}
 
-## Performance Considerations
-
+## Performance Considerations
+{self._format_section_items(self._generate_performance_considerations(project_characteristics))}
 
-## Current Technical Context
-
-- Target: 80% test coverage (current: 23.6%)
-- Integration with Claude Code 1.0.60+ native agent framework
+## Current Technical Context
+{self._format_section_items(tech_context)}
 
-## Recent Learnings
+## Recent Learnings
 <!-- Most recent discoveries and insights -->
 """
 
@@ -307,14 +347,313 @@ class AgentMemoryManager(LoggerMixin):
         try:
             memory_file = self.memories_dir / f"{agent_id}_agent.md"
             memory_file.write_text(template, encoding='utf-8')
-            self.logger.info(f"Created
+            self.logger.info(f"Created project-specific memory file for {agent_id}")
 
-            # Socket.IO notifications removed - memory manager works independently
         except Exception as e:
             self.logger.error(f"Error saving default memory for {agent_id}: {e}")
 
         return template
 
+    def _create_basic_memory_template(self, agent_id: str) -> str:
+        """Create basic memory template when project analysis fails.
+
+        WHY: Fallback template ensures agents always get some memory structure
+        even if project analysis encounters errors.
+
+        Args:
+            agent_id: The agent identifier
+
+        Returns:
+            str: Basic memory template
+        """
+        agent_name = agent_id.replace('_agent', '').replace('_', ' ').title()
+        project_name = self.project_root.name
+        timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+        limits = self._get_agent_limits(agent_id)
+
+        return f"""# {agent_name} Agent Memory - {project_name}
+
+<!-- MEMORY LIMITS: {limits['max_file_size_kb']}KB max | {limits['max_sections']} sections max | {limits['max_items_per_section']} items per section -->
+<!-- Last Updated: {timestamp} | Auto-updated by: {agent_id} -->
+
+## Project Context
+{project_name}: Software project requiring analysis
+
+## Project Architecture
+- Analyze project structure to understand architecture patterns
+
+## Coding Patterns Learned
+- Observe codebase patterns and conventions during tasks
+
+## Implementation Guidelines
+- Extract implementation guidelines from project documentation
+
+## Domain-Specific Knowledge
+<!-- Agent-specific knowledge accumulates here -->
+
+## Effective Strategies
+<!-- Successful approaches discovered through experience -->
+
+## Common Mistakes to Avoid
+- Learn from errors encountered during project work
+
+## Integration Points
+<!-- Key interfaces and integration patterns -->
+
+## Performance Considerations
+<!-- Performance insights and optimization patterns -->
+
+## Current Technical Context
+- Project analysis pending - gather context during tasks
+
+## Recent Learnings
+<!-- Most recent discoveries and insights -->
+"""
+
+    def _generate_architecture_section(self, characteristics) -> List[str]:
+        """Generate architecture section items based on project analysis."""
+        items = []
+
+        # Architecture type
+        items.append(f"{characteristics.architecture_type} with {characteristics.primary_language or 'mixed'} implementation")
+
+        # Key directories structure
+        if characteristics.key_directories:
+            key_dirs = ", ".join(characteristics.key_directories[:5])
+            items.append(f"Main directories: {key_dirs}")
+
+        # Main modules
+        if characteristics.main_modules:
+            modules = ", ".join(characteristics.main_modules[:4])
+            items.append(f"Core modules: {modules}")
+
+        # Entry points
+        if characteristics.entry_points:
+            entries = ", ".join(characteristics.entry_points[:3])
+            items.append(f"Entry points: {entries}")
+
+        # Frameworks affecting architecture
+        if characteristics.web_frameworks:
+            frameworks = ", ".join(characteristics.web_frameworks[:3])
+            items.append(f"Web framework stack: {frameworks}")
+
+        return items[:8]  # Limit to prevent overwhelming
+
+    def _generate_coding_patterns_section(self, characteristics) -> List[str]:
+        """Generate coding patterns section based on project analysis."""
+        items = []
+
+        # Language-specific patterns
+        if characteristics.primary_language == 'python':
+            items.append("Python project: use type hints, follow PEP 8 conventions")
+            if 'django' in [fw.lower() for fw in characteristics.web_frameworks]:
+                items.append("Django patterns: models, views, templates separation")
+            elif 'flask' in [fw.lower() for fw in characteristics.web_frameworks]:
+                items.append("Flask patterns: blueprint organization, app factory pattern")
+        elif characteristics.primary_language == 'node_js':
+            items.append("Node.js project: use async/await, ES6+ features")
+            if 'express' in [fw.lower() for fw in characteristics.web_frameworks]:
+                items.append("Express patterns: middleware usage, route organization")
+
+        # Framework-specific patterns
+        for framework in characteristics.frameworks[:3]:
+            if 'react' in framework.lower():
+                items.append("React patterns: component composition, hooks usage")
+            elif 'vue' in framework.lower():
+                items.append("Vue patterns: single file components, composition API")
+
+        # Code conventions found
+        for convention in characteristics.code_conventions[:3]:
+            items.append(f"Project uses: {convention}")
+
+        return items[:8]
+
+    def _generate_implementation_guidelines(self, characteristics) -> List[str]:
+        """Generate implementation guidelines based on project analysis."""
+        items = []
+
+        # Package manager guidance
+        if characteristics.package_manager:
+            items.append(f"Use {characteristics.package_manager} for dependency management")
+
+        # Testing guidelines
+        if characteristics.testing_framework:
+            items.append(f"Write tests using {characteristics.testing_framework}")
+
+        # Test patterns
+        for pattern in characteristics.test_patterns[:2]:
+            items.append(f"Follow {pattern.lower()}")
+
+        # Build tools
+        if characteristics.build_tools:
+            tools = ", ".join(characteristics.build_tools[:2])
+            items.append(f"Use build tools: {tools}")
+
+        # Configuration patterns
+        for config_pattern in characteristics.configuration_patterns[:2]:
+            items.append(f"Configuration: {config_pattern}")
+
+        # Important files to reference
+        important_configs = characteristics.important_configs[:3]
+        if important_configs:
+            configs = ", ".join(important_configs)
+            items.append(f"Key config files: {configs}")
+
+        return items[:8]
+
+    def _generate_technical_context(self, characteristics) -> List[str]:
+        """Generate current technical context based on project analysis."""
+        items = []
+
+        # Technology stack summary
+        tech_stack = []
+        if characteristics.primary_language:
+            tech_stack.append(characteristics.primary_language)
+        tech_stack.extend(characteristics.frameworks[:2])
+        if tech_stack:
+            items.append(f"Tech stack: {', '.join(tech_stack)}")
+
+        # Databases in use
+        if characteristics.databases:
+            dbs = ", ".join(characteristics.databases[:3])
+            items.append(f"Data storage: {dbs}")
+
+        # API patterns
+        if characteristics.api_patterns:
+            apis = ", ".join(characteristics.api_patterns[:2])
+            items.append(f"API patterns: {apis}")
+
+        # Key dependencies
+        if characteristics.key_dependencies:
+            deps = ", ".join(characteristics.key_dependencies[:4])
+            items.append(f"Key dependencies: {deps}")
+
+        # Documentation available
+        if characteristics.documentation_files:
+            docs = ", ".join(characteristics.documentation_files[:3])
+            items.append(f"Documentation: {docs}")
+
+        return items[:8]
+
+    def _generate_integration_points(self, characteristics) -> List[str]:
+        """Generate integration points based on project analysis."""
+        items = []
+
+        # Database integrations
+        for db in characteristics.databases[:3]:
+            items.append(f"{db.title()} database integration")
+
+        # Web framework integrations
+        for framework in characteristics.web_frameworks[:2]:
+            items.append(f"{framework} web framework integration")
+
+        # API integrations
+        for api_pattern in characteristics.api_patterns[:2]:
+            items.append(f"{api_pattern} integration pattern")
+
+        # Common integration patterns based on dependencies
+        integration_deps = [dep for dep in characteristics.key_dependencies
+                            if any(keyword in dep.lower() for keyword in ['redis', 'rabbit', 'celery', 'kafka', 'docker'])]
+        for dep in integration_deps[:3]:
+            items.append(f"{dep} integration")
+
+        return items[:6]
+
+    def _generate_common_mistakes(self, characteristics) -> List[str]:
+        """Generate common mistakes based on project type and stack."""
+        items = []
+
+        # Language-specific mistakes
+        if characteristics.primary_language == 'python':
+            items.append("Avoid circular imports - use late imports when needed")
+            items.append("Don't ignore virtual environment - always activate before work")
+        elif characteristics.primary_language == 'node_js':
+            items.append("Avoid callback hell - use async/await consistently")
+            items.append("Don't commit node_modules - ensure .gitignore is correct")
+
+        # Framework-specific mistakes
+        if 'django' in [fw.lower() for fw in characteristics.web_frameworks]:
+            items.append("Don't skip migrations - always create and apply them")
+        elif 'flask' in [fw.lower() for fw in characteristics.web_frameworks]:
+            items.append("Avoid app context issues - use proper application factory")
+
+        # Database-specific mistakes
+        if characteristics.databases:
+            items.append("Don't ignore database transactions in multi-step operations")
+            items.append("Avoid N+1 queries - use proper joins or prefetching")
+
+        # Testing mistakes
+        if characteristics.testing_framework:
+            items.append("Don't skip test isolation - ensure tests can run independently")
+
+        return items[:8]
+
+    def _generate_performance_considerations(self, characteristics) -> List[str]:
+        """Generate performance considerations based on project stack."""
+        items = []
+
+        # Language-specific performance
+        if characteristics.primary_language == 'python':
+            items.append("Use list comprehensions over loops where appropriate")
+            items.append("Consider caching for expensive operations")
+        elif characteristics.primary_language == 'node_js':
+            items.append("Leverage event loop - avoid blocking operations")
+            items.append("Use streams for large data processing")
+
+        # Database performance
+        if characteristics.databases:
+            items.append("Index frequently queried columns")
+            items.append("Use connection pooling for database connections")
+
+        # Web framework performance
+        if characteristics.web_frameworks:
+            items.append("Implement appropriate caching strategies")
+            items.append("Optimize static asset delivery")
+
+        # Framework-specific performance
+        if 'react' in [fw.lower() for fw in characteristics.frameworks]:
+            items.append("Use React.memo for expensive component renders")
+
+        return items[:6]
+
+    def _generate_domain_knowledge_starters(self, characteristics, agent_id: str) -> str:
+        """Generate domain-specific knowledge starters based on project and agent type."""
+        items = []
+
+        # Project terminology
+        if characteristics.project_terminology:
+            terms = ", ".join(characteristics.project_terminology[:4])
+            items.append(f"- Key project terms: {terms}")
+
+        # Agent-specific starters
+        if 'research' in agent_id.lower():
+            items.append("- Focus on code analysis, pattern discovery, and architectural insights")
+            if characteristics.documentation_files:
+                items.append("- Prioritize documentation analysis for comprehensive understanding")
+        elif 'engineer' in agent_id.lower():
+            items.append("- Focus on implementation patterns, coding standards, and best practices")
+            if characteristics.testing_framework:
+                items.append(f"- Ensure test coverage using {characteristics.testing_framework}")
+        elif 'pm' in agent_id.lower() or 'manager' in agent_id.lower():
+            items.append("- Focus on project coordination, task delegation, and progress tracking")
+            items.append("- Monitor integration points and cross-component dependencies")
+
+        return '\n'.join(items) if items else "<!-- Domain knowledge will accumulate here -->"
+
+    def _format_section_items(self, items: List[str]) -> str:
+        """Format list of items as markdown bullet points."""
+        if not items:
+            return "<!-- Items will be added as knowledge accumulates -->"
+
+        formatted_items = []
+        for item in items:
+            # Ensure each item starts with a dash and is properly formatted
+            if not item.startswith('- '):
+                item = f"- {item}"
+            formatted_items.append(item)
+
+        return '\n'.join(formatted_items)
+
     def _add_item_to_section(self, content: str, section: str, new_item: str) -> str:
         """Add item to specified section, respecting limits.
 
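As a small illustration of the _format_section_items helper added at the end of this hunk (the input list here is made up), each entry is normalized to a markdown bullet:

    items = ["Tech stack: python, flask", "- Key config files: pyproject.toml"]
    formatted = "\n".join(i if i.startswith("- ") else f"- {i}" for i in items)
    # formatted ==
    # - Tech stack: python, flask
    # - Key config files: pyproject.toml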
@@ -373,9 +712,10 @@ class AgentMemoryManager(LoggerMixin):
         ):
             insert_point += 1
 
-        # Ensure line length limit
-
-
+        # Ensure line length limit (account for "- " prefix)
+        max_item_length = self.memory_limits['max_line_length'] - 2  # Subtract 2 for "- " prefix
+        if len(new_item) > max_item_length:
+            new_item = new_item[:max_item_length - 3] + '...'
 
         lines.insert(insert_point, f"- {new_item}")
 
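To make the new length arithmetic concrete, a standalone sketch with an assumed 120-character line limit (the real value comes from self.memory_limits['max_line_length']):

    max_line_length = 120                    # assumed limit, for illustration only
    new_item = "x" * 150                     # an over-long learning
    max_item_length = max_line_length - 2    # reserve 2 chars for the "- " prefix
    if len(new_item) > max_item_length:
        new_item = new_item[:max_item_length - 3] + '...'
    assert len(f"- {new_item}") == max_line_length  # 115 chars of text + "..." + prefix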
@@ -418,7 +758,7 @@ class AgentMemoryManager(LoggerMixin):
         # Insert new section
         new_section = [
             '',
-            f'## {section}
+            f'## {section}',
             f'- {new_item}',
             ''
         ]
@@ -558,7 +898,7 @@ class AgentMemoryManager(LoggerMixin):
         for section in missing_sections:
             section_content = [
                 '',
-                f'## {section}
+                f'## {section}',
                 '<!-- Section added by repair -->',
                 ''
             ]
@@ -850,6 +1190,153 @@ class AgentMemoryManager(LoggerMixin):
             self.logger.error(f"Error cross-referencing memories: {e}")
             return {"success": False, "error": str(e)}
 
+    def get_all_memories_raw(self) -> Dict[str, Any]:
+        """Get all agent memories in structured JSON format.
+
+        WHY: This provides programmatic access to all agent memories, allowing
+        external tools, scripts, or APIs to retrieve and process the complete
+        memory state of the system.
+
+        DESIGN DECISION: Returns structured data with metadata for each agent
+        including file stats, sections, and parsed content. This enables both
+        content access and system analysis.
+
+        Returns:
+            Dict containing structured memory data for all agents
+        """
+        try:
+            result = {
+                "success": True,
+                "timestamp": datetime.now().isoformat(),
+                "total_agents": 0,
+                "total_size_bytes": 0,
+                "agents": {}
+            }
+
+            # Ensure directory exists
+            if not self.memories_dir.exists():
+                return {
+                    "success": True,
+                    "timestamp": datetime.now().isoformat(),
+                    "total_agents": 0,
+                    "total_size_bytes": 0,
+                    "agents": {},
+                    "message": "No memory directory found"
+                }
+
+            # Find all agent memory files
+            memory_files = list(self.memories_dir.glob("*_agent.md"))
+            result["total_agents"] = len(memory_files)
+
+            # Process each agent memory file
+            for file_path in sorted(memory_files):
+                agent_id = file_path.stem.replace('_agent', '')
+
+                try:
+                    # Get file stats
+                    stat = file_path.stat()
+                    file_size = stat.st_size
+                    result["total_size_bytes"] += file_size
+
+                    # Load and parse memory content
+                    memory_content = self.load_agent_memory(agent_id)
+
+                    if memory_content:
+                        sections = self._parse_memory_content_to_dict(memory_content)
+
+                        # Count total items across all sections
+                        total_items = sum(len(items) for items in sections.values())
+
+                        result["agents"][agent_id] = {
+                            "agent_id": agent_id,
+                            "file_path": str(file_path),
+                            "file_size_bytes": file_size,
+                            "file_size_kb": round(file_size / 1024, 2),
+                            "last_modified": datetime.fromtimestamp(stat.st_mtime).isoformat(),
+                            "sections_count": len(sections),
+                            "total_items": total_items,
+                            "auto_learning": self._get_agent_auto_learning(agent_id),
+                            "size_limits": self._get_agent_limits(agent_id),
+                            "sections": sections,
+                            "raw_content": memory_content
+                        }
+                    else:
+                        result["agents"][agent_id] = {
+                            "agent_id": agent_id,
+                            "file_path": str(file_path),
+                            "file_size_bytes": file_size,
+                            "file_size_kb": round(file_size / 1024, 2),
+                            "last_modified": datetime.fromtimestamp(stat.st_mtime).isoformat(),
+                            "error": "Could not load memory content",
+                            "sections": {},
+                            "raw_content": ""
+                        }
+
+                except Exception as e:
+                    self.logger.error(f"Error processing memory for agent {agent_id}: {e}")
+                    result["agents"][agent_id] = {
+                        "agent_id": agent_id,
+                        "file_path": str(file_path),
+                        "error": str(e),
+                        "sections": {},
+                        "raw_content": ""
+                    }
+
+            result["total_size_kb"] = round(result["total_size_bytes"] / 1024, 2)
+            return result
+
+        except Exception as e:
+            self.logger.error(f"Error getting all memories raw: {e}")
+            return {
+                "success": False,
+                "error": str(e),
+                "timestamp": datetime.now().isoformat()
+            }
+
+    def _parse_memory_content_to_dict(self, content: str) -> Dict[str, List[str]]:
+        """Parse memory content into structured dictionary format.
+
+        WHY: Provides consistent parsing of memory content into sections and items
+        for both display and programmatic access. This ensures the same parsing
+        logic is used across the system.
+
+        Args:
+            content: Raw memory file content
+
+        Returns:
+            Dict mapping section names to lists of items
+        """
+        sections = {}
+        current_section = None
+        current_items = []
+
+        for line in content.split('\n'):
+            line = line.strip()
+
+            # Skip empty lines and header information
+            if not line or line.startswith('#') and 'Memory Usage' in line:
+                continue
+
+            if line.startswith('## ') and not line.startswith('## Memory Usage'):
+                # New section found
+                if current_section and current_items:
+                    sections[current_section] = current_items.copy()
+
+                current_section = line[3:].strip()
+                current_items = []
+
+            elif line.startswith('- ') and current_section:
+                # Item in current section
+                item = line[2:].strip()
+                if item and len(item) > 3:  # Filter out very short items
+                    current_items.append(item)
+
+        # Add final section
+        if current_section and current_items:
+            sections[current_section] = current_items
+
+        return sections
+
     def _ensure_memories_directory(self):
         """Ensure memories directory exists with README.
 
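A hedged sketch of how the new raw-memory accessor might be consumed by an external script (field names are taken from the result dict built above):

    from claude_mpm.services.agent_memory_manager import AgentMemoryManager

    manager = AgentMemoryManager()
    snapshot = manager.get_all_memories_raw()

    if snapshot["success"]:
        print(f"{snapshot['total_agents']} agent(s), {snapshot.get('total_size_kb', 0)} KB of memory")
        for agent_id, info in snapshot["agents"].items():
            # Each entry pairs file metadata with parsed sections mapped to item lists.
            print(f"{agent_id}: {info.get('sections_count', 0)} sections, {info.get('total_items', 0)} items")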
@@ -908,7 +1395,7 @@ Standard markdown with structured sections. Agents expect:
 
 
 # Convenience functions for external use
-def get_memory_manager(config: Optional[Config] = None) -> AgentMemoryManager:
+def get_memory_manager(config: Optional[Config] = None, working_directory: Optional[Path] = None) -> AgentMemoryManager:
     """Get a singleton instance of the memory manager.
 
     WHY: The memory manager should be shared across the application to ensure
@@ -916,10 +1403,11 @@ def get_memory_manager(config: Optional[Config] = None) -> AgentMemoryManager:
 
     Args:
         config: Optional Config object. Only used on first instantiation.
+        working_directory: Optional working directory. Only used on first instantiation.
 
     Returns:
         AgentMemoryManager: The memory manager instance
     """
     if not hasattr(get_memory_manager, '_instance'):
-        get_memory_manager._instance = AgentMemoryManager(config)
+        get_memory_manager._instance = AgentMemoryManager(config, working_directory)
     return get_memory_manager._instance