claude-mpm 4.0.19__py3-none-any.whl → 4.0.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/BUILD_NUMBER +1 -1
- claude_mpm/VERSION +1 -1
- claude_mpm/__main__.py +4 -0
- claude_mpm/agents/BASE_AGENT_TEMPLATE.md +38 -2
- claude_mpm/agents/INSTRUCTIONS.md +74 -0
- claude_mpm/agents/OUTPUT_STYLE.md +84 -0
- claude_mpm/agents/WORKFLOW.md +308 -4
- claude_mpm/agents/agents_metadata.py +52 -0
- claude_mpm/agents/base_agent_loader.py +75 -19
- claude_mpm/agents/templates/__init__.py +4 -0
- claude_mpm/agents/templates/api_qa.json +206 -0
- claude_mpm/agents/templates/qa.json +1 -1
- claude_mpm/agents/templates/research.json +24 -16
- claude_mpm/agents/templates/ticketing.json +18 -5
- claude_mpm/agents/templates/vercel_ops_agent.json +281 -0
- claude_mpm/agents/templates/vercel_ops_instructions.md +582 -0
- claude_mpm/cli/__init__.py +23 -1
- claude_mpm/cli/__main__.py +4 -0
- claude_mpm/cli/commands/mcp_command_router.py +87 -1
- claude_mpm/cli/commands/mcp_install_commands.py +207 -26
- claude_mpm/cli/commands/memory.py +32 -5
- claude_mpm/cli/commands/run.py +33 -6
- claude_mpm/cli/parsers/base_parser.py +5 -0
- claude_mpm/cli/parsers/mcp_parser.py +23 -0
- claude_mpm/cli/parsers/run_parser.py +5 -0
- claude_mpm/cli/utils.py +17 -4
- claude_mpm/constants.py +1 -0
- claude_mpm/core/base_service.py +8 -2
- claude_mpm/core/config.py +122 -32
- claude_mpm/core/framework_loader.py +385 -34
- claude_mpm/core/interactive_session.py +77 -12
- claude_mpm/core/oneshot_session.py +7 -1
- claude_mpm/core/output_style_manager.py +468 -0
- claude_mpm/core/unified_paths.py +190 -21
- claude_mpm/hooks/claude_hooks/hook_handler.py +91 -16
- claude_mpm/hooks/claude_hooks/hook_wrapper.sh +3 -0
- claude_mpm/init.py +1 -0
- claude_mpm/scripts/socketio_daemon.py +67 -7
- claude_mpm/scripts/socketio_daemon_hardened.py +897 -0
- claude_mpm/services/agents/deployment/agent_deployment.py +216 -10
- claude_mpm/services/agents/deployment/agent_template_builder.py +37 -1
- claude_mpm/services/agents/deployment/async_agent_deployment.py +65 -1
- claude_mpm/services/agents/deployment/multi_source_deployment_service.py +441 -0
- claude_mpm/services/agents/memory/__init__.py +0 -2
- claude_mpm/services/agents/memory/agent_memory_manager.py +577 -44
- claude_mpm/services/agents/memory/content_manager.py +144 -14
- claude_mpm/services/agents/memory/template_generator.py +7 -354
- claude_mpm/services/mcp_gateway/server/stdio_server.py +61 -169
- claude_mpm/services/memory_hook_service.py +62 -4
- claude_mpm/services/runner_configuration_service.py +5 -9
- claude_mpm/services/socketio/server/broadcaster.py +32 -1
- claude_mpm/services/socketio/server/core.py +4 -0
- claude_mpm/services/socketio/server/main.py +23 -4
- claude_mpm/services/subprocess_launcher_service.py +5 -0
- {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.22.dist-info}/METADATA +1 -1
- {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.22.dist-info}/RECORD +60 -54
- claude_mpm/services/agents/memory/analyzer.py +0 -430
- {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.22.dist-info}/WHEEL +0 -0
- {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.22.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.22.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.22.dist-info}/top_level.txt +0 -0
`claude_mpm/services/agents/memory/content_manager.py`:

```diff
@@ -15,7 +15,8 @@ This module provides:
 import logging
 import re
 from datetime import datetime
-from typing import Any, Dict, List, Optional
+from difflib import SequenceMatcher
+from typing import Any, Dict, List, Optional, Tuple
 
 
 class MemoryContentManager:
```
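The new `SequenceMatcher` import is the whole of the "NLP" machinery used below: it is Python's stdlib fuzzy matcher, so no ML dependency is added. A minimal standalone sketch of the behavior the new code relies on (the example strings are ours, not from the package):

```python
# Sketch: how difflib.SequenceMatcher scores two near-duplicate memory items.
# The strings are illustrative; the 0.8 threshold is the one used in the diff.
from difflib import SequenceMatcher

a = "Use pytest fixtures for database setup"
b = "use pytest fixtures for database set-up"

# Normalize the way the new code does: lowercase and strip whitespace.
ratio = SequenceMatcher(None, a.lower().strip(), b.lower().strip()).ratio()
print(f"{ratio:.2f}")  # close to 1.0, well above the 0.8 duplicate threshold
```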
```diff
@@ -43,11 +44,13 @@ class MemoryContentManager:
         self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
 
     def add_item_to_section(self, content: str, section: str, new_item: str) -> str:
-        """Add item to specified section
+        """Add item to specified section with NLP-based deduplication.
 
         WHY: Each section has a maximum item limit to prevent information overload
-        and maintain readability.
-        to
+        and maintain readability. Additionally, we use NLP-based similarity detection
+        to prevent duplicate or highly similar items from cluttering the memory.
+        When similar items are found (>80% similarity), the newer item replaces the
+        older one to maintain recency while avoiding redundancy.
 
         Args:
             content: Current memory file content
```
```diff
@@ -55,7 +58,7 @@ class MemoryContentManager:
             new_item: Item to add
 
         Returns:
-            str: Updated content with new item added
+            str: Updated content with new item added and duplicates removed
         """
         lines = content.split("\n")
         section_start = None
```
```diff
@@ -76,7 +79,34 @@ class MemoryContentManager:
         if section_end is None:
             section_end = len(lines)
 
-        #
+        # Ensure line length limit (account for "- " prefix)
+        max_item_length = (
+            self.memory_limits["max_line_length"] - 2
+        )  # Subtract 2 for "- " prefix
+        if len(new_item) > max_item_length:
+            new_item = new_item[: max_item_length - 3] + "..."
+
+        # Check for duplicates or similar items using NLP similarity
+        items_to_remove = []
+        for i in range(section_start + 1, section_end):
+            if lines[i].strip().startswith("- "):
+                existing_item = lines[i].strip()[2:]  # Remove "- " prefix
+                similarity = self._calculate_similarity(existing_item, new_item)
+
+                # If highly similar (>80%), mark for removal
+                if similarity > 0.8:
+                    items_to_remove.append(i)
+                    self.logger.debug(
+                        f"Found similar item (similarity={similarity:.2f}): "
+                        f"replacing '{existing_item[:50]}...' with '{new_item[:50]}...'"
+                    )
+
+        # Remove similar items (in reverse order to maintain indices)
+        for idx in reversed(items_to_remove):
+            lines.pop(idx)
+            section_end -= 1
+
+        # Count remaining items after deduplication
         item_count = 0
         first_item_index = None
         for i in range(section_start + 1, section_end):
```
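In effect, the block above turns a plain append into "drop anything more than 80% similar, then append", so the newest wording always wins. A simplified standalone sketch of that policy (the helper name `add_with_dedup` is ours, and it operates on a flat list rather than markdown section lines):

```python
# Simplified sketch of the insert-with-dedup policy added above; the real
# logic lives inside MemoryContentManager.add_item_to_section.
from difflib import SequenceMatcher
from typing import List


def add_with_dedup(items: List[str], new_item: str, threshold: float = 0.8) -> List[str]:
    def similarity(a: str, b: str) -> float:
        return SequenceMatcher(None, a.lower().strip(), b.lower().strip()).ratio()

    # Keep only entries that are not near-duplicates, then append the new one.
    kept = [item for item in items if similarity(item, new_item) <= threshold]
    kept.append(new_item)
    return kept


memories = ["Run tests with pytest", "Use black for formatting"]
print(add_with_dedup(memories, "run tests with pytest -x"))
# -> ['Use black for formatting', 'run tests with pytest -x']
```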
```diff
@@ -85,7 +115,7 @@ class MemoryContentManager:
                 first_item_index = i
                 item_count += 1
 
-        # Check if we
+        # Check if we need to remove oldest item due to section limits
         if item_count >= self.memory_limits["max_items_per_section"]:
             # Remove oldest item (first one) to make room
             if first_item_index is not None:
```
```diff
@@ -100,13 +130,6 @@ class MemoryContentManager:
         ):
             insert_point += 1
 
-        # Ensure line length limit (account for "- " prefix)
-        max_item_length = (
-            self.memory_limits["max_line_length"] - 2
-        )  # Subtract 2 for "- " prefix
-        if len(new_item) > max_item_length:
-            new_item = new_item[: max_item_length - 3] + "..."
-
         lines.insert(insert_point, f"- {new_item}")
 
         # Update timestamp
```
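The lines removed here are the same truncation block that the earlier hunk re-adds before the similarity check, so items are now clipped before they are compared rather than after. The arithmetic, as a quick sketch (the 120 is an assumed limit; the real value comes from `memory_limits`):

```python
# Sketch of the truncation arithmetic: the budget is max_line_length minus 2
# for the "- " bullet prefix, and clipped items end in "...".
max_line_length = 120  # assumed value for illustration
max_item_length = max_line_length - 2  # 118

new_item = "x" * 200
if len(new_item) > max_item_length:
    new_item = new_item[: max_item_length - 3] + "..."

assert len(new_item) == max_item_length
print(len(f"- {new_item}"))  # 120: the rendered bullet exactly fits the line budget
```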
```diff
@@ -335,6 +358,113 @@ class MemoryContentManager:
 
         return sections
 
+    def _calculate_similarity(self, str1: str, str2: str) -> float:
+        """Calculate similarity between two strings using fuzzy matching.
+
+        WHY: We use difflib's SequenceMatcher for lightweight NLP-based similarity
+        detection. This avoids heavy ML dependencies while still providing effective
+        duplicate detection. The algorithm finds the longest contiguous matching
+        subsequences and calculates a ratio between 0 and 1.
+
+        DESIGN DECISION: We normalize strings before comparison by:
+        - Converting to lowercase for case-insensitive matching
+        - Stripping whitespace to ignore formatting differences
+        - This balances accuracy with performance for real-time deduplication
+
+        Args:
+            str1: First string to compare
+            str2: Second string to compare
+
+        Returns:
+            float: Similarity score between 0 (completely different) and 1 (identical)
+        """
+        # Normalize strings for comparison
+        str1_normalized = str1.lower().strip()
+        str2_normalized = str2.lower().strip()
+
+        # Handle exact matches quickly
+        if str1_normalized == str2_normalized:
+            return 1.0
+
+        # Use SequenceMatcher for fuzzy matching
+        # None as first param tells it to use automatic junk heuristic
+        matcher = SequenceMatcher(None, str1_normalized, str2_normalized)
+        similarity = matcher.ratio()
+
+        # Additional check: if one string contains the other (substring match)
+        # This catches cases where one item is a more detailed version of another
+        if len(str1_normalized) > 20 and len(str2_normalized) > 20:
+            if str1_normalized in str2_normalized or str2_normalized in str1_normalized:
+                # Boost similarity for substring matches
+                similarity = max(similarity, 0.85)
+
+        return similarity
+
+    def deduplicate_section(self, content: str, section: str) -> Tuple[str, int]:
+        """Deduplicate items within a section using NLP similarity.
+
+        WHY: Over time, sections can accumulate similar or duplicate items from
+        different sessions. This method cleans up existing sections by removing
+        similar items while preserving the most recent/relevant ones.
+
+        Args:
+            content: Current memory file content
+            section: Section name to deduplicate
+
+        Returns:
+            Tuple of (updated content, number of items removed)
+        """
+        lines = content.split("\n")
+        section_start = None
+        section_end = None
+
+        # Find section boundaries
+        for i, line in enumerate(lines):
+            if line.startswith(f"## {section}"):
+                section_start = i
+            elif section_start is not None and line.startswith("## "):
+                section_end = i
+                break
+
+        if section_start is None:
+            return content, 0  # Section not found
+
+        if section_end is None:
+            section_end = len(lines)
+
+        # Collect all items in the section
+        items = []
+        item_indices = []
+        for i in range(section_start + 1, section_end):
+            if lines[i].strip().startswith("- "):
+                items.append(lines[i].strip()[2:])  # Remove "- " prefix
+                item_indices.append(i)
+
+        # Find duplicates using pairwise comparison
+        duplicates_to_remove = set()
+        for i in range(len(items)):
+            if i in duplicates_to_remove:
+                continue
+            for j in range(i + 1, len(items)):
+                if j in duplicates_to_remove:
+                    continue
+                similarity = self._calculate_similarity(items[i], items[j])
+                if similarity > 0.8:
+                    # Remove the older item (lower index)
+                    duplicates_to_remove.add(i)
+                    self.logger.debug(
+                        f"Deduplicating: '{items[i][:50]}...' "
+                        f"(keeping newer: '{items[j][:50]}...')"
+                    )
+                    break  # Move to next item
+
+        # Remove duplicates (in reverse order to maintain indices)
+        removed_count = len(duplicates_to_remove)
+        for idx in sorted(duplicates_to_remove, reverse=True):
+            lines.pop(item_indices[idx])
+
+        return "\n".join(lines), removed_count
+
     def validate_memory_size(self, content: str) -> tuple[bool, Optional[str]]:
         """Validate memory content size and structure.
 
```
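One subtlety in `_calculate_similarity` above: for strings longer than 20 characters, containment forces the score to at least 0.85, so a short item and a more detailed rewrite of it are treated as duplicates even when the raw `ratio()` falls below the 0.8 threshold. A sketch of that edge case (example strings are ours):

```python
# Sketch of the substring boost in _calculate_similarity; strings are illustrative.
from difflib import SequenceMatcher

short = "use connection pooling for postgres"
detailed = "use connection pooling for postgres, sized to twice the worker count"

raw = SequenceMatcher(None, short, detailed).ratio()
boosted = max(raw, 0.85) if short in detailed and len(short) > 20 else raw
print(f"raw={raw:.2f}, boosted={boosted:.2f}")  # raw is below 0.8; the boost lifts it to 0.85
```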
`claude_mpm/services/agents/memory/template_generator.py`:

```diff
@@ -18,15 +18,14 @@ from pathlib import Path
 from typing import Any, Dict, List
 
 from claude_mpm.core.config import Config
-from claude_mpm.services.project.analyzer import ProjectAnalyzer
 
 
 class MemoryTemplateGenerator:
     """Generates project-specific memory templates for agents.
 
     WHY: Instead of generic templates, agents need project-specific knowledge
-    from the start. This class
-
+    from the start. This class creates simple memory templates that agents
+    can populate as they learn about the project.
     """
 
     REQUIRED_SECTIONS = [
```
```diff
@@ -37,102 +36,34 @@ class MemoryTemplateGenerator:
     ]
 
     def __init__(
-        self, config: Config, working_directory: Path, project_analyzer: ProjectAnalyzer
+        self, config: Config, working_directory: Path
     ):
         """Initialize the template generator.
 
         Args:
             config: Configuration object
             working_directory: Working directory path
-            project_analyzer: Project analyzer instance
         """
         self.config = config
        self.working_directory = working_directory
-        self.project_analyzer = project_analyzer
         self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
 
     def create_default_memory(self, agent_id: str, limits: Dict[str, Any]) -> str:
-        """Create
+        """Create basic memory template for agent.
 
         Args:
             agent_id: The agent identifier
             limits: Memory limits for this agent
 
         Returns:
-            str: The
+            str: The basic memory template content
         """
         # Convert agent_id to proper name, handling cases like "test_agent" -> "Test"
         agent_name = agent_id.replace("_agent", "").replace("_", " ").title()
         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
 
-        #
-
-            project_characteristics = self.project_analyzer.analyze_project()
-            project_context = self.project_analyzer.get_project_context_summary()
-
-            self.logger.info(
-                f"Creating project-specific memory for {agent_id} using analyzed project context"
-            )
-        except Exception as e:
-            self.logger.warning(
-                f"Error analyzing project for {agent_id}, falling back to basic template: {e}"
-            )
-            return self._create_basic_memory_template(agent_id, limits)
-
-        # Create project-specific sections
-        architecture_items = self._generate_architecture_section(
-            project_characteristics
-        )
-        coding_patterns = self._generate_coding_patterns_section(
-            project_characteristics
-        )
-        implementation_guidelines = self._generate_implementation_guidelines(
-            project_characteristics
-        )
-        tech_context = self._generate_technical_context(project_characteristics)
-        integration_points = self._generate_integration_points(project_characteristics)
-
-        template = f"""# {agent_name} Agent Memory - {project_characteristics.project_name}
-
-<!-- MEMORY LIMITS: {limits['max_file_size_kb']}KB max | {limits['max_sections']} sections max | {limits['max_items_per_section']} items per section -->
-<!-- Last Updated: {timestamp} | Auto-updated by: {agent_id} -->
-
-## Project Context
-{project_context}
-
-## Project Architecture
-{self._format_section_items(architecture_items)}
-
-## Coding Patterns Learned
-{self._format_section_items(coding_patterns)}
-
-## Implementation Guidelines
-{self._format_section_items(implementation_guidelines)}
-
-## Domain-Specific Knowledge
-<!-- Agent-specific knowledge for {project_characteristics.project_name} domain -->
-{self._generate_domain_knowledge_starters(project_characteristics, agent_id)}
-
-## Effective Strategies
-<!-- Successful approaches discovered through experience -->
-
-## Common Mistakes to Avoid
-{self._format_section_items(self._generate_common_mistakes(project_characteristics))}
-
-## Integration Points
-{self._format_section_items(integration_points)}
-
-## Performance Considerations
-{self._format_section_items(self._generate_performance_considerations(project_characteristics))}
-
-## Current Technical Context
-{self._format_section_items(tech_context)}
-
-## Recent Learnings
-<!-- Most recent discoveries and insights -->
-"""
-
-        return template
+        # Create a simple template that agents will populate through learning
+        return self._create_basic_memory_template(agent_id, limits)
 
     def _create_basic_memory_template(
         self, agent_id: str, limits: Dict[str, Any]
```
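With the analyzer gone, `create_default_memory` is now a thin wrapper over `_create_basic_memory_template`. A hedged usage sketch (the `Config()` construction and the limits values are assumptions for illustration, not taken from the diff):

```python
# Hypothetical usage of the simplified generator. Config() with no arguments
# and the limits values below are assumptions, not confirmed by the diff.
from pathlib import Path

from claude_mpm.core.config import Config
from claude_mpm.services.agents.memory.template_generator import MemoryTemplateGenerator

generator = MemoryTemplateGenerator(Config(), Path.cwd())
limits = {
    "max_file_size_kb": 80,
    "max_sections": 10,
    "max_items_per_section": 15,
}
# No project analysis and no try/except fallback remain; this always returns
# the basic template that agents fill in as they learn.
print(generator.create_default_memory("engineer", limits))
```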
```diff
@@ -188,281 +119,3 @@ class MemoryTemplateGenerator:
 ## Recent Learnings
 <!-- Most recent discoveries and insights -->
 """
-
-    def _generate_architecture_section(self, characteristics) -> List[str]:
-        """Generate architecture section items based on project analysis."""
-        items = []
-
-        # Architecture type
-        items.append(
-            f"{characteristics.architecture_type} with {characteristics.primary_language or 'mixed'} implementation"
-        )
-
-        # Key directories structure
-        if characteristics.key_directories:
-            key_dirs = ", ".join(characteristics.key_directories[:5])
-            items.append(f"Main directories: {key_dirs}")
-
-        # Main modules
-        if characteristics.main_modules:
-            modules = ", ".join(characteristics.main_modules[:4])
-            items.append(f"Core modules: {modules}")
-
-        # Entry points
-        if characteristics.entry_points:
-            entries = ", ".join(characteristics.entry_points[:3])
-            items.append(f"Entry points: {entries}")
-
-        # Frameworks affecting architecture
-        if characteristics.web_frameworks:
-            frameworks = ", ".join(characteristics.web_frameworks[:3])
-            items.append(f"Web framework stack: {frameworks}")
-
-        return items[:8]  # Limit to prevent overwhelming
-
-    def _generate_coding_patterns_section(self, characteristics) -> List[str]:
-        """Generate coding patterns section based on project analysis."""
-        items = []
-
-        # Language-specific patterns
-        if characteristics.primary_language == "python":
-            items.append("Python project: use type hints, follow PEP 8 conventions")
-            if "django" in [fw.lower() for fw in characteristics.web_frameworks]:
-                items.append("Django patterns: models, views, templates separation")
-            elif "flask" in [fw.lower() for fw in characteristics.web_frameworks]:
-                items.append(
-                    "Flask patterns: blueprint organization, app factory pattern"
-                )
-        elif characteristics.primary_language == "node_js":
-            items.append("Node.js project: use async/await, ES6+ features")
-            if "express" in [fw.lower() for fw in characteristics.web_frameworks]:
-                items.append("Express patterns: middleware usage, route organization")
-
-        # Framework-specific patterns
-        for framework in characteristics.frameworks[:3]:
-            if "react" in framework.lower():
-                items.append("React patterns: component composition, hooks usage")
-            elif "vue" in framework.lower():
-                items.append("Vue patterns: single file components, composition API")
-
-        # Code conventions found
-        for convention in characteristics.code_conventions[:3]:
-            items.append(f"Project uses: {convention}")
-
-        return items[:8]
-
-    def _generate_implementation_guidelines(self, characteristics) -> List[str]:
-        """Generate implementation guidelines based on project analysis."""
-        items = []
-
-        # Package manager guidance
-        if characteristics.package_manager:
-            items.append(
-                f"Use {characteristics.package_manager} for dependency management"
-            )
-
-        # Testing guidelines
-        if characteristics.testing_framework:
-            items.append(f"Write tests using {characteristics.testing_framework}")
-
-        # Test patterns
-        for pattern in characteristics.test_patterns[:2]:
-            items.append(f"Follow {pattern.lower()}")
-
-        # Build tools
-        if characteristics.build_tools:
-            tools = ", ".join(characteristics.build_tools[:2])
-            items.append(f"Use build tools: {tools}")
-
-        # Configuration patterns
-        for config_pattern in characteristics.configuration_patterns[:2]:
-            items.append(f"Configuration: {config_pattern}")
-
-        # Important files to reference
-        important_configs = characteristics.important_configs[:3]
-        if important_configs:
-            configs = ", ".join(important_configs)
-            items.append(f"Key config files: {configs}")
-
-        return items[:8]
-
-    def _generate_technical_context(self, characteristics) -> List[str]:
-        """Generate current technical context based on project analysis."""
-        items = []
-
-        # Technology stack summary
-        tech_stack = []
-        if characteristics.primary_language:
-            tech_stack.append(characteristics.primary_language)
-        tech_stack.extend(characteristics.frameworks[:2])
-        if tech_stack:
-            items.append(f"Tech stack: {', '.join(tech_stack)}")
-
-        # Databases in use
-        if characteristics.databases:
-            dbs = ", ".join(characteristics.databases[:3])
-            items.append(f"Data storage: {dbs}")
-
-        # API patterns
-        if characteristics.api_patterns:
-            apis = ", ".join(characteristics.api_patterns[:2])
-            items.append(f"API patterns: {apis}")
-
-        # Key dependencies
-        if characteristics.key_dependencies:
-            deps = ", ".join(characteristics.key_dependencies[:4])
-            items.append(f"Key dependencies: {deps}")
-
-        # Documentation available
-        if characteristics.documentation_files:
-            docs = ", ".join(characteristics.documentation_files[:3])
-            items.append(f"Documentation: {docs}")
-
-        return items[:8]
-
-    def _generate_integration_points(self, characteristics) -> List[str]:
-        """Generate integration points based on project analysis."""
-        items = []
-
-        # Database integrations
-        for db in characteristics.databases[:3]:
-            items.append(f"{db.title()} database integration")
-
-        # Web framework integrations
-        for framework in characteristics.web_frameworks[:2]:
-            items.append(f"{framework} web framework integration")
-
-        # API integrations
-        for api_pattern in characteristics.api_patterns[:2]:
-            items.append(f"{api_pattern} integration pattern")
-
-        # Common integration patterns based on dependencies
-        integration_deps = [
-            dep
-            for dep in characteristics.key_dependencies
-            if any(
-                keyword in dep.lower()
-                for keyword in ["redis", "rabbit", "celery", "kafka", "docker"]
-            )
-        ]
-        for dep in integration_deps[:3]:
-            items.append(f"{dep} integration")
-
-        return items[:6]
-
-    def _generate_common_mistakes(self, characteristics) -> List[str]:
-        """Generate common mistakes based on project type and stack."""
-        items = []
-
-        # Language-specific mistakes
-        if characteristics.primary_language == "python":
-            items.append("Avoid circular imports - use late imports when needed")
-            items.append(
-                "Don't ignore virtual environment - always activate before work"
-            )
-        elif characteristics.primary_language == "node_js":
-            items.append("Avoid callback hell - use async/await consistently")
-            items.append("Don't commit node_modules - ensure .gitignore is correct")
-
-        # Framework-specific mistakes
-        if "django" in [fw.lower() for fw in characteristics.web_frameworks]:
-            items.append("Don't skip migrations - always create and apply them")
-        elif "flask" in [fw.lower() for fw in characteristics.web_frameworks]:
-            items.append("Avoid app context issues - use proper application factory")
-
-        # Database-specific mistakes
-        if characteristics.databases:
-            items.append("Don't ignore database transactions in multi-step operations")
-            items.append("Avoid N+1 queries - use proper joins or prefetching")
-
-        # Testing mistakes
-        if characteristics.testing_framework:
-            items.append(
-                "Don't skip test isolation - ensure tests can run independently"
-            )
-
-        return items[:8]
-
-    def _generate_performance_considerations(self, characteristics) -> List[str]:
-        """Generate performance considerations based on project stack."""
-        items = []
-
-        # Language-specific performance
-        if characteristics.primary_language == "python":
-            items.append("Use list comprehensions over loops where appropriate")
-            items.append("Consider caching for expensive operations")
-        elif characteristics.primary_language == "node_js":
-            items.append("Leverage event loop - avoid blocking operations")
-            items.append("Use streams for large data processing")
-
-        # Database performance
-        if characteristics.databases:
-            items.append("Index frequently queried columns")
-            items.append("Use connection pooling for database connections")
-
-        # Web framework performance
-        if characteristics.web_frameworks:
-            items.append("Implement appropriate caching strategies")
-            items.append("Optimize static asset delivery")
-
-        # Framework-specific performance
-        if "react" in [fw.lower() for fw in characteristics.frameworks]:
-            items.append("Use React.memo for expensive component renders")
-
-        return items[:6]
-
-    def _generate_domain_knowledge_starters(
-        self, characteristics, agent_id: str
-    ) -> str:
-        """Generate domain-specific knowledge starters based on project and agent type."""
-        items = []
-
-        # Project terminology
-        if characteristics.project_terminology:
-            terms = ", ".join(characteristics.project_terminology[:4])
-            items.append(f"- Key project terms: {terms}")
-
-        # Agent-specific starters
-        if "research" in agent_id.lower():
-            items.append(
-                "- Focus on code analysis, pattern discovery, and architectural insights"
-            )
-            if characteristics.documentation_files:
-                items.append(
-                    "- Prioritize documentation analysis for comprehensive understanding"
-                )
-        elif "engineer" in agent_id.lower():
-            items.append(
-                "- Focus on implementation patterns, coding standards, and best practices"
-            )
-            if characteristics.testing_framework:
-                items.append(
-                    f"- Ensure test coverage using {characteristics.testing_framework}"
-                )
-        elif "pm" in agent_id.lower() or "manager" in agent_id.lower():
-            items.append(
-                "- Focus on project coordination, task delegation, and progress tracking"
-            )
-            items.append(
-                "- Monitor integration points and cross-component dependencies"
-            )
-
-        return (
-            "\n".join(items)
-            if items
-            else "<!-- Domain knowledge will accumulate here -->"
-        )
-
-    def _format_section_items(self, items: List[str]) -> str:
-        """Format list of items as markdown bullet points."""
-        if not items:
-            return "<!-- Items will be added as knowledge accumulates -->"
-
-        formatted_items = []
-        for item in items:
-            # Ensure each item starts with a dash and is properly formatted
-            if not item.startswith("- "):
-                item = f"- {item}"
-            formatted_items.append(item)
-
-        return "\n".join(formatted_items)
```