claude-mpm 3.4.17__py3-none-any.whl → 3.4.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
claude_mpm/VERSION ADDED
@@ -0,0 +1 @@
+ 3.4.18
claude_mpm/__init__.py CHANGED
@@ -3,12 +3,18 @@
  from pathlib import Path
 
  # Get version from VERSION file - single source of truth
- version_file = Path(__file__).parent.parent.parent / "VERSION"
- if version_file.exists():
-     __version__ = version_file.read_text().strip()
+ # Try package VERSION file first (for installed packages)
+ package_version_file = Path(__file__).parent / "VERSION"
+ if package_version_file.exists():
+     __version__ = package_version_file.read_text().strip()
  else:
-     # Default version if VERSION file is missing
-     __version__ = "0.0.0"
+     # Fall back to project root VERSION file (for development)
+     root_version_file = Path(__file__).parent.parent.parent / "VERSION"
+     if root_version_file.exists():
+         __version__ = root_version_file.read_text().strip()
+     else:
+         # Default version if VERSION file is missing
+         __version__ = "0.0.0"
 
  __author__ = "Claude MPM Team"
 
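Note: with this change the version is resolved in order from the VERSION file shipped inside the installed package, then the project-root VERSION file in a development checkout, and finally the "0.0.0" default. A quick way to confirm which value was picked up (assuming the package is installed in the current environment):

    python -c "import claude_mpm; print(claude_mpm.__version__)"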
@@ -1,7 +1,7 @@
  {
    "schema_version": "1.2.0",
    "agent_id": "data_engineer_agent",
-   "agent_version": "1.2.0",
+   "agent_version": "1.3.0",
    "agent_type": "data_engineer",
    "metadata": {
      "name": "Data Engineer Agent",
@@ -18,7 +18,7 @@
      "updated_at": "2025-07-27T03:45:51.463714Z"
    },
    "capabilities": {
-     "model": "claude-sonnet-4-20250514",
+     "model": "claude-3-opus-20240229",
      "tools": [
        "Read",
        "Write",
@@ -1,7 +1,7 @@
  {
    "schema_version": "1.2.0",
    "agent_id": "documentation_agent",
-   "agent_version": "1.1.0",
+   "agent_version": "1.3.0",
    "agent_type": "documentation",
    "metadata": {
      "name": "Documentation Agent",
@@ -18,7 +18,7 @@
      "updated_at": "2025-07-27T03:45:51.468280Z"
    },
    "capabilities": {
-     "model": "claude-sonnet-4-20250514",
+     "model": "claude-3-5-sonnet-20241022",
      "tools": [
        "Read",
        "Write",
@@ -1,7 +1,7 @@
  {
    "schema_version": "1.2.0",
    "agent_id": "engineer_agent",
-   "agent_version": "1.1.0",
+   "agent_version": "1.3.0",
    "agent_type": "engineer",
    "metadata": {
      "name": "Engineer Agent",
@@ -19,7 +19,7 @@
      "updated_at": "2025-07-27T03:45:51.472566Z"
    },
    "capabilities": {
-     "model": "claude-sonnet-4-20250514",
+     "model": "claude-3-opus-20240229",
      "tools": [
        "Read",
        "Write",
@@ -1,7 +1,7 @@
  {
    "schema_version": "1.2.0",
    "agent_id": "ops_agent",
-   "agent_version": "1.1.0",
+   "agent_version": "1.3.0",
    "agent_type": "ops",
    "metadata": {
      "name": "Ops Agent",
@@ -18,7 +18,7 @@
      "updated_at": "2025-07-27T03:45:51.476772Z"
    },
    "capabilities": {
-     "model": "claude-sonnet-4-20250514",
+     "model": "claude-3-opus-20240229",
      "tools": [
        "Read",
        "Write",
@@ -1,25 +1,121 @@
  {
-   "name": "pm",
-   "display_name": "PM Agent",
-   "description": "Project Manager agent for Claude MPM framework",
-   "version": 2,
+   "schema_version": "1.2.0",
+   "agent_id": "pm_agent",
+   "agent_version": "2.2.0",
+   "agent_type": "pm",
+   "metadata": {
+     "name": "PM Agent",
+     "description": "Project Manager agent for Claude MPM framework",
+     "category": "management",
+     "tags": [
+       "pm",
+       "orchestration",
+       "delegation",
+       "coordination",
+       "management"
+     ],
+     "author": "Claude MPM Team",
+     "created_at": "2025-07-30T00:00:00.000000Z",
+     "updated_at": "2025-08-08T00:00:00.000000Z"
+   },
    "capabilities": {
+     "model": "claude-3-5-sonnet-20241022",
+     "tools": [
+       "Task",
+       "TodoWrite",
+       "WebSearch",
+       "WebFetch"
+     ],
+     "resource_tier": "standard",
+     "max_tokens": 8192,
+     "temperature": 0.1,
+     "timeout": 600,
+     "memory_limit": 2048,
+     "cpu_limit": 40,
+     "network_access": true,
+     "file_access": {
+       "read_paths": [],
+       "write_paths": []
+     },
      "can_delegate": true,
      "can_write": false,
      "can_read": false,
      "can_execute": false,
      "dangerous_tools": false,
-     "review_required": false,
-     "custom_tools": ["Task", "TodoWrite", "WebSearch", "WebFetch"]
+     "review_required": false
    },
-   "metadata": {
-     "team": "mpm-framework",
-     "project": "claude-mpm",
-     "priority": "critical",
-     "created": "2025-07-30",
-     "last_updated": "2025-07-30",
-     "optimization_level": "v2_claude4",
-     "token_efficiency": "optimized"
+ "instructions": "You are **Claude Multi-Agent Project Manager (claude-mpm)** - your **SOLE function** is **orchestration and delegation**.\n\n## CRITICAL AUTHORITY & IDENTITY\n\nYou are **FORBIDDEN** from direct work except:\n- **Task Tool** for delegation (primary function)\n- **TodoWrite** for tracking (with [Agent] prefixes, NEVER [PM] for implementation)\n- **WebSearch/WebFetch** only for delegation requirements\n- **Direct answers** for PM role/capability questions only\n- **Direct work** only when explicitly authorized: \"do this yourself\", \"don't delegate\", \"implement directly\"\n\n**ABSOLUTE RULE**: ALL other work must be delegated to specialized agents via Task Tool.\n\n**CRITICAL**: You must NEVER create todos with [PM] prefix for implementation work such as:\n- Updating files (delegate to appropriate agent)\n- Creating documentation (delegate to Documentation Agent)\n- Writing code (delegate to Engineer Agent)\n- Configuring systems (delegate to Ops Agent)\n- Creating roadmaps (delegate to Research Agent)\n\n## MANDATORY WORKFLOW\n**STRICT SEQUENCE - NO SKIPPING**:\n1. **Research** (ALWAYS FIRST) - analyze requirements, gather context\n2. **Engineer/Data Engineer** (ONLY after Research) - implementation\n3. **QA** (ONLY after Engineering) - **MUST receive original user instructions + explicit sign-off required**\n4. **Documentation** (ONLY after QA sign-off) - documentation work\n\n**QA Sign-off Format**: \"QA Complete: [Pass/Fail] - [Details]\"\n**User Override Required** to skip: \"Skip workflow\", \"Go directly to [phase]\", \"No QA needed\"\n\n## ENHANCED TASK DELEGATION FORMAT\n```\nTask: <Specific, measurable action>\nAgent: <Specialized Agent Name>\nContext:\n Goal: <Business outcome and success criteria>\n Inputs: <Files, data, dependencies, previous outputs>\n Acceptance Criteria: \n - <Objective test 1>\n - <Objective test 2>\n Constraints:\n Performance: <Speed, memory, scalability requirements>\n Style: <Coding standards, formatting, conventions>\n Security: <Auth, validation, compliance requirements>\n Timeline: <Deadlines, milestones>\n Priority: <Critical|High|Medium|Low>\n Dependencies: <Prerequisite tasks or external requirements>\n Risk Factors: <Potential issues and mitigation strategies>\n```\n\n## MEMORY MANAGEMENT (SECONDARY CAPABILITY)\n\n### Memory Evaluation Protocol\n**MANDATORY for ALL user prompts** - Evaluate every user request for memory indicators:\n\n**Memory Trigger Words/Phrases**:\n- \"remember\", \"don't forget\", \"keep in mind\", \"note that\"\n- \"make sure to\", \"always\", \"never\", \"important\"\n- \"going forward\", \"in the future\", \"from now on\"\n- \"this pattern\", \"this approach\", \"this way\"\n\n**When Memory Indicators Detected**:\n1. **Extract Key Information**: Identify facts, patterns, or guidelines to preserve\n2. **Determine Agent & Type**:\n - Code patterns/standards → Engineer Agent (type: pattern)\n - Architecture decisions → Research Agent (type: architecture)\n - Testing requirements → QA Agent (type: guideline)\n - Security policies → Security Agent (type: guideline)\n - Documentation standards → Documentation Agent (type: guideline)\n3. **Delegate Storage**: Use memory task format with appropriate agent\n4. 
**Confirm to User**: \"I'm storing this information: [brief summary] for [agent]\"\n\n### Memory Storage Task Format\nFor explicit memory requests:\n```\nTask: Store project-specific memory\nAgent: <appropriate agent based on content>\nContext:\n Goal: Preserve important project knowledge for future reference\n Memory Request: <user's original request>\n Suggested Format:\n # Add To Memory:\n Type: <pattern|architecture|guideline|mistake|strategy|integration|performance|context>\n Content: <concise summary under 100 chars>\n #\n```\n\n### Agent Memory Specialization Guide\n- **Engineering Agent**: Implementation patterns, code architecture, performance optimizations\n- **Research Agent**: Analysis findings, investigation results, domain knowledge\n- **QA Agent**: Testing strategies, quality standards, bug patterns\n- **Security Agent**: Security patterns, threat analysis, compliance requirements\n- **Documentation Agent**: Writing standards, content organization patterns\n\n## CONTEXT-AWARE AGENT SELECTION\n- **PM role/capabilities questions**: Answer directly (only exception)\n- **Explanations/How-to questions**: Delegate to Documentation Agent\n- **Codebase analysis**: Delegate to Research Agent\n- **Implementation tasks**: Delegate to Engineer Agent \n- **Security-sensitive operations**: Auto-route to Security Agent\n- **ALL other tasks**: Must delegate to appropriate specialized agent\n\n## TODOWRITE REQUIREMENTS\n**MANDATORY**: Always prefix tasks with [Agent] - NEVER use [PM] prefix for implementation work:\n- `[Research] Analyze authentication patterns`\n- `[Engineer] Implement user registration`\n- `[QA] Test payment flow (BLOCKED - waiting for fix)`\n- `[Documentation] Update API docs after QA sign-off`\n\n**FORBIDDEN [PM] todo examples**:\n- ❌ `[PM] Update CLAUDE.md` - Should delegate to Documentation Agent\n- ❌ `[PM] Create implementation roadmap` - Should delegate to Research Agent\n- ❌ `[PM] Configure systems` - Should delegate to Ops Agent\n\n**ONLY acceptable PM todos** (orchestration/delegation only):\n- ✅ `Building delegation context for [task]` (internal PM work)\n- ✅ `Aggregating results from agents` (internal PM work)\n\n## ERROR HANDLING PROTOCOL\n**3-Attempt Process**:\n1. **First Failure**: Re-delegate with enhanced context\n2. **Second Failure**: Mark \"ERROR - Attempt 2/3\", escalate to Research if needed\n3. **Third Failure**: TodoWrite escalation with user decision required\n\n## STANDARD OPERATING PROCEDURE\n1. **Analysis**: Parse request, assess context completeness (NO TOOLS)\n1.5. **Memory Evaluation**: Check for memory indicators, extract key information, delegate storage if detected\n2. **Planning**: Agent selection, task breakdown, priority assignment, dependency mapping\n3. **Delegation**: Task Tool with enhanced format, context enrichment\n4. **Monitoring**: Track progress, handle errors, dynamic adjustment\n5. **Integration**: Synthesize results (NO TOOLS), validate outputs, report or re-delegate\n\n## PROFESSIONAL COMMUNICATION\n- Maintain neutral, professional tone as default\n- Avoid overeager enthusiasm (\"Excellent!\", \"Amazing!\", \"Perfect!\")\n- Use appropriate acknowledgments (\"Understood\", \"Confirmed\", \"Noted\")\n- Never fallback to simpler solutions without explicit user instruction\n- Never use mock implementations outside test environments\n\nRemember: You are an **orchestrator and delegator ONLY**. Your power lies in coordinating specialized agents, not in doing the work yourself.",
+   "knowledge": {
+     "domain_expertise": [
+       "Project management and orchestration patterns",
+       "Agent coordination and delegation strategies",
+       "Workflow management and process optimization",
+       "Quality assurance and review coordination",
+       "Memory management and knowledge preservation"
+     ],
+     "best_practices": [
+       "Always delegate implementation work to specialized agents",
+       "Follow strict workflow sequence: Research → Engineering → QA → Documentation",
+       "Use enhanced task delegation format with detailed context",
+       "Evaluate all requests for memory indicators and delegate storage",
+       "Maintain professional communication and avoid direct implementation"
+     ],
+     "constraints": [
+       "Cannot perform direct implementation work except when explicitly authorized",
+       "Must delegate all technical tasks to appropriate specialized agents",
+       "Cannot skip workflow phases without explicit user override",
+       "Must use [Agent] prefixes in TodoWrite, never [PM] for implementation"
+     ],
+     "examples": []
+   },
+   "interactions": {
+     "input_format": {
+       "required_fields": [
+         "task"
+       ],
+       "optional_fields": [
+         "context",
+         "constraints",
+         "priority"
+       ]
+     },
+     "output_format": {
+       "structure": "markdown",
+       "includes": [
+         "delegation_plan",
+         "agent_assignments",
+         "progress_tracking"
+       ]
+     },
+     "handoff_agents": [
+       "research",
+       "engineer",
+       "data_engineer",
+       "qa",
+       "security",
+       "documentation",
+       "ops",
+       "version_control"
+     ],
+     "triggers": []
    },
- "instructions": "You are **Claude Multi-Agent Project Manager (claude-mpm)** - your **SOLE function** is **orchestration and delegation**.\n\n## CRITICAL AUTHORITY & IDENTITY\n\nYou are **FORBIDDEN** from direct work except:\n- **Task Tool** for delegation (primary function)\n- **TodoWrite** for tracking (with [Agent] prefixes, NEVER [PM] for implementation)\n- **WebSearch/WebFetch** only for delegation requirements\n- **Direct answers** for PM role/capability questions only\n- **Direct work** only when explicitly authorized: \"do this yourself\", \"don't delegate\", \"implement directly\"\n\n**ABSOLUTE RULE**: ALL other work must be delegated to specialized agents via Task Tool.\n\n**CRITICAL**: You must NEVER create todos with [PM] prefix for implementation work such as:\n- Updating files (delegate to appropriate agent)\n- Creating documentation (delegate to Documentation Agent)\n- Writing code (delegate to Engineer Agent)\n- Configuring systems (delegate to Ops Agent)\n- Creating roadmaps (delegate to Research Agent)\n\n## MANDATORY WORKFLOW\n**STRICT SEQUENCE - NO SKIPPING**:\n1. **Research** (ALWAYS FIRST) - analyze requirements, gather context\n2. **Engineer/Data Engineer** (ONLY after Research) - implementation\n3. **QA** (ONLY after Engineering) - **MUST receive original user instructions + explicit sign-off required**\n4. **Documentation** (ONLY after QA sign-off) - documentation work\n\n**QA Sign-off Format**: \"QA Complete: [Pass/Fail] - [Details]\"\n**User Override Required** to skip: \"Skip workflow\", \"Go directly to [phase]\", \"No QA needed\"\n\n## ENHANCED TASK DELEGATION FORMAT\n```\nTask: <Specific, measurable action>\nAgent: <Specialized Agent Name>\nContext:\n Goal: <Business outcome and success criteria>\n Inputs: <Files, data, dependencies, previous outputs>\n Acceptance Criteria: \n - <Objective test 1>\n - <Objective test 2>\n Constraints:\n Performance: <Speed, memory, scalability requirements>\n Style: <Coding standards, formatting, conventions>\n Security: <Auth, validation, compliance requirements>\n Timeline: <Deadlines, milestones>\n Priority: <Critical|High|Medium|Low>\n Dependencies: <Prerequisite tasks or external requirements>\n Risk Factors: <Potential issues and mitigation strategies>\n```\n\n## MEMORY MANAGEMENT (SECONDARY CAPABILITY)\n\n### Memory Evaluation Protocol\n**MANDATORY for ALL user prompts** - Evaluate every user request for memory indicators:\n\n**Memory Trigger Words/Phrases**:\n- \"remember\", \"don't forget\", \"keep in mind\", \"note that\"\n- \"make sure to\", \"always\", \"never\", \"important\"\n- \"going forward\", \"in the future\", \"from now on\"\n- \"this pattern\", \"this approach\", \"this way\"\n\n**When Memory Indicators Detected**:\n1. **Extract Key Information**: Identify facts, patterns, or guidelines to preserve\n2. **Determine Agent & Type**:\n - Code patterns/standards → Engineer Agent (type: pattern)\n - Architecture decisions → Research Agent (type: architecture)\n - Testing requirements → QA Agent (type: guideline)\n - Security policies → Security Agent (type: guideline)\n - Documentation standards → Documentation Agent (type: guideline)\n3. **Delegate Storage**: Use memory task format with appropriate agent\n4. 
**Confirm to User**: \"I'm storing this information: [brief summary] for [agent]\"\n\n### Memory Storage Task Format\nFor explicit memory requests:\n```\nTask: Store project-specific memory\nAgent: <appropriate agent based on content>\nContext:\n Goal: Preserve important project knowledge for future reference\n Memory Request: <user's original request>\n Suggested Format:\n # Add To Memory:\n Type: <pattern|architecture|guideline|mistake|strategy|integration|performance|context>\n Content: <concise summary under 100 chars>\n #\n```\n\n### Agent Memory Specialization Guide\n- **Engineering Agent**: Implementation patterns, code architecture, performance optimizations\n- **Research Agent**: Analysis findings, investigation results, domain knowledge\n- **QA Agent**: Testing strategies, quality standards, bug patterns\n- **Security Agent**: Security patterns, threat analysis, compliance requirements\n- **Documentation Agent**: Writing standards, content organization patterns\n\n## CONTEXT-AWARE AGENT SELECTION\n- **PM role/capabilities questions**: Answer directly (only exception)\n- **Explanations/How-to questions**: Delegate to Documentation Agent\n- **Codebase analysis**: Delegate to Research Agent\n- **Implementation tasks**: Delegate to Engineer Agent \n- **Security-sensitive operations**: Auto-route to Security Agent\n- **ALL other tasks**: Must delegate to appropriate specialized agent\n\n## TODOWRITE REQUIREMENTS\n**MANDATORY**: Always prefix tasks with [Agent] - NEVER use [PM] prefix for implementation work:\n- `[Research] Analyze authentication patterns`\n- `[Engineer] Implement user registration`\n- `[QA] Test payment flow (BLOCKED - waiting for fix)`\n- `[Documentation] Update API docs after QA sign-off`\n\n**FORBIDDEN [PM] todo examples**:\n- ❌ `[PM] Update CLAUDE.md` - Should delegate to Documentation Agent\n- ❌ `[PM] Create implementation roadmap` - Should delegate to Research Agent\n- ❌ `[PM] Configure systems` - Should delegate to Ops Agent\n\n**ONLY acceptable PM todos** (orchestration/delegation only):\n- ✅ `Building delegation context for [task]` (internal PM work)\n- ✅ `Aggregating results from agents` (internal PM work)\n\n## ERROR HANDLING PROTOCOL\n**3-Attempt Process**:\n1. **First Failure**: Re-delegate with enhanced context\n2. **Second Failure**: Mark \"ERROR - Attempt 2/3\", escalate to Research if needed\n3. **Third Failure**: TodoWrite escalation with user decision required\n\n## STANDARD OPERATING PROCEDURE\n1. **Analysis**: Parse request, assess context completeness (NO TOOLS)\n1.5. **Memory Evaluation**: Check for memory indicators, extract key information, delegate storage if detected\n2. **Planning**: Agent selection, task breakdown, priority assignment, dependency mapping\n3. **Delegation**: Task Tool with enhanced format, context enrichment\n4. **Monitoring**: Track progress, handle errors, dynamic adjustment\n5. **Integration**: Synthesize results (NO TOOLS), validate outputs, report or re-delegate\n\n## PROFESSIONAL COMMUNICATION\n- Maintain neutral, professional tone as default\n- Avoid overeager enthusiasm (\"Excellent!\", \"Amazing!\", \"Perfect!\")\n- Use appropriate acknowledgments (\"Understood\", \"Confirmed\", \"Noted\")\n- Never fallback to simpler solutions without explicit user instruction\n- Never use mock implementations outside test environments\n\nRemember: You are an **orchestrator and delegator ONLY**. Your power lies in coordinating specialized agents, not in doing the work yourself."
+   "testing": {
+     "test_cases": [
+       {
+         "name": "Basic PM delegation",
+         "input": "Implement a new feature",
+         "expected_behavior": "Agent delegates to Research first, then appropriate implementation agents",
+         "validation_criteria": [
+           "delegates_to_research_first",
+           "follows_workflow_sequence",
+           "uses_task_tool"
+         ]
+       }
+     ],
+     "performance_benchmarks": {
+       "response_time": 180,
+       "token_usage": 4096,
+       "success_rate": 0.98
+     }
+   }
  }
@@ -1,7 +1,7 @@
  {
    "schema_version": "1.2.0",
    "agent_id": "qa_agent",
-   "agent_version": "2.3.0",
+   "agent_version": "2.5.0",
    "agent_type": "qa",
    "metadata": {
      "name": "Qa Agent",
@@ -18,7 +18,7 @@
      "updated_at": "2025-07-27T03:45:51.480806Z"
    },
    "capabilities": {
-     "model": "claude-sonnet-4-20250514",
+     "model": "claude-3-5-sonnet-20241022",
      "tools": [
        "Read",
        "Write",
@@ -1,7 +1,7 @@
  {
    "schema_version": "1.2.0",
    "agent_id": "research_agent",
-   "agent_version": "2.2.0",
+   "agent_version": "2.4.0",
    "agent_type": "research",
    "metadata": {
      "name": "Research Agent",
@@ -18,7 +18,7 @@
      "category": "research"
    },
    "capabilities": {
-     "model": "claude-sonnet-4-20250514",
+     "model": "claude-3-5-sonnet-20241022",
      "tools": [
        "Read",
        "Grep",
@@ -1,7 +1,7 @@
  {
    "schema_version": "1.2.0",
    "agent_id": "security_agent",
-   "agent_version": "1.1.0",
+   "agent_version": "1.3.0",
    "agent_type": "security",
    "metadata": {
      "name": "Security Agent",
@@ -18,7 +18,7 @@
      "updated_at": "2025-07-27T03:45:51.489363Z"
    },
    "capabilities": {
-     "model": "claude-sonnet-4-20250514",
+     "model": "claude-3-5-sonnet-20241022",
      "tools": [
        "Read",
        "Grep",
@@ -1,7 +1,7 @@
  {
    "schema_version": "1.2.0",
    "agent_id": "test_integration_agent",
-   "agent_version": "1.0.0",
+   "agent_version": "1.3.0",
    "agent_type": "test_integration",
    "metadata": {
      "name": "Test Integration Agent",
@@ -18,7 +18,7 @@
      "updated_at": "2025-08-05T00:00:00.000000Z"
    },
    "capabilities": {
-     "model": "claude-sonnet-4-20250514",
+     "model": "claude-3-5-sonnet-20241022",
      "tools": [
        "Read",
        "Write",
@@ -1,7 +1,7 @@
  {
    "schema_version": "1.2.0",
    "agent_id": "version_control_agent",
-   "agent_version": "1.1.0",
+   "agent_version": "1.3.0",
    "agent_type": "version_control",
    "metadata": {
      "name": "Version Control Agent",
@@ -19,7 +19,7 @@
      "updated_at": "2025-07-27T03:45:51.494067Z"
    },
    "capabilities": {
-     "model": "claude-sonnet-4-20250514",
+     "model": "claude-3-5-sonnet-20241022",
      "tools": [
        "Read",
        "Bash",
@@ -28,16 +28,22 @@ from .commands import (
  )
 
  # Get version from VERSION file - single source of truth
- version_file = Path(__file__).parent.parent.parent / "VERSION"
- if version_file.exists():
-     __version__ = version_file.read_text().strip()
+ # Try package VERSION file first (for installed packages)
+ package_version_file = Path(__file__).parent.parent / "VERSION"
+ if package_version_file.exists():
+     __version__ = package_version_file.read_text().strip()
  else:
-     # Try to import from package as fallback
-     try:
-         from .. import __version__
-     except ImportError:
-         # Default version if all else fails
-         __version__ = "0.0.0"
+     # Fall back to project root VERSION file (for development)
+     root_version_file = Path(__file__).parent.parent.parent.parent / "VERSION"
+     if root_version_file.exists():
+         __version__ = root_version_file.read_text().strip()
+     else:
+         # Try to import from package as fallback
+         try:
+             from .. import __version__
+         except ImportError:
+             # Default version if all else fails
+             __version__ = "0.0.0"
 
 
  def main(argv: Optional[list] = None):
@@ -186,6 +186,7 @@ class AgentDeploymentService:
  "skipped": [],
  "updated": [],
  "migrated": [], # Track agents migrated from old format
+ "converted": [], # Track YAML to MD conversions
  "total": 0,
  # METRICS: Add detailed timing and performance data to results
  "metrics": {
@@ -212,6 +213,10 @@ class AgentDeploymentService:
  results["errors"].append(error_msg)
  return results
 
+ # Convert any existing YAML files to MD format
+ conversion_results = self._convert_yaml_to_md(target_dir)
+ results["converted"] = conversion_results.get("converted", [])
+
  # Load base agent content
  # OPERATIONAL NOTE: Base agent contains shared configuration and instructions
  # that all agents inherit. This reduces duplication and ensures consistency.
@@ -242,7 +247,7 @@ class AgentDeploymentService:
  agent_start_time = time.time()
 
  agent_name = template_file.stem
- target_file = target_dir / f"{agent_name}.yaml"
+ target_file = target_dir / f"{agent_name}.md"
 
  # Check if agent needs update
  needs_update = force_rebuild
@@ -265,12 +270,12 @@ class AgentDeploymentService:
  self.logger.debug(f"Skipped up-to-date agent: {agent_name}")
  continue
 
- # Build the agent file
- agent_yaml = self._build_agent_yaml(agent_name, template_file, base_agent_data)
+ # Build the agent file as markdown with YAML frontmatter
+ agent_content = self._build_agent_markdown(agent_name, template_file, base_agent_data)
 
  # Write the agent file
  is_update = target_file.exists()
- target_file.write_text(agent_yaml)
+ target_file.write_text(agent_content)
 
  # METRICS: Record deployment time for this agent
  agent_deployment_time = (time.time() - agent_start_time) * 1000 # Convert to ms
@@ -320,6 +325,7 @@ class AgentDeploymentService:
  f"Deployed {len(results['deployed'])} agents, "
  f"updated {len(results['updated'])}, "
  f"migrated {len(results['migrated'])}, "
+ f"converted {len(results['converted'])} YAML files, "
  f"skipped {len(results['skipped'])}, "
  f"errors: {len(results['errors'])}"
  )
@@ -514,6 +520,13 @@ class AgentDeploymentService:
      ["Read", "Write", "Edit", "Grep", "Glob", "LS"] # Default fallback
  )
 
+ # Get model from capabilities.model in new format
+ model = (
+     template_data.get('capabilities', {}).get('model') or
+     template_data.get('configuration_fields', {}).get('model') or
+     "claude-sonnet-4-20250514" # Default fallback
+ )
+
  frontmatter = f"""---
  name: {agent_name}
  description: "{description}"
@@ -523,6 +536,7 @@ created: "{datetime.now().isoformat()}Z"
  updated: "{datetime.now().isoformat()}Z"
  tags: {tags}
  tools: {tools}
+ model: "{model}"
  metadata:
    base_version: "{self._format_version_display(base_version)}"
    agent_version: "{self._format_version_display(agent_version)}"
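For illustration only, a deployed agent file built by _build_agent_markdown now begins with YAML frontmatter roughly like the following; the field values here are hypothetical, not taken from an actual build, and fields not visible in this hunk are omitted:

    ---
    name: engineer
    description: "Engineer Agent"
    created: "2025-08-08T00:00:00Z"
    updated: "2025-08-08T00:00:00Z"
    tags: ['engineer', 'mpm-framework']
    tools: ['Read', 'Write', 'Edit']
    model: "claude-3-opus-20240229"
    metadata:
      base_version: "0.3.0"
      agent_version: "1.3.0"
    ---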
@@ -848,7 +862,7 @@ temperature: {temperature}"""
  return results
 
  # List deployed agents
- agent_files = list(agents_dir.glob("*.yaml"))
+ agent_files = list(agents_dir.glob("*.md"))
  for agent_file in agent_files:
      try:
          # Read first few lines to get agent name from YAML
@@ -1101,7 +1115,7 @@ temperature: {temperature}"""
  return results
 
  # Remove system agents only (identified by claude-mpm author)
- agent_files = list(agents_dir.glob("*.yaml"))
+ agent_files = list(agents_dir.glob("*.md"))
 
  for agent_file in agent_files:
      try:
@@ -1531,4 +1545,212 @@ temperature: {temperature}"""
          except Exception as e:
              error_msg = f"Failed to deploy system instructions: {e}"
              self.logger.error(error_msg)
-             results["errors"].append(error_msg)
+             results["errors"].append(error_msg)
+
+     def _convert_yaml_to_md(self, target_dir: Path) -> Dict[str, Any]:
+         """
+         Convert existing YAML agent files to MD format with YAML frontmatter.
+
+         This method handles backward compatibility by finding existing .yaml
+         agent files and converting them to .md format expected by Claude Code.
+
+         Args:
+             target_dir: Directory containing agent files
+
+         Returns:
+             Dictionary with conversion results:
+             - converted: List of converted files
+             - errors: List of conversion errors
+             - skipped: List of files that didn't need conversion
+         """
+         results = {
+             "converted": [],
+             "errors": [],
+             "skipped": []
+         }
+
+         try:
+             # Find existing YAML agent files
+             yaml_files = list(target_dir.glob("*.yaml"))
+
+             if not yaml_files:
+                 self.logger.debug("No YAML files found to convert")
+                 return results
+
+             self.logger.info(f"Found {len(yaml_files)} YAML files to convert to MD format")
+
+             for yaml_file in yaml_files:
+                 try:
+                     agent_name = yaml_file.stem
+                     md_file = target_dir / f"{agent_name}.md"
+
+                     # Skip if MD file already exists (unless it's older than YAML)
+                     if md_file.exists():
+                         # Check modification times for safety
+                         yaml_mtime = yaml_file.stat().st_mtime
+                         md_mtime = md_file.stat().st_mtime
+
+                         if md_mtime >= yaml_mtime:
+                             results["skipped"].append({
+                                 "yaml_file": str(yaml_file),
+                                 "md_file": str(md_file),
+                                 "reason": "MD file already exists and is newer"
+                             })
+                             continue
+                         else:
+                             # MD file is older, proceed with conversion
+                             self.logger.info(f"MD file {md_file.name} is older than YAML, converting...")
+
+                     # Read YAML content
+                     yaml_content = yaml_file.read_text()
+
+                     # Convert YAML to MD with YAML frontmatter
+                     md_content = self._convert_yaml_content_to_md(yaml_content, agent_name)
+
+                     # Write MD file
+                     md_file.write_text(md_content)
+
+                     # Create backup of YAML file before removing (for safety)
+                     backup_file = target_dir / f"{agent_name}.yaml.backup"
+                     try:
+                         yaml_file.rename(backup_file)
+                         self.logger.debug(f"Created backup: {backup_file.name}")
+                     except Exception as backup_error:
+                         self.logger.warning(f"Failed to create backup for {yaml_file.name}: {backup_error}")
+                         # Still remove the original YAML file even if backup fails
+                         yaml_file.unlink()
+
+                     results["converted"].append({
+                         "from": str(yaml_file),
+                         "to": str(md_file),
+                         "agent": agent_name
+                     })
+
+                     self.logger.info(f"Converted {yaml_file.name} to {md_file.name}")
+
+                 except Exception as e:
+                     error_msg = f"Failed to convert {yaml_file.name}: {e}"
+                     self.logger.error(error_msg)
+                     results["errors"].append(error_msg)
+
+         except Exception as e:
+             error_msg = f"YAML to MD conversion failed: {e}"
+             self.logger.error(error_msg)
+             results["errors"].append(error_msg)
+
+         return results
+
+     def _convert_yaml_content_to_md(self, yaml_content: str, agent_name: str) -> str:
+         """
+         Convert YAML agent content to MD format with YAML frontmatter.
+
+         Args:
+             yaml_content: Original YAML content
+             agent_name: Name of the agent
+
+         Returns:
+             Markdown content with YAML frontmatter
+         """
+         import re
+         from datetime import datetime
+
+         # Extract YAML frontmatter and content
+         yaml_parts = yaml_content.split('---', 2)
+
+         if len(yaml_parts) < 3:
+             # No proper YAML frontmatter, treat entire content as instructions
+             frontmatter = f"""---
+ name: {agent_name}
+ description: "Agent for specialized tasks"
+ version: "1.0.0"
+ author: "claude-mpm@anthropic.com"
+ created: "{datetime.now().isoformat()}Z"
+ updated: "{datetime.now().isoformat()}Z"
+ tags: ["{agent_name}", "mpm-framework"]
+ tools: ["Read", "Write", "Edit", "Grep", "Glob", "LS"]
+ metadata:
+   deployment_type: "system"
+   converted_from: "yaml"
+ ---
+
+ """
+             return frontmatter + yaml_content.strip()
+
+         # Parse existing frontmatter
+         yaml_frontmatter = yaml_parts[1].strip()
+         instructions = yaml_parts[2].strip()
+
+         # Extract key fields from YAML frontmatter
+         name = agent_name
+         description = self._extract_yaml_field(yaml_frontmatter, 'description') or f"{agent_name.title()} agent for specialized tasks"
+         version = self._extract_yaml_field(yaml_frontmatter, 'version') or "1.0.0"
+         tools_line = self._extract_yaml_field(yaml_frontmatter, 'tools') or "Read, Write, Edit, Grep, Glob, LS"
+
+         # Convert tools string to list format
+         if isinstance(tools_line, str):
+             if tools_line.startswith('[') and tools_line.endswith(']'):
+                 # Already in list format
+                 tools_list = tools_line
+             else:
+                 # Convert comma-separated to list
+                 tools = [tool.strip() for tool in tools_line.split(',')]
+                 tools_list = str(tools)
+         else:
+             tools_list = str(tools_line) if tools_line else '["Read", "Write", "Edit", "Grep", "Glob", "LS"]'
+
+         # Build new YAML frontmatter
+         new_frontmatter = f"""---
+ name: {name}
+ description: "{description}"
+ version: "{version}"
+ author: "claude-mpm@anthropic.com"
+ created: "{datetime.now().isoformat()}Z"
+ updated: "{datetime.now().isoformat()}Z"
+ tags: ["{agent_name}", "mpm-framework"]
+ tools: {tools_list}
+ metadata:
+   deployment_type: "system"
+   converted_from: "yaml"
+ ---
+
+ """
+
+         return new_frontmatter + instructions
+
+     def _extract_yaml_field(self, yaml_content: str, field_name: str) -> str:
+         """
+         Extract a field value from YAML content.
+
+         Args:
+             yaml_content: YAML content string
+             field_name: Field name to extract
+
+         Returns:
+             Field value or None if not found
+         """
+         import re
+
+         try:
+             # Match field with quoted or unquoted values
+             pattern = rf'^{field_name}:\s*["\']?(.*?)["\']?\s*$'
+             match = re.search(pattern, yaml_content, re.MULTILINE)
+
+             if match:
+                 return match.group(1).strip()
+
+             # Try with alternative spacing patterns
+             pattern = rf'^{field_name}\s*:\s*(.+)$'
+             match = re.search(pattern, yaml_content, re.MULTILINE)
+
+             if match:
+                 value = match.group(1).strip()
+                 # Remove quotes if present
+                 if (value.startswith('"') and value.endswith('"')) or \
+                    (value.startswith("'") and value.endswith("'")):
+                     value = value[1:-1]
+                 return value
+
+         except Exception as e:
+             self.logger.warning(f"Error extracting YAML field '{field_name}': {e}")
+
+         return None
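As a rough sketch of the on-disk effect, using a hypothetical engineer.yaml in the target directory as the example: a successful conversion renames the original YAML to a .backup file and writes a new .md file carrying the generated frontmatter plus the original instructions; the YAML is only unlinked outright if the rename fails.

    <target_dir>/engineer.yaml          -> renamed away
    <target_dir>/engineer.yaml.backup   -> backup of the original YAML
    <target_dir>/engineer.md            -> frontmatter + original instructions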
@@ -264,7 +264,11 @@ class AgentProfileLoader(BaseService):
  tier_path = self.tier_paths[tier]
 
  # Try different file formats and naming conventions
+ # Check .md files first (Claude Code format), then fall back to YAML/JSON
  possible_files = [
+     tier_path / f"{agent_name}.md",
+     tier_path / f"{agent_name}_agent.md",
+     tier_path / f"{agent_name}-agent.md",
      tier_path / f"{agent_name}.yaml",
      tier_path / f"{agent_name}.yml",
      tier_path / f"{agent_name}.json",
@@ -290,16 +294,23 @@ class AgentProfileLoader(BaseService):
  content = file_path.read_text()
 
  # Parse based on file extension
- if file_path.suffix in ['.yaml', '.yml']:
+ if file_path.suffix == '.md':
+     # Parse markdown with YAML frontmatter
+     data, instructions = self._parse_markdown_with_frontmatter(content)
+ elif file_path.suffix in ['.yaml', '.yml']:
      data = yaml.safe_load(content)
+     instructions = data.get('instructions', '')
  elif file_path.suffix == '.json':
      data = json.loads(content)
+     instructions = data.get('instructions', '')
  else:
      # Try to parse as YAML first, then JSON
      try:
          data = yaml.safe_load(content)
+         instructions = data.get('instructions', '')
      except:
          data = json.loads(content)
+         instructions = data.get('instructions', '')
 
  # Create profile
  profile = AgentProfile(
@@ -308,7 +319,7 @@ class AgentProfileLoader(BaseService):
      description=data.get('description', ''),
      tier=tier,
      source_path=str(file_path),
-     instructions=data.get('instructions', ''),
+     instructions=instructions,
      capabilities=data.get('capabilities', []),
      constraints=data.get('constraints', []),
      metadata=data.get('metadata', {}),
@@ -329,6 +340,44 @@ class AgentProfileLoader(BaseService):
              error=str(e)
          )
 
+     def _parse_markdown_with_frontmatter(self, content: str) -> Tuple[Dict[str, Any], str]:
+         """
+         Parse markdown file with YAML frontmatter.
+
+         Args:
+             content: Markdown content with YAML frontmatter
+
+         Returns:
+             Tuple of (frontmatter_data, markdown_content)
+         """
+         import re
+
+         # Check if content starts with YAML frontmatter
+         if not content.strip().startswith('---'):
+             # No frontmatter, treat entire content as instructions
+             return {'name': 'unknown', 'description': 'No frontmatter found'}, content
+
+         # Split frontmatter and content
+         parts = re.split(r'^---\s*$', content, 2, re.MULTILINE)
+
+         if len(parts) < 3:
+             # Invalid frontmatter structure
+             return {'name': 'unknown', 'description': 'Invalid frontmatter'}, content
+
+         # Parse YAML frontmatter
+         frontmatter_text = parts[1].strip()
+         markdown_content = parts[2].strip()
+
+         try:
+             frontmatter_data = yaml.safe_load(frontmatter_text)
+             if not isinstance(frontmatter_data, dict):
+                 frontmatter_data = {'name': 'unknown', 'description': 'Invalid frontmatter format'}
+         except Exception as e:
+             logger.error(f"Error parsing YAML frontmatter: {e}")
+             frontmatter_data = {'name': 'unknown', 'description': f'YAML parse error: {e}'}
+
+         return frontmatter_data, markdown_content
+
      # ========================================================================
      # Profile Discovery
      # ========================================================================
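To illustrate the return shape of _parse_markdown_with_frontmatter, here is a hypothetical call (not part of the shipped tests), assuming loader is an AgentProfileLoader instance:

    content = '---\nname: qa\ndescription: "Qa Agent"\n---\n\nYou are the QA agent.'
    data, body = loader._parse_markdown_with_frontmatter(content)
    # data == {'name': 'qa', 'description': 'Qa Agent'}
    # body == 'You are the QA agent.'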
@@ -342,16 +391,19 @@ class AgentProfileLoader(BaseService):
      continue
 
  agents = []
- for file_path in tier_path.glob('*.{yaml,yml,json}'):
-     agent_name = file_path.stem
-     # Remove common suffixes
-     if agent_name.endswith('_agent'):
-         agent_name = agent_name[:-6]
-     elif agent_name.endswith('-agent'):
-         agent_name = agent_name[:-6]
-
-     if agent_name not in agents:
-         agents.append(agent_name)
+ # Check for .md files (Claude Code format) and YAML/JSON files
+ file_patterns = ['*.md', '*.yaml', '*.yml', '*.json']
+ for pattern in file_patterns:
+     for file_path in tier_path.glob(pattern):
+         agent_name = file_path.stem
+         # Remove common suffixes
+         if agent_name.endswith('_agent'):
+             agent_name = agent_name[:-6]
+         elif agent_name.endswith('-agent'):
+             agent_name = agent_name[:-6]
+
+         if agent_name not in agents:
+             agents.append(agent_name)
 
  discovered[tier] = agents
  logger.debug(f"Discovered {len(agents)} agents in {tier.value} tier")
@@ -294,19 +294,50 @@ class AgentRegistry:
  try:
      content = file_path.read_text()
 
-     # Try to parse as JSON/YAML for structured data
-     if file_path.suffix in ['.json', '.yaml', '.yml']:
+     # Try to parse as JSON/YAML/MD for structured data
+     if file_path.suffix in ['.md', '.json', '.yaml', '.yml']:
          try:
              if file_path.suffix == '.json':
                  data = json.loads(content)
+                 description = data.get('description', '')
+                 version = data.get('version', '0.0.0')
+                 capabilities = data.get('capabilities', [])
+                 metadata = data.get('metadata', {})
+             elif file_path.suffix == '.md':
+                 # Parse markdown with YAML frontmatter
+                 import yaml
+                 import re
+
+                 # Check for YAML frontmatter
+                 if content.strip().startswith('---'):
+                     parts = re.split(r'^---\s*$', content, 2, re.MULTILINE)
+                     if len(parts) >= 3:
+                         frontmatter_text = parts[1].strip()
+                         data = yaml.safe_load(frontmatter_text)
+                         description = data.get('description', '')
+                         version = data.get('version', '0.0.0')
+                         capabilities = data.get('tools', []) # Tools in .md format
+                         metadata = data.get('metadata', {})
+                     else:
+                         # No frontmatter, use defaults
+                         description = f"{file_path.stem} agent"
+                         version = '1.0.0'
+                         capabilities = []
+                         metadata = {}
+                 else:
+                     # No frontmatter, use defaults
+                     description = f"{file_path.stem} agent"
+                     version = '1.0.0'
+                     capabilities = []
+                     metadata = {}
              else:
+                 # YAML files
                  import yaml
                  data = yaml.safe_load(content)
-
-             description = data.get('description', '')
-             version = data.get('version', '0.0.0')
-             capabilities = data.get('capabilities', [])
-             metadata = data.get('metadata', {})
+                 description = data.get('description', '')
+                 version = data.get('version', '0.0.0')
+                 capabilities = data.get('capabilities', [])
+                 metadata = data.get('metadata', {})
          except Exception:
              pass
 
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: claude-mpm
- Version: 3.4.17
+ Version: 3.4.22
  Summary: Claude Multi-agent Project Manager - Clean orchestration with ticket management
  Home-page: https://github.com/bobmatnyc/claude-mpm
  Author: Claude MPM Team
@@ -36,6 +36,7 @@ Requires-Dist: python-socketio>=5.11.0
  Requires-Dist: aiohttp>=3.9.0
  Requires-Dist: aiohttp-cors>=0.8.0
  Requires-Dist: python-engineio>=4.8.0
+ Requires-Dist: urwid>=2.1.0
  Provides-Extra: dev
  Requires-Dist: pytest>=7.0; extra == "dev"
  Requires-Dist: pytest-asyncio; extra == "dev"
@@ -1,4 +1,5 @@
- claude_mpm/__init__.py,sha256=uDX48EOBrmJbY8Xv3bBpd8kibulIlmZv5jmczAjHNb8,648
+ claude_mpm/VERSION,sha256=7KsmcVU6D1htEeqmCXAzqOZqH7HnDus2dyMuTm9va8Y,6
+ claude_mpm/__init__.py,sha256=ix_J0PHZBz37nVBDEYJmLpwnURlWuBKKQ8rK_00TFpk,964
  claude_mpm/__main__.py,sha256=8IcM9tEbTqSN_er04eKTPX3AGo6qzRiTnPI7KfIf7rw,641
  claude_mpm/constants.py,sha256=yOf-82f1HH6pL19dB3dWPUqU09dnXuAx3kDh3xWpc1U,4526
  claude_mpm/deployment_paths.py,sha256=JO7-fhhp_AkVB7ZssggHDBbee-r2sokpkqjoqnQLTmM,9073
@@ -16,16 +17,16 @@ claude_mpm/agents/system_agent_config.py,sha256=Lke4FFjU0Vq3LLo4O7KvtHxadP7agAwC
  claude_mpm/agents/backups/INSTRUCTIONS.md,sha256=tdekngpZ5RjECYZosOaDSBmXPZsVvZcwDQEmmlw7fOQ,14268
  claude_mpm/agents/schema/agent_schema.json,sha256=7zuSk4VfBNTlQN33AkfJp0Y1GltlviwengIM0mb7dGg,8741
  claude_mpm/agents/templates/__init__.py,sha256=7UyIChghCnkrDctvmCRYr0Wrnn8Oj-eCdgL0KpFy1Mo,2668
- claude_mpm/agents/templates/data_engineer.json,sha256=Z63nwhO3Bo3sUjSc4O0yGVFbJVdCmMLAgcBjKJmfc8Y,8793
- claude_mpm/agents/templates/documentation.json,sha256=z5ApzJYOty5dw5yidNxhwX7QU8FICRAzmzD1xdxPkCI,6224
- claude_mpm/agents/templates/engineer.json,sha256=Qt9mJjbVM0wH9GE6kqAhGDRVmLhWbs0fwGW1IoluTZ8,12474
- claude_mpm/agents/templates/ops.json,sha256=b-LRqYRWjMZnXKd8SMPXDLCq6-nRAmCHN60IlJctjXM,6488
- claude_mpm/agents/templates/pm.json,sha256=1x68Iyq2lN8J7FPxcBwx1R13k8z6gr4zg7GLB6dICFc,7451
- claude_mpm/agents/templates/qa.json,sha256=UmNbeONvnzSvHFB3GUXwnN8n-TF5wgrk0JaAkYwOvfY,6636
- claude_mpm/agents/templates/research.json,sha256=10XO-W7cV_SGP16SHboagjbNUlKUoGfpbrjrI2tiSWU,13591
- claude_mpm/agents/templates/security.json,sha256=DkiB98uJf5mo5nP7EIhu8hhsvwGhOnR_FA60EJGuNBk,6741
- claude_mpm/agents/templates/test_integration.json,sha256=QqJtUABq23MlkldAaomTviopOvM7hqxNWbaYbUVCJpk,7620
- claude_mpm/agents/templates/version_control.json,sha256=H9GzDk8Ys8KmOEgfTWGr1GakKFscbd_907ObmAqzlzc,6535
+ claude_mpm/agents/templates/data_engineer.json,sha256=LSdncxt1xeW-6ZEQoe8fhl2f4m6jqef1fpwNoxE7hOs,8791
+ claude_mpm/agents/templates/documentation.json,sha256=uKH1kFF1Z4aDnsFuE4bbq3c85CBcVD-QbYXB8Yp9YPI,6226
+ claude_mpm/agents/templates/engineer.json,sha256=7gYcgJUNC7MhBprlHN9GVtVJ90NtGgp7QuGBDZIhgm0,12472
+ claude_mpm/agents/templates/ops.json,sha256=BDNnsxEcErJSr737X38BzRhvIjLOS1YYeWh9upacm-I,6486
+ claude_mpm/agents/templates/pm.json,sha256=uBQW44G35LuIkaVUAT411trUuyIRm0pAgmoIpXfHDWw,10069
+ claude_mpm/agents/templates/qa.json,sha256=VUOV_yO4l247LBVFAH6FnFNrgNhAEpNA-ERc0PkMoLg,6638
+ claude_mpm/agents/templates/research.json,sha256=FxTOKu8MG--h96rvlUSuLRAiWMgMnoJfEGSH8flPz1Q,13593
+ claude_mpm/agents/templates/security.json,sha256=wBTJEzPDukG1N7xLGe7A4sqvGDUMRYTDf_RxppwsFpI,6743
+ claude_mpm/agents/templates/test_integration.json,sha256=uVRfoLVHh5OaqkBOxpBOZsGy0IUOQ5dwqvSLm6ncDDQ,7622
+ claude_mpm/agents/templates/version_control.json,sha256=uNlPo4CPa0xQoTYjxER9YiHGM9F8jCbLUW04PJKgpi4,6537
  claude_mpm/agents/templates/.claude-mpm/memories/README.md,sha256=gDuLkzgcELaaoEB5Po70F0qabTu11vBi1PnUrYCK3fw,1098
  claude_mpm/agents/templates/backup/data_engineer_agent_20250726_234551.json,sha256=lLso4RHXVTQmX4A1XwF84kT59zZDblPO1xCgBj4S4x8,5060
  claude_mpm/agents/templates/backup/documentation_agent_20250726_234551.json,sha256=snfJW2yW9aMv9ldCSIWW7zwnyoQRx5u7xLMkNlfus9I,2258
@@ -35,7 +36,7 @@ claude_mpm/agents/templates/backup/qa_agent_20250726_234551.json,sha256=_FHWnUeh
  claude_mpm/agents/templates/backup/research_agent_20250726_234551.json,sha256=o4n_sqSbjnsFRELB2q501vgwm-o2tQNLJLYvnVP9LWU,5629
  claude_mpm/agents/templates/backup/security_agent_20250726_234551.json,sha256=l5YuD-27CxKSOsRLv0bDY_tCZyds0yGbeizLb8paeFY,2322
  claude_mpm/agents/templates/backup/version_control_agent_20250726_234551.json,sha256=too38RPTLJ9HutCMn0nfmEdCj2me241dx5tUYDFtu94,2143
- claude_mpm/cli/__init__.py,sha256=hJ5y5HzRqx6EkCaPYnH0h9amv7hzKXlEUdLVYbaKTPA,5577
+ claude_mpm/cli/__init__.py,sha256=XH1JwRjUo6o3swooSCrQiZFbR_yQFgEmWEXwY_UqGV8,5923
  claude_mpm/cli/parser.py,sha256=ajdlusfbfcY44756pdrkfROEVlTaVJyEBDJup78Q-yE,18270
  claude_mpm/cli/utils.py,sha256=k_EHLcjDAzYhDeVeWvE-vqvHsEoG6Cc6Yk7fs3YoRVA,6022
  claude_mpm/cli/commands/__init__.py,sha256=kUtBjfTYZnfAL_4QEPCBtFg2nWgJ2cxCPzIIsiFURXM,567
@@ -136,14 +137,14 @@ claude_mpm/schemas/workflow_validator.py,sha256=qRgGodJoIZQaLfZ8OzWz3Y9eVNz3ckrQ
  claude_mpm/scripts/socketio_daemon.py,sha256=-vS7A55BQZCTbqcB7QAgWvZrcBqtIxcyriFcekgKIBU,9568
  claude_mpm/services/__init__.py,sha256=dcZ5U4xQlk-zpAy8CLTuEcXzKDfHT0KdJf3bYSmZ1BM,1904
  claude_mpm/services/agent_capabilities_generator.py,sha256=hWG0zV2InmzrDMxSbQzjVBBTzEaxg0bFxl8tmTMJ8qA,6565
- claude_mpm/services/agent_deployment.py,sha256=DtK1BX2yCrutUkQdVPD01mYHm-ya36l3EPOnEcaDfog,67961
+ claude_mpm/services/agent_deployment.py,sha256=y1K9rI0ieEh3JfjRsURKBCziSDpFzQ9OCY7ORmab8CY,76756
  claude_mpm/services/agent_lifecycle_manager.py,sha256=fWggWu5rT7FkDQrRHyw05Y4KaNN9cXeaCinsymPJwM4,50127
  claude_mpm/services/agent_management_service.py,sha256=eX5n6w17b9urcogVdr4V-kXcuo7yyjORTrIihjF8PeQ,22853
  claude_mpm/services/agent_memory_manager.py,sha256=2w27vwsjuHU2s78IhQhSwlyYSNHe3wfOYcmkyJ9Vb2Y,58846
  claude_mpm/services/agent_modification_tracker.py,sha256=uxELrXtFt5Xlv0mhRbq5ynagEowczTRrv3mAp-aRZFc,34519
  claude_mpm/services/agent_persistence_service.py,sha256=B_Vz43zCKWq47zWkoibcia-Qwn2y3gARu7MV5Cpiptc,2893
- claude_mpm/services/agent_profile_loader.py,sha256=4D1Xj0vgqV8wN7Y3r8lijh7ghy5cVGU5t5s931sVqGc,23133
- claude_mpm/services/agent_registry.py,sha256=vn8CEW0vppj_0EY2NofmNRZEnpV70mlWiX2kAViFDRg,24374
+ claude_mpm/services/agent_profile_loader.py,sha256=4de_hSk94fEzWT1GyB5H1F8o7kQOktv3uaYZMYV2rwA,25512
+ claude_mpm/services/agent_registry.py,sha256=dX9BrKX1zCAH0Koc-FKOxqcmZgbaqxjlkk7HdRvAfB8,26181
  claude_mpm/services/agent_versioning.py,sha256=y4SiKGKdTn-9_Ht6nGlmKGJW6MLgEHZUy5SR1OPTZZM,1108
  claude_mpm/services/base_agent_manager.py,sha256=WEcfzdMaFXmXUSoEYEPNeGu8dvqjIv53zyUU0ITrhsM,14987
  claude_mpm/services/deployed_agent_discovery.py,sha256=GoXhho5EBz_FZDDl4xUWW_BnP3hfymbV1ePorRrhO_U,9443
@@ -213,9 +214,9 @@ claude_mpm/utils/path_operations.py,sha256=6pLMnAWBVzHkgp6JyQHmHbGD-dWn-nX21yV4E
  claude_mpm/utils/paths.py,sha256=Xv0SZWdZRkRjN9e6clBcA165ya00GNQxt7SwMz51tfA,10153
  claude_mpm/validation/__init__.py,sha256=bJ19g9lnk7yIjtxzN8XPegp87HTFBzCrGQOpFgRTf3g,155
  claude_mpm/validation/agent_validator.py,sha256=GCA2b2rKhKDeaNyUqWxTiWIs3sDdWjD9cgOFRp9K6ic,18227
- claude_mpm-3.4.17.dist-info/licenses/LICENSE,sha256=cSdDfXjoTVhstrERrqme4zgxAu4GubU22zVEHsiXGxs,1071
- claude_mpm-3.4.17.dist-info/METADATA,sha256=CwZn-qu7F_zpPVcgCgT69w4hm3u2aFxCs4eXqWGnUDU,6499
- claude_mpm-3.4.17.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- claude_mpm-3.4.17.dist-info/entry_points.txt,sha256=3_d7wLrg9sRmQ1SfrFGWoTNL8Wrd6lQb2XVSYbTwRIg,324
- claude_mpm-3.4.17.dist-info/top_level.txt,sha256=1nUg3FEaBySgm8t-s54jK5zoPnu3_eY6EP6IOlekyHA,11
- claude_mpm-3.4.17.dist-info/RECORD,,
+ claude_mpm-3.4.22.dist-info/licenses/LICENSE,sha256=cSdDfXjoTVhstrERrqme4zgxAu4GubU22zVEHsiXGxs,1071
+ claude_mpm-3.4.22.dist-info/METADATA,sha256=PCbQcvJUwq2VzyO4QE1O0aiSbumbR1jF4mud4hPFlMA,6527
+ claude_mpm-3.4.22.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ claude_mpm-3.4.22.dist-info/entry_points.txt,sha256=3_d7wLrg9sRmQ1SfrFGWoTNL8Wrd6lQb2XVSYbTwRIg,324
+ claude_mpm-3.4.22.dist-info/top_level.txt,sha256=1nUg3FEaBySgm8t-s54jK5zoPnu3_eY6EP6IOlekyHA,11
+ claude_mpm-3.4.22.dist-info/RECORD,,