claude-mpm 4.17.1__py3-none-any.whl → 4.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of claude-mpm might be problematic. Click here for more details.

Files changed (27) hide show
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/BASE_PM.md +48 -17
  3. claude_mpm/agents/agent_loader.py +4 -4
  4. claude_mpm/agents/templates/svelte-engineer.json +225 -0
  5. claude_mpm/config/agent_config.py +2 -2
  6. claude_mpm/core/factories.py +1 -1
  7. claude_mpm/core/optimized_agent_loader.py +3 -3
  8. claude_mpm/hooks/claude_hooks/response_tracking.py +35 -1
  9. claude_mpm/models/resume_log.py +340 -0
  10. claude_mpm/services/agents/auto_config_manager.py +1 -1
  11. claude_mpm/services/agents/deployment/agent_configuration_manager.py +1 -1
  12. claude_mpm/services/agents/deployment/agent_record_service.py +1 -1
  13. claude_mpm/services/agents/deployment/async_agent_deployment.py +1 -1
  14. claude_mpm/services/agents/deployment/local_template_deployment.py +1 -1
  15. claude_mpm/services/agents/local_template_manager.py +1 -1
  16. claude_mpm/services/core/path_resolver.py +1 -1
  17. claude_mpm/services/infrastructure/resume_log_generator.py +439 -0
  18. claude_mpm/services/mcp_config_manager.py +2 -2
  19. claude_mpm/services/session_manager.py +205 -1
  20. claude_mpm/services/unified/deployment_strategies/local.py +1 -1
  21. claude_mpm/utils/agent_dependency_loader.py +2 -2
  22. {claude_mpm-4.17.1.dist-info → claude_mpm-4.18.0.dist-info}/METADATA +68 -1
  23. {claude_mpm-4.17.1.dist-info → claude_mpm-4.18.0.dist-info}/RECORD +27 -24
  24. {claude_mpm-4.17.1.dist-info → claude_mpm-4.18.0.dist-info}/WHEEL +0 -0
  25. {claude_mpm-4.17.1.dist-info → claude_mpm-4.18.0.dist-info}/entry_points.txt +0 -0
  26. {claude_mpm-4.17.1.dist-info → claude_mpm-4.18.0.dist-info}/licenses/LICENSE +0 -0
  27. {claude_mpm-4.17.1.dist-info → claude_mpm-4.18.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,340 @@
1
+ """Resume Log Data Model.
2
+
3
+ This module defines the data structure for session resume logs that enable
4
+ seamless context restoration when Claude hits token limits.
5
+
6
+ Design Philosophy:
7
+ - Target 10k tokens maximum per resume log
8
+ - Human-readable markdown format
9
+ - Structured sections with token budgets
10
+ - Optimized for Claude consumption on session resume
11
+ """
12
+
13
+ from dataclasses import dataclass, field
14
+ from datetime import datetime, timezone
15
+ from pathlib import Path
16
+ from typing import Any, Dict, List, Optional
17
+
18
+ from claude_mpm.core.logging_utils import get_logger
19
+
20
+ logger = get_logger(__name__)
21
+
22
+
23
@dataclass
class ContextMetrics:
    """Context window usage metrics."""

    total_budget: int = 200000
    used_tokens: int = 0
    remaining_tokens: int = 0
    percentage_used: float = 0.0
    stop_reason: Optional[str] = None
    model: str = "claude-sonnet-4.5"
    session_id: str = ""
    timestamp: str = field(
        default_factory=lambda: datetime.now(timezone.utc).isoformat()
    )

    # Field names in the order they are serialized; shared by to_dict so the
    # output key order always mirrors the dataclass declaration order.
    _SERIALIZED_FIELDS = (
        "total_budget",
        "used_tokens",
        "remaining_tokens",
        "percentage_used",
        "stop_reason",
        "model",
        "session_id",
        "timestamp",
    )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize every metric field into a plain dictionary."""
        return {name: getattr(self, name) for name in self._SERIALIZED_FIELDS}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ContextMetrics":
        """Build a ContextMetrics from *data*, filling defaults for absent keys.

        Keys present in *data* win over the defaults; unknown keys are ignored.
        """
        defaults: Dict[str, Any] = {
            "total_budget": 200000,
            "used_tokens": 0,
            "remaining_tokens": 0,
            "percentage_used": 0.0,
            "stop_reason": None,
            "model": "claude-sonnet-4.5",
            "session_id": "",
            "timestamp": datetime.now(timezone.utc).isoformat(),
        }
        supplied = {key: value for key, value in data.items() if key in defaults}
        return cls(**{**defaults, **supplied})
64
+
65
+
66
+ @dataclass
67
+ class ResumeLog:
68
+ """Resume log containing all information needed to restore session context.
69
+
70
+ Token Budget Distribution (10k tokens total):
71
+ - Context Metrics: 500 tokens
72
+ - Mission Summary: 1,000 tokens
73
+ - Accomplishments: 2,000 tokens
74
+ - Key Findings: 2,500 tokens
75
+ - Decisions & Rationale: 1,500 tokens
76
+ - Next Steps: 1,500 tokens
77
+ - Critical Context: 1,000 tokens
78
+ """
79
+
80
+ # Session identification
81
+ session_id: str
82
+ previous_session_id: Optional[str] = None
83
+ created_at: str = field(
84
+ default_factory=lambda: datetime.now(timezone.utc).isoformat()
85
+ )
86
+
87
+ # Context metrics
88
+ context_metrics: ContextMetrics = field(default_factory=ContextMetrics)
89
+
90
+ # Core content sections (with token budgets)
91
+ mission_summary: str = "" # 1,000 tokens - What was the overall goal?
92
+ accomplishments: List[str] = field(
93
+ default_factory=list
94
+ ) # 2,000 tokens - What was completed?
95
+ key_findings: List[str] = field(
96
+ default_factory=list
97
+ ) # 2,500 tokens - What was discovered?
98
+ decisions_made: List[Dict[str, str]] = field(
99
+ default_factory=list
100
+ ) # 1,500 tokens - What choices were made and why?
101
+ next_steps: List[str] = field(
102
+ default_factory=list
103
+ ) # 1,500 tokens - What needs to happen next?
104
+ critical_context: Dict[str, Any] = field(
105
+ default_factory=dict
106
+ ) # 1,000 tokens - Essential state/data
107
+
108
+ # Metadata
109
+ files_modified: List[str] = field(default_factory=list)
110
+ agents_used: Dict[str, int] = field(default_factory=dict)
111
+ errors_encountered: List[str] = field(default_factory=list)
112
+ warnings: List[str] = field(default_factory=list)
113
+
114
+ def to_markdown(self) -> str:
115
+ """Generate markdown format for Claude consumption.
116
+
117
+ Returns:
118
+ Markdown-formatted resume log
119
+ """
120
+ sections = []
121
+
122
+ # Header
123
+ sections.append(f"# Session Resume Log: {self.session_id}\n")
124
+ sections.append(f"**Created**: {self.created_at}")
125
+ if self.previous_session_id:
126
+ sections.append(f"**Previous Session**: {self.previous_session_id}")
127
+ sections.append("")
128
+
129
+ # Context Metrics (500 tokens)
130
+ sections.append("## Context Metrics\n")
131
+ sections.append(f"- **Model**: {self.context_metrics.model}")
132
+ sections.append(
133
+ f"- **Tokens Used**: {self.context_metrics.used_tokens:,} / {self.context_metrics.total_budget:,}"
134
+ )
135
+ sections.append(
136
+ f"- **Percentage**: {self.context_metrics.percentage_used:.1f}%"
137
+ )
138
+ sections.append(
139
+ f"- **Remaining**: {self.context_metrics.remaining_tokens:,} tokens"
140
+ )
141
+ if self.context_metrics.stop_reason:
142
+ sections.append(f"- **Stop Reason**: {self.context_metrics.stop_reason}")
143
+ sections.append("")
144
+
145
+ # Mission Summary (1,000 tokens)
146
+ sections.append("## Mission Summary\n")
147
+ sections.append(
148
+ self.mission_summary
149
+ if self.mission_summary
150
+ else "_No mission summary provided_"
151
+ )
152
+ sections.append("")
153
+
154
+ # Accomplishments (2,000 tokens)
155
+ sections.append("## Accomplishments\n")
156
+ if self.accomplishments:
157
+ for i, item in enumerate(self.accomplishments, 1):
158
+ sections.append(f"{i}. {item}")
159
+ else:
160
+ sections.append("_No accomplishments recorded_")
161
+ sections.append("")
162
+
163
+ # Key Findings (2,500 tokens)
164
+ sections.append("## Key Findings\n")
165
+ if self.key_findings:
166
+ for i, finding in enumerate(self.key_findings, 1):
167
+ sections.append(f"{i}. {finding}")
168
+ else:
169
+ sections.append("_No key findings recorded_")
170
+ sections.append("")
171
+
172
+ # Decisions & Rationale (1,500 tokens)
173
+ sections.append("## Decisions & Rationale\n")
174
+ if self.decisions_made:
175
+ for i, decision in enumerate(self.decisions_made, 1):
176
+ decision_text = decision.get("decision", "")
177
+ rationale = decision.get("rationale", "")
178
+ sections.append(f"{i}. **Decision**: {decision_text}")
179
+ if rationale:
180
+ sections.append(f" **Rationale**: {rationale}")
181
+ else:
182
+ sections.append("_No decisions recorded_")
183
+ sections.append("")
184
+
185
+ # Next Steps (1,500 tokens)
186
+ sections.append("## Next Steps\n")
187
+ if self.next_steps:
188
+ for i, step in enumerate(self.next_steps, 1):
189
+ sections.append(f"{i}. {step}")
190
+ else:
191
+ sections.append("_No next steps defined_")
192
+ sections.append("")
193
+
194
+ # Critical Context (1,000 tokens)
195
+ sections.append("## Critical Context\n")
196
+ if self.critical_context:
197
+ for key, value in self.critical_context.items():
198
+ sections.append(f"- **{key}**: {value}")
199
+ else:
200
+ sections.append("_No critical context preserved_")
201
+ sections.append("")
202
+
203
+ # Metadata
204
+ sections.append("## Session Metadata\n")
205
+ if self.files_modified:
206
+ sections.append(f"**Files Modified** ({len(self.files_modified)}):")
207
+ for file in self.files_modified[:20]: # Limit to first 20
208
+ sections.append(f"- {file}")
209
+ if len(self.files_modified) > 20:
210
+ sections.append(f"- ... and {len(self.files_modified) - 20} more")
211
+ sections.append("")
212
+
213
+ if self.agents_used:
214
+ sections.append("**Agents Used**:")
215
+ for agent, count in self.agents_used.items():
216
+ sections.append(f"- {agent}: {count} delegations")
217
+ sections.append("")
218
+
219
+ if self.errors_encountered:
220
+ sections.append(f"**Errors** ({len(self.errors_encountered)}):")
221
+ for error in self.errors_encountered[:5]: # Limit to first 5
222
+ sections.append(f"- {error}")
223
+ sections.append("")
224
+
225
+ if self.warnings:
226
+ sections.append(f"**Warnings** ({len(self.warnings)}):")
227
+ for warning in self.warnings[:5]: # Limit to first 5
228
+ sections.append(f"- {warning}")
229
+ sections.append("")
230
+
231
+ return "\n".join(sections)
232
+
233
+ def to_dict(self) -> Dict[str, Any]:
234
+ """Convert to dictionary for JSON serialization."""
235
+ return {
236
+ "session_id": self.session_id,
237
+ "previous_session_id": self.previous_session_id,
238
+ "created_at": self.created_at,
239
+ "context_metrics": self.context_metrics.to_dict(),
240
+ "mission_summary": self.mission_summary,
241
+ "accomplishments": self.accomplishments,
242
+ "key_findings": self.key_findings,
243
+ "decisions_made": self.decisions_made,
244
+ "next_steps": self.next_steps,
245
+ "critical_context": self.critical_context,
246
+ "files_modified": self.files_modified,
247
+ "agents_used": self.agents_used,
248
+ "errors_encountered": self.errors_encountered,
249
+ "warnings": self.warnings,
250
+ }
251
+
252
+ @classmethod
253
+ def from_dict(cls, data: Dict[str, Any]) -> "ResumeLog":
254
+ """Create from dictionary."""
255
+ context_metrics_data = data.get("context_metrics", {})
256
+ context_metrics = ContextMetrics.from_dict(context_metrics_data)
257
+
258
+ return cls(
259
+ session_id=data.get("session_id", ""),
260
+ previous_session_id=data.get("previous_session_id"),
261
+ created_at=data.get("created_at", datetime.now(timezone.utc).isoformat()),
262
+ context_metrics=context_metrics,
263
+ mission_summary=data.get("mission_summary", ""),
264
+ accomplishments=data.get("accomplishments", []),
265
+ key_findings=data.get("key_findings", []),
266
+ decisions_made=data.get("decisions_made", []),
267
+ next_steps=data.get("next_steps", []),
268
+ critical_context=data.get("critical_context", {}),
269
+ files_modified=data.get("files_modified", []),
270
+ agents_used=data.get("agents_used", {}),
271
+ errors_encountered=data.get("errors_encountered", []),
272
+ warnings=data.get("warnings", []),
273
+ )
274
+
275
+ def save(self, storage_dir: Optional[Path] = None) -> Path:
276
+ """Save resume log to markdown file.
277
+
278
+ Args:
279
+ storage_dir: Directory to save the log (default: .claude-mpm/resume-logs)
280
+
281
+ Returns:
282
+ Path to saved file
283
+ """
284
+ if storage_dir is None:
285
+ storage_dir = Path.home() / ".claude-mpm" / "resume-logs"
286
+
287
+ storage_dir.mkdir(parents=True, exist_ok=True)
288
+
289
+ # Generate filename
290
+ file_path = storage_dir / f"session-{self.session_id}.md"
291
+
292
+ try:
293
+ # Write markdown file
294
+ markdown_content = self.to_markdown()
295
+ file_path.write_text(markdown_content, encoding="utf-8")
296
+
297
+ logger.info(f"Resume log saved: {file_path}")
298
+ return file_path
299
+
300
+ except Exception as e:
301
+ logger.error(f"Failed to save resume log: {e}")
302
+ raise
303
+
304
+ @classmethod
305
+ def load(
306
+ cls, session_id: str, storage_dir: Optional[Path] = None
307
+ ) -> Optional["ResumeLog"]:
308
+ """Load resume log from file.
309
+
310
+ Args:
311
+ session_id: Session ID to load
312
+ storage_dir: Directory to load from (default: .claude-mpm/resume-logs)
313
+
314
+ Returns:
315
+ ResumeLog instance or None if not found
316
+ """
317
+ if storage_dir is None:
318
+ storage_dir = Path.home() / ".claude-mpm" / "resume-logs"
319
+
320
+ file_path = storage_dir / f"session-{session_id}.md"
321
+
322
+ if not file_path.exists():
323
+ logger.debug(f"Resume log not found: {file_path}")
324
+ return None
325
+
326
+ try:
327
+ # For now, we just return the markdown content
328
+ # In the future, could parse markdown back to structured data
329
+ _ = file_path.read_text(encoding="utf-8")
330
+ logger.info(f"Resume log loaded: {file_path}")
331
+
332
+ # Return a basic ResumeLog with the markdown content embedded
333
+ return cls(
334
+ session_id=session_id,
335
+ mission_summary=f"Loaded from previous session. See full context in {file_path}",
336
+ )
337
+
338
+ except Exception as e:
339
+ logger.error(f"Failed to load resume log: {e}")
340
+ return None
@@ -678,7 +678,7 @@ class AutoConfigManagerService(BaseService, IAutoConfigManager):
678
678
  agent_id, agent_name, success=True
679
679
  )
680
680
  deployed.append(agent_id)
681
- self.logger.info(f"Successfully deployed agent: {agent_id}")
681
+ self.logger.debug(f"Successfully deployed agent: {agent_id}")
682
682
 
683
683
  except Exception as e:
684
684
  self.logger.error(
@@ -71,7 +71,7 @@ class AgentConfigurationManager:
71
71
  # Cache the result
72
72
  self._base_agent_cache = (base_agent_data, base_agent_version)
73
73
 
74
- self.logger.info(f"Loaded base agent from {self.base_agent_path}")
74
+ self.logger.debug(f"Loaded base agent from {self.base_agent_path}")
75
75
  return self._base_agent_cache
76
76
 
77
77
  except Exception as e:
@@ -107,7 +107,7 @@ class AgentRecordService(BaseService):
107
107
  record = self._deserialize_record(record_data)
108
108
  records[agent_name] = record
109
109
 
110
- self.logger.info(f"Loaded {len(records)} agent records")
110
+ self.logger.debug(f"Loaded {len(records)} agent records")
111
111
  else:
112
112
  self.logger.debug("No existing records file found")
113
113
 
@@ -224,7 +224,7 @@ class AsyncAgentDeploymentService:
224
224
 
225
225
  elapsed = (time.time() - start_time) * 1000
226
226
  self._metrics["time_saved_ms"] += max(0, (len(directories) * 75) - elapsed)
227
- self.logger.info(f"Discovered agents in {elapsed:.1f}ms (parallel scan)")
227
+ self.logger.debug(f"Discovered agents in {elapsed:.1f}ms (parallel scan)")
228
228
 
229
229
  return discovered
230
230
 
@@ -90,7 +90,7 @@ class LocalTemplateDeploymentService:
90
90
  logger.error(f"Failed to deploy local template {agent_id}: {e}")
91
91
  results["errors"].append(f"{agent_id}: {e}")
92
92
 
93
- logger.info(
93
+ logger.debug(
94
94
  f"Local template deployment: deployed={len(results['deployed'])}, "
95
95
  f"updated={len(results['updated'])}, skipped={len(results['skipped'])}, "
96
96
  f"errors={len(results['errors'])}"
@@ -182,7 +182,7 @@ class LocalAgentTemplateManager:
182
182
  self._discover_templates_in_dir(self.user_agents_dir, "user")
183
183
 
184
184
  self._cache_valid = True
185
- logger.info(f"Discovered {len(self._template_cache)} local agent templates")
185
+ logger.debug(f"Discovered {len(self._template_cache)} local agent templates")
186
186
 
187
187
  return self._template_cache
188
188
 
@@ -281,7 +281,7 @@ class PathResolver(IPathResolver):
281
281
 
282
282
  if agents_dir and agents_dir.exists():
283
283
  discovered_agents_dir = agents_dir
284
- self.logger.info(f"Using custom agents directory: {discovered_agents_dir}")
284
+ self.logger.debug(f"Using custom agents directory: {discovered_agents_dir}")
285
285
  elif framework_path and framework_path != Path("__PACKAGED__"):
286
286
  # Prioritize templates directory over main agents directory
287
287
  templates_dir = (