claude-mpm 4.17.1__py3-none-any.whl → 4.18.1__py3-none-any.whl

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.

Potentially problematic release.


This version of claude-mpm might be problematic.

Files changed (28)
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/BASE_PM.md +48 -17
  3. claude_mpm/agents/agent_loader.py +4 -4
  4. claude_mpm/agents/templates/svelte-engineer.json +225 -0
  5. claude_mpm/config/agent_config.py +2 -2
  6. claude_mpm/core/factories.py +1 -1
  7. claude_mpm/core/optimized_agent_loader.py +3 -3
  8. claude_mpm/hooks/claude_hooks/response_tracking.py +35 -1
  9. claude_mpm/models/resume_log.py +340 -0
  10. claude_mpm/services/agents/auto_config_manager.py +1 -1
  11. claude_mpm/services/agents/deployment/agent_configuration_manager.py +1 -1
  12. claude_mpm/services/agents/deployment/agent_record_service.py +1 -1
  13. claude_mpm/services/agents/deployment/agent_validator.py +17 -1
  14. claude_mpm/services/agents/deployment/async_agent_deployment.py +1 -1
  15. claude_mpm/services/agents/deployment/local_template_deployment.py +1 -1
  16. claude_mpm/services/agents/local_template_manager.py +1 -1
  17. claude_mpm/services/core/path_resolver.py +1 -1
  18. claude_mpm/services/infrastructure/resume_log_generator.py +439 -0
  19. claude_mpm/services/mcp_config_manager.py +2 -2
  20. claude_mpm/services/session_manager.py +205 -1
  21. claude_mpm/services/unified/deployment_strategies/local.py +1 -1
  22. claude_mpm/utils/agent_dependency_loader.py +2 -2
  23. {claude_mpm-4.17.1.dist-info → claude_mpm-4.18.1.dist-info}/METADATA +68 -1
  24. {claude_mpm-4.17.1.dist-info → claude_mpm-4.18.1.dist-info}/RECORD +28 -25
  25. {claude_mpm-4.17.1.dist-info → claude_mpm-4.18.1.dist-info}/WHEEL +0 -0
  26. {claude_mpm-4.17.1.dist-info → claude_mpm-4.18.1.dist-info}/entry_points.txt +0 -0
  27. {claude_mpm-4.17.1.dist-info → claude_mpm-4.18.1.dist-info}/licenses/LICENSE +0 -0
  28. {claude_mpm-4.17.1.dist-info → claude_mpm-4.18.1.dist-info}/top_level.txt +0 -0
claude_mpm/services/infrastructure/resume_log_generator.py (new file)
@@ -0,0 +1,439 @@
+"""Resume Log Generator Service.
+
+Automatically generates session resume logs when approaching or hitting token limits.
+Integrates with session management and response tracking infrastructure.
+
+Triggers:
+- model_context_window_exceeded (stop_reason)
+- Manual pause command
+- 95% token threshold reached
+- Session end with high token usage (>85%)
+
+Design Principles:
+- Atomic file operations (via state_storage)
+- Non-blocking generation
+- Graceful degradation if generation fails
+- Integration with existing session state
+"""
+
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from claude_mpm.core.logging_utils import get_logger
+from claude_mpm.models.resume_log import ContextMetrics, ResumeLog
+from claude_mpm.storage.state_storage import StateStorage
+
+logger = get_logger(__name__)
+
+
+class ResumeLogGenerator:
+    """Service for generating session resume logs."""
+
+    def __init__(
+        self,
+        storage_dir: Optional[Path] = None,
+        config: Optional[Dict[str, Any]] = None,
+    ):
+        """Initialize resume log generator.
+
+        Args:
+            storage_dir: Directory for resume logs (default: .claude-mpm/resume-logs)
+            config: Configuration dictionary
+        """
+        self.storage_dir = storage_dir or Path.home() / ".claude-mpm" / "resume-logs"
+        self.storage_dir.mkdir(parents=True, exist_ok=True)
+
+        # State storage for atomic writes
+        self.state_storage = StateStorage(
+            storage_dir=self.storage_dir.parent / "storage"
+        )
+
+        # Configuration
+        self.config = config or {}
+        self.enabled = (
+            self.config.get("context_management", {})
+            .get("resume_logs", {})
+            .get("enabled", True)
+        )
+        self.auto_generate = (
+            self.config.get("context_management", {})
+            .get("resume_logs", {})
+            .get("auto_generate", True)
+        )
+        self.max_tokens = (
+            self.config.get("context_management", {})
+            .get("resume_logs", {})
+            .get("max_tokens", 10000)
+        )
+
+        # Trigger thresholds
+        thresholds = self.config.get("context_management", {}).get("thresholds", {})
+        self.threshold_caution = thresholds.get("caution", 0.70)
+        self.threshold_warning = thresholds.get("warning", 0.85)
+        self.threshold_critical = thresholds.get("critical", 0.95)
+
+        logger.info(
+            f"ResumeLogGenerator initialized (enabled={self.enabled}, auto_generate={self.auto_generate})"
+        )
+
+    def should_generate(
+        self,
+        stop_reason: Optional[str] = None,
+        token_usage_pct: Optional[float] = None,
+        manual_trigger: bool = False,
+    ) -> bool:
+        """Determine if resume log should be generated.
+
+        Args:
+            stop_reason: Claude API stop_reason
+            token_usage_pct: Current token usage percentage (0.0-1.0)
+            manual_trigger: Manual pause/stop command
+
+        Returns:
+            True if resume log should be generated
+        """
+        if not self.enabled or not self.auto_generate:
+            return manual_trigger  # Only generate on manual trigger if auto is disabled
+
+        # Trigger conditions
+        triggers = [
+            stop_reason == "max_tokens",
+            stop_reason == "model_context_window_exceeded",
+            manual_trigger,
+            token_usage_pct and token_usage_pct >= self.threshold_critical,
+            token_usage_pct
+            and token_usage_pct >= self.threshold_warning,  # Generate at 85% too
+        ]
+
+        should_gen = any(triggers)
+
+        if should_gen:
+            reason = "unknown"
+            if stop_reason:
+                reason = f"stop_reason={stop_reason}"
+            elif manual_trigger:
+                reason = "manual_trigger"
+            elif token_usage_pct:
+                reason = f"token_usage={token_usage_pct:.1%}"
+
+            logger.info(f"Resume log generation triggered: {reason}")
+
+        return should_gen
+
+    def generate_from_session_state(
+        self,
+        session_id: str,
+        session_state: Dict[str, Any],
+        stop_reason: Optional[str] = None,
+    ) -> Optional[ResumeLog]:
+        """Generate resume log from session state data.
+
+        Args:
+            session_id: Current session ID
+            session_state: Session state dictionary
+            stop_reason: Claude API stop_reason
+
+        Returns:
+            Generated ResumeLog or None if generation failed
+        """
+        try:
+            # Extract context metrics
+            context_data = session_state.get("context_metrics", {})
+            context_metrics = ContextMetrics(
+                total_budget=context_data.get("total_budget", 200000),
+                used_tokens=context_data.get("used_tokens", 0),
+                remaining_tokens=context_data.get("remaining_tokens", 0),
+                percentage_used=context_data.get("percentage_used", 0.0),
+                stop_reason=stop_reason or context_data.get("stop_reason"),
+                model=context_data.get("model", "claude-sonnet-4.5"),
+                session_id=session_id,
+            )
+
+            # Extract content from session state
+            mission_summary = session_state.get("mission_summary", "")
+            accomplishments = session_state.get("accomplishments", [])
+            key_findings = session_state.get("key_findings", [])
+            decisions_made = session_state.get("decisions_made", [])
+            next_steps = session_state.get("next_steps", [])
+            critical_context = session_state.get("critical_context", {})
+
+            # Extract metadata
+            files_modified = session_state.get("files_modified", [])
+            agents_used = session_state.get("agents_used", {})
+            errors_encountered = session_state.get("errors_encountered", [])
+            warnings = session_state.get("warnings", [])
+
+            # Create resume log
+            resume_log = ResumeLog(
+                session_id=session_id,
+                previous_session_id=session_state.get("previous_session_id"),
+                context_metrics=context_metrics,
+                mission_summary=mission_summary,
+                accomplishments=accomplishments,
+                key_findings=key_findings,
+                decisions_made=decisions_made,
+                next_steps=next_steps,
+                critical_context=critical_context,
+                files_modified=files_modified,
+                agents_used=agents_used,
+                errors_encountered=errors_encountered,
+                warnings=warnings,
+            )
+
+            logger.info(f"Generated resume log for session {session_id}")
+            return resume_log
+
+        except Exception as e:
+            logger.error(
+                f"Failed to generate resume log from session state: {e}", exc_info=True
+            )
+            return None
+
+    def generate_from_todo_list(
+        self,
+        session_id: str,
+        todos: List[Dict[str, Any]],
+        context_metrics: Optional[ContextMetrics] = None,
+    ) -> Optional[ResumeLog]:
+        """Generate resume log from TODO list.
+
+        Useful when session state is minimal but TODO list has rich information.
+
+        Args:
+            session_id: Current session ID
+            todos: TODO list items
+            context_metrics: Context metrics (optional)
+
+        Returns:
+            Generated ResumeLog or None if generation failed
+        """
+        try:
+            # Categorize todos
+            completed = [t for t in todos if t.get("status") == "completed"]
+            in_progress = [t for t in todos if t.get("status") == "in_progress"]
+            pending = [t for t in todos if t.get("status") == "pending"]
+
+            # Build accomplishments from completed tasks
+            accomplishments = [f"✓ {task['content']}" for task in completed]
+
+            # Build next steps from in-progress and pending
+            next_steps = []
+            for task in in_progress:
+                next_steps.append(f"[IN PROGRESS] {task['content']}")
+            for task in pending:
+                next_steps.append(f"[PENDING] {task['content']}")
+
+            # Create mission summary
+            mission_summary = f"Working on {len(todos)} tasks: {len(completed)} completed, {len(in_progress)} in progress, {len(pending)} pending."
+
+            # Use provided context metrics or create default
+            if context_metrics is None:
+                context_metrics = ContextMetrics(session_id=session_id)
+
+            # Create resume log
+            resume_log = ResumeLog(
+                session_id=session_id,
+                context_metrics=context_metrics,
+                mission_summary=mission_summary,
+                accomplishments=accomplishments,
+                next_steps=next_steps,
+                critical_context={
+                    "total_tasks": len(todos),
+                    "completed_tasks": len(completed),
+                    "in_progress_tasks": len(in_progress),
+                    "pending_tasks": len(pending),
+                },
+            )
+
+            logger.info(f"Generated resume log from TODO list for session {session_id}")
+            return resume_log
+
+        except Exception as e:
+            logger.error(
+                f"Failed to generate resume log from TODO list: {e}", exc_info=True
+            )
+            return None
+
+    def save_resume_log(self, resume_log: ResumeLog) -> Optional[Path]:
+        """Save resume log to storage.
+
+        Args:
+            resume_log: ResumeLog instance to save
+
+        Returns:
+            Path to saved file or None if save failed
+        """
+        try:
+            # Save as markdown (primary format)
+            md_path = resume_log.save(storage_dir=self.storage_dir)
+
+            # Also save as JSON for programmatic access
+            json_path = self.storage_dir / f"session-{resume_log.session_id}.json"
+            self.state_storage.write_json(
+                data=resume_log.to_dict(),
+                file_path=json_path,
+                atomic=True,
+            )
+
+            logger.info(f"Resume log saved: {md_path}")
+            return md_path
+
+        except Exception as e:
+            logger.error(f"Failed to save resume log: {e}", exc_info=True)
+            return None
+
+    def load_resume_log(self, session_id: str) -> Optional[str]:
+        """Load resume log markdown content.
+
+        Args:
+            session_id: Session ID to load
+
+        Returns:
+            Markdown content or None if not found
+        """
+        try:
+            md_path = self.storage_dir / f"session-{session_id}.md"
+
+            if not md_path.exists():
+                logger.debug(f"Resume log not found for session {session_id}")
+                return None
+
+            content = md_path.read_text(encoding="utf-8")
+            logger.info(f"Loaded resume log for session {session_id}")
+            return content
+
+        except Exception as e:
+            logger.error(f"Failed to load resume log: {e}", exc_info=True)
+            return None
+
+    def list_resume_logs(self) -> List[Dict[str, Any]]:
+        """List all available resume logs.
+
+        Returns:
+            List of resume log metadata
+        """
+        try:
+            logs = []
+
+            for md_file in self.storage_dir.glob("session-*.md"):
+                # Extract session ID from filename
+                session_id = md_file.stem.replace("session-", "")
+
+                # Check if JSON metadata exists
+                json_file = md_file.with_suffix(".json")
+                metadata = {}
+                if json_file.exists():
+                    json_data = self.state_storage.read_json(json_file)
+                    if json_data:
+                        metadata = {
+                            "session_id": session_id,
+                            "created_at": json_data.get("created_at"),
+                            "previous_session_id": json_data.get("previous_session_id"),
+                            "context_metrics": json_data.get("context_metrics", {}),
+                            "file_path": str(md_file),
+                        }
+
+                if metadata:
+                    logs.append(metadata)
+                else:
+                    # Fallback to file metadata
+                    logs.append(
+                        {
+                            "session_id": session_id,
+                            "file_path": str(md_file),
+                            "modified_at": datetime.fromtimestamp(
+                                md_file.stat().st_mtime, tz=timezone.utc
+                            ).isoformat(),
+                        }
+                    )
+
+            # Sort by creation time (newest first)
+            logs.sort(
+                key=lambda x: x.get("created_at", x.get("modified_at", "")),
+                reverse=True,
+            )
+
+            logger.debug(f"Found {len(logs)} resume logs")
+            return logs
+
+        except Exception as e:
+            logger.error(f"Failed to list resume logs: {e}", exc_info=True)
+            return []
+
+    def cleanup_old_logs(self, keep_count: int = 10) -> int:
+        """Clean up old resume logs, keeping only the most recent.
+
+        Args:
+            keep_count: Number of logs to keep
+
+        Returns:
+            Number of logs deleted
+        """
+        try:
+            logs = self.list_resume_logs()
+
+            if len(logs) <= keep_count:
+                logger.debug(
+                    f"No cleanup needed ({len(logs)} logs <= {keep_count} keep)"
+                )
+                return 0
+
+            # Delete old logs
+            deleted = 0
+            for log in logs[keep_count:]:
+                try:
+                    md_path = Path(log["file_path"])
+                    json_path = md_path.with_suffix(".json")
+
+                    if md_path.exists():
+                        md_path.unlink()
+                        deleted += 1
+
+                    if json_path.exists():
+                        json_path.unlink()
+
+                except Exception as e:
+                    logger.warning(f"Failed to delete log {log['session_id']}: {e}")
+
+            logger.info(f"Cleaned up {deleted} old resume logs (kept {keep_count})")
+            return deleted
+
+        except Exception as e:
+            logger.error(f"Failed to cleanup old logs: {e}", exc_info=True)
+            return 0
+
+    def get_stats(self) -> Dict[str, Any]:
+        """Get resume log statistics.
+
+        Returns:
+            Dictionary with statistics
+        """
+        try:
+            logs = self.list_resume_logs()
+
+            total_size = 0
+            for log in logs:
+                path = Path(log["file_path"])
+                if path.exists():
+                    total_size += path.stat().st_size
+
+            return {
+                "enabled": self.enabled,
+                "auto_generate": self.auto_generate,
+                "total_logs": len(logs),
+                "storage_dir": str(self.storage_dir),
+                "total_size_kb": round(total_size / 1024, 2),
+                "thresholds": {
+                    "caution": f"{self.threshold_caution:.0%}",
+                    "warning": f"{self.threshold_warning:.0%}",
+                    "critical": f"{self.threshold_critical:.0%}",
+                },
+            }
+
+        except Exception as e:
+            logger.error(f"Failed to get stats: {e}", exc_info=True)
+            return {
+                "enabled": self.enabled,
+                "error": str(e),
+            }
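
The new service reads its settings from a `context_management.resume_logs` / `context_management.thresholds` config section and exposes a small trigger → generate → save API. Below is a minimal usage sketch based only on the methods and config keys visible in the diff above; the config values, session ID, and TODO items are illustrative, and how the config dictionary is normally loaded is not shown here.

```python
from claude_mpm.services.infrastructure.resume_log_generator import ResumeLogGenerator

# Config shape mirrors the .get(...) chains in __init__ above (illustrative values).
config = {
    "context_management": {
        "resume_logs": {"enabled": True, "auto_generate": True, "max_tokens": 10000},
        "thresholds": {"caution": 0.70, "warning": 0.85, "critical": 0.95},
    }
}

generator = ResumeLogGenerator(config=config)  # defaults to ~/.claude-mpm/resume-logs

todos = [
    {"content": "Add token tracking to SessionManager", "status": "completed"},
    {"content": "Wire resume logs into session end", "status": "in_progress"},
]

# should_generate() fires on stop_reason, the warning/critical thresholds, or a manual trigger.
if generator.should_generate(stop_reason="model_context_window_exceeded"):
    resume_log = generator.generate_from_todo_list(session_id="example-session", todos=todos)
    if resume_log:
        # Writes session-<id>.md plus a session-<id>.json sidecar for programmatic access.
        generator.save_resume_log(resume_log)

print(generator.get_stats())
```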
claude_mpm/services/mcp_config_manager.py
@@ -1497,7 +1497,7 @@ class MCPConfigManager:
                 )

             if result.returncode == 0:
-                self.logger.info(f" ✅ Successfully injected {dep}")
+                self.logger.debug(f" ✅ Successfully injected {dep}")
             # Check if already injected (pipx will complain if package already exists)
             elif (
                 "already satisfied" in result.stderr.lower()
@@ -1582,7 +1582,7 @@ class MCPConfigManager:
                 )

             # Verify the reinstall worked
-            self.logger.info(f" → Verifying {service_name} installation...")
+            self.logger.debug(f" → Verifying {service_name} installation...")
             issue = self._detect_service_issue(service_name)

             if issue is None:
claude_mpm/services/session_manager.py
@@ -6,12 +6,19 @@ Ensures a single session ID is generated and used across all components.

 This service addresses race conditions and duplicate session ID generation
 by providing a single source of truth for session identifiers.
+
+Extended with:
+- Token usage tracking and monitoring
+- Resume log generation on session end
+- Context metrics persistence
+- Automatic resume log injection on session startup
 """

 import os
 from datetime import datetime, timezone
+from pathlib import Path
 from threading import Lock
-from typing import Optional
+from typing import Any, Dict, Optional

 from claude_mpm.core.logging_utils import get_logger

@@ -64,6 +71,24 @@ class SessionManager:
         self._session_id = self._generate_session_id()
         self._session_start_time = datetime.now(timezone.utc)

+        # Token usage tracking
+        self._cumulative_tokens = 0
+        self._total_budget = 200000  # Default Claude Code budget
+        self._last_stop_reason: Optional[str] = None
+
+        # Context metrics storage
+        self._context_metrics: Dict[str, Any] = {
+            "total_budget": self._total_budget,
+            "used_tokens": 0,
+            "remaining_tokens": self._total_budget,
+            "percentage_used": 0.0,
+            "stop_reason": None,
+            "model": "claude-sonnet-4.5",
+        }
+
+        # Resume log reference (loaded on startup if exists)
+        self._resume_log_content: Optional[str] = None
+
         # Mark as initialized
         self.__class__._initialized = True

@@ -71,6 +96,9 @@ class SessionManager:
             f"SessionManager initialized with session ID: {self._session_id}"
         )

+        # Check for resume log from previous session
+        self._load_resume_log()
+
     def _generate_session_id(self) -> str:
         """
         Generate or retrieve a session ID.
@@ -134,6 +162,182 @@ class SessionManager:
                 f"Session ID already set to {session_id}, no change needed"
             )

+    def update_token_usage(
+        self,
+        input_tokens: int = 0,
+        output_tokens: int = 0,
+        stop_reason: Optional[str] = None,
+    ) -> Dict[str, Any]:
+        """
+        Update cumulative token usage for the session.
+
+        Args:
+            input_tokens: Input tokens from latest API call
+            output_tokens: Output tokens from latest API call
+            stop_reason: Stop reason from Claude API
+
+        Returns:
+            Updated context metrics
+        """
+        with self.__class__._lock:
+            # Update cumulative usage
+            tokens_used = input_tokens + output_tokens
+            self._cumulative_tokens += tokens_used
+
+            # Update stop reason if provided
+            if stop_reason:
+                self._last_stop_reason = stop_reason
+
+            # Calculate metrics
+            remaining = max(0, self._total_budget - self._cumulative_tokens)
+            percentage = (self._cumulative_tokens / self._total_budget) * 100
+
+            # Update context metrics
+            self._context_metrics = {
+                "total_budget": self._total_budget,
+                "used_tokens": self._cumulative_tokens,
+                "remaining_tokens": remaining,
+                "percentage_used": percentage,
+                "stop_reason": self._last_stop_reason,
+                "model": "claude-sonnet-4.5",
+            }
+
+            logger.debug(
+                f"Token usage updated: {self._cumulative_tokens}/{self._total_budget} "
+                f"({percentage:.1f}%) - Stop reason: {stop_reason}"
+            )
+
+            return self._context_metrics.copy()
+
+    def get_context_metrics(self) -> Dict[str, Any]:
+        """
+        Get current context metrics.
+
+        Returns:
+            Dictionary containing token usage and context metrics
+        """
+        with self.__class__._lock:
+            return self._context_metrics.copy()
+
+    def get_token_usage_percentage(self) -> float:
+        """
+        Get current token usage as a percentage (0.0 to 1.0).
+
+        Returns:
+            Token usage percentage
+        """
+        with self.__class__._lock:
+            return self._context_metrics["percentage_used"] / 100.0
+
+    def should_warn_context_limit(self, threshold: float = 0.70) -> bool:
+        """
+        Check if context usage has reached warning threshold.
+
+        Args:
+            threshold: Warning threshold (0.0 to 1.0)
+
+        Returns:
+            True if threshold reached
+        """
+        return self.get_token_usage_percentage() >= threshold
+
+    def _load_resume_log(self) -> None:
+        """
+        Load resume log from previous session if it exists.
+
+        This is called during initialization to check for session continuity.
+        """
+        try:
+            # Lazy import to avoid circular dependencies
+            from claude_mpm.services.infrastructure.resume_log_generator import (
+                ResumeLogGenerator,
+            )
+
+            generator = ResumeLogGenerator()
+
+            # Check if there's a resume log for this session
+            # (Could be from a previous interrupted session with same ID)
+            resume_content = generator.load_resume_log(self._session_id)
+
+            if resume_content:
+                self._resume_log_content = resume_content
+                logger.info(f"Loaded resume log for session {self._session_id}")
+            else:
+                logger.debug("No resume log found for current session")
+
+        except Exception as e:
+            logger.warning(f"Failed to load resume log: {e}")
+            # Non-critical error, continue without resume log
+
+    def get_resume_log_content(self) -> Optional[str]:
+        """
+        Get resume log content if loaded.
+
+        Returns:
+            Resume log markdown content or None
+        """
+        with self.__class__._lock:
+            return self._resume_log_content
+
+    def generate_resume_log(
+        self,
+        session_state: Optional[Dict[str, Any]] = None,
+    ) -> Optional[Path]:
+        """
+        Generate and save resume log for current session.
+
+        Args:
+            session_state: Optional session state data to include
+
+        Returns:
+            Path to saved resume log or None if generation failed
+        """
+        try:
+            # Lazy import to avoid circular dependencies
+            from claude_mpm.models.resume_log import ContextMetrics, ResumeLog
+            from claude_mpm.services.infrastructure.resume_log_generator import (
+                ResumeLogGenerator,
+            )
+
+            generator = ResumeLogGenerator()
+
+            # Create context metrics from current state
+            context_metrics = ContextMetrics(
+                total_budget=self._total_budget,
+                used_tokens=self._cumulative_tokens,
+                remaining_tokens=self._context_metrics["remaining_tokens"],
+                percentage_used=self._context_metrics["percentage_used"],
+                stop_reason=self._last_stop_reason,
+                model=self._context_metrics["model"],
+                session_id=self._session_id,
+            )
+
+            if session_state:
+                # Generate from provided session state
+                resume_log = generator.generate_from_session_state(
+                    session_id=self._session_id,
+                    session_state=session_state,
+                    stop_reason=self._last_stop_reason,
+                )
+            else:
+                # Create minimal resume log
+                resume_log = ResumeLog(
+                    session_id=self._session_id,
+                    context_metrics=context_metrics,
+                    mission_summary="Session ended - resume log auto-generated.",
+                )
+
+            if resume_log:
+                file_path = generator.save_resume_log(resume_log)
+                logger.info(f"Resume log generated and saved: {file_path}")
+                return file_path
+            logger.warning("Resume log generation returned None")
+            return None
+
+        except Exception as e:
+            logger.error(f"Failed to generate resume log: {e}", exc_info=True)
+            return None
+
     @classmethod
     def reset(cls) -> None:
         """
claude_mpm/services/unified/deployment_strategies/local.py
@@ -395,7 +395,7 @@ class LocalDeploymentStrategy(DeploymentStrategy):
                 shutil.copy2(artifact, dest)
                 deployed.append(dest)

-                self._logger.info(f"Deployed agent: {dest}")
+                self._logger.debug(f"Deployed agent: {dest}")

         return deployed