claude-mpm 4.18.0-py3-none-any.whl → 4.20.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/BASE_ENGINEER.md +286 -0
- claude_mpm/agents/BASE_PM.md +238 -37
- claude_mpm/agents/PM_INSTRUCTIONS.md +40 -0
- claude_mpm/agents/templates/engineer.json +5 -1
- claude_mpm/agents/templates/python_engineer.json +8 -3
- claude_mpm/agents/templates/rust_engineer.json +12 -7
- claude_mpm/cli/commands/mpm_init.py +109 -24
- claude_mpm/commands/mpm-init.md +112 -6
- claude_mpm/core/config.py +42 -0
- claude_mpm/hooks/__init__.py +8 -0
- claude_mpm/hooks/session_resume_hook.py +121 -0
- claude_mpm/services/agents/deployment/agent_validator.py +17 -1
- claude_mpm/services/cli/resume_service.py +617 -0
- claude_mpm/services/cli/session_manager.py +87 -0
- claude_mpm/services/cli/session_resume_helper.py +352 -0
- {claude_mpm-4.18.0.dist-info → claude_mpm-4.20.0.dist-info}/METADATA +19 -4
- {claude_mpm-4.18.0.dist-info → claude_mpm-4.20.0.dist-info}/RECORD +22 -19
- {claude_mpm-4.18.0.dist-info → claude_mpm-4.20.0.dist-info}/WHEEL +0 -0
- {claude_mpm-4.18.0.dist-info → claude_mpm-4.20.0.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.18.0.dist-info → claude_mpm-4.20.0.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.18.0.dist-info → claude_mpm-4.20.0.dist-info}/top_level.txt +0 -0

claude_mpm/services/cli/resume_service.py
@@ -0,0 +1,617 @@
"""Resume Service - Intelligent session resume from stop event logs.

WHY: This service provides resume capabilities by reading stop event logs from
.claude-mpm/responses/ and .claude-mpm/resume-logs/ to help users continue work.

DESIGN DECISIONS:
- Two-tier strategy: prefer resume logs, fall back to response logs
- Read JSON stop events from response logs
- Parse PM responses for context (tasks, files, next steps)
- Group by session_id for session-based resume
- Calculate time elapsed and display comprehensive context
- Non-blocking with graceful degradation

INTEGRATION:
- Used by /mpm-init resume command
- Complements SessionResumeHelper (pause-based) with log-based approach
"""

import json
import re
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional

from claude_mpm.core.logger import get_logger

logger = get_logger(__name__)


@dataclass
class SessionSummary:
    """Summary of a session from logs."""

    session_id: str
    timestamp: datetime
    agent_count: int
    stop_reason: str
    token_usage: int
    last_agent: str
    working_directory: str
    git_branch: str


@dataclass
class SessionContext:
    """Full context for resuming a session."""

    session_id: str
    timestamp: datetime
    time_ago: str
    request: str
    response: str

    # Metadata
    stop_reason: str
    token_usage: int
    working_directory: str
    git_branch: str

    # PM-specific data
    tasks_completed: List[str]
    files_affected: List[str]
    next_steps: List[str]
    context_management: Optional[str]
    delegation_compliance: Optional[str]

    # Response logs used
    response_files: List[str]


class ResumeService:
    """Service for reading and parsing stop event logs for resume functionality."""

    def __init__(self, project_path: Optional[Path] = None):
        """Initialize resume service.

        Args:
            project_path: Project root path (default: current directory)
        """
        self.project_path = project_path or Path.cwd()
        self.responses_dir = self.project_path / ".claude-mpm" / "responses"
        self.resume_logs_dir = self.project_path / ".claude-mpm" / "resume-logs"
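
    # On-disk layout, as implied by the two paths above and the globs used below:
    #   <project>/.claude-mpm/responses/*.json               (JSON stop-event response logs)
    #   <project>/.claude-mpm/resume-logs/*<session_id>*.md  (structured resume logs)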

    def list_sessions(self) -> List[SessionSummary]:
        """List all available sessions from response logs.

        Returns:
            List of SessionSummary objects sorted by most recent first
        """
        if not self.responses_dir.exists():
            logger.debug("No responses directory found")
            return []

        # Group response files by session_id
        sessions_map: Dict[str, List[Path]] = {}

        for response_file in self.responses_dir.glob("*.json"):
            try:
                with response_file.open("r") as f:
                    data = json.load(f)

                session_id = data.get("session_id", "unknown")
                if session_id not in sessions_map:
                    sessions_map[session_id] = []
                sessions_map[session_id].append(response_file)

            except Exception as e:
                logger.warning(f"Failed to read {response_file}: {e}")
                continue

        # Create summaries
        summaries = []
        for session_id, files in sessions_map.items():
            try:
                # Use the most recent file for this session
                files.sort(key=lambda p: p.stat().st_mtime, reverse=True)
                latest_file = files[0]

                with latest_file.open("r") as f:
                    data = json.load(f)

                metadata = data.get("metadata", {})
                timestamp_str = data.get("timestamp") or metadata.get("timestamp")
                timestamp = self._parse_timestamp(timestamp_str)

                summary = SessionSummary(
                    session_id=session_id,
                    timestamp=timestamp,
                    agent_count=len(files),
                    stop_reason=metadata.get("stop_reason", "unknown"),
                    token_usage=metadata.get("usage", {}).get("total_tokens", 0),
                    last_agent=data.get("agent", "unknown"),
                    working_directory=metadata.get("working_directory", ""),
                    git_branch=metadata.get("git_branch", "unknown"),
                )
                summaries.append(summary)

            except Exception as e:
                logger.warning(
                    f"Failed to create summary for session {session_id}: {e}"
                )
                continue

        # Sort by most recent first
        summaries.sort(key=lambda s: s.timestamp, reverse=True)
        return summaries
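
    # A response log that list_sessions() can summarize looks roughly like this
    # (field names match the lookups above; values are illustrative):
    #
    #   {
    #     "session_id": "abc123",
    #     "timestamp": "2025-01-01T12:00:00Z",
    #     "agent": "engineer",
    #     "request": "...",
    #     "response": "...",
    #     "metadata": {
    #       "stop_reason": "end_turn",
    #       "usage": {"total_tokens": 12345},
    #       "working_directory": "/path/to/project",
    #       "git_branch": "main"
    #     }
    #   }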

    def get_session_context(self, session_id: str) -> Optional[SessionContext]:
        """Get full context for a specific session.

        Args:
            session_id: Session ID to retrieve context for

        Returns:
            SessionContext object or None if not found
        """
        # Try resume log first
        resume_context = self._get_context_from_resume_log(session_id)
        if resume_context:
            return resume_context

        # Fallback to response logs
        return self._get_context_from_response_logs(session_id)

    def get_latest_session(self) -> Optional[SessionContext]:
        """Get context from most recent session.

        Returns:
            SessionContext object or None if no sessions found
        """
        sessions = self.list_sessions()
        if not sessions:
            logger.debug("No sessions found")
            return None

        latest = sessions[0]
        return self.get_session_context(latest.session_id)

    def _get_context_from_resume_log(self, session_id: str) -> Optional[SessionContext]:
        """Try to get context from structured resume log.

        Args:
            session_id: Session ID

        Returns:
            SessionContext or None if not found
        """
        if not self.resume_logs_dir.exists():
            return None

        # Find resume log for this session
        for resume_log in self.resume_logs_dir.glob(f"*{session_id}*.md"):
            try:
                content = resume_log.read_text(encoding="utf-8")
                return self._parse_resume_log(session_id, content)
            except Exception as e:
                logger.warning(f"Failed to parse resume log {resume_log}: {e}")
                continue

        return None

    def _get_context_from_response_logs(
        self, session_id: str
    ) -> Optional[SessionContext]:
        """Get context from response logs for a session.

        Args:
            session_id: Session ID

        Returns:
            SessionContext or None if not found
        """
        if not self.responses_dir.exists():
            return None

        # Find all response files for this session
        response_files = []
        for response_file in self.responses_dir.glob("*.json"):
            try:
                with response_file.open("r") as f:
                    data = json.load(f)
                if data.get("session_id") == session_id:
                    response_files.append(response_file)
            except Exception as e:
                logger.warning(f"Failed to read {response_file}: {e}")
                continue

        if not response_files:
            logger.debug(f"No response files found for session {session_id}")
            return None

        # Sort by timestamp (most recent last)
        response_files.sort(key=lambda p: p.stat().st_mtime)

        # Parse the files to build context
        return self._build_context_from_files(session_id, response_files)

    def _build_context_from_files(
        self, session_id: str, response_files: List[Path]
    ) -> Optional[SessionContext]:
        """Build SessionContext from multiple response files.

        Args:
            session_id: Session ID
            response_files: List of response file paths

        Returns:
            SessionContext or None if parsing fails
        """
        try:
            # Use the last (most recent) file for primary data
            latest_file = response_files[-1]

            with latest_file.open("r") as f:
                latest_data = json.load(f)

            metadata = latest_data.get("metadata", {})
            timestamp_str = latest_data.get("timestamp") or metadata.get("timestamp")
            timestamp = self._parse_timestamp(timestamp_str)

            # Extract basic info
            request = latest_data.get("request", "Unknown request")
            response = latest_data.get("response", "")

            # Parse PM response if available
            pm_data = self.parse_pm_response(latest_data)

            # Calculate time elapsed
            time_ago = self._calculate_time_ago(timestamp)

            return SessionContext(
                session_id=session_id,
                timestamp=timestamp,
                time_ago=time_ago,
                request=request,
                response=response,
                stop_reason=metadata.get("stop_reason", "unknown"),
                token_usage=metadata.get("usage", {}).get("total_tokens", 0),
                working_directory=metadata.get("working_directory", ""),
                git_branch=metadata.get("git_branch", "unknown"),
                tasks_completed=pm_data.get("tasks_completed", []),
                files_affected=pm_data.get("files_affected", []),
                next_steps=pm_data.get("next_steps", []),
                context_management=pm_data.get("context_management"),
                delegation_compliance=pm_data.get("delegation_compliance"),
                response_files=[str(f) for f in response_files],
            )

        except Exception as e:
            logger.error(f"Failed to build context from files: {e}")
            return None

    def parse_pm_response(self, response_json: dict) -> dict:
        """Extract key info from PM response JSON.

        Parses the PM's response text looking for JSON blocks with pm_summary,
        TodoWrite mentions, tasks, files, etc.

        Args:
            response_json: Full response JSON from log file

        Returns:
            Dict with extracted PM data
        """
        result = {
            "tasks_completed": [],
            "files_affected": [],
            "next_steps": [],
            "context_management": None,
            "delegation_compliance": None,
        }

        response_text = response_json.get("response", "")
        if not response_text:
            return result

        # Try to find JSON block in response
        json_blocks = re.findall(r"```json\s*(\{.*?\})\s*```", response_text, re.DOTALL)

        for json_str in json_blocks:
            try:
                data = json.loads(json_str)

                # Look for PM summary data
                if data.get("pm_summary"):
                    # Extract tasks from various PM fields
                    result["tasks_completed"] = (
                        data.get("tasks_completed", [])
                        or data.get("measurable_outcomes", [])
                        or []
                    )

                    # Extract files
                    result["files_affected"] = data.get("files_affected", [])

                    # Extract next steps from various fields
                    result["next_steps"] = (
                        data.get("next_steps", [])
                        or data.get("next_actions", [])
                        or data.get("unresolved_requirements", [])
                        or []
                    )

                    result["context_management"] = data.get("context_management")
                    result["delegation_compliance"] = data.get("delegation_compliance")
                    return result  # Found PM summary, use it

            except json.JSONDecodeError:
                continue

        # Fallback: parse response text for common patterns
        result["tasks_completed"] = self._extract_completed_tasks(response_text)
        result["files_affected"] = self._extract_files(response_text)
        result["next_steps"] = self._extract_next_steps(response_text)

        return result
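
    # The PM summary this method looks for is a fenced JSON block embedded in
    # the response text, e.g. (keys match the lookups above; values are
    # illustrative):
    #
    #   ```json
    #   {
    #     "pm_summary": true,
    #     "tasks_completed": ["Added resume service"],
    #     "files_affected": ["src/claude_mpm/services/cli/resume_service.py"],
    #     "next_steps": ["Add tests"],
    #     "context_management": "...",
    #     "delegation_compliance": "..."
    #   }
    #   ```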

    def _extract_completed_tasks(self, text: str) -> List[str]:
        """Extract completed tasks from response text."""
        tasks = []

        # Look for bullet lists with checkmarks
        for line in text.split("\n"):
            if re.search(r"[✓✅☑]\s*(.+)", line):
                match = re.search(r"[✓✅☑]\s*(.+)", line)
                if match:
                    tasks.append(match.group(1).strip())

        return tasks[:10]  # Limit to 10

    def _extract_files(self, text: str) -> List[str]:
        """Extract file paths from response text."""
        files = []

        # Look for file paths (common patterns)
        patterns = [
            r"`([^`]+\.(py|js|ts|md|json|yaml|yml|txt|sh))`",
            r"File:\s*([^\s]+)",
            r"Modified:\s*([^\s]+)",
        ]

        for pattern in patterns:
            matches = re.findall(pattern, text)
            for match in matches:
                file_path = match[0] if isinstance(match, tuple) else match
                if file_path and file_path not in files:
                    files.append(file_path)

        return files[:20]  # Limit to 20

    def _extract_next_steps(self, text: str) -> List[str]:
        """Extract next steps from response text."""
        steps = []

        # Look for "Next steps" section
        next_steps_section = re.search(
            r"(?:Next [Ss]teps?|TODO|To [Dd]o):?\s*(.*?)(?:\n\n|\Z)", text, re.DOTALL
        )

        if next_steps_section:
            section_text = next_steps_section.group(1)
            for line in section_text.split("\n"):
                # Look for bullet points or numbered items
                if re.match(r"^\s*[-*•]\s*(.+)", line):
                    match = re.match(r"^\s*[-*•]\s*(.+)", line)
                    if match:
                        steps.append(match.group(1).strip())
                elif re.match(r"^\s*\d+[\.)]\s*(.+)", line):
                    match = re.match(r"^\s*\d+[\.)]\s*(.+)", line)
                    if match:
                        steps.append(match.group(1).strip())

        return steps[:10]  # Limit to 10

    def _parse_resume_log(
        self, session_id: str, content: str
    ) -> Optional[SessionContext]:
        """Parse structured resume log markdown file.

        Args:
            session_id: Session ID
            content: Markdown content of resume log

        Returns:
            SessionContext or None if parsing fails
        """
        try:
            # Extract sections using markdown headers
            mission = self._extract_section(content, "Mission")
            accomplishments = self._extract_list_items(content, "Accomplishments")
            findings = self._extract_section(content, "Key Findings")
            next_steps = self._extract_list_items(content, "Next Steps")

            # Extract timestamp from header
            timestamp_match = re.search(r"Session Resume:\s*(.+)", content)
            timestamp_str = timestamp_match.group(1) if timestamp_match else None
            timestamp = self._parse_timestamp(timestamp_str)

            time_ago = self._calculate_time_ago(timestamp)

            return SessionContext(
                session_id=session_id,
                timestamp=timestamp,
                time_ago=time_ago,
                request=mission or "Unknown",
                response=findings or "",
                stop_reason="resume_log",
                token_usage=0,
                working_directory="",
                git_branch="unknown",
                tasks_completed=accomplishments,
                files_affected=[],
                next_steps=next_steps,
                context_management=None,
                delegation_compliance=None,
                response_files=[],
            )

        except Exception as e:
            logger.error(f"Failed to parse resume log: {e}")
            return None
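
    # A resume log that _parse_resume_log() understands is a markdown file of
    # roughly this shape (the title line and section headers come from the
    # regexes above; body content is illustrative):
    #
    #   # Session Resume: 2025-01-01T12:00:00Z
    #
    #   ## Mission
    #   Refactor the session manager.
    #
    #   ## Accomplishments
    #   - Extracted ResumeService
    #
    #   ## Key Findings
    #   Response logs already carry stop metadata.
    #
    #   ## Next Steps
    #   - Wire into /mpm-init resume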

    def _extract_section(self, content: str, header: str) -> Optional[str]:
        """Extract content from a markdown section."""
        pattern = rf"##\s+{header}\s*\n(.+?)(?=\n##|\Z)"
        match = re.search(pattern, content, re.DOTALL)
        return match.group(1).strip() if match else None

    def _extract_list_items(self, content: str, header: str) -> List[str]:
        """Extract list items from a markdown section."""
        section = self._extract_section(content, header)
        if not section:
            return []

        items = []
        for line in section.split("\n"):
            if re.match(r"^\s*[-*•]\s*(.+)", line):
                match = re.match(r"^\s*[-*•]\s*(.+)", line)
                if match:
                    items.append(match.group(1).strip())

        return items

    def _parse_timestamp(self, timestamp_str: Optional[str]) -> datetime:
        """Parse timestamp string to datetime object.

        Args:
            timestamp_str: ISO-8601 timestamp string

        Returns:
            datetime object (defaults to epoch if parsing fails)
        """
        if not timestamp_str:
            return datetime.fromtimestamp(0, tz=timezone.utc)

        try:
            # Try ISO format
            dt = datetime.fromisoformat(timestamp_str.replace("Z", "+00:00"))
            if dt.tzinfo is None:
                dt = dt.replace(tzinfo=timezone.utc)
            return dt
        except Exception:
            # Fallback to epoch
            logger.warning(f"Failed to parse timestamp: {timestamp_str}")
            return datetime.fromtimestamp(0, tz=timezone.utc)

    def _calculate_time_ago(self, timestamp: datetime) -> str:
        """Calculate human-readable time elapsed.

        Args:
            timestamp: Timestamp to calculate from

        Returns:
            Human-readable string like "2 hours ago"
        """
        now = datetime.now(timezone.utc)
        delta = now - timestamp

        days = delta.days
        hours = delta.seconds // 3600
        minutes = (delta.seconds % 3600) // 60

        if days > 0:
            return f"{days} day{'s' if days != 1 else ''} ago"
        if hours > 0:
            return f"{hours} hour{'s' if hours != 1 else ''} ago"
        if minutes > 0:
            return f"{minutes} minute{'s' if minutes != 1 else ''} ago"
        return "just now"

    def format_resume_display(self, context: SessionContext) -> str:
        """Format context for user display.

        Args:
            context: SessionContext to format

        Returns:
            Formatted string for console display
        """
        lines = []
        lines.append("\n" + "=" * 80)
        lines.append(f"📋 Resume Context - Session from {context.time_ago}")
        lines.append("=" * 80)
        lines.append("")

        lines.append(f"Session ID: {context.session_id}")
        lines.append(
            f"Ended: {context.timestamp.strftime('%Y-%m-%d %H:%M')} ({context.time_ago})"
        )
        lines.append(f"Stop Reason: {self._format_stop_reason(context.stop_reason)}")

        if context.token_usage > 0:
            usage_pct = (context.token_usage / 200000) * 100
            lines.append(
                f"Token Usage: {context.token_usage:,} / 200,000 ({usage_pct:.0f}%)"
            )

        lines.append("")
        lines.append("Working on:")
        lines.append(f'  "{context.request}"')

        if context.tasks_completed:
            lines.append("")
            lines.append("✅ Completed:")
            for task in context.tasks_completed[:10]:
                lines.append(f"   • {task}")
            if len(context.tasks_completed) > 10:
                lines.append(f"   ... and {len(context.tasks_completed) - 10} more")

        if context.files_affected:
            lines.append("")
            lines.append("📝 Files Modified:")
            for file_path in context.files_affected[:15]:
                lines.append(f"   • {file_path}")
            if len(context.files_affected) > 15:
                lines.append(f"   ... and {len(context.files_affected) - 15} more")

        if context.next_steps:
            lines.append("")
            lines.append("🎯 Next Steps:")
            for step in context.next_steps[:10]:
                lines.append(f"   • {step}")
            if len(context.next_steps) > 10:
                lines.append(f"   ... and {len(context.next_steps) - 10} more")

        if context.working_directory or context.git_branch != "unknown":
            lines.append("")
            lines.append("Git Context:")
            if context.git_branch != "unknown":
                lines.append(f"   Branch: {context.git_branch}")
            if context.working_directory:
                lines.append(f"   Working Directory: {context.working_directory}")

        lines.append("")
        lines.append("=" * 80)
        lines.append("")

        return "\n".join(lines)

    def _format_stop_reason(self, reason: str) -> str:
        """Format stop reason for display."""
        reason_map = {
            "end_turn": "Natural completion",
            "max_tokens": "Context limit reached",
            "stop_sequence": "Stop sequence detected",
            "tool_use": "Tool interaction completed",
            "completed": "Task completed",
            "resume_log": "From resume log",
            "unknown": "Unknown",
        }

        # Check for context threshold
        if "context" in reason.lower() and "threshold" in reason.lower():
            return "Context threshold reached"

        return reason_map.get(reason, reason.capitalize())