claude-mpm 4.17.0__py3-none-any.whl → 4.18.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of claude-mpm might be problematic.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/BASE_ENGINEER.md +286 -0
- claude_mpm/agents/BASE_PM.md +48 -17
- claude_mpm/agents/agent_loader.py +4 -4
- claude_mpm/agents/templates/engineer.json +5 -1
- claude_mpm/agents/templates/svelte-engineer.json +225 -0
- claude_mpm/config/agent_config.py +2 -2
- claude_mpm/core/config.py +42 -0
- claude_mpm/core/factories.py +1 -1
- claude_mpm/core/optimized_agent_loader.py +3 -3
- claude_mpm/hooks/claude_hooks/response_tracking.py +35 -1
- claude_mpm/models/resume_log.py +340 -0
- claude_mpm/services/agents/auto_config_manager.py +1 -1
- claude_mpm/services/agents/deployment/agent_configuration_manager.py +1 -1
- claude_mpm/services/agents/deployment/agent_record_service.py +1 -1
- claude_mpm/services/agents/deployment/agent_validator.py +17 -1
- claude_mpm/services/agents/deployment/async_agent_deployment.py +1 -1
- claude_mpm/services/agents/deployment/local_template_deployment.py +1 -1
- claude_mpm/services/agents/local_template_manager.py +1 -1
- claude_mpm/services/cli/session_manager.py +87 -0
- claude_mpm/services/core/path_resolver.py +1 -1
- claude_mpm/services/infrastructure/resume_log_generator.py +439 -0
- claude_mpm/services/mcp_config_manager.py +2 -2
- claude_mpm/services/session_manager.py +205 -1
- claude_mpm/services/unified/deployment_strategies/local.py +1 -1
- claude_mpm/skills/bundled/api-documentation.md +393 -0
- claude_mpm/skills/bundled/async-testing.md +571 -0
- claude_mpm/skills/bundled/code-review.md +143 -0
- claude_mpm/skills/bundled/database-migration.md +199 -0
- claude_mpm/skills/bundled/docker-containerization.md +194 -0
- claude_mpm/skills/bundled/express-local-dev.md +1429 -0
- claude_mpm/skills/bundled/fastapi-local-dev.md +1199 -0
- claude_mpm/skills/bundled/git-workflow.md +414 -0
- claude_mpm/skills/bundled/imagemagick.md +204 -0
- claude_mpm/skills/bundled/json-data-handling.md +223 -0
- claude_mpm/skills/bundled/nextjs-local-dev.md +807 -0
- claude_mpm/skills/bundled/pdf.md +141 -0
- claude_mpm/skills/bundled/performance-profiling.md +567 -0
- claude_mpm/skills/bundled/refactoring-patterns.md +180 -0
- claude_mpm/skills/bundled/security-scanning.md +327 -0
- claude_mpm/skills/bundled/systematic-debugging.md +473 -0
- claude_mpm/skills/bundled/test-driven-development.md +378 -0
- claude_mpm/skills/bundled/vite-local-dev.md +1061 -0
- claude_mpm/skills/bundled/web-performance-optimization.md +2305 -0
- claude_mpm/skills/bundled/xlsx.md +157 -0
- claude_mpm/utils/agent_dependency_loader.py +2 -2
- {claude_mpm-4.17.0.dist-info → claude_mpm-4.18.3.dist-info}/METADATA +68 -1
- {claude_mpm-4.17.0.dist-info → claude_mpm-4.18.3.dist-info}/RECORD +52 -29
- {claude_mpm-4.17.0.dist-info → claude_mpm-4.18.3.dist-info}/WHEEL +0 -0
- {claude_mpm-4.17.0.dist-info → claude_mpm-4.18.3.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.17.0.dist-info → claude_mpm-4.18.3.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.17.0.dist-info → claude_mpm-4.18.3.dist-info}/top_level.txt +0 -0
```diff
--- /dev/null
+++ b/claude_mpm/models/resume_log.py
@@ -0,0 +1,340 @@
+"""Resume Log Data Model.
+
+This module defines the data structure for session resume logs that enable
+seamless context restoration when Claude hits token limits.
+
+Design Philosophy:
+- Target 10k tokens maximum per resume log
+- Human-readable markdown format
+- Structured sections with token budgets
+- Optimized for Claude consumption on session resume
+"""
+
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from claude_mpm.core.logging_utils import get_logger
+
+logger = get_logger(__name__)
+
+
+@dataclass
+class ContextMetrics:
+    """Context window usage metrics."""
+
+    total_budget: int = 200000
+    used_tokens: int = 0
+    remaining_tokens: int = 0
+    percentage_used: float = 0.0
+    stop_reason: Optional[str] = None
+    model: str = "claude-sonnet-4.5"
+    session_id: str = ""
+    timestamp: str = field(
+        default_factory=lambda: datetime.now(timezone.utc).isoformat()
+    )
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary."""
+        return {
+            "total_budget": self.total_budget,
+            "used_tokens": self.used_tokens,
+            "remaining_tokens": self.remaining_tokens,
+            "percentage_used": self.percentage_used,
+            "stop_reason": self.stop_reason,
+            "model": self.model,
+            "session_id": self.session_id,
+            "timestamp": self.timestamp,
+        }
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "ContextMetrics":
+        """Create from dictionary."""
+        return cls(
+            total_budget=data.get("total_budget", 200000),
+            used_tokens=data.get("used_tokens", 0),
+            remaining_tokens=data.get("remaining_tokens", 0),
+            percentage_used=data.get("percentage_used", 0.0),
+            stop_reason=data.get("stop_reason"),
+            model=data.get("model", "claude-sonnet-4.5"),
+            session_id=data.get("session_id", ""),
+            timestamp=data.get("timestamp", datetime.now(timezone.utc).isoformat()),
+        )
+
+
+@dataclass
+class ResumeLog:
+    """Resume log containing all information needed to restore session context.
+
+    Token Budget Distribution (10k tokens total):
+    - Context Metrics: 500 tokens
+    - Mission Summary: 1,000 tokens
+    - Accomplishments: 2,000 tokens
+    - Key Findings: 2,500 tokens
+    - Decisions & Rationale: 1,500 tokens
+    - Next Steps: 1,500 tokens
+    - Critical Context: 1,000 tokens
+    """
+
+    # Session identification
+    session_id: str
+    previous_session_id: Optional[str] = None
+    created_at: str = field(
+        default_factory=lambda: datetime.now(timezone.utc).isoformat()
+    )
+
+    # Context metrics
+    context_metrics: ContextMetrics = field(default_factory=ContextMetrics)
+
+    # Core content sections (with token budgets)
+    mission_summary: str = ""  # 1,000 tokens - What was the overall goal?
+    accomplishments: List[str] = field(
+        default_factory=list
+    )  # 2,000 tokens - What was completed?
+    key_findings: List[str] = field(
+        default_factory=list
+    )  # 2,500 tokens - What was discovered?
+    decisions_made: List[Dict[str, str]] = field(
+        default_factory=list
+    )  # 1,500 tokens - What choices were made and why?
+    next_steps: List[str] = field(
+        default_factory=list
+    )  # 1,500 tokens - What needs to happen next?
+    critical_context: Dict[str, Any] = field(
+        default_factory=dict
+    )  # 1,000 tokens - Essential state/data
+
+    # Metadata
+    files_modified: List[str] = field(default_factory=list)
+    agents_used: Dict[str, int] = field(default_factory=dict)
+    errors_encountered: List[str] = field(default_factory=list)
+    warnings: List[str] = field(default_factory=list)
+
+    def to_markdown(self) -> str:
+        """Generate markdown format for Claude consumption.
+
+        Returns:
+            Markdown-formatted resume log
+        """
+        sections = []
+
+        # Header
+        sections.append(f"# Session Resume Log: {self.session_id}\n")
+        sections.append(f"**Created**: {self.created_at}")
+        if self.previous_session_id:
+            sections.append(f"**Previous Session**: {self.previous_session_id}")
+        sections.append("")
+
+        # Context Metrics (500 tokens)
+        sections.append("## Context Metrics\n")
+        sections.append(f"- **Model**: {self.context_metrics.model}")
+        sections.append(
+            f"- **Tokens Used**: {self.context_metrics.used_tokens:,} / {self.context_metrics.total_budget:,}"
+        )
+        sections.append(
+            f"- **Percentage**: {self.context_metrics.percentage_used:.1f}%"
+        )
+        sections.append(
+            f"- **Remaining**: {self.context_metrics.remaining_tokens:,} tokens"
+        )
+        if self.context_metrics.stop_reason:
+            sections.append(f"- **Stop Reason**: {self.context_metrics.stop_reason}")
+        sections.append("")
+
+        # Mission Summary (1,000 tokens)
+        sections.append("## Mission Summary\n")
+        sections.append(
+            self.mission_summary
+            if self.mission_summary
+            else "_No mission summary provided_"
+        )
+        sections.append("")
+
+        # Accomplishments (2,000 tokens)
+        sections.append("## Accomplishments\n")
+        if self.accomplishments:
+            for i, item in enumerate(self.accomplishments, 1):
+                sections.append(f"{i}. {item}")
+        else:
+            sections.append("_No accomplishments recorded_")
+        sections.append("")
+
+        # Key Findings (2,500 tokens)
+        sections.append("## Key Findings\n")
+        if self.key_findings:
+            for i, finding in enumerate(self.key_findings, 1):
+                sections.append(f"{i}. {finding}")
+        else:
+            sections.append("_No key findings recorded_")
+        sections.append("")
+
+        # Decisions & Rationale (1,500 tokens)
+        sections.append("## Decisions & Rationale\n")
+        if self.decisions_made:
+            for i, decision in enumerate(self.decisions_made, 1):
+                decision_text = decision.get("decision", "")
+                rationale = decision.get("rationale", "")
+                sections.append(f"{i}. **Decision**: {decision_text}")
+                if rationale:
+                    sections.append(f"   **Rationale**: {rationale}")
+        else:
+            sections.append("_No decisions recorded_")
+        sections.append("")
+
+        # Next Steps (1,500 tokens)
+        sections.append("## Next Steps\n")
+        if self.next_steps:
+            for i, step in enumerate(self.next_steps, 1):
+                sections.append(f"{i}. {step}")
+        else:
+            sections.append("_No next steps defined_")
+        sections.append("")
+
+        # Critical Context (1,000 tokens)
+        sections.append("## Critical Context\n")
+        if self.critical_context:
+            for key, value in self.critical_context.items():
+                sections.append(f"- **{key}**: {value}")
+        else:
+            sections.append("_No critical context preserved_")
+        sections.append("")
+
+        # Metadata
+        sections.append("## Session Metadata\n")
+        if self.files_modified:
+            sections.append(f"**Files Modified** ({len(self.files_modified)}):")
+            for file in self.files_modified[:20]:  # Limit to first 20
+                sections.append(f"- {file}")
+            if len(self.files_modified) > 20:
+                sections.append(f"- ... and {len(self.files_modified) - 20} more")
+            sections.append("")
+
+        if self.agents_used:
+            sections.append("**Agents Used**:")
+            for agent, count in self.agents_used.items():
+                sections.append(f"- {agent}: {count} delegations")
+            sections.append("")
+
+        if self.errors_encountered:
+            sections.append(f"**Errors** ({len(self.errors_encountered)}):")
+            for error in self.errors_encountered[:5]:  # Limit to first 5
+                sections.append(f"- {error}")
+            sections.append("")
+
+        if self.warnings:
+            sections.append(f"**Warnings** ({len(self.warnings)}):")
+            for warning in self.warnings[:5]:  # Limit to first 5
+                sections.append(f"- {warning}")
+            sections.append("")
+
+        return "\n".join(sections)
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary for JSON serialization."""
+        return {
+            "session_id": self.session_id,
+            "previous_session_id": self.previous_session_id,
+            "created_at": self.created_at,
+            "context_metrics": self.context_metrics.to_dict(),
+            "mission_summary": self.mission_summary,
+            "accomplishments": self.accomplishments,
+            "key_findings": self.key_findings,
+            "decisions_made": self.decisions_made,
+            "next_steps": self.next_steps,
+            "critical_context": self.critical_context,
+            "files_modified": self.files_modified,
+            "agents_used": self.agents_used,
+            "errors_encountered": self.errors_encountered,
+            "warnings": self.warnings,
+        }
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "ResumeLog":
+        """Create from dictionary."""
+        context_metrics_data = data.get("context_metrics", {})
+        context_metrics = ContextMetrics.from_dict(context_metrics_data)
+
+        return cls(
+            session_id=data.get("session_id", ""),
+            previous_session_id=data.get("previous_session_id"),
+            created_at=data.get("created_at", datetime.now(timezone.utc).isoformat()),
+            context_metrics=context_metrics,
+            mission_summary=data.get("mission_summary", ""),
+            accomplishments=data.get("accomplishments", []),
+            key_findings=data.get("key_findings", []),
+            decisions_made=data.get("decisions_made", []),
+            next_steps=data.get("next_steps", []),
+            critical_context=data.get("critical_context", {}),
+            files_modified=data.get("files_modified", []),
+            agents_used=data.get("agents_used", {}),
+            errors_encountered=data.get("errors_encountered", []),
+            warnings=data.get("warnings", []),
+        )
+
+    def save(self, storage_dir: Optional[Path] = None) -> Path:
+        """Save resume log to markdown file.
+
+        Args:
+            storage_dir: Directory to save the log (default: .claude-mpm/resume-logs)
+
+        Returns:
+            Path to saved file
+        """
+        if storage_dir is None:
+            storage_dir = Path.home() / ".claude-mpm" / "resume-logs"
+
+        storage_dir.mkdir(parents=True, exist_ok=True)
+
+        # Generate filename
+        file_path = storage_dir / f"session-{self.session_id}.md"
+
+        try:
+            # Write markdown file
+            markdown_content = self.to_markdown()
+            file_path.write_text(markdown_content, encoding="utf-8")
+
+            logger.info(f"Resume log saved: {file_path}")
+            return file_path
+
+        except Exception as e:
+            logger.error(f"Failed to save resume log: {e}")
+            raise
+
+    @classmethod
+    def load(
+        cls, session_id: str, storage_dir: Optional[Path] = None
+    ) -> Optional["ResumeLog"]:
+        """Load resume log from file.
+
+        Args:
+            session_id: Session ID to load
+            storage_dir: Directory to load from (default: .claude-mpm/resume-logs)
+
+        Returns:
+            ResumeLog instance or None if not found
+        """
+        if storage_dir is None:
+            storage_dir = Path.home() / ".claude-mpm" / "resume-logs"
+
+        file_path = storage_dir / f"session-{session_id}.md"
+
+        if not file_path.exists():
+            logger.debug(f"Resume log not found: {file_path}")
+            return None
+
+        try:
+            # For now, we just return the markdown content
+            # In the future, could parse markdown back to structured data
+            _ = file_path.read_text(encoding="utf-8")
+            logger.info(f"Resume log loaded: {file_path}")
+
+            # Return a basic ResumeLog with the markdown content embedded
+            return cls(
+                session_id=session_id,
+                mission_summary=f"Loaded from previous session. See full context in {file_path}",
+            )
+
+        except Exception as e:
+            logger.error(f"Failed to load resume log: {e}")
+            return None
```
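The section budgets in the `ResumeLog` docstring sum to exactly 10,000 tokens (500 + 1,000 + 2,000 + 2,500 + 1,500 + 1,500 + 1,000), matching the stated 10k target. A minimal sketch of how the new model could be exercised; the session ID, metric values, and summary strings below are illustrative, not taken from the package:

```python
from claude_mpm.models.resume_log import ContextMetrics, ResumeLog

# Illustrative values only; IDs and token counts are hypothetical.
log = ResumeLog(
    session_id="abc123",
    context_metrics=ContextMetrics(
        used_tokens=185_000,
        remaining_tokens=15_000,
        percentage_used=92.5,
        stop_reason="approaching token limit",
        session_id="abc123",
    ),
    mission_summary="Migrate the auth service to async SQLAlchemy.",
    accomplishments=["Converted the session factory", "Ported 14 endpoints"],
    next_steps=["Port the remaining endpoints", "Run the integration suite"],
)

# save() writes markdown to ~/.claude-mpm/resume-logs/session-abc123.md
path = log.save()
print(path.read_text(encoding="utf-8").splitlines()[0])  # "# Session Resume Log: abc123"

# Dict round-trips are lossless; load(), by contrast, deliberately returns a
# stub pointing at the saved markdown rather than parsing it back (per the code).
restored = ResumeLog.from_dict(log.to_dict())
assert restored.accomplishments == log.accomplishments
stub = ResumeLog.load("abc123")
```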
```diff
--- a/claude_mpm/services/agents/auto_config_manager.py
+++ b/claude_mpm/services/agents/auto_config_manager.py
@@ -678,7 +678,7 @@ class AutoConfigManagerService(BaseService, IAutoConfigManager):
                     agent_id, agent_name, success=True
                 )
                 deployed.append(agent_id)
-                self.logger.
+                self.logger.debug(f"Successfully deployed agent: {agent_id}")

             except Exception as e:
                 self.logger.error(
```
```diff
--- a/claude_mpm/services/agents/deployment/agent_configuration_manager.py
+++ b/claude_mpm/services/agents/deployment/agent_configuration_manager.py
@@ -71,7 +71,7 @@ class AgentConfigurationManager:
             # Cache the result
             self._base_agent_cache = (base_agent_data, base_agent_version)

-            self.logger.
+            self.logger.debug(f"Loaded base agent from {self.base_agent_path}")
             return self._base_agent_cache

         except Exception as e:
```
```diff
--- a/claude_mpm/services/agents/deployment/agent_record_service.py
+++ b/claude_mpm/services/agents/deployment/agent_record_service.py
@@ -107,7 +107,7 @@ class AgentRecordService(BaseService):
                 record = self._deserialize_record(record_data)
                 records[agent_name] = record

-            self.logger.
+            self.logger.debug(f"Loaded {len(records)} agent records")
         else:
             self.logger.debug("No existing records file found")

```
```diff
--- a/claude_mpm/services/agents/deployment/agent_validator.py
+++ b/claude_mpm/services/agents/deployment/agent_validator.py
@@ -329,10 +329,26 @@ class AgentValidator:
             "type": "agent",  # Default type
         }

-        # Extract from YAML frontmatter
+        # Extract ONLY from YAML frontmatter (between --- markers)
         lines = content.split("\n")
+        in_frontmatter = False
+        frontmatter_ended = False
+
         for line in lines:
             stripped_line = line.strip()
+
+            # Track frontmatter boundaries
+            if stripped_line == "---":
+                if not in_frontmatter:
+                    in_frontmatter = True
+                    continue
+                frontmatter_ended = True
+                break  # Stop parsing after frontmatter ends
+
+            # Only parse within frontmatter
+            if not in_frontmatter or frontmatter_ended:
+                continue
+
             if stripped_line.startswith("name:"):
                 agent_info["name"] = stripped_line.split(":", 1)[1].strip().strip("\"'")
             elif stripped_line.startswith("description:"):
```
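This `AgentValidator` change fixes a parsing bug: metadata keys were previously matched anywhere in the file, so a `name:` or `description:` line in the markdown body could overwrite the frontmatter values. A self-contained repro of the new boundary rule, slightly simplified (the deployed code also keeps a `frontmatter_ended` flag); the agent content is made up for illustration:

```python
content = """---
name: engineer
description: Builds and refactors code
---
# Notes
name: impostor value that must be ignored
"""

agent_info = {"name": "", "description": "", "type": "agent"}

in_frontmatter = False
for line in content.split("\n"):
    stripped_line = line.strip()
    if stripped_line == "---":
        if not in_frontmatter:
            in_frontmatter = True   # opening marker
            continue
        break                       # closing marker: stop parsing entirely
    if not in_frontmatter:
        continue                    # ignore anything before the frontmatter
    if stripped_line.startswith("name:"):
        agent_info["name"] = stripped_line.split(":", 1)[1].strip().strip("\"'")
    elif stripped_line.startswith("description:"):
        agent_info["description"] = stripped_line.split(":", 1)[1].strip().strip("\"'")

assert agent_info["name"] == "engineer"  # the body's "name:" line never runs
```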
```diff
--- a/claude_mpm/services/agents/deployment/async_agent_deployment.py
+++ b/claude_mpm/services/agents/deployment/async_agent_deployment.py
@@ -224,7 +224,7 @@ class AsyncAgentDeploymentService:

         elapsed = (time.time() - start_time) * 1000
         self._metrics["time_saved_ms"] += max(0, (len(directories) * 75) - elapsed)
-        self.logger.
+        self.logger.debug(f"Discovered agents in {elapsed:.1f}ms (parallel scan)")

         return discovered

```
```diff
--- a/claude_mpm/services/agents/deployment/local_template_deployment.py
+++ b/claude_mpm/services/agents/deployment/local_template_deployment.py
@@ -90,7 +90,7 @@ class LocalTemplateDeploymentService:
                 logger.error(f"Failed to deploy local template {agent_id}: {e}")
                 results["errors"].append(f"{agent_id}: {e}")

-        logger.
+        logger.debug(
             f"Local template deployment: deployed={len(results['deployed'])}, "
             f"updated={len(results['updated'])}, skipped={len(results['skipped'])}, "
             f"errors={len(results['errors'])}"
```
```diff
--- a/claude_mpm/services/agents/local_template_manager.py
+++ b/claude_mpm/services/agents/local_template_manager.py
@@ -182,7 +182,7 @@ class LocalAgentTemplateManager:
         self._discover_templates_in_dir(self.user_agents_dir, "user")

         self._cache_valid = True
-        logger.
+        logger.debug(f"Discovered {len(self._template_cache)} local agent templates")

         return self._template_cache

```
```diff
--- a/claude_mpm/services/cli/session_manager.py
+++ b/claude_mpm/services/cli/session_manager.py
@@ -11,8 +11,10 @@ DESIGN DECISIONS:
 - Automatic session cleanup and archiving
 - Thread-safe session operations
 - Non-blocking validation with structured warnings
+- Async-first design with periodic auto-save task
 """

+import asyncio
 import gzip
 import json
 import uuid
```
```diff
@@ -217,8 +219,39 @@ class SessionManager(ISessionManager):
         self.config_service = config_service
         self.logger = get_logger("SessionManager")
         self._sessions_cache: Dict[str, SessionInfo] = {}
+        self._auto_save_task: Optional[asyncio.Task] = None
+        self._running = False
         self._load_sessions()

+        # Start auto-save task if enabled and event loop is running
+        if config_service:
+            auto_save_enabled = config_service.get("session.auto_save", True)
+            if auto_save_enabled:
+                self._start_auto_save()
+            else:
+                self.logger.info("Auto-save disabled by configuration")
+        else:
+            self.logger.debug("No config service provided, auto-save not started")
+
+    def _start_auto_save(self) -> None:
+        """Start the auto-save background task.
+
+        WHY: Separated from __init__ to allow safe initialization without event loop.
+        Can be called when event loop is available.
+        """
+        try:
+            loop = asyncio.get_running_loop()
+            self._running = True
+            self._auto_save_task = loop.create_task(self._periodic_session_save())
+            self.logger.info("Auto-save task started")
+        except RuntimeError:
+            # No event loop running, schedule for later
+            self.logger.debug(
+                "No event loop running, auto-save will start when loop is available"
+            )
+            # Set flag so we know to start it later
+            self._running = True
+
     def create_session(
         self, context: str = "default", options: Optional[Dict[str, Any]] = None
     ) -> SessionInfo:
```
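`_start_auto_save()` relies on the fact that `asyncio.get_running_loop()` raises `RuntimeError` when no loop is active, which lets the same constructor work from both sync and async call sites; note that in the sync case this diff only sets `_running = True`, and nothing shown here schedules the task later. A minimal sketch of the detection pattern (the function names are hypothetical, not claude-mpm API):

```python
import asyncio
from typing import Awaitable, Callable, Optional

def start_background_task(
    coro_factory: Callable[[], Awaitable[None]],
) -> Optional[asyncio.Task]:
    """Schedule coro_factory() on the running loop, or signal 'no loop yet'."""
    try:
        loop = asyncio.get_running_loop()   # raises RuntimeError outside a loop
    except RuntimeError:
        return None                         # caller retries once a loop exists
    return loop.create_task(coro_factory())

async def main() -> None:
    task = start_background_task(lambda: asyncio.sleep(0.1))
    assert task is not None                 # inside asyncio.run(): loop present
    await task

assert start_background_task(lambda: asyncio.sleep(0)) is None  # no loop yet
asyncio.run(main())
```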
```diff
@@ -477,6 +510,60 @@ class SessionManager(ISessionManager):
             self.logger.error(f"Failed to load sessions: {e}")
             self._sessions_cache = {}

+    async def _periodic_session_save(self) -> None:
+        """Periodically save sessions to disk.
+
+        WHY: Ensures sessions are persisted regularly to prevent data loss.
+        Follows the async pattern from EventAggregator._periodic_cleanup().
+        """
+        if not self.config_service:
+            self.logger.warning("No config service, cannot determine save interval")
+            return
+
+        save_interval = self.config_service.get("session.save_interval", 300)
+        self.logger.info(f"Starting periodic session save (interval: {save_interval}s)")
+
+        while self._running:
+            try:
+                await asyncio.sleep(save_interval)
+
+                if self._sessions_cache:
+                    self._save_sessions()
+                    self.logger.debug(
+                        f"Auto-saved {len(self._sessions_cache)} session(s)"
+                    )
+                else:
+                    self.logger.debug("No sessions to save")
+
+            except asyncio.CancelledError:
+                self.logger.info("Auto-save task cancelled")
+                break
+            except Exception as e:
+                self.logger.error(f"Error in auto-save task: {e}")
+
+    async def cleanup(self) -> None:
+        """Clean up resources and stop background tasks.
+
+        WHY: Ensures graceful shutdown of the SessionManager and all background tasks.
+        """
+        self.logger.info("Shutting down SessionManager...")
+        self._running = False
+
+        # Cancel auto-save task
+        if self._auto_save_task and not self._auto_save_task.done():
+            self._auto_save_task.cancel()
+            try:
+                await self._auto_save_task
+            except asyncio.CancelledError:
+                pass
+
+        # Final save before shutdown
+        if self._sessions_cache:
+            self._save_sessions()
+            self.logger.info(f"Final save: {len(self._sessions_cache)} session(s)")
+
+        self.logger.info("SessionManager shutdown complete")
+

 # Context manager for session management
 class ManagedSession:
```
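Together, `_periodic_session_save()` and `cleanup()` implement a common asyncio worker lifecycle: a flag-guarded sleep loop that treats `CancelledError` as a shutdown signal, plus a cleanup path that cancels, awaits, then performs a final save. A standalone sketch of that lifecycle (the names and dict-based state are illustrative, not the claude-mpm API):

```python
import asyncio

async def periodic_save(state: dict, interval: float, running: dict) -> None:
    # Mirrors _periodic_session_save(): sleep, save, treat cancel as shutdown.
    while running["flag"]:
        try:
            await asyncio.sleep(interval)
            print(f"auto-saved {len(state)} session(s)")
        except asyncio.CancelledError:
            print("auto-save task cancelled")
            break

async def main() -> None:
    state = {"abc123": {"context": "default"}}
    running = {"flag": True}
    task = asyncio.create_task(periodic_save(state, 0.05, running))
    await asyncio.sleep(0.12)        # let the saver tick a couple of times

    # Shutdown path, as in cleanup(): stop the loop, cancel, await, final save.
    running["flag"] = False
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        pass
    print(f"final save: {len(state)} session(s)")

asyncio.run(main())
```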
```diff
--- a/claude_mpm/services/core/path_resolver.py
+++ b/claude_mpm/services/core/path_resolver.py
@@ -281,7 +281,7 @@ class PathResolver(IPathResolver):

         if agents_dir and agents_dir.exists():
             discovered_agents_dir = agents_dir
-            self.logger.
+            self.logger.debug(f"Using custom agents directory: {discovered_agents_dir}")
         elif framework_path and framework_path != Path("__PACKAGED__"):
             # Prioritize templates directory over main agents directory
             templates_dir = (
```