claude-mpm 4.3.6__py3-none-any.whl → 4.3.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/BASE_PM.md +41 -8
- claude_mpm/agents/PM_INSTRUCTIONS.md +85 -43
- claude_mpm/agents/templates/clerk-ops.json +223 -0
- claude_mpm/agents/templates/data_engineer.json +41 -5
- claude_mpm/agents/templates/php-engineer.json +185 -0
- claude_mpm/cli/__init__.py +40 -2
- claude_mpm/cli/commands/agents.py +2 -2
- claude_mpm/cli/commands/analyze.py +4 -4
- claude_mpm/cli/commands/cleanup.py +7 -7
- claude_mpm/cli/commands/configure_tui.py +2 -2
- claude_mpm/cli/commands/debug.py +2 -2
- claude_mpm/cli/commands/info.py +3 -4
- claude_mpm/cli/commands/mcp.py +8 -6
- claude_mpm/cli/commands/mcp_install_commands.py +9 -9
- claude_mpm/cli/commands/run.py +3 -3
- claude_mpm/cli/startup_logging.py +20 -7
- claude_mpm/hooks/instruction_reinforcement.py +295 -0
- claude_mpm/services/agents/deployment/deployment_wrapper.py +59 -0
- claude_mpm/utils/log_cleanup.py +17 -17
- claude_mpm/utils/subprocess_utils.py +6 -6
- {claude_mpm-4.3.6.dist-info → claude_mpm-4.3.11.dist-info}/METADATA +21 -1
- {claude_mpm-4.3.6.dist-info → claude_mpm-4.3.11.dist-info}/RECORD +27 -25
- claude_mpm/agents/templates/agent-manager.md +0 -619
- {claude_mpm-4.3.6.dist-info → claude_mpm-4.3.11.dist-info}/WHEEL +0 -0
- {claude_mpm-4.3.6.dist-info → claude_mpm-4.3.11.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.3.6.dist-info → claude_mpm-4.3.11.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.3.6.dist-info → claude_mpm-4.3.11.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,295 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Instruction Reinforcement Hook for PM Delegation Compliance
|
|
3
|
+
|
|
4
|
+
This hook monitors PM behavior for delegation violations and provides
|
|
5
|
+
escalating warnings when the PM attempts to implement instead of delegate.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import logging
import re
import time
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class ViolationType(Enum):
    """Types of PM delegation violations.

    Each member's value is the human-readable label that appears in
    violation reports and warnings.
    """

    EDIT_ATTEMPT = "Edit/Write/MultiEdit"
    BASH_IMPLEMENTATION = "Bash for implementation"
    FILE_CREATION = "Direct file creation"
    TEST_EXECUTION = "Test execution"
    DEPLOYMENT = "Deployment operation"
    CODE_WRITING = "Code writing"
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
@dataclass
class Violation:
    """Record of a delegation violation."""

    violation_type: ViolationType  # category of forbidden action detected
    context: str  # text fragment that matched a forbidden pattern
    timestamp: float  # time.time() at detection
    severity_level: int  # escalation level, capped at 4
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class InstructionReinforcementHook:
    """
    Monitors PM messages for delegation violations and provides corrective feedback.

    Circuit breaker pattern implementation to prevent PM from implementing
    instead of delegating work to appropriate agents.
    """

    def __init__(self):
        # Number of violations seen this session; drives warning escalation.
        self.violation_count = 0
        # Chronological record of every violation detected this session.
        self.violations: List[Violation] = []

        # Patterns that indicate PM is attempting forbidden actions.
        # Matched case-insensitively; order matters (first match wins).
        self.forbidden_patterns = [
            # Direct implementation language - EXPANDED
            (
                r"I'll\s+(fix|create|write|implement|code|build|update|generate|modify|set\s+up|configure|optimize|rewrite|run|test|deploy|push|analyze|review|setup)",
                ViolationType.CODE_WRITING,
            ),
            (
                r"I'm\s+(fix|create|write|implement|code|build|update|generate|modify)",
                ViolationType.CODE_WRITING,
            ),
            (
                r"Let\s+me\s+(edit|write|modify|create|update|fix|run|execute|commit|refactor|configure|set\s+up|review)",
                ViolationType.EDIT_ATTEMPT,
            ),
            (
                r"I\s+will\s+(implement|code|build|create|write|update|fix)",
                ViolationType.CODE_WRITING,
            ),
            (
                r"I'm\s+(going\s+to|about\s+to)\s+(fix|create|write|implement|update|modify)",
                ViolationType.CODE_WRITING,
            ),
            # Common honeypot phrases
            (
                r"Here's\s+(the|my|an?)\s+(implementation|code|SQL|query|solution|analysis|fix)",
                ViolationType.CODE_WRITING,
            ),
            (
                r"The\s+(query|code|implementation|solution)\s+(would\s+be|is)",
                ViolationType.CODE_WRITING,
            ),
            (r"I\s+(found|identified)\s+(these\s+)?issues", ViolationType.CODE_WRITING),
            # Deployment and setup patterns
            (
                r"Setting\s+up\s+(the\s+)?(authentication|containers|environment|docker)",
                ViolationType.DEPLOYMENT,
            ),
            (r"Deploying\s+(to|the)", ViolationType.DEPLOYMENT),
            (r"I'll\s+(deploy|push|host|launch)", ViolationType.DEPLOYMENT),
            # Tool usage patterns - EXPANDED
            (r"Using\s+(Edit|Write|MultiEdit)\s+tool", ViolationType.EDIT_ATTEMPT),
            (r"<invoke\s+name=\"(Edit|Write|MultiEdit)\"", ViolationType.EDIT_ATTEMPT),
            (
                r"Running\s+(bash\s+command|git\s+commit|npm|yarn|python|node|go|tests|pytest)",
                ViolationType.BASH_IMPLEMENTATION,
            ),
            (r"Executing\s+(tests|test\s+suite|pytest)", ViolationType.TEST_EXECUTION),
            # Testing patterns - EXPANDED
            (
                r"(Testing|I'll\s+test|Let\s+me\s+test)\s+(the\s+)?(payment|API|endpoint)",
                ViolationType.TEST_EXECUTION,
            ),
            (
                r"I'll\s+(run|execute|verify)\s+(the\s+)?(tests|test\s+suite|endpoint)",
                ViolationType.TEST_EXECUTION,
            ),
            (r"pytest|npm\s+test|yarn\s+test|go\s+test", ViolationType.TEST_EXECUTION),
            # File operation patterns - EXPANDED
            (
                r"Creating\s+(new\s+|a\s+|the\s+)?(file|YAML|README|workflow)",
                ViolationType.FILE_CREATION,
            ),
            (r"Writing\s+to\s+file", ViolationType.FILE_CREATION),
            (
                r"Updating\s+(the\s+)?(code|component|queries)",
                ViolationType.CODE_WRITING,
            ),
            (
                r"I'll\s+(update|modify)\s+(the\s+)?(component|code|React)",
                ViolationType.CODE_WRITING,
            ),
        ]

        # Patterns for correct delegation behavior (false-positive filter)
        self.delegation_patterns = [
            r"delegat(e|ing)\s+(to|this)",
            r"Task\s+tool",
            r"(asking|request|have|use)\s+\w+\s+agent",
            r"requesting\s+\w+\s+to",
            r"will\s+(have|ask|request)\s+\w+\s+agent",
            r"I'll\s+(have|ask|request|delegate)",
            r"the\s+\w+\s+agent\s+(will|can|should)",
        ]

    def detect_violation_intent(
        self, message: str
    ) -> Optional[Tuple[ViolationType, str]]:
        """
        Check message for patterns indicating PM violation intent.

        Args:
            message: The PM's message to analyze

        Returns:
            Tuple of (ViolationType, matched_text) if violation detected, None otherwise
        """
        message_lower = message.lower()

        # Check for forbidden patterns
        for pattern, violation_type in self.forbidden_patterns:
            match = re.search(pattern, message_lower, re.IGNORECASE)
            if match:
                # Check if this is actually a delegation (false positive check).
                # BUGFIX: delegation patterns contain capital letters (e.g.
                # "Task\s+tool") but are matched against the lowercased
                # message, so without re.IGNORECASE they could never match
                # and the false-positive filter was partially dead.
                window = message_lower[max(0, match.start() - 50) : match.end() + 50]
                is_delegation = any(
                    re.search(del_pattern, window, re.IGNORECASE)
                    for del_pattern in self.delegation_patterns
                )

                if not is_delegation:
                    return (violation_type, match.group(0))

        return None

    def escalate_warning(self) -> str:
        """
        Generate escalating warning message based on violation count.

        Returns:
            Warning message with appropriate severity
        """
        if self.violation_count == 1:
            return (
                "⚠️ DELEGATION REMINDER: PM must delegate ALL implementation work.\n"
                "Use the Task tool to delegate to the appropriate agent."
            )
        if self.violation_count == 2:
            return (
                "🚨 DELEGATION WARNING: Critical PM violation detected!\n"
                "You MUST delegate implementation work. Do NOT use Edit/Write/Bash for implementation.\n"
                "Next violation will result in session failure."
            )
        if self.violation_count == 3:
            return (
                "❌ CRITICAL DELEGATION FAILURE: Multiple PM violations detected.\n"
                "PM has repeatedly attempted to implement instead of delegate.\n"
                "Session integrity compromised. All work must be delegated to agents."
            )
        return (
            f"❌❌❌ SEVERE VIOLATION (Count: {self.violation_count}): PM continues to violate delegation rules.\n"
            "MANDATORY: Use Task tool to delegate ALL implementation to appropriate agents.\n"
            "Current session may need to be terminated and restarted."
        )

    def check_message(self, message: str) -> Optional[Dict[str, Any]]:
        """
        Check a PM message for violations and return feedback if needed.

        Args:
            message: The PM's message to check

        Returns:
            Dictionary with violation details and correction, or None if compliant
        """
        violation_result = self.detect_violation_intent(message)
        if violation_result is None:
            return None

        violation_type, context = violation_result
        self.violation_count += 1

        # Record the violation
        self.violations.append(
            Violation(
                violation_type=violation_type,
                context=context,
                timestamp=time.time(),
                severity_level=min(self.violation_count, 4),
            )
        )

        # Generate corrective feedback
        warning = self.escalate_warning()

        # Determine which agent should handle this
        agent_mapping = {
            ViolationType.EDIT_ATTEMPT: "Engineer",
            ViolationType.CODE_WRITING: "Engineer",
            ViolationType.BASH_IMPLEMENTATION: "Engineer or Ops",
            ViolationType.FILE_CREATION: "Engineer or Documentation",
            ViolationType.TEST_EXECUTION: "QA",
            ViolationType.DEPLOYMENT: "Ops",
        }
        suggested_agent = agent_mapping.get(violation_type, "appropriate agent")

        # Clean up context for the suggested replacement task.
        # BUGFIX: context comes from the lowercased message, so the previous
        # literal replaces of "I will ", "I'll " and "Let me " (capitalized)
        # never matched; strip the leading phrase case-insensitively instead.
        clean_context = re.sub(
            r"^(i\s+will|i'll|let\s+me)\s+", "", context, flags=re.IGNORECASE
        )

        return {
            "violation_detected": True,
            "violation_count": self.violation_count,
            "violation_type": violation_type.value,
            "context": context,
            "warning": warning,
            "correction": f"MUST delegate to {suggested_agent} using Task tool",
            "suggested_task": f"Task: Please {clean_context}",
            "severity": min(self.violation_count, 4),
        }

    def get_violation_summary(self) -> Dict[str, Any]:
        """
        Get a summary of all violations in the session.

        Returns:
            Dictionary with violation statistics and details
        """
        if not self.violations:
            return {
                "total_violations": 0,
                "status": "COMPLIANT",
                "message": "No PM delegation violations detected",
            }

        # Tally violations per human-readable type label
        violation_types = {}
        for v in self.violations:
            vtype = v.violation_type.value
            violation_types[vtype] = violation_types.get(vtype, 0) + 1

        status = "WARNING" if self.violation_count < 3 else "CRITICAL"

        return {
            "total_violations": self.violation_count,
            "status": status,
            "violation_types": violation_types,
            "most_recent": self.violations[-1].context if self.violations else None,
            "recommendation": (
                "Review PM delegation training"
                if self.violation_count > 2
                else "Continue monitoring"
            ),
        }

    def reset(self):
        """Reset violation tracking for a new session"""
        self.violation_count = 0
        self.violations = []
|
|
@@ -69,3 +69,62 @@ class DeploymentServiceWrapper:
|
|
|
69
69
|
"errors": [],
|
|
70
70
|
"target_dir": str(project_dir),
|
|
71
71
|
}
|
|
72
|
+
|
|
73
|
+
def get_agent_details(self, agent_name: str) -> Dict[str, Any]:
    """Get detailed information for a specific agent.

    Args:
        agent_name: Name of the agent

    Returns:
        Agent details dictionary; if the agent is not found or an error
        occurs, a dict with ``exists: False`` and an ``error`` message.
    """
    try:
        # Try to get from list of available agents
        available_agents = self.service.list_available_agents()
        for agent in available_agents:
            if agent.get("name") != agent_name:
                continue

            # Get template path for the agent
            templates_dir = self.service.templates_dir
            agent_path = templates_dir / f"{agent_name}.md"

            # Read agent content if file exists; otherwise keep scanning
            # (falls through to the not-found result below).
            if agent_path.exists():
                with open(agent_path, encoding="utf-8") as f:
                    content = f.read()

                # Parse YAML frontmatter metadata from content
                import yaml

                metadata = {}
                if content.startswith("---"):
                    # Extract frontmatter between the first two "---" markers
                    parts = content.split("---", 2)
                    if len(parts) >= 2:
                        try:
                            # BUGFIX: safe_load may return None (empty
                            # frontmatter) or a non-dict scalar; guard so
                            # the metadata.get(...) calls below cannot
                            # raise AttributeError.
                            loaded = yaml.safe_load(parts[1])
                            if isinstance(loaded, dict):
                                metadata = loaded
                        except yaml.YAMLError:
                            pass

                return {
                    "name": agent_name,
                    "path": str(agent_path),
                    "type": agent.get("type", "agent"),
                    "version": metadata.get("version", "1.0.0"),
                    "description": metadata.get("description", ""),
                    "specializations": metadata.get("specializations", []),
                    "metadata": metadata,
                    "content": content,
                    "exists": True,
                }

        # Agent not found in available agents
        return {
            "name": agent_name,
            "exists": False,
            "error": f"Agent '{agent_name}' not found",
        }

    except Exception as e:
        # Best-effort API: report the error instead of raising
        return {"name": agent_name, "exists": False, "error": str(e)}
|
claude_mpm/utils/log_cleanup.py
CHANGED
|
@@ -9,7 +9,7 @@ import gzip
|
|
|
9
9
|
import logging
|
|
10
10
|
import os
|
|
11
11
|
import shutil
|
|
12
|
-
from datetime import datetime, timedelta
|
|
12
|
+
from datetime import datetime, timedelta, timezone
|
|
13
13
|
from pathlib import Path
|
|
14
14
|
from typing import Dict, Optional, Tuple
|
|
15
15
|
|
|
@@ -92,7 +92,7 @@ class LogCleanupUtility:
|
|
|
92
92
|
logger.info(f"Sessions directory not found: {sessions_dir}")
|
|
93
93
|
return 0, 0.0
|
|
94
94
|
|
|
95
|
-
cutoff_time = datetime.now() - timedelta(days=max_age_days)
|
|
95
|
+
cutoff_time = datetime.now(timezone.utc) - timedelta(days=max_age_days)
|
|
96
96
|
removed_count = 0
|
|
97
97
|
total_size = 0.0
|
|
98
98
|
|
|
@@ -107,7 +107,7 @@ class LogCleanupUtility:
|
|
|
107
107
|
|
|
108
108
|
try:
|
|
109
109
|
# Check directory modification time
|
|
110
|
-
mtime = datetime.fromtimestamp(session_dir.stat().st_mtime)
|
|
110
|
+
mtime = datetime.fromtimestamp(session_dir.stat().st_mtime, tz=timezone.utc)
|
|
111
111
|
|
|
112
112
|
if mtime < cutoff_time:
|
|
113
113
|
# Calculate directory size
|
|
@@ -117,14 +117,14 @@ class LogCleanupUtility:
|
|
|
117
117
|
if dry_run:
|
|
118
118
|
logger.info(
|
|
119
119
|
f"[DRY RUN] Would remove session: {session_dir.name} "
|
|
120
|
-
f"(age: {(datetime.now() - mtime).days} days, "
|
|
120
|
+
f"(age: {(datetime.now(timezone.utc) - mtime).days} days, "
|
|
121
121
|
f"size: {dir_size:.2f} MB)"
|
|
122
122
|
)
|
|
123
123
|
else:
|
|
124
124
|
shutil.rmtree(session_dir)
|
|
125
125
|
logger.info(
|
|
126
126
|
f"Removed session: {session_dir.name} "
|
|
127
|
-
f"(age: {(datetime.now() - mtime).days} days, "
|
|
127
|
+
f"(age: {(datetime.now(timezone.utc) - mtime).days} days, "
|
|
128
128
|
f"size: {dir_size:.2f} MB)"
|
|
129
129
|
)
|
|
130
130
|
|
|
@@ -159,7 +159,7 @@ class LogCleanupUtility:
|
|
|
159
159
|
Returns:
|
|
160
160
|
Tuple of (files removed, space freed in MB)
|
|
161
161
|
"""
|
|
162
|
-
cutoff_time = datetime.now() - timedelta(days=max_age_days)
|
|
162
|
+
cutoff_time = datetime.now(timezone.utc) - timedelta(days=max_age_days)
|
|
163
163
|
removed_count = 0
|
|
164
164
|
total_size = 0.0
|
|
165
165
|
|
|
@@ -169,7 +169,7 @@ class LogCleanupUtility:
|
|
|
169
169
|
for ext in LogCleanupConfig.ARCHIVE_EXTENSIONS:
|
|
170
170
|
for archive_file in self.base_log_dir.rglob(f"*{ext}"):
|
|
171
171
|
try:
|
|
172
|
-
mtime = datetime.fromtimestamp(archive_file.stat().st_mtime)
|
|
172
|
+
mtime = datetime.fromtimestamp(archive_file.stat().st_mtime, tz=timezone.utc)
|
|
173
173
|
|
|
174
174
|
if mtime < cutoff_time:
|
|
175
175
|
file_size = archive_file.stat().st_size / (1024 * 1024) # MB
|
|
@@ -178,14 +178,14 @@ class LogCleanupUtility:
|
|
|
178
178
|
if dry_run:
|
|
179
179
|
logger.info(
|
|
180
180
|
f"[DRY RUN] Would remove archive: {archive_file.name} "
|
|
181
|
-
f"(age: {(datetime.now() - mtime).days} days, "
|
|
181
|
+
f"(age: {(datetime.now(timezone.utc) - mtime).days} days, "
|
|
182
182
|
f"size: {file_size:.2f} MB)"
|
|
183
183
|
)
|
|
184
184
|
else:
|
|
185
185
|
archive_file.unlink()
|
|
186
186
|
logger.info(
|
|
187
187
|
f"Removed archive: {archive_file.name} "
|
|
188
|
-
f"(age: {(datetime.now() - mtime).days} days, "
|
|
188
|
+
f"(age: {(datetime.now(timezone.utc) - mtime).days} days, "
|
|
189
189
|
f"size: {file_size:.2f} MB)"
|
|
190
190
|
)
|
|
191
191
|
|
|
@@ -218,7 +218,7 @@ class LogCleanupUtility:
|
|
|
218
218
|
Returns:
|
|
219
219
|
Tuple of (files removed, space freed in MB)
|
|
220
220
|
"""
|
|
221
|
-
cutoff_time = datetime.now() - timedelta(days=max_age_days)
|
|
221
|
+
cutoff_time = datetime.now(timezone.utc) - timedelta(days=max_age_days)
|
|
222
222
|
removed_count = 0
|
|
223
223
|
total_size = 0.0
|
|
224
224
|
|
|
@@ -238,7 +238,7 @@ class LogCleanupUtility:
|
|
|
238
238
|
|
|
239
239
|
for log_file in log_dir.glob(pattern):
|
|
240
240
|
try:
|
|
241
|
-
mtime = datetime.fromtimestamp(log_file.stat().st_mtime)
|
|
241
|
+
mtime = datetime.fromtimestamp(log_file.stat().st_mtime, tz=timezone.utc)
|
|
242
242
|
|
|
243
243
|
if mtime < cutoff_time:
|
|
244
244
|
file_size = log_file.stat().st_size / (1024 * 1024) # MB
|
|
@@ -247,14 +247,14 @@ class LogCleanupUtility:
|
|
|
247
247
|
if dry_run:
|
|
248
248
|
logger.info(
|
|
249
249
|
f"[DRY RUN] Would remove log: {log_file.name} "
|
|
250
|
-
f"(age: {(datetime.now() - mtime).days} days, "
|
|
250
|
+
f"(age: {(datetime.now(timezone.utc) - mtime).days} days, "
|
|
251
251
|
f"size: {file_size:.2f} MB)"
|
|
252
252
|
)
|
|
253
253
|
else:
|
|
254
254
|
log_file.unlink()
|
|
255
255
|
logger.info(
|
|
256
256
|
f"Removed log: {log_file.name} "
|
|
257
|
-
f"(age: {(datetime.now() - mtime).days} days, "
|
|
257
|
+
f"(age: {(datetime.now(timezone.utc) - mtime).days} days, "
|
|
258
258
|
f"size: {file_size:.2f} MB)"
|
|
259
259
|
)
|
|
260
260
|
|
|
@@ -321,7 +321,7 @@ class LogCleanupUtility:
|
|
|
321
321
|
Returns:
|
|
322
322
|
Tuple of (files compressed, space saved in MB)
|
|
323
323
|
"""
|
|
324
|
-
cutoff_time = datetime.now() - timedelta(days=age_days)
|
|
324
|
+
cutoff_time = datetime.now(timezone.utc) - timedelta(days=age_days)
|
|
325
325
|
compressed_count = 0
|
|
326
326
|
space_saved = 0.0
|
|
327
327
|
|
|
@@ -331,7 +331,7 @@ class LogCleanupUtility:
|
|
|
331
331
|
continue
|
|
332
332
|
|
|
333
333
|
try:
|
|
334
|
-
mtime = datetime.fromtimestamp(log_file.stat().st_mtime)
|
|
334
|
+
mtime = datetime.fromtimestamp(log_file.stat().st_mtime, tz=timezone.utc)
|
|
335
335
|
|
|
336
336
|
if mtime < cutoff_time:
|
|
337
337
|
original_size = log_file.stat().st_size / (1024 * 1024) # MB
|
|
@@ -408,7 +408,7 @@ class LogCleanupUtility:
|
|
|
408
408
|
stats["oldest_session"] = {
|
|
409
409
|
"name": oldest.name,
|
|
410
410
|
"age_days": (
|
|
411
|
-
datetime.now() - datetime.fromtimestamp(oldest.stat().st_mtime)
|
|
411
|
+
datetime.now(timezone.utc) - datetime.fromtimestamp(oldest.stat().st_mtime, tz=timezone.utc)
|
|
412
412
|
).days,
|
|
413
413
|
}
|
|
414
414
|
|
|
@@ -429,7 +429,7 @@ class LogCleanupUtility:
|
|
|
429
429
|
"name": oldest_log.name,
|
|
430
430
|
"path": str(oldest_log.relative_to(self.base_log_dir)),
|
|
431
431
|
"age_days": (
|
|
432
|
-
datetime.now() - datetime.fromtimestamp(oldest_log.stat().st_mtime)
|
|
432
|
+
datetime.now(timezone.utc) - datetime.fromtimestamp(oldest_log.stat().st_mtime, tz=timezone.utc)
|
|
433
433
|
).days,
|
|
434
434
|
}
|
|
435
435
|
|
|
@@ -83,7 +83,7 @@ def run_command(command_string: str, timeout: float = 60) -> str:
|
|
|
83
83
|
returncode=getattr(e, "returncode", None),
|
|
84
84
|
stdout=getattr(e, "stdout", ""),
|
|
85
85
|
stderr=stderr,
|
|
86
|
-
)
|
|
86
|
+
) from e
|
|
87
87
|
|
|
88
88
|
|
|
89
89
|
def run_subprocess(
|
|
@@ -137,16 +137,16 @@ def run_subprocess(
|
|
|
137
137
|
returncode=None,
|
|
138
138
|
stdout=e.stdout.decode() if e.stdout else "",
|
|
139
139
|
stderr=e.stderr.decode() if e.stderr else "",
|
|
140
|
-
)
|
|
140
|
+
) from e
|
|
141
141
|
except subprocess.CalledProcessError as e:
|
|
142
142
|
raise SubprocessError(
|
|
143
143
|
f"Command failed with return code {e.returncode}: {' '.join(cmd)}",
|
|
144
144
|
returncode=e.returncode,
|
|
145
145
|
stdout=e.stdout if e.stdout else "",
|
|
146
146
|
stderr=e.stderr if e.stderr else "",
|
|
147
|
-
)
|
|
147
|
+
) from e
|
|
148
148
|
except Exception as e:
|
|
149
|
-
raise SubprocessError(f"Subprocess execution failed: {e}")
|
|
149
|
+
raise SubprocessError(f"Subprocess execution failed: {e}") from e
|
|
150
150
|
|
|
151
151
|
|
|
152
152
|
async def run_subprocess_async(
|
|
@@ -195,7 +195,7 @@ async def run_subprocess_async(
|
|
|
195
195
|
await process.wait()
|
|
196
196
|
raise SubprocessError(
|
|
197
197
|
f"Command timed out after {timeout}s: {' '.join(cmd)}", returncode=None
|
|
198
|
-
)
|
|
198
|
+
) from None
|
|
199
199
|
|
|
200
200
|
return SubprocessResult(
|
|
201
201
|
returncode=process.returncode,
|
|
@@ -205,7 +205,7 @@ async def run_subprocess_async(
|
|
|
205
205
|
|
|
206
206
|
except Exception as e:
|
|
207
207
|
if not isinstance(e, SubprocessError):
|
|
208
|
-
raise SubprocessError(f"Async subprocess execution failed: {e}")
|
|
208
|
+
raise SubprocessError(f"Async subprocess execution failed: {e}") from e
|
|
209
209
|
raise
|
|
210
210
|
|
|
211
211
|
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: claude-mpm
|
|
3
|
-
Version: 4.3.6
|
|
3
|
+
Version: 4.3.11
|
|
4
4
|
Summary: Claude Multi-Agent Project Manager - Orchestrate Claude with agent delegation and ticket tracking
|
|
5
5
|
Author-email: Bob Matsuoka <bob@matsuoka.com>
|
|
6
6
|
Maintainer: Claude MPM Team
|
|
@@ -120,6 +120,26 @@ Requires-Dist: tree-sitter-cpp>=0.21.0; extra == "agents"
|
|
|
120
120
|
Requires-Dist: tree-sitter-c>=0.21.0; extra == "agents"
|
|
121
121
|
Requires-Dist: tree-sitter-ruby>=0.21.0; extra == "agents"
|
|
122
122
|
Requires-Dist: tree-sitter-php>=0.21.0; extra == "agents"
|
|
123
|
+
Provides-Extra: data-processing
|
|
124
|
+
Requires-Dist: pandas>=2.1.0; extra == "data-processing"
|
|
125
|
+
Requires-Dist: openpyxl>=3.1.0; extra == "data-processing"
|
|
126
|
+
Requires-Dist: xlsxwriter>=3.1.0; extra == "data-processing"
|
|
127
|
+
Requires-Dist: numpy>=1.24.0; extra == "data-processing"
|
|
128
|
+
Requires-Dist: pyarrow>=14.0.0; extra == "data-processing"
|
|
129
|
+
Requires-Dist: dask>=2023.12.0; extra == "data-processing"
|
|
130
|
+
Requires-Dist: polars>=0.19.0; extra == "data-processing"
|
|
131
|
+
Requires-Dist: xlrd>=2.0.0; extra == "data-processing"
|
|
132
|
+
Requires-Dist: xlwt>=1.3.0; extra == "data-processing"
|
|
133
|
+
Requires-Dist: csvkit>=1.3.0; extra == "data-processing"
|
|
134
|
+
Requires-Dist: tabulate>=0.9.0; extra == "data-processing"
|
|
135
|
+
Requires-Dist: python-dateutil>=2.8.0; extra == "data-processing"
|
|
136
|
+
Requires-Dist: lxml>=4.9.0; extra == "data-processing"
|
|
137
|
+
Requires-Dist: sqlalchemy>=2.0.0; extra == "data-processing"
|
|
138
|
+
Requires-Dist: psycopg2-binary>=2.9.0; extra == "data-processing"
|
|
139
|
+
Requires-Dist: pymongo>=4.5.0; extra == "data-processing"
|
|
140
|
+
Requires-Dist: redis>=5.0.0; extra == "data-processing"
|
|
141
|
+
Requires-Dist: beautifulsoup4>=4.12.0; extra == "data-processing"
|
|
142
|
+
Requires-Dist: jsonschema>=4.19.0; extra == "data-processing"
|
|
123
143
|
Dynamic: license-file
|
|
124
144
|
|
|
125
145
|
# Claude MPM - Multi-Agent Project Manager
|