claude-mpm 3.9.9__py3-none-any.whl → 3.9.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/templates/memory_manager.json +155 -0
  3. claude_mpm/cli/__init__.py +15 -2
  4. claude_mpm/cli/commands/__init__.py +3 -0
  5. claude_mpm/cli/commands/mcp.py +280 -134
  6. claude_mpm/cli/commands/run_guarded.py +511 -0
  7. claude_mpm/cli/parser.py +8 -2
  8. claude_mpm/config/experimental_features.py +219 -0
  9. claude_mpm/config/memory_guardian_yaml.py +335 -0
  10. claude_mpm/constants.py +1 -0
  11. claude_mpm/core/memory_aware_runner.py +353 -0
  12. claude_mpm/services/infrastructure/context_preservation.py +537 -0
  13. claude_mpm/services/infrastructure/graceful_degradation.py +616 -0
  14. claude_mpm/services/infrastructure/health_monitor.py +775 -0
  15. claude_mpm/services/infrastructure/memory_dashboard.py +479 -0
  16. claude_mpm/services/infrastructure/memory_guardian.py +189 -15
  17. claude_mpm/services/infrastructure/restart_protection.py +642 -0
  18. claude_mpm/services/infrastructure/state_manager.py +774 -0
  19. claude_mpm/services/mcp_gateway/__init__.py +11 -11
  20. claude_mpm/services/mcp_gateway/core/__init__.py +2 -2
  21. claude_mpm/services/mcp_gateway/core/interfaces.py +10 -9
  22. claude_mpm/services/mcp_gateway/main.py +35 -5
  23. claude_mpm/services/mcp_gateway/manager.py +334 -0
  24. claude_mpm/services/mcp_gateway/registry/service_registry.py +4 -8
  25. claude_mpm/services/mcp_gateway/server/__init__.py +2 -2
  26. claude_mpm/services/mcp_gateway/server/{mcp_server.py → mcp_gateway.py} +60 -59
  27. claude_mpm/services/mcp_gateway/tools/base_adapter.py +1 -2
  28. claude_mpm/services/ticket_manager.py +8 -8
  29. claude_mpm/services/ticket_manager_di.py +5 -5
  30. claude_mpm/storage/__init__.py +9 -0
  31. claude_mpm/storage/state_storage.py +556 -0
  32. {claude_mpm-3.9.9.dist-info → claude_mpm-3.9.11.dist-info}/METADATA +25 -2
  33. {claude_mpm-3.9.9.dist-info → claude_mpm-3.9.11.dist-info}/RECORD +37 -24
  34. claude_mpm/services/mcp_gateway/server/mcp_server_simple.py +0 -444
  35. {claude_mpm-3.9.9.dist-info → claude_mpm-3.9.11.dist-info}/WHEEL +0 -0
  36. {claude_mpm-3.9.9.dist-info → claude_mpm-3.9.11.dist-info}/entry_points.txt +0 -0
  37. {claude_mpm-3.9.9.dist-info → claude_mpm-3.9.11.dist-info}/licenses/LICENSE +0 -0
  38. {claude_mpm-3.9.9.dist-info → claude_mpm-3.9.11.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,537 @@
1
+ """Context Preservation service for handling Claude conversation data.
2
+
3
+ This service specializes in parsing and preserving Claude's conversation
4
+ context, handling large .claude.json files efficiently.
5
+
6
+ Design Principles:
7
+ - Streaming JSON parsing for large files
8
+ - Memory-efficient processing
9
+ - Privacy-preserving extraction
10
+ - Graceful handling of corrupted data
11
+ """
12
+
13
+ import json
14
+ import logging
15
+ import gzip
16
+ import os
17
+ import shutil
18
+ import tempfile
19
+ from datetime import datetime, timedelta
20
+ from pathlib import Path
21
+ from typing import Dict, Any, List, Optional, Iterator, Tuple
22
+ import ijson # For streaming JSON parsing
23
+
24
+ from claude_mpm.services.core.base import BaseService
25
+ from claude_mpm.models.state_models import ConversationState, ConversationContext
26
+
27
+
28
+ class ContextPreservationService(BaseService):
29
+ """Service for preserving and managing Claude conversation context."""
30
+
31
def __init__(self, claude_dir: Optional[Path] = None):
    """Set up the Context Preservation service.

    Args:
        claude_dir: Claude configuration directory (default: ~/.claude)
    """
    super().__init__("ContextPreservation")

    # Resolve the Claude configuration locations once, up front.
    base = claude_dir or Path.home() / ".claude"
    self.claude_dir = base
    self.claude_json_path = base / ".claude.json"
    self.claude_backup_dir = base / "backups"

    # File-size thresholds, in megabytes.
    self.large_file_threshold_mb = 100
    self.compression_threshold_mb = 50

    # Caps applied while extracting conversation context.
    self.max_conversations_to_extract = 10
    self.max_messages_per_conversation = 100
    self.max_file_references = 1000

    # Running statistics.
    self.files_processed = 0
    self.total_size_processed_mb = 0.0

    self.log_info(f"Context Preservation initialized for: {self.claude_dir}")
58
+
59
async def initialize(self) -> bool:
    """Prepare the service: create the backup directory and probe for
    the Claude configuration file.

    Returns:
        True if initialization successful
    """
    try:
        self.log_info("Initializing Context Preservation service")

        # Backups land here; build the directory tree up front.
        self.claude_backup_dir.mkdir(parents=True, exist_ok=True)

        # Report on the Claude configuration, if present.
        if not self.claude_json_path.exists():
            self.log_warning("Claude configuration not found")
        else:
            size_mb = self.claude_json_path.stat().st_size / (1024 * 1024)
            self.log_info(f"Found Claude configuration: {size_mb:.2f}MB")

        self._initialized = True
        self.log_info("Context Preservation service initialized successfully")
        return True

    except Exception as e:
        self.log_error(f"Failed to initialize Context Preservation: {e}")
        return False
85
+
86
async def parse_claude_json(self,
                            extract_full: bool = False) -> Optional[ConversationState]:
    """Parse Claude's .claude.json file safely.

    Chooses a parsing strategy by file size: anything above
    ``large_file_threshold_mb`` goes through the streaming parser,
    smaller files through the standard json module.

    Args:
        extract_full: Whether to extract full conversation data

    Returns:
        ConversationState object or None if parsing failed
    """
    try:
        if not self.claude_json_path.exists():
            self.log_debug("Claude configuration file not found")
            return self._empty_conversation_state()

        size_mb = self.claude_json_path.stat().st_size / (1024 * 1024)
        self.log_info(f"Parsing Claude configuration: {size_mb:.2f}MB")

        if size_mb <= self.large_file_threshold_mb:
            return await self._parse_standard_claude_json(extract_full)

        self.log_info("Using streaming parser for large file")
        return await self._parse_large_claude_json(extract_full)

    except Exception as e:
        self.log_error(f"Failed to parse Claude JSON: {e}")
        return None
114
+
115
async def extract_active_conversation(self) -> Optional[ConversationContext]:
    """Extract only the active conversation context.

    Streams the configuration file with ijson so the (potentially huge)
    document is never fully loaded; only top-level scalar fields of each
    conversation are retained.

    Returns:
        ConversationContext for active conversation or None

    Bug fixed: the previous version assigned ``current_key`` but never
    stored any values into ``current_conv``, so the identity check
    ``current_conv.get('id') == active_conv_id`` could never succeed and
    the method always returned None; ``current_key`` could also be
    referenced before assignment.
    """
    try:
        if not self.claude_json_path.exists():
            return None

        active_conv_id = None
        # Minimal per-conversation summaries keyed by conversation id.
        # NOTE(review): activeConversationId may appear after the
        # conversations array in the JSON, so we collect summaries for
        # all conversations and resolve the match after the pass.
        summaries: Dict[Any, Dict[str, Any]] = {}
        current_conv: Dict[str, Any] = {}

        with open(self.claude_json_path, 'rb') as f:
            for prefix, event, value in ijson.parse(f):
                if prefix == 'activeConversationId':
                    active_conv_id = value
                elif prefix == 'conversations.item' and event == 'start_map':
                    current_conv = {}
                elif prefix == 'conversations.item' and event == 'end_map':
                    conv_id = current_conv.get('id')
                    if conv_id is not None:
                        summaries[conv_id] = current_conv
                    current_conv = {}
                elif prefix.startswith('conversations.item.') and prefix.count('.') == 2:
                    # Top-level scalar field of the current conversation
                    # (e.g. 'conversations.item.id' -> key 'id').
                    if event in ('string', 'number', 'boolean', 'null'):
                        current_conv[prefix.rsplit('.', 1)[1]] = value

        if active_conv_id is not None and active_conv_id in summaries:
            return self._create_conversation_context(summaries[active_conv_id])
        return None

    except Exception as e:
        self.log_error(f"Failed to extract active conversation: {e}")
        return None
151
+
152
async def compress_conversation_history(self,
                                        keep_recent_days: int = 7) -> bool:
    """Compress large conversation histories.

    Conversations older than ``keep_recent_days`` are replaced by minimal
    archive stubs (id/title/timestamps/message count); the file is then
    rewritten in compact JSON. A backup is taken first, and the rewrite
    goes through a temp file + atomic replace.

    Args:
        keep_recent_days: Days of recent conversations to keep uncompressed

    Returns:
        True if compression successful
    """
    try:
        if not self.claude_json_path.exists():
            return False

        file_size_mb = self.claude_json_path.stat().st_size / (1024 * 1024)

        # Skip files below the compression threshold — not worth the churn.
        if file_size_mb < self.compression_threshold_mb:
            self.log_debug(f"File too small for compression: {file_size_mb:.2f}MB")
            return False

        self.log_info(f"Compressing conversation history: {file_size_mb:.2f}MB")

        # Create backup first (path unused here; backup exists for recovery).
        backup_path = await self._create_backup()

        # Load and filter conversations.
        # Conversations updated at or after this epoch-seconds cutoff are kept.
        cutoff_time = datetime.now().timestamp() - (keep_recent_days * 86400)

        with open(self.claude_json_path, 'r') as f:
            data = json.load(f)

        original_count = len(data.get('conversations', []))

        # Filter conversations into "recent" (kept in full) and "archived"
        # (reduced to lightweight metadata stubs).
        recent_conversations = []
        archived_conversations = []

        for conv in data.get('conversations', []):
            # assumes 'updatedAt' is in milliseconds — TODO confirm
            updated_at = conv.get('updatedAt', 0) / 1000  # Convert from ms
            if updated_at >= cutoff_time:
                recent_conversations.append(conv)
            else:
                # Create minimal version for archive (drops message bodies).
                archived_conversations.append({
                    'id': conv.get('id'),
                    'title': conv.get('title'),
                    'createdAt': conv.get('createdAt'),
                    'updatedAt': conv.get('updatedAt'),
                    'messageCount': len(conv.get('messages', [])),
                    'archived': True
                })

        # Update data with filtered conversations.
        data['conversations'] = recent_conversations
        data['archivedConversations'] = archived_conversations

        # Write compressed version to a temp file, then atomically replace
        # the original so a crash mid-write cannot corrupt it.
        temp_path = self.claude_json_path.with_suffix('.tmp')
        with open(temp_path, 'w') as f:
            json.dump(data, f, separators=(',', ':'))  # Compact format

        # Replace original.
        temp_path.replace(self.claude_json_path)

        new_size_mb = self.claude_json_path.stat().st_size / (1024 * 1024)
        # file_size_mb > 0 is guaranteed by the threshold check above.
        reduction_pct = ((file_size_mb - new_size_mb) / file_size_mb) * 100

        self.log_info(f"Compression complete: {original_count} -> {len(recent_conversations)} "
                      f"conversations, {file_size_mb:.2f}MB -> {new_size_mb:.2f}MB "
                      f"({reduction_pct:.1f}% reduction)")

        return True

    except Exception as e:
        self.log_error(f"Failed to compress conversation history: {e}")
        return False
228
+
229
async def handle_file_references(self,
                                 conversation: Dict[str, Any]) -> List[str]:
    """Extract and validate file references from conversation.

    Scans message bodies and file attachments for path-like strings,
    then keeps only paths that currently exist on disk, capped at
    ``max_file_references``.

    Args:
        conversation: Conversation data dictionary

    Returns:
        List of valid file paths referenced in conversation
    """
    try:
        files = set()

        # Collect candidates from message bodies and attachments.
        for message in conversation.get('messages', [])[:self.max_messages_per_conversation]:
            content = message.get('content', '')
            if isinstance(content, str):
                files.update(self._extract_file_paths(content))

            for attachment in message.get('attachments', []):
                if attachment.get('type') == 'file':
                    file_path = attachment.get('path')
                    if file_path:
                        files.add(file_path)

        # Keep only paths that exist on this machine.
        valid_files = []
        for file_path in list(files)[:self.max_file_references]:
            try:
                if Path(file_path).exists():
                    valid_files.append(file_path)
            except (OSError, ValueError):
                # Malformed path (embedded NUL, too long, ...): skip it.
                # Fix: was a bare `except:`, which also swallowed
                # SystemExit / KeyboardInterrupt.
                pass

        return valid_files

    except Exception as e:
        self.log_error(f"Failed to handle file references: {e}")
        return []
270
+
271
async def preserve_user_preferences(self) -> Dict[str, Any]:
    """Extract and preserve user preferences and settings.

    Streams the configuration file and collects scalar values found
    under the top-level ``preferences`` object.

    Returns:
        Dictionary of user preferences
    """
    try:
        if not self.claude_json_path.exists():
            return {}

        # Use streaming to extract preferences without loading the file.
        preferences = {}
        # Fix: current_key was referenced before assignment whenever a
        # scalar event arrived before any map_key.
        current_key = None

        with open(self.claude_json_path, 'rb') as f:
            parser = ijson.parse(f)

            for prefix, event, value in parser:
                if prefix.startswith('preferences'):
                    if event == 'map_key':
                        current_key = value
                    elif current_key is not None and event in ('string', 'number', 'boolean'):
                        preferences[current_key] = value

        self.log_debug(f"Preserved {len(preferences)} user preferences")
        return preferences

    except Exception as e:
        self.log_error(f"Failed to preserve user preferences: {e}")
        return {}
301
+
302
async def _parse_standard_claude_json(self,
                                      extract_full: bool) -> ConversationState:
    """Parse Claude JSON in one shot with the standard json module."""
    try:
        with open(self.claude_json_path, 'r') as f:
            parsed = json.load(f)
    except json.JSONDecodeError as e:
        # Corrupted configuration: fall back to an empty snapshot.
        self.log_error(f"JSON decode error: {e}")
        return self._empty_conversation_state()
    return await self._extract_conversation_state(parsed, extract_full)
314
+
315
async def _parse_large_claude_json(self,
                                   extract_full: bool) -> ConversationState:
    """Parse large Claude JSON using streaming parser.

    Streams the document with ijson so it is never fully materialized.
    Extraction stops after ``max_conversations_to_extract`` conversations;
    only top-level scalar fields of each conversation are captured.
    Preferences are not collected on this path (left empty).

    Bugs fixed: ``current_key`` could be referenced before assignment;
    nested ``end_map`` events (whose prefixes also start with
    'conversations.item') prematurely closed a conversation and appended
    partial dicts; container start events stored ``None`` values.
    """
    try:
        active_conv_id = None
        conversations = []
        preferences = {}
        open_files = []

        with open(self.claude_json_path, 'rb') as f:
            parser = ijson.parse(f)

            conversation_count = 0
            current_conv = {}
            in_conversation = False

            for prefix, event, value in parser:
                # Stop once enough conversations were captured.
                if conversation_count >= self.max_conversations_to_extract:
                    break

                # Extract active conversation ID.
                if prefix == 'activeConversationId':
                    active_conv_id = value

                # Conversation boundaries: only the exact item prefix marks
                # the start/end of a conversation object; deeper prefixes
                # belong to nested maps inside it.
                elif prefix == 'conversations.item' and event == 'start_map':
                    in_conversation = True
                    current_conv = {}
                elif prefix == 'conversations.item' and event == 'end_map':
                    if in_conversation and current_conv:
                        conversations.append(current_conv)
                        conversation_count += 1
                    in_conversation = False
                    current_conv = {}

                # Top-level scalar field of the current conversation
                # (e.g. 'conversations.item.id' -> key 'id').
                elif (in_conversation
                        and prefix.startswith('conversations.item.')
                        and prefix.count('.') == 2
                        and event in ('string', 'number', 'boolean', 'null')):
                    current_conv[prefix.rsplit('.', 1)[1]] = value

                # Extract open files.
                elif prefix.startswith('openFiles.item'):
                    if event == 'string':
                        open_files.append(value)

        # Split the captured conversations into active vs. recent.
        active_conv = None
        recent_convs = []

        for conv in conversations:
            if conv.get('id') == active_conv_id:
                active_conv = self._create_conversation_context(conv)
            else:
                recent_convs.append(self._create_conversation_context(conv))

        file_size_mb = self.claude_json_path.stat().st_size / (1024 * 1024)

        return ConversationState(
            active_conversation_id=active_conv_id,
            active_conversation=active_conv,
            recent_conversations=recent_convs[:5],
            total_conversations=len(conversations),
            total_storage_mb=file_size_mb,
            preferences=preferences,
            open_files=open_files[:100],
            recent_files=[],
            pinned_files=[]
        )

    except Exception as e:
        self.log_error(f"Failed to parse large Claude JSON: {e}")
        return self._empty_conversation_state()
389
+
390
async def _extract_conversation_state(self,
                                      data: Dict[str, Any],
                                      extract_full: bool) -> ConversationState:
    """Build a ConversationState snapshot from fully-parsed JSON data."""
    try:
        conversations = data.get('conversations', [])
        active_conv_id = data.get('activeConversationId')

        # Locate the active conversation, if any.
        active_conv = None
        if active_conv_id:
            match = next(
                (c for c in conversations if c.get('id') == active_conv_id),
                None
            )
            if match is not None:
                active_conv = self._create_conversation_context(match)

        # Most-recently-updated conversations, excluding the active one.
        recent_convs = []
        if extract_full:
            by_recency = sorted(
                conversations,
                key=lambda c: c.get('updatedAt', 0),
                reverse=True
            )
            recent_convs = [
                self._create_conversation_context(c)
                for c in by_recency[:self.max_conversations_to_extract]
                if c.get('id') != active_conv_id
            ]

        file_size_mb = self.claude_json_path.stat().st_size / (1024 * 1024)

        return ConversationState(
            active_conversation_id=active_conv_id,
            active_conversation=active_conv,
            recent_conversations=recent_convs,
            total_conversations=len(conversations),
            total_storage_mb=file_size_mb,
            preferences=data.get('preferences', {}),
            open_files=data.get('openFiles', [])[:100],
            recent_files=data.get('recentFiles', [])[:100],
            pinned_files=data.get('pinnedFiles', [])[:50]
        )

    except Exception as e:
        self.log_error(f"Failed to extract conversation state: {e}")
        return self._empty_conversation_state()
436
+
437
def _create_conversation_context(self,
                                 conv: Dict[str, Any]) -> ConversationContext:
    """Map a raw conversation dict onto a ConversationContext."""
    # Timestamps arrive in milliseconds; normalize to seconds.
    created_s = conv.get('createdAt', 0) / 1000
    updated_s = conv.get('updatedAt', 0) / 1000
    return ConversationContext(
        conversation_id=conv.get('id', ''),
        title=conv.get('title', 'Untitled'),
        created_at=created_s,
        updated_at=updated_s,
        message_count=len(conv.get('messages', [])),
        total_tokens=conv.get('totalTokens', 0),
        max_tokens=conv.get('maxTokens', 100000),
        referenced_files=[],  # full extraction would be needed to populate
        open_tabs=conv.get('openTabs', []),
        tags=conv.get('tags', []),
        is_active=False
    )
453
+
454
def _empty_conversation_state(self) -> ConversationState:
    """Return a ConversationState representing 'no data available'."""
    blank: Dict[str, Any] = dict(
        active_conversation_id=None,
        active_conversation=None,
        recent_conversations=[],
        total_conversations=0,
        total_storage_mb=0.0,
        preferences={},
        open_files=[],
        recent_files=[],
        pinned_files=[]
    )
    return ConversationState(**blank)
467
+
468
+ def _extract_file_paths(self, content: str) -> List[str]:
469
+ """Extract file paths from message content."""
470
+ import re
471
+
472
+ files = set()
473
+
474
+ # Common file path patterns
475
+ patterns = [
476
+ r'[\'"`]([/\\]?(?:[a-zA-Z]:)?[/\\]?[\w\-_./\\]+\.\w+)[\'"`]',
477
+ r'(?:^|\s)([/\\]?(?:[a-zA-Z]:)?[/\\]?[\w\-_./\\]+\.\w+)(?:\s|$)',
478
+ ]
479
+
480
+ for pattern in patterns:
481
+ matches = re.findall(pattern, content)
482
+ files.update(matches)
483
+
484
+ return list(files)
485
+
486
async def _create_backup(self) -> Path:
    """Copy the Claude configuration into the backup directory.

    Configurations above ``compression_threshold_mb`` are stored
    gzip-compressed; smaller ones are plain copies.

    Returns:
        Path of the backup that was written.

    Raises:
        Exception: re-raised after logging if the backup fails.
    """
    try:
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        name = f"claude_backup_{stamp}.json"

        # Decide once whether to compress, based on source size.
        size_mb = self.claude_json_path.stat().st_size / (1024 * 1024)
        compress = size_mb > self.compression_threshold_mb
        if compress:
            name += ".gz"

        dest = self.claude_backup_dir / name
        if compress:
            with open(self.claude_json_path, 'rb') as f_in:
                with gzip.open(dest, 'wb') as f_out:
                    shutil.copyfileobj(f_in, f_out)
        else:
            shutil.copy2(self.claude_json_path, dest)

        self.log_info(f"Created backup: {dest}")
        return dest

    except Exception as e:
        self.log_error(f"Failed to create backup: {e}")
        raise
511
+
512
def get_statistics(self) -> Dict[str, Any]:
    """Get context preservation statistics.

    Returns:
        Dictionary containing statistics
    """
    # Size of the live Claude configuration, if any.
    config_exists = self.claude_json_path.exists()
    claude_size_mb = (
        self.claude_json_path.stat().st_size / (1024 * 1024)
        if config_exists else 0.0
    )

    # Aggregate backup files previously written to the backup directory.
    backup_count = 0
    backup_size_mb = 0.0
    if self.claude_backup_dir.exists():
        backups = list(self.claude_backup_dir.glob("claude_backup_*.json*"))
        backup_count = len(backups)
        backup_size_mb = sum(f.stat().st_size for f in backups) / (1024 * 1024)

    return {
        'claude_config_exists': config_exists,
        'claude_config_size_mb': round(claude_size_mb, 2),
        'is_large_file': claude_size_mb > self.large_file_threshold_mb,
        'backup_count': backup_count,
        'total_backup_size_mb': round(backup_size_mb, 2),
        'files_processed': self.files_processed,
        'total_size_processed_mb': round(self.total_size_processed_mb, 2)
    }