claude-mpm 3.3.0__py3-none-any.whl → 3.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. claude_mpm/agents/templates/data_engineer.json +1 -1
  2. claude_mpm/agents/templates/documentation.json +1 -1
  3. claude_mpm/agents/templates/engineer.json +1 -1
  4. claude_mpm/agents/templates/ops.json +1 -1
  5. claude_mpm/agents/templates/pm.json +1 -1
  6. claude_mpm/agents/templates/qa.json +1 -1
  7. claude_mpm/agents/templates/research.json +1 -1
  8. claude_mpm/agents/templates/security.json +1 -1
  9. claude_mpm/agents/templates/test_integration.json +112 -0
  10. claude_mpm/agents/templates/version_control.json +1 -1
  11. claude_mpm/cli/commands/memory.py +749 -26
  12. claude_mpm/cli/commands/run.py +115 -14
  13. claude_mpm/cli/parser.py +89 -1
  14. claude_mpm/constants.py +6 -0
  15. claude_mpm/core/claude_runner.py +74 -11
  16. claude_mpm/core/config.py +1 -1
  17. claude_mpm/core/session_manager.py +46 -0
  18. claude_mpm/core/simple_runner.py +74 -11
  19. claude_mpm/hooks/builtin/mpm_command_hook.py +5 -5
  20. claude_mpm/hooks/claude_hooks/hook_handler.py +213 -30
  21. claude_mpm/hooks/claude_hooks/hook_wrapper.sh +9 -2
  22. claude_mpm/hooks/memory_integration_hook.py +51 -5
  23. claude_mpm/services/__init__.py +23 -5
  24. claude_mpm/services/agent_memory_manager.py +800 -71
  25. claude_mpm/services/memory_builder.py +823 -0
  26. claude_mpm/services/memory_optimizer.py +619 -0
  27. claude_mpm/services/memory_router.py +445 -0
  28. claude_mpm/services/project_analyzer.py +771 -0
  29. claude_mpm/services/socketio_server.py +649 -45
  30. claude_mpm/services/version_control/git_operations.py +26 -0
  31. claude_mpm-3.4.0.dist-info/METADATA +183 -0
  32. {claude_mpm-3.3.0.dist-info → claude_mpm-3.4.0.dist-info}/RECORD +36 -52
  33. claude_mpm/agents/agent-template.yaml +0 -83
  34. claude_mpm/agents/templates/test-integration-agent.md +0 -34
  35. claude_mpm/agents/test_fix_deployment/.claude-pm/config/project.json +0 -6
  36. claude_mpm/cli/README.md +0 -109
  37. claude_mpm/cli_module/refactoring_guide.md +0 -253
  38. claude_mpm/core/agent_registry.py.bak +0 -312
  39. claude_mpm/core/base_service.py.bak +0 -406
  40. claude_mpm/core/websocket_handler.py +0 -233
  41. claude_mpm/hooks/README.md +0 -97
  42. claude_mpm/orchestration/SUBPROCESS_DESIGN.md +0 -66
  43. claude_mpm/schemas/README_SECURITY.md +0 -92
  44. claude_mpm/schemas/agent_schema.json +0 -395
  45. claude_mpm/schemas/agent_schema_documentation.md +0 -181
  46. claude_mpm/schemas/agent_schema_security_notes.md +0 -165
  47. claude_mpm/schemas/examples/standard_workflow.json +0 -505
  48. claude_mpm/schemas/ticket_workflow_documentation.md +0 -482
  49. claude_mpm/schemas/ticket_workflow_schema.json +0 -590
  50. claude_mpm/services/framework_claude_md_generator/README.md +0 -92
  51. claude_mpm/services/parent_directory_manager/README.md +0 -83
  52. claude_mpm/services/version_control/VERSION +0 -1
  53. claude_mpm/services/websocket_server.py +0 -376
  54. claude_mpm-3.3.0.dist-info/METADATA +0 -432
  55. {claude_mpm-3.3.0.dist-info → claude_mpm-3.4.0.dist-info}/WHEEL +0 -0
  56. {claude_mpm-3.3.0.dist-info → claude_mpm-3.4.0.dist-info}/entry_points.txt +0 -0
  57. {claude_mpm-3.3.0.dist-info → claude_mpm-3.4.0.dist-info}/licenses/LICENSE +0 -0
  58. {claude_mpm-3.3.0.dist-info → claude_mpm-3.4.0.dist-info}/top_level.txt +0 -0
claude_mpm/services/memory_optimizer.py
@@ -0,0 +1,619 @@
+ #!/usr/bin/env python3
+ """
+ Memory Optimizer Service
+ =======================
+
+ Optimizes agent memory files by removing duplicates, consolidating related items,
+ and reorganizing by priority/relevance.
+
+ This service provides:
+ - Duplicate detection and removal
+ - Related item consolidation
+ - Priority-based reorganization
+ - Per-agent optimization strategies
+ - Size optimization within limits
+
+ WHY: Agent memory files accumulate information over time and can become cluttered
+ with duplicates, outdated information, or poorly organized content. This service
+ maintains memory quality while preserving important learnings.
+
+ DESIGN DECISION: Uses conservative optimization strategies that preserve information
+ rather than aggressively removing content. Better to keep potentially useful
+ information than lose important insights.
+ """
+
+ import re
+ from pathlib import Path
+ from typing import Dict, List, Optional, Any, Set, Tuple
+ from datetime import datetime
+ from difflib import SequenceMatcher
+
+ from claude_mpm.core import LoggerMixin
+ from claude_mpm.core.config import Config
+ from claude_mpm.utils.paths import PathResolver
+
+
+ class MemoryOptimizer(LoggerMixin):
+     """Optimizes agent memory files through deduplication and reorganization.
+
+     WHY: Memory files need maintenance to stay useful. This service provides
+     automated cleanup while preserving valuable information and maintaining
+     the structured format agents expect.
+
+     DESIGN DECISION: Uses similarity thresholds and conservative merging to
+     avoid losing important nuances in learnings while removing clear duplicates.
+     """
+
+     # Similarity threshold for considering items duplicates
+     SIMILARITY_THRESHOLD = 0.85
+
+     # Minimum similarity for consolidation
+     CONSOLIDATION_THRESHOLD = 0.70
+
+     # Priority keywords for sorting (higher priority items kept/moved up)
+     PRIORITY_KEYWORDS = {
+         'high': ['critical', 'important', 'essential', 'required', 'must', 'always', 'never'],
+         'medium': ['should', 'recommended', 'prefer', 'avoid', 'consider'],
+         'low': ['note', 'tip', 'hint', 'example', 'reference']
+     }
+
+     def __init__(self, config: Optional[Config] = None):
+         """Initialize the memory optimizer.
+
+         Args:
+             config: Optional Config object
+         """
+         super().__init__()
+         self.config = config or Config()
+         self.project_root = PathResolver.get_project_root()
+         self.memories_dir = self.project_root / ".claude-mpm" / "memories"
+
+     def optimize_agent_memory(self, agent_id: str) -> Dict[str, Any]:
+         """Optimize memory for a specific agent.
+
+         WHY: Individual agent memories can be optimized independently, allowing
+         for targeted cleanup of specific agents without affecting others.
+
+         Args:
+             agent_id: The agent identifier
+
+         Returns:
+             Dict containing optimization results and statistics
+         """
+         try:
+             memory_file = self.memories_dir / f"{agent_id}_agent.md"
+
+             if not memory_file.exists():
+                 return {
+                     "success": False,
+                     "agent_id": agent_id,
+                     "error": "Memory file not found"
+                 }
+
+             # Load original content
+             original_content = memory_file.read_text(encoding='utf-8')
+             original_size = len(original_content)
+
+             # Parse memory structure
+             sections = self._parse_memory_sections(original_content)
+
+             # Optimize each section
+             optimized_sections = {}
+             optimization_stats = {
+                 "duplicates_removed": 0,
+                 "items_consolidated": 0,
+                 "items_reordered": 0,
+                 "sections_optimized": 0
+             }
+
+             for section_name, items in sections.items():
+                 if section_name.lower() in ['header', 'metadata']:
+                     # Preserve header sections as-is
+                     optimized_sections[section_name] = items
+                     continue
+
+                 optimized_items, section_stats = self._optimize_section(items, agent_id)
+                 optimized_sections[section_name] = optimized_items
+
+                 # Aggregate stats
+                 for key in optimization_stats:
+                     if key in section_stats:
+                         optimization_stats[key] += section_stats[key]
+
+                 if section_stats.get('duplicates_removed', 0) > 0 or section_stats.get('items_consolidated', 0) > 0:
+                     optimization_stats["sections_optimized"] += 1
+
+             # Rebuild memory content
+             optimized_content = self._rebuild_memory_content(optimized_sections, agent_id)
+             optimized_size = len(optimized_content)
+
+             # Create backup before saving
+             backup_path = self._create_backup(memory_file)
+
+             # Save optimized content
+             memory_file.write_text(optimized_content, encoding='utf-8')
+
+             result = {
+                 "success": True,
+                 "agent_id": agent_id,
+                 "original_size": original_size,
+                 "optimized_size": optimized_size,
+                 "size_reduction": original_size - optimized_size,
+                 "size_reduction_percent": round(((original_size - optimized_size) / original_size) * 100, 1) if original_size > 0 else 0,
+                 "backup_created": str(backup_path),
+                 "timestamp": datetime.now().isoformat(),
+                 **optimization_stats
+             }
+
+             self.logger.info(f"Optimized memory for {agent_id}: {optimization_stats}")
+             return result
+
+         except Exception as e:
+             self.logger.error(f"Error optimizing memory for {agent_id}: {e}")
+             return {
+                 "success": False,
+                 "agent_id": agent_id,
+                 "error": str(e)
+             }
+
+     def optimize_all_memories(self) -> Dict[str, Any]:
+         """Optimize all agent memory files.
+
+         WHY: Bulk optimization allows maintenance of the entire memory system
+         in one operation, providing comprehensive cleanup and consistency.
+
+         Returns:
+             Dict containing results for all agents
+         """
+         try:
+             if not self.memories_dir.exists():
+                 return {
+                     "success": False,
+                     "error": "Memory directory not found"
+                 }
+
+             memory_files = list(self.memories_dir.glob("*_agent.md"))
+             results = {}
+
+             total_stats = {
+                 "agents_processed": 0,
+                 "agents_optimized": 0,
+                 "total_size_before": 0,
+                 "total_size_after": 0,
+                 "total_duplicates_removed": 0,
+                 "total_items_consolidated": 0
+             }
+
+             for memory_file in memory_files:
+                 agent_id = memory_file.stem.replace('_agent', '')
+                 result = self.optimize_agent_memory(agent_id)
+                 results[agent_id] = result
+
+                 total_stats["agents_processed"] += 1
+
+                 if result.get("success"):
+                     total_stats["agents_optimized"] += 1
+                     total_stats["total_size_before"] += result.get("original_size", 0)
+                     total_stats["total_size_after"] += result.get("optimized_size", 0)
+                     total_stats["total_duplicates_removed"] += result.get("duplicates_removed", 0)
+                     total_stats["total_items_consolidated"] += result.get("items_consolidated", 0)
+
+             # Calculate overall statistics
+             total_reduction = total_stats["total_size_before"] - total_stats["total_size_after"]
+             total_reduction_percent = round(
+                 (total_reduction / total_stats["total_size_before"]) * 100, 1
+             ) if total_stats["total_size_before"] > 0 else 0
+
+             return {
+                 "success": True,
+                 "timestamp": datetime.now().isoformat(),
+                 "agents": results,
+                 "summary": {
+                     **total_stats,
+                     "total_size_reduction": total_reduction,
+                     "total_size_reduction_percent": total_reduction_percent
+                 }
+             }
+
+         except Exception as e:
+             self.logger.error(f"Error optimizing all memories: {e}")
+             return {
+                 "success": False,
+                 "error": str(e)
+             }
+
+     def analyze_optimization_opportunities(self, agent_id: Optional[str] = None) -> Dict[str, Any]:
+         """Analyze potential optimization opportunities without making changes.
+
+         WHY: Users may want to understand what optimizations would be performed
+         before actually running them, allowing for informed decisions.
+
+         Args:
+             agent_id: Optional specific agent to analyze
+
+         Returns:
+             Dict containing analysis results
+         """
+         try:
+             if agent_id:
+                 return self._analyze_single_agent(agent_id)
+             else:
+                 return self._analyze_all_agents()
+
+         except Exception as e:
+             self.logger.error(f"Error analyzing optimization opportunities: {e}")
+             return {"success": False, "error": str(e)}
+
+     def _parse_memory_sections(self, content: str) -> Dict[str, List[str]]:
+         """Parse memory content into sections and items.
+
+         Args:
+             content: Memory file content
+
+         Returns:
+             Dict mapping section names to lists of items
+         """
+         lines = content.split('\n')
+         sections = {}
+         current_section = 'header'
+         current_items = []
+
+         for line in lines:
+             if line.startswith('## '):
+                 # Save previous section
+                 if current_section:
+                     sections[current_section] = current_items
+
+                 # Start new section
+                 section_name = line[3:].split('(')[0].strip()
+                 current_section = section_name
+                 current_items = [line]  # Include the header
+
+             else:
+                 current_items.append(line)
+
+         # Save last section
+         if current_section:
+             sections[current_section] = current_items
+
+         return sections
+
+     def _optimize_section(self, items: List[str], agent_id: str) -> Tuple[List[str], Dict[str, int]]:
+         """Optimize a single section by removing duplicates and consolidating.
+
+         Args:
+             items: List of section content lines
+             agent_id: Agent identifier for context
+
+         Returns:
+             Tuple of (optimized_items, stats)
+         """
+         stats = {
+             "duplicates_removed": 0,
+             "items_consolidated": 0,
+             "items_reordered": 0
+         }
+
+         # Separate header and bullet points
+         header_lines = []
+         bullet_points = []
+         other_lines = []
+
+         for line in items:
+             stripped = line.strip()
+             if stripped.startswith('- '):
+                 bullet_points.append(line)
+             elif stripped.startswith('## ') or stripped.startswith('<!--'):
+                 header_lines.append(line)
+             else:
+                 other_lines.append(line)
+
+         if not bullet_points:
+             return items, stats
+
+         # Remove duplicates
+         deduplicated_points, duplicates_removed = self._remove_duplicates(bullet_points)
+         stats["duplicates_removed"] = duplicates_removed
+
+         # Consolidate similar items
+         consolidated_points, items_consolidated = self._consolidate_similar_items(deduplicated_points)
+         stats["items_consolidated"] = items_consolidated
+
+         # Reorder by priority
+         reordered_points = self._reorder_by_priority(consolidated_points)
+         if reordered_points != consolidated_points:
+             stats["items_reordered"] = 1
+
+         # Rebuild section
+         optimized_items = header_lines + other_lines + reordered_points
+
+         return optimized_items, stats
+
+     def _remove_duplicates(self, bullet_points: List[str]) -> Tuple[List[str], int]:
+         """Remove duplicate bullet points.
+
+         Args:
+             bullet_points: List of bullet point lines
+
+         Returns:
+             Tuple of (deduplicated_points, count_removed)
+         """
+         seen_content = set()
+         unique_points = []
+         duplicates_removed = 0
+
+         for point in bullet_points:
+             # Normalize content for comparison
+             content = point.strip().lower().replace('- ', '')
+             content_normalized = re.sub(r'\s+', ' ', content).strip()
+
+             if content_normalized not in seen_content:
+                 seen_content.add(content_normalized)
+                 unique_points.append(point)
+             else:
+                 duplicates_removed += 1
+                 self.logger.debug(f"Removed duplicate: {point.strip()[:50]}...")
+
+         return unique_points, duplicates_removed
+
+     def _consolidate_similar_items(self, bullet_points: List[str]) -> Tuple[List[str], int]:
+         """Consolidate similar bullet points.
+
+         Args:
+             bullet_points: List of bullet point lines
+
+         Returns:
+             Tuple of (consolidated_points, count_consolidated)
+         """
+         if len(bullet_points) < 2:
+             return bullet_points, 0
+
+         consolidated = []
+         items_consolidated = 0
+         used_indices = set()
+
+         for i, point_a in enumerate(bullet_points):
+             if i in used_indices:
+                 continue
+
+             content_a = point_a.strip().replace('- ', '')
+             similar_items = [point_a]
+             similar_indices = {i}
+
+             # Find similar items
+             for j, point_b in enumerate(bullet_points[i+1:], i+1):
+                 if j in used_indices:
+                     continue
+
+                 content_b = point_b.strip().replace('- ', '')
+                 similarity = SequenceMatcher(None, content_a.lower(), content_b.lower()).ratio()
+
+                 if similarity >= self.CONSOLIDATION_THRESHOLD:
+                     similar_items.append(point_b)
+                     similar_indices.add(j)
+
+             # Consolidate if we found similar items
+             if len(similar_items) > 1:
+                 consolidated_content = self._merge_similar_items(similar_items)
+                 consolidated.append(f"- {consolidated_content}")
+                 items_consolidated += len(similar_items) - 1
+                 self.logger.debug(f"Consolidated {len(similar_items)} similar items")
+             else:
+                 consolidated.append(point_a)
+
+             used_indices.update(similar_indices)
+
+         return consolidated, items_consolidated
+
+     def _merge_similar_items(self, similar_items: List[str]) -> str:
+         """Merge similar items into a single consolidated item.
+
+         Args:
+             similar_items: List of similar bullet points
+
+         Returns:
+             Consolidated content string
+         """
+         # Take the longest/most detailed item as base
+         contents = [item.strip().replace('- ', '') for item in similar_items]
+         base_content = max(contents, key=len)
+
+         # Look for additional details in other items
+         all_words = set()
+         for content in contents:
+             all_words.update(content.lower().split())
+
+         base_words = set(base_content.lower().split())
+         additional_words = all_words - base_words
+
+         # If there are meaningful additional words, add them
+         if additional_words and len(additional_words) < 5:  # Don't add too much
+             additional_text = " (" + ", ".join(sorted(additional_words)) + ")"
+             return base_content + additional_text
+
+         return base_content
+
+     def _reorder_by_priority(self, bullet_points: List[str]) -> List[str]:
+         """Reorder bullet points by priority/importance.
+
+         Args:
+             bullet_points: List of bullet point lines
+
+         Returns:
+             Reordered list of bullet points
+         """
+         def get_priority_score(point: str) -> int:
+             content = point.lower()
+             score = 0
+
+             # High priority keywords
+             for keyword in self.PRIORITY_KEYWORDS['high']:
+                 if keyword in content:
+                     score += 3
+
+             # Medium priority keywords
+             for keyword in self.PRIORITY_KEYWORDS['medium']:
+                 if keyword in content:
+                     score += 2
+
+             # Low priority keywords
+             for keyword in self.PRIORITY_KEYWORDS['low']:
+                 if keyword in content:
+                     score += 1
+
+             # Length-based priority (more detailed items are often more important)
+             if len(content) > 100:
+                 score += 1
+
+             return score
+
+         # Sort by priority score (descending) then alphabetically
+         return sorted(bullet_points, key=lambda x: (-get_priority_score(x), x.lower()))
+
+     def _rebuild_memory_content(self, sections: Dict[str, List[str]], agent_id: str) -> str:
+         """Rebuild memory content from optimized sections.
+
+         Args:
+             sections: Dict of section names to content lines
+             agent_id: Agent identifier
+
+         Returns:
+             Rebuilt memory content string
+         """
+         content_lines = []
+
+         # Add header if it exists
+         if 'header' in sections:
+             content_lines.extend(sections['header'])
+
+         # Add sections in a logical order
+         section_order = [
+             'Project Architecture',
+             'Coding Patterns Learned',
+             'Implementation Guidelines',
+             'Domain-Specific Knowledge',
+             'Effective Strategies',
+             'Common Mistakes to Avoid',
+             'Integration Points',
+             'Performance Considerations',
+             'Current Technical Context',
+             'Recent Learnings'
+         ]
+
+         # Add ordered sections
+         for section_name in section_order:
+             if section_name in sections and section_name != 'header':
+                 if content_lines and not content_lines[-1].strip() == '':
+                     content_lines.append('')  # Add spacing
+                 content_lines.extend(sections[section_name])
+
+         # Add any remaining sections not in the order
+         for section_name, section_content in sections.items():
+             if section_name not in section_order and section_name != 'header':
+                 if content_lines and not content_lines[-1].strip() == '':
+                     content_lines.append('')
+                 content_lines.extend(section_content)
+
+         # Update timestamp
+         content = '\n'.join(content_lines)
+         timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+         content = re.sub(
+             r'<!-- Last Updated: .+ \| Auto-updated by: .+ -->',
+             f'<!-- Last Updated: {timestamp} | Auto-updated by: optimizer -->',
+             content
+         )
+
+         return content
+
+     def _create_backup(self, memory_file: Path) -> Path:
+         """Create backup of memory file before optimization.
+
+         Args:
+             memory_file: Path to memory file
+
+         Returns:
+             Path to backup file
+         """
+         timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+         backup_name = f"{memory_file.stem}_backup_{timestamp}{memory_file.suffix}"
+         backup_path = memory_file.parent / backup_name
+
+         backup_path.write_text(memory_file.read_text(encoding='utf-8'), encoding='utf-8')
+         self.logger.debug(f"Created backup: {backup_path}")
+
+         return backup_path
+
+     def _analyze_single_agent(self, agent_id: str) -> Dict[str, Any]:
+         """Analyze optimization opportunities for a single agent.
+
+         Args:
+             agent_id: Agent identifier
+
+         Returns:
+             Analysis results
+         """
+         memory_file = self.memories_dir / f"{agent_id}_agent.md"
+
+         if not memory_file.exists():
+             return {
+                 "success": False,
+                 "agent_id": agent_id,
+                 "error": "Memory file not found"
+             }
+
+         content = memory_file.read_text(encoding='utf-8')
+         sections = self._parse_memory_sections(content)
+
+         analysis = {
+             "success": True,
+             "agent_id": agent_id,
+             "file_size": len(content),
+             "sections": len([s for s in sections if not s.lower() in ['header', 'metadata']]),
+             "opportunities": []
+         }
+
+         # Analyze each section for opportunities
+         for section_name, items in sections.items():
+             if section_name.lower() in ['header', 'metadata']:
+                 continue
+
+             bullet_points = [line for line in items if line.strip().startswith('- ')]
+
+             if len(bullet_points) > 1:
+                 # Check for duplicates
+                 unique_points, duplicates = self._remove_duplicates(bullet_points)
+                 if duplicates > 0:
+                     analysis["opportunities"].append(f"{section_name}: {duplicates} duplicate items")
+
+                 # Check for similar items
+                 consolidated, consolidated_count = self._consolidate_similar_items(unique_points)
+                 if consolidated_count > 0:
+                     analysis["opportunities"].append(f"{section_name}: {consolidated_count} items can be consolidated")
+
+         return analysis
+
+     def _analyze_all_agents(self) -> Dict[str, Any]:
+         """Analyze optimization opportunities for all agents.
+
+         Returns:
+             Analysis results for all agents
+         """
+         if not self.memories_dir.exists():
+             return {
+                 "success": False,
+                 "error": "Memory directory not found"
+             }
+
+         memory_files = list(self.memories_dir.glob("*_agent.md"))
+         agents_analysis = {}
+
+         for memory_file in memory_files:
+             agent_id = memory_file.stem.replace('_agent', '')
+             agents_analysis[agent_id] = self._analyze_single_agent(agent_id)
+
+         return {
+             "success": True,
+             "timestamp": datetime.now().isoformat(),
+             "agents_analyzed": len(agents_analysis),
+             "agents": agents_analysis
+         }
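
For orientation, a minimal usage sketch of the new service follows. It is not part of the package diff; it assumes claude-mpm 3.4.0 is installed, that the process runs from a project root containing a .claude-mpm/memories/ directory, and the "engineer" agent id is purely illustrative.

from claude_mpm.services.memory_optimizer import MemoryOptimizer

optimizer = MemoryOptimizer()

# Read-only pass: reports duplicates and consolidation candidates
# without rewriting any memory files.
report = optimizer.analyze_optimization_opportunities(agent_id="engineer")
for opportunity in report.get("opportunities", []):
    print(opportunity)

# Destructive pass: rewrites .claude-mpm/memories/engineer_agent.md in place,
# after _create_backup() writes a timestamped copy alongside it.
result = optimizer.optimize_agent_memory("engineer")
if result["success"]:
    print(f"Saved {result['size_reduction']} bytes "
          f"({result['size_reduction_percent']}%); backup: {result['backup_created']}")
else:
    print(f"Optimization failed: {result['error']}")

optimize_all_memories() applies the same per-agent routine to every *_agent.md file under .claude-mpm/memories/ and returns a per-agent result map plus an aggregate summary.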