claude-mpm 4.3.20__py3-none-any.whl → 4.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/agent_loader.py +2 -2
  3. claude_mpm/agents/agent_loader_integration.py +2 -2
  4. claude_mpm/agents/async_agent_loader.py +2 -2
  5. claude_mpm/agents/base_agent_loader.py +2 -2
  6. claude_mpm/agents/frontmatter_validator.py +2 -2
  7. claude_mpm/agents/system_agent_config.py +2 -2
  8. claude_mpm/agents/templates/data_engineer.json +1 -2
  9. claude_mpm/cli/commands/doctor.py +2 -2
  10. claude_mpm/cli/commands/mpm_init.py +560 -47
  11. claude_mpm/cli/commands/mpm_init_handler.py +6 -0
  12. claude_mpm/cli/parsers/mpm_init_parser.py +39 -1
  13. claude_mpm/cli/startup_logging.py +11 -9
  14. claude_mpm/commands/mpm-init.md +76 -12
  15. claude_mpm/config/agent_config.py +2 -2
  16. claude_mpm/config/paths.py +2 -2
  17. claude_mpm/core/agent_name_normalizer.py +2 -2
  18. claude_mpm/core/config.py +2 -1
  19. claude_mpm/core/config_aliases.py +2 -2
  20. claude_mpm/core/file_utils.py +1 -0
  21. claude_mpm/core/log_manager.py +2 -2
  22. claude_mpm/core/tool_access_control.py +2 -2
  23. claude_mpm/core/unified_agent_registry.py +2 -2
  24. claude_mpm/core/unified_paths.py +2 -2
  25. claude_mpm/experimental/cli_enhancements.py +3 -2
  26. claude_mpm/hooks/base_hook.py +2 -2
  27. claude_mpm/hooks/instruction_reinforcement.py +2 -2
  28. claude_mpm/hooks/memory_integration_hook.py +1 -1
  29. claude_mpm/hooks/validation_hooks.py +2 -2
  30. claude_mpm/scripts/mpm_doctor.py +2 -2
  31. claude_mpm/services/agents/loading/agent_profile_loader.py +2 -2
  32. claude_mpm/services/agents/loading/base_agent_manager.py +2 -2
  33. claude_mpm/services/agents/loading/framework_agent_loader.py +2 -2
  34. claude_mpm/services/agents/management/agent_capabilities_generator.py +2 -2
  35. claude_mpm/services/agents/management/agent_management_service.py +2 -2
  36. claude_mpm/services/agents/memory/content_manager.py +5 -2
  37. claude_mpm/services/agents/memory/memory_categorization_service.py +5 -2
  38. claude_mpm/services/agents/memory/memory_file_service.py +28 -6
  39. claude_mpm/services/agents/memory/memory_format_service.py +5 -2
  40. claude_mpm/services/agents/memory/memory_limits_service.py +4 -2
  41. claude_mpm/services/agents/registry/deployed_agent_discovery.py +2 -2
  42. claude_mpm/services/agents/registry/modification_tracker.py +4 -4
  43. claude_mpm/services/async_session_logger.py +2 -1
  44. claude_mpm/services/claude_session_logger.py +2 -2
  45. claude_mpm/services/core/path_resolver.py +3 -2
  46. claude_mpm/services/diagnostics/diagnostic_runner.py +4 -3
  47. claude_mpm/services/event_bus/direct_relay.py +2 -1
  48. claude_mpm/services/event_bus/event_bus.py +2 -1
  49. claude_mpm/services/event_bus/relay.py +2 -2
  50. claude_mpm/services/framework_claude_md_generator/content_assembler.py +2 -2
  51. claude_mpm/services/infrastructure/daemon_manager.py +2 -2
  52. claude_mpm/services/memory/cache/simple_cache.py +2 -2
  53. claude_mpm/services/project/archive_manager.py +981 -0
  54. claude_mpm/services/project/documentation_manager.py +536 -0
  55. claude_mpm/services/project/enhanced_analyzer.py +491 -0
  56. claude_mpm/services/project/project_organizer.py +904 -0
  57. claude_mpm/services/response_tracker.py +2 -2
  58. claude_mpm/services/socketio/handlers/connection.py +14 -33
  59. claude_mpm/services/socketio/server/eventbus_integration.py +2 -2
  60. claude_mpm/services/unified/__init__.py +65 -0
  61. claude_mpm/services/unified/analyzer_strategies/__init__.py +44 -0
  62. claude_mpm/services/unified/analyzer_strategies/code_analyzer.py +473 -0
  63. claude_mpm/services/unified/analyzer_strategies/dependency_analyzer.py +643 -0
  64. claude_mpm/services/unified/analyzer_strategies/performance_analyzer.py +804 -0
  65. claude_mpm/services/unified/analyzer_strategies/security_analyzer.py +661 -0
  66. claude_mpm/services/unified/analyzer_strategies/structure_analyzer.py +696 -0
  67. claude_mpm/services/unified/deployment_strategies/__init__.py +97 -0
  68. claude_mpm/services/unified/deployment_strategies/base.py +557 -0
  69. claude_mpm/services/unified/deployment_strategies/cloud_strategies.py +486 -0
  70. claude_mpm/services/unified/deployment_strategies/local.py +594 -0
  71. claude_mpm/services/unified/deployment_strategies/utils.py +672 -0
  72. claude_mpm/services/unified/deployment_strategies/vercel.py +471 -0
  73. claude_mpm/services/unified/interfaces.py +499 -0
  74. claude_mpm/services/unified/migration.py +532 -0
  75. claude_mpm/services/unified/strategies.py +551 -0
  76. claude_mpm/services/unified/unified_analyzer.py +534 -0
  77. claude_mpm/services/unified/unified_config.py +688 -0
  78. claude_mpm/services/unified/unified_deployment.py +470 -0
  79. claude_mpm/services/version_control/version_parser.py +5 -4
  80. claude_mpm/storage/state_storage.py +2 -2
  81. claude_mpm/utils/agent_dependency_loader.py +49 -0
  82. claude_mpm/utils/common.py +542 -0
  83. claude_mpm/utils/database_connector.py +298 -0
  84. claude_mpm/utils/error_handler.py +2 -1
  85. claude_mpm/utils/log_cleanup.py +2 -2
  86. claude_mpm/utils/path_operations.py +2 -2
  87. claude_mpm/utils/robust_installer.py +56 -0
  88. claude_mpm/utils/session_logging.py +2 -2
  89. claude_mpm/utils/subprocess_utils.py +2 -2
  90. claude_mpm/validation/agent_validator.py +2 -2
  91. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/METADATA +1 -1
  92. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/RECORD +96 -71
  93. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/WHEEL +0 -0
  94. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/entry_points.txt +0 -0
  95. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/licenses/LICENSE +0 -0
  96. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,534 @@
1
+ """
2
+ Unified Analyzer Service Implementation
3
+ =======================================
4
+
5
+ This module implements the unified analyzer service that consolidates all
6
+ analysis-related services using the strategy pattern. It replaces multiple
7
+ specialized analyzer services with a single, extensible service.
8
+
9
+ Consolidates:
10
+ - CodeAnalyzer
11
+ - ComplexityAnalyzer
12
+ - DependencyAnalyzer
13
+ - PerformanceAnalyzer
14
+ - SecurityAnalyzer
15
+ - And other analysis-related services
16
+
17
+ Features:
18
+ - Strategy-based analysis for different target types
19
+ - Batch analysis operations
20
+ - Comparative analysis
21
+ - Metrics extraction and aggregation
22
+ - Recommendation generation
23
+ """
24
+
25
+ import asyncio
26
+ from pathlib import Path
27
+ from typing import Any, Dict, List, Optional, Union
28
+
29
+ from claude_mpm.core.logging_utils import get_logger
30
+
31
+ from .interfaces import (
32
+ AnalysisResult,
33
+ IAnalyzerService,
34
+ IUnifiedService,
35
+ ServiceCapability,
36
+ ServiceMetadata,
37
+ )
38
+ from .strategies import AnalyzerStrategy, StrategyContext, get_strategy_registry
39
+
40
+
41
class UnifiedAnalyzer(IAnalyzerService, IUnifiedService):
    """
    Unified analyzer service using the strategy pattern.

    This service consolidates all analysis operations through a
    pluggable strategy system, enabling consistent analysis interfaces
    across different target types (code files, config files, projects).

    Results are memoized in an in-memory cache keyed by a hash of the
    target and the options dict; ``force_refresh`` in the options
    bypasses the cache.
    """

    def __init__(self):
        """Initialize unified analyzer service."""
        self._logger = get_logger(f"{__name__}.UnifiedAnalyzer")
        self._registry = get_strategy_registry()
        # Unbounded in-memory result cache keyed by md5(target, options).
        # NOTE(review): consider bounding (LRU) for long-running processes
        # that analyze many distinct targets.
        self._analysis_cache: Dict[str, AnalysisResult] = {}
        # Operational counters surfaced via get_metrics()/health_check().
        self._metrics = {
            "total_analyses": 0,
            "cached_hits": 0,
            "analysis_errors": 0,
            "batch_operations": 0,
        }
        self._initialized = False

    def get_metadata(self) -> ServiceMetadata:
        """
        Get service metadata.

        Returns:
            ServiceMetadata: Name, version, capabilities, dependencies,
            and the list of legacy analyzer services this one replaces.
        """
        return ServiceMetadata(
            name="UnifiedAnalyzer",
            version="1.0.0",
            capabilities={
                ServiceCapability.ASYNC_OPERATIONS,
                ServiceCapability.BATCH_PROCESSING,
                ServiceCapability.CACHING,
                ServiceCapability.VALIDATION,
                ServiceCapability.METRICS,
                ServiceCapability.HEALTH_CHECK,
            },
            dependencies=["StrategyRegistry", "LoggingService"],
            description="Unified service for all analysis operations",
            tags={"analysis", "unified", "strategy-pattern"},
            deprecated_services=[
                "CodeAnalyzer",
                "ComplexityAnalyzer",
                "DependencyAnalyzer",
                "PerformanceAnalyzer",
                "SecurityAnalyzer",
            ],
        )

    async def initialize(self) -> bool:
        """
        Initialize the service.

        Registers default strategies and clears the analysis cache.

        Returns:
            bool: True if initialization succeeded, False on any error
            (the error is logged, not raised).
        """
        try:
            self._logger.info("Initializing UnifiedAnalyzer")

            # Register default strategies.
            self._register_default_strategies()

            # Start from an empty analysis cache.
            self._analysis_cache.clear()

            self._initialized = True
            self._logger.info("UnifiedAnalyzer initialized successfully")
            return True

        except Exception as e:
            self._logger.error(f"Failed to initialize: {str(e)}")
            return False

    async def shutdown(self) -> None:
        """Gracefully shut down the service, clearing cached results."""
        self._logger.info("Shutting down UnifiedAnalyzer")

        self._analysis_cache.clear()

        self._initialized = False
        self._logger.info("UnifiedAnalyzer shutdown complete")

    def health_check(self) -> Dict[str, Any]:
        """
        Perform a health check.

        Returns:
            Dict[str, Any]: Status ("healthy"/"unhealthy" based on the
            initialized flag), strategy count, cache size, and the
            service-level metrics snapshot.
        """
        strategies = self._registry.list_strategies(AnalyzerStrategy)

        return {
            "service": "UnifiedAnalyzer",
            "status": "healthy" if self._initialized else "unhealthy",
            "initialized": self._initialized,
            "registered_strategies": len(strategies),
            "cache_size": len(self._analysis_cache),
            "metrics": self.get_metrics(),
        }

    def get_metrics(
        self, target: Optional[Union[str, Path, Any]] = None
    ) -> Dict[str, Any]:
        """
        Get metrics.

        Without a ``target``, returns the service-level counters plus the
        derived cache-hit and error rates. With a ``target``, returns the
        analysis metrics for that target (cached result if available,
        otherwise a fresh ``metrics_only`` analysis; empty dict if that
        analysis fails).

        BUGFIX: the original module defined ``get_metrics`` twice — a
        no-argument service-metrics version and a per-target version.
        The second ``def`` rebound the name, so ``health_check()``'s
        no-argument call raised ``TypeError``. Both behaviors are merged
        behind the optional ``target`` parameter, which is backward
        compatible with both call sites.

        Args:
            target: Optional target to get analysis metrics for.

        Returns:
            Dict[str, Any]: Service metrics or per-target analysis metrics.
        """
        if target is None:
            return self._service_metrics()

        # Per-target path: reuse a cached analysis if present.
        cache_key = self._generate_cache_key(target, {})
        if cache_key in self._analysis_cache:
            return self._analysis_cache[cache_key].metrics

        # Perform a fresh analysis to obtain metrics.
        result = self.analyze(target, {"metrics_only": True})
        return result.metrics if result.success else {}

    def _service_metrics(self) -> Dict[str, Any]:
        """Compute the service-level metrics snapshot with derived rates."""
        total = self._metrics["total_analyses"]
        # Guard against division by zero before any analysis has run.
        cache_hit_rate = (
            (self._metrics["cached_hits"] / total) * 100 if total else 0.0
        )
        error_rate = (
            (self._metrics["analysis_errors"] / total) * 100 if total else 0.0
        )

        return {
            **self._metrics,
            "cache_hit_rate": cache_hit_rate,
            "error_rate": error_rate,
            "cache_entries": len(self._analysis_cache),
        }

    def reset(self) -> None:
        """Reset service to its initial state (cache and counters)."""
        self._logger.info("Resetting UnifiedAnalyzer")
        self._analysis_cache.clear()
        self._metrics = {
            "total_analyses": 0,
            "cached_hits": 0,
            "analysis_errors": 0,
            "batch_operations": 0,
        }

    def analyze(
        self,
        target: Union[str, Path, Any],
        options: Optional[Dict[str, Any]] = None,
    ) -> AnalysisResult:
        """
        Perform analysis on a target.

        Flow: cache lookup → analysis-type inference → strategy selection
        → input validation → strategy execution → metric extraction →
        result caching. All failures are reported as an unsuccessful
        ``AnalysisResult`` with ``severity="error"`` rather than raised.

        Args:
            target: Target to analyze (path, string, or arbitrary object).
            options: Analysis options. Recognized keys: ``type`` (explicit
                analysis type), ``force_refresh`` (bypass cache).

        Returns:
            AnalysisResult: Analysis result (success or error).
        """
        options = options or {}
        self._metrics["total_analyses"] += 1

        try:
            # Return a cached result unless the caller forces a refresh.
            cache_key = self._generate_cache_key(target, options)
            if cache_key in self._analysis_cache and not options.get(
                "force_refresh", False
            ):
                self._metrics["cached_hits"] += 1
                self._logger.debug(f"Returning cached analysis for {target}")
                return self._analysis_cache[cache_key]

            # Determine analysis type.
            analysis_type = self._determine_analysis_type(target, options)

            # Select analysis strategy.
            context = StrategyContext(
                target_type=analysis_type,
                operation="analyze",
                parameters={"target": target, "options": options},
            )

            strategy = self._registry.select_strategy(AnalyzerStrategy, context)

            if not strategy:
                self._metrics["analysis_errors"] += 1
                return AnalysisResult(
                    success=False,
                    summary=f"No strategy available for analysis type: {analysis_type}",
                    severity="error",
                )

            # Execute analysis using the selected strategy.
            self._logger.info(
                f"Analyzing {target} using {strategy.metadata.name}"
            )

            # Validate input before running the (potentially costly) analysis.
            validation_errors = strategy.validate_input(target)
            if validation_errors:
                self._metrics["analysis_errors"] += 1
                return AnalysisResult(
                    success=False,
                    summary=f"Validation failed: {'; '.join(validation_errors)}",
                    severity="error",
                )

            # Perform analysis.
            result_data = strategy.analyze(target, options)

            # Extract metrics.
            metrics = strategy.extract_metrics(result_data)

            # Create analysis result.
            result = AnalysisResult(
                success=True,
                findings=result_data.get("findings", []),
                metrics=metrics,
                summary=result_data.get("summary", "Analysis completed"),
                severity=result_data.get("severity", "info"),
                recommendations=result_data.get("recommendations", []),
            )

            # Cache result for subsequent identical calls.
            self._analysis_cache[cache_key] = result

            return result

        except Exception as e:
            self._logger.error(f"Analysis error: {str(e)}")
            self._metrics["analysis_errors"] += 1
            return AnalysisResult(
                success=False,
                summary=f"Analysis failed: {str(e)}",
                severity="error",
            )

    def batch_analyze(
        self,
        targets: List[Union[str, Path, Any]],
        options: Optional[Dict[str, Any]] = None,
    ) -> List[AnalysisResult]:
        """
        Perform batch analysis.

        Runs :meth:`analyze` sequentially for each target with the same
        options, then aggregates metrics across all results.

        Args:
            targets: List of targets.
            options: Analysis options applied to every target.

        Returns:
            List[AnalysisResult]: One result per target, in input order.
        """
        self._metrics["batch_operations"] += 1

        self._logger.info(f"Starting batch analysis of {len(targets)} targets")

        results = [self.analyze(target, options) for target in targets]

        # Aggregate metrics across all results.
        self._aggregate_batch_metrics(results)

        return results

    def compare(
        self,
        target1: Union[str, Path, Any],
        target2: Union[str, Path, Any],
        options: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        """
        Compare two targets.

        Analyzes both targets with the same options and diffs their
        metrics, findings, severities, and recommendations.

        Args:
            target1: First target.
            target2: Second target.
            options: Comparison options (forwarded to :meth:`analyze`).

        Returns:
            Dict[str, Any]: Comparison results with ``success`` flag; on
            failure, an ``error`` message instead of diff data.
        """
        options = options or {}

        try:
            # Analyze both targets.
            result1 = self.analyze(target1, options)
            result2 = self.analyze(target2, options)

            if not result1.success or not result2.success:
                return {
                    "success": False,
                    "error": "Failed to analyze one or both targets",
                }

            # Compare metrics and findings.
            metric_diff = self._compare_metrics(result1.metrics, result2.metrics)
            finding_diff = self._compare_findings(result1.findings, result2.findings)

            recs1 = set(result1.recommendations)
            recs2 = set(result2.recommendations)

            return {
                "success": True,
                "target1": str(target1),
                "target2": str(target2),
                "metric_differences": metric_diff,
                "finding_differences": finding_diff,
                "severity_comparison": {
                    "target1": result1.severity,
                    "target2": result2.severity,
                },
                "recommendation_diff": {
                    "unique_to_target1": list(recs1 - recs2),
                    "unique_to_target2": list(recs2 - recs1),
                    "common": list(recs1 & recs2),
                },
            }

        except Exception as e:
            self._logger.error(f"Comparison error: {str(e)}")
            return {"success": False, "error": str(e)}

    def get_recommendations(
        self, analysis_result: AnalysisResult
    ) -> List[Dict[str, Any]]:
        """
        Get structured recommendations from an analysis result.

        Combines the result's own recommendations (as "general" entries),
        a severity-based lead entry for critical/error results, and
        metric-derived suggestions.

        Args:
            analysis_result: Analysis result to derive recommendations from.

        Returns:
            List[Dict[str, Any]]: Recommendations, each with ``type``,
            ``description``, and ``priority`` keys.
        """
        recommendations = [
            {"type": "general", "description": rec, "priority": "medium"}
            for rec in analysis_result.recommendations
        ]

        # Prepend a severity-based recommendation for serious results.
        if analysis_result.severity == "critical":
            recommendations.insert(0, {
                "type": "urgent",
                "description": "Critical issues found - immediate attention required",
                "priority": "high",
            })
        elif analysis_result.severity == "error":
            recommendations.insert(0, {
                "type": "important",
                "description": "Errors found - should be addressed soon",
                "priority": "high",
            })

        # Append metric-based recommendations.
        if analysis_result.metrics:
            recommendations.extend(
                self._generate_metric_recommendations(analysis_result.metrics)
            )

        return recommendations

    # Private helper methods

    def _register_default_strategies(self) -> None:
        """Register default analyzer strategies (extension point)."""
        # Default strategies would be registered here; currently a no-op
        # placeholder to be extended with concrete strategy implementations.
        self._logger.debug("Default strategies registered")

    def _determine_analysis_type(
        self, target: Any, options: Dict[str, Any]
    ) -> str:
        """
        Determine the analysis type from the target and options.

        Precedence: explicit ``options["type"]`` → inference from a
        path-like target (file extension or directory) → "generic".

        Args:
            target: Analysis target.
            options: Analysis options.

        Returns:
            str: One of "code", "config", "project", "generic", or the
            explicitly requested type.
        """
        # Explicit type always wins.
        if "type" in options:
            return options["type"]

        # Infer from a path-like target.
        if isinstance(target, (str, Path)):
            path = Path(target)
            if path.is_file():
                # Classify by file extension.
                if path.suffix in [".py", ".js", ".ts", ".java"]:
                    return "code"
                if path.suffix in [".json", ".yaml", ".yml"]:
                    return "config"
            elif path.is_dir():
                return "project"

        return "generic"

    def _generate_cache_key(self, target: Any, options: Dict[str, Any]) -> str:
        """
        Generate a deterministic cache key for (target, options).

        Uses md5 purely as a fast, stable fingerprint (not for security).
        ``default=str`` makes non-JSON-serializable option values hashable
        instead of aborting the analysis with a TypeError.
        """
        import hashlib
        import json

        key_data = {
            "target": str(target),
            "options": options,
        }
        key_str = json.dumps(key_data, sort_keys=True, default=str)
        return hashlib.md5(key_str.encode()).hexdigest()

    def _aggregate_batch_metrics(self, results: List[AnalysisResult]) -> None:
        """Aggregate metrics from a batch analysis (placeholder, no-op)."""
        # Implementation would aggregate metrics across results.

    def _compare_metrics(
        self, metrics1: Dict[str, Any], metrics2: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Compare two metric dicts.

        Returns:
            Dict[str, Any]: For each key whose values differ (missing keys
            compare as None), a {"target1": ..., "target2": ...} entry.
        """
        diff = {}
        for key in set(metrics1) | set(metrics2):
            val1 = metrics1.get(key)
            val2 = metrics2.get(key)
            if val1 != val2:
                diff[key] = {"target1": val1, "target2": val2}
        return diff

    def _compare_findings(
        self, findings1: List[Dict[str, Any]], findings2: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """Compare two finding lists by count only (no content diff)."""
        return {
            "target1_count": len(findings1),
            "target2_count": len(findings2),
            "difference": len(findings1) - len(findings2),
        }

    def _generate_metric_recommendations(
        self, metrics: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """
        Generate recommendations from metric thresholds.

        Currently flags complexity > 10 and code_duplication > 20.
        """
        recommendations = []

        if metrics.get("complexity", 0) > 10:
            recommendations.append({
                "type": "complexity",
                "description": "Consider refactoring to reduce complexity",
                "priority": "medium",
            })

        if metrics.get("code_duplication", 0) > 20:
            recommendations.append({
                "type": "duplication",
                "description": "High code duplication detected - consider extracting common functionality",
                "priority": "medium",
            })

        return recommendations