claude-mpm 4.1.4__py3-none-any.whl → 4.1.5__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
Files changed (41)
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/cli/commands/tickets.py +365 -784
  3. claude_mpm/core/output_style_manager.py +24 -0
  4. claude_mpm/core/unified_agent_registry.py +46 -15
  5. claude_mpm/services/agents/deployment/agent_discovery_service.py +12 -3
  6. claude_mpm/services/agents/deployment/agent_lifecycle_manager.py +172 -233
  7. claude_mpm/services/agents/deployment/agent_lifecycle_manager_refactored.py +575 -0
  8. claude_mpm/services/agents/deployment/agent_operation_service.py +573 -0
  9. claude_mpm/services/agents/deployment/agent_record_service.py +419 -0
  10. claude_mpm/services/agents/deployment/agent_state_service.py +381 -0
  11. claude_mpm/services/agents/deployment/multi_source_deployment_service.py +4 -2
  12. claude_mpm/services/infrastructure/__init__.py +31 -5
  13. claude_mpm/services/infrastructure/monitoring/__init__.py +43 -0
  14. claude_mpm/services/infrastructure/monitoring/aggregator.py +437 -0
  15. claude_mpm/services/infrastructure/monitoring/base.py +130 -0
  16. claude_mpm/services/infrastructure/monitoring/legacy.py +203 -0
  17. claude_mpm/services/infrastructure/monitoring/network.py +218 -0
  18. claude_mpm/services/infrastructure/monitoring/process.py +342 -0
  19. claude_mpm/services/infrastructure/monitoring/resources.py +243 -0
  20. claude_mpm/services/infrastructure/monitoring/service.py +367 -0
  21. claude_mpm/services/infrastructure/monitoring.py +67 -1030
  22. claude_mpm/services/project/analyzer.py +13 -4
  23. claude_mpm/services/project/analyzer_refactored.py +450 -0
  24. claude_mpm/services/project/analyzer_v2.py +566 -0
  25. claude_mpm/services/project/architecture_analyzer.py +461 -0
  26. claude_mpm/services/project/dependency_analyzer.py +462 -0
  27. claude_mpm/services/project/language_analyzer.py +265 -0
  28. claude_mpm/services/project/metrics_collector.py +410 -0
  29. claude_mpm/services/ticket_manager.py +5 -1
  30. claude_mpm/services/ticket_services/__init__.py +26 -0
  31. claude_mpm/services/ticket_services/crud_service.py +328 -0
  32. claude_mpm/services/ticket_services/formatter_service.py +290 -0
  33. claude_mpm/services/ticket_services/search_service.py +324 -0
  34. claude_mpm/services/ticket_services/validation_service.py +303 -0
  35. claude_mpm/services/ticket_services/workflow_service.py +244 -0
  36. {claude_mpm-4.1.4.dist-info → claude_mpm-4.1.5.dist-info}/METADATA +1 -1
  37. {claude_mpm-4.1.4.dist-info → claude_mpm-4.1.5.dist-info}/RECORD +41 -17
  38. {claude_mpm-4.1.4.dist-info → claude_mpm-4.1.5.dist-info}/WHEEL +0 -0
  39. {claude_mpm-4.1.4.dist-info → claude_mpm-4.1.5.dist-info}/entry_points.txt +0 -0
  40. {claude_mpm-4.1.4.dist-info → claude_mpm-4.1.5.dist-info}/licenses/LICENSE +0 -0
  41. {claude_mpm-4.1.4.dist-info → claude_mpm-4.1.5.dist-info}/top_level.txt +0 -0
claude_mpm/services/project/language_analyzer.py
@@ -0,0 +1,265 @@
+#!/usr/bin/env python3
+"""
+Language Analyzer Service
+=========================
+
+WHY: Separates language-specific analysis from the main analyzer to follow
+single responsibility principle. Handles detection and analysis of programming
+languages, frameworks, and language-specific patterns.
+
+DECISION: Extract language detection into a focused service that can be
+enhanced independently with better language-specific analysis.
+"""
+
+import logging
+import re
+from collections import Counter
+from pathlib import Path
+from typing import Dict, List, Optional
+
+
+class LanguageAnalyzerService:
+    """Analyzes programming languages and frameworks in a project.
+
+    WHY: Language detection and framework identification is a complex task
+    that deserves its own focused service. This separation allows for better
+    testing and future enhancements like syntax analysis.
+    """
+
+    # Language detection by file extension
+    FILE_EXTENSIONS = {
+        ".py": "python",
+        ".js": "javascript",
+        ".ts": "typescript",
+        ".jsx": "react",
+        ".tsx": "react",
+        ".rs": "rust",
+        ".java": "java",
+        ".go": "go",
+        ".php": "php",
+        ".rb": "ruby",
+        ".cpp": "cpp",
+        ".cc": "cpp",
+        ".c": "c",
+        ".cs": "csharp",
+        ".swift": "swift",
+        ".kt": "kotlin",
+        ".scala": "scala",
+        ".r": "r",
+        ".m": "objective-c",
+        ".lua": "lua",
+        ".pl": "perl",
+        ".sh": "bash",
+        ".ps1": "powershell",
+    }
+
+    # Framework detection patterns
+    FRAMEWORK_PATTERNS = {
+        "flask": ["from flask", "Flask(", "app.route"],
+        "django": ["from django", "DJANGO_SETTINGS", "django.contrib"],
+        "fastapi": ["from fastapi", "FastAPI(", "@app."],
+        "express": ["express()", "app.get(", "app.post("],
+        "react": ["import React", "from 'react'", 'from "react"', "ReactDOM"],
+        "vue": ["Vue.createApp", "new Vue(", "vue-"],
+        "angular": ["@Component", "@Injectable", "Angular"],
+        "spring": ["@SpringBootApplication", "@RestController", "Spring"],
+        "rails": ["Rails.application", "ApplicationController"],
+        "nextjs": ["next/router", "next/link", "getServerSideProps"],
+        "nuxt": ["nuxt.config", "@nuxt/"],
+        "svelte": ["import { writable }", "svelte"],
+        "ember": ["ember-cli", "Ember.Application"],
+        "backbone": ["Backbone.Model", "Backbone.View"],
+        "laravel": ["Illuminate\\", "artisan"],
+        "symfony": ["Symfony\\Component", "symfony/"],
+        "dotnet": ["using System", "namespace", ".NET"],
+    }
+
+    def __init__(self, working_directory: Path):
+        """Initialize the language analyzer service.
+
+        Args:
+            working_directory: Project root directory
+        """
+        self.working_directory = working_directory
+        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
+
+    def detect_languages(self) -> List[str]:
+        """Detect programming languages used in the project.
+
+        WHY: Understanding which languages are used helps determine
+        appropriate analysis strategies and tools.
+
+        Returns:
+            List of detected language names
+        """
+        languages = set()
+
+        # Scan for source files
+        for ext, lang in self.FILE_EXTENSIONS.items():
+            files = list(self.working_directory.rglob(f"*{ext}"))
+            # Filter out vendor/node_modules directories
+            files = [
+                f
+                for f in files
+                if not any(
+                    part in [".git", "node_modules", "vendor", "__pycache__"]
+                    for part in f.parts
+                )
+            ]
+            if files:
+                languages.add(lang)
+
+        return sorted(list(languages))
+
+    def detect_primary_language(
+        self, file_counts: Optional[Dict[str, int]] = None
+    ) -> Optional[str]:
+        """Detect the primary programming language.
+
+        WHY: The primary language determines main analysis strategies
+        and helps prioritize which patterns to look for.
+
+        Args:
+            file_counts: Optional pre-computed file counts by extension
+
+        Returns:
+            Primary language name or None
+        """
+        if file_counts is None:
+            file_counts = self._count_files_by_extension()
+
+        if not file_counts:
+            return None
+
+        # Weight by file count and typical importance
+        language_weights = Counter()
+
+        for ext, count in file_counts.items():
+            if ext in self.FILE_EXTENSIONS:
+                lang = self.FILE_EXTENSIONS[ext]
+                # Give extra weight to certain languages based on project patterns
+                weight = count
+                if ext in [".py", ".js", ".ts", ".java", ".go", ".rs"]:
+                    weight *= 1.5  # Boost common application languages
+                language_weights[lang] += weight
+
+        if language_weights:
+            return language_weights.most_common(1)[0][0]
+
+        return None
+
+    def detect_frameworks(self, sample_files: Optional[List[Path]] = None) -> List[str]:
+        """Detect frameworks used in the project.
+
+        WHY: Framework detection helps understand project architecture
+        and development patterns that agents should follow.
+
+        Args:
+            sample_files: Optional list of files to analyze
+
+        Returns:
+            List of detected framework names
+        """
+        if sample_files is None:
+            sample_files = self._get_sample_source_files()
+
+        framework_mentions = Counter()
+
+        for file_path in sample_files:
+            try:
+                content = file_path.read_text(encoding="utf-8", errors="ignore")
+
+                for framework, patterns in self.FRAMEWORK_PATTERNS.items():
+                    if any(pattern in content for pattern in patterns):
+                        framework_mentions[framework] += 1
+
+            except Exception as e:
+                self.logger.debug(f"Error analyzing {file_path}: {e}")
+
+        # Return frameworks mentioned in multiple files
+        return [fw for fw, count in framework_mentions.items() if count >= 2]
+
+    def analyze_code_style(
+        self, sample_files: Optional[List[Path]] = None
+    ) -> List[str]:
+        """Analyze coding style and conventions.
+
+        WHY: Understanding code style helps agents generate code that
+        matches the project's existing patterns.
+
+        Args:
+            sample_files: Optional list of files to analyze
+
+        Returns:
+            List of detected code conventions
+        """
+        if sample_files is None:
+            sample_files = self._get_sample_source_files(limit=10)
+
+        conventions = []
+        pattern_counts = Counter()
+
+        for file_path in sample_files:
+            try:
+                content = file_path.read_text(encoding="utf-8", errors="ignore")
+
+                # Check for common patterns
+                if "class " in content and "def __init__" in content:
+                    pattern_counts["object_oriented"] += 1
+                if "async def" in content or "async function" in content:
+                    pattern_counts["async_programming"] += 1
+                if "@property" in content or "@decorator" in content:
+                    pattern_counts["decorators"] += 1
+                if "type:" in content or "->" in content:
+                    pattern_counts["type_hints"] += 1
+                if re.search(r'""".*?"""', content, re.DOTALL):
+                    pattern_counts["docstrings"] += 1
+
+            except Exception as e:
+                self.logger.debug(f"Error analyzing {file_path}: {e}")
+
+        # Include patterns found in multiple files
+        for pattern, count in pattern_counts.items():
+            if count >= 2:
+                conventions.append(pattern.replace("_", " ").title())
+
+        return conventions
+
+    def _count_files_by_extension(self) -> Dict[str, int]:
+        """Count files by extension in the project."""
+        counts = Counter()
+
+        for ext in self.FILE_EXTENSIONS.keys():
+            files = list(self.working_directory.rglob(f"*{ext}"))
+            # Filter out vendor directories
+            files = [
+                f
+                for f in files
+                if not any(
+                    part in [".git", "node_modules", "vendor", "__pycache__"]
+                    for part in f.parts
+                )
+            ]
+            if files:
+                counts[ext] = len(files)
+
+        return dict(counts)
+
+    def _get_sample_source_files(self, limit: int = 20) -> List[Path]:
+        """Get a sample of source files for analysis."""
+        source_files = []
+
+        for ext in self.FILE_EXTENSIONS.keys():
+            files = list(self.working_directory.rglob(f"*{ext}"))
+            # Filter out vendor directories
+            files = [
+                f
+                for f in files
+                if not any(
+                    part in [".git", "node_modules", "vendor", "__pycache__"]
+                    for part in f.parts
+                )
+            ]
+            source_files.extend(files[:5])  # Take up to 5 files per extension
+
+        return source_files[:limit]
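For orientation, a minimal usage sketch of the new service, assuming the import path implied by entry 27 in the file list above; the driver itself is hypothetical, not part of the package:

# Hypothetical driver for LanguageAnalyzerService (sketch, not package code).
from pathlib import Path

from claude_mpm.services.project.language_analyzer import LanguageAnalyzerService

analyzer = LanguageAnalyzerService(Path.cwd())
print(analyzer.detect_languages())         # e.g. ['python', 'typescript']
print(analyzer.detect_primary_language())  # highest-weighted language, or None
print(analyzer.detect_frameworks())        # frameworks matched in >= 2 sampled files

Note the weighting in detect_primary_language: counts for .py, .js, .ts, .java, .go, and .rs are multiplied by 1.5, so a project with 40 Python files and 50 shell scripts still reports python as primary (40 × 1.5 = 60 > 50).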
claude_mpm/services/project/metrics_collector.py
@@ -0,0 +1,410 @@
+#!/usr/bin/env python3
+"""
+Metrics Collector Service
+=========================
+
+WHY: Separates code metrics collection from the main analyzer to follow
+single responsibility principle. Collects quantitative metrics about
+the codebase for analysis and reporting.
+
+DECISION: Create a focused service for metrics that can provide insights
+about code complexity, size, and quality indicators.
+"""
+
+import logging
+from collections import Counter
+from dataclasses import asdict, dataclass
+from pathlib import Path
+from typing import Dict, List, Tuple
+
+
+@dataclass
+class ProjectMetrics:
+    """Container for project metrics."""
+
+    # Size metrics
+    total_files: int = 0
+    total_lines: int = 0
+    lines_of_code: int = 0
+    comment_lines: int = 0
+    blank_lines: int = 0
+
+    # File type distribution
+    file_types: Dict[str, int] = None
+    largest_files: List[Tuple[str, int]] = None
+
+    # Directory metrics
+    total_directories: int = 0
+    max_depth: int = 0
+    average_files_per_directory: float = 0.0
+
+    # Code complexity indicators
+    average_file_size: float = 0.0
+    files_over_500_lines: int = 0
+    files_over_1000_lines: int = 0
+
+    # Testing metrics
+    test_files: int = 0
+    test_coverage_files: int = 0
+    test_to_code_ratio: float = 0.0
+
+    def __post_init__(self):
+        """Initialize mutable defaults."""
+        if self.file_types is None:
+            self.file_types = {}
+        if self.largest_files is None:
+            self.largest_files = []
+
+    def to_dict(self) -> Dict:
+        """Convert to dictionary."""
+        return asdict(self)
+
+
+class MetricsCollectorService:
+    """Collects quantitative metrics about a project.
+
+    WHY: Metrics provide objective insights about code health, complexity,
+    and structure that help agents understand the project scale and quality.
+    """
+
+    # File extensions to analyze
+    CODE_EXTENSIONS = {
+        ".py",
+        ".js",
+        ".ts",
+        ".jsx",
+        ".tsx",
+        ".java",
+        ".cpp",
+        ".c",
+        ".cs",
+        ".go",
+        ".rs",
+        ".rb",
+        ".php",
+        ".swift",
+        ".kt",
+        ".scala",
+        ".r",
+        ".lua",
+    }
+
+    # Test file patterns
+    TEST_PATTERNS = [
+        "test_",
+        "_test.",
+        ".test.",
+        ".spec.",
+        "_spec.",
+        "tests/",
+        "test/",
+        "__tests__/",
+        "spec/",
+    ]
+
+    # Directories to exclude from analysis
+    EXCLUDE_DIRS = {
+        ".git",
+        "node_modules",
+        "vendor",
+        "__pycache__",
+        ".pytest_cache",
+        "dist",
+        "build",
+        "target",
+        ".venv",
+        "venv",
+        "env",
+        ".tox",
+        "coverage",
+        ".coverage",
+        "htmlcov",
+        ".mypy_cache",
+    }
+
+    def __init__(self, working_directory: Path):
+        """Initialize the metrics collector service.
+
+        Args:
+            working_directory: Project root directory
+        """
+        self.working_directory = working_directory
+        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
+
+    def collect_metrics(self) -> ProjectMetrics:
+        """Collect comprehensive project metrics.
+
+        WHY: Comprehensive metrics help understand project scale,
+        complexity, and quality indicators.
+
+        Returns:
+            ProjectMetrics object with collected data
+        """
+        metrics = ProjectMetrics()
+
+        # Collect file and directory metrics
+        self._collect_file_metrics(metrics)
+
+        # Calculate derived metrics
+        self._calculate_derived_metrics(metrics)
+
+        # Collect test metrics
+        self._collect_test_metrics(metrics)
+
+        return metrics
+
+    def analyze_file_sizes(self) -> Dict[str, List[Tuple[str, int]]]:
+        """Analyze file sizes and identify outliers.
+
+        WHY: Large files often indicate code that needs refactoring
+        and can be harder to maintain.
+
+        Returns:
+            Dictionary with file size analysis
+        """
+        file_sizes = []
+
+        for file_path in self._iter_code_files():
+            try:
+                size = file_path.stat().st_size
+                lines = len(
+                    file_path.read_text(encoding="utf-8", errors="ignore").splitlines()
+                )
+                file_sizes.append(
+                    (str(file_path.relative_to(self.working_directory)), lines)
+                )
+            except Exception as e:
+                self.logger.debug(f"Error analyzing {file_path}: {e}")
+
+        # Sort by size
+        file_sizes.sort(key=lambda x: x[1], reverse=True)
+
+        return {
+            "largest_files": file_sizes[:10],
+            "files_over_500_lines": [f for f in file_sizes if f[1] > 500],
+            "files_over_1000_lines": [f for f in file_sizes if f[1] > 1000],
+        }
+
+    def analyze_directory_structure(self) -> Dict[str, any]:
+        """Analyze project directory structure.
+
+        WHY: Directory structure reveals architectural decisions
+        and organizational patterns.
+
+        Returns:
+            Dictionary with directory structure analysis
+        """
+        dir_info = {
+            "total_directories": 0,
+            "max_depth": 0,
+            "directories_by_depth": {},
+            "files_per_directory": {},
+        }
+
+        for dirpath, dirnames, filenames in self.working_directory.walk():
+            # Skip excluded directories
+            dirnames[:] = [d for d in dirnames if d not in self.EXCLUDE_DIRS]
+
+            # Calculate depth
+            depth = len(Path(dirpath).relative_to(self.working_directory).parts)
+            dir_info["max_depth"] = max(dir_info["max_depth"], depth)
+
+            # Count directories
+            dir_info["total_directories"] += 1
+
+            # Track directories by depth
+            if depth not in dir_info["directories_by_depth"]:
+                dir_info["directories_by_depth"][depth] = 0
+            dir_info["directories_by_depth"][depth] += 1
+
+            # Track files per directory
+            rel_path = str(Path(dirpath).relative_to(self.working_directory))
+            dir_info["files_per_directory"][rel_path] = len(filenames)
+
+        return dir_info
+
+    def calculate_code_to_comment_ratio(self) -> Dict[str, float]:
+        """Calculate code to comment ratio for different file types.
+
+        WHY: Comment ratio indicates documentation quality and
+        code self-documentation level.
+
+        Returns:
+            Dictionary with ratios by file type
+        """
+        ratios = {}
+
+        # Language-specific comment patterns
+        comment_patterns = {
+            ".py": (r"#", r'"""', r"'''"),
+            ".js": (r"//", r"/*", r"*/"),
+            ".java": (r"//", r"/*", r"*/"),
+            ".cpp": (r"//", r"/*", r"*/"),
+            ".c": (r"//", r"/*", r"*/"),
+            ".go": (r"//", r"/*", r"*/"),
+            ".rs": (r"//", r"/*", r"*/"),
+            ".rb": (r"#", r"=begin", r"=end"),
+        }
+
+        for ext, patterns in comment_patterns.items():
+            files = list(self.working_directory.rglob(f"*{ext}"))
+            files = [f for f in files if self._should_analyze_file(f)]
+
+            if not files:
+                continue
+
+            total_lines = 0
+            comment_lines = 0
+
+            for file_path in files[:20]:  # Sample files
+                try:
+                    content = file_path.read_text(encoding="utf-8", errors="ignore")
+                    lines = content.splitlines()
+                    total_lines += len(lines)
+
+                    for line in lines:
+                        stripped = line.strip()
+                        if any(
+                            stripped.startswith(p)
+                            for p in patterns
+                            if isinstance(p, str)
+                        ):
+                            comment_lines += 1
+
+                except Exception as e:
+                    self.logger.debug(f"Error analyzing {file_path}: {e}")
+
+            if total_lines > 0:
+                ratios[ext] = comment_lines / total_lines
+
+        return ratios
+
+    def _collect_file_metrics(self, metrics: ProjectMetrics) -> None:
+        """Collect basic file metrics."""
+        file_counter = Counter()
+        total_lines = 0
+        code_lines = 0
+        comment_lines = 0
+        blank_lines = 0
+        file_sizes = []
+
+        for file_path in self._iter_code_files():
+            try:
+                content = file_path.read_text(encoding="utf-8", errors="ignore")
+                lines = content.splitlines()
+
+                metrics.total_files += 1
+                file_ext = file_path.suffix
+                file_counter[file_ext] += 1
+
+                file_line_count = len(lines)
+                total_lines += file_line_count
+                file_sizes.append(
+                    (
+                        str(file_path.relative_to(self.working_directory)),
+                        file_line_count,
+                    )
+                )
+
+                # Count line types
+                for line in lines:
+                    stripped = line.strip()
+                    if not stripped:
+                        blank_lines += 1
+                    elif stripped.startswith(("#", "//", "/*", "*")):
+                        comment_lines += 1
+                    else:
+                        code_lines += 1
+
+                # Track large files
+                if file_line_count > 500:
+                    metrics.files_over_500_lines += 1
+                if file_line_count > 1000:
+                    metrics.files_over_1000_lines += 1
+
+            except Exception as e:
+                self.logger.debug(f"Error collecting metrics for {file_path}: {e}")
+
+        metrics.total_lines = total_lines
+        metrics.lines_of_code = code_lines
+        metrics.comment_lines = comment_lines
+        metrics.blank_lines = blank_lines
+        metrics.file_types = dict(file_counter)
+
+        # Get largest files
+        file_sizes.sort(key=lambda x: x[1], reverse=True)
+        metrics.largest_files = file_sizes[:10]
+
+    def _collect_test_metrics(self, metrics: ProjectMetrics) -> None:
+        """Collect testing-related metrics."""
+        test_files = 0
+        test_lines = 0
+
+        for file_path in self._iter_code_files():
+            try:
+                rel_path = str(file_path.relative_to(self.working_directory))
+
+                # Check if it's a test file
+                is_test = any(
+                    pattern in rel_path.lower() for pattern in self.TEST_PATTERNS
+                )
+
+                if is_test:
+                    test_files += 1
+                    content = file_path.read_text(encoding="utf-8", errors="ignore")
+                    test_lines += len(content.splitlines())
+
+            except Exception as e:
+                self.logger.debug(f"Error collecting test metrics for {file_path}: {e}")
+
+        metrics.test_files = test_files
+
+        # Calculate test to code ratio
+        if metrics.lines_of_code > 0:
+            metrics.test_to_code_ratio = test_lines / metrics.lines_of_code
+
+        # Check for coverage files
+        coverage_indicators = [".coverage", "coverage.xml", "coverage.json", "htmlcov"]
+        for indicator in coverage_indicators:
+            if (self.working_directory / indicator).exists():
+                metrics.test_coverage_files += 1
+
+    def _calculate_derived_metrics(self, metrics: ProjectMetrics) -> None:
+        """Calculate derived metrics from collected data."""
+        # Calculate average file size
+        if metrics.total_files > 0:
+            metrics.average_file_size = metrics.total_lines / metrics.total_files
+
+        # Count directories
+        dir_count = 0
+        for dirpath, dirnames, _ in self.working_directory.walk():
+            dirnames[:] = [d for d in dirnames if d not in self.EXCLUDE_DIRS]
+            dir_count += len(dirnames)
+
+        metrics.total_directories = dir_count
+
+        # Calculate average files per directory
+        if dir_count > 0:
+            metrics.average_files_per_directory = metrics.total_files / dir_count
+
+        # Calculate max depth
+        max_depth = 0
+        for file_path in self._iter_code_files():
+            depth = len(file_path.relative_to(self.working_directory).parts)
+            max_depth = max(max_depth, depth)
+        metrics.max_depth = max_depth
+
+    def _iter_code_files(self):
+        """Iterate over code files in the project."""
+        for ext in self.CODE_EXTENSIONS:
+            for file_path in self.working_directory.rglob(f"*{ext}"):
+                if self._should_analyze_file(file_path):
+                    yield file_path
+
+    def _should_analyze_file(self, file_path: Path) -> bool:
+        """Check if a file should be analyzed."""
+        # Skip files in excluded directories
+        for part in file_path.parts:
+            if part in self.EXCLUDE_DIRS:
+                return False
+        return True
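As with the language analyzer, a minimal usage sketch follows, assuming the import path implied by entry 28 in the file list above; the driver itself is hypothetical:

# Hypothetical driver for MetricsCollectorService (sketch, not package code).
from pathlib import Path

from claude_mpm.services.project.metrics_collector import MetricsCollectorService

collector = MetricsCollectorService(Path.cwd())
metrics = collector.collect_metrics()  # returns a ProjectMetrics dataclass
print(metrics.lines_of_code, metrics.test_to_code_ratio)
print(collector.analyze_file_sizes()["largest_files"])  # top 10 files by line count

Two caveats visible in the code as shipped: analyze_directory_structure and _calculate_derived_metrics call self.working_directory.walk(), and pathlib.Path.walk() only exists on Python 3.12+, so these methods raise AttributeError on older interpreters; and the Dict[str, any] annotation uses the builtin any rather than typing.Any, which works at runtime but will not type-check.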
claude_mpm/services/ticket_manager.py
@@ -6,7 +6,11 @@ class TicketManager:
 
     def create_task(self, *args, **kwargs):
         """Stub method."""
-        return
+        return "TSK-STUB-001"  # Return a stub ticket ID
+
+    def create_ticket(self, *args, **kwargs):
+        """Stub method - alias for create_task."""
+        return self.create_task(*args, **kwargs)
 
     def list_recent_tickets(self, *args, **kwargs):
         """Stub method."""