gitflow-analytics 1.0.1__py3-none-any.whl → 1.3.6__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
Files changed (119)
  1. gitflow_analytics/__init__.py +11 -11
  2. gitflow_analytics/_version.py +2 -2
  3. gitflow_analytics/classification/__init__.py +31 -0
  4. gitflow_analytics/classification/batch_classifier.py +752 -0
  5. gitflow_analytics/classification/classifier.py +464 -0
  6. gitflow_analytics/classification/feature_extractor.py +725 -0
  7. gitflow_analytics/classification/linguist_analyzer.py +574 -0
  8. gitflow_analytics/classification/model.py +455 -0
  9. gitflow_analytics/cli.py +4490 -378
  10. gitflow_analytics/cli_rich.py +503 -0
  11. gitflow_analytics/config/__init__.py +43 -0
  12. gitflow_analytics/config/errors.py +261 -0
  13. gitflow_analytics/config/loader.py +904 -0
  14. gitflow_analytics/config/profiles.py +264 -0
  15. gitflow_analytics/config/repository.py +124 -0
  16. gitflow_analytics/config/schema.py +441 -0
  17. gitflow_analytics/config/validator.py +154 -0
  18. gitflow_analytics/config.py +44 -398
  19. gitflow_analytics/core/analyzer.py +1320 -172
  20. gitflow_analytics/core/branch_mapper.py +132 -132
  21. gitflow_analytics/core/cache.py +1554 -175
  22. gitflow_analytics/core/data_fetcher.py +1193 -0
  23. gitflow_analytics/core/identity.py +571 -185
  24. gitflow_analytics/core/metrics_storage.py +526 -0
  25. gitflow_analytics/core/progress.py +372 -0
  26. gitflow_analytics/core/schema_version.py +269 -0
  27. gitflow_analytics/extractors/base.py +13 -11
  28. gitflow_analytics/extractors/ml_tickets.py +1100 -0
  29. gitflow_analytics/extractors/story_points.py +77 -59
  30. gitflow_analytics/extractors/tickets.py +841 -89
  31. gitflow_analytics/identity_llm/__init__.py +6 -0
  32. gitflow_analytics/identity_llm/analysis_pass.py +231 -0
  33. gitflow_analytics/identity_llm/analyzer.py +464 -0
  34. gitflow_analytics/identity_llm/models.py +76 -0
  35. gitflow_analytics/integrations/github_integration.py +258 -87
  36. gitflow_analytics/integrations/jira_integration.py +572 -123
  37. gitflow_analytics/integrations/orchestrator.py +206 -82
  38. gitflow_analytics/metrics/activity_scoring.py +322 -0
  39. gitflow_analytics/metrics/branch_health.py +470 -0
  40. gitflow_analytics/metrics/dora.py +542 -179
  41. gitflow_analytics/models/database.py +986 -59
  42. gitflow_analytics/pm_framework/__init__.py +115 -0
  43. gitflow_analytics/pm_framework/adapters/__init__.py +50 -0
  44. gitflow_analytics/pm_framework/adapters/jira_adapter.py +1845 -0
  45. gitflow_analytics/pm_framework/base.py +406 -0
  46. gitflow_analytics/pm_framework/models.py +211 -0
  47. gitflow_analytics/pm_framework/orchestrator.py +652 -0
  48. gitflow_analytics/pm_framework/registry.py +333 -0
  49. gitflow_analytics/qualitative/__init__.py +29 -0
  50. gitflow_analytics/qualitative/chatgpt_analyzer.py +259 -0
  51. gitflow_analytics/qualitative/classifiers/__init__.py +13 -0
  52. gitflow_analytics/qualitative/classifiers/change_type.py +742 -0
  53. gitflow_analytics/qualitative/classifiers/domain_classifier.py +506 -0
  54. gitflow_analytics/qualitative/classifiers/intent_analyzer.py +535 -0
  55. gitflow_analytics/qualitative/classifiers/llm/__init__.py +35 -0
  56. gitflow_analytics/qualitative/classifiers/llm/base.py +193 -0
  57. gitflow_analytics/qualitative/classifiers/llm/batch_processor.py +383 -0
  58. gitflow_analytics/qualitative/classifiers/llm/cache.py +479 -0
  59. gitflow_analytics/qualitative/classifiers/llm/cost_tracker.py +435 -0
  60. gitflow_analytics/qualitative/classifiers/llm/openai_client.py +403 -0
  61. gitflow_analytics/qualitative/classifiers/llm/prompts.py +373 -0
  62. gitflow_analytics/qualitative/classifiers/llm/response_parser.py +287 -0
  63. gitflow_analytics/qualitative/classifiers/llm_commit_classifier.py +607 -0
  64. gitflow_analytics/qualitative/classifiers/risk_analyzer.py +438 -0
  65. gitflow_analytics/qualitative/core/__init__.py +13 -0
  66. gitflow_analytics/qualitative/core/llm_fallback.py +657 -0
  67. gitflow_analytics/qualitative/core/nlp_engine.py +382 -0
  68. gitflow_analytics/qualitative/core/pattern_cache.py +479 -0
  69. gitflow_analytics/qualitative/core/processor.py +673 -0
  70. gitflow_analytics/qualitative/enhanced_analyzer.py +2236 -0
  71. gitflow_analytics/qualitative/example_enhanced_usage.py +420 -0
  72. gitflow_analytics/qualitative/models/__init__.py +25 -0
  73. gitflow_analytics/qualitative/models/schemas.py +306 -0
  74. gitflow_analytics/qualitative/utils/__init__.py +13 -0
  75. gitflow_analytics/qualitative/utils/batch_processor.py +339 -0
  76. gitflow_analytics/qualitative/utils/cost_tracker.py +345 -0
  77. gitflow_analytics/qualitative/utils/metrics.py +361 -0
  78. gitflow_analytics/qualitative/utils/text_processing.py +285 -0
  79. gitflow_analytics/reports/__init__.py +100 -0
  80. gitflow_analytics/reports/analytics_writer.py +550 -18
  81. gitflow_analytics/reports/base.py +648 -0
  82. gitflow_analytics/reports/branch_health_writer.py +322 -0
  83. gitflow_analytics/reports/classification_writer.py +924 -0
  84. gitflow_analytics/reports/cli_integration.py +427 -0
  85. gitflow_analytics/reports/csv_writer.py +1700 -216
  86. gitflow_analytics/reports/data_models.py +504 -0
  87. gitflow_analytics/reports/database_report_generator.py +427 -0
  88. gitflow_analytics/reports/example_usage.py +344 -0
  89. gitflow_analytics/reports/factory.py +499 -0
  90. gitflow_analytics/reports/formatters.py +698 -0
  91. gitflow_analytics/reports/html_generator.py +1116 -0
  92. gitflow_analytics/reports/interfaces.py +489 -0
  93. gitflow_analytics/reports/json_exporter.py +2770 -0
  94. gitflow_analytics/reports/narrative_writer.py +2289 -158
  95. gitflow_analytics/reports/story_point_correlation.py +1144 -0
  96. gitflow_analytics/reports/weekly_trends_writer.py +389 -0
  97. gitflow_analytics/training/__init__.py +5 -0
  98. gitflow_analytics/training/model_loader.py +377 -0
  99. gitflow_analytics/training/pipeline.py +550 -0
  100. gitflow_analytics/tui/__init__.py +5 -0
  101. gitflow_analytics/tui/app.py +724 -0
  102. gitflow_analytics/tui/screens/__init__.py +8 -0
  103. gitflow_analytics/tui/screens/analysis_progress_screen.py +496 -0
  104. gitflow_analytics/tui/screens/configuration_screen.py +523 -0
  105. gitflow_analytics/tui/screens/loading_screen.py +348 -0
  106. gitflow_analytics/tui/screens/main_screen.py +321 -0
  107. gitflow_analytics/tui/screens/results_screen.py +722 -0
  108. gitflow_analytics/tui/widgets/__init__.py +7 -0
  109. gitflow_analytics/tui/widgets/data_table.py +255 -0
  110. gitflow_analytics/tui/widgets/export_modal.py +301 -0
  111. gitflow_analytics/tui/widgets/progress_widget.py +187 -0
  112. gitflow_analytics-1.3.6.dist-info/METADATA +1015 -0
  113. gitflow_analytics-1.3.6.dist-info/RECORD +122 -0
  114. gitflow_analytics-1.0.1.dist-info/METADATA +0 -463
  115. gitflow_analytics-1.0.1.dist-info/RECORD +0 -31
  116. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.3.6.dist-info}/WHEEL +0 -0
  117. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.3.6.dist-info}/entry_points.txt +0 -0
  118. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.3.6.dist-info}/licenses/LICENSE +0 -0
  119. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.3.6.dist-info}/top_level.txt +0 -0
gitflow_analytics/qualitative/classifiers/risk_analyzer.py
@@ -0,0 +1,438 @@
+ """Risk analyzer for assessing commit risk levels."""
+
+ import importlib.util
+ import logging
+ import re
+ from typing import Any
+
+ from ..models.schemas import RiskConfig
+
+ # Check if spacy is available without importing it
+ SPACY_AVAILABLE = importlib.util.find_spec("spacy") is not None
+
+ if SPACY_AVAILABLE:
+     from spacy.tokens import Doc
+ else:
+     Doc = Any
+
+
+ class RiskAnalyzer:
+     """Analyze commits to assess risk level and identify risk factors.
+
+     This analyzer evaluates multiple dimensions of risk:
+     - Content risk: Security-sensitive keywords, critical system changes
+     - Size risk: Large commits affecting many files/lines
+     - Context risk: Production deployments, emergency fixes
+     - Pattern risk: File patterns indicating high-risk areas
+
+     Risk levels: low, medium, high, critical
+     """
+
+     def __init__(self, config: RiskConfig):
+         """Initialize risk analyzer.
+
+         Args:
+             config: Configuration for risk analysis
+         """
+         self.config = config
+         self.logger = logging.getLogger(__name__)
+
+         # Compile file risk patterns for efficiency
+         self._compile_file_patterns()
+
+         # Additional risk patterns not in config
+         self.critical_keywords = {
+             "password",
+             "secret",
+             "key",
+             "token",
+             "credential",
+             "auth",
+             "admin",
+             "root",
+             "sudo",
+             "permission",
+             "access",
+             "security",
+         }
+
+         self.production_keywords = {
+             "production",
+             "prod",
+             "live",
+             "release",
+             "deploy",
+             "deployment",
+             "critical",
+             "urgent",
+             "emergency",
+             "hotfix",
+             "immediate",
+         }
+
+         self.database_keywords = {
+             "database",
+             "db",
+             "migration",
+             "schema",
+             "table",
+             "column",
+             "index",
+             "constraint",
+             "trigger",
+             "procedure",
+         }
+
+         # File extension risk mapping
+         self.extension_risk = {
+             # High risk extensions
+             ".sql": "high",
+             ".py": "medium",  # Could be config or critical logic
+             ".js": "medium",
+             ".php": "medium",
+             ".java": "medium",
+             ".cs": "medium",
+             ".go": "medium",
+             ".rb": "medium",
+             # Configuration files
+             ".yml": "medium",
+             ".yaml": "medium",
+             ".json": "medium",
+             ".toml": "medium",
+             ".ini": "medium",
+             ".conf": "medium",
+             ".config": "medium",
+             # Low risk extensions
+             ".md": "low",
+             ".txt": "low",
+             ".rst": "low",
+             ".css": "low",
+             ".scss": "low",
+             ".less": "low",
+         }
+
+     def _compile_file_patterns(self) -> None:
+         """Compile file risk patterns for efficient matching."""
+         self.compiled_file_patterns = {}
+
+         for pattern, risk_level in self.config.file_risk_patterns.items():
+             try:
+                 # Convert glob pattern to regex
+                 regex_pattern = self._glob_to_regex(pattern)
+                 self.compiled_file_patterns[re.compile(regex_pattern, re.IGNORECASE)] = risk_level
+             except re.error as e:
+                 self.logger.warning(f"Invalid risk pattern '{pattern}': {e}")
+
+     def _glob_to_regex(self, pattern: str) -> str:
+         """Convert glob pattern to regex."""
+         pattern = pattern.replace(".", r"\.")
+         pattern = pattern.replace("*", ".*")
+         pattern = pattern.replace("?", ".")
+         pattern = f"^{pattern}$"
+         return pattern
+
+     def assess(self, commit: dict[str, Any], doc: Doc) -> dict[str, Any]:
+         """Assess risk level and identify risk factors for a commit.
+
+         Args:
+             commit: Commit dictionary with message, files, stats, etc.
+             doc: spaCy processed document (may be None)
+
+         Returns:
+             Dictionary with 'level', 'factors', 'score', and 'breakdown' keys
+         """
+         risk_factors = []
+         risk_scores = []
+
+         # Analyze message content for risk keywords
+         message_risk = self._analyze_message_risk(commit.get("message", ""), doc)
+         risk_factors.extend(message_risk["factors"])
+         risk_scores.append(message_risk["score"])
+
+         # Analyze file patterns for risk
+         file_risk = self._analyze_file_risk(commit.get("files_changed", []))
+         risk_factors.extend(file_risk["factors"])
+         risk_scores.append(file_risk["score"])
+
+         # Analyze commit size for risk
+         size_risk = self._analyze_size_risk(commit)
+         risk_factors.extend(size_risk["factors"])
+         risk_scores.append(size_risk["score"])
+
+         # Analyze timing and context
+         context_risk = self._analyze_context_risk(commit)
+         risk_factors.extend(context_risk["factors"])
+         risk_scores.append(context_risk["score"])
+
+         # Calculate overall risk level
+         max_risk_score = max(risk_scores) if risk_scores else 0.0
+         risk_level = self._score_to_level(max_risk_score)
+
+         return {
+             "level": risk_level,
+             "factors": list(set(risk_factors)),  # Remove duplicates
+             "score": max_risk_score,
+             "breakdown": {
+                 "message_risk": message_risk["score"],
+                 "file_risk": file_risk["score"],
+                 "size_risk": size_risk["score"],
+                 "context_risk": context_risk["score"],
+             },
+         }
+
+     def _analyze_message_risk(self, message: str, doc: Doc) -> dict[str, Any]:
+         """Analyze commit message for risk indicators.
+
+         Args:
+             message: Commit message
+             doc: spaCy processed document
+
+         Returns:
+             Dictionary with score and factors
+         """
+         if not message:
+             return {"score": 0.0, "factors": []}
+
+         message_lower = message.lower()
+         factors = []
+         risk_score = 0.0
+
+         # Check for high-risk patterns
+         for pattern in self.config.high_risk_patterns:
+             if pattern.lower() in message_lower:
+                 factors.append(f"high_risk_keyword:{pattern}")
+                 risk_score = max(risk_score, 0.8)  # High risk
+
+         # Check for medium-risk patterns
+         for pattern in self.config.medium_risk_patterns:
+             if pattern.lower() in message_lower:
+                 factors.append(f"medium_risk_keyword:{pattern}")
+                 risk_score = max(risk_score, 0.5)  # Medium risk
+
+         # Check for critical security keywords
+         for keyword in self.critical_keywords:
+             if keyword in message_lower:
+                 factors.append(f"security_keyword:{keyword}")
+                 risk_score = max(risk_score, 0.9)  # Critical risk
+
+         # Check for production-related keywords
+         for keyword in self.production_keywords:
+             if keyword in message_lower:
+                 factors.append(f"production_keyword:{keyword}")
+                 risk_score = max(risk_score, 0.7)  # High risk
+
+         # Check for database-related keywords
+         for keyword in self.database_keywords:
+             if keyword in message_lower:
+                 factors.append(f"database_keyword:{keyword}")
+                 risk_score = max(risk_score, 0.6)  # Medium-high risk
+
+         # Check for urgency indicators
+         urgency_patterns = [
+             r"\b(urgent|critical|emergency|asap|immediate)\b",
+             r"\b(hotfix|quickfix|patch)\b",
+             r"\b(breaking|major)\b",
+         ]
+
+         for pattern in urgency_patterns:
+             if re.search(pattern, message_lower):
+                 factors.append(f"urgency_indicator:{pattern}")
+                 risk_score = max(risk_score, 0.6)
+
+         return {"score": risk_score, "factors": factors}
+
+     def _analyze_file_risk(self, files: list[str]) -> dict[str, Any]:
+         """Analyze changed files for risk indicators.
+
+         Args:
+             files: List of file paths
+
+         Returns:
+             Dictionary with score and factors
+         """
+         if not files:
+             return {"score": 0.0, "factors": []}
+
+         factors = []
+         risk_score = 0.0
+
+         for file_path in files:
+             file_lower = file_path.lower()
+
+             # Check compiled file risk patterns
+             for pattern, risk_level in self.compiled_file_patterns.items():
+                 if pattern.search(file_path):
+                     factors.append(f"file_pattern:{risk_level}:{file_path}")
+                     if risk_level == "critical":
+                         risk_score = max(risk_score, 1.0)
+                     elif risk_level == "high":
+                         risk_score = max(risk_score, 0.8)
+                     elif risk_level == "medium":
+                         risk_score = max(risk_score, 0.5)
+
+             # Check file extensions
+             if "." in file_path:
+                 ext = "." + file_path.split(".")[-1].lower()
+                 if ext in self.extension_risk:
+                     ext_risk = self.extension_risk[ext]
+                     factors.append(f"file_extension:{ext_risk}:{ext}")
+                     if ext_risk == "high":
+                         risk_score = max(risk_score, 0.7)
+                     elif ext_risk == "medium":
+                         risk_score = max(risk_score, 0.4)
+
+             # Check for sensitive file names
+             sensitive_patterns = [
+                 r".*password.*",
+                 r".*secret.*",
+                 r".*key.*",
+                 r".*token.*",
+                 r".*config.*",
+                 r".*env.*",
+                 r".*credential.*",
+             ]
+
+             for pattern in sensitive_patterns:
+                 if re.search(pattern, file_lower):
+                     factors.append(f"sensitive_filename:{file_path}")
+                     risk_score = max(risk_score, 0.8)
+                     break
+
+         return {"score": risk_score, "factors": factors}
+
+     def _analyze_size_risk(self, commit: dict[str, Any]) -> dict[str, Any]:
+         """Analyze commit size for risk indicators.
+
+         Args:
+             commit: Commit dictionary
+
+         Returns:
+             Dictionary with score and factors
+         """
+         factors = []
+         risk_score = 0.0
+
+         files_changed = len(commit.get("files_changed", []))
+         insertions = commit.get("insertions", 0)
+         deletions = commit.get("deletions", 0)
+         total_changes = insertions + deletions
+
+         # Check file count thresholds
+         if files_changed >= self.config.size_thresholds["large_commit_files"]:
+             factors.append(f"large_file_count:{files_changed}")
+             # Very large commits get higher risk score
+             risk_score = max(risk_score, 0.8) if files_changed >= 50 else max(risk_score, 0.6)
+
+         # Check line change thresholds
+         if total_changes >= self.config.size_thresholds["massive_commit_lines"]:
+             factors.append(f"massive_changes:{total_changes}")
+             risk_score = max(risk_score, 0.9)
+         elif total_changes >= self.config.size_thresholds["large_commit_lines"]:
+             factors.append(f"large_changes:{total_changes}")
+             risk_score = max(risk_score, 0.6)
+
+         # Check deletion ratio (high deletion ratio can be risky)
+         if total_changes > 0:
+             deletion_ratio = deletions / total_changes
+             if deletion_ratio > 0.7:  # More than 70% deletions
+                 factors.append(f"high_deletion_ratio:{deletion_ratio:.2f}")
+                 risk_score = max(risk_score, 0.5)
+
+         return {"score": risk_score, "factors": factors}
+
+     def _analyze_context_risk(self, commit: dict[str, Any]) -> dict[str, Any]:
+         """Analyze commit context for risk indicators.
+
+         Args:
+             commit: Commit dictionary
+
+         Returns:
+             Dictionary with score and factors
+         """
+         factors = []
+         risk_score = 0.0
+
+         # Check branch context if available
+         branch = commit.get("branch", "").lower()
+         if branch:
+             if any(term in branch for term in ["main", "master", "prod", "production"]):
+                 factors.append(f"main_branch:{branch}")
+                 risk_score = max(risk_score, 0.6)
+             elif "hotfix" in branch:
+                 factors.append(f"hotfix_branch:{branch}")
+                 risk_score = max(risk_score, 0.8)
+
+         # Check commit timing (if timestamp available)
+         # Weekend/night commits might be higher risk
+         timestamp = commit.get("timestamp")
+         if timestamp:
+             # This would require datetime analysis
+             # For now, skip this check
+             pass
+
+         # Check for merge commits
+         if commit.get("is_merge", False):
+             factors.append("merge_commit")
+             # Merges can be risky depending on what's being merged
+             risk_score = max(risk_score, 0.3)
+
+         return {"score": risk_score, "factors": factors}
+
+     def _score_to_level(self, score: float) -> str:
+         """Convert risk score to risk level.
+
+         Args:
+             score: Risk score (0.0 to 1.0)
+
+         Returns:
+             Risk level string
+         """
+         if score >= 0.9:
+             return "critical"
+         elif score >= 0.7:
+             return "high"
+         elif score >= 0.4:
+             return "medium"
+         else:
+             return "low"
+
+     def get_risk_statistics(self, commits: list[dict[str, Any]]) -> dict[str, Any]:
+         """Get risk analysis statistics for a set of commits.
+
+         Args:
+             commits: List of commit dictionaries
+
+         Returns:
+             Dictionary with risk statistics
+         """
+         if not commits:
+             return {"total_commits": 0}
+
+         risk_levels = {"low": 0, "medium": 0, "high": 0, "critical": 0}
+         all_factors = []
+
+         for commit in commits:
+             # Quick risk assessment without full doc processing
+             risk_result = self.assess(commit, None)
+             risk_levels[risk_result["level"]] += 1
+             all_factors.extend(risk_result["factors"])
+
+         # Count factor frequencies
+         factor_counts = {}
+         for factor in all_factors:
+             factor_type = factor.split(":")[0] if ":" in factor else factor
+             factor_counts[factor_type] = factor_counts.get(factor_type, 0) + 1
+
+         return {
+             "total_commits": len(commits),
+             "risk_distribution": risk_levels,
+             "risk_percentages": {
+                 level: (count / len(commits)) * 100 for level, count in risk_levels.items()
+             },
+             "common_risk_factors": sorted(factor_counts.items(), key=lambda x: x[1], reverse=True)[
+                 :10
+             ],
+             "high_risk_commits": risk_levels["high"] + risk_levels["critical"],
+             "high_risk_percentage": ((risk_levels["high"] + risk_levels["critical"]) / len(commits))
+             * 100,
+         }
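For orientation, here is a minimal, hypothetical usage sketch of the new RiskAnalyzer (not part of the diff). It assumes RiskConfig accepts, as keyword arguments, the four fields the analyzer reads (file_risk_patterns, high_risk_patterns, medium_risk_patterns, size_thresholds); the real schema lives in gitflow_analytics/qualitative/models/schemas.py, which this hunk does not show.

```python
# Hypothetical usage sketch; RiskConfig's real constructor is defined in
# gitflow_analytics/qualitative/models/schemas.py and may differ.
from gitflow_analytics.qualitative.classifiers.risk_analyzer import RiskAnalyzer
from gitflow_analytics.qualitative.models.schemas import RiskConfig

config = RiskConfig(
    file_risk_patterns={"*.env": "critical", "*migration*": "high"},  # glob -> level
    high_risk_patterns=["revert", "rollback"],
    medium_risk_patterns=["refactor"],
    size_thresholds={
        "large_commit_files": 10,
        "large_commit_lines": 500,
        "massive_commit_lines": 2000,
    },
)
analyzer = RiskAnalyzer(config)

commit = {
    "message": "hotfix: rotate production API token",
    "files_changed": ["config/secrets.env", "app/auth.py"],
    "insertions": 42,
    "deletions": 7,
    "branch": "hotfix/token-rotation",
    "is_merge": False,
}

# assess() tolerates doc=None: only the keyword/regex checks run in that case.
result = analyzer.assess(commit, None)
print(result["level"])      # "critical": "token" scores 0.9 and *.env scores 1.0
print(result["breakdown"])  # per-dimension scores for message/file/size/context
```

Note that assess() takes the maximum across the four dimension scores rather than summing them, so a single strong signal (here the security keyword and the *.env file pattern) is enough to push a commit to critical.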
gitflow_analytics/qualitative/core/__init__.py
@@ -0,0 +1,13 @@
+ """Core processing components for qualitative analysis."""
+
+ from .llm_fallback import LLMFallback
+ from .nlp_engine import NLPEngine
+ from .pattern_cache import PatternCache
+ from .processor import QualitativeProcessor
+
+ __all__ = [
+     "QualitativeProcessor",
+     "NLPEngine",
+     "LLMFallback",
+     "PatternCache",
+ ]
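Since this initializer re-exports the core classes, downstream code can import the qualitative pipeline from the package rather than from individual submodules. A minimal import sketch (instantiation omitted, since the constructors are defined in modules this section of the diff does not show):

```python
# Import sketch only: constructor signatures live in llm_fallback.py,
# nlp_engine.py, pattern_cache.py, and processor.py, not shown here.
from gitflow_analytics.qualitative.core import (
    LLMFallback,
    NLPEngine,
    PatternCache,
    QualitativeProcessor,
)
```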