gitflow-analytics 1.3.11__py3-none-any.whl → 3.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. gitflow_analytics/_version.py +1 -1
  2. gitflow_analytics/classification/batch_classifier.py +156 -4
  3. gitflow_analytics/cli.py +803 -135
  4. gitflow_analytics/config/loader.py +39 -1
  5. gitflow_analytics/config/schema.py +1 -0
  6. gitflow_analytics/core/cache.py +20 -0
  7. gitflow_analytics/core/data_fetcher.py +1051 -117
  8. gitflow_analytics/core/git_auth.py +169 -0
  9. gitflow_analytics/core/git_timeout_wrapper.py +347 -0
  10. gitflow_analytics/core/metrics_storage.py +12 -3
  11. gitflow_analytics/core/progress.py +219 -18
  12. gitflow_analytics/core/subprocess_git.py +145 -0
  13. gitflow_analytics/extractors/ml_tickets.py +3 -2
  14. gitflow_analytics/extractors/tickets.py +93 -8
  15. gitflow_analytics/integrations/jira_integration.py +1 -1
  16. gitflow_analytics/integrations/orchestrator.py +47 -29
  17. gitflow_analytics/metrics/branch_health.py +3 -2
  18. gitflow_analytics/models/database.py +72 -1
  19. gitflow_analytics/pm_framework/adapters/jira_adapter.py +12 -5
  20. gitflow_analytics/pm_framework/orchestrator.py +8 -3
  21. gitflow_analytics/qualitative/classifiers/llm/openai_client.py +24 -4
  22. gitflow_analytics/qualitative/classifiers/llm_commit_classifier.py +3 -1
  23. gitflow_analytics/qualitative/core/llm_fallback.py +34 -2
  24. gitflow_analytics/reports/narrative_writer.py +118 -74
  25. gitflow_analytics/security/__init__.py +11 -0
  26. gitflow_analytics/security/config.py +189 -0
  27. gitflow_analytics/security/extractors/__init__.py +7 -0
  28. gitflow_analytics/security/extractors/dependency_checker.py +379 -0
  29. gitflow_analytics/security/extractors/secret_detector.py +197 -0
  30. gitflow_analytics/security/extractors/vulnerability_scanner.py +333 -0
  31. gitflow_analytics/security/llm_analyzer.py +347 -0
  32. gitflow_analytics/security/reports/__init__.py +5 -0
  33. gitflow_analytics/security/reports/security_report.py +358 -0
  34. gitflow_analytics/security/security_analyzer.py +414 -0
  35. gitflow_analytics/tui/app.py +3 -1
  36. gitflow_analytics/tui/progress_adapter.py +313 -0
  37. gitflow_analytics/tui/screens/analysis_progress_screen.py +407 -46
  38. gitflow_analytics/tui/screens/results_screen.py +219 -206
  39. gitflow_analytics/ui/__init__.py +21 -0
  40. gitflow_analytics/ui/progress_display.py +1477 -0
  41. gitflow_analytics/verify_activity.py +697 -0
  42. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/METADATA +2 -1
  43. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/RECORD +47 -31
  44. gitflow_analytics/cli_rich.py +0 -503
  45. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/WHEEL +0 -0
  46. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/entry_points.txt +0 -0
  47. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/licenses/LICENSE +0 -0
  48. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,414 @@
1
+ """Main security analyzer that orchestrates all security checks."""
2
+
3
+ import logging
4
+ from concurrent.futures import ThreadPoolExecutor
5
+ from dataclasses import dataclass
6
+ from datetime import datetime
7
+ from pathlib import Path
8
+ from typing import Dict, List, Optional
9
+
10
+ from .config import SecurityConfig
11
+ from .extractors import DependencyChecker, SecretDetector, VulnerabilityScanner
12
+ from .llm_analyzer import LLMSecurityAnalyzer
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
@dataclass
class SecurityAnalysis:
    """Results from the security analysis of a single commit."""

    # Identity of the analyzed commit.
    commit_hash: str
    timestamp: datetime
    files_changed: List[str]

    # Raw findings grouped by the scanner that produced them.
    secrets: List[Dict]
    vulnerabilities: List[Dict]
    dependency_issues: List[Dict]
    llm_findings: List[Dict]

    # Aggregate counts across all finding types.
    total_findings: int
    critical_count: int
    high_count: int
    medium_count: int
    low_count: int

    # Overall weighted risk score in the range 0-100.
    risk_score: float
40
+
41
+
42
class SecurityAnalyzer:
    """Orchestrates comprehensive security analysis of git commits."""

    def __init__(self, config: Optional[SecurityConfig] = None, repo_path: Optional[Path] = None):
        """Initialize security analyzer.

        Args:
            config: Security configuration; a default SecurityConfig is used when omitted.
            repo_path: Repository path for context; defaults to the current working directory.
        """
        self.config = config or SecurityConfig()
        self.repo_path = repo_path or Path.cwd()

        # Every enabled scanner is registered here as a (label, component) pair.
        self.components = []

        # Assume every scanner is disabled until its config section says otherwise.
        self.secret_detector = None
        self.vulnerability_scanner = None
        self.dependency_checker = None
        self.llm_analyzer = None

        secret_cfg = self.config.secret_scanning
        if secret_cfg.enabled:
            self.secret_detector = SecretDetector(
                patterns=secret_cfg.patterns,
                entropy_threshold=secret_cfg.entropy_threshold,
                exclude_paths=secret_cfg.exclude_paths,
            )
            self.components.append(("secrets", self.secret_detector))

        if self.config.vulnerability_scanning.enabled:
            self.vulnerability_scanner = VulnerabilityScanner(self.config.vulnerability_scanning)
            self.components.append(("vulnerabilities", self.vulnerability_scanner))

        if self.config.dependency_scanning.enabled:
            self.dependency_checker = DependencyChecker(self.config.dependency_scanning)
            self.components.append(("dependencies", self.dependency_checker))

        if self.config.llm_security.enabled:
            self.llm_analyzer = LLMSecurityAnalyzer(self.config.llm_security)
            self.components.append(("llm", self.llm_analyzer))
85
+
86
+ def analyze_commit(self, commit_data: Dict) -> SecurityAnalysis:
87
+ """Analyze a single commit for security issues.
88
+
89
+ Args:
90
+ commit_data: Dictionary with commit information
91
+
92
+ Returns:
93
+ SecurityAnalysis object with findings
94
+ """
95
+ findings = {"secrets": [], "vulnerabilities": [], "dependencies": [], "llm": []}
96
+
97
+ # Run analyses in parallel for performance
98
+ with ThreadPoolExecutor(max_workers=self.config.max_concurrent_scans) as executor:
99
+ futures = []
100
+
101
+ # Schedule secret detection
102
+ if self.secret_detector:
103
+ future = executor.submit(self._run_secret_detection, commit_data)
104
+ futures.append(("secrets", future))
105
+
106
+ # Schedule vulnerability scanning
107
+ if self.vulnerability_scanner:
108
+ future = executor.submit(self._run_vulnerability_scan, commit_data)
109
+ futures.append(("vulnerabilities", future))
110
+
111
+ # Schedule dependency checking
112
+ if self.dependency_checker:
113
+ future = executor.submit(self._run_dependency_check, commit_data)
114
+ futures.append(("dependencies", future))
115
+
116
+ # Schedule LLM analysis
117
+ if self.llm_analyzer:
118
+ future = executor.submit(self._run_llm_analysis, commit_data)
119
+ futures.append(("llm", future))
120
+
121
+ # Collect results
122
+ for finding_type, future in futures:
123
+ try:
124
+ result = future.result(timeout=self.config.scan_timeout_seconds)
125
+ findings[finding_type] = result
126
+ except Exception as e:
127
+ logger.warning(f"Error in {finding_type} analysis: {e}")
128
+
129
+ # Calculate summary metrics
130
+ all_findings = (
131
+ findings["secrets"]
132
+ + findings["vulnerabilities"]
133
+ + findings["dependencies"]
134
+ + findings["llm"]
135
+ )
136
+
137
+ severity_counts = self._count_severities(all_findings)
138
+ risk_score = self._calculate_risk_score(severity_counts)
139
+
140
+ return SecurityAnalysis(
141
+ commit_hash=commit_data.get("commit_hash", ""),
142
+ timestamp=commit_data.get("timestamp", datetime.now()),
143
+ files_changed=commit_data.get("files_changed", []),
144
+ secrets=findings["secrets"],
145
+ vulnerabilities=findings["vulnerabilities"],
146
+ dependency_issues=findings["dependencies"],
147
+ llm_findings=findings["llm"],
148
+ total_findings=len(all_findings),
149
+ critical_count=severity_counts.get("critical", 0),
150
+ high_count=severity_counts.get("high", 0),
151
+ medium_count=severity_counts.get("medium", 0),
152
+ low_count=severity_counts.get("low", 0),
153
+ risk_score=risk_score,
154
+ )
155
+
156
+ def analyze_batch(self, commits: List[Dict]) -> List[SecurityAnalysis]:
157
+ """Analyze multiple commits for security issues.
158
+
159
+ Args:
160
+ commits: List of commit data dictionaries
161
+
162
+ Returns:
163
+ List of SecurityAnalysis objects
164
+ """
165
+ results = []
166
+
167
+ for commit in commits:
168
+ try:
169
+ analysis = self.analyze_commit(commit)
170
+ results.append(analysis)
171
+
172
+ # Check for critical issues
173
+ if self.config.fail_on_critical and analysis.critical_count > 0:
174
+ logger.error(
175
+ f"Critical security issues found in commit {commit.get('commit_hash', '')}"
176
+ )
177
+ if self.config.fail_on_critical:
178
+ raise SecurityException(
179
+ f"Critical security issues detected: {analysis.critical_count}"
180
+ )
181
+
182
+ except Exception as e:
183
+ logger.error(f"Error analyzing commit {commit.get('commit_hash', '')}: {e}")
184
+
185
+ return results
186
+
187
+ def _run_secret_detection(self, commit_data: Dict) -> List[Dict]:
188
+ """Run secret detection on commit."""
189
+ try:
190
+ return self.secret_detector.scan_commit(commit_data)
191
+ except Exception as e:
192
+ logger.warning(f"Secret detection error: {e}")
193
+ return []
194
+
195
+ def _run_vulnerability_scan(self, commit_data: Dict) -> List[Dict]:
196
+ """Run vulnerability scanning on changed files."""
197
+ try:
198
+ files_changed = commit_data.get("files_changed", [])
199
+ return self.vulnerability_scanner.scan_files(files_changed, self.repo_path)
200
+ except Exception as e:
201
+ logger.warning(f"Vulnerability scanning error: {e}")
202
+ return []
203
+
204
+ def _run_dependency_check(self, commit_data: Dict) -> List[Dict]:
205
+ """Check for vulnerable dependencies."""
206
+ try:
207
+ files_changed = commit_data.get("files_changed", [])
208
+ return self.dependency_checker.check_files(files_changed, self.repo_path)
209
+ except Exception as e:
210
+ logger.warning(f"Dependency checking error: {e}")
211
+ return []
212
+
213
+ def _run_llm_analysis(self, commit_data: Dict) -> List[Dict]:
214
+ """Run LLM-based security analysis."""
215
+ try:
216
+ return self.llm_analyzer.analyze_commit(commit_data)
217
+ except Exception as e:
218
+ logger.warning(f"LLM analysis error: {e}")
219
+ return []
220
+
221
+ def _count_severities(self, findings: List[Dict]) -> Dict[str, int]:
222
+ """Count findings by severity level."""
223
+ counts = {"critical": 0, "high": 0, "medium": 0, "low": 0}
224
+
225
+ for finding in findings:
226
+ severity = finding.get("severity", "medium").lower()
227
+ if severity in counts:
228
+ counts[severity] += 1
229
+
230
+ return counts
231
+
232
+ def _calculate_risk_score(self, severity_counts: Dict[str, int]) -> float:
233
+ """Calculate overall risk score (0-100).
234
+
235
+ Weighted formula:
236
+ - Critical: 25 points each
237
+ - High: 10 points each
238
+ - Medium: 3 points each
239
+ - Low: 1 point each
240
+
241
+ Capped at 100.
242
+ """
243
+ score = (
244
+ severity_counts.get("critical", 0) * 25
245
+ + severity_counts.get("high", 0) * 10
246
+ + severity_counts.get("medium", 0) * 3
247
+ + severity_counts.get("low", 0) * 1
248
+ )
249
+
250
+ return min(100.0, float(score))
251
+
252
+ def generate_summary_report(self, analyses: List[SecurityAnalysis]) -> Dict:
253
+ """Generate summary report from multiple analyses.
254
+
255
+ Args:
256
+ analyses: List of SecurityAnalysis objects
257
+
258
+ Returns:
259
+ Summary dictionary with statistics and insights
260
+ """
261
+ if not analyses:
262
+ return {
263
+ "total_commits": 0,
264
+ "commits_with_issues": 0,
265
+ "total_findings": 0,
266
+ "risk_assessment": "No data available",
267
+ }
268
+
269
+ total_findings = sum(a.total_findings for a in analyses)
270
+ commits_with_issues = sum(1 for a in analyses if a.total_findings > 0)
271
+
272
+ # Aggregate findings by type
273
+ all_secrets = []
274
+ all_vulnerabilities = []
275
+ all_dependencies = []
276
+ all_llm = []
277
+
278
+ for analysis in analyses:
279
+ all_secrets.extend(analysis.secrets)
280
+ all_vulnerabilities.extend(analysis.vulnerabilities)
281
+ all_dependencies.extend(analysis.dependency_issues)
282
+ all_llm.extend(analysis.llm_findings)
283
+
284
+ # Calculate average risk score
285
+ avg_risk_score = sum(a.risk_score for a in analyses) / len(analyses) if analyses else 0
286
+
287
+ # Determine risk level
288
+ if avg_risk_score >= 75:
289
+ risk_level = "CRITICAL"
290
+ elif avg_risk_score >= 50:
291
+ risk_level = "HIGH"
292
+ elif avg_risk_score >= 25:
293
+ risk_level = "MEDIUM"
294
+ else:
295
+ risk_level = "LOW"
296
+
297
+ summary = {
298
+ "total_commits": len(analyses),
299
+ "commits_with_issues": commits_with_issues,
300
+ "total_findings": total_findings,
301
+ "average_risk_score": round(avg_risk_score, 2),
302
+ "risk_level": risk_level,
303
+ "findings_by_type": {
304
+ "secrets": len(all_secrets),
305
+ "vulnerabilities": len(all_vulnerabilities),
306
+ "dependency_issues": len(all_dependencies),
307
+ "llm_findings": len(all_llm),
308
+ },
309
+ "severity_distribution": {
310
+ "critical": sum(a.critical_count for a in analyses),
311
+ "high": sum(a.high_count for a in analyses),
312
+ "medium": sum(a.medium_count for a in analyses),
313
+ "low": sum(a.low_count for a in analyses),
314
+ },
315
+ "top_issues": self._identify_top_issues(analyses),
316
+ "recommendations": self._generate_recommendations(analyses),
317
+ }
318
+
319
+ # Add LLM insights if available
320
+ if self.llm_analyzer and all_llm:
321
+ all_findings = all_secrets + all_vulnerabilities + all_dependencies + all_llm
322
+ insights = self.llm_analyzer.generate_security_insights(all_findings)
323
+ summary["llm_insights"] = insights
324
+
325
+ return summary
326
+
327
+ def _identify_top_issues(self, analyses: List[SecurityAnalysis]) -> List[Dict]:
328
+ """Identify the most common/critical issues."""
329
+ issue_counts = {}
330
+
331
+ for analysis in analyses:
332
+ for finding in (
333
+ analysis.secrets
334
+ + analysis.vulnerabilities
335
+ + analysis.dependency_issues
336
+ + analysis.llm_findings
337
+ ):
338
+ issue_type = finding.get(
339
+ "vulnerability_type", finding.get("secret_type", "unknown")
340
+ )
341
+ severity = finding.get("severity", "medium")
342
+
343
+ key = f"{issue_type}:{severity}"
344
+ if key not in issue_counts:
345
+ issue_counts[key] = {
346
+ "type": issue_type,
347
+ "severity": severity,
348
+ "count": 0,
349
+ "files": set(),
350
+ }
351
+
352
+ issue_counts[key]["count"] += 1
353
+ if "file" in finding:
354
+ issue_counts[key]["files"].add(finding["file"])
355
+
356
+ # Sort by severity and count
357
+ severity_order = {"critical": 0, "high": 1, "medium": 2, "low": 3}
358
+ sorted_issues = sorted(
359
+ issue_counts.values(),
360
+ key=lambda x: (severity_order.get(x["severity"], 999), -x["count"]),
361
+ )
362
+
363
+ # Return top 10 issues
364
+ top_issues = []
365
+ for issue in sorted_issues[:10]:
366
+ top_issues.append(
367
+ {
368
+ "type": issue["type"],
369
+ "severity": issue["severity"],
370
+ "occurrences": issue["count"],
371
+ "affected_files": len(issue["files"]),
372
+ }
373
+ )
374
+
375
+ return top_issues
376
+
377
+ def _generate_recommendations(self, analyses: List[SecurityAnalysis]) -> List[str]:
378
+ """Generate actionable security recommendations."""
379
+ recommendations = []
380
+
381
+ # Count issue types
382
+ has_secrets = any(a.secrets for a in analyses)
383
+ has_vulnerabilities = any(a.vulnerabilities for a in analyses)
384
+ has_dependencies = any(a.dependency_issues for a in analyses)
385
+ critical_count = sum(a.critical_count for a in analyses)
386
+
387
+ if critical_count > 0:
388
+ recommendations.append(
389
+ f"🚨 Address {critical_count} critical security issues immediately"
390
+ )
391
+
392
+ if has_secrets:
393
+ recommendations.append("🔑 Implement pre-commit hooks to prevent secret commits")
394
+ recommendations.append("📝 Rotate all exposed credentials and API keys")
395
+
396
+ if has_vulnerabilities:
397
+ recommendations.append("🛡️ Enable static analysis tools in CI/CD pipeline")
398
+ recommendations.append("📚 Provide secure coding training for developers")
399
+
400
+ if has_dependencies:
401
+ recommendations.append("📦 Update vulnerable dependencies to patched versions")
402
+ recommendations.append("🔄 Implement automated dependency scanning")
403
+
404
+ if not recommendations:
405
+ recommendations.append("✅ No significant security issues detected")
406
+ recommendations.append("🔍 Continue regular security reviews")
407
+
408
+ return recommendations
409
+
410
+
411
class SecurityException(Exception):
    """Raised when critical security issues are detected."""
@@ -279,6 +279,8 @@ class GitFlowAnalyticsApp(App):
279
279
  self.config_path: Optional[Path] = None
280
280
  self.initialization_complete = False
281
281
  self._nlp_engine = None
282
+ self.dark = True # Initialize dark mode state
283
+ self.default_weeks = 12 # Default weeks for analysis, can be overridden from CLI
282
284
 
283
285
  def compose(self) -> ComposeResult:
284
286
  """
@@ -456,7 +458,7 @@ class GitFlowAnalyticsApp(App):
456
458
  # Launch analysis progress screen
457
459
  analysis_screen = AnalysisProgressScreen(
458
460
  config=self.config,
459
- weeks=12, # TODO: Get from config or user preference
461
+ weeks=getattr(self, "default_weeks", 12), # Use CLI parameter or default to 12
460
462
  enable_qualitative=getattr(self.config, "qualitative", None)
461
463
  and self.config.qualitative.enabled,
462
464
  )