gitflow-analytics 3.3.0__py3-none-any.whl → 3.4.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. gitflow_analytics/_version.py +1 -1
  2. gitflow_analytics/cli.py +164 -15
  3. gitflow_analytics/cli_wizards/__init__.py +10 -0
  4. gitflow_analytics/cli_wizards/install_wizard.py +936 -0
  5. gitflow_analytics/cli_wizards/run_launcher.py +343 -0
  6. gitflow_analytics/config/schema.py +12 -0
  7. gitflow_analytics/constants.py +75 -0
  8. gitflow_analytics/core/cache.py +7 -3
  9. gitflow_analytics/core/data_fetcher.py +66 -30
  10. gitflow_analytics/core/git_timeout_wrapper.py +6 -4
  11. gitflow_analytics/core/progress.py +2 -4
  12. gitflow_analytics/core/subprocess_git.py +31 -5
  13. gitflow_analytics/identity_llm/analysis_pass.py +13 -3
  14. gitflow_analytics/identity_llm/analyzer.py +14 -2
  15. gitflow_analytics/identity_llm/models.py +7 -1
  16. gitflow_analytics/qualitative/classifiers/llm/openai_client.py +5 -3
  17. gitflow_analytics/security/config.py +6 -6
  18. gitflow_analytics/security/extractors/dependency_checker.py +14 -14
  19. gitflow_analytics/security/extractors/secret_detector.py +8 -14
  20. gitflow_analytics/security/extractors/vulnerability_scanner.py +9 -9
  21. gitflow_analytics/security/llm_analyzer.py +10 -10
  22. gitflow_analytics/security/security_analyzer.py +17 -17
  23. gitflow_analytics/tui/screens/analysis_progress_screen.py +1 -1
  24. gitflow_analytics/ui/progress_display.py +36 -29
  25. gitflow_analytics/verify_activity.py +23 -26
  26. {gitflow_analytics-3.3.0.dist-info → gitflow_analytics-3.4.7.dist-info}/METADATA +1 -1
  27. {gitflow_analytics-3.3.0.dist-info → gitflow_analytics-3.4.7.dist-info}/RECORD +31 -29
  28. gitflow_analytics/security/reports/__init__.py +0 -5
  29. gitflow_analytics/security/reports/security_report.py +0 -358
  30. {gitflow_analytics-3.3.0.dist-info → gitflow_analytics-3.4.7.dist-info}/WHEEL +0 -0
  31. {gitflow_analytics-3.3.0.dist-info → gitflow_analytics-3.4.7.dist-info}/entry_points.txt +0 -0
  32. {gitflow_analytics-3.3.0.dist-info → gitflow_analytics-3.4.7.dist-info}/licenses/LICENSE +0 -0
  33. {gitflow_analytics-3.3.0.dist-info → gitflow_analytics-3.4.7.dist-info}/top_level.txt +0 -0
@@ -3,7 +3,7 @@
3
3
  import logging
4
4
  import math
5
5
  import re
6
- from typing import Dict, List, Optional, Tuple
6
+ from typing import Optional
7
7
 
8
8
  logger = logging.getLogger(__name__)
9
9
 
@@ -13,9 +13,9 @@ class SecretDetector:
13
13
 
14
14
  def __init__(
15
15
  self,
16
- patterns: Dict[str, str],
16
+ patterns: dict[str, str],
17
17
  entropy_threshold: float = 4.5,
18
- exclude_paths: List[str] = None,
18
+ exclude_paths: list[str] = None,
19
19
  ):
20
20
  """Initialize secret detector.
21
21
 
@@ -37,7 +37,7 @@ class SecretDetector:
37
37
  re.compile(r"xxx+|placeholder|your[_-]?api[_-]?key", re.IGNORECASE),
38
38
  ]
39
39
 
40
- def scan_text(self, text: str, file_path: Optional[str] = None) -> List[Dict]:
40
+ def scan_text(self, text: str, file_path: Optional[str] = None) -> list[dict]:
41
41
  """Scan text for potential secrets.
42
42
 
43
43
  Args:
@@ -93,7 +93,7 @@ class SecretDetector:
93
93
 
94
94
  return findings
95
95
 
96
- def scan_commit(self, commit_data: Dict) -> List[Dict]:
96
+ def scan_commit(self, commit_data: dict) -> list[dict]:
97
97
  """Scan a commit for secrets.
98
98
 
99
99
  Args:
@@ -118,17 +118,11 @@ class SecretDetector:
118
118
  """Check if file should be excluded from scanning."""
119
119
  from fnmatch import fnmatch
120
120
 
121
- for pattern in self.exclude_paths:
122
- if fnmatch(file_path, pattern):
123
- return True
124
- return False
121
+ return any(fnmatch(file_path, pattern) for pattern in self.exclude_paths)
125
122
 
126
123
  def _is_false_positive(self, value: str) -> bool:
127
124
  """Check if a detected secret is likely a false positive."""
128
- for pattern in self.false_positive_patterns:
129
- if pattern.search(value):
130
- return True
131
- return False
125
+ return any(pattern.search(value) for pattern in self.false_positive_patterns)
132
126
 
133
127
  def _get_severity(self, secret_type: str) -> str:
134
128
  """Determine severity based on secret type."""
@@ -164,7 +158,7 @@ class SecretDetector:
164
158
 
165
159
  def _find_high_entropy_strings(
166
160
  self, text: str, min_length: int = 20
167
- ) -> List[Tuple[str, float]]:
161
+ ) -> list[tuple[str, float]]:
168
162
  """Find strings with high entropy (potential secrets).
169
163
 
170
164
  Args:
@@ -8,7 +8,7 @@ import subprocess
8
8
  import tempfile
9
9
  from concurrent.futures import ThreadPoolExecutor
10
10
  from pathlib import Path
11
- from typing import Any, Dict, List, Optional
11
+ from typing import Any, Optional
12
12
 
13
13
  logger = logging.getLogger(__name__)
14
14
 
@@ -26,7 +26,7 @@ class VulnerabilityScanner:
26
26
  # Check which tools are available
27
27
  self.available_tools = self._detect_available_tools()
28
28
 
29
- def scan_files(self, files_changed: List[str], repo_path: Path) -> List[Dict]:
29
+ def scan_files(self, files_changed: list[str], repo_path: Path) -> list[dict]:
30
30
  """Scan changed files for vulnerabilities.
31
31
 
32
32
  Args:
@@ -49,7 +49,7 @@ class VulnerabilityScanner:
49
49
 
50
50
  return findings
51
51
 
52
- def _detect_available_tools(self) -> Dict[str, bool]:
52
+ def _detect_available_tools(self) -> dict[str, bool]:
53
53
  """Detect which security tools are installed."""
54
54
  tools = {}
55
55
 
@@ -77,7 +77,7 @@ class VulnerabilityScanner:
77
77
  """Check if a tool is available in PATH."""
78
78
  return shutil.which(tool_name) is not None
79
79
 
80
- def _scan_with_patterns(self, files_changed: List[str], repo_path: Path) -> List[Dict]:
80
+ def _scan_with_patterns(self, files_changed: list[str], repo_path: Path) -> list[dict]:
81
81
  """Quick pattern-based vulnerability detection."""
82
82
  findings = []
83
83
 
@@ -108,7 +108,7 @@ class VulnerabilityScanner:
108
108
 
109
109
  return findings
110
110
 
111
- def _scan_with_tools(self, files_changed: List[str], repo_path: Path) -> List[Dict]:
111
+ def _scan_with_tools(self, files_changed: list[str], repo_path: Path) -> list[dict]:
112
112
  """Run security tools on changed files."""
113
113
  all_findings = []
114
114
 
@@ -143,7 +143,7 @@ class VulnerabilityScanner:
143
143
 
144
144
  return all_findings
145
145
 
146
- def _group_files_by_language(self, files: List[str]) -> Dict[str, List[str]]:
146
+ def _group_files_by_language(self, files: list[str]) -> dict[str, list[str]]:
147
147
  """Group files by programming language."""
148
148
  groups = {}
149
149
 
@@ -166,7 +166,7 @@ class VulnerabilityScanner:
166
166
 
167
167
  return groups
168
168
 
169
- def _run_semgrep(self, files: List[str], repo_path: Path) -> List[Dict]:
169
+ def _run_semgrep(self, files: list[str], repo_path: Path) -> list[dict]:
170
170
  """Run Semgrep security scanning."""
171
171
  findings = []
172
172
 
@@ -219,7 +219,7 @@ class VulnerabilityScanner:
219
219
 
220
220
  return findings
221
221
 
222
- def _run_bandit(self, files: List[str], repo_path: Path) -> List[Dict]:
222
+ def _run_bandit(self, files: list[str], repo_path: Path) -> list[dict]:
223
223
  """Run Bandit for Python security scanning."""
224
224
  findings = []
225
225
 
@@ -258,7 +258,7 @@ class VulnerabilityScanner:
258
258
 
259
259
  return findings
260
260
 
261
- def _run_gosec(self, files: List[str], repo_path: Path) -> List[Dict]:
261
+ def _run_gosec(self, files: list[str], repo_path: Path) -> list[dict]:
262
262
  """Run gosec for Go security scanning."""
263
263
  findings = []
264
264
 
@@ -5,7 +5,7 @@ import logging
5
5
  import os
6
6
  from datetime import datetime, timedelta
7
7
  from pathlib import Path
8
- from typing import Any, Dict, List, Optional
8
+ from typing import Any, Optional
9
9
 
10
10
  import httpx
11
11
 
@@ -33,7 +33,7 @@ class LLMSecurityAnalyzer:
33
33
  # Cache LLM responses for 7 days to save costs
34
34
  self.cache_ttl = timedelta(days=7)
35
35
 
36
- def analyze_commit(self, commit_data: Dict) -> List[Dict]:
36
+ def analyze_commit(self, commit_data: dict) -> list[dict]:
37
37
  """Analyze a commit for security issues using LLM.
38
38
 
39
39
  Args:
@@ -72,7 +72,7 @@ class LLMSecurityAnalyzer:
72
72
 
73
73
  return findings
74
74
 
75
- def _analyze_commit_message(self, commit_data: Dict) -> List[Dict]:
75
+ def _analyze_commit_message(self, commit_data: dict) -> list[dict]:
76
76
  """Analyze commit message for security implications."""
77
77
  prompt = self.config.commit_review_prompt.format(
78
78
  message=commit_data.get("message", ""),
@@ -83,7 +83,7 @@ class LLMSecurityAnalyzer:
83
83
  response = self._call_llm(prompt)
84
84
  return self._parse_llm_response(response, commit_data)
85
85
 
86
- def _analyze_code_changes(self, commit_data: Dict) -> List[Dict]:
86
+ def _analyze_code_changes(self, commit_data: dict) -> list[dict]:
87
87
  """Analyze actual code changes for security issues."""
88
88
  # Limit the amount of code sent to LLM for cost control
89
89
  lines_added = commit_data.get("diff_content", "")
@@ -176,8 +176,8 @@ class LLMSecurityAnalyzer:
176
176
  return ""
177
177
 
178
178
  def _parse_llm_response(
179
- self, response: str, commit_data: Dict, is_code_analysis: bool = False
180
- ) -> List[Dict]:
179
+ self, response: str, commit_data: dict, is_code_analysis: bool = False
180
+ ) -> list[dict]:
181
181
  """Parse LLM response and extract security findings."""
182
182
  findings = []
183
183
 
@@ -273,7 +273,7 @@ class LLMSecurityAnalyzer:
273
273
 
274
274
  return "high" if len(response) > 100 else "medium"
275
275
 
276
- def _get_cache_key(self, commit_data: Dict) -> str:
276
+ def _get_cache_key(self, commit_data: dict) -> str:
277
277
  """Generate cache key for commit data."""
278
278
  key_parts = [
279
279
  commit_data.get("commit_hash", ""),
@@ -286,7 +286,7 @@ class LLMSecurityAnalyzer:
286
286
 
287
287
  return hashlib.sha256(key_str.encode()).hexdigest()[:16]
288
288
 
289
- def _get_cached_result(self, cache_key: str) -> Optional[List[Dict]]:
289
+ def _get_cached_result(self, cache_key: str) -> Optional[list[dict]]:
290
290
  """Get cached result if it exists and is not expired."""
291
291
  cache_file = self.cache_dir / f"{cache_key}.json"
292
292
  if not cache_file.exists():
@@ -305,7 +305,7 @@ class LLMSecurityAnalyzer:
305
305
  logger.debug(f"Error reading cache: {e}")
306
306
  return None
307
307
 
308
- def _cache_result(self, cache_key: str, result: List[Dict]) -> None:
308
+ def _cache_result(self, cache_key: str, result: list[dict]) -> None:
309
309
  """Cache the analysis result."""
310
310
  cache_file = self.cache_dir / f"{cache_key}.json"
311
311
  try:
@@ -314,7 +314,7 @@ class LLMSecurityAnalyzer:
314
314
  except Exception as e:
315
315
  logger.debug(f"Error writing cache: {e}")
316
316
 
317
- def generate_security_insights(self, all_findings: List[Dict]) -> str:
317
+ def generate_security_insights(self, all_findings: list[dict]) -> str:
318
318
  """Generate high-level security insights from all findings."""
319
319
  if not all_findings:
320
320
  return "No security issues detected in the analyzed period."
@@ -5,7 +5,7 @@ from concurrent.futures import ThreadPoolExecutor
5
5
  from dataclasses import dataclass
6
6
  from datetime import datetime
7
7
  from pathlib import Path
8
- from typing import Dict, List, Optional
8
+ from typing import Optional
9
9
 
10
10
  from .config import SecurityConfig
11
11
  from .extractors import DependencyChecker, SecretDetector, VulnerabilityScanner
@@ -20,13 +20,13 @@ class SecurityAnalysis:
20
20
 
21
21
  commit_hash: str
22
22
  timestamp: datetime
23
- files_changed: List[str]
23
+ files_changed: list[str]
24
24
 
25
25
  # Findings by type
26
- secrets: List[Dict]
27
- vulnerabilities: List[Dict]
28
- dependency_issues: List[Dict]
29
- llm_findings: List[Dict]
26
+ secrets: list[dict]
27
+ vulnerabilities: list[dict]
28
+ dependency_issues: list[dict]
29
+ llm_findings: list[dict]
30
30
 
31
31
  # Summary metrics
32
32
  total_findings: int
@@ -83,7 +83,7 @@ class SecurityAnalyzer:
83
83
  else:
84
84
  self.llm_analyzer = None
85
85
 
86
- def analyze_commit(self, commit_data: Dict) -> SecurityAnalysis:
86
+ def analyze_commit(self, commit_data: dict) -> SecurityAnalysis:
87
87
  """Analyze a single commit for security issues.
88
88
 
89
89
  Args:
@@ -153,7 +153,7 @@ class SecurityAnalyzer:
153
153
  risk_score=risk_score,
154
154
  )
155
155
 
156
- def analyze_batch(self, commits: List[Dict]) -> List[SecurityAnalysis]:
156
+ def analyze_batch(self, commits: list[dict]) -> list[SecurityAnalysis]:
157
157
  """Analyze multiple commits for security issues.
158
158
 
159
159
  Args:
@@ -184,7 +184,7 @@ class SecurityAnalyzer:
184
184
 
185
185
  return results
186
186
 
187
- def _run_secret_detection(self, commit_data: Dict) -> List[Dict]:
187
+ def _run_secret_detection(self, commit_data: dict) -> list[dict]:
188
188
  """Run secret detection on commit."""
189
189
  try:
190
190
  return self.secret_detector.scan_commit(commit_data)
@@ -192,7 +192,7 @@ class SecurityAnalyzer:
192
192
  logger.warning(f"Secret detection error: {e}")
193
193
  return []
194
194
 
195
- def _run_vulnerability_scan(self, commit_data: Dict) -> List[Dict]:
195
+ def _run_vulnerability_scan(self, commit_data: dict) -> list[dict]:
196
196
  """Run vulnerability scanning on changed files."""
197
197
  try:
198
198
  files_changed = commit_data.get("files_changed", [])
@@ -201,7 +201,7 @@ class SecurityAnalyzer:
201
201
  logger.warning(f"Vulnerability scanning error: {e}")
202
202
  return []
203
203
 
204
- def _run_dependency_check(self, commit_data: Dict) -> List[Dict]:
204
+ def _run_dependency_check(self, commit_data: dict) -> list[dict]:
205
205
  """Check for vulnerable dependencies."""
206
206
  try:
207
207
  files_changed = commit_data.get("files_changed", [])
@@ -210,7 +210,7 @@ class SecurityAnalyzer:
210
210
  logger.warning(f"Dependency checking error: {e}")
211
211
  return []
212
212
 
213
- def _run_llm_analysis(self, commit_data: Dict) -> List[Dict]:
213
+ def _run_llm_analysis(self, commit_data: dict) -> list[dict]:
214
214
  """Run LLM-based security analysis."""
215
215
  try:
216
216
  return self.llm_analyzer.analyze_commit(commit_data)
@@ -218,7 +218,7 @@ class SecurityAnalyzer:
218
218
  logger.warning(f"LLM analysis error: {e}")
219
219
  return []
220
220
 
221
- def _count_severities(self, findings: List[Dict]) -> Dict[str, int]:
221
+ def _count_severities(self, findings: list[dict]) -> dict[str, int]:
222
222
  """Count findings by severity level."""
223
223
  counts = {"critical": 0, "high": 0, "medium": 0, "low": 0}
224
224
 
@@ -229,7 +229,7 @@ class SecurityAnalyzer:
229
229
 
230
230
  return counts
231
231
 
232
- def _calculate_risk_score(self, severity_counts: Dict[str, int]) -> float:
232
+ def _calculate_risk_score(self, severity_counts: dict[str, int]) -> float:
233
233
  """Calculate overall risk score (0-100).
234
234
 
235
235
  Weighted formula:
@@ -249,7 +249,7 @@ class SecurityAnalyzer:
249
249
 
250
250
  return min(100.0, float(score))
251
251
 
252
- def generate_summary_report(self, analyses: List[SecurityAnalysis]) -> Dict:
252
+ def generate_summary_report(self, analyses: list[SecurityAnalysis]) -> dict:
253
253
  """Generate summary report from multiple analyses.
254
254
 
255
255
  Args:
@@ -324,7 +324,7 @@ class SecurityAnalyzer:
324
324
 
325
325
  return summary
326
326
 
327
- def _identify_top_issues(self, analyses: List[SecurityAnalysis]) -> List[Dict]:
327
+ def _identify_top_issues(self, analyses: list[SecurityAnalysis]) -> list[dict]:
328
328
  """Identify the most common/critical issues."""
329
329
  issue_counts = {}
330
330
 
@@ -374,7 +374,7 @@ class SecurityAnalyzer:
374
374
 
375
375
  return top_issues
376
376
 
377
- def _generate_recommendations(self, analyses: List[SecurityAnalysis]) -> List[str]:
377
+ def _generate_recommendations(self, analyses: list[SecurityAnalysis]) -> list[str]:
378
378
  """Generate actionable security recommendations."""
379
379
  recommendations = []
380
380
 
@@ -697,7 +697,7 @@ class AnalysisProgressScreen(Screen):
697
697
 
698
698
  # Convert daily_commits to flat commits list
699
699
  commits = []
700
- for date_str, day_commits in daily_commits.items():
700
+ for _date_str, day_commits in daily_commits.items():
701
701
  commits.extend(day_commits)
702
702
 
703
703
  # Add flattened commits to result for compatibility
@@ -11,7 +11,7 @@ from contextlib import contextmanager
11
11
  from dataclasses import dataclass
12
12
  from datetime import datetime, timedelta
13
13
  from enum import Enum
14
- from typing import Any, Dict, Optional
14
+ from typing import Any, Optional
15
15
 
16
16
  # Try to import psutil, but make it optional
17
17
  try:
@@ -23,10 +23,7 @@ except ImportError:
23
23
 
24
24
  try:
25
25
  from rich import box
26
- from rich.align import Align
27
- from rich.columns import Columns
28
26
  from rich.console import Console, Group
29
- from rich.layout import Layout
30
27
  from rich.live import Live
31
28
  from rich.panel import Panel
32
29
  from rich.progress import (
@@ -35,7 +32,6 @@ try:
35
32
  Progress,
36
33
  SpinnerColumn,
37
34
  TextColumn,
38
- TimeElapsedColumn,
39
35
  TimeRemainingColumn,
40
36
  )
41
37
  from rich.table import Table
@@ -160,7 +156,7 @@ class RichProgressDisplay:
160
156
  )
161
157
 
162
158
  # Data tracking
163
- self.repositories: Dict[str, RepositoryInfo] = {}
159
+ self.repositories: dict[str, RepositoryInfo] = {}
164
160
  self.statistics = ProgressStatistics()
165
161
  self.current_repo: Optional[str] = None
166
162
 
@@ -414,8 +410,20 @@ class RichProgressDisplay:
414
410
  try:
415
411
  self.statistics.memory_usage = self._process.memory_info().rss / 1024 / 1024
416
412
  self.statistics.cpu_percent = self._process.cpu_percent()
417
- except:
413
+ except (AttributeError, OSError):
414
+ # Process might have terminated or psutil unavailable
415
+ # This is non-critical for analysis, so just skip the update
418
416
  pass
417
+ except Exception as e:
418
+ # Log unexpected errors but don't fail progress display
419
+ # Only log once to avoid spam
420
+ if not hasattr(self, "_stats_error_logged"):
421
+ import logging
422
+
423
+ logging.getLogger(__name__).debug(
424
+ f"Could not update process statistics: {e}"
425
+ )
426
+ self._stats_error_logged = True
419
427
 
420
428
  stats_items = []
421
429
 
@@ -483,26 +491,26 @@ class RichProgressDisplay:
483
491
 
484
492
  # Estimate total time if possible
485
493
  eta_text = ""
486
- if self.statistics.processed_repositories > 0 and self.statistics.total_repositories > 0:
487
- if self.statistics.processed_repositories < self.statistics.total_repositories:
488
- elapsed_seconds = (
489
- (datetime.now() - self.statistics.start_time).total_seconds()
490
- if self.statistics.start_time
494
+ if (
495
+ self.statistics.processed_repositories > 0
496
+ and self.statistics.total_repositories > 0
497
+ and self.statistics.processed_repositories < self.statistics.total_repositories
498
+ ):
499
+ elapsed_seconds = (
500
+ (datetime.now() - self.statistics.start_time).total_seconds()
501
+ if self.statistics.start_time
502
+ else 0
503
+ )
504
+ if elapsed_seconds > 0:
505
+ rate = self.statistics.processed_repositories / elapsed_seconds
506
+ remaining = (
507
+ (self.statistics.total_repositories - self.statistics.processed_repositories)
508
+ / rate
509
+ if rate > 0
491
510
  else 0
492
511
  )
493
- if elapsed_seconds > 0:
494
- rate = self.statistics.processed_repositories / elapsed_seconds
495
- remaining = (
496
- (
497
- self.statistics.total_repositories
498
- - self.statistics.processed_repositories
499
- )
500
- / rate
501
- if rate > 0
502
- else 0
503
- )
504
- if remaining > 0:
505
- eta_text = f" • ETA: {timedelta(seconds=int(remaining))}"
512
+ if remaining > 0:
513
+ eta_text = f" ETA: {timedelta(seconds=int(remaining))}"
506
514
 
507
515
  stats_items.append(f"{phase_text} • {elapsed_text}{eta_text}")
508
516
 
@@ -766,7 +774,7 @@ class RichProgressDisplay:
766
774
  """
767
775
  with self._lock:
768
776
  # Pre-populate all repositories with their status
769
- for idx, repo in enumerate(repository_list):
777
+ for _idx, repo in enumerate(repository_list):
770
778
  repo_name = repo.get("name", "Unknown")
771
779
  status_str = repo.get("status", "pending")
772
780
 
@@ -870,9 +878,8 @@ class RichProgressDisplay:
870
878
  # Update overall progress (handle both "repos" and "main" for compatibility)
871
879
  if description:
872
880
  self.update_overall(completed or 0, description)
873
- elif advance:
874
- if self.overall_task_id is not None:
875
- self.overall_progress.advance(self.overall_task_id, advance)
881
+ elif advance and self.overall_task_id is not None:
882
+ self.overall_progress.advance(self.overall_task_id, advance)
876
883
  elif hasattr(self, "_task_ids") and task_id in self._task_ids:
877
884
  # Update specific task
878
885
  update_kwargs = {}
@@ -8,7 +8,7 @@ import logging
8
8
  from collections import defaultdict
9
9
  from datetime import datetime, timedelta, timezone
10
10
  from pathlib import Path
11
- from typing import Any, Dict, List, Optional
11
+ from typing import Any, Optional
12
12
 
13
13
  import click
14
14
  import git
@@ -48,7 +48,7 @@ class ActivityVerifier:
48
48
  if config.github and config.github.token:
49
49
  self.github_client = Github(config.github.token)
50
50
 
51
- def verify_all_projects(self) -> Dict[str, Any]:
51
+ def verify_all_projects(self) -> dict[str, Any]:
52
52
  """Verify activity for all configured projects.
53
53
 
54
54
  Returns:
@@ -115,7 +115,7 @@ class ActivityVerifier:
115
115
 
116
116
  return results
117
117
 
118
- def _get_repositories(self) -> List[Dict[str, Any]]:
118
+ def _get_repositories(self) -> list[dict[str, Any]]:
119
119
  """Get list of repositories to analyze.
120
120
 
121
121
  Returns:
@@ -209,23 +209,24 @@ class ActivityVerifier:
209
209
  try:
210
210
  org = self.github_client.get_organization(self.config.github.organization)
211
211
  for repo in org.get_repos(type="all"):
212
- if not repo.archived:
213
- # Check if not already added
214
- if not any(r["name"] == repo.full_name for r in repositories):
215
- repositories.append(
216
- {
217
- "name": repo.full_name,
218
- "path": None,
219
- "is_local": False,
220
- "github_name": repo.full_name,
221
- }
222
- )
212
+ # Check if not archived and not already added
213
+ if not repo.archived and not any(
214
+ r["name"] == repo.full_name for r in repositories
215
+ ):
216
+ repositories.append(
217
+ {
218
+ "name": repo.full_name,
219
+ "path": None,
220
+ "is_local": False,
221
+ "github_name": repo.full_name,
222
+ }
223
+ )
223
224
  except GithubException as e:
224
225
  logger.error(f"Error fetching organization repos: {e}")
225
226
 
226
227
  return repositories
227
228
 
228
- def _verify_project_activity(self, repo_info: Dict[str, Any]) -> Dict[str, Any]:
229
+ def _verify_project_activity(self, repo_info: dict[str, Any]) -> dict[str, Any]:
229
230
  """Verify activity for a single project.
230
231
 
231
232
  Args:
@@ -241,7 +242,7 @@ class ActivityVerifier:
241
242
  else:
242
243
  raise ValueError(f"No valid path or GitHub name for repository {repo_info['name']}")
243
244
 
244
- def _verify_github_activity(self, repo_name: str) -> Dict[str, Any]:
245
+ def _verify_github_activity(self, repo_name: str) -> dict[str, Any]:
245
246
  """Verify activity for a GitHub repository using API.
246
247
 
247
248
  Args:
@@ -327,7 +328,7 @@ class ActivityVerifier:
327
328
 
328
329
  return result
329
330
 
330
- def _verify_local_activity(self, repo_path: Path) -> Dict[str, Any]:
331
+ def _verify_local_activity(self, repo_path: Path) -> dict[str, Any]:
331
332
  """Verify activity for a local Git repository.
332
333
 
333
334
  Args:
@@ -451,7 +452,7 @@ class ActivityVerifier:
451
452
 
452
453
  return result
453
454
 
454
- def _initialize_daily_matrix(self) -> Dict[str, Dict[str, int]]:
455
+ def _initialize_daily_matrix(self) -> dict[str, dict[str, int]]:
455
456
  """Initialize the daily activity matrix structure.
456
457
 
457
458
  Returns:
@@ -468,7 +469,7 @@ class ActivityVerifier:
468
469
  return matrix
469
470
 
470
471
  def _update_daily_matrix(
471
- self, matrix: Dict[str, Dict[str, int]], project_name: str, daily_commits: Dict[str, int]
472
+ self, matrix: dict[str, dict[str, int]], project_name: str, daily_commits: dict[str, int]
472
473
  ) -> None:
473
474
  """Update the daily matrix with project commit data.
474
475
 
@@ -481,7 +482,7 @@ class ActivityVerifier:
481
482
  if date_str in matrix:
482
483
  matrix[date_str][project_name] = count
483
484
 
484
- def format_report(self, results: Dict[str, Any]) -> str:
485
+ def format_report(self, results: dict[str, Any]) -> str:
485
486
  """Format the verification results as a readable report.
486
487
 
487
488
  Args:
@@ -524,11 +525,7 @@ class ActivityVerifier:
524
525
  matrix_data = []
525
526
  dates = sorted(results["daily_matrix"].keys())
526
527
  projects = sorted(
527
- set(
528
- project
529
- for date_data in results["daily_matrix"].values()
530
- for project in date_data.keys()
531
- )
528
+ set(project for date_data in results["daily_matrix"].values() for project in date_data)
532
529
  )
533
530
 
534
531
  if projects and dates:
@@ -691,7 +688,7 @@ def verify_activity_command(
691
688
  click.echo(f" - {project}")
692
689
 
693
690
  zero_days = len(
694
- [d for d in results["daily_matrix"].keys() if sum(results["daily_matrix"][d].values()) == 0]
691
+ [d for d in results["daily_matrix"] if sum(results["daily_matrix"][d].values()) == 0]
695
692
  )
696
693
  if zero_days > 0:
697
694
  click.echo(f"\n⚠️ WARNING: Found {zero_days} days with zero activity across all projects!")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: gitflow-analytics
3
- Version: 3.3.0
3
+ Version: 3.4.7
4
4
  Summary: Analyze Git repositories for developer productivity insights
5
5
  Author-email: Bob Matyas <bobmatnyc@gmail.com>
6
6
  License: MIT