gitflow-analytics 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. gitflow_analytics/__init__.py +11 -11
  2. gitflow_analytics/_version.py +2 -2
  3. gitflow_analytics/cli.py +612 -258
  4. gitflow_analytics/cli_rich.py +353 -0
  5. gitflow_analytics/config.py +251 -141
  6. gitflow_analytics/core/analyzer.py +140 -103
  7. gitflow_analytics/core/branch_mapper.py +132 -132
  8. gitflow_analytics/core/cache.py +240 -169
  9. gitflow_analytics/core/identity.py +210 -173
  10. gitflow_analytics/extractors/base.py +13 -11
  11. gitflow_analytics/extractors/story_points.py +70 -59
  12. gitflow_analytics/extractors/tickets.py +101 -87
  13. gitflow_analytics/integrations/github_integration.py +84 -77
  14. gitflow_analytics/integrations/jira_integration.py +116 -104
  15. gitflow_analytics/integrations/orchestrator.py +86 -85
  16. gitflow_analytics/metrics/dora.py +181 -177
  17. gitflow_analytics/models/database.py +190 -53
  18. gitflow_analytics/qualitative/__init__.py +30 -0
  19. gitflow_analytics/qualitative/classifiers/__init__.py +13 -0
  20. gitflow_analytics/qualitative/classifiers/change_type.py +468 -0
  21. gitflow_analytics/qualitative/classifiers/domain_classifier.py +399 -0
  22. gitflow_analytics/qualitative/classifiers/intent_analyzer.py +436 -0
  23. gitflow_analytics/qualitative/classifiers/risk_analyzer.py +412 -0
  24. gitflow_analytics/qualitative/core/__init__.py +13 -0
  25. gitflow_analytics/qualitative/core/llm_fallback.py +653 -0
  26. gitflow_analytics/qualitative/core/nlp_engine.py +373 -0
  27. gitflow_analytics/qualitative/core/pattern_cache.py +457 -0
  28. gitflow_analytics/qualitative/core/processor.py +540 -0
  29. gitflow_analytics/qualitative/models/__init__.py +25 -0
  30. gitflow_analytics/qualitative/models/schemas.py +272 -0
  31. gitflow_analytics/qualitative/utils/__init__.py +13 -0
  32. gitflow_analytics/qualitative/utils/batch_processor.py +326 -0
  33. gitflow_analytics/qualitative/utils/cost_tracker.py +343 -0
  34. gitflow_analytics/qualitative/utils/metrics.py +347 -0
  35. gitflow_analytics/qualitative/utils/text_processing.py +243 -0
  36. gitflow_analytics/reports/analytics_writer.py +11 -4
  37. gitflow_analytics/reports/csv_writer.py +51 -31
  38. gitflow_analytics/reports/narrative_writer.py +16 -14
  39. gitflow_analytics/tui/__init__.py +5 -0
  40. gitflow_analytics/tui/app.py +721 -0
  41. gitflow_analytics/tui/screens/__init__.py +8 -0
  42. gitflow_analytics/tui/screens/analysis_progress_screen.py +487 -0
  43. gitflow_analytics/tui/screens/configuration_screen.py +547 -0
  44. gitflow_analytics/tui/screens/loading_screen.py +358 -0
  45. gitflow_analytics/tui/screens/main_screen.py +304 -0
  46. gitflow_analytics/tui/screens/results_screen.py +698 -0
  47. gitflow_analytics/tui/widgets/__init__.py +7 -0
  48. gitflow_analytics/tui/widgets/data_table.py +257 -0
  49. gitflow_analytics/tui/widgets/export_modal.py +301 -0
  50. gitflow_analytics/tui/widgets/progress_widget.py +192 -0
  51. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/METADATA +31 -4
  52. gitflow_analytics-1.0.3.dist-info/RECORD +62 -0
  53. gitflow_analytics-1.0.1.dist-info/RECORD +0 -31
  54. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/WHEEL +0 -0
  55. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/entry_points.txt +0 -0
  56. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/licenses/LICENSE +0 -0
  57. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/top_level.txt +0 -0
gitflow_analytics/qualitative/utils/text_processing.py (new file)

@@ -0,0 +1,243 @@
+ """Text processing utilities for qualitative analysis."""
+
+ import hashlib
+ import re
+ from typing import Dict, List, Set
+
+
+ class TextProcessor:
+     """Utility class for text preprocessing and feature extraction.
+
+     This class provides common text processing operations needed across
+     the qualitative analysis pipeline, including normalization, feature
+     extraction, and similarity calculations.
+     """
+
+     def __init__(self) -> None:
+         """Initialize text processor with common patterns."""
+         # Common patterns for normalization
+         self.url_pattern = re.compile(r'https?://[^\s]+')
+         self.email_pattern = re.compile(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b')
+         self.hash_pattern = re.compile(r'\b[a-f0-9]{7,40}\b')  # Git hashes
+         self.ticket_pattern = re.compile(r'\b(?:JIRA|TICKET|ISSUE|BUG|TASK)-?\d+\b', re.IGNORECASE)
+
+         # Stop words for feature extraction
+         self.stop_words: Set[str] = {
+             'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for',
+             'of', 'with', 'by', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
+             'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could',
+             'should', 'may', 'might', 'can', 'this', 'that', 'these', 'those'
+         }
+
+     def normalize_message(self, message: str) -> str:
+         """Normalize commit message for consistent processing.
+
+         This method standardizes commit messages by removing URLs, emails,
+         hashes, and other variable content that doesn't contribute to
+         semantic classification.
+
+         Args:
+             message: Raw commit message
+
+         Returns:
+             Normalized message suitable for classification
+         """
+         if not message:
+             return ""
+
+         # Convert to lowercase for consistency
+         normalized = message.lower().strip()
+
+         # Remove URLs, emails, and hashes
+         normalized = self.url_pattern.sub('[URL]', normalized)
+         normalized = self.email_pattern.sub('[EMAIL]', normalized)
+         normalized = self.hash_pattern.sub('[HASH]', normalized)
+
+         # Normalize ticket references
+         normalized = self.ticket_pattern.sub('[TICKET]', normalized)
+
+         # Remove extra whitespace
+         normalized = re.sub(r'\s+', ' ', normalized)
+
+         return normalized.strip()
+
+     def extract_keywords(self, text: str, min_length: int = 3) -> List[str]:
+         """Extract meaningful keywords from text.
+
+         Extracts keywords by removing stop words, punctuation, and short words
+         that are unlikely to be semantically meaningful.
+
+         Args:
+             text: Input text to extract keywords from
+             min_length: Minimum length for keywords
+
+         Returns:
+             List of extracted keywords
+         """
+         if not text:
+             return []
+
+         # Split into words and clean
+         words = re.findall(r'\b[a-zA-Z]+\b', text.lower())
+
+         # Filter stop words and short words
+         keywords = [
+             word for word in words
+             if word not in self.stop_words and len(word) >= min_length
+         ]
+
+         return keywords
+
+     def create_semantic_fingerprint(self, message: str, files: List[str]) -> str:
+         """Create a semantic fingerprint for similarity matching.
+
+         Creates a hash-based fingerprint that captures the semantic essence
+         of a commit for pattern matching and caching.
+
+         Args:
+             message: Commit message
+             files: List of changed files
+
+         Returns:
+             Hex-encoded fingerprint string
+         """
+         # Normalize message for consistent fingerprinting
+         normalized_msg = self.normalize_message(message)
+         keywords = self.extract_keywords(normalized_msg)
+
+         # Extract file patterns (extensions, directories)
+         file_patterns = []
+         for file_path in files[:10]:  # Limit to prevent huge fingerprints
+             # Get file extension
+             if '.' in file_path:
+                 ext = file_path.split('.')[-1].lower()
+                 file_patterns.append(f"ext:{ext}")
+
+             # Get directory patterns
+             parts = file_path.split('/')
+             if len(parts) > 1:
+                 # First directory
+                 file_patterns.append(f"dir:{parts[0]}")
+                 # Last directory before file
+                 if len(parts) > 2:
+                     file_patterns.append(f"dir:{parts[-2]}")
+
+         # Combine keywords and file patterns
+         semantic_elements = sorted(keywords[:10]) + sorted(set(file_patterns))
+
+         # Create fingerprint
+         fingerprint_text = '|'.join(semantic_elements)
+         return hashlib.md5(fingerprint_text.encode()).hexdigest()
+
+     def calculate_message_similarity(self, msg1: str, msg2: str) -> float:
+         """Calculate semantic similarity between two commit messages.
+
+         Uses keyword overlap to estimate semantic similarity between
+         commit messages for grouping similar commits.
+
+         Args:
+             msg1: First commit message
+             msg2: Second commit message
+
+         Returns:
+             Similarity score between 0.0 and 1.0
+         """
+         if not msg1 or not msg2:
+             return 0.0
+
+         # Extract keywords from both messages
+         keywords1 = set(self.extract_keywords(self.normalize_message(msg1)))
+         keywords2 = set(self.extract_keywords(self.normalize_message(msg2)))
+
+         if not keywords1 or not keywords2:
+             return 0.0
+
+         # Calculate Jaccard similarity
+         intersection = len(keywords1.intersection(keywords2))
+         union = len(keywords1.union(keywords2))
+
+         return intersection / union if union > 0 else 0.0
+
+     def extract_file_patterns(self, files: List[str]) -> Dict[str, int]:
+         """Extract file patterns for domain classification.
+
+         Analyzes file paths to extract patterns useful for determining
+         the technical domain of changes.
+
+         Args:
+             files: List of file paths
+
+         Returns:
+             Dictionary mapping pattern types to counts
+         """
+         patterns = {
+             'extensions': {},
+             'directories': {},
+             'special_files': {},
+         }
+
+         for file_path in files:
+             # File extensions
+             if '.' in file_path:
+                 ext = file_path.split('.')[-1].lower()
+                 patterns['extensions'][ext] = patterns['extensions'].get(ext, 0) + 1
+
+             # Directory patterns
+             parts = file_path.split('/')
+             for part in parts[:-1]:  # Exclude filename
+                 if part:  # Skip empty parts
+                     patterns['directories'][part] = patterns['directories'].get(part, 0) + 1
+
+             # Special files
+             filename = parts[-1].lower()
+             special_files = [
+                 'dockerfile', 'makefile', 'readme', 'license', 'changelog',
+                 'package.json', 'requirements.txt', 'setup.py', 'pom.xml'
+             ]
+             for special in special_files:
+                 if special in filename:
+                     patterns['special_files'][special] = patterns['special_files'].get(special, 0) + 1
+
+         return patterns
+
+     def calculate_commit_complexity(self, message: str, files: List[str],
+                                     insertions: int, deletions: int) -> Dict[str, float]:
+         """Calculate various complexity metrics for a commit.
+
+         Estimates the complexity of a commit based on message content,
+         file changes, and line changes to help with risk assessment.
+
+         Args:
+             message: Commit message
+             files: List of changed files
+             insertions: Number of lines inserted
+             deletions: Number of lines deleted
+
+         Returns:
+             Dictionary of complexity metrics
+         """
+         metrics = {}
+
+         # Message complexity (length, keywords)
+         metrics['message_length'] = len(message)
+         keywords = self.extract_keywords(message)
+         metrics['keyword_count'] = len(keywords)
+         metrics['message_complexity'] = min(1.0, len(keywords) / 10.0)
+
+         # File complexity
+         metrics['files_changed'] = len(files)
+         metrics['file_complexity'] = min(1.0, len(files) / 20.0)
+
+         # Line change complexity
+         total_changes = insertions + deletions
+         metrics['total_changes'] = total_changes
+         metrics['change_complexity'] = min(1.0, total_changes / 500.0)
+
+         # Overall complexity score (0.0 to 1.0)
+         metrics['overall_complexity'] = (
+             metrics['message_complexity'] * 0.2 +
+             metrics['file_complexity'] * 0.3 +
+             metrics['change_complexity'] * 0.5
+         )
+
+         return metrics
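
A minimal usage sketch for the new TextProcessor, assuming the module above is importable from an installed 1.0.3 wheel as gitflow_analytics.qualitative.utils.text_processing (the sample commit messages and file paths are made up for illustration):

    # Exercise the main TextProcessor entry points shown in the diff above.
    from gitflow_analytics.qualitative.utils.text_processing import TextProcessor

    tp = TextProcessor()

    msg_a = "Fix JIRA-1234: handle timeout in https://example.com/api calls (abc1234)"
    msg_b = "fix timeout handling in api calls"

    # URLs, hashes, and ticket IDs collapse to placeholders before classification.
    print(tp.normalize_message(msg_a))
    # -> "fix [TICKET]: handle timeout in [URL] calls ([HASH])"

    # Keyword-overlap (Jaccard) similarity between the two messages, in [0.0, 1.0].
    print(tp.calculate_message_similarity(msg_a, msg_b))

    # MD5 fingerprint built from the top keywords plus file extension/directory patterns.
    print(tp.create_semantic_fingerprint(msg_a, ["src/api/client.py", "tests/test_client.py"]))

    # Weighted 0-1 score combining message, file-count, and line-churn complexity.
    complexity = tp.calculate_commit_complexity(msg_a, ["src/api/client.py"], insertions=40, deletions=12)
    print(complexity["overall_complexity"])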
gitflow_analytics/reports/analytics_writer.py

@@ -1,6 +1,6 @@
  """Advanced analytics report generation with percentage and qualitative metrics."""
  import csv
- from datetime import datetime, timedelta
+ from datetime import datetime, timedelta, timezone
  from pathlib import Path
  from typing import List, Dict, Any, Tuple
  from collections import defaultdict
@@ -127,8 +127,8 @@ class AnalyticsReportGenerator:
                                output_path: Path,
                                weeks: int = 12) -> Path:
          """Generate developer focus analysis showing concentration patterns and activity across all projects."""
-         # Calculate week boundaries
-         end_date = datetime.now()
+         # Calculate week boundaries (timezone-aware to match commit timestamps)
+         end_date = datetime.now(timezone.utc)
          start_date = end_date - timedelta(weeks=weeks)

          # Build developer lookup
@@ -442,8 +442,15 @@ class AnalyticsReportGenerator:

      def _get_week_start(self, date: datetime) -> datetime:
          """Get Monday of the week for a given date."""
+         # Ensure consistent timezone handling - keep timezone info
          if hasattr(date, 'tzinfo') and date.tzinfo is not None:
-             date = date.replace(tzinfo=None)
+             # Keep timezone-aware but ensure it's UTC
+             if date.tzinfo != timezone.utc:
+                 date = date.astimezone(timezone.utc)
+         else:
+             # Convert naive datetime to UTC timezone-aware
+             date = date.replace(tzinfo=timezone.utc)
+
          days_since_monday = date.weekday()
          monday = date - timedelta(days=days_since_monday)
          return monday.replace(hour=0, minute=0, second=0, microsecond=0)
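
Both report writers now coerce timestamps to timezone-aware UTC instead of stripping tzinfo: naive datetimes are assumed to be UTC, and aware ones are converted. A standalone sketch of that week-boundary logic, using a hypothetical to_utc helper name that is not part of the package:

    from datetime import datetime, timedelta, timezone

    def to_utc(dt: datetime) -> datetime:
        """Coerce naive or non-UTC datetimes to timezone-aware UTC (hypothetical helper)."""
        if dt.tzinfo is not None:
            return dt if dt.tzinfo == timezone.utc else dt.astimezone(timezone.utc)
        return dt.replace(tzinfo=timezone.utc)

    def week_start(dt: datetime) -> datetime:
        """Monday 00:00 UTC of the week containing dt, mirroring _get_week_start above."""
        dt = to_utc(dt)
        monday = dt - timedelta(days=dt.weekday())
        return monday.replace(hour=0, minute=0, second=0, microsecond=0)

    # Naive and UTC-aware inputs land on the same timezone-aware Monday,
    # so the result can be compared safely against datetime.now(timezone.utc).
    print(week_start(datetime(2024, 5, 15, 9, 30)))
    print(week_start(datetime(2024, 5, 15, 9, 30, tzinfo=timezone.utc)))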
gitflow_analytics/reports/csv_writer.py

@@ -1,8 +1,8 @@
  """CSV report generation for GitFlow Analytics."""
  import csv
- from datetime import datetime, timedelta
+ from datetime import datetime, timedelta, timezone
  from pathlib import Path
- from typing import List, Dict, Any, Optional
+ from typing import List, Dict, Any, Optional, Tuple
  from collections import defaultdict
  import pandas as pd

@@ -13,7 +13,7 @@ class CSVReportGenerator:
      def __init__(self, anonymize: bool = False):
          """Initialize report generator."""
          self.anonymize = anonymize
-         self._anonymization_map = {}
+         self._anonymization_map: Dict[str, str] = {}
          self._anonymous_counter = 0

      def generate_weekly_report(self, commits: List[Dict[str, Any]],
@@ -21,12 +21,12 @@ class CSVReportGenerator:
                                 output_path: Path,
                                 weeks: int = 12) -> Path:
          """Generate weekly metrics CSV report."""
-         # Calculate week boundaries
-         end_date = datetime.now()
+         # Calculate week boundaries (timezone-aware to match commit timestamps)
+         end_date = datetime.now(timezone.utc)
          start_date = end_date - timedelta(weeks=weeks)

          # Group commits by week and developer
-         weekly_data = self._aggregate_weekly_data(commits, start_date, end_date)
+         weekly_data: Dict[Tuple[datetime, str, str], Dict[str, Any]] = self._aggregate_weekly_data(commits, start_date, end_date)

          # Create developer lookup
          dev_lookup = {dev['canonical_id']: dev for dev in developer_stats}
@@ -198,9 +198,9 @@ class CSVReportGenerator:

      def _aggregate_weekly_data(self, commits: List[Dict[str, Any]],
                                 start_date: datetime,
-                                end_date: datetime) -> Dict[tuple, Dict[str, Any]]:
+                                end_date: datetime) -> Dict[Tuple[datetime, str, str], Dict[str, Any]]:
          """Aggregate commit data by week."""
-         weekly_data = defaultdict(lambda: {
+         weekly_data: defaultdict[Tuple[datetime, str, str], Dict[str, Any]] = defaultdict(lambda: {
              'commits': 0,
              'story_points': 0,
              'lines_added': 0,
@@ -214,10 +214,14 @@ class CSVReportGenerator:

          for commit in commits:
              timestamp = commit['timestamp']
-             # Handle both timezone-aware and naive datetimes
+             # Ensure consistent timezone handling
              if hasattr(timestamp, 'tzinfo') and timestamp.tzinfo is not None:
-                 # Convert timezone-aware to naive (UTC)
-                 timestamp = timestamp.replace(tzinfo=None)
+                 # Keep timezone-aware but ensure it's UTC
+                 if timestamp.tzinfo != timezone.utc:
+                     timestamp = timestamp.astimezone(timezone.utc)
+             else:
+                 # Convert naive datetime to UTC timezone-aware
+                 timestamp = timestamp.replace(tzinfo=timezone.utc)

              if timestamp < start_date or timestamp > end_date:
                  continue
@@ -234,46 +238,53 @@ class CSVReportGenerator:
              key = (week_start, canonical_id, project_key)

              # Aggregate metrics
-             weekly_data[key]['commits'] += 1
-             weekly_data[key]['story_points'] += commit.get('story_points', 0) or 0
+             data = weekly_data[key]
+             data['commits'] += 1
+             data['story_points'] += commit.get('story_points', 0) or 0

              # Use filtered stats if available, otherwise fall back to raw stats
-             weekly_data[key]['lines_added'] += commit.get('filtered_insertions', commit.get('insertions', 0))
-             weekly_data[key]['lines_removed'] += commit.get('filtered_deletions', commit.get('deletions', 0))
-             weekly_data[key]['files_changed'] += commit.get('filtered_files_changed', commit.get('files_changed', 0))
+             data['lines_added'] += commit.get('filtered_insertions', commit.get('insertions', 0)) or 0
+             data['lines_removed'] += commit.get('filtered_deletions', commit.get('deletions', 0)) or 0
+             data['files_changed'] += commit.get('filtered_files_changed', commit.get('files_changed', 0)) or 0

-             weekly_data[key]['complexity_delta'] += commit.get('complexity_delta', 0.0)
+             data['complexity_delta'] += commit.get('complexity_delta', 0.0) or 0.0

              # Track tickets
-             if commit.get('ticket_references'):
-                 weekly_data[key]['commits_with_tickets'] += 1
-                 for ticket in commit['ticket_references']:
+             ticket_refs = commit.get('ticket_references', [])
+             if ticket_refs:
+                 data['commits_with_tickets'] += 1
+                 tickets_set = data['tickets']
+                 for ticket in ticket_refs:
                      if isinstance(ticket, dict):
-                         weekly_data[key]['tickets'].add(ticket.get('full_id', ''))
+                         tickets_set.add(ticket.get('full_id', ''))
                      else:
-                         weekly_data[key]['tickets'].add(str(ticket))
+                         tickets_set.add(str(ticket))

              # Track PRs (if available)
-             if commit.get('pr_number'):
-                 weekly_data[key]['prs'].add(commit['pr_number'])
+             pr_number = commit.get('pr_number')
+             if pr_number:
+                 prs_set = data['prs']
+                 prs_set.add(pr_number)

          # Calculate derived metrics
-         result = {}
+         result: Dict[Tuple[datetime, str, str], Dict[str, Any]] = {}
          for key, metrics in weekly_data.items():
-             commits = metrics['commits']
-             if commits > 0:
+             commits_count = metrics['commits']
+             if commits_count > 0:
                  metrics['ticket_coverage_pct'] = (
-                     metrics['commits_with_tickets'] / commits * 100
+                     metrics['commits_with_tickets'] / commits_count * 100
                  )
                  metrics['avg_commit_size'] = (
-                     (metrics['lines_added'] + metrics['lines_removed']) / commits
+                     (metrics['lines_added'] + metrics['lines_removed']) / commits_count
                  )
              else:
                  metrics['ticket_coverage_pct'] = 0
                  metrics['avg_commit_size'] = 0

-             metrics['unique_tickets'] = len(metrics['tickets'])
-             metrics['prs_involved'] = len(metrics['prs'])
+             tickets_set = metrics['tickets']
+             prs_set = metrics['prs']
+             metrics['unique_tickets'] = len(tickets_set)
+             metrics['prs_involved'] = len(prs_set)

              # Remove sets before returning
              del metrics['tickets']
@@ -286,6 +297,15 @@ class CSVReportGenerator:

      def _get_week_start(self, date: datetime) -> datetime:
          """Get Monday of the week for a given date."""
+         # Ensure consistent timezone handling - keep timezone info
+         if hasattr(date, 'tzinfo') and date.tzinfo is not None:
+             # Keep timezone-aware but ensure it's UTC
+             if date.tzinfo != timezone.utc:
+                 date = date.astimezone(timezone.utc)
+         else:
+             # Convert naive datetime to UTC timezone-aware
+             date = date.replace(tzinfo=timezone.utc)
+
          days_since_monday = date.weekday()
          monday = date - timedelta(days=days_since_monday)
          return monday.replace(hour=0, minute=0, second=0, microsecond=0)
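
One detail in the aggregation rewrite above: commit.get(key, 0) still returns None when the key is present but holds None, which is why the new code appends "or 0" to each stat lookup before summing. A small illustration (the commit dict below is made up):

    commit = {"insertions": None}  # hypothetical commit record carrying a null stat

    print(commit.get("insertions", 0))        # None -- the default only applies when the key is missing
    print(commit.get("insertions", 0) or 0)   # 0    -- the guard used in the new aggregation code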
gitflow_analytics/reports/narrative_writer.py

@@ -1,14 +1,14 @@
  """Narrative report generation in Markdown format."""
  from datetime import datetime
  from pathlib import Path
- from typing import List, Dict, Any
+ from typing import List, Dict, Any, Set
  from io import StringIO


  class NarrativeReportGenerator:
      """Generate human-readable narrative reports in Markdown."""

-     def __init__(self):
+     def __init__(self) -> None:
          """Initialize narrative report generator."""
          self.templates = {
              'high_performer': "{name} led development with {commits} commits ({pct}% of total activity)",
@@ -74,7 +74,7 @@ class NarrativeReportGenerator:

      def _write_executive_summary(self, report: StringIO, commits: List[Dict[str, Any]],
                                   developer_stats: List[Dict[str, Any]],
-                                  ticket_analysis: Dict[str, Any]):
+                                  ticket_analysis: Dict[str, Any]) -> None:
          """Write executive summary section."""
          total_commits = len(commits)
          total_developers = len(developer_stats)
@@ -100,7 +100,7 @@ class NarrativeReportGenerator:
              report.write(f"({top_dev['total_commits']} commits)\n")

      def _write_team_composition(self, report: StringIO, developer_stats: List[Dict[str, Any]],
-                                 focus_data: List[Dict[str, Any]]):
+                                 focus_data: List[Dict[str, Any]]) -> None:
          """Write team composition analysis."""
          report.write("### Developer Profiles\n\n")

@@ -125,10 +125,10 @@ class NarrativeReportGenerator:
          report.write("\n")

      def _write_project_activity(self, report: StringIO, activity_dist: List[Dict[str, Any]],
-                                 commits: List[Dict[str, Any]]):
+                                 commits: List[Dict[str, Any]]) -> None:
          """Write project activity breakdown."""
          # Aggregate by project
-         project_totals = {}
+         project_totals: Dict[str, Dict[str, Any]] = {}
          for row in activity_dist:
              project = row['project']
              if project not in project_totals:
@@ -137,9 +137,11 @@ class NarrativeReportGenerator:
                      'lines': 0,
                      'developers': set()
                  }
-             project_totals[project]['commits'] += row['commits']
-             project_totals[project]['lines'] += row['lines_changed']
-             project_totals[project]['developers'].add(row['developer'])
+             data = project_totals[project]
+             data['commits'] += row['commits']
+             data['lines'] += row['lines_changed']
+             developers_set: Set[str] = data['developers']
+             developers_set.add(row['developer'])

          # Sort by commits
          sorted_projects = sorted(project_totals.items(),
@@ -154,12 +156,12 @@ class NarrativeReportGenerator:
              report.write(f"- Active Developers: {len(data['developers'])}\n\n")

      def _write_development_patterns(self, report: StringIO, insights: List[Dict[str, Any]],
-                                     focus_data: List[Dict[str, Any]]):
+                                     focus_data: List[Dict[str, Any]]) -> None:
          """Write development patterns analysis."""
          report.write("### Key Patterns Identified\n\n")

          # Group insights by category
-         by_category = {}
+         by_category: Dict[str, List[Dict[str, Any]]] = {}
          for insight in insights:
              category = insight['category']
              if category not in by_category:
@@ -186,7 +188,7 @@ class NarrativeReportGenerator:
              report.write("suggests high context switching\n")

      def _write_pr_analysis(self, report: StringIO, pr_metrics: Dict[str, Any],
-                            prs: List[Dict[str, Any]]):
+                            prs: List[Dict[str, Any]]) -> None:
          """Write pull request analysis."""
          report.write(f"- **Total PRs Merged**: {pr_metrics['total_prs']}\n")
          report.write(f"- **Average PR Size**: {pr_metrics['avg_pr_size']:.0f} lines\n")
@@ -198,7 +200,7 @@ class NarrativeReportGenerator:
              avg_comments = pr_metrics['total_review_comments'] / pr_metrics['total_prs']
              report.write(f"- **Average Comments per PR**: {avg_comments:.1f}\n")

-     def _write_ticket_tracking(self, report: StringIO, ticket_analysis: Dict[str, Any]):
+     def _write_ticket_tracking(self, report: StringIO, ticket_analysis: Dict[str, Any]) -> None:
          """Write ticket tracking analysis."""
          report.write("### Platform Usage\n\n")

@@ -221,7 +223,7 @@ class NarrativeReportGenerator:
              report.write(f"({commit['files_changed']} files)\n")

      def _write_recommendations(self, report: StringIO, insights: List[Dict[str, Any]],
-                                ticket_analysis: Dict[str, Any], focus_data: List[Dict[str, Any]]):
+                                ticket_analysis: Dict[str, Any], focus_data: List[Dict[str, Any]]) -> None:
          """Write recommendations based on analysis."""
          recommendations = []

gitflow_analytics/tui/__init__.py (new file)

@@ -0,0 +1,5 @@
+ """Terminal User Interface for GitFlow Analytics."""
+
+ from .app import GitFlowAnalyticsApp
+
+ __all__ = ["GitFlowAnalyticsApp"]