gitflow-analytics 1.0.3__py3-none-any.whl → 1.3.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. gitflow_analytics/_version.py +1 -1
  2. gitflow_analytics/classification/__init__.py +31 -0
  3. gitflow_analytics/classification/batch_classifier.py +752 -0
  4. gitflow_analytics/classification/classifier.py +464 -0
  5. gitflow_analytics/classification/feature_extractor.py +725 -0
  6. gitflow_analytics/classification/linguist_analyzer.py +574 -0
  7. gitflow_analytics/classification/model.py +455 -0
  8. gitflow_analytics/cli.py +4158 -350
  9. gitflow_analytics/cli_rich.py +198 -48
  10. gitflow_analytics/config/__init__.py +43 -0
  11. gitflow_analytics/config/errors.py +261 -0
  12. gitflow_analytics/config/loader.py +905 -0
  13. gitflow_analytics/config/profiles.py +264 -0
  14. gitflow_analytics/config/repository.py +124 -0
  15. gitflow_analytics/config/schema.py +444 -0
  16. gitflow_analytics/config/validator.py +154 -0
  17. gitflow_analytics/config.py +44 -508
  18. gitflow_analytics/core/analyzer.py +1209 -98
  19. gitflow_analytics/core/cache.py +1337 -29
  20. gitflow_analytics/core/data_fetcher.py +1285 -0
  21. gitflow_analytics/core/identity.py +363 -14
  22. gitflow_analytics/core/metrics_storage.py +526 -0
  23. gitflow_analytics/core/progress.py +372 -0
  24. gitflow_analytics/core/schema_version.py +269 -0
  25. gitflow_analytics/extractors/ml_tickets.py +1100 -0
  26. gitflow_analytics/extractors/story_points.py +8 -1
  27. gitflow_analytics/extractors/tickets.py +749 -11
  28. gitflow_analytics/identity_llm/__init__.py +6 -0
  29. gitflow_analytics/identity_llm/analysis_pass.py +231 -0
  30. gitflow_analytics/identity_llm/analyzer.py +464 -0
  31. gitflow_analytics/identity_llm/models.py +76 -0
  32. gitflow_analytics/integrations/github_integration.py +175 -11
  33. gitflow_analytics/integrations/jira_integration.py +461 -24
  34. gitflow_analytics/integrations/orchestrator.py +124 -1
  35. gitflow_analytics/metrics/activity_scoring.py +322 -0
  36. gitflow_analytics/metrics/branch_health.py +470 -0
  37. gitflow_analytics/metrics/dora.py +379 -20
  38. gitflow_analytics/models/database.py +843 -53
  39. gitflow_analytics/pm_framework/__init__.py +115 -0
  40. gitflow_analytics/pm_framework/adapters/__init__.py +50 -0
  41. gitflow_analytics/pm_framework/adapters/jira_adapter.py +1845 -0
  42. gitflow_analytics/pm_framework/base.py +406 -0
  43. gitflow_analytics/pm_framework/models.py +211 -0
  44. gitflow_analytics/pm_framework/orchestrator.py +652 -0
  45. gitflow_analytics/pm_framework/registry.py +333 -0
  46. gitflow_analytics/qualitative/__init__.py +9 -10
  47. gitflow_analytics/qualitative/chatgpt_analyzer.py +259 -0
  48. gitflow_analytics/qualitative/classifiers/__init__.py +3 -3
  49. gitflow_analytics/qualitative/classifiers/change_type.py +518 -244
  50. gitflow_analytics/qualitative/classifiers/domain_classifier.py +272 -165
  51. gitflow_analytics/qualitative/classifiers/intent_analyzer.py +321 -222
  52. gitflow_analytics/qualitative/classifiers/llm/__init__.py +35 -0
  53. gitflow_analytics/qualitative/classifiers/llm/base.py +193 -0
  54. gitflow_analytics/qualitative/classifiers/llm/batch_processor.py +383 -0
  55. gitflow_analytics/qualitative/classifiers/llm/cache.py +479 -0
  56. gitflow_analytics/qualitative/classifiers/llm/cost_tracker.py +435 -0
  57. gitflow_analytics/qualitative/classifiers/llm/openai_client.py +403 -0
  58. gitflow_analytics/qualitative/classifiers/llm/prompts.py +373 -0
  59. gitflow_analytics/qualitative/classifiers/llm/response_parser.py +287 -0
  60. gitflow_analytics/qualitative/classifiers/llm_commit_classifier.py +607 -0
  61. gitflow_analytics/qualitative/classifiers/risk_analyzer.py +215 -189
  62. gitflow_analytics/qualitative/core/__init__.py +4 -4
  63. gitflow_analytics/qualitative/core/llm_fallback.py +239 -235
  64. gitflow_analytics/qualitative/core/nlp_engine.py +157 -148
  65. gitflow_analytics/qualitative/core/pattern_cache.py +214 -192
  66. gitflow_analytics/qualitative/core/processor.py +381 -248
  67. gitflow_analytics/qualitative/enhanced_analyzer.py +2236 -0
  68. gitflow_analytics/qualitative/example_enhanced_usage.py +420 -0
  69. gitflow_analytics/qualitative/models/__init__.py +7 -7
  70. gitflow_analytics/qualitative/models/schemas.py +155 -121
  71. gitflow_analytics/qualitative/utils/__init__.py +4 -4
  72. gitflow_analytics/qualitative/utils/batch_processor.py +136 -123
  73. gitflow_analytics/qualitative/utils/cost_tracker.py +142 -140
  74. gitflow_analytics/qualitative/utils/metrics.py +172 -158
  75. gitflow_analytics/qualitative/utils/text_processing.py +146 -104
  76. gitflow_analytics/reports/__init__.py +100 -0
  77. gitflow_analytics/reports/analytics_writer.py +539 -14
  78. gitflow_analytics/reports/base.py +648 -0
  79. gitflow_analytics/reports/branch_health_writer.py +322 -0
  80. gitflow_analytics/reports/classification_writer.py +924 -0
  81. gitflow_analytics/reports/cli_integration.py +427 -0
  82. gitflow_analytics/reports/csv_writer.py +1676 -212
  83. gitflow_analytics/reports/data_models.py +504 -0
  84. gitflow_analytics/reports/database_report_generator.py +427 -0
  85. gitflow_analytics/reports/example_usage.py +344 -0
  86. gitflow_analytics/reports/factory.py +499 -0
  87. gitflow_analytics/reports/formatters.py +698 -0
  88. gitflow_analytics/reports/html_generator.py +1116 -0
  89. gitflow_analytics/reports/interfaces.py +489 -0
  90. gitflow_analytics/reports/json_exporter.py +2770 -0
  91. gitflow_analytics/reports/narrative_writer.py +2287 -158
  92. gitflow_analytics/reports/story_point_correlation.py +1144 -0
  93. gitflow_analytics/reports/weekly_trends_writer.py +389 -0
  94. gitflow_analytics/training/__init__.py +5 -0
  95. gitflow_analytics/training/model_loader.py +377 -0
  96. gitflow_analytics/training/pipeline.py +550 -0
  97. gitflow_analytics/tui/__init__.py +1 -1
  98. gitflow_analytics/tui/app.py +129 -126
  99. gitflow_analytics/tui/screens/__init__.py +3 -3
  100. gitflow_analytics/tui/screens/analysis_progress_screen.py +188 -179
  101. gitflow_analytics/tui/screens/configuration_screen.py +154 -178
  102. gitflow_analytics/tui/screens/loading_screen.py +100 -110
  103. gitflow_analytics/tui/screens/main_screen.py +89 -72
  104. gitflow_analytics/tui/screens/results_screen.py +305 -281
  105. gitflow_analytics/tui/widgets/__init__.py +2 -2
  106. gitflow_analytics/tui/widgets/data_table.py +67 -69
  107. gitflow_analytics/tui/widgets/export_modal.py +76 -76
  108. gitflow_analytics/tui/widgets/progress_widget.py +41 -46
  109. gitflow_analytics-1.3.11.dist-info/METADATA +1015 -0
  110. gitflow_analytics-1.3.11.dist-info/RECORD +122 -0
  111. gitflow_analytics-1.0.3.dist-info/METADATA +0 -490
  112. gitflow_analytics-1.0.3.dist-info/RECORD +0 -62
  113. {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.11.dist-info}/WHEEL +0 -0
  114. {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.11.dist-info}/entry_points.txt +0 -0
  115. {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.11.dist-info}/licenses/LICENSE +0 -0
  116. {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.11.dist-info}/top_level.txt +0 -0
@@ -3,37 +3,34 @@
  import json
  from datetime import datetime
  from pathlib import Path
- from typing import Optional, Dict, Any, List
+ from typing import Any, Optional

- from textual.widgets import (
- Header, Footer, Label, Static, Button,
- TabbedContent, TabPane, Rule
- )
- from textual.containers import Container, Vertical, Horizontal, ScrollableContainer
- from textual.screen import Screen
- from textual.binding import Binding
  from rich.table import Table
- from rich.text import Text
+ from textual.binding import Binding
+ from textual.containers import Container, Horizontal, ScrollableContainer, Vertical
+ from textual.screen import Screen
+ from textual.widgets import Button, Footer, Header, Label, Rule, Static, TabbedContent, TabPane
+
+ from gitflow_analytics.config import Config

  from ..widgets.data_table import EnhancedDataTable
  from ..widgets.export_modal import ExportModal
- from gitflow_analytics.config import Config


  class ResultsScreen(Screen):
  """
  Screen displaying comprehensive analysis results with interactive exploration.
-
- WHY: Analysis results are complex and multi-dimensional, requiring an
+
+ WHY: Analysis results are complex and multi-dimensional, requiring an
  interactive interface that allows users to explore different aspects of
  the data. Tabbed layout organizes information logically while providing
  powerful data exploration capabilities.
-
+
  DESIGN DECISION: Uses tabbed interface to separate different result categories
  while providing consistent export functionality across all views. Interactive
  tables allow users to sort, filter, and drill down into specific data points.
  """
-
+
  BINDINGS = [
  Binding("escape", "back", "Back to Main"),
  Binding("ctrl+s", "export", "Export Results"),
@@ -41,16 +38,16 @@ class ResultsScreen(Screen):
  Binding("r", "refresh", "Refresh View"),
  Binding("ctrl+e", "export_current", "Export Current View"),
  ]
-
+
  def __init__(
  self,
- commits: List[Dict],
- prs: List[Dict],
- developers: List[Dict],
+ commits: list[dict],
+ prs: list[dict],
+ developers: list[dict],
  config: Config,
  *,
  name: Optional[str] = None,
- id: Optional[str] = None
+ id: Optional[str] = None,
  ) -> None:
  super().__init__(name=name, id=id)
  self.commits = commits
@@ -58,414 +55,443 @@ class ResultsScreen(Screen):
  self.developers = developers
  self.config = config
  self.current_tab = "summary"
-
+
  def compose(self):
  """Compose the results screen."""
  yield Header()
-
+
  with Container(id="results-container"):
  yield Label("GitFlow Analytics - Results", classes="screen-title")
-
+
  with TabbedContent(initial="summary"):
  # Summary Tab
  with TabPane("Summary", id="summary"):
  yield self._create_summary_panel()
-
+
  # Developers Tab
  with TabPane("Developers", id="developers"):
  yield self._create_developers_panel()
-
+
  # Commits Tab
  with TabPane("Commits", id="commits"):
  yield self._create_commits_panel()
-
+
  # Pull Requests Tab (if available)
  if self.prs:
  with TabPane("Pull Requests", id="pull-requests"):
  yield self._create_prs_panel()
-
+
  # Qualitative Insights Tab (if available)
  if self._has_qualitative_data():
  with TabPane("Qualitative Insights", id="qualitative"):
  yield self._create_qualitative_panel()
-
+
  # Export Tab
  with TabPane("Export", id="export"):
  yield self._create_export_panel()
-
+
  yield Footer()
-
+
  def _create_summary_panel(self) -> ScrollableContainer:
  """
  Create comprehensive summary statistics panel.
-
+
  WHY: Provides high-level overview of all analysis results in a single view,
  allowing users to quickly understand the overall scope and key metrics
  without diving into detailed data tables.
  """
  container = ScrollableContainer()
-
+
  # Key metrics section
  container.mount(Label("Analysis Summary", classes="section-title"))
-
+
  # Create summary table
  summary_table = Table(show_header=False, show_edge=False, pad_edge=False)
  summary_table.add_column("Metric", style="bold cyan", width=25)
  summary_table.add_column("Value", style="green", width=15)
  summary_table.add_column("Details", style="dim", width=40)
-
+
  # Calculate key metrics
  total_commits = len(self.commits)
  total_prs = len(self.prs)
  total_developers = len(self.developers)
-
+
  # Time range
  if self.commits:
- dates = [c.get('timestamp') for c in self.commits if c.get('timestamp')]
+ dates = [c.get("timestamp") for c in self.commits if c.get("timestamp")]
  if dates:
- min_date = min(dates).strftime('%Y-%m-%d')
- max_date = max(dates).strftime('%Y-%m-%d')
+ min_date = min(dates).strftime("%Y-%m-%d")
+ max_date = max(dates).strftime("%Y-%m-%d")
  date_range = f"{min_date} to {max_date}"
  else:
  date_range = "Unknown"
  else:
  date_range = "No data"
-
+
  # Story points
- total_story_points = sum(c.get('story_points', 0) or 0 for c in self.commits)
-
+ total_story_points = sum(c.get("story_points", 0) or 0 for c in self.commits)
+
  # Ticket coverage
- commits_with_tickets = sum(1 for c in self.commits if c.get('ticket_references'))
+ commits_with_tickets = sum(1 for c in self.commits if c.get("ticket_references"))
  ticket_coverage = (commits_with_tickets / total_commits * 100) if total_commits > 0 else 0
-
+
  # Add metrics to table
- summary_table.add_row("Total Commits", f"{total_commits:,}", "All commits in analysis period")
+ summary_table.add_row(
+ "Total Commits", f"{total_commits:,}", "All commits in analysis period"
+ )
  summary_table.add_row("Total Pull Requests", f"{total_prs:,}", "Detected pull requests")
- summary_table.add_row("Active Developers", f"{total_developers:,}", "Unique developer identities")
+ summary_table.add_row(
+ "Active Developers", f"{total_developers:,}", "Unique developer identities"
+ )
  summary_table.add_row("Analysis Period", date_range, "Date range of analyzed commits")
- summary_table.add_row("Story Points", f"{total_story_points:,}", "Total story points completed")
- summary_table.add_row("Ticket Coverage", f"{ticket_coverage:.1f}%", "Commits with ticket references")
-
+ summary_table.add_row(
+ "Story Points", f"{total_story_points:,}", "Total story points completed"
+ )
+ summary_table.add_row(
+ "Ticket Coverage", f"{ticket_coverage:.1f}%", "Commits with ticket references"
+ )
+
  from rich.console import Console
  from rich.panel import Panel
- console = Console()
-
+
+ Console()
+
  container.mount(Static(Panel(summary_table, title="Key Metrics", border_style="blue")))
-
+
  # Top contributors section
  container.mount(Rule())
  container.mount(Label("Top Contributors", classes="section-title"))
-
+
  if self.developers:
- top_devs = sorted(self.developers, key=lambda d: d.get('total_commits', 0), reverse=True)[:10]
-
+ top_devs = sorted(
+ self.developers, key=lambda d: d.get("total_commits", 0), reverse=True
+ )[:10]
+
  contrib_table = Table(show_header=True, header_style="bold magenta")
  contrib_table.add_column("Developer", width=25)
  contrib_table.add_column("Commits", justify="right", width=10)
  contrib_table.add_column("Story Points", justify="right", width=12)
  contrib_table.add_column("Avg Points/Commit", justify="right", width=15)
-
+
  for dev in top_devs:
- commits = dev.get('total_commits', 0)
- points = dev.get('total_story_points', 0)
+ commits = dev.get("total_commits", 0)
+ points = dev.get("total_story_points", 0)
  avg_points = points / commits if commits > 0 else 0
-
+
  contrib_table.add_row(
- dev.get('primary_name', 'Unknown')[:23],
+ dev.get("primary_name", "Unknown")[:23],
  f"{commits:,}",
  f"{points:,}",
- f"{avg_points:.1f}"
+ f"{avg_points:.1f}",
  )
-
- container.mount(Static(Panel(contrib_table, title="Developer Activity", border_style="green")))
-
+
+ container.mount(
+ Static(Panel(contrib_table, title="Developer Activity", border_style="green"))
+ )
+
  # Qualitative insights summary (if available)
  if self._has_qualitative_data():
  container.mount(Rule())
  container.mount(Label("Qualitative Analysis Summary", classes="section-title"))
  container.mount(Static(self._create_qualitative_summary()))
-
+
  return container
-
+
  def _create_developers_panel(self) -> Container:
  """Create interactive developers data panel."""
  container = Container()
-
+
  container.mount(Label("Developer Statistics", classes="section-title"))
- container.mount(Static(
- f"Showing {len(self.developers)} unique developers. Click column headers to sort.",
- classes="help-text"
- ))
-
- # Create enhanced data table
- developers_table = EnhancedDataTable(
- data=self.developers,
- id="developers-table"
+ container.mount(
+ Static(
+ f"Showing {len(self.developers)} unique developers. Click column headers to sort.",
+ classes="help-text",
+ )
  )
-
+
+ # Create enhanced data table
+ developers_table = EnhancedDataTable(data=self.developers, id="developers-table")
+
  container.mount(developers_table)
-
+
  # Action buttons
  with container.mount(Horizontal(classes="action-bar")):
  yield Button("Export Developers", id="export-developers")
  yield Button("Show Identity Details", id="show-identities")
-
+
  return container
-
+
  def _create_commits_panel(self) -> Container:
  """Create interactive commits data panel."""
  container = Container()
-
+
  container.mount(Label("Commit Analysis", classes="section-title"))
- container.mount(Static(
- f"Showing {len(self.commits)} commits. Use filters to explore specific data.",
- classes="help-text"
- ))
-
+ container.mount(
+ Static(
+ f"Showing {len(self.commits)} commits. Use filters to explore specific data.",
+ classes="help-text",
+ )
+ )
+
  # Prepare commits data for table display
  commits_data = []
  for commit in self.commits[:1000]: # Limit to 1000 for performance
  commit_row = {
- 'date': commit.get('timestamp', '').strftime('%Y-%m-%d') if commit.get('timestamp') else '',
- 'author': commit.get('author_name', ''),
- 'message': commit.get('message', '')[:80] + '...' if len(commit.get('message', '')) > 80 else commit.get('message', ''),
- 'files_changed': commit.get('files_changed_count', 0),
- 'insertions': commit.get('insertions', 0),
- 'deletions': commit.get('deletions', 0),
- 'story_points': commit.get('story_points', 0),
- 'project_key': commit.get('project_key', ''),
- 'change_type': commit.get('change_type', 'unknown'),
- 'risk_level': commit.get('risk_level', 'unknown')
+ "date": (
+ commit.get("timestamp", "").strftime("%Y-%m-%d")
+ if commit.get("timestamp")
+ else ""
+ ),
+ "author": commit.get("author_name", ""),
+ "message": (
+ commit.get("message", "")[:80] + "..."
+ if len(commit.get("message", "")) > 80
+ else commit.get("message", "")
+ ),
+ "files_changed": commit.get("files_changed_count", 0),
+ "insertions": commit.get("insertions", 0),
+ "deletions": commit.get("deletions", 0),
+ "story_points": commit.get("story_points", 0),
+ "project_key": commit.get("project_key", ""),
+ "change_type": commit.get("change_type", "unknown"),
+ "risk_level": commit.get("risk_level", "unknown"),
  }
  commits_data.append(commit_row)
-
- commits_table = EnhancedDataTable(
- data=commits_data,
- id="commits-table"
- )
-
+
+ commits_table = EnhancedDataTable(data=commits_data, id="commits-table")
+
  container.mount(commits_table)
-
+
  # Action buttons
  with container.mount(Horizontal(classes="action-bar")):
  yield Button("Export Commits", id="export-commits")
  yield Button("Filter by Author", id="filter-author")
  yield Button("Filter by Project", id="filter-project")
-
+
  return container
-
+
  def _create_prs_panel(self) -> Container:
  """Create pull requests analysis panel."""
  container = Container()
-
+
  container.mount(Label("Pull Request Analysis", classes="section-title"))
- container.mount(Static(
- f"Showing {len(self.prs)} pull requests with metrics and timing data.",
- classes="help-text"
- ))
-
+ container.mount(
+ Static(
+ f"Showing {len(self.prs)} pull requests with metrics and timing data.",
+ classes="help-text",
+ )
+ )
+
  # Prepare PR data for table
  prs_data = []
  for pr in self.prs:
  pr_row = {
- 'title': pr.get('title', '')[:60] + '...' if len(pr.get('title', '')) > 60 else pr.get('title', ''),
- 'author': pr.get('author', ''),
- 'state': pr.get('state', ''),
- 'created_date': pr.get('created_at', '').strftime('%Y-%m-%d') if pr.get('created_at') else '',
- 'merged_date': pr.get('merged_at', '').strftime('%Y-%m-%d') if pr.get('merged_at') else '',
- 'commits': pr.get('commits_count', 0),
- 'changed_files': pr.get('changed_files', 0),
- 'additions': pr.get('additions', 0),
- 'deletions': pr.get('deletions', 0),
+ "title": (
+ pr.get("title", "")[:60] + "..."
+ if len(pr.get("title", "")) > 60
+ else pr.get("title", "")
+ ),
+ "author": pr.get("author", ""),
+ "state": pr.get("state", ""),
+ "created_date": (
+ pr.get("created_at", "").strftime("%Y-%m-%d") if pr.get("created_at") else ""
+ ),
+ "merged_date": (
+ pr.get("merged_at", "").strftime("%Y-%m-%d") if pr.get("merged_at") else ""
+ ),
+ "commits": pr.get("commits_count", 0),
+ "changed_files": pr.get("changed_files", 0),
+ "additions": pr.get("additions", 0),
+ "deletions": pr.get("deletions", 0),
  }
  prs_data.append(pr_row)
-
- prs_table = EnhancedDataTable(
- data=prs_data,
- id="prs-table"
- )
-
+
+ prs_table = EnhancedDataTable(data=prs_data, id="prs-table")
+
  container.mount(prs_table)
-
+
  # Action buttons
  with container.mount(Horizontal(classes="action-bar")):
  yield Button("Export PRs", id="export-prs")
  yield Button("Show PR Metrics", id="show-pr-metrics")
-
+
  return container
-
+
  def _create_qualitative_panel(self) -> ScrollableContainer:
  """Create qualitative insights panel."""
  container = ScrollableContainer()
-
+
  container.mount(Label("Qualitative Analysis Results", classes="section-title"))
-
+
  if not self._has_qualitative_data():
- container.mount(Static("No qualitative analysis data available.", classes="info-message"))
- container.mount(Static("Run analysis with qualitative processing enabled to see insights here."))
+ container.mount(
+ Static("No qualitative analysis data available.", classes="info-message")
+ )
+ container.mount(
+ Static("Run analysis with qualitative processing enabled to see insights here.")
+ )
  return container
-
+
  # Analyze qualitative data distributions
  change_types = {}
  risk_levels = {}
  domains = {}
  confidence_scores = []
-
+
  for commit in self.commits:
- if 'change_type' in commit:
- change_type = commit.get('change_type', 'unknown')
+ if "change_type" in commit:
+ change_type = commit.get("change_type", "unknown")
  change_types[change_type] = change_types.get(change_type, 0) + 1
-
- risk_level = commit.get('risk_level', 'unknown')
+
+ risk_level = commit.get("risk_level", "unknown")
  risk_levels[risk_level] = risk_levels.get(risk_level, 0) + 1
-
- domain = commit.get('business_domain', 'unknown')
+
+ domain = commit.get("business_domain", "unknown")
  domains[domain] = domains.get(domain, 0) + 1
-
- if 'confidence_score' in commit:
- confidence_scores.append(commit['confidence_score'])
-
+
+ if "confidence_score" in commit:
+ confidence_scores.append(commit["confidence_score"])
+
  # Change types distribution
  container.mount(Label("Change Type Distribution", classes="subsection-title"))
-
+
  change_table = Table(show_header=True, header_style="bold cyan")
  change_table.add_column("Change Type", width=20)
  change_table.add_column("Count", justify="right", width=10)
  change_table.add_column("Percentage", justify="right", width=12)
-
+
  total_commits = len(self.commits)
  for change_type, count in sorted(change_types.items(), key=lambda x: x[1], reverse=True):
  pct = (count / total_commits) * 100
- change_table.add_row(
- change_type.title(),
- f"{count:,}",
- f"{pct:.1f}%"
- )
-
+ change_table.add_row(change_type.title(), f"{count:,}", f"{pct:.1f}%")
+
  from rich.panel import Panel
+
  container.mount(Static(Panel(change_table, title="Change Types", border_style="cyan")))
-
+
  # Risk levels distribution
  container.mount(Rule())
  container.mount(Label("Risk Level Distribution", classes="subsection-title"))
-
+
  risk_table = Table(show_header=True, header_style="bold red")
  risk_table.add_column("Risk Level", width=20)
  risk_table.add_column("Count", justify="right", width=10)
  risk_table.add_column("Percentage", justify="right", width=12)
-
+
  for risk_level, count in sorted(risk_levels.items(), key=lambda x: x[1], reverse=True):
  pct = (count / total_commits) * 100
- risk_table.add_row(
- risk_level.title(),
- f"{count:,}",
- f"{pct:.1f}%"
- )
-
+ risk_table.add_row(risk_level.title(), f"{count:,}", f"{pct:.1f}%")
+
  container.mount(Static(Panel(risk_table, title="Risk Levels", border_style="red")))
-
+
  # Business domains
  container.mount(Rule())
  container.mount(Label("Business Domain Activity", classes="subsection-title"))
-
+
  domain_table = Table(show_header=True, header_style="bold green")
  domain_table.add_column("Business Domain", width=25)
  domain_table.add_column("Count", justify="right", width=10)
  domain_table.add_column("Percentage", justify="right", width=12)
-
+
  for domain, count in sorted(domains.items(), key=lambda x: x[1], reverse=True):
  pct = (count / total_commits) * 100
- domain_table.add_row(
- domain.title(),
- f"{count:,}",
- f"{pct:.1f}%"
- )
-
+ domain_table.add_row(domain.title(), f"{count:,}", f"{pct:.1f}%")
+
  container.mount(Static(Panel(domain_table, title="Business Domains", border_style="green")))
-
+
  # Confidence score statistics
  if confidence_scores:
  container.mount(Rule())
  container.mount(Label("Analysis Confidence", classes="subsection-title"))
-
+
  avg_confidence = sum(confidence_scores) / len(confidence_scores)
  min_confidence = min(confidence_scores)
  max_confidence = max(confidence_scores)
-
+
  confidence_text = f"""Average Confidence: {avg_confidence:.2f}
  Minimum Confidence: {min_confidence:.2f}
  Maximum Confidence: {max_confidence:.2f}
  Total Analyzed: {len(confidence_scores):,} commits"""
-
- container.mount(Static(Panel(confidence_text, title="Confidence Statistics", border_style="yellow")))
-
+
+ container.mount(
+ Static(Panel(confidence_text, title="Confidence Statistics", border_style="yellow"))
+ )
+
  return container
-
+
  def _create_export_panel(self) -> Container:
  """Create export options panel."""
  container = Container()
-
+
  container.mount(Label("Export Analysis Results", classes="section-title"))
- container.mount(Static(
- "Export your analysis results in various formats for further analysis or reporting.",
- classes="help-text"
- ))
-
+ container.mount(
+ Static(
+ "Export your analysis results in various formats for further analysis or reporting.",
+ classes="help-text",
+ )
+ )
+
  # Export options
  with container.mount(Vertical(id="export-options")):
- yield Button("📄 Export Summary Report (CSV)", variant="primary", id="export-summary-csv")
+ yield Button(
+ "📄 Export Summary Report (CSV)", variant="primary", id="export-summary-csv"
+ )
  yield Button("👥 Export Developer Statistics (CSV)", id="export-developers-csv")
  yield Button("📝 Export Commit Details (CSV)", id="export-commits-csv")
-
+
  if self.prs:
  yield Button("🔀 Export Pull Requests (CSV)", id="export-prs-csv")
-
+
  if self._has_qualitative_data():
  yield Button("🧠 Export Qualitative Insights (CSV)", id="export-qualitative-csv")
-
+
  yield Rule()
  yield Button("📊 Export Complete Dataset (JSON)", id="export-json")
  yield Button("📋 Generate Markdown Report", id="export-markdown")
-
+
  # Export status
  container.mount(Rule())
  container.mount(Static("", id="export-status"))
-
+
  return container
-
+
  def _has_qualitative_data(self) -> bool:
  """Check if qualitative analysis data is available."""
- return any('change_type' in commit for commit in self.commits)
-
+ return any("change_type" in commit for commit in self.commits)
+
  def _create_qualitative_summary(self) -> str:
  """Create a text summary of qualitative insights."""
  if not self._has_qualitative_data():
  return "No qualitative data available"
-
+
  # Count change types and risk levels
  change_types = {}
  risk_levels = {}
-
+
  for commit in self.commits:
- if 'change_type' in commit:
- change_type = commit.get('change_type', 'unknown')
+ if "change_type" in commit:
+ change_type = commit.get("change_type", "unknown")
  change_types[change_type] = change_types.get(change_type, 0) + 1
-
- risk_level = commit.get('risk_level', 'unknown')
+
+ risk_level = commit.get("risk_level", "unknown")
  risk_levels[risk_level] = risk_levels.get(risk_level, 0) + 1
-
+
  # Find most common values
- top_change_type = max(change_types.items(), key=lambda x: x[1]) if change_types else ('unknown', 0)
- top_risk_level = max(risk_levels.items(), key=lambda x: x[1]) if risk_levels else ('unknown', 0)
-
+ top_change_type = (
+ max(change_types.items(), key=lambda x: x[1]) if change_types else ("unknown", 0)
+ )
+ top_risk_level = (
+ max(risk_levels.items(), key=lambda x: x[1]) if risk_levels else ("unknown", 0)
+ )
+
  total_analyzed = sum(change_types.values())
-
+
  return f"""Qualitative Analysis Summary:
  • Total commits analyzed: {total_analyzed:,}
  • Most common change type: {top_change_type[0]} ({top_change_type[1]} commits)
  • Most common risk level: {top_risk_level[0]} ({top_risk_level[1]} commits)
  • Coverage: {(total_analyzed/len(self.commits)*100):.1f}% of all commits"""
-
+
  def on_button_pressed(self, event: Button.Pressed) -> None:
  """Handle button press events."""
  export_actions = {
@@ -477,7 +503,7 @@ Total Analyzed: {len(confidence_scores):,} commits"""
  "export-json": lambda: self._export_data("complete", "json"),
  "export-markdown": lambda: self._export_data("report", "markdown"),
  }
-
+
  action = export_actions.get(event.button.id)
  if action:
  action()
@@ -487,165 +513,163 @@ Total Analyzed: {len(confidence_scores):,} commits"""
  self._show_identity_details()
  elif event.button.id == "show-pr-metrics":
  self._show_pr_metrics()
-
+
  def _export_data(self, data_type: str, format_type: str) -> None:
  """
  Export specific data type in specified format.
-
+
  WHY: Provides flexible export functionality that allows users to
  export exactly the data they need in their preferred format.
  """
  try:
  # Determine data and filename based on type
  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-
+
  if data_type == "summary":
  data = self._prepare_summary_data()
- filename = f"gitflow_summary_{timestamp}.csv"
  elif data_type == "developers":
  data = self.developers
- filename = f"gitflow_developers_{timestamp}.csv"
  elif data_type == "commits":
  data = self.commits
- filename = f"gitflow_commits_{timestamp}.csv"
  elif data_type == "prs":
  data = self.prs
- filename = f"gitflow_prs_{timestamp}.csv"
  elif data_type == "qualitative":
  data = self._prepare_qualitative_data()
- filename = f"gitflow_qualitative_{timestamp}.csv"
  elif data_type == "complete":
  data = {
- 'commits': self.commits,
- 'prs': self.prs,
- 'developers': self.developers,
- 'config': self.config.__dict__ if hasattr(self.config, '__dict__') else {}
+ "commits": self.commits,
+ "prs": self.prs,
+ "developers": self.developers,
+ "config": self.config.__dict__ if hasattr(self.config, "__dict__") else {},
  }
- filename = f"gitflow_complete_{timestamp}.json"
  else:
  self.notify("Unknown export type", severity="error")
  return
-
+
  # Show export modal
  export_modal = ExportModal(
  available_formats=[format_type.upper()],
  default_path=Path("./reports"),
  data_info={
- 'type': data_type,
- 'row_count': len(data) if isinstance(data, list) else 'N/A',
- 'timestamp': timestamp
- }
+ "type": data_type,
+ "row_count": len(data) if isinstance(data, list) else "N/A",
+ "timestamp": timestamp,
+ },
  )
-
+
  def handle_export(config):
  if config:
  self._perform_export(data, config, format_type)
-
+
  self.app.push_screen(export_modal, handle_export)
-
+
  except Exception as e:
  self.notify(f"Export preparation failed: {e}", severity="error")
-
- def _perform_export(self, data: Any, export_config: Dict[str, Any], format_type: str) -> None:
+
+ def _perform_export(self, data: Any, export_config: dict[str, Any], format_type: str) -> None:
  """Perform the actual export operation."""
  try:
- export_path = export_config['path']
-
+ export_path = export_config["path"]
+
  if format_type == "csv":
  self._export_to_csv(data, export_path, export_config)
  elif format_type == "json":
  self._export_to_json(data, export_path, export_config)
  elif format_type == "markdown":
  self._export_to_markdown(data, export_path, export_config)
-
+
  self.notify(f"Successfully exported to {export_path}", severity="success")
-
+
  except Exception as e:
  self.notify(f"Export failed: {e}", severity="error")
-
- def _export_to_csv(self, data: List[Dict], path: Path, config: Dict[str, Any]) -> None:
+
+ def _export_to_csv(self, data: list[dict], path: Path, config: dict[str, Any]) -> None:
  """Export data to CSV format."""
  import csv
-
+
  if not data:
  return
-
+
  # Ensure parent directory exists
  path.parent.mkdir(parents=True, exist_ok=True)
-
- with open(path, 'w', newline='', encoding='utf-8') as csvfile:
+
+ with open(path, "w", newline="", encoding="utf-8") as csvfile:
  fieldnames = list(data[0].keys())
  writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
-
- if config.get('include_headers', True):
+
+ if config.get("include_headers", True):
  writer.writeheader()
-
+
  for row in data:
  # Anonymize if requested
- if config.get('anonymize', False):
+ if config.get("anonymize", False):
  row = self._anonymize_row(row)
  writer.writerow(row)
-
- def _export_to_json(self, data: Any, path: Path, config: Dict[str, Any]) -> None:
+
+ def _export_to_json(self, data: Any, path: Path, config: dict[str, Any]) -> None:
  """Export data to JSON format."""
  # Ensure parent directory exists
  path.parent.mkdir(parents=True, exist_ok=True)
-
+
  # Anonymize if requested
- if config.get('anonymize', False):
+ if config.get("anonymize", False):
  data = self._anonymize_data(data)
-
- with open(path, 'w', encoding='utf-8') as jsonfile:
+
+ with open(path, "w", encoding="utf-8") as jsonfile:
  json.dump(data, jsonfile, indent=2, default=str)
-
- def _export_to_markdown(self, data: Any, path: Path, config: Dict[str, Any]) -> None:
+
+ def _export_to_markdown(self, data: Any, path: Path, config: dict[str, Any]) -> None:
  """Export data as markdown report."""
  self.notify("Markdown export not yet implemented", severity="info")
  # TODO: Implement markdown report generation
-
- def _prepare_summary_data(self) -> List[Dict]:
+
+ def _prepare_summary_data(self) -> list[dict]:
  """Prepare summary statistics for export."""
  return [
- {'metric': 'Total Commits', 'value': len(self.commits)},
- {'metric': 'Total PRs', 'value': len(self.prs)},
- {'metric': 'Active Developers', 'value': len(self.developers)},
- {'metric': 'Total Story Points', 'value': sum(c.get('story_points', 0) or 0 for c in self.commits)},
+ {"metric": "Total Commits", "value": len(self.commits)},
+ {"metric": "Total PRs", "value": len(self.prs)},
+ {"metric": "Active Developers", "value": len(self.developers)},
+ {
+ "metric": "Total Story Points",
+ "value": sum(c.get("story_points", 0) or 0 for c in self.commits),
+ },
  ]
-
- def _prepare_qualitative_data(self) -> List[Dict]:
+
+ def _prepare_qualitative_data(self) -> list[dict]:
  """Prepare qualitative analysis data for export."""
  qualitative_commits = []
  for commit in self.commits:
- if 'change_type' in commit:
+ if "change_type" in commit:
  qual_commit = {
- 'commit_hash': commit.get('hash'),
- 'author': commit.get('author_name'),
- 'message': commit.get('message'),
- 'change_type': commit.get('change_type'),
- 'business_domain': commit.get('business_domain'),
- 'risk_level': commit.get('risk_level'),
- 'confidence_score': commit.get('confidence_score')
+ "commit_hash": commit.get("hash"),
+ "author": commit.get("author_name"),
+ "message": commit.get("message"),
+ "change_type": commit.get("change_type"),
+ "business_domain": commit.get("business_domain"),
+ "risk_level": commit.get("risk_level"),
+ "confidence_score": commit.get("confidence_score"),
  }
  qualitative_commits.append(qual_commit)
  return qualitative_commits
-
- def _anonymize_row(self, row: Dict) -> Dict:
+
+ def _anonymize_row(self, row: dict) -> dict:
  """Anonymize sensitive data in a row."""
  # Simple anonymization - replace names with hashed versions
  anonymized = row.copy()
-
+
  # Fields to anonymize
- sensitive_fields = ['author_name', 'author_email', 'primary_name', 'primary_email']
-
+ sensitive_fields = ["author_name", "author_email", "primary_name", "primary_email"]
+
  for field in sensitive_fields:
  if field in anonymized and anonymized[field]:
  # Simple hash-based anonymization
  import hashlib
+
  hash_value = hashlib.md5(str(anonymized[field]).encode()).hexdigest()[:8]
  anonymized[field] = f"User_{hash_value}"
-
+
  return anonymized
-
+
  def _anonymize_data(self, data: Any) -> Any:
  """Anonymize data structure recursively."""
  if isinstance(data, list):
@@ -654,38 +678,38 @@ Total Analyzed: {len(confidence_scores):,} commits"""
  return {key: self._anonymize_data(value) for key, value in data.items()}
  else:
  return data
-
+
  def _show_identity_details(self) -> None:
  """Show detailed developer identity information."""
  self.notify("Identity details view not yet implemented", severity="info")
-
+
  def _show_pr_metrics(self) -> None:
  """Show detailed pull request metrics."""
  self.notify("PR metrics view not yet implemented", severity="info")
-
+
  def action_back(self) -> None:
  """Go back to main screen."""
  self.app.pop_screen()
-
+
  def action_export(self) -> None:
  """Show export options."""
  # Switch to export tab
  tabbed_content = self.query_one(TabbedContent)
  tabbed_content.active = "export"
-
+
  def action_filter(self) -> None:
  """Show filter options for current tab."""
  self.notify("Filtering functionality not yet implemented", severity="info")
-
+
  def action_refresh(self) -> None:
  """Refresh current view."""
  self.refresh()
-
+
  def action_export_current(self) -> None:
  """Export data from currently active tab."""
  tabbed_content = self.query_one(TabbedContent)
  current_tab = tabbed_content.active
-
+
  if current_tab == "developers":
  self._export_data("developers", "csv")
  elif current_tab == "commits":
@@ -695,4 +719,4 @@ Total Analyzed: {len(confidence_scores):,} commits"""
  elif current_tab == "qualitative":
  self._export_data("qualitative", "csv")
  else:
- self._export_data("summary", "csv")
+ self._export_data("summary", "csv")