gitflow-analytics 1.0.1-py3-none-any.whl → 1.0.3-py3-none-any.whl
This diff shows the content of publicly released package versions from one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registry.
- gitflow_analytics/__init__.py +11 -11
- gitflow_analytics/_version.py +2 -2
- gitflow_analytics/cli.py +612 -258
- gitflow_analytics/cli_rich.py +353 -0
- gitflow_analytics/config.py +251 -141
- gitflow_analytics/core/analyzer.py +140 -103
- gitflow_analytics/core/branch_mapper.py +132 -132
- gitflow_analytics/core/cache.py +240 -169
- gitflow_analytics/core/identity.py +210 -173
- gitflow_analytics/extractors/base.py +13 -11
- gitflow_analytics/extractors/story_points.py +70 -59
- gitflow_analytics/extractors/tickets.py +101 -87
- gitflow_analytics/integrations/github_integration.py +84 -77
- gitflow_analytics/integrations/jira_integration.py +116 -104
- gitflow_analytics/integrations/orchestrator.py +86 -85
- gitflow_analytics/metrics/dora.py +181 -177
- gitflow_analytics/models/database.py +190 -53
- gitflow_analytics/qualitative/__init__.py +30 -0
- gitflow_analytics/qualitative/classifiers/__init__.py +13 -0
- gitflow_analytics/qualitative/classifiers/change_type.py +468 -0
- gitflow_analytics/qualitative/classifiers/domain_classifier.py +399 -0
- gitflow_analytics/qualitative/classifiers/intent_analyzer.py +436 -0
- gitflow_analytics/qualitative/classifiers/risk_analyzer.py +412 -0
- gitflow_analytics/qualitative/core/__init__.py +13 -0
- gitflow_analytics/qualitative/core/llm_fallback.py +653 -0
- gitflow_analytics/qualitative/core/nlp_engine.py +373 -0
- gitflow_analytics/qualitative/core/pattern_cache.py +457 -0
- gitflow_analytics/qualitative/core/processor.py +540 -0
- gitflow_analytics/qualitative/models/__init__.py +25 -0
- gitflow_analytics/qualitative/models/schemas.py +272 -0
- gitflow_analytics/qualitative/utils/__init__.py +13 -0
- gitflow_analytics/qualitative/utils/batch_processor.py +326 -0
- gitflow_analytics/qualitative/utils/cost_tracker.py +343 -0
- gitflow_analytics/qualitative/utils/metrics.py +347 -0
- gitflow_analytics/qualitative/utils/text_processing.py +243 -0
- gitflow_analytics/reports/analytics_writer.py +11 -4
- gitflow_analytics/reports/csv_writer.py +51 -31
- gitflow_analytics/reports/narrative_writer.py +16 -14
- gitflow_analytics/tui/__init__.py +5 -0
- gitflow_analytics/tui/app.py +721 -0
- gitflow_analytics/tui/screens/__init__.py +8 -0
- gitflow_analytics/tui/screens/analysis_progress_screen.py +487 -0
- gitflow_analytics/tui/screens/configuration_screen.py +547 -0
- gitflow_analytics/tui/screens/loading_screen.py +358 -0
- gitflow_analytics/tui/screens/main_screen.py +304 -0
- gitflow_analytics/tui/screens/results_screen.py +698 -0
- gitflow_analytics/tui/widgets/__init__.py +7 -0
- gitflow_analytics/tui/widgets/data_table.py +257 -0
- gitflow_analytics/tui/widgets/export_modal.py +301 -0
- gitflow_analytics/tui/widgets/progress_widget.py +192 -0
- {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/METADATA +31 -4
- gitflow_analytics-1.0.3.dist-info/RECORD +62 -0
- gitflow_analytics-1.0.1.dist-info/RECORD +0 -31
- {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/WHEEL +0 -0
- {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/entry_points.txt +0 -0
- {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/licenses/LICENSE +0 -0
- {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/top_level.txt +0 -0
gitflow_analytics/cli_rich.py (new file)
@@ -0,0 +1,353 @@
+"""Rich CLI components for GitFlow Analytics with beautiful terminal output."""
+
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from rich import box
+from rich.console import Console
+from rich.live import Live
+from rich.panel import Panel
+from rich.progress import (
+    BarColumn,
+    MofNCompleteColumn,
+    Progress,
+    SpinnerColumn,
+    TaskID,
+    TextColumn,
+    TimeElapsedColumn,
+)
+from rich.table import Table
+from rich.text import Text
+from rich.tree import Tree
+
+from ._version import __version__
+
+
+class RichProgressDisplay:
+    """
+    Rich terminal display for GitFlow Analytics progress and results.
+
+    WHY: Provides a clean, structured interface that shows users exactly what's happening
+    during analysis without the complexity of a full TUI. Uses Rich library for
+    beautiful terminal output with progress bars, tables, and status indicators.
+
+    DESIGN DECISION: Chose to use Rich's Live display for real-time updates because:
+    - Allows multiple progress bars and status updates in a single view
+    - Provides structured layout with panels and tables
+    - Much simpler than TUI but still provides excellent user experience
+    - Works in any terminal that supports ANSI colors
+    """
+
+    def __init__(self, console: Optional[Console] = None) -> None:
+        """Initialize the Rich progress display."""
+        self.console = console or Console()
+        self.progress = Progress(
+            SpinnerColumn(),
+            TextColumn("[progress.description]{task.description}"),
+            BarColumn(),
+            MofNCompleteColumn(),
+            TimeElapsedColumn(),
+            console=self.console,
+        )
+        self.live: Optional[Live] = None
+        self._tasks: Dict[str, TaskID] = {}
+
+    def show_header(self) -> None:
+        """Display the application header."""
+        header = Panel(
+            f"[bold blue]GitFlow Analytics v{__version__}[/bold blue]",
+            box=box.DOUBLE,
+            style="blue",
+        )
+        self.console.print(header)
+        self.console.print()
+
+    def show_configuration_status(
+        self,
+        config_path: Path,
+        github_org: Optional[str] = None,
+        github_token_valid: bool = False,
+        jira_configured: bool = False,
+        jira_valid: bool = False,
+        analysis_weeks: int = 12,
+    ) -> None:
+        """
+        Display configuration validation status.
+
+        WHY: Users need immediate feedback on whether their configuration is valid
+        before starting analysis. This prevents wasted time on invalid configs.
+        """
+        config_tree = Tree("[bold]Configuration Status[/bold]")
+
+        # Config file status
+        config_tree.add(f"[green]✓[/green] Config: {config_path}")
+
+        # GitHub configuration
+        if github_org:
+            github_status = "[green]✓[/green]" if github_token_valid else "[red]✗[/red]"
+            token_status = "Token: ✓" if github_token_valid else "Token: ✗"
+            config_tree.add(f"{github_status} GitHub: {github_org} ({token_status})")
+
+        # JIRA configuration
+        if jira_configured:
+            jira_status = "[green]✓[/green]" if jira_valid else "[red]✗[/red]"
+            cred_status = "Credentials: ✓" if jira_valid else "Credentials: ✗"
+            config_tree.add(f"{jira_status} JIRA: configured ({cred_status})")
+
+        # Analysis period
+        config_tree.add(f"Analysis Period: {analysis_weeks} weeks")
+
+        self.console.print(config_tree)
+        self.console.print()
+
+    def start_live_display(self) -> None:
+        """Start the live display for real-time updates."""
+        self.live = Live(self.progress, console=self.console, refresh_per_second=10)
+        self.live.start()
+
+    def stop_live_display(self) -> None:
+        """Stop the live display."""
+        if self.live:
+            self.live.stop()
+            self.live = None
+
+    def add_progress_task(self, name: str, description: str, total: int) -> None:
+        """Add a new progress task."""
+        task_id = self.progress.add_task(description, total=total)
+        self._tasks[name] = task_id
+
+    def update_progress_task(self, name: str, advance: int = 1, description: Optional[str] = None) -> None:
+        """Update progress for a specific task."""
+        if name in self._tasks:
+            kwargs = {"advance": advance}
+            if description:
+                kwargs["description"] = description
+            self.progress.update(self._tasks[name], **kwargs)
+
+    def complete_progress_task(self, name: str, description: Optional[str] = None) -> None:
+        """Mark a progress task as complete."""
+        if name in self._tasks:
+            task = self.progress.tasks[self._tasks[name]]
+            remaining = task.total - task.completed if task.total else 0
+            if remaining > 0:
+                self.progress.update(self._tasks[name], advance=remaining)
+            if description:
+                self.progress.update(self._tasks[name], description=description)
+
+    def show_repository_discovery(self, repos: List[Dict[str, Any]]) -> None:
+        """
+        Display repository discovery results.
+
+        WHY: Users need to see which repositories were discovered and their status
+        before analysis begins, especially with organization-based discovery.
+        """
+        if not repos:
+            return
+
+        self.console.print(f"[bold]Repository Discovery[/bold] - Found {len(repos)} repositories")
+
+        repo_tree = Tree("")
+        for repo in repos:
+            status = "[green]✓[/green]" if repo.get("exists", True) else "[red]✗[/red]"
+            name = repo.get("name", "unknown")
+            github_repo = repo.get("github_repo", "")
+            if github_repo:
+                repo_tree.add(f"{status} {name} ({github_repo})")
+            else:
+                repo_tree.add(f"{status} {name}")
+
+        self.console.print(repo_tree)
+        self.console.print()
+
+    def show_analysis_summary(
+        self,
+        total_commits: int,
+        total_prs: int,
+        active_developers: int,
+        ticket_coverage: float,
+        story_points: int,
+        qualitative_analyzed: int = 0,
+    ) -> None:
+        """
+        Display analysis results summary.
+
+        WHY: Provides users with key metrics at a glance after analysis completes.
+        Uses a structured table format for easy scanning of important numbers.
+        """
+        self.console.print()
+
+        summary_table = Table(title="[bold]Analysis Summary[/bold]", box=box.ROUNDED)
+        summary_table.add_column("Metric", style="cyan", width=20)
+        summary_table.add_column("Value", style="green", width=15)
+
+        summary_table.add_row("Total Commits", f"{total_commits:,}")
+        summary_table.add_row("Total PRs", f"{total_prs:,}")
+        summary_table.add_row("Active Developers", f"{active_developers:,}")
+        summary_table.add_row("Ticket Coverage", f"{ticket_coverage:.1f}%")
+        summary_table.add_row("Story Points", f"{story_points:,}")
+
+        if qualitative_analyzed > 0:
+            summary_table.add_row("Qualitative Analysis", f"{qualitative_analyzed:,} commits")
+
+        self.console.print(summary_table)
+
+    def show_dora_metrics(self, dora_metrics: Dict[str, Any]) -> None:
+        """
+        Display DORA metrics in a structured format.
+
+        WHY: DORA metrics are key performance indicators that teams care about.
+        Displaying them prominently helps users understand their team's performance level.
+        """
+        if not dora_metrics:
+            return
+
+        self.console.print()
+
+        dora_table = Table(title="[bold]DORA Metrics[/bold]", box=box.ROUNDED)
+        dora_table.add_column("Metric", style="cyan", width=25)
+        dora_table.add_column("Value", style="yellow", width=20)
+
+        # Deployment frequency
+        df_category = dora_metrics.get("deployment_frequency", {}).get("category", "Unknown")
+        dora_table.add_row("Deployment Frequency", df_category)
+
+        # Lead time
+        lead_time = dora_metrics.get("lead_time_hours", 0)
+        dora_table.add_row("Lead Time", f"{lead_time:.1f} hours")
+
+        # Change failure rate
+        cfr = dora_metrics.get("change_failure_rate", 0)
+        dora_table.add_row("Change Failure Rate", f"{cfr:.1f}%")
+
+        # MTTR
+        mttr = dora_metrics.get("mttr_hours", 0)
+        dora_table.add_row("MTTR", f"{mttr:.1f} hours")
+
+        # Performance level
+        perf_level = dora_metrics.get("performance_level", "Unknown")
+        dora_table.add_row("Performance Level", f"[bold]{perf_level}[/bold]")
+
+        self.console.print(dora_table)
+
+    def show_qualitative_stats(self, qual_stats: Dict[str, Any]) -> None:
+        """
+        Display qualitative analysis statistics.
+
+        WHY: Qualitative analysis can be expensive (time/cost), so users need
+        visibility into processing efficiency and costs incurred.
+        """
+        if not qual_stats:
+            return
+
+        processing_summary = qual_stats.get("processing_summary", {})
+        llm_stats = qual_stats.get("llm_statistics", {})
+
+        self.console.print()
+
+        qual_table = Table(title="[bold]Qualitative Analysis Stats[/bold]", box=box.ROUNDED)
+        qual_table.add_column("Metric", style="cyan", width=25)
+        qual_table.add_column("Value", style="magenta", width=20)
+
+        # Processing speed
+        commits_per_sec = processing_summary.get("commits_per_second", 0)
+        qual_table.add_row("Processing Speed", f"{commits_per_sec:.1f} commits/sec")
+
+        # Method breakdown
+        method_breakdown = processing_summary.get("method_breakdown", {})
+        cache_pct = method_breakdown.get("cache", 0)
+        nlp_pct = method_breakdown.get("nlp", 0)
+        llm_pct = method_breakdown.get("llm", 0)
+        qual_table.add_row("Cache Usage", f"{cache_pct:.1f}%")
+        qual_table.add_row("NLP Processing", f"{nlp_pct:.1f}%")
+        qual_table.add_row("LLM Processing", f"{llm_pct:.1f}%")
+
+        # LLM costs if available
+        if llm_stats.get("model_usage") == "available":
+            cost_tracking = llm_stats.get("cost_tracking", {})
+            total_cost = cost_tracking.get("total_cost", 0)
+            if total_cost > 0:
+                qual_table.add_row("LLM Cost", f"${total_cost:.4f}")
+
+        self.console.print(qual_table)
+
+    def show_reports_generated(self, output_dir: Path, report_files: List[str]) -> None:
+        """
+        Display generated reports with file paths.
+
+        WHY: Users need to know where their reports were saved and what files
+        were generated. This provides clear next steps after analysis completes.
+        """
+        if not report_files:
+            return
+
+        self.console.print()
+
+        reports_panel = Panel(
+            f"[bold green]✓[/bold green] Reports exported to: [cyan]{output_dir}[/cyan]",
+            title="[bold]Generated Reports[/bold]",
+            box=box.ROUNDED,
+        )
+        self.console.print(reports_panel)
+
+        # List individual report files
+        for report_file in report_files:
+            self.console.print(f" • {report_file}")
+
+    def show_error(self, error_message: str, show_debug_hint: bool = True) -> None:
+        """
+        Display error messages in a prominent format.
+
+        WHY: Errors need to be clearly visible and actionable. The panel format
+        makes them stand out while providing helpful guidance.
+        """
+        error_panel = Panel(
+            f"[red]{error_message}[/red]",
+            title="[bold red]Error[/bold red]",
+            box=box.HEAVY,
+        )
+        self.console.print(error_panel)
+
+        if show_debug_hint:
+            self.console.print("\n[dim]💡 Run with --debug for detailed error information[/dim]")
+
+    def show_warning(self, warning_message: str) -> None:
+        """Display warning messages."""
+        warning_panel = Panel(
+            f"[yellow]{warning_message}[/yellow]",
+            title="[bold yellow]Warning[/bold yellow]",
+            box=box.ROUNDED,
+        )
+        self.console.print(warning_panel)
+
+    def print(self, *args: Any, **kwargs: Any) -> None:
+        """Delegate print calls to the console."""
+        self.console.print(*args, **kwargs)
+
+    def print_status(self, message: str, status: str = "info") -> None:
+        """
+        Print a status message with appropriate styling.
+
+        WHY: Provides consistent status messaging throughout the analysis process.
+        Different status types (info, success, warning, error) get appropriate styling.
+        """
+        if status == "success":
+            self.console.print(f"[green]✓[/green] {message}")
+        elif status == "warning":
+            self.console.print(f"[yellow]⚠[/yellow] {message}")
+        elif status == "error":
+            self.console.print(f"[red]✗[/red] {message}")
+        elif status == "info":
+            self.console.print(f"[blue]ℹ[/blue] {message}")
+        else:
+            self.console.print(message)
+
+
+def create_rich_display() -> RichProgressDisplay:
+    """
+    Factory function to create a Rich progress display.
+
+    WHY: Centralizes the creation of the display component and ensures
+    consistent configuration across the application.
+    """
+    return RichProgressDisplay()