gitflow-analytics 1.3.11__py3-none-any.whl → 3.3.0__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
Files changed (48)
  1. gitflow_analytics/_version.py +1 -1
  2. gitflow_analytics/classification/batch_classifier.py +156 -4
  3. gitflow_analytics/cli.py +803 -135
  4. gitflow_analytics/config/loader.py +39 -1
  5. gitflow_analytics/config/schema.py +1 -0
  6. gitflow_analytics/core/cache.py +20 -0
  7. gitflow_analytics/core/data_fetcher.py +1051 -117
  8. gitflow_analytics/core/git_auth.py +169 -0
  9. gitflow_analytics/core/git_timeout_wrapper.py +347 -0
  10. gitflow_analytics/core/metrics_storage.py +12 -3
  11. gitflow_analytics/core/progress.py +219 -18
  12. gitflow_analytics/core/subprocess_git.py +145 -0
  13. gitflow_analytics/extractors/ml_tickets.py +3 -2
  14. gitflow_analytics/extractors/tickets.py +93 -8
  15. gitflow_analytics/integrations/jira_integration.py +1 -1
  16. gitflow_analytics/integrations/orchestrator.py +47 -29
  17. gitflow_analytics/metrics/branch_health.py +3 -2
  18. gitflow_analytics/models/database.py +72 -1
  19. gitflow_analytics/pm_framework/adapters/jira_adapter.py +12 -5
  20. gitflow_analytics/pm_framework/orchestrator.py +8 -3
  21. gitflow_analytics/qualitative/classifiers/llm/openai_client.py +24 -4
  22. gitflow_analytics/qualitative/classifiers/llm_commit_classifier.py +3 -1
  23. gitflow_analytics/qualitative/core/llm_fallback.py +34 -2
  24. gitflow_analytics/reports/narrative_writer.py +118 -74
  25. gitflow_analytics/security/__init__.py +11 -0
  26. gitflow_analytics/security/config.py +189 -0
  27. gitflow_analytics/security/extractors/__init__.py +7 -0
  28. gitflow_analytics/security/extractors/dependency_checker.py +379 -0
  29. gitflow_analytics/security/extractors/secret_detector.py +197 -0
  30. gitflow_analytics/security/extractors/vulnerability_scanner.py +333 -0
  31. gitflow_analytics/security/llm_analyzer.py +347 -0
  32. gitflow_analytics/security/reports/__init__.py +5 -0
  33. gitflow_analytics/security/reports/security_report.py +358 -0
  34. gitflow_analytics/security/security_analyzer.py +414 -0
  35. gitflow_analytics/tui/app.py +3 -1
  36. gitflow_analytics/tui/progress_adapter.py +313 -0
  37. gitflow_analytics/tui/screens/analysis_progress_screen.py +407 -46
  38. gitflow_analytics/tui/screens/results_screen.py +219 -206
  39. gitflow_analytics/ui/__init__.py +21 -0
  40. gitflow_analytics/ui/progress_display.py +1477 -0
  41. gitflow_analytics/verify_activity.py +697 -0
  42. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/METADATA +2 -1
  43. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/RECORD +47 -31
  44. gitflow_analytics/cli_rich.py +0 -503
  45. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/WHEEL +0 -0
  46. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/entry_points.txt +0 -0
  47. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/licenses/LICENSE +0 -0
  48. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/top_level.txt +0 -0
gitflow_analytics/cli_rich.py (deleted)
@@ -1,503 +0,0 @@
-"""Rich CLI components for GitFlow Analytics with beautiful terminal output."""
-
-from pathlib import Path
-from typing import Any, Optional
-
-from rich import box
-from rich.console import Console
-from rich.live import Live
-from rich.panel import Panel
-from rich.progress import (
-    BarColumn,
-    MofNCompleteColumn,
-    Progress,
-    SpinnerColumn,
-    TaskID,
-    TextColumn,
-    TimeElapsedColumn,
-)
-from rich.table import Table
-from rich.tree import Tree
-
-from ._version import __version__
-
-
-class RichProgressDisplay:
-    """
-    Rich terminal display for GitFlow Analytics progress and results.
-
-    WHY: Provides a clean, structured interface that shows users exactly what's happening
-    during analysis without the complexity of a full TUI. Uses Rich library for
-    beautiful terminal output with progress bars, tables, and status indicators.
-
-    DESIGN DECISION: Chose to use Rich's Live display for real-time updates because:
-    - Allows multiple progress bars and status updates in a single view
-    - Provides structured layout with panels and tables
-    - Much simpler than TUI but still provides excellent user experience
-    - Works in any terminal that supports ANSI colors
-    """
-
-    def __init__(self, console: Optional[Console] = None) -> None:
-        """Initialize the Rich progress display."""
-        self.console = console or Console()
-        self.progress = Progress(
-            SpinnerColumn(),
-            TextColumn("[progress.description]{task.description}"),
-            BarColumn(),
-            MofNCompleteColumn(),
-            TimeElapsedColumn(),
-            console=self.console,
-        )
-        self.live: Optional[Live] = None
-        self._tasks: dict[str, TaskID] = {}
-
-    def show_header(self) -> None:
-        """Display the application header."""
-        header = Panel(
-            f"[bold blue]GitFlow Analytics v{__version__}[/bold blue]",
-            box=box.DOUBLE,
-            style="blue",
-        )
-        self.console.print(header)
-        self.console.print()
-
-    def show_configuration_status(
-        self,
-        config_path: Path,
-        github_org: Optional[str] = None,
-        github_token_valid: bool = False,
-        jira_configured: bool = False,
-        jira_valid: bool = False,
-        analysis_weeks: int = 12,
-    ) -> None:
-        """
-        Display configuration validation status.
-
-        WHY: Users need immediate feedback on whether their configuration is valid
-        before starting analysis. This prevents wasted time on invalid configs.
-        """
-        config_tree = Tree("[bold]Configuration Status[/bold]")
-
-        # Config file status
-        config_tree.add(f"[green]✓[/green] Config: {config_path}")
-
-        # GitHub configuration
-        if github_org:
-            github_status = "[green]✓[/green]" if github_token_valid else "[red]✗[/red]"
-            token_status = "Token: ✓" if github_token_valid else "Token: ✗"
-            config_tree.add(f"{github_status} GitHub: {github_org} ({token_status})")
-
-        # JIRA configuration
-        if jira_configured:
-            jira_status = "[green]✓[/green]" if jira_valid else "[red]✗[/red]"
-            cred_status = "Credentials: ✓" if jira_valid else "Credentials: ✗"
-            config_tree.add(f"{jira_status} JIRA: configured ({cred_status})")
-
-        # Analysis period
-        config_tree.add(f"Analysis Period: {analysis_weeks} weeks")
-
-        self.console.print(config_tree)
-        self.console.print()
-
-    def start_live_display(self) -> None:
-        """Start the live display for real-time updates."""
-        self.live = Live(self.progress, console=self.console, refresh_per_second=10)
-        self.live.start()
-
-    def stop_live_display(self) -> None:
-        """Stop the live display."""
-        if self.live:
-            self.live.stop()
-            self.live = None
-
-    def add_progress_task(self, name: str, description: str, total: int) -> None:
-        """Add a new progress task."""
-        task_id = self.progress.add_task(description, total=total)
-        self._tasks[name] = task_id
-
-    def update_progress_task(
-        self, name: str, advance: int = 1, description: Optional[str] = None
-    ) -> None:
-        """Update progress for a specific task."""
-        if name in self._tasks:
-            kwargs = {"advance": advance}
-            if description:
-                kwargs["description"] = description
-            self.progress.update(self._tasks[name], **kwargs)
-
-    def complete_progress_task(self, name: str, description: Optional[str] = None) -> None:
-        """Mark a progress task as complete."""
-        if name in self._tasks:
-            task = self.progress.tasks[self._tasks[name]]
-            remaining = task.total - task.completed if task.total else 0
-            if remaining > 0:
-                self.progress.update(self._tasks[name], advance=remaining)
-            if description:
-                self.progress.update(self._tasks[name], description=description)
-
-    def show_repository_discovery(self, repos: list[dict[str, Any]]) -> None:
-        """
-        Display repository discovery results.
-
-        WHY: Users need to see which repositories were discovered and their status
-        before analysis begins, especially with organization-based discovery.
-        """
-        if not repos:
-            return
-
-        self.console.print(f"[bold]Repository Discovery[/bold] - Found {len(repos)} repositories")
-
-        repo_tree = Tree("")
-        for repo in repos:
-            status = "[green]✓[/green]" if repo.get("exists", True) else "[red]✗[/red]"
-            name = repo.get("name", "unknown")
-            github_repo = repo.get("github_repo", "")
-            if github_repo:
-                repo_tree.add(f"{status} {name} ({github_repo})")
-            else:
-                repo_tree.add(f"{status} {name}")
-
-        self.console.print(repo_tree)
-        self.console.print()
-
-    def show_analysis_summary(
-        self,
-        total_commits: int,
-        total_prs: int,
-        active_developers: int,
-        ticket_coverage: float,
-        story_points: int,
-        qualitative_analyzed: int = 0,
-    ) -> None:
-        """
-        Display analysis results summary.
-
-        WHY: Provides users with key metrics at a glance after analysis completes.
-        Uses a structured table format for easy scanning of important numbers.
-        """
-        self.console.print()
-
-        summary_table = Table(title="[bold]Analysis Summary[/bold]", box=box.ROUNDED)
-        summary_table.add_column("Metric", style="cyan", width=20)
-        summary_table.add_column("Value", style="green", width=15)
-
-        summary_table.add_row("Total Commits", f"{total_commits:,}")
-        summary_table.add_row("Total PRs", f"{total_prs:,}")
-        summary_table.add_row("Active Developers", f"{active_developers:,}")
-        summary_table.add_row("Ticket Coverage", f"{ticket_coverage:.1f}%")
-        summary_table.add_row("Story Points", f"{story_points:,}")
-
-        if qualitative_analyzed > 0:
-            summary_table.add_row("Qualitative Analysis", f"{qualitative_analyzed:,} commits")
-
-        self.console.print(summary_table)
-
-    def show_dora_metrics(self, dora_metrics: dict[str, Any]) -> None:
-        """
-        Display DORA metrics in a structured format.
-
-        WHY: DORA metrics are key performance indicators that teams care about.
-        Displaying them prominently helps users understand their team's performance level.
-        """
-        if not dora_metrics:
-            return
-
-        self.console.print()
-
-        dora_table = Table(title="[bold]DORA Metrics[/bold]", box=box.ROUNDED)
-        dora_table.add_column("Metric", style="cyan", width=25)
-        dora_table.add_column("Value", style="yellow", width=20)
-
-        # Deployment frequency
-        df_category = dora_metrics.get("deployment_frequency", {}).get("category", "Unknown")
-        dora_table.add_row("Deployment Frequency", df_category)
-
-        # Lead time
-        lead_time = dora_metrics.get("lead_time_hours", 0)
-        dora_table.add_row("Lead Time", f"{lead_time:.1f} hours")
-
-        # Change failure rate
-        cfr = dora_metrics.get("change_failure_rate", 0)
-        dora_table.add_row("Change Failure Rate", f"{cfr:.1f}%")
-
-        # MTTR
-        mttr = dora_metrics.get("mttr_hours", 0)
-        dora_table.add_row("MTTR", f"{mttr:.1f} hours")
-
-        # Performance level
-        perf_level = dora_metrics.get("performance_level", "Unknown")
-        dora_table.add_row("Performance Level", f"[bold]{perf_level}[/bold]")
-
-        self.console.print(dora_table)
-
-    def show_qualitative_stats(self, qual_stats: dict[str, Any]) -> None:
-        """
-        Display qualitative analysis statistics.
-
-        WHY: Qualitative analysis can be expensive (time/cost), so users need
-        visibility into processing efficiency and costs incurred.
-        """
-        if not qual_stats:
-            return
-
-        processing_summary = qual_stats.get("processing_summary", {})
-        llm_stats = qual_stats.get("llm_statistics", {})
-
-        self.console.print()
-
-        qual_table = Table(title="[bold]Qualitative Analysis Stats[/bold]", box=box.ROUNDED)
-        qual_table.add_column("Metric", style="cyan", width=25)
-        qual_table.add_column("Value", style="magenta", width=20)
-
-        # Processing speed
-        commits_per_sec = processing_summary.get("commits_per_second", 0)
-        qual_table.add_row("Processing Speed", f"{commits_per_sec:.1f} commits/sec")
-
-        # Method breakdown
-        method_breakdown = processing_summary.get("method_breakdown", {})
-        cache_pct = method_breakdown.get("cache", 0)
-        nlp_pct = method_breakdown.get("nlp", 0)
-        llm_pct = method_breakdown.get("llm", 0)
-        qual_table.add_row("Cache Usage", f"{cache_pct:.1f}%")
-        qual_table.add_row("NLP Processing", f"{nlp_pct:.1f}%")
-        qual_table.add_row("LLM Processing", f"{llm_pct:.1f}%")
-
-        # LLM costs if available
-        if llm_stats.get("model_usage") == "available":
-            cost_tracking = llm_stats.get("cost_tracking", {})
-            total_cost = cost_tracking.get("total_cost", 0)
-            if total_cost > 0:
-                qual_table.add_row("LLM Cost", f"${total_cost:.4f}")
-
-        self.console.print(qual_table)
-
-    def show_llm_cost_summary(
-        self, cost_stats: dict[str, Any], identity_costs: Optional[dict[str, Any]] = None
-    ) -> None:
-        """
-        Display comprehensive LLM usage and cost summary.
-
-        WHY: LLM usage can be expensive and users need visibility into:
-        - Token consumption by component (identity analysis, qualitative analysis)
-        - Cost breakdown and budget utilization
-        - Optimization suggestions to reduce costs
-
-        DESIGN DECISION: Separate method from qualitative stats because:
-        - Cost tracking spans multiple components (identity + qualitative)
-        - Users need this summary even when qualitative analysis is disabled
-        - Provides actionable cost optimization suggestions
-
-        Args:
-            cost_stats: Cost statistics from qualitative analysis
-            identity_costs: Optional cost statistics from identity analysis
-        """
-        # Check if we have any cost data to display
-        has_qual_costs = cost_stats and cost_stats.get("total_cost", 0) > 0
-        has_identity_costs = identity_costs and identity_costs.get("total_cost", 0) > 0
-
-        if not (has_qual_costs or has_identity_costs):
-            return
-
-        self.console.print()
-
-        # Create main cost summary table
-        cost_table = Table(title="[bold]🤖 LLM Usage Summary[/bold]", box=box.ROUNDED)
-        cost_table.add_column("Component", style="cyan", width=20)
-        cost_table.add_column("Calls", style="yellow", width=8)
-        cost_table.add_column("Tokens", style="green", width=12)
-        cost_table.add_column("Cost", style="magenta", width=12)
-
-        total_calls = 0
-        total_tokens = 0
-        total_cost = 0.0
-        budget_info = {}
-
-        # Add identity analysis costs if available
-        if has_identity_costs:
-            identity_calls = identity_costs.get("total_calls", 0)
-            identity_tokens = identity_costs.get("total_tokens", 0)
-            identity_cost = identity_costs.get("total_cost", 0)
-
-            cost_table.add_row(
-                "Identity Analysis",
-                f"{identity_calls:,}",
-                f"{identity_tokens:,}",
-                f"${identity_cost:.4f}",
-            )
-
-            total_calls += identity_calls
-            total_tokens += identity_tokens
-            total_cost += identity_cost
-
-        # Add qualitative analysis costs if available
-        if has_qual_costs:
-            qual_calls = cost_stats.get("total_calls", 0)
-            qual_tokens = cost_stats.get("total_tokens", 0)
-            qual_cost = cost_stats.get("total_cost", 0)
-
-            cost_table.add_row(
-                "Qualitative Analysis", f"{qual_calls:,}", f"{qual_tokens:,}", f"${qual_cost:.4f}"
-            )

-            total_calls += qual_calls
-            total_tokens += qual_tokens
-            total_cost += qual_cost
-
-            # Extract budget information (assuming it's in qualitative cost stats)
-            budget_info = {
-                "daily_budget": cost_stats.get("daily_budget", 5.0),
-                "daily_spend": cost_stats.get("daily_spend", total_cost),
-                "remaining_budget": cost_stats.get("remaining_budget", 5.0 - total_cost),
-            }
-
-        # Add total row
-        if total_calls > 0:
-            cost_table.add_row(
-                "[bold]Total[/bold]",
-                f"[bold]{total_calls:,}[/bold]",
-                f"[bold]{total_tokens:,}[/bold]",
-                f"[bold]${total_cost:.4f}[/bold]",
-            )
-
-        self.console.print(cost_table)
-
-        # Display budget information if available
-        if budget_info:
-            daily_budget = budget_info.get("daily_budget", 5.0)
-            remaining = budget_info.get("remaining_budget", daily_budget - total_cost)
-            utilization = (total_cost / daily_budget) * 100 if daily_budget > 0 else 0
-
-            budget_text = f"Budget: ${daily_budget:.2f}, Remaining: ${remaining:.2f}, Utilization: {utilization:.1f}%"
-
-            # Color code based on utilization
-            if utilization >= 90:
-                budget_color = "red"
-            elif utilization >= 70:
-                budget_color = "yellow"
-            else:
-                budget_color = "green"
-
-            self.console.print(f" [{budget_color}]💰 {budget_text}[/{budget_color}]")
-
-        # Display cost optimization suggestions if we have detailed stats
-        suggestions = []
-        if has_qual_costs and "model_usage" in cost_stats:
-            model_usage = cost_stats.get("model_usage", {})
-
-            # Check for expensive model usage
-            expensive_models = ["anthropic/claude-3-opus", "openai/gpt-4"]
-            expensive_cost = sum(
-                model_usage.get(model, {}).get("cost", 0) for model in expensive_models
-            )
-
-            if expensive_cost > total_cost * 0.3:
-                suggestions.append(
-                    "Consider using cheaper models (Claude Haiku, GPT-3.5) for routine tasks (save ~40%)"
-                )
-
-            # Check for free model opportunities
-            free_usage = model_usage.get("meta-llama/llama-3.1-8b-instruct:free", {}).get(
-                "cost", -1
-            )
-            if free_usage == 0 and total_cost > 0.01:  # If free models available but not used much
-                suggestions.append(
-                    "Increase usage of free Llama models for simple classification tasks"
-                )
-
-        # Budget-based suggestions
-        if budget_info:
-            utilization = (total_cost / budget_info.get("daily_budget", 5.0)) * 100
-            if utilization > 80:
-                suggestions.append(
-                    "Approaching daily budget limit - consider increasing NLP confidence threshold"
-                )
-
-        # Display suggestions
-        if suggestions:
-            self.console.print()
-            self.console.print("[bold]💡 Cost Optimization Suggestions:[/bold]")
-            for suggestion in suggestions:
-                self.console.print(f" • {suggestion}")
-
-        self.console.print()
-
-    def show_reports_generated(self, output_dir: Path, report_files: list[str]) -> None:
-        """
-        Display generated reports with file paths.
-
-        WHY: Users need to know where their reports were saved and what files
-        were generated. This provides clear next steps after analysis completes.
-        """
-        if not report_files:
-            return
-
-        self.console.print()
-
-        reports_panel = Panel(
-            f"[bold green]✓[/bold green] Reports exported to: [cyan]{output_dir}[/cyan]",
-            title="[bold]Generated Reports[/bold]",
-            box=box.ROUNDED,
-        )
-        self.console.print(reports_panel)
-
-        # List individual report files
-        for report_file in report_files:
-            self.console.print(f" • {report_file}")
-
-    def show_error(self, error_message: str, show_debug_hint: bool = True) -> None:
-        """
-        Display error messages in a prominent format.
-
-        WHY: Errors need to be clearly visible and actionable. The panel format
-        makes them stand out while providing helpful guidance.
-        """
-        error_panel = Panel(
-            f"[red]{error_message}[/red]",
-            title="[bold red]Error[/bold red]",
-            box=box.HEAVY,
-        )
-        self.console.print(error_panel)
-
-        if show_debug_hint:
-            self.console.print("\n[dim]💡 Run with --debug for detailed error information[/dim]")
-
-    def show_warning(self, warning_message: str) -> None:
-        """Display warning messages."""
-        warning_panel = Panel(
-            f"[yellow]{warning_message}[/yellow]",
-            title="[bold yellow]Warning[/bold yellow]",
-            box=box.ROUNDED,
-        )
-        self.console.print(warning_panel)
-
-    def print(self, *args: Any, **kwargs: Any) -> None:
-        """Delegate print calls to the console."""
-        self.console.print(*args, **kwargs)
-
-    def print_status(self, message: str, status: str = "info") -> None:
-        """
-        Print a status message with appropriate styling.
-
-        WHY: Provides consistent status messaging throughout the analysis process.
-        Different status types (info, success, warning, error) get appropriate styling.
-        """
-        if status == "success":
-            self.console.print(f"[green]✓[/green] {message}")
-        elif status == "warning":
-            self.console.print(f"[yellow]⚠[/yellow] {message}")
-        elif status == "error":
-            self.console.print(f"[red]✗[/red] {message}")
-        elif status == "info":
-            self.console.print(f"[blue]ℹ[/blue] {message}")
-        else:
-            self.console.print(message)
-
-
-def create_rich_display() -> RichProgressDisplay:
-    """
-    Factory function to create a Rich progress display.
-
-    WHY: Centralizes the creation of the display component and ensures
-    consistent configuration across the application.
-    """
-    return RichProgressDisplay()
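
For readers skimming the removed module above, the sketch below shows one plausible way the RichProgressDisplay API could have been driven end to end. It uses only the method signatures visible in the deleted cli_rich.py and is illustrative, not taken from the package's actual CLI wiring: the import path applies to 1.3.11 and earlier only (3.3.0 moves terminal output to gitflow_analytics/ui/progress_display.py, per the file list above), and the organization, repository names, and metric values are hypothetical.

# Illustrative sketch against the removed 1.3.11 API shown above; values are made up.
from pathlib import Path

from gitflow_analytics.cli_rich import create_rich_display  # module removed in 3.3.0

display = create_rich_display()
display.show_header()
display.show_configuration_status(
    config_path=Path("config.yaml"),  # hypothetical config location
    github_org="example-org",         # hypothetical organization
    github_token_valid=True,
    analysis_weeks=12,
)

# The Live display wraps the shared Progress instance; tasks are keyed by name.
display.start_live_display()
try:
    display.add_progress_task("repos", "Analyzing repositories", total=3)
    for repo in ("api", "web", "infra"):  # hypothetical repository names
        display.update_progress_task("repos", advance=1, description=f"Analyzing {repo}")
    display.complete_progress_task("repos", description="Repositories analyzed")
finally:
    display.stop_live_display()

display.show_analysis_summary(
    total_commits=1200,
    total_prs=85,
    active_developers=9,
    ticket_coverage=72.5,
    story_points=340,
)
display.print_status("Analysis complete", status="success")

The try/finally mirrors the start_live_display/stop_live_display pairing the class exposes, so the terminal state is restored even if analysis raises partway through.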