gitflow-analytics 1.3.11__py3-none-any.whl → 3.3.0__py3-none-any.whl

This diff shows the content changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (48)
  1. gitflow_analytics/_version.py +1 -1
  2. gitflow_analytics/classification/batch_classifier.py +156 -4
  3. gitflow_analytics/cli.py +803 -135
  4. gitflow_analytics/config/loader.py +39 -1
  5. gitflow_analytics/config/schema.py +1 -0
  6. gitflow_analytics/core/cache.py +20 -0
  7. gitflow_analytics/core/data_fetcher.py +1051 -117
  8. gitflow_analytics/core/git_auth.py +169 -0
  9. gitflow_analytics/core/git_timeout_wrapper.py +347 -0
  10. gitflow_analytics/core/metrics_storage.py +12 -3
  11. gitflow_analytics/core/progress.py +219 -18
  12. gitflow_analytics/core/subprocess_git.py +145 -0
  13. gitflow_analytics/extractors/ml_tickets.py +3 -2
  14. gitflow_analytics/extractors/tickets.py +93 -8
  15. gitflow_analytics/integrations/jira_integration.py +1 -1
  16. gitflow_analytics/integrations/orchestrator.py +47 -29
  17. gitflow_analytics/metrics/branch_health.py +3 -2
  18. gitflow_analytics/models/database.py +72 -1
  19. gitflow_analytics/pm_framework/adapters/jira_adapter.py +12 -5
  20. gitflow_analytics/pm_framework/orchestrator.py +8 -3
  21. gitflow_analytics/qualitative/classifiers/llm/openai_client.py +24 -4
  22. gitflow_analytics/qualitative/classifiers/llm_commit_classifier.py +3 -1
  23. gitflow_analytics/qualitative/core/llm_fallback.py +34 -2
  24. gitflow_analytics/reports/narrative_writer.py +118 -74
  25. gitflow_analytics/security/__init__.py +11 -0
  26. gitflow_analytics/security/config.py +189 -0
  27. gitflow_analytics/security/extractors/__init__.py +7 -0
  28. gitflow_analytics/security/extractors/dependency_checker.py +379 -0
  29. gitflow_analytics/security/extractors/secret_detector.py +197 -0
  30. gitflow_analytics/security/extractors/vulnerability_scanner.py +333 -0
  31. gitflow_analytics/security/llm_analyzer.py +347 -0
  32. gitflow_analytics/security/reports/__init__.py +5 -0
  33. gitflow_analytics/security/reports/security_report.py +358 -0
  34. gitflow_analytics/security/security_analyzer.py +414 -0
  35. gitflow_analytics/tui/app.py +3 -1
  36. gitflow_analytics/tui/progress_adapter.py +313 -0
  37. gitflow_analytics/tui/screens/analysis_progress_screen.py +407 -46
  38. gitflow_analytics/tui/screens/results_screen.py +219 -206
  39. gitflow_analytics/ui/__init__.py +21 -0
  40. gitflow_analytics/ui/progress_display.py +1477 -0
  41. gitflow_analytics/verify_activity.py +697 -0
  42. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/METADATA +2 -1
  43. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/RECORD +47 -31
  44. gitflow_analytics/cli_rich.py +0 -503
  45. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/WHEEL +0 -0
  46. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/entry_points.txt +0 -0
  47. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/licenses/LICENSE +0 -0
  48. {gitflow_analytics-1.3.11.dist-info → gitflow_analytics-3.3.0.dist-info}/top_level.txt +0 -0
gitflow_analytics/cli.py CHANGED
@@ -18,10 +18,10 @@ import pandas as pd
 import yaml
 
 from ._version import __version__
-from .cli_rich import create_rich_display
 from .config import ConfigLoader
 from .core.analyzer import GitAnalyzer
 from .core.cache import GitAnalysisCache
+from .core.git_auth import preflight_git_authentication
 from .core.identity import DeveloperIdentityResolver
 from .integrations.orchestrator import IntegrationOrchestrator
 from .metrics.dora import DORAMetricsCalculator
@@ -31,6 +31,7 @@ from .reports.json_exporter import ComprehensiveJSONExporter
 from .reports.narrative_writer import NarrativeReportGenerator
 from .reports.weekly_trends_writer import WeeklyTrendsWriter
 from .training.pipeline import CommitClassificationTrainer
+from .ui.progress_display import create_progress_display
 
 
 class RichHelpFormatter:
@@ -243,55 +244,50 @@ class ImprovedErrorHandler:
         click.echo("\nFor help: gitflow-analytics help", err=True)
 
 
-class AnalyzeAsDefaultGroup(click.Group):
+class TUIAsDefaultGroup(click.Group):
     """
-    Custom Click group that treats unrecognized options as analyze command arguments.
-    This allows 'gitflow-analytics -c config.yaml' to work like 'gitflow-analytics analyze -c config.yaml'
+    Custom Click group that defaults to TUI when no explicit subcommand is provided.
+    This allows 'gitflow-analytics -c config.yaml' to launch the TUI by default.
+    For explicit CLI analysis, use 'gitflow-analytics analyze -c config.yaml'
     """
 
     def parse_args(self, ctx, args):
-        """Override parse_args to redirect unrecognized patterns to analyze command."""
+        """Override parse_args to default to TUI unless explicit subcommand or CLI-only options."""
         # Check if the first argument is a known subcommand
         if args and args[0] in self.list_commands(ctx):
            return super().parse_args(ctx, args)
 
-        # Check for global options that should NOT be routed to analyze
+        # Check for global options that should NOT be routed to TUI
         global_options = {"--version", "--help", "-h"}
         if args and args[0] in global_options:
             return super().parse_args(ctx, args)
 
-        # Check if we have arguments that look like analyze options
-        analyze_indicators = {
-            "-c",
-            "--config",
-            "-w",
-            "--weeks",
-            "--output",
-            "-o",
-            "--anonymize",
-            "--no-cache",
+        # Check if we have arguments that indicate explicit CLI analysis request
+        cli_only_indicators = {
+            "--no-rich",
+            "--generate-csv",
             "--validate-only",
-            "--clear-cache",
-            "--enable-qualitative",
-            "--qualitative-only",
-            "--enable-pm",
-            "--pm-platform",
-            "--disable-pm",
-            "--rich",
-            "--log",
-            "--skip-identity-analysis",
-            "--apply-identity-suggestions",
             "--warm-cache",
             "--validate-cache",
-            "--generate-csv",
+            "--qualitative-only",
         }
 
-        # If first arg starts with - and looks like an analyze option, prepend 'analyze'
+        # If user explicitly requests CLI-only features, route to analyze
+        if args and any(arg in cli_only_indicators for arg in args):
+            new_args = ["analyze"] + args
+            return super().parse_args(ctx, new_args)
+
+        # For all other cases (including -c config.yaml), default to TUI
         if args and args[0].startswith("-"):
-            # Check if any of the args are analyze indicators
-            has_analyze_indicators = any(arg in analyze_indicators for arg in args)
-            if has_analyze_indicators:
-                # This looks like it should be an analyze command
+            # Check if TUI dependencies are available
+            try:
+                import textual
+
+                # TUI is available - route to TUI
+                new_args = ["tui"] + args
+                return super().parse_args(ctx, new_args)
+            except ImportError:
+                # TUI not available - fallback to analyze
                 new_args = ["analyze"] + args
                 return super().parse_args(ctx, new_args)
@@ -299,7 +295,7 @@ class AnalyzeAsDefaultGroup(click.Group):
         return super().parse_args(ctx, args)
 
 
-@click.group(cls=AnalyzeAsDefaultGroup, invoke_without_command=True)
+@click.group(cls=TUIAsDefaultGroup, invoke_without_command=True)
 @click.version_option(version=__version__, prog_name="GitFlow Analytics")
 @click.help_option("-h", "--help")
 @click.pass_context
@@ -342,8 +338,117 @@ def cli(ctx: click.Context) -> None:
         ctx.exit(0)
 
 
-# TUI command removed - replaced with rich CLI output
-# Legacy TUI code preserved but not exposed
+# TUI command - Terminal User Interface
+@cli.command(name="tui")
+@click.option(
+    "--config",
+    "-c",
+    type=click.Path(exists=True, path_type=Path),
+    default=None,
+    help="Path to YAML configuration file (optional - can be loaded in TUI)",
+)
+@click.option(
+    "--weeks",
+    "-w",
+    type=int,
+    default=None,
+    help="Number of weeks to analyze (passed to TUI)",
+)
+@click.option(
+    "--clear-cache",
+    is_flag=True,
+    help="Clear cache before analysis (passed to TUI)",
+)
+@click.option(
+    "--output",
+    "-o",
+    type=click.Path(path_type=Path),
+    default=None,
+    help="Output directory for reports (passed to TUI)",
+)
+def tui_command(
+    config: Optional[Path],
+    weeks: Optional[int],
+    clear_cache: bool,
+    output: Optional[Path],
+) -> None:
+    """Launch the Terminal User Interface for GitFlow Analytics.
+
+    \b
+    The TUI provides an interactive interface for:
+    - Loading and editing configuration files
+    - Running analysis with real-time progress updates
+    - Viewing results in an organized, navigable format
+    - Exporting reports in various formats
+
+    \b
+    FEATURES:
+    • Full-screen terminal interface with keyboard navigation
+    • Real-time progress tracking during analysis
+    • Interactive configuration management
+    • Results browser with filtering and export options
+    • Built-in help system and keyboard shortcuts
+
+    \b
+    EXAMPLES:
+        # Launch TUI without pre-loading configuration
+        gitflow-analytics tui
+
+        # Launch TUI with a specific configuration file
+        gitflow-analytics tui -c config.yaml
+
+    \b
+    KEYBOARD SHORTCUTS:
+    • Ctrl+Q / Ctrl+C: Quit application
+    • F1: Show help
+    • Ctrl+D: Toggle dark/light mode
+    • Escape: Go back/cancel current action
+    """
+    try:
+        # Import TUI components only when needed
+        from .tui.app import GitFlowAnalyticsApp
+
+        # Create and run the TUI application
+        app = GitFlowAnalyticsApp()
+
+        # Pass CLI parameters to TUI
+        if weeks is not None:
+            app.default_weeks = weeks
+        if clear_cache:
+            app.clear_cache_on_start = True
+        if output:
+            app.default_output_dir = output
+
+        # If config path provided, try to load it
+        if config:
+            try:
+                from .config import ConfigLoader
+
+                loaded_config = ConfigLoader.load(config)
+                app.config = loaded_config
+                app.config_path = config
+                app.initialization_complete = True
+            except Exception as e:
+                # Don't fail - let TUI handle config loading
+                click.echo(f"⚠️ Could not pre-load config: {e}")
+                click.echo("   You can load the configuration within the TUI.")
+
+        # Run the TUI
+        app.run()
+
+    except ImportError as e:
+        click.echo("❌ TUI dependencies not installed.", err=True)
+        click.echo(f"   Error: {e}", err=True)
+        click.echo("\n💡 Install TUI dependencies:", err=True)
+        click.echo("   pip install 'gitflow-analytics[tui]'", err=True)
+        click.echo("   # or", err=True)
+        click.echo("   pip install textual>=0.41.0", err=True)
+        sys.exit(1)
+    except Exception as e:
+        click.echo(f"❌ Failed to launch TUI: {e}", err=True)
+        if "--debug" in sys.argv:
+            raise
+        sys.exit(1)
 
 
 @cli.command(name="analyze")
@@ -390,7 +495,10 @@ def cli(ctx: click.Context) -> None:
     "--disable-pm", is_flag=True, help="Disable PM platform integration (overrides config setting)"
 )
 @click.option(
-    "--rich", is_flag=True, default=True, help="Use rich terminal output (default: enabled)"
+    "--no-rich",
+    is_flag=True,
+    default=True,
+    help="Disable rich terminal output (simple output is default to prevent TUI hanging)",
 )
 @click.option(
     "--log",
@@ -421,6 +529,17 @@ def cli(ctx: click.Context) -> None:
 @click.option(
     "--force-fetch", is_flag=True, help="Force fetch fresh data even if cached data exists"
 )
+@click.option(
+    "--progress-style",
+    type=click.Choice(["rich", "simple", "auto"], case_sensitive=False),
+    default="simple",
+    help="Progress display style: rich (beautiful terminal UI), simple (tqdm), auto (detect)",
+)
+@click.option(
+    "--security-only",
+    is_flag=True,
+    help="Run only security analysis (skip productivity metrics)",
+)
 def analyze_subcommand(
     config: Path,
     weeks: int,
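Click resolves the two new options exactly as declared above; a quick check with Click's test runner (the `demo` command is a stand-in for the real `analyze`):

```python
import click
from click.testing import CliRunner


@click.command()
@click.option(
    "--progress-style",
    type=click.Choice(["rich", "simple", "auto"], case_sensitive=False),
    default="simple",
)
@click.option("--security-only", is_flag=True)
def demo(progress_style: str, security_only: bool) -> None:
    click.echo(f"style={progress_style} security_only={security_only}")


# case_sensitive=False normalizes "RICH" back to the declared choice "rich"
result = CliRunner().invoke(demo, ["--progress-style", "RICH", "--security-only"])
assert result.output.strip() == "style=rich security_only=True"
```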
@@ -434,7 +553,7 @@ def analyze_subcommand(
     enable_pm: bool,
     pm_platform: tuple[str, ...],
     disable_pm: bool,
-    rich: bool,
+    no_rich: bool,
     log: str,
     skip_identity_analysis: bool,
     apply_identity_suggestions: bool,
@@ -443,6 +562,8 @@
     generate_csv: bool,
     use_batch_classification: bool,
     force_fetch: bool,
+    progress_style: str,
+    security_only: bool,
 ) -> None:
     """Analyze Git repositories and generate comprehensive productivity reports.
 
@@ -468,6 +589,9 @@
     # Analyze with qualitative insights
     gitflow-analytics analyze -c config.yaml --enable-qualitative
 
+    # Run only security analysis (requires security config)
+    gitflow-analytics analyze -c config.yaml --security-only
+
     \b
     OUTPUT FILES:
     - developer_metrics_YYYYMMDD.csv: Individual developer statistics
@@ -496,7 +620,7 @@
         enable_pm=enable_pm,
         pm_platform=pm_platform,
         disable_pm=disable_pm,
-        rich=rich,
+        no_rich=no_rich,
         log=log,
         skip_identity_analysis=skip_identity_analysis,
         apply_identity_suggestions=apply_identity_suggestions,
@@ -505,6 +629,8 @@
         generate_csv=generate_csv,
         use_batch_classification=use_batch_classification,
         force_fetch=force_fetch,
+        progress_style=progress_style,
+        security_only=security_only,
     )
 
 
@@ -521,7 +647,7 @@ def analyze(
     enable_pm: bool,
     pm_platform: tuple[str, ...],
     disable_pm: bool,
-    rich: bool,
+    no_rich: bool,
     log: str,
     skip_identity_analysis: bool,
     apply_identity_suggestions: bool,
@@ -530,11 +656,31 @@
     generate_csv: bool = False,
     use_batch_classification: bool = True,
     force_fetch: bool = False,
+    progress_style: str = "simple",
+    security_only: bool = False,
 ) -> None:
     """Analyze Git repositories using configuration file."""
 
-    # Initialize display - use rich by default, fall back to simple output if needed
-    display = create_rich_display() if rich else None
+    # Initialize progress service early with the correct style
+    from .core.progress import get_progress_service
+
+    try:
+        from ._version import __version__
+
+        version = __version__
+    except ImportError:
+        version = "1.3.11"
+
+    # Initialize progress service with user's preference
+    progress = get_progress_service(display_style=progress_style, version=version)
+
+    # Initialize display - simple output by default to prevent TUI hanging
+    # Create display - only create if rich output is explicitly enabled (--no-rich=False)
+    display = (
+        create_progress_display(style="simple" if no_rich else "rich", version=__version__)
+        if not no_rich
+        else None
+    )
 
     # Configure logging based on the --log option
     if log.upper() != "NONE":
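Note that the fallback version above hard-codes "1.3.11" when `_version` cannot be imported, which is stale for a 3.3.0 wheel. `get_progress_service` itself is presumably a module-level singleton factory; a minimal sketch of that pattern (the field names are assumptions, not the actual `gitflow_analytics.core.progress` API, though `_use_rich` does appear later in this diff):

```python
from typing import Optional


class ProgressService:
    def __init__(self, display_style: str = "simple", version: str = "0.0.0") -> None:
        self.display_style = display_style
        self.version = version
        # "auto" would probe the terminal; this sketch just records the choice.
        self._use_rich = display_style == "rich"


_service: Optional[ProgressService] = None


def get_progress_service(display_style: str = "simple", version: str = "0.0.0") -> ProgressService:
    """Return the shared ProgressService, creating it on first use."""
    global _service
    if _service is None:
        _service = ProgressService(display_style=display_style, version=version)
    return _service
```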
@@ -622,6 +768,29 @@ def analyze(
     for warning in warnings:
         click.echo(f"   - {warning}")
 
+    # Run pre-flight git authentication check
+    # Convert config object to dict for preflight check
+    config_dict = {
+        "github": {
+            "token": cfg.github.token if cfg.github else None,
+            "organization": cfg.github.organization if cfg.github else None,
+        }
+    }
+
+    if display:
+        display.print_status("Verifying GitHub authentication...", "info")
+    else:
+        click.echo("🔐 Verifying GitHub authentication...")
+
+    if not preflight_git_authentication(config_dict):
+        if display:
+            display.print_status(
+                "GitHub authentication failed. Cannot proceed with analysis.", "error"
+            )
+        else:
+            click.echo("❌ GitHub authentication failed. Cannot proceed with analysis.")
+        sys.exit(1)
+
     if validate_only:
         if not warnings:
             if display:
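`preflight_git_authentication` (from the new `core/git_auth.py`, +169 lines) takes that nested config dict and returns a boolean. A best-effort sketch of what such a preflight can look like, assuming a plain GitHub REST call via `requests` (the real implementation may also probe git remotes and organizations):

```python
import requests


def preflight_github_token(token: str) -> bool:
    """Best-effort check that a GitHub token is usable before analysis starts.

    A sketch only: calls GET /user on the GitHub REST API and treats any
    non-200 response (bad token, revoked scope, network proxy) as a failure.
    """
    resp = requests.get(
        "https://api.github.com/user",
        headers={"Authorization": f"Bearer {token}"},
        timeout=10,
    )
    return resp.status_code == 200
```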
@@ -661,12 +830,35 @@ def analyze(
             analysis_weeks=weeks,
         )
 
+        # Start full-screen display immediately after showing configuration
+        # This ensures smooth transition for all modes, especially with organization discovery
+        # and prevents console prints from breaking the full-screen experience
+        try:
+            # Check if display has the method before calling
+            if hasattr(display, "start_live_display"):
+                display.start_live_display()
+            elif hasattr(display, "start"):
+                display.start(total_items=100, description="Initializing GitFlow Analytics")
+
+            # Add progress task if method exists
+            if hasattr(display, "add_progress_task"):
+                display.add_progress_task("main", "Initializing GitFlow Analytics", 100)
+        except Exception as e:
+            # Fall back to simple display if Rich has issues
+            click.echo(f"⚠️ Rich display initialization failed: {e}")
+            click.echo("   Continuing with simple output mode...")
+            # Set display to None to use fallback everywhere
+            display = None
+
         # Initialize components
         cache_dir = cfg.cache.directory
         cache = GitAnalysisCache(cache_dir, ttl_hours=0 if no_cache else cfg.cache.ttl_hours)
 
         if clear_cache:
-            if display:
+            if display and display._live:
+                # We're in full-screen mode, update the task
+                display.update_progress_task("main", description="Clearing cache...", completed=5)
+            elif display:
                 display.print_status("Clearing cache...", "info")
             else:
                 click.echo("🗑️ Clearing cache...")
@@ -674,7 +866,13 @@
             try:
                 # Use the new method that provides detailed feedback
                 cleared_counts = cache.clear_all_cache()
-                if display:
+                if display and display._live:
+                    display.update_progress_task(
+                        "main",
+                        description=f"Cache cleared: {cleared_counts['commits']} commits, {cleared_counts['total']} total",
+                        completed=10,
+                    )
+                elif display:
                     display.print_status(
                         f"Cache cleared: {cleared_counts['commits']} commits, "
                         f"{cleared_counts['repository_status']} repo status records, "
@@ -797,6 +995,149 @@
         if validate_only:
             return
 
+        # Security-only mode: Run only security analysis and exit
+        if security_only:
+            if display:
+                display.print_status("🔒 Running security-only analysis...", "info")
+            else:
+                click.echo("\n🔒 Running security-only analysis...")
+
+            from .core.data_fetcher import GitDataFetcher
+            from .security import SecurityAnalyzer, SecurityConfig
+            from .security.reports import SecurityReportGenerator
+
+            # GitAnalysisCache already imported at module level (line 24)
+
+            # Load security configuration
+            security_config = SecurityConfig.from_dict(
+                cfg.analysis.security if hasattr(cfg.analysis, "security") else {}
+            )
+
+            if not security_config.enabled:
+                if display:
+                    display.show_error("Security analysis is not enabled in configuration")
+                else:
+                    click.echo("❌ Security analysis is not enabled in configuration")
+                    click.echo("💡 Add 'security:' section to your config with 'enabled: true'")
+                return
+
+            # Setup cache directory
+            cache_dir = cfg.cache.directory
+            if not cache_dir.is_absolute():
+                cache_dir = config.parent / cache_dir
+            cache_dir.mkdir(parents=True, exist_ok=True)
+
+            # Initialize cache for data fetcher
+            cache = GitAnalysisCache(
+                cache_dir=cache_dir,
+                ttl_hours=cfg.cache.ttl_hours if not no_cache else 0,
+            )
+
+            # Initialize data fetcher for getting commits
+            data_fetcher = GitDataFetcher(
+                cache=cache,
+                branch_mapping_rules=cfg.analysis.branch_mapping_rules,
+                allowed_ticket_platforms=cfg.analysis.ticket_platforms,
+                exclude_paths=cfg.analysis.exclude_paths,
+            )
+
+            # Get commits from all repositories
+            all_commits = []
+            for repo_config in cfg.repositories:
+                repo_path = Path(repo_config["path"])
+                if not repo_path.exists():
+                    click.echo(f"⚠️ Repository not found: {repo_path}")
+                    continue
+
+                # Calculate date range
+                end_date = datetime.now(timezone.utc)
+                start_date = end_date - timedelta(weeks=weeks)
+
+                if display:
+                    display.print_status(f"Fetching commits from {repo_config['name']}...", "info")
+                else:
+                    click.echo(f"📥 Fetching commits from {repo_config['name']}...")
+
+                # Fetch raw data for the repository
+                raw_data = data_fetcher.fetch_raw_data(
+                    repositories=[repo_config],
+                    start_date=start_date,
+                    end_date=end_date,
+                )
+
+                # Extract commits from the raw data
+                if raw_data and raw_data.get("commits"):
+                    commits = raw_data["commits"]
+                else:
+                    commits = []
+                all_commits.extend(commits)
+
+            if not all_commits:
+                if display:
+                    display.show_error("No commits found to analyze")
+                else:
+                    click.echo("❌ No commits found to analyze")
+                return
+
+            # Initialize security analyzer
+            security_analyzer = SecurityAnalyzer(config=security_config)
+
+            # Analyze commits for security issues
+            if display:
+                display.print_status(
+                    f"Analyzing {len(all_commits)} commits for security issues...", "info"
+                )
+            else:
+                click.echo(f"\n🔍 Analyzing {len(all_commits)} commits for security issues...")
+
+            analyses = []
+            for commit in all_commits:
+                analysis = security_analyzer.analyze_commit(commit)
+                analyses.append(analysis)
+
+            # Generate summary
+            summary = security_analyzer.generate_summary_report(analyses)
+
+            # Print summary to console
+            click.echo("\n" + "=" * 60)
+            click.echo("SECURITY ANALYSIS SUMMARY")
+            click.echo("=" * 60)
+            click.echo(f"Total Commits Analyzed: {summary['total_commits']}")
+            click.echo(f"Commits with Issues: {summary['commits_with_issues']}")
+            click.echo(f"Total Security Findings: {summary['total_findings']}")
+            click.echo(
+                f"Risk Level: {summary['risk_level']} (Score: {summary['average_risk_score']:.1f})"
+            )
+
+            if summary["severity_distribution"]["critical"] > 0:
+                click.echo(f"\n🔴 Critical Issues: {summary['severity_distribution']['critical']}")
+            if summary["severity_distribution"]["high"] > 0:
+                click.echo(f"🟠 High Issues: {summary['severity_distribution']['high']}")
+            if summary["severity_distribution"]["medium"] > 0:
+                click.echo(f"🟡 Medium Issues: {summary['severity_distribution']['medium']}")
+
+            # Generate reports
+            report_dir = output or Path(cfg.output.directory)
+            report_dir.mkdir(parents=True, exist_ok=True)
+
+            report_gen = SecurityReportGenerator(output_dir=report_dir)
+            reports = report_gen.generate_reports(analyses, summary)
+
+            click.echo("\n✅ Security Reports Generated:")
+            for report_type, path in reports.items():
+                click.echo(f"   - {report_type.upper()}: {path}")
+
+            # Show recommendations
+            if summary["recommendations"]:
+                click.echo("\n💡 Recommendations:")
+                for rec in summary["recommendations"][:5]:
+                    click.echo(f"   {rec}")
+
+            if display:
+                display.print_status("Security analysis completed!", "success")
+
+            return  # Exit after security-only analysis
+
         # Initialize identity resolver with comprehensive error handling
         identity_db_path = cache_dir / "identities.db"
         try:
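The security path is driven entirely by `SecurityConfig.from_dict(...)` applied to the `security:` mapping from the YAML config. Only the `enabled` key is confirmed by the code above; the extractor toggles below are guesses mirroring the new `security/extractors/` module names in this release and may not match the real schema:

```python
from gitflow_analytics.security import SecurityConfig

# Hypothetical shape of the `security:` section, expressed as the dict that
# SecurityConfig.from_dict receives. Only "enabled" is confirmed above; the
# other keys are assumptions based on secret_detector.py,
# vulnerability_scanner.py, and dependency_checker.py existing in this wheel.
security_section = {
    "enabled": True,
    "secret_detection": True,
    "vulnerability_scanning": True,
    "dependency_checking": True,
}

security_config = SecurityConfig.from_dict(security_section)
assert security_config.enabled
```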
@@ -890,7 +1231,9 @@ def analyze(
         analyzer = GitAnalyzer(
             cache,
             branch_mapping_rules=cfg.analysis.branch_mapping_rules,
-            allowed_ticket_platforms=getattr(cfg.analysis, "ticket_platforms", None),
+            allowed_ticket_platforms=getattr(
+                cfg.analysis, "ticket_platforms", ["jira", "github", "clickup", "linear"]
+            ),
             exclude_paths=cfg.analysis.exclude_paths,
             story_point_patterns=cfg.analysis.story_point_patterns,
             ml_categorization_config=ml_config,
@@ -902,9 +1245,12 @@
         # Discovery organization repositories if needed
         repositories_to_analyze = cfg.repositories
         if cfg.github.organization and not repositories_to_analyze:
-            if display:
-                display.print_status(
-                    f"Discovering repositories from organization: {cfg.github.organization}", "info"
+            if display and display._live:
+                # We're in full-screen mode, update the task
+                display.update_progress_task(
+                    "main",
+                    description=f"🔍 Discovering repositories from organization: {cfg.github.organization}",
+                    completed=15,
                 )
             else:
                 click.echo(
@@ -917,27 +1263,29 @@
                 discovered_repos = cfg.discover_organization_repositories(clone_base_path=repos_dir)
                 repositories_to_analyze = discovered_repos
 
-                if display:
-                    display.print_status(
-                        f"Found {len(discovered_repos)} repositories in organization", "success"
+                if display and display._live:
+                    # We're in full-screen mode, update progress and initialize repo list
+                    display.update_progress_task(
+                        "main",
+                        description=f"✅ Found {len(discovered_repos)} repositories in {cfg.github.organization}",
+                        completed=20,
                     )
-                    # Show repository discovery in structured format
-                    repo_data = [
-                        {
-                            "name": repo.name,
-                            "github_repo": repo.github_repo,
-                            "exists": repo.path.exists(),
-                        }
-                        for repo in discovered_repos
-                    ]
-                    display.show_repository_discovery(repo_data)
+                    # Initialize repository list for the full-screen display
+                    repo_list = []
+                    for repo in discovered_repos:
+                        repo_list.append({"name": repo.name, "status": "pending"})
+                    display.initialize_repositories(repo_list)
                 else:
                     click.echo(f"   ✅ Found {len(discovered_repos)} repositories in organization")
                     for repo in discovered_repos:
-                        click.echo(f"      - {repo.name} ({repo.github_repo})")
+                        status = "exists locally" if repo.path.exists() else "needs cloning"
+                        click.echo(f"      - {repo.name} ({status})")
             except Exception as e:
-                if display:
-                    display.show_error(f"Failed to discover repositories: {e}")
+                if display and display._live:
+                    # Update error in full-screen mode
+                    display.update_progress_task(
+                        "main", description=f"❌ Failed to discover repositories: {e}", completed=20
+                    )
                 else:
                     click.echo(f"   ❌ Failed to discover repositories: {e}")
                 return
@@ -958,18 +1306,24 @@
             end_date = get_week_end(last_complete_week_start + timedelta(days=6))
 
         if display:
-            display.print_status(
-                f"Analyzing {len(repositories_to_analyze)} repositories...", "info"
-            )
-            display.print_status(
-                f"Period: {start_date.strftime('%Y-%m-%d')} to {end_date.strftime('%Y-%m-%d')}",
-                "info",
-            )
-            # Start live progress display
-            display.start_live_display()
-            display.add_progress_task(
-                "repos", "Processing repositories", len(repositories_to_analyze)
-            )
+            # Update task or initialize repositories in full-screen mode
+            if display._live:
+                # We're in full-screen mode
+                display.update_progress_task(
+                    "main",
+                    description=f"Analyzing {len(repositories_to_analyze)} repositories",
+                    completed=25,
+                )
+                # Initialize repositories if not already done (e.g., when not using org discovery)
+                if not cfg.github.organization or cfg.repositories:
+                    repo_list = [
+                        {
+                            "name": repo.name or repo.project_key or Path(repo.path).name,
+                            "status": "pending",
+                        }
+                        for repo in repositories_to_analyze
+                    ]
+                    display.initialize_repositories(repo_list)
         else:
             click.echo(f"\n🚀 Analyzing {len(repositories_to_analyze)} repositories...")
             click.echo(
@@ -999,7 +1353,10 @@
         # Check if we should use batch classification (two-step process)
         if use_batch_classification:
             if display:
-                display.print_status("Using two-step process: fetch then classify...", "info")
+                # Add the repos task - this will start the display if needed
+                display.add_progress_task(
+                    "repos", "Checking cache and preparing analysis", len(repositories_to_analyze)
+                )
             else:
                 click.echo("🔄 Using two-step process: fetch then classify...")
 
@@ -1009,7 +1366,13 @@
 
             if not force_fetch:
                 if display:
-                    display.print_status("Checking cache completeness...", "info")
+                    # Check if display is actually running, if not fall back to simple output
+                    if hasattr(display, "_live") and display._live:
+                        display.update_progress_task(
+                            "repos", description="Checking cache completeness...", completed=0
+                        )
+                    else:
+                        click.echo("🔍 Checking cache completeness...")
                 else:
                     click.echo("🔍 Checking cache completeness...")
 
@@ -1024,24 +1387,17 @@
 
                     if status:
                         cached_repos.append((repo_config, status))
-                        if display:
-                            display.print_status(
-                                f"   ✅ {repo_config.name}: Using cached data ({status['commit_count']} commits)",
-                                "success",
-                            )
+                        # In full-screen mode, we'll update repo status via the display
                     else:
                         repos_needing_analysis.append(repo_config)
-                        if display:
-                            display.print_status(
-                                f"   📥 {repo_config.name}: Analysis needed", "info"
-                            )
 
                 if cached_repos:
                     total_cached_commits = sum(status["commit_count"] for _, status in cached_repos)
-                    if display:
-                        display.print_status(
-                            f"Found {len(cached_repos)} repos with cached data ({total_cached_commits} commits)",
-                            "success",
+                    if display and hasattr(display, "_live") and display._live:
+                        display.update_progress_task(
+                            "repos",
+                            description=f"Found {len(cached_repos)} repos with cached data ({total_cached_commits} commits)",
+                            completed=10,
                         )
                     else:
                         click.echo(
@@ -1050,17 +1406,22 @@
             else:
                 # Force fetch: analyze all repositories
                 repos_needing_analysis = repositories_to_analyze
-                if display:
-                    display.print_status("Force fetch enabled - analyzing all repositories", "info")
+                if display and display._live:
+                    display.update_progress_task(
+                        "repos",
+                        description="Force fetch enabled - analyzing all repositories",
+                        completed=5,
+                    )
                 else:
                     click.echo("🔄 Force fetch enabled - analyzing all repositories")
 
             # Step 1: Fetch data only for repos that need analysis
             if repos_needing_analysis:
-                if display:
-                    display.print_status(
-                        f"Step 1: Fetching data for {len(repos_needing_analysis)} repositories...",
-                        "info",
+                if display and display._live:
+                    display.update_progress_task(
+                        "repos",
+                        description=f"Step 1: Fetching data for {len(repos_needing_analysis)} repositories...",
+                        completed=15,
                     )
                 else:
                     click.echo(
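From here on the diff repeats a three-way branch: full-screen display (`display._live` truthy), plain display, or bare `click.echo`. That guard could be factored into a single helper; a sketch only, not part of the package (`_live`, `update_progress_task`, and `print_status` are the names the code above already uses):

```python
from typing import Optional

import click


def notify(display, task_id: str, message: str, completed: Optional[int] = None) -> None:
    """Send a status message to whichever output channel is active."""
    if display is not None and getattr(display, "_live", None):
        # Full-screen mode: update the named progress task in place.
        display.update_progress_task(task_id, description=message, completed=completed)
    elif display is not None:
        display.print_status(message, "info")
    else:
        click.echo(message)
```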
@@ -1084,12 +1445,84 @@
                 orchestrator = IntegrationOrchestrator(cfg, cache)
                 jira_integration = orchestrator.integrations.get("jira")
 
-                # Get progress service for overall repository progress
-                progress = get_progress_service()
+                # Progress service already initialized at the start of the function
+                # We can use the progress instance that was created earlier
+
+                # Update the progress task since display is already started
+                if display:
+                    # Update the existing task since display was already started
+                    display.update_progress_task(
+                        "repos",
+                        description=f"Step 1: Fetching data for {len(repos_needing_analysis)} repositories",
+                        completed=0,
+                    )
+
+                    # Initialize ALL repositories (both cached and to-be-fetched) with their status
+                    if hasattr(display, "initialize_repositories"):
+                        all_repo_list = []
+
+                        # Add cached repos as COMPLETE
+                        for cached_repo, _ in cached_repos:
+                            repo_name = (
+                                cached_repo.name
+                                or cached_repo.project_key
+                                or Path(cached_repo.path).name
+                            )
+                            all_repo_list.append({"name": repo_name, "status": "complete"})
+
+                        # Add repos needing analysis as PENDING
+                        for repo in repos_needing_analysis:
+                            repo_name = repo.name or repo.project_key or Path(repo.path).name
+                            all_repo_list.append({"name": repo_name, "status": "pending"})
+
+                        display.initialize_repositories(all_repo_list)
+
+                    # Also initialize progress service for compatibility
+                    if progress_style == "rich" or (
+                        progress_style == "auto" and progress._use_rich
+                    ):
+                        progress.start_rich_display(
+                            total_items=len(repos_needing_analysis),
+                            description=f"Analyzing {len(repos_needing_analysis)} repositories",
+                        )
+                        progress.initialize_repositories(
+                            all_repo_list if "all_repo_list" in locals() else []
+                        )
+                        progress.set_phase("Step 1: Data Fetching")
+                else:
+                    # Fallback to progress service if no display
+                    if progress_style == "rich" or (
+                        progress_style == "auto" and progress._use_rich
+                    ):
+                        progress.start_rich_display(
+                            total_items=len(repos_needing_analysis),
+                            description=f"Analyzing {len(repos_needing_analysis)} repositories",
+                        )
+
+                        # Initialize ALL repositories (both cached and to-be-fetched) with their status
+                        all_repo_list = []
+
+                        # Add cached repos as COMPLETE
+                        for cached_repo, _ in cached_repos:
+                            repo_name = (
+                                cached_repo.name
+                                or cached_repo.project_key
+                                or Path(cached_repo.path).name
+                            )
+                            all_repo_list.append({"name": repo_name, "status": "complete"})
+
+                        # Add repos needing analysis as PENDING
+                        for repo in repos_needing_analysis:
+                            repo_name = repo.name or repo.project_key or Path(repo.path).name
+                            all_repo_list.append({"name": repo_name, "status": "pending"})
+
+                        progress.initialize_repositories(all_repo_list)
+                        progress.set_phase("Step 1: Data Fetching")
 
                 # Fetch data for repositories that need analysis
                 total_commits = 0
                 total_tickets = 0
+                total_developers = set()  # Track unique developers
 
                 # Create top-level progress for all repositories
                 with progress.progress(
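Both branches above build the same structure: `initialize_repositories` takes a list of dicts with `name` and `status` keys, where cached repositories start as "complete" and the rest as "pending". Reduced to its essentials:

```python
from pathlib import Path


def build_repo_list(cached_repos: list, repos_needing_analysis: list) -> list[dict]:
    """Mirror the list shape passed to initialize_repositories() above."""
    repo_list = [
        {"name": repo.name or repo.project_key or Path(repo.path).name, "status": "complete"}
        for repo, _status in cached_repos
    ]
    repo_list += [
        {"name": repo.name or repo.project_key or Path(repo.path).name, "status": "pending"}
        for repo in repos_needing_analysis
    ]
    return repo_list
```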
@@ -1103,12 +1536,29 @@
                     repo_path = Path(repo_config.path)
                     project_key = repo_config.project_key or repo_path.name
 
-                    # Update overall progress description
+                    # Update overall progress description with clear repository info
+                    repo_display_name = repo_config.name or project_key
                     progress.set_description(
                         repos_progress_ctx,
-                        f"Repository {idx}/{len(repos_needing_analysis)}: {project_key}",
+                        f"🔄 Analyzing repository: {repo_display_name} ({idx}/{len(repos_needing_analysis)})",
                     )
 
+                    # Also update the display if available
+                    if display:
+                        display.update_progress_task(
+                            "repos",
+                            description=f"🔄 Processing: {repo_display_name} ({idx}/{len(repos_needing_analysis)})",
+                            completed=idx - 1,
+                        )
+                        # Update repository status to processing
+                        if hasattr(display, "update_repository_status"):
+                            display.update_repository_status(
+                                repo_display_name,
+                                "processing",
+                                f"Fetching data from {repo_display_name}",
+                                {},
+                            )
+
                     # Progress callback for fetch
                     def progress_callback(message: str):
                         if display:
@@ -1138,6 +1588,21 @@
                             total_commits += result["stats"]["total_commits"]
                             total_tickets += result["stats"]["unique_tickets"]
 
+                            # Collect unique developers if available
+                            if "developers" in result["stats"]:
+                                total_developers.update(result["stats"]["developers"])
+
+                            # Update Rich display statistics
+                            if progress._use_rich:
+                                progress.update_statistics(
+                                    total_commits=total_commits,
+                                    total_tickets=total_tickets,
+                                    total_developers=len(total_developers),
+                                    total_repositories=len(repos_needing_analysis),
+                                    processed_repositories=idx,
+                                )
+                            # Note: finish_repository is now called in data_fetcher
+
                             if display:
                                 display.print_status(
                                     f"   ✅ {project_key}: {result['stats']['total_commits']} commits, "
@@ -1158,14 +1623,31 @@
                                 config_hash=config_hash,
                             )
 
+                            # Update repository status to completed in display
+                            if display and hasattr(display, "update_repository_status"):
+                                repo_display_name = repo_config.name or project_key
+                                display.update_repository_status(
+                                    repo_display_name,
+                                    "completed",
+                                    f"Completed {repo_display_name}",
+                                    {
+                                        "commits": result["stats"]["total_commits"],
+                                        "tickets": result["stats"]["unique_tickets"],
+                                        "developers": len(result["stats"].get("developers", [])),
+                                    },
+                                )
+
                             # Update overall repository progress
                             progress.update(repos_progress_ctx)
 
                         except Exception as e:
-                            if display:
-                                display.print_status(
-                                    f"   ❌ Error fetching {project_key}: {e}", "error"
-                                )
+                            if display and display._live:
+                                # Update repository status to error in full-screen mode
+                                if hasattr(display, "update_repository_status"):
+                                    repo_display_name = repo_config.name or project_key
+                                    display.update_repository_status(
+                                        repo_display_name, "error", f"Error: {str(e)}", {}
+                                    )
                             else:
                                 click.echo(f"   ❌ Error fetching {project_key}: {e}")
 
@@ -1184,20 +1666,43 @@
                             progress.update(repos_progress_ctx)
                             continue
 
-                if display:
-                    display.print_status(
-                        f"Step 1 complete: {total_commits} commits, {total_tickets} tickets fetched",
-                        "success",
+                # Display repository fetch status summary
+                repo_status = data_fetcher.get_repository_status_summary()
+                if repo_status["failed_updates"] > 0 or repo_status["errors"]:
+                    logger.warning(
+                        f"\n⚠️ Repository Update Summary:\n"
+                        f"   • Total repositories: {repo_status['total_repositories']}\n"
+                        f"   • Successful updates: {repo_status['successful_updates']}\n"
+                        f"   • Failed updates: {repo_status['failed_updates']}\n"
+                        f"   • Skipped updates: {repo_status['skipped_updates']}"
+                    )
+                    if repo_status["failed_updates"] > 0:
+                        logger.warning(
+                            "   ⚠️ Some repositories failed to fetch updates. Analysis uses potentially stale data.\n"
+                            "   Check authentication, network connectivity, or try with --skip-remote-fetch."
+                        )
+
+                if display and display._live:
+                    display.update_progress_task(
+                        "repos",
+                        description=f"Step 1 complete: {total_commits} commits, {total_tickets} tickets fetched",
+                        completed=100,
                     )
+                    # Stop the live display after Step 1
+                    display.stop_live_display()
                 else:
                     click.echo(
                         f"📥 Step 1 complete: {total_commits} commits, {total_tickets} tickets fetched"
                     )
             else:
-                if display:
-                    display.print_status(
-                        "All repositories use cached data - skipping data fetch", "success"
+                if display and display._live:
+                    display.update_progress_task(
+                        "repos",
+                        description="All repositories use cached data - skipping data fetch",
+                        completed=100,
                    )
+                    # Stop the live display if all data was cached
+                    display.stop_live_display()
                 else:
                     click.echo("✅ All repositories use cached data - skipping data fetch")
 
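`get_repository_status_summary()` evidently returns update counters plus an error list; the keys below are exactly the ones the warning block above consumes (the values are made up for illustration):

```python
# Shape implied by the logging above; values are illustrative only.
repo_status = {
    "total_repositories": 12,
    "successful_updates": 10,
    "failed_updates": 1,
    "skipped_updates": 1,
    "errors": ["repo-x: authentication failed"],
}

if repo_status["failed_updates"] > 0 or repo_status["errors"]:
    print(
        f"{repo_status['failed_updates']} of {repo_status['total_repositories']} "
        "repositories failed to update; results may be stale"
    )
```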
@@ -1377,6 +1882,21 @@
                         total_commits += result["stats"]["total_commits"]
                         total_tickets += result["stats"]["unique_tickets"]
 
+                        # Collect unique developers if available
+                        if "developers" in result["stats"]:
+                            total_developers.update(result["stats"]["developers"])
+
+                        # Update Rich display statistics
+                        if progress._use_rich:
+                            progress.update_statistics(
+                                total_commits=total_commits,
+                                total_tickets=total_tickets,
+                                total_developers=len(total_developers),
+                                total_repositories=len(repos_needing_analysis),
+                                processed_repositories=idx,
+                            )
+                        # Note: finish_repository is now called in data_fetcher
+
                         if display:
                             display.print_status(
                                 f"   ✅ {project_key}: {result['stats']['total_commits']} commits, "
@@ -1537,6 +2057,33 @@
 
             if display:
                 display.print_status("Step 2: Batch classification...", "info")
+                # Restart the full-screen live display for Step 2
+                display.start_live_display()
+                # Get total number of batches to process
+                with cache.get_session() as session:
+                    total_batches = (
+                        session.query(DailyCommitBatch)
+                        .filter(
+                            and_(
+                                DailyCommitBatch.date >= start_date.date(),
+                                DailyCommitBatch.date <= end_date.date(),
+                            )
+                        )
+                        .count()
+                    )
+                display.add_progress_task(
+                    "repos",  # Use "repos" task id to trigger the full display
+                    f"Classifying {total_batches} batches",
+                    total_batches,
+                )
+                # Reinitialize repositories for Step 2 display
+                if hasattr(display, "initialize_repositories"):
+                    # Create a list of "batches" to display
+                    batch_list = []
+                    for repo in repositories_to_analyze:
+                        repo_name = repo.name or repo.project_key or Path(repo.path).name
+                        batch_list.append({"name": f"{repo_name} batches", "status": "pending"})
+                    display.initialize_repositories(batch_list)
             else:
                 click.echo("🧠 Step 2: Batch classification...")
 
@@ -1572,6 +2119,7 @@
                     project_keys.append(project_key)
 
             # Run batch classification
+            # Note: The batch classifier will create its own progress bars, but our display should remain active
             classification_result = batch_classifier.classify_date_range(
                 start_date=start_date,
                 end_date=end_date,
@@ -1579,7 +2127,16 @@
                 force_reclassify=clear_cache,
             )
 
+            # Update display progress after classification
+            if display and hasattr(display, "update_progress_task"):
+                display.update_progress_task(
+                    "repos", completed=total_batches if "total_batches" in locals() else 0
+                )
+
             if display:
+                # Complete the progress task and stop the live display
+                display.complete_progress_task("repos", "Batch classification complete")
+                display.stop_live_display()
                 display.print_status(
                     f"✅ Batch classification completed: {classification_result['processed_batches']} batches, "
                     f"{classification_result['total_commits']} commits",
@@ -1675,30 +2232,36 @@
 
                     all_commits.append(commit_dict)
 
-                if display:
-                    display.print_status(
-                        f"Loaded {len(all_commits)} classified commits from database", "success"
+                if display and display._live:
+                    display.update_progress_task(
+                        "main",
+                        description=f"Loaded {len(all_commits)} classified commits from database",
+                        completed=85,
                     )
                 else:
                     click.echo(f"✅ Loaded {len(all_commits)} classified commits from database")
 
                 # Process the loaded commits to generate required statistics
                 # Update developer identities
-                if display:
-                    display.print_status("Processing developer identities...", "info")
+                if display and display._live:
+                    display.update_progress_task(
+                        "main", description="Processing developer identities...", completed=90
+                    )
                 else:
                     click.echo("👥 Processing developer identities...")
 
                 identity_resolver.update_commit_stats(all_commits)
 
                 # Analyze ticket references using loaded commits
-                if display:
-                    display.print_status("Analyzing ticket references...", "info")
+                if display and display._live:
+                    display.update_progress_task(
+                        "main", description="Analyzing ticket references...", completed=95
+                    )
                 else:
                     click.echo("🎫 Analyzing ticket references...")
 
                 ticket_analysis = analyzer.ticket_extractor.analyze_ticket_coverage(
-                    all_commits, all_prs
+                    all_commits, all_prs, display
                 )
 
                 # Calculate per-developer ticket coverage and get updated developer stats
@@ -1709,9 +2272,11 @@
                     ticket_coverage=developer_ticket_coverage
                 )
 
-                if display:
-                    display.print_status(
-                        f"Identified {len(developer_stats)} unique developers", "success"
+                if display and display._live:
+                    display.update_progress_task(
+                        "main",
+                        description=f"Identified {len(developer_stats)} unique developers",
+                        completed=98,
                     )
                 else:
                     click.echo(f"   ✅ Identified {len(developer_stats)} unique developers")
@@ -1724,6 +2289,13 @@
         all_enrichments = {}
         branch_health_metrics = {}  # Store branch health metrics per repository
 
+        # Note: Full-screen display is already started early after configuration
+        # Just add the repository processing task
+        if display and display._live:
+            display.add_progress_task(
+                "repos", "Processing repositories", len(repositories_to_analyze)
+            )
+
         # Analyze repositories (traditional mode or forced fetch)
         # Note: In batch mode, these are already populated from database
 
@@ -1743,10 +2315,14 @@
             if not repo_config.path.exists():
                 # Try to clone if we have a github_repo configured
                 if repo_config.github_repo and cfg.github.organization:
-                    if display:
-                        display.print_status(
-                            f"Cloning {repo_config.github_repo} from GitHub...", "info"
-                        )
+                    if display and display._live:
+                        # Update status in full-screen mode
+                        if hasattr(display, "update_repository_status"):
+                            display.update_repository_status(
+                                repo_config.name,
+                                "processing",
+                                f"Cloning {repo_config.github_repo} from GitHub...",
+                            )
                    else:
                        click.echo(f"   📥 Cloning {repo_config.github_repo} from GitHub...")
                    try:
@@ -2202,7 +2778,9 @@
             click.echo("\n🎫 Analyzing ticket references...")
 
         # Use the analyzer's ticket extractor which may be ML-enhanced
-        ticket_analysis = analyzer.ticket_extractor.analyze_ticket_coverage(all_commits, all_prs)
+        ticket_analysis = analyzer.ticket_extractor.analyze_ticket_coverage(
+            all_commits, all_prs, display
+        )
 
         # Calculate per-developer ticket coverage and update developer stats with accurate coverage
         developer_ticket_coverage = analyzer.ticket_extractor.calculate_developer_ticket_coverage(
@@ -2343,8 +2921,12 @@
                             "author_email": commit.get("author_email"),
                             "timestamp": commit.get("timestamp"),
                             "files_changed": commit.get("files_changed") or [],
-                            "insertions": commit.get("insertions", 0),
-                            "deletions": commit.get("deletions", 0),
+                            "insertions": commit.get(
+                                "filtered_insertions", commit.get("insertions", 0)
+                            ),
+                            "deletions": commit.get(
+                                "filtered_deletions", commit.get("deletions", 0)
+                            ),
                             "branch": commit.get("branch", "main"),
                         }
                     else:
@@ -2355,8 +2937,8 @@
                             "author_email": commit.author_email,
                             "timestamp": commit.timestamp,
                             "files_changed": commit.files_changed or [],
-                            "insertions": commit.insertions,
-                            "deletions": commit.deletions,
+                            "insertions": getattr(commit, "filtered_insertions", commit.insertions),
+                            "deletions": getattr(commit, "filtered_deletions", commit.deletions),
                             "branch": getattr(commit, "branch", "main"),
                         }
                         commits_for_qual.append(commit_dict)
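The two hunks above apply the same fallback for both dict-shaped and object-shaped commits: prefer `filtered_insertions`/`filtered_deletions` (presumably the line counts after `exclude_paths` filtering) and fall back to the raw totals. The pattern in isolation:

```python
from typing import Any


def effective_line_counts(commit: Any) -> tuple[int, int]:
    """Prefer exclusion-filtered counts, falling back to raw insertions/deletions."""
    if isinstance(commit, dict):
        insertions = commit.get("filtered_insertions", commit.get("insertions", 0))
        deletions = commit.get("filtered_deletions", commit.get("deletions", 0))
    else:
        insertions = getattr(commit, "filtered_insertions", commit.insertions)
        deletions = getattr(commit, "filtered_deletions", commit.deletions)
    return insertions, deletions
```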
@@ -3434,6 +4016,15 @@
 
         click.echo(f"\n✅ Analysis complete! Reports saved to {output}")
 
+        # Stop Rich display if it was started
+        if (
+            "progress" in locals()
+            and progress
+            and hasattr(progress, "_use_rich")
+            and progress._use_rich
+        ):
+            progress.stop_rich_display()
+
     except click.ClickException:
         # Let Click handle its own exceptions
         raise
@@ -3484,7 +4075,10 @@
     help="Enable logging with specified level (default: none)",
 )
 @click.option(
-    "--rich", is_flag=True, default=True, help="Use rich terminal output (default: enabled)"
+    "--no-rich",
+    is_flag=True,
+    default=True,
+    help="Disable rich terminal output (simple output is default to prevent TUI hanging)",
 )
 def fetch(
     config: Path,
@@ -3492,7 +4086,7 @@
     output: Optional[Path],
     clear_cache: bool,
     log: str,
-    rich: bool,
+    no_rich: bool,
 ) -> None:
     """Fetch data from external platforms for enhanced analysis.
 
@@ -3534,7 +4128,12 @@
     - Use --clear-cache to force fresh fetch
     """
     # Initialize display
-    display = create_rich_display() if rich else None
+    # Create display - simple output by default to prevent TUI hanging, rich only when explicitly enabled
+    display = (
+        create_progress_display(style="simple" if no_rich else "rich", version=__version__)
+        if not no_rich
+        else None
+    )
 
     # Configure logging
     if log.upper() != "NONE":
@@ -4026,7 +4625,9 @@ def identities(config: Path, weeks: int, apply: bool) -> None:
     analyzer = GitAnalyzer(
         cache,
         branch_mapping_rules=cfg.analysis.branch_mapping_rules,
-        allowed_ticket_platforms=getattr(cfg.analysis, "ticket_platforms", None),
+        allowed_ticket_platforms=getattr(
+            cfg.analysis, "ticket_platforms", ["jira", "github", "clickup", "linear"]
+        ),
         exclude_paths=cfg.analysis.exclude_paths,
         story_point_patterns=cfg.analysis.story_point_patterns,
         ml_categorization_config=ml_config,
@@ -4486,6 +5087,73 @@
         sys.exit(1)
 
 
+@cli.command(name="verify-activity")
+@click.option(
+    "--config",
+    "-c",
+    type=click.Path(exists=True, path_type=Path),
+    required=True,
+    help="Path to YAML configuration file",
+)
+@click.option(
+    "--weeks",
+    "-w",
+    type=int,
+    default=4,
+    help="Number of weeks to analyze (default: 4)",
+)
+@click.option(
+    "--output",
+    "-o",
+    type=click.Path(path_type=Path),
+    help="Optional path to save the report",
+)
+def verify_activity(config: Path, weeks: int, output: Optional[Path]) -> None:
+    """Verify day-by-day project activity without pulling code.
+
+    \b
+    This command helps verify if reports showing "No Activity" are accurate by:
+    - Querying repositories for activity summaries
+    - Showing day-by-day activity for each project
+    - Listing all branches and their last activity dates
+    - Highlighting days with zero activity
+    - Using GitHub API for remote repos or git commands for local repos
+
+    \b
+    EXAMPLES:
+        # Verify activity for last 4 weeks
+        gitflow-analytics verify-activity -c config.yaml --weeks 4
+
+        # Save report to file
+        gitflow-analytics verify-activity -c config.yaml --weeks 8 -o activity_report.txt
+
+    \b
+    OUTPUT:
+    - Daily activity matrix showing commits per day per project
+    - Branch summary with last activity dates
+    - Days with zero activity highlighted
+    - Total statistics and inactive projects
+
+    \b
+    NOTE: This command does NOT pull or fetch code, it only queries metadata.
+    """
+    try:
+        from .verify_activity import verify_activity_command
+
+        verify_activity_command(config, weeks, output)
+
+    except ImportError as e:
+        click.echo(f"❌ Missing dependency for activity verification: {e}")
+        click.echo("Please install required packages: pip install tabulate")
+        sys.exit(1)
+    except Exception as e:
+        click.echo(f"❌ Error during activity verification: {e}")
+        import traceback
+
+        traceback.print_exc()
+        sys.exit(1)
+
+
 @cli.command(name="help")
 def show_help() -> None:
     """Show comprehensive help and usage guide.