gitflow-analytics 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gitflow_analytics/__init__.py +11 -11
- gitflow_analytics/_version.py +2 -2
- gitflow_analytics/cli.py +612 -258
- gitflow_analytics/cli_rich.py +353 -0
- gitflow_analytics/config.py +251 -141
- gitflow_analytics/core/analyzer.py +140 -103
- gitflow_analytics/core/branch_mapper.py +132 -132
- gitflow_analytics/core/cache.py +240 -169
- gitflow_analytics/core/identity.py +210 -173
- gitflow_analytics/extractors/base.py +13 -11
- gitflow_analytics/extractors/story_points.py +70 -59
- gitflow_analytics/extractors/tickets.py +101 -87
- gitflow_analytics/integrations/github_integration.py +84 -77
- gitflow_analytics/integrations/jira_integration.py +116 -104
- gitflow_analytics/integrations/orchestrator.py +86 -85
- gitflow_analytics/metrics/dora.py +181 -177
- gitflow_analytics/models/database.py +190 -53
- gitflow_analytics/qualitative/__init__.py +30 -0
- gitflow_analytics/qualitative/classifiers/__init__.py +13 -0
- gitflow_analytics/qualitative/classifiers/change_type.py +468 -0
- gitflow_analytics/qualitative/classifiers/domain_classifier.py +399 -0
- gitflow_analytics/qualitative/classifiers/intent_analyzer.py +436 -0
- gitflow_analytics/qualitative/classifiers/risk_analyzer.py +412 -0
- gitflow_analytics/qualitative/core/__init__.py +13 -0
- gitflow_analytics/qualitative/core/llm_fallback.py +653 -0
- gitflow_analytics/qualitative/core/nlp_engine.py +373 -0
- gitflow_analytics/qualitative/core/pattern_cache.py +457 -0
- gitflow_analytics/qualitative/core/processor.py +540 -0
- gitflow_analytics/qualitative/models/__init__.py +25 -0
- gitflow_analytics/qualitative/models/schemas.py +272 -0
- gitflow_analytics/qualitative/utils/__init__.py +13 -0
- gitflow_analytics/qualitative/utils/batch_processor.py +326 -0
- gitflow_analytics/qualitative/utils/cost_tracker.py +343 -0
- gitflow_analytics/qualitative/utils/metrics.py +347 -0
- gitflow_analytics/qualitative/utils/text_processing.py +243 -0
- gitflow_analytics/reports/analytics_writer.py +11 -4
- gitflow_analytics/reports/csv_writer.py +51 -31
- gitflow_analytics/reports/narrative_writer.py +16 -14
- gitflow_analytics/tui/__init__.py +5 -0
- gitflow_analytics/tui/app.py +721 -0
- gitflow_analytics/tui/screens/__init__.py +8 -0
- gitflow_analytics/tui/screens/analysis_progress_screen.py +487 -0
- gitflow_analytics/tui/screens/configuration_screen.py +547 -0
- gitflow_analytics/tui/screens/loading_screen.py +358 -0
- gitflow_analytics/tui/screens/main_screen.py +304 -0
- gitflow_analytics/tui/screens/results_screen.py +698 -0
- gitflow_analytics/tui/widgets/__init__.py +7 -0
- gitflow_analytics/tui/widgets/data_table.py +257 -0
- gitflow_analytics/tui/widgets/export_modal.py +301 -0
- gitflow_analytics/tui/widgets/progress_widget.py +192 -0
- {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/METADATA +31 -4
- gitflow_analytics-1.0.3.dist-info/RECORD +62 -0
- gitflow_analytics-1.0.1.dist-info/RECORD +0 -31
- {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/WHEEL +0 -0
- {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/entry_points.txt +0 -0
- {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/licenses/LICENSE +0 -0
- {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/top_level.txt +0 -0
|
"""TUI screens for GitFlow Analytics."""

# Re-export each screen class so callers can import them directly from
# ``gitflow_analytics.tui.screens`` instead of the individual modules.
from .main_screen import MainScreen
from .configuration_screen import ConfigurationScreen
from .analysis_progress_screen import AnalysisProgressScreen
from .results_screen import ResultsScreen

# Explicit public API of this subpackage.
__all__ = ["MainScreen", "ConfigurationScreen", "AnalysisProgressScreen", "ResultsScreen"]
@@ -0,0 +1,487 @@
|
|
|
1
|
+
"""Analysis progress screen for GitFlow Analytics TUI."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import time
|
|
5
|
+
from datetime import datetime, timedelta, timezone
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Optional, Dict, Any, List
|
|
8
|
+
|
|
9
|
+
from textual.widgets import Header, Footer, Label, Log, Static
|
|
10
|
+
from textual.containers import Container, Vertical, Horizontal
|
|
11
|
+
from textual.screen import Screen
|
|
12
|
+
from textual.binding import Binding
|
|
13
|
+
from rich.pretty import Pretty
|
|
14
|
+
|
|
15
|
+
from ..widgets.progress_widget import AnalysisProgressWidget
|
|
16
|
+
from gitflow_analytics.config import Config
|
|
17
|
+
from gitflow_analytics.core.cache import GitAnalysisCache
|
|
18
|
+
from gitflow_analytics.core.analyzer import GitAnalyzer
|
|
19
|
+
from gitflow_analytics.core.identity import DeveloperIdentityResolver
|
|
20
|
+
from gitflow_analytics.integrations.orchestrator import IntegrationOrchestrator
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class AnalysisProgressScreen(Screen):
    """
    Screen showing real-time analysis progress with detailed status updates.

    WHY: Long-running analysis operations require comprehensive progress feedback
    to keep users informed and allow them to monitor the process. This screen
    provides real-time updates on all phases of analysis.

    DESIGN DECISION: Uses multiple progress widgets to show different phases
    independently, allowing users to understand which part of the analysis is
    currently running and estimated completion times for each phase.
    """

    BINDINGS = [
        Binding("ctrl+c", "cancel", "Cancel Analysis"),
        Binding("escape", "back", "Back to Main"),
        Binding("ctrl+l", "toggle_log", "Toggle Log"),
    ]

    def __init__(
        self,
        config: Config,
        weeks: int = 12,
        enable_qualitative: bool = True,
        *,
        name: Optional[str] = None,
        id: Optional[str] = None
    ) -> None:
        """Initialize the progress screen.

        Args:
            config: Loaded GitFlow Analytics configuration.
            weeks: Number of weeks of history to analyze.
            enable_qualitative: Whether to run the qualitative analysis phase.
            name: Optional Textual screen name (keyword-only, Textual convention).
            id: Optional Textual widget id (keyword-only; shadows the builtin
                intentionally to match Textual's Screen signature).
        """
        super().__init__(name=name, id=id)
        self.config = config
        self.weeks = weeks
        self.enable_qualitative = enable_qualitative
        # Handle to the background analysis task so it can be cancelled.
        self.analysis_task: Optional[asyncio.Task] = None
        # Populated at the end of a successful run (commits/prs/developers/repositories).
        self.analysis_results: Dict[str, Any] = {}
        self.start_time = time.time()

    def compose(self):
        """Compose the analysis progress screen layout."""
        yield Header()

        with Container(id="progress-container"):
            yield Label("GitFlow Analytics - Analysis in Progress", classes="screen-title")

            # Progress panels for different phases
            with Vertical(id="progress-panels"):
                yield AnalysisProgressWidget(
                    "Overall Progress",
                    total=100.0,
                    id="overall-progress"
                )

                yield AnalysisProgressWidget(
                    "Repository Analysis",
                    total=100.0,
                    id="repo-progress"
                )

                yield AnalysisProgressWidget(
                    "Integration Data",
                    total=100.0,
                    id="integration-progress"
                )

                # The qualitative panel only exists when that phase will run.
                if self.enable_qualitative:
                    yield AnalysisProgressWidget(
                        "Qualitative Analysis",
                        total=100.0,
                        id="qual-progress"
                    )

            # Live statistics panel
            with Container(classes="stats-panel"):
                yield Label("Live Statistics", classes="panel-title")
                yield Pretty({}, id="live-stats")

            # Analysis log
            with Container(classes="log-panel"):
                yield Label("Analysis Log", classes="panel-title")
                yield Log(auto_scroll=True, id="analysis-log")

        yield Footer()

    def on_mount(self) -> None:
        """Start analysis when screen mounts."""
        self.analysis_task = asyncio.create_task(self._run_analysis())

    async def _run_analysis(self) -> None:
        """
        Run the complete analysis pipeline with progress updates.

        WHY: Implements the full analysis workflow with detailed progress tracking
        and error handling, ensuring users receive comprehensive feedback about
        the analysis process.
        """
        log = self.query_one("#analysis-log", Log)
        overall_progress = self.query_one("#overall-progress", AnalysisProgressWidget)

        try:
            log.write_line("🚀 Starting GitFlow Analytics...")

            # Phase 1: Initialize components (10%)
            overall_progress.update_progress(5, "Initializing components...")
            await self._initialize_components(log)
            overall_progress.update_progress(10, "Components initialized")

            # Phase 2: Repository discovery (20%)
            overall_progress.update_progress(10, "Discovering repositories...")
            repositories = await self._discover_repositories(log)
            overall_progress.update_progress(20, f"Found {len(repositories)} repositories")

            # Phase 3: Repository analysis (50%)
            overall_progress.update_progress(20, "Analyzing repositories...")
            commits, prs = await self._analyze_repositories(repositories, log)
            overall_progress.update_progress(50, f"Analyzed {len(commits)} commits")

            # Phase 4: Integration enrichment (70%)
            overall_progress.update_progress(50, "Enriching with integration data...")
            # BUGFIX: previously the enriched pull requests were fetched and then
            # discarded, so `prs` stayed empty and the summary/results always
            # reported 0 PRs. Collect them into the list returned to ResultsScreen.
            prs.extend(await self._enrich_with_integrations(repositories, commits, log))
            overall_progress.update_progress(70, "Integration data complete")

            # Phase 5: Identity resolution (80%)
            overall_progress.update_progress(70, "Resolving developer identities...")
            developer_stats = await self._resolve_identities(commits, log)
            overall_progress.update_progress(80, f"Identified {len(developer_stats)} developers")

            # Phase 6: Qualitative analysis (95%)
            if self.enable_qualitative:
                overall_progress.update_progress(80, "Running qualitative analysis...")
                await self._run_qualitative_analysis(commits, log)
                overall_progress.update_progress(95, "Qualitative analysis complete")

            # Phase 7: Finalization (100%)
            overall_progress.update_progress(95, "Finalizing results...")
            self.analysis_results = {
                'commits': commits,
                'prs': prs,
                'developers': developer_stats,
                'repositories': repositories
            }

            overall_progress.complete("Analysis complete!")

            total_time = time.time() - self.start_time
            log.write_line(f"🎉 Analysis completed in {total_time:.1f} seconds!")
            log.write_line(f"   - Total commits: {len(commits):,}")
            log.write_line(f"   - Total PRs: {len(prs):,}")
            log.write_line(f"   - Active developers: {len(developer_stats):,}")

            # Switch to results screen after brief pause
            await asyncio.sleep(2)
            from .results_screen import ResultsScreen
            self.app.push_screen(ResultsScreen(
                commits=commits,
                prs=prs,
                developers=developer_stats,
                config=self.config
            ))

        except asyncio.CancelledError:
            log.write_line("❌ Analysis cancelled by user")
            overall_progress.update_progress(0, "Cancelled")
        except Exception as e:
            log.write_line(f"❌ Analysis failed: {e}")
            overall_progress.update_progress(0, f"Error: {str(e)[:50]}...")
            self.notify(f"Analysis failed: {e}", severity="error")

    async def _initialize_components(self, log: Log) -> None:
        """Initialize cache, identity resolver, analyzer and integrations.

        Stores each component on ``self`` so later phases can use them.
        """
        log.write_line("📋 Initializing cache...")

        self.cache = GitAnalysisCache(
            self.config.cache.directory,
            ttl_hours=self.config.cache.ttl_hours
        )

        log.write_line("👥 Initializing identity resolver...")
        self.identity_resolver = DeveloperIdentityResolver(
            self.config.cache.directory / 'identities.db',
            similarity_threshold=self.config.analysis.similarity_threshold,
            manual_mappings=self.config.analysis.manual_identity_mappings
        )

        log.write_line("🔍 Initializing analyzer...")
        self.analyzer = GitAnalyzer(
            self.cache,
            branch_mapping_rules=self.config.analysis.branch_mapping_rules,
            # Older configs may lack `ticket_platforms`; fall back to None.
            allowed_ticket_platforms=getattr(self.config.analysis, 'ticket_platforms', None),
            exclude_paths=self.config.analysis.exclude_paths
        )

        log.write_line("🔗 Initializing integrations...")
        self.orchestrator = IntegrationOrchestrator(self.config, self.cache)

        # Check if we have pre-loaded NLP engine from startup
        if hasattr(self.app, 'get_nlp_engine') and self.app.get_nlp_engine():
            log.write_line("✅ NLP engine already loaded from startup")
        elif self.enable_qualitative:
            log.write_line("⚠️ NLP engine will be loaded during qualitative analysis phase")

        # Small delay to show progress
        await asyncio.sleep(0.5)

    async def _discover_repositories(self, log: Log) -> List:
        """Discover repositories to analyze.

        Returns the configured repositories, or repositories discovered from
        the GitHub organization when none are configured explicitly.

        Raises:
            Exception: Propagates any organization-discovery failure so the
                caller can abort the run with an error message.
        """
        repositories = self.config.repositories

        if self.config.github.organization and not repositories:
            log.write_line(f"🔍 Discovering repositories from organization: {self.config.github.organization}")

            try:
                # Use config directory for cloned repos
                config_dir = Path.cwd()  # TODO: Get actual config directory
                repos_dir = config_dir / "repos"

                discovered_repos = self.config.discover_organization_repositories(
                    clone_base_path=repos_dir
                )
                repositories = discovered_repos

                for repo in repositories:
                    log.write_line(f"   📁 {repo.name} ({repo.github_repo})")

            except Exception as e:
                log.write_line(f"   ❌ Repository discovery failed: {e}")
                raise

        await asyncio.sleep(0.5)  # Brief pause for UI updates
        return repositories

    async def _analyze_repositories(self, repositories: List, log: Log) -> tuple:
        """Analyze all repositories and return ``(commits, prs)``.

        The PR list starts empty here; it is populated later by
        ``_enrich_with_integrations`` in ``_run_analysis``.
        Per-repository failures are logged and skipped so one broken repo
        does not abort the whole run.
        """
        repo_progress = self.query_one("#repo-progress", AnalysisProgressWidget)

        all_commits = []
        all_prs = []

        # Analysis period (timezone-aware to match commit timestamps)
        end_date = datetime.now(timezone.utc)
        start_date = end_date - timedelta(weeks=self.weeks)

        for i, repo_config in enumerate(repositories):
            progress = (i / len(repositories)) * 100
            repo_progress.update_progress(progress, f"Analyzing {repo_config.name}...")

            log.write_line(f"📁 Analyzing {repo_config.name}...")

            try:
                # Clone repository if needed
                if not repo_config.path.exists() and repo_config.github_repo:
                    log.write_line(f"   📥 Cloning {repo_config.github_repo}...")
                    await self._clone_repository(repo_config, log)

                # Analyze commits
                commits = self.analyzer.analyze_repository(
                    repo_config.path,
                    start_date,
                    repo_config.branch
                )

                # Add project key and resolve identities
                for commit in commits:
                    commit['project_key'] = repo_config.project_key or commit.get('inferred_project', 'UNKNOWN')
                    commit['canonical_id'] = self.identity_resolver.resolve_developer(
                        commit['author_name'],
                        commit['author_email']
                    )

                all_commits.extend(commits)
                log.write_line(f"   ✅ Found {len(commits)} commits")

                # Update live stats
                await self._update_live_stats({
                    'repositories_analyzed': i + 1,
                    'total_repositories': len(repositories),
                    'total_commits': len(all_commits),
                    'current_repo': repo_config.name
                })

                # Small delay to allow UI updates
                await asyncio.sleep(0.1)

            except Exception as e:
                log.write_line(f"   ❌ Error analyzing {repo_config.name}: {e}")
                continue

        repo_progress.complete(f"Completed {len(repositories)} repositories")
        return all_commits, all_prs

    async def _enrich_with_integrations(self, repositories: List, commits: List, log: Log) -> List:
        """Enrich data with integration sources and return collected PRs.

        Returns:
            List of pull-request records gathered from the orchestrator for
            all repositories. Previously these were discarded, which left the
            pipeline's PR count permanently at zero.
        """
        integration_progress = self.query_one("#integration-progress", AnalysisProgressWidget)

        end_date = datetime.now(timezone.utc)
        start_date = end_date - timedelta(weeks=self.weeks)

        collected_prs: List = []

        for i, repo_config in enumerate(repositories):
            progress = (i / len(repositories)) * 100
            integration_progress.update_progress(progress, f"Enriching {repo_config.name}...")

            try:
                # Get repository commits for this repo
                repo_commits = [c for c in commits if c.get('repository') == repo_config.name]

                enrichment = self.orchestrator.enrich_repository_data(
                    repo_config, repo_commits, start_date
                )

                if enrichment.get('prs'):
                    # Accumulate the PRs so the caller can report and export them.
                    collected_prs.extend(enrichment['prs'])
                    log.write_line(f"   ✅ Found {len(enrichment['prs'])} pull requests for {repo_config.name}")

                await asyncio.sleep(0.1)

            except Exception as e:
                # Enrichment is best-effort: log and continue with the next repo.
                log.write_line(f"   ⚠️ Integration enrichment failed for {repo_config.name}: {e}")
                continue

        integration_progress.complete("Integration enrichment complete")
        return collected_prs

    async def _resolve_identities(self, commits: List, log: Log) -> List:
        """Resolve developer identities and return per-developer statistics."""
        log.write_line("👥 Updating developer statistics...")

        # Update commit statistics
        self.identity_resolver.update_commit_stats(commits)
        developer_stats = self.identity_resolver.get_developer_stats()

        log.write_line(f"   ✅ Resolved {len(developer_stats)} unique developer identities")

        # Show top contributors
        top_devs = sorted(developer_stats, key=lambda d: d['total_commits'], reverse=True)[:5]
        for dev in top_devs:
            log.write_line(f"   • {dev['primary_name']}: {dev['total_commits']} commits")

        await asyncio.sleep(0.5)
        return developer_stats

    async def _run_qualitative_analysis(self, commits: List, log: Log) -> None:
        """Run qualitative analysis on the commits if enabled.

        Annotates each commit dict in place with change_type, business_domain,
        risk_level and confidence_score. Uses the app's pre-loaded NLP engine
        when available, otherwise initializes a QualitativeProcessor on demand.
        """
        if not self.enable_qualitative:
            return

        qual_progress = self.query_one("#qual-progress", AnalysisProgressWidget)

        try:
            log.write_line("🧠 Starting qualitative analysis...")

            # Check if NLP engine is pre-loaded from startup
            nlp_engine = None
            if hasattr(self.app, 'get_nlp_engine'):
                nlp_engine = self.app.get_nlp_engine()

            if nlp_engine:
                log.write_line("   ✅ Using pre-loaded NLP engine")
                qual_processor = None  # We'll use the NLP engine directly
            else:
                log.write_line("   ⏳ Initializing qualitative processor...")
                # Import qualitative processor lazily: its dependencies are
                # optional and the ImportError is handled below.
                from gitflow_analytics.qualitative.core.processor import QualitativeProcessor

                qual_processor = QualitativeProcessor(self.config.qualitative)

                # Validate setup
                is_valid, issues = qual_processor.validate_setup()
                if not is_valid:
                    log.write_line("   ⚠️ Qualitative analysis setup issues:")
                    for issue in issues:
                        log.write_line(f"      - {issue}")
                    return

            # Process commits in batches
            batch_size = 100
            total_batches = (len(commits) + batch_size - 1) // batch_size

            for batch_idx in range(total_batches):
                start_idx = batch_idx * batch_size
                end_idx = min(start_idx + batch_size, len(commits))
                batch = commits[start_idx:end_idx]

                progress = (batch_idx / total_batches) * 100
                qual_progress.update_progress(
                    progress,
                    f"Processing batch {batch_idx + 1}/{total_batches}..."
                )

                # Convert to qualitative format
                qual_batch = []
                for commit in batch:
                    qual_commit = {
                        'hash': commit.get('hash'),
                        'message': commit.get('message'),
                        'author_name': commit.get('author_name'),
                        'author_email': commit.get('author_email'),
                        'timestamp': commit.get('timestamp'),
                        'files_changed': commit.get('files_changed', []),
                        'insertions': commit.get('insertions', 0),
                        'deletions': commit.get('deletions', 0),
                        'branch': commit.get('branch', 'main')
                    }
                    qual_batch.append(qual_commit)

                # Process batch using pre-loaded NLP engine or processor
                if nlp_engine:
                    # Use the pre-loaded NLP engine directly
                    results = nlp_engine.process_batch(qual_batch)
                else:
                    # Use the qualitative processor
                    results = qual_processor.process_commits(qual_batch, show_progress=False)

                # Update original commits with qualitative data
                for original, enhanced in zip(batch, results):
                    if hasattr(enhanced, 'change_type'):
                        original['change_type'] = enhanced.change_type
                        original['business_domain'] = enhanced.business_domain
                        original['risk_level'] = enhanced.risk_level
                        original['confidence_score'] = enhanced.confidence_score

                await asyncio.sleep(0.1)  # Allow UI updates

            qual_progress.complete("Qualitative analysis complete")
            log.write_line("   ✅ Qualitative analysis completed")

        except ImportError:
            log.write_line("   ❌ Qualitative analysis dependencies not available")
            qual_progress.update_progress(0, "Dependencies missing")
        except Exception as e:
            log.write_line(f"   ❌ Qualitative analysis failed: {e}")
            qual_progress.update_progress(0, f"Error: {str(e)[:30]}...")

    async def _clone_repository(self, repo_config, log: Log) -> None:
        """Clone the repository from GitHub into ``repo_config.path``.

        Uses the configured GitHub token for authenticated clones when present.
        Raises on failure so the caller can skip the repository.
        """
        try:
            import git

            repo_config.path.parent.mkdir(parents=True, exist_ok=True)

            clone_url = f"https://github.com/{repo_config.github_repo}.git"
            if self.config.github.token:
                # NOTE: token is embedded in the URL; it may appear in error
                # output if the clone fails.
                clone_url = f"https://{self.config.github.token}@github.com/{repo_config.github_repo}.git"

            git.Repo.clone_from(clone_url, repo_config.path, branch=repo_config.branch)
            log.write_line(f"   ✅ Successfully cloned {repo_config.github_repo}")

        except Exception as e:
            log.write_line(f"   ❌ Failed to clone {repo_config.github_repo}: {e}")
            raise

    async def _update_live_stats(self, stats: Dict[str, Any]) -> None:
        """Update live statistics display."""
        stats_widget = self.query_one("#live-stats", Pretty)
        stats_widget.update(stats)

    def action_cancel(self) -> None:
        """Cancel the analysis and return to the previous screen."""
        if self.analysis_task and not self.analysis_task.done():
            self.analysis_task.cancel()
        self.app.pop_screen()

    def action_back(self) -> None:
        """Go back to main screen (cancels any running analysis)."""
        self.action_cancel()

    def action_toggle_log(self) -> None:
        """Toggle log panel visibility via the ``hidden`` CSS class."""
        log_panel = self.query_one(".log-panel")
        log_panel.set_class(not log_panel.has_class("hidden"), "hidden")