gitflow-analytics 1.0.3__py3-none-any.whl → 1.3.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gitflow_analytics/_version.py +1 -1
- gitflow_analytics/classification/__init__.py +31 -0
- gitflow_analytics/classification/batch_classifier.py +752 -0
- gitflow_analytics/classification/classifier.py +464 -0
- gitflow_analytics/classification/feature_extractor.py +725 -0
- gitflow_analytics/classification/linguist_analyzer.py +574 -0
- gitflow_analytics/classification/model.py +455 -0
- gitflow_analytics/cli.py +4108 -350
- gitflow_analytics/cli_rich.py +198 -48
- gitflow_analytics/config/__init__.py +43 -0
- gitflow_analytics/config/errors.py +261 -0
- gitflow_analytics/config/loader.py +904 -0
- gitflow_analytics/config/profiles.py +264 -0
- gitflow_analytics/config/repository.py +124 -0
- gitflow_analytics/config/schema.py +441 -0
- gitflow_analytics/config/validator.py +154 -0
- gitflow_analytics/config.py +44 -508
- gitflow_analytics/core/analyzer.py +1209 -98
- gitflow_analytics/core/cache.py +1337 -29
- gitflow_analytics/core/data_fetcher.py +1193 -0
- gitflow_analytics/core/identity.py +363 -14
- gitflow_analytics/core/metrics_storage.py +526 -0
- gitflow_analytics/core/progress.py +372 -0
- gitflow_analytics/core/schema_version.py +269 -0
- gitflow_analytics/extractors/ml_tickets.py +1100 -0
- gitflow_analytics/extractors/story_points.py +8 -1
- gitflow_analytics/extractors/tickets.py +749 -11
- gitflow_analytics/identity_llm/__init__.py +6 -0
- gitflow_analytics/identity_llm/analysis_pass.py +231 -0
- gitflow_analytics/identity_llm/analyzer.py +464 -0
- gitflow_analytics/identity_llm/models.py +76 -0
- gitflow_analytics/integrations/github_integration.py +175 -11
- gitflow_analytics/integrations/jira_integration.py +461 -24
- gitflow_analytics/integrations/orchestrator.py +124 -1
- gitflow_analytics/metrics/activity_scoring.py +322 -0
- gitflow_analytics/metrics/branch_health.py +470 -0
- gitflow_analytics/metrics/dora.py +379 -20
- gitflow_analytics/models/database.py +843 -53
- gitflow_analytics/pm_framework/__init__.py +115 -0
- gitflow_analytics/pm_framework/adapters/__init__.py +50 -0
- gitflow_analytics/pm_framework/adapters/jira_adapter.py +1845 -0
- gitflow_analytics/pm_framework/base.py +406 -0
- gitflow_analytics/pm_framework/models.py +211 -0
- gitflow_analytics/pm_framework/orchestrator.py +652 -0
- gitflow_analytics/pm_framework/registry.py +333 -0
- gitflow_analytics/qualitative/__init__.py +9 -10
- gitflow_analytics/qualitative/chatgpt_analyzer.py +259 -0
- gitflow_analytics/qualitative/classifiers/__init__.py +3 -3
- gitflow_analytics/qualitative/classifiers/change_type.py +518 -244
- gitflow_analytics/qualitative/classifiers/domain_classifier.py +272 -165
- gitflow_analytics/qualitative/classifiers/intent_analyzer.py +321 -222
- gitflow_analytics/qualitative/classifiers/llm/__init__.py +35 -0
- gitflow_analytics/qualitative/classifiers/llm/base.py +193 -0
- gitflow_analytics/qualitative/classifiers/llm/batch_processor.py +383 -0
- gitflow_analytics/qualitative/classifiers/llm/cache.py +479 -0
- gitflow_analytics/qualitative/classifiers/llm/cost_tracker.py +435 -0
- gitflow_analytics/qualitative/classifiers/llm/openai_client.py +403 -0
- gitflow_analytics/qualitative/classifiers/llm/prompts.py +373 -0
- gitflow_analytics/qualitative/classifiers/llm/response_parser.py +287 -0
- gitflow_analytics/qualitative/classifiers/llm_commit_classifier.py +607 -0
- gitflow_analytics/qualitative/classifiers/risk_analyzer.py +215 -189
- gitflow_analytics/qualitative/core/__init__.py +4 -4
- gitflow_analytics/qualitative/core/llm_fallback.py +239 -235
- gitflow_analytics/qualitative/core/nlp_engine.py +157 -148
- gitflow_analytics/qualitative/core/pattern_cache.py +214 -192
- gitflow_analytics/qualitative/core/processor.py +381 -248
- gitflow_analytics/qualitative/enhanced_analyzer.py +2236 -0
- gitflow_analytics/qualitative/example_enhanced_usage.py +420 -0
- gitflow_analytics/qualitative/models/__init__.py +7 -7
- gitflow_analytics/qualitative/models/schemas.py +155 -121
- gitflow_analytics/qualitative/utils/__init__.py +4 -4
- gitflow_analytics/qualitative/utils/batch_processor.py +136 -123
- gitflow_analytics/qualitative/utils/cost_tracker.py +142 -140
- gitflow_analytics/qualitative/utils/metrics.py +172 -158
- gitflow_analytics/qualitative/utils/text_processing.py +146 -104
- gitflow_analytics/reports/__init__.py +100 -0
- gitflow_analytics/reports/analytics_writer.py +539 -14
- gitflow_analytics/reports/base.py +648 -0
- gitflow_analytics/reports/branch_health_writer.py +322 -0
- gitflow_analytics/reports/classification_writer.py +924 -0
- gitflow_analytics/reports/cli_integration.py +427 -0
- gitflow_analytics/reports/csv_writer.py +1676 -212
- gitflow_analytics/reports/data_models.py +504 -0
- gitflow_analytics/reports/database_report_generator.py +427 -0
- gitflow_analytics/reports/example_usage.py +344 -0
- gitflow_analytics/reports/factory.py +499 -0
- gitflow_analytics/reports/formatters.py +698 -0
- gitflow_analytics/reports/html_generator.py +1116 -0
- gitflow_analytics/reports/interfaces.py +489 -0
- gitflow_analytics/reports/json_exporter.py +2770 -0
- gitflow_analytics/reports/narrative_writer.py +2287 -158
- gitflow_analytics/reports/story_point_correlation.py +1144 -0
- gitflow_analytics/reports/weekly_trends_writer.py +389 -0
- gitflow_analytics/training/__init__.py +5 -0
- gitflow_analytics/training/model_loader.py +377 -0
- gitflow_analytics/training/pipeline.py +550 -0
- gitflow_analytics/tui/__init__.py +1 -1
- gitflow_analytics/tui/app.py +129 -126
- gitflow_analytics/tui/screens/__init__.py +3 -3
- gitflow_analytics/tui/screens/analysis_progress_screen.py +188 -179
- gitflow_analytics/tui/screens/configuration_screen.py +154 -178
- gitflow_analytics/tui/screens/loading_screen.py +100 -110
- gitflow_analytics/tui/screens/main_screen.py +89 -72
- gitflow_analytics/tui/screens/results_screen.py +305 -281
- gitflow_analytics/tui/widgets/__init__.py +2 -2
- gitflow_analytics/tui/widgets/data_table.py +67 -69
- gitflow_analytics/tui/widgets/export_modal.py +76 -76
- gitflow_analytics/tui/widgets/progress_widget.py +41 -46
- gitflow_analytics-1.3.6.dist-info/METADATA +1015 -0
- gitflow_analytics-1.3.6.dist-info/RECORD +122 -0
- gitflow_analytics-1.0.3.dist-info/METADATA +0 -490
- gitflow_analytics-1.0.3.dist-info/RECORD +0 -62
- {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.6.dist-info}/WHEEL +0 -0
- {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.6.dist-info}/entry_points.txt +0 -0
- {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.6.dist-info}/licenses/LICENSE +0 -0
- {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.6.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,322 @@
|
|
|
1
|
+
"""Branch health report generation for GitFlow Analytics."""
|
|
2
|
+
|
|
3
|
+
import csv
|
|
4
|
+
import logging
|
|
5
|
+
from io import StringIO
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
import pandas as pd
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class BranchHealthReportGenerator:
    """Generate branch health reports in CSV and markdown formats.

    The generator is stateless: each method takes the pre-computed
    ``branch_health_metrics`` mapping (repo name -> metrics dict with
    ``summary``, ``health_indicators``, ``branches``, ``recommendations``
    keys) and renders it to a CSV file or a markdown fragment.
    """

    # Column order for the combined summary + per-branch CSV. Kept as a class
    # constant so the headers-only fallback and the populated path stay in sync.
    _CSV_FIELDNAMES = [
        "repository",
        "branch_name",
        "total_branches",
        "active_branches",
        "stale_branches",
        "long_lived_branches",
        "overall_health",
        "stale_percentage",
        "branch_creation_rate_weekly",
        "average_branch_age_days",
        "average_commits_per_branch",
        "age_days",
        "is_stale",
        "is_merged",
        "total_commits",
        "unique_authors",
        "ahead_of_main",
        "behind_main",
        "divergence_score",
        "health_score",
        "latest_activity",
        "daily_commit_average",
    ]

    # Column order for the detailed per-branch CSV.
    _DETAIL_FIELDNAMES = [
        "repository",
        "branch",
        "age_days",
        "health_score",
        "status",
        "total_commits",
        "unique_authors",
        "commits_ahead",
        "commits_behind",
        "divergence_total",
        "daily_commit_avg",
        "weekly_commit_avg",
        "latest_activity",
        "is_merged",
        "is_stale",
    ]

    def __init__(self) -> None:
        """Initialize branch health report generator (no state is kept)."""

    def generate_csv_report(
        self, branch_health_metrics: dict[str, dict[str, Any]], output_path: Path
    ) -> Path:
        """Generate CSV report for branch health metrics.

        Each repository contributes one ``[SUMMARY]`` sentinel row followed by
        one row per branch. When there are no metrics at all, a headers-only
        CSV is still written so downstream consumers always find a valid file.

        Args:
            branch_health_metrics: Dictionary mapping repo names to their branch health metrics
            output_path: Path where the CSV should be written

        Returns:
            Path to the generated CSV file
        """
        rows: list[dict[str, Any]] = []

        for repo_name, metrics in branch_health_metrics.items():
            summary = metrics.get("summary", {})
            health = metrics.get("health_indicators", {})

            # Repository-level aggregate row, marked with a sentinel branch name.
            rows.append(
                {
                    "repository": repo_name,
                    "branch_name": "[SUMMARY]",
                    "total_branches": summary.get("total_branches", 0),
                    "active_branches": summary.get("active_branches", 0),
                    "stale_branches": summary.get("stale_branches", 0),
                    "long_lived_branches": summary.get("long_lived_branches", 0),
                    "overall_health": health.get("overall_health", "unknown"),
                    "stale_percentage": health.get("stale_branch_percentage", 0),
                    "branch_creation_rate_weekly": summary.get(
                        "branch_creation_rate_per_week", 0
                    ),
                    "average_branch_age_days": summary.get("average_branch_age_days", 0),
                    "average_commits_per_branch": summary.get(
                        "average_commits_per_branch", 0
                    ),
                }
            )

            # One row per individual branch.
            for branch_name, branch_data in metrics.get("branches", {}).items():
                rows.append(
                    {
                        "repository": repo_name,
                        "branch_name": branch_name,
                        "age_days": branch_data.get("age_days", 0),
                        "is_stale": branch_data.get("is_stale", False),
                        "is_merged": branch_data.get("is_merged", False),
                        "total_commits": branch_data.get("total_commits", 0),
                        "unique_authors": branch_data.get("unique_authors", 0),
                        "ahead_of_main": branch_data.get("ahead_of_main", 0),
                        "behind_main": branch_data.get("behind_main", 0),
                        "divergence_score": branch_data.get("divergence_score", 0),
                        "health_score": branch_data.get("health_score", 0),
                        "latest_activity": branch_data.get("latest_activity", ""),
                        "daily_commit_average": branch_data.get(
                            "commit_frequency", {}
                        ).get("daily_average", 0),
                    }
                )

        if rows:
            pd.DataFrame(rows).to_csv(output_path, index=False)
        else:
            # No data: write an empty CSV that still carries the header row.
            with open(output_path, "w", newline="") as f:
                writer = csv.DictWriter(f, fieldnames=self._CSV_FIELDNAMES)
                writer.writeheader()

        return output_path

    def generate_markdown_section(self, branch_health_metrics: dict[str, dict[str, Any]]) -> str:
        """Generate markdown section for branch health to include in narrative reports.

        Args:
            branch_health_metrics: Dictionary mapping repo names to their branch health metrics

        Returns:
            Markdown formatted string with branch health insights; empty string
            when no metrics were supplied.
        """
        if not branch_health_metrics:
            return ""

        report = StringIO()
        report.write("\n## Branch Health Analysis\n\n")

        # Overall summary across all repositories.
        total_repos = len(branch_health_metrics)
        total_branches_all = sum(
            m.get("summary", {}).get("total_branches", 0) for m in branch_health_metrics.values()
        )
        total_stale_all = sum(
            m.get("summary", {}).get("stale_branches", 0) for m in branch_health_metrics.values()
        )

        report.write("### Overview\n\n")
        report.write(
            f"Analyzed **{total_repos} repositories** with a total of **{total_branches_all} branches**.\n\n"
        )

        if total_stale_all > 0:
            stale_pct = (
                (total_stale_all / total_branches_all * 100) if total_branches_all > 0 else 0
            )
            report.write(
                f"โ ๏ธ Found **{total_stale_all} stale branches** ({stale_pct:.1f}% of total)\n\n"
            )

        # Repository breakdown.
        report.write("### Repository Branch Health\n\n")

        for repo_name, metrics in branch_health_metrics.items():
            summary = metrics.get("summary", {})
            health = metrics.get("health_indicators", {})

            # Repository header with a health traffic-light indicator.
            health_emoji = self._get_health_emoji(health.get("overall_health", "unknown"))
            report.write(f"#### {repo_name} {health_emoji}\n\n")

            # Key metrics.
            report.write(f"- **Total Branches**: {summary.get('total_branches', 0)}\n")
            report.write(f"- **Active**: {summary.get('active_branches', 0)}\n")
            report.write(f"- **Stale**: {summary.get('stale_branches', 0)}\n")
            report.write(f"- **Long-lived**: {summary.get('long_lived_branches', 0)}\n")
            report.write(
                f"- **Average Age**: {summary.get('average_branch_age_days', 0):.1f} days\n"
            )
            report.write(
                f"- **Creation Rate**: {summary.get('branch_creation_rate_per_week', 0):.1f} branches/week\n"
            )
            report.write(
                f"- **Health Status**: {health.get('overall_health', 'unknown').title()}\n\n"
            )

            # Top unhealthy branches: unmerged with a health score below 60.
            branches = metrics.get("branches", {})
            unhealthy_branches = [
                (name, data)
                for name, data in branches.items()
                if data.get("health_score", 100) < 60 and not data.get("is_merged", False)
            ]

            if unhealthy_branches:
                report.write("**Branches Needing Attention**:\n")
                # Sort by health score (lowest, i.e. worst, first).
                unhealthy_branches.sort(key=lambda x: x[1].get("health_score", 100))

                for branch_name, branch_data in unhealthy_branches[:5]:  # Top 5
                    age = branch_data.get("age_days", 0)
                    behind = branch_data.get("behind_main", 0)
                    score = branch_data.get("health_score", 0)

                    issues = []
                    if age > 30:
                        issues.append(f"{age} days old")
                    if behind > 50:
                        issues.append(f"{behind} commits behind")

                    report.write(f"- `{branch_name}` (score: {score:.0f}) - {', '.join(issues)}\n")

                if len(unhealthy_branches) > 5:
                    report.write(f"- ...and {len(unhealthy_branches) - 5} more\n")
                report.write("\n")

        # Recommendations section.
        report.write("### Recommendations\n\n")

        all_recommendations = []
        for metrics in branch_health_metrics.values():
            all_recommendations.extend(metrics.get("recommendations", []))

        # Deduplicate by first token (typically the leading emoji). Blank
        # recommendation strings are skipped rather than raising IndexError.
        unique_recommendations: list[str] = []
        seen: set[str] = set()
        for rec in all_recommendations:
            tokens = rec.split()
            if not tokens:
                continue
            key = tokens[0]
            if key not in seen:
                seen.add(key)
                unique_recommendations.append(rec)

        if unique_recommendations:
            for rec in unique_recommendations[:5]:  # Top 5 recommendations
                report.write(f"- {rec}\n")
        else:
            report.write("- โ All repositories show healthy branch management practices\n")

        report.write("\n")

        # Best practices reminder.
        report.write("### Best Practices (2025 Standards)\n\n")
        report.write("- ๐ฏ **Elite teams** maintain <3% rework rate and <26 hour cycle times\n")
        report.write(
            "- ๐ **Small PRs** (<200 lines) correlate with better quality and faster reviews\n"
        )
        report.write(
            "- ๐ **Frequent integration** reduces merge conflicts and improves deployment readiness\n"
        )
        report.write(
            "- ๐งน **Regular cleanup** of merged and stale branches keeps repositories manageable\n"
        )

        return report.getvalue()

    def _get_health_emoji(self, health_status: str) -> str:
        """Get emoji for health status (case-insensitive; unknown -> neutral)."""
        emoji_map = {
            "excellent": "๐ข",
            "good": "๐ข",
            "fair": "๐ก",
            "poor": "๐ด",
            "unknown": "โช",
        }
        return emoji_map.get(health_status.lower(), "โช")

    def generate_detailed_branch_report(
        self, branch_health_metrics: dict[str, dict[str, Any]], output_path: Path
    ) -> Path:
        """Generate detailed branch-by-branch CSV report.

        The main branch of each repository is excluded. The file is always
        written, even when there are no branches to report (headers only),
        so the returned path is guaranteed to exist.

        Args:
            branch_health_metrics: Dictionary mapping repo names to their branch health metrics
            output_path: Path where the CSV should be written

        Returns:
            Path to the generated CSV file
        """
        rows: list[dict[str, Any]] = []

        for repo_name, metrics in branch_health_metrics.items():
            branches = metrics.get("branches", {})
            main_branch = metrics.get("main_branch", "main")

            for branch_name, branch_data in branches.items():
                # Skip the main branch in the detailed report.
                if branch_name == main_branch:
                    continue

                freq = branch_data.get("commit_frequency", {})

                rows.append(
                    {
                        "repository": repo_name,
                        "branch": branch_name,
                        "age_days": branch_data.get("age_days", 0),
                        "health_score": round(branch_data.get("health_score", 0), 1),
                        "status": self._get_branch_status(branch_data),
                        "total_commits": branch_data.get("total_commits", 0),
                        "unique_authors": branch_data.get("unique_authors", 0),
                        "commits_ahead": branch_data.get("ahead_of_main", 0),
                        "commits_behind": branch_data.get("behind_main", 0),
                        "divergence_total": branch_data.get("divergence_score", 0),
                        "daily_commit_avg": round(freq.get("daily_average", 0), 2),
                        "weekly_commit_avg": round(freq.get("weekly_average", 0), 2),
                        "latest_activity": branch_data.get("latest_activity", ""),
                        "is_merged": branch_data.get("is_merged", False),
                        "is_stale": branch_data.get("is_stale", False),
                    }
                )

        # Worst branches first within each repository.
        rows.sort(key=lambda x: (x["repository"], x["health_score"]))

        # Passing explicit columns fixes the header order and ensures a
        # headers-only file is produced even when rows is empty (the original
        # silently returned a path to a nonexistent file in that case).
        pd.DataFrame(rows, columns=self._DETAIL_FIELDNAMES).to_csv(output_path, index=False)

        return output_path

    def _get_branch_status(self, branch_data: dict[str, Any]) -> str:
        """Determine branch status; precedence: merged > stale > long-lived > active."""
        if branch_data.get("is_merged", False):
            return "merged"
        elif branch_data.get("is_stale", False):
            return "stale"
        elif branch_data.get("age_days", 0) > 14:
            return "long-lived"
        else:
            return "active"