@theihtisham/dev-pulse 1.0.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.editorconfig +12 -0
- package/.github/ISSUE_TEMPLATE/bug_report.yml +43 -0
- package/.github/ISSUE_TEMPLATE/feature_request.yml +33 -0
- package/.github/PULL_REQUEST_TEMPLATE.md +18 -0
- package/.github/dependabot.yml +16 -0
- package/.github/workflows/ci.yml +33 -0
- package/CODE_OF_CONDUCT.md +27 -0
- package/Dockerfile +8 -0
- package/LICENSE +21 -21
- package/README.md +135 -39
- package/SECURITY.md +22 -0
- package/devpulse/__init__.py +4 -4
- package/devpulse/api/__init__.py +1 -1
- package/devpulse/api/app.py +371 -371
- package/devpulse/cli/__init__.py +1 -1
- package/devpulse/cli/dashboard.py +131 -131
- package/devpulse/cli/main.py +678 -678
- package/devpulse/cli/render.py +175 -175
- package/devpulse/core/__init__.py +34 -34
- package/devpulse/core/analytics.py +487 -487
- package/devpulse/core/config.py +77 -77
- package/devpulse/core/database.py +612 -612
- package/devpulse/core/github_client.py +281 -281
- package/devpulse/core/models.py +142 -142
- package/devpulse/core/report_generator.py +454 -454
- package/devpulse/static/.gitkeep +1 -1
- package/devpulse/templates/report.html +64 -64
- package/package.json +35 -35
- package/pyproject.toml +80 -80
- package/requirements.txt +14 -14
- package/tests/__init__.py +1 -1
- package/tests/conftest.py +208 -208
- package/tests/test_analytics.py +284 -284
- package/tests/test_api.py +313 -313
- package/tests/test_cli.py +204 -204
- package/tests/test_config.py +47 -47
- package/tests/test_database.py +255 -255
- package/tests/test_models.py +107 -107
- package/tests/test_report_generator.py +173 -173
- package/jest.config.js +0 -7
|
@@ -1,487 +1,487 @@
|
|
|
1
|
-
"""Analytics engine — computes metrics, insights, and predictions."""
|
|
2
|
-
|
|
3
|
-
import math
|
|
4
|
-
from datetime import datetime, timedelta, date
|
|
5
|
-
from typing import Any, Optional
|
|
6
|
-
from collections import defaultdict
|
|
7
|
-
|
|
8
|
-
from devpulse.core.database import Database
|
|
9
|
-
from devpulse.core.models import (
|
|
10
|
-
DeveloperMetrics,
|
|
11
|
-
TeamHealth,
|
|
12
|
-
SprintData,
|
|
13
|
-
CodeQuality,
|
|
14
|
-
)
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
def _parse_date(dt_str: str) -> Optional[datetime]:
|
|
18
|
-
"""Parse an ISO date string safely."""
|
|
19
|
-
if not dt_str:
|
|
20
|
-
return None
|
|
21
|
-
for fmt in ("%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d"):
|
|
22
|
-
try:
|
|
23
|
-
return datetime.strptime(dt_str[:19] if "T" in dt_str else dt_str, fmt)
|
|
24
|
-
except ValueError:
|
|
25
|
-
continue
|
|
26
|
-
return None
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
def _days_between(start: str, end: str) -> float:
|
|
30
|
-
"""Return hours between two ISO date strings."""
|
|
31
|
-
s = _parse_date(start)
|
|
32
|
-
e = _parse_date(end)
|
|
33
|
-
if not s or not e:
|
|
34
|
-
return 0.0
|
|
35
|
-
return (e - s).total_seconds() / 3600.0
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
class AnalyticsEngine:
|
|
39
|
-
"""Compute developer metrics, team health, and AI-powered insights."""
|
|
40
|
-
|
|
41
|
-
def __init__(self, db: Optional[Database] = None) -> None:
|
|
42
|
-
self.db = db or Database()
|
|
43
|
-
|
|
44
|
-
# ── Developer Metrics ────────────────────────────────────────────
|
|
45
|
-
|
|
46
|
-
def developer_metrics(
|
|
47
|
-
self,
|
|
48
|
-
author: str,
|
|
49
|
-
days: int = 30,
|
|
50
|
-
repo: Optional[str] = None,
|
|
51
|
-
) -> DeveloperMetrics:
|
|
52
|
-
"""Compute comprehensive metrics for a single developer."""
|
|
53
|
-
since = (datetime.utcnow() - timedelta(days=days)).isoformat()
|
|
54
|
-
|
|
55
|
-
commits = self.db.get_commits(repo=repo, author=author, since=since)
|
|
56
|
-
prs = self.db.get_pull_requests(repo=repo, author=author, since=since)
|
|
57
|
-
issues_raw = self.db.get_issues(repo=repo, since=since)
|
|
58
|
-
reviews = self.db.get_reviews(author=author, since=since, repo=repo)
|
|
59
|
-
|
|
60
|
-
# Filter issues authored by this person
|
|
61
|
-
issues_opened = [i for i in issues_raw if i.get("author") == author]
|
|
62
|
-
issues_closed = [i for i in issues_raw if i.get("author") == author and i.get("state") == "closed"]
|
|
63
|
-
|
|
64
|
-
# Compute active days
|
|
65
|
-
commit_dates: set[str] = set()
|
|
66
|
-
for c in commits:
|
|
67
|
-
dt = _parse_date(c.get("author_date", ""))
|
|
68
|
-
if dt:
|
|
69
|
-
commit_dates.add(dt.strftime("%Y-%m-%d"))
|
|
70
|
-
|
|
71
|
-
# Compute avg PR merge time
|
|
72
|
-
merge_times: list[float] = []
|
|
73
|
-
prs_merged = 0
|
|
74
|
-
for p in prs:
|
|
75
|
-
if p.get("merged_at") and p.get("created_at"):
|
|
76
|
-
merge_times.append(_days_between(p["created_at"], p["merged_at"]))
|
|
77
|
-
prs_merged += 1
|
|
78
|
-
avg_merge_time = sum(merge_times) / len(merge_times) if merge_times else 0.0
|
|
79
|
-
|
|
80
|
-
# Compute avg review turnaround
|
|
81
|
-
review_times: list[float] = []
|
|
82
|
-
all_prs = self.db.get_pull_requests(repo=repo, since=since)
|
|
83
|
-
for p in all_prs:
|
|
84
|
-
pr_reviews = self.db.get_reviews(repo=p.get("repo", ""), since=since)
|
|
85
|
-
for r in pr_reviews:
|
|
86
|
-
if r.get("pr_number") == p.get("number") and p.get("created_at") and r.get("submitted_at"):
|
|
87
|
-
review_times.append(_days_between(p["created_at"], r["submitted_at"]))
|
|
88
|
-
avg_review = sum(review_times) / len(review_times) if review_times else 0.0
|
|
89
|
-
|
|
90
|
-
lines_added = sum(c.get("additions", 0) for c in commits)
|
|
91
|
-
lines_removed = sum(c.get("deletions", 0) for c in commits)
|
|
92
|
-
|
|
93
|
-
return DeveloperMetrics(
|
|
94
|
-
author=author,
|
|
95
|
-
commits_count=len(commits),
|
|
96
|
-
prs_created=len(prs),
|
|
97
|
-
prs_merged=prs_merged,
|
|
98
|
-
issues_opened=len(issues_opened),
|
|
99
|
-
issues_closed=len(issues_closed),
|
|
100
|
-
reviews_given=len(reviews),
|
|
101
|
-
avg_pr_merge_time_hours=round(avg_merge_time, 1),
|
|
102
|
-
avg_review_turnaround_hours=round(avg_review, 1),
|
|
103
|
-
lines_added=lines_added,
|
|
104
|
-
lines_removed=lines_removed,
|
|
105
|
-
commits_per_day=round(len(commits) / max(days, 1), 2),
|
|
106
|
-
active_days=len(commit_dates),
|
|
107
|
-
period_days=days,
|
|
108
|
-
)
|
|
109
|
-
|
|
110
|
-
def team_metrics(self, days: int = 30, repo: Optional[str] = None) -> list[DeveloperMetrics]:
|
|
111
|
-
"""Compute metrics for all contributors."""
|
|
112
|
-
since = (datetime.utcnow() - timedelta(days=days)).isoformat()
|
|
113
|
-
commits = self.db.get_commits(repo=repo, since=since)
|
|
114
|
-
authors = sorted({c["author"] for c in commits if c.get("author")})
|
|
115
|
-
return [self.developer_metrics(a, days=days, repo=repo) for a in authors]
|
|
116
|
-
|
|
117
|
-
# ── Team Health ──────────────────────────────────────────────────
|
|
118
|
-
|
|
119
|
-
def team_health(self, days: int = 30, repo: Optional[str] = None) -> TeamHealth:
|
|
120
|
-
"""Analyze team health: workload balance, burnout risk, collaboration."""
|
|
121
|
-
metrics = self.team_metrics(days=days, repo=repo)
|
|
122
|
-
if not metrics:
|
|
123
|
-
return TeamHealth(overall_score=0, recommendations=["No data available — sync GitHub first."])
|
|
124
|
-
|
|
125
|
-
# Workload balance: standard deviation of commits as ratio of mean
|
|
126
|
-
commit_counts = [m.commits_count for m in metrics]
|
|
127
|
-
mean_commits = sum(commit_counts) / len(commit_counts)
|
|
128
|
-
if mean_commits > 0:
|
|
129
|
-
variance = sum((c - mean_commits) ** 2 for c in commit_counts) / len(commit_counts)
|
|
130
|
-
std_dev = math.sqrt(variance)
|
|
131
|
-
balance = max(0.0, 1.0 - (std_dev / mean_commits))
|
|
132
|
-
else:
|
|
133
|
-
balance = 1.0
|
|
134
|
-
|
|
135
|
-
# Burnout risk per person
|
|
136
|
-
burnout_risk: dict[str, float] = {}
|
|
137
|
-
for m in metrics:
|
|
138
|
-
risk = 0.0
|
|
139
|
-
if m.period_days > 0:
|
|
140
|
-
commits_per_day = m.commits_count / m.period_days
|
|
141
|
-
if commits_per_day > 10:
|
|
142
|
-
risk += 0.3
|
|
143
|
-
if m.active_days > m.period_days * 0.9:
|
|
144
|
-
risk += 0.3
|
|
145
|
-
if m.avg_pr_merge_time_hours > 48:
|
|
146
|
-
risk += 0.2
|
|
147
|
-
if m.commits_per_day > 8:
|
|
148
|
-
risk += 0.2
|
|
149
|
-
burnout_risk[m.author] = round(min(risk, 1.0), 2)
|
|
150
|
-
|
|
151
|
-
# Collaboration score: reviews / PRs ratio
|
|
152
|
-
total_reviews = sum(m.reviews_given for m in metrics)
|
|
153
|
-
total_prs = sum(m.prs_created for m in metrics)
|
|
154
|
-
collab = min(total_reviews / max(total_prs, 1), 2.0) / 2.0 if total_prs > 0 else 0.5
|
|
155
|
-
|
|
156
|
-
# Velocity trend
|
|
157
|
-
prev_commits = self.db.get_commits(
|
|
158
|
-
repo=repo,
|
|
159
|
-
since=(datetime.utcnow() - timedelta(days=days * 2)).isoformat(),
|
|
160
|
-
until=(datetime.utcnow() - timedelta(days=days)).isoformat(),
|
|
161
|
-
)
|
|
162
|
-
curr_commits = self.db.get_commits(
|
|
163
|
-
repo=repo, since=(datetime.utcnow() - timedelta(days=days)).isoformat()
|
|
164
|
-
)
|
|
165
|
-
prev_count = len(prev_commits)
|
|
166
|
-
curr_count = len(curr_commits)
|
|
167
|
-
if prev_count == 0:
|
|
168
|
-
trend = "growing"
|
|
169
|
-
elif curr_count > prev_count * 1.1:
|
|
170
|
-
trend = "growing"
|
|
171
|
-
elif curr_count < prev_count * 0.9:
|
|
172
|
-
trend = "declining"
|
|
173
|
-
else:
|
|
174
|
-
trend = "stable"
|
|
175
|
-
|
|
176
|
-
# Overall score
|
|
177
|
-
overall = round((balance * 0.3 + collab * 0.3 + (1.0 - max(burnout_risk.values())) * 0.4) * 100, 1)
|
|
178
|
-
|
|
179
|
-
# Recommendations
|
|
180
|
-
recs: list[str] = []
|
|
181
|
-
high_burnout = [name for name, risk in burnout_risk.items() if risk >= 0.6]
|
|
182
|
-
if high_burnout:
|
|
183
|
-
recs.append(f"High burnout risk for: {', '.join(high_burnout)}. Consider redistributing workload.")
|
|
184
|
-
if balance < 0.5:
|
|
185
|
-
recs.append("Workload is heavily imbalanced. Review task assignment practices.")
|
|
186
|
-
if collab < 0.3:
|
|
187
|
-
recs.append("Low code review participation. Encourage team members to review more PRs.")
|
|
188
|
-
if trend == "declining":
|
|
189
|
-
recs.append("Commit velocity is declining. Check for blockers or team morale issues.")
|
|
190
|
-
if not recs:
|
|
191
|
-
recs.append("Team health looks good! Keep up the great work.")
|
|
192
|
-
|
|
193
|
-
return TeamHealth(
|
|
194
|
-
overall_score=overall,
|
|
195
|
-
workload_balance=round(balance, 2),
|
|
196
|
-
burnout_risk=burnout_risk,
|
|
197
|
-
collaboration_score=round(collab, 2),
|
|
198
|
-
velocity_trend=trend,
|
|
199
|
-
recommendations=recs,
|
|
200
|
-
)
|
|
201
|
-
|
|
202
|
-
# ── Sprint Analytics ─────────────────────────────────────────────
|
|
203
|
-
|
|
204
|
-
def sprint_analytics(self, sprint_name: Optional[str] = None) -> list[SprintData]:
|
|
205
|
-
"""Compute sprint velocity and burndown data."""
|
|
206
|
-
if sprint_name:
|
|
207
|
-
snapshots = self.db.get_sprint_snapshots(sprint_name)
|
|
208
|
-
if not snapshots:
|
|
209
|
-
return []
|
|
210
|
-
s = snapshots[-1]
|
|
211
|
-
total = s["total_points"]
|
|
212
|
-
completed = s["completed_points"]
|
|
213
|
-
added = s.get("added_points", 0)
|
|
214
|
-
return [
|
|
215
|
-
SprintData(
|
|
216
|
-
name=sprint_name,
|
|
217
|
-
total_points=total,
|
|
218
|
-
completed_points=completed,
|
|
219
|
-
remaining_points=total - completed,
|
|
220
|
-
added_points=added,
|
|
221
|
-
velocity=round(completed, 1),
|
|
222
|
-
scope_creep_pct=round(added / max(total - added, 1) * 100, 1),
|
|
223
|
-
)
|
|
224
|
-
]
|
|
225
|
-
|
|
226
|
-
# Return data for all sprints
|
|
227
|
-
conn = self.db._connect()
|
|
228
|
-
try:
|
|
229
|
-
sprints = conn.execute(
|
|
230
|
-
"SELECT DISTINCT sprint_name FROM sprint_snapshots ORDER BY sprint_name"
|
|
231
|
-
).fetchall()
|
|
232
|
-
finally:
|
|
233
|
-
conn.close()
|
|
234
|
-
|
|
235
|
-
results: list[SprintData] = []
|
|
236
|
-
for sp in sprints:
|
|
237
|
-
results.extend(self.sprint_analytics(sp["sprint_name"]))
|
|
238
|
-
return results
|
|
239
|
-
|
|
240
|
-
def sprint_burndown(self, sprint_name: str) -> list[dict[str, Any]]:
|
|
241
|
-
"""Get burndown data points for ASCII chart."""
|
|
242
|
-
snapshots = self.db.get_sprint_snapshots(sprint_name)
|
|
243
|
-
if not snapshots:
|
|
244
|
-
return []
|
|
245
|
-
return [
|
|
246
|
-
{
|
|
247
|
-
"day": i + 1,
|
|
248
|
-
"remaining": s["remaining_points"],
|
|
249
|
-
"ideal": snapshots[0]["remaining_points"] * (1 - (i / max(len(snapshots) - 1, 1))),
|
|
250
|
-
}
|
|
251
|
-
for i, s in enumerate(snapshots)
|
|
252
|
-
]
|
|
253
|
-
|
|
254
|
-
def predict_sprint_completion(
|
|
255
|
-
self, sprint_name: str, total_points: float, days_elapsed: int, total_days: int
|
|
256
|
-
) -> dict[str, Any]:
|
|
257
|
-
"""Predict if sprint will complete on time."""
|
|
258
|
-
snapshots = self.db.get_sprint_snapshots(sprint_name)
|
|
259
|
-
if not snapshots:
|
|
260
|
-
return {"prediction": "unknown", "confidence": 0, "message": "No data"}
|
|
261
|
-
|
|
262
|
-
completed = snapshots[-1]["completed_points"]
|
|
263
|
-
velocity = completed / max(days_elapsed, 1)
|
|
264
|
-
remaining = total_points - completed
|
|
265
|
-
days_left = total_days - days_elapsed
|
|
266
|
-
predicted_completion = velocity * days_left if velocity > 0 else 0
|
|
267
|
-
|
|
268
|
-
if predicted_completion >= remaining:
|
|
269
|
-
pct = round((predicted_completion - remaining) / max(remaining, 1) * 100, 1)
|
|
270
|
-
return {
|
|
271
|
-
"prediction": "on_track",
|
|
272
|
-
"confidence": min(pct, 100),
|
|
273
|
-
"message": f"On track — {pct}% buffer remaining. Predicted velocity: {velocity:.1f} pts/day.",
|
|
274
|
-
}
|
|
275
|
-
else:
|
|
276
|
-
deficit = round(remaining - predicted_completion, 1)
|
|
277
|
-
return {
|
|
278
|
-
"prediction": "at_risk",
|
|
279
|
-
"confidence": round((1 - predicted_completion / max(remaining, 1)) * 100, 1),
|
|
280
|
-
"message": f"At risk — {deficit} points may not be completed. Consider reducing scope.",
|
|
281
|
-
}
|
|
282
|
-
|
|
283
|
-
# ── Code Quality ─────────────────────────────────────────────────
|
|
284
|
-
|
|
285
|
-
def code_quality_trend(self, repo: Optional[str] = None, days: int = 90) -> list[CodeQuality]:
|
|
286
|
-
"""Get code quality snapshots over time."""
|
|
287
|
-
rows = self.db.get_quality_snapshots(repo=repo, days=days)
|
|
288
|
-
return [
|
|
289
|
-
CodeQuality(
|
|
290
|
-
repo=r["repo"],
|
|
291
|
-
date=r["snapshot_date"],
|
|
292
|
-
test_coverage=r.get("test_coverage", 0),
|
|
293
|
-
open_bugs=r.get("open_bugs", 0),
|
|
294
|
-
tech_debt_score=r.get("tech_debt_score", 0),
|
|
295
|
-
lines_added=r.get("lines_added", 0),
|
|
296
|
-
lines_removed=r.get("lines_removed", 0),
|
|
297
|
-
files_changed=r.get("files_changed", 0),
|
|
298
|
-
)
|
|
299
|
-
for r in rows
|
|
300
|
-
]
|
|
301
|
-
|
|
302
|
-
def compute_quality_score(self, repo: str, days: int = 30) -> dict[str, Any]:
|
|
303
|
-
"""Compute aggregate code quality score from commit/PR data."""
|
|
304
|
-
since = (datetime.utcnow() - timedelta(days=days)).isoformat()
|
|
305
|
-
commits = self.db.get_commits(repo=repo, since=since)
|
|
306
|
-
prs = self.db.get_pull_requests(repo=repo, since=since)
|
|
307
|
-
issues = self.db.get_issues(repo=repo, since=since)
|
|
308
|
-
|
|
309
|
-
total_additions = sum(c.get("additions", 0) for c in commits)
|
|
310
|
-
total_deletions = sum(c.get("deletions", 0) for c in commits)
|
|
311
|
-
bug_issues = [i for i in issues if "bug" in " ".join(
|
|
312
|
-
i.get("labels", []) if isinstance(i.get("labels"), list) else []
|
|
313
|
-
).lower()]
|
|
314
|
-
|
|
315
|
-
churn = total_additions + total_deletions
|
|
316
|
-
# Simple tech debt heuristic: bug rate + PR size
|
|
317
|
-
avg_pr_size = (total_additions + total_deletions) / max(len(prs), 1)
|
|
318
|
-
bug_rate = len(bug_issues) / max(len(commits), 1)
|
|
319
|
-
tech_debt = min(10, round(bug_rate * 50 + avg_pr_size / 100, 1))
|
|
320
|
-
|
|
321
|
-
return {
|
|
322
|
-
"repo": repo,
|
|
323
|
-
"churn": churn,
|
|
324
|
-
"bug_count": len(bug_issues),
|
|
325
|
-
"bug_rate": round(bug_rate, 3),
|
|
326
|
-
"avg_pr_size": round(avg_pr_size, 0),
|
|
327
|
-
"tech_debt_score": tech_debt,
|
|
328
|
-
"total_commits": len(commits),
|
|
329
|
-
"total_prs": len(prs),
|
|
330
|
-
}
|
|
331
|
-
|
|
332
|
-
# ── AI Insights ──────────────────────────────────────────────────
|
|
333
|
-
|
|
334
|
-
def generate_insights(self, days: int = 30, repo: Optional[str] = None) -> list[dict[str, str]]:
|
|
335
|
-
"""Generate AI-powered insights and recommendations."""
|
|
336
|
-
insights: list[dict[str, str]] = []
|
|
337
|
-
since = (datetime.utcnow() - timedelta(days=days)).isoformat()
|
|
338
|
-
|
|
339
|
-
commits = self.db.get_commits(repo=repo, since=since)
|
|
340
|
-
prs = self.db.get_pull_requests(repo=repo, since=since)
|
|
341
|
-
issues = self.db.get_issues(repo=repo, since=since)
|
|
342
|
-
|
|
343
|
-
# Insight: PR merge time
|
|
344
|
-
merge_times: list[float] = []
|
|
345
|
-
for p in prs:
|
|
346
|
-
if p.get("merged_at") and p.get("created_at"):
|
|
347
|
-
h = _days_between(p["created_at"], p["merged_at"])
|
|
348
|
-
merge_times.append(h)
|
|
349
|
-
if merge_times:
|
|
350
|
-
avg_mt = sum(merge_times) / len(merge_times)
|
|
351
|
-
if avg_mt > 48:
|
|
352
|
-
insights.append({
|
|
353
|
-
"type": "bottleneck",
|
|
354
|
-
"severity": "high",
|
|
355
|
-
"message": f"Average PR merge time is {avg_mt:.0f}h (>48h). Reviews are a bottleneck.",
|
|
356
|
-
"recommendation": "Consider assigning dedicated reviewers or setting SLA for reviews.",
|
|
357
|
-
})
|
|
358
|
-
elif avg_mt > 24:
|
|
359
|
-
insights.append({
|
|
360
|
-
"type": "warning",
|
|
361
|
-
"severity": "medium",
|
|
362
|
-
"message": f"Average PR merge time is {avg_mt:.0f}h. Room for improvement.",
|
|
363
|
-
"recommendation": "Set up PR size guidelines and automated review reminders.",
|
|
364
|
-
})
|
|
365
|
-
else:
|
|
366
|
-
insights.append({
|
|
367
|
-
"type": "positive",
|
|
368
|
-
"severity": "low",
|
|
369
|
-
"message": f"PR merge time is healthy at {avg_mt:.0f}h.",
|
|
370
|
-
"recommendation": "Maintain current review practices.",
|
|
371
|
-
})
|
|
372
|
-
|
|
373
|
-
# Insight: Issue velocity
|
|
374
|
-
closed_issues = [i for i in issues if i.get("state") == "closed"]
|
|
375
|
-
open_issues = [i for i in issues if i.get("state") == "open"]
|
|
376
|
-
if open_issues and closed_issues:
|
|
377
|
-
close_rate = len(closed_issues) / max(days, 1)
|
|
378
|
-
days_to_clear = len(open_issues) / max(close_rate, 0.01)
|
|
379
|
-
if days_to_clear > 60:
|
|
380
|
-
insights.append({
|
|
381
|
-
"type": "bottleneck",
|
|
382
|
-
"severity": "high",
|
|
383
|
-
"message": f"Open backlog would take {days_to_clear:.0f} days to clear at current rate.",
|
|
384
|
-
"recommendation": "Prioritize issue triage. Consider closing stale issues.",
|
|
385
|
-
})
|
|
386
|
-
|
|
387
|
-
# Insight: Commit patterns
|
|
388
|
-
author_commits: dict[str, int] = defaultdict(int)
|
|
389
|
-
for c in commits:
|
|
390
|
-
author_commits[c.get("author", "unknown")] += 1
|
|
391
|
-
if author_commits:
|
|
392
|
-
max_author = max(author_commits, key=author_commits.get) # type: ignore[arg-type]
|
|
393
|
-
max_count = author_commits[max_author]
|
|
394
|
-
total = sum(author_commits.values())
|
|
395
|
-
if max_count > total * 0.5 and len(author_commits) > 1:
|
|
396
|
-
insights.append({
|
|
397
|
-
"type": "warning",
|
|
398
|
-
"severity": "medium",
|
|
399
|
-
"message": f"{max_author} authored {max_count}/{total} commits ({max_count*100//total}%). Bus factor risk.",
|
|
400
|
-
"recommendation": "Encourage knowledge sharing and pair programming.",
|
|
401
|
-
})
|
|
402
|
-
|
|
403
|
-
# Insight: Large PRs
|
|
404
|
-
large_prs = [p for p in prs if p.get("additions", 0) + p.get("deletions", 0) > 500]
|
|
405
|
-
if large_prs:
|
|
406
|
-
insights.append({
|
|
407
|
-
"type": "warning",
|
|
408
|
-
"severity": "medium",
|
|
409
|
-
"message": f"{len(large_prs)} PRs with 500+ lines changed. These are harder to review.",
|
|
410
|
-
"recommendation": "Break large changes into smaller, focused PRs for faster reviews.",
|
|
411
|
-
})
|
|
412
|
-
|
|
413
|
-
# Insight: Weekend work
|
|
414
|
-
weekend_commits = 0
|
|
415
|
-
for c in commits:
|
|
416
|
-
dt = _parse_date(c.get("author_date", ""))
|
|
417
|
-
if dt and dt.weekday() >= 5:
|
|
418
|
-
weekend_commits += 1
|
|
419
|
-
if weekend_commits > len(commits) * 0.15 and weekend_commits > 3:
|
|
420
|
-
insights.append({
|
|
421
|
-
"type": "warning",
|
|
422
|
-
"severity": "medium",
|
|
423
|
-
"message": f"{weekend_commits} commits ({weekend_commits*100//max(len(commits),1)}%) on weekends.",
|
|
424
|
-
"recommendation": "Check for sustainable pace. Persistent weekend work signals potential burnout.",
|
|
425
|
-
})
|
|
426
|
-
|
|
427
|
-
if not insights:
|
|
428
|
-
insights.append({
|
|
429
|
-
"type": "positive",
|
|
430
|
-
"severity": "low",
|
|
431
|
-
"message": "No significant issues detected. Team is performing well.",
|
|
432
|
-
"recommendation": "Keep monitoring metrics and maintain current practices.",
|
|
433
|
-
})
|
|
434
|
-
|
|
435
|
-
return insights
|
|
436
|
-
|
|
437
|
-
# ── Activity Heatmap ─────────────────────────────────────────────
|
|
438
|
-
|
|
439
|
-
def activity_heatmap(self, author: Optional[str] = None, days: int = 365) -> list[dict[str, Any]]:
|
|
440
|
-
"""Get daily commit counts for contribution heatmap."""
|
|
441
|
-
return self.db.get_commit_count_by_day(author=author, days=days)
|
|
442
|
-
|
|
443
|
-
# ── Goal Progress ────────────────────────────────────────────────
|
|
444
|
-
|
|
445
|
-
def goal_coaching(self, goal_id: Optional[int] = None) -> list[dict[str, Any]]:
|
|
446
|
-
"""Generate AI coaching for goals."""
|
|
447
|
-
if goal_id:
|
|
448
|
-
goals = [g for g in self.db.get_goals() if g["id"] == goal_id]
|
|
449
|
-
else:
|
|
450
|
-
goals = self.db.get_goals(status="active")
|
|
451
|
-
|
|
452
|
-
coaching: list[dict[str, Any]] = []
|
|
453
|
-
for g in goals:
|
|
454
|
-
target = g["target_value"]
|
|
455
|
-
current = g["current_value"]
|
|
456
|
-
pct = (current / target * 100) if target > 0 else 0
|
|
457
|
-
remaining = target - current
|
|
458
|
-
|
|
459
|
-
advice = ""
|
|
460
|
-
if pct >= 100:
|
|
461
|
-
advice = "Goal achieved! Consider setting a new stretch target."
|
|
462
|
-
status = "achieved"
|
|
463
|
-
elif pct >= 75:
|
|
464
|
-
advice = "Almost there! Push through the final stretch."
|
|
465
|
-
status = "on_track"
|
|
466
|
-
elif pct >= 50:
|
|
467
|
-
advice = "Good progress. Maintain momentum and check for blockers."
|
|
468
|
-
status = "on_track"
|
|
469
|
-
elif pct >= 25:
|
|
470
|
-
advice = "Behind pace. Identify what's blocking progress and address it."
|
|
471
|
-
status = "at_risk"
|
|
472
|
-
else:
|
|
473
|
-
advice = "Significantly behind. Consider adjusting the target or removing obstacles."
|
|
474
|
-
status = "at_risk"
|
|
475
|
-
|
|
476
|
-
coaching.append({
|
|
477
|
-
"goal": g["title"],
|
|
478
|
-
"metric": g["metric"],
|
|
479
|
-
"progress_pct": round(pct, 1),
|
|
480
|
-
"current": current,
|
|
481
|
-
"target": target,
|
|
482
|
-
"remaining": round(remaining, 1),
|
|
483
|
-
"status": status,
|
|
484
|
-
"advice": advice,
|
|
485
|
-
})
|
|
486
|
-
|
|
487
|
-
return coaching
|
|
1
|
+
"""Analytics engine — computes metrics, insights, and predictions."""
|
|
2
|
+
|
|
3
|
+
import math
|
|
4
|
+
from datetime import datetime, timedelta, date
|
|
5
|
+
from typing import Any, Optional
|
|
6
|
+
from collections import defaultdict
|
|
7
|
+
|
|
8
|
+
from devpulse.core.database import Database
|
|
9
|
+
from devpulse.core.models import (
|
|
10
|
+
DeveloperMetrics,
|
|
11
|
+
TeamHealth,
|
|
12
|
+
SprintData,
|
|
13
|
+
CodeQuality,
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _parse_date(dt_str: str) -> Optional[datetime]:
|
|
18
|
+
"""Parse an ISO date string safely."""
|
|
19
|
+
if not dt_str:
|
|
20
|
+
return None
|
|
21
|
+
for fmt in ("%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d"):
|
|
22
|
+
try:
|
|
23
|
+
return datetime.strptime(dt_str[:19] if "T" in dt_str else dt_str, fmt)
|
|
24
|
+
except ValueError:
|
|
25
|
+
continue
|
|
26
|
+
return None
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def _days_between(start: str, end: str) -> float:
|
|
30
|
+
"""Return hours between two ISO date strings."""
|
|
31
|
+
s = _parse_date(start)
|
|
32
|
+
e = _parse_date(end)
|
|
33
|
+
if not s or not e:
|
|
34
|
+
return 0.0
|
|
35
|
+
return (e - s).total_seconds() / 3600.0
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class AnalyticsEngine:
|
|
39
|
+
"""Compute developer metrics, team health, and AI-powered insights."""
|
|
40
|
+
|
|
41
|
+
def __init__(self, db: Optional[Database] = None) -> None:
|
|
42
|
+
self.db = db or Database()
|
|
43
|
+
|
|
44
|
+
# ── Developer Metrics ────────────────────────────────────────────
|
|
45
|
+
|
|
46
|
+
def developer_metrics(
|
|
47
|
+
self,
|
|
48
|
+
author: str,
|
|
49
|
+
days: int = 30,
|
|
50
|
+
repo: Optional[str] = None,
|
|
51
|
+
) -> DeveloperMetrics:
|
|
52
|
+
"""Compute comprehensive metrics for a single developer."""
|
|
53
|
+
since = (datetime.utcnow() - timedelta(days=days)).isoformat()
|
|
54
|
+
|
|
55
|
+
commits = self.db.get_commits(repo=repo, author=author, since=since)
|
|
56
|
+
prs = self.db.get_pull_requests(repo=repo, author=author, since=since)
|
|
57
|
+
issues_raw = self.db.get_issues(repo=repo, since=since)
|
|
58
|
+
reviews = self.db.get_reviews(author=author, since=since, repo=repo)
|
|
59
|
+
|
|
60
|
+
# Filter issues authored by this person
|
|
61
|
+
issues_opened = [i for i in issues_raw if i.get("author") == author]
|
|
62
|
+
issues_closed = [i for i in issues_raw if i.get("author") == author and i.get("state") == "closed"]
|
|
63
|
+
|
|
64
|
+
# Compute active days
|
|
65
|
+
commit_dates: set[str] = set()
|
|
66
|
+
for c in commits:
|
|
67
|
+
dt = _parse_date(c.get("author_date", ""))
|
|
68
|
+
if dt:
|
|
69
|
+
commit_dates.add(dt.strftime("%Y-%m-%d"))
|
|
70
|
+
|
|
71
|
+
# Compute avg PR merge time
|
|
72
|
+
merge_times: list[float] = []
|
|
73
|
+
prs_merged = 0
|
|
74
|
+
for p in prs:
|
|
75
|
+
if p.get("merged_at") and p.get("created_at"):
|
|
76
|
+
merge_times.append(_days_between(p["created_at"], p["merged_at"]))
|
|
77
|
+
prs_merged += 1
|
|
78
|
+
avg_merge_time = sum(merge_times) / len(merge_times) if merge_times else 0.0
|
|
79
|
+
|
|
80
|
+
# Compute avg review turnaround
|
|
81
|
+
review_times: list[float] = []
|
|
82
|
+
all_prs = self.db.get_pull_requests(repo=repo, since=since)
|
|
83
|
+
for p in all_prs:
|
|
84
|
+
pr_reviews = self.db.get_reviews(repo=p.get("repo", ""), since=since)
|
|
85
|
+
for r in pr_reviews:
|
|
86
|
+
if r.get("pr_number") == p.get("number") and p.get("created_at") and r.get("submitted_at"):
|
|
87
|
+
review_times.append(_days_between(p["created_at"], r["submitted_at"]))
|
|
88
|
+
avg_review = sum(review_times) / len(review_times) if review_times else 0.0
|
|
89
|
+
|
|
90
|
+
lines_added = sum(c.get("additions", 0) for c in commits)
|
|
91
|
+
lines_removed = sum(c.get("deletions", 0) for c in commits)
|
|
92
|
+
|
|
93
|
+
return DeveloperMetrics(
|
|
94
|
+
author=author,
|
|
95
|
+
commits_count=len(commits),
|
|
96
|
+
prs_created=len(prs),
|
|
97
|
+
prs_merged=prs_merged,
|
|
98
|
+
issues_opened=len(issues_opened),
|
|
99
|
+
issues_closed=len(issues_closed),
|
|
100
|
+
reviews_given=len(reviews),
|
|
101
|
+
avg_pr_merge_time_hours=round(avg_merge_time, 1),
|
|
102
|
+
avg_review_turnaround_hours=round(avg_review, 1),
|
|
103
|
+
lines_added=lines_added,
|
|
104
|
+
lines_removed=lines_removed,
|
|
105
|
+
commits_per_day=round(len(commits) / max(days, 1), 2),
|
|
106
|
+
active_days=len(commit_dates),
|
|
107
|
+
period_days=days,
|
|
108
|
+
)
|
|
109
|
+
|
|
110
|
+
def team_metrics(self, days: int = 30, repo: Optional[str] = None) -> list[DeveloperMetrics]:
|
|
111
|
+
"""Compute metrics for all contributors."""
|
|
112
|
+
since = (datetime.utcnow() - timedelta(days=days)).isoformat()
|
|
113
|
+
commits = self.db.get_commits(repo=repo, since=since)
|
|
114
|
+
authors = sorted({c["author"] for c in commits if c.get("author")})
|
|
115
|
+
return [self.developer_metrics(a, days=days, repo=repo) for a in authors]
|
|
116
|
+
|
|
117
|
+
# ── Team Health ──────────────────────────────────────────────────
|
|
118
|
+
|
|
119
|
+
def team_health(self, days: int = 30, repo: Optional[str] = None) -> TeamHealth:
|
|
120
|
+
"""Analyze team health: workload balance, burnout risk, collaboration."""
|
|
121
|
+
metrics = self.team_metrics(days=days, repo=repo)
|
|
122
|
+
if not metrics:
|
|
123
|
+
return TeamHealth(overall_score=0, recommendations=["No data available — sync GitHub first."])
|
|
124
|
+
|
|
125
|
+
# Workload balance: standard deviation of commits as ratio of mean
|
|
126
|
+
commit_counts = [m.commits_count for m in metrics]
|
|
127
|
+
mean_commits = sum(commit_counts) / len(commit_counts)
|
|
128
|
+
if mean_commits > 0:
|
|
129
|
+
variance = sum((c - mean_commits) ** 2 for c in commit_counts) / len(commit_counts)
|
|
130
|
+
std_dev = math.sqrt(variance)
|
|
131
|
+
balance = max(0.0, 1.0 - (std_dev / mean_commits))
|
|
132
|
+
else:
|
|
133
|
+
balance = 1.0
|
|
134
|
+
|
|
135
|
+
# Burnout risk per person
|
|
136
|
+
burnout_risk: dict[str, float] = {}
|
|
137
|
+
for m in metrics:
|
|
138
|
+
risk = 0.0
|
|
139
|
+
if m.period_days > 0:
|
|
140
|
+
commits_per_day = m.commits_count / m.period_days
|
|
141
|
+
if commits_per_day > 10:
|
|
142
|
+
risk += 0.3
|
|
143
|
+
if m.active_days > m.period_days * 0.9:
|
|
144
|
+
risk += 0.3
|
|
145
|
+
if m.avg_pr_merge_time_hours > 48:
|
|
146
|
+
risk += 0.2
|
|
147
|
+
if m.commits_per_day > 8:
|
|
148
|
+
risk += 0.2
|
|
149
|
+
burnout_risk[m.author] = round(min(risk, 1.0), 2)
|
|
150
|
+
|
|
151
|
+
# Collaboration score: reviews / PRs ratio
|
|
152
|
+
total_reviews = sum(m.reviews_given for m in metrics)
|
|
153
|
+
total_prs = sum(m.prs_created for m in metrics)
|
|
154
|
+
collab = min(total_reviews / max(total_prs, 1), 2.0) / 2.0 if total_prs > 0 else 0.5
|
|
155
|
+
|
|
156
|
+
# Velocity trend
|
|
157
|
+
prev_commits = self.db.get_commits(
|
|
158
|
+
repo=repo,
|
|
159
|
+
since=(datetime.utcnow() - timedelta(days=days * 2)).isoformat(),
|
|
160
|
+
until=(datetime.utcnow() - timedelta(days=days)).isoformat(),
|
|
161
|
+
)
|
|
162
|
+
curr_commits = self.db.get_commits(
|
|
163
|
+
repo=repo, since=(datetime.utcnow() - timedelta(days=days)).isoformat()
|
|
164
|
+
)
|
|
165
|
+
prev_count = len(prev_commits)
|
|
166
|
+
curr_count = len(curr_commits)
|
|
167
|
+
if prev_count == 0:
|
|
168
|
+
trend = "growing"
|
|
169
|
+
elif curr_count > prev_count * 1.1:
|
|
170
|
+
trend = "growing"
|
|
171
|
+
elif curr_count < prev_count * 0.9:
|
|
172
|
+
trend = "declining"
|
|
173
|
+
else:
|
|
174
|
+
trend = "stable"
|
|
175
|
+
|
|
176
|
+
# Overall score
|
|
177
|
+
overall = round((balance * 0.3 + collab * 0.3 + (1.0 - max(burnout_risk.values())) * 0.4) * 100, 1)
|
|
178
|
+
|
|
179
|
+
# Recommendations
|
|
180
|
+
recs: list[str] = []
|
|
181
|
+
high_burnout = [name for name, risk in burnout_risk.items() if risk >= 0.6]
|
|
182
|
+
if high_burnout:
|
|
183
|
+
recs.append(f"High burnout risk for: {', '.join(high_burnout)}. Consider redistributing workload.")
|
|
184
|
+
if balance < 0.5:
|
|
185
|
+
recs.append("Workload is heavily imbalanced. Review task assignment practices.")
|
|
186
|
+
if collab < 0.3:
|
|
187
|
+
recs.append("Low code review participation. Encourage team members to review more PRs.")
|
|
188
|
+
if trend == "declining":
|
|
189
|
+
recs.append("Commit velocity is declining. Check for blockers or team morale issues.")
|
|
190
|
+
if not recs:
|
|
191
|
+
recs.append("Team health looks good! Keep up the great work.")
|
|
192
|
+
|
|
193
|
+
return TeamHealth(
|
|
194
|
+
overall_score=overall,
|
|
195
|
+
workload_balance=round(balance, 2),
|
|
196
|
+
burnout_risk=burnout_risk,
|
|
197
|
+
collaboration_score=round(collab, 2),
|
|
198
|
+
velocity_trend=trend,
|
|
199
|
+
recommendations=recs,
|
|
200
|
+
)
|
|
201
|
+
|
|
202
|
+
# ── Sprint Analytics ─────────────────────────────────────────────
|
|
203
|
+
|
|
204
|
+
def sprint_analytics(self, sprint_name: Optional[str] = None) -> list[SprintData]:
    """Compute sprint velocity and burndown data.

    When *sprint_name* is given, return a single-element list for that
    sprint (empty if it has no snapshots). Otherwise enumerate every
    sprint recorded in the database and recurse once per sprint.
    """
    if sprint_name:
        history = self.db.get_sprint_snapshots(sprint_name)
        if not history:
            return []
        latest = history[-1]
        planned = latest["total_points"]
        done = latest["completed_points"]
        scope_added = latest.get("added_points", 0)
        # Creep is measured against the original scope (total minus what
        # was added mid-sprint); clamp to 1 to avoid division by zero.
        original_scope = max(planned - scope_added, 1)
        summary = SprintData(
            name=sprint_name,
            total_points=planned,
            completed_points=done,
            remaining_points=planned - done,
            added_points=scope_added,
            velocity=round(done, 1),
            scope_creep_pct=round(scope_added / original_scope * 100, 1),
        )
        return [summary]

    # No sprint given: list all sprint names, then recurse per sprint.
    conn = self.db._connect()
    try:
        name_rows = conn.execute(
            "SELECT DISTINCT sprint_name FROM sprint_snapshots ORDER BY sprint_name"
        ).fetchall()
    finally:
        conn.close()

    collected: list[SprintData] = []
    for row in name_rows:
        collected.extend(self.sprint_analytics(row["sprint_name"]))
    return collected
|
|
239
|
+
|
|
240
|
+
def sprint_burndown(self, sprint_name: str) -> list[dict[str, Any]]:
    """Get burndown data points for ASCII chart.

    Each point carries the actual remaining points plus the "ideal"
    straight-line burndown from the first snapshot down to zero.
    """
    snapshots = self.db.get_sprint_snapshots(sprint_name)
    if not snapshots:
        return []

    starting_remaining = snapshots[0]["remaining_points"]
    # With a single snapshot the span is clamped to 1 to avoid 0/0.
    span = max(len(snapshots) - 1, 1)

    points: list[dict[str, Any]] = []
    for day_index, snap in enumerate(snapshots):
        points.append({
            "day": day_index + 1,
            "remaining": snap["remaining_points"],
            "ideal": starting_remaining * (1 - (day_index / span)),
        })
    return points
|
|
253
|
+
|
|
254
|
+
def predict_sprint_completion(
    self, sprint_name: str, total_points: float, days_elapsed: int, total_days: int
) -> dict[str, Any]:
    """Predict if sprint will complete on time.

    Extrapolates the observed points-per-day pace over the remaining
    days and compares the forecast against the outstanding work.
    """
    snapshots = self.db.get_sprint_snapshots(sprint_name)
    if not snapshots:
        return {"prediction": "unknown", "confidence": 0, "message": "No data"}

    done = snapshots[-1]["completed_points"]
    # Clamp elapsed days to 1 so a day-zero call doesn't divide by zero.
    pace = done / max(days_elapsed, 1)
    outstanding = total_points - done
    days_remaining = total_days - days_elapsed
    forecast = pace * days_remaining if pace > 0 else 0

    if forecast < outstanding:
        shortfall = round(outstanding - forecast, 1)
        return {
            "prediction": "at_risk",
            "confidence": round((1 - forecast / max(outstanding, 1)) * 100, 1),
            "message": f"At risk — {shortfall} points may not be completed. Consider reducing scope.",
        }

    # Forecast covers the outstanding work; report the buffer as a percentage.
    buffer_pct = round((forecast - outstanding) / max(outstanding, 1) * 100, 1)
    return {
        "prediction": "on_track",
        "confidence": min(buffer_pct, 100),
        "message": f"On track — {buffer_pct}% buffer remaining. Predicted velocity: {pace:.1f} pts/day.",
    }
|
|
282
|
+
|
|
283
|
+
# ── Code Quality ─────────────────────────────────────────────────
|
|
284
|
+
|
|
285
|
+
def code_quality_trend(self, repo: Optional[str] = None, days: int = 90) -> list[CodeQuality]:
    """Get code quality snapshots over time.

    Reads persisted quality snapshots (optionally scoped to *repo*) and
    maps each row onto a CodeQuality record; missing metrics default to 0.
    """
    trend: list[CodeQuality] = []
    for row in self.db.get_quality_snapshots(repo=repo, days=days):
        snapshot = CodeQuality(
            repo=row["repo"],
            date=row["snapshot_date"],
            test_coverage=row.get("test_coverage", 0),
            open_bugs=row.get("open_bugs", 0),
            tech_debt_score=row.get("tech_debt_score", 0),
            lines_added=row.get("lines_added", 0),
            lines_removed=row.get("lines_removed", 0),
            files_changed=row.get("files_changed", 0),
        )
        trend.append(snapshot)
    return trend
|
|
301
|
+
|
|
302
|
+
def compute_quality_score(self, repo: str, days: int = 30) -> dict[str, Any]:
    """Compute aggregate code quality score from commit/PR data.

    Aggregates churn, bug-labelled issues, and PR size over the last
    *days* days into a simple 0-10 tech-debt heuristic.
    """
    cutoff = (datetime.utcnow() - timedelta(days=days)).isoformat()
    commits = self.db.get_commits(repo=repo, since=cutoff)
    prs = self.db.get_pull_requests(repo=repo, since=cutoff)
    issues = self.db.get_issues(repo=repo, since=cutoff)

    added = sum(c.get("additions", 0) for c in commits)
    removed = sum(c.get("deletions", 0) for c in commits)
    churn = added + removed

    def _is_bug(issue: dict[str, Any]) -> bool:
        # Labels may be absent or malformed; anything that isn't a list
        # is treated as "no labels". Substring match on the joined text.
        labels = issue.get("labels")
        if not isinstance(labels, list):
            labels = []
        return "bug" in " ".join(labels).lower()

    bug_issues = [i for i in issues if _is_bug(i)]

    # Simple tech debt heuristic: bug rate + PR size
    avg_pr_size = churn / max(len(prs), 1)
    bug_rate = len(bug_issues) / max(len(commits), 1)
    tech_debt = min(10, round(bug_rate * 50 + avg_pr_size / 100, 1))

    return {
        "repo": repo,
        "churn": churn,
        "bug_count": len(bug_issues),
        "bug_rate": round(bug_rate, 3),
        "avg_pr_size": round(avg_pr_size, 0),
        "tech_debt_score": tech_debt,
        "total_commits": len(commits),
        "total_prs": len(prs),
    }
|
|
331
|
+
|
|
332
|
+
# ── AI Insights ──────────────────────────────────────────────────
|
|
333
|
+
|
|
334
|
+
def generate_insights(self, days: int = 30, repo: Optional[str] = None) -> list[dict[str, str]]:
    """Generate AI-powered insights and recommendations.

    Scans the last *days* days of commits, pull requests, and issues
    (optionally limited to *repo*) and returns a list of insight dicts,
    each with ``type``, ``severity``, ``message``, and ``recommendation``
    keys. Always returns at least one entry (a positive fallback when
    nothing is flagged).
    """
    insights: list[dict[str, str]] = []
    since = (datetime.utcnow() - timedelta(days=days)).isoformat()

    commits = self.db.get_commits(repo=repo, since=since)
    prs = self.db.get_pull_requests(repo=repo, since=since)
    issues = self.db.get_issues(repo=repo, since=since)

    # Insight: PR merge time
    merge_times: list[float] = []
    for p in prs:
        # Only PRs that were actually merged (and have a creation time) count.
        if p.get("merged_at") and p.get("created_at"):
            # NOTE(review): the thresholds below (24/48) read as hours, but
            # the helper is named _days_between — confirm which unit it
            # actually returns.
            h = _days_between(p["created_at"], p["merged_at"])
            merge_times.append(h)
    if merge_times:
        avg_mt = sum(merge_times) / len(merge_times)
        if avg_mt > 48:
            insights.append({
                "type": "bottleneck",
                "severity": "high",
                "message": f"Average PR merge time is {avg_mt:.0f}h (>48h). Reviews are a bottleneck.",
                "recommendation": "Consider assigning dedicated reviewers or setting SLA for reviews.",
            })
        elif avg_mt > 24:
            insights.append({
                "type": "warning",
                "severity": "medium",
                "message": f"Average PR merge time is {avg_mt:.0f}h. Room for improvement.",
                "recommendation": "Set up PR size guidelines and automated review reminders.",
            })
        else:
            insights.append({
                "type": "positive",
                "severity": "low",
                "message": f"PR merge time is healthy at {avg_mt:.0f}h.",
                "recommendation": "Maintain current review practices.",
            })

    # Insight: Issue velocity
    closed_issues = [i for i in issues if i.get("state") == "closed"]
    open_issues = [i for i in issues if i.get("state") == "open"]
    if open_issues and closed_issues:
        # Backlog projection: closures per day over the window, then how
        # long the open backlog would take at that rate (rate floored at
        # 0.01 to keep the division finite).
        close_rate = len(closed_issues) / max(days, 1)
        days_to_clear = len(open_issues) / max(close_rate, 0.01)
        if days_to_clear > 60:
            insights.append({
                "type": "bottleneck",
                "severity": "high",
                "message": f"Open backlog would take {days_to_clear:.0f} days to clear at current rate.",
                "recommendation": "Prioritize issue triage. Consider closing stale issues.",
            })

    # Insight: Commit patterns
    author_commits: dict[str, int] = defaultdict(int)
    for c in commits:
        author_commits[c.get("author", "unknown")] += 1
    if author_commits:
        max_author = max(author_commits, key=author_commits.get)  # type: ignore[arg-type]
        max_count = author_commits[max_author]
        total = sum(author_commits.values())
        # Bus-factor risk: one person over half the commits on a team of >1.
        if max_count > total * 0.5 and len(author_commits) > 1:
            insights.append({
                "type": "warning",
                "severity": "medium",
                "message": f"{max_author} authored {max_count}/{total} commits ({max_count*100//total}%). Bus factor risk.",
                "recommendation": "Encourage knowledge sharing and pair programming.",
            })

    # Insight: Large PRs
    large_prs = [p for p in prs if p.get("additions", 0) + p.get("deletions", 0) > 500]
    if large_prs:
        insights.append({
            "type": "warning",
            "severity": "medium",
            "message": f"{len(large_prs)} PRs with 500+ lines changed. These are harder to review.",
            "recommendation": "Break large changes into smaller, focused PRs for faster reviews.",
        })

    # Insight: Weekend work
    weekend_commits = 0
    for c in commits:
        dt = _parse_date(c.get("author_date", ""))
        # weekday() >= 5 means Saturday or Sunday.
        if dt and dt.weekday() >= 5:
            weekend_commits += 1
    # Flag only when weekend work is both a meaningful share (>15%) and
    # more than a handful of commits in absolute terms.
    if weekend_commits > len(commits) * 0.15 and weekend_commits > 3:
        insights.append({
            "type": "warning",
            "severity": "medium",
            "message": f"{weekend_commits} commits ({weekend_commits*100//max(len(commits),1)}%) on weekends.",
            "recommendation": "Check for sustainable pace. Persistent weekend work signals potential burnout.",
        })

    if not insights:
        insights.append({
            "type": "positive",
            "severity": "low",
            "message": "No significant issues detected. Team is performing well.",
            "recommendation": "Keep monitoring metrics and maintain current practices.",
        })

    return insights
|
|
436
|
+
|
|
437
|
+
# ── Activity Heatmap ─────────────────────────────────────────────
|
|
438
|
+
|
|
439
|
+
def activity_heatmap(self, author: Optional[str] = None, days: int = 365) -> list[dict[str, Any]]:
    """Get daily commit counts for contribution heatmap.

    Thin delegation to the database layer; optionally scoped to *author*.
    """
    daily_counts = self.db.get_commit_count_by_day(author=author, days=days)
    return daily_counts
|
|
442
|
+
|
|
443
|
+
# ── Goal Progress ────────────────────────────────────────────────
|
|
444
|
+
|
|
445
|
+
def goal_coaching(self, goal_id: Optional[int] = None) -> list[dict[str, Any]]:
    """Generate AI coaching for goals.

    With *goal_id*, coaches just that goal (whatever its status);
    otherwise coaches every active goal. Advice is chosen by progress
    band: >=100% achieved, >=50% on track, below that at risk.
    """
    if goal_id:
        selected = [g for g in self.db.get_goals() if g["id"] == goal_id]
    else:
        selected = self.db.get_goals(status="active")

    # Progress bands scanned top-down: (minimum pct, status, advice).
    bands = (
        (100, "achieved", "Goal achieved! Consider setting a new stretch target."),
        (75, "on_track", "Almost there! Push through the final stretch."),
        (50, "on_track", "Good progress. Maintain momentum and check for blockers."),
        (25, "at_risk", "Behind pace. Identify what's blocking progress and address it."),
    )
    fallback = ("at_risk", "Significantly behind. Consider adjusting the target or removing obstacles.")

    coaching: list[dict[str, Any]] = []
    for goal in selected:
        target = goal["target_value"]
        current = goal["current_value"]
        # Non-positive targets are treated as 0% progress.
        pct = (current / target * 100) if target > 0 else 0

        status, advice = fallback
        for minimum, band_status, band_advice in bands:
            if pct >= minimum:
                status, advice = band_status, band_advice
                break

        coaching.append({
            "goal": goal["title"],
            "metric": goal["metric"],
            "progress_pct": round(pct, 1),
            "current": current,
            "target": target,
            "remaining": round(target - current, 1),
            "status": status,
            "advice": advice,
        })

    return coaching
|