@jeganwrites/claudash 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CONTRIBUTING.md +35 -0
- package/LICENSE +21 -0
- package/README.md +261 -0
- package/analyzer.py +890 -0
- package/bin/claudash.js +121 -0
- package/claude_ai_tracker.py +358 -0
- package/cli.py +1034 -0
- package/config.py +100 -0
- package/db.py +1156 -0
- package/fix_tracker.py +539 -0
- package/insights.py +359 -0
- package/mcp_server.py +414 -0
- package/package.json +39 -0
- package/scanner.py +385 -0
- package/server.py +762 -0
- package/templates/accounts.html +936 -0
- package/templates/dashboard.html +1742 -0
- package/tools/get-derived-keys.py +112 -0
- package/tools/mac-sync.py +386 -0
- package/tools/oauth_sync.py +308 -0
- package/tools/setup-pm2.sh +53 -0
- package/waste_patterns.py +334 -0
package/analyzer.py
ADDED
|
@@ -0,0 +1,890 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import json
|
|
3
|
+
from datetime import datetime, timezone, timedelta
|
|
4
|
+
from collections import defaultdict
|
|
5
|
+
|
|
6
|
+
from config import MODEL_PRICING, MAX_WINDOW_HOURS, COST_TARGETS
|
|
7
|
+
from db import (
|
|
8
|
+
get_conn, insert_alert, clear_alerts, query_alerts,
|
|
9
|
+
upsert_daily_snapshot, get_daily_snapshots, insert_window_burn, get_window_burns,
|
|
10
|
+
get_accounts_config, get_project_map_config,
|
|
11
|
+
)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def _now():
|
|
15
|
+
return int(time.time())
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _days_ago(n):
    """Unix epoch timestamp exactly *n* days before now."""
    offset_seconds = n * 24 * 3600
    return _now() - offset_seconds
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _today_midnight():
|
|
23
|
+
dt = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
|
|
24
|
+
return int(dt.timestamp())
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def _fetch_rows(conn, account=None, since=None):
|
|
28
|
+
sql = "SELECT * FROM sessions WHERE 1=1"
|
|
29
|
+
params = []
|
|
30
|
+
if account and account != "all":
|
|
31
|
+
sql += " AND account = ?"
|
|
32
|
+
params.append(account)
|
|
33
|
+
if since:
|
|
34
|
+
sql += " AND timestamp >= ?"
|
|
35
|
+
params.append(since)
|
|
36
|
+
return conn.execute(sql, params).fetchall()
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
# ── Per-account metrics ──
|
|
40
|
+
|
|
41
|
+
def account_metrics(conn, account="all"):
    """Headline usage metrics for one account (or "all" accounts combined).

    Aggregates the sessions table over three horizons — today (UTC midnight),
    7 days, and 30 days — and returns a dict with: 30-day cost, today's
    session count, average tokens/session, cache hit rate, cache ROI (USD),
    subscription ROI, average session depth, compaction rate, and the
    peak-usage hour/weekday.
    """
    ACCOUNTS = get_accounts_config(conn)
    rows_30d = _fetch_rows(conn, account, _days_ago(30))
    rows_7d = _fetch_rows(conn, account, _days_ago(7))
    rows_today = _fetch_rows(conn, account, _today_midnight())

    total_cost_30d = sum(r["cost_usd"] for r in rows_30d)
    sessions_today = len(set(r["session_id"] for r in rows_today))

    # Average total (input+output) tokens per distinct session over 7 days.
    session_tokens_7d = {}
    for r in rows_7d:
        sid = r["session_id"]
        session_tokens_7d[sid] = session_tokens_7d.get(sid, 0) + r["input_tokens"] + r["output_tokens"]
    avg_tokens_per_session = (
        sum(session_tokens_7d.values()) / len(session_tokens_7d) if session_tokens_7d else 0
    )

    # Cache hit rate: cache_reads / (cache_reads + input_tokens).
    # input_tokens = non-cached input tokens. This measures what fraction of
    # total inbound context came from cache vs. fresh input.
    total_cache_read = sum(r["cache_read_tokens"] for r in rows_30d)
    total_input = sum(r["input_tokens"] for r in rows_30d)
    cache_denominator = total_cache_read + total_input
    cache_hit_rate = (total_cache_read / cache_denominator * 100) if cache_denominator > 0 else 0

    # Cache ROI: dollars saved by billing cache_read tokens at the discounted
    # cache_read rate instead of the model's full input rate (per 1M tokens).
    cache_roi_usd = 0.0
    for r in rows_30d:
        pricing = MODEL_PRICING.get(r["model"], MODEL_PRICING["claude-sonnet"])
        saved = r["cache_read_tokens"] * (pricing["input"] - pricing["cache_read"]) / 1_000_000
        cache_roi_usd += saved

    # Subscription ROI: API-equivalent 30-day cost divided by the plan price.
    # For "all", the denominator is the sum of every configured plan.
    subscription_roi = 0.0
    if account and account != "all":
        acct_info = ACCOUNTS.get(account, {})
        monthly_cost = acct_info.get("monthly_cost_usd", 0)
        if monthly_cost > 0:
            subscription_roi = round(total_cost_30d / monthly_cost, 2)
    else:
        total_plan = sum(a.get("monthly_cost_usd", 0) for a in ACCOUNTS.values())
        if total_plan > 0:
            subscription_roi = round(total_cost_30d / total_plan, 2)

    # Average turns (rows) per session over the last 7 days.
    session_turns = defaultdict(int)
    for r in rows_7d:
        session_turns[r["session_id"]] += 1
    avg_session_depth = (
        sum(session_turns.values()) / len(session_turns) if session_turns else 0
    )

    # Share of 30-day sessions with at least one compaction flagged.
    sessions_with_compact = set()
    sessions_all = set()
    for r in rows_30d:
        sessions_all.add(r["session_id"])
        if r["compaction_detected"]:
            sessions_with_compact.add(r["session_id"])
    compaction_rate = (len(sessions_with_compact) / len(sessions_all) * 100) if sessions_all else 0

    # UTC hour with the highest token burn over the last 7 days.
    hour_tokens = defaultdict(int)
    for r in rows_7d:
        h = datetime.fromtimestamp(r["timestamp"], tz=timezone.utc).hour
        hour_tokens[h] += r["input_tokens"] + r["output_tokens"]
    peak_burn_hour = max(hour_tokens, key=hour_tokens.get) if hour_tokens else None

    # Weekday (UTC) with the most session rows over the last 30 days.
    day_sessions = defaultdict(int)
    for r in rows_30d:
        dow = datetime.fromtimestamp(r["timestamp"], tz=timezone.utc).strftime("%A")
        day_sessions[dow] += 1
    heaviest_day = max(day_sessions, key=day_sessions.get) if day_sessions else None

    return {
        "total_cost_30d": round(total_cost_30d, 2),
        "sessions_today": sessions_today,
        "avg_tokens_per_session": int(avg_tokens_per_session),
        "cache_hit_rate": round(cache_hit_rate, 1),
        "cache_roi_usd": round(cache_roi_usd, 2),
        "subscription_roi": subscription_roi,
        "avg_session_depth": round(avg_session_depth, 1),
        "compaction_rate": round(compaction_rate, 1),
        "peak_burn_hour": peak_burn_hour,
        "heaviest_day": heaviest_day,
    }
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
# ── Window metrics (per account) ──
|
|
125
|
+
|
|
126
|
+
def window_metrics(conn, account="personal_max"):
    """Current 5-hour window usage for *account*.

    Returns a dict with the window bounds (epoch seconds), total tokens
    burned, the account's token limit, percent used, burn rate per minute,
    a naive linear time-to-limit forecast, and the last 7 days of recorded
    window burns.
    """
    # NOTE: Uses UTC epoch-modulo for window boundaries.
    # Anthropic's actual window resets_at may differ by up to 5 hours.
    # For precise window tracking, enable browser sync (mac-sync.py or oauth_sync.py)
    # which reads the actual resets_at from claude.ai API.
    ACCOUNTS = get_accounts_config(conn)
    acct_info = ACCOUNTS.get(account, {})
    if not acct_info:
        # Unknown account: fall back to the first configured account's limits.
        acct_info = next(iter(ACCOUNTS.values()), {})
    window_limit = acct_info.get("window_token_limit", 1_000_000)
    now = _now()
    window_seconds = MAX_WINDOW_HOURS * 3600

    acct_filter = account if account and account != "all" else None
    if acct_filter:
        row = conn.execute(
            "SELECT MAX(timestamp) as last_ts FROM sessions WHERE account = ?", (acct_filter,)
        ).fetchone()
    else:
        row = conn.execute("SELECT MAX(timestamp) as last_ts FROM sessions").fetchone()

    last_ts = row["last_ts"] if row and row["last_ts"] else now

    # Anchor the window on the most recent activity; if that window has
    # already closed, snap forward to the window containing "now".
    window_start = last_ts - (last_ts % window_seconds)
    if window_start + window_seconds < now:
        window_start = now - (now % window_seconds)
    window_end = window_start + window_seconds

    if acct_filter:
        rows = conn.execute(
            "SELECT * FROM sessions WHERE account = ? AND timestamp >= ? AND timestamp < ?",
            (acct_filter, window_start, window_end),
        ).fetchall()
    else:
        rows = conn.execute(
            "SELECT * FROM sessions WHERE timestamp >= ? AND timestamp < ?",
            (window_start, window_end),
        ).fetchall()

    total_tokens = sum(r["input_tokens"] + r["output_tokens"] for r in rows)
    window_pct = (total_tokens / window_limit * 100) if window_limit > 0 else 0

    # Linear extrapolation of the observed burn rate to estimate when the
    # token limit will be reached; None when idle or already over the limit.
    elapsed_seconds = max(now - window_start, 1)
    if total_tokens > 0:
        burn_per_second = total_tokens / elapsed_seconds
        remaining_tokens = window_limit - total_tokens
        if burn_per_second > 0 and remaining_tokens > 0:
            seconds_to_limit = remaining_tokens / burn_per_second
            predicted_limit_time = now + seconds_to_limit
            minutes_to_limit = int(seconds_to_limit / 60)
        else:
            predicted_limit_time = None
            minutes_to_limit = None
    else:
        burn_per_second = 0
        predicted_limit_time = None
        minutes_to_limit = None

    window_history = [dict(r) for r in get_window_burns(conn, account, 7)]

    return {
        "account": account,
        "window_start": window_start,
        "window_end": window_end,
        "total_tokens": total_tokens,
        "tokens_limit": window_limit,
        "window_pct": round(window_pct, 1),
        "burn_per_minute": round(burn_per_second * 60, 0),
        "minutes_to_limit": minutes_to_limit,
        "predicted_limit_time": predicted_limit_time,
        "window_history": window_history,
    }
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
# ── Per-project metrics ──
|
|
201
|
+
|
|
202
|
+
def project_metrics(conn, account="all"):
    """Per-project 30-day rollup: cost, token share, cache stats, model mix,
    week-over-week trend, and model-rightsizing hints.

    Returns a list of dicts sorted by 30-day cost (descending); empty list
    when there is no activity in the last 30 days.
    """
    ACCOUNTS = get_accounts_config(conn)
    rows_30d = _fetch_rows(conn, account, _days_ago(30))
    rows_7d = _fetch_rows(conn, account, _days_ago(7))
    rows_prev_7d = _fetch_rows(conn, account, _days_ago(14))
    if not rows_30d:
        return []

    total_tokens_all = sum(r["input_tokens"] + r["output_tokens"] for r in rows_30d)

    projects = {}
    for r in rows_30d:
        p = r["project"]
        if p not in projects:
            projects[p] = {
                "name": p, "account": r["account"], "tokens": 0, "cost": 0.0,
                "cache_read": 0, "cache_create": 0, "input_tokens": 0, "models": {}, "sessions": set(),
                "output_tokens_list": [], "timestamps": [],
                "cache_roi": 0.0,
            }
        d = projects[p]
        d["tokens"] += r["input_tokens"] + r["output_tokens"]
        d["cost"] += r["cost_usd"]
        d["cache_read"] += r["cache_read_tokens"]
        d["cache_create"] += r["cache_creation_tokens"]
        d["input_tokens"] += r["input_tokens"]
        d["models"][r["model"]] = d["models"].get(r["model"], 0) + 1
        d["sessions"].add(r["session_id"])
        d["output_tokens_list"].append(r["output_tokens"])
        d["timestamps"].append(r["timestamp"])
        # Cache ROI: dollars saved by reading cached tokens at the discounted
        # cache_read rate instead of the full input rate. Accumulated in this
        # single pass (previously an O(projects x rows) rescan per project).
        pricing = MODEL_PRICING.get(r["model"], MODEL_PRICING["claude-sonnet"])
        d["cache_roi"] += r["cache_read_tokens"] * (pricing["input"] - pricing["cache_read"]) / 1_000_000

    this_week_cost = defaultdict(float)
    for r in rows_7d:
        this_week_cost[r["project"]] += r["cost_usd"]

    # Hoisted out of the loop: _days_ago() re-reads the clock, so calling it
    # per row is wasteful and could even move the boundary mid-iteration.
    week_cutoff = _days_ago(7)
    last_week_cost = defaultdict(float)
    for r in rows_prev_7d:
        if r["timestamp"] < week_cutoff:
            last_week_cost[r["project"]] += r["cost_usd"]

    result = []
    for p, d in sorted(projects.items(), key=lambda x: -x[1]["cost"]):
        cache_activity = d["cache_read"] + d["cache_create"]
        cache_hit = (d["cache_read"] / cache_activity * 100) if cache_activity > 0 else 0
        dominant_model = max(d["models"], key=d["models"].get) if d["models"] else "claude-sonnet"
        session_count = len(d["sessions"])
        avg_cost = d["cost"] / session_count if session_count > 0 else 0
        token_share = (d["tokens"] / total_tokens_all * 100) if total_tokens_all > 0 else 0

        # % of rows that used the dominant model.
        total_model_rows = sum(d["models"].values())
        model_consistency = (d["models"].get(dominant_model, 0) / total_model_rows * 100) if total_model_rows > 0 else 100

        # Token velocity: tokens/hour over the project's active span.
        if d["timestamps"] and len(d["timestamps"]) > 1:
            ts_sorted = sorted(d["timestamps"])
            span_hours = max((ts_sorted[-1] - ts_sorted[0]) / 3600, 1)
            token_velocity = d["tokens"] / span_hours
        else:
            token_velocity = 0

        tw = this_week_cost.get(p, 0)
        lw = last_week_cost.get(p, 0)
        wow_change = ((tw - lw) / lw * 100) if lw > 0 else 0

        # Rightsizing: consistently short Opus outputs suggest Sonnet would do.
        avg_output = sum(d["output_tokens_list"]) / len(d["output_tokens_list"]) if d["output_tokens_list"] else 0
        rightsizing_savings = 0.0
        if dominant_model == "claude-opus" and avg_output < 800:
            opus_cost = d["cost"]
            sonnet_ratio = MODEL_PRICING["claude-sonnet"]["output"] / MODEL_PRICING["claude-opus"]["output"]
            rightsizing_savings = round(opus_cost * (1 - sonnet_ratio), 2)

        result.append({
            "name": p,
            "account": d["account"],
            "account_label": ACCOUNTS.get(d["account"], {}).get("label", d["account"]),
            "token_share_pct": round(token_share, 1),
            "cost_usd_30d": round(d["cost"], 2),
            "dominant_model": dominant_model,
            "cache_hit_rate": round(cache_hit, 1),
            "avg_cost_per_session": round(avg_cost, 4),
            "total_tokens": d["tokens"],
            "session_count": session_count,
            "model_consistency": round(model_consistency, 1),
            "token_velocity": round(token_velocity, 0),
            "cache_roi_usd": round(d["cache_roi"], 2),
            "wow_change_pct": round(wow_change, 1),
            "rightsizing_savings": rightsizing_savings,
            "avg_output_tokens": int(avg_output),
        })

    return result
|
|
297
|
+
|
|
298
|
+
|
|
299
|
+
# ── Compaction intelligence ──
|
|
300
|
+
|
|
301
|
+
def compaction_metrics(conn, account="all"):
    """Compaction intelligence over the last 30 days.

    A compaction event is inferred when a turn's context (input + cache_read
    tokens) drops below 70% of the previous turn's context, provided the
    previous context was non-trivial (>1000 tokens). Context includes
    cache_read because under prompt caching the real inbound prompt size
    lives in cache_read.

    Returns a dict with the average context savings per event, the event
    count, how many sessions peaked above 70% of their window limit without
    ever compacting, and a per-project breakdown.
    """
    ACCOUNTS = get_accounts_config(conn)
    rows_30d = _fetch_rows(conn, account, _days_ago(30))
    if not rows_30d:
        return {"avg_savings_pct": 0, "compaction_count": 0, "sessions_needing_compact": 0, "per_project": []}

    sessions = defaultdict(list)
    for r in rows_30d:
        sessions[r["session_id"]].append(r)

    savings = []
    high_usage_no_compact = 0
    project_compact = defaultdict(lambda: {"compact_count": 0, "turn_count": 0, "session_count": 0})

    # Single pass per session: detect compaction events, accumulate the
    # per-project rollup, and flag sessions that should have compacted.
    # (Previously this was two loops duplicating the same prev/curr context
    # comparison; an unused per-session total_tokens sum was also dropped.)
    for sid, turns in sessions.items():
        turns.sort(key=lambda x: x["timestamp"])
        session_has_compact = False

        proj = turns[0]["project"] if turns else "Other"
        project_compact[proj]["session_count"] += 1
        project_compact[proj]["turn_count"] += len(turns)

        for i in range(1, len(turns)):
            prev_ctx = turns[i - 1]["input_tokens"] + turns[i - 1]["cache_read_tokens"]
            curr_ctx = turns[i]["input_tokens"] + turns[i]["cache_read_tokens"]
            if prev_ctx > 1000 and curr_ctx < prev_ctx * 0.7:
                savings.append((prev_ctx - curr_ctx) / prev_ctx * 100)
                session_has_compact = True
                project_compact[proj]["compact_count"] += 1

        # Use the first matching account's limit, or 1M default.
        first_acct = turns[0]["account"] if turns else None
        window_limit = ACCOUNTS.get(first_acct, {}).get("window_token_limit", 1_000_000)
        # Peak context in session: largest single-turn input+cache_read.
        peak_ctx = max((t["input_tokens"] + t["cache_read_tokens"] for t in turns), default=0)
        if peak_ctx > window_limit * 0.7 and not session_has_compact:
            high_usage_no_compact += 1

    per_project = []
    for proj, data in project_compact.items():
        avg_turns_between = (data["turn_count"] / data["compact_count"]) if data["compact_count"] > 0 else 0
        per_project.append({
            "project": proj,
            "compact_count": data["compact_count"],
            "avg_turns_between_compact": round(avg_turns_between, 1),
            "sessions_needing_compact": 0,
        })

    avg = sum(savings) / len(savings) if savings else 0
    return {
        "avg_savings_pct": round(avg, 1),
        "compaction_count": len(savings),
        "sessions_needing_compact": high_usage_no_compact,
        "per_project": per_project,
    }
|
|
366
|
+
|
|
367
|
+
|
|
368
|
+
# ── Model rightsizing ──
|
|
369
|
+
|
|
370
|
+
def model_rightsizing(conn, account="all"):
    """Suggest projects where Opus output is short enough (avg < 800 tokens)
    that Sonnet would plausibly do the job, with the estimated savings."""
    rows = _fetch_rows(conn, account, _days_ago(30))

    opus_by_project = {}
    for row in rows:
        if row["model"] != "claude-opus":
            continue
        bucket = opus_by_project.setdefault(
            row["project"], {"output_tokens": [], "cost": 0.0, "sessions": set()}
        )
        bucket["output_tokens"].append(row["output_tokens"])
        bucket["cost"] += row["cost_usd"]
        bucket["sessions"].add(row["session_id"])

    suggestions = []
    for proj, bucket in opus_by_project.items():
        outputs = bucket["output_tokens"]
        mean_output = sum(outputs) / len(outputs) if outputs else 0
        if mean_output >= 800:
            continue
        opus_cost = bucket["cost"]
        sonnet_ratio = MODEL_PRICING["claude-sonnet"]["output"] / MODEL_PRICING["claude-opus"]["output"]
        estimated_sonnet_cost = opus_cost * sonnet_ratio
        suggestions.append({
            "project": proj,
            "avg_output_tokens": int(mean_output),
            "current_model": "claude-opus",
            "monthly_savings": round(opus_cost - estimated_sonnet_cost, 2),
        })
    return suggestions
|
|
398
|
+
|
|
399
|
+
|
|
400
|
+
# ── Trends ──
|
|
401
|
+
|
|
402
|
+
def compute_daily_snapshots(conn, account="all"):
    """Aggregate the last 30 days of sessions into per-(date, account,
    project) snapshots and upsert them via the db layer. Commits on exit;
    no-op when there is no recent activity."""
    rows = _fetch_rows(conn, account, _days_ago(30))
    if not rows:
        return

    buckets = defaultdict(lambda: {"tokens": 0, "cost": 0.0, "cache_read": 0, "cache_create": 0, "sessions": set()})
    for row in rows:
        day = datetime.fromtimestamp(row["timestamp"], tz=timezone.utc).strftime("%Y-%m-%d")
        bucket = buckets[(day, row["account"], row["project"])]
        bucket["tokens"] += row["input_tokens"] + row["output_tokens"]
        bucket["cost"] += row["cost_usd"]
        bucket["cache_read"] += row["cache_read_tokens"]
        bucket["cache_create"] += row["cache_creation_tokens"]
        bucket["sessions"].add(row["session_id"])

    for (day, acct, proj), bucket in buckets.items():
        cache_activity = bucket["cache_read"] + bucket["cache_create"]
        cache_rate = (bucket["cache_read"] / cache_activity * 100) if cache_activity > 0 else 0
        upsert_daily_snapshot(
            conn, day, acct, proj,
            bucket["tokens"], round(bucket["cost"], 4),
            round(cache_rate, 1), len(bucket["sessions"]),
        )

    conn.commit()
|
|
425
|
+
|
|
426
|
+
|
|
427
|
+
def trend_metrics(conn, account="all", days=7):
    """Daily cost/token trend built from stored snapshots, plus a 30-day cost
    projection and per-project week-over-week cost change (percent)."""
    snapshots = get_daily_snapshots(conn, account, days)

    per_day = defaultdict(lambda: {"tokens": 0, "cost": 0.0, "sessions": 0})
    for snap in snapshots:
        bucket = per_day[snap["date"]]
        bucket["tokens"] += snap["total_tokens"]
        bucket["cost"] += snap["total_cost_usd"]
        bucket["sessions"] += snap["session_count"]

    daily_series = [
        {
            "date": day,
            "tokens": per_day[day]["tokens"],
            "cost": round(per_day[day]["cost"], 2),
            "sessions": per_day[day]["sessions"],
        }
        for day in sorted(per_day.keys())
    ]

    # Project next month's spend from the average of up to the last 7 days.
    if daily_series:
        tail = daily_series[-min(7, len(daily_series)):]
        avg_daily_cost = sum(entry["cost"] for entry in tail) / len(tail)
        monthly_projection = round(avg_daily_cost * 30, 2)
    else:
        monthly_projection = 0

    # Week-over-week per-project cost (ISO date strings compare lexically).
    now = datetime.now(timezone.utc)
    week_ago = (now - timedelta(days=7)).strftime("%Y-%m-%d")
    two_weeks_ago = (now - timedelta(days=14)).strftime("%Y-%m-%d")
    weekly = defaultdict(lambda: {"this_week": 0, "last_week": 0})
    for snap in snapshots:
        if snap["date"] >= week_ago:
            weekly[snap["project"]]["this_week"] += snap["total_cost_usd"]
        elif snap["date"] >= two_weeks_ago:
            weekly[snap["project"]]["last_week"] += snap["total_cost_usd"]

    project_wow = {}
    for proj, data in weekly.items():
        if data["last_week"] > 0:
            project_wow[proj] = round(
                (data["this_week"] - data["last_week"]) / data["last_week"] * 100, 1
            )
        else:
            project_wow[proj] = 0

    return {
        "daily": daily_series,
        "monthly_projection": monthly_projection,
        "project_wow": project_wow,
    }
|
|
476
|
+
|
|
477
|
+
|
|
478
|
+
# ── 5-hour window intelligence ──
|
|
479
|
+
|
|
480
|
+
def window_intelligence(conn, account="personal_max"):
    """window_metrics() enriched with scheduling hints: the quietest 5-hour
    start hour over the last week, 7-day average window utilization, how many
    windows hit the limit, and whether a heavy session is currently safe
    (under 50% of the window used)."""
    intel = window_metrics(conn, account)

    usage_by_hour = defaultdict(int)
    for row in _fetch_rows(conn, account, _days_ago(7)):
        hour = datetime.fromtimestamp(row["timestamp"], tz=timezone.utc).hour
        usage_by_hour[hour] += row["input_tokens"] + row["output_tokens"]

    # Quietest 5-hour block, wrapping around midnight; ties keep the earliest.
    best_start = 0
    min_usage = float("inf")
    for candidate in range(24):
        block_usage = sum(usage_by_hour.get((candidate + offset) % 24, 0) for offset in range(5))
        if block_usage < min_usage:
            min_usage = block_usage
            best_start = candidate

    history = intel.get("window_history", [])
    if history:
        avg_pct = sum(w.get("pct_used", 0) for w in history) / len(history)
    else:
        avg_pct = 0
    limit_hits = sum(1 for w in history if w.get("hit_limit", 0))

    intel["best_start_hour"] = best_start
    intel["avg_window_pct_7d"] = round(avg_pct, 1)
    intel["windows_hit_limit_7d"] = limit_hits
    intel["safe_for_heavy_session"] = intel["window_pct"] < 50

    return intel
|
|
509
|
+
|
|
510
|
+
|
|
511
|
+
# ── Alert generation ──
|
|
512
|
+
|
|
513
|
+
def generate_alerts(conn):
    """Rebuild the alerts table from current usage and commit.

    Clears existing alerts, then inserts (in this order):
      red   — project whose 24h cache-creation volume is >3x its 7-day daily avg
      red   — account within ~60 minutes of its window token limit
      amber — Opus overuse where Sonnet would suffice
      amber — account window >80% used with no compaction detected
      green — project met its per-session cost target
    """
    ACCOUNTS = get_accounts_config(conn)
    clear_alerts(conn)

    rows_7d = _fetch_rows(conn, since=_days_ago(7))
    rows_24h = _fetch_rows(conn, since=_days_ago(1))

    project_cache_7d = defaultdict(int)
    for r in rows_7d:
        project_cache_7d[r["project"]] += r["cache_creation_tokens"]

    project_cache_24h = defaultdict(int)
    for r in rows_24h:
        project_cache_24h[r["project"]] += r["cache_creation_tokens"]

    for p, cache_24h in project_cache_24h.items():
        avg_daily = project_cache_7d.get(p, 0) / 7
        if avg_daily > 0 and cache_24h > avg_daily * 3:
            insert_alert(conn, "red", p, f"Cache spike: {p}")

    # Compute each account's window once and reuse it below — previously two
    # separate loops each re-ran the query-heavy window_metrics() per account.
    account_windows = {acct_key: window_metrics(conn, acct_key) for acct_key in ACCOUNTS}

    for acct_key, wm in account_windows.items():
        if wm["minutes_to_limit"] is not None and wm["minutes_to_limit"] <= 60:
            label = ACCOUNTS[acct_key]["label"]
            insert_alert(conn, "red", acct_key,
                         f"{label} window limit in ~{wm['minutes_to_limit']} min")

    for s in model_rightsizing(conn):
        insert_alert(conn, "amber", s["project"],
                     f"Opus overuse in {s['project']} — Sonnet saves ${s['monthly_savings']:.2f}/mo")

    for acct_key, wm in account_windows.items():
        if wm["window_pct"] > 80:
            comp = compaction_metrics(conn, acct_key)
            if comp["compaction_count"] == 0:
                insert_alert(conn, "amber", acct_key,
                             f"No /compact detected for {ACCOUNTS[acct_key]['label']} — context bloat risk")

    projs = project_metrics(conn)
    for pm in projs:
        target = COST_TARGETS.get(pm["name"])
        if target and pm["avg_cost_per_session"] <= target:
            insert_alert(conn, "green", pm["name"],
                         f"{pm['name']} hit cost target (${pm['avg_cost_per_session']:.4f}/session)")

    conn.commit()
|
|
560
|
+
|
|
561
|
+
|
|
562
|
+
# ── Record window burn ──
|
|
563
|
+
|
|
564
|
+
def record_window_burn(conn, account="personal_max"):
    """Persist a snapshot of *account*'s current 5-hour window burn.

    Reads the live window via window_metrics(), records it through
    insert_window_burn() (hit_limit flag set when usage >= 100%), and
    commits. The previous version also fetched the accounts config into
    locals it never used; that dead read has been removed.
    """
    wm = window_metrics(conn, account)
    insert_window_burn(
        conn, account, wm["window_start"], wm["window_end"],
        wm["total_tokens"], wm["tokens_limit"],
        wm["window_pct"], 1 if wm["window_pct"] >= 100 else 0,
    )
    conn.commit()
|
|
574
|
+
|
|
575
|
+
|
|
576
|
+
# ── Sub-agent metrics ──
|
|
577
|
+
|
|
578
|
+
def subagent_metrics(conn, account="all"):
    """Per-project subagent cost rollup. Returns a dict keyed by project
    with subagent_session_count, subagent_cost_usd, subagent_pct_of_total,
    and the top 5 spawning parent sessions by subagent cost."""
    acct_filter = None if account == "all" else account
    conditions, params = [], []
    if acct_filter:
        conditions.append("account = ?")
        params.append(acct_filter)
    where_clause = (" AND ".join(conditions)) if conditions else "1=1"

    # Per-project rollup: subagent cost/rows/sessions alongside total cost.
    rollup_sql = (
        "SELECT project, "
        " SUM(CASE WHEN is_subagent=1 THEN cost_usd ELSE 0 END) AS sub_cost, "
        " SUM(CASE WHEN is_subagent=1 THEN 1 ELSE 0 END) AS sub_rows, "
        " SUM(cost_usd) AS total_cost, "
        " COUNT(DISTINCT CASE WHEN is_subagent=1 THEN session_id END) AS sub_sessions "
        "FROM sessions WHERE " + where_clause + " GROUP BY project"
    )
    result = {}
    for row in conn.execute(rollup_sql, params).fetchall():
        total_cost = row["total_cost"] or 0
        sub_cost = row["sub_cost"] or 0
        pct = (sub_cost / total_cost * 100) if total_cost > 0 else 0
        result[row["project"]] = {
            "subagent_session_count": row["sub_sessions"] or 0,
            "subagent_cost_usd": round(sub_cost, 4),
            "subagent_pct_of_total": round(pct, 1),
            "top_spawning_sessions": [],
        }

    # Top 5 spawning parents (per project) — parents ordered by subagent cost.
    top_conditions = ["is_subagent = 1", "parent_session_id IS NOT NULL", "project IS NOT NULL"]
    if acct_filter:
        top_conditions.append("account = ?")
    top_sql = (
        "SELECT project, parent_session_id, "
        " COUNT(DISTINCT session_id) AS spawned, "
        " SUM(cost_usd) AS cost "
        "FROM sessions "
        "WHERE " + " AND ".join(top_conditions) + " "
        "GROUP BY project, parent_session_id "
        "ORDER BY cost DESC"
    )
    top_by_project = defaultdict(list)
    for row in conn.execute(top_sql, params).fetchall():
        entries = top_by_project[row["project"]]
        if len(entries) < 5:
            entries.append({
                "parent_session_id": row["parent_session_id"],
                "subagents_spawned": row["spawned"],
                "cost_usd": round(row["cost"] or 0, 4),
            })
    for proj, entries in top_by_project.items():
        if proj in result:
            result[proj]["top_spawning_sessions"] = entries

    return result
|
|
644
|
+
|
|
645
|
+
|
|
646
|
+
# ── Daily budget metrics ──
|
|
647
|
+
|
|
648
|
+
def daily_budget_metrics(conn, account="all"):
    """Per-account today-vs-budget rollup. Returns dict keyed by account_id
    with today_cost, budget_usd, budget_pct, budget_remaining,
    projected_daily, on_track."""
    ACCOUNTS = get_accounts_config(conn)

    # Today's UTC midnight as an epoch, and hours elapsed since then
    # (floored at 0.1 so projections right after midnight don't explode).
    now_dt = datetime.now(timezone.utc)
    midnight = now_dt.replace(hour=0, minute=0, second=0, microsecond=0)
    midnight_epoch = int(midnight.timestamp())
    hours_elapsed = max((now_dt - midnight).total_seconds() / 3600.0, 0.1)

    account_ids = ACCOUNTS.keys() if account == "all" else [account]
    result = {}
    for acct_id in account_ids:
        info = ACCOUNTS.get(acct_id, {})
        budget = float(info.get("daily_budget_usd") or 0)
        row = conn.execute(
            "SELECT COALESCE(SUM(cost_usd), 0) AS cost FROM sessions WHERE account=? AND timestamp >= ?",
            (acct_id, midnight_epoch),
        ).fetchone()
        today_cost = row["cost"] or 0
        # Linear full-day projection from the spend rate so far.
        projected = (today_cost / hours_elapsed) * 24 if hours_elapsed > 0 else today_cost
        has_budget = budget > 0
        result[acct_id] = {
            "today_cost": round(today_cost, 4),
            "budget_usd": round(budget, 2),
            "budget_pct": round((today_cost / budget * 100) if has_budget else 0, 1),
            "budget_remaining": round(max(budget - today_cost, 0), 4) if has_budget else 0,
            "projected_daily": round(projected, 4),
            "on_track": (projected <= budget) if has_budget else True,
            "has_budget": has_budget,
        }
    return result
|
|
680
|
+
|
|
681
|
+
|
|
682
|
+
# ── Efficiency Score ──


def _eff_cache_score(conn, where, params):
    """Dimension 1: percentage of input tokens served from the prompt cache (0-100)."""
    r = conn.execute(
        f"SELECT COALESCE(SUM(cache_read_tokens), 0), COALESCE(SUM(input_tokens), 0) "
        f"FROM sessions WHERE {where}", params
    ).fetchone()
    cache_reads = r[0] or 0
    cache_inputs = r[1] or 0
    denom = cache_reads + cache_inputs
    return round(cache_reads / denom * 100) if denom > 0 else 0


def _eff_model_score(conn, where, params):
    """Dimension 2: share of Opus sessions that were NOT short-output (<300 tok), 0-100.

    No Opus usage at all counts as a perfect score.
    """
    total_opus = conn.execute(
        f"SELECT COUNT(*) FROM sessions "
        f"WHERE model LIKE '%opus%' AND {where}", params
    ).fetchone()[0]
    opus_short = conn.execute(
        f"SELECT COUNT(*) FROM sessions "
        f"WHERE model LIKE '%opus%' AND output_tokens < 300 "
        f"AND {where}", params
    ).fetchone()[0]
    if total_opus > 0:
        opus_waste_pct = opus_short / total_opus
        return round((1 - opus_waste_pct) * 100)
    return 100


def _eff_window_score(conn, cutoff):
    """Dimension 3: average 5h-window utilization, scored for an ideal 60-80% band.

    Returns (score, avg_window). Below 60% scales linearly up to 70 points,
    60-80% scales 70→100, above 80% loses 2 points per percentage point.

    NOTE(review): this query is not filtered by account, so per-account
    efficiency scores still reflect global window usage — confirm whether
    window_burns carries an account column before tightening.
    """
    avg_window = conn.execute(
        "SELECT AVG(pct_used) FROM window_burns "
        "WHERE window_start > ? AND pct_used > 0",
        [cutoff]
    ).fetchone()[0] or 0
    if avg_window < 60:
        score = round(avg_window / 60 * 70)
    elif avg_window <= 80:
        score = round(70 + (avg_window - 60) / 20 * 30)
    else:
        score = round(100 - (avg_window - 80) * 2)
    return max(0, min(100, score)), avg_window


def _eff_flounder_score(conn, cutoff, total_sessions):
    """Dimension 4: penalize floundering sessions (10x amplified rate), 0-100.

    Returns (score, flounder_sessions).

    NOTE(review): waste_events is not account-filtered while total_sessions
    is, so a single-account rate can be overstated — confirm schema before
    adding an account predicate.
    """
    flounder_sessions = conn.execute(
        "SELECT COUNT(DISTINCT session_id) FROM waste_events "
        "WHERE pattern_type='floundering' AND detected_at > ?",
        [cutoff]
    ).fetchone()[0]
    flounder_rate = flounder_sessions / total_sessions
    return round((1 - min(flounder_rate * 10, 1)) * 100), flounder_sessions


def _eff_compaction_score(conn, where, params, total_sessions):
    """Dimension 5: reward compaction usage; full credit at a 5% session rate.

    Returns (score, compact_events).
    """
    compact_events = conn.execute(
        f"SELECT COUNT(*) FROM sessions "
        f"WHERE compaction_detected=1 AND {where}", params
    ).fetchone()[0]
    compact_rate = compact_events / total_sessions
    return min(round(compact_rate / 0.05 * 100), 100), compact_events


def _grade_for(total):
    """Map a 0-100 efficiency score to a letter grade A-F."""
    if total >= 90:
        return "A"
    if total >= 80:
        return "B"
    if total >= 70:
        return "C"
    if total >= 60:
        return "D"
    return "F"


def compute_efficiency_score(conn, account="all"):
    """
    Compute Claude Code efficiency score 0-100 over the last 30 days.

    Five dimensions, weighted:
    1. Cache efficiency 25% — cache_read/(cache_read+input_tokens)
    2. Model right-sizing 25% — % sessions NOT using Opus for <300 tok output
    3. Window discipline 20% — avg window utilization (ideal 60-80%)
    4. Floundering rate 20% — % sessions without floundering
    5. Compaction 10% — % long sessions that used compaction

    Returns a dict with the weighted total score, letter grade, the
    per-dimension breakdown, and the worst (by weighted score) dimension
    as the suggested top improvement.
    """
    cutoff = _days_ago(30)
    where = "timestamp > ?"
    params = [cutoff]
    if account and account != "all":
        where += " AND account = ?"
        params.append(account)

    cache_score = _eff_cache_score(conn, where, params)
    model_score = _eff_model_score(conn, where, params)
    window_score, avg_window = _eff_window_score(conn, cutoff)

    # Shared denominator for dimensions 4 and 5; floor of 1 avoids
    # division by zero on an empty 30-day window.
    total_sessions = conn.execute(
        f"SELECT COUNT(DISTINCT session_id) FROM sessions WHERE {where}",
        params
    ).fetchone()[0] or 1
    flounder_score, flounder_sessions = _eff_flounder_score(conn, cutoff, total_sessions)
    compaction_score, compact_events = _eff_compaction_score(conn, where, params, total_sessions)

    # Weighted total
    dimensions = [
        {"name": "cache", "score": cache_score, "weight": 0.25,
         "label": "Cache efficiency",
         "detail": f"{cache_score}% of input tokens served from cache"},
        {"name": "model", "score": model_score, "weight": 0.25,
         "label": "Model right-sizing",
         "detail": f"{100 - model_score}% of Opus sessions had short outputs"},
        {"name": "window", "score": window_score, "weight": 0.20,
         "label": "Window discipline",
         "detail": f"{round(avg_window, 1)}% avg window utilization (ideal: 60-80%)"},
        {"name": "flounder", "score": flounder_score, "weight": 0.20,
         "label": "Floundering rate",
         "detail": f"{flounder_sessions} stuck sessions detected"},
        {"name": "compaction", "score": compaction_score, "weight": 0.10,
         "label": "Compaction discipline",
         "detail": f"{compact_events} compaction events in 30d"},
    ]

    total = round(sum(d["score"] * d["weight"] for d in dimensions))
    total = max(0, min(100, total))

    # The dimension contributing least (weighted) is the best lever to pull.
    worst = min(dimensions, key=lambda d: d["score"] * d["weight"])

    return {
        "score": total,
        "grade": _grade_for(total),
        "dimensions": dimensions,
        "top_improvement": worst["label"],
        "top_improvement_detail": worst["detail"],
    }
|
|
804
|
+
|
|
805
|
+
|
|
806
|
+
# ── Full analysis ──


def full_analysis(conn, account="all"):
    """Assemble the full dashboard payload for one account (or all of them).

    Refreshes alerts/snapshots/window-burn state first, then gathers every
    metric family into a single dict consumed by the dashboard templates.
    """
    accounts_cfg = get_accounts_config(conn)

    # Refresh derived state before any reads below.
    generate_alerts(conn)
    compute_daily_snapshots(conn, account)

    # Best-effort: a failed burn recording must not take down the analysis.
    for acct_key in accounts_cfg:
        try:
            record_window_burn(conn, acct_key)
        except Exception:
            pass

    acct_metrics = account_metrics(conn, account)
    proj_metrics = project_metrics(conn, account)
    compaction = compaction_metrics(conn, account)
    rightsizing = model_rightsizing(conn, account)
    alerts = [dict(r) for r in query_alerts(conn)]
    trends = trend_metrics(conn, account, 7)

    windows = {acct_key: window_intelligence(conn, acct_key) for acct_key in accounts_cfg}

    from db import get_insights
    active_insights = get_insights(conn, account if account != "all" else None, dismissed=0, limit=100)

    # Include account list for dynamic tabs — attach session count + browser data
    session_counts = {
        row["account"]: row["cnt"]
        for row in conn.execute(
            "SELECT account, COUNT(*) as cnt FROM sessions GROUP BY account"
        ).fetchall()
    }
    # Latest browser snapshot per account (from claude.ai tracking)
    browser_snaps = {
        row["account_id"]: {
            "five": row["five_hour_utilization"] or 0,
            "seven": row["seven_day_utilization"] or 0,
        }
        for row in conn.execute(
            "SELECT account_id, five_hour_utilization, seven_day_utilization "
            "FROM claude_ai_snapshots "
            "WHERE id IN (SELECT MAX(id) FROM claude_ai_snapshots GROUP BY account_id)"
        ).fetchall()
    }
    accounts_list = []
    for acct_id, cfg in accounts_cfg.items():
        snap = browser_snaps.get(acct_id, {})
        five = snap.get("five", 0)
        seven = snap.get("seven", 0)
        accounts_list.append({
            "account_id": acct_id, "label": cfg["label"], "color": cfg.get("color", "teal"),
            "sessions_count": session_counts.get(acct_id, 0),
            "browser_window_pct": five,
            "seven_day_pct": seven,
            "has_browser_data": five > 0 or seven > 0,
        })

    # Sub-agent rollup, daily budget, waste summary
    sub_metrics = subagent_metrics(conn, account)
    budget_metrics = daily_budget_metrics(conn, account)
    try:
        from waste_patterns import waste_summary_by_project
        waste_summary = waste_summary_by_project(conn, days=7)
    except Exception:
        waste_summary = {}

    # Efficiency score — degrade to a neutral placeholder on any failure.
    try:
        efficiency = compute_efficiency_score(conn, account)
    except Exception:
        efficiency = {"score": 0, "grade": "-", "dimensions": [], "top_improvement": "unknown", "top_improvement_detail": ""}

    return {
        "account": account,
        "account_label": accounts_cfg.get(account, {}).get("label", "All Accounts"),
        "metrics": acct_metrics,
        "windows": windows,
        "projects": proj_metrics,
        "compaction": compaction,
        "rightsizing": rightsizing,
        "alerts": alerts,
        "trends": trends,
        "insights_count": len(active_insights),
        "accounts_list": accounts_list,
        "subagent_metrics": sub_metrics,
        "daily_budget": budget_metrics,
        "waste_summary": waste_summary,
        "efficiency": efficiency,
        "generated_at": _now(),
    }
|