@jeganwrites/claudash 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CONTRIBUTING.md +35 -0
- package/LICENSE +21 -0
- package/README.md +261 -0
- package/analyzer.py +890 -0
- package/bin/claudash.js +121 -0
- package/claude_ai_tracker.py +358 -0
- package/cli.py +1034 -0
- package/config.py +100 -0
- package/db.py +1156 -0
- package/fix_tracker.py +539 -0
- package/insights.py +359 -0
- package/mcp_server.py +414 -0
- package/package.json +39 -0
- package/scanner.py +385 -0
- package/server.py +762 -0
- package/templates/accounts.html +936 -0
- package/templates/dashboard.html +1742 -0
- package/tools/get-derived-keys.py +112 -0
- package/tools/mac-sync.py +386 -0
- package/tools/oauth_sync.py +308 -0
- package/tools/setup-pm2.sh +53 -0
- package/waste_patterns.py +334 -0
package/cli.py
ADDED
|
@@ -0,0 +1,1034 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Claudash — CLI entry point."""
|
|
3
|
+
|
|
4
|
+
import sys
|
|
5
|
+
import os
|
|
6
|
+
import csv
|
|
7
|
+
import json
|
|
8
|
+
import time
|
|
9
|
+
from datetime import datetime, timezone, timedelta
|
|
10
|
+
|
|
11
|
+
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
|
12
|
+
|
|
13
|
+
from config import VPS_IP, VPS_PORT
|
|
14
|
+
from db import (
|
|
15
|
+
init_db, get_conn, get_insights, get_session_count, get_db_size_mb,
|
|
16
|
+
get_accounts_config, get_claude_ai_accounts_all, get_latest_claude_ai_snapshot,
|
|
17
|
+
get_setting, set_setting, get_project_map_config, sync_project_map_from_config,
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
HELP_TEXT = """
|
|
22
|
+
Claudash v1.0 — personal Claude usage dashboard
|
|
23
|
+
|
|
24
|
+
Commands:
|
|
25
|
+
dashboard Start dashboard server on :8080 (127.0.0.1 only)
|
|
26
|
+
scan Scan JSONL files for new sessions (incremental)
|
|
27
|
+
scan --reprocess
|
|
28
|
+
Re-tag every existing session using the current
|
|
29
|
+
PROJECT_MAP. Useful after adding projects to config.py.
|
|
30
|
+
show-other List all source paths of sessions currently tagged 'Other'
|
|
31
|
+
stats Print per-account stats table
|
|
32
|
+
insights Show active insights
|
|
33
|
+
window Show 5-hour window status
|
|
34
|
+
export Export last 30 days to CSV
|
|
35
|
+
waste Run waste-pattern detection and print summary
|
|
36
|
+
fixes List all recorded fixes with current status
|
|
37
|
+
fix add Interactively record a new fix (captures baseline)
|
|
38
|
+
measure <id> Capture current metrics for a fix, compute delta, print
|
|
39
|
+
a plan-aware verdict and share card
|
|
40
|
+
mcp Print MCP server settings.json snippet + run a quick test
|
|
41
|
+
keys Print dashboard_key and sync_token (sensitive — keep private)
|
|
42
|
+
keys --rotate Regenerate dashboard_key (invalidates existing browser sessions)
|
|
43
|
+
init First-run setup wizard (3 questions, then start)
|
|
44
|
+
claude-ai Show claude.ai browser tracking status
|
|
45
|
+
claude-ai --sync-token Print sync token (for tools/mac-sync.py)
|
|
46
|
+
claude-ai --setup <account_id> Paste a claude.ai session key interactively
|
|
47
|
+
|
|
48
|
+
Paste the dashboard_key into the browser prompt the first time an admin
|
|
49
|
+
button fails — it's saved to localStorage and reused.
|
|
50
|
+
|
|
51
|
+
Local: http://localhost:8080
|
|
52
|
+
SSH tunnel: ssh -L 8080:localhost:8080 user@YOUR_VPS_IP
|
|
53
|
+
"""
|
|
54
|
+
from scanner import scan_all, start_periodic_scan
|
|
55
|
+
from analyzer import (
|
|
56
|
+
account_metrics, project_metrics, window_intelligence,
|
|
57
|
+
trend_metrics, compaction_metrics, model_rightsizing,
|
|
58
|
+
compute_efficiency_score,
|
|
59
|
+
)
|
|
60
|
+
from insights import generate_insights
|
|
61
|
+
from server import start_server
|
|
62
|
+
|
|
63
|
+
from claude_ai_tracker import (
|
|
64
|
+
poll_all as poll_claude_ai, start_periodic_poll as start_claude_ai_poll,
|
|
65
|
+
setup_account as tracker_setup_account,
|
|
66
|
+
)
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def cmd_dashboard():
    """Entry point for `claudash dashboard`.

    Parses the dashboard-specific flags out of sys.argv and supervises the
    server process: an unexpected crash triggers a restart with an
    exponentially growing delay (capped at 60s), and we give up after
    MAX_RESTARTS failed restarts.  Ctrl-C always exits cleanly.
    """
    import argparse
    parser = argparse.ArgumentParser(prog="claudash dashboard", add_help=False)
    parser.add_argument("--port", type=int, default=8080)
    parser.add_argument("--no-browser", action="store_true")
    parser.add_argument("--skip-init", action="store_true")
    opts = parser.parse_args(sys.argv[2:])

    MAX_RESTARTS = 5
    crashes = 0
    backoff = 5  # seconds before the next restart attempt

    while True:
        try:
            _run_dashboard(opts.port, opts.no_browser, opts.skip_init)
        except KeyboardInterrupt:
            print("\nClaudash stopped.")
        except Exception as e:
            crashes += 1
            if crashes > MAX_RESTARTS:
                print(f"Claudash crashed {MAX_RESTARTS} times. Giving up.")
                print(f"Last error: {e}")
                print(f"Check logs: tail /tmp/claudash.log")
                return
            print(f"Claudash crashed (attempt {crashes}/{MAX_RESTARTS}): {e}")
            print(f"Restarting in {backoff} seconds...")
            time.sleep(backoff)
            backoff = min(backoff * 2, 60)  # exponential backoff, capped
            continue
        return  # clean exit or Ctrl-C
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def _run_dashboard(port=8080, no_browser=False, skip_init=False):
    """Start the dashboard once: scan, generate insights, print the banner,
    kick off background pollers, then block in the HTTP server.

    Raises whatever the server raises; cmd_dashboard() supervises restarts.
    """
    init_db()
    rows = scan_all()  # initial incremental scan; count unused here

    conn = get_conn()
    n = generate_insights(conn)  # insight count unused here
    total = get_session_count(conn)
    db_mb = get_db_size_mb()
    accounts = get_accounts_config(conn)

    # First-run detection: no sessions or only default account label
    accounts_customized = any(
        v.get("label") != "Personal (Max)" for v in accounts.values()
    )
    if not skip_init and (total == 0 or (len(accounts) <= 1 and not accounts_customized)):
        conn.close()
        print("First run detected. Running setup wizard...", flush=True)
        print("(Skip with: python3 cli.py dashboard --skip-init)", flush=True)
        print(flush=True)
        # cmd_init() re-enters the dashboard itself after the wizard finishes.
        cmd_init()
        return

    conn.close()

    url_str = f"localhost:{port}"
    n_accts = f"{len(accounts)} configured"
    db_str = f"{db_mb}MB"

    # Startup banner.
    print(flush=True)
    print(" ╔══════════════════════════════╗", flush=True)
    print(" ║ Claudash v1.0 ║", flush=True)
    print(" ╠══════════════════════════════╣", flush=True)
    print(f" ║ Records : {total:<17,}║", flush=True)
    print(f" ║ Accounts : {n_accts:<17s}║", flush=True)
    print(f" ║ DB : {db_str:<17s}║", flush=True)
    print(f" ║ URL : {url_str:<17s}║", flush=True)
    print(" ╚══════════════════════════════╝", flush=True)
    print(flush=True)

    def _is_headless():
        # Heuristic: on Linux, no DISPLAY/WAYLAND_DISPLAY means no GUI.
        # Windows/macOS are assumed to always have one.
        import platform as _plat
        if _plat.system() in ("Windows", "Darwin"):
            return False
        return not os.environ.get("DISPLAY") and not os.environ.get("WAYLAND_DISPLAY")

    if not no_browser and not _is_headless():
        import threading, webbrowser
        # Open the browser slightly after the server starts accepting.
        threading.Thread(
            target=lambda: (time.sleep(1.5), webbrowser.open(f"http://localhost:{port}")),
            daemon=True,
        ).start()

    if _is_headless():
        print(" Headless server detected — no browser auto-open", flush=True)
        print(f" To view dashboard, run on your local machine:", flush=True)
        vps_host = VPS_IP if VPS_IP and VPS_IP != "localhost" else "YOUR_VPS_IP"
        print(f" ssh -L {port}:localhost:{port} user@{vps_host}", flush=True)
        print(f" Then open: http://localhost:{port}", flush=True)
    elif VPS_IP and VPS_IP != "localhost":
        print(f" SSH tunnel: ssh -L {port}:localhost:{port} user@{VPS_IP}", flush=True)
        print(f" Then open http://localhost:{port} in your browser.", flush=True)
    else:
        print(f" Open http://localhost:{port} in your browser.", flush=True)
    print(flush=True)

    # Background workers: JSONL rescans and claude.ai polling every 5 minutes.
    start_periodic_scan(interval_seconds=300)
    poll_claude_ai()
    start_claude_ai_poll(interval_seconds=300)

    # Blocks until the server stops or raises.
    start_server(port=port)
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
def cmd_init():
    """Interactive first-run setup wizard.

    Asks three questions (plan, projects sanity check, account name),
    writes the answers to the first row of the accounts table, then
    auto-starts the dashboard.  All input() calls fall back to defaults
    on EOF so the wizard never crashes under piped/non-tty stdin.
    """
    init_db()
    conn = get_conn()

    print(flush=True)
    print(" Claudash Setup Wizard", flush=True)
    print(" " + "-" * 40, flush=True)
    print(" Answer 3 questions to configure your dashboard.", flush=True)
    print(flush=True)

    # Question 1: Plan type
    print(" 1. What Claude plan are you on?", flush=True)
    print(" [1] Max ($100/mo — 1M tokens/5hr window)", flush=True)
    print(" [2] Pro ($20/mo — message-based limits)", flush=True)
    print(" [3] API (pay per token)", flush=True)
    print(" [4] Team (API with org billing)", flush=True)
    try:
        choice = input(" Enter 1-4: ").strip()
    except EOFError:
        choice = "1"
    # (plan slug, monthly cost USD, 5-hour window token limit; 0 = no token window)
    plan_map = {
        "1": ("max", 100.0, 1_000_000),
        "2": ("pro", 20.0, 0),
        "3": ("api", 0.0, 0),
        "4": ("api", 0.0, 0),
    }
    plan, cost, tokens = plan_map.get(choice, ("max", 100.0, 1_000_000))

    # Question 1b: Monthly cost (if API)
    if plan == "api":
        try:
            cost_input = input(" Monthly API spend (approx $): ").strip()
            cost = float(cost_input)
        except (ValueError, EOFError):
            cost = 0.0

    # Question 2: Show detected projects
    print(flush=True)
    print(" 2. Detected Claude Code projects:", flush=True)
    projects = conn.execute(
        "SELECT project, COUNT(*) as sessions "
        "FROM sessions GROUP BY project "
        "ORDER BY sessions DESC LIMIT 10"
    ).fetchall()

    if projects:
        for i, p in enumerate(projects, 1):
            print(f" {i}. {p['project']} ({p['sessions']} sessions)", flush=True)
        print(" These were auto-detected from your JSONL files.", flush=True)
        print(" Add custom project names in config.py PROJECT_MAP", flush=True)
    else:
        print(" No sessions found yet.", flush=True)
        print(" Run 'python3 cli.py scan' after using Claude Code.", flush=True)

    # Question 3: Account name
    print(flush=True)
    print(" 3. What should we call this account?", flush=True)
    print(" (e.g. 'Personal', 'Work', 'My Mac')", flush=True)
    try:
        name = input(" Account name: ").strip() or "Personal"
    except EOFError:
        name = "Personal"

    # Save to DB — updates only the first account row, if one exists.
    acct_row = conn.execute("SELECT account_id FROM accounts LIMIT 1").fetchone()
    if acct_row:
        conn.execute(
            "UPDATE accounts SET label=?, plan=?, monthly_cost_usd=?, "
            "window_token_limit=? WHERE account_id=?",
            (f"{name} ({plan.title()})", plan, cost, tokens, acct_row["account_id"]),
        )
        conn.commit()

    print(flush=True)
    print(" Dashboard configured!", flush=True)
    print(f" Account: {name} ({plan.title()})", flush=True)
    print(f" Plan cost: ${cost}/mo", flush=True)
    if tokens:
        print(f" Window: {tokens:,} tokens per 5 hours", flush=True)
    print(flush=True)
    print(" Starting dashboard...", flush=True)
    print(flush=True)
    conn.close()

    # Auto-start dashboard after init
    cmd_dashboard()
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
def cmd_scan():
    """Incremental scan command.

    `scan --reprocess` re-tags every existing session row from source JSONL
    without re-reading file offsets — the fix for "I added a new project to
    config.py but my old sessions still say Other".  A plain `scan` ingests
    new JSONL rows, regenerates insights, and runs best-effort waste-pattern
    detection.
    """
    if "--reprocess" in sys.argv:
        cmd_scan_reprocess()
        return

    init_db()
    new_rows = scan_all()
    conn = get_conn()
    insight_count = generate_insights(conn)

    # Waste-pattern detection after every scan.  Best-effort: a failure
    # here must never abort the scan itself.
    try:
        from waste_patterns import detect_all as _detect_waste
        waste_summary = _detect_waste(conn)
    except Exception as e:
        waste_summary = {"error": str(e)}
    conn.close()

    print(f"Scan complete: {new_rows} new rows (incremental), {insight_count} insights generated")
    if isinstance(waste_summary, dict) and "error" not in waste_summary:
        nonzero = [f"{k}={v}" for k, v in waste_summary.items() if v]
        if nonzero:
            print(f"Waste patterns: {', '.join(nonzero)}")
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
def cmd_waste():
    """Run waste-pattern detection standalone and print a summary.

    Prints the per-pattern counts from the latest detection pass, then the
    top-10 accumulated waste events ordered by estimated dollar cost.
    """
    init_db()
    conn = get_conn()
    from waste_patterns import detect_all as _detect_waste
    summary = _detect_waste(conn)
    print()
    print(" Waste patterns detected (last scan)")
    print(f" {'-' * 40}")
    for k, v in summary.items():
        print(f" {k:<20} {v:>6}")
    print()
    # Historical events table, worst offenders first.
    rows = conn.execute(
        "SELECT project, pattern_type, COUNT(*) AS n, SUM(token_cost) AS cost "
        "FROM waste_events GROUP BY project, pattern_type ORDER BY cost DESC LIMIT 10"
    ).fetchall()
    if rows:
        print(" Top waste events by estimated cost:")
        print(f" {'Project':<18} {'Pattern':<18} {'Count':>6} {'$Cost':>10}")
        print(f" {'-' * 56}")
        for r in rows:
            # NULL-safe rendering: project/pattern may be NULL, cost may be NULL.
            print(f" {str(r[0] or '-'):<18} {str(r[1] or '-'):<18} "
                  f"{(r[2] or 0):>6} ${(r[3] or 0):>9.2f}")
        print()
    conn.close()
|
|
311
|
+
|
|
312
|
+
|
|
313
|
+
def _fmt_fix_header(fix):
    """Render a one-line summary header for a fix row: id, project,
    waste pattern, title, and the UTC date the fix was applied."""
    applied = datetime.fromtimestamp(fix["created_at"], tz=timezone.utc).strftime("%b %d")
    ident = f"#{fix['id']:<3}"
    return f"{ident} {fix['project']} · {fix['waste_pattern']} · {fix['title']} (applied {applied})"
|
|
316
|
+
|
|
317
|
+
|
|
318
|
+
def _fmt_status_badge(status):
    """Map an internal fix status to its display label.

    Unknown statuses pass through unchanged.
    """
    if status in ("applied", "measuring"):
        return "measuring"
    if status == "confirmed":
        return "confirmed ✓"
    if status == "reverted":
        return "reverted"
    return status
|
|
325
|
+
|
|
326
|
+
|
|
327
|
+
def cmd_fixes():
    """List all recorded fixes with current status.

    For each fix prints a header line, its status and elapsed days, and a
    status-dependent detail line: confirmed fixes show before/after deltas
    (plan-aware: window metrics for max/pro, cost metrics otherwise),
    pending fixes show the baseline and the `measure` command to run.
    """
    init_db()
    conn = get_conn()
    from fix_tracker import all_fixes_with_latest
    fixes = all_fixes_with_latest(conn)
    conn.close()

    print()
    if not fixes:
        print(" Fix Tracker — no fixes recorded yet")
        print()
        print(" Start by recording one:")
        print(" python3 cli.py fix add")
        print()
        return

    print(f" Fix Tracker — {len(fixes)} fix{'es' if len(fixes) != 1 else ''} recorded")
    print(f" {'─' * 60}")
    now = int(time.time())
    for f in fixes:
        baseline = f.get("baseline") or {}
        plan = baseline.get("plan_type", "max")
        # Clamp to zero in case created_at is in the future or missing.
        days_elapsed = max(int((now - (f["created_at"] or now)) / 86400), 0)
        status_txt = _fmt_status_badge(f["status"])
        print(f" #{f['id']:<3} {f['project']} · {f['waste_pattern']} · {f['title']}")
        print(f" applied {datetime.fromtimestamp(f['created_at'], tz=timezone.utc).strftime('%b %d')} · "
              f"status: {status_txt} · {days_elapsed}d elapsed")

        latest = f.get("latest")
        if f["status"] == "confirmed" and latest:
            delta = latest.get("delta", {})
            waste = delta.get("waste_events", {})
            eff = delta.get("effective_window_pct", {})
            cost = delta.get("avg_cost_per_session", {})
            wb = waste.get("before", 0); wa = waste.get("after", 0); wp = waste.get("pct_change", 0)
            if plan in ("max", "pro"):
                # Subscription plans: window efficiency is the headline metric.
                eb = eff.get("before", 0); ea = eff.get("after", 0)
                print(f" before: {wb} → after: {wa} ({wp:+.0f}%) · window: {eb}% → {ea}%")
            else:
                # API plans: per-session dollar cost is the headline metric.
                cb = cost.get("before", 0); ca = cost.get("after", 0); cp = cost.get("pct_change", 0)
                print(f" before: {wb} → after: {wa} ({wp:+.0f}%) · cost/sess: ${cb:.2f} → ${ca:.2f} ({cp:+.0f}%)")
        elif f["status"] in ("applied", "measuring"):
            waste_b = (baseline.get("waste_events") or {}).get("total", 0)
            print(f" baseline: {waste_b} waste events · run: python3 cli.py measure {f['id']}")
        elif f["status"] == "reverted":
            print(" reverted")
        print()
    print(f" {'─' * 60}")
    print()
|
|
378
|
+
|
|
379
|
+
|
|
380
|
+
def cmd_fix_add():
    """Interactive baseline + fix recorder.

    Walks the user through project / waste pattern / title / fix type /
    free-form detail, captures a metrics baseline via record_fix(), and
    prints the follow-up `measure` command.
    """
    init_db()
    conn = get_conn()
    from fix_tracker import record_fix, WASTE_PATTERNS, FIX_TYPES, WASTE_PATTERN_LABELS

    # Discover candidate projects from the live DB
    project_rows = conn.execute(
        "SELECT project, COUNT(*) AS n FROM sessions GROUP BY project ORDER BY n DESC"
    ).fetchall()
    projects = [r[0] for r in project_rows if r[0]]

    print()
    print(" Record a fix — Claudash Fix Tracker")
    print(f" {'─' * 50}")
    if projects:
        print(" Projects in the DB:")
        for i, p in enumerate(projects, 1):
            print(f" {i}. {p}")
    print()
    # NOTE(review): unlike cmd_init, these input() calls are not wrapped in
    # try/except EOFError — piped stdin will raise here. Confirm intent.
    project = input(" Project name: ").strip()
    if not project:
        print(" Cancelled.")
        return

    print()
    print(" What waste pattern did you fix?")
    for i, p in enumerate(WASTE_PATTERNS, 1):
        print(f" {i}. {p} — {WASTE_PATTERN_LABELS.get(p, '')}")
    sel = input(" Number or name: ").strip()
    if sel.isdigit():
        idx = int(sel) - 1
        # Out-of-range numbers and unknown names both fall back to "custom".
        pattern = WASTE_PATTERNS[idx] if 0 <= idx < len(WASTE_PATTERNS) else "custom"
    else:
        pattern = sel if sel in WASTE_PATTERNS else "custom"

    title = input(" Fix title (one line): ").strip()
    if not title:
        title = f"{pattern} fix"

    print()
    print(" Fix type:")
    for i, t in enumerate(FIX_TYPES, 1):
        print(f" {i}. {t}")
    sel = input(" Number or name: ").strip()
    if sel.isdigit():
        idx = int(sel) - 1
        fix_type = FIX_TYPES[idx] if 0 <= idx < len(FIX_TYPES) else "other"
    else:
        fix_type = sel if sel in FIX_TYPES else "other"

    print()
    print(" What exactly changed? (paste your fix, end with a blank line)")
    lines = []
    while True:
        try:
            line = input(" ")
        except EOFError:
            break
        # Stop on a blank line only when the previous line was also blank
        # (or nothing was entered yet) — this lets pasted text contain
        # single blank lines. NOTE(review): the prompt says "end with a
        # blank line" but after any content two blanks are required; confirm
        # the wording is intentional.
        if not line and (not lines or lines[-1] == ""):
            break
        lines.append(line)
    fix_detail = "\n".join(lines).strip()

    print()
    print(f" Capturing baseline for {project}…")
    # record_fix captures the current metrics snapshot as the baseline.
    fix_id, baseline = record_fix(conn, project, pattern, title, fix_type, fix_detail)
    conn.close()

    waste_total = (baseline.get("waste_events") or {}).get("total", 0)
    eff = baseline.get("effective_window_pct", 0)
    avg_cost = baseline.get("avg_cost_per_session", 0)
    print(f" ✓ Baseline: {waste_total} waste events, "
          f"{eff:.0f}% window efficiency, ${avg_cost:.2f}/session API-equiv")
    print(f" ✓ Fix #{fix_id} recorded.")
    print()
    print(" Next steps:")
    print(" 1. Apply your fix to the project now.")
    print(" 2. Use Claude Code normally for 7+ days.")
    print(f" 3. Run: python3 cli.py measure {fix_id}")
    print()
|
|
461
|
+
|
|
462
|
+
|
|
463
|
+
def cmd_measure():
    """Capture current metrics for a fix and print a plan-aware verdict.

    Usage: `cli.py measure <fix_id>`.  Computes before/after deltas via
    measure_fix(), prints a metrics table (window-oriented for max/pro
    plans, cost-oriented otherwise), a verdict line, and a share card.
    Exits 1 on bad usage or unknown fix id.
    """
    if len(sys.argv) < 3 or not sys.argv[2].isdigit():
        print("Usage: python3 cli.py measure <fix_id>")
        sys.exit(1)
    fix_id = int(sys.argv[2])
    init_db()
    conn = get_conn()
    from fix_tracker import measure_fix, build_share_card
    from db import get_fix, get_latest_fix_measurement
    # metrics is unused below; measure_fix persists the measurement itself.
    delta, verdict, metrics = measure_fix(conn, fix_id)
    if delta is None:
        print(f"Fix #{fix_id} not found.")
        conn.close()
        sys.exit(1)

    fix = get_fix(conn, fix_id)
    plan = delta.get("plan_type", "max")
    plan_cost = delta.get("plan_cost_usd", 0)
    project = fix["project"]
    pattern = fix["waste_pattern"]
    title = fix["title"]

    # Unpack each per-metric delta dict ({"before", "after", "pct_change"}).
    waste = delta.get("waste_events", {})
    flounder = delta.get("floundering", {})
    reads = delta.get("repeated_reads", {})
    eff = delta.get("effective_window_pct", {})
    fpw = delta.get("files_per_window", {})
    turns = delta.get("avg_turns_per_session", {})
    cps = delta.get("avg_cost_per_session", {})
    total_cost = delta.get("cost_usd", {})
    days = delta.get("days_elapsed", 0)
    sessions_since = delta.get("sessions_since_fix", 0)
    api_eq = delta.get("api_equivalent_savings_monthly", 0)
    multiplier = delta.get("improvement_multiplier", 1.0)

    def row(label, before, after, change, sign="pct", ok=None):
        # Format one aligned table row; ok=True/False appends ✓/✗.
        # NOTE(review): the "pct" branch and the fallthrough return the same
        # string — the final return is effectively dead; confirm before
        # simplifying.
        change_str = f"{change:+.0f}%"
        marker = ""
        if ok is not None:
            marker = " ✓" if ok else " ✗"
        if sign == "money":
            return f" {label:<22} ${before:<10.2f} ${after:<10.2f} {change_str}{marker}"
        if sign == "pct":
            return f" {label:<22} {before!s:<11} {after!s:<11} {change_str}{marker}"
        return f" {label:<22} {before!s:<11} {after!s:<11} {change_str}{marker}"

    print()
    print(f" Measuring Fix #{fix_id}: {project} · {pattern} — {title}")
    print(f" {'─' * 60}")
    print(f" {'Metric':<22} {'Before':<11} {'After':<11} {'Change'}")
    print(f" {'─' * 60}")
    # For waste metrics a negative change is an improvement.
    print(row("Floundering events", flounder.get("before", 0), flounder.get("after", 0), flounder.get("pct_change", 0), ok=flounder.get("pct_change", 0) < 0))
    print(row("Repeated reads", reads.get("before", 0), reads.get("after", 0), reads.get("pct_change", 0), ok=reads.get("pct_change", 0) < 0))
    print(row("Waste total", waste.get("before", 0), waste.get("after", 0), waste.get("pct_change", 0), ok=waste.get("pct_change", 0) < 0))

    if plan in ("max", "pro"):
        # Subscription plans: window efficiency up = good.
        print(row("Window efficiency", f"{eff.get('before', 0)}%", f"{eff.get('after', 0)}%",
                  eff.get("pct_change", 0), ok=eff.get("pct_change", 0) > 0))
        print(row("Files per window", fpw.get("before", 0), fpw.get("after", 0),
                  fpw.get("pct_change", 0), ok=fpw.get("pct_change", 0) > 0))
        print(row("Avg turns/session", turns.get("before", 0), turns.get("after", 0), turns.get("pct_change", 0)))
        print(row("API-equiv cost/sess", cps.get("before", 0), cps.get("after", 0),
                  cps.get("pct_change", 0), sign="money"))
    else:
        # API plans: dollar cost down = good.
        print(row("Cost per session", cps.get("before", 0), cps.get("after", 0),
                  cps.get("pct_change", 0), sign="money", ok=cps.get("pct_change", 0) < 0))
        print(row("Total cost (window)", total_cost.get("before", 0), total_cost.get("after", 0),
                  total_cost.get("pct_change", 0), sign="money", ok=total_cost.get("pct_change", 0) < 0))

    print(f" {'─' * 60}")
    verdict_upper = verdict.replace("_", " ").upper()
    marker = "✓" if verdict == "improving" else ("✗" if verdict == "worsened" else "—")
    print(f" Verdict: {verdict_upper} {marker} ({days} days, {sessions_since} sessions)")
    print()
    if plan in ("max", "pro"):
        print(f" Same ${plan_cost:.0f}/mo {plan.upper()} plan — {multiplier}x more output per window.")
        print(f" API-equivalent waste eliminated: ~${api_eq:.0f}/mo")
    else:
        print(f" Monthly savings: ~${api_eq:.0f}/mo")
    print()

    latest = get_latest_fix_measurement(conn, fix_id)
    card = build_share_card(fix, latest)
    conn.close()
    print(" Share card:")
    print()
    for line in card.split("\n"):
        print(" " + line)
    print()
|
|
553
|
+
|
|
554
|
+
|
|
555
|
+
def cmd_mcp():
    """Print MCP server settings.json snippet + run a quick smoke test."""
    import subprocess
    mcp_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "mcp_server.py")
    # Snippet the user should merge into ~/.claude/settings.json.
    snippet = {"mcpServers": {"claudash": {"command": "python3", "args": [mcp_path]}}}
    print()
    print(" Claudash MCP server")
    print(f" {'-' * 50}")
    print(" Add this to ~/.claude/settings.json (merge with any existing")
    print(" `mcpServers` block — don't overwrite the whole file):")
    print()
    print(json.dumps(snippet, indent=2))
    print()
    print(" Running smoke test…")
    print()
    try:
        proc = subprocess.run(
            ["python3", mcp_path, "test"],
            capture_output=True, text=True, timeout=15,
        )
        print(" " + (proc.stdout.strip() or "(no output)"))
        if proc.stderr.strip():
            print(" stderr: " + proc.stderr.strip())
        if proc.returncode != 0:
            print(f" exit code: {proc.returncode}")
    except Exception as e:
        # Broad on purpose: a broken smoke test must not crash the CLI.
        print(f" FAILED to run mcp_server.py test: {e}")
    print()
|
|
587
|
+
|
|
588
|
+
|
|
589
|
+
def _read_session_id_from_jsonl(filepath):
    """Return the first session identifier found in a JSONL file, or None.

    Checks the keys ``sessionId``, ``session_id`` and ``uuid`` (in that
    order) on each decoded line and stops at the first truthy value, so
    even huge files are cheap to probe.  Unreadable files, undecodable
    lines and blank lines are skipped silently.
    """
    try:
        with open(filepath, "r", errors="replace") as fh:
            for raw in fh:
                text = raw.strip()
                if not text:
                    continue
                try:
                    record = json.loads(text)
                except json.JSONDecodeError:
                    continue
                for key in ("sessionId", "session_id", "uuid"):
                    value = record.get(key)
                    if value:
                        return value
    except OSError:
        return None
    return None
|
|
608
|
+
|
|
609
|
+
|
|
610
|
+
def cmd_scan_reprocess():
    """Re-tag every tracked session using the current PROJECT_MAP.

    Steps:
      1. Sync config.PROJECT_MAP → account_projects (so keyword edits land).
      2. Walk scan_state.file_path (the authoritative list of scanned files).
      3. For each file, read the first sessionId and resolve project/account
         from the file's folder path.
      4. UPDATE sessions SET source_path, project, account WHERE session_id.
      5. Print a before/after distribution diff.
    """
    # NOTE(review): _json appears unused in this function — confirm before
    # removing the alias.
    import json as _json  # local alias so we don't shadow module-level imports
    from scanner import resolve_project, _parse_subagent_info
    init_db()
    conn = get_conn()

    # Snapshot before — per-project session-row counts.
    before = dict(conn.execute(
        "SELECT project, COUNT(*) FROM sessions GROUP BY project"
    ).fetchall())
    total_before = sum(before.values())

    # Step 1 — sync keyword map from config.py
    sync_project_map_from_config(conn)
    project_map = get_project_map_config(conn)

    # Step 2 — list all tracked JSONL files
    files = [r[0] for r in conn.execute(
        "SELECT file_path FROM scan_state ORDER BY file_path"
    ).fetchall()]

    updated = 0
    skipped_missing = 0
    skipped_no_sid = 0
    # NOTE(review): resolved_counts is accumulated but never reported —
    # confirm whether it should feed the summary output.
    resolved_counts = {}

    for filepath in files:
        if not os.path.isfile(filepath):
            skipped_missing += 1
            continue
        sid = _read_session_id_from_jsonl(filepath)
        if not sid:
            skipped_no_sid += 1
            continue
        # Subagent files inherit the parent's project tag — resolve against
        # the parent project folder (grandparent of `subagents/`) when this
        # is a subagent file.
        is_subagent, parent_sid = _parse_subagent_info(filepath)
        if is_subagent:
            parent_project_folder = filepath.split("/subagents/")[0]
            parent_project_folder = os.path.dirname(parent_project_folder)
            folder = parent_project_folder or os.path.dirname(filepath)
        else:
            folder = os.path.dirname(filepath)
        project, account = resolve_project(folder, project_map)
        cur = conn.execute(
            "UPDATE sessions SET source_path = ?, project = ?, account = ?, "
            " is_subagent = ?, parent_session_id = ? "
            "WHERE session_id = ?",
            (filepath, project, account, is_subagent, parent_sid, sid)
        )
        # rowcount counts every session row re-tagged for this file.
        updated += cur.rowcount
        resolved_counts[project] = resolved_counts.get(project, 0) + 1

    conn.commit()

    # Snapshot after
    after = dict(conn.execute(
        "SELECT project, COUNT(*) FROM sessions GROUP BY project"
    ).fetchall())
    total_after = sum(after.values())

    print()
    print(f" Reprocessed: {updated:,} session rows across {len(files):,} files")
    print(f" Files skipped (missing on disk): {skipped_missing}")
    print(f" Files skipped (no sessionId): {skipped_no_sid}")
    print()
    print(f" {'Project':<22} {'Before':>8} {'After':>8} {'Delta':>8}")
    print(f" {'-' * 50}")
    # Sort by post-reprocess count, descending.
    all_projs = sorted(set(before.keys()) | set(after.keys()),
                       key=lambda p: -(after.get(p, 0) or 0))
    for p in all_projs:
        b = before.get(p, 0)
        a = after.get(p, 0)
        d = a - b
        delta = f"{d:+,}" if d else "—"
        print(f" {str(p):<22} {b:>8,} {a:>8,} {delta:>8}")
    print(f" {'-' * 50}")
    print(f" {'TOTAL':<22} {total_before:>8,} {total_after:>8,}")
    print()

    conn.close()
|
|
702
|
+
|
|
703
|
+
|
|
704
|
+
def cmd_show_other():
    """List every source path currently tagged 'Other' so the user can see
    what keywords need adding to PROJECT_MAP."""
    init_db()
    conn = get_conn()
    other_rows = conn.execute(
        "SELECT source_path, COUNT(*) AS n, COUNT(DISTINCT session_id) AS sessions "
        "FROM sessions WHERE project = 'Other' "
        "GROUP BY source_path ORDER BY n DESC"
    ).fetchall()
    conn.close()

    if not other_rows:
        print("\n No sessions are tagged 'Other'. Every session has a project.\n")
        return

    print(f"\n Sessions tagged 'Other' — {len(other_rows)} distinct source paths:")
    print(" " + "-" * 72)
    for row in other_rows:
        src = row[0] or "(empty)"
        row_count = row[1]
        session_count = row[2]
        # Keep the table readable: show at most 60 chars, preferring the
        # path tail since that is usually the distinguishing part.
        if len(src) > 60:
            src = "…" + src[-59:]
        print(f" {src:<62} {row_count:>5} rows / {session_count:>3} sessions")
    print(" " + "-" * 72)
    print(" Add folder keywords to PROJECT_MAP in config.py, then run:")
    print(" python3 cli.py scan --reprocess")
    print()
|
|
733
|
+
|
|
734
|
+
|
|
735
|
+
def cmd_stats():
    """Print a per-account usage table (tokens, 30-day cost, cache hit
    rate, dominant model, session count), then the overall efficiency
    score and a pointer to the `keys` command."""
    init_db()
    scan_all()
    conn = get_conn()
    accounts_cfg = get_accounts_config(conn)

    print()
    for acct_key, info in accounts_cfg.items():
        metrics = account_metrics(conn, acct_key)
        projects = project_metrics(conn, acct_key)

        plan = info.get("plan", "max").upper()
        cost = info.get("monthly_cost_usd", 0)
        roi = metrics.get("subscription_roi", 0)

        print(f" {info['label']} ({plan} ${cost}/mo) — ROI: {roi}x")
        print(f" {'Project':<15} {'Tokens':>12} {'Cost 30d':>10} {'Cache%':>8} {'Model':<14} {'Sessions':>8}")
        print(" " + "-" * 73)

        for proj in projects:
            model_short = proj["dominant_model"].replace("claude-", "")
            line = (
                f" {proj['name']:<15} {proj['total_tokens']:>12,} "
                f"${proj['cost_usd_30d']:>8.2f} {proj['cache_hit_rate']:>7.1f}% "
                f"{model_short:<14} {proj['session_count']:>8}"
            )
            print(line)

        print(" " + "-" * 73)
        print(f" {'TOTAL':<15} {'':>12} ${metrics['total_cost_30d']:>8.2f} {metrics['cache_hit_rate']:>7.1f}%")
        print(f" Sessions today: {metrics['sessions_today']} | Cache ROI: ${metrics['cache_roi_usd']:.2f}")
        print()

    # Efficiency score spans every account; best-effort — silently
    # skipped when the score cannot be computed.
    try:
        eff = compute_efficiency_score(conn)
        print(f" Efficiency Score: {eff['score']}/100 (Grade {eff['grade']})")
        print(f" Top improvement: {eff['top_improvement']}")
        print()
    except Exception:
        pass

    print(" Run `python3 cli.py keys` to retrieve the dashboard_key (never printed here).")
    print()

    conn.close()
|
|
781
|
+
|
|
782
|
+
|
|
783
|
+
def cmd_insights():
    """Regenerate insights, then print every non-dismissed one with a
    severity tag and its creation time (UTC)."""
    init_db()
    conn = get_conn()
    generate_insights(conn)

    active = get_insights(conn, dismissed=0)
    conn.close()

    if not active:
        print("No active insights.")
        return

    print(f"\n Active Insights ({len(active)})")
    print(" " + "=" * 70)

    # Map insight type -> severity label shown in the left column.
    severity = {
        "model_waste": "AMBER",
        "cache_spike": "RED",
        "compaction_gap": "AMBER",
        "cost_target": "GREEN",
        "window_risk": "RED",
        "roi_milestone": "GREEN",
        "heavy_day": "BLUE",
        "best_window": "BLUE",
    }

    for item in active:
        tag = severity.get(item["insight_type"], "INFO")
        created = datetime.fromtimestamp(item["created_at"], tz=timezone.utc).strftime("%Y-%m-%d %H:%M")
        print(f" [{tag:>5}] {created} {item['message']}")

    print()
|
|
811
|
+
|
|
812
|
+
|
|
813
|
+
def cmd_window():
    """Show the current usage window for every account: tokens used vs.
    the configured limit, burn rate, predicted exhaustion, and recent
    window history."""
    init_db()
    conn = get_conn()
    accounts_cfg = get_accounts_config(conn)

    print()
    for acct_key, info in accounts_cfg.items():
        win = window_intelligence(conn, acct_key)
        token_limit = info.get("window_token_limit", 1_000_000)

        start_str = datetime.fromtimestamp(win["window_start"], tz=timezone.utc).strftime("%H:%M UTC")
        end_str = datetime.fromtimestamp(win["window_end"], tz=timezone.utc).strftime("%H:%M UTC")

        pct = win["window_pct"]
        # Traffic-light thresholds: >80% danger, >50% caution.
        status = "DANGER" if pct > 80 else "CAUTION" if pct > 50 else "OK"

        print(f" {info['label']}")
        print(f" Window: {start_str} - {end_str}")
        print(f" Used: {win['total_tokens']:,} / {token_limit:,} ({pct:.1f}%) [{status}]")

        if win["minutes_to_limit"]:
            print(f" Predicted exhaust: ~{win['minutes_to_limit']} min")
        if win.get("burn_per_minute", 0) > 0:
            print(f" Burn rate: {int(win['burn_per_minute']):,} tok/min")

        heavy_ok = "Yes" if win.get("safe_for_heavy_session") else "No"
        print(f" Safe for heavy session: {heavy_ok}")
        print(f" Best start hour (UTC): {win.get('best_start_hour', '?')}:00")

        history = win.get("window_history", [])
        if history:
            avg_pct = sum(w.get("pct_used", 0) for w in history) / len(history)
            print(f" Last {len(history)} windows avg: {avg_pct:.1f}%")

        print()

    conn.close()
|
|
855
|
+
|
|
856
|
+
|
|
857
|
+
def cmd_export():
    """Export the last 30 days of session rows to usage_export.csv.

    The file is written next to this script. Encoding is forced to UTF-8
    so non-ASCII project names / source paths export correctly regardless
    of the system locale (previously `open()` used the platform default
    encoding, which can raise UnicodeEncodeError on non-UTF-8 locales).
    """
    init_db()
    conn = get_conn()
    # 30-day lookback, computed in UTC to match the stored epoch timestamps.
    since = int((datetime.now(timezone.utc) - timedelta(days=30)).timestamp())
    rows = conn.execute(
        "SELECT * FROM sessions WHERE timestamp >= ? ORDER BY timestamp", (since,)
    ).fetchall()
    conn.close()

    outpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), "usage_export.csv")
    # newline="" is required by the csv module so it controls line endings.
    with open(outpath, "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow([
            "session_id", "timestamp", "datetime", "project", "account", "model",
            "input_tokens", "output_tokens", "cache_read_tokens", "cache_creation_tokens",
            "cost_usd", "source_path", "compaction_detected",
        ])
        for r in rows:
            # Human-readable UTC datetime alongside the raw epoch value.
            dt = datetime.fromtimestamp(r["timestamp"], tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
            writer.writerow([
                r["session_id"], r["timestamp"], dt, r["project"], r["account"], r["model"],
                r["input_tokens"], r["output_tokens"], r["cache_read_tokens"], r["cache_creation_tokens"],
                r["cost_usd"], r["source_path"], r["compaction_detected"],
            ])

    print(f"Exported {len(rows)} rows to {outpath}")
|
|
883
|
+
|
|
884
|
+
|
|
885
|
+
def cmd_keys():
    """Print dashboard_key and sync_token, or rotate the dashboard key.

    Sensitive — do not paste the output into screenshots, chat
    transcripts, or shared terminals.

    Usage:
        python3 cli.py keys            # show both keys
        python3 cli.py keys --rotate   # generate a new dashboard_key
    """
    init_db()

    if len(sys.argv) >= 3 and sys.argv[2] == "--rotate":
        # Local import: secrets is only needed on the rotate path.
        import secrets
        new_key = secrets.token_hex(32)  # 64 hex chars ≈ 256 bits of entropy
        conn = get_conn()
        set_setting(conn, "dashboard_key", new_key)
        conn.close()
        print()
        print(f" New dashboard_key: {new_key}")
        # Plain strings below: these lines had no placeholders (needless
        # f-strings, Ruff F541); output is unchanged.
        print(" Update this in your browser localStorage and any scripts.")
        print()
        return

    conn = get_conn()
    dk = get_setting(conn, "dashboard_key") or "(not set)"
    st = get_setting(conn, "sync_token") or "(not set)"
    conn.close()
    print()
    print(" These values grant full write access to your dashboard.")
    print(" Keep them private. Do not share, screenshot, or commit them.")
    print()
    print(f" dashboard_key : {dk}")
    print(" → paste into the browser prompt when an admin button returns 401")
    print()
    print(f" sync_token : {st}")
    print(" → paste into tools/mac-sync.py SYNC_TOKEN variable")
    print()
|
|
916
|
+
|
|
917
|
+
|
|
918
|
+
def cmd_claude_ai():
    """Show claude.ai browser tracking status for all accounts.

    Flags (read from sys.argv, not argparse):
      --sync-token        print ONLY the raw sync token (for piping)
      --setup ACCOUNT_ID  interactively connect an account via session key
    """
    init_db()

    # Handle --sync-token: print ONLY the raw token, nothing else
    if len(sys.argv) >= 3 and sys.argv[2] == "--sync-token":
        conn = get_conn()
        token = get_setting(conn, "sync_token")
        conn.close()
        # NOTE(review): if the setting is unset this prints the literal
        # string "None" — callers piping this output should beware.
        print(token)
        return

    conn = get_conn()
    accounts = get_claude_ai_accounts_all(conn)

    if not accounts:
        print("No claude.ai accounts configured.")
        conn.close()
        return

    print()
    for a in accounts:
        # `a` supports both [] and .get — presumably a dict per account;
        # verify against get_claude_ai_accounts_all.
        aid = a["account_id"]
        label = a.get("label", aid)
        status = a.get("status", "unconfigured")
        plan = a.get("plan", "max")
        last_polled = a.get("last_polled")

        # Humanize the time since the last poll (s/m/h granularity).
        poll_ago = ""
        if last_polled:
            diff = int(time.time()) - last_polled
            if diff < 60:
                poll_ago = f"{diff}s ago"
            elif diff < 3600:
                poll_ago = f"{diff // 60}m ago"
            else:
                poll_ago = f"{diff // 3600}h ago"
        else:
            poll_ago = "never"

        snap = get_latest_claude_ai_snapshot(conn, aid)

        # One status line per account. Pro-plan accounts with a message
        # quota show used/limit; everything else shows % of window used.
        if status == "unconfigured":
            print(f" {label}: unconfigured")
        elif status == "expired":
            print(f" {label}: SESSION EXPIRED | last polled {poll_ago}")
        elif status == "active" and snap:
            if plan == "pro" and snap.get("messages_limit", 0) > 0:
                print(f" {label}: {snap['messages_used']}/{snap['messages_limit']} messages | last polled {poll_ago} | ACTIVE")
            else:
                print(f" {label}: {snap.get('pct_used', 0):.1f}% window used | last polled {poll_ago} | ACTIVE")
        else:
            # Fallback: unknown status, or active with no snapshot yet.
            err = a.get("last_error", "unknown")
            print(f" {label}: {status} | last polled {poll_ago} | {err}")

    # Handle --setup flag
    # NOTE(review): `--setup` without an account id (len(sys.argv) == 3)
    # is silently ignored — only the status listing above is shown.
    if len(sys.argv) >= 4 and sys.argv[2] == "--setup":
        target_id = sys.argv[3]
        print(f"\n Setting up claude.ai tracking for '{target_id}'...")
        session_key = input(" Paste session key (sk-ant-sid01-...): ").strip()
        if not session_key:
            print(" Cancelled — no session key provided.")
        else:
            result = tracker_setup_account(target_id, session_key)
            if result["success"]:
                print(f" Connected: {result['label']}, {result['pct_used']:.1f}% window used")
            else:
                print(f" Error: {result['error']}")

    print()
    conn.close()
|
|
989
|
+
|
|
990
|
+
|
|
991
|
+
def main():
    """CLI entry point: parse sys.argv and dispatch to a command handler.

    Exits 0 after printing help, 1 on an unknown command or bad `fix`
    subcommand.
    """
    if len(sys.argv) < 2 or sys.argv[1] in ("--help", "-h", "help"):
        print(HELP_TEXT.format(vps_ip=VPS_IP))
        sys.exit(0)

    cmd = sys.argv[1].lower()

    # Two-word commands: `fix add`
    if cmd == "fix":
        sub = sys.argv[2] if len(sys.argv) >= 3 else ""
        if sub != "add":
            print("Usage: python3 cli.py fix add")
            sys.exit(1)
        cmd_fix_add()
        return

    # Single-word command dispatch table.
    dispatch = {
        "dashboard": cmd_dashboard,
        "init": cmd_init,
        "scan": cmd_scan,
        "show-other": cmd_show_other,
        "stats": cmd_stats,
        "insights": cmd_insights,
        "window": cmd_window,
        "export": cmd_export,
        "waste": cmd_waste,
        "fixes": cmd_fixes,
        "measure": cmd_measure,
        "mcp": cmd_mcp,
        "keys": cmd_keys,
        "claude-ai": cmd_claude_ai,
    }

    handler = dispatch.get(cmd)
    if handler is None:
        print(f"Unknown command: {cmd}")
        print(HELP_TEXT.format(vps_ip=VPS_IP))
        sys.exit(1)
    handler()
|
|
1031
|
+
|
|
1032
|
+
|
|
1033
|
+
if __name__ == "__main__":
    # Script entry point: dispatch to the requested subcommand.
    main()
|