claude-code-tracker 1.0.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -0
- package/install.sh +23 -0
- package/package.json +11 -4
- package/src/backfill.py +143 -0
- package/src/cost-summary.py +33 -8
- package/src/generate-charts.py +295 -57
- package/src/patch-durations.py +76 -0
- package/src/stop-hook.sh +27 -2
package/README.md
CHANGED

@@ -45,6 +45,16 @@ cd claude-code-tracker
 
 Restart Claude Code after any install method.
 
+### Backfilling historical sessions
+
+The installer automatically backfills all pre-existing sessions for the current project. If you run `install.sh` from inside a git repo, any transcripts that exist in `~/.claude/projects/` for that project will be parsed and added to `tokens.json` with their original dates. Re-running install won't create duplicates.
+
+To backfill manually (e.g. for a different project):
+
+```bash
+python3 ~/.claude/tracking/backfill.py /path/to/your/project
+```
+
 ---
 
 ## What gets created
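Note: the backfill described above locates transcripts via Claude Code's project-slug convention (the absolute project path with every `/` replaced by `-`, as the comments in `backfill.py` further down spell out). A minimal sketch of that mapping, using a hypothetical project path:

```python
# Sketch of the slug mapping used by backfill.py; the path below is a hypothetical example.
import os

project_root = "/Users/alex/work/my-app"   # hypothetical project path
slug = project_root.replace("/", "-")      # "-Users-alex-work-my-app"
transcripts_dir = os.path.expanduser("~/.claude/projects/" + slug)
print(transcripts_dir)  # e.g. /Users/alex/.claude/projects/-Users-alex-work-my-app
```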
package/install.sh
CHANGED

@@ -49,4 +49,27 @@ with open(settings_file, 'w') as f:
 print("Hook registered in", settings_file)
 PYEOF
 
+# Patch ~/.claude/CLAUDE.md — add tracking instruction if not present
+CLAUDE_MD="$HOME/.claude/CLAUDE.md"
+MARKER="planning session ends without implementation"
+if [ -f "$CLAUDE_MD" ] && grep -qF "$MARKER" "$CLAUDE_MD"; then
+  echo "CLAUDE.md tracking instruction already present."
+else
+  cat >> "$CLAUDE_MD" <<'MDEOF'
+- When a planning session ends without implementation (plan rejected, approach changed, or pure research), still write a tracking entry — mark it as architecture category and note what was decided against and why.
+MDEOF
+  echo "Tracking instruction added to $CLAUDE_MD"
+fi
+
+# Backfill historical sessions for the current project
+PROJECT_ROOT="$PWD"
+while [[ "$PROJECT_ROOT" != "/" ]]; do
+  [[ -d "$PROJECT_ROOT/.git" ]] && break
+  PROJECT_ROOT="$(dirname "$PROJECT_ROOT")"
+done
+if [[ "$PROJECT_ROOT" != "/" ]]; then
+  echo "Backfilling historical sessions..."
+  python3 "$INSTALL_DIR/backfill.py" "$PROJECT_ROOT"
+fi
+
 echo "claude-code-tracker installed. Restart Claude Code to activate."
package/package.json
CHANGED

@@ -1,18 +1,25 @@
 {
   "name": "claude-code-tracker",
-  "version": "1.0.0",
+  "version": "1.1.0",
   "description": "Automatic token, cost, and prompt tracking for Claude Code sessions",
-  "keywords": [
+  "keywords": [
+    "claude",
+    "claude-code",
+    "anthropic",
+    "tracking",
+    "cost",
+    "tokens"
+  ],
   "license": "MIT",
   "repository": {
     "type": "git",
-    "url": "https://github.com/kelsi-andrewss/claude-code-tracker"
+    "url": "git+https://github.com/kelsi-andrewss/claude-code-tracker.git"
   },
   "scripts": {
     "postinstall": "bash ./install.sh"
   },
   "bin": {
-    "claude-tracker-cost": "
+    "claude-tracker-cost": "src/cost-summary.py"
   },
   "engines": {
     "node": ">=14"
package/src/backfill.py
ADDED

@@ -0,0 +1,143 @@
+#!/usr/bin/env python3
+"""
+Backfill historical Claude Code sessions into tokens.json.
+
+Usage:
+    python3 backfill.py <project_root>
+
+Scans ~/.claude/projects/<slug>/*.jsonl for transcripts belonging to the
+given project, parses token usage from each, and appends entries to
+<project_root>/.claude/tracking/tokens.json. Sessions already present
+are skipped.
+"""
+import sys, json, os, glob
+from datetime import datetime
+
+project_root = os.path.abspath(sys.argv[1])
+project_name = os.path.basename(project_root)
+tracking_dir = os.path.join(project_root, ".claude", "tracking")
+tokens_file = os.path.join(tracking_dir, "tokens.json")
+
+# Claude Code slugifies project paths: replace "/" with "-"
+slug = project_root.replace("/", "-")
+transcripts_dir = os.path.expanduser("~/.claude/projects/" + slug)
+
+if not os.path.isdir(transcripts_dir):
+    print("No transcript directory found, nothing to backfill.")
+    sys.exit(0)
+
+# Load existing data and build set of known session IDs
+data = []
+if os.path.exists(tokens_file):
+    try:
+        with open(tokens_file) as f:
+            data = json.load(f)
+    except Exception:
+        data = []
+
+known_ids = {e.get("session_id") for e in data}
+
+# Find all JSONL transcripts
+jsonl_files = sorted(glob.glob(os.path.join(transcripts_dir, "*.jsonl")))
+backfilled = 0
+
+for jf in jsonl_files:
+    session_id = os.path.splitext(os.path.basename(jf))[0]
+    if session_id in known_ids:
+        continue
+
+    # Parse token usage — same logic as stop-hook.sh
+    inp = out = cache_create = cache_read = 0
+    model = "unknown"
+    first_ts = None
+    last_ts = None
+
+    try:
+        with open(jf) as f:
+            for line in f:
+                try:
+                    obj = json.loads(line)
+                    ts = obj.get("timestamp")
+                    if ts:
+                        if first_ts is None:
+                            first_ts = ts
+                        last_ts = ts
+                    msg = obj.get("message", {})
+                    if isinstance(msg, dict) and msg.get("role") == "assistant":
+                        usage = msg.get("usage", {})
+                        if usage:
+                            inp += usage.get("input_tokens", 0)
+                            out += usage.get("output_tokens", 0)
+                            cache_create += usage.get("cache_creation_input_tokens", 0)
+                            cache_read += usage.get("cache_read_input_tokens", 0)
+                        m = msg.get("model", "")
+                        if m:
+                            model = m
+                except Exception:
+                    pass
+    except Exception:
+        continue
+
+    total = inp + cache_create + cache_read + out
+    if total == 0:
+        continue
+
+    # Date from first timestamp in the transcript
+    session_date = None
+    if first_ts:
+        try:
+            session_date = datetime.fromisoformat(
+                first_ts.replace("Z", "+00:00")
+            ).strftime("%Y-%m-%d")
+        except Exception:
+            pass
+    if not session_date:
+        session_date = datetime.fromtimestamp(os.path.getmtime(jf)).strftime("%Y-%m-%d")
+
+    # Duration
+    duration = 0
+    if first_ts and last_ts:
+        try:
+            t0 = datetime.fromisoformat(first_ts.replace("Z", "+00:00"))
+            t1 = datetime.fromisoformat(last_ts.replace("Z", "+00:00"))
+            duration = max(0, int((t1 - t0).total_seconds()))
+        except Exception:
+            pass
+
+    # Cost
+    if "opus" in model:
+        cost = inp * 15 / 1e6 + cache_create * 18.75 / 1e6 + cache_read * 1.50 / 1e6 + out * 75 / 1e6
+    else:
+        cost = inp * 3 / 1e6 + cache_create * 3.75 / 1e6 + cache_read * 0.30 / 1e6 + out * 15 / 1e6
+
+    entry = {
+        "date": session_date,
+        "project": project_name,
+        "session_id": session_id,
+        "input_tokens": inp,
+        "cache_creation_tokens": cache_create,
+        "cache_read_tokens": cache_read,
+        "output_tokens": out,
+        "total_tokens": total,
+        "estimated_cost_usd": round(cost, 4),
+        "model": model,
+        "duration_seconds": duration,
+    }
+
+    data.append(entry)
+    backfilled += 1
+
+# Write updated tokens.json
+if backfilled > 0:
+    os.makedirs(os.path.dirname(tokens_file), exist_ok=True)
+    with open(tokens_file, "w") as f:
+        json.dump(data, f, indent=2)
+        f.write("\n")
+
+print(f"{backfilled} session{'s' if backfilled != 1 else ''} backfilled.")
+
+# Regenerate charts if we added anything
+if backfilled > 0:
+    script_dir = os.path.dirname(os.path.abspath(__file__))
+    charts_html = os.path.join(tracking_dir, "charts.html")
+    os.system(f'python3 "{script_dir}/generate-charts.py" "{tokens_file}" "{charts_html}" 2>/dev/null')
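`backfill.py` estimates cost with the same per-million-token rates hard-coded in the stop hook: 15 / 18.75 / 1.50 / 75 USD (input / cache write / cache read / output) when the model string contains "opus", and 3 / 3.75 / 0.30 / 15 otherwise. A worked example of that branch, using hypothetical token counts:

```python
# Worked example of the cost estimate above; the token counts are hypothetical.
inp, cache_create, cache_read, out = 2_000, 50_000, 400_000, 12_000
model = "claude-sonnet"  # hypothetical model string; no "opus" substring, so non-Opus rates apply

if "opus" in model:
    cost = inp * 15 / 1e6 + cache_create * 18.75 / 1e6 + cache_read * 1.50 / 1e6 + out * 75 / 1e6
else:
    cost = inp * 3 / 1e6 + cache_create * 3.75 / 1e6 + cache_read * 0.30 / 1e6 + out * 15 / 1e6

print(round(cost, 4))  # 0.006 + 0.1875 + 0.12 + 0.18 = 0.4935
```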
package/src/cost-summary.py
CHANGED

@@ -3,25 +3,47 @@
 Usage:
     python3 cost-summary.py <tokens.json>
     python3 cost-summary.py (defaults to .claude/tracking/tokens.json in cwd's git root)
+    python3 cost-summary.py --chart (open tracking charts in browser)
 """
 import sys
 import json
 import os
+import webbrowser
 from collections import defaultdict
 from datetime import date
 
-def
-
-root = cwd
+def find_git_root():
+    root = os.getcwd()
     while root != "/":
         if os.path.isdir(os.path.join(root, ".git")):
-
+            return root
         root = os.path.dirname(root)
+    return root
+
+def find_tokens_file():
+    root = find_git_root()
     path = os.path.join(root, ".claude", "tracking", "tokens.json")
     if os.path.exists(path):
         return path
     sys.exit(f"No tokens.json found at {path}")
 
+def format_duration(seconds):
+    if seconds <= 0:
+        return "0m"
+    h = seconds // 3600
+    m = (seconds % 3600) // 60
+    s = seconds % 60
+    if h > 0:
+        return f"{h}h {m}m"
+    return f"{m}m {s}s"
+
+if "--chart" in sys.argv:
+    chart = os.path.join(find_git_root(), ".claude", "tracking", "charts.html")
+    if not os.path.exists(chart):
+        sys.exit(f"No charts.html found at {chart} — run generate-charts.py first")
+    webbrowser.open(f"file://{chart}")
+    sys.exit(0)
+
 tokens_file = sys.argv[1] if len(sys.argv) > 1 else find_tokens_file()
 
 with open(tokens_file) as f:

@@ -32,7 +54,7 @@ if not data:
     sys.exit(0)
 
 # --- Aggregate ---
-by_date = defaultdict(lambda: {"cost": 0, "sessions": 0, "output": 0, "cache_read": 0, "cache_create": 0, "input": 0})
+by_date = defaultdict(lambda: {"cost": 0, "sessions": 0, "output": 0, "cache_read": 0, "cache_create": 0, "input": 0, "duration": 0})
 by_model = defaultdict(lambda: {"cost": 0, "sessions": 0})
 total_cost = 0
 total_sessions = len(data)

@@ -50,6 +72,7 @@ for e in data:
     by_date[d]["cache_read"] += e.get("cache_read_tokens", 0)
     by_date[d]["cache_create"] += e.get("cache_creation_tokens", 0)
     by_date[d]["input"] += e.get("input_tokens", 0)
+    by_date[d]["duration"] += e.get("duration_seconds", 0)
 
     by_model[short_model]["cost"] += cost
     by_model[short_model]["sessions"] += 1

@@ -70,11 +93,11 @@ print(f" Cost Summary — {os.path.basename(os.path.dirname(os.path.dirname(tok
 print("=" * W)
 
 print(f"\nBy date:")
-print(f" {'Date':<12} {'Sessions':>8} {'Output':>10} {'Cache Read':>12} {'Cost':>10}")
-print(f" {'-'*12} {'-'*8} {'-'*10} {'-'*12} {'-'*10}")
+print(f" {'Date':<12} {'Sessions':>8} {'Output':>10} {'Cache Read':>12} {'Duration':>10} {'Cost':>10}")
+print(f" {'-'*12} {'-'*8} {'-'*10} {'-'*12} {'-'*10} {'-'*10}")
 for d in sorted(by_date):
     r = by_date[d]
-    print(f" {d:<12} {r['sessions']:>8} {r['output']:>10,} {r['cache_read']:>12,} ${r['cost']:>9.2f}")
+    print(f" {d:<12} {r['sessions']:>8} {r['output']:>10,} {r['cache_read']:>12,} {format_duration(r['duration']):>10} ${r['cost']:>9.2f}")
 
 print(f"\nBy model:")
 print(f" {'Model':<30} {'Sessions':>8} {'Cost':>10}")

@@ -89,6 +112,8 @@ print(f" Input tokens: {total_input:>12,}")
 print(f" Cache write: {total_cache_create:>12,}")
 print(f" Cache read: {total_cache_read:>12,}")
 print(f" Output tokens: {total_output:>12,}")
+total_duration = sum(e.get("duration_seconds", 0) for e in data)
+print(f" Session time: {format_duration(total_duration):>12}")
 print(f" Estimated cost: ${total_cost:>11.2f}")
 
 if total_output > 0:
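The new Duration column and the Session time total are rendered with the `format_duration` helper added above, which reports hours and minutes once a session passes an hour and minutes and seconds below that. A quick check of the helper with hypothetical inputs:

```python
# Quick check of format_duration from cost-summary.py; inputs are hypothetical.
def format_duration(seconds):
    if seconds <= 0:
        return "0m"
    h = seconds // 3600
    m = (seconds % 3600) // 60
    s = seconds % 60
    if h > 0:
        return f"{h}h {m}m"
    return f"{m}m {s}s"

print(format_duration(3725))  # "1h 2m" (3725 s is 1 h 2 min 5 s; seconds are dropped above an hour)
print(format_duration(185))   # "3m 5s"
print(format_duration(0))     # "0m"
```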
package/src/generate-charts.py
CHANGED

@@ -11,6 +11,16 @@ from collections import defaultdict
 tokens_file = sys.argv[1]
 output_file = sys.argv[2]
 
+def format_duration(seconds):
+    if seconds <= 0:
+        return "0m"
+    h = seconds // 3600
+    m = (seconds % 3600) // 60
+    s = seconds % 60
+    if h > 0:
+        return f"{h}h {m}m"
+    return f"{m}m {s}s"
+
 with open(tokens_file) as f:
     data = json.load(f)
 

@@ -20,11 +30,12 @@ if not data:
 # --- Aggregate by date ---
 by_date = defaultdict(lambda: {"cost": 0, "sessions": 0, "output": 0,
                                "cache_read": 0, "cache_create": 0, "input": 0,
-                               "opus_cost": 0, "sonnet_cost": 0})
+                               "opus_cost": 0, "sonnet_cost": 0, "duration": 0})
 by_model = defaultdict(lambda: {"cost": 0, "sessions": 0})
 cumulative = []
 
 running_cost = 0
+running_duration = 0
 for e in sorted(data, key=lambda x: (x.get("date", ""), x.get("session_id", ""))):
     d = e.get("date", "unknown")
     cost = e.get("estimated_cost_usd", 0)

@@ -41,12 +52,15 @@ for e in sorted(data, key=lambda x: (x.get("date", ""), x.get("session_id", ""))
         by_date[d]["opus_cost"] += cost
     else:
         by_date[d]["sonnet_cost"] += cost
+    by_date[d]["duration"] += e.get("duration_seconds", 0)
 
     by_model[short]["cost"] += cost
     by_model[short]["sessions"] += 1
 
     running_cost += cost
+    running_duration += e.get("duration_seconds", 0)
     cumulative.append({"date": d, "cumulative_cost": round(running_cost, 4),
+                       "cumulative_duration": round(running_duration),
                        "session_id": e.get("session_id", "")[:8]})
 
 dates = sorted(by_date.keys())

@@ -57,16 +71,22 @@ total_output = sum(e.get("output_tokens", 0) for e in data)
 total_cache_read = sum(e.get("cache_read_tokens", 0) for e in data)
 total_all_tokens = sum(e.get("total_tokens", 0) for e in data)
 cache_pct = round(total_cache_read / total_all_tokens * 100, 1) if total_all_tokens > 0 else 0
+total_duration = sum(e.get("duration_seconds", 0) for e in data)
+avg_duration = total_duration // total_sessions if total_sessions > 0 else 0
 
 project_name = data[0].get("project", "Project") if data else "Project"
 
 # --- Count total human messages per date from JSONL transcripts ---
-project_dir = os.path.dirname(os.path.dirname(os.path.dirname(tokens_file)))  # project root
+project_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(tokens_file))))  # project root
 # Claude Code slugifies paths as: replace every "/" with "-" (keeping leading slash → leading dash)
 transcripts_dir = os.path.expanduser(
     "~/.claude/projects/" + project_dir.replace("/", "-")
 )
 human_by_date = defaultdict(int)
+trivial_by_date = defaultdict(int)
+
+def _is_trivial(text):
+    return len(text) < 40 and "?" not in text
 
 if os.path.isdir(transcripts_dir):
     for jf in glob.glob(os.path.join(transcripts_dir, "*.jsonl")):

@@ -97,24 +117,30 @@ if os.path.isdir(transcripts_dir):
                     content = obj.get("message", {}).get("content", "")
                     if isinstance(content, list):
                         # Skip pure tool-result messages
-
-
+                        texts = [
+                            c.get("text", "") for c in content
+                            if isinstance(c, dict) and c.get("type") == "text"
                             and not str(c.get("text", "")).strip().startswith("<")
-
-
-
+                        ]
+                        if texts:
+                            text = " ".join(texts).strip()
                             human_by_date[session_date] += 1
+                            if _is_trivial(text):
+                                trivial_by_date[session_date] += 1
                     elif isinstance(content, str):
                         text = content.strip()
                         # Skip slash commands and empty
                         if text and not text.startswith("<") and not text.startswith("/"):
                             human_by_date[session_date] += 1
+                            if _is_trivial(text):
+                                trivial_by_date[session_date] += 1
                 except:
                     pass
         except:
             pass
 
 total_human_msgs = sum(human_by_date.values())
+total_trivial_msgs = sum(trivial_by_date.values())
 
 # --- Aggregate prompt data from key-prompts/ folder ---
 prompts_dir = os.path.join(os.path.dirname(tokens_file), "key-prompts")

@@ -145,9 +171,49 @@ output_by_date_js = json.dumps([by_date[d]["output"] for d in dates])
 cache_read_by_date_js = json.dumps([by_date[d]["cache_read"] for d in dates])
 opus_by_date_js = json.dumps([round(by_date[d]["opus_cost"], 4) for d in dates])
 sonnet_by_date_js = json.dumps([round(by_date[d]["sonnet_cost"], 4) for d in dates])
+duration_by_date_js = json.dumps([by_date[d]["duration"] for d in dates])
 
 cumul_labels_js = json.dumps([f"{c['date']} #{i+1}" for i, c in enumerate(cumulative)])
 cumul_values_js = json.dumps([c["cumulative_cost"] for c in cumulative])
+cumul_duration_js = json.dumps([c["cumulative_duration"] for c in cumulative])
+
+avg_duration_by_date_js = json.dumps([
+    round(by_date[d]["duration"] / by_date[d]["sessions"])
+    if by_date[d]["sessions"] > 0 else 0
+    for d in dates
+])
+
+scatter_data_js = json.dumps([
+    {"x": e.get("duration_seconds", 0),
+     "y": round(e.get("estimated_cost_usd", 0), 4),
+     "label": f"{e.get('date', '')} {e.get('session_id', '')[:6]}"}
+    for e in sorted(data, key=lambda x: x.get("date", ""))
+    if e.get("duration_seconds", 0) > 0
+])
+
+# Tokens per minute per session (output tokens / duration in minutes)
+tpm_data_js = json.dumps([
+    {"x": e.get("duration_seconds", 0),
+     "y": round(e.get("output_tokens", 0) / (e["duration_seconds"] / 60), 1),
+     "label": f"{e.get('date', '')} {e.get('session_id', '')[:6]}"}
+    for e in sorted(data, key=lambda x: x.get("date", ""))
+    if e.get("duration_seconds", 0) > 0 and e.get("output_tokens", 0) > 0
+])
+
+# Duration histogram: bucket sessions into ranges
+_dur_buckets = [("0–2m", 0, 120), ("2–5m", 120, 300), ("5–15m", 300, 900),
                ("15–30m", 900, 1800), ("30m+", 1800, None)]
+_dur_counts = {label: 0 for label, _, _ in _dur_buckets}
+for e in data:
+    d = e.get("duration_seconds", 0)
+    if d <= 0:
+        continue
+    for label, lo, hi in _dur_buckets:
+        if hi is None or d < hi:
+            _dur_counts[label] += 1
+            break
+dur_hist_labels_js = json.dumps([b[0] for b in _dur_buckets])
+dur_hist_values_js = json.dumps([_dur_counts[b[0]] for b in _dur_buckets])
 
 model_labels_js = json.dumps(list(by_model.keys()))
 model_costs_js = json.dumps([round(by_model[m]["cost"], 4) for m in by_model])

@@ -157,17 +223,21 @@ model_sessions_js = json.dumps([by_model[m]["sessions"] for m in by_model])
 all_prompt_dates = sorted(set(list(prompt_by_date.keys()) + list(human_by_date.keys())))
 all_prompt_dates_js = json.dumps(all_prompt_dates)
 total_msgs_by_date_js = json.dumps([human_by_date.get(d, 0) for d in all_prompt_dates])
+trivial_by_date_js = json.dumps([trivial_by_date.get(d, 0) for d in all_prompt_dates])
 key_prompts_by_date_js = json.dumps([prompt_by_date.get(d, {}).get("total", 0) for d in all_prompt_dates])
 
-# Efficiency ratio per date
+# Efficiency ratio per date: key / (total - trivial) * 100, None if no non-trivial messages
 efficiency_by_date = []
 for d in all_prompt_dates:
     total = human_by_date.get(d, 0)
+    trivial = trivial_by_date.get(d, 0)
+    non_trivial = total - trivial
     key = prompt_by_date.get(d, {}).get("total", 0)
-    efficiency_by_date.append(round(key /
+    efficiency_by_date.append(round(key / non_trivial * 100, 1) if non_trivial > 0 else None)
 efficiency_by_date_js = json.dumps(efficiency_by_date)
 
-
+non_trivial_total = total_human_msgs - total_trivial_msgs
+overall_efficiency = round(total_prompts / non_trivial_total * 100, 1) if non_trivial_total > 0 else 0
 
 # Prompt chart data
 prompt_dates_js = json.dumps(prompt_dates)

@@ -219,14 +289,22 @@ html = f"""<!DOCTYPE html>
                    letter-spacing: 0.05em; margin-bottom: 4px; }}
     .stat-value {{ font-size: 1.4rem; font-weight: 700; color: #f8fafc; }}
     .stat-sub {{ font-size: 0.7rem; color: #94a3b8; margin-top: 2px; }}
+    .section {{ margin-bottom: 36px; }}
+    .section-header {{ font-size: 0.75rem; font-weight: 600; color: #64748b;
+                       text-transform: uppercase; letter-spacing: 0.08em;
+                       padding: 0 0 10px 12px; margin-bottom: 16px;
+                       border-bottom: 1px solid #2d3748; }}
+    .section-header.cost {{ border-left: 3px solid #6366f1; color: #818cf8; }}
+    .section-header.time {{ border-left: 3px solid #34d399; color: #34d399; }}
+    .section-header.prompts {{ border-left: 3px solid #a78bfa; color: #a78bfa; }}
     .grid {{ display: grid; grid-template-columns: 1fr 1fr; gap: 20px; }}
     .card {{ background: #1e2330; border: 1px solid #2d3748; border-radius: 10px;
             padding: 16px; }}
     .card.wide {{ grid-column: 1 / -1; }}
-    .card h2 {{ font-size: 0.
+    .card h2 {{ font-size: 0.78rem; font-weight: 600; color: #94a3b8;
                text-transform: uppercase; letter-spacing: 0.05em; margin-bottom: 14px; }}
-    canvas {{ max-height:
-    .wide canvas {{ max-height:
+    canvas {{ max-height: 240px; }}
+    .wide canvas {{ max-height: 200px; }}
     .notice {{ font-size: 0.78rem; color: #94a3b8; background: #1e2330;
               border: 1px solid #3b4a6b; border-left: 3px solid #6366f1;
               border-radius: 6px; padding: 10px 14px; margin-bottom: 20px; }}

@@ -260,6 +338,11 @@ html = f"""<!DOCTYPE html>
      <div class="stat-value">{cache_pct}%</div>
      <div class="stat-sub">of all tokens</div>
    </div>
+   <div class="stat">
+     <div class="stat-label">Session time</div>
+     <div class="stat-value">{format_duration(total_duration)}</div>
+     <div class="stat-sub">avg {format_duration(avg_duration)} / session</div>
+   </div>
    <div class="stat">
      <div class="stat-label">Key prompts captured</div>
      <div class="stat-value">{total_prompts}</div>

@@ -268,64 +351,99 @@ html = f"""<!DOCTYPE html>
    <div class="stat">
      <div class="stat-label">Prompt efficiency</div>
      <div class="stat-value">{overall_efficiency}%</div>
-     <div class="stat-sub">key /
+     <div class="stat-sub">key / non-trivial (higher = better)</div>
    </div>
  </div>
 
-  <div class="
+  <div class="section">
+    <div class="section-header cost">Cost & Usage</div>
+    <div class="grid">
 
-
-
-
-
+    <div class="card wide">
+      <h2>Cumulative cost</h2>
+      <canvas id="cumul"></canvas>
+    </div>
 
-
-
-
-
+    <div class="card">
+      <h2>Cost per day</h2>
+      <canvas id="costDay"></canvas>
+    </div>
 
-
-
-
-
+    <div class="card">
+      <h2>Sessions per day</h2>
+      <canvas id="sessDay"></canvas>
+    </div>
 
-
-
-
-
+    <div class="card wide">
+      <h2>Cost by model</h2>
+      <canvas id="modelStack"></canvas>
+    </div>
 
-    <div class="card">
-      <h2>Output tokens per day</h2>
-      <canvas id="outputDay"></canvas>
    </div>
-
  </div>
 
-  <
-
+  <div class="section">
+    <div class="section-header prompts">Key Prompts</div>
+    <div class="grid">
 
-    <div class="
+    <div class="card wide">
+      <h2>Prompts per day</h2>
+      <canvas id="promptsVsTotal"></canvas>
+    </div>
 
-
-
-
-
+    <div class="card">
+      <h2>Efficiency per day (%)</h2>
+      <canvas id="promptEfficiency"></canvas>
+    </div>
 
-
-
-
-
+    <div class="card">
+      <h2>Category breakdown</h2>
+      <canvas id="promptDonut"></canvas>
+    </div>
 
-
-
-
-
+    <div class="card wide">
+      <h2>Categories per day</h2>
+      <canvas id="promptStack"></canvas>
+    </div>
 
-    <div class="card wide">
-      <h2>Category breakdown per day (stacked)</h2>
-      <canvas id="promptStack"></canvas>
    </div>
+  </div>
+
+  <div class="section">
+    <div class="section-header time">Time</div>
+    <div class="grid">
+
+    <div class="card">
+      <h2>Duration per day</h2>
+      <canvas id="durationDay"></canvas>
+    </div>
+
+    <div class="card">
+      <h2>Avg duration per day</h2>
+      <canvas id="avgDurationDay"></canvas>
+    </div>
+
+    <div class="card">
+      <h2>Tokens per minute</h2>
+      <canvas id="tokensPerMin"></canvas>
+    </div>
 
+    <div class="card">
+      <h2>Session length distribution</h2>
+      <canvas id="durationDist"></canvas>
+    </div>
+
+    <div class="card wide">
+      <h2>Cumulative time</h2>
+      <canvas id="cumulTime"></canvas>
+    </div>
+
+    <div class="card wide">
+      <h2>Time vs cost</h2>
+      <canvas id="timeVsCost"></canvas>
+    </div>
+
+    </div>
  </div>
 
  <script>

@@ -348,8 +466,26 @@ const DONUT_VALUES = {donut_values_js};
 const DONUT_COLORS = {donut_colors_js};
 const ALL_PROMPT_DATES = {all_prompt_dates_js};
 const TOTAL_MSGS_BY_DATE = {total_msgs_by_date_js};
+const TRIVIAL_BY_DATE = {trivial_by_date_js};
 const KEY_PROMPTS_BY_DATE = {key_prompts_by_date_js};
 const EFFICIENCY_BY_DATE = {efficiency_by_date_js};
+const DURATION_BY_DATE = {duration_by_date_js};
+const CUMUL_DURATION = {cumul_duration_js};
+const AVG_DURATION_BY_DATE = {avg_duration_by_date_js};
+const SCATTER_DATA = {scatter_data_js};
+const TPM_DATA = {tpm_data_js};
+const DUR_HIST_LABELS = {dur_hist_labels_js};
+const DUR_HIST_VALUES = {dur_hist_values_js};
+
+function formatDuration(s) {{
+  if (s <= 0) return '0s';
+  const h = Math.floor(s / 3600);
+  const m = Math.floor((s % 3600) / 60);
+  const sec = Math.round(s % 60);
+  if (h > 0) return h + 'h ' + m + 'm';
+  if (m > 0) return m + 'm' + (sec > 0 ? ' ' + sec + 's' : '');
+  return sec + 's';
+}}
 
 const GRID = '#2d3748';
 const TEXT = '#94a3b8';

@@ -415,15 +551,115 @@ new Chart(document.getElementById('modelStack'), {{
      tooltip: {{ callbacks: {{ label: ctx => ' $' + ctx.parsed.y.toFixed(2) }} }} }} }}
 }});
 
-//
-new Chart(document.getElementById('
+// Session duration per day
+new Chart(document.getElementById('durationDay'), {{
   type: 'bar',
   data: {{
     labels: DATES,
-    datasets: [{{ label: '
+    datasets: [{{ label: 'Duration', data: DURATION_BY_DATE,
+                  backgroundColor: '#f59e0b', borderRadius: 4 }}]
+  }},
+  options: {{ ...baseOpts,
+    scales: {{ ...baseOpts.scales,
+      y: {{ ...baseOpts.scales.y,
+           ticks: {{ ...baseOpts.scales.y.ticks, callback: v => formatDuration(v) }} }} }},
+    plugins: {{ ...baseOpts.plugins,
+      tooltip: {{ callbacks: {{ label: ctx => ' ' + formatDuration(ctx.parsed.y) }} }} }} }}
+}});
+
+// Avg session duration per day
+new Chart(document.getElementById('avgDurationDay'), {{
+  type: 'line',
+  data: {{
+    labels: DATES,
+    datasets: [{{ label: 'Avg duration', data: AVG_DURATION_BY_DATE,
+                  borderColor: '#a78bfa', backgroundColor: 'rgba(167,139,250,0.15)',
+                  fill: true, tension: 0.3, pointRadius: 3 }}]
+  }},
+  options: {{ ...baseOpts,
+    scales: {{ ...baseOpts.scales,
+      y: {{ ...baseOpts.scales.y,
+           ticks: {{ ...baseOpts.scales.y.ticks, callback: v => formatDuration(v) }} }} }},
+    plugins: {{ ...baseOpts.plugins,
+      tooltip: {{ callbacks: {{ label: ctx => ' ' + formatDuration(ctx.parsed.y) }} }} }} }}
+}});
+
+// Cumulative time line
+new Chart(document.getElementById('cumulTime'), {{
+  type: 'line',
+  data: {{
+    labels: CUMUL_LABELS,
+    datasets: [{{ label: 'Cumulative time', data: CUMUL_DURATION,
+                  borderColor: '#22d3ee', backgroundColor: 'rgba(34,211,238,0.15)',
+                  fill: true, tension: 0.3, pointRadius: 2 }}]
+  }},
+  options: {{ ...baseOpts,
+    scales: {{ ...baseOpts.scales,
+      y: {{ ...baseOpts.scales.y,
+           ticks: {{ ...baseOpts.scales.y.ticks, callback: v => formatDuration(v) }} }} }},
+    plugins: {{ ...baseOpts.plugins,
+      tooltip: {{ callbacks: {{ label: ctx => ' ' + formatDuration(ctx.parsed.y) }} }} }} }}
+}});
+
+// Time vs cost scatter
+new Chart(document.getElementById('timeVsCost'), {{
+  type: 'scatter',
+  data: {{
+    datasets: [{{ label: 'Session', data: SCATTER_DATA,
+                  backgroundColor: '#34d399', pointRadius: 5, pointHoverRadius: 7 }}]
+  }},
+  options: {{ ...baseOpts,
+    scales: {{ ...baseOpts.scales,
+      x: {{ ...baseOpts.scales.x, type: 'linear', min: 0,
+           ticks: {{ ...baseOpts.scales.x.ticks, callback: v => formatDuration(v) }},
+           title: {{ display: true, text: 'Duration', color: TEXT, font: {{ size: 10 }} }} }},
+      y: {{ ...baseOpts.scales.y,
+           ticks: {{ ...baseOpts.scales.y.ticks, callback: v => '$' + v.toFixed(2) }},
+           title: {{ display: true, text: 'Cost (USD)', color: TEXT, font: {{ size: 10 }} }} }} }},
+    plugins: {{ ...baseOpts.plugins,
+      tooltip: {{ callbacks: {{
+        label: ctx => {{
+          const d = ctx.raw;
+          return ` ${{d.label}}: ${{formatDuration(d.x)}} / $${{d.y.toFixed(4)}}`;
+        }}
+      }} }} }} }}
+}});
+
+// Tokens per minute scatter
+new Chart(document.getElementById('tokensPerMin'), {{
+  type: 'scatter',
+  data: {{
+    datasets: [{{ label: 'Session', data: TPM_DATA,
+                  backgroundColor: '#818cf8', pointRadius: 5, pointHoverRadius: 7 }}]
+  }},
+  options: {{ ...baseOpts,
+    scales: {{ ...baseOpts.scales,
+      x: {{ ...baseOpts.scales.x, type: 'linear', min: 0,
+           ticks: {{ ...baseOpts.scales.x.ticks, callback: v => formatDuration(v) }},
+           title: {{ display: true, text: 'Duration', color: TEXT, font: {{ size: 10 }} }} }},
+      y: {{ ...baseOpts.scales.y,
+           title: {{ display: true, text: 'Output tokens / min', color: TEXT, font: {{ size: 10 }} }} }} }},
+    plugins: {{ ...baseOpts.plugins,
+      tooltip: {{ callbacks: {{
+        label: ctx => {{
+          const d = ctx.raw;
+          return ` ${{d.label}}: ${{formatDuration(d.x)}} — ${{d.y}} tok/min`;
+        }}
+      }} }} }} }}
+}});
+
+// Session length distribution histogram
+new Chart(document.getElementById('durationDist'), {{
+  type: 'bar',
+  data: {{
+    labels: DUR_HIST_LABELS,
+    datasets: [{{ label: 'Sessions', data: DUR_HIST_VALUES,
                  backgroundColor: '#34d399', borderRadius: 4 }}]
   }},
-  options: baseOpts
+  options: {{ ...baseOpts,
+    plugins: {{ ...baseOpts.plugins, legend: {{ display: false }} }},
+    scales: {{ ...baseOpts.scales,
+      y: {{ ...baseOpts.scales.y, ticks: {{ ...baseOpts.scales.y.ticks, stepSize: 1 }} }} }} }}
 }});
 
 // Total vs key prompts per day

@@ -434,6 +670,8 @@ new Chart(document.getElementById('promptsVsTotal'), {{
     datasets: [
       {{ label: 'Total prompts', data: TOTAL_MSGS_BY_DATE,
          backgroundColor: 'rgba(148,163,184,0.35)', borderRadius: 4 }},
+      {{ label: 'Trivial prompts', data: TRIVIAL_BY_DATE,
+         backgroundColor: '#34d399', borderRadius: 4 }},
       {{ label: 'Key prompts', data: KEY_PROMPTS_BY_DATE,
          backgroundColor: '#a78bfa', borderRadius: 4 }}
     ]
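The revised prompt-efficiency metric depends on the `_is_trivial` heuristic added above (a prompt under 40 characters with no question mark counts as trivial) and divides key prompts by non-trivial prompts instead of all prompts. A worked example with hypothetical per-day counts:

```python
# Worked example of the revised efficiency ratio; the counts below are hypothetical.
def _is_trivial(text):
    return len(text) < 40 and "?" not in text

print(_is_trivial("yes, go ahead"))                            # True  (short, no question)
print(_is_trivial("why does the stop hook skip sidechains?"))  # False (contains "?")

total, trivial, key = 30, 12, 9           # hypothetical day: 30 prompts, 12 trivial, 9 key prompts
non_trivial = total - trivial             # 18
print(round(key / non_trivial * 100, 1))  # 50.0, reported as that day's efficiency
```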
package/src/patch-durations.py
ADDED

@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+"""
+Patch duration_seconds for existing tokens.json entries that have duration 0.
+
+Usage:
+    python3 patch-durations.py <project_root>
+"""
+import sys, json, os, glob
+from datetime import datetime
+
+project_root = os.path.abspath(sys.argv[1])
+tracking_dir = os.path.join(project_root, ".claude", "tracking")
+tokens_file = os.path.join(tracking_dir, "tokens.json")
+
+slug = project_root.replace("/", "-")
+transcripts_dir = os.path.expanduser("~/.claude/projects/" + slug)
+
+with open(tokens_file) as f:
+    data = json.load(f)
+
+patched = 0
+for entry in data:
+    sid = entry.get("session_id")
+    if not sid:
+        continue
+    jf = os.path.join(transcripts_dir, sid + ".jsonl")
+    if not os.path.exists(jf):
+        continue
+
+    msgs = []
+    try:
+        with open(jf) as f:
+            for line in f:
+                try:
+                    obj = json.loads(line)
+                    t = obj.get("type")
+                    ts = obj.get("timestamp")
+                    if t == "user" and not obj.get("isSidechain") and ts:
+                        msgs.append(("user", ts))
+                    elif t == "assistant" and ts:
+                        msgs.append(("assistant", ts))
+                except Exception:
+                    pass
+    except Exception:
+        continue
+
+    duration = 0
+    i = 0
+    while i < len(msgs):
+        if msgs[i][0] == "user":
+            j = i + 1
+            while j < len(msgs) and msgs[j][0] != "assistant":
+                j += 1
+            if j < len(msgs):
+                try:
+                    t0 = datetime.fromisoformat(msgs[i][1].replace("Z", "+00:00"))
+                    t1 = datetime.fromisoformat(msgs[j][1].replace("Z", "+00:00"))
+                    duration += max(0, int((t1 - t0).total_seconds()))
+                except Exception:
+                    pass
+        i += 1
+
+    if duration > 0:
+        entry["duration_seconds"] = duration
+        patched += 1
+        print(f" {sid[:8]} {duration}s")
+
+if patched > 0:
+    with open(tokens_file, "w") as f:
+        json.dump(data, f, indent=2)
+        f.write("\n")
+    script_dir = os.path.dirname(os.path.abspath(__file__))
+    charts_html = os.path.join(tracking_dir, "charts.html")
+    os.system(f'python3 "{script_dir}/generate-charts.py" "{tokens_file}" "{charts_html}" 2>/dev/null')
+
+print(f"{patched} session{'s' if patched != 1 else ''} patched.")
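Both `patch-durations.py` and the updated stop hook measure active time by pairing each user message with the next assistant message and summing those gaps, so idle time between turns is not counted. A small trace of that pairing with hypothetical timestamps:

```python
# Trace of the user→assistant pairing used above; the timestamps are hypothetical.
from datetime import datetime

msgs = [("user", "2024-06-01T10:00:00Z"),       # hypothetical transcript events
        ("assistant", "2024-06-01T10:00:40Z"),  # reply 40 s after the prompt
        ("user", "2024-06-01T10:30:00Z"),       # half-hour idle gap before this prompt
        ("assistant", "2024-06-01T10:31:30Z")]  # reply 90 s after the prompt

duration, i = 0, 0
while i < len(msgs):
    if msgs[i][0] == "user":
        j = i + 1
        while j < len(msgs) and msgs[j][0] != "assistant":
            j += 1
        if j < len(msgs):
            t0 = datetime.fromisoformat(msgs[i][1].replace("Z", "+00:00"))
            t1 = datetime.fromisoformat(msgs[j][1].replace("Z", "+00:00"))
            duration += max(0, int((t1 - t0).total_seconds()))
    i += 1

print(duration)  # 130: the 30-minute pause between turns is excluded
```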
package/src/stop-hook.sh
CHANGED

@@ -33,7 +33,7 @@ fi
 # Parse token usage from JSONL and update tokens.json
 python3 - "$TRANSCRIPT" "$TRACKING_DIR/tokens.json" "$SESSION_ID" "$(basename "$PROJECT_ROOT")" <<'PYEOF'
 import sys, json, os
-from datetime import date
+from datetime import date, datetime
 
 transcript_path = sys.argv[1]
 tokens_file = sys.argv[2]

@@ -44,10 +44,17 @@ today = date.today().isoformat()
 # Sum all token usage from assistant messages in this session
 inp = out = cache_create = cache_read = 0
 model = "unknown"
+msgs = []
 with open(transcript_path) as f:
     for line in f:
         try:
             obj = json.loads(line)
+            t = obj.get('type')
+            ts = obj.get('timestamp')
+            if t == 'user' and not obj.get('isSidechain') and ts:
+                msgs.append(('user', ts))
+            elif t == 'assistant' and ts:
+                msgs.append(('assistant', ts))
             msg = obj.get('message', {})
             if isinstance(msg, dict) and msg.get('role') == 'assistant':
                 usage = msg.get('usage', {})

@@ -62,6 +69,23 @@ with open(transcript_path) as f:
         except:
             pass
 
+# Compute active time: sum of (first assistant reply - user message) per turn
+duration = 0
+i = 0
+while i < len(msgs):
+    if msgs[i][0] == 'user':
+        j = i + 1
+        while j < len(msgs) and msgs[j][0] != 'assistant':
+            j += 1
+        if j < len(msgs):
+            try:
+                t0 = datetime.fromisoformat(msgs[i][1].replace('Z', '+00:00'))
+                t1 = datetime.fromisoformat(msgs[j][1].replace('Z', '+00:00'))
+                duration += max(0, int((t1 - t0).total_seconds()))
+            except:
+                pass
+    i += 1
+
 total = inp + cache_create + cache_read + out
 if 'opus' in model:
     cost = inp * 15 / 1e6 + cache_create * 18.75 / 1e6 + cache_read * 1.50 / 1e6 + out * 75 / 1e6

@@ -88,7 +112,8 @@ entry = {
     "output_tokens": out,
     "total_tokens": total,
     "estimated_cost_usd": round(cost, 4),
-    "model": model
+    "model": model,
+    "duration_seconds": duration
 }
 
 # Update existing or append new
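After this change each `tokens.json` entry carries a `duration_seconds` field next to the existing token and cost fields. For reference, a hypothetical entry in the 1.1.0 shape (field names taken from the diff; the values are invented):

```python
# Shape of a tokens.json entry after 1.1.0; field names from the diff, values hypothetical.
example_entry = {
    "date": "2024-06-01",
    "project": "my-app",
    "session_id": "3f2a9c1e-0000-0000-0000-000000000000",
    "input_tokens": 2_000,
    "cache_creation_tokens": 50_000,
    "cache_read_tokens": 400_000,
    "output_tokens": 12_000,
    "total_tokens": 464_000,
    "estimated_cost_usd": 0.4935,
    "model": "claude-sonnet",
    "duration_seconds": 130,
}
```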