token-tracker 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- token_tracker-0.1.0/PKG-INFO +6 -0
- token_tracker-0.1.0/README.md +53 -0
- token_tracker-0.1.0/pyproject.toml +18 -0
- token_tracker-0.1.0/setup.cfg +4 -0
- token_tracker-0.1.0/src/__init__.py +0 -0
- token_tracker-0.1.0/src/adapters/__init__.py +0 -0
- token_tracker-0.1.0/src/adapters/claude.py +161 -0
- token_tracker-0.1.0/src/adapters/codex.py +226 -0
- token_tracker-0.1.0/src/adapters/rate_limits.py +48 -0
- token_tracker-0.1.0/src/adapters/registry.py +11 -0
- token_tracker-0.1.0/src/adapters/types.py +129 -0
- token_tracker-0.1.0/src/analyzer/__init__.py +0 -0
- token_tracker-0.1.0/src/analyzer/aggregator.py +133 -0
- token_tracker-0.1.0/src/analyzer/blocks.py +73 -0
- token_tracker-0.1.0/src/analyzer/cost.py +115 -0
- token_tracker-0.1.0/src/cli.py +232 -0
- token_tracker-0.1.0/src/hooks.py +127 -0
- token_tracker-0.1.0/src/ui/__init__.py +0 -0
- token_tracker-0.1.0/src/ui/tables.py +800 -0
- token_tracker-0.1.0/token_tracker.egg-info/PKG-INFO +6 -0
- token_tracker-0.1.0/token_tracker.egg-info/SOURCES.txt +23 -0
- token_tracker-0.1.0/token_tracker.egg-info/dependency_links.txt +1 -0
- token_tracker-0.1.0/token_tracker.egg-info/entry_points.txt +2 -0
- token_tracker-0.1.0/token_tracker.egg-info/requires.txt +1 -0
- token_tracker-0.1.0/token_tracker.egg-info/top_level.txt +1 -0
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
# Token Tracker
|
|
2
|
+
|
|
3
|
+
CLI dashboard to track token usage across local AI agents.
|
|
4
|
+
|
|
5
|
+
Supports **Claude Code** and **Codex** — see how many tokens you burn, what it costs, and how close you are to rate limits.
|
|
6
|
+
|
|
7
|
+
 
|
|
8
|
+
|
|
9
|
+
## Features
|
|
10
|
+
|
|
11
|
+
- **Multi-agent tracking** — Claude Code + Codex in one place, interactive tab switching
|
|
12
|
+
- **Rate limit monitoring** — real-time 5h / 7d quota usage with reset countdown
|
|
13
|
+
- **Cost analysis** — per-session, daily, weekly, monthly cost breakdown (LiteLLM pricing)
|
|
14
|
+
- **Session insights** — project, model, duration, message count per session
|
|
15
|
+
- **5h billing block analysis** — burn rate, active/idle detection
|
|
16
|
+
- **Zero config** — auto-detects installed agents, reads local data directly
|
|
17
|
+
|
|
18
|
+
## Install
|
|
19
|
+
|
|
20
|
+
```bash
|
|
21
|
+
pip install git+https://github.com/stormzhang/token-tracker.git
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
## Usage
|
|
25
|
+
|
|
26
|
+
```bash
|
|
27
|
+
tt # interactive dashboard (arrow keys to switch agents)
|
|
28
|
+
tt claude # Claude Code only
|
|
29
|
+
tt codex # Codex only
|
|
30
|
+
tt daily # daily breakdown
|
|
31
|
+
tt weekly # weekly breakdown
|
|
32
|
+
tt monthly # monthly breakdown
|
|
33
|
+
tt sessions # recent sessions list
|
|
34
|
+
tt blocks # 5h billing block analysis
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
## Data Sources
|
|
38
|
+
|
|
39
|
+
| Agent | Path | Format |
|
|
40
|
+
|-------|------|--------|
|
|
41
|
+
| Claude Code | `~/.claude/projects/*/` | JSONL (per-message usage) |
|
|
42
|
+
| Codex | `~/.codex/sessions/` | JSONL + SQLite |
|
|
43
|
+
|
|
44
|
+
Token Tracker is **read-only** — it never modifies any agent data.
|
|
45
|
+
|
|
46
|
+
## Requirements
|
|
47
|
+
|
|
48
|
+
- Python 3.12+
|
|
49
|
+
- [Rich](https://github.com/Textualize/rich) (auto-installed)
|
|
50
|
+
|
|
51
|
+
## License
|
|
52
|
+
|
|
53
|
+
MIT
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=68.0"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "token-tracker"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Track token usage across local AI agents (Claude Code, Codex)"
|
|
9
|
+
requires-python = ">=3.12"
|
|
10
|
+
dependencies = [
|
|
11
|
+
"rich>=13.7",
|
|
12
|
+
]
|
|
13
|
+
|
|
14
|
+
[project.scripts]
|
|
15
|
+
tt = "src.cli:main"
|
|
16
|
+
|
|
17
|
+
[tool.setuptools.packages.find]
|
|
18
|
+
include = ["src*"]
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os
|
|
3
|
+
from datetime import datetime, timezone
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
from .types import AgentInfo, UsageEntry
|
|
7
|
+
|
|
8
|
+
CLAUDE_DIRS = [
|
|
9
|
+
os.path.expanduser("~/.claude/projects"),
|
|
10
|
+
os.path.expanduser("~/.config/claude/projects"),
|
|
11
|
+
]
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def detect() -> AgentInfo | None:
    """Return an AgentInfo for Claude Code if any projects directory exists.

    The first existing candidate wins; env-configured directories are
    checked before the defaults (see _get_claude_dirs). Returns None when
    no directory is present.
    """
    found = next((d for d in _get_claude_dirs() if Path(d).is_dir()), None)
    if found is None:
        return None
    return AgentInfo(
        id="claude-code",
        name="Claude Code",
        data_dir=found,
        installed=True,
    )
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def load_entries(hours_back: int = 0) -> list[UsageEntry]:
    """Load Claude Code usage entries from every known projects directory.

    hours_back > 0 restricts results to the trailing time window; 0 means
    no restriction. Entries are de-duplicated (via their dedup_key) and
    returned sorted by timestamp ascending.
    """
    cutoff: datetime | None = None
    if hours_back > 0:
        from datetime import timedelta
        cutoff = datetime.now(timezone.utc) - timedelta(hours=hours_back)

    entries: list[UsageEntry] = []
    seen: set[str] = set()
    for base in map(Path, _get_claude_dirs()):
        if not base.is_dir():
            continue
        for jsonl_path in base.rglob("*.jsonl"):
            # Directory-derived project name is only a fallback; the record's
            # own cwd (when present) overrides it inside the parser.
            fallback_project = _extract_project_from_dir(jsonl_path, base)
            _parse_jsonl(jsonl_path, fallback_project, entries, seen, cutoff)

    return sorted(entries, key=lambda e: e.timestamp)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def _get_claude_dirs() -> list[str]:
    """Candidate Claude projects directories, env-configured ones first.

    CLAUDE_CONFIG_DIR may hold a comma-separated list of config roots; each
    contributes a "<root>/projects" directory that is searched before the
    built-in defaults.
    """
    result = list(CLAUDE_DIRS)
    configured = os.environ.get("CLAUDE_CONFIG_DIR")
    if configured:
        for raw in configured.split(","):
            candidate = os.path.join(raw.strip(), "projects")
            # insert(0) per item: later env entries end up earliest, matching
            # the original precedence.
            if candidate not in result:
                result.insert(0, candidate)
    return result
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _project_from_cwd(cwd: str) -> str:
    """Derive a short project name (last path component) from a working dir.

    The home-directory prefix is stripped first, so e.g. ~/work/app yields
    "app". Falls back to "unknown" for empty input.
    """
    home = os.path.expanduser("~")
    trimmed = cwd[len(home):] if cwd.startswith(home) else cwd
    rel = trimmed.strip(os.sep)
    tail = rel.rsplit(os.sep, 1)[-1]
    return tail if tail else (rel or "unknown")
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _extract_project_from_dir(jsonl_path: Path, base: Path) -> str:
    """Recover a project name from Claude's flattened project-directory name.

    Claude Code stores sessions under base/<encoded-cwd>/, where the cwd's
    path separators were replaced with "-". This inverts that encoding
    (imperfectly: genuine hyphens in the original path also decode to
    separators), strips the home prefix, and keeps only the last component,
    mirroring _project_from_cwd().
    """
    rel = jsonl_path.relative_to(base)
    project_dir = str(rel.parts[0]) if rel.parts else "unknown"
    decoded = project_dir.replace("-", os.sep).strip(os.sep)
    home = os.path.expanduser("~").strip(os.sep)
    if decoded.startswith(home):
        decoded = decoded[len(home):].strip(os.sep)
    parts = decoded.split(os.sep)
    # str.split always yields at least [''], so also guard on the last
    # element being non-empty; previously a directory named "-" decoded to
    # "" and this returned the empty string instead of "unknown".
    return parts[-1] if parts and parts[-1] else "unknown"
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def _parse_jsonl(
    path: Path,
    project: str,
    entries: list[UsageEntry],
    seen: set[str],
    cutoff: datetime | None,
) -> None:
    """Append usage entries parsed from one Claude Code session JSONL file.

    Skips blank/malformed lines, non-assistant records, entries older than
    *cutoff*, and duplicates (tracked through *seen* dedup keys). Mutates
    *entries* and *seen* in place; returns nothing.
    """
    try:
        with open(path, "r", encoding="utf-8") as fh:
            for raw in fh:
                raw = raw.strip()
                if not raw:
                    continue
                try:
                    record = json.loads(raw)
                except json.JSONDecodeError:
                    continue
                if record.get("type") != "assistant":
                    continue
                entry = _parse_assistant_entry(record, project)
                if entry is None:
                    continue
                if cutoff is not None and entry.timestamp < cutoff:
                    continue
                key = entry.dedup_key
                if key not in seen:
                    seen.add(key)
                    entries.append(entry)
    except (OSError, PermissionError):
        # Unreadable session files are skipped silently (best-effort scan).
        pass
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def _parse_assistant_entry(data: dict, project: str) -> UsageEntry | None:
    """Convert one assistant JSONL record into a UsageEntry.

    Returns None when the record has no usable usage dict, all four token
    counts are zero, or the timestamp cannot be parsed.
    """
    message = data.get("message")
    if not isinstance(message, dict) or not message:
        return None
    usage = message.get("usage")
    if not isinstance(usage, dict) or not usage:
        return None

    # "or 0" normalizes both missing keys and explicit nulls.
    input_tokens = usage.get("input_tokens") or 0
    output_tokens = usage.get("output_tokens") or 0
    cache_creation = usage.get("cache_creation_input_tokens") or 0
    cache_read = usage.get("cache_read_input_tokens") or 0
    if input_tokens == 0 and output_tokens == 0 and cache_creation == 0 and cache_read == 0:
        return None

    # ISO timestamps may use a trailing "Z"; fromisoformat needs an offset.
    try:
        ts = datetime.fromisoformat(data.get("timestamp", "").replace("Z", "+00:00"))
    except (ValueError, AttributeError):
        return None

    cwd = data.get("cwd", "")
    return UsageEntry(
        timestamp=ts,
        session_id=data.get("sessionId", ""),
        message_id=message.get("id", ""),
        request_id=data.get("requestId") or "",
        model=message.get("model", "unknown"),
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        cache_creation_tokens=cache_creation,
        cache_read_tokens=cache_read,
        cost_usd=data.get("costUSD"),
        # The record's own cwd beats the directory-derived fallback.
        project=_project_from_cwd(cwd) if cwd else project,
        agent_id="claude-code",
    )
|
|
@@ -0,0 +1,226 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os
|
|
3
|
+
import sqlite3
|
|
4
|
+
from datetime import datetime, timedelta, timezone
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
from .types import AgentInfo, RateLimits, UsageEntry
|
|
8
|
+
|
|
9
|
+
CODEX_DIR = os.path.expanduser("~/.codex")
|
|
10
|
+
SESSIONS_DIR = os.path.join(CODEX_DIR, "sessions")
|
|
11
|
+
STATE_DB = os.path.join(CODEX_DIR, "state_5.sqlite")
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def detect() -> AgentInfo | None:
    """Return an AgentInfo for Codex when its sessions directory exists, else None."""
    if not Path(SESSIONS_DIR).is_dir():
        return None
    return AgentInfo(
        id="codex",
        name="Codex",
        data_dir=SESSIONS_DIR,
        installed=True,
    )
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def load_entries(hours_back: int = 0) -> list[UsageEntry]:
    """Load one UsageEntry per Codex session found under ~/.codex/sessions.

    hours_back > 0 restricts results to sessions started within the window.
    Returns entries sorted by timestamp ascending.
    """
    cutoff: datetime | None = None
    if hours_back > 0:
        cutoff = datetime.now(timezone.utc) - timedelta(hours=hours_back)

    # Thread-id -> model map from the local state DB (best-effort).
    models = _load_thread_models()

    entries: list[UsageEntry] = []
    root = Path(SESSIONS_DIR)
    if not root.is_dir():
        return entries

    seen: set[str] = set()
    for session_file in root.rglob("*.jsonl"):
        _parse_jsonl(session_file, models, entries, seen, cutoff)

    entries.sort(key=lambda e: e.timestamp)
    return entries
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def _load_thread_models() -> dict[str, str]:
    """Map Codex thread id -> model name from the local state DB.

    Opens the SQLite database read-only (URI mode) so Codex's own data is
    never locked or mutated. Returns an empty dict when the DB is missing
    or unreadable.
    """
    if not os.path.exists(STATE_DB):
        return {}
    try:
        conn = sqlite3.connect(f"file:{STATE_DB}?mode=ro", uri=True)
        # Close the connection even if the query raises; previously a
        # failing execute() leaked the open connection.
        try:
            rows = conn.execute(
                "SELECT id, model FROM threads WHERE model IS NOT NULL"
            ).fetchall()
        finally:
            conn.close()
        return dict(rows)
    except (sqlite3.Error, OSError):
        return {}
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def load_rate_limits() -> RateLimits | None:
    """Return the freshest rate-limit snapshot found in recent Codex sessions.

    Only the five most recently modified session files are inspected; the
    first one yielding a snapshot wins. Returns None when nothing is found.
    """
    root = Path(SESSIONS_DIR)
    if not root.is_dir():
        return None

    # Newest session files first.
    recent = sorted(root.rglob("*.jsonl"), key=lambda p: p.stat().st_mtime, reverse=True)
    models = _load_thread_models()

    for candidate in recent[:5]:
        limits = _extract_rate_limits(candidate, models)
        if limits:
            return limits
    return None
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def _extract_rate_limits(path: Path, models: dict[str, str]) -> RateLimits | None:
    """Scan one Codex session JSONL file for its latest rate-limit snapshot.

    Tracks the session id from the session_meta record, then keeps the LAST
    rate_limits payload seen in any token_count event (later lines win).
    Returns None when the file is unreadable, no snapshot exists, or both
    usage percentages are absent.
    """
    session_id = ""
    last_rl = None
    try:
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                try:
                    data = json.loads(line)
                except json.JSONDecodeError:
                    # Partially written or corrupt lines are skipped.
                    continue
                # session_meta is read before the event_msg filter so the id
                # is already known when the first token_count arrives.
                if data.get("type") == "session_meta":
                    session_id = data.get("payload", {}).get("id", "")
                if data.get("type") != "event_msg":
                    continue
                payload = data.get("payload", {})
                if payload.get("type") != "token_count":
                    continue
                rl = payload.get("rate_limits")
                if rl:
                    # Keep overwriting: the file is chronological, so the
                    # final value is the most recent snapshot.
                    last_rl = (rl, data.get("timestamp", ""), session_id)
    except (OSError, PermissionError):
        return None

    if not last_rl:
        return None

    rl, ts, sid = last_rl
    # "primary" ~ 5-hour window, "secondary" ~ 7-day window.
    primary = rl.get("primary") or {}
    secondary = rl.get("secondary") or {}

    five_pct = primary.get("used_percent")
    five_reset = primary.get("resets_at")
    seven_pct = secondary.get("used_percent")
    seven_reset = secondary.get("resets_at")

    # A window whose reset moment has already passed counts as 0% used
    # (resets_at is compared as a unix timestamp).
    now_ts = datetime.now(timezone.utc).timestamp()
    if five_reset and five_reset < now_ts:
        five_pct = 0.0
    if seven_reset and seven_reset < now_ts:
        seven_pct = 0.0

    if five_pct is None and seven_pct is None:
        return None

    model_name = models.get(sid, "")

    return RateLimits(
        five_hour_pct=five_pct,
        five_hour_resets_at=five_reset,
        seven_day_pct=seven_pct,
        seven_day_resets_at=seven_reset,
        model=model_name,
        updated_at=ts,
    )
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def _project_from_cwd(cwd: str) -> str:
    """Last path component of *cwd* (home prefix removed), or "unknown"."""
    home = os.path.expanduser("~")
    if cwd.startswith(home):
        cwd = cwd[len(home):]
    rel = cwd.strip(os.sep)
    parts = rel.split(os.sep)
    if parts and parts[-1]:
        return parts[-1]
    return rel or "unknown"
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def _parse_jsonl(
    path: Path,
    models: dict[str, str],
    entries: list[UsageEntry],
    seen: set[str],
    cutoff: datetime | None,
) -> None:
    """Fold one Codex session JSONL file into a single UsageEntry.

    Codex emits cumulative totals in token_count events, so only the LAST
    total_token_usage matters; the number of token_count events stands in
    for the message count. Appends at most one entry per session id to
    *entries* (mutated in place), de-duplicated via *seen*.
    """
    session_id = ""
    session_ts = ""
    project = "unknown"
    model = "unknown"
    last_usage = None
    msg_count = 0

    try:
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                try:
                    data = json.loads(line)
                except json.JSONDecodeError:
                    continue

                row_type = data.get("type")

                if row_type == "session_meta":
                    # Session identity / start time / project come from the
                    # meta record; the model comes from the state DB mapping.
                    payload = data.get("payload", {})
                    session_id = payload.get("id", "")
                    session_ts = payload.get("timestamp", "")
                    cwd = payload.get("cwd", "")
                    if cwd:
                        project = _project_from_cwd(cwd)
                    model = models.get(session_id, "unknown")
                    continue

                if row_type != "event_msg":
                    continue

                payload = data.get("payload", {})
                if payload.get("type") == "token_count":
                    info = payload.get("info")
                    if info and info.get("total_token_usage"):
                        # Cumulative totals: later events supersede earlier ones.
                        last_usage = info["total_token_usage"]
                        msg_count += 1
    except (OSError, PermissionError):
        return

    if not last_usage or not session_id:
        return

    # Codex reports cached tokens inside input_tokens; split them out so the
    # buckets don't double-count.
    cached = last_usage.get("cached_input_tokens", 0)
    input_tokens = last_usage.get("input_tokens", 0) - cached
    output_tokens = last_usage.get("output_tokens", 0) + last_usage.get("reasoning_output_tokens", 0)

    if input_tokens == 0 and output_tokens == 0:
        return

    # Session start time; "Z" suffix normalized for fromisoformat.
    try:
        ts = datetime.fromisoformat(session_ts.replace("Z", "+00:00"))
    except (ValueError, AttributeError):
        return

    if cutoff and ts < cutoff:
        return

    if session_id in seen:
        return
    seen.add(session_id)

    entries.append(UsageEntry(
        timestamp=ts,
        session_id=session_id,
        message_id=session_id,  # no per-message ids; session id doubles as dedup key
        request_id="",
        model=model,
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        cache_creation_tokens=0,  # Codex does not report cache writes
        cache_read_tokens=cached,
        cost_usd=None,  # no native cost; computed downstream from pricing
        project=project,
        agent_id="codex",
        message_count=msg_count,
    ))
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os
|
|
3
|
+
from datetime import datetime, timezone
|
|
4
|
+
|
|
5
|
+
from .types import RateLimits
|
|
6
|
+
|
|
7
|
+
STATUS_FILE = os.path.expanduser("~/.claude/tt-status.json")
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def load_rate_limits() -> RateLimits | None:
    """Parse the Claude status file (written by the hook) into RateLimits.

    Returns None when the file is missing/corrupt or carries no usable
    percentages and no model name.
    """
    try:
        with open(STATUS_FILE, "r", encoding="utf-8") as fh:
            data = json.load(fh)
    except (json.JSONDecodeError, OSError):
        # Missing, unreadable, or corrupt status file: no snapshot.
        return None

    limits = data.get("rate_limits") or {}
    now_ts = datetime.now(timezone.utc).timestamp()

    def _window(block: dict) -> tuple:
        # A window whose reset moment already passed counts as 0% used.
        pct = block.get("used_percentage")
        reset = block.get("resets_at")
        if reset and reset < now_ts:
            pct = 0.0
        return pct, reset

    five_pct, five_reset = _window(limits.get("five_hour") or {})
    seven_pct, seven_reset = _window(limits.get("seven_day") or {})

    model_info = data.get("model") or {}
    model_name = model_info.get("display_name") or model_info.get("id") or ""

    if five_pct is None and seven_pct is None and not model_name:
        return None

    return RateLimits(
        five_hour_pct=five_pct,
        five_hour_resets_at=five_reset,
        seven_day_pct=seven_pct,
        seven_day_resets_at=seven_reset,
        model=model_name,
        updated_at=data.get("_received_at", ""),
    )
|
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
from dataclasses import dataclass, field
|
|
2
|
+
from datetime import datetime
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
@dataclass
class UsageEntry:
    """One normalized token-usage record from any agent adapter.

    Claude Code produces one entry per assistant message; Codex produces
    one entry per session (see the respective adapters).
    """

    timestamp: datetime  # when the message/session occurred (tz-aware in adapters)
    session_id: str
    message_id: str      # Codex adapter reuses the session id here
    request_id: str      # empty for Codex
    model: str           # model name, or "unknown"
    input_tokens: int
    output_tokens: int
    cache_creation_tokens: int
    cache_read_tokens: int
    cost_usd: float | None  # native cost if reported (Claude's costUSD), else None
    project: str            # short project name derived from cwd
    agent_id: str           # "claude-code" or "codex"
    message_count: int = 1  # >1 when one entry aggregates a whole session (Codex)

    @property
    def total_tokens(self) -> int:
        """Sum of all four token buckets."""
        return self.input_tokens + self.output_tokens + self.cache_creation_tokens + self.cache_read_tokens

    @property
    def dedup_key(self) -> str:
        """Key used to drop duplicate records across overlapping files."""
        return f"{self.message_id}:{self.request_id}"
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@dataclass
class AgentInfo:
    """Identity and install location of a detected local agent."""

    id: str         # stable machine id, e.g. "claude-code" or "codex"
    name: str       # human-readable display name
    data_dir: str   # directory the adapter reads usage data from
    installed: bool  # True when the data directory was found on disk
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
@dataclass
class DailyStats:
    """Aggregated usage totals for a single day."""

    date: str  # day key; exact format set by the aggregator — TODO confirm
    input_tokens: int = 0
    output_tokens: int = 0
    cache_creation_tokens: int = 0
    cache_read_tokens: int = 0
    total_tokens: int = 0
    cost_usd: float = 0.0
    session_count: int = 0
    message_count: int = 0
    # Per-model tally; the int's unit (tokens vs messages) is set by the
    # aggregator, not visible here — verify against analyzer/aggregator.py.
    models: dict[str, int] = field(default_factory=dict)
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
@dataclass
class WeeklyStats:
    """Aggregated usage totals for one week."""

    week: str          # week key; format set by the aggregator — TODO confirm
    week_start: str = ""  # first day of the window
    week_end: str = ""    # last day of the window
    input_tokens: int = 0
    output_tokens: int = 0
    cache_creation_tokens: int = 0
    cache_read_tokens: int = 0
    total_tokens: int = 0
    cost_usd: float = 0.0
    session_count: int = 0
    message_count: int = 0
    # Per-model tally; unit defined by the aggregator — verify there.
    models: dict[str, int] = field(default_factory=dict)
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
@dataclass
class SessionStats:
    """Aggregated usage totals for a single session."""

    session_id: str
    project: str
    model: str
    start_time: datetime
    end_time: datetime
    duration_minutes: float  # end_time - start_time, in minutes (set by aggregator)
    input_tokens: int = 0
    output_tokens: int = 0
    cache_creation_tokens: int = 0
    cache_read_tokens: int = 0
    total_tokens: int = 0
    cost_usd: float = 0.0
    message_count: int = 0
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
@dataclass
class MonthlyStats:
    """Aggregated usage totals for one calendar month."""

    month: str  # month key; format set by the aggregator — TODO confirm
    input_tokens: int = 0
    output_tokens: int = 0
    cache_creation_tokens: int = 0
    cache_read_tokens: int = 0
    total_tokens: int = 0
    cost_usd: float = 0.0
    session_count: int = 0
    message_count: int = 0
    # Per-model tally; unit defined by the aggregator — verify there.
    models: dict[str, int] = field(default_factory=dict)
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
@dataclass
class RateLimits:
    """Snapshot of an agent's 5-hour / 7-day quota usage."""

    five_hour_pct: float | None = None    # percent of the 5h window used
    # Reset moments are compared against unix timestamps in the adapters.
    five_hour_resets_at: int | None = None
    seven_day_pct: float | None = None    # percent of the 7d window used
    seven_day_resets_at: int | None = None
    model: str = ""        # model active when the snapshot was taken
    updated_at: str = ""   # snapshot timestamp as a string, as reported by the source
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
@dataclass
class P90Limits:
    """Per-block reference limits (presumably 90th-percentile of history —
    confirm against analyzer/blocks.py)."""

    token_limit: int = 0
    cost_limit: float = 0.0
    message_limit: int = 0
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
@dataclass
class SessionBlock:
    """One 5-hour billing block: the entries that fall inside it plus totals."""

    start_time: datetime
    end_time: datetime
    entries: list[UsageEntry] = field(default_factory=list)
    input_tokens: int = 0
    output_tokens: int = 0
    cache_creation_tokens: int = 0
    cache_read_tokens: int = 0
    total_tokens: int = 0
    cost_usd: float = 0.0
    is_active: bool = False   # block still running "now" (set by analyzer/blocks.py)
    burn_rate: float = 0.0    # consumption rate; unit defined by the analyzer — verify there
    is_gap: bool = False      # True for a synthetic idle block between real ones — confirm in analyzer
|
|
File without changes
|