token-tracker 0.2.0__tar.gz → 0.2.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {token_tracker-0.2.0 → token_tracker-0.2.2}/PKG-INFO +2 -2
- {token_tracker-0.2.0 → token_tracker-0.2.2}/README.md +24 -17
- {token_tracker-0.2.0 → token_tracker-0.2.2}/pyproject.toml +2 -2
- token_tracker-0.2.2/src/analyzer/aggregator.py +108 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/cli.py +12 -2
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/ui/tables.py +17 -20
- {token_tracker-0.2.0 → token_tracker-0.2.2}/token_tracker.egg-info/PKG-INFO +2 -2
- token_tracker-0.2.0/src/analyzer/aggregator.py +0 -133
- {token_tracker-0.2.0 → token_tracker-0.2.2}/setup.cfg +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/__init__.py +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/adapters/__init__.py +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/adapters/claude.py +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/adapters/codex.py +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/adapters/rate_limits.py +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/adapters/registry.py +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/adapters/types.py +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/analyzer/__init__.py +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/analyzer/blocks.py +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/analyzer/cost.py +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/hooks.py +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/src/ui/__init__.py +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/token_tracker.egg-info/SOURCES.txt +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/token_tracker.egg-info/dependency_links.txt +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/token_tracker.egg-info/entry_points.txt +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/token_tracker.egg-info/requires.txt +0 -0
- {token_tracker-0.2.0 → token_tracker-0.2.2}/token_tracker.egg-info/top_level.txt +0 -0
|
@@ -1,26 +1,34 @@
|
|
|
1
1
|
# Token Tracker (tt)
|
|
2
2
|
|
|
3
|
-
本地 AI Agent Token
|
|
3
|
+
本地 AI Agent Token 消耗追踪/分析工具,支持 **Claude Code** 和 **Codex** 。
|
|
4
4
|
|
|
5
5
|
自定义 StatusLine 状态栏 + CLI Dashboard,实时查看 token 用量、等效成本、限额状态。
|
|
6
6
|
|
|
7
|
-
 
|
|
8
8
|
|
|
9
9
|
[English](README_EN.md)
|
|
10
10
|
|
|
11
|
-

|
|
12
|
-
|
|
13
11
|
## StatusLine 状态栏
|
|
14
12
|
|
|
15
|
-
自动为 Claude Code 和 Codex
|
|
13
|
+
自动为 Claude Code 和 Codex 配置状态栏,`tt setup` 一键配置,脚本更新时自动升级。
|
|
16
14
|
|
|
17
15
|
**Claude Code**:项目名、5h/7d 配额进度条、CTX 窗口占比、Token 用量、模型名
|
|
18
16
|
|
|
19
|
-

|
|
17
|
+

|
|
20
18
|
|
|
21
19
|
**Codex**:项目名、5h/7d 配额、上下文剩余、模型名
|
|
22
20
|
|
|
23
|
-

|
|
21
|
+

|
|
22
|
+
|
|
23
|
+
## Dashboard 数据面板和 日/周/月 数据报表分析
|
|
24
|
+
|
|
25
|
+

|
|
26
|
+
|
|
27
|
+

|
|
28
|
+
|
|
29
|
+

|
|
30
|
+
|
|
31
|
+

|
|
24
32
|
|
|
25
33
|
## 功能
|
|
26
34
|
|
|
@@ -41,33 +49,32 @@ curl -sSL https://raw.githubusercontent.com/stormzhang/token-tracker/master/inst
|
|
|
41
49
|
|
|
42
50
|
```bash
|
|
43
51
|
pip install token-tracker
|
|
52
|
+
tt setup
|
|
44
53
|
```
|
|
45
54
|
|
|
46
55
|
## 使用
|
|
47
56
|
|
|
48
57
|
```bash
|
|
58
|
+
tt setup # 初始化配置 Claude Code + Codex status_line
|
|
49
59
|
tt # 交互式 Dashboard(方向键切换 Agent)
|
|
50
|
-
tt claude #
|
|
51
|
-
tt codex #
|
|
60
|
+
tt claude # 仅展示 Claude Code
|
|
61
|
+
tt codex # 仅展示 Codex
|
|
52
62
|
tt daily # 按日汇总(按 token 消耗排序)
|
|
53
63
|
tt weekly # 按周汇总(多 Agent 分组展示)
|
|
54
64
|
tt monthly # 按月汇总(多 Agent 分组展示)
|
|
55
|
-
tt sessions #
|
|
56
|
-
tt setup # 配置 Claude Code statusLine + Codex status_line
|
|
65
|
+
tt sessions # 最近 20 条会话明细数据
|
|
57
66
|
tt unsetup # 卸载并恢复安装前的配置
|
|
58
67
|
```
|
|
59
68
|
|
|
60
69
|
## 环境要求
|
|
61
70
|
|
|
62
|
-
- Python 3.
|
|
71
|
+
- Python 3.11+
|
|
63
72
|
- [Rich](https://github.com/Textualize/rich)(自动安装)
|
|
64
73
|
|
|
65
|
-
##
|
|
74
|
+
## TODO
|
|
66
75
|
|
|
67
|
-
|
|
68
|
-
- 🔌 **更多 Agent 支持** — Cursor、Cline、Aider 等本地日志适配 + API 提供商用量拉取
|
|
69
|
-
- 🧩 **终端集成** — 规划见 [Terminal Integrations Roadmap](docs/terminal-integrations.md)
|
|
76
|
+
未来持续增加更多数据报表,多维度分析。
|
|
70
77
|
|
|
71
78
|
## License
|
|
72
79
|
|
|
73
|
-
MIT
|
|
80
|
+
Copyright (c) 2026 stormzhang. MIT License.
|
|
@@ -4,9 +4,9 @@ build-backend = "setuptools.build_meta"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "token-tracker"
|
|
7
|
-
version = "0.2.
|
|
7
|
+
version = "0.2.2"
|
|
8
8
|
description = "Track token usage across local AI agents (Claude Code, Codex)"
|
|
9
|
-
requires-python = ">=3.
|
|
9
|
+
requires-python = ">=3.11"
|
|
10
10
|
dependencies = [
|
|
11
11
|
"rich>=13.7",
|
|
12
12
|
]
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
from collections import defaultdict
|
|
2
|
+
from datetime import timedelta
|
|
3
|
+
|
|
4
|
+
from ..adapters.types import DailyStats, MonthlyStats, SessionStats, UsageEntry, WeeklyStats
|
|
5
|
+
from .cost import calculate_cost
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def _accumulate(stat, entry: UsageEntry) -> None:
    """Fold a single usage entry into *stat* in place.

    Adds the entry's token counters, equivalent cost and message count to the
    stats object, and — when the stats type tracks one — updates the
    per-model token breakdown.
    """
    for field in (
        "input_tokens",
        "output_tokens",
        "cache_creation_tokens",
        "cache_read_tokens",
        "total_tokens",
        "message_count",
    ):
        setattr(stat, field, getattr(stat, field) + getattr(entry, field))
    stat.cost_usd += calculate_cost(entry)
    # SessionStats has no `models` mapping, so guard with hasattr.
    if hasattr(stat, "models"):
        seen = stat.models.get(entry.model, 0)
        stat.models[entry.model] = seen + entry.total_tokens
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def aggregate_daily(entries: list[UsageEntry]) -> list[DailyStats]:
    """Group usage entries by calendar day.

    Returns one DailyStats per "YYYY-MM-DD" date, sorted chronologically,
    with session_count set to the number of distinct session ids seen that day.
    """
    stats_by_day: dict[str, DailyStats] = {}
    ids_by_day: dict[str, set[str]] = defaultdict(set)

    for entry in entries:
        day = entry.timestamp.strftime("%Y-%m-%d")
        stat = stats_by_day.get(day)
        if stat is None:
            stat = stats_by_day[day] = DailyStats(date=day)
        _accumulate(stat, entry)
        ids_by_day[day].add(entry.session_id)

    # Session counts come from the distinct ids collected per day.
    for day, ids in ids_by_day.items():
        stats_by_day[day].session_count = len(ids)

    return sorted(stats_by_day.values(), key=lambda s: s.date)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def aggregate_monthly(entries: list[UsageEntry]) -> list[MonthlyStats]:
    """Group usage entries by calendar month.

    Returns one MonthlyStats per "YYYY-MM" month, sorted chronologically,
    with session_count set to the number of distinct session ids that month.
    """
    stats_by_month: dict[str, MonthlyStats] = {}
    ids_by_month: dict[str, set[str]] = defaultdict(set)

    for entry in entries:
        month = entry.timestamp.strftime("%Y-%m")
        stat = stats_by_month.get(month)
        if stat is None:
            stat = stats_by_month[month] = MonthlyStats(month=month)
        _accumulate(stat, entry)
        ids_by_month[month].add(entry.session_id)

    # Session counts come from the distinct ids collected per month.
    for month, ids in ids_by_month.items():
        stats_by_month[month].session_count = len(ids)

    return sorted(stats_by_month.values(), key=lambda s: s.month)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def aggregate_weekly(entries: list[UsageEntry]) -> list[WeeklyStats]:
    """Group usage entries by ISO week (Monday through Sunday).

    The week key is the Monday's ISO date; week_start/week_end carry the
    "MM-DD" display range. Results are sorted chronologically and
    session_count is the number of distinct session ids per week.
    """
    stats_by_week: dict[str, WeeklyStats] = {}
    ids_by_week: dict[str, set[str]] = defaultdict(set)

    for entry in entries:
        # weekday() is 0 for Monday, so subtracting it lands on week start.
        week_start = entry.timestamp.date() - timedelta(days=entry.timestamp.weekday())
        week_end = week_start + timedelta(days=6)
        week_key = week_start.isoformat()

        stat = stats_by_week.get(week_key)
        if stat is None:
            stat = stats_by_week[week_key] = WeeklyStats(
                week=week_key,
                week_start=week_start.strftime("%m-%d"),
                week_end=week_end.strftime("%m-%d"),
            )
        _accumulate(stat, entry)
        ids_by_week[week_key].add(entry.session_id)

    # Session counts come from the distinct ids collected per week.
    for week_key, ids in ids_by_week.items():
        stats_by_week[week_key].session_count = len(ids)

    return sorted(stats_by_week.values(), key=lambda s: s.week)
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def aggregate_sessions(entries: list[UsageEntry]) -> list[SessionStats]:
    """Build one SessionStats per session id, newest session first.

    Each session's time span comes from its earliest and latest entries;
    its `model` is the model that consumed the most tokens in the session.
    """
    grouped: dict[str, list[UsageEntry]] = defaultdict(list)
    for entry in entries:
        grouped[entry.session_id].append(entry)

    results: list[SessionStats] = []
    for sid, items in grouped.items():
        items.sort(key=lambda e: e.timestamp)
        first, last = items[0], items[-1]
        minutes = (last.timestamp - first.timestamp).total_seconds() / 60

        # Pick the dominant model by total tokens consumed.
        token_totals: dict[str, int] = defaultdict(int)
        for entry in items:
            token_totals[entry.model] += entry.total_tokens
        top_model = max(token_totals, key=token_totals.get) if token_totals else "unknown"

        stat = SessionStats(
            session_id=sid,
            project=first.project,
            model=top_model,
            start_time=first.timestamp,
            end_time=last.timestamp,
            duration_minutes=round(minutes, 1),
        )
        for entry in items:
            _accumulate(stat, entry)
        results.append(stat)

    results.sort(key=lambda s: s.start_time, reverse=True)
    return results
|
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import sys
|
|
2
|
+
from datetime import datetime, timedelta, timezone
|
|
2
3
|
|
|
3
4
|
from .adapters import claude, codex
|
|
4
5
|
from .adapters.rate_limits import load_rate_limits as load_claude_rate_limits
|
|
@@ -48,7 +49,8 @@ def _build_agent_data(agent_id: str, agent_name: str) -> dict | None:
|
|
|
48
49
|
weekly = aggregate_weekly(entries)
|
|
49
50
|
monthly = aggregate_monthly(entries)
|
|
50
51
|
sessions = aggregate_sessions(entries)
|
|
51
|
-
|
|
52
|
+
cutoff = datetime.now(timezone.utc) - timedelta(hours=48)
|
|
53
|
+
recent = [e for e in entries if e.timestamp >= cutoff]
|
|
52
54
|
blocks = analyze_blocks(recent)
|
|
53
55
|
rate_limits = RATE_LIMIT_LOADERS.get(agent_id, lambda: None)()
|
|
54
56
|
p90 = None
|
|
@@ -138,10 +140,18 @@ def _read_key(tty, termios):
|
|
|
138
140
|
termios.tcsetattr(fd, termios.TCSADRAIN, old)
|
|
139
141
|
|
|
140
142
|
|
|
143
|
+
def _get_version() -> str:
|
|
144
|
+
from importlib.metadata import version
|
|
145
|
+
return version("token-tracker")
|
|
146
|
+
|
|
147
|
+
|
|
141
148
|
def main():
|
|
142
149
|
args = sys.argv[1:]
|
|
143
150
|
command = args[0] if args else "dashboard"
|
|
144
151
|
|
|
152
|
+
if command in ("--version", "-v", "-V"):
|
|
153
|
+
print(f"tt {_get_version()}")
|
|
154
|
+
return
|
|
145
155
|
if command == "setup":
|
|
146
156
|
setup()
|
|
147
157
|
return
|
|
@@ -213,7 +223,7 @@ def main():
|
|
|
213
223
|
render_sessions(stats, limit)
|
|
214
224
|
else:
|
|
215
225
|
console.print(f"[red]未知命令: {command}[/red]")
|
|
216
|
-
console.print("[dim]可用命令: dashboard, daily, weekly, monthly, sessions, claude, codex, setup, unsetup[/dim]")
|
|
226
|
+
console.print("[dim]可用命令: dashboard, daily, weekly, monthly, sessions, claude, codex, setup, unsetup, --version[/dim]")
|
|
217
227
|
sys.exit(1)
|
|
218
228
|
|
|
219
229
|
|
|
@@ -117,17 +117,14 @@ def _fmt_duration(minutes: float) -> str:
|
|
|
117
117
|
return f"{int(minutes)}min"
|
|
118
118
|
|
|
119
119
|
|
|
120
|
-
def
|
|
121
|
-
|
|
122
|
-
for ch in s:
|
|
123
|
-
w += 2 if ord(ch) > 0x7F else 1
|
|
124
|
-
return w
|
|
120
|
+
def _bar_width() -> int:
    """Progress-bar width in characters: narrower when the terminal is compact."""
    if _width_mode() == "compact":
        return 20
    return 30
|
|
125
122
|
|
|
126
123
|
|
|
127
124
|
def _append_bar(lines: Text, label: str, pct: float,
|
|
128
125
|
bar_width: int, suffix: str = "") -> None:
|
|
129
126
|
filled = int(pct / 100 * bar_width)
|
|
130
|
-
bar = "
|
|
127
|
+
bar = "=" * filled + "-" * (bar_width - filled)
|
|
131
128
|
bar_style = _S.bar_high if pct > 80 else _S.bar_mid if pct > 50 else _S.bar_low
|
|
132
129
|
lines.append(label, style=_S.dim)
|
|
133
130
|
lines.append(bar, style=bar_style)
|
|
@@ -138,7 +135,7 @@ def _append_bar(lines: Text, label: str, pct: float,
|
|
|
138
135
|
|
|
139
136
|
|
|
140
137
|
def _append_trend(lines: Text, current: float, previous: float) -> None:
|
|
141
|
-
arrow = "
|
|
138
|
+
arrow = "^" if current >= previous else "v"
|
|
142
139
|
style = _S.bad if current >= previous else _S.good
|
|
143
140
|
lines.append(f"{arrow}", style=style)
|
|
144
141
|
|
|
@@ -186,12 +183,12 @@ def render_tab_bar(agent_names: list[str], current: int) -> None:
|
|
|
186
183
|
line.append(" ")
|
|
187
184
|
for i, name in enumerate(agent_names):
|
|
188
185
|
if i > 0:
|
|
189
|
-
line.append("
|
|
186
|
+
line.append(" | ", style=_S.dim)
|
|
190
187
|
if i == current:
|
|
191
188
|
line.append(f" {name} ", style="bold reverse")
|
|
192
189
|
else:
|
|
193
190
|
line.append(f" {name} ", style=_S.dim)
|
|
194
|
-
line.append("
|
|
191
|
+
line.append(" < > 切换 q / ESC 退出", style=_S.dim)
|
|
195
192
|
console.print(line)
|
|
196
193
|
|
|
197
194
|
|
|
@@ -201,7 +198,7 @@ def _project_short(project: str) -> str:
|
|
|
201
198
|
|
|
202
199
|
def _render_header(agents: list[str], total_tokens: int, total_cost: float,
|
|
203
200
|
total_sessions: int, total_messages: int, days: int) -> None:
|
|
204
|
-
agent_text = " ".join(f"[{_S.good}]
|
|
201
|
+
agent_text = " ".join(f"[{_S.good}]{a}[/{_S.good}]" for a in agents)
|
|
205
202
|
console.print()
|
|
206
203
|
console.print(Panel(
|
|
207
204
|
f"[bold]Token Tracker[/bold] {agent_text}",
|
|
@@ -614,7 +611,7 @@ def _render_model_breakdown(stats: list[MonthlyStats]) -> None:
|
|
|
614
611
|
for model, tokens in sorted_models[:8]:
|
|
615
612
|
pct = tokens / total * 100 if total > 0 else 0
|
|
616
613
|
bar_width = int(pct / 100 * 20)
|
|
617
|
-
bar_text = "
|
|
614
|
+
bar_text = "=" * bar_width + "-" * (20 - bar_width)
|
|
618
615
|
|
|
619
616
|
if pct > 50:
|
|
620
617
|
bar_style = _S.token_bold
|
|
@@ -697,7 +694,7 @@ def _render_daily_panel(
|
|
|
697
694
|
week: WeeklyStats | None = None,
|
|
698
695
|
last_week: WeeklyStats | None = None,
|
|
699
696
|
) -> None:
|
|
700
|
-
|
|
697
|
+
bw = _bar_width()
|
|
701
698
|
lines = Text()
|
|
702
699
|
lines.append("当日数据面板 (P90)\n\n", style="bold")
|
|
703
700
|
|
|
@@ -710,9 +707,9 @@ def _render_daily_panel(
|
|
|
710
707
|
for label, current, limit, unit_fmt in p90_items:
|
|
711
708
|
pct = min(current / limit * 100, 100) if limit > 0 else 0
|
|
712
709
|
max_pct = max(max_pct, pct)
|
|
713
|
-
display_label = f" {label}" + " " * (14 -
|
|
710
|
+
display_label = f" {label}" + " " * (14 - len(label))
|
|
714
711
|
suffix = f" {unit_fmt(current)} / {unit_fmt(limit)}"
|
|
715
|
-
_append_bar(lines, display_label, pct,
|
|
712
|
+
_append_bar(lines, display_label, pct, bw, suffix)
|
|
716
713
|
lines.append("\n")
|
|
717
714
|
|
|
718
715
|
lines.append(f" Token {_fmt_tokens(today.total_tokens)}", style=_S.token)
|
|
@@ -765,14 +762,14 @@ def _render_active_block(
|
|
|
765
762
|
remaining_h = remaining_min // 60
|
|
766
763
|
remaining_m = remaining_min % 60
|
|
767
764
|
|
|
768
|
-
|
|
765
|
+
bw = _bar_width()
|
|
769
766
|
|
|
770
767
|
lines = Text()
|
|
771
768
|
lines.append("当前 5h&7d 数据面板\n\n", style="bold")
|
|
772
769
|
|
|
773
770
|
if rate_limits and rate_limits.five_hour_pct is not None:
|
|
774
771
|
_render_rate_bar(lines, "5h 限额", rate_limits.five_hour_pct,
|
|
775
|
-
rate_limits.five_hour_resets_at,
|
|
772
|
+
rate_limits.five_hour_resets_at, bw)
|
|
776
773
|
|
|
777
774
|
lines.append(f" 时间 ", style=_S.dim)
|
|
778
775
|
lines.append(f"已用 {elapsed_min}min / 剩余 {remaining_h}h{remaining_m:02d}m\n", style=_S.dim)
|
|
@@ -791,7 +788,7 @@ def _render_active_block(
|
|
|
791
788
|
if rate_limits and rate_limits.seven_day_pct is not None:
|
|
792
789
|
lines.append("\n\n")
|
|
793
790
|
_render_rate_bar(lines, "7d 限额", rate_limits.seven_day_pct,
|
|
794
|
-
rate_limits.seven_day_resets_at,
|
|
791
|
+
rate_limits.seven_day_resets_at, bw, "%m-%d %H:%M")
|
|
795
792
|
if week:
|
|
796
793
|
_render_week_section(lines, week, last_week)
|
|
797
794
|
|
|
@@ -806,19 +803,19 @@ def _render_idle_panel(
|
|
|
806
803
|
week: WeeklyStats | None = None,
|
|
807
804
|
last_week: WeeklyStats | None = None,
|
|
808
805
|
) -> None:
|
|
809
|
-
|
|
806
|
+
bw = _bar_width()
|
|
810
807
|
lines = Text()
|
|
811
808
|
lines.append("限额数据面板\n\n", style="bold")
|
|
812
809
|
|
|
813
810
|
if rate_limits.five_hour_pct is not None:
|
|
814
811
|
_render_rate_bar(lines, "5h 限额", rate_limits.five_hour_pct,
|
|
815
|
-
rate_limits.five_hour_resets_at,
|
|
812
|
+
rate_limits.five_hour_resets_at, bw)
|
|
816
813
|
|
|
817
814
|
if rate_limits.seven_day_pct is not None:
|
|
818
815
|
if rate_limits.five_hour_pct is not None:
|
|
819
816
|
lines.append("\n")
|
|
820
817
|
_render_rate_bar(lines, "7d 限额", rate_limits.seven_day_pct,
|
|
821
|
-
rate_limits.seven_day_resets_at,
|
|
818
|
+
rate_limits.seven_day_resets_at, bw, "%m-%d %H:%M")
|
|
822
819
|
if week:
|
|
823
820
|
_render_week_section(lines, week, last_week)
|
|
824
821
|
|
|
@@ -1,133 +0,0 @@
|
|
|
1
|
-
from collections import defaultdict
|
|
2
|
-
from datetime import datetime
|
|
3
|
-
|
|
4
|
-
from ..adapters.types import DailyStats, MonthlyStats, SessionStats, UsageEntry, WeeklyStats
|
|
5
|
-
from .cost import calculate_cost
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
def aggregate_daily(entries: list[UsageEntry]) -> list[DailyStats]:
|
|
9
|
-
by_date: dict[str, DailyStats] = {}
|
|
10
|
-
sessions_by_date: dict[str, set[str]] = defaultdict(set)
|
|
11
|
-
|
|
12
|
-
for e in entries:
|
|
13
|
-
date_str = e.timestamp.strftime("%Y-%m-%d")
|
|
14
|
-
if date_str not in by_date:
|
|
15
|
-
by_date[date_str] = DailyStats(date=date_str)
|
|
16
|
-
s = by_date[date_str]
|
|
17
|
-
cost = calculate_cost(e)
|
|
18
|
-
s.input_tokens += e.input_tokens
|
|
19
|
-
s.output_tokens += e.output_tokens
|
|
20
|
-
s.cache_creation_tokens += e.cache_creation_tokens
|
|
21
|
-
s.cache_read_tokens += e.cache_read_tokens
|
|
22
|
-
s.total_tokens += e.total_tokens
|
|
23
|
-
s.cost_usd += cost
|
|
24
|
-
s.message_count += e.message_count
|
|
25
|
-
s.models[e.model] = s.models.get(e.model, 0) + e.total_tokens
|
|
26
|
-
sessions_by_date[date_str].add(e.session_id)
|
|
27
|
-
|
|
28
|
-
for date_str, sessions in sessions_by_date.items():
|
|
29
|
-
by_date[date_str].session_count = len(sessions)
|
|
30
|
-
|
|
31
|
-
return sorted(by_date.values(), key=lambda s: s.date)
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
def aggregate_monthly(entries: list[UsageEntry]) -> list[MonthlyStats]:
|
|
35
|
-
by_month: dict[str, MonthlyStats] = {}
|
|
36
|
-
sessions_by_month: dict[str, set[str]] = defaultdict(set)
|
|
37
|
-
|
|
38
|
-
for e in entries:
|
|
39
|
-
month_str = e.timestamp.strftime("%Y-%m")
|
|
40
|
-
if month_str not in by_month:
|
|
41
|
-
by_month[month_str] = MonthlyStats(month=month_str)
|
|
42
|
-
s = by_month[month_str]
|
|
43
|
-
cost = calculate_cost(e)
|
|
44
|
-
s.input_tokens += e.input_tokens
|
|
45
|
-
s.output_tokens += e.output_tokens
|
|
46
|
-
s.cache_creation_tokens += e.cache_creation_tokens
|
|
47
|
-
s.cache_read_tokens += e.cache_read_tokens
|
|
48
|
-
s.total_tokens += e.total_tokens
|
|
49
|
-
s.cost_usd += cost
|
|
50
|
-
s.message_count += e.message_count
|
|
51
|
-
s.models[e.model] = s.models.get(e.model, 0) + e.total_tokens
|
|
52
|
-
sessions_by_month[month_str].add(e.session_id)
|
|
53
|
-
|
|
54
|
-
for month_str, sessions in sessions_by_month.items():
|
|
55
|
-
by_month[month_str].session_count = len(sessions)
|
|
56
|
-
|
|
57
|
-
return sorted(by_month.values(), key=lambda s: s.month)
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
def aggregate_weekly(entries: list[UsageEntry]) -> list[WeeklyStats]:
|
|
61
|
-
from datetime import timedelta
|
|
62
|
-
|
|
63
|
-
by_week: dict[str, WeeklyStats] = {}
|
|
64
|
-
sessions_by_week: dict[str, set[str]] = defaultdict(set)
|
|
65
|
-
|
|
66
|
-
for e in entries:
|
|
67
|
-
monday = e.timestamp.date() - timedelta(days=e.timestamp.weekday())
|
|
68
|
-
sunday = monday + timedelta(days=6)
|
|
69
|
-
week_key = monday.isoformat()
|
|
70
|
-
if week_key not in by_week:
|
|
71
|
-
by_week[week_key] = WeeklyStats(
|
|
72
|
-
week=week_key,
|
|
73
|
-
week_start=monday.strftime("%m-%d"),
|
|
74
|
-
week_end=sunday.strftime("%m-%d"),
|
|
75
|
-
)
|
|
76
|
-
s = by_week[week_key]
|
|
77
|
-
cost = calculate_cost(e)
|
|
78
|
-
s.input_tokens += e.input_tokens
|
|
79
|
-
s.output_tokens += e.output_tokens
|
|
80
|
-
s.cache_creation_tokens += e.cache_creation_tokens
|
|
81
|
-
s.cache_read_tokens += e.cache_read_tokens
|
|
82
|
-
s.total_tokens += e.total_tokens
|
|
83
|
-
s.cost_usd += cost
|
|
84
|
-
s.message_count += e.message_count
|
|
85
|
-
s.models[e.model] = s.models.get(e.model, 0) + e.total_tokens
|
|
86
|
-
sessions_by_week[week_key].add(e.session_id)
|
|
87
|
-
|
|
88
|
-
for week_key, sessions in sessions_by_week.items():
|
|
89
|
-
by_week[week_key].session_count = len(sessions)
|
|
90
|
-
|
|
91
|
-
return sorted(by_week.values(), key=lambda s: s.week)
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
def aggregate_sessions(entries: list[UsageEntry]) -> list[SessionStats]:
|
|
95
|
-
by_session: dict[str, list[UsageEntry]] = defaultdict(list)
|
|
96
|
-
|
|
97
|
-
for e in entries:
|
|
98
|
-
by_session[e.session_id].append(e)
|
|
99
|
-
|
|
100
|
-
sessions: list[SessionStats] = []
|
|
101
|
-
for session_id, session_entries in by_session.items():
|
|
102
|
-
session_entries.sort(key=lambda e: e.timestamp)
|
|
103
|
-
first = session_entries[0]
|
|
104
|
-
last = session_entries[-1]
|
|
105
|
-
duration = (last.timestamp - first.timestamp).total_seconds() / 60
|
|
106
|
-
|
|
107
|
-
models: dict[str, int] = defaultdict(int)
|
|
108
|
-
for e in session_entries:
|
|
109
|
-
models[e.model] += e.total_tokens
|
|
110
|
-
primary_model = max(models, key=models.get) if models else "unknown"
|
|
111
|
-
|
|
112
|
-
s = SessionStats(
|
|
113
|
-
session_id=session_id,
|
|
114
|
-
project=first.project,
|
|
115
|
-
model=primary_model,
|
|
116
|
-
start_time=first.timestamp,
|
|
117
|
-
end_time=last.timestamp,
|
|
118
|
-
duration_minutes=round(duration, 1),
|
|
119
|
-
)
|
|
120
|
-
for e in session_entries:
|
|
121
|
-
cost = calculate_cost(e)
|
|
122
|
-
s.input_tokens += e.input_tokens
|
|
123
|
-
s.output_tokens += e.output_tokens
|
|
124
|
-
s.cache_creation_tokens += e.cache_creation_tokens
|
|
125
|
-
s.cache_read_tokens += e.cache_read_tokens
|
|
126
|
-
s.total_tokens += e.total_tokens
|
|
127
|
-
s.cost_usd += cost
|
|
128
|
-
s.message_count += e.message_count
|
|
129
|
-
|
|
130
|
-
sessions.append(s)
|
|
131
|
-
|
|
132
|
-
sessions.sort(key=lambda s: s.start_time, reverse=True)
|
|
133
|
-
return sessions
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|