oh-my-opencode-dashboard 0.1.4 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/App.tsx CHANGED
@@ -63,6 +63,30 @@ type TimeSeries = {
   series: TimeSeriesSeries[];
 };
 
+type TokenUsageTotals = {
+  input: number;
+  output: number;
+  reasoning: number;
+  cacheRead: number;
+  cacheWrite: number;
+  total: number;
+};
+
+type TokenUsageRow = {
+  model: string;
+  input: number;
+  output: number;
+  reasoning: number;
+  cacheRead: number;
+  cacheWrite: number;
+  total: number;
+};
+
+type TokenUsage = {
+  totals: TokenUsageTotals;
+  rows: TokenUsageRow[];
+};
+
 function toNonNegativeFinite(value: unknown): number {
   if (typeof value !== "number") return 0;
   if (!Number.isFinite(value)) return 0;
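For orientation, here is a value satisfying the new types. The numbers are lifted from this version's own test fixture further down the diff; each `total` is the sum of the other five counters, which is the invariant the parser below maintains whenever the server omits a total:

// Illustrative only; figures mirror the App test fixture in this diff.
const exampleUsage: TokenUsage = {
  totals: { input: 120, output: 340, reasoning: 56, cacheRead: 12, cacheWrite: 8, total: 536 },
  rows: [
    { model: "anthropic/claude-opus-4-5", input: 100, output: 300, reasoning: 50, cacheRead: 10, cacheWrite: 5, total: 465 },
    { model: "openai/gpt-5.2", input: 20, output: 40, reasoning: 6, cacheRead: 2, cacheWrite: 3, total: 71 },
  ],
};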
@@ -344,6 +368,7 @@ type DashboardPayload = {
   backgroundTasks: BackgroundTask[];
   mainSessionTasks: BackgroundTask[];
   timeSeries: TimeSeries;
+  tokenUsage: TokenUsage;
   raw: unknown;
 };
 
@@ -494,6 +519,10 @@ const FALLBACK_DATA: DashboardPayload = {
     anchorMs: 0,
     serverNowMs: 0,
   }),
+  tokenUsage: {
+    totals: { input: 0, output: 0, reasoning: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
+    rows: [],
+  },
   raw: {
     ok: false,
     hint: "API not reachable yet. Using placeholder data.",
@@ -685,6 +714,67 @@ function toDashboardPayload(json: unknown): DashboardPayload {
   const tasks = (anyJson.backgroundTasks ?? anyJson.background_tasks ?? []) as unknown;
   const mainTasks = (anyJson.mainSessionTasks ?? anyJson.main_session_tasks ?? []) as unknown;
 
+  const tokenUsageDefault: TokenUsage = {
+    totals: { input: 0, output: 0, reasoning: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
+    rows: [],
+  };
+
+  function parseTokenUsage(input: unknown): TokenUsage {
+    if (!input || typeof input !== "object") return tokenUsageDefault;
+    const rec = input as Record<string, unknown>;
+
+    const rowsRaw = rec.rows;
+    const rows: TokenUsageRow[] = Array.isArray(rowsRaw)
+      ? rowsRaw
+          .map((row): TokenUsageRow | null => {
+            if (!row || typeof row !== "object") return null;
+            const r = row as Record<string, unknown>;
+
+            const model = toNonEmptyString(r.model ?? r.id ?? r.key);
+            if (!model) return null;
+
+            const input = toNonNegativeCount(r.input ?? r.input_tokens);
+            const output = toNonNegativeCount(r.output ?? r.output_tokens);
+            const reasoning = toNonNegativeCount(r.reasoning ?? r.reasoning_tokens);
+            const cacheRead = toNonNegativeCount(r.cacheRead ?? r.cache_read ?? r.cache_read_tokens);
+            const cacheWrite = toNonNegativeCount(r.cacheWrite ?? r.cache_write ?? r.cache_write_tokens);
+
+            const totalKey = r.total ?? r.total_tokens ?? r.totalTokens;
+            const totalFromServer = totalKey === undefined || totalKey === null ? null : toNonNegativeCount(totalKey);
+            const total = typeof totalFromServer === "number" ? totalFromServer : input + output + reasoning + cacheRead + cacheWrite;
+
+            return { model, input, output, reasoning, cacheRead, cacheWrite, total };
+          })
+          .filter((r): r is TokenUsageRow => r !== null)
+      : [];
+
+    const totalsRaw = rec.totals;
+    const totalsObj = totalsRaw && typeof totalsRaw === "object" ? (totalsRaw as Record<string, unknown>) : null;
+
+    const inputTotal = toNonNegativeCount(totalsObj?.input ?? totalsObj?.input_tokens);
+    const outputTotal = toNonNegativeCount(totalsObj?.output ?? totalsObj?.output_tokens);
+    const reasoningTotal = toNonNegativeCount(totalsObj?.reasoning ?? totalsObj?.reasoning_tokens);
+    const cacheReadTotal = toNonNegativeCount(totalsObj?.cacheRead ?? totalsObj?.cache_read ?? totalsObj?.cache_read_tokens);
+    const cacheWriteTotal = toNonNegativeCount(totalsObj?.cacheWrite ?? totalsObj?.cache_write ?? totalsObj?.cache_write_tokens);
+
+    const totalKey = totalsObj?.total ?? totalsObj?.total_tokens ?? totalsObj?.totalTokens;
+    const totalFromServer = totalKey === undefined || totalKey === null ? null : toNonNegativeCount(totalKey);
+    const total = typeof totalFromServer === "number"
+      ? totalFromServer
+      : inputTotal + outputTotal + reasoningTotal + cacheReadTotal + cacheWriteTotal;
+
+    const totals: TokenUsageTotals = {
+      input: inputTotal,
+      output: outputTotal,
+      reasoning: reasoningTotal,
+      cacheRead: cacheReadTotal,
+      cacheWrite: cacheWriteTotal,
+      total,
+    };
+
+    return { totals, rows };
+  }
+
 function parsePlanSteps(stepsInput: unknown): Array<{ checked: boolean; text: string }> {
   if (!Array.isArray(stepsInput)) return [];
 
@@ -752,6 +842,7 @@ function toDashboardPayload(json: unknown): DashboardPayload {
   const steps = parsePlanSteps(plan.steps);
 
   const timeSeries = normalizeTimeSeries(anyJson.timeSeries, Date.now());
+  const tokenUsage = parseTokenUsage(anyJson.tokenUsage ?? anyJson.token_usage);
 
   return {
     mainSession: {
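A minimal sketch of what the wiring above tolerates, assuming only that `toDashboardPayload` is exported (the test file below imports it). Snake_case keys are accepted, a missing per-row total is derived from the five counters, and an absent totals object simply yields zeros:

// Hypothetical server response; not one of the package's fixtures.
const payload = toDashboardPayload({
  token_usage: {
    rows: [{ model: "openai/gpt-5.2", input_tokens: 20, output_tokens: 40 }],
  },
});
// payload.tokenUsage.rows[0] =>
//   { model: "openai/gpt-5.2", input: 20, output: 40, reasoning: 0, cacheRead: 0, cacheWrite: 0, total: 60 }
// payload.tokenUsage.totals stays all-zero (no totals object was sent);
// the tokenUsageTotalsForUi memo below recomputes it from the rows.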
@@ -774,6 +865,7 @@ function toDashboardPayload(json: unknown): DashboardPayload {
     backgroundTasks,
     mainSessionTasks,
     timeSeries,
+    tokenUsage,
     raw: json,
   };
 }
@@ -926,6 +1018,48 @@ export default function App() {
     return JSON.stringify(data.raw, null, 2);
   }, [data.raw]);
 
+  const tokenUsageRowsSorted = React.useMemo(() => {
+    const rows = Array.isArray(data.tokenUsage?.rows) ? data.tokenUsage.rows : [];
+    const sorted = rows.slice();
+    sorted.sort((a, b) => {
+      const aTotal =
+        typeof a.total === "number" && Number.isFinite(a.total)
+          ? a.total
+          : a.input + a.output + a.reasoning + a.cacheRead + a.cacheWrite;
+      const bTotal =
+        typeof b.total === "number" && Number.isFinite(b.total)
+          ? b.total
+          : b.input + b.output + b.reasoning + b.cacheRead + b.cacheWrite;
+      return bTotal - aTotal;
+    });
+    return sorted;
+  }, [data.tokenUsage]);
+
+  const tokenUsageTotalsForUi = React.useMemo((): TokenUsageTotals => {
+    const base = data.tokenUsage?.totals;
+    if (!base) return { input: 0, output: 0, reasoning: 0, cacheRead: 0, cacheWrite: 0, total: 0 };
+
+    const baseIsAllZero =
+      base.input === 0 &&
+      base.output === 0 &&
+      base.reasoning === 0 &&
+      base.cacheRead === 0 &&
+      base.cacheWrite === 0 &&
+      base.total === 0;
+    if (!baseIsAllZero) return base;
+
+    const sums = { input: 0, output: 0, reasoning: 0, cacheRead: 0, cacheWrite: 0 };
+    for (const r of data.tokenUsage?.rows ?? []) {
+      sums.input += toNonNegativeCount(r?.input);
+      sums.output += toNonNegativeCount(r?.output);
+      sums.reasoning += toNonNegativeCount(r?.reasoning);
+      sums.cacheRead += toNonNegativeCount(r?.cacheRead);
+      sums.cacheWrite += toNonNegativeCount(r?.cacheWrite);
+    }
+    const total = sums.input + sums.output + sums.reasoning + sums.cacheRead + sums.cacheWrite;
+    return { ...sums, total };
+  }, [data.tokenUsage]);
+
   React.useEffect(() => {
     let alive = true;
 
@@ -1250,6 +1384,59 @@ export default function App() {
         </article>
       </section>
 
+      <section className="card">
+        <div className="cardHeader">
+          <h2>Token usage</h2>
+          <span className="badge">{data.tokenUsage.rows.length}</span>
+        </div>
+
+        <div className="tableWrap">
+          <table className="table">
+            <thead>
+              <tr>
+                <th>MODEL</th>
+                <th>INPUT</th>
+                <th>OUTPUT</th>
+                <th>REASONING</th>
+                <th>CACHE.READ</th>
+                <th>CACHE.WRITE</th>
+              </tr>
+            </thead>
+            <tbody>
+              <tr>
+                <td className="mono">TOTAL</td>
+                <td className="mono">{tokenUsageTotalsForUi.input}</td>
+                <td className="mono">{tokenUsageTotalsForUi.output}</td>
+                <td className="mono">{tokenUsageTotalsForUi.reasoning}</td>
+                <td className="mono">{tokenUsageTotalsForUi.cacheRead}</td>
+                <td className="mono">{tokenUsageTotalsForUi.cacheWrite}</td>
+              </tr>
+
+              {tokenUsageRowsSorted.length === 0 ? (
+                <tr>
+                  <td colSpan={6} className="muted" style={{ padding: 16 }}>
+                    No token usage detected yet.
+                  </td>
+                </tr>
+              ) : null}
+
+              {tokenUsageRowsSorted.map((r) => (
+                <tr key={r.model}>
+                  <td className="mono" title={r.model}>
+                    {r.model}
+                  </td>
+                  <td className="mono">{r.input}</td>
+                  <td className="mono">{r.output}</td>
+                  <td className="mono">{r.reasoning}</td>
+                  <td className="mono">{r.cacheRead}</td>
+                  <td className="mono">{r.cacheWrite}</td>
+                </tr>
+              ))}
+            </tbody>
+          </table>
+        </div>
+      </section>
+
       <section className="card">
         <div className="cardHeader">
           <h2>Main session tasks</h2>
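Taken together, the two memos feed this table as follows; a sketch using the fixture values from this diff's tests, with the behavior read off the code above:

// Server sends rows (unsorted) but an all-zero totals object.
const fixtureRows: TokenUsageRow[] = [
  { model: "openai/gpt-5.2", input: 20, output: 40, reasoning: 6, cacheRead: 2, cacheWrite: 3, total: 71 },
  { model: "anthropic/claude-opus-4-5", input: 100, output: 300, reasoning: 50, cacheRead: 10, cacheWrite: 5, total: 465 },
];
// tokenUsageRowsSorted orders by descending total: claude-opus-4-5 (465) before gpt-5.2 (71).
// tokenUsageTotalsForUi passes server totals through untouched unless every field is zero;
// here the all-zero case triggers the fallback, so the TOTAL row shows
//   { input: 120, output: 340, reasoning: 56, cacheRead: 12, cacheWrite: 8, total: 536 }
// and the header badge shows rows.length === 2.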
package/src/App.test.tsx CHANGED
@@ -4,7 +4,7 @@ import { toDashboardPayload } from "./App"
 describe('toDashboardPayload', () => {
   it('should preserve planProgress.steps from server JSON', () => {
     // #given: server JSON with planProgress.steps
-    const serverJson = {
+    const serverJson: unknown = {
       mainSession: {
         agent: "sisyphus",
         currentTool: "dashboard_start",
@@ -262,4 +262,109 @@ describe('toDashboardPayload', () => {
       },
     ])
   })
+
+  it('should preserve tokenUsage from server JSON', () => {
+    type TokenUsage = {
+      totals: {
+        input: number
+        output: number
+        reasoning: number
+        cacheRead: number
+        cacheWrite: number
+        total: number
+      }
+      rows: Array<{
+        model: string
+        input: number
+        output: number
+        reasoning: number
+        cacheRead: number
+        cacheWrite: number
+        total: number
+      }>
+    }
+    type DashboardPayloadWithTokenUsage = ReturnType<typeof toDashboardPayload> & {
+      tokenUsage?: TokenUsage
+    }
+
+    // #given: server JSON with token usage totals and rows
+    const tokenUsageKey = "tokenUsage"
+    const serverJson: Record<string, unknown> = {
+      mainSession: {
+        agent: "sisyphus",
+        currentTool: "dashboard_start",
+        currentModel: "anthropic/claude-opus-4-5",
+        lastUpdatedLabel: "just now",
+        session: "test-session",
+        statusPill: "busy",
+      },
+    }
+
+    serverJson[tokenUsageKey] = {
+      totals: {
+        input: 120,
+        output: 340,
+        reasoning: 56,
+        cacheRead: 12,
+        cacheWrite: 8,
+        total: 536,
+      },
+      rows: [
+        {
+          model: "anthropic/claude-opus-4-5",
+          input: 100,
+          output: 300,
+          reasoning: 50,
+          cacheRead: 10,
+          cacheWrite: 5,
+          total: 465,
+        },
+        {
+          model: "openai/gpt-5.2",
+          input: 20,
+          output: 40,
+          reasoning: 6,
+          cacheRead: 2,
+          cacheWrite: 3,
+          total: 71,
+        },
+      ],
+    }
+
+    // #when: converting to dashboard payload
+    const payload = toDashboardPayload(serverJson)
+    const payloadWithTokenUsage = payload as DashboardPayloadWithTokenUsage
+
+    // #then: tokenUsage should be preserved with correct shape
+    expect(payloadWithTokenUsage.tokenUsage).toEqual({
+      totals: {
+        input: 120,
+        output: 340,
+        reasoning: 56,
+        cacheRead: 12,
+        cacheWrite: 8,
+        total: 536,
+      },
+      rows: [
+        {
+          model: "anthropic/claude-opus-4-5",
+          input: 100,
+          output: 300,
+          reasoning: 50,
+          cacheRead: 10,
+          cacheWrite: 5,
+          total: 465,
+        },
+        {
+          model: "openai/gpt-5.2",
+          input: 20,
+          output: 40,
+          reasoning: 6,
+          cacheRead: 2,
+          cacheWrite: 3,
+          total: 71,
+        },
+      ],
+    })
+  })
 })
package/src/token-usage-core.test.ts ADDED
@@ -0,0 +1,205 @@
+import { describe, expect, it } from "vitest"
+import { aggregateTokenUsage } from "./token-usage-core"
+
+describe("token usage aggregateTokenUsage", () => {
+  it("aggregates assistant-only token usage", () => {
+    // #given
+    const metas = [
+      {
+        id: "msg_1",
+        role: "assistant",
+        providerID: "openai",
+        modelID: "gpt-5.2",
+        tokens: {
+          input: 10,
+          output: 5,
+          reasoning: 2,
+          cache: { read: 1, write: 3 },
+        },
+      },
+      {
+        id: "msg_2",
+        role: "user",
+        providerID: "openai",
+        modelID: "gpt-5.2",
+        tokens: {
+          input: 99,
+          output: 99,
+          reasoning: 99,
+          cache: { read: 99, write: 99 },
+        },
+      },
+    ]
+
+    // #when
+    const result = aggregateTokenUsage(metas)
+
+    // #then
+    expect(result.rows.length).toBe(1)
+    expect(result.rows[0]?.model).toBe("openai/gpt-5.2")
+    expect(result.totals).toEqual({
+      input: 10,
+      output: 5,
+      reasoning: 2,
+      cacheRead: 1,
+      cacheWrite: 3,
+      total: 21,
+    })
+  })
+
+  it("defaults missing tokens to zeros", () => {
+    // #given
+    const metas = [
+      {
+        id: "msg_1",
+        role: "assistant",
+        providerID: "openai",
+        modelID: "gpt-5.2",
+      },
+    ]
+
+    // #when
+    const result = aggregateTokenUsage(metas)
+
+    // #then
+    expect(result.rows).toEqual([
+      {
+        model: "openai/gpt-5.2",
+        input: 0,
+        output: 0,
+        reasoning: 0,
+        cacheRead: 0,
+        cacheWrite: 0,
+        total: 0,
+      },
+    ])
+    expect(result.totals.total).toBe(0)
+  })
+
+  it("uses unknown/unknown when model is missing", () => {
+    // #given
+    const metas = [
+      {
+        id: "msg_1",
+        role: "assistant",
+        tokens: {
+          input: 1,
+          output: 2,
+          reasoning: 3,
+          cache: { read: 4, write: 5 },
+        },
+      },
+    ]
+
+    // #when
+    const result = aggregateTokenUsage(metas)
+
+    // #then
+    expect(result.rows[0]?.model).toBe("unknown/unknown")
+  })
+
+  it("totals equal the sum of per-model rows", () => {
+    // #given
+    const metas = [
+      {
+        id: "msg_1",
+        role: "assistant",
+        providerID: "openai",
+        modelID: "gpt-5.2",
+        tokens: {
+          input: 1,
+          output: 2,
+          reasoning: 3,
+          cache: { read: 4, write: 5 },
+        },
+      },
+      {
+        id: "msg_2",
+        role: "assistant",
+        providerID: "anthropic",
+        modelID: "claude",
+        tokens: {
+          input: 10,
+          output: 20,
+          reasoning: 30,
+          cache: { read: 40, write: 50 },
+        },
+      },
+    ]
+
+    // #when
+    const result = aggregateTokenUsage(metas)
+
+    // #then
+    const summed = result.rows.reduce(
+      (acc, row) => ({
+        input: acc.input + row.input,
+        output: acc.output + row.output,
+        reasoning: acc.reasoning + row.reasoning,
+        cacheRead: acc.cacheRead + row.cacheRead,
+        cacheWrite: acc.cacheWrite + row.cacheWrite,
+        total: acc.total + row.total,
+      }),
+      { input: 0, output: 0, reasoning: 0, cacheRead: 0, cacheWrite: 0, total: 0 }
+    )
+    expect(result.totals).toEqual(summed)
+  })
+
+  it("dedupes by message id", () => {
+    // #given
+    const metas = [
+      {
+        id: "msg_1",
+        role: "assistant",
+        providerID: "openai",
+        modelID: "gpt-5.2",
+        tokens: { input: 3, output: 0, reasoning: 0, cache: { read: 0, write: 0 } },
+      },
+      {
+        id: "msg_1",
+        role: "assistant",
+        providerID: "openai",
+        modelID: "gpt-5.2",
+        tokens: { input: 9, output: 0, reasoning: 0, cache: { read: 0, write: 0 } },
+      },
+    ]
+
+    // #when
+    const result = aggregateTokenUsage(metas)
+
+    // #then
+    expect(result.totals.input).toBe(3)
+  })
+
+  it("clamps invalid token values to zero", () => {
+    // #given
+    const metas = [
+      {
+        id: "msg_1",
+        role: "assistant",
+        providerID: "openai",
+        modelID: "gpt-5.2",
+        tokens: {
+          input: -5,
+          output: "7",
+          reasoning: Number.NaN,
+          cache: { read: -1, write: 3 },
+        },
+      },
+    ]
+
+    // #when
+    const result = aggregateTokenUsage(metas)
+
+    // #then
+    expect(result.rows[0]).toEqual({
+      model: "openai/gpt-5.2",
+      input: 0,
+      output: 0,
+      reasoning: 0,
+      cacheRead: 0,
+      cacheWrite: 3,
+      total: 3,
+    })
+  })
+})
package/src/token-usage-core.ts ADDED
@@ -0,0 +1,113 @@
+import { extractModelString } from "./model"
+
+export type TokenUsageRow = {
+  model: string
+  input: number
+  output: number
+  reasoning: number
+  cacheRead: number
+  cacheWrite: number
+  total: number
+}
+
+export type TokenUsageTotals = {
+  input: number
+  output: number
+  reasoning: number
+  cacheRead: number
+  cacheWrite: number
+  total: number
+}
+
+export interface TokenUsagePayload {
+  rows: TokenUsageRow[]
+  totals: TokenUsageTotals
+}
+
+const EMPTY_TOTALS: TokenUsageTotals = {
+  input: 0,
+  output: 0,
+  reasoning: 0,
+  cacheRead: 0,
+  cacheWrite: 0,
+  total: 0,
+}
+
+function clampToken(value: unknown): number {
+  return typeof value === "number" && Number.isFinite(value) && value >= 0 ? value : 0
+}
+
+function isRecord(value: unknown): value is Record<string, unknown> {
+  return typeof value === "object" && value !== null
+}
+
+function readString(value: unknown): string | null {
+  if (typeof value !== "string") return null
+  const trimmed = value.trim()
+  return trimmed.length > 0 ? trimmed : null
+}
+
+function blankRow(model: string): TokenUsageRow {
+  return { model, input: 0, output: 0, reasoning: 0, cacheRead: 0, cacheWrite: 0, total: 0 }
+}
+
+export function aggregateTokenUsage(metas: unknown[]): TokenUsagePayload {
+  const rowsByModel = new Map<string, TokenUsageRow>()
+  const seenMessageIds = new Set<string>()
+
+  for (const metaUnknown of metas) {
+    if (!isRecord(metaUnknown)) continue
+    const role = readString(metaUnknown.role)
+    if (role !== "assistant") continue
+
+    const id = readString(metaUnknown.id)
+    if (id) {
+      if (seenMessageIds.has(id)) continue
+      seenMessageIds.add(id)
+    }
+
+    const model = extractModelString(metaUnknown) ?? "unknown/unknown"
+    const tokens = isRecord(metaUnknown.tokens) ? metaUnknown.tokens : null
+    const cache = tokens && isRecord(tokens.cache) ? tokens.cache : null
+    const input = clampToken(tokens?.input)
+    const output = clampToken(tokens?.output)
+    const reasoning = clampToken(tokens?.reasoning)
+    const cacheRead = clampToken(cache?.read)
+    const cacheWrite = clampToken(cache?.write)
+    const total = input + output + reasoning + cacheRead + cacheWrite
+
+    const row = rowsByModel.get(model) ?? blankRow(model)
+    row.input += input
+    row.output += output
+    row.reasoning += reasoning
+    row.cacheRead += cacheRead
+    row.cacheWrite += cacheWrite
+    row.total += total
+    rowsByModel.set(model, row)
+  }
+
+  const rows = Array.from(rowsByModel.values()).sort((a, b) => {
+    if (b.total !== a.total) return b.total - a.total
+    return a.model.localeCompare(b.model)
+  })
+  const totals = rows.reduce(
+    (acc, row) => ({
+      input: acc.input + row.input,
+      output: acc.output + row.output,
+      reasoning: acc.reasoning + row.reasoning,
+      cacheRead: acc.cacheRead + row.cacheRead,
+      cacheWrite: acc.cacheWrite + row.cacheWrite,
+      total: acc.total + row.total,
+    }),
+    { ...EMPTY_TOTALS }
+  )
+
+  return { rows, totals }
+}
+
+export const EMPTY_TOKEN_USAGE_PAYLOAD: TokenUsagePayload = {
+  rows: [],
+  totals: { ...EMPTY_TOTALS },
+}
+
+export const TokenUsagePayload = EMPTY_TOKEN_USAGE_PAYLOAD
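
Finally, a usage sketch of the new aggregator. The `provider/model` join comes from `extractModelString` in `./model`, which this diff does not show; the tests above pin its observable behavior to joining `providerID` and `modelID` with a slash, and that is all this sketch assumes:

import { aggregateTokenUsage } from "./token-usage-core"

// Two assistant messages for the same model collapse into one row.
const usage = aggregateTokenUsage([
  {
    id: "msg_a",
    role: "assistant",
    providerID: "openai",
    modelID: "gpt-5.2",
    tokens: { input: 10, output: 5, reasoning: 0, cache: { read: 2, write: 1 } },
  },
  {
    id: "msg_b",
    role: "assistant",
    providerID: "openai",
    modelID: "gpt-5.2",
    tokens: { input: 4, output: 6, reasoning: 1, cache: { read: 0, write: 0 } },
  },
])
// usage.rows =>
//   [{ model: "openai/gpt-5.2", input: 14, output: 11, reasoning: 1, cacheRead: 2, cacheWrite: 1, total: 29 }]
// usage.totals.total => 29; non-assistant messages and repeated ids are skipped.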