vibeusage 0.5.0 → 0.6.0

@@ -0,0 +1,100 @@
+"use strict";
+
+const fs = require("node:fs");
+const path = require("node:path");
+
+/**
+ * OpenCode audit strategy.
+ *
+ * OpenCode persists one JSON per assistant message under
+ * ~/.local/share/opencode/storage/message/ses_<session>/msg_<id>.json
+ * Each file looks like:
+ *   {
+ *     role: "assistant",
+ *     id: "msg_...",
+ *     modelID: "...",
+ *     tokens: { input, output, reasoning, cache: { read, write } },
+ *     time: { created, completed }
+ *   }
+ *
+ * Channel mapping matches src/lib/rollout.js normalizeOpencodeTokens so the
+ * audit's truth sum equals what the parser emits into vibeusage_tracker_hourly
+ * (post PR #153, which added cache.read to the total):
+ *   total = input + cache.write + cache.read + output + reasoning
+ *
+ * Notes:
+ * - OPENCODE_HOME / XDG_DATA_HOME env vars override the default root (matching
+ *   the logic used by src/commands/sync.js).
+ * - Only assistant messages carry tokens; user messages return null from
+ *   extractUsage, so the generic runner skips them.
+ * - New OpenCode installs may persist into opencode.db (sqlite) instead of
+ *   these JSON files. The audit reports no-local-sessions in that case;
+ *   users can dump the same rows to a JSON file and feed --db-json to
+ *   compare via the backend path.
+ */
+
+module.exports = {
+  id: "opencode",
+  displayName: "OpenCode",
+  sessionRoot({ home, env }) {
+    const xdg = env.XDG_DATA_HOME || path.join(home, ".local", "share");
+    const opencodeHome = env.OPENCODE_HOME || path.join(xdg, "opencode");
+    return path.join(opencodeHome, "storage", "message");
+  },
+  walkSessions({ root }) {
+    if (!fs.existsSync(root)) return [];
+    const out = [];
+    for (const entry of fs.readdirSync(root, { withFileTypes: true })) {
+      if (!entry.isDirectory()) continue;
+      const dir = path.join(root, entry.name);
+      for (const f of fs.readdirSync(dir, { withFileTypes: true })) {
+        if (!f.isFile()) continue;
+        if (!f.name.startsWith("msg_") || !f.name.endsWith(".json")) continue;
+        out.push(path.join(dir, f.name));
+      }
+    }
+    return out;
+  },
+  // OpenCode is one JSON per file (not JSONL). Yield the whole file body as a
+  // single "line" so extractUsage can JSON.parse it uniformly with the
+  // line-based contract.
+  *iterateRecords(filePath) {
+    let text;
+    try {
+      text = fs.readFileSync(filePath, "utf8");
+    } catch (_err) {
+      return;
+    }
+    if (!text.trim()) return;
+    yield { line: text, context: { filePath } };
+  },
+  extractUsage(line) {
+    if (!line) return null;
+    let obj;
+    try {
+      obj = JSON.parse(line);
+    } catch (_err) {
+      return null;
+    }
+    if (obj?.role !== "assistant") return null;
+    const tokens = obj.tokens;
+    if (!tokens || typeof tokens !== "object") return null;
+    const completed = obj?.time?.completed;
+    const created = obj?.time?.created;
+    const epochMs = typeof completed === "number" ? completed : typeof created === "number" ? created : null;
+    if (!epochMs || !Number.isFinite(epochMs)) return null;
+
+    const cache = tokens.cache && typeof tokens.cache === "object" ? tokens.cache : {};
+    return {
+      timestamp: new Date(epochMs).toISOString(),
+      dedupeId: typeof obj.id === "string" && obj.id ? obj.id : null,
+      channels: {
+        input: tokens.input,
+        cache_creation: cache.write,
+        cache_read: cache.read,
+        output: tokens.output,
+        reasoning: tokens.reasoning,
+      },
+    };
+  },
+};
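
For illustration, here is what the strategy above produces for one message file. Everything in this sketch is invented sample data, not taken from a real OpenCode install; only the shape follows the doc comment above.

"use strict";

// Hypothetical contents of ~/.local/share/opencode/storage/message/
// ses_abc/msg_01.json -- illustrative values only.
const sample = {
  role: "assistant",
  id: "msg_01",
  modelID: "some-model",
  tokens: { input: 1200, output: 350, reasoning: 80, cache: { read: 5000, write: 900 } },
  time: { created: 1700000000000, completed: 1700000004000 },
};

// extractUsage(JSON.stringify(sample)) yields:
// {
//   timestamp: "2023-11-14T22:13:24.000Z",   // from time.completed
//   dedupeId: "msg_01",
//   channels: { input: 1200, cache_creation: 900, cache_read: 5000, output: 350, reasoning: 80 },
// }

// Truth sum the audit compares against (same formula as the doc comment):
const t = sample.tokens;
const total = t.input + t.cache.write + t.cache.read + t.output + t.reasoning;
console.log(total); // 7530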
@@ -214,6 +214,7 @@ async function parseClaudeIncremental({
   await ensureDir(path.dirname(queuePath));
   let filesProcessed = 0;
   let eventsAggregated = 0;
+  let dedupSkipped = 0;
 
   const cb = typeof onProgress === "function" ? onProgress : null;
   const files = Array.isArray(projectFiles) ? projectFiles : [];
@@ -283,6 +284,7 @@ async function parseClaudeIncremental({
 
     filesProcessed += 1;
     eventsAggregated += result.eventsAggregated;
+    dedupSkipped += result.dedupSkipped || 0;
 
     if (cb) {
       cb({
@@ -307,7 +309,13 @@ async function parseClaudeIncremental({
     cursors.projectHourly = projectState;
   }
 
-  return { filesProcessed, eventsAggregated, bucketsQueued, projectBucketsQueued };
+  return {
+    filesProcessed,
+    eventsAggregated,
+    bucketsQueued,
+    projectBucketsQueued,
+    dedupSkipped,
+  };
 }
 
 async function parseGeminiIncremental({
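
The per-file results roll up into this run summary, so callers of parseClaudeIncremental can surface skipped duplicates alongside the existing counters. A minimal sketch of that roll-up with invented numbers; the `|| 0` fallback, as in the hunk above, tolerates file results that predate the field:

// Shapes mirror parseClaudeFile's return value; counts are made up.
const fileResults = [
  { eventsAggregated: 40, dedupSkipped: 2 },
  { eventsAggregated: 15, dedupSkipped: 0 },
];

let eventsAggregated = 0;
let dedupSkipped = 0;
for (const result of fileResults) {
  eventsAggregated += result.eventsAggregated;
  dedupSkipped += result.dedupSkipped || 0; // older results may lack the field
}
console.log({ eventsAggregated, dedupSkipped }); // { eventsAggregated: 55, dedupSkipped: 2 }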
@@ -801,16 +809,18 @@ async function parseClaudeFile({
 
   const st = await fs.stat(filePath).catch(() => null);
   if (!st || !st.isFile()) {
-    return { endOffset: startOffset, eventsAggregated: 0, seenIds: seenOrder };
+    return { endOffset: startOffset, eventsAggregated: 0, dedupSkipped: 0, seenIds: seenOrder };
   }
 
   const endOffset = st.size;
-  if (startOffset >= endOffset) return { endOffset, eventsAggregated: 0, seenIds: seenOrder };
+  if (startOffset >= endOffset)
+    return { endOffset, eventsAggregated: 0, dedupSkipped: 0, seenIds: seenOrder };
 
   const stream = fssync.createReadStream(filePath, { encoding: "utf8", start: startOffset });
   const rl = readline.createInterface({ input: stream, crlfDelay: Infinity });
 
   let eventsAggregated = 0;
+  let dedupSkipped = 0;
   for await (const line of rl) {
     if (!line || !line.includes('"usage"')) continue;
     let obj;
@@ -827,7 +837,10 @@ async function parseClaudeFile({
     // (same `message.id` / `requestId`, different outer `uuid`). Aggregate once per
     // upstream Anthropic response to avoid multi-counting token usage.
     const dedupeId = obj?.message?.id || obj?.requestId || null;
-    if (dedupeId && seenSet.has(dedupeId)) continue;
+    if (dedupeId && seenSet.has(dedupeId)) {
+      dedupSkipped += 1;
+      continue;
+    }
 
     const model = normalizeModelInput(obj?.message?.model || obj?.model) || DEFAULT_MODEL;
     const tokenTimestamp = typeof obj?.timestamp === "string" ? obj.timestamp : null;
@@ -866,7 +879,7 @@ async function parseClaudeFile({
     seenOrder.length > CLAUDE_SEEN_IDS_LIMIT
       ? seenOrder.slice(seenOrder.length - CLAUDE_SEEN_IDS_LIMIT)
       : seenOrder;
-  return { endOffset, eventsAggregated, seenIds: trimmedSeenIds };
+  return { endOffset, eventsAggregated, dedupSkipped, seenIds: trimmedSeenIds };
 }
 
 async function parseKimiFile({
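
A self-contained sketch of the dedupe path these hunks instrument, with invented JSONL lines. The real loop additionally normalizes usage/model fields and trims the seen-id window to CLAUDE_SEEN_IDS_LIMIT, as shown above:

"use strict";

// Two lines for the same upstream Anthropic response: a retry repeats
// message.id while the outer uuid differs, so it must count only once.
const lines = [
  '{"uuid":"u1","message":{"id":"req_1","usage":{"output_tokens":10}}}',
  '{"uuid":"u2","message":{"id":"req_1","usage":{"output_tokens":10}}}',
  '{"uuid":"u3","requestId":"req_2","usage":{"output_tokens":5}}',
];

const seenSet = new Set();
let eventsAggregated = 0;
let dedupSkipped = 0;
for (const line of lines) {
  const obj = JSON.parse(line);
  const dedupeId = obj?.message?.id || obj?.requestId || null;
  if (dedupeId && seenSet.has(dedupeId)) {
    dedupSkipped += 1; // same upstream response, already aggregated
    continue;
  }
  if (dedupeId) seenSet.add(dedupeId);
  eventsAggregated += 1;
}
console.log({ eventsAggregated, dedupSkipped }); // { eventsAggregated: 2, dedupSkipped: 1 }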
@@ -2457,4 +2470,13 @@ module.exports = {
   bucketKey,
   enqueueTouchedBuckets,
   toUtcHalfHourStart,
+  // Exported for the token-conservation property test (see
+  // test/parser-total-conservation.test.js and AGENTS.md's "New AI CLI
+  // Source integration checklist"). If you add a new normalize<Source>Usage
+  // function, export it here so the conservation test covers it automatically.
+  normalizeUsage,
+  normalizeClaudeUsage,
+  normalizeGeminiTokens,
+  normalizeKimiUsage,
+  normalizeOpencodeTokens,
 };
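
A sketch of the conservation property that comment points at, under stated assumptions: the require path and the exact normalizeOpencodeTokens signature are guesses based on this diff, not confirmed by the test file itself.

"use strict";

// Property: for any input, the sum of normalized channels must equal the
// source's own total formula (no tokens created or lost in normalization).
// Assumes normalizeOpencodeTokens(tokens) returns the channel object
// described in the strategy doc comment above.
const { normalizeOpencodeTokens } = require("../src/lib/rollout");

const tokens = { input: 3, output: 5, reasoning: 2, cache: { read: 7, write: 11 } };
const channels = normalizeOpencodeTokens(tokens);

const emitted = Object.values(channels).reduce((a, b) => a + (b || 0), 0);
const truth = tokens.input + tokens.cache.write + tokens.cache.read + tokens.output + tokens.reasoning;

console.assert(emitted === truth, `conservation violated: ${emitted} !== ${truth}`);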