@vibe-cafe/vibe-usage 0.7.15 → 0.7.17

package/README.md CHANGED
@@ -70,6 +70,7 @@ npx @vibe-cafe/vibe-usage status # Show config & detected tools
  - Extracts session metadata from all parsers: active time (AI generation time, excluding queue/TTFT wait), total duration, message counts
  - Uploads buckets + sessions to your vibecafe.ai dashboard (gzip-compressed when ≥ 1 KB, ~94% smaller)
  - Stateless: computes full totals from local logs each sync (idempotent, no state files)
+ - SQLite-backed tools (Cursor, OpenCode, Kiro, Hermes) are read via Node's built-in `node:sqlite` on Node ≥ 22.5 — no `sqlite3` binary needed (works on Windows out of the box); on older Node it falls back to the system `sqlite3` CLI
  - For continuous syncing, use `npx @vibe-cafe/vibe-usage daemon` or the [Vibe Usage Mac app](https://github.com/vibe-cafe/vibe-usage-app)

  ## AI Skill
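The fallback the new bullet describes can be pictured with a minimal sketch (hypothetical code, not part of the package): `node:sqlite` either loads or it doesn't, and that result selects the driver.

```js
// Hypothetical sketch of the capability check the bullet above describes:
// node:sqlite loads on Node >= 22.5, so a failed require selects the CLI path.
import { createRequire } from 'node:module';
const require = createRequire(import.meta.url);

let hasBuiltinSqlite = true;
try {
  require('node:sqlite');
} catch {
  hasBuiltinSqlite = false; // older Node: fall back to the system sqlite3 CLI
}
console.log(hasBuiltinSqlite ? 'driver: node:sqlite' : 'driver: sqlite3 CLI');
```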
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@vibe-cafe/vibe-usage",
-   "version": "0.7.15",
+   "version": "0.7.17",
    "description": "Track your AI coding tool token usage and sync to vibecafe.ai",
    "type": "module",
    "bin": {
@@ -27,6 +27,26 @@ function findJsonlFiles(dir) {
    return results;
  }

+ /**
+  * Count the leading `event_msg/token_count` records in a session file.
+  * Used to size the replayed-history block of a forked session: a fork
+  * copies the original conversation verbatim, so it begins with exactly as
+  * many token_count records as the original session has in total.
+  */
+ function countTokenCountRecords(content) {
+   let n = 0;
+   for (const line of content.split('\n')) {
+     if (!line.trim()) continue;
+     try {
+       const obj = JSON.parse(line);
+       if (obj.type === 'event_msg' && obj.payload?.type === 'token_count') n++;
+     } catch {
+       continue;
+     }
+   }
+   return n;
+ }
+
  export async function parse() {
    if (!existsSync(SESSIONS_DIR)) return { buckets: [], sessions: [] };

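A quick worked example of the helper above on synthetic JSONL (invented records, not real Codex output): only `event_msg` records whose payload type is `token_count` count, and blank or corrupt lines are skipped.

```js
// Synthetic session content exercising countTokenCountRecords: two
// token_count records, one other event, a blank line, a corrupt line.
const content = [
  '{"type":"session_meta","payload":{"id":"abc"}}',
  '{"type":"event_msg","payload":{"type":"token_count"}}',
  '{"type":"event_msg","payload":{"type":"agent_message"}}',
  '',
  'not json',
  '{"type":"event_msg","payload":{"type":"token_count"}}',
].join('\n');

console.log(countTokenCountRecords(content)); // 2
```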
@@ -34,19 +54,72 @@ export async function parse() {
    const sessionEvents = [];
    const files = findJsonlFiles(SESSIONS_DIR);
    if (files.length === 0) return { buckets: [], sessions: [] };
-   for (const filePath of files) {

+   // Pass 1: index every session by its UUID and count its token_count
+   // records. A forked session (session_meta.payload.forked_from_id) starts
+   // with the original conversation replayed verbatim — including every
+   // token_count, all timestamped in a burst at the fork instant. Those
+   // tokens are already counted from the original session's own file, so
+   // re-counting them here double-counts usage and produces a spurious
+   // token/cost spike at the fork time. Timestamps cannot distinguish the
+   // replay from new activity (the replay burst is stamped at/after the fork
+   // instant, within the same 1–3s window), so we instead skip exactly the
+   // original session's token_count count from the start of each fork.
+   const tokenCountById = new Map(); // sessionId → number of token_count records
+   const fileMeta = new Map(); // filePath → { content, forkedFromId }
+   for (const filePath of files) {
      let content;
      try {
        content = readFileSync(filePath, 'utf-8');
      } catch {
        continue;
      }
+     let sessionId = null;
+     let forkedFromId = null;
+     for (const line of content.split('\n')) {
+       if (!line.trim()) continue;
+       try {
+         const obj = JSON.parse(line);
+         if (obj.type === 'session_meta' && obj.payload) {
+           sessionId = obj.payload.id || null;
+           forkedFromId = obj.payload.forked_from_id || null;
+         }
+       } catch { /* ignore */ }
+       break; // session_meta is always the first line
+     }
+     fileMeta.set(filePath, { content, forkedFromId });
+     if (sessionId) {
+       tokenCountById.set(sessionId, countTokenCountRecords(content));
+     }
+   }

-   // Extract project name and model from session_meta line
+   // Pass 2: parse usage, skipping each fork's replayed-history token_counts.
+   for (const filePath of files) {
+     const fm = fileMeta.get(filePath);
+     if (!fm) continue;
+     const { content, forkedFromId } = fm;
+
+     const lines = content.split('\n');
+
+     // How many leading token_count records are copied history. A fork's file
+     // begins with the *entire* source file replayed verbatim, so the count
+     // to skip is the source's total token_count count. This is correct even
+     // for chained forks: a fork-of-a-fork replays the parent fork's whole
+     // file (which itself already contains the grandparent's replay), so
+     // skipping the parent's full count skips exactly the duplicated region.
+     // If the source file is missing (rotated/deleted) we cannot locate the
+     // boundary; skip nothing so incomplete data over-counts rather than
+     // silently dropping real usage.
+     let replayTokenCountToSkip = 0;
+     if (forkedFromId != null) {
+       replayTokenCountToSkip = tokenCountById.get(forkedFromId) ?? 0;
+     }
+     let tokenCountSeen = 0;
+
+     // Extract project name from session_meta.
      let sessionProject = 'unknown';
      let sessionModel = 'unknown';
-     for (const line of content.split('\n')) {
+     for (const line of lines) {
        if (!line.trim()) continue;
        try {
          const obj = JSON.parse(line);
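To make the two-pass dedup concrete, here is a runnable toy model (synthetic records and invented helpers, not package code): the fork's file starts with the original's three token_counts replayed verbatim, so pass 2 knows to skip exactly three.

```js
// Toy model of the two-pass fork dedup (synthetic records, invented helpers).
const tc = () => '{"type":"event_msg","payload":{"type":"token_count"}}';
const meta = (id, from) =>
  JSON.stringify({ type: 'session_meta', payload: { id, forked_from_id: from } });

// Original session: 3 token_counts. Fork: those 3 replayed + 2 new ones.
const original = [meta('orig-1', null), tc(), tc(), tc()].join('\n');
const fork = [meta('fork-1', 'orig-1'), tc(), tc(), tc(), tc(), tc()].join('\n');

// Pass 1 indexes token_count totals by session id.
const count = (s) => s.split('\n').filter((l) => l.includes('"token_count"')).length;
const tokenCountById = new Map([['orig-1', count(original)], ['fork-1', count(fork)]]);

// Pass 2: the fork skips its source's total from the front of its own file.
const replayTokenCountToSkip = tokenCountById.get('orig-1'); // 3
console.log(count(fork) - replayTokenCountToSkip); // 2: only the new turns count
```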
@@ -60,29 +133,44 @@ export async function parse() {
              const match = meta.git.repository_url.match(/([^/]+\/[^/]+?)(?:\.git)?$/);
              if (match) sessionProject = match[1];
            }
-           break;
          }
-       } catch { break; }
+       } catch { /* ignore */ }
+       break; // session_meta is always the first line
      }

      let turnContextModel = 'unknown';
      const prevTotal = new Map();
-     for (const line of content.split('\n')) {
+     for (const line of lines) {
        if (!line.trim()) continue;
        try {
          const obj = JSON.parse(line);

+         // A fork's replayed-history block is the run from the start of the
+         // file up to and including the Nth token_count, where N is the source
+         // session's total token_count count. We are still inside that block
+         // until we have *passed* the Nth token_count. (token_count is the
+         // last event of each turn, so the boundary lands cleanly at a turn
+         // edge — the new conversation's events come strictly after it.)
+         const inReplayBlock = tokenCountSeen < replayTokenCountToSkip;
+
          if (obj.timestamp) {
            const evTs = new Date(obj.timestamp);
            if (!isNaN(evTs.getTime())) {
-             const isUserTurn = obj.type === 'turn_context' || obj.type === 'session_meta';
-             sessionEvents.push({
-               sessionId: filePath,
-               source: 'codex',
-               project: sessionProject,
-               timestamp: evTs,
-               role: isUserTurn ? 'user' : 'assistant',
-             });
+             // Skip replayed history events so a forked session's
+             // duration/active-time/message counts reflect only the new
+             // conversation, not the copied original. session_meta itself is
+             // kept: it marks when the fork actually started.
+             const isReplay = inReplayBlock && obj.type !== 'session_meta';
+             if (!isReplay) {
+               const isUserTurn = obj.type === 'turn_context' || obj.type === 'session_meta';
+               sessionEvents.push({
+                 sessionId: filePath,
+                 source: 'codex',
+                 project: sessionProject,
+                 timestamp: evTs,
+                 role: isUserTurn ? 'user' : 'assistant',
+               });
+             }
            }
          }

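The boundary rule in the comments above can be traced on a synthetic event stream (toy objects, not real Codex events): with two token_counts to skip, everything up to and including the second one is replay, except `session_meta`, which marks the fork's start.

```js
// Synthetic event stream tracing the replay boundary. With
// replayTokenCountToSkip = 2, the 2nd token_count is the last replayed
// record; session_meta is always kept.
const events = [
  { type: 'session_meta' }, // kept: marks when the fork started
  { type: 'turn_context' }, // replay, skipped
  { type: 'event_msg', payload: { type: 'token_count' } }, // replay (1st), skipped
  { type: 'turn_context' }, // replay, skipped
  { type: 'event_msg', payload: { type: 'token_count' } }, // replay (2nd), skipped
  { type: 'turn_context' }, // new activity, kept
  { type: 'event_msg', payload: { type: 'token_count' } }, // new activity, kept
];

const replayTokenCountToSkip = 2;
let tokenCountSeen = 0;
for (const ev of events) {
  const inReplayBlock = tokenCountSeen < replayTokenCountToSkip;
  if (ev.payload?.type === 'token_count') tokenCountSeen++;
  const isReplay = inReplayBlock && ev.type !== 'session_meta';
  if (!isReplay) console.log('keep:', ev.type);
}
// Prints: keep: session_meta, keep: turn_context, keep: event_msg
```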
@@ -104,6 +192,14 @@ export async function parse() {
        const timestamp = obj.timestamp ? new Date(obj.timestamp) : null;
        if (!timestamp || isNaN(timestamp.getTime())) continue;

+       // This is the (tokenCountSeen+1)-th token_count in the file. If it
+       // falls inside the fork's replay block it's an exact copy of a record
+       // already counted from the source session's own file — skip it (but
+       // still advance the cumulative-total baseline below so the first real
+       // post-fork delta is measured correctly).
+       const isReplayedHistory = tokenCountSeen < replayTokenCountToSkip;
+       tokenCountSeen++;
+
        // Prefer incremental per-request usage; compute delta from cumulative total as fallback
        let usage = info.last_token_usage;
        if (!usage && info.total_token_usage) {
@@ -121,9 +217,13 @@ export async function parse() {
          // First cumulative entry — use as-is (it's the first event's total)
          usage = curr;
        }
+       // Always advance the cumulative baseline, even for replayed history,
+       // so the first real post-fork delta is measured against the last
+       // replayed total instead of being mistaken for a fresh "first entry".
        prevTotal.set(totalKey, { ...curr });
      }
      if (!usage) continue;
+     if (isReplayedHistory) continue;

      const model = info.model || payload.model || turnContextModel || sessionModel;

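The baseline rule is easiest to see with toy cumulative totals (synthetic numbers): replayed entries advance the baseline without being counted, so the first real post-fork delta comes out as 60 rather than a spurious 310.

```js
// Toy cumulative totals. The first two entries are replayed history; the
// third is the fork's first real turn.
const entries = [
  { total: 100, replayed: true },
  { total: 250, replayed: true },
  { total: 310, replayed: false },
];

let prev = null;
let counted = 0;
for (const { total, replayed } of entries) {
  const delta = prev === null ? total : total - prev; // first entry: use as-is
  prev = total; // baseline always advances, even for replayed history
  if (!replayed) counted += delta;
}
console.log(counted); // 60; without the baseline advance it would be 310
```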
@@ -1,8 +1,8 @@
- import { execFileSync } from 'node:child_process';
  import { copyFileSync, existsSync, mkdtempSync, rmSync } from 'node:fs';
  import { join, resolve } from 'node:path';
  import { homedir, tmpdir } from 'node:os';
  import { aggregateToBuckets } from './index.js';
+ import { queryDbJson } from './sqlite.js';

  const STATE_DB_RELATIVE = join('User', 'globalStorage', 'state.vscdb');
  const ACCESS_TOKEN_KEY = 'cursorAuth/accessToken';
@@ -66,14 +66,7 @@ function readAccessToken(dbPath) {

  function queryAccessToken(dbPath) {
    const sql = `SELECT value FROM ItemTable WHERE key = '${ACCESS_TOKEN_KEY}' LIMIT 1`;
-   const out = execFileSync('sqlite3', ['-json', dbPath, sql], {
-     encoding: 'utf-8',
-     maxBuffer: 4 * 1024 * 1024,
-     timeout: 15000,
-   });
-   const trimmed = out.trim();
-   if (!trimmed || trimmed === '[]') return null;
-   const rows = JSON.parse(trimmed);
+   const rows = queryDbJson(dbPath, sql, { maxBuffer: 4 * 1024 * 1024, timeout: 15000 });
    const value = rows[0]?.value;
    if (typeof value !== 'string') return null;
    const t = value.trim();
@@ -182,7 +175,7 @@ export async function parse() {
      token = readAccessToken(dbPath);
    } catch (err) {
      if (err && typeof err.message === 'string' && err.message.includes('ENOENT')) {
-       throw new Error('sqlite3 CLI not found. Install sqlite3 to sync Cursor data.');
+       throw new Error('sqlite3 CLI not found. Install sqlite3 (or use Node >= 22.5) to sync Cursor data.');
      }
      throw err;
    }
@@ -1,8 +1,8 @@
- import { execFileSync } from 'node:child_process';
  import { existsSync, readdirSync, statSync } from 'node:fs';
  import { join } from 'node:path';
  import { homedir } from 'node:os';
  import { aggregateToBuckets, extractSessions } from './index.js';
+ import { queryDbJson } from './sqlite.js';

  const HERMES_HOME = process.env.HERMES_HOME || join(homedir(), '.hermes');

@@ -38,7 +38,7 @@ export async function parse() {
        WHERE input_tokens > 0 OR output_tokens > 0`);
    } catch (err) {
      if (err.message && err.message.includes('ENOENT')) {
-       throw new Error('sqlite3 CLI not found. Install sqlite3 to sync Hermes data.');
+       throw new Error('sqlite3 CLI not found. Install sqlite3 (or use Node >= 22.5) to sync Hermes data.');
      }
      throw err;
    }
@@ -121,14 +121,5 @@ function discoverDbPaths(home) {
  }

  function queryDb(dbPath, sql) {
-   const output = execFileSync('sqlite3', [
-     '-json',
-     dbPath,
-     sql,
-   ], { encoding: 'utf-8', maxBuffer: 100 * 1024 * 1024, timeout: 30000 });
-
-   const trimmed = output.trim();
-   if (!trimmed || trimmed === '[]') return [];
-
-   return JSON.parse(trimmed);
+   return queryDbJson(dbPath, sql);
  }
@@ -1,4 +1,3 @@
- import { execFileSync } from 'node:child_process';
  import {
    copyFileSync,
    existsSync,
@@ -11,6 +10,7 @@ import {
  import { join, resolve } from 'node:path';
  import { homedir, tmpdir } from 'node:os';
  import { aggregateToBuckets } from './index.js';
+ import { queryDbJson } from './sqlite.js';

  const KIROAGENT_RELATIVE = join('User', 'globalStorage', 'kiro.kiroagent');

@@ -41,14 +41,7 @@ function isLockError(err) {
  }

  function queryDb(dbPath, sql) {
-   const out = execFileSync('sqlite3', ['-json', dbPath, sql], {
-     encoding: 'utf-8',
-     maxBuffer: 100 * 1024 * 1024,
-     timeout: 30000,
-   });
-   const trimmed = out.trim();
-   if (!trimmed || trimmed === '[]') return [];
-   return JSON.parse(trimmed);
+   return queryDbJson(dbPath, sql);
  }

  const TOKENS_SQL =
@@ -195,7 +188,7 @@ export async function parse() {
      }
    } catch (err) {
      if (err && typeof err.message === 'string' && err.message.includes('ENOENT')) {
-       throw new Error('sqlite3 CLI not found. Install sqlite3 to sync Kiro data.');
+       throw new Error('sqlite3 CLI not found. Install sqlite3 (or use Node >= 22.5) to sync Kiro data.');
      }
      throw err;
    }
@@ -1,8 +1,8 @@
- import { execFileSync } from 'node:child_process';
- import { readdirSync, readFileSync, statSync, existsSync } from 'node:fs';
+ import { readdirSync, readFileSync, existsSync } from 'node:fs';
  import { join, basename } from 'node:path';
  import { homedir } from 'node:os';
  import { aggregateToBuckets, extractSessions } from './index.js';
+ import { queryDbJson } from './sqlite.js';

  const DATA_DIR = join(homedir(), '.local', 'share', 'opencode');
  const DB_PATH = join(DATA_DIR, 'opencode.db');
@@ -33,29 +33,16 @@ function parseFromSqlite() {
      json_extract(data, '$.path.root') as rootPath
    FROM message`;

-   let output;
+   let rows;
    try {
-     output = execFileSync('sqlite3', [
-       '-json',
-       DB_PATH,
-       query,
-     ], { encoding: 'utf-8', maxBuffer: 100 * 1024 * 1024, timeout: 30000 });
+     rows = queryDbJson(DB_PATH, query);
    } catch (err) {
      if (err.status === 127 || (err.message && err.message.includes('ENOENT'))) {
-       throw new Error('sqlite3 CLI not found. Install sqlite3 to sync opencode data.');
+       throw new Error('sqlite3 CLI not found. Install sqlite3 (or use Node >= 22.5) to sync opencode data.');
      }
      throw err;
    }
-
-   output = output.trim();
-   if (!output || output === '[]') return { buckets: [], sessions: [] };
-
-   let rows;
-   try {
-     rows = JSON.parse(output);
-   } catch {
-     throw new Error('Failed to parse sqlite3 JSON output');
-   }
+   if (!rows.length) return { buckets: [], sessions: [] };

    const entries = [];
    const sessionEvents = [];
@@ -0,0 +1,74 @@
+ import { execFileSync } from 'node:child_process';
+ import { createRequire } from 'node:module';
+
+ const require = createRequire(import.meta.url);
+
+ /**
+  * Run a SQL query against a SQLite database and return rows as plain objects
+  * (column name → value), mirroring the shape of `sqlite3 -json` output.
+  *
+  * Prefers Node's built-in `node:sqlite` (available on Node >= 22.5, no external
+  * binary needed — important on Windows where the `sqlite3` CLI is rarely on
+  * PATH). Falls back to shelling out to the `sqlite3` CLI on older Node.
+  *
+  * If neither is available, throws an Error whose message contains "ENOENT" so
+  * callers can surface an "Install sqlite3" hint, matching the previous behavior.
+  */
+ export function queryDbJson(dbPath, sql, { timeout = 30000, maxBuffer = 100 * 1024 * 1024 } = {}) {
+   const db = openNodeSqlite(dbPath);
+   if (db) {
+     try {
+       return db.prepare(sql).all();
+     } finally {
+       db.close();
+     }
+   }
+   return queryViaCli(dbPath, sql, { timeout, maxBuffer });
+ }
+
+ let nodeSqlite; // undefined = not tried, null = unavailable
+
+ function getNodeSqlite() {
+   if (nodeSqlite !== undefined) return nodeSqlite;
+   try {
+     // Suppress the one-time "SQLite is an experimental feature" ExperimentalWarning
+     // on Node versions where node:sqlite is still flagged experimental.
+     const prevEmit = process.emitWarning;
+     process.emitWarning = (warning, ...rest) => {
+       const opts = rest[0];
+       const type = typeof opts === 'object' && opts ? opts.type : opts;
+       const name = typeof warning === 'object' && warning ? warning.name : undefined;
+       if ((type === 'ExperimentalWarning' || name === 'ExperimentalWarning') && String(warning).includes('SQLite')) return;
+       return prevEmit.call(process, warning, ...rest);
+     };
+     try {
+       nodeSqlite = require('node:sqlite');
+     } finally {
+       process.emitWarning = prevEmit;
+     }
+   } catch {
+     nodeSqlite = null;
+   }
+   return nodeSqlite;
+ }
+
+ function openNodeSqlite(dbPath) {
+   const mod = getNodeSqlite();
+   if (!mod || !mod.DatabaseSync) return null;
+   try {
+     return new mod.DatabaseSync(dbPath, { readOnly: true });
+   } catch {
+     return null;
+   }
+ }
+
+ function queryViaCli(dbPath, sql, { timeout, maxBuffer }) {
+   const out = execFileSync('sqlite3', ['-json', dbPath, sql], {
+     encoding: 'utf-8',
+     maxBuffer,
+     timeout,
+   });
+   const trimmed = out.trim();
+   if (!trimmed || trimmed === '[]') return [];
+   return JSON.parse(trimmed);
+ }
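Call sites use the helper identically on either path; a usage sketch follows (hypothetical database path and table):

```js
// Usage sketch (hypothetical path and table). Rows come back as plain
// objects keyed by column name on both the node:sqlite and CLI paths.
import { queryDbJson } from './sqlite.js';

const rows = queryDbJson('/tmp/example.db', 'SELECT key, value FROM ItemTable LIMIT 5');
for (const { key, value } of rows) {
  console.log(key, value);
}
```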