@vibe-cafe/vibe-usage 0.5.1 → 0.6.0

package/README.md CHANGED
@@ -29,23 +29,37 @@ npx vibe-usage status # Show config & detected tools
  
  | Tool | Data Location |
  |------|---------------|
- | Claude Code | `~/.claude/projects/` |
+ | Claude Code | `~/.claude/projects/` (tokens + sessions), `~/.claude/transcripts/` (sessions only) |
  | Codex CLI | `~/.codex/sessions/` |
  | Gemini CLI | `~/.gemini/tmp/` |
- | OpenCode | `~/.local/share/opencode/opencode.db` (SQLite) |
+ | OpenCode | `~/.local/share/opencode/opencode.db` (SQLite, `json_extract` query) |
  | OpenClaw | `~/.openclaw/agents/` |
+ | Qwen Code | `~/.qwen/tmp/` |
+ | Kimi Code | `~/.kimi/sessions/` |
  
  ## How It Works
  
  - Parses local session logs from each AI coding tool
  - Aggregates token usage into 30-minute buckets
- - Uploads to your vibecafe.ai dashboard
+ - Extracts session metadata from all 7 parsers: active time (sum of turn durations), total duration, message counts
+ - Uploads buckets + sessions to your vibecafe.ai dashboard
  - Stateless: computes full totals from local logs each sync (idempotent, no state files)
  - For continuous syncing, use `npx vibe-usage daemon` or the [Vibe Usage Mac app](https://github.com/vibe-cafe/vibe-usage-app)
  
+ ## Development
+ 
+ Test against a local vibe-cafe dev server without publishing:
+ 
+ ```bash
+ VIBE_USAGE_DEV=1 VIBE_USAGE_API_URL=http://localhost:3000 npx vibe-usage init
+ VIBE_USAGE_DEV=1 npx vibe-usage sync
+ ```
+ 
+ `VIBE_USAGE_DEV=1` uses a separate config file (`~/.vibe-usage/config.dev.json`).
+ 
  ## Config
  
- Config stored at `~/.vibe-usage/config.json`. Contains your API key and server URL.
+ Config stored at `~/.vibe-usage/config.json` (dev: `config.dev.json`). Contains your API key and server URL.
  
  ## Daemon Mode
  
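The 30-minute bucketing named above is the core aggregation step. A minimal sketch of how one timestamp maps to a bucket start (an illustration only; `aggregateToBuckets` later in this diff also receives source, model, and project on each entry, and its exact key format is not shown here):

```js
// Floor a timestamp to its 30-minute bucket start (illustrative helper).
function bucketStart(ts) {
  const BUCKET_MS = 30 * 60 * 1000;
  return new Date(Math.floor(ts.getTime() / BUCKET_MS) * BUCKET_MS);
}

// 2024-05-01T10:47:12Z falls in the 10:30 bucket:
console.log(bucketStart(new Date('2024-05-01T10:47:12Z')).toISOString());
// -> 2024-05-01T10:30:00.000Z
```
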
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@vibe-cafe/vibe-usage",
-   "version": "0.5.1",
+   "version": "0.6.0",
    "description": "Track your AI coding tool token usage and sync to vibecafe.ai",
    "type": "module",
    "bin": {
package/src/api.js CHANGED
@@ -5,21 +5,11 @@ import { URL } from 'node:url';
  const MAX_RETRIES = 3;
  const INITIAL_DELAY = 1000;
  
- /**
-  * POST buckets to the vibecafe ingest API.
-  * Uses native http/https — zero dependencies.
-  * Retries up to 3 times with exponential backoff on transient failures.
-  * @param {string} apiUrl - Base URL (e.g. "https://vibecafe.ai")
-  * @param {string} apiKey - Bearer token (vbu_xxx)
-  * @param {Array} buckets - Array of usage bucket objects
-  * @param {{onProgress?: (sent: number, total: number) => void}} [opts]
-  * @returns {Promise<{ingested: number}>}
-  */
- export async function ingest(apiUrl, apiKey, buckets, opts) {
+ export async function ingest(apiUrl, apiKey, buckets, opts, sessions) {
    let lastError;
    for (let attempt = 0; attempt < MAX_RETRIES; attempt++) {
      try {
-       return await _send(apiUrl, apiKey, buckets, opts?.onProgress);
+       return await _send(apiUrl, apiKey, buckets, opts?.onProgress, sessions);
      } catch (err) {
        lastError = err;
        // Don't retry auth errors or client errors
@@ -35,10 +25,12 @@ export async function ingest(apiUrl, apiKey, buckets, opts) {
    throw lastError;
  }
  
- function _send(apiUrl, apiKey, buckets, onProgress) {
+ function _send(apiUrl, apiKey, buckets, onProgress, sessions) {
    return new Promise((resolve, reject) => {
      const url = new URL('/api/usage/ingest', apiUrl);
-     const body = Buffer.from(JSON.stringify({ buckets }));
+     const payload = { buckets };
+     if (sessions && sessions.length > 0) payload.sessions = sessions;
+     const body = Buffer.from(JSON.stringify(payload));
      const totalBytes = body.length;
      const mod = url.protocol === 'https:' ? https : http;
  
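With the new optional `sessions` argument, the body that `_send` serializes can carry both arrays. An illustrative payload (values made up; the session shape follows `extractSessions` later in this diff, and the aggregated bucket's exact timestamp field is not shown here):

```js
// Sketch of a POST /api/usage/ingest body (Bearer vbu_... auth, per the
// JSDoc this diff removes). `sessions` is omitted when empty.
const payload = {
  buckets: [
    // aggregated 30-minute usage rows (illustrative shape)
    { source: 'claude-code', model: 'claude-sonnet-4', project: 'myproject',
      inputTokens: 1200, outputTokens: 450, cachedInputTokens: 9000,
      reasoningOutputTokens: 0, hostname: 'my-mac' },
  ],
  sessions: [
    // shape produced by extractSessions() later in this diff
    { source: 'claude-code', project: 'myproject', sessionHash: 'a1b2c3d4e5f60718',
      firstMessageAt: '2024-05-01T10:00:00.000Z', lastMessageAt: '2024-05-01T10:31:00.000Z',
      durationSeconds: 1860, activeSeconds: 360, messageCount: 5,
      userMessageCount: 2, userPromptHours: new Array(24).fill(0), hostname: 'my-mac' },
  ],
};
```
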
package/src/index.js CHANGED
@@ -1,5 +1,5 @@
  import { loadConfig, saveConfig, getConfigPath } from './config.js';
- import { detectInstalledTools, TOOLS } from './hooks.js';
+ import { detectInstalledTools, TOOLS } from './tools.js';
  import { existsSync } from 'node:fs';
  
  async function showStatus() {
package/src/init.js CHANGED
@@ -4,7 +4,7 @@ import { platform } from 'node:os';
  import { loadConfig, saveConfig } from './config.js';
  import { ingest } from './api.js';
  import { runSync } from './sync.js';
- import { detectInstalledTools } from './hooks.js';
+ import { detectInstalledTools } from './tools.js';
  
  function prompt(question) {
    const rl = createInterface({ input: process.stdin, output: process.stdout });
@@ -1,7 +1,7 @@
  import { readdirSync, readFileSync, existsSync } from 'node:fs';
  import { join, basename, sep } from 'node:path';
  import { homedir } from 'node:os';
- import { aggregateToBuckets } from './index.js';
+ import { aggregateToBuckets, extractSessions } from './index.js';
  
  /**
   * Stateless Claude Code parser.
@@ -11,7 +11,8 @@ import { aggregateToBuckets } from './index.js';
   * ON CONFLICT ... DO UPDATE SET idempotent.
   */
  
- const CLAUDE_DIR = join(homedir(), '.claude', 'projects');
+ const CLAUDE_PROJECTS_DIR = join(homedir(), '.claude', 'projects');
+ const CLAUDE_TRANSCRIPTS_DIR = join(homedir(), '.claude', 'transcripts');
  
  /**
   * Recursively find all .jsonl files under a directory.
@@ -44,29 +45,29 @@ function findJsonlFiles(dir) {
   * We extract the last path segment as the project name.
   */
  function extractProject(filePath) {
-   // Get relative path from the projects dir
-   const projectsPrefix = CLAUDE_DIR + sep;
+   const projectsPrefix = CLAUDE_PROJECTS_DIR + sep;
    if (!filePath.startsWith(projectsPrefix)) return 'unknown';
    const relative = filePath.slice(projectsPrefix.length);
-   // First segment is the encoded project path
    const firstSeg = relative.split(sep)[0];
    if (!firstSeg) return 'unknown';
-   // The encoded path uses dashes: -Users-kalasoo-Projects-myproject
-   // Take the last segment after splitting by dash
    const parts = firstSeg.split('-').filter(Boolean);
    return parts.length > 0 ? parts[parts.length - 1] : 'unknown';
  }
  
- export async function parse() {
-   if (!existsSync(CLAUDE_DIR)) return [];
- 
-   const files = findJsonlFiles(CLAUDE_DIR);
-   if (files.length === 0) return [];
+ function extractSessionId(filePath) {
+   return basename(filePath, '.jsonl');
+ }
  
+ export async function parse() {
    const entries = [];
+   const sessionEvents = [];
    const seenUuids = new Set();
+   const seenSessionIds = new Set();
+ 
+   // --- projects/ directory: extract BOTH token buckets AND session events ---
+   const projectFiles = findJsonlFiles(CLAUDE_PROJECTS_DIR);
  
-   for (const filePath of files) {
+   for (const filePath of projectFiles) {
      let content;
      try {
        content = readFileSync(filePath, 'utf-8');
@@ -75,13 +76,29 @@ export async function parse() {
      }
  
      const project = extractProject(filePath);
+     const sessionId = extractSessionId(filePath);
+     seenSessionIds.add(sessionId);
  
      for (const line of content.split('\n')) {
        if (!line.trim()) continue;
        try {
          const obj = JSON.parse(line);
  
-         // Only process assistant messages with usage data
+         const timestamp = obj.timestamp;
+         if (!timestamp) continue;
+         const ts = new Date(timestamp);
+         if (isNaN(ts.getTime())) continue;
+ 
+         if (obj.type === 'user' || obj.type === 'assistant' || obj.type === 'tool_use' || obj.type === 'tool_result') {
+           sessionEvents.push({
+             sessionId,
+             source: 'claude-code',
+             project,
+             timestamp: ts,
+             role: obj.type === 'user' ? 'user' : 'assistant',
+           });
+         }
+ 
          if (obj.type !== 'assistant') continue;
          const msg = obj.message;
          if (!msg || !msg.usage) continue;
@@ -89,18 +106,12 @@ export async function parse() {
          const usage = msg.usage;
          if (usage.input_tokens == null && usage.output_tokens == null) continue;
  
-         // Deduplicate by UUID across all files
          const uuid = obj.uuid;
          if (uuid) {
            if (seenUuids.has(uuid)) continue;
            seenUuids.add(uuid);
          }
  
-         const timestamp = obj.timestamp;
-         if (!timestamp) continue;
-         const ts = new Date(timestamp);
-         if (isNaN(ts.getTime())) continue;
- 
          entries.push({
            source: 'claude-code',
            model: msg.model || 'unknown',
@@ -117,5 +128,44 @@ export async function parse() {
      }
    }
  
-   return aggregateToBuckets(entries);
+   // --- transcripts/ directory: extract session events ONLY (no token data) ---
+   const transcriptFiles = findJsonlFiles(CLAUDE_TRANSCRIPTS_DIR);
+ 
+   for (const filePath of transcriptFiles) {
+     const sessionId = extractSessionId(filePath);
+     if (seenSessionIds.has(sessionId)) continue;
+ 
+     let content;
+     try {
+       content = readFileSync(filePath, 'utf-8');
+     } catch {
+       continue;
+     }
+ 
+     for (const line of content.split('\n')) {
+       if (!line.trim()) continue;
+       try {
+         const obj = JSON.parse(line);
+ 
+         const timestamp = obj.timestamp;
+         if (!timestamp) continue;
+         const ts = new Date(timestamp);
+         if (isNaN(ts.getTime())) continue;
+ 
+         if (obj.type === 'user' || obj.type === 'assistant' || obj.type === 'tool_use' || obj.type === 'tool_result') {
+           sessionEvents.push({
+             sessionId,
+             source: 'claude-code',
+             project: 'unknown',
+             timestamp: ts,
+             role: obj.type === 'user' ? 'user' : 'assistant',
+           });
+         }
+       } catch {
+         continue;
+       }
+     }
+   }
+ 
+   return { buckets: aggregateToBuckets(entries), sessions: extractSessions(sessionEvents) };
  }
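For context, a minimal line from `~/.claude/projects/<project>/<sessionId>.jsonl` that would yield both a session event and a token entry under this parser (values invented; real Claude Code logs carry many more fields):

```js
const exampleLine = {
  type: 'assistant',
  uuid: '0f3c9a2e-0000-4000-8000-000000000001', // dedup key across files
  timestamp: '2024-05-01T10:47:12.000Z',
  message: {
    model: 'claude-sonnet-4',
    usage: { input_tokens: 1200, output_tokens: 450 },
  },
};
```
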
@@ -1,7 +1,7 @@
  import { readdirSync, readFileSync, statSync, existsSync } from 'node:fs';
  import { join } from 'node:path';
  import { homedir } from 'node:os';
- import { aggregateToBuckets } from './index.js';
+ import { aggregateToBuckets, extractSessions } from './index.js';
  
  const SESSIONS_DIR = join(homedir(), '.codex', 'sessions');
  
@@ -28,11 +28,12 @@ function findJsonlFiles(dir) {
  }
  
  export async function parse() {
-   if (!existsSync(SESSIONS_DIR)) return [];
+   if (!existsSync(SESSIONS_DIR)) return { buckets: [], sessions: [] };
  
    const entries = [];
+   const sessionEvents = [];
    const files = findJsonlFiles(SESSIONS_DIR);
-   if (files.length === 0) return [];
+   if (files.length === 0) return { buckets: [], sessions: [] };
    for (const filePath of files) {
  
      let content;
@@ -64,16 +65,27 @@ export async function parse() {
      } catch { break; }
    }
  
-     // Track model from turn_context events (fallback when token_count lacks model)
      let turnContextModel = 'unknown';
-     // Track previous cumulative totals per model to compute deltas when only total_token_usage is available
      const prevTotal = new Map();
      for (const line of content.split('\n')) {
        if (!line.trim()) continue;
        try {
          const obj = JSON.parse(line);
  
-         // Capture model from top-level turn_context entries
+         if (obj.timestamp) {
+           const evTs = new Date(obj.timestamp);
+           if (!isNaN(evTs.getTime())) {
+             const isUserTurn = obj.type === 'turn_context' || obj.type === 'session_meta';
+             sessionEvents.push({
+               sessionId: filePath,
+               source: 'codex',
+               project: sessionProject,
+               timestamp: evTs,
+               role: isUserTurn ? 'user' : 'assistant',
+             });
+           }
+         }
+ 
          if (obj.type === 'turn_context' && obj.payload?.model) {
            turnContextModel = obj.payload.model;
            continue;
@@ -135,5 +147,5 @@ export async function parse() {
      }
    }
  
-   return aggregateToBuckets(entries);
+   return { buckets: aggregateToBuckets(entries), sessions: extractSessions(sessionEvents) };
  }
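The hunks above skip over the token-delta logic, but the two comments this diff removes describe it: `prevTotal` keeps per-model cumulative totals so that events carrying only `total_token_usage` can be converted into increments. A hedged sketch of that bookkeeping (the event shape is assumed, not shown in this diff):

```js
// Turn cumulative per-model totals into per-event deltas (assumption sketch).
const prevTotal = new Map(); // model -> last seen cumulative { input, output }

function tokenDelta(model, total) {
  const prev = prevTotal.get(model) ?? { input: 0, output: 0 };
  prevTotal.set(model, total);
  return {
    inputTokens: Math.max(0, (total.input ?? 0) - prev.input),
    outputTokens: Math.max(0, (total.output ?? 0) - prev.output),
  };
}
```
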
@@ -1,7 +1,7 @@
  import { readdirSync, readFileSync, statSync, existsSync } from 'node:fs';
  import { join } from 'node:path';
  import { homedir } from 'node:os';
- import { aggregateToBuckets } from './index.js';
+ import { aggregateToBuckets, extractSessions } from './index.js';
  
  const TMP_DIR = join(homedir(), '.gemini', 'tmp');
  
@@ -32,9 +32,10 @@ function findSessionFiles(baseDir) {
  
  export async function parse() {
    const sessionFiles = findSessionFiles(TMP_DIR);
-   if (sessionFiles.length === 0) return [];
+   if (sessionFiles.length === 0) return { buckets: [], sessions: [] };
  
    const entries = [];
+   const sessionEvents = [];
  
    for (const filePath of sessionFiles) {
  
@@ -47,19 +48,25 @@ export async function parse() {
  
      const messages = data.messages || data.history || [];
      for (const msg of messages) {
-       // New format: tokens on type=gemini messages (ChatRecordingService)
-       // Old format: usage/usageMetadata on any message
-       const tokens = msg.tokens;
-       const usage = msg.usage || msg.usageMetadata || msg.token_count;
-       if (!tokens && !usage) continue;
- 
        const timestamp = msg.timestamp || msg.createTime || data.createTime;
        if (!timestamp) continue;
        const ts = new Date(timestamp);
        if (isNaN(ts.getTime())) continue;
  
+       const role = (msg.role === 'user') ? 'user' : 'assistant';
+       sessionEvents.push({
+         sessionId: filePath,
+         source: 'gemini-cli',
+         project: 'unknown',
+         timestamp: ts,
+         role,
+       });
+ 
+       const tokens = msg.tokens;
+       const usage = msg.usage || msg.usageMetadata || msg.token_count;
+       if (!tokens && !usage) continue;
+ 
        if (tokens) {
-         // Gemini API: input INCLUDES cached, output INCLUDES thoughts. Normalize to non-overlapping.
          const cached = tokens.cached || 0;
          const thoughts = tokens.thoughts || 0;
          entries.push({
@@ -73,7 +80,6 @@ export async function parse() {
            reasoningOutputTokens: thoughts,
          });
        } else {
-         // Gemini API: promptTokenCount INCLUDES cachedContentTokenCount. Normalize to non-overlapping.
          const cached = usage.cachedContentTokenCount || 0;
          const thoughts = usage.thoughtsTokenCount || 0;
          entries.push({
@@ -90,5 +96,5 @@ export async function parse() {
      }
    }
  
-   return aggregateToBuckets(entries);
+   return { buckets: aggregateToBuckets(entries), sessions: extractSessions(sessionEvents) };
  }
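The comments this diff removes explain a normalization the code still performs: Gemini's reported input includes cached tokens, and its output includes thinking tokens, so the parser subtracts both to keep the four counters non-overlapping. A quick worked example with the `usageMetadata` field names (numbers illustrative):

```js
const usage = {
  promptTokenCount: 1000,        // includes cached input
  candidatesTokenCount: 300,     // includes thinking tokens
  cachedContentTokenCount: 800,
  thoughtsTokenCount: 120,
};

const cached = usage.cachedContentTokenCount || 0;
const thoughts = usage.thoughtsTokenCount || 0;
const inputTokens = usage.promptTokenCount - cached;        // 200 fresh input tokens
const outputTokens = usage.candidatesTokenCount - thoughts; // 180 visible output tokens
```
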
@@ -1,8 +1,11 @@
+ import { createHash } from 'node:crypto';
  import { parse as parseClaudeCode } from './claude-code.js';
  import { parse as parseCodex } from './codex.js';
  import { parse as parseGeminiCli } from './gemini-cli.js';
  import { parse as parseOpencode } from './opencode.js';
  import { parse as parseOpenclaw } from './openclaw.js';
+ import { parse as parseQwenCode } from './qwen-code.js';
+ import { parse as parseKimiCode } from './kimi-code.js';
  
  export const parsers = {
    'claude-code': parseClaudeCode,
@@ -10,6 +13,8 @@ export const parsers = {
    'gemini-cli': parseGeminiCli,
    'opencode': parseOpencode,
    'openclaw': parseOpenclaw,
+   'qwen-code': parseQwenCode,
+   'kimi-code': parseKimiCode,
  };
  
  
@@ -50,3 +55,72 @@ export function aggregateToBuckets(entries) {
  
    return Array.from(map.values());
  }
+ 
+ /**
+  * Extract session metadata from timing events.
+  * Each event: { sessionId, source, project, timestamp: Date, role: 'user'|'assistant' }
+  *
+  * Turn = user prompt → last agent message before next user prompt.
+  * activeSeconds = sum(turn durations). durationSeconds = wall clock.
+  */
+ export function extractSessions(events) {
+   const groups = new Map();
+   for (const e of events) {
+     if (!groups.has(e.sessionId)) groups.set(e.sessionId, []);
+     groups.get(e.sessionId).push(e);
+   }
+ 
+   const sessions = [];
+   for (const [sessionId, sessionEvents] of groups) {
+     sessionEvents.sort((a, b) => a.timestamp - b.timestamp);
+ 
+     const first = sessionEvents[0];
+     const last = sessionEvents[sessionEvents.length - 1];
+     const durationSeconds = Math.round((last.timestamp - first.timestamp) / 1000);
+ 
+     let activeSeconds = 0;
+     let turnStart = null;
+     let turnEnd = null;
+ 
+     for (const event of sessionEvents) {
+       if (event.role === 'user') {
+         if (turnStart !== null && turnEnd !== null && turnEnd > turnStart) {
+           activeSeconds += Math.round((turnEnd - turnStart) / 1000);
+         }
+         turnStart = event.timestamp;
+         turnEnd = event.timestamp;
+       } else if (turnStart !== null) {
+         turnEnd = event.timestamp;
+       }
+     }
+     if (turnStart !== null && turnEnd !== null && turnEnd > turnStart) {
+       activeSeconds += Math.round((turnEnd - turnStart) / 1000);
+     }
+ 
+     const userPromptHours = new Array(24).fill(0);
+     let userMessageCount = 0;
+     for (const event of sessionEvents) {
+       if (event.role === 'user') {
+         userMessageCount++;
+         userPromptHours[event.timestamp.getUTCHours()]++;
+       }
+     }
+ 
+     const sessionHash = createHash('sha256').update(sessionId).digest('hex').slice(0, 16);
+ 
+     sessions.push({
+       source: first.source,
+       project: first.project || 'unknown',
+       sessionHash,
+       firstMessageAt: first.timestamp.toISOString(),
+       lastMessageAt: last.timestamp.toISOString(),
+       durationSeconds,
+       activeSeconds,
+       messageCount: sessionEvents.length,
+       userMessageCount,
+       userPromptHours,
+     });
+   }
+ 
+   return sessions;
+ }
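A worked example of the turn accounting in `extractSessions`: assistant events extend the current turn, and the turn closes at the next user prompt (or at the end of the session). Timestamps below are invented, and the import path is illustrative:

```js
import { extractSessions } from './index.js'; // the parsers' index shown above

const ev = (t, role) =>
  ({ sessionId: 's1', source: 'claude-code', project: 'demo', timestamp: new Date(t), role });

const [session] = extractSessions([
  ev('2024-05-01T10:00:00Z', 'user'),      // turn 1 starts
  ev('2024-05-01T10:01:00Z', 'assistant'),
  ev('2024-05-01T10:05:00Z', 'assistant'), // turn 1 ends here: 300s
  ev('2024-05-01T10:30:00Z', 'user'),      // turn 2 starts
  ev('2024-05-01T10:31:00Z', 'assistant'), // turn 2 ends here: 60s
]);

// session.activeSeconds   === 360  (300 + 60)
// session.durationSeconds === 1860 (10:00 to 10:31 wall clock)
// session.messageCount === 5, session.userMessageCount === 2
```
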
@@ -0,0 +1,140 @@
+ import { readdirSync, readFileSync, existsSync } from 'node:fs';
+ import { join, sep } from 'node:path';
+ import { homedir } from 'node:os';
+ import { aggregateToBuckets, extractSessions } from './index.js';
+ 
+ /**
+  * Kimi Code CLI parser.
+  * Wire protocol JSONL at ~/.kimi/sessions/<work-dir-hash>/<session-id>/wire.jsonl
+  * Token data from StatusUpdate events: payload.token_usage.{input_other, output,
+  * input_cache_read, input_cache_creation}
+  */
+ 
+ const KIMI_SESSIONS_DIR = join(homedir(), '.kimi', 'sessions');
+ const KIMI_CONFIG = join(homedir(), '.kimi', 'kimi.json');
+ 
+ function findWireFiles(baseDir) {
+   const results = [];
+   if (!existsSync(baseDir)) return results;
+ 
+   try {
+     for (const workDir of readdirSync(baseDir, { withFileTypes: true })) {
+       if (!workDir.isDirectory()) continue;
+       const workDirPath = join(baseDir, workDir.name);
+ 
+       try {
+         for (const session of readdirSync(workDirPath, { withFileTypes: true })) {
+           if (!session.isDirectory()) continue;
+           const wireFile = join(workDirPath, session.name, 'wire.jsonl');
+           if (existsSync(wireFile)) {
+             results.push({ filePath: wireFile, workDirHash: workDir.name });
+           }
+         }
+       } catch {
+         continue;
+       }
+     }
+   } catch {
+     return results;
+   }
+   return results;
+ }
+ 
+ function loadProjectMap() {
+   const map = new Map();
+   if (!existsSync(KIMI_CONFIG)) return map;
+ 
+   try {
+     const config = JSON.parse(readFileSync(KIMI_CONFIG, 'utf-8'));
+     const workspaces = config.workspaces || config.projects || {};
+     for (const [hash, info] of Object.entries(workspaces)) {
+       const path = typeof info === 'string' ? info : (info?.path || info?.dir);
+       if (path) {
+         const parts = path.split('/').filter(Boolean);
+         map.set(hash, parts[parts.length - 1] || hash);
+       }
+     }
+   } catch {
+     // config unreadable
+   }
+   return map;
+ }
+ 
+ export async function parse() {
+   const wireFiles = findWireFiles(KIMI_SESSIONS_DIR);
+   if (wireFiles.length === 0) return { buckets: [], sessions: [] };
+ 
+   const projectMap = loadProjectMap();
+   const entries = [];
+   const sessionEvents = [];
+   const seenMessageIds = new Set();
+ 
+   for (const { filePath, workDirHash } of wireFiles) {
+     let content;
+     try {
+       content = readFileSync(filePath, 'utf-8');
+     } catch {
+       continue;
+     }
+ 
+     const project = projectMap.get(workDirHash) || workDirHash;
+     let currentModel = 'unknown';
+     let lastTimestamp = null;
+ 
+     for (const line of content.split('\n')) {
+       if (!line.trim()) continue;
+       try {
+         const obj = JSON.parse(line);
+         const type = obj.type;
+         const payload = obj.payload;
+         if (!payload) continue;
+ 
+         if (payload.timestamp) lastTimestamp = payload.timestamp;
+         if (payload.model) currentModel = payload.model;
+ 
+         if (lastTimestamp) {
+           const evTs = new Date(lastTimestamp);
+           if (!isNaN(evTs.getTime())) {
+             const isUser = type === 'UserMessage' || type === 'user_message' || type === 'Input';
+             sessionEvents.push({
+               sessionId: filePath,
+               source: 'kimi-code',
+               project,
+               timestamp: evTs,
+               role: isUser ? 'user' : 'assistant',
+             });
+           }
+         }
+ 
+         if (type !== 'StatusUpdate') continue;
+ 
+         const tokenUsage = payload.token_usage;
+         if (!tokenUsage) continue;
+         if (!tokenUsage.input_other && !tokenUsage.output) continue;
+ 
+         const messageId = payload.message_id;
+         if (messageId) {
+           if (seenMessageIds.has(messageId)) continue;
+           seenMessageIds.add(messageId);
+         }
+ 
+         const ts = lastTimestamp ? new Date(lastTimestamp) : new Date();
+ 
+         entries.push({
+           source: 'kimi-code',
+           model: currentModel,
+           project,
+           timestamp: ts,
+           inputTokens: tokenUsage.input_other || 0,
+           outputTokens: tokenUsage.output || 0,
+           cachedInputTokens: tokenUsage.input_cache_read || 0,
+           reasoningOutputTokens: 0,
+         });
+       } catch {
+         continue;
+       }
+     }
+   }
+ 
+   return { buckets: aggregateToBuckets(entries), sessions: extractSessions(sessionEvents) };
+ }
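For reference, an illustrative `wire.jsonl` event this parser would count (field layout per the header comment above; the values and the model name are made up):

```js
const statusUpdate = {
  type: 'StatusUpdate',
  payload: {
    timestamp: '2024-05-01T10:47:12.000Z',
    model: 'kimi-k2',            // illustrative model name
    message_id: 'msg_0001',      // dedup key
    token_usage: {
      input_other: 1200,         // fresh input tokens
      output: 450,
      input_cache_read: 9000,    // cached input tokens
      input_cache_creation: 0,
    },
  },
};
```
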
@@ -1,7 +1,7 @@
  import { readdirSync, readFileSync, statSync, existsSync } from 'node:fs';
  import { join } from 'node:path';
  import { homedir } from 'node:os';
- import { aggregateToBuckets } from './index.js';
+ import { aggregateToBuckets, extractSessions } from './index.js';
  
  // OpenClaw stores data at ~/.openclaw/agents/<agentId>/sessions/*.jsonl
  // Legacy paths: ~/.clawdbot, ~/.moltbot, ~/.moldbot
@@ -22,6 +22,7 @@ function getTokens(usage, ...keys) {
  
  export async function parse() {
    const entries = [];
+   const sessionEvents = [];
  
    for (const root of POSSIBLE_ROOTS) {
      const agentsDir = join(root, 'agents');
@@ -62,19 +63,27 @@ export async function parse() {
        try {
          const obj = JSON.parse(line);
  
-         // Only process message entries with assistant role
          if (obj.type !== 'message') continue;
          const msg = obj.message;
-         if (!msg || msg.role !== 'assistant') continue;
- 
-         const usage = msg.usage;
-         if (!usage) continue;
+         if (!msg) continue;
  
          const timestamp = obj.timestamp || msg.timestamp;
          if (!timestamp) continue;
          const ts = new Date(typeof timestamp === 'number' ? timestamp : timestamp);
          if (isNaN(ts.getTime())) continue;
  
+         sessionEvents.push({
+           sessionId: filePath,
+           source: 'openclaw',
+           project,
+           timestamp: ts,
+           role: msg.role === 'user' ? 'user' : 'assistant',
+         });
+ 
+         if (msg.role !== 'assistant') continue;
+         const usage = msg.usage;
+         if (!usage) continue;
+ 
          entries.push({
            source: 'openclaw',
            model: msg.model || obj.model || 'unknown',
@@ -93,5 +102,5 @@ export async function parse() {
      }
    }
  
-   return aggregateToBuckets(entries);
+   return { buckets: aggregateToBuckets(entries), sessions: extractSessions(sessionEvents) };
  }
@@ -2,7 +2,7 @@ import { execFileSync } from 'node:child_process';
  import { readdirSync, readFileSync, statSync, existsSync } from 'node:fs';
  import { join, basename } from 'node:path';
  import { homedir } from 'node:os';
- import { aggregateToBuckets } from './index.js';
+ import { aggregateToBuckets, extractSessions } from './index.js';
  
  const DATA_DIR = join(homedir(), '.local', 'share', 'opencode');
  const DB_PATH = join(DATA_DIR, 'opencode.db');
@@ -24,12 +24,14 @@ export async function parse() {
  }
  
  function parseFromSqlite() {
-   // Build WHERE clause: only messages with token data
-   const conditions = [
-     "(json_extract(data, '$.tokens.input') > 0 OR json_extract(data, '$.tokens.output') > 0)",
-   ];
- 
-   const query = `SELECT data FROM message WHERE ${conditions.join(' AND ')}`;
+   const query = `SELECT
+     session_id as sessionID,
+     json_extract(data, '$.role') as role,
+     json_extract(data, '$.time.created') as created,
+     json_extract(data, '$.modelID') as modelID,
+     json_extract(data, '$.tokens') as tokens,
+     json_extract(data, '$.path.root') as rootPath
+   FROM message`;
  
    let output;
    try {
@@ -46,7 +48,7 @@ function parseFromSqlite() {
    }
  
    output = output.trim();
-   if (!output || output === '[]') return [];
+   if (!output || output === '[]') return { buckets: [], sessions: [] };
  
    let rows;
    try {
@@ -56,29 +58,34 @@ function parseFromSqlite() {
    }
  
    const entries = [];
+   const sessionEvents = [];
    for (const row of rows) {
-     let data;
+     const timestamp = new Date(row.created);
+     if (isNaN(timestamp.getTime())) continue;
+ 
+     const project = row.rootPath ? basename(row.rootPath) : 'unknown';
+     const sessionId = row.sessionID || 'unknown';
+ 
+     sessionEvents.push({
+       sessionId,
+       source: 'opencode',
+       project,
+       timestamp,
+       role: row.role === 'user' ? 'user' : 'assistant',
+     });
+ 
+     if (!row.modelID) continue;
+     let tokens;
      try {
-       data = JSON.parse(row.data);
+       tokens = typeof row.tokens === 'string' ? JSON.parse(row.tokens) : row.tokens;
      } catch {
        continue;
      }
- 
-     if (!data.modelID) continue;
- 
-     const tokens = data.tokens;
-     if (!tokens) continue;
-     if (!tokens.input && !tokens.output) continue;
- 
-     const timestamp = new Date(data.time?.created);
-     if (isNaN(timestamp.getTime())) continue;
- 
-     const rootPath = data.path?.root;
-     const project = rootPath ? basename(rootPath) : 'unknown';
+     if (!tokens || (!tokens.input && !tokens.output)) continue;
  
      entries.push({
        source: 'opencode',
-       model: data.modelID || 'unknown',
+       model: row.modelID || 'unknown',
        project,
        timestamp,
        inputTokens: tokens.input || 0,
@@ -88,20 +95,20 @@ function parseFromSqlite() {
      });
    }
  
-   return aggregateToBuckets(entries);
+   return { buckets: aggregateToBuckets(entries), sessions: extractSessions(sessionEvents) };
  }
  
- /** Legacy parser: reads JSON files from storage/message directories. */
  function parseFromJson() {
-   if (!existsSync(MESSAGES_DIR)) return [];
+   if (!existsSync(MESSAGES_DIR)) return { buckets: [], sessions: [] };
  
    const entries = [];
+   const sessionEvents = [];
    let sessionDirs;
    try {
      sessionDirs = readdirSync(MESSAGES_DIR, { withFileTypes: true })
        .filter(d => d.isDirectory() && d.name.startsWith('ses_'));
    } catch {
-     return [];
+     return { buckets: [], sessions: [] };
    }
  
    for (const sessionDir of sessionDirs) {
@@ -123,18 +130,25 @@ function parseFromJson() {
        continue;
      }
  
-     if (!data.modelID) continue;
- 
-     const tokens = data.tokens;
-     if (!tokens) continue;
-     if (!tokens.input && !tokens.output) continue;
- 
      const timestamp = new Date(data.time?.created);
      if (isNaN(timestamp.getTime())) continue;
  
      const rootPath = data.path?.root;
      const project = rootPath ? basename(rootPath) : 'unknown';
  
+     sessionEvents.push({
+       sessionId: sessionDir.name,
+       source: 'opencode',
+       project,
+       timestamp,
+       role: data.role === 'user' ? 'user' : 'assistant',
+     });
+ 
+     if (!data.modelID) continue;
+     const tokens = data.tokens;
+     if (!tokens) continue;
+     if (!tokens.input && !tokens.output) continue;
+ 
      entries.push({
        source: 'opencode',
        model: data.modelID || 'unknown',
@@ -148,5 +162,5 @@ function parseFromJson() {
      }
    }
  
-   return aggregateToBuckets(entries);
+   return { buckets: aggregateToBuckets(entries), sessions: extractSessions(sessionEvents) };
  }
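The actual `execFileSync` invocation sits between the hunks above, so it is not shown; a sketch of how the query is plausibly run, assuming the `sqlite3` CLI's `-json` output mode (which would match the `output === '[]'` check and the `JSON.parse` into `rows`):

```js
import { execFileSync } from 'node:child_process';

// Assumption: sqlite3 -json prints result rows as a JSON array.
// DB_PATH and `query` are the constants defined in this file.
const output = execFileSync('sqlite3', ['-readonly', '-json', DB_PATH, query], {
  encoding: 'utf-8',
});
const rows = JSON.parse(output.trim() || '[]');
```
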
@@ -0,0 +1,122 @@
+ import { readdirSync, readFileSync, existsSync } from 'node:fs';
+ import { join, basename, sep } from 'node:path';
+ import { homedir } from 'node:os';
+ import { aggregateToBuckets, extractSessions } from './index.js';
+ 
+ /**
+  * Qwen Code parser (Gemini CLI fork).
+  * JSONL at ~/.qwen/tmp/<project_id>/chats/<sessionId>.jsonl
+  * Token fields: usageMetadata.{promptTokenCount, candidatesTokenCount,
+  * cachedContentTokenCount, thoughtsTokenCount}
+  * Note: promptTokenCount INCLUDES cachedContentTokenCount (needs normalization).
+  */
+ 
+ const QWEN_TMP_DIR = join(homedir(), '.qwen', 'tmp');
+ 
+ function findSessionFiles(baseDir) {
+   const results = [];
+   if (!existsSync(baseDir)) return results;
+ 
+   try {
+     for (const entry of readdirSync(baseDir, { withFileTypes: true })) {
+       if (!entry.isDirectory()) continue;
+       const chatsDir = join(baseDir, entry.name, 'chats');
+       if (!existsSync(chatsDir)) continue;
+       try {
+         for (const f of readdirSync(chatsDir)) {
+           if (f.endsWith('.jsonl')) {
+             results.push(join(chatsDir, f));
+           }
+         }
+       } catch {
+         continue;
+       }
+     }
+   } catch {
+     return results;
+   }
+   return results;
+ }
+ 
+ function extractProject(cwd, filePath) {
+   if (cwd) {
+     const parts = cwd.split('/').filter(Boolean);
+     if (parts.length > 0) return parts[parts.length - 1];
+   }
+   const tmpPrefix = QWEN_TMP_DIR + sep;
+   if (filePath.startsWith(tmpPrefix)) {
+     const relative = filePath.slice(tmpPrefix.length);
+     const projectId = relative.split(sep)[0];
+     if (projectId) return projectId;
+   }
+   return 'unknown';
+ }
+ 
+ export async function parse() {
+   const sessionFiles = findSessionFiles(QWEN_TMP_DIR);
+   if (sessionFiles.length === 0) return { buckets: [], sessions: [] };
+ 
+   const entries = [];
+   const sessionEvents = [];
+   const seenUuids = new Set();
+ 
+   for (const filePath of sessionFiles) {
+     let content;
+     try {
+       content = readFileSync(filePath, 'utf-8');
+     } catch {
+       continue;
+     }
+ 
+     for (const line of content.split('\n')) {
+       if (!line.trim()) continue;
+       try {
+         const obj = JSON.parse(line);
+ 
+         const timestamp = obj.timestamp;
+         if (!timestamp) continue;
+         const ts = new Date(timestamp);
+         if (isNaN(ts.getTime())) continue;
+ 
+         if (obj.type === 'user' || obj.type === 'assistant') {
+           sessionEvents.push({
+             sessionId: filePath,
+             source: 'qwen-code',
+             project: extractProject(obj.cwd, filePath),
+             timestamp: ts,
+             role: obj.type === 'user' ? 'user' : 'assistant',
+           });
+         }
+ 
+         if (obj.type !== 'assistant') continue;
+         const usage = obj.usageMetadata;
+         if (!usage) continue;
+         if (usage.promptTokenCount == null && usage.candidatesTokenCount == null) continue;
+ 
+         const uuid = obj.uuid;
+         if (uuid) {
+           if (seenUuids.has(uuid)) continue;
+           seenUuids.add(uuid);
+         }
+ 
+         const cached = usage.cachedContentTokenCount || 0;
+         const thoughts = usage.thoughtsTokenCount || 0;
+ 
+         entries.push({
+           source: 'qwen-code',
+           model: obj.model || 'unknown',
+           project: extractProject(obj.cwd, filePath),
+           timestamp: ts,
+           inputTokens: (usage.promptTokenCount || 0) - cached,
+           outputTokens: (usage.candidatesTokenCount || 0) - thoughts,
+           cachedInputTokens: cached,
+           reasoningOutputTokens: thoughts,
+         });
+       } catch {
+         continue;
+       }
+     }
+   }
+ 
+   return { buckets: aggregateToBuckets(entries), sessions: extractSessions(sessionEvents) };
+ }
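An illustrative `~/.qwen/tmp/<project_id>/chats/<session>.jsonl` line this parser accepts (values and model name invented):

```js
const exampleLine = {
  type: 'assistant',
  uuid: '9c0d3f7a-0000-4000-8000-000000000002',
  timestamp: '2024-05-01T10:47:12.000Z',
  model: 'qwen3-coder', // illustrative model name
  cwd: '/Users/me/Projects/myproject',
  usageMetadata: {
    promptTokenCount: 1000, candidatesTokenCount: 300,
    cachedContentTokenCount: 800, thoughtsTokenCount: 120,
  },
};
// -> project 'myproject', inputTokens 200, outputTokens 180,
//    cachedInputTokens 800, reasoningOutputTokens 120
```
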
package/src/sync.js CHANGED
@@ -26,28 +26,45 @@ export async function runSync({ throws = false, quiet = false } = {}) {
    }
  
    const allBuckets = [];
+   const allSessions = [];
+   const parserResults = [];
  
    for (const [source, parse] of Object.entries(parsers)) {
      try {
-       const buckets = await parse();
-       if (buckets.length > 0) {
-         allBuckets.push(...buckets);
+       const result = await parse();
+       const buckets = Array.isArray(result) ? result : result.buckets;
+       const sessions = Array.isArray(result) ? [] : (result.sessions || []);
+       if (buckets.length > 0) allBuckets.push(...buckets);
+       if (sessions.length > 0) allSessions.push(...sessions);
+       if (buckets.length > 0 || sessions.length > 0) {
+         parserResults.push({ source, buckets: buckets.length, sessions: sessions.length });
        }
      } catch (err) {
        process.stderr.write(`warn: ${source} parser failed: ${err.message}\n`);
      }
    }
  
-   if (allBuckets.length === 0) {
+   if (allBuckets.length === 0 && allSessions.length === 0) {
      if (!quiet) console.log('No new usage data found.');
      return 0;
    }
  
-   // Tag every bucket with this machine's hostname
+   if (!quiet && parserResults.length > 0) {
+     for (const p of parserResults) {
+       const parts = [];
+       if (p.buckets > 0) parts.push(`${p.buckets} buckets`);
+       if (p.sessions > 0) parts.push(`${p.sessions} sessions`);
+       console.log(`  ${p.source}: ${parts.join(', ')}`);
+     }
+   }
+ 
    const host = osHostname().replace(/\.local$/, '');
    for (const b of allBuckets) {
      b.hostname = host;
    }
+   for (const s of allSessions) {
+     s.hostname = host;
+   }
  
    // Privacy: check if user allows project name upload
    const apiUrl = config.apiUrl || 'https://vibecafe.ai';
@@ -61,32 +78,57 @@ export async function runSync({ throws = false, quiet = false } = {}) {
      for (const b of allBuckets) {
        b.project = 'unknown';
      }
+     for (const s of allSessions) {
+       s.project = 'unknown';
+     }
    }
  
    let totalIngested = 0;
-   const totalBatches = Math.ceil(allBuckets.length / BATCH_SIZE);
+   let totalSessionsSynced = 0;
+   const totalBatches = Math.ceil(Math.max(allBuckets.length, 1) / BATCH_SIZE);
  
-   console.log(`Uploading ${allBuckets.length} buckets (${totalBatches} batch${totalBatches > 1 ? 'es' : ''})...`);
+   const parts = [];
+   if (allBuckets.length > 0) parts.push(`${allBuckets.length} buckets`);
+   if (allSessions.length > 0) parts.push(`${allSessions.length} sessions`);
+   console.log(`Uploading ${parts.join(' + ')} (${totalBatches} batch${totalBatches > 1 ? 'es' : ''})...`);
  
    try {
-     for (let i = 0; i < allBuckets.length; i += BATCH_SIZE) {
+     for (let i = 0; i < Math.max(allBuckets.length, 1); i += BATCH_SIZE) {
        const batch = allBuckets.slice(i, i + BATCH_SIZE);
        const batchNum = Math.floor(i / BATCH_SIZE) + 1;
        const prefix = totalBatches > 1 ? ` [${batchNum}/${totalBatches}] ` : ' ';
+       const batchSessions = i === 0 ? allSessions : undefined;
  
        const result = await ingest(apiUrl, config.apiKey, batch, {
          onProgress(sent, total) {
            const pct = Math.round((sent / total) * 100);
            process.stdout.write(`\r${prefix}${formatBytes(sent)}/${formatBytes(total)} (${pct}%)\x1b[K`);
          },
-       });
+       }, batchSessions);
        totalIngested += result.ingested ?? batch.length;
+       totalSessionsSynced += result.sessions ?? 0;
      }
  
      if (totalBatches > 1 || allBuckets.length > 0) {
        process.stdout.write('\n');
      }
-     console.log(`Synced ${totalIngested} buckets.`);
+     const syncParts = [`${totalIngested} buckets`];
+     if (totalSessionsSynced > 0) syncParts.push(`${totalSessionsSynced} sessions`);
+     console.log(`Synced ${syncParts.join(' + ')}.`);
+ 
+     if (!quiet && totalSessionsSynced > 0) {
+       const totalActive = allSessions.reduce((s, x) => s + x.activeSeconds, 0);
+       const totalDuration = allSessions.reduce((s, x) => s + x.durationSeconds, 0);
+       const totalMsgs = allSessions.reduce((s, x) => s + x.messageCount, 0);
+       const fmtTime = (secs) => {
+         if (secs < 60) return `${secs}s`;
+         const h = Math.floor(secs / 3600);
+         const m = Math.floor((secs % 3600) / 60);
+         return h > 0 ? (m > 0 ? `${h}h ${m}m` : `${h}h`) : `${m}m`;
+       };
+       console.log(`  active: ${fmtTime(totalActive)} / total: ${fmtTime(totalDuration)}, ${totalMsgs} messages`);
+     }
+ 
      return totalIngested;
    } catch (err) {
      if (err.message === 'UNAUTHORIZED') {
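Two details in the batching above are easy to miss: `Math.max(allBuckets.length, 1)` forces at least one loop iteration so sessions still upload when there are no buckets, and sessions ride along only with the first batch (`i === 0`) so they are not re-sent. Condensed (names as in `runSync` above; not new behavior):

```js
for (let i = 0; i < Math.max(allBuckets.length, 1); i += BATCH_SIZE) {
  const batch = allBuckets.slice(i, i + BATCH_SIZE);        // [] when only sessions exist
  const batchSessions = i === 0 ? allSessions : undefined;  // sessions on batch 0 only
  await ingest(apiUrl, config.apiKey, batch, {}, batchSessions);
}
```
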
@@ -28,6 +28,16 @@ export const TOOLS = [
      id: 'openclaw',
      dataDir: join(homedir(), '.openclaw', 'agents'),
    },
+   {
+     name: 'Qwen Code',
+     id: 'qwen-code',
+     dataDir: join(homedir(), '.qwen', 'tmp'),
+   },
+   {
+     name: 'Kimi Code',
+     id: 'kimi-code',
+     dataDir: join(homedir(), '.kimi', 'sessions'),
+   },
  ];
  
  export function detectInstalledTools() {
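The body of `detectInstalledTools` is not part of this diff; given the `TOOLS` entries above, a plausible minimal implementation would simply probe each `dataDir` (a hypothetical sketch, not the package's actual code):

```js
import { existsSync } from 'node:fs';

// Hypothetical: a tool counts as installed when its data directory exists.
export function detectInstalledTools() {
  return TOOLS.filter((tool) => existsSync(tool.dataDir));
}
```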