tokentracker-cli 0.10.1 → 0.10.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "tokentracker-cli",
-  "version": "0.10.1",
+  "version": "0.10.2",
   "description": "Token usage tracker for AI agent CLIs (Claude Code, Codex, Cursor, Gemini, Kiro, OpenCode, OpenClaw, Every Code, Hermes, GitHub Copilot, Kimi Code, CodeBuddy, oh-my-pi, pi, Craft Agents)",
   "main": "src/cli.js",
   "bin": {
package/src/commands/sync.js
CHANGED

@@ -69,9 +69,21 @@ const CLAUDE_MEM_OBSERVER_REINCLUDE_KEY = "claudeMemObserverReinclude_2026_05_v3
 const CLAUDE_MEM_OBSERVER_PATH_SEGMENT = "--claude-mem-observer-sessions";
 // v1 had a cursor-format bug (wrote plain integer instead of {inode, offset,
 // updatedAt}), which made parseClaudeIncremental reread every jsonl from
-// byte 0 on the next sync and double everything. v2
-//
-
+// byte 0 on the next sync and double everything. v2 fixed the format.
+// v3 fixes two latent issues caught by adversarial review:
+// (a) v2 wrote `cursors.hourly.groupQueued[claude|<hour>]` for every
+//     repaired bucket. enqueueTouchedBuckets uses presence of that key
+//     as the legacy-group marker, so any later sync that touched a
+//     claude hour (even just a user-message conv-count++) would re-emit
+//     the entire hour as one aggregate row under model=DEFAULT_MODEL,
+//     causing a different inflation path. v3 leaves groupQueued alone.
+// (b) v2 only repaired the main queue.jsonl. project.queue.jsonl still
+//     carried historical claude-mem observer rows (project_key=
+//     "claude-mem/observer-sessions") and the project totals on the
+//     Project Usage panel stayed inflated. v3 drops every claude /
+//     claude-mem row from project.queue.jsonl too, and resets the
+//     matching cursors.projectHourly + project.queue.state offset.
+const CLAUDE_GROUND_TRUTH_REPAIR_KEY = "claudeGroundTruthRepair_2026_05_v3";
 
 async function cmdSync(argv) {
   const opts = parseArgs(argv);
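The v1-vs-v2 cursor bug described in the comment above comes down to the shape persisted per jsonl file. A hedged illustration of the two shapes, with made-up values (only the field names inode, offset, and updatedAt come from the comment; everything else is illustrative):

    // v1 persisted only a bare byte offset; per the comment above,
    // parseClaudeIncremental could not resume from it and re-read every jsonl
    // from byte 0 on the next sync.
    const v1Cursor = 184320;
    // The fixed cursor shape the comment refers to. Values are illustrative.
    const fixedCursor = {
      inode: 9175043,
      offset: 184320,
      updatedAt: "2026-05-12T08:00:00.000Z",
    };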
@@ -187,7 +199,13 @@ async function cmdSync(argv) {
 
   const claudeFiles = await listClaudeProjectFiles(claudeProjectsDir);
   await reincludeClaudeMemObserverFiles({ cursors, claudeFiles, queuePath });
-  await repairClaudeQueueFromGroundTruth({
+  await repairClaudeQueueFromGroundTruth({
+    cursors,
+    queuePath,
+    queueStatePath,
+    projectQueuePath,
+    projectQueueStatePath,
+  });
   let claudeResult = { filesProcessed: 0, eventsAggregated: 0, bucketsQueued: 0 };
   if (claudeFiles.length > 0) {
     if (progress?.enabled) {
@@ -1216,7 +1234,13 @@ async function migrateRolloutCumulativeDeltaBuckets({ cursors, queuePath, rollou
 // in sync, and reset the cloud upload offset so the corrected rows actually
 // reach the cloud (the ingest endpoint upserts by (source, model,
 // hour_start), so re-uploading other sources is idempotent).
-async function repairClaudeQueueFromGroundTruth({
+async function repairClaudeQueueFromGroundTruth({
+  cursors,
+  queuePath,
+  queueStatePath = null,
+  projectQueuePath = null,
+  projectQueueStatePath = null,
+}) {
   if (!cursors || typeof cursors !== "object") return false;
   const migrations = (cursors.migrations ||= {});
   if (migrations[CLAUDE_GROUND_TRUTH_REPAIR_KEY]) return false;
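The three context lines closing this hunk are the one-shot guard: the repair can be called on every sync, and it bails out as soon as the version-suffixed key is present under cursors.migrations. A minimal sketch of that check-and-record pattern (the helper name and the stats argument are illustrative, not part of the package):

    // Sketch of the guard-and-record pattern around the repair; not the
    // package's actual code.
    const REPAIR_KEY = "claudeGroundTruthRepair_2026_05_v3";

    async function runRepairOnce(cursors, doRepair) {
      if (!cursors || typeof cursors !== "object") return false;
      const migrations = (cursors.migrations ||= {});
      if (migrations[REPAIR_KEY]) return false;   // already applied; skip forever after
      const stats = await doRepair();             // perform the queue/cursor surgery
      migrations[REPAIR_KEY] = {
        appliedAt: new Date().toISOString(),
        ...stats,                                 // e.g. bucketsWritten, projectRowsRemoved
      };
      return true;
    }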
@@ -1296,12 +1320,22 @@ async function repairClaudeQueueFromGroundTruth({ cursors, queuePath, queueState
       bucketsCleared += 1;
     }
   }
+  // Clear stale claude entries from groupQueued (left over by v2 repair).
+  // After v3 we never repopulate it for claude, so nothing should be added
+  // back during the per-model write loop below.
   for (const k of Object.keys(hourly.groupQueued)) {
     if (k.startsWith("claude|") || k.startsWith("claude-mem|")) {
       delete hourly.groupQueued[k];
     }
   }
 
+  // Per-model claude buckets: set queuedKey but DO NOT touch
+  // hourly.groupQueued. groupQueued is used by enqueueTouchedBuckets to
+  // mark a (source, hour) as legacy-aggregate state; writing claude hours
+  // there would force every later sync to re-emit the hour as a single
+  // model=DEFAULT_MODEL aggregate row instead of touching only the bucket
+  // that actually changed. The original v2 release did write groupQueued
+  // here and was the cause of an unknown-bucket inflation regression.
   for (const r of rows) {
     const totals = {
       input_tokens: r.input_tokens,
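The two comment blocks added in this hunk carry the core of the v3 change: enqueueTouchedBuckets treats the mere presence of a `<source>|<hour>` key in hourly.groupQueued as "this hour is tracked as a single legacy aggregate". Its real body is not part of this diff; a sketch of the branch the comment describes, with DEFAULT_MODEL and the row shape assumed for illustration:

    // Illustrative only -- the package's actual enqueueTouchedBuckets is not
    // shown in this diff; DEFAULT_MODEL and the row shape are assumptions.
    const DEFAULT_MODEL = "unknown";

    function enqueueTouchedBucketsSketch(hourly, touchedBuckets) {
      const rows = [];
      for (const b of touchedBuckets) {           // b: { source, hour_start, model, totals }
        const legacyKey = `${b.source}|${b.hour_start}`;
        if (legacyKey in (hourly.groupQueued || {})) {
          // Legacy-aggregate path: the hour is re-emitted as one row under
          // DEFAULT_MODEL (the real code would sum the whole hour's totals
          // here; b.totals stands in). Writing groupQueued for repaired
          // claude hours, as v2 did, forces every later sync down this branch.
          rows.push({ source: b.source, hour_start: b.hour_start, model: DEFAULT_MODEL, ...b.totals });
        } else {
          // Per-model path: only the bucket that actually changed is queued.
          rows.push({ source: b.source, hour_start: b.hour_start, model: b.model, ...b.totals });
        }
      }
      return rows;
    }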
@@ -1320,7 +1354,6 @@ async function repairClaudeQueueFromGroundTruth({ cursors, queuePath, queueState
       source: "claude",
       hour_start: r.hour_start,
     };
-    hourly.groupQueued[groupBucketKey("claude", r.hour_start)] = totalsKey(totals);
   }
 
   // 3. Reset per-file cursors so future incremental sync only reads genuinely
@@ -1361,10 +1394,78 @@ async function repairClaudeQueueFromGroundTruth({ cursors, queuePath, queueState
     }
     uploadState.offset = 0;
     uploadState.updatedAt = new Date().toISOString();
-    uploadState.note = "
+    uploadState.note = "reset_after_claude_repair_2026_05_v3";
     await fs.writeFile(queueStatePath, JSON.stringify(uploadState));
   }
 
+  // 5. Repair project queue. Historical claude rows in project.queue.jsonl
+  //    were uniformly mis-attributed to project_key=
+  //    "claude-mem/observer-sessions" (left over from the observer
+  //    relabel migration). We can't reconstruct the true cwd-based
+  //    project_key for each historical message reliably, so we drop every
+  //    claude/claude-mem row from project.queue.jsonl and reset the
+  //    matching cursors.projectHourly state. New claude usage will
+  //    accumulate to the correct cwd-derived project_key going forward.
+  let projectRowsRemoved = 0;
+  let projectBucketsCleared = 0;
+  if (typeof projectQueuePath === "string" && projectQueuePath) {
+    let projRaw = "";
+    try {
+      projRaw = await fs.readFile(projectQueuePath, "utf8");
+    } catch (e) {
+      if (e?.code !== "ENOENT") throw e;
+    }
+    if (projRaw) {
+      const projKept = [];
+      for (const line of projRaw.split("\n")) {
+        if (!line.trim()) continue;
+        let row;
+        try {
+          row = JSON.parse(line);
+        } catch (_e) {
+          projKept.push(line);
+          continue;
+        }
+        if (row?.source === "claude" || row?.source === "claude-mem") {
+          projectRowsRemoved += 1;
+          continue;
+        }
+        projKept.push(line);
+      }
+      await ensureDir(path.dirname(projectQueuePath));
+      const tmp = `${projectQueuePath}.tmp.${process.pid}.${Date.now()}`;
+      await fs.writeFile(tmp, projKept.join("\n") + "\n", "utf8");
+      await fs.rename(tmp, projectQueuePath);
+    }
+
+    // Clear matching projectHourly state so the claude project buckets
+    // start fresh.
+    const projHourly = (cursors.projectHourly ||= { buckets: {} });
+    projHourly.buckets ||= {};
+    for (const k of Object.keys(projHourly.buckets)) {
+      const v = projHourly.buckets[k];
+      const src = v?.source || "";
+      if (src === "claude" || src === "claude-mem") {
+        delete projHourly.buckets[k];
+        projectBucketsCleared += 1;
+      }
+    }
+
+    // Reset project upload offset.
+    if (typeof projectQueueStatePath === "string" && projectQueueStatePath) {
+      let st = {};
+      try {
+        st = JSON.parse(await fs.readFile(projectQueueStatePath, "utf8"));
+      } catch (_e) {
+        st = {};
+      }
+      st.offset = 0;
+      st.updatedAt = new Date().toISOString();
+      st.note = "reset_after_claude_repair_2026_05_v3";
+      await fs.writeFile(projectQueueStatePath, JSON.stringify(st));
+    }
+  }
+
   migrations[CLAUDE_GROUND_TRUTH_REPAIR_KEY] = {
     appliedAt: new Date().toISOString(),
     bucketsWritten: rows.length,
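The step-5 block above rewrites project.queue.jsonl via a temp file plus rename, so an interrupted repair cannot leave a truncated queue behind. To spot-check the result locally, a throwaway helper along these lines (not part of the package; you supply the same project.queue.jsonl path the CLI uses) should report zero rows once the repair has run:

    // Not part of tokentracker-cli -- a local spot-check of the repair's effect.
    const fs = require("node:fs/promises");

    async function countClaudeProjectRows(projectQueuePath) {
      let raw = "";
      try {
        raw = await fs.readFile(projectQueuePath, "utf8");
      } catch (e) {
        if (e?.code !== "ENOENT") throw e;
        return 0;                                 // no queue file yet
      }
      let count = 0;
      for (const line of raw.split("\n")) {
        if (!line.trim()) continue;
        let row;
        try {
          row = JSON.parse(line);
        } catch (_e) {
          continue;                               // malformed lines are left in place by the repair
        }
        if (row?.source === "claude" || row?.source === "claude-mem") count += 1;
      }
      return count;                               // expected: 0 after the v3 repair applies
    }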
@@ -1373,6 +1474,8 @@ async function repairClaudeQueueFromGroundTruth({ cursors, queuePath, queueState
     filesReset,
     hashesRetained: seenHashes.length,
     uploadOffsetReset: typeof queueStatePath === "string" && !!queueStatePath,
+    projectRowsRemoved,
+    projectBucketsCleared,
   };
   return true;
 }
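Once the repair has applied, the record written above is what the guard checks on every later sync. Going only by the fields visible in this diff (the hunk elides a couple of lines between bucketsWritten and filesReset), the stored entry would look roughly like the following, with made-up counts:

    // Illustrative shape only: counts are invented, and fields elided by the
    // diff between bucketsWritten and filesReset are omitted here as well.
    cursors.migrations["claudeGroundTruthRepair_2026_05_v3"] = {
      appliedAt: "2026-05-12T08:00:00.000Z",
      bucketsWritten: 96,
      filesReset: 14,
      hashesRetained: 2310,
      uploadOffsetReset: true,
      projectRowsRemoved: 407,
      projectBucketsCleared: 18,
    };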
|