claude-setup 1.1.4 → 1.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/tokens.js CHANGED
@@ -1,19 +1,75 @@
1
1
  /**
2
2
  * Token cost tracking — visibility into what every command costs.
3
3
  *
4
- * Zero extra API calls. Token count is computed from content length.
5
- * Estimates are based on ~4 chars per token approximation.
4
+ * Two data sources:
5
+ * 1. Estimates: computed from content length (~4 chars/token)
6
+ * 2. Real usage: parsed from Claude Code JSONL session transcripts
7
+ * stored at ~/.config/claude/projects/ and ~/.claude/projects/
6
8
  *
7
- * Supports all pricing models:
8
- * - Opus: $15/M input
9
- * - Sonnet: $3/M input
10
- * - Haiku: $0.25/M input
9
  + * Pricing engine inspired by ccusage (github.com/ryoppippi/ccusage):
10
+ * - Per-model pricing with tiered rates (200k token threshold)
11
+ * - Cache creation/read tokens tracked separately
12
+ * - Per-session, per-project, per-model breakdowns
13
+ *
14
+ * Current pricing (per million tokens):
15
+ * Opus 4.6: $15 input / $75 output / $18.75 cache-write / $1.50 cache-read
16
+ * Sonnet 4.6: $3 input / $15 output / $3.75 cache-write / $0.30 cache-read
17
+ * Haiku 4.5: $0.80 input / $4 output / $1.00 cache-write / $0.08 cache-read
11
18
  */
12
- // Pricing per million input tokens (current as of 2025)
19
+ import { join } from "path";
20
+ import { existsSync, readFileSync, readdirSync } from "fs";
21
+ import { homedir } from "os";
22
+ const TIERED_THRESHOLD = 200_000;
23
+ const MODEL_PRICING = {
24
+ // Opus 4.6
25
+ "opus": {
26
+ input: 15e-6, output: 75e-6, cacheWrite: 18.75e-6, cacheRead: 1.5e-6,
27
+ inputAbove200k: 30e-6, outputAbove200k: 112.5e-6,
28
+ cacheWriteAbove200k: 37.5e-6, cacheReadAbove200k: 3e-6,
29
+ },
30
+ // Sonnet 4.6
31
+ "sonnet": {
32
+ input: 3e-6, output: 15e-6, cacheWrite: 3.75e-6, cacheRead: 0.3e-6,
33
+ inputAbove200k: 6e-6, outputAbove200k: 22.5e-6,
34
+ cacheWriteAbove200k: 7.5e-6, cacheReadAbove200k: 0.6e-6,
35
+ },
36
+ // Haiku 4.5
37
+ "haiku": {
38
+ input: 0.8e-6, output: 4e-6, cacheWrite: 1e-6, cacheRead: 0.08e-6,
39
+ },
40
+ };
41
+ /** Match a model name string to a pricing tier */
42
+ function matchModelPricing(modelName) {
43
+ const m = modelName.toLowerCase();
44
+ if (m.includes("opus"))
45
+ return MODEL_PRICING["opus"];
46
+ if (m.includes("haiku"))
47
+ return MODEL_PRICING["haiku"];
48
+ return MODEL_PRICING["sonnet"]; // default
49
+ }
50
+ /** Calculate tiered cost (like ccusage's calculateTieredCost) */
51
+ function tieredCost(tokens, baseRate, aboveRate, threshold = TIERED_THRESHOLD) {
52
+ if (tokens <= 0)
53
+ return 0;
54
+ if (tokens > threshold && aboveRate !== undefined) {
55
+ return Math.min(tokens, threshold) * baseRate + Math.max(0, tokens - threshold) * aboveRate;
56
+ }
57
+ return tokens * baseRate;
58
+ }
59
+ /** Calculate real cost for a set of token counts using a specific model */
60
+ export function calculateRealCost(inputTokens, outputTokens, cacheCreate, cacheRead, modelName) {
61
+ const p = matchModelPricing(modelName);
62
+ return (tieredCost(inputTokens, p.input, p.inputAbove200k) +
63
+ tieredCost(outputTokens, p.output, p.outputAbove200k) +
64
+ tieredCost(cacheCreate, p.cacheWrite, p.cacheWriteAbove200k) +
65
+ tieredCost(cacheRead, p.cacheRead, p.cacheReadAbove200k));
66
+ }
67
+ // ── Legacy estimation (for command file size predictions) ────────────
68
+ // Pricing per million input tokens (for quick estimates)
13
69
  const PRICING_PER_M_INPUT = {
14
70
  opus: 15.0,
15
71
  sonnet: 3.0,
16
- haiku: 0.25,
72
+ haiku: 0.80,
17
73
  };
18
74
  export function estimateTokens(content) {
19
75
  return Math.ceil(content.length / 4);
@@ -25,8 +81,19 @@ export function estimateCost(tokens) {
25
81
  haiku: (tokens / 1_000_000) * PRICING_PER_M_INPUT.haiku,
26
82
  };
27
83
  }
84
+ function fmtVal(v) {
85
+ if (v === 0)
86
+ return "$0.000000";
87
+ if (v < 0.000001)
88
+ return `$${v.toFixed(8)}`;
89
+ if (v < 0.0001)
90
+ return `$${v.toFixed(6)}`;
91
+ if (v < 0.01)
92
+ return `$${v.toFixed(5)}`;
93
+ return `$${v.toFixed(4)}`;
94
+ }
28
95
  export function formatCost(cost) {
29
- return `Opus $${cost.opus.toFixed(4)} | Sonnet $${cost.sonnet.toFixed(4)} | Haiku $${cost.haiku.toFixed(4)}`;
96
+ return `Opus ${fmtVal(cost.opus)} | Sonnet ${fmtVal(cost.sonnet)} | Haiku ${fmtVal(cost.haiku)}`;
30
97
  }
31
98
  /**
32
99
  * Build a detailed token estimate with per-section breakdown.
@@ -104,6 +171,472 @@ export function generateHints(runs, currentTokens, budget) {
104
171
  }
105
172
  return hints;
106
173
  }
174
+ export function readRealTokenUsage(cwd) {
175
+ const p = join(cwd, ".claude", "token-usage.json");
176
+ if (!existsSync(p))
177
+ return [];
178
+ try {
179
+ return JSON.parse(readFileSync(p, "utf8"));
180
+ }
181
+ catch {
182
+ return [];
183
+ }
184
+ }
185
+ /**
186
+ * Find Claude data directories — works on every OS.
187
+ * Checks (in order):
188
+ * 1. CLAUDE_CONFIG_DIR env var (comma-separated, custom override)
189
+ * 2. XDG_CONFIG_HOME/claude (Linux/macOS new default)
190
+ * 3. ~/.config/claude (Linux/macOS fallback)
191
+ * 4. ~/Library/Application Support/claude (macOS alternate)
192
+ * 5. %APPDATA%/claude (Windows alternate)
193
+ * 6. ~/.claude (old default, all platforms)
194
+ */
195
+ function getClaudeDataDirs() {
196
+ const dirs = [];
197
+ const seen = new Set();
198
+ const home = homedir();
199
+ function tryAdd(dir) {
200
+ const resolved = join(dir); // normalize
201
+ if (seen.has(resolved))
202
+ return;
203
+ seen.add(resolved);
204
+ if (existsSync(join(resolved, "projects")))
205
+ dirs.push(resolved);
206
+ }
207
+ // 1. Custom env var
208
+ const envDirs = process.env.CLAUDE_CONFIG_DIR;
209
+ if (envDirs) {
210
+ for (const d of envDirs.split(",").map(s => s.trim()).filter(Boolean)) {
211
+ tryAdd(d);
212
+ }
213
+ }
214
+ // 2. XDG config
215
+ const xdgConfig = process.env.XDG_CONFIG_HOME ?? join(home, ".config");
216
+ tryAdd(join(xdgConfig, "claude"));
217
+ // 3. macOS Application Support
218
+ tryAdd(join(home, "Library", "Application Support", "claude"));
219
+ // 4. Windows APPDATA
220
+ if (process.env.APPDATA)
221
+ tryAdd(join(process.env.APPDATA, "claude"));
222
+ // 5. Old default
223
+ tryAdd(join(home, ".claude"));
224
+ return dirs;
225
+ }
226
+ /** Parse a single JSONL line into usage data */
227
+ function parseJsonlLine(line) {
228
+ try {
229
+ const obj = JSON.parse(line);
230
+ const msg = obj?.message;
231
+ if (!msg?.usage)
232
+ return null;
233
+ const u = msg.usage;
234
+ // Skip synthetic/zero-usage entries (e.g. <synthetic> model with all-zero counts)
235
+ if ((u.input_tokens ?? 0) === 0 && (u.output_tokens ?? 0) === 0 &&
236
+ (u.cache_creation_input_tokens ?? 0) === 0 && (u.cache_read_input_tokens ?? 0) === 0)
237
+ return null;
238
+ return {
239
+ model: msg.model ?? "unknown",
240
+ inputTokens: u.input_tokens ?? 0,
241
+ outputTokens: u.output_tokens ?? 0,
242
+ cacheCreate: u.cache_creation_input_tokens ?? 0,
243
+ cacheRead: u.cache_read_input_tokens ?? 0,
244
+ costUSD: obj.costUSD,
245
+ timestamp: obj.timestamp,
246
+ messageId: msg.id,
247
+ requestId: obj.requestId,
248
+ };
249
+ }
250
+ catch {
251
+ return null;
252
+ }
253
+ }
254
+ /** Extract a human-readable project name from a CWD path */
255
+ function extractProjectName(cwd) {
256
+ const parts = cwd.replace(/[\\/]/g, "/").split("/").filter(Boolean);
257
+ return parts[parts.length - 1] ?? cwd;
258
+ }
259
+ /** Aggregate entries by model (like ccusage's aggregateByModel) */
260
+ function aggregateByModel(entries) {
261
+ const agg = new Map();
262
+ for (const e of entries) {
263
+ const existing = agg.get(e.model) ?? { inputTokens: 0, outputTokens: 0, cacheCreateTokens: 0, cacheReadTokens: 0, cost: 0 };
264
+ existing.inputTokens += e.inputTokens;
265
+ existing.outputTokens += e.outputTokens;
266
+ existing.cacheCreateTokens += e.cacheCreate;
267
+ existing.cacheReadTokens += e.cacheRead;
268
+ existing.cost += e.cost;
269
+ agg.set(e.model, existing);
270
+ }
271
+ return [...agg.entries()].map(([model, stats]) => ({
272
+ model,
273
+ ...stats,
274
+ totalTokens: stats.inputTokens + stats.outputTokens + stats.cacheCreateTokens + stats.cacheReadTokens,
275
+ }));
276
+ }
277
+ /**
278
+ * Read all JSONL session files for a given project directory.
279
+ * Scans Claude's data directories for matching project paths.
280
+ * Returns per-session summaries with per-model breakdowns.
281
+ */
282
+ export function readProjectSessions(projectCwd) {
283
+ const claudeDirs = getClaudeDataDirs();
284
+ if (claudeDirs.length === 0)
285
+ return [];
286
+ const sessions = [];
287
+ const seen = new Set(); // dedup by messageId:requestId
288
+ for (const claudeDir of claudeDirs) {
289
+ const projectsDir = join(claudeDir, "projects");
290
+ if (!existsSync(projectsDir))
291
+ continue;
292
+ // Encode the CWD the way Claude Code does, then exact-match against project dirs.
293
+ // This is the only reliable cross-platform approach — decoding is lossy when
294
+ // folder names contain hyphens (e.g. "Claude-code-documentation").
295
+ // Windows: C:\Users\ok\Desktop\my-app → C--Users-ok-Desktop-my-app
296
+ // Unix: /Users/ok/dev/my-app → -Users-ok-dev-my-app
297
+ const encodedCwd = projectCwd
298
+ .replace(/\\/g, "/") // normalize backslashes to forward slashes
299
+ .replace(/:\//g, "--") // drive letter: C:/ → C--
300
+ .replace(/\//g, "-"); // remaining slashes → dashes
301
+ let targetDir = null;
302
+ try {
303
+ for (const entry of readdirSync(projectsDir)) {
304
+ // Case-insensitive compare handles Windows where CWDs may differ in case
305
+ if (entry.toLowerCase() === encodedCwd.toLowerCase()) {
306
+ targetDir = join(projectsDir, entry);
307
+ break;
308
+ }
309
+ }
310
+ }
311
+ catch {
312
+ continue;
313
+ }
314
+ if (!targetDir || !existsSync(targetDir))
315
+ continue;
316
+ // Read all .jsonl files in this project dir
317
+ try {
318
+ const files = readdirSync(targetDir).filter(f => f.endsWith(".jsonl"));
319
+ for (const file of files) {
320
+ const filePath = join(targetDir, file);
321
+ const sessionId = file.replace(".jsonl", "");
322
+ let content;
323
+ try {
324
+ content = readFileSync(filePath, "utf8");
325
+ }
326
+ catch {
327
+ continue;
328
+ }
329
+ const entries = [];
330
+ let latestTimestamp = "";
331
+ // Helper to process lines from any JSONL source into entries
332
+ const processLines = (text) => {
333
+ for (const line of text.split("\n")) {
334
+ if (!line.trim())
335
+ continue;
336
+ const parsed = parseJsonlLine(line);
337
+ if (!parsed)
338
+ continue;
339
+ // Dedup by messageId:requestId
340
+ if (parsed.messageId && parsed.requestId) {
341
+ const key = `${parsed.messageId}:${parsed.requestId}`;
342
+ if (seen.has(key))
343
+ continue;
344
+ seen.add(key);
345
+ }
346
+ const cost = parsed.costUSD ?? calculateRealCost(parsed.inputTokens, parsed.outputTokens, parsed.cacheCreate, parsed.cacheRead, parsed.model);
347
+ entries.push({
348
+ model: parsed.model,
349
+ inputTokens: parsed.inputTokens,
350
+ outputTokens: parsed.outputTokens,
351
+ cacheCreate: parsed.cacheCreate,
352
+ cacheRead: parsed.cacheRead,
353
+ cost,
354
+ });
355
+ if (parsed.timestamp && parsed.timestamp > latestTimestamp) {
356
+ latestTimestamp = parsed.timestamp;
357
+ }
358
+ }
359
+ };
360
+ // Read main session JSONL
361
+ processLines(content);
362
+ // Also read subagent JSONL files (stored in <sessionId>/subagents/*.jsonl)
363
+ // These track token usage from Agent tool calls (subagents use separate API sessions)
364
+ const subagentDir = join(targetDir, sessionId, "subagents");
365
+ if (existsSync(subagentDir)) {
366
+ try {
367
+ const subFiles = readdirSync(subagentDir).filter(f => f.endsWith(".jsonl"));
368
+ for (const sf of subFiles) {
369
+ try {
370
+ const subContent = readFileSync(join(subagentDir, sf), "utf8");
371
+ processLines(subContent);
372
+ }
373
+ catch { /* skip unreadable subagent file */ }
374
+ }
375
+ }
376
+ catch { /* skip if subagents dir unreadable */ }
377
+ }
378
+ if (entries.length === 0)
379
+ continue;
380
+ const models = aggregateByModel(entries);
381
+ const totals = entries.reduce((acc, e) => ({
382
+ inputTokens: acc.inputTokens + e.inputTokens,
383
+ outputTokens: acc.outputTokens + e.outputTokens,
384
+ cacheCreate: acc.cacheCreate + e.cacheCreate,
385
+ cacheRead: acc.cacheRead + e.cacheRead,
386
+ cost: acc.cost + e.cost,
387
+ }), { inputTokens: 0, outputTokens: 0, cacheCreate: 0, cacheRead: 0, cost: 0 });
388
+ sessions.push({
389
+ sessionId,
390
+ project: extractProjectName(projectCwd),
391
+ timestamp: latestTimestamp,
392
+ models,
393
+ inputTokens: totals.inputTokens,
394
+ outputTokens: totals.outputTokens,
395
+ cacheCreateTokens: totals.cacheCreate,
396
+ cacheReadTokens: totals.cacheRead,
397
+ totalTokens: totals.inputTokens + totals.outputTokens + totals.cacheCreate + totals.cacheRead,
398
+ totalCost: totals.cost,
399
+ });
400
+ }
401
+ }
402
+ catch { /* skip */ }
403
+ }
404
+ // Sort by timestamp descending
405
+ sessions.sort((a, b) => b.timestamp.localeCompare(a.timestamp));
406
+ return sessions;
407
+ }
408
+ /**
409
+ * Aggregate all sessions for a project into a single summary.
410
+ */
411
+ export function getProjectUsageSummary(projectCwd) {
412
+ const sessions = readProjectSessions(projectCwd);
413
+ if (sessions.length === 0)
414
+ return null;
415
+ // Merge all model breakdowns across sessions
416
+ const allEntries = [];
417
+ for (const s of sessions) {
418
+ for (const m of s.models) {
419
+ allEntries.push({
420
+ model: m.model,
421
+ inputTokens: m.inputTokens,
422
+ outputTokens: m.outputTokens,
423
+ cacheCreate: m.cacheCreateTokens,
424
+ cacheRead: m.cacheReadTokens,
425
+ cost: m.cost,
426
+ });
427
+ }
428
+ }
429
+ const models = aggregateByModel(allEntries);
430
+ const totals = sessions.reduce((acc, s) => ({
431
+ inputTokens: acc.inputTokens + s.inputTokens,
432
+ outputTokens: acc.outputTokens + s.outputTokens,
433
+ cacheCreate: acc.cacheCreate + s.cacheCreateTokens,
434
+ cacheRead: acc.cacheRead + s.cacheReadTokens,
435
+ cost: acc.cost + s.totalCost,
436
+ }), { inputTokens: 0, outputTokens: 0, cacheCreate: 0, cacheRead: 0, cost: 0 });
437
+ return {
438
+ project: sessions[0].project,
439
+ sessions: sessions.length,
440
+ models,
441
+ inputTokens: totals.inputTokens,
442
+ outputTokens: totals.outputTokens,
443
+ cacheCreateTokens: totals.cacheCreate,
444
+ cacheReadTokens: totals.cacheRead,
445
+ totalTokens: totals.inputTokens + totals.outputTokens + totals.cacheCreate + totals.cacheRead,
446
+ totalCost: totals.cost,
447
+ };
448
+ }
449
+ export function getTokenHookScript() {
450
+ return `#!/usr/bin/env node
451
+ 'use strict';
452
+ const fs = require('fs');
453
+ const path = require('path');
454
+
455
+ // Tiered pricing: tokens above 200k threshold are charged at a higher rate
456
+ const THRESHOLD = 200000;
457
+ function tieredCost(tokens, baseRate, aboveRate) {
458
+ if (tokens <= 0) return 0;
459
+ if (tokens > THRESHOLD && aboveRate) {
460
+ return Math.min(tokens, THRESHOLD) * baseRate + Math.max(0, tokens - THRESHOLD) * aboveRate;
461
+ }
462
+ return tokens * baseRate;
463
+ }
464
+
465
+ // Model pricing per token (ccusage-style, supports tiered pricing)
466
+ const PRICING = {
467
+ opus: { input: 15e-6, output: 75e-6, cacheWrite: 18.75e-6, cacheRead: 1.5e-6,
468
+ inputAbove200k: 30e-6, outputAbove200k: 112.5e-6, cacheWriteAbove200k: 37.5e-6, cacheReadAbove200k: 3e-6 },
469
+ sonnet: { input: 3e-6, output: 15e-6, cacheWrite: 3.75e-6, cacheRead: 0.3e-6,
470
+ inputAbove200k: 6e-6, outputAbove200k: 22.5e-6, cacheWriteAbove200k: 7.5e-6, cacheReadAbove200k: 0.6e-6 },
471
+ haiku: { input: 0.8e-6, output: 4e-6, cacheWrite: 1e-6, cacheRead: 0.08e-6 },
472
+ };
473
+
474
+ function getPricing(modelName) {
475
+ const m = modelName.toLowerCase();
476
+ if (m.includes('opus')) return PRICING.opus;
477
+ if (m.includes('haiku')) return PRICING.haiku;
478
+ return PRICING.sonnet;
479
+ }
480
+
481
+ let input = '';
482
+ process.stdin.on('data', (d) => { input += d; });
483
+ process.stdin.on('end', () => {
484
+ try {
485
+ const event = JSON.parse(input);
486
+ const transcriptPath = event.transcript_path;
487
+ const sessionId = event.session_id || 'unknown';
488
+
489
+ if (!transcriptPath || !fs.existsSync(transcriptPath)) process.exit(0);
490
+
491
+ // Per-model aggregation (like ccusage's aggregateByModel)
492
+ const models = {};
493
+ const seen = new Set();
494
+
495
+ function processLines(text) {
496
+ for (const line of text.split('\\n')) {
497
+ if (!line.trim()) continue;
498
+ try {
499
+ const obj = JSON.parse(line);
500
+ const msg = obj.message;
501
+ if (!msg || !msg.usage) continue;
502
+ const u = msg.usage;
503
+ // Skip zero-usage entries (e.g. <synthetic>)
504
+ if (!u.input_tokens && !u.output_tokens && !u.cache_creation_input_tokens && !u.cache_read_input_tokens) continue;
505
+ // Dedup by messageId:requestId
506
+ const dedup = (msg.id || '') + ':' + (obj.requestId || '');
507
+ if (dedup !== ':' && seen.has(dedup)) continue;
508
+ if (dedup !== ':') seen.add(dedup);
509
+ const model = msg.model || 'unknown';
510
+ if (!models[model]) models[model] = { inputTokens: 0, outputTokens: 0, cacheCreate: 0, cacheRead: 0 };
511
+ models[model].inputTokens += u.input_tokens || 0;
512
+ models[model].outputTokens += u.output_tokens || 0;
513
+ models[model].cacheCreate += u.cache_creation_input_tokens || 0;
514
+ models[model].cacheRead += u.cache_read_input_tokens || 0;
515
+ } catch {}
516
+ }
517
+ }
518
+
519
+ // Read main session transcript
520
+ processLines(fs.readFileSync(transcriptPath, 'utf8'));
521
+
522
+ // Also read subagent JSONL files — subagents use separate API sessions
523
+ // stored at <transcriptPath without .jsonl>/subagents/*.jsonl
524
+ const sessionDir = transcriptPath.replace(/\\.jsonl$/, '');
525
+ const subagentDir = path.join(sessionDir, 'subagents');
526
+ if (fs.existsSync(subagentDir)) {
527
+ try {
528
+ const subFiles = fs.readdirSync(subagentDir).filter(f => f.endsWith('.jsonl'));
529
+ for (const sf of subFiles) {
530
+ try { processLines(fs.readFileSync(path.join(subagentDir, sf), 'utf8')); } catch {}
531
+ }
532
+ } catch {}
533
+ }
534
+
535
+ // Calculate cost per model with tiered pricing
536
+ let totalCost = 0;
537
+ let totalInput = 0, totalOutput = 0, totalCacheCreate = 0, totalCacheRead = 0;
538
+ const modelBreakdowns = [];
539
+ let primaryModel = 'unknown';
540
+ let maxTokens = 0;
541
+
542
+ for (const [model, t] of Object.entries(models)) {
543
+ const p = getPricing(model);
544
+ const cost = tieredCost(t.inputTokens, p.input, p.inputAbove200k)
545
+ + tieredCost(t.outputTokens, p.output, p.outputAbove200k)
546
+ + tieredCost(t.cacheCreate, p.cacheWrite, p.cacheWriteAbove200k)
547
+ + tieredCost(t.cacheRead, p.cacheRead, p.cacheReadAbove200k);
548
+ totalCost += cost;
549
+ totalInput += t.inputTokens;
550
+ totalOutput += t.outputTokens;
551
+ totalCacheCreate += t.cacheCreate;
552
+ totalCacheRead += t.cacheRead;
553
+ const total = t.inputTokens + t.outputTokens + t.cacheCreate + t.cacheRead;
554
+ if (total > maxTokens) { maxTokens = total; primaryModel = model; }
555
+ modelBreakdowns.push({ model, ...t, cost, totalTokens: total });
556
+ }
557
+
558
+ const record = {
559
+ sessionId,
560
+ timestamp: new Date().toISOString(),
561
+ model: primaryModel,
562
+ inputTokens: totalInput,
563
+ outputTokens: totalOutput,
564
+ cacheCreate: totalCacheCreate,
565
+ cacheRead: totalCacheRead,
566
+ cost: totalCost,
567
+ modelBreakdowns
568
+ };
569
+
570
+ const usageFile = path.join(process.cwd(), '.claude', 'token-usage.json');
571
+ let records = [];
572
+ try { records = JSON.parse(fs.readFileSync(usageFile, 'utf8')); } catch {}
573
+ const idx = records.findIndex(r => r.sessionId === sessionId);
574
+ if (idx >= 0) { records[idx] = record; } else { records.push(record); }
575
+ if (records.length > 100) records = records.slice(-100);
576
+ const dir = path.dirname(usageFile);
577
+ if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
578
+ fs.writeFileSync(usageFile, JSON.stringify(records, null, 2));
579
+ } catch { process.exit(0); }
580
+ });
581
+ `;
582
+ }
583
+ /**
584
+ * Format a real-cost summary. Prefers JSONL transcript data (ccusage-style),
585
+ * falls back to Stop hook data if no JSONL sessions found.
586
+ * Returns null if no real data is available from either source.
587
+ */
588
+ export function formatRealCostSummary(cwd) {
589
+ // Try JSONL transcripts first (most accurate — per-model, cache-aware)
590
+ const projectSummary = getProjectUsageSummary(cwd);
591
+ if (projectSummary && projectSummary.totalTokens > 0) {
592
+ const lines = [];
593
+ lines.push(` Real usage (${projectSummary.sessions} session${projectSummary.sessions > 1 ? "s" : ""}, from JSONL transcripts):`);
594
+ lines.push(` Total cost : $${projectSummary.totalCost.toFixed(6)}`);
595
+ lines.push(` Input tokens : ${projectSummary.inputTokens.toLocaleString()}`);
596
+ lines.push(` Output tokens: ${projectSummary.outputTokens.toLocaleString()}`);
597
+ if (projectSummary.cacheCreateTokens > 0 || projectSummary.cacheReadTokens > 0) {
598
+ lines.push(` Cache write : ${projectSummary.cacheCreateTokens.toLocaleString()}`);
599
+ lines.push(` Cache read : ${projectSummary.cacheReadTokens.toLocaleString()}`);
600
+ }
601
+ if (projectSummary.models.length > 0) {
602
+ lines.push(` Per model:`);
603
+ for (const m of projectSummary.models.sort((a, b) => b.cost - a.cost)) {
604
+ const shortName = m.model.replace(/^claude-/, "").replace(/-\d{8}$/, "");
605
+ lines.push(` ${shortName.padEnd(14)} ${m.totalTokens.toLocaleString().padStart(12)} tokens $${m.cost.toFixed(6)}`);
606
+ }
607
+ }
608
+ return lines.join("\n");
609
+ }
610
+ // Fallback: Stop hook data
611
+ const records = readRealTokenUsage(cwd);
612
+ if (records.length === 0)
613
+ return null;
614
+ let totalCost = 0;
615
+ let totalInput = 0;
616
+ let totalOutput = 0;
617
+ let totalCacheCreate = 0;
618
+ let totalCacheRead = 0;
619
+ for (const r of records) {
620
+ totalCost += r.cost;
621
+ totalInput += r.inputTokens;
622
+ totalOutput += r.outputTokens;
623
+ totalCacheCreate += r.cacheCreate;
624
+ totalCacheRead += r.cacheRead;
625
+ }
626
+ const last = records[records.length - 1];
627
+ const lastDate = new Date(last.timestamp).toLocaleString();
628
+ const lines = [];
629
+ lines.push(` Real usage (${records.length} session${records.length > 1 ? "s" : ""} tracked):`);
630
+ lines.push(` Total cost : $${totalCost.toFixed(6)}`);
631
+ lines.push(` Input tokens : ${totalInput.toLocaleString()}`);
632
+ lines.push(` Output tokens: ${totalOutput.toLocaleString()}`);
633
+ if (totalCacheCreate > 0 || totalCacheRead > 0) {
634
+ lines.push(` Cache write : ${totalCacheCreate.toLocaleString()}`);
635
+ lines.push(` Cache read : ${totalCacheRead.toLocaleString()}`);
636
+ }
637
+ lines.push(` Last session : ${lastDate} (${last.model})`);
638
+ return lines.join("\n");
639
+ }
107
640
  /**
108
641
  * Compute cumulative stats for status dashboard.
109
642
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "claude-setup",
3
- "version": "1.1.4",
3
+ "version": "1.1.5",
4
4
  "description": "Setup layer for Claude Code — reads your project, writes command files, Claude Code does the rest",
5
5
  "type": "module",
6
6
  "bin": {
@@ -3,22 +3,64 @@
3
3
 
4
4
  This is a new project with no source files yet.
5
5
 
6
- Before setting up anything, ask the developer these three questions in ONE message:
6
  + ## Step 1 — Idea discovery
7
7
 
8
- 1. What are you building? (describe the product in one sentence not the tech)
9
- 2. Do you have a preferred language or runtime? (completely fine if not)
10
- 3. Are there external services you know you'll use? (fine to say "not sure yet")
8
+ Start a conversation with the developer. Ask ONE focused question to understand what they're building.
9
+ Keep it short and conversational. Offer at most 3 answer choices where helpful.
11
10
 
12
- Wait for their reply. Then immediately set up the full Claude Code environment.
11
+ **Ask this first:**
12
+
13
+ > What are you building? (Pick the closest match or describe in a few words)
14
+ > 1. A web app or SaaS product
15
+ > 2. A CLI tool or developer utility
16
+ > 3. Something else — tell me in one sentence
17
+
18
+ Wait for their answer. Then ask ONE follow-up question if needed to clarify the core purpose.
19
+ Ask a maximum of 3 questions total in this step. Stop after you understand the product.
20
+
21
+ ---
22
+
23
+ ## Step 2 — Tech stack
24
+
25
+ Once you understand the idea, ask:
26
+
27
+ > Do you already have a tech stack in mind?
28
+ > - **Yes** → ask them to describe it briefly, then suggest complementary tools
29
+ > - **No** → based on their idea, suggest the most appropriate stack:
30
+ > - Web app → Next.js/React + TypeScript (or Python/FastAPI if data-heavy)
31
+ > - CLI tool → Node.js/TypeScript or Go
32
+ > - API service → Node.js/Express or Python/FastAPI
33
+ > - Mobile → React Native or Flutter
34
+ > Present 2-3 options max and let them choose.
35
+
36
+ Confirm their final stack choice before moving on.
37
+
38
+ ---
39
+
40
+ ## Step 3 — Services
41
+
42
+ Based on the stack and idea, ask:
43
+
44
+ > Which of these services will you need? (select any that apply)
45
+
46
+ Suggest only services relevant to their specific idea:
47
+ - **Database** — if they need persistent data (PostgreSQL, MongoDB, Redis)
48
+ - **Auth** — if they need user accounts
49
+ - **Payments** — if they're monetizing (Stripe)
50
+ - **Storage** — if they handle files/media
51
+ - **Email/notifications** — if they need to reach users
52
+ - **External API** — if they named a specific third-party service
53
+
54
+ Let them specify their own services, then suggest 1-2 additional ones that complement their stack.
13
55
 
14
56
  ---
15
57
 
16
- ## After they reply
58
+ ## After collecting all answers
17
59
 
18
60
  Use exactly what they said. Do not invent requirements they did not mention.
19
61
 
20
62
  **CLAUDE.md**
21
- Write it for the product they described. Reference their exact words.
63
+ Write it for the product they described. Reference their exact words and chosen stack.
22
64
  If they gave no language: write a language-agnostic CLAUDE.md about the product itself.
23
65
 
24
66
  **.mcp.json**
@@ -47,8 +47,12 @@ Before removing anything, scan these locations directly:
47
47
  - Remove the entire matcher entry if removing all hooks for that matcher
48
48
 
49
49
  ### Skills
50
- - Delete the skill directory: `.claude/skills/<name>/` (entire directory including SKILL.md)
51
- - Or delete the flat skill file: `.claude/skills/<name>.md`
50
+ - Delete the skill directory using the appropriate shell command:
51
+ - macOS/Linux: run `rm -rf .claude/skills/<name>/`
52
+ - Windows: run `rmdir /s /q ".claude\skills\<name>"`
53
+ - Verify deletion succeeded: confirm `.claude/skills/<name>/` no longer exists on disk
54
+ - Remove the skill reference from CLAUDE.md
55
+ - Remove from SKILLS_LIST in CLAUDE.md if referenced there
52
56
 
53
57
  ### Plugins
54
58
  - Suggest: `/plugin uninstall <name>@<marketplace>`