cchubber 0.1.0 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "cchubber",
3
- "version": "0.1.0",
3
+ "version": "0.2.0",
4
4
  "description": "What you spent. Why you spent it. Is that normal. — Claude Code usage diagnosis with beautiful HTML reports.",
5
5
  "type": "module",
6
6
  "bin": {
@@ -0,0 +1,72 @@
1
+ /**
2
+ * Model Routing Analysis
3
+ * Detects model usage patterns and estimates savings from better routing.
4
+ */
5
+ export function analyzeModelRouting(costAnalysis, jsonlEntries) {
6
+ const modelCosts = costAnalysis.modelCosts || {};
7
+ const totalCost = Object.values(modelCosts).reduce((s, c) => s + c, 0);
8
+
9
+ if (totalCost < 0.01) return { available: false };
10
+
11
+ // Classify models into tiers
12
+ const tiers = { opus: 0, sonnet: 0, haiku: 0, other: 0 };
13
+ const tierCosts = { opus: 0, sonnet: 0, haiku: 0, other: 0 };
14
+
15
+ for (const [name, cost] of Object.entries(modelCosts)) {
16
+ const lower = name.toLowerCase();
17
+ if (lower.includes('opus')) { tiers.opus++; tierCosts.opus += cost; }
18
+ else if (lower.includes('sonnet')) { tiers.sonnet++; tierCosts.sonnet += cost; }
19
+ else if (lower.includes('haiku')) { tiers.haiku++; tierCosts.haiku += cost; }
20
+ else { tiers.other++; tierCosts.other += cost; }
21
+ }
22
+
23
+ const opusPct = totalCost > 0 ? Math.round((tierCosts.opus / totalCost) * 100) : 0;
24
+ const sonnetPct = totalCost > 0 ? Math.round((tierCosts.sonnet / totalCost) * 100) : 0;
25
+ const haikuPct = totalCost > 0 ? Math.round((tierCosts.haiku / totalCost) * 100) : 0;
26
+
27
+ // Estimate savings: assume 40% of Opus work could be done by Sonnet at 60% cost
28
+ // Conservative estimate — Sonnet handles file reads, simple edits, search well
29
+ const opusCost = tierCosts.opus;
30
+ const routableToSonnet = opusCost * 0.4; // 40% of Opus work is routable
31
+ const sonnetEquivalentCost = routableToSonnet * 0.6; // Sonnet is ~60% of Opus cost
32
+ const estimatedSavings = routableToSonnet - sonnetEquivalentCost;
33
+
34
+ // Detect subagent usage from JSONL (subagent messages often use different models)
35
+ let subagentMessages = 0;
36
+ let mainMessages = 0;
37
+ if (jsonlEntries && jsonlEntries.length > 0) {
38
+ for (const entry of jsonlEntries) {
39
+ const model = (entry.model || '').toLowerCase();
40
+ // Subagents typically use sonnet/haiku, main thread uses opus
41
+ if (model.includes('sonnet') || model.includes('haiku')) {
42
+ subagentMessages++;
43
+ } else {
44
+ mainMessages++;
45
+ }
46
+ }
47
+ }
48
+
49
+ const subagentPct = (subagentMessages + mainMessages) > 0
50
+ ? Math.round((subagentMessages / (subagentMessages + mainMessages)) * 100)
51
+ : 0;
52
+
53
+ // Model diversity score (0-100): higher = better routing
54
+ const modelCount = Object.keys(modelCosts).length;
55
+ let diversityScore = 0;
56
+ if (modelCount >= 3 && opusPct < 80) diversityScore = 90;
57
+ else if (modelCount >= 2 && opusPct < 90) diversityScore = 60;
58
+ else if (opusPct > 95) diversityScore = 20;
59
+ else diversityScore = 40;
60
+
61
+ return {
62
+ available: true,
63
+ opusPct,
64
+ sonnetPct,
65
+ haikuPct,
66
+ estimatedSavings: Math.round(estimatedSavings),
67
+ subagentPct,
68
+ diversityScore,
69
+ tierCosts,
70
+ totalCost,
71
+ };
72
+ }
@@ -1,128 +1,141 @@
1
- export function generateRecommendations(costAnalysis, cacheHealth, claudeMdStack, anomalies, inflection) {
1
+ /**
2
+ * Recommendations Engine
3
+ * Generates actionable recommendations informed by community data (March 2026 crisis).
4
+ * Every recommendation maps to a real pattern reported by users on GitHub/Twitter/Reddit.
5
+ */
6
+ export function generateRecommendations(costAnalysis, cacheHealth, claudeMdStack, anomalies, inflection, sessionIntel, modelRouting) {
2
7
  const recs = [];
3
8
 
4
- // 0. Inflection point — most important signal, goes first
9
+ // 0. Inflection point — most important signal
5
10
  if (inflection && inflection.direction === 'worsened' && inflection.multiplier >= 2) {
6
11
  recs.push({
7
12
  severity: 'critical',
8
- title: `Efficiency dropped ${inflection.multiplier}x on ${inflection.date}`,
13
+ title: `Cache efficiency dropped ${inflection.multiplier}x on ${inflection.date}`,
9
14
  detail: inflection.summary,
10
- action: 'This date likely correlates with a Claude Code update or cache regression. Check your CC version history. v2.1.89 had a known cache bug v2.1.90 includes a fix.',
15
+ action: 'Run: claude update. Versions 2.1.69-2.1.89 had a cache sentinel bug that dropped read rates from 95% to 4-17%. Fixed in v2.1.90.',
11
16
  });
12
17
  } else if (inflection && inflection.direction === 'improved' && inflection.multiplier >= 2) {
13
18
  recs.push({
14
19
  severity: 'positive',
15
20
  title: `Efficiency improved ${inflection.multiplier}x on ${inflection.date}`,
16
21
  detail: inflection.summary,
17
- action: 'Something changed for the better on this date. Likely a version update or workflow change.',
22
+ action: 'Your cache efficiency improved here. Likely a version update or workflow change that stuck.',
18
23
  });
19
24
  }
20
25
 
21
- // 1. CLAUDE.md size
26
+ // 1. CLAUDE.md bloat — community-reported 10-20x cost multiplier
22
27
  if (claudeMdStack.totalTokensEstimate > 8000) {
28
+ const dailyCost = claudeMdStack.costPerMessage?.dailyCached200;
23
29
  recs.push({
24
- severity: 'warning',
25
- title: 'Large CLAUDE.md stack',
26
- detail: `Your CLAUDE.md files total ~${claudeMdStack.totalTokensEstimate.toLocaleString()} tokens (${(claudeMdStack.totalBytes / 1024).toFixed(1)} KB). This is re-read on every message. At 200 messages/day, this costs ~$${claudeMdStack.costPerMessage.dailyCached200.toFixed(2)}/day cached, or $${claudeMdStack.costPerMessage.dailyUncached200.toFixed(2)}/day if cache breaks.`,
27
- action: 'Review your CLAUDE.md for sections that could be moved to project-level files loaded on demand.',
30
+ severity: claudeMdStack.totalTokensEstimate > 15000 ? 'critical' : 'warning',
31
+ title: `CLAUDE.md is ${Math.round(claudeMdStack.totalTokensEstimate / 1000)}K tokens`,
32
+ detail: `Re-read on every turn. Community best practice: keep under 200 lines (~4K tokens). Yours costs ~$${dailyCost ? dailyCost.toFixed(2) : '?'}/day at 200 messages. Each cache break re-reads at 12.5x the cached price.`,
33
+ action: 'Move rarely-used rules to project-level files. Use skills/hooks instead of inline instructions. Every 1K tokens removed saves ~$0.50/day.',
28
34
  });
29
35
  }
30
36
 
31
- // 2. Cache break frequency
37
+ // 2. Version check — the #1 fix reported by community
38
+ if (cacheHealth.efficiencyRatio > 1500 || (inflection && inflection.direction === 'worsened')) {
39
+ recs.push({
40
+ severity: 'critical',
41
+ title: 'Update Claude Code to v2.1.90+',
42
+ detail: 'Versions 2.1.69-2.1.89 had three cache bugs: sentinel replacement error, --resume cache miss, and nested CLAUDE.md re-injection. Community-verified: usage dropped from 80-100% to 5-7% of Max quota after updating.',
43
+ action: 'Run: claude update. If already on latest, start a fresh session — the fix only applies to new sessions.',
44
+ });
45
+ }
46
+
47
+ // 3. Cache break analysis
32
48
  if (cacheHealth.totalCacheBreaks > 10) {
33
49
  const topReason = cacheHealth.reasonsRanked[0];
34
50
  recs.push({
35
51
  severity: cacheHealth.totalCacheBreaks > 50 ? 'critical' : 'warning',
36
- title: `${cacheHealth.totalCacheBreaks} cache breaks detected`,
37
- detail: `Each cache break forces a full context re-read at write prices (12.5x cache read cost). Top cause: "${topReason?.reason}" (${topReason?.count} times, ${topReason?.percentage}%).`,
52
+ title: `${cacheHealth.totalCacheBreaks} cache invalidations`,
53
+ detail: `Each break forces a full prompt re-read at write prices (12.5x cache read cost). ${topReason ? `Top cause: "${topReason.reason}" (${topReason.count}x, ${topReason.percentage}%).` : ''}`,
38
54
  action: topReason?.reason === 'Tool schemas changed'
39
- ? 'Reduce MCP tool connections. Each tool add/remove invalidates the cache.'
55
+ ? 'Reduce MCP server connections. Each tool schema change breaks the cache prefix. Disconnect tools you\'re not actively using.'
40
56
  : topReason?.reason === 'System prompt changed'
41
- ? 'Avoid editing CLAUDE.md mid-session. Make changes between sessions.'
42
- : topReason?.reason === 'TTL expiry'
43
- ? 'Keep sessions active. Cache expires after 5 minutes of inactivity.'
44
- : 'Review cache break logs in ~/.claude/tmp/cache-break-*.diff for details.',
57
+ ? 'Stop editing CLAUDE.md mid-session. Batch rule changes between sessions.'
58
+ : 'Review ~/.claude/tmp/cache-break-*.diff for exact invalidation causes.',
45
59
  });
46
60
  }
47
61
 
48
- // 3. High cache:output ratio
62
+ // 4. High cache:output ratio
49
63
  if (cacheHealth.efficiencyRatio > 2000) {
50
64
  recs.push({
51
65
  severity: 'critical',
52
- title: `Cache efficiency ratio: ${cacheHealth.efficiencyRatio.toLocaleString()}:1`,
53
- detail: `For every 1 token of output, ${cacheHealth.efficiencyRatio.toLocaleString()} tokens are read from cache. Healthy range is 300-800:1. This could indicate the known Claude Code cache bug (March 2026).`,
54
- action: 'Check your Claude Code version. Versions around 2.1.85-2.1.90 have known cache regression bugs. Consider pinning to an earlier version.',
66
+ title: `Cache ratio ${cacheHealth.efficiencyRatio.toLocaleString()}:1 — abnormally high`,
67
+ detail: `Healthy range: 300-800:1. You\'re at ${cacheHealth.efficiencyRatio.toLocaleString()}:1 — every output token costs ${cacheHealth.efficiencyRatio.toLocaleString()} cache read tokens. This pattern matches the March 2026 cache bug reported by thousands of users.`,
68
+ action: 'Immediate fix: update to v2.1.90+. If already updated, avoid --resume flag and start fresh sessions per task.',
55
69
  });
56
70
  } else if (cacheHealth.efficiencyRatio > 1000) {
57
71
  recs.push({
58
72
  severity: 'warning',
59
- title: `Elevated cache ratio: ${cacheHealth.efficiencyRatio.toLocaleString()}:1`,
60
- detail: 'Above average but not critical. Could be large codebase exploration or heavy file reading.',
61
- action: 'Use /compact more frequently in long sessions. Start fresh sessions for new tasks.',
73
+ title: `Cache ratio ${cacheHealth.efficiencyRatio.toLocaleString()}:1 — elevated`,
74
+ detail: 'Not critical, but above the 300-800 healthy range. Common causes: large codebase exploration, many file reads without /compact, or stale sessions.',
75
+ action: 'Use /compact every 30-40 tool calls. Start fresh sessions for each distinct task.',
62
76
  });
63
77
  }
64
78
 
65
- // 4. Cost anomalies
66
- if (anomalies.hasAnomalies) {
67
- const spikes = anomalies.anomalies.filter(a => a.type === 'spike');
68
- if (spikes.length > 0) {
69
- const worst = spikes[0];
70
- recs.push({
71
- severity: worst.severity,
72
- title: `${spikes.length} cost spike${spikes.length > 1 ? 's' : ''} detected`,
73
- detail: `Worst: $${worst.cost.toFixed(2)} on ${worst.date} (${worst.zScore > 0 ? '+' : ''}${worst.deviation.toFixed(2)} from average of $${worst.avgCost.toFixed(2)}).${worst.cacheRatioAnomaly ? ' Cache ratio was also anomalous — likely cache bug impact.' : ''}`,
74
- action: 'Compare session activity on spike days. Look for long sessions without /compact, or sessions where many MCP tools were connected.',
75
- });
76
- }
77
- }
79
+ // 5. Opus dominance — community tip: Sonnet handles 60%+ of tasks at 1/5 cost
80
+ const modelCosts = costAnalysis.modelCosts || {};
81
+ const totalModelCost = Object.values(modelCosts).reduce((s, c) => s + c, 0);
82
+ const opusCost = Object.entries(modelCosts).filter(([n]) => n.toLowerCase().includes('opus')).reduce((s, [, c]) => s + c, 0);
83
+ const opusPct = totalModelCost > 0 ? Math.round((opusCost / totalModelCost) * 100) : 0;
78
84
 
79
- // 5. Cost trend
80
- if (anomalies.trend === 'rising_fast') {
85
+ if (opusPct > 85) {
86
+ const savings = modelRouting?.estimatedSavings || Math.round(opusCost * 0.16);
81
87
  recs.push({
82
- severity: 'critical',
83
- title: 'Costs rising rapidly',
84
- detail: 'Your recent 7-day average is significantly higher than your historical average.',
85
- action: 'This may be related to the March 2026 Claude Code cache bug. Check Anthropic status for updates.',
88
+ severity: 'warning',
89
+ title: `${opusPct}% of spend is Opus`,
90
+ detail: `Opus costs 5x more than Sonnet per token. Sonnet 4.6 handles file reads, search, simple edits, and subagent work at the same quality. Community tip: switching routine tasks to Sonnet dropped quota usage by 60-80%.`,
91
+ action: `Set model: "sonnet" on subagent/Task calls. Estimated savings: ~$${savings.toLocaleString()}. Reserve Opus for complex reasoning only.`,
86
92
  });
87
93
  }
88
94
 
89
- // 6. Opus dominance
90
- const modelCosts = costAnalysis.modelCosts || {};
91
- const totalModelCost = Object.values(modelCosts).reduce((s, c) => s + c, 0);
92
- const opusCost = Object.entries(modelCosts)
93
- .filter(([name]) => name.includes('opus'))
94
- .reduce((s, [, c]) => s + c, 0);
95
- const opusPercentage = totalModelCost > 0 ? (opusCost / totalModelCost) * 100 : 0;
96
-
97
- if (opusPercentage > 90) {
95
+ // 6. Session length — community-reported: sessions >60 min degrade heavily
96
+ if (sessionIntel?.available && sessionIntel.longSessionPct > 30) {
98
97
  recs.push({
99
- severity: 'info',
100
- title: `${Math.round(opusPercentage)}% of costs from Opus`,
101
- detail: 'Opus is the most expensive model. Subagents and simple tasks could use Sonnet or Haiku.',
102
- action: 'Set model: "sonnet" or "haiku" on Task tool calls for search, documentation lookup, and log analysis.',
98
+ severity: 'warning',
99
+ title: `${sessionIntel.longSessionPct}% of sessions exceed 60 minutes`,
100
+ detail: `Long sessions accumulate context that degrades cache efficiency and response quality. Your median: ${sessionIntel.medianDuration}min, p90: ${sessionIntel.p90Duration}min, longest: ${sessionIntel.maxDuration}min.`,
101
+ action: 'One task, one session. Use /compact for exploration, fresh session for each bug fix or feature. The cost of starting fresh is less than the cost of a bloated context.',
103
102
  });
104
103
  }
105
104
 
106
- // 7. Session depth — long sessions without compact
107
- const sessions = costAnalysis.sessions || {};
108
- if (sessions.avgDurationMinutes > 60) {
105
+ // 7. Peak hour overlap — community-reported: 5am-11am PT has throttled limits
106
+ if (sessionIntel?.available && sessionIntel.peakOverlapPct > 40) {
109
107
  recs.push({
110
- severity: 'warning',
111
- title: `Average session: ${Math.round(sessions.avgDurationMinutes)} minutes`,
112
- detail: `Long sessions accumulate context that degrades both performance and cache efficiency. Sessions over 60 minutes often benefit from /compact.`,
113
- action: 'Use /compact every 30-40 tool calls or when switching tasks. Start fresh sessions for new work.',
108
+ severity: 'info',
109
+ title: `${sessionIntel.peakOverlapPct}% of your work hits throttled hours`,
110
+ detail: 'Anthropic reduces 5-hour session limits during weekday peak hours (5am-11am PT / 12pm-6pm UTC). ~7% of users hit limits they wouldn\'t otherwise.',
111
+ action: 'Shift token-heavy work (refactors, test generation, codebase exploration) to off-peak hours. Session limits are unchanged only the 5-hour window shrinks.',
114
112
  });
115
113
  }
116
114
 
117
- // 8. Caching savings acknowledgment
118
- if (cacheHealth.savings.fromCaching > 100) {
115
+ // 8. Cost anomalies
116
+ if (anomalies.hasAnomalies) {
117
+ const spikes = anomalies.anomalies.filter(a => a.type === 'spike');
118
+ if (spikes.length > 0) {
119
+ const worst = spikes[0];
120
+ recs.push({
121
+ severity: worst.severity,
122
+ title: `${spikes.length} cost spike${spikes.length > 1 ? 's' : ''} — worst: $${worst.cost.toFixed(0)} on ${worst.date}`,
123
+ detail: `+$${worst.deviation.toFixed(0)} above your $${worst.avgCost.toFixed(0)} daily average.${worst.cacheRatioAnomaly ? ' Cache ratio was also anomalous — strongly suggests cache bug.' : ''} GitHub #38029 documents a bug where a single session generated 652K phantom output tokens ($342).`,
124
+ action: 'Monitor the first 1-2 messages of each session. If a single message burns 3-5% of your quota, restart immediately.',
125
+ });
126
+ }
127
+ }
128
+
129
+ // 9. Positive: cache savings
130
+ if (cacheHealth.savings?.fromCaching > 100) {
119
131
  recs.push({
120
132
  severity: 'positive',
121
- title: `Caching saved you ~$${cacheHealth.savings.fromCaching.toLocaleString()}`,
122
- detail: 'Without prompt caching, your bill would be significantly higher. The cache system is working — the question is whether it breaks too often.',
123
- action: 'No action needed. Keep sessions alive to maximize cache hits.',
133
+ title: `Cache saved you ~$${cacheHealth.savings.fromCaching.toLocaleString()}`,
134
+ detail: 'Without prompt caching, standard input pricing would have applied to all cache reads. The system is working — optimization is about reducing breaks.',
135
+ action: 'Keep sessions alive to maximize hits. Avoid mid-session CLAUDE.md edits and MCP tool changes.',
124
136
  });
125
137
  }
126
138
 
127
- return recs;
139
+ // Cap at 5 most impactful recommendations
140
+ return recs.slice(0, 5);
128
141
  }
@@ -0,0 +1,114 @@
1
+ /**
2
+ * Session Intelligence
3
+ * Analyzes session patterns: length, tool density, compact usage, productivity.
4
+ */
5
+ export function analyzeSessionIntelligence(sessionMeta, jsonlEntries) {
6
+ if (!sessionMeta || sessionMeta.length === 0) {
7
+ return { available: false };
8
+ }
9
+
10
+ const sessions = sessionMeta.filter(s => s.durationMinutes > 0);
11
+ if (sessions.length === 0) return { available: false };
12
+
13
+ // Basic session stats
14
+ const durations = sessions.map(s => s.durationMinutes);
15
+ const totalMinutes = durations.reduce((s, d) => s + d, 0);
16
+ const avgDuration = totalMinutes / sessions.length;
17
+ const maxDuration = Math.max(...durations);
18
+ const longestSession = sessions.find(s => s.durationMinutes === maxDuration);
19
+
20
+ // Sort by duration for percentile calc
21
+ const sorted = [...durations].sort((a, b) => a - b);
22
+ const p50 = sorted[Math.floor(sorted.length * 0.5)];
23
+ const p90 = sorted[Math.floor(sorted.length * 0.9)];
24
+
25
+ // Long sessions (>60 min) — likely need /compact
26
+ const longSessions = sessions.filter(s => s.durationMinutes > 60);
27
+ const longSessionPct = sessions.length > 0 ? Math.round((longSessions.length / sessions.length) * 100) : 0;
28
+
29
+ // Tool call density per session
30
+ const toolDensities = sessions.map(s => {
31
+ const totalTools = Object.values(s.toolCounts || {}).reduce((sum, c) => sum + c, 0);
32
+ return { sessionId: s.sessionId, tools: totalTools, minutes: s.durationMinutes, density: s.durationMinutes > 0 ? (totalTools / s.durationMinutes).toFixed(1) : 0 };
33
+ });
34
+
35
+ const avgToolsPerSession = toolDensities.reduce((s, t) => s + t.tools, 0) / sessions.length;
36
+
37
+ // Most used tools across all sessions
38
+ const toolTotals = {};
39
+ for (const s of sessions) {
40
+ for (const [tool, count] of Object.entries(s.toolCounts || {})) {
41
+ toolTotals[tool] = (toolTotals[tool] || 0) + count;
42
+ }
43
+ }
44
+ const topTools = Object.entries(toolTotals)
45
+ .sort((a, b) => b[1] - a[1])
46
+ .slice(0, 8)
47
+ .map(([name, count]) => ({ name, count }));
48
+
49
+ // Lines of code per session hour (productivity proxy)
50
+ const totalLines = sessions.reduce((s, x) => s + x.linesAdded + x.linesRemoved, 0);
51
+ const totalHours = totalMinutes / 60;
52
+ const linesPerHour = totalHours > 0 ? Math.round(totalLines / totalHours) : 0;
53
+
54
+ // Messages per session
55
+ const totalMessages = sessions.reduce((s, x) => s + x.userMessageCount + x.assistantMessageCount, 0);
56
+ const avgMessagesPerSession = Math.round(totalMessages / sessions.length);
57
+
58
+ // Time-of-day distribution (from JSONL timestamps)
59
+ const hourDistribution = new Array(24).fill(0);
60
+ if (jsonlEntries && jsonlEntries.length > 0) {
61
+ for (const entry of jsonlEntries) {
62
+ if (!entry.timestamp) continue;
63
+ try {
64
+ const d = new Date(entry.timestamp);
65
+ if (!isNaN(d.getTime())) {
66
+ hourDistribution[d.getHours()]++;
67
+ }
68
+ } catch { /* skip */ }
69
+ }
70
+ }
71
+
72
+ // Peak hours (top 3)
73
+ const peakHours = hourDistribution
74
+ .map((count, hour) => ({ hour, count }))
75
+ .sort((a, b) => b.count - a.count)
76
+ .slice(0, 3)
77
+ .map(h => ({ hour: h.hour, label: formatHour(h.hour), count: h.count }));
78
+
79
+ // Peak-hour overlap check (throttled window: 5am-11am PT = 12pm-6pm UTC, roughly)
80
+ const offPeakStart = 12; // UTC
81
+ const offPeakEnd = 18;
82
+ const peakOverlapMessages = hourDistribution
83
+ .slice(offPeakStart, offPeakEnd + 1)
84
+ .reduce((s, c) => s + c, 0);
85
+ const totalHourMessages = hourDistribution.reduce((s, c) => s + c, 0);
86
+ const peakOverlapPct = totalHourMessages > 0 ? Math.round((peakOverlapMessages / totalHourMessages) * 100) : 0;
87
+
88
+ return {
89
+ available: true,
90
+ totalSessions: sessions.length,
91
+ totalMinutes,
92
+ avgDuration: Math.round(avgDuration),
93
+ medianDuration: p50,
94
+ p90Duration: p90,
95
+ maxDuration,
96
+ longestSessionProject: longestSession?.projectPath,
97
+ longSessions: longSessions.length,
98
+ longSessionPct,
99
+ avgToolsPerSession: Math.round(avgToolsPerSession),
100
+ topTools,
101
+ linesPerHour,
102
+ avgMessagesPerSession,
103
+ peakHours,
104
+ peakOverlapPct,
105
+ hourDistribution,
106
+ };
107
+ }
108
+
109
+ function formatHour(h) {
110
+ if (h === 0) return '12am';
111
+ if (h < 12) return h + 'am';
112
+ if (h === 12) return '12pm';
113
+ return (h - 12) + 'pm';
114
+ }
package/src/cli/index.js CHANGED
@@ -16,6 +16,8 @@ import { analyzeCacheHealth } from '../analyzers/cache-health.js';
16
16
  import { detectAnomalies } from '../analyzers/anomaly-detector.js';
17
17
  import { generateRecommendations } from '../analyzers/recommendations.js';
18
18
  import { detectInflectionPoints } from '../analyzers/inflection-detector.js';
19
+ import { analyzeSessionIntelligence } from '../analyzers/session-intelligence.js';
20
+ import { analyzeModelRouting } from '../analyzers/model-routing.js';
19
21
  import { renderHTML } from '../renderers/html-report.js';
20
22
  import { renderTerminal } from '../renderers/terminal-summary.js';
21
23
 
@@ -115,21 +117,24 @@ async function main() {
115
117
  const cacheHealth = analyzeCacheHealth(statsCache, cacheBreaks, allTimeDays, dailyFromJSONL);
116
118
  const anomalies = detectAnomalies(costAnalysis);
117
119
  const inflection = detectInflectionPoints(dailyFromJSONL);
118
- const recommendations = generateRecommendations(costAnalysis, cacheHealth, claudeMdStack, anomalies, inflection);
119
-
120
- if (inflection) {
121
- console.log(` ✓ Inflection point: ${inflection.summary}`);
122
- }
120
+ const sessionIntel = analyzeSessionIntelligence(sessionMeta, jsonlEntries);
121
+ const modelRouting = analyzeModelRouting(costAnalysis, jsonlEntries);
122
+ const recommendations = generateRecommendations(costAnalysis, cacheHealth, claudeMdStack, anomalies, inflection, sessionIntel, modelRouting);
123
123
 
124
+ if (inflection) console.log(` ✓ Inflection: ${inflection.summary}`);
125
+ if (sessionIntel.available) console.log(` ✓ ${sessionIntel.totalSessions} sessions analyzed (${sessionIntel.avgDuration} min avg)`);
126
+ if (modelRouting.available) console.log(` ✓ Model routing: ${modelRouting.opusPct}% Opus, ${modelRouting.sonnetPct}% Sonnet`);
124
127
  console.log(` ✓ ${projectBreakdown.length} projects detected`);
125
128
 
126
129
  const report = {
127
130
  generatedAt: new Date().toISOString(),
128
- periodDays: flags.days, // Default view in HTML
131
+ periodDays: flags.days,
129
132
  costAnalysis,
130
133
  cacheHealth,
131
134
  anomalies,
132
135
  inflection,
136
+ sessionIntel,
137
+ modelRouting,
133
138
  projectBreakdown,
134
139
  claudeMdStack,
135
140
  oauthUsage,
@@ -6,15 +6,41 @@ export function readClaudeMdStack(claudeDir) {
6
6
  const home = homedir();
7
7
  const stack = [];
8
8
 
9
- // Global CLAUDE.md
9
+ // Global CLAUDE.md — detailed section analysis
10
10
  const globalPath = join(home, '.claude', 'CLAUDE.md');
11
+ let globalSections = [];
11
12
  if (existsSync(globalPath)) {
12
13
  const stat = statSync(globalPath);
14
+ const content = readFileSync(globalPath, 'utf-8');
15
+ const lines = content.split('\n');
16
+ const lineCount = lines.length;
17
+
18
+ // Parse sections (## headings)
19
+ let currentSection = { name: 'Header', lines: 0, bytes: 0 };
20
+ const sections = [];
21
+ for (const line of lines) {
22
+ if (line.match(/^##\s+/)) {
23
+ if (currentSection.lines > 0) sections.push(currentSection);
24
+ currentSection = { name: line.replace(/^#+\s*/, '').trim(), lines: 0, bytes: 0 };
25
+ }
26
+ currentSection.lines++;
27
+ currentSection.bytes += Buffer.byteLength(line + '\n', 'utf-8');
28
+ }
29
+ if (currentSection.lines > 0) sections.push(currentSection);
30
+
31
+ // Add token estimates and sort by size
32
+ globalSections = sections
33
+ .map(s => ({ ...s, tokens: Math.round(s.bytes / 4) }))
34
+ .sort((a, b) => b.bytes - a.bytes);
35
+
13
36
  stack.push({
14
37
  level: 'global',
15
38
  path: globalPath,
16
39
  bytes: stat.size,
17
40
  tokensEstimate: Math.round(stat.size / 4),
41
+ lineCount,
42
+ sectionCount: sections.length,
43
+ sections: globalSections,
18
44
  });
19
45
  }
20
46
 
@@ -54,6 +80,7 @@ export function readClaudeMdStack(claudeDir) {
54
80
 
55
81
  return {
56
82
  files: stack,
83
+ globalSections,
57
84
  totalBytes,
58
85
  totalTokensEstimate,
59
86
  settingsBytes: settingsSize,