meter-ai 0.1.2 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/dist/commands/init.d.ts.map +1 -1
  2. package/dist/commands/init.js +74 -3
  3. package/dist/commands/init.js.map +1 -1
  4. package/dist/commands/wrap.d.ts.map +1 -1
  5. package/dist/commands/wrap.js +23 -2
  6. package/dist/commands/wrap.js.map +1 -1
  7. package/dist/pty/wrapper.d.ts +9 -8
  8. package/dist/pty/wrapper.d.ts.map +1 -1
  9. package/dist/pty/wrapper.js +39 -14
  10. package/dist/pty/wrapper.js.map +1 -1
  11. package/package.json +1 -1
  12. package/src/auth/credentials.ts +44 -0
  13. package/src/auth/detect.ts +24 -0
  14. package/src/commands/config.ts +19 -0
  15. package/src/commands/history.ts +16 -0
  16. package/src/commands/init.ts +149 -0
  17. package/src/commands/report.ts +27 -0
  18. package/src/commands/status.ts +16 -0
  19. package/src/commands/uninstall.ts +20 -0
  20. package/src/commands/wrap.ts +235 -0
  21. package/src/constants.ts +52 -0
  22. package/src/estimation/heuristics.ts +36 -0
  23. package/src/estimation/history-matcher.ts +43 -0
  24. package/src/estimation/llm-precheck.ts +27 -0
  25. package/src/estimation/pipeline.ts +67 -0
  26. package/src/hooks/on-prompt.js +92 -0
  27. package/src/hooks/statusline.js +36 -0
  28. package/src/index.ts +50 -0
  29. package/src/pty/resize.ts +15 -0
  30. package/src/pty/screen.ts +15 -0
  31. package/src/pty/wrapper.ts +143 -0
  32. package/src/shell/binary-resolver.ts +21 -0
  33. package/src/shell/detect.ts +33 -0
  34. package/src/shell/path-inject.ts +31 -0
  35. package/src/shell/shim-writer.ts +28 -0
  36. package/src/storage/config-store.ts +46 -0
  37. package/src/storage/db.ts +63 -0
  38. package/src/tracking/cost.ts +7 -0
  39. package/src/tracking/plan-usage.ts +57 -0
  40. package/src/tracking/tokens.ts +16 -0
  41. package/src/types.ts +73 -0
  42. package/src/ui/keypress.ts +27 -0
  43. package/src/ui/notification.ts +31 -0
  44. package/src/ui/statusbar.ts +74 -0
@@ -0,0 +1,27 @@
1
+ import { readConfig } from '../storage/config-store.js'
2
+ import { openDb, getRecentTasks } from '../storage/db.js'
3
+ import { CONFIG_PATH, HISTORY_DB_PATH } from '../constants.js'
4
+
5
+ export async function runReport(): Promise<void> {
6
+ const config = await readConfig(CONFIG_PATH)
7
+ if (!config) { console.log('Run meter init first.'); return }
8
+ const db = await openDb(HISTORY_DB_PATH)
9
+ const weekAgo = Date.now() - 7 * 24 * 60 * 60 * 1000
10
+ const allTasks = getRecentTasks(db, 500).filter(t => t.created_at >= weekAgo)
11
+ db.close()
12
+ if (allTasks.length === 0) { console.log('No tasks recorded this week.'); return }
13
+ const totalCost = allTasks.reduce((s, t) => s + (t.actual_cost ?? 0), 0)
14
+ const heaviest = allTasks.sort((a, b) => (b.actual_cost ?? 0) - (a.actual_cost ?? 0))[0]
15
+ const byComplexity: Record<string, number> = { low: 0, medium: 0, heavy: 0, critical: 0 }
16
+ for (const t of allTasks) byComplexity[t.complexity] = (byComplexity[t.complexity] ?? 0) + 1
17
+ console.log(`\n◆ meter weekly report\n`)
18
+ console.log(` Mode ${config.mode}`)
19
+ console.log(` Tasks run ${allTasks.length}`)
20
+ if (config.mode === 'api') {
21
+ console.log(` Total spend $${totalCost.toFixed(3)}`)
22
+ console.log(` Avg task cost $${(totalCost / allTasks.length).toFixed(3)}`)
23
+ }
24
+ if (heaviest) console.log(` Heaviest task "${heaviest.prompt_text.slice(0, 40)}"`)
25
+ console.log(`\n By complexity:`)
26
+ for (const [k, v] of Object.entries(byComplexity)) if (v > 0) console.log(` ${k.padEnd(10)} ${v} tasks`)
27
+ }
@@ -0,0 +1,16 @@
1
+ import { readConfig } from '../storage/config-store.js'
2
+ import { CONFIG_PATH } from '../constants.js'
3
+
4
+ export async function runStatus(): Promise<void> {
5
+ const config = await readConfig(CONFIG_PATH)
6
+ if (!config) { console.log('meter not initialised. Run: meter init'); return }
7
+ console.log(`◆ meter status\n`)
8
+ console.log(` Mode: ${config.mode}`)
9
+ console.log(` Model: ${config.models.claude_chain[0]}`)
10
+ console.log(` Claude: ${config.resolved_binaries.claude}`)
11
+ if (config.mode === 'api') {
12
+ console.log(` Budget: $${config.budget.per_task_usd} per task (notify at ${config.budget.threshold_pct}%)`)
13
+ } else {
14
+ console.log(` Window: notify at ${config.plan.window_threshold_pct}%`)
15
+ }
16
+ }
@@ -0,0 +1,20 @@
1
+ import { rm } from 'fs/promises'
2
+ import { removePath } from '../shell/path-inject.js'
3
+ import { detectShell, getShellConfigPath } from '../shell/detect.js'
4
+ import { METER_DIR } from '../constants.js'
5
+ import * as readline from 'readline/promises'
6
+
7
+ export async function runUninstall(): Promise<void> {
8
+ const rl = readline.createInterface({ input: process.stdin, output: process.stdout })
9
+ const answer = await rl.question('Remove all meter data (~/.meter/)? [y/N] ')
10
+ rl.close()
11
+ const shell = detectShell()
12
+ const configPath = getShellConfigPath(shell)
13
+ await removePath(configPath)
14
+ console.log(`✓ Removed PATH entry from ${configPath}`)
15
+ if (answer.toLowerCase() === 'y') {
16
+ await rm(METER_DIR, { recursive: true, force: true })
17
+ console.log('✓ Removed ~/.meter/')
18
+ }
19
+ console.log('✓ Uninstall complete. Run: npm uninstall -g meter-ai')
20
+ }
@@ -0,0 +1,235 @@
1
+ import { PtyWrapper } from '../pty/wrapper.js'
2
+ import { readConfig } from '../storage/config-store.js'
3
+ import { openDb, insertTask } from '../storage/db.js'
4
+ import { runEstimationPipeline } from '../estimation/pipeline.js'
5
+ import { injectStatusBar, renderStatusBar } from '../ui/statusbar.js'
6
+ import { renderNotification } from '../ui/notification.js'
7
+ import { waitForKeypress } from '../ui/keypress.js'
8
+ import { parseTokensFromOutput, estimateInputTokens } from '../tracking/tokens.js'
9
+ import { fetchPlanUsage, formatResetCountdown } from '../tracking/plan-usage.js'
10
+ import { readCredentials, watchCredentials } from '../auth/credentials.js'
11
+ import { calculateStatusBarLines } from '../pty/resize.js'
12
+ import {
13
+ CONFIG_PATH, HISTORY_DB_PATH, CLAUDE_CREDENTIALS_PATH
14
+ } from '../constants.js'
15
+ import { execSync } from 'child_process'
16
+ import { createHash } from 'crypto'
17
+ import type { StatusBarState } from '../ui/statusbar.js'
18
+ import type { Complexity } from '../types.js'
19
+
20
+ export async function runWrap(args: string[]): Promise<void> {
21
+ const trueBinary = args[0]
22
+ const agentArgs = args.slice(1)
23
+ const prompt = agentArgs.join(' ')
24
+
25
+ if (!trueBinary) {
26
+ console.error('[meter] wrap: missing binary argument')
27
+ process.exit(1)
28
+ }
29
+
30
+ const config = await readConfig(CONFIG_PATH)
31
+ if (!config) {
32
+ // No config — run agent directly without wrapping
33
+ const w = new PtyWrapper()
34
+ w.spawn(trueBinary, agentArgs, process.env as NodeJS.ProcessEnv)
35
+ await new Promise<void>(resolve => w.on('exit', () => resolve()))
36
+ return
37
+ }
38
+
39
+ const db = await openDb(HISTORY_DB_PATH)
40
+
41
+ // Get repo identifier
42
+ let repo: string | null = null
43
+ try { repo = execSync('git remote get-url origin 2>/dev/null').toString().trim() } catch {}
44
+ if (!repo) { try { repo = process.cwd() } catch {} }
45
+
46
+ // Get repo file count for estimation
47
+ let repoFileCount = 100
48
+ try { repoFileCount = parseInt(execSync('git ls-files 2>/dev/null | wc -l').toString().trim(), 10) || 100 } catch {}
49
+
50
+ // Run estimation (non-blocking display)
51
+ const estimation = await runEstimationPipeline({
52
+ prompt,
53
+ repoFileCount,
54
+ db,
55
+ repo,
56
+ config: config.estimation,
57
+ })
58
+
59
+ // Fetch initial plan usage if Plan Mode
60
+ let planUsage: Awaited<ReturnType<typeof fetchPlanUsage>> = null
61
+ let creds: Awaited<ReturnType<typeof readCredentials>> = null
62
+ if (config.mode === 'plan' && config.org_id) {
63
+ creds = await readCredentials(CLAUDE_CREDENTIALS_PATH)
64
+ if (creds) planUsage = await fetchPlanUsage(config.org_id, creds)
65
+ }
66
+
67
+ // State for status bar
68
+ const state: StatusBarState = {
69
+ model: config.models.claude_chain[0],
70
+ estimatedCost: estimation.estimated_cost,
71
+ complexity: estimation.complexity as Complexity,
72
+ mode: config.mode,
73
+ elapsedCost: 0,
74
+ budgetUsd: config.budget.per_task_usd,
75
+ windowPct: planUsage?.five_hour_pct ?? null,
76
+ windowResetIn: planUsage ? formatResetCountdown(planUsage.five_hour_reset_at) : null,
77
+ }
78
+
79
+ // Spawn agent in PTY — declare BEFORE updateBar so closure can reference it safely
80
+ const wrapper = new PtyWrapper()
81
+ let outputBuffer = ''
82
+ let thresholdNotified = false
83
+ let modelSwitched = 0
84
+
85
+ // Status bar update function — adapts to PTY vs fallback mode
86
+ let statusBarShown = false
87
+ const updateBar = () => {
88
+ if (wrapper.isInAlternateScreen) return
89
+ const content = renderStatusBar(state)
90
+
91
+ if (wrapper.usingFallback) {
92
+ // Fallback mode: only show status bar once as a header, then on threshold
93
+ if (!statusBarShown) {
94
+ process.stdout.write(content + '\n')
95
+ statusBarShown = true
96
+ }
97
+ } else {
98
+ // PTY mode: inject at fixed position via escape codes
99
+ const N = calculateStatusBarLines(content.replace(/\x1b\[[^m]*m/g, '').length, process.stdout.columns ?? 80)
100
+ injectStatusBar(content, N)
101
+ }
102
+ }
103
+
104
+ // Watch for credential refresh
105
+ if (config.mode === 'plan') {
106
+ watchCredentials(CLAUDE_CREDENTIALS_PATH, async (newCreds) => {
107
+ if (newCreds) creds = newCreds
108
+ })
109
+ }
110
+
111
+ // Plan usage polling (15s during active task)
112
+ let pollInterval: NodeJS.Timeout | null = null
113
+ if (config.mode === 'plan' && config.org_id && creds) {
114
+ pollInterval = setInterval(async () => {
115
+ if (!creds || !config.org_id) return
116
+ const usage = await fetchPlanUsage(config.org_id, creds)
117
+ if (usage) {
118
+ state.windowPct = usage.five_hour_pct
119
+ state.windowResetIn = formatResetCountdown(usage.five_hour_reset_at)
120
+ updateBar()
121
+ }
122
+ }, 15_000)
123
+ }
124
+
125
+ // Per-prompt estimation: when user submits a new prompt inside the interactive
126
+ // session, re-run the estimation pipeline and update the status bar
127
+ let lastPrompt = prompt // initial prompt from CLI args
128
+ wrapper.on('input', async (newPrompt: string) => {
129
+ lastPrompt = newPrompt
130
+ const newEstimation = await runEstimationPipeline({
131
+ prompt: newPrompt,
132
+ repoFileCount,
133
+ db,
134
+ repo,
135
+ config: config.estimation,
136
+ })
137
+ state.estimatedCost = newEstimation.estimated_cost
138
+ state.complexity = newEstimation.complexity as Complexity
139
+ // Reset elapsed cost for the new prompt
140
+ state.elapsedCost = 0
141
+ thresholdNotified = false
142
+ statusBarShown = false // allow fallback mode to re-print
143
+ updateBar()
144
+ })
145
+
146
+ wrapper.on('data', (chunk: string) => {
147
+ outputBuffer += chunk
148
+ // Rough running cost estimate from output volume
149
+ state.elapsedCost = (state.elapsedCost ?? 0) + (chunk.length / 4 / 1_000_000) * 15
150
+ updateBar()
151
+
152
+ // Check threshold
153
+ if (!thresholdNotified) {
154
+ const exceeded = config.mode === 'api'
155
+ ? ((state.elapsedCost ?? 0) / config.budget.per_task_usd) * 100 >= config.budget.threshold_pct
156
+ : (state.windowPct ?? 0) >= config.plan.window_threshold_pct
157
+
158
+ if (exceeded) {
159
+ thresholdNotified = true
160
+ handleThreshold()
161
+ }
162
+ }
163
+ })
164
+
165
+ async function handleThreshold() {
166
+ // config is guaranteed non-null here (we returned early above if null)
167
+ const cfg = config!
168
+ const nextModel = cfg.models.claude_chain[1] ?? null
169
+ const notification = renderNotification({
170
+ mode: cfg.mode,
171
+ thresholdPct: cfg.mode === 'api' ? cfg.budget.threshold_pct : cfg.plan.window_threshold_pct,
172
+ elapsedCost: state.elapsedCost,
173
+ budgetUsd: cfg.budget.per_task_usd,
174
+ windowPct: state.windowPct,
175
+ nextModel,
176
+ })
177
+
178
+ process.stdout.write('\n' + notification + '\n')
179
+
180
+ const action = await waitForKeypress(['s', 'd', 'c'], 0)
181
+
182
+ if (action === 'c') {
183
+ wrapper.kill()
184
+ } else if (action === 's' && nextModel) {
185
+ process.stdout.write(`\n↻ restarting with ${nextModel} (context will reset — press c within 5s to cancel)\n`)
186
+ const cancel = await waitForKeypress(['c'], 5_000)
187
+ if (cancel !== 'c') {
188
+ wrapper.kill()
189
+ state.model = nextModel
190
+ modelSwitched = 1
191
+ wrapper.spawn(trueBinary, [`--model`, nextModel, ...agentArgs.slice(1)], process.env as NodeJS.ProcessEnv)
192
+ }
193
+ }
194
+ // 'd' = dismiss, do nothing
195
+ }
196
+
197
+ const taskStart = Date.now()
198
+
199
+ // Reserve status bar line and spawn
200
+ process.stdout.write('\n')
201
+ updateBar()
202
+ wrapper.spawn(trueBinary, agentArgs, process.env as NodeJS.ProcessEnv)
203
+
204
+ const exitCode = await new Promise<number>(resolve => {
205
+ wrapper.on('exit', (code: number) => resolve(code))
206
+ })
207
+
208
+ if (pollInterval) clearInterval(pollInterval)
209
+
210
+ // Parse final token counts
211
+ const tokens = parseTokensFromOutput(outputBuffer)
212
+ const finalPrompt = lastPrompt || prompt
213
+ const promptHash = createHash('sha256').update(finalPrompt.toLowerCase().trim()).digest('hex').slice(0, 16)
214
+
215
+ insertTask(db, {
216
+ created_at: taskStart,
217
+ repo,
218
+ prompt_hash: promptHash,
219
+ prompt_text: finalPrompt,
220
+ model: state.model,
221
+ complexity: estimation.complexity as Complexity,
222
+ est_layer: estimation.layer_used,
223
+ est_cost: estimation.estimated_cost,
224
+ actual_tokens_in: tokens?.input ?? estimateInputTokens(prompt.length),
225
+ actual_tokens_out: tokens?.output ?? null,
226
+ actual_cost: config.mode === 'api' ? (state.elapsedCost ?? null) : null,
227
+ window_pct_start: planUsage?.five_hour_pct ?? null,
228
+ window_pct_end: state.windowPct,
229
+ model_switched: modelSwitched,
230
+ exit_code: exitCode,
231
+ })
232
+
233
+ db.close()
234
+ process.exit(exitCode)
235
+ }
@@ -0,0 +1,52 @@
1
+ import { homedir } from 'os'
2
+ import { join } from 'path'
3
+
4
+ export const METER_DIR = join(homedir(), '.meter')
5
+ export const METER_BIN_DIR = join(METER_DIR, 'bin')
6
+ export const CONFIG_PATH = join(METER_DIR, 'config.json')
7
+ export const HISTORY_DB_PATH = join(METER_DIR, 'history.db')
8
+ export const PRICING_PATH = join(METER_DIR, 'pricing.json')
9
+ export const USAGE_CACHE_PATH = join(METER_DIR, 'cache', 'usage.json')
10
+ export const SESSION_CACHE_PATH = join(METER_DIR, 'cache', 'session.json')
11
+ export const ERRORS_LOG_PATH = join(METER_DIR, 'cache', 'errors.log')
12
+ export const REPORTS_DIR = join(METER_DIR, 'reports')
13
+
14
+ export const DEFAULT_CONFIG_VALUES = {
15
+ budget_per_task_usd: 0.50,
16
+ threshold_pct: 80,
17
+ window_threshold_pct: 80,
18
+ poll_interval_seconds: 60,
19
+ min_confidence_to_skip_llm: 0.85,
20
+ claude_chain: ['claude-opus-4-20250514', 'claude-sonnet-4-20250514', 'claude-haiku-4-20250307'],
21
+ llm_precheck_model: 'claude-haiku-4-20250307',
22
+ }
23
+
24
+ export const CLAUDE_USAGE_API = 'https://claude.ai/api/organizations'
25
+ export const CLAUDE_BOOTSTRAP_API = 'https://claude.ai/api/bootstrap'
26
+ export const CLAUDE_CREDENTIALS_PATH = join(homedir(), '.claude', '.credentials.json')
27
+ export const CLAUDE_SETTINGS_PATH = join(homedir(), '.claude', 'settings.json')
28
+
29
+ export const KEYWORD_WEIGHTS: Record<string, number> = {
30
+ 'refactor entire': 0.9,
31
+ 'rewrite': 0.85,
32
+ 'migrate': 0.8,
33
+ 'refactor': 0.7,
34
+ 'implement': 0.6,
35
+ 'add feature': 0.55,
36
+ 'add tests': 0.5,
37
+ 'add': 0.4,
38
+ 'update': 0.35,
39
+ 'fix': 0.3,
40
+ 'debug': 0.3,
41
+ 'change': 0.25,
42
+ 'rename': 0.2,
43
+ 'fix typo': 0.05,
44
+ 'typo': 0.05,
45
+ }
46
+
47
+ export const COMPLEXITY_THRESHOLDS = {
48
+ low: 0.3,
49
+ medium: 0.55,
50
+ heavy: 0.75,
51
+ critical: 1.0,
52
+ }
@@ -0,0 +1,36 @@
1
+ import { KEYWORD_WEIGHTS, COMPLEXITY_THRESHOLDS } from '../constants.js'
2
+ import type { Complexity, EstimationResult } from '../types.js'
3
+
4
+ interface HeuristicInput {
5
+ prompt: string
6
+ repoFileCount: number
7
+ }
8
+
9
+ export function scoreHeuristics(input: HeuristicInput): Pick<EstimationResult, 'complexity' | 'confidence'> {
10
+ const lower = input.prompt.toLowerCase()
11
+
12
+ // Use the most-specific (longest) matching keyword to avoid over-counting
13
+ let keywordScore = 0.35
14
+ let bestMatchLen = 0
15
+ for (const [keyword, weight] of Object.entries(KEYWORD_WEIGHTS)) {
16
+ if (lower.includes(keyword) && keyword.length > bestMatchLen) {
17
+ bestMatchLen = keyword.length
18
+ keywordScore = weight
19
+ }
20
+ }
21
+
22
+ const sizeModifier = Math.min(input.repoFileCount / 500, 0.2)
23
+ const lengthModifier = lower.length > 60 ? 0.05 : 0
24
+ const rawScore = Math.min(keywordScore + sizeModifier + lengthModifier, 1.0)
25
+
26
+ // critical uses strict > so it is unreachable via capped rawScore;
27
+ // heavy is the practical maximum from heuristics alone
28
+ let complexity: Complexity = 'low'
29
+ if (rawScore > COMPLEXITY_THRESHOLDS.critical) complexity = 'critical'
30
+ else if (rawScore >= COMPLEXITY_THRESHOLDS.heavy) complexity = 'heavy'
31
+ else if (rawScore >= COMPLEXITY_THRESHOLDS.medium) complexity = 'medium'
32
+
33
+ const confidence = keywordScore >= 0.8 || keywordScore <= 0.1 ? 0.9 : 0.65
34
+
35
+ return { complexity, confidence }
36
+ }
@@ -0,0 +1,43 @@
1
+ import type { DB } from '../storage/db.js'
2
+ import type { EstimationResult, TaskRecord } from '../types.js'
3
+
4
+ function trigramSimilarity(a: string, b: string): number {
5
+ const trigrams = (s: string): Set<string> => {
6
+ const set = new Set<string>()
7
+ const padded = ` ${s.toLowerCase()} `
8
+ for (let i = 0; i < padded.length - 2; i++) {
9
+ set.add(padded.slice(i, i + 3))
10
+ }
11
+ return set
12
+ }
13
+
14
+ const ta = trigrams(a)
15
+ const tb = trigrams(b)
16
+ const intersection = [...ta].filter(t => tb.has(t)).length
17
+ const union = new Set([...ta, ...tb]).size
18
+ return union === 0 ? 0 : intersection / union
19
+ }
20
+
21
+ const MIN_SIMILARITY = 0.25
22
+ const MIN_RUNS = 3
23
+
24
+ export function matchHistory(
25
+ db: DB,
26
+ prompt: string,
27
+ repo: string | null
28
+ ): Pick<EstimationResult, 'estimated_cost' | 'layer_used'> | null {
29
+ const candidates = db.prepare(
30
+ 'SELECT * FROM tasks WHERE repo = ? AND actual_cost IS NOT NULL ORDER BY created_at DESC LIMIT 100'
31
+ ).all(repo) as TaskRecord[]
32
+
33
+ const matches = candidates.filter(t =>
34
+ trigramSimilarity(t.prompt_text, prompt) >= MIN_SIMILARITY
35
+ )
36
+
37
+ if (matches.length < MIN_RUNS) return null
38
+
39
+ const costs = matches.map(t => t.actual_cost!).sort((a, b) => a - b)
40
+ const median = costs[Math.floor(costs.length / 2)]
41
+
42
+ return { estimated_cost: median, layer_used: 2 }
43
+ }
@@ -0,0 +1,27 @@
1
+ import Anthropic from '@anthropic-ai/sdk'
2
+ import type { Complexity } from '../types.js'
3
+
4
+ const VALID_COMPLEXITIES = new Set<Complexity>(['low', 'medium', 'heavy', 'critical'])
5
+
6
+ export async function llmClassify(
7
+ prompt: string,
8
+ repoFileCount: number,
9
+ model: string
10
+ ): Promise<Complexity | null> {
11
+ try {
12
+ const client = new Anthropic()
13
+ const message = await client.messages.create({
14
+ model,
15
+ max_tokens: 10,
16
+ messages: [{
17
+ role: 'user',
18
+ content: `Classify this coding task complexity: low | medium | heavy | critical\nTask: "${prompt}"\nRepo: ${repoFileCount} files\nReply with exactly one word.`
19
+ }]
20
+ })
21
+
22
+ const text = (message.content[0] as { type: 'text'; text: string }).text.trim().toLowerCase() as Complexity
23
+ return VALID_COMPLEXITIES.has(text) ? text : null
24
+ } catch {
25
+ return null
26
+ }
27
+ }
@@ -0,0 +1,67 @@
1
+ import { scoreHeuristics } from './heuristics.js'
2
+ import { matchHistory } from './history-matcher.js'
3
+ import { llmClassify } from './llm-precheck.js'
4
+ import type { DB } from '../storage/db.js'
5
+ import type { EstimationResult } from '../types.js'
6
+
7
+ const COST_BY_COMPLEXITY = { low: 0.02, medium: 0.09, heavy: 0.38, critical: 0.80 }
8
+
9
+ interface PipelineInput {
10
+ prompt: string
11
+ repoFileCount: number
12
+ db: DB | null
13
+ repo: string | null
14
+ config: {
15
+ use_llm_precheck: boolean
16
+ llm_precheck_model: string
17
+ min_confidence_to_skip_llm: number
18
+ }
19
+ }
20
+
21
+ export async function runEstimationPipeline(input: PipelineInput): Promise<EstimationResult> {
22
+ // Layer 1: heuristics
23
+ const heuristic = scoreHeuristics({ prompt: input.prompt, repoFileCount: input.repoFileCount })
24
+
25
+ if (heuristic.confidence >= input.config.min_confidence_to_skip_llm) {
26
+ return {
27
+ complexity: heuristic.complexity,
28
+ confidence: heuristic.confidence,
29
+ estimated_cost: COST_BY_COMPLEXITY[heuristic.complexity],
30
+ layer_used: 1
31
+ }
32
+ }
33
+
34
+ // Layer 2: historical baseline
35
+ if (input.db && input.repo) {
36
+ const historical = matchHistory(input.db, input.prompt, input.repo)
37
+ if (historical?.estimated_cost !== undefined) {
38
+ return {
39
+ complexity: heuristic.complexity,
40
+ confidence: 0.8,
41
+ estimated_cost: historical.estimated_cost,
42
+ layer_used: 2
43
+ }
44
+ }
45
+ }
46
+
47
+ // Layer 3: LLM pre-call
48
+ if (input.config.use_llm_precheck) {
49
+ const llmResult = await llmClassify(input.prompt, input.repoFileCount, input.config.llm_precheck_model)
50
+ if (llmResult) {
51
+ return {
52
+ complexity: llmResult,
53
+ confidence: 0.87,
54
+ estimated_cost: COST_BY_COMPLEXITY[llmResult],
55
+ layer_used: 3
56
+ }
57
+ }
58
+ }
59
+
60
+ // Fallback: use heuristic
61
+ return {
62
+ complexity: heuristic.complexity,
63
+ confidence: heuristic.confidence,
64
+ estimated_cost: COST_BY_COMPLEXITY[heuristic.complexity],
65
+ layer_used: 1
66
+ }
67
+ }
@@ -0,0 +1,92 @@
#!/usr/bin/env node
/**
 * meter — UserPromptSubmit hook
 *
 * Runs when user submits a prompt in Claude Code.
 * Executes the estimation pipeline and writes result to ~/.meter/cache/latest-estimate.json
 * The statusline command reads this file to display the estimate.
 *
 * This script must never block or crash Claude Code: every failure path
 * exits 0 silently.
 */
const { execSync } = require('child_process');
const fs = require('fs');
const path = require('path');
const os = require('os');

const METER_DIR = path.join(os.homedir(), '.meter');
const CACHE_DIR = path.join(METER_DIR, 'cache');
const ESTIMATE_FILE = path.join(CACHE_DIR, 'latest-estimate.json');
const CONFIG_FILE = path.join(METER_DIR, 'config.json');

// Keyword weights for heuristic scoring.
const KEYWORD_WEIGHTS = {
  'refactor entire': 0.9, 'rewrite': 0.85, 'migrate': 0.8,
  'refactor': 0.7, 'implement': 0.6, 'add feature': 0.55,
  'add tests': 0.5, 'add': 0.4, 'update': 0.35,
  'fix': 0.3, 'debug': 0.3, 'change': 0.25,
  'rename': 0.2, 'fix typo': 0.05, 'typo': 0.05,
};

// Pre-sorted once so the longest (most specific) phrase is tried first.
const KEYWORDS_BY_SPECIFICITY = Object.entries(KEYWORD_WEIGHTS)
  .sort((a, b) => b[0].length - a[0].length);

const COST_BY_COMPLEXITY = { low: 0.02, medium: 0.09, heavy: 0.38, critical: 0.80 };

// Count tracked files in the current repo; 100 when git is unavailable.
function countRepoFiles() {
  try {
    return parseInt(execSync('git ls-files 2>/dev/null | wc -l').toString().trim(), 10) || 100;
  } catch {
    return 100;
  }
}

// Heuristic complexity/cost estimate for a prompt (mirrors src/estimation).
function scorePrompt(prompt) {
  const lower = prompt.toLowerCase();

  let keywordScore = 0.35;
  for (const [keyword, weight] of KEYWORDS_BY_SPECIFICITY) {
    if (lower.includes(keyword)) {
      keywordScore = Math.max(keywordScore, weight);
      break; // longest match wins
    }
  }

  const sizeModifier = Math.min(countRepoFiles() / 500, 0.2);
  const rawScore = Math.min(keywordScore + sizeModifier, 1.0);

  let complexity = 'low';
  if (rawScore >= 0.75) complexity = 'heavy';
  else if (rawScore >= 0.55) complexity = 'medium';

  return { complexity, cost: COST_BY_COMPLEXITY[complexity], prompt: prompt.slice(0, 80) };
}

// Claude Code pipes the submitted prompt (usually JSON) on stdin.
function readPromptFromStdin() {
  const raw = fs.readFileSync(0, 'utf-8').trim();
  try {
    const parsed = JSON.parse(raw);
    return parsed.prompt || parsed.message || raw;
  } catch {
    return raw;
  }
}

try {
  const prompt = readPromptFromStdin();
  if (!prompt) process.exit(0);

  const estimate = scorePrompt(prompt);
  estimate.timestamp = Date.now();

  // Annotate with mode/model/budget from config; sensible defaults when
  // meter has not been initialised.
  try {
    const config = JSON.parse(fs.readFileSync(CONFIG_FILE, 'utf-8'));
    estimate.mode = config.mode;
    estimate.model = config.models?.claude_chain?.[0] || 'unknown';
    estimate.budget = config.budget?.per_task_usd || 0.50;
  } catch {
    estimate.mode = 'api';
    estimate.model = 'unknown';
    estimate.budget = 0.50;
  }

  fs.mkdirSync(CACHE_DIR, { recursive: true });
  fs.writeFileSync(ESTIMATE_FILE, JSON.stringify(estimate));
} catch {
  // Never block Claude Code — fail silently
  process.exit(0);
}
@@ -0,0 +1,36 @@
#!/usr/bin/env node
/**
 * meter — Statusline command for Claude Code
 *
 * Reads the latest estimation from ~/.meter/cache/latest-estimate.json
 * and outputs a formatted status string for Claude Code's bottom bar.
 * Any failure degrades to the neutral "meter: ready" string.
 */
const fs = require('fs');
const path = require('path');
const os = require('os');

const ESTIMATE_FILE = path.join(os.homedir(), '.meter', 'cache', 'latest-estimate.json');
const STALE_AFTER_MS = 600_000; // estimates older than 10 minutes are stale

try {
  if (!fs.existsSync(ESTIMATE_FILE)) {
    process.stdout.write('meter: ready');
    process.exit(0);
  }

  const data = JSON.parse(fs.readFileSync(ESTIMATE_FILE, 'utf-8'));
  const ageMs = Date.now() - (data.timestamp || 0);

  if (ageMs > STALE_AFTER_MS) {
    process.stdout.write('meter: idle');
    process.exit(0);
  }

  // Tolerate partial estimate files: every field has a placeholder.
  const cost = data.cost != null ? `~$${data.cost.toFixed(2)}` : '?';
  const complexity = data.complexity || '?';
  const promptPreview = (data.prompt || '').slice(0, 30);

  process.stdout.write(`meter ${cost} ${complexity} │ ${promptPreview}`);
} catch {
  process.stdout.write('meter: ready');
}