codex-meter 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
codex_meter/cli.py ADDED
@@ -0,0 +1,2455 @@
1
+ from __future__ import annotations
2
+
3
+ import calendar
4
+ import csv
5
+ import datetime as dt
6
+ import html
7
+ import io
8
+ import json
9
+ import os
10
+ import re
11
+ import sqlite3
12
+ import subprocess
13
+ import urllib.request
14
+ from collections.abc import Callable
15
+ from contextlib import closing
16
+ from decimal import Decimal
17
+ from pathlib import Path
18
+ from typing import Annotated
19
+
20
+ import typer
21
+ from rich.console import Console
22
+ from rich.table import Table
23
+
24
+ from codex_meter import __version__
25
+ from codex_meter.aggregation import (
26
+ aggregate_daily,
27
+ aggregate_model_mode,
28
+ aggregate_monthly,
29
+ aggregate_projects,
30
+ aggregate_sessions,
31
+ aggregate_total,
32
+ aggregate_weekly,
33
+ )
34
+ from codex_meter.budgets import (
35
+ SEVERITY_BREACH,
36
+ SEVERITY_EXIT_CODE,
37
+ SEVERITY_WARN,
38
+ Budget,
39
+ BudgetAlert,
40
+ max_severity,
41
+ parse_budgets_table,
42
+ )
43
+ from codex_meter.budgets import (
44
+ evaluate as evaluate_budgets,
45
+ )
46
+ from codex_meter.config import build_options, load_config
47
+ from codex_meter.exporters import (
48
+ ReceiptInputs,
49
+ month_bounds,
50
+ render_grafana_dashboard,
51
+ render_receipt_html,
52
+ render_receipt_markdown,
53
+ )
54
+ from codex_meter.forecasts import project as project_forecast
55
+ from codex_meter.humanize import short_table_label
56
+ from codex_meter.insights import build_insights, insights_payload, render_insights_markdown
57
+ from codex_meter.intervals import Interval, parse_interval
58
+ from codex_meter.live import run_live
59
+ from codex_meter.models import (
60
+ Aggregate,
61
+ LoadResult,
62
+ RuntimeOptions,
63
+ decimal_string,
64
+ decimal_value,
65
+ )
66
+ from codex_meter.parse_cache import default_cache_path
67
+ from codex_meter.parser import load_usage
68
+ from codex_meter.pricing import (
69
+ MODEL_CARDS,
70
+ MODELS_BY_NAME,
71
+ PRICING_SOURCES,
72
+ RateCard,
73
+ normalize_model,
74
+ )
75
+ from codex_meter.render import format_int, pricing_status, pricing_warnings, render, render_limits
76
+ from codex_meter.timeutil import iso_z, local_timezone
77
+
78
# Typer application root. `no_args_is_help=False` lets a bare `codex-meter`
# invocation fall through to the default overview report instead of help text.
app = typer.Typer(
    help=(
        "Local Codex usage intelligence for sessions, projects, models, cache, "
        "and rate-limit windows."
    ),
    no_args_is_help=False,
)
# Shared console for all human-readable (table) output.
console = Console()

# Formats accepted by --format on most commands (some commands narrow this set).
OUTPUT_FORMATS = ("table", "json", "csv", "markdown")
88
+
89
# Reusable Annotated option aliases shared by every command, so the large
# parameter lists below stay consistent in flag names, shorthands, and help.
SinceOpt = Annotated[
    str | None,
    typer.Option("--since", "-s", help="Start date/time. Supports YYYY-MM-DD, YYYYMMDD, or ISO."),
]
UntilOpt = Annotated[
    str | None, typer.Option("--until", "-u", help="End date/time. Defaults to now.")
]
DaysOpt = Annotated[float | None, typer.Option("--days", help="Rolling day window before --until.")]
TimezoneOpt = Annotated[
    str, typer.Option("--timezone", "-z", help="Timezone for grouping. Use local or an IANA name.")
]
SessionRootOpt = Annotated[
    Path | None, typer.Option("--session-root", help="Codex session JSONL root.")
]
StateDbOpt = Annotated[Path | None, typer.Option("--state-db", help="Codex state_5.sqlite path.")]
CodexConfigOpt = Annotated[
    Path | None, typer.Option("--codex-config", help="Codex config.toml path.")
]
ConfigOpt = Annotated[Path | None, typer.Option("--config", help="codex-meter config TOML path.")]
PricingModeOpt = Annotated[str, typer.Option("--pricing-mode")]
ServiceTierOpt = Annotated[str, typer.Option("--service-tier")]
UnknownTierOpt = Annotated[str, typer.Option("--unknown-service-tier")]
TierOverridesOpt = Annotated[Path | None, typer.Option("--tier-overrides")]
RatesFileOpt = Annotated[Path | None, typer.Option("--rates-file")]
NoDedupeOpt = Annotated[bool, typer.Option("--no-dedupe")]
NoParseCacheOpt = Annotated[bool, typer.Option("--no-parse-cache")]
DefaultModelOpt = Annotated[str, typer.Option("--default-model")]
ShowPromptsOpt = Annotated[bool, typer.Option("--show-prompts")]
OfflineOpt = Annotated[bool, typer.Option("--offline/--no-offline")]
CompactOpt = Annotated[bool, typer.Option("--compact")]
WidthOpt = Annotated[int | None, typer.Option("--width", help="Table width override.")]
TopThreadsOpt = Annotated[int, typer.Option("--top", "--top-threads", help="Limit grouped rows.")]
FormatOpt = Annotated[str, typer.Option("--format", "-f", help="table, json, csv, or markdown.")]
OutputOpt = Annotated[Path | None, typer.Option("--output", help="Write output to a file.")]

# Signature of the per-command aggregation callables (see codex_meter.aggregation).
RowsFn = Callable[[LoadResult, RuntimeOptions], list[Aggregate]]
125
+
126
# Keys plucked out of a command's locals() when building RuntimeOptions.
# Every entry must match a parameter name on the commands that forward
# locals() (see _options / _run_grouped); renaming either side breaks wiring.
OPTION_KEYS = (
    "since",
    "until",
    "days",
    "timezone",
    "session_root",
    "state_db",
    "codex_config",
    "config",
    "pricing_mode",
    "service_tier",
    "unknown_service_tier",
    "tier_overrides",
    "rates_file",
    "no_dedupe",
    "no_parse_cache",
    "default_model",
    "show_prompts",
    "offline",
    "compact",
    "width",
    "top_threads",
)
149
+
150
+
151
def _exit_error(message: str) -> typer.Exit:
    """Report *message* in red on the console and return an exit(2) signal.

    Callers `raise` the returned value, which keeps call sites one-liners.
    """
    exit_signal = typer.Exit(2)
    console.print(f"[red]error:[/red] {message}")
    return exit_signal
154
+
155
+
156
+ def _json_default(value: object) -> float:
157
+ if isinstance(value, Decimal):
158
+ return float(value)
159
+ raise TypeError(f"Object of type {type(value).__name__} is not JSON serializable")
160
+
161
+
162
def _json_dumps(payload: object) -> str:
    """Serialize *payload* as pretty-printed JSON, Decimal-aware via _json_default."""
    return json.dumps(payload, default=_json_default, indent=2)
164
+
165
+
166
def _amount_fields(name: str, value: object) -> dict[str, object]:
    """Emit an amount field twice: as-is and as an exact decimal string."""
    exact_key = f"{name}_exact"
    return {name: value, exact_key: decimal_string(value)}
168
+
169
+
170
def _validate_format(output_format: str) -> None:
    """Abort with exit code 2 unless *output_format* is a supported name."""
    if output_format in OUTPUT_FORMATS:
        return
    raise _exit_error(f"--format must be one of: {', '.join(OUTPUT_FORMATS)}")
173
+
174
+
175
+ def _records_to_csv(records: list[dict]) -> str:
176
+ if not records:
177
+ return ""
178
+ out = io.StringIO()
179
+ fields = list(records[0].keys())
180
+ writer = csv.DictWriter(out, fieldnames=fields)
181
+ writer.writeheader()
182
+ writer.writerows(records)
183
+ return out.getvalue()
184
+
185
+
186
+ def _records_to_markdown(records: list[dict]) -> str:
187
+ if not records:
188
+ return "_No data._\n"
189
+ fields = list(records[0].keys())
190
+ lines = [
191
+ "| " + " | ".join(fields) + " |",
192
+ "| " + " | ".join(["---"] * len(fields)) + " |",
193
+ ]
194
+ for record in records:
195
+ lines.append(
196
+ "| "
197
+ + " | ".join(str(record.get(field, "")).replace("|", "\\|") for field in fields)
198
+ + " |"
199
+ )
200
+ return "\n".join(lines) + "\n"
201
+
202
+
203
def _options(values: dict) -> RuntimeOptions:
    """Build RuntimeOptions from a command's locals(), filtered to OPTION_KEYS."""
    selected = {name: values[name] for name in OPTION_KEYS if name in values}
    try:
        return build_options(**selected)
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc
209
+
210
+
211
def _run_grouped(name: str, rows_fn: RowsFn, values: dict) -> None:
    """Shared driver: load usage, aggregate via *rows_fn*, trim, and render."""
    _validate_format(values["output_format"])
    runtime = _options(values)
    usage = load_usage(runtime)
    limited = rows_fn(usage, runtime)[: runtime.top_threads]
    render(usage, runtime, limited, name, values["output_format"], values["output"])
217
+
218
+
219
def version_callback(value: bool) -> None:
    """Eager --version handler: print the label and stop option processing."""
    if not value:
        return
    console.print(_version_label())
    raise typer.Exit()
223
+
224
+
225
def _version_label() -> str:
    """Compose the version string with rate-card check date and optional git SHA.

    The git lookup is best-effort: a missing binary, timeout, or OS error
    simply drops the commit suffix.
    """
    checked = max(source.checked for source in PRICING_SOURCES)
    sha = ""
    try:
        proc = subprocess.run(
            ["git", "rev-parse", "--short", "HEAD"],
            capture_output=True,
            text=True,
            timeout=1,
            check=False,
        )
    except (FileNotFoundError, subprocess.TimeoutExpired, OSError):
        pass
    else:
        sha = proc.stdout.strip()
    commit_note = f", commit {sha}" if sha else ""
    return f"{__version__} (rates checked {checked}{commit_note})"
241
+
242
+
243
@app.callback(invoke_without_command=True)
def main(
    ctx: typer.Context,
    version: Annotated[
        bool,
        typer.Option("--version", "-v", callback=version_callback, help="Show version and exit."),
    ] = False,
    output_format: FormatOpt = "table",
    output: OutputOpt = None,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    pricing_mode: PricingModeOpt = "model",
    service_tier: ServiceTierOpt = "auto",
    unknown_service_tier: UnknownTierOpt = "current-config",
    tier_overrides: TierOverridesOpt = None,
    rates_file: RatesFileOpt = None,
    no_dedupe: NoDedupeOpt = False,
    no_parse_cache: NoParseCacheOpt = False,
    default_model: DefaultModelOpt = "gpt-5.5",
    compact: CompactOpt = False,
    width: WidthOpt = None,
) -> None:
    """Root callback: with no subcommand, render the default overview report."""
    # locals() is forwarded as the option mapping (extra keys like ctx/version
    # are ignored downstream), so parameter names must stay in sync with
    # OPTION_KEYS — do not rename them.
    if ctx.invoked_subcommand is None:
        _run_overview(locals())
269
+
270
+
271
def _run_overview(values: dict) -> None:
    """Render the rolling 7/30/90-day totals used by the default command.

    Loads a single 90-day window once, then slices the two shorter windows
    from it in memory rather than re-reading the session files.
    """
    output_format = values["output_format"]
    _validate_format(output_format)
    now = dt.datetime.now(tz=local_timezone())
    option_values = {key: values[key] for key in OPTION_KEYS if key in values}
    option_values["days"] = 90.0
    option_values["until"] = iso_z(now)
    try:
        longest_options = build_options(**option_values)
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc
    longest_result = load_usage(longest_options)
    rate_card = RateCard.load(longest_options.rates_file, longest_options.pricing_mode)
    rows: list[Aggregate] = []
    for window_days in (7, 30, 90):
        cutoff = now - dt.timedelta(days=window_days)
        window_events = [evt for evt in longest_result.events if cutoff <= evt.timestamp < now]
        window = LoadResult(
            events=window_events,
            duplicates=0,
            tier_sources=longest_result.tier_sources,
            plan_types=longest_result.plan_types,
            credit_samples=[],
            warnings=longest_result.warnings,
        )
        total = aggregate_total(
            window, longest_options, label=f"Last {window_days} days", rate_card=rate_card
        )
        rows.append(total)
    render(longest_result, longest_options, rows, "overview", output_format, values["output"])
300
+
301
+
302
@app.command()
def overview(
    output_format: FormatOpt = "table",
    output: OutputOpt = None,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    pricing_mode: PricingModeOpt = "model",
    service_tier: ServiceTierOpt = "auto",
    unknown_service_tier: UnknownTierOpt = "current-config",
    tier_overrides: TierOverridesOpt = None,
    rates_file: RatesFileOpt = None,
    no_dedupe: NoDedupeOpt = False,
    no_parse_cache: NoParseCacheOpt = False,
    default_model: DefaultModelOpt = "gpt-5.5",
    compact: CompactOpt = False,
    width: WidthOpt = None,
) -> None:
    """Show rolling 7/30/90 day usage."""
    # locals() carries the option parameters; names must match OPTION_KEYS.
    _run_overview(locals())
323
+
324
+
325
@app.command()
def daily(
    since: SinceOpt = None,
    until: UntilOpt = None,
    days: DaysOpt = None,
    timezone: TimezoneOpt = "local",
    output_format: FormatOpt = "table",
    output: OutputOpt = None,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    pricing_mode: PricingModeOpt = "model",
    service_tier: ServiceTierOpt = "auto",
    unknown_service_tier: UnknownTierOpt = "current-config",
    tier_overrides: TierOverridesOpt = None,
    rates_file: RatesFileOpt = None,
    no_dedupe: NoDedupeOpt = False,
    no_parse_cache: NoParseCacheOpt = False,
    default_model: DefaultModelOpt = "gpt-5.5",
    show_prompts: ShowPromptsOpt = False,
    offline: OfflineOpt = True,
    compact: CompactOpt = False,
    width: WidthOpt = None,
    top_threads: TopThreadsOpt = 10,
) -> None:
    """Show usage grouped by day."""
    # Parameter names mirror OPTION_KEYS; locals() is forwarded as the option
    # mapping, so renaming any parameter would silently drop that option.
    _run_grouped("daily", aggregate_daily, locals())
353
+
354
+
355
@app.command()
def weekly(
    since: SinceOpt = None,
    until: UntilOpt = None,
    days: DaysOpt = None,
    timezone: TimezoneOpt = "local",
    output_format: FormatOpt = "table",
    output: OutputOpt = None,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    pricing_mode: PricingModeOpt = "model",
    service_tier: ServiceTierOpt = "auto",
    unknown_service_tier: UnknownTierOpt = "current-config",
    tier_overrides: TierOverridesOpt = None,
    rates_file: RatesFileOpt = None,
    no_dedupe: NoDedupeOpt = False,
    no_parse_cache: NoParseCacheOpt = False,
    default_model: DefaultModelOpt = "gpt-5.5",
    show_prompts: ShowPromptsOpt = False,
    offline: OfflineOpt = True,
    compact: CompactOpt = False,
    width: WidthOpt = None,
    top_threads: TopThreadsOpt = 10,
) -> None:
    """Show usage grouped by ISO week."""
    # Parameter names mirror OPTION_KEYS; locals() is forwarded as the option
    # mapping, so renaming any parameter would silently drop that option.
    _run_grouped("weekly", aggregate_weekly, locals())
383
+
384
+
385
@app.command()
def monthly(
    since: SinceOpt = None,
    until: UntilOpt = None,
    days: DaysOpt = None,
    timezone: TimezoneOpt = "local",
    output_format: FormatOpt = "table",
    output: OutputOpt = None,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    pricing_mode: PricingModeOpt = "model",
    service_tier: ServiceTierOpt = "auto",
    unknown_service_tier: UnknownTierOpt = "current-config",
    tier_overrides: TierOverridesOpt = None,
    rates_file: RatesFileOpt = None,
    no_dedupe: NoDedupeOpt = False,
    no_parse_cache: NoParseCacheOpt = False,
    default_model: DefaultModelOpt = "gpt-5.5",
    show_prompts: ShowPromptsOpt = False,
    offline: OfflineOpt = True,
    compact: CompactOpt = False,
    width: WidthOpt = None,
    top_threads: TopThreadsOpt = 10,
) -> None:
    """Show usage grouped by month."""
    # Parameter names mirror OPTION_KEYS; locals() is forwarded as the option
    # mapping, so renaming any parameter would silently drop that option.
    _run_grouped("monthly", aggregate_monthly, locals())
413
+
414
+
415
@app.command(name="session")
def session_command(
    since: SinceOpt = None,
    until: UntilOpt = None,
    days: DaysOpt = None,
    timezone: TimezoneOpt = "local",
    output_format: FormatOpt = "table",
    output: OutputOpt = None,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    pricing_mode: PricingModeOpt = "model",
    service_tier: ServiceTierOpt = "auto",
    unknown_service_tier: UnknownTierOpt = "current-config",
    tier_overrides: TierOverridesOpt = None,
    rates_file: RatesFileOpt = None,
    no_dedupe: NoDedupeOpt = False,
    no_parse_cache: NoParseCacheOpt = False,
    default_model: DefaultModelOpt = "gpt-5.5",
    show_prompts: ShowPromptsOpt = False,
    offline: OfflineOpt = True,
    compact: CompactOpt = False,
    width: WidthOpt = None,
    top_threads: TopThreadsOpt = 10,
) -> None:
    """Show usage grouped by Codex session."""
    # Python function named session_command to avoid clashing with the
    # session_root parameter namespace; the CLI name stays "session".
    # locals() is forwarded; parameter names must match OPTION_KEYS.
    _run_grouped("session", aggregate_sessions, locals())
443
+
444
+
445
@app.command()
def project(
    since: SinceOpt = None,
    until: UntilOpt = None,
    days: DaysOpt = None,
    timezone: TimezoneOpt = "local",
    output_format: FormatOpt = "table",
    output: OutputOpt = None,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    pricing_mode: PricingModeOpt = "model",
    service_tier: ServiceTierOpt = "auto",
    unknown_service_tier: UnknownTierOpt = "current-config",
    tier_overrides: TierOverridesOpt = None,
    rates_file: RatesFileOpt = None,
    no_dedupe: NoDedupeOpt = False,
    no_parse_cache: NoParseCacheOpt = False,
    default_model: DefaultModelOpt = "gpt-5.5",
    show_prompts: ShowPromptsOpt = False,
    offline: OfflineOpt = True,
    compact: CompactOpt = False,
    width: WidthOpt = None,
    top_threads: TopThreadsOpt = 10,
) -> None:
    """Show usage grouped by project/cwd."""
    # Parameter names mirror OPTION_KEYS; locals() is forwarded as the option
    # mapping, so renaming any parameter would silently drop that option.
    _run_grouped("project", aggregate_projects, locals())
473
+
474
+
475
@app.command()
def models(
    since: SinceOpt = None,
    until: UntilOpt = None,
    days: DaysOpt = None,
    timezone: TimezoneOpt = "local",
    output_format: FormatOpt = "table",
    output: OutputOpt = None,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    pricing_mode: PricingModeOpt = "model",
    service_tier: ServiceTierOpt = "auto",
    unknown_service_tier: UnknownTierOpt = "current-config",
    tier_overrides: TierOverridesOpt = None,
    rates_file: RatesFileOpt = None,
    no_dedupe: NoDedupeOpt = False,
    no_parse_cache: NoParseCacheOpt = False,
    default_model: DefaultModelOpt = "gpt-5.5",
    show_prompts: ShowPromptsOpt = False,
    offline: OfflineOpt = True,
    compact: CompactOpt = False,
    width: WidthOpt = None,
    top_threads: TopThreadsOpt = 10,
) -> None:
    """Show usage grouped by model and service tier."""
    # Parameter names mirror OPTION_KEYS; locals() is forwarded as the option
    # mapping, so renaming any parameter would silently drop that option.
    _run_grouped("models", aggregate_model_mode, locals())
503
+
504
+
505
@app.command()
def limits(
    since: SinceOpt = None,
    until: UntilOpt = None,
    days: DaysOpt = 7,
    timezone: TimezoneOpt = "local",
    output_format: FormatOpt = "table",
    output: OutputOpt = None,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    pricing_mode: PricingModeOpt = "model",
    service_tier: ServiceTierOpt = "auto",
    unknown_service_tier: UnknownTierOpt = "current-config",
    tier_overrides: TierOverridesOpt = None,
    rates_file: RatesFileOpt = None,
    no_dedupe: NoDedupeOpt = False,
    no_parse_cache: NoParseCacheOpt = False,
    default_model: DefaultModelOpt = "gpt-5.5",
    show_prompts: ShowPromptsOpt = False,
    offline: OfflineOpt = True,
    compact: CompactOpt = False,
    width: WidthOpt = None,
    top_threads: TopThreadsOpt = 10,
) -> None:
    """Show recent rate-limit and credit samples."""
    # Unlike the grouped commands, --days defaults to 7 here so the limits
    # view stays focused on recent samples.
    _validate_format(output_format)
    # locals() includes every option parameter above; _options filters by
    # OPTION_KEYS, so parameter names must not change.
    options = _options(locals())
    result = load_usage(options)
    render_limits(result, options, output_format, output)
536
+
537
+
538
@app.command()
def insights(
    since: SinceOpt = None,
    until: UntilOpt = None,
    days: DaysOpt = None,
    timezone: TimezoneOpt = "local",
    output_format: FormatOpt = "table",
    output: OutputOpt = None,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    pricing_mode: PricingModeOpt = "model",
    service_tier: ServiceTierOpt = "auto",
    unknown_service_tier: UnknownTierOpt = "current-config",
    tier_overrides: TierOverridesOpt = None,
    rates_file: RatesFileOpt = None,
    no_dedupe: NoDedupeOpt = False,
    no_parse_cache: NoParseCacheOpt = False,
    default_model: DefaultModelOpt = "gpt-5.5",
    show_prompts: ShowPromptsOpt = False,
    offline: OfflineOpt = True,
    compact: CompactOpt = False,
    width: WidthOpt = None,
    top_threads: TopThreadsOpt = 10,
) -> None:
    """Surface usage patterns, cache behavior, and project signals."""
    # CSV is intentionally unsupported here, so this validates against a
    # narrower set than OUTPUT_FORMATS instead of calling _validate_format.
    if output_format not in {"table", "json", "markdown"}:
        raise _exit_error("--format must be one of: table, json, markdown")
    # locals() is forwarded; parameter names must match OPTION_KEYS.
    options = _options(locals())
    result = load_usage(options)
    card = RateCard.load(options.rates_file, options.pricing_mode)
    items = build_insights(result, options, rate_card=card)[: options.top_threads]
    if output_format == "json":
        text = _json_dumps(insights_payload(items)) + "\n"
    elif output_format == "markdown":
        text = render_insights_markdown(items)
    else:
        text = _render_insights_table(items)
    if output:
        output.expanduser().write_text(text)
    else:
        # nl=False: the renderers already terminate their output with newlines.
        typer.echo(text, nl=False)
581
+
582
+
583
def _render_insights_table(items) -> str:
    """Render insight items as a rich table captured into a string.

    *items* are insight records exposing severity/title/detail/action
    attributes (produced by codex_meter.insights.build_insights).
    """
    buffer = io.StringIO()
    # Fresh Console writing to a buffer; _environ={} isolates rendering from
    # host environment variables (e.g. terminal width / color settings).
    local_console = Console(file=buffer, width=120, _environ={})
    local_console.print("[bold]Codex Meter - Insights[/bold]")
    if not items:
        local_console.print("No insights for this window.")
        return buffer.getvalue()
    table = Table(show_lines=False, expand=True)
    table.add_column("Severity")
    table.add_column("Insight")
    table.add_column("Detail")
    table.add_column("Action")
    for item in items:
        table.add_row(item.severity, item.title, item.detail, item.action)
    local_console.print(table)
    return buffer.getvalue()
599
+
600
+
601
@app.command()
def tail(
    n: Annotated[int, typer.Option("--n", min=1, help="Number of recent records.")] = 20,
    by: Annotated[str, typer.Option("--by", help="event or session.")] = "event",
    since: SinceOpt = None,
    until: UntilOpt = None,
    days: DaysOpt = None,
    timezone: TimezoneOpt = "local",
    output_format: FormatOpt = "table",
    output: OutputOpt = None,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    pricing_mode: PricingModeOpt = "model",
    service_tier: ServiceTierOpt = "auto",
    unknown_service_tier: UnknownTierOpt = "current-config",
    tier_overrides: TierOverridesOpt = None,
    rates_file: RatesFileOpt = None,
    no_dedupe: NoDedupeOpt = False,
    no_parse_cache: NoParseCacheOpt = False,
    default_model: DefaultModelOpt = "gpt-5.5",
    show_prompts: ShowPromptsOpt = False,
    offline: OfflineOpt = True,
    compact: CompactOpt = False,
    width: WidthOpt = None,
    top_threads: TopThreadsOpt = 10,
) -> None:
    """Show the most recent usage events or sessions."""
    # top_threads exists only for flag parity with the grouped commands;
    # --n controls the row limit here, so drop it before locals() is read.
    del top_threads
    if by not in {"event", "session"}:
        raise _exit_error("--by must be one of: event, session")
    # markdown output is intentionally unsupported for tail.
    if output_format not in {"table", "json", "csv"}:
        raise _exit_error("--format must be one of: table, json, csv")
    # Re-inject n under the top_threads key so RuntimeOptions carries the limit.
    options = _options(locals() | {"top_threads": n})
    result = load_usage(options)
    rows = _recent_tail_rows(result, n, by)
    if output_format == "json":
        # Key is "events" or "sessions" depending on --by.
        text = _json_dumps({"by": by, f"{by}s": rows}) + "\n"
    elif output_format == "csv":
        text = _tail_csv(rows)
    else:
        text = _tail_table(rows, by, options)
    if output:
        output.expanduser().write_text(text)
    else:
        typer.echo(text, nl=False)
648
+
649
+
650
def _recent_tail_rows(result: LoadResult, n: int, by: str) -> list[dict]:
    """Return the *n* newest events, or the newest event per distinct session.

    For by="session", each row additionally carries a "label" taken from the
    thread title, first user message, or the session id as a fallback.
    """
    newest_first = sorted(result.events, key=lambda evt: evt.timestamp, reverse=True)
    if by == "event":
        return [_tail_event_dict(evt) for evt in newest_first[:n]]
    rows: list[dict] = []
    seen_sessions: set[str] = set()
    for event in newest_first:
        if event.session_id in seen_sessions:
            continue
        seen_sessions.add(event.session_id)
        row = _tail_event_dict(event)
        row["label"] = event.thread.title or event.thread.first_user_message or event.session_id
        rows.append(row)
        if len(rows) >= n:
            break
    return rows
666
+
667
+
668
def _tail_event_dict(event) -> dict:
    """Flatten a usage event into the row shape used by `tail` JSON/CSV/table."""
    return {
        "timestamp": iso_z(event.timestamp),
        "session_id": event.session_id,
        "model": event.model,
        "service_tier": event.service_tier,
        "input_tokens": event.usage.input_tokens,
        "cached_input_tokens": event.usage.cached_input_tokens,
        "output_tokens": event.usage.output_tokens,
        "reasoning_output_tokens": event.usage.reasoning_output_tokens,
        "total_tokens": event.usage.total_tokens,
        "project": event.thread.cwd,
    }
681
+
682
+
683
def _tail_csv(rows: list[dict]) -> str:
    """Render tail rows as CSV text.

    Delegates to _records_to_csv, which this function previously duplicated
    line-for-line (same header/row logic, same empty-input behavior).
    """
    return _records_to_csv(rows)
691
+
692
+
693
def _tail_table(rows: list[dict], by: str, options: RuntimeOptions) -> str:
    """Render tail rows as a rich table and return the captured text."""
    buffer = io.StringIO()
    # _environ={} isolates rendering from the host terminal environment.
    sink = Console(file=buffer, width=options.width or 120, _environ={})
    sink.print(f"[bold]Codex Meter - Recent {by.title()}s[/bold]")
    table = Table(show_lines=False, expand=True)
    for heading in ("Time", "Model", "Tier"):
        table.add_column(heading)
    table.add_column("Tokens", justify="right")
    table.add_column("Project")
    for row in rows:
        project = row.get("project") or "-"
        table.add_row(
            row["timestamp"],
            row["model"],
            row["service_tier"],
            format_int(row["total_tokens"]),
            project,
        )
    sink.print(table)
    return buffer.getvalue()
713
+
714
+
715
# Rich color per doctor status, and the process exit code per status
# (ok -> 0, warn -> 1, fail -> 2); the worst status wins overall.
_DOCTOR_STATUS_STYLES = {"ok": "green", "warn": "yellow", "fail": "red"}
_DOCTOR_EXIT_CODES = {"ok": 0, "warn": 1, "fail": 2}
717
+
718
+
719
def _doctor_check(label: str, status: str, detail: str) -> tuple[str, str, str]:
    """Bundle one doctor check result as a (label, status, detail) tuple."""
    return label, status, detail
721
+
722
+
723
def _check_codex_cli_version() -> tuple[str, str, str]:
    """Probe `codex --version` on PATH; warn (never fail) when unavailable.

    Uses the module-level subprocess import; the previous function-local
    `import subprocess` was redundant and shadowed it.
    """
    try:
        completed = subprocess.run(  # noqa: S603 — codex on PATH, no shell.
            ["codex", "--version"],
            capture_output=True,
            text=True,
            timeout=2,
            check=False,
        )
    except FileNotFoundError:
        return _doctor_check("Codex CLI", "warn", "not found on PATH; install Codex for live data.")
    except (subprocess.TimeoutExpired, OSError) as exc:
        return _doctor_check("Codex CLI", "warn", f"could not invoke (`{exc}`)")
    # First line of stdout (stderr as fallback); "found" when output is empty.
    lines = (completed.stdout or completed.stderr or "").strip().splitlines()
    return _doctor_check("Codex CLI", "ok", lines[0] if lines else "found")
740
+
741
+
742
def _check_clock_skew(events) -> tuple[str, str, str]:
    """Compare the newest event timestamp with UTC now and grade the offset.

    Within 5 minutes is ok, within a day is a warning, beyond that fails.
    """
    if not events:
        return _doctor_check("Clock", "ok", "no events to compare")
    newest = max(evt.timestamp for evt in events)
    offset = (newest - dt.datetime.now(tz=dt.UTC)).total_seconds()
    message = f"latest event {_clock_skew_detail(offset)}"
    magnitude = abs(offset)
    if magnitude <= 300:
        status = "ok"
    elif magnitude <= 86400:
        status = "warn"
    else:
        status = "fail"
    return _doctor_check("Clock", status, message)
754
+
755
+
756
+ def _clock_skew_detail(skew_seconds: float) -> str:
757
+ seconds = int(abs(skew_seconds))
758
+ if seconds < 60:
759
+ amount = f"{seconds}s"
760
+ else:
761
+ minutes, rem_seconds = divmod(seconds, 60)
762
+ hours, minutes = divmod(minutes, 60)
763
+ amount = f"{hours}h {minutes}m" if hours else f"{minutes}m {rem_seconds}s"
764
+ if skew_seconds > 0:
765
+ return f"{amount} in the future"
766
+ return f"{amount} ago"
767
+
768
+
769
def _check_rate_card_age() -> tuple[str, str, str]:
    """Grade the embedded rate card by days since its prices were verified."""
    age = _rate_card_age_days()
    detail = f"checked {age} days ago"
    if age <= 30:
        return _doctor_check("Rate card", "ok", detail)
    if age <= 90:
        return _doctor_check("Rate card", "warn", detail)
    return _doctor_check(
        "Rate card",
        "fail",
        f"{detail} — run `codex-meter rates show` and consider updating.",
    )
780
+
781
+
782
def _check_state_db_readable(path: Path) -> tuple[str, str, str]:
    """Attempt a read-only SQLite open of *path* without mutating anything."""
    if not path.exists():
        return _doctor_check("State DB readable", "warn", "state DB is missing")
    try:
        # mode=ro via a URI guarantees the probe cannot write or lock the DB.
        with closing(sqlite3.connect(f"file:{path}?mode=ro", uri=True)) as conn:
            conn.execute("select count(*) from sqlite_master").fetchone()
    except sqlite3.Error as exc:
        return _doctor_check("State DB readable", "warn", f"could not open read-only: {exc}")
    else:
        return _doctor_check("State DB readable", "ok", "read-only open succeeded")
791
+
792
+
793
def _check_rates_file(path: Path | None) -> tuple[str, str, str]:
    """Validate a user-supplied rates file, or report the embedded default."""
    if path is None:
        return _doctor_check("Rates file", "ok", "using embedded rate card")
    try:
        RateCard.load(path)
    except ValueError as exc:
        return _doctor_check("Rates file", "fail", str(exc))
    else:
        return _doctor_check("Rates file", "ok", str(path))
801
+
802
+
803
def _check_python_version() -> tuple[str, str, str]:
    """Require Python 3.11 or newer, matching the package's floor."""
    import sys  # sys is not imported at module level; keep the local import.

    info = sys.version_info
    if info >= (3, 11):
        return _doctor_check("Python", "ok", f"{info.major}.{info.minor}.{info.micro}")
    return _doctor_check("Python", "fail", f"{info.major}.{info.minor} — requires >= 3.11")
810
+
811
+
812
@app.command()
def doctor(
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    rates_file: RatesFileOpt = None,
    output_format: Annotated[
        str,
        typer.Option("--format", "-f", help="table, json, csv, or markdown."),
    ] = "table",
) -> None:
    """Check local Codex data paths, rate-card age, clock skew, and tooling.

    Exits with 0/1/2 for an overall ok/warn/fail status. Refactored to build
    the check-record list once instead of duplicating it per output branch.
    """
    _validate_format(output_format)
    try:
        options = build_options(
            days=7.0,
            session_root=session_root,
            state_db=state_db,
            codex_config=codex_config,
            config=config,
            rates_file=rates_file,
        )
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc

    files = list(options.session_root.glob("**/*.jsonl")) if options.session_root.exists() else []
    # Only parse usage when session files exist; load_usage is the slow step.
    result = load_usage(options) if files else None
    checks = _collect_doctor_checks(options, files, result)
    worst = _worst_doctor_status(checks)
    _emit_doctor_report(checks, worst, output_format)
    raise typer.Exit(_DOCTOR_EXIT_CODES[worst])


def _collect_doctor_checks(options, files: list[Path], result) -> list[tuple[str, str, str]]:
    """Run every doctor probe against the resolved options and loaded usage."""
    checks: list[tuple[str, str, str]] = [_check_python_version()]
    checks.append(
        _doctor_check(
            "Session root",
            "ok" if options.session_root.exists() else "fail",
            f"{options.session_root} ({format_int(len(files))} JSONL files)",
        )
    )
    checks.append(
        _doctor_check(
            "State DB",
            "ok" if options.state_db.exists() else "warn",
            str(options.state_db),
        )
    )
    checks.append(_check_state_db_readable(options.state_db))
    checks.append(
        _doctor_check(
            "Codex config",
            "ok" if options.config_path.exists() else "warn",
            str(options.config_path),
        )
    )
    checks.append(_check_codex_cli_version())
    checks.append(_check_rate_card_age())
    checks.append(_check_rates_file(options.rates_file))
    checks.append(_doctor_check("Parse cache", "ok", str(default_cache_path())))
    checks.append(_check_clock_skew(result.events if result else []))
    if result:
        checks.append(
            _doctor_check("Events loaded", "ok", f"{len(result.events):,} in last 7 days")
        )
        if result.events:
            # Events whose tier was inferred rather than recorded by Codex.
            inferred = result.tier_sources.get("assumed", 0) + result.tier_sources.get(
                "current-config", 0
            )
            if inferred == len(result.events):
                checks.append(
                    _doctor_check(
                        "Tier coverage",
                        "warn",
                        "No events recorded a tier; pin one with --service-tier "
                        "or --tier-overrides.",
                    )
                )
        if result.warnings:
            for warning in result.warnings:
                checks.append(_doctor_check("Parser warning", "warn", warning))
    return checks


def _worst_doctor_status(checks: list[tuple[str, str, str]]) -> str:
    """Return the most severe status present: fail > warn > ok."""
    severity_rank = {"ok": 0, "warn": 1, "fail": 2}
    worst = "ok"
    for _, status, _detail in checks:
        if severity_rank[status] > severity_rank[worst]:
            worst = status
    return worst


def _emit_doctor_report(
    checks: list[tuple[str, str, str]], worst: str, output_format: str
) -> None:
    """Print doctor results in the requested format (shared record list)."""
    records = [
        {"label": label, "status": status, "detail": detail} for label, status, detail in checks
    ]
    if output_format == "json":
        typer.echo(_json_dumps({"checks": records, "worst": worst}))
        return
    if output_format == "csv":
        typer.echo(_records_to_csv(records), nl=False)
        return
    if output_format == "markdown":
        typer.echo(_records_to_markdown(records), nl=False)
        return
    console.print("[bold]Codex Meter - Doctor[/bold]")
    table = Table(show_lines=False)
    table.add_column("Check")
    table.add_column("Status")
    table.add_column("Detail")
    for label, status, detail in checks:
        style = _DOCTOR_STATUS_STYLES[status]
        table.add_row(label, f"[{style}]{status.upper()}[/{style}]", detail)
    console.print(table)
    console.print(
        f"Overall: [{_DOCTOR_STATUS_STYLES[worst]}]{worst.upper()}[/{_DOCTOR_STATUS_STYLES[worst]}]"
    )
943
+
944
+
945
@app.command()
def init(
    path: Annotated[
        Path,
        typer.Option("--path", help="Target file path."),
    ] = Path(".codex-meter.toml"),
    force: Annotated[
        bool,
        typer.Option("--force", help="Overwrite an existing config."),
    ] = False,
) -> None:
    """Scaffold a .codex-meter.toml config with commented defaults."""
    # Refuse to clobber an existing config unless --force was given.
    if path.exists() and not force:
        raise _exit_error(f"{path} already exists. Pass --force to overwrite.")
    # Most keys are emitted commented-out so the built-in defaults stay
    # authoritative until the user deliberately opts in to an override.
    template = (
        "# codex-meter configuration.\n"
        "# Tip: every key here can also be set per-invocation via a CLI flag.\n"
        "\n"
        "# Default rolling window for `codex-meter` (days).\n"
        "default_days = 30\n"
        '# timezone = "local"\n'
        "\n"
        "# Local Codex data paths.\n"
        '# session_root = "~/.codex/sessions"\n'
        '# state_db = "~/.codex/state_5.sqlite"\n'
        '# codex_config = "~/.codex/config.toml"\n'
        "\n"
        "# Pricing behavior. model = per-model card; flat = default fallback rates.\n"
        '# pricing_mode = "model"\n'
        '# rates_file = "./rates.json"\n'
        "\n"
        "# Service-tier inference. auto = use precedence chain.\n"
        'service_tier = "auto"\n'
        'unknown_service_tier = "current-config"\n'
        '# tier_overrides = "./tier-overrides.json"\n'
        "\n"
        "# Default model when none is recorded.\n"
        'default_model = "gpt-5.5"\n'
        "\n"
        "# Output and privacy defaults.\n"
        "# show_prompts = false\n"
        "# offline = true\n"
        "# compact = false\n"
        "# width = 140\n"
        "# top_threads = 10\n"
        "# no_dedupe = false\n"
        "# no_parse_cache = false\n"
        "\n"
        "# Optional budgets — used by `codex-meter budgets check`.\n"
        "# Severity: ok < 80%, warn at 80%, breach at 100%.\n"
        "[budgets]\n"
        "# daily_credits = 25000\n"
        "# weekly_credits = 100000\n"
        "# monthly_credits = 400000\n"
        "# weekly_api_dollars = 50.0\n"
        "\n"
        "# Or use the nested form to set warn_at per period:\n"
        "# [budgets.monthly]\n"
        "# credits = 500000\n"
        "# warn_at = 0.7\n"
    )
    path.write_text(template)
    console.print(f"[green]Wrote[/green] {path}")
    console.print("[dim]Edit it, then run `codex-meter budgets check` to verify.[/dim]")
1009
+
1010
+
1011
# Sub-application housing the `codex-meter rates ...` commands (show, refresh).
rates_app = typer.Typer(help="Inspect and manage the embedded Codex rate card.")
app.add_typer(rates_app, name="rates")
1013
+
1014
+
1015
def _rate_card_age_days() -> int:
    """Return the age (in days) of the oldest parseable pricing-source check date.

    Sources with an unparseable ``checked`` date are skipped; with no
    parseable sources at all the age is reported as 0.
    """
    today = dt.date.today()

    def _age(raw: str) -> int | None:
        # None signals "date did not parse"; caller filters those out.
        try:
            return (today - dt.date.fromisoformat(raw)).days
        except ValueError:
            return None

    ages = [age for age in (_age(src.checked) for src in PRICING_SOURCES) if age is not None]
    return max(ages, default=0)
1025
+
1026
+
1027
+ def _format_rates_label(rates: object) -> str:
1028
+ if rates is None:
1029
+ return "—"
1030
+ return (
1031
+ f"in={rates.input:g} cached={rates.cached_input:g} "
1032
+ f"out={rates.output:g} reason={rates.effective_reasoning_output:g}"
1033
+ )
1034
+
1035
+
1036
+ @rates_app.command("show")
1037
+ def rates_show(
1038
+ output_format: Annotated[
1039
+ str,
1040
+ typer.Option("--format", "-f", help="table, json, csv, or markdown."),
1041
+ ] = "table",
1042
+ ) -> None:
1043
+ """Show the active rate card, sources, and age."""
1044
+ _validate_format(output_format)
1045
+
1046
+ age = _rate_card_age_days()
1047
+ stale = age > 90
1048
+
1049
+ payload = {
1050
+ "checked": [
1051
+ {"name": source.name, "url": source.url, "checked": source.checked}
1052
+ for source in PRICING_SOURCES
1053
+ ],
1054
+ "age_days": age,
1055
+ "stale": stale,
1056
+ "models": [
1057
+ {
1058
+ "name": card.name,
1059
+ "api": _rates_payload(card.api_rates),
1060
+ "credits": _rates_payload(card.credit_rates),
1061
+ "fast_multiplier": card.fast_multiplier,
1062
+ "long_context": (
1063
+ {
1064
+ "threshold": card.long_context.threshold,
1065
+ "input_mult": card.long_context.input_mult,
1066
+ "output_mult": card.long_context.output_mult,
1067
+ }
1068
+ if card.long_context
1069
+ else None
1070
+ ),
1071
+ }
1072
+ for card in MODEL_CARDS
1073
+ ],
1074
+ }
1075
+ if output_format == "json":
1076
+ typer.echo(_json_dumps(payload))
1077
+ return
1078
+ if output_format in {"csv", "markdown"}:
1079
+ records = [
1080
+ {
1081
+ "model": card["name"],
1082
+ "fast_multiplier": card["fast_multiplier"],
1083
+ "api_input": (card["api"] or {}).get("input", ""),
1084
+ "credits_input": (card["credits"] or {}).get("input", ""),
1085
+ }
1086
+ for card in payload["models"]
1087
+ ]
1088
+ text = _records_to_csv(records) if output_format == "csv" else _records_to_markdown(records)
1089
+ typer.echo(text, nl=False)
1090
+ return
1091
+
1092
+ console.print("[bold]Codex Meter - Rate Card[/bold]")
1093
+ console.print(f"Age: {age} days{' [red](stale; consider refreshing)[/red]' if stale else ''}")
1094
+ for source in PRICING_SOURCES:
1095
+ console.print(f"- {source.name} (checked {source.checked}) — {source.url}")
1096
+ console.print()
1097
+
1098
+ table = Table(title="Models")
1099
+ table.add_column("Model")
1100
+ table.add_column("Fast×", justify="right")
1101
+ table.add_column("Long context")
1102
+ table.add_column("API ($/M)")
1103
+ table.add_column("Credits (/M)")
1104
+ for card in MODEL_CARDS:
1105
+ long_ctx = (
1106
+ f">{format_int(card.long_context.threshold)} → "
1107
+ f"in×{card.long_context.input_mult:g}, out×{card.long_context.output_mult:g}"
1108
+ if card.long_context
1109
+ else "—"
1110
+ )
1111
+ table.add_row(
1112
+ card.name,
1113
+ f"{card.fast_multiplier:g}",
1114
+ long_ctx,
1115
+ _format_rates_label(card.api_rates),
1116
+ _format_rates_label(card.credit_rates),
1117
+ )
1118
+ console.print(table)
1119
+
1120
+
1121
+ @rates_app.command("refresh")
1122
+ def rates_refresh(
1123
+ allow_network: Annotated[
1124
+ bool,
1125
+ typer.Option("--allow-network", help="Fetch pricing pages over the network."),
1126
+ ] = False,
1127
+ output: OutputOpt = None,
1128
+ ) -> None:
1129
+ """Refresh a local fetched rate-card snapshot. Offline unless --allow-network is set."""
1130
+ if not allow_network:
1131
+ raise _exit_error(
1132
+ "rates refresh needs --allow-network. The default path stays offline; pass "
1133
+ "--allow-network to fetch an audit snapshot, or use --rates-file to override locally."
1134
+ )
1135
+ target = output or _fetched_rates_path()
1136
+ payload = _fetch_rate_sources()
1137
+ target.expanduser().parent.mkdir(parents=True, exist_ok=True)
1138
+ target.expanduser().write_text(_json_dumps(payload) + "\n")
1139
+ console.print(f"[green]Wrote[/green] {target}")
1140
+
1141
+
1142
+ def _fetched_rates_path() -> Path:
1143
+ override = os.environ.get("CODEX_METER_DATA_DIR")
1144
+ if override:
1145
+ return Path(override).expanduser() / "rates-fetched.json"
1146
+ xdg = os.environ.get("XDG_DATA_HOME")
1147
+ if xdg:
1148
+ return Path(xdg).expanduser() / "codex-meter" / "rates-fetched.json"
1149
+ return Path.home() / ".local" / "share" / "codex-meter" / "rates-fetched.json"
1150
+
1151
+
1152
def _fetch_rate_sources() -> dict:
    """Fetch every pricing source and build the snapshot payload.

    Network errors are recorded per-source (status "error") rather than
    aborting the whole refresh; successful pages are parsed for model
    pricing facts and compared against the embedded rate card.
    """
    sources = []
    observed = []
    for source in PRICING_SOURCES:
        request = urllib.request.Request(
            source.url,
            headers={"User-Agent": f"codex-meter/{__version__}"},
        )
        try:
            with urllib.request.urlopen(request, timeout=5) as response:
                body = response.read()
        except OSError as exc:
            # Best-effort: keep going so one dead source doesn't sink the run.
            sources.append(
                {"name": source.name, "url": source.url, "status": "error", "error": str(exc)}
            )
            continue
        text = body.decode("utf-8", errors="replace")
        extracted = _extract_models_from_text(text)
        sources.append(
            {
                "name": source.name,
                "url": source.url,
                "status": "ok",
                "bytes": len(body),
                "observed_models": len(extracted),
            }
        )
        # Tag each extracted entry with the source it came from.
        observed.extend(item | {"source": source.name} for item in extracted)
    observed_models = _dedupe_models(observed)
    return {
        "fetched_at": iso_z(dt.datetime.now(tz=dt.UTC)),
        "sources": sources,
        "embedded_models": _embedded_rate_snapshot(),
        "observed_models": observed_models,
        # "models" duplicates "observed_models" for backward compatibility.
        "models": observed_models,
        "discrepancies": _rate_discrepancies(observed_models),
    }
1189
+
1190
+
1191
+ def _extract_models_from_text(text: str) -> list[dict]:
1192
+ try:
1193
+ raw = json.loads(text)
1194
+ except json.JSONDecodeError:
1195
+ return _extract_models_from_html(text)
1196
+ if isinstance(raw, dict) and isinstance(raw.get("models"), list):
1197
+ return [item for item in raw["models"] if isinstance(item, dict) and item.get("name")]
1198
+ return []
1199
+
1200
+
1201
def _extract_models_from_html(text: str) -> list[dict]:
    """Scrape per-model pricing facts for every known model card from HTML."""
    normalized = _normal_text(text)
    results: list[dict] = []
    for card in MODEL_CARDS:
        window = _window_for_model(normalized, card.name)
        if not window:
            continue
        entry: dict = {"name": card.name}
        api = _extract_api_rates(window)
        if api:
            entry["api"] = api
        credits = _extract_credit_rates(window, card.name)
        if credits:
            entry["credits"] = credits
        fast = _extract_fast_multiplier(window, card.name)
        if fast is not None:
            entry["fast_multiplier"] = fast
        long_ctx = _extract_long_context_rule(window)
        if long_ctx is not None:
            entry["long_context"] = long_ctx
        # Only keep the entry when at least one fact beyond the name was found.
        if len(entry) > 1:
            results.append(entry)
    return results
1224
+
1225
+
1226
+ def _normal_text(text: str) -> str:
1227
+ text = html.unescape(text)
1228
+ text = re.sub(r"<[^>]+>", " ", text)
1229
+ text = text.replace("\u2011", "-").replace("\u2010", "-").replace("\u2013", "-")
1230
+ text = text.replace("\u2014", "-").replace("\xa0", " ")
1231
+ return re.sub(r"\s+", " ", text).strip()
1232
+
1233
+
1234
+ def _window_for_model(text: str, model: str) -> str:
1235
+ candidates = [
1236
+ text[max(0, match.start() - 1000) : match.start() + 3500]
1237
+ for match in re.finditer(re.escape(model), text, flags=re.IGNORECASE)
1238
+ ]
1239
+ for candidate in candidates:
1240
+ lowered = candidate.lower()
1241
+ if "per 1m tokens" in lowered or "credits" in lowered:
1242
+ return candidate
1243
+ return candidates[0] if candidates else ""
1244
+
1245
+
1246
+ def _extract_api_rates(window: str) -> dict | None:
1247
+ match = re.search(
1248
+ r"Per 1M tokens\s+Input\s+\$([0-9.]+)\s+Cached input\s+\$([0-9.]+)\s+Output\s+\$([0-9.]+)",
1249
+ window,
1250
+ flags=re.IGNORECASE,
1251
+ )
1252
+ if not match:
1253
+ return None
1254
+ return {
1255
+ "input": float(match.group(1)),
1256
+ "cached_input": float(match.group(2)),
1257
+ "output": float(match.group(3)),
1258
+ }
1259
+
1260
+
1261
+ def _extract_credit_rates(window: str, model: str) -> dict | None:
1262
+ display = re.escape(model.replace("gpt", "GPT"))
1263
+ match = re.search(
1264
+ rf"{display}\s+([0-9.]+)\s+credits\s+([0-9.]+)\s+credits\s+([0-9.]+)\s+credits",
1265
+ window,
1266
+ flags=re.IGNORECASE,
1267
+ )
1268
+ if not match:
1269
+ return None
1270
+ return {
1271
+ "input": float(match.group(1)),
1272
+ "cached_input": float(match.group(2)),
1273
+ "output": float(match.group(3)),
1274
+ }
1275
+
1276
+
1277
+ def _extract_fast_multiplier(window: str, model: str) -> float | None:
1278
+ display = re.escape(model.replace("gpt", "GPT"))
1279
+ patterns = [
1280
+ rf"([0-9.]+)x\s+the\s+Standard\s+rate\s+for\s+{display}",
1281
+ rf"{display}[^.]*?([0-9.]+)x\s+the\s+Standard\s+rate",
1282
+ ]
1283
+ for pattern in patterns:
1284
+ match = re.search(pattern, window, flags=re.IGNORECASE)
1285
+ if match:
1286
+ return float(match.group(1))
1287
+ return None
1288
+
1289
+
1290
+ def _extract_long_context_rule(window: str) -> dict | None:
1291
+ lowered = window.lower()
1292
+ if ">272k" not in lowered or "2x input" not in lowered or "1.5x output" not in lowered:
1293
+ return None
1294
+ return {"threshold": 272_000, "input_mult": 2.0, "output_mult": 1.5}
1295
+
1296
+
1297
+ def _dedupe_models(models: list[dict]) -> list[dict]:
1298
+ deduped: dict[str, dict] = {}
1299
+ for model in models:
1300
+ name = str(model["name"])
1301
+ merged = deduped.setdefault(name, {"name": name})
1302
+ for key, value in model.items():
1303
+ if key == "name":
1304
+ continue
1305
+ if key == "source" and merged.get("source") and merged["source"] != value:
1306
+ merged["source"] = f"{merged['source']}; {value}"
1307
+ continue
1308
+ merged[key] = value
1309
+ return [deduped[key] for key in sorted(deduped)]
1310
+
1311
+
1312
+ def _rates_payload(rates) -> dict | None:
1313
+ if rates is None:
1314
+ return None
1315
+ return {
1316
+ "input": float(rates.input),
1317
+ "cached_input": float(rates.cached_input),
1318
+ "output": float(rates.output),
1319
+ "reasoning_output": float(rates.effective_reasoning_output),
1320
+ }
1321
+
1322
+
1323
def _embedded_rate_snapshot() -> list[dict]:
    """Snapshot the embedded rate card (all models) as JSON-friendly dicts."""
    snapshot: list[dict] = []
    for card in MODEL_CARDS:
        long_context = None
        if card.long_context:
            long_context = {
                "threshold": card.long_context.threshold,
                "input_mult": card.long_context.input_mult,
                "output_mult": card.long_context.output_mult,
            }
        snapshot.append(
            {
                "name": card.name,
                "api": _rates_payload(card.api_rates),
                "credits": _rates_payload(card.credit_rates),
                "fast_multiplier": card.fast_multiplier,
                "long_context": long_context,
            }
        )
    return snapshot
1342
+
1343
+
1344
def _rate_discrepancies(observed_models: list[dict]) -> list[dict]:
    """Compare observed (fetched) pricing facts against the embedded rate card.

    Returns one record per mismatched field, covering per-section token
    rates, the fast-tier multiplier, and the long-context rule. Observed
    models with no matching embedded card are ignored.
    """
    discrepancies: list[dict] = []
    for observed in observed_models:
        card = MODELS_BY_NAME.get(normalize_model(str(observed.get("name") or "")))
        if card is None:
            continue
        # Token-rate sections: API dollars and plan credits.
        for section, rates in (("api", card.api_rates), ("credits", card.credit_rates)):
            if not rates or not isinstance(observed.get(section), dict):
                continue
            expected = _rates_payload(rates) or {}
            for field in ("input", "cached_input", "output"):
                actual = observed[section].get(field)
                if actual is None:
                    continue
                # Exact float comparison with a tiny epsilon guard.
                if abs(float(actual) - float(expected[field])) > 1e-9:
                    discrepancies.append(
                        {
                            "model": card.name,
                            "section": section,
                            "field": field,
                            "embedded": expected[field],
                            "observed": actual,
                        }
                    )
        # Fast-tier multiplier mismatch.
        if observed.get("fast_multiplier") is not None:
            actual_fast = float(observed["fast_multiplier"])
            if abs(actual_fast - float(card.fast_multiplier)) > 1e-9:
                discrepancies.append(
                    {
                        "model": card.name,
                        "section": "fast_multiplier",
                        "field": "multiplier",
                        "embedded": card.fast_multiplier,
                        "observed": actual_fast,
                    }
                )
        # Long-context surcharge rule mismatch (threshold and multipliers).
        if isinstance(observed.get("long_context"), dict) and card.long_context is not None:
            expected_long = {
                "threshold": card.long_context.threshold,
                "input_mult": card.long_context.input_mult,
                "output_mult": card.long_context.output_mult,
            }
            for field, expected in expected_long.items():
                actual = observed["long_context"].get(field)
                if actual is not None and abs(float(actual) - float(expected)) > 1e-9:
                    discrepancies.append(
                        {
                            "model": card.name,
                            "section": "long_context",
                            "field": field,
                            "embedded": expected,
                            "observed": actual,
                        }
                    )
    return discrepancies
1399
+
1400
+
1401
def _daily_credit_series(events, options: RuntimeOptions, rate_card: RateCard) -> list[float]:
    """Sum adjusted credits per local-tz day across the given events."""
    # Wrap the raw events in a synthetic LoadResult so the daily aggregator
    # can be reused without re-parsing anything.
    synthetic = LoadResult(
        events=list(events),
        duplicates=0,
        tier_sources={},
        plan_types=set(),
        credit_samples=[],
        warnings=[],
    )
    daily_rows = aggregate_daily(synthetic, options, rate_card=rate_card)
    return [float(day.costs.adjusted_credits) for day in daily_rows]
1413
+
1414
+
1415
def _daily_api_dollar_series(events, options: RuntimeOptions, rate_card: RateCard) -> list[float]:
    """Sum API dollars per local-tz day across the given events."""
    # Same synthetic-LoadResult trick as _daily_credit_series, but the
    # series reads the api_dollars cost column instead.
    synthetic = LoadResult(
        events=list(events),
        duplicates=0,
        tier_sources={},
        plan_types=set(),
        credit_samples=[],
        warnings=[],
    )
    daily_rows = aggregate_daily(synthetic, options, rate_card=rate_card)
    return [float(day.costs.api_dollars) for day in daily_rows]
1426
+
1427
+
1428
+ def _sparkline(values: list[float]) -> str:
1429
+ if not values:
1430
+ return ""
1431
+ bars = "▁▂▃▄▅▆▇█"
1432
+ low = min(values)
1433
+ high = max(values)
1434
+ if high == low:
1435
+ return bars[0] * len(values)
1436
+ return "".join(bars[round((value - low) / (high - low) * (len(bars) - 1))] for value in values)
1437
+
1438
+
1439
+ def _days_remaining_in_month(now: dt.datetime) -> int:
1440
+ last_day = calendar.monthrange(now.year, now.month)[1]
1441
+ return max(0, last_day - now.day)
1442
+
1443
+
1444
@app.command()
def forecast(
    days: Annotated[
        int,
        typer.Option("--days", min=1, max=180, help="Trailing day window analyzed."),
    ] = 14,
    cap: Annotated[
        float | None,
        typer.Option("--cap", help="Plan credit cap. Compute days-to-depletion."),
    ] = None,
    output_format: FormatOpt = "table",
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    rates_file: RatesFileOpt = None,
    tier_overrides: TierOverridesOpt = None,
    service_tier: ServiceTierOpt = "auto",
    pricing_mode: PricingModeOpt = "model",
) -> None:
    """Project month-end credits + ±1σ band. Optional --cap shows days-to-depletion."""
    _validate_format(output_format)
    try:
        options = build_options(
            days=float(days),
            session_root=session_root,
            state_db=state_db,
            codex_config=codex_config,
            config=config,
            rates_file=rates_file,
            tier_overrides=tier_overrides,
            service_tier=service_tier,
            pricing_mode=pricing_mode,
        )
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc

    result = load_usage(options)
    rate_card = RateCard.load(options.rates_file, options.pricing_mode)
    total = aggregate_total(result, options, rate_card=rate_card)
    status = pricing_status(total)
    warnings = pricing_warnings(total)
    # Daily credit and API-dollar series feed two parallel projections.
    daily = _daily_credit_series(result.events, options, rate_card)
    daily_dollars = _daily_api_dollar_series(result.events, options, rate_card)
    now = dt.datetime.now(tz=local_timezone())
    days_remaining = _days_remaining_in_month(now)
    projection = project_forecast(daily, days_remaining, unit="credits", cap=cap)
    dollar_projection = project_forecast(daily_dollars, days_remaining, unit="API $")
    sparkline = _sparkline(daily)

    # Full payload used by JSON output.
    forecast_payload = {
        "unit": projection.unit,
        "days_analyzed": projection.days_analyzed,
        "daily_mean": projection.daily_mean,
        "daily_stdev": projection.daily_stdev,
        "days_remaining": projection.days_remaining,
        "linear_total": projection.linear_total,
        "ewma_total": projection.ewma_total,
        "linear_low": projection.linear_low,
        "linear_high": projection.linear_high,
        "cap": projection.cap,
        "days_to_cap": projection.days_to_cap,
        "sparkline": sparkline,
        "projections": {
            "credits": {
                "linear_total": projection.linear_total,
                "ewma_total": projection.ewma_total,
                "daily_mean": projection.daily_mean,
            },
            "api_dollars": {
                "linear_total": dollar_projection.linear_total,
                "ewma_total": dollar_projection.ewma_total,
                "daily_mean": dollar_projection.daily_mean,
            },
        },
        "pricing_status": status,
        "pricing_warnings": warnings,
    }
    # Flat per-unit records used by CSV and markdown output.
    forecast_records = [
        {
            "unit": "credits",
            "daily_mean": f"{projection.daily_mean:.2f}",
            "linear_total": f"{projection.linear_total:.2f}",
            "ewma_total": f"{projection.ewma_total:.2f}",
            "sparkline": sparkline,
            "pricing_status": status,
        },
        {
            "unit": "api_dollars",
            "daily_mean": f"{dollar_projection.daily_mean:.2f}",
            "linear_total": f"{dollar_projection.linear_total:.2f}",
            "ewma_total": f"{dollar_projection.ewma_total:.2f}",
            "sparkline": sparkline,
            "pricing_status": status,
        },
    ]

    if output_format == "json":
        typer.echo(_json_dumps(forecast_payload))
        return

    if output_format == "csv":
        typer.echo(_records_to_csv(forecast_records), nl=False)
        return

    if output_format == "markdown":
        typer.echo(_records_to_markdown(forecast_records), nl=False)
        return

    # Default rich-table rendering.
    console.print("[bold]Codex Meter - Forecast[/bold]")
    for warning in warnings:
        console.print(f"[yellow]Warning:[/yellow] {warning}")
    console.print(
        f"Window analyzed: trailing {days} days ({projection.days_analyzed} days with usage)"
    )
    console.print(f"Days remaining this month: {projection.days_remaining}")
    table = Table(show_header=False, box=None, pad_edge=False)
    table.add_column(style="dim", justify="right")
    table.add_column()
    table.add_row("Daily mean", f"{projection.daily_mean:,.2f} {projection.unit}")
    table.add_row("Daily σ", f"{projection.daily_stdev:,.2f} {projection.unit}")
    table.add_row("Linear projection", f"{projection.linear_total:,.2f} {projection.unit}")
    table.add_row("API $ projection", f"${dollar_projection.linear_total:,.2f}")
    table.add_row(
        " ±1σ band",
        f"{projection.linear_low:,.2f} – {projection.linear_high:,.2f}",
    )
    table.add_row("EWMA projection", f"{projection.ewma_total:,.2f} {projection.unit}")
    table.add_row("Trend", sparkline or "no usage")
    # Depletion rows only appear when a cap was given and is reachable.
    if projection.cap is not None and projection.days_to_cap is not None:
        table.add_row("Plan cap", f"{projection.cap:,.2f} {projection.unit}")
        table.add_row("Days to depletion at mean rate", f"{projection.days_to_cap:,.1f}")
    console.print(table)
1577
+
1578
+
1579
+ def _events_in_interval(events, interval: Interval):
1580
+ return [event for event in events if interval.start <= event.timestamp < interval.end]
1581
+
1582
+
1583
def _safe_receipt_rows(
    rows: list[Aggregate], *, kind: str, show_sensitive: bool
) -> list[Aggregate]:
    """Return rows with labels anonymized unless sensitive output is allowed.

    Session labels keep only their timestamp; project labels keep a
    shortened display name; all other kinds keep the original label.
    Numeric/cost fields are carried over unchanged.
    """
    if show_sensitive:
        return rows
    anonymized: list[Aggregate] = []
    for position, row in enumerate(rows, start=1):
        if kind == "session":
            # Label format is "<timestamp> | <rest>"; keep only the timestamp.
            started_at = row.label.split(" | ", 1)[0]
            safe_label = f"Session {position} ({started_at})"
        elif kind == "project":
            safe_label = f"Project {position}: {short_table_label(row.label) or 'Unknown Project'}"
        else:
            safe_label = row.label
        clone = Aggregate(key=f"{kind}-{position}", label=safe_label)
        clone.totals = row.totals
        clone.costs = row.costs
        clone.cache_savings = row.cache_savings
        # Copy the set-valued fields so the clone owns its own collections.
        clone.models = set(row.models)
        clone.service_tiers = set(row.service_tiers)
        clone.plan_types = set(row.plan_types)
        clone.usage_sources = set(row.usage_sources)
        clone.model_context_window = row.model_context_window
        clone.long_context_events = row.long_context_events
        clone.unknown_model_events = row.unknown_model_events
        clone.unknown_tier_events = row.unknown_tier_events
        anonymized.append(clone)
    return anonymized
1610
+
1611
+
1612
def _aggregate_interval(
    events,
    options: RuntimeOptions,
    rate_card: RateCard,
    interval: Interval,
    label: str,
) -> Aggregate:
    """Aggregate only the events inside *interval* into one labeled total."""
    from codex_meter.aggregation import aggregate_total

    window_events = _events_in_interval(events, interval)
    # Synthetic LoadResult lets us reuse the standard total aggregator.
    synthetic = LoadResult(
        events=window_events,
        duplicates=0,
        tier_sources={},
        plan_types=set(),
        credit_samples=[],
        warnings=[],
    )
    return aggregate_total(synthetic, options, label=label, rate_card=rate_card)
1631
+
1632
+
1633
@app.command()
def compare(
    a: Annotated[
        str,
        typer.Option("--a", help='Window A expression, e.g. "last 7 days".'),
    ] = "last 7 days",
    b: Annotated[
        str,
        typer.Option("--b", help='Window B expression, e.g. "previous 7 days".'),
    ] = "previous 7 days",
    output_format: FormatOpt = "table",
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    rates_file: RatesFileOpt = None,
    tier_overrides: TierOverridesOpt = None,
    service_tier: ServiceTierOpt = "auto",
    pricing_mode: PricingModeOpt = "model",
) -> None:
    """Compare two windows side-by-side with credit + dollar deltas."""
    _validate_format(output_format)

    now = dt.datetime.now(tz=local_timezone())
    try:
        interval_a = parse_interval(a, now)
        interval_b = parse_interval(b, now)
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc

    # Load a window wide enough to cover both intervals (plus a day of slack).
    earliest = min(interval_a.start, interval_b.start)
    span_days = max(1.0, (now - earliest).total_seconds() / 86400.0) + 1

    try:
        options = build_options(
            days=span_days,
            session_root=session_root,
            state_db=state_db,
            codex_config=codex_config,
            config=config,
            rates_file=rates_file,
            tier_overrides=tier_overrides,
            service_tier=service_tier,
            pricing_mode=pricing_mode,
        )
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc

    result = load_usage(options)
    rate_card = RateCard.load(options.rates_file, options.pricing_mode)
    agg_a = _aggregate_interval(result.events, options, rate_card, interval_a, "A")
    agg_b = _aggregate_interval(result.events, options, rate_card, interval_b, "B")

    def _delta(left: float, right: float) -> tuple[float, float]:
        # Percentage is computed in Decimal to avoid float-division noise;
        # a zero baseline reports 0% rather than dividing by zero.
        diff = left - right
        pct = float(decimal_value(diff) / decimal_value(right) * Decimal("100")) if right else 0.0
        return diff, pct

    credits_delta, credits_pct = _delta(agg_a.costs.adjusted_credits, agg_b.costs.adjusted_credits)
    dollars_delta, dollars_pct = _delta(agg_a.costs.api_dollars, agg_b.costs.api_dollars)
    tokens_delta, tokens_pct = _delta(agg_a.totals.total_tokens, agg_b.totals.total_tokens)
    max_events = max(agg_a.totals.events, agg_b.totals.events)
    min_events = min(agg_a.totals.events, agg_b.totals.events)
    sparse_warning = ""
    # Flag wildly unbalanced windows (one side under 5% of the other's events).
    if max_events and min_events < max_events * 0.05:
        sparse_side = "A" if agg_a.totals.events == min_events else "B"
        sparse_warning = (
            f"warning: window {sparse_side} has {min_events:,} events; "
            "comparison is not representative"
        )

    if output_format == "json":
        typer.echo(
            _json_dumps(
                {
                    "a": _interval_summary(interval_a, agg_a),
                    "b": _interval_summary(interval_b, agg_b),
                    "delta": {
                        **_amount_fields("credits", credits_delta),
                        "credits_pct": credits_pct,
                        **_amount_fields("api_dollars", dollars_delta),
                        "api_dollars_pct": dollars_pct,
                        "tokens": tokens_delta,
                        "tokens_pct": tokens_pct,
                    },
                    "warnings": [sparse_warning] if sparse_warning else [],
                }
            )
        )
        return

    # Flat per-metric rows shared by CSV and markdown output.
    compare_records = [
        {
            "metric": "credits",
            "a": agg_a.costs.adjusted_credits,
            "b": agg_b.costs.adjusted_credits,
            "delta": credits_delta,
            "pct": credits_pct,
        },
        {
            "metric": "api_dollars",
            "a": agg_a.costs.api_dollars,
            "b": agg_b.costs.api_dollars,
            "delta": dollars_delta,
            "pct": dollars_pct,
        },
        {
            "metric": "tokens",
            "a": agg_a.totals.total_tokens,
            "b": agg_b.totals.total_tokens,
            "delta": tokens_delta,
            "pct": tokens_pct,
        },
        {
            "metric": "events",
            "a": agg_a.totals.events,
            "b": agg_b.totals.events,
            "delta": "",
            "pct": "",
        },
    ]
    if output_format == "csv":
        typer.echo(_records_to_csv(compare_records), nl=False)
        return
    if output_format == "markdown":
        typer.echo(_records_to_markdown(compare_records), nl=False)
        return

    # Default rich-table rendering.
    console.print("[bold]Codex Meter - Compare[/bold]")
    console.print(f"A: {interval_a.label} ({iso_z(interval_a.start)} → {iso_z(interval_a.end)})")
    console.print(f"B: {interval_b.label} ({iso_z(interval_b.start)} → {iso_z(interval_b.end)})")
    for warning in pricing_warnings(agg_a) + pricing_warnings(agg_b):
        console.print(f"[yellow]Warning:[/yellow] {warning}")
    table = Table()
    table.add_column("Metric")
    table.add_column("A", justify="right")
    table.add_column("B", justify="right")
    table.add_column("Δ", justify="right")
    table.add_column("%", justify="right")
    table.add_row(
        "Credits",
        f"{agg_a.costs.adjusted_credits:,.2f}",
        f"{agg_b.costs.adjusted_credits:,.2f}",
        f"{credits_delta:+,.2f}",
        f"{credits_pct:+.1f}%",
    )
    table.add_row(
        "API $",
        f"${agg_a.costs.api_dollars:,.2f}",
        f"${agg_b.costs.api_dollars:,.2f}",
        f"{dollars_delta:+,.2f}",
        f"{dollars_pct:+.1f}%",
    )
    table.add_row(
        "Tokens",
        format_int(agg_a.totals.total_tokens),
        format_int(agg_b.totals.total_tokens),
        f"{tokens_delta:+,.0f}",
        f"{tokens_pct:+.1f}%",
    )
    table.add_row(
        "Events",
        format_int(agg_a.totals.events),
        format_int(agg_b.totals.events),
        "",
        "",
    )
    console.print(table)
    if sparse_warning:
        console.print(f"[yellow]{sparse_warning}[/yellow]")
1803
+
1804
+
1805
def _interval_summary(interval: Interval, agg: Aggregate) -> dict:
    """Serialize one compared interval plus its aggregate into a JSON-friendly dict.

    Key order matches the historical output: label/start/end, then the
    amount fields, then counts, models, and pricing diagnostics.
    """
    summary: dict = {
        "label": interval.label,
        "start": iso_z(interval.start),
        "end": iso_z(interval.end),
    }
    # Each _amount_fields call expands into a small group of related keys.
    for field_name, amount in (
        ("credits", agg.costs.adjusted_credits),
        ("standard_credits", agg.costs.standard_credits),
        ("api_dollars", agg.costs.api_dollars),
    ):
        summary.update(_amount_fields(field_name, amount))
    summary["events"] = agg.totals.events
    summary["tokens"] = agg.totals.total_tokens
    summary["models"] = sorted(agg.models)
    summary["pricing_status"] = pricing_status(agg)
    summary["pricing_warnings"] = pricing_warnings(agg)
    return summary
1819
+
1820
+
1821
@app.command()
def whatif(
    days: Annotated[
        int,
        typer.Option("--days", min=1, max=365, help="Trailing day window."),
    ] = 7,
    tier: Annotated[
        str | None,
        typer.Option("--tier", help="Hypothetical tier: standard or fast."),
    ] = None,
    model: Annotated[
        str | None,
        typer.Option("--model", help="Hypothetical model name (must be in rate card)."),
    ] = None,
    output_format: FormatOpt = "table",
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    rates_file: RatesFileOpt = None,
    tier_overrides: TierOverridesOpt = None,
    service_tier: ServiceTierOpt = "auto",
    pricing_mode: PricingModeOpt = "model",
) -> None:
    """Re-cost the window under a hypothetical tier or model swap."""
    # Validate the hypothetical before doing any (potentially slow) loading.
    if tier is None and model is None:
        raise _exit_error("Provide --tier and/or --model to evaluate a hypothetical.")
    if tier is not None and tier not in {"standard", "fast"}:
        raise _exit_error("--tier must be one of: standard, fast")
    if model is not None and model not in MODELS_BY_NAME:
        raise _exit_error(
            f"--model {model!r} is not in the embedded rate card. "
            f"Use one of: {', '.join(sorted(MODELS_BY_NAME))}"
        )
    _validate_format(output_format)

    try:
        options = build_options(
            days=float(days),
            session_root=session_root,
            state_db=state_db,
            codex_config=codex_config,
            config=config,
            rates_file=rates_file,
            tier_overrides=tier_overrides,
            service_tier=service_tier,
            pricing_mode=pricing_mode,
        )
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc

    result = load_usage(options)
    rate_card = RateCard.load(options.rates_file, options.pricing_mode)
    actual_total = aggregate_total(result, options, rate_card=rate_card)
    actual_pricing_status = pricing_status(actual_total)
    actual_pricing_warnings = pricing_warnings(actual_total)
    # No-op fast path: every event already matches the hypothetical, so the
    # re-costing loop below would produce identical totals.
    if result.events and all(
        (tier is None or event.service_tier == tier) and (model is None or event.model == model)
        for event in result.events
    ):
        label_parts = []
        if tier:
            label_parts.append(f"tier={tier}")
        if model:
            label_parts.append(f"model={model}")
        label = ", ".join(label_parts)
        message = f"All {len(result.events):,} events are already at {label}; no change."
        noop_record = {
            "days": days,
            "tier": tier or "",
            "model": model or "",
            "noop": True,
            "message": message,
            "events_evaluated": len(result.events),
            "pricing_status": actual_pricing_status,
        }
        if output_format == "json":
            typer.echo(
                _json_dumps(
                    {
                        "days": days,
                        "hypothetical": {"tier": tier, "model": model},
                        "noop": True,
                        "message": message,
                        "events_evaluated": len(result.events),
                        "pricing_status": actual_pricing_status,
                        "pricing_warnings": actual_pricing_warnings,
                    }
                )
            )
            return
        if output_format == "csv":
            typer.echo(_records_to_csv([noop_record]), nl=False)
            return
        if output_format == "markdown":
            typer.echo(_records_to_markdown([noop_record]), nl=False)
            return
        console.print(message)
        return
    # Re-cost every event twice: once as recorded, once with the hypothetical
    # model/tier substituted. Decimal keeps the sums exact.
    actual_credits = Decimal("0")
    actual_dollars = Decimal("0")
    hypothetical_credits = Decimal("0")
    hypothetical_dollars = Decimal("0")
    for event in result.events:
        actual, _, _ = rate_card.cost_for(event.usage, event.model, event.service_tier)
        actual_credits += actual.adjusted_credits
        actual_dollars += actual.api_dollars
        # Unset hypothetical dimensions fall back to the event's own values.
        hyp_model = model or event.model
        hyp_tier = tier or event.service_tier
        hypothetical, _, _ = rate_card.cost_for(event.usage, hyp_model, hyp_tier)
        hypothetical_credits += hypothetical.adjusted_credits
        hypothetical_dollars += hypothetical.api_dollars

    credit_delta = hypothetical_credits - actual_credits
    dollar_delta = hypothetical_dollars - actual_dollars
    # Percent deltas guard against divide-by-zero when the actual total is 0.
    credit_pct = float(credit_delta / actual_credits * Decimal("100")) if actual_credits else 0.0
    dollar_pct = float(dollar_delta / actual_dollars * Decimal("100")) if actual_dollars else 0.0

    if output_format == "json":
        typer.echo(
            _json_dumps(
                {
                    "days": days,
                    "hypothetical": {"tier": tier, "model": model},
                    "actual": {
                        **_amount_fields("credits", actual_credits),
                        **_amount_fields("api_dollars", actual_dollars),
                    },
                    "projected": {
                        **_amount_fields("credits", hypothetical_credits),
                        **_amount_fields("api_dollars", hypothetical_dollars),
                    },
                    "delta": {
                        **_amount_fields("credits", credit_delta),
                        "credits_pct": credit_pct,
                        **_amount_fields("api_dollars", dollar_delta),
                        "api_dollars_pct": dollar_pct,
                    },
                    "events_evaluated": len(result.events),
                    "pricing_status": actual_pricing_status,
                    "pricing_warnings": actual_pricing_warnings,
                }
            )
        )
        return

    # Flat records shared by the csv and markdown renderers.
    whatif_records = [
        {
            "metric": "credits",
            "actual": actual_credits,
            "projected": hypothetical_credits,
            "delta": credit_delta,
            "pct": credit_pct,
            "pricing_status": actual_pricing_status,
        },
        {
            "metric": "api_dollars",
            "actual": actual_dollars,
            "projected": hypothetical_dollars,
            "delta": dollar_delta,
            "pct": dollar_pct,
            "pricing_status": actual_pricing_status,
        },
    ]
    if output_format == "csv":
        typer.echo(_records_to_csv(whatif_records), nl=False)
        return
    if output_format == "markdown":
        typer.echo(_records_to_markdown(whatif_records), nl=False)
        return

    # Table output (default).
    label_parts = []
    if tier:
        label_parts.append(f"tier={tier}")
    if model:
        label_parts.append(f"model={model}")
    label = ", ".join(label_parts) or "no-op"
    console.print(f"[bold]Codex Meter - What If ({label})[/bold]")
    console.print(f"Trailing {days} days · {len(result.events):,} events")
    for warning in actual_pricing_warnings:
        console.print(f"[yellow]Warning:[/yellow] {warning}")
    table = Table()
    table.add_column("Metric")
    table.add_column("Actual", justify="right")
    table.add_column("Projected", justify="right")
    table.add_column("Δ", justify="right")
    table.add_column("%", justify="right")
    table.add_row(
        "Credits",
        f"{actual_credits:,.2f}",
        f"{hypothetical_credits:,.2f}",
        f"{credit_delta:+,.2f}",
        f"{credit_pct:+.1f}%",
    )
    table.add_row(
        "API $",
        f"${actual_dollars:,.2f}",
        f"${hypothetical_dollars:,.2f}",
        f"{dollar_delta:+,.2f}",
        f"{dollar_pct:+.1f}%",
    )
    console.print(table)
2023
+
2024
+
2025
# Sub-application grouping the artifact-generation commands
# (receipt, grafana, prometheus) under `codex-meter export ...`.
export_app = typer.Typer(help="Generate external artifacts: receipts, Grafana dashboards, etc.")
app.add_typer(export_app, name="export")
2027
+
2028
+
2029
def _build_prometheus_snapshot(options: RuntimeOptions):
    """Construct a Prometheus MetricsSnapshot from a freshly loaded usage window.

    Loads the window configured in *options*, narrows it to events since local
    midnight ("today"), and packages today's cost totals, both rate-limit
    window states, and per-(model, tier, category) token counters.
    """
    # Lazy imports: prom_export depends on the optional prometheus-client
    # extra, and windows is only needed on this exporter path.
    # (aggregate_total is already imported at module level.)
    from codex_meter.prom_export import MetricsSnapshot
    from codex_meter.windows import compute_window_state

    result = load_usage(options)
    rate_card = RateCard.load(options.rates_file, options.pricing_mode)
    now = dt.datetime.now(tz=local_timezone())
    today_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
    today_events = [event for event in result.events if event.timestamp >= today_start]
    # Wrap today's slice in a fresh LoadResult so aggregation sees only it.
    today_result = LoadResult(
        events=today_events,
        duplicates=0,
        tier_sources={},
        plan_types=set(),
        credit_samples=[],
        warnings=[],
    )

    totals = aggregate_total(today_result, options, label="today", rate_card=rate_card)
    primary = compute_window_state(result.credit_samples, now, "primary")
    secondary = compute_window_state(result.credit_samples, now, "secondary")

    # Token counters keyed by (model, service_tier, category).
    tokens: dict[tuple[str, str, str], int] = {}
    for event in today_events:
        model_label = event.model or "unknown"
        tier_label = event.service_tier or "unknown"
        for category, count in (
            ("input", event.usage.input_tokens),
            ("cached", event.usage.cached_input_tokens),
            ("output", event.usage.output_tokens),
            ("reasoning", event.usage.reasoning_output_tokens),
        ):
            key = (model_label, tier_label, category)
            tokens[key] = tokens.get(key, 0) + int(count)

    # Window metrics may be unknown (None); Prometheus gauges get 0.0 then.
    burn = primary.burn_rate_per_hour
    return MetricsSnapshot(
        credits_used=float(totals.costs.adjusted_credits),
        burn_per_hour=burn if burn is not None else 0.0,
        primary_window_percent=primary.used_percent if primary.used_percent is not None else 0.0,
        secondary_window_percent=(
            secondary.used_percent if secondary.used_percent is not None else 0.0
        ),
        events_total=totals.totals.events,
        long_context_events_total=totals.long_context_events,
        tokens_total=tokens,
    )
2078
+
2079
+
2080
@export_app.command("prometheus")
def export_prometheus(
    host: Annotated[
        str,
        typer.Option("--host", help="Bind address. Default 127.0.0.1 to keep metrics local."),
    ] = "127.0.0.1",
    port: Annotated[int, typer.Option("--port", help="TCP port.")] = 9090,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    rates_file: RatesFileOpt = None,
    tier_overrides: TierOverridesOpt = None,
    service_tier: ServiceTierOpt = "auto",
    pricing_mode: PricingModeOpt = "model",
) -> None:
    """Serve /metrics for Prometheus. Default bind 127.0.0.1 — pass --host 0.0.0.0 to expose."""
    # prom_export needs the optional prometheus-client extra; import first so a
    # missing dependency fails fast with install instructions.
    try:
        from codex_meter.prom_export import serve_forever
    except ImportError as exc:
        raise _exit_error(
            "prometheus-client is not installed. Install with: pip install 'codex-meter[prom]'"
        ) from exc
    try:
        # Fixed 7-day lookback for the exporter's usage window.
        options = build_options(
            days=7.0,
            session_root=session_root,
            state_db=state_db,
            codex_config=codex_config,
            config=config,
            rates_file=rates_file,
            tier_overrides=tier_overrides,
            service_tier=service_tier,
            pricing_mode=pricing_mode,
        )
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc

    console.print(f"[green]Prometheus exporter listening on http://{host}:{port}/metrics[/green]")
    try:
        # Blocks until interrupted; the factory builds a fresh snapshot on
        # demand (presumably per scrape — see prom_export.serve_forever).
        serve_forever(host, port, lambda: _build_prometheus_snapshot(options))
    except OSError as exc:
        raise _exit_error(
            f"could not bind {host}:{port}: {exc}. "
            f"Try a different --port or check that no other exporter is running."
        ) from exc
2126
+
2127
+
2128
@export_app.command("grafana")
def export_grafana(
    title: Annotated[str, typer.Option("--title", help="Dashboard title.")] = "Codex Meter",
    output: OutputOpt = None,
) -> None:
    """Emit a Grafana dashboard JSON wired to the Prometheus exporter metric names."""
    dashboard_json = render_grafana_dashboard(title=title)
    if not output:
        # No destination file: print to stdout so the JSON can be piped.
        typer.echo(dashboard_json)
        return
    output.expanduser().write_text(dashboard_json)
2139
+
2140
+
2141
@export_app.command("receipt")
def export_receipt(
    month: Annotated[str, typer.Option("--month", help="Month YYYY-MM.")] = "",
    receipt_format: Annotated[
        str,
        typer.Option("--format", "-f", help="markdown or html."),
    ] = "markdown",
    output: OutputOpt = None,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    rates_file: RatesFileOpt = None,
    tier_overrides: TierOverridesOpt = None,
    service_tier: ServiceTierOpt = "auto",
    pricing_mode: PricingModeOpt = "model",
    top: Annotated[
        int,
        typer.Option("--top", min=1, max=50, help="Rows in 'top sessions' / 'top projects'."),
    ] = 5,
    show_sensitive: Annotated[
        bool,
        typer.Option(
            "--show-sensitive",
            help="Include full session labels and local project paths in the receipt.",
        ),
    ] = False,
) -> None:
    """Generate a monthly receipt (markdown or html).

    Defaults to the current month; writes to --output when given, otherwise
    prints to stdout.
    """
    if receipt_format not in {"markdown", "html"}:
        raise _exit_error("--format must be one of: markdown, html")

    now = dt.datetime.now(tz=local_timezone())
    # Empty --month means the current local month.
    chosen_month = month or now.strftime("%Y-%m")
    try:
        start, end = month_bounds(chosen_month, local_timezone())
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc

    # Size the load window to exactly cover the month ending at `end`.
    span_days = (end - start).total_seconds() / 86400.0
    try:
        options = build_options(
            days=span_days,
            until=iso_z(end),
            session_root=session_root,
            state_db=state_db,
            codex_config=codex_config,
            config=config,
            rates_file=rates_file,
            tier_overrides=tier_overrides,
            service_tier=service_tier,
            pricing_mode=pricing_mode,
        )
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc

    result = load_usage(options)
    rate_card = RateCard.load(options.rates_file, options.pricing_mode)

    # Keep only events inside the half-open month interval [start, end).
    in_month = [event for event in result.events if start <= event.timestamp < end]
    scoped = LoadResult(
        events=in_month,
        duplicates=0,
        tier_sources={},
        plan_types=set(),
        credit_samples=[],
        warnings=[],
    )

    # NOTE: the aggregation helpers are imported at module level; no local
    # re-import is needed here.
    totals = aggregate_total(scoped, options, label="Month", rate_card=rate_card)
    by_model = aggregate_model_mode(scoped, options, rate_card=rate_card)
    # _safe_receipt_rows redacts labels/paths unless --show-sensitive is set.
    top_sessions = _safe_receipt_rows(
        aggregate_sessions(scoped, options, rate_card=rate_card)[:top],
        kind="session",
        show_sensitive=show_sensitive,
    )
    top_projects = _safe_receipt_rows(
        aggregate_projects(scoped, options, rate_card=rate_card)[:top],
        kind="project",
        show_sensitive=show_sensitive,
    )

    payload = ReceiptInputs(
        month=chosen_month,
        totals=totals,
        by_model=by_model,
        top_sessions=top_sessions,
        top_projects=top_projects,
        generated_at=now,
        tier_sources=result.tier_sources,
        # Only the top-3 insight titles make it onto the receipt.
        insights=[item.title for item in build_insights(scoped, options, rate_card=rate_card)[:3]],
        warning_count=len(result.warnings),
        pricing_status=pricing_status(totals),
        pricing_warnings=pricing_warnings(totals),
    )
    text = (
        render_receipt_html(payload)
        if receipt_format == "html"
        else render_receipt_markdown(payload)
    )
    if output:
        output.expanduser().write_text(text)
    else:
        typer.echo(text)
2252
+
2253
+
2254
# Sub-application for budget alert inspection, mounted as `codex-meter budgets ...`.
budgets_app = typer.Typer(help="Inspect budget alerts (warn/breach) for the active window.")
app.add_typer(budgets_app, name="budgets")
2256
+
2257
+
2258
+ def _current_period_intervals(now: dt.datetime) -> dict[str, tuple[dt.datetime, dt.datetime]]:
2259
+ day_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
2260
+ week_start = day_start - dt.timedelta(days=day_start.weekday())
2261
+ month_start = day_start.replace(day=1)
2262
+ return {
2263
+ "daily": (day_start, now),
2264
+ "weekly": (week_start, now),
2265
+ "monthly": (month_start, now),
2266
+ }
2267
+
2268
+
2269
def _usage_for_periods(
    events, options: RuntimeOptions, rate_card: RateCard, now: dt.datetime
) -> dict[str, float]:
    """Aggregate usage for each budget period into "<period>.<metric>" keys.

    Produces keys such as "daily.credits", "weekly.api_dollars", and
    "monthly.tokens" so they can be matched against Budget keys.
    """
    # NOTE: aggregate_total is imported at module level; the previous local
    # re-import was redundant and has been removed.
    intervals = _current_period_intervals(now)
    usage: dict[str, float] = {}
    for period, (start, end) in intervals.items():
        scoped = [event for event in events if start <= event.timestamp < end]
        # Fresh LoadResult so aggregation only sees this period's events.
        result = LoadResult(
            events=scoped,
            duplicates=0,
            tier_sources={},
            plan_types=set(),
            credit_samples=[],
            warnings=[],
        )
        aggregate = aggregate_total(result, options, label=period, rate_card=rate_card)
        usage[f"{period}.credits"] = aggregate.costs.adjusted_credits
        usage[f"{period}.api_dollars"] = aggregate.costs.api_dollars
        usage[f"{period}.tokens"] = float(aggregate.totals.total_tokens)
    return usage
2291
+
2292
+
2293
def _severity_style(severity: str) -> str:
    """Return the rich-markup label for a budget severity level."""
    markup_by_severity = {
        SEVERITY_BREACH: "[red]breach[/red]",
        SEVERITY_WARN: "[yellow]warn[/yellow]",
    }
    # Anything that is neither a breach nor a warning renders as ok.
    return markup_by_severity.get(severity, "[green]ok[/green]")
2299
+
2300
+
2301
@budgets_app.command("check")
def budgets_check(
    config: ConfigOpt = None,
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    rates_file: RatesFileOpt = None,
    tier_overrides: TierOverridesOpt = None,
    service_tier: ServiceTierOpt = "auto",
    pricing_mode: PricingModeOpt = "model",
    output_format: FormatOpt = "table",
) -> None:
    """Evaluate \\[budgets] from .codex-meter.toml against the current window."""
    _validate_format(output_format)
    # Load the config file; config errors surface as clean CLI errors.
    try:
        loaded = load_config(config) if config else load_config()
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc
    raw = loaded.get("budgets") or {}
    try:
        # A non-dict [budgets] value is treated as empty rather than crashing.
        budget_list: list[Budget] = parse_budgets_table(raw if isinstance(raw, dict) else {})
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc

    if not budget_list:
        # No budgets configured is not an error: exit 0 with a hint.
        console.print(
            "No budgets defined. Add a [budgets] table to .codex-meter.toml. "
            "Example: daily_credits = 25000.",
            markup=False,  # literal "[budgets]" would otherwise parse as rich markup
        )
        raise typer.Exit(0)

    try:
        # A 31-day window covers the longest budget period (monthly).
        options = build_options(
            days=31.0,
            session_root=session_root,
            state_db=state_db,
            codex_config=codex_config,
            config=config,
            rates_file=rates_file,
            tier_overrides=tier_overrides,
            service_tier=service_tier,
            pricing_mode=pricing_mode,
        )
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc

    result = load_usage(options)
    rate_card = RateCard.load(options.rates_file, options.pricing_mode)
    total = aggregate_total(result, options, rate_card=rate_card)
    status = pricing_status(total)
    warnings = pricing_warnings(total)
    now = dt.datetime.now(tz=local_timezone())
    usage = _usage_for_periods(result.events, options, rate_card, now)
    alerts: list[BudgetAlert] = evaluate_budgets(budget_list, usage)
    worst = max_severity(alerts)
    alert_records = [
        {
            "period": alert.budget.period,
            "metric": alert.budget.metric,
            "limit": alert.budget.limit,
            "warn_at": alert.budget.warn_at,
            "used": alert.used,
            # used_exact preserves full precision for machine consumers.
            "used_exact": decimal_string(usage.get(alert.budget.key(), alert.used)),
            "used_percent": alert.used_percent,
            "severity": alert.severity,
            "pricing_status": status,
        }
        for alert in alerts
    ]

    # Every output path exits with a code derived from the worst severity,
    # so this command can gate scripts/CI on budget state.
    if output_format == "json":
        typer.echo(
            _json_dumps(
                {
                    "alerts": alert_records,
                    "max_severity": worst,
                    "pricing_status": status,
                    "pricing_warnings": warnings,
                }
            )
        )
        raise typer.Exit(SEVERITY_EXIT_CODE[worst])

    if output_format == "csv":
        typer.echo(_records_to_csv(alert_records), nl=False)
        raise typer.Exit(SEVERITY_EXIT_CODE[worst])

    if output_format == "markdown":
        typer.echo(_records_to_markdown(alert_records), nl=False)
        raise typer.Exit(SEVERITY_EXIT_CODE[worst])

    # Table output (default).
    console.print("[bold]Codex Meter - Budgets[/bold]")
    for warning in warnings:
        console.print(f"[yellow]Warning:[/yellow] {warning}")
    table = Table()
    table.add_column("Period")
    table.add_column("Metric")
    table.add_column("Used", justify="right")
    table.add_column("Limit", justify="right")
    table.add_column("%", justify="right")
    table.add_column("Status")
    for alert in alerts:
        table.add_row(
            alert.budget.period,
            alert.budget.metric,
            f"{alert.used:,.2f}",
            f"{alert.budget.limit:,.2f}",
            f"{alert.used_percent:,.1f}%",
            _severity_style(alert.severity),
        )
    console.print(table)
    console.print(f"Max severity: {_severity_style(worst)}")
    raise typer.Exit(SEVERITY_EXIT_CODE[worst])
2415
+
2416
+
2417
@app.command()
def live(
    session_root: SessionRootOpt = None,
    state_db: StateDbOpt = None,
    codex_config: CodexConfigOpt = None,
    config: ConfigOpt = None,
    rates_file: RatesFileOpt = None,
    tier_overrides: TierOverridesOpt = None,
    service_tier: ServiceTierOpt = "auto",
    unknown_service_tier: UnknownTierOpt = "current-config",
    default_model: DefaultModelOpt = "gpt-5.5",
    no_parse_cache: NoParseCacheOpt = False,
    interval: Annotated[
        float,
        typer.Option("--interval", "-i", min=0.5, help="Refresh seconds. Default 2."),
    ] = 2.0,
    max_ticks: Annotated[
        int | None,
        typer.Option("--max-ticks", help="Stop after N ticks."),
    ] = None,
) -> None:
    """Live TUI: today's usage, 5h + weekly window countdowns, burn rate."""
    try:
        # 7-day lookback — presumably sized to cover the weekly window shown
        # in the TUI; confirm against run_live's requirements.
        options = build_options(
            days=7,
            session_root=session_root,
            state_db=state_db,
            codex_config=codex_config,
            config=config,
            rates_file=rates_file,
            tier_overrides=tier_overrides,
            service_tier=service_tier,
            unknown_service_tier=unknown_service_tier,
            default_model=default_model,
            no_parse_cache=no_parse_cache,
        )
    except ValueError as exc:
        raise _exit_error(str(exc)) from exc
    # run_live owns the refresh loop; max_ticks=None means run until interrupted.
    run_live(options, interval=interval, max_ticks=max_ticks)