token-miser 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- token_miser/__init__.py +3 -0
- token_miser/__main__.py +641 -0
- token_miser/backends/__init__.py +57 -0
- token_miser/backends/base.py +112 -0
- token_miser/backends/claude.py +189 -0
- token_miser/backends/codex.py +137 -0
- token_miser/checker.py +117 -0
- token_miser/db.py +439 -0
- token_miser/digest.py +162 -0
- token_miser/environment.py +265 -0
- token_miser/evaluator.py +216 -0
- token_miser/executor.py +23 -0
- token_miser/matrix.py +304 -0
- token_miser/package_adapter.py +319 -0
- token_miser/package_ref.py +56 -0
- token_miser/publish.py +74 -0
- token_miser/recommend.py +330 -0
- token_miser/report.py +198 -0
- token_miser/repos.py +94 -0
- token_miser/score.py +126 -0
- token_miser/suite.py +82 -0
- token_miser/task.py +104 -0
- token_miser/tune.py +426 -0
- token_miser/tune_builder.py +65 -0
- token_miser-0.3.0.dist-info/METADATA +354 -0
- token_miser-0.3.0.dist-info/RECORD +29 -0
- token_miser-0.3.0.dist-info/WHEEL +4 -0
- token_miser-0.3.0.dist-info/entry_points.txt +2 -0
- token_miser-0.3.0.dist-info/licenses/LICENSE +190 -0
token_miser/__init__.py
ADDED
token_miser/__main__.py
ADDED
|
@@ -0,0 +1,641 @@
|
|
|
1
|
+
"""CLI entry point for token-miser."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import argparse
|
|
6
|
+
import json
|
|
7
|
+
import os
|
|
8
|
+
import sys
|
|
9
|
+
|
|
10
|
+
from token_miser.backends import get_backend, parse_agents
|
|
11
|
+
from token_miser.checker import check_all_criteria
|
|
12
|
+
from token_miser.db import Run, get_run, get_runs, init_db, store_run
|
|
13
|
+
from token_miser.environment import setup_env
|
|
14
|
+
from token_miser.evaluator import score_quality
|
|
15
|
+
from token_miser.package_ref import list_packages, parse_package_ref, resolve_packages_dir
|
|
16
|
+
from token_miser.report import analyze, compare
|
|
17
|
+
from token_miser.task import load_task
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def cmd_run(args: argparse.Namespace) -> int:
    """Run a task for every requested agent against the baseline (and package).

    For each (agent, spec) pair: sets up an isolated environment, invokes the
    backend, checks the task's success criteria, optionally scores output
    quality, persists a Run row, and finally prints a summary table.

    Returns 0 on completion; exceptions propagate to main()'s handler.
    """
    task = load_task(args.task)
    conn = init_db()
    packages_dir = getattr(args, "packages_dir", None)
    agents = parse_agents(getattr(args, "agent", None))
    # Invariant across every run in the loop; read once up front.
    bare = getattr(args, "bare", False)
    model = getattr(args, "model", None)
    try:
        specs = [args.baseline]
        if args.package:
            specs.append(args.package)
            if getattr(args, "order", "baseline-first") == "package-first":
                specs = [args.package, args.baseline]

        results = []
        for agent_name in agents:
            backend = get_backend(agent_name)
            backend_env = backend.load_env()
            for spec in specs:
                package_ref = parse_package_ref(spec, packages_dir=packages_dir)
                print(f"Running {backend.name}:{package_ref.name}...", file=sys.stderr)
                env = setup_env(task, package_ref, agent=backend.name)
                try:
                    if task.type == "sequential":
                        res = backend.run_sequential(
                            task.prompts,
                            env.home_dir,
                            env.workspace_dir,
                            timeout=args.timeout,
                            extra_env=backend_env,
                            bare=bare,
                            model=model,
                        )
                    else:
                        res = backend.run(
                            task.prompt,
                            env.home_dir,
                            env.workspace_dir,
                            timeout=args.timeout,
                            extra_env=backend_env,
                            bare=bare,
                            model=model,
                        )

                    checks = check_all_criteria(task.success_criteria, env)
                    passed = sum(1 for c in checks if c.passed)
                    total = len(checks)

                    # Quality scoring is best-effort: it needs an API key and a
                    # rubric, and a scoring failure must not abort the run.
                    quality_json = "{}"
                    api_key = os.environ.get("ANTHROPIC_API_KEY")
                    if api_key and task.quality_rubric:
                        try:
                            scores = score_quality(
                                task.prompt or task.prompts[-1], res.result, task.quality_rubric, api_key
                            )
                            quality_json = json.dumps(
                                [{"dimension": s.dimension, "score": s.score, "reason": s.reason} for s in scores]
                            )
                        except Exception as e:
                            print(f"WARNING: quality scoring failed: {e}", file=sys.stderr)

                    run = Run(
                        agent=backend.name,
                        task_id=task.id,
                        package_name=package_ref.name,
                        # os.path.basename also handles Windows path separators,
                        # unlike the previous split("/")[-1].
                        loadout_name=os.path.basename(package_ref.package_path) if package_ref.package_path else "",
                        model=backend.resolve_model(model),
                        wall_seconds=res.wall_seconds,
                        input_tokens=res.usage.input_tokens,
                        output_tokens=res.usage.output_tokens,
                        cache_read_tokens=res.usage.cache_read_input_tokens,
                        cache_write_tokens=res.usage.cache_creation_input_tokens,
                        reasoning_tokens=res.usage.reasoning_tokens,
                        total_cost_usd=res.total_cost_usd,
                        criteria_pass=passed,
                        criteria_total=total,
                        quality_scores=quality_json,
                        result=res.result,
                    )
                    run_id = store_run(conn, run)
                    results.append((backend.name, package_ref.name, run.model, res, passed, total, run_id))
                finally:
                    # Always tear down the sandbox, even when the backend fails.
                    env.teardown()

        print("\n=== Run Summary ===")
        for agent_name, name, model_name, res, passed, total, run_id in results:
            print(
                f"Agent: {agent_name} | Package: {name} | Model: {model_name or '-'} | "
                f"Input: {res.usage.input_tokens:,} | Output: {res.usage.output_tokens:,} | "
                f"Cached: {res.usage.cache_read_input_tokens:,} | "
                f"Cost: ${res.total_cost_usd:.6f} | Wall: {res.wall_seconds:.1f}s | "
                f"Criteria: {passed}/{total} | Run ID: {run_id}"
            )
        return 0
    finally:
        conn.close()
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def cmd_compare(args: argparse.Namespace) -> int:
    """Print the comparison report for all runs of the given task ID."""
    conn = init_db()
    try:
        report = compare(args.task, conn)
        print(report)
    finally:
        conn.close()
    return 0
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def cmd_analyze(args: argparse.Namespace) -> int:
    """Print the statistical analysis report for the given task ID."""
    conn = init_db()
    try:
        report = analyze(args.task, conn)
        print(report)
    finally:
        conn.close()
    return 0
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def cmd_history(args: argparse.Namespace) -> int:
    """Print a one-line-per-run table of every recorded run."""
    conn = init_db()
    try:
        runs = get_runs(conn)
        if not runs:
            print("No runs recorded.")
            return 0

        header = (
            f"{'ID':>4} {'Agent':<8} {'Task':<16} {'Package':<20} {'Tokens':>10} "
            f"{'Wall':>8} {'Cost':>12} {'Criteria':>10}"
        )
        print(header)
        for rec in runs:
            token_total = rec.input_tokens + rec.output_tokens
            # Placeholder dashes for runs without timing / criteria data.
            wall = "-" if rec.wall_seconds <= 0 else f"{rec.wall_seconds:.1f}s"
            criteria = f"{rec.criteria_pass}/{rec.criteria_total}" if rec.criteria_total else "-"
            row = (
                f"{rec.id:>4} {rec.agent:<8} {rec.task_id:<16} {rec.package_name:<20} {token_total:>10,} "
                f"{wall:>8} ${rec.total_cost_usd:>11.6f} {criteria:>10}"
            )
            print(row)
        return 0
    finally:
        conn.close()
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def cmd_show(args: argparse.Namespace) -> int:
    """Print full details for a single run: metadata, token/cost stats,
    quality scores (if any), and the (truncated) agent output.

    Returns 1 when the run ID does not exist, 0 otherwise.
    """
    conn = init_db()
    try:
        run = get_run(conn, args.run_id)
        if not run:
            print(f"Run {args.run_id} not found.", file=sys.stderr)
            return 1

        print(f"Run #{run.id}")
        print(f" Agent: {run.agent}")
        print(f" Task: {run.task_id}")
        print(f" Package: {run.package_name}")
        print(f" Model: {run.model}")
        print(f" Started: {run.started_at}")
        print(f" Wall time: {run.wall_seconds:.1f}s")
        print(f" Input: {run.input_tokens:,} tokens")
        print(f" Output: {run.output_tokens:,} tokens")
        print(f" Cached: {run.cache_read_tokens:,} tokens")
        # Reasoning tokens are only reported by some backends; omit when zero.
        if run.reasoning_tokens:
            print(f" Reasoning: {run.reasoning_tokens:,} tokens")
        print(f" Cost: ${run.total_cost_usd:.6f}")
        print(f" Criteria: {run.criteria_pass}/{run.criteria_total}")

        # "{}" is the sentinel stored when no quality scoring ran (see cmd_run).
        if run.quality_scores and run.quality_scores != "{}":
            print(" Quality:")
            try:
                scores = json.loads(run.quality_scores)
                # Newer runs store a list of per-dimension dicts; older rows
                # may hold a flat mapping — handle both shapes.
                if isinstance(scores, list):
                    for s in scores:
                        print(f" {s['dimension']}: {s['score']:.2f} — {s.get('reason', '')}")
                elif isinstance(scores, dict):
                    for k, v in scores.items():
                        print(f" {k}: {v}")
            except json.JSONDecodeError:
                print(f" (unparseable: {run.quality_scores})")

        if run.result:
            print(f"\n--- {run.agent.title()} Output ---")
            text = run.result
            # Cap the echoed transcript to keep the terminal readable.
            if len(text) > 2000:
                text = text[:2000] + "\n... (truncated)"
            print(text)
        return 0
    finally:
        conn.close()
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
def cmd_tasks(args: argparse.Namespace) -> int:
    """List every task YAML file found under args.dir, one per line."""
    from pathlib import Path

    task_dir = Path(args.dir)
    if not task_dir.is_dir():
        print(f"ERROR: {args.dir} is not a directory", file=sys.stderr)
        return 1

    for f in sorted(task_dir.glob("*.yaml")):
        try:
            t = load_task(f)
        except Exception as e:
            # One malformed task file must not abort the whole listing.
            print(f" {f.name:<20} (error: {e})", file=sys.stderr)
        else:
            print(f" {t.id:<20} {t.name}")
    return 0
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def cmd_migrate(args: argparse.Namespace) -> int:
    """Initialize the database and apply migrations, then report success.

    The schema work happens as a side effect of init_db(); this handler
    only needs to open and close the connection.
    """
    conn = init_db()
    try:
        print("Database initialized and migrations applied.")
    finally:
        conn.close()
    return 0
|
|
232
|
+
|
|
233
|
+
|
|
234
|
+
def cmd_tune(args: argparse.Namespace) -> int:
    """Run the guided tuning flow once per requested agent backend.

    Returns the worst (highest) exit code produced across all agents.
    """
    from token_miser.tune import run_tune

    packages_dir = getattr(args, "packages_dir", None)
    package_path = args.package
    # A bare package name (no path separator of either flavor) is resolved
    # relative to the packages directory.
    if package_path and not any(sep in package_path for sep in ("/", "\\")):
        package_path = str(resolve_packages_dir(packages_dir) / package_path)

    agents = parse_agents(getattr(args, "agent", None))
    worst = 0
    for agent_name in agents:
        # When tuning several agents, give each its own output directory.
        target = args.output if len(agents) <= 1 else f"{args.output}-{agent_name}"
        code = run_tune(
            suite_name=args.suite,
            skip_baseline=args.skip_baseline,
            package_path=package_path,
            output_dir=target,
            timeout=args.timeout,
            model=args.model,
            yes=args.yes,
            bare=args.bare,
            agent=agent_name,
        )
        worst = max(worst, code)
    return worst
|
|
264
|
+
|
|
265
|
+
|
|
266
|
+
def cmd_suite(args: argparse.Namespace) -> int:
    """Dispatch the `suite` sub-commands: list, validate, prep.

    list      — print every suite with its task count and description.
    validate  — load the named suite (default "standard") and report validity.
    prep      — pre-clone the git repos the suite's tasks reference.
    """
    from pathlib import Path

    from token_miser.suite import list_suites, load_suite
    # NOTE(review): reaching into tune's private _benchmarks_dir couples these
    # modules; consider promoting it to a public helper.
    from token_miser.tune import _benchmarks_dir

    benchmarks = _benchmarks_dir()
    suites_dir = benchmarks / "suites"
    tasks_dir = benchmarks / "tasks"

    if args.suite_command == "list":
        names = list_suites(suites_dir)
        if not names:
            print("No suites found.")
            return 0
        for name in names:
            suite_file = suites_dir / f"{name}.yaml"
            try:
                suite = load_suite(suite_file, tasks_dir)
                print(f" {name:<16} {len(suite.tasks)} tasks {suite.description}")
            except Exception:
                # A broken suite file is reported inline, not fatal.
                print(f" {name:<16} (error loading)")
        return 0

    elif args.suite_command == "validate":
        suite_name = args.suite or "standard"
        suite_file = suites_dir / f"{suite_name}.yaml"
        if not suite_file.exists():
            print(f"Suite not found: {suite_name}", file=sys.stderr)
            return 1
        try:
            suite = load_suite(suite_file, tasks_dir)
            print(f"Suite '{suite.name}' v{suite.version}: {len(suite.tasks)} tasks — valid")
            return 0
        except Exception as e:
            print(f"Validation failed: {e}", file=sys.stderr)
            return 1

    elif args.suite_command == "prep":
        from token_miser.repos import ensure_repo, load_repos_config

        suite_name = args.suite or "standard"
        suite_file = suites_dir / f"{suite_name}.yaml"
        if not suite_file.exists():
            print(f"Suite not found: {suite_name}", file=sys.stderr)
            return 1

        suite = load_suite(suite_file, tasks_dir)
        repos_yaml = benchmarks / "repos.yaml"
        if repos_yaml.exists():
            specs = load_repos_config(repos_yaml)
            cache_dir = Path.home() / ".token_miser" / "repo_cache"
            cache_dir.mkdir(parents=True, exist_ok=True)
            # Only clone repos that this suite's tasks actually reference.
            needed = {t.repo_id for t in suite.tasks if t.repo_id}
            for repo_id in sorted(needed):
                if repo_id in specs:
                    print(f" Preparing {repo_id}...", end=" ", flush=True)
                    try:
                        ensure_repo(specs[repo_id], cache_dir, benchmarks_dir=benchmarks)
                        print("ok")
                    except Exception as e:
                        # Best-effort: a failed clone shouldn't stop the rest.
                        print(f"error: {e}")
                else:
                    print(f" {repo_id}: not found in repos.yaml", file=sys.stderr)
        print("Done.")
        return 0

    # Unreachable when argparse enforces the sub-command choices, but kept
    # as a defensive fallback.
    print(f"Unknown suite command: {args.suite_command}", file=sys.stderr)
    return 1
|
|
335
|
+
|
|
336
|
+
|
|
337
|
+
def cmd_matrix(args: argparse.Namespace) -> int:
    """Render the cross-package comparison matrix, or export it as JSON."""
    from pathlib import Path

    from token_miser.db import init_db
    from token_miser.matrix import build_matrix, export_matrix_json

    conn = init_db()
    try:
        if not args.json_out:
            print(build_matrix(args.suite, conn))
        else:
            out_path = export_matrix_json(args.suite, Path(args.json_out), conn)
            print(f"Matrix exported to {out_path}")
    finally:
        conn.close()
    return 0
|
|
353
|
+
|
|
354
|
+
|
|
355
|
+
def cmd_digest(args: argparse.Namespace) -> int:
    """Dispatch the `digest` sub-commands: export, history, compare.

    export   — write tune-session digests to files (all sessions or latest).
    history  — list previously exported digest files.
    compare  — diff two digest files.
    """
    from pathlib import Path

    from token_miser.digest import compare_digests, export_all, export_session, list_digests

    if args.digest_command == "export":
        conn = init_db()
        try:
            if args.all:
                paths = export_all(conn)
                for p in paths:
                    print(f" Exported: {p}")
                print(f"{len(paths)} digests exported.")
            else:
                # Imported lazily: only the single-session path needs it.
                from token_miser.db import get_latest_tune_session

                session = get_latest_tune_session(conn)
                if not session:
                    print("No tune sessions found.", file=sys.stderr)
                    return 1
                path = export_session(conn, session.id)
                print(f"Exported: {path}")
            return 0
        finally:
            conn.close()

    elif args.digest_command == "history":
        paths = list_digests()
        if not paths:
            print("No digests found.")
            return 0
        for p in paths:
            print(f" {p.name}")
        return 0

    elif args.digest_command == "compare":
        p1 = Path(args.digest1)
        p2 = Path(args.digest2)
        if not p1.exists() or not p2.exists():
            print("One or both digest files not found.", file=sys.stderr)
            return 1
        print(compare_digests(p1, p2))
        return 0

    # Unreachable when argparse enforces the sub-command choices, but kept
    # as a defensive fallback.
    print(f"Unknown digest command: {args.digest_command}", file=sys.stderr)
    return 1
|
|
401
|
+
|
|
402
|
+
|
|
403
|
+
def cmd_publish(args: argparse.Namespace) -> int:
    """Publish a package directory to a git repo and print a kanon snippet."""
    from pathlib import Path

    from token_miser.publish import generate_manifest_snippet, publish_package

    pkg_path = Path(args.package_dir)
    if not pkg_path.is_dir():
        print(f"ERROR: {args.package_dir} is not a directory", file=sys.stderr)
        return 1

    try:
        result = publish_package(pkg_path, args.repo, name=args.name, version=args.version)
    except ValueError as e:
        # publish_package signals user-facing problems via ValueError.
        print(f"ERROR: {e}", file=sys.stderr)
        return 1

    snippet = generate_manifest_snippet(result["package_name"], result["version"])
    print(f"Published {result['package_name']} v{result['version']}")
    print(f"Tag: {result['tag']}")
    print("\nKanon manifest snippet:")
    print(f" {snippet}")
    return 0
|
|
429
|
+
|
|
430
|
+
|
|
431
|
+
def cmd_list(args: argparse.Namespace) -> int:
    """List the packages available in the resolved packages directory."""
    packages_dir = getattr(args, "packages_dir", None)
    resolved = resolve_packages_dir(packages_dir)
    names = list_packages(packages_dir)

    if not names:
        print(f"No packages found in {resolved}/")
        return 0

    print(f"Packages in {resolved}/:")
    for pkg_name in names:
        print(f" {pkg_name}")
    return 0
|
|
444
|
+
|
|
445
|
+
|
|
446
|
+
def cmd_score(args: argparse.Namespace) -> int:
    """Write the quality-first score JSON artifact for CI consumption."""
    from pathlib import Path

    from token_miser.score import export_score_json

    conn = init_db()
    try:
        destination = Path(args.output)
        export_score_json(args.suite, args.package, destination, conn)
        print(f"Score exported to {destination}")
    finally:
        conn.close()
    return 0
|
|
459
|
+
|
|
460
|
+
|
|
461
|
+
def cmd_packages(args: argparse.Namespace) -> int:
    """List kanon-distributed packages with name, version, and description.

    Packages are discovered via the .kanon file in the current directory;
    if none are found there, ./.packages/ is scanned directly for directories
    containing a manifest.yaml.
    """
    from pathlib import Path

    import yaml

    from token_miser.package_adapter import discover_kanon_packages

    # Check for .kanon file
    kanonenv = Path.cwd() / ".kanon"
    packages = discover_kanon_packages(kanonenv)

    if not packages:
        # Also check .packages/ directly
        packages_dir = Path.cwd() / ".packages"
        if packages_dir.is_dir():
            packages = sorted(
                p for p in packages_dir.iterdir() if p.is_dir() and (p / "manifest.yaml").exists()
            )

    if not packages:
        print("No kanon packages found. Run 'kanon install' first, or check .packages/ directory.")
        return 0

    print(f"{'Package':<24} {'Version':<12} {'Description'}")
    print("-" * 70)
    for pkg_path in packages:
        try:
            # An empty manifest.yaml parses to None; treat it as an empty
            # mapping rather than falling into the error branch below.
            manifest = yaml.safe_load((pkg_path / "manifest.yaml").read_text()) or {}
            name = manifest.get("name", pkg_path.name)
            version = manifest.get("version", "?")
            # `description: null` makes .get() return None even with a
            # default; coerce before slicing to the 40-char display width.
            desc = str(manifest.get("description") or "")[:40]
            print(f" {name:<22} {version:<12} {desc}")
        except Exception:
            print(f" {pkg_path.name:<22} (error reading manifest)")
    return 0
|
|
494
|
+
|
|
495
|
+
|
|
496
|
+
def build_parser() -> argparse.ArgumentParser:
    """Build the token-miser argument parser with all sub-commands.

    Sub-command names match the keys of the handler table in main();
    adding a parser here requires a matching cmd_* entry there.
    """
    from token_miser import __version__

    parser = argparse.ArgumentParser(prog="token-miser", description="Benchmark coding-agent configuration packages")
    parser.add_argument("--version", action="version", version=f"%(prog)s {__version__}")
    # Global option: applies to every sub-command that resolves packages.
    parser.add_argument(
        "--packages-dir",
        default=None,
        help="Directory containing packages (default: $TOKEN_MISER_PACKAGES_DIR or ./packages)",
    )
    sub = parser.add_subparsers(dest="command", required=True)

    # run
    p_run = sub.add_parser("run", help="Run an experiment")
    p_run.add_argument("--task", required=True, help="Path to task YAML")
    p_run.add_argument("--baseline", required=True, help="Baseline spec ('vanilla' or package path)")
    p_run.add_argument("--package", default=None, help="Package path to benchmark")
    p_run.add_argument("--agent", default="claude", help="Agent backend: claude, codex, openai(alias), or both")
    p_run.add_argument(
        "--order",
        choices=("baseline-first", "package-first"),
        default="baseline-first",
        help="Execution order when both baseline and package are present",
    )
    p_run.add_argument(
        "--model",
        default=None,
        help="Model identifier (defaults by agent: sonnet for Claude, gpt-5.4 for Codex)",
    )
    p_run.add_argument("--timeout", type=int, default=600, help="Per-invocation timeout in seconds (default: 600)")
    p_run.add_argument("--bare", action="store_true", help="Skip hooks/plugins (cheaper, less realistic)")

    # compare
    p_compare = sub.add_parser("compare", help="Compare runs for a task")
    p_compare.add_argument("--task", required=True, help="Task ID")

    # analyze
    p_analyze = sub.add_parser("analyze", help="Statistical analysis for a task")
    p_analyze.add_argument("--task", required=True, help="Task ID")

    # history
    sub.add_parser("history", help="List all runs")

    # show
    p_show = sub.add_parser("show", help="Show details for a run")
    p_show.add_argument("run_id", type=int, help="Run ID")

    # tasks
    p_tasks = sub.add_parser("tasks", help="List task YAML files")
    p_tasks.add_argument("--dir", default="tasks", help="Directory containing task files")

    # migrate
    sub.add_parser("migrate", help="Initialize/migrate database")

    # tune
    p_tune = sub.add_parser("tune", help="Guided efficiency optimization")
    p_tune.add_argument("--suite", default="standard", help="Benchmark suite name (default: standard)")
    p_tune.add_argument("--skip-baseline", action="store_true", help="Reuse last baseline")
    p_tune.add_argument("--package", default=None, help="Test a specific package path")
    # Hidden backward-compatible alias for --package.
    p_tune.add_argument("--profile", dest="package", help=argparse.SUPPRESS)
    p_tune.add_argument("--output", default="tuned-package", help="Output dir for generated package")
    p_tune.add_argument("--timeout", type=int, default=600, help="Per-task timeout in seconds")
    p_tune.add_argument("--agent", default="claude", help="Agent backend: claude, codex, openai(alias), or both")
    p_tune.add_argument("--model", default=None, help="Model identifier (defaults by agent)")
    p_tune.add_argument("--yes", action="store_true", help="Skip confirmation prompts")
    p_tune.add_argument("--bare", action="store_true", help="Skip hooks/plugins (cheaper, less realistic)")

    # suite
    p_suite = sub.add_parser("suite", help="Manage benchmark suites")
    suite_sub = p_suite.add_subparsers(dest="suite_command", required=True)
    suite_sub.add_parser("list", help="List available suites")
    p_suite_validate = suite_sub.add_parser("validate", help="Validate suite tasks")
    p_suite_validate.add_argument("--suite", default=None, help="Suite name")
    p_suite_prep = suite_sub.add_parser("prep", help="Pre-clone suite repos")
    p_suite_prep.add_argument("--suite", default=None, help="Suite name")

    # digest
    p_digest = sub.add_parser("digest", help="Export run data for git tracking")
    digest_sub = p_digest.add_subparsers(dest="digest_command", required=True)
    p_digest_export = digest_sub.add_parser("export", help="Export sessions to digest files")
    p_digest_export.add_argument("--all", action="store_true", help="Export all sessions")
    digest_sub.add_parser("history", help="Show digest timeline")
    p_digest_compare = digest_sub.add_parser("compare", help="Compare two digests")
    p_digest_compare.add_argument("digest1", help="First digest file")
    p_digest_compare.add_argument("digest2", help="Second digest file")

    # matrix
    p_matrix = sub.add_parser("matrix", help="Cross-package comparison matrix")
    p_matrix.add_argument("--suite", default="axis", help="Suite name (default: axis)")
    p_matrix.add_argument("--json", dest="json_out", default=None, help="Export matrix as JSON to this path")

    # publish
    p_publish = sub.add_parser("publish", help="Publish a package to a git repo for kanon")
    p_publish.add_argument("package_dir", help="Path to the package directory")
    p_publish.add_argument("--repo", required=True, help="Target git repo path")
    p_publish.add_argument("--name", default=None, help="Package name (default: from manifest)")
    p_publish.add_argument("--version", default=None, help="Package version (default: from manifest)")

    # packages
    sub.add_parser("packages", help="List kanon-distributed packages")

    # list
    sub.add_parser("list", help="List available packages from packages directory")

    # score
    p_score = sub.add_parser("score", help="Generate quality-first score for CI artifacts")
    p_score.add_argument("--suite", required=True, help="Suite name")
    p_score.add_argument("--package", required=True, help="Package label (e.g., 'claude:lean')")
    p_score.add_argument("--output", default="token-miser-score.json", help="Output file path")

    return parser
|
|
607
|
+
|
|
608
|
+
|
|
609
|
+
def main() -> None:
    """Parse arguments, dispatch to the matching cmd_* handler, and exit."""
    args = build_parser().parse_args()

    # Keys mirror the sub-parser names registered in build_parser().
    handlers = {
        "run": cmd_run,
        "compare": cmd_compare,
        "analyze": cmd_analyze,
        "history": cmd_history,
        "show": cmd_show,
        "tasks": cmd_tasks,
        "migrate": cmd_migrate,
        "tune": cmd_tune,
        "publish": cmd_publish,
        "packages": cmd_packages,
        "suite": cmd_suite,
        "matrix": cmd_matrix,
        "digest": cmd_digest,
        "list": cmd_list,
        "score": cmd_score,
    }

    try:
        status = handlers[args.command](args)
    except KeyboardInterrupt:
        # Conventional exit code for SIGINT (128 + 2).
        sys.exit(130)
    except Exception as e:
        # Top-level boundary: surface the message, exit non-zero.
        print(f"ERROR: {e}", file=sys.stderr)
        sys.exit(1)
    sys.exit(status)
|
|
638
|
+
|
|
639
|
+
|
|
640
|
+
# Support `python -m token_miser` and direct execution of this module.
if __name__ == "__main__":
    main()
|