akm-cli 0.7.0-rc1 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/cli.js +100 -16
- package/dist/src/commands/config-cli.js +42 -0
- package/dist/src/commands/history.js +78 -7
- package/dist/src/commands/registry-search.js +69 -6
- package/dist/src/commands/search.js +30 -3
- package/dist/src/commands/show.js +29 -0
- package/dist/src/commands/source-add.js +5 -1
- package/dist/src/commands/source-manage.js +7 -1
- package/dist/src/core/config.js +28 -0
- package/dist/src/indexer/db-search.js +1 -0
- package/dist/src/indexer/indexer.js +16 -2
- package/dist/src/indexer/matchers.js +1 -1
- package/dist/src/indexer/search-source.js +4 -2
- package/dist/src/integrations/agent/profiles.js +1 -1
- package/dist/src/integrations/agent/spawn.js +67 -16
- package/dist/src/integrations/github.js +9 -3
- package/dist/src/llm/embedders/remote.js +37 -3
- package/dist/src/output/cli-hints.js +15 -2
- package/dist/src/output/renderers.js +3 -1
- package/dist/src/output/shapes.js +8 -1
- package/dist/src/output/text.js +156 -3
- package/dist/src/registry/build-index.js +5 -4
- package/dist/src/registry/providers/static-index.js +3 -1
- package/dist/src/setup/setup.js +9 -0
- package/dist/src/wiki/wiki.js +54 -6
- package/dist/src/workflows/runs.js +37 -3
- package/dist/tests/architecture/agent-no-llm-sdk-guard.test.js +1 -1
- package/dist/tests/bench/attribution.test.js +24 -23
- package/dist/tests/bench/cleanup.js +31 -0
- package/dist/tests/bench/cli.js +366 -31
- package/dist/tests/bench/cli.test.js +282 -14
- package/dist/tests/bench/corpus.js +3 -0
- package/dist/tests/bench/corpus.test.js +10 -10
- package/dist/tests/bench/doctor.js +525 -0
- package/dist/tests/bench/driver.js +77 -22
- package/dist/tests/bench/driver.test.js +142 -1
- package/dist/tests/bench/environment.js +233 -0
- package/dist/tests/bench/environment.test.js +199 -0
- package/dist/tests/bench/evolve.js +67 -0
- package/dist/tests/bench/evolve.test.js +12 -4
- package/dist/tests/bench/failure-modes.test.js +52 -3
- package/dist/tests/bench/feedback-integrity.test.js +3 -2
- package/dist/tests/bench/leakage.test.js +105 -2
- package/dist/tests/bench/learning-curve.test.js +3 -2
- package/dist/tests/bench/metrics.js +102 -26
- package/dist/tests/bench/metrics.test.js +10 -4
- package/dist/tests/bench/opencode-config.js +194 -0
- package/dist/tests/bench/opencode-config.test.js +370 -0
- package/dist/tests/bench/report.js +73 -9
- package/dist/tests/bench/report.test.js +59 -10
- package/dist/tests/bench/run-config.js +355 -0
- package/dist/tests/bench/run-config.test.js +298 -0
- package/dist/tests/bench/run-curate-test.js +32 -0
- package/dist/tests/bench/run-failing-tasks.js +56 -0
- package/dist/tests/bench/run-full-bench.js +51 -0
- package/dist/tests/bench/run-items36-targeted.js +69 -0
- package/dist/tests/bench/run-nano-quick.js +42 -0
- package/dist/tests/bench/run-waveg-targeted.js +62 -0
- package/dist/tests/bench/runner.js +257 -94
- package/dist/tests/bench/tmp.js +90 -0
- package/dist/tests/bench/trajectory.js +2 -2
- package/dist/tests/bench/verifier.js +6 -1
- package/dist/tests/bench/workflow-spec.js +11 -24
- package/dist/tests/bench/workflow-spec.test.js +1 -1
- package/dist/tests/bench/workflow-trace.js +34 -0
- package/dist/tests/cli-errors.test.js +1 -0
- package/dist/tests/commands/history.test.js +195 -0
- package/dist/tests/config.test.js +25 -0
- package/dist/tests/e2e.test.js +23 -2
- package/dist/tests/fixtures/stashes/load.js +1 -1
- package/dist/tests/fixtures/stashes/load.test.js +11 -2
- package/dist/tests/indexer.test.js +12 -1
- package/dist/tests/output-baseline.test.js +2 -1
- package/dist/tests/output-shapes-unit.test.js +3 -1
- package/dist/tests/registry-build-index.test.js +17 -1
- package/dist/tests/registry-providers/static-index.test.js +34 -0
- package/dist/tests/registry-search.test.js +200 -0
- package/dist/tests/remember-frontmatter.test.js +11 -13
- package/dist/tests/source-qa-fixes.test.js +18 -0
- package/dist/tests/source-registry.test.js +3 -3
- package/dist/tests/source-source.test.js +61 -1
- package/dist/tests/workflow-qa-fixes.test.js +18 -0
- package/package.json +1 -1
package/dist/tests/bench/doctor.js
@@ -0,0 +1,525 @@
+/**
+ * bench doctor — pre-flight harness smoke-test.
+ *
+ * Runs a sequence of checks that surface misconfiguration issues before a
+ * full bench run. Each check is independent and produces a structured result.
+ * The most important check (#3) materialises a real isolation dir, writes the
+ * opencode.json exactly as `runOne` does, and runs a live `opencode run`
+ * invocation with a 60-second timeout. This catches the class of silent
+ * harness bugs (wrong OPENCODE_CONFIG path, missing model key, wrong
+ * subcommand, blocked node_modules, open stdin pipe) in a single shot.
+ *
+ * Usage from the CLI:
+ *   bun run tests/bench/cli.ts doctor [--model <id>] [--opencode-config <path>] [--verbose]
+ *
+ * Exit codes: 0 = all checks pass, 1 = any check failed.
+ */
+import fs from "node:fs";
+import path from "node:path";
+import process from "node:process";
+import { getBuiltinAgentProfile } from "../../src/integrations/agent/profiles";
+import { runAgent } from "../../src/integrations/agent/spawn";
+import { buildIsolatedEnv, buildSanitizedEnvSource, createIsolationDirs } from "./driver";
+import { validateFixtureCorpus, writeOpencodeJson } from "./environment";
+import { BenchConfigError, selectProviderForModel } from "./opencode-config";
+import { benchMkdtemp } from "./tmp";
+// ---------------------------------------------------------------------------
+// Internal helpers
+// ---------------------------------------------------------------------------
+/** Absolute path to the repo root (two directories above tests/bench). */
+const REPO_ROOT = path.resolve(__dirname, "..", "..");
+/** Absolute path to the az-cli fixture stash. */
+const AZ_CLI_FIXTURE = path.join(REPO_ROOT, "tests", "fixtures", "stashes", "az-cli");
+/** akm binary — the same one the bench would use in a real run. */
+function resolveAkmBin() {
+    // Try explicit PATH lookup first so the local `bun run` path works too.
+    const result = Bun.spawnSync({ cmd: ["which", "akm"], stdout: "pipe", stderr: "pipe" });
+    if (result.exitCode === 0) {
+        const bin = new TextDecoder().decode(result.stdout).trim();
+        if (bin)
+            return bin;
+    }
+    // Fallback: project-local bin via bun (works in CI where akm isn't on PATH).
+    return "akm";
+}
+function log(verbose, msg) {
+    if (verbose)
+        process.stderr.write(` [doctor] ${msg}\n`);
+}
+// ---------------------------------------------------------------------------
+// Individual checks
+// ---------------------------------------------------------------------------
+/**
+ * Check 1: opencode binary is reachable on PATH.
+ */
+async function checkOpencodeReachable(verbose) {
+    const name = "opencode binary reachable";
+    log(verbose, `running check: ${name}`);
+    try {
+        const result = Bun.spawnSync({
+            cmd: ["which", "opencode"],
+            stdout: "pipe",
+            stderr: "pipe",
+        });
+        if (result.exitCode === 0) {
+            const binPath = new TextDecoder().decode(result.stdout).trim();
+            log(verbose, `opencode found at: ${binPath}`);
+            return { name, ok: true, severity: "pass", message: `opencode found at ${binPath}` };
+        }
+        return {
+            name,
+            ok: false,
+            severity: "fail",
+            message: "opencode not found on PATH — install it with `npm install -g opencode-ai` or equivalent",
+        };
+    }
+    catch (err) {
+        return {
+            name,
+            ok: false,
+            severity: "fail",
+            message: `which opencode threw: ${err instanceof Error ? err.message : String(err)}`,
+        };
+    }
+}
+/**
+ * Check 2: model resolves against provider's /v1/models endpoint.
+ *
+ * For local providers that carry an explicit `options.baseURL`, makes an HTTP
+ * GET to `${baseURL}/models` and checks that the model ID suffix is in the
+ * response. For built-in cloud models (no matching provider entry in the
+ * providers file) the check is skipped — we can't probe without auth.
+ */
+async function checkModelResolves(model, opencodeProviders, verbose) {
+    const name = "model resolves";
+    log(verbose, `running check: ${name} (model=${model})`);
+    if (!opencodeProviders) {
+        return {
+            name,
+            ok: true,
+            severity: "pass",
+            message: "no local provider config; model will be resolved by opencode's cloud-provider defaults (skipped probe)",
+        };
+    }
+    let selected;
+    try {
+        selected = selectProviderForModel(opencodeProviders, model);
+    }
+    catch (err) {
+        if (err instanceof BenchConfigError) {
+            // Model has no provider entry — likely a built-in cloud model. Skip.
+            return {
+                name,
+                ok: true,
+                severity: "pass",
+                message: `model "${model}" has no provider entry (built-in cloud model) — skipping HTTP probe`,
+            };
+        }
+        return {
+            name,
+            ok: false,
+            severity: "fail",
+            message: `selectProviderForModel threw: ${err instanceof Error ? err.message : String(err)}`,
+        };
+    }
+    // Extract baseURL from the provider entry to know where to probe.
+    const entry = selected.entry;
+    const options = entry.options;
+    const baseURL = typeof options?.baseURL === "string" ? options.baseURL : undefined;
+    if (!baseURL) {
+        return {
+            name,
+            ok: true,
+            severity: "pass",
+            message: `provider "${selected.providerKey}" has no baseURL — cannot probe without auth (skipped)`,
+        };
+    }
+    // The model suffix is everything after the first "/" (the provider key).
+    const slashIdx = model.indexOf("/");
+    const modelSuffix = slashIdx === -1 ? model : model.slice(slashIdx + 1);
+    log(verbose, `probing ${baseURL}/models for model suffix "${modelSuffix}"`);
+    try {
+        // Trim trailing slash so we don't end up with double slashes.
+        const url = `${baseURL.replace(/\/$/, "")}/models`;
+        const controller = new AbortController();
+        const timer = setTimeout(() => controller.abort(), 10_000);
+        let resp;
+        try {
+            resp = await fetch(url, { signal: controller.signal });
+        }
+        finally {
+            clearTimeout(timer);
+        }
+        if (!resp.ok) {
+            return {
+                name,
+                ok: false,
+                severity: "fail",
+                message: `GET ${url} returned HTTP ${resp.status} — is the local LLM server running?`,
+            };
+        }
+        const body = (await resp.json());
+        const ids = (body.data ?? []).map((m) => m.id);
+        log(verbose, `models returned: ${ids.join(", ")}`);
+        // Check if the model suffix (or the full model id) appears in the list.
+        const found = ids.some((id) => id === modelSuffix || id === model);
+        if (found) {
+            return { name, ok: true, severity: "pass", message: `model "${modelSuffix}" found in ${url}` };
+        }
+        return {
+            name,
+            ok: false,
+            severity: "fail",
+            message: `model "${modelSuffix}" not found in ${url}; available: ${ids.join(", ") || "(none)"}`,
+        };
+    }
+    catch (err) {
+        const msg = err instanceof Error ? err.message : String(err);
+        const isAbort = msg.includes("abort") || msg.includes("AbortError");
+        return {
+            name,
+            ok: false,
+            severity: "fail",
+            message: isAbort
+                ? `GET ${baseURL}/models timed out after 10 s — is the local LLM server running?`
+                : `GET ${baseURL}/models failed: ${msg}`,
+        };
+    }
+}
+/**
+ * Check 3: materialise + run — the single most important check.
+ *
+ * Creates a real isolation dir, materialises opencode.json exactly as `runOne`
+ * does, then runs:
+ *
+ *   opencode run "Reply with the single word: READY" --model <model>
+ *
+ * with a 60 s timeout. Checks that stdout contains "READY" (case-insensitive).
+ * This catches all of: wrong OPENCODE_CONFIG path, missing model key, wrong
+ * subcommand, blocked node_modules, open stdin pipe.
+ */
+async function checkMaterialiseAndRun(model, opencodeProviders, verbose) {
+    const name = "materialise + run";
+    log(verbose, `running check: ${name}`);
+    const workspace = benchMkdtemp("bench-doctor-run-");
+    const dirs = createIsolationDirs(undefined);
+    try {
+        // Materialise opencode.json using the same writer as runOne. Warnings
+        // (e.g. built-in cloud model stub) are surfaced via verbose log only.
+        const writeResult = writeOpencodeJson(dirs.opencodeConfig, model, opencodeProviders);
+        for (const w of writeResult.warnings)
+            log(verbose, `writeOpencodeJson: ${w}`);
+        if (writeResult.providerKey) {
+            log(verbose, `materialised opencode.json with provider "${writeResult.providerKey}"`);
+        }
+        else {
+            log(verbose, "no provider entry for model — wrote stub opencode.json with bench invariants");
+        }
+        const env = buildIsolatedEnv(dirs, model);
+        log(verbose, `OPENCODE_CONFIG=${env.OPENCODE_CONFIG}`);
+        log(verbose, `XDG_CONFIG_HOME=${env.XDG_CONFIG_HOME}`);
+        const profile = getBuiltinAgentProfile("opencode");
+        if (!profile) {
+            return {
+                name,
+                ok: false,
+                severity: "fail",
+                message: 'built-in agent profile "opencode" not found — this is a harness bug',
+            };
+        }
+        const prompt = "Reply with the single word: READY";
+        log(verbose, `spawning opencode with prompt: "${prompt}"`);
+        const agentResult = await runAgent(profile, prompt, {
+            env,
+            envSource: buildSanitizedEnvSource(),
+            cwd: workspace,
+            timeoutMs: 60_000,
+            stdio: "captured",
+        });
+        log(verbose, `opencode exited: ok=${agentResult.ok}, reason=${agentResult.reason ?? "none"}`);
+        if (verbose && agentResult.stdout) {
+            const preview = agentResult.stdout.slice(0, 500);
+            process.stderr.write(` [doctor] stdout preview: ${preview}\n`);
+        }
+        if (!agentResult.ok) {
+            if (agentResult.reason === "timeout") {
+                return {
+                    name,
+                    ok: false,
+                    severity: "fail",
+                    message: "opencode timed out after 60 s — check model availability and provider config",
+                };
+            }
+            if (agentResult.reason === "spawn_failed") {
+                return {
+                    name,
+                    ok: false,
+                    severity: "fail",
+                    message: "opencode failed to spawn — is `opencode` on PATH and executable?",
+                };
+            }
+            // non_zero_exit is still OK to check stdout for READY.
+        }
+        const containsReady = /ready/i.test(agentResult.stdout);
+        if (containsReady) {
+            return {
+                name,
+                ok: true,
+                severity: "pass",
+                message: 'opencode replied with "READY" — config and model are functional',
+            };
+        }
+        // Even if stdout doesn't contain "READY", a non-zero exit or unexpected
+        // output is still a failure for our purposes.
+        const stdoutSnip = agentResult.stdout.slice(0, 300).replace(/\n/g, " ").trim();
+        return {
+            name,
+            ok: false,
+            severity: "fail",
+            message: `opencode did not include "READY" in output. stdout: ${stdoutSnip || "(empty)"}`,
+        };
+    }
+    finally {
+        fs.rmSync(dirs.root, { recursive: true, force: true });
+        fs.rmSync(workspace, { recursive: true, force: true });
+    }
+}
+/**
+ * Check 4: stash fixture loadable.
+ *
+ * Runs `akm search az cli` with `AKM_STASH_DIR` pointing at the az-cli
+ * fixture and confirms at least one result is returned. Verifies that akm
+ * itself is functional in isolation.
+ */
+async function checkStashFixtureLoadable(verbose) {
+    const name = "stash fixture loadable";
+    log(verbose, `running check: ${name}`);
+    if (!fs.existsSync(AZ_CLI_FIXTURE)) {
+        return {
+            name,
+            ok: false,
+            severity: "fail",
+            message: `az-cli fixture stash not found at: ${AZ_CLI_FIXTURE}`,
+        };
+    }
+    const akmBin = resolveAkmBin();
+    log(verbose, `using akm binary: ${akmBin}`);
+    try {
+        const result = Bun.spawnSync({
+            cmd: [akmBin, "search", "az", "cli"],
+            env: { ...process.env, AKM_STASH_DIR: AZ_CLI_FIXTURE },
+            stdout: "pipe",
+            stderr: "pipe",
+        });
+        const stdout = new TextDecoder().decode(result.stdout ?? new Uint8Array());
+        const stderr = new TextDecoder().decode(result.stderr ?? new Uint8Array());
+        log(verbose, `akm search exit: ${result.exitCode}, stdout length: ${stdout.length}`);
+        if (verbose && stderr) {
+            process.stderr.write(` [doctor] akm search stderr: ${stderr.slice(0, 200)}\n`);
+        }
+        // Check for at least one result. The search command outputs asset refs or
+        // formatted results. An empty output or a "no results" message is a fail.
+        const hasResults = result.exitCode === 0 &&
+            stdout.trim().length > 0 &&
+            !stdout.toLowerCase().includes("no results") &&
+            !stdout.toLowerCase().includes("0 results");
+        if (hasResults) {
+            const lineCount = stdout.trim().split("\n").length;
+            return {
+                name,
+                ok: true,
+                severity: "pass",
+                message: `akm search returned ${lineCount} result line(s) from az-cli fixture`,
+            };
+        }
+        return {
+            name,
+            ok: false,
+            severity: "fail",
+            message: `akm search az cli returned no results (exit=${result.exitCode}). stdout: ${stdout.slice(0, 200).trim() || "(empty)"}`,
+        };
+    }
+    catch (err) {
+        return {
+            name,
+            ok: false,
+            severity: "fail",
+            message: `akm search threw: ${err instanceof Error ? err.message : String(err)}`,
+        };
+    }
+}
+/**
+ * Check 5: verifier binaries present.
+ *
+ * Checks that `bash` is on PATH (required for script verifiers). Optionally
+ * checks for `pytest` (required for pytest verifiers). Missing `pytest` is a
+ * warning, not a failure.
+ */
+async function checkVerifierBinaries(verbose) {
+    const checks = [];
+    // bash
+    {
+        const name = "verifier: bash present";
+        log(verbose, `running check: ${name}`);
+        const result = Bun.spawnSync({ cmd: ["which", "bash"], stdout: "pipe", stderr: "pipe" });
+        if (result.exitCode === 0) {
+            const p = new TextDecoder().decode(result.stdout).trim();
+            checks.push({ name, ok: true, severity: "pass", message: `bash found at ${p}` });
+        }
+        else {
+            checks.push({
+                name,
+                ok: false,
+                severity: "fail",
+                message: "bash not found on PATH — script verifiers will fail with exit 127",
+            });
+        }
+    }
+    // pytest
+    {
+        const name = "verifier: pytest present";
+        log(verbose, `running check: ${name}`);
+        const result = Bun.spawnSync({ cmd: ["which", "pytest"], stdout: "pipe", stderr: "pipe" });
+        if (result.exitCode === 0) {
+            const p = new TextDecoder().decode(result.stdout).trim();
+            checks.push({ name, ok: true, severity: "pass", message: `pytest found at ${p}` });
+        }
+        else {
+            // Warn but don't fail — only pytest-verifier tasks need it.
+            checks.push({
+                name,
+                ok: true,
+                severity: "warn",
+                message: "pytest not found on PATH — tasks with verifier: pytest will fail with exit 127",
+            });
+        }
+    }
+    return checks;
+}
+/**
+ * Check 6: generated opencode.json carries bench isolation invariants.
+ *
+ * Materialises an opencode.json exactly as runOne does and asserts that
+ * `plugin` is an empty array and `permission.bash === "allow"`. Catches
+ * any refactor that accidentally drops these fields.
+ */
+async function checkOpencodeJsonInvariants(model, opencodeProviders, verbose) {
+    const name = "opencode.json bench invariants";
+    log(verbose, `running check: ${name}`);
+    const tmpDir = benchMkdtemp("bench-doctor-invariant-");
+    try {
+        fs.mkdirSync(path.join(tmpDir, "opencode-config"), { recursive: true });
+        writeOpencodeJson(path.join(tmpDir, "opencode-config"), model, opencodeProviders);
+        const written = JSON.parse(fs.readFileSync(path.join(tmpDir, "opencode-config", "opencode.json"), "utf8"));
+        const pluginOk = Array.isArray(written.plugin) && written.plugin.length === 0;
+        const permOk = written.permission !== null &&
+            typeof written.permission === "object" &&
+            written.permission.bash === "allow";
+        if (pluginOk && permOk) {
+            return { name, ok: true, severity: "pass", message: "plugin:[] and permission.bash=allow present" };
+        }
+        const issues = [];
+        if (!pluginOk)
+            issues.push(`plugin=${JSON.stringify(written.plugin)} (expected [])`);
+        if (!permOk)
+            issues.push(`permission.bash=${JSON.stringify(written.permission?.bash)} (expected "allow")`);
+        return { name, ok: false, severity: "fail", message: `invariant violation: ${issues.join("; ")}` };
+    }
+    catch (err) {
+        return {
+            name,
+            ok: false,
+            severity: "fail",
+            message: `invariant check threw: ${err instanceof Error ? err.message : String(err)}`,
+        };
+    }
+    finally {
+        fs.rmSync(tmpDir, { recursive: true, force: true });
+    }
+}
+/**
+ * Check 7 (optional): all task stash references name valid fixtures.
+ *
+ * When `tasks` is provided, validates every `task.stash` against the
+ * fixture directory. Missing fixtures produce `harness_error` at run time —
+ * better to surface them loudly at startup.
+ */
+function checkFixtureCorpus(tasks, verbose) {
+    const name = "fixture corpus";
+    log(verbose, `running check: ${name}`);
+    const { valid, missing } = validateFixtureCorpus(tasks);
+    if (missing.size === 0) {
+        return { name, ok: true, severity: "pass", message: `all ${valid.size} fixture(s) found` };
+    }
+    const detail = [...missing.entries()].map(([fix, tids]) => `${fix} (used by: ${tids.join(", ")})`).join("; ");
+    return {
+        name,
+        ok: false,
+        severity: "fail",
+        message: `${missing.size} fixture(s) missing MANIFEST.json: ${detail}`,
+    };
+}
+/**
+ * Run all doctor checks in order. Returns a structured `DoctorResult`.
+ *
+ * Fails fast on check 1 (opencode binary missing) since subsequent checks
+ * that invoke opencode would also fail in a confusing way.
+ */
+export async function runDoctor(options) {
+    const { model, opencodeProviders, verbose = false, tasks } = options;
+    const allChecks = [];
+    if (verbose) {
+        process.stderr.write(`[bench doctor] model=${model}\n`);
+        if (opencodeProviders) {
+            process.stderr.write(`[bench doctor] providers loaded from: ${opencodeProviders.source}\n`);
+        }
+        else {
+            process.stderr.write("[bench doctor] no providers config (cloud-provider defaults)\n");
+        }
+        process.stderr.write("\n");
+    }
+    // ── Check 1: opencode binary ──────────────────────────────────────────────
+    const check1 = await checkOpencodeReachable(verbose);
+    allChecks.push(check1);
+    if (!check1.ok) {
+        // Fail fast: subsequent checks that spawn opencode will produce confusing
+        // errors. Short-circuit with the remaining checks as skipped.
+        process.stderr.write("[bench doctor] FAIL FAST: opencode not found — skipping remaining checks\n");
+        return { ok: false, checks: allChecks };
+    }
+    // ── Check 2: model resolves ───────────────────────────────────────────────
+    allChecks.push(await checkModelResolves(model, opencodeProviders, verbose));
+    // ── Check 3: materialise + run ────────────────────────────────────────────
+    allChecks.push(await checkMaterialiseAndRun(model, opencodeProviders, verbose));
+    // ── Check 4: stash fixture loadable ──────────────────────────────────────
+    allChecks.push(await checkStashFixtureLoadable(verbose));
+    // ── Check 5: verifier binaries ────────────────────────────────────────────
+    allChecks.push(...(await checkVerifierBinaries(verbose)));
+    // ── Check 6: opencode.json invariants ─────────────────────────────────────
+    allChecks.push(await checkOpencodeJsonInvariants(model, opencodeProviders, verbose));
+    // ── Check 7 (optional): fixture corpus ───────────────────────────────────
+    if (tasks && tasks.length > 0) {
+        allChecks.push(checkFixtureCorpus(tasks, verbose));
+    }
+    // overall ok = no "fail" severity checks (warns are ok)
+    const ok = allChecks.every((c) => c.severity !== "fail");
+    return { ok, checks: allChecks };
+}
+// ---------------------------------------------------------------------------
+// Formatting helper (used by the CLI)
+// ---------------------------------------------------------------------------
+/**
+ * Render a `DoctorResult` as a human-readable report string. Written to
+ * stderr by the CLI dispatcher.
+ */
+export function renderDoctorReport(result) {
+    const lines = [];
+    lines.push("## bench doctor\n");
+    for (const c of result.checks) {
+        const icon = c.severity === "pass" ? "PASS" : c.severity === "warn" ? "WARN" : "FAIL";
+        lines.push(` [${icon}] ${c.name}: ${c.message}`);
+    }
+    lines.push("");
+    lines.push(result.ok ? "All checks passed — harness is ready." : "One or more checks FAILED — fix before running bench.");
+    return lines.join("\n");
+}
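The new doctor.js module above exports runDoctor and renderDoctorReport, which the CLI wires to the `doctor` subcommand. As an editorial illustration only, not part of the published package, here is a minimal JavaScript sketch of how those exports might be driven, assuming only the option and result shapes visible in the hunk; the model id is a placeholder.

    import process from "node:process";
    import { renderDoctorReport, runDoctor } from "./doctor";

    // Assumed options shape: { model, opencodeProviders?, verbose?, tasks? },
    // as destructured by runDoctor in the hunk above.
    const result = await runDoctor({
        model: "local/example-model",  // hypothetical model id, not from the package
        opencodeProviders: undefined,  // no local providers file: cloud-provider defaults
        verbose: true,
    });
    process.stderr.write(`${renderDoctorReport(result)}\n`);
    process.exit(result.ok ? 0 : 1);  // 0 = all checks pass, 1 = any check failed
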
package/dist/tests/bench/driver.js
@@ -21,9 +21,11 @@
  * unit-testable with an injected fake spawn.
  */
 import fs from "node:fs";
+import os from "node:os";
 import path from "node:path";
 import { BUILTIN_AGENT_PROFILE_NAMES, getBuiltinAgentProfile } from "../../src/integrations/agent/profiles";
 import { runAgent } from "../../src/integrations/agent/spawn";
+import { setupBenchEnvironment } from "./environment";
 import { benchMkdtemp } from "./tmp";
 import { runVerifier } from "./verifier";
 /** Operator-config env names that MUST NOT leak into per-run children. */
@@ -74,6 +76,20 @@ export function createIsolationDirs(stashDir) {
     fs.mkdirSync(cacheHome, { recursive: true });
     fs.mkdirSync(configHome, { recursive: true });
     fs.mkdirSync(opencodeConfig, { recursive: true });
+    // Symlink the real opencode config dir into XDG_CONFIG_HOME so opencode
+    // can find its installed npm provider packages (node_modules). Without
+    // this, overriding XDG_CONFIG_HOME produces an empty opencode config dir
+    // and provider plugins (e.g. @ai-sdk/openai-compatible) fail to load.
+    // OPENCODE_CONFIG still points to our materialised file, which opencode
+    // reads in preference to XDG_CONFIG_HOME/opencode/opencode.json.
+    const realOpencodeConfigDir = path.join(os.homedir(), ".config", "opencode");
+    const isolatedOpencodeConfigDir = path.join(configHome, "opencode");
+    if (fs.existsSync(realOpencodeConfigDir)) {
+        fs.symlinkSync(realOpencodeConfigDir, isolatedOpencodeConfigDir);
+    }
+    else {
+        fs.mkdirSync(isolatedOpencodeConfigDir, { recursive: true });
+    }
     return {
         root,
         cacheHome,
@@ -87,7 +103,7 @@ export function buildIsolatedEnv(dirs, model) {
     const env = {
         XDG_CACHE_HOME: dirs.cacheHome,
         XDG_CONFIG_HOME: dirs.configHome,
-        OPENCODE_CONFIG: dirs.opencodeConfig,
+        OPENCODE_CONFIG: path.join(dirs.opencodeConfig, "opencode.json"),
         BENCH_OPENCODE_MODEL: model,
     };
     if (dirs.akmStashDir)
@@ -205,16 +221,44 @@ export function readRunEvents(cacheHome, opts) {
 }
 /** Default prompt forwarded to opencode when caller omits one. */
 function defaultPrompt(options) {
+    // For non-akm arms: keep the minimal format so the model is forced to read
+    // the workspace README.md to discover task specifics. Injecting the title
+    // here causes the model to answer from the prompt alone and skip the README,
+    // which breaks tasks where specific parameter values (names, IDs) only appear
+    // in the workspace files.
+    if (options.arm !== "akm") {
+        return [`Task: ${options.taskId}`, `Arm: ${options.arm}`, `Workspace: ${options.workspace}`].join("\n");
+    }
+    const title = options.taskTitle ? `\n${options.taskTitle}` : "";
+    const taskLine = `Task: ${options.taskId}${title}`;
+    // Derive search keywords: prefer explicit field, fall back to task domain.
+    const keywords = options.akmKeywords ?? options.taskId.split("/")[0].replace(/-/g, " ");
+    // Force the model to use the bash tool to run akm CLI commands before
+    // writing any output. Each step is an explicit bash invocation so the
+    // model cannot skip to writing the answer without executing the commands.
     return [
-        `
-
+        `You have access to a knowledge stash via the akm CLI tool.`,
+        ``,
+        `Step 1 — open a terminal and execute this bash command:`,
+        ` bash: akm search ${keywords}`,
+        ``,
+        `Step 2 — from the search results, execute:`,
+        ` bash: akm show <ref> (e.g. akm show skill:${keywords.split(" ")[0]})`,
+        ``,
+        `Step 3 — read README.md in the workspace to understand the specific task requirements:`,
+        ` bash: cat ${options.workspace}/README.md`,
+        ``,
+        `Step 4 — using the skill content from step 2 and the task requirements from step 3,`,
+        `write the answer to ${options.workspace}/commands.txt`,
+        ``,
+        `Step 5 — execute:`,
+        ` bash: akm feedback <ref> --positive (or --negative)`,
+        ``,
+        `DO NOT write commands.txt before running steps 1 and 2.`,
+        ``,
+        taskLine,
         `Workspace: ${options.workspace}`,
-
-            ? "An akm stash is configured via AKM_STASH_DIR. Use `akm search` and `akm show` to find relevant assets before acting."
-            : "",
-    ]
-        .filter(Boolean)
-        .join("\n");
+    ].join("\n");
 }
 /**
  * Run a single (task, arm, seed) and return the v1 RunResult envelope.
@@ -256,19 +300,29 @@ export async function runOne(options) {
         result.verifierStdout = `harness: built-in agent profile "opencode" missing; available: ${BUILTIN_AGENT_PROFILE_NAMES.join(", ")}`;
         return result;
     }
-    //
-    //
-    //
-    //
-
-
-
-
-
-
-
+    // Set up the complete bench environment: isolation dirs, opencode.json
+    // (with BENCH_OPENCODE_INVARIANTS), akm config.json, and FTS5 index.
+    // `dryRun: true` when a test-injected spawn is present — the fake stash
+    // doesn't exist on disk so the akm config and index writes are skipped.
+    let benchEnv;
+    try {
+        benchEnv = setupBenchEnvironment({
+            model: options.model,
+            arm: options.arm,
+            stashDir: options.stashDir,
+            indexCacheHome: options.indexCacheHome,
+            providers: options.opencodeProviders,
+            dryRun: !!options.spawn,
+            warnings: options.warnings,
+        });
+    }
+    catch (err) {
+        result.verifierStdout = `harness: environment setup failed: ${err instanceof Error ? err.message : String(err)}`;
+        return result;
     }
+    const { dirs, env } = benchEnv;
     try {
+        result.startedAt = new Date().toISOString();
         const agentResult = await runAgent(profile, options.prompt ?? defaultPrompt(options), {
             env,
             // #271: scrub operator credentials + config-dir hints from the env
@@ -282,6 +336,7 @@ export async function runOne(options) {
             stdio: "captured",
             ...(options.spawn ? { spawn: options.spawn } : {}),
         });
+        result.finishedAt = new Date().toISOString();
         result.wallclockMs = agentResult.durationMs;
         const parsed = parseTokenUsage(agentResult.stdout);
         result.tokens = { input: parsed.input, output: parsed.output };
@@ -332,9 +387,9 @@ export async function runOne(options) {
         return result;
     }
     finally {
-        // Always tear down the isolation tmpdir.
+        // Always tear down the isolation tmpdir. Events are read out before
         // deletion (see readRunEvents above), so this is safe.
-
+        benchEnv.teardown();
     }
 }
 /** Exposed for the unit test that asserts operator env never leaks. */
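For context on the buildIsolatedEnv change above, where OPENCODE_CONFIG now points at the materialised opencode.json file rather than the config directory, here is a small illustrative sketch, again not part of the package and assuming only the exports shown in the driver.js hunks; the model id is a placeholder.

    import { buildIsolatedEnv, createIsolationDirs } from "./driver";

    const dirs = createIsolationDirs(undefined);  // no akm stash for this example
    const env = buildIsolatedEnv(dirs, "local/example-model");  // hypothetical model id
    // After this release the value is the opencode.json file itself,
    // not the directory that contains it.
    console.log(env.OPENCODE_CONFIG);
    console.log(env.XDG_CONFIG_HOME);  // still the per-run isolated config home
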