@agentworkforce/cli 0.18.0 → 0.19.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +9 -0
- package/dist/cli.d.ts +37 -0
- package/dist/cli.d.ts.map +1 -1
- package/dist/cli.js +949 -50
- package/dist/cli.js.map +1 -1
- package/dist/cli.test.js +303 -1
- package/dist/cli.test.js.map +1 -1
- package/dist/launch-metadata.d.ts +7 -0
- package/dist/launch-metadata.d.ts.map +1 -1
- package/dist/launch-metadata.js +6 -2
- package/dist/launch-metadata.js.map +1 -1
- package/dist/local-personas.d.ts +7 -0
- package/dist/local-personas.d.ts.map +1 -1
- package/dist/local-personas.js +9 -3
- package/dist/local-personas.js.map +1 -1
- package/package.json +5 -5
package/dist/cli.js
CHANGED
|
@@ -1,12 +1,12 @@
|
|
|
1
1
|
#!/usr/bin/env node
|
|
2
2
|
import { spawn, spawnSync } from 'node:child_process';
|
|
3
3
|
import { randomBytes } from 'node:crypto';
|
|
4
|
-
import { appendFileSync, existsSync, mkdirSync, mkdtempSync, readFileSync, readSync, rmSync, statSync, writeFileSync } from 'node:fs';
|
|
4
|
+
import { appendFileSync, closeSync, existsSync, mkdirSync, mkdtempSync, openSync, readdirSync, readFileSync, readSync, rmSync, statSync, writeFileSync } from 'node:fs';
|
|
5
5
|
import { constants, homedir, tmpdir } from 'node:os';
|
|
6
6
|
import { dirname, isAbsolute, join, resolve as resolvePath } from 'node:path';
|
|
7
7
|
import { pathToFileURL } from 'node:url';
|
|
8
8
|
import { HARNESS_VALUES, materializeSkills, PERSONA_TAGS, PERSONA_TIERS, listBuiltInPersonas, personaCatalog, resolveSidecar, routingProfiles, useSelection } from '@agentworkforce/workload-router';
|
|
9
|
-
import { buildInteractiveSpec, detectHarnesses, formatDropWarnings, MissingPersonaInputError, renderPersonaInputs, resolvePersonaInputs, resolveMcpServersLenient, resolveStringMapLenient } from '@agentworkforce/harness-kit';
|
|
9
|
+
import { buildInteractiveSpec, detectHarnesses, formatDropWarnings, MissingPersonaInputError, renderPersonaInputs, resolvePersonaInputs, resolveMcpServersLenient, resolveStringMapLenient, useRunnableSelection } from '@agentworkforce/harness-kit';
|
|
10
10
|
import { createMount, readAgentDotfiles } from '@relayfile/local-mount';
|
|
11
11
|
import ora from 'ora';
|
|
12
12
|
import { startLaunchMetadataRecording } from './launch-metadata.js';
|
|
@@ -332,28 +332,45 @@ function subprocessExitCode(res) {
|
|
|
332
332
|
* the buffered output is dumped after spinner.fail so the user sees what
|
|
333
333
|
* actually broke. stdin is ignored — the install commands don't prompt.
|
|
334
334
|
*
|
|
335
|
+
* Uses async `spawn` (not `spawnSync`) because ora's frame redraw runs on a
|
|
336
|
+
* setInterval — `spawnSync` blocks the event loop for the duration of the
|
|
337
|
+
* install, freezing the spinner on its first frame.
|
|
338
|
+
*
|
|
335
339
|
* The spinner text stays "Installing skills…" while running; the longer
|
|
336
340
|
* `label` (which includes target paths and skill ids) is shown on
|
|
337
341
|
* success/failure so the verbose detail is still discoverable in logs.
|
|
338
342
|
*/
|
|
339
|
-
function runInstallWithSpinner(command, label, cwd) {
|
|
343
|
+
async function runInstallWithSpinner(command, label, cwd) {
|
|
340
344
|
const [bin, ...args] = command;
|
|
341
345
|
if (!bin)
|
|
342
346
|
return { code: 0, output: '' };
|
|
343
347
|
const spinner = ora({ text: 'Installing skills…', stream: process.stderr }).start();
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
348
|
+
// Async spawn (not spawnSync) so ora's frame timer can fire during the
|
|
349
|
+
// install — spawnSync blocks the event loop and freezes the spinner on
|
|
350
|
+
// its first frame.
|
|
351
|
+
const { code, output } = await new Promise((resolve) => {
|
|
352
|
+
const child = spawn(bin, args, {
|
|
353
|
+
stdio: ['ignore', 'pipe', 'pipe'],
|
|
354
|
+
shell: false,
|
|
355
|
+
...(cwd ? { cwd } : {})
|
|
356
|
+
});
|
|
357
|
+
let buffered = '';
|
|
358
|
+
child.stdout?.setEncoding('utf8');
|
|
359
|
+
child.stderr?.setEncoding('utf8');
|
|
360
|
+
child.stdout?.on('data', (chunk) => {
|
|
361
|
+
buffered += chunk;
|
|
362
|
+
});
|
|
363
|
+
child.stderr?.on('data', (chunk) => {
|
|
364
|
+
buffered += chunk;
|
|
365
|
+
});
|
|
366
|
+
child.on('error', (err) => {
|
|
367
|
+
resolve({ code: 1, output: `${buffered}${err.message}\n` });
|
|
368
|
+
});
|
|
369
|
+
child.on('close', (status, signal) => {
|
|
370
|
+
const exit = typeof status === 'number' ? status : signal ? signalExitCode(signal) : 1;
|
|
371
|
+
resolve({ code: exit, output: buffered });
|
|
372
|
+
});
|
|
354
373
|
});
|
|
355
|
-
const output = `${res.stdout ?? ''}${res.stderr ?? ''}`;
|
|
356
|
-
const code = subprocessExitCode(res);
|
|
357
374
|
if (code === 0) {
|
|
358
375
|
spinner.succeed(label);
|
|
359
376
|
}
|
|
@@ -364,13 +381,13 @@ function runInstallWithSpinner(command, label, cwd) {
|
|
|
364
381
|
}
|
|
365
382
|
return { code, output };
|
|
366
383
|
}
|
|
367
|
-
function runInstall(command, label, cwd) {
|
|
384
|
+
async function runInstall(command, label, cwd) {
|
|
368
385
|
const [bin] = command;
|
|
369
386
|
if (!bin)
|
|
370
387
|
return;
|
|
371
388
|
// runInstallWithSpinner already prints the failure line via spinner.fail;
|
|
372
389
|
// the previous extra "${label} failed … Aborting." write would duplicate it.
|
|
373
|
-
const { code } = runInstallWithSpinner(command, label, cwd);
|
|
390
|
+
const { code } = await runInstallWithSpinner(command, label, cwd);
|
|
374
391
|
if (code !== 0)
|
|
375
392
|
process.exit(code);
|
|
376
393
|
}
|
|
@@ -392,11 +409,11 @@ class InstallCommandError extends Error {
|
|
|
392
409
|
* Used inside the mount branch's onBeforeLaunch step so mount teardown runs
|
|
393
410
|
* before the error surfaces.
|
|
394
411
|
*/
|
|
395
|
-
function runInstallOrThrow(command, label, cwd) {
|
|
412
|
+
async function runInstallOrThrow(command, label, cwd) {
|
|
396
413
|
const [bin] = command;
|
|
397
414
|
if (!bin)
|
|
398
415
|
return;
|
|
399
|
-
const { code } = runInstallWithSpinner(command, label, cwd);
|
|
416
|
+
const { code } = await runInstallWithSpinner(command, label, cwd);
|
|
400
417
|
if (code !== 0) {
|
|
401
418
|
throw new InstallCommandError(label, code);
|
|
402
419
|
}
|
|
@@ -949,7 +966,7 @@ async function runInteractive(selection, options) {
|
|
|
949
966
|
// `onBeforeLaunch` below instead of pre-running here.
|
|
950
967
|
const deferInstallToMount = useClean && runtime.harness !== 'claude' && install.commandString !== ':';
|
|
951
968
|
if (install.commandString !== ':' && !deferInstallToMount) {
|
|
952
|
-
runInstall(install.command, installLabel);
|
|
969
|
+
await runInstall(install.command, installLabel);
|
|
953
970
|
}
|
|
954
971
|
const spec = buildInteractiveSpec({
|
|
955
972
|
harness: runtime.harness,
|
|
@@ -1039,32 +1056,61 @@ async function runInteractive(selection, options) {
|
|
|
1039
1056
|
mount: effectiveSelection.mount,
|
|
1040
1057
|
configFilePaths: spec.configFiles.map((file) => file.path)
|
|
1041
1058
|
});
|
|
1042
|
-
|
|
1059
|
+
// Setup spinner covers createMount + git-config + (optional) in-mount
|
|
1060
|
+
// install + config-file writes + autosync start, so the multi-second
|
|
1061
|
+
// pause before the harness child appears is visibly live. createMount
|
|
1062
|
+
// is async in @relayfile/local-mount ≥0.7.0, which yields between
|
|
1063
|
+
// directory entries — so this spinner actually animates instead of
|
|
1064
|
+
// freezing on its first frame.
|
|
1065
|
+
let setupSpinner = ora({
|
|
1066
|
+
text: `Setting up sandbox mount → ${mountDir}…`,
|
|
1067
|
+
stream: process.stderr
|
|
1068
|
+
}).start();
|
|
1043
1069
|
// Inline mount lifecycle (formerly delegated to launchOnMount) so we can
|
|
1044
1070
|
// surface a spinner the moment the child exits — not just when the user
|
|
1045
1071
|
// presses Ctrl-C. The sync-back walks both trees and can take several
|
|
1046
1072
|
// seconds on a large repo; without an indicator, exiting the persona via
|
|
1047
1073
|
// /exit looked like a hang.
|
|
1048
1074
|
//
|
|
1049
|
-
// SIGINT semantics:
|
|
1050
|
-
// •
|
|
1051
|
-
//
|
|
1052
|
-
//
|
|
1053
|
-
//
|
|
1054
|
-
//
|
|
1055
|
-
//
|
|
1056
|
-
//
|
|
1057
|
-
//
|
|
1058
|
-
//
|
|
1059
|
-
//
|
|
1060
|
-
//
|
|
1075
|
+
// SIGINT semantics — three phases:
|
|
1076
|
+
// • Pre-launch (setup): tear down the setup spinner, rm the session
|
|
1077
|
+
// dir, and exit(130). We must handle this ourselves because
|
|
1078
|
+
// registering any 'SIGINT' listener suppresses Node's default
|
|
1079
|
+
// exit-on-SIGINT, and createMount is now async (relayfile 0.7+) so
|
|
1080
|
+
// the handler actually fires during mount setup.
|
|
1081
|
+
// • Child running: Ctrl-C reaches the harness directly via the
|
|
1082
|
+
// controlling TTY's foreground process group (the child is spawned
|
|
1083
|
+
// with `stdio: 'inherit'` and inherits the parent's pgid). We
|
|
1084
|
+
// no-op purely to suppress Node's default exit — forwarding via
|
|
1085
|
+
// child.kill('SIGINT') would deliver a *second* SIGINT and break
|
|
1086
|
+
// harnesses that escalate on repeated interrupts (e.g. claude
|
|
1087
|
+
// treats 1st = cancel, 2nd = quit).
|
|
1088
|
+
// • Syncing (post-child): 1st press aborts the shutdownSignal
|
|
1089
|
+
// (relayfile then skips autosync's draining reconcile and returns
|
|
1090
|
+
// the partial count from the final syncBack). 2nd press hard-exits
|
|
1091
|
+
// and rms the session dir so no mount is left behind.
|
|
1061
1092
|
const shutdownController = new AbortController();
|
|
1062
1093
|
let syncSpinner;
|
|
1063
1094
|
let isSyncing = false;
|
|
1095
|
+
let childSpawned = false;
|
|
1064
1096
|
let abortPresses = 0;
|
|
1065
1097
|
const sigintHandler = () => {
|
|
1066
|
-
if (!isSyncing)
|
|
1067
|
-
|
|
1098
|
+
if (!isSyncing) {
|
|
1099
|
+
if (childSpawned)
|
|
1100
|
+
return;
|
|
1101
|
+
// Pre-launch teardown.
|
|
1102
|
+
if (setupSpinner) {
|
|
1103
|
+
setupSpinner.fail('Sandbox mount setup interrupted (Ctrl-C)');
|
|
1104
|
+
setupSpinner = undefined;
|
|
1105
|
+
}
|
|
1106
|
+
try {
|
|
1107
|
+
rmSync(sessionRoot, { recursive: true, force: true });
|
|
1108
|
+
}
|
|
1109
|
+
catch {
|
|
1110
|
+
/* swallow — we're exiting anyway */
|
|
1111
|
+
}
|
|
1112
|
+
process.exit(130);
|
|
1113
|
+
}
|
|
1068
1114
|
abortPresses += 1;
|
|
1069
1115
|
if (abortPresses === 1) {
|
|
1070
1116
|
if (syncSpinner) {
|
|
@@ -1090,27 +1136,35 @@ async function runInteractive(selection, options) {
|
|
|
1090
1136
|
process.exit(130);
|
|
1091
1137
|
};
|
|
1092
1138
|
process.on('SIGINT', sigintHandler);
|
|
1093
|
-
|
|
1094
|
-
ignoredPatterns: [...ignoredPatterns],
|
|
1095
|
-
readonlyPatterns: [...readonlyPatterns],
|
|
1096
|
-
excludeDirs: [],
|
|
1097
|
-
agentName: personaId,
|
|
1098
|
-
// Pull `.git` into the mount so git commands work inside the sandbox.
|
|
1099
|
-
// relayfile treats this as one-way project→mount: host-side `.git`
|
|
1100
|
-
// changes flow in, mount-side commits/refs stay sandboxed and are
|
|
1101
|
-
// discarded on cleanup. The agent must `git push` to persist work.
|
|
1102
|
-
includeGit: true
|
|
1103
|
-
});
|
|
1139
|
+
let handle;
|
|
1104
1140
|
let autoSync;
|
|
1105
1141
|
let exitCode = 0;
|
|
1106
1142
|
try {
|
|
1143
|
+
// createMount inside the try so its initial-mirror failures fall into
|
|
1144
|
+
// the catch path and clean up the setup spinner.
|
|
1145
|
+
handle = await createMount(process.cwd(), mountDir, {
|
|
1146
|
+
ignoredPatterns: [...ignoredPatterns],
|
|
1147
|
+
readonlyPatterns: [...readonlyPatterns],
|
|
1148
|
+
excludeDirs: [],
|
|
1149
|
+
agentName: personaId,
|
|
1150
|
+
// Pull `.git` into the mount so git commands work inside the
|
|
1151
|
+
// sandbox. relayfile treats this as one-way project→mount: host-side
|
|
1152
|
+
// `.git` changes flow in, mount-side commits/refs stay sandboxed and
|
|
1153
|
+
// are discarded on cleanup. The agent must `git push` to persist
|
|
1154
|
+
// work.
|
|
1155
|
+
includeGit: true
|
|
1156
|
+
});
|
|
1107
1157
|
// Run before install / configFile writes so the freshly written files
|
|
1108
1158
|
// (e.g. `.opencode/`, `opencode.json`) aren't yet present when we run
|
|
1109
1159
|
// `git ls-files` to pick skip-worktree candidates — we don't need them
|
|
1110
1160
|
// flagged in the index, just hidden via the `.git/info/exclude` block.
|
|
1111
1161
|
configureGitForMount(handle.mountDir, ignoredPatterns);
|
|
1112
1162
|
if (deferInstallToMount) {
|
|
1113
|
-
|
|
1163
|
+
// Hand the line off to the install spinner so the two don't fight
|
|
1164
|
+
// for the same stream, then resume the setup spinner afterwards.
|
|
1165
|
+
setupSpinner?.stop();
|
|
1166
|
+
await runInstallOrThrow(install.command, installLabel, handle.mountDir);
|
|
1167
|
+
setupSpinner?.start();
|
|
1114
1168
|
}
|
|
1115
1169
|
for (const file of spec.configFiles) {
|
|
1116
1170
|
assertSafeRelativePath(file.path);
|
|
@@ -1123,11 +1177,30 @@ async function runInteractive(selection, options) {
|
|
|
1123
1177
|
writeFileSync(join(handle.mountDir, resolvedSidecar.mountFile), body, 'utf8');
|
|
1124
1178
|
}
|
|
1125
1179
|
launchMetadata = await startLaunchMetadataForLaunch(handle.mountDir);
|
|
1180
|
+
if (options.capture) {
|
|
1181
|
+
options.capture.stampEnrichment = { ...launchMetadata.metadata };
|
|
1182
|
+
options.capture.stampingEnabled = launchMetadata.enabled;
|
|
1183
|
+
}
|
|
1126
1184
|
autoSync = handle.startAutoSync();
|
|
1185
|
+
// Stop the setup spinner before spawning the child — the child
|
|
1186
|
+
// inherits stdio and would otherwise interleave its output with
|
|
1187
|
+
// spinner frames.
|
|
1188
|
+
setupSpinner?.succeed(`Sandbox mount ready → ${mountDir}`);
|
|
1189
|
+
setupSpinner = undefined;
|
|
1127
1190
|
const childEnv = resolvedEnv ? { ...process.env, ...resolvedEnv } : process.env;
|
|
1191
|
+
const childCwd = handle.mountDir;
|
|
1192
|
+
if (options.capture) {
|
|
1193
|
+
options.capture.sessionCwd = childCwd;
|
|
1194
|
+
options.capture.harness = runtime.harness;
|
|
1195
|
+
options.capture.startedAt = Date.now();
|
|
1196
|
+
}
|
|
1197
|
+
// Flip the SIGINT phase flag before spawn so a Ctrl-C arriving during
|
|
1198
|
+
// the child's lifetime is treated as "child has the TTY" (no-op),
|
|
1199
|
+
// not as pre-launch teardown.
|
|
1200
|
+
childSpawned = true;
|
|
1128
1201
|
exitCode = await new Promise((resolve, reject) => {
|
|
1129
1202
|
const child = spawn(spec.bin, finalArgs, {
|
|
1130
|
-
cwd:
|
|
1203
|
+
cwd: childCwd,
|
|
1131
1204
|
stdio: 'inherit',
|
|
1132
1205
|
env: childEnv
|
|
1133
1206
|
});
|
|
@@ -1169,6 +1242,10 @@ async function runInteractive(selection, options) {
|
|
|
1169
1242
|
return exitCode;
|
|
1170
1243
|
}
|
|
1171
1244
|
catch (err) {
|
|
1245
|
+
if (setupSpinner) {
|
|
1246
|
+
setupSpinner.fail('Sandbox mount setup failed');
|
|
1247
|
+
setupSpinner = undefined;
|
|
1248
|
+
}
|
|
1172
1249
|
if (syncSpinner) {
|
|
1173
1250
|
syncSpinner.fail('Sync did not complete');
|
|
1174
1251
|
syncSpinner = undefined;
|
|
@@ -1190,6 +1267,10 @@ async function runInteractive(selection, options) {
|
|
|
1190
1267
|
return 1;
|
|
1191
1268
|
}
|
|
1192
1269
|
finally {
|
|
1270
|
+
if (setupSpinner) {
|
|
1271
|
+
setupSpinner.stop();
|
|
1272
|
+
setupSpinner = undefined;
|
|
1273
|
+
}
|
|
1193
1274
|
if (syncSpinner) {
|
|
1194
1275
|
syncSpinner.stop();
|
|
1195
1276
|
syncSpinner = undefined;
|
|
@@ -1204,7 +1285,7 @@ async function runInteractive(selection, options) {
|
|
|
1204
1285
|
/* ignore — we're tearing down anyway */
|
|
1205
1286
|
}
|
|
1206
1287
|
}
|
|
1207
|
-
handle
|
|
1288
|
+
handle?.cleanup();
|
|
1208
1289
|
await launchMetadata?.stop();
|
|
1209
1290
|
process.removeListener('SIGINT', sigintHandler);
|
|
1210
1291
|
// When the install ran inside the mount, its cleanup paths are
|
|
@@ -1220,6 +1301,13 @@ async function runInteractive(selection, options) {
|
|
|
1220
1301
|
}
|
|
1221
1302
|
}
|
|
1222
1303
|
const launchMetadata = await startLaunchMetadataForLaunch();
|
|
1304
|
+
if (options.capture) {
|
|
1305
|
+
options.capture.sessionCwd = process.cwd();
|
|
1306
|
+
options.capture.harness = runtime.harness;
|
|
1307
|
+
options.capture.startedAt = Date.now();
|
|
1308
|
+
options.capture.stampEnrichment = { ...launchMetadata.metadata };
|
|
1309
|
+
options.capture.stampingEnabled = launchMetadata.enabled;
|
|
1310
|
+
}
|
|
1223
1311
|
return new Promise((resolve) => {
|
|
1224
1312
|
let settled = false;
|
|
1225
1313
|
const finish = (code) => {
|
|
@@ -2042,14 +2130,825 @@ async function runAgentSelector(selector, flags, inputValues) {
|
|
|
2042
2130
|
const code = runDryRun(selection);
|
|
2043
2131
|
process.exit(code);
|
|
2044
2132
|
}
|
|
2133
|
+
const capture = {};
|
|
2045
2134
|
const code = await runInteractive(selection, {
|
|
2046
2135
|
installInRepo: flags.installInRepo,
|
|
2047
2136
|
noLaunchMetadata: flags.noLaunchMetadata,
|
|
2048
2137
|
personaSpec: target.spec,
|
|
2049
|
-
personaSource: target.source
|
|
2138
|
+
personaSource: target.source,
|
|
2139
|
+
capture
|
|
2140
|
+
});
|
|
2141
|
+
// Post-session learnings prompt: only for local personas (built-in
|
|
2142
|
+
// catalog and pack personas are read-only here), and only when stdin
|
|
2143
|
+
// is a TTY so we can read y/N. Improver failures never affect the
|
|
2144
|
+
// user-facing exit code — the original session's exit is what matters.
|
|
2145
|
+
await maybeOfferLearningsImprover({
|
|
2146
|
+
target,
|
|
2147
|
+
capture,
|
|
2148
|
+
flags
|
|
2050
2149
|
});
|
|
2051
2150
|
process.exit(code);
|
|
2052
2151
|
}
|
|
2152
|
+
/**
|
|
2153
|
+
* Decide whether to offer post-session auto-improvement, run the improver,
|
|
2154
|
+
* walk the proposals interactively, and apply accepted patches. Silently
|
|
2155
|
+
* skips the prompt when the persona is built-in or stdin is not a TTY.
|
|
2156
|
+
*
|
|
2157
|
+
* Failures (improver crash, malformed proposals JSON, unwriteable persona
|
|
2158
|
+
* file) are surfaced as warnings on stderr; they never throw or change
|
|
2159
|
+
* the original session's exit code. The user already saw their session
|
|
2160
|
+
* complete — a flaky meta-step shouldn't mask that.
|
|
2161
|
+
*/
|
|
2162
|
+
async function maybeOfferLearningsImprover(ctx) {
|
|
2163
|
+
if (ctx.target.kind !== 'local')
|
|
2164
|
+
return;
|
|
2165
|
+
if (ctx.target.source === 'library')
|
|
2166
|
+
return;
|
|
2167
|
+
const personaFilePath = local.paths.get(ctx.target.spec.id);
|
|
2168
|
+
if (!personaFilePath) {
|
|
2169
|
+
// No on-disk path means we can't apply patches even if the user agrees.
|
|
2170
|
+
// Skip silently — local-personas would have warned at load time.
|
|
2171
|
+
return;
|
|
2172
|
+
}
|
|
2173
|
+
if (!process.stdin.isTTY || !process.stderr.isTTY)
|
|
2174
|
+
return;
|
|
2175
|
+
const personaId = ctx.target.spec.id;
|
|
2176
|
+
const wantsImprover = promptYesNoSync(`\nAuto-improve "${personaId}" from this session? [y/N] `);
|
|
2177
|
+
if (!wantsImprover)
|
|
2178
|
+
return;
|
|
2179
|
+
let transcriptPath = '';
|
|
2180
|
+
try {
|
|
2181
|
+
if (ctx.capture.stampingEnabled && ctx.capture.stampEnrichment) {
|
|
2182
|
+
transcriptPath =
|
|
2183
|
+
(await findSessionTranscriptViaStamps({
|
|
2184
|
+
harness: ctx.capture.harness,
|
|
2185
|
+
sessionCwd: ctx.capture.sessionCwd,
|
|
2186
|
+
enrichment: ctx.capture.stampEnrichment,
|
|
2187
|
+
startedAt: ctx.capture.startedAt
|
|
2188
|
+
})) ?? '';
|
|
2189
|
+
}
|
|
2190
|
+
if (!transcriptPath) {
|
|
2191
|
+
transcriptPath =
|
|
2192
|
+
findSessionTranscriptPath({
|
|
2193
|
+
harness: ctx.capture.harness,
|
|
2194
|
+
sessionCwd: ctx.capture.sessionCwd,
|
|
2195
|
+
startedAt: ctx.capture.startedAt
|
|
2196
|
+
}) ?? '';
|
|
2197
|
+
}
|
|
2198
|
+
}
|
|
2199
|
+
catch (err) {
|
|
2200
|
+
process.stderr.write(`warning: could not locate session transcript: ${err.message}\n`);
|
|
2201
|
+
}
|
|
2202
|
+
if (!transcriptPath) {
|
|
2203
|
+
process.stderr.write(`note: session transcript not found for harness "${ctx.capture.harness ?? '?'}" — proceeding from persona file alone.\n`);
|
|
2204
|
+
}
|
|
2205
|
+
let proposals;
|
|
2206
|
+
const proposalsTempPath = join(tmpdir(), `agentworkforce-proposals-${randomBytes(6).toString('hex')}.json`);
|
|
2207
|
+
const spinner = ora({
|
|
2208
|
+
text: 'Extracting learnings via persona-improver…',
|
|
2209
|
+
stream: process.stderr
|
|
2210
|
+
}).start();
|
|
2211
|
+
try {
|
|
2212
|
+
proposals = await runPersonaImprover({
|
|
2213
|
+
personaFilePath,
|
|
2214
|
+
transcriptPath,
|
|
2215
|
+
proposalsOutputPath: proposalsTempPath
|
|
2216
|
+
});
|
|
2217
|
+
spinner.succeed(proposals.proposals.length === 0
|
|
2218
|
+
? 'persona-improver: no improvements to propose.'
|
|
2219
|
+
: `persona-improver: found ${proposals.proposals.length} proposed improvement${proposals.proposals.length === 1 ? '' : 's'}.`);
|
|
2220
|
+
}
|
|
2221
|
+
catch (err) {
|
|
2222
|
+
spinner.fail(`persona-improver failed: ${err.message}`);
|
|
2223
|
+
return;
|
|
2224
|
+
}
|
|
2225
|
+
finally {
|
|
2226
|
+
try {
|
|
2227
|
+
rmSync(proposalsTempPath, { force: true });
|
|
2228
|
+
}
|
|
2229
|
+
catch {
|
|
2230
|
+
/* swallow — temp file in $TMPDIR is harmless */
|
|
2231
|
+
}
|
|
2232
|
+
}
|
|
2233
|
+
if (!proposals || proposals.proposals.length === 0)
|
|
2234
|
+
return;
|
|
2235
|
+
const accepted = walkProposalsInteractive(proposals);
|
|
2236
|
+
if (accepted.length === 0) {
|
|
2237
|
+
process.stderr.write('No improvements applied.\n');
|
|
2238
|
+
return;
|
|
2239
|
+
}
|
|
2240
|
+
try {
|
|
2241
|
+
applyAcceptedPatches(personaFilePath, accepted);
|
|
2242
|
+
process.stderr.write(`✓ Applied ${accepted.length} improvement${accepted.length === 1 ? '' : 's'} to ${personaFilePath}\n`);
|
|
2243
|
+
}
|
|
2244
|
+
catch (err) {
|
|
2245
|
+
process.stderr.write(`warning: failed to write updated persona to ${personaFilePath}: ${err.message}\n`);
|
|
2246
|
+
}
|
|
2247
|
+
}
|
|
2248
|
+
/**
|
|
2249
|
+
* Allowlist of dot-paths the improver may rewrite via `op: "set"`. Mirrors
|
|
2250
|
+
* the patch grammar advertised in the persona's AGENTS.md — anything else
|
|
2251
|
+
* is a defense-in-depth reject (the persona's anti-goals already say "no
|
|
2252
|
+
* changes to id/intent/harness/model/permissions", but we don't trust the
|
|
2253
|
+
* model alone for a flow that mutates the user's persona file in place).
|
|
2254
|
+
*/
|
|
2255
|
+
const ALLOWED_SET_PATHS = [
|
|
2256
|
+
'description',
|
|
2257
|
+
'agentsMdContent',
|
|
2258
|
+
'claudeMdContent',
|
|
2259
|
+
'tags',
|
|
2260
|
+
'tiers.best.systemPrompt',
|
|
2261
|
+
'tiers.best-value.systemPrompt',
|
|
2262
|
+
'tiers.minimum.systemPrompt'
|
|
2263
|
+
];
|
|
2264
|
+
/**
|
|
2265
|
+
* Allowlist of dot-paths the improver may rewrite via `op: "append"`.
|
|
2266
|
+
* Currently just `skills` — the only array the AGENTS.md grammar exposes
|
|
2267
|
+
* for append-style mutation.
|
|
2268
|
+
*/
|
|
2269
|
+
const ALLOWED_APPEND_PATHS = ['skills'];
|
|
2270
|
+
/**
|
|
2271
|
+
* Reserved JSON-object keys that must never appear as a path segment —
|
|
2272
|
+
* setting them would either pollute the prototype chain (`__proto__`,
|
|
2273
|
+
* `constructor`, `prototype`) for the running process or rewrite a
|
|
2274
|
+
* built-in property that downstream code relies on. Belt-and-braces
|
|
2275
|
+
* alongside the path allowlist; even an `inputs.<NAME>` segment can't
|
|
2276
|
+
* smuggle one of these in.
|
|
2277
|
+
*/
|
|
2278
|
+
const FORBIDDEN_PATH_SEGMENTS = new Set(['__proto__', 'constructor', 'prototype']);
|
|
2279
|
+
function assertSafePathSegments(path, context) {
|
|
2280
|
+
const segments = path.split('.').filter((s) => s.length > 0);
|
|
2281
|
+
if (segments.length === 0) {
|
|
2282
|
+
throw new Error(`${context}: path is empty`);
|
|
2283
|
+
}
|
|
2284
|
+
for (const seg of segments) {
|
|
2285
|
+
if (FORBIDDEN_PATH_SEGMENTS.has(seg)) {
|
|
2286
|
+
throw new Error(`${context}: path "${path}" contains forbidden segment "${seg}"`);
|
|
2287
|
+
}
|
|
2288
|
+
}
|
|
2289
|
+
return segments;
|
|
2290
|
+
}
|
|
2291
|
+
/**
|
|
2292
|
+
* Validate one improver patch against the path/op allowlist + the
|
|
2293
|
+
* prototype-segment guard. Throws a descriptive error rejected at parse
|
|
2294
|
+
* time so the CLI never offers a disallowed proposal to the user.
|
|
2295
|
+
*
|
|
2296
|
+
* Allowed set paths: see ALLOWED_SET_PATHS, plus any `inputs.<NAME>`
|
|
2297
|
+
* (NAME must be env-style, matching the persona-input naming rule).
|
|
2298
|
+
* Allowed append paths: see ALLOWED_APPEND_PATHS.
|
|
2299
|
+
*/
|
|
2300
|
+
function assertAllowedImproverPatch(patch, context) {
|
|
2301
|
+
assertSafePathSegments(patch.path, context);
|
|
2302
|
+
if (patch.op === 'set') {
|
|
2303
|
+
if (ALLOWED_SET_PATHS.includes(patch.path))
|
|
2304
|
+
return;
|
|
2305
|
+
if (patch.path.startsWith('inputs.')) {
|
|
2306
|
+
const after = patch.path.slice('inputs.'.length);
|
|
2307
|
+
if (!/^[A-Z_][A-Z0-9_]*$/.test(after)) {
|
|
2308
|
+
throw new Error(`${context}: inputs path "${patch.path}" must use an env-style NAME (got "${after}")`);
|
|
2309
|
+
}
|
|
2310
|
+
return;
|
|
2311
|
+
}
|
|
2312
|
+
throw new Error(`${context}: set path "${patch.path}" is not in the allowlist`);
|
|
2313
|
+
}
|
|
2314
|
+
if (patch.op === 'append') {
|
|
2315
|
+
if (!ALLOWED_APPEND_PATHS.includes(patch.path)) {
|
|
2316
|
+
throw new Error(`${context}: append path "${patch.path}" is not in the allowlist`);
|
|
2317
|
+
}
|
|
2318
|
+
return;
|
|
2319
|
+
}
|
|
2320
|
+
throw new Error(`${context}: unknown patch op "${patch.op}"`);
|
|
2321
|
+
}
|
|
2322
|
+
/**
|
|
2323
|
+
* Locate the just-ended session's transcript via the burn-stamp ledger.
|
|
2324
|
+
* Authoritative when stamping is wired: `launch-metadata.ts` writes a
|
|
2325
|
+
* pending stamp (with our `personaVersion` enrichment hash) before spawn
|
|
2326
|
+
* and runs `ingest` on a 1s tick + once at stop, so by the time we get
|
|
2327
|
+
* here the ledger already has a row whose `selector.sessionId` is the
|
|
2328
|
+
* harness's own session id. We filter by `persona` + `personaVersion`
|
|
2329
|
+
* (unique per persona spec hash) and `ts` near `startedAt` to avoid
|
|
2330
|
+
* picking up a sibling launch of the same persona, then resolve the
|
|
2331
|
+
* sessionId to a transcript file path per harness.
|
|
2332
|
+
*
|
|
2333
|
+
* Returns undefined when:
|
|
2334
|
+
* - the SDK call fails
|
|
2335
|
+
* - no row matches (ingest hasn't reconciled yet, or stamping is off)
|
|
2336
|
+
* - the resolved sessionId can't be located on disk
|
|
2337
|
+
* Caller falls back to `findSessionTranscriptPath` (cwd-content match).
|
|
2338
|
+
*/
|
|
2339
|
+
async function findSessionTranscriptViaStamps(input) {
|
|
2340
|
+
if (!input.harness || !input.sessionCwd)
|
|
2341
|
+
return undefined;
|
|
2342
|
+
const persona = input.enrichment.persona;
|
|
2343
|
+
const personaVersion = input.enrichment.personaVersion;
|
|
2344
|
+
if (!persona || !personaVersion)
|
|
2345
|
+
return undefined;
|
|
2346
|
+
let sdk;
|
|
2347
|
+
try {
|
|
2348
|
+
sdk = await import('@relayburn/sdk');
|
|
2349
|
+
}
|
|
2350
|
+
catch {
|
|
2351
|
+
return undefined;
|
|
2352
|
+
}
|
|
2353
|
+
if (typeof sdk.exportStamps !== 'function')
|
|
2354
|
+
return undefined;
|
|
2355
|
+
let rows;
|
|
2356
|
+
try {
|
|
2357
|
+
rows = await sdk.exportStamps();
|
|
2358
|
+
}
|
|
2359
|
+
catch {
|
|
2360
|
+
return undefined;
|
|
2361
|
+
}
|
|
2362
|
+
const startedAt = input.startedAt ?? 0;
|
|
2363
|
+
const spawnerPid = input.enrichment.spawnerPid;
|
|
2364
|
+
// Tight window around our session: stamps written before our spawn
|
|
2365
|
+
// (minus tolerance for clock skew) or after the prompt fires (plus
|
|
2366
|
+
// tolerance for ingest latency) can't be ours. The upper bound matters
|
|
2367
|
+
// when a sibling launch of the same persona starts AFTER ours but
|
|
2368
|
+
// before we get here — without it, max-ts wins picks the wrong row.
|
|
2369
|
+
const LOWER_TOLERANCE_MS = 5000;
|
|
2370
|
+
const UPPER_TOLERANCE_MS = 1000;
|
|
2371
|
+
const lowerMs = startedAt - LOWER_TOLERANCE_MS;
|
|
2372
|
+
const upperMs = Date.now() + UPPER_TOLERANCE_MS;
|
|
2373
|
+
let bestSessionId;
|
|
2374
|
+
// Prefer the stamp closest to our spawn time (smallest |ts - startedAt|),
|
|
2375
|
+
// not the most recent. Same-persona concurrent launches can both fall
|
|
2376
|
+
// inside the window; the one launched at our PID/time is the right one.
|
|
2377
|
+
let bestDelta = Number.POSITIVE_INFINITY;
|
|
2378
|
+
let pidMatched = false;
|
|
2379
|
+
for (const row of rows) {
|
|
2380
|
+
if (!row || typeof row !== 'object')
|
|
2381
|
+
continue;
|
|
2382
|
+
const r = row;
|
|
2383
|
+
const sessionId = r.selector?.sessionId;
|
|
2384
|
+
const enrichment = r.enrichment;
|
|
2385
|
+
const ts = r.ts;
|
|
2386
|
+
if (typeof sessionId !== 'string' || !enrichment || typeof ts !== 'string')
|
|
2387
|
+
continue;
|
|
2388
|
+
if (enrichment.persona !== persona)
|
|
2389
|
+
continue;
|
|
2390
|
+
if (enrichment.personaVersion !== personaVersion)
|
|
2391
|
+
continue;
|
|
2392
|
+
const tsMs = Date.parse(ts);
|
|
2393
|
+
if (!Number.isFinite(tsMs))
|
|
2394
|
+
continue;
|
|
2395
|
+
if (tsMs < lowerMs || tsMs > upperMs)
|
|
2396
|
+
continue;
|
|
2397
|
+
// spawnerPid is the strongest discriminator — folded into enrichment
|
|
2398
|
+
// by `buildLaunchMetadata` so it survives stamp ingest. When present
|
|
2399
|
+
// on both sides, treat a mismatch as a hard reject and a match as
|
|
2400
|
+
// sticky: once we've seen a pid-matched row, ignore unmatched ones
|
|
2401
|
+
// even if they're closer in time.
|
|
2402
|
+
const rowPid = enrichment.spawnerPid;
|
|
2403
|
+
if (spawnerPid && typeof rowPid === 'string') {
|
|
2404
|
+
if (rowPid !== spawnerPid)
|
|
2405
|
+
continue;
|
|
2406
|
+
if (!pidMatched) {
|
|
2407
|
+
pidMatched = true;
|
|
2408
|
+
bestDelta = Number.POSITIVE_INFINITY;
|
|
2409
|
+
bestSessionId = undefined;
|
|
2410
|
+
}
|
|
2411
|
+
}
|
|
2412
|
+
else if (pidMatched) {
|
|
2413
|
+
// We already locked onto pid-matched candidates — skip non-pid rows.
|
|
2414
|
+
continue;
|
|
2415
|
+
}
|
|
2416
|
+
const delta = Math.abs(tsMs - startedAt);
|
|
2417
|
+
if (delta >= bestDelta)
|
|
2418
|
+
continue;
|
|
2419
|
+
bestDelta = delta;
|
|
2420
|
+
bestSessionId = sessionId;
|
|
2421
|
+
}
|
|
2422
|
+
if (!bestSessionId)
|
|
2423
|
+
return undefined;
|
|
2424
|
+
return resolveTranscriptForSessionId(input.harness, input.sessionCwd, bestSessionId);
|
|
2425
|
+
}
|
|
2426
|
+
/**
 * Resolve a harness session id to its transcript file on disk. Each
 * harness stores transcripts under its own conventional directory, with a
 * different filename pattern:
 *   • claude   → `<sessionId>.jsonl` inside the cwd-encoded project subdir
 *   • codex    → `rollout-<ts>-<sessionId>.jsonl` in a date-grouped subdir
 *   • opencode → a `.json` file whose name contains the sessionId
 * For codex/opencode a single substring scan suffices — the session id is
 * UUID-like, so filename collisions don't occur in practice. Returns the
 * absolute transcript path, or undefined when nothing matches.
 */
function resolveTranscriptForSessionId(harness, sessionCwd, sessionId) {
    const home = homedir();
    switch (harness) {
        case 'claude': {
            // Claude names the project dir after the cwd with every run of
            // path separators flattened to '-'.
            const projectDir = join(home, '.claude', 'projects', sessionCwd.replace(/[\\/]+/g, '-'));
            const transcript = join(projectDir, `${sessionId}.jsonl`);
            return existsSync(transcript) ? transcript : undefined;
        }
        case 'codex':
            return findFileByNameSubstring(join(home, '.codex', 'sessions'), sessionId, ['.jsonl']);
        case 'opencode':
            return findFileByNameSubstring(join(home, '.local', 'share', 'opencode', 'storage', 'session'), sessionId, ['.json']);
        default:
            return undefined;
    }
}
|
|
2451
|
+
/**
 * Depth-limited DFS under `dir` for the first file whose name ends with
 * one of `extensions` and contains `needle`. Directories are descended at
 * most three levels deep; unreadable directories are treated as empty.
 * Returns the absolute path of the first hit, or undefined.
 */
function findFileByNameSubstring(dir, needle, extensions) {
    const hasWantedExt = (name) => extensions.some((ext) => name.endsWith(ext));
    const walk = (current, depth) => {
        let children;
        try {
            children = readdirSync(current, { withFileTypes: true });
        }
        catch {
            // Missing or unreadable directory — nothing to find here.
            return undefined;
        }
        for (const child of children) {
            const childPath = join(current, child.name);
            if (child.isDirectory()) {
                // Cap recursion so a pathological session store stays cheap.
                const hit = depth < 3 ? walk(childPath, depth + 1) : undefined;
                if (hit)
                    return hit;
                continue;
            }
            if (child.isFile() && hasWantedExt(child.name) && child.name.includes(needle)) {
                return childPath;
            }
        }
        return undefined;
    };
    return walk(dir, 0);
}
|
|
2482
|
+
/**
 * Fallback transcript locator for when the burn-stamp ledger is
 * unavailable or the just-ended session hasn't reconciled yet. Walks the
 * harness's conventional transcript store and verifies each candidate's
 * embedded cwd against the captured session cwd:
 *   • claude   → `~/.claude/projects/<cwd-encoded>/<sessionId>.jsonl`;
 *                entries carry `"cwd"`, and the dir-name encoding
 *                (separators → '-') is itself a strong pre-filter
 *   • codex    → `~/.codex/sessions/YYYY/MM/DD/rollout-*.jsonl`; the first
 *                line is a `session_meta` event with `payload.cwd`
 *   • opencode → `~/.local/share/opencode/storage/session/<hash>/<id>.json`
 *                with a top-level `directory` field
 * Candidates are filtered to mtime ≥ session start and the freshest
 * cwd-confirmed file wins, which keeps concurrent sibling sessions from
 * being picked up. Returns undefined when nothing matches; callers treat
 * an empty transcript path gracefully.
 */
function findSessionTranscriptPath(input) {
    if (!input.harness || !input.sessionCwd)
        return undefined;
    const sinceMs = input.startedAt ?? 0;
    const sessionCwd = input.sessionCwd;
    const home = homedir();
    let scan;
    if (input.harness === 'claude') {
        // Claude already partitions by a cwd-derived dir name, so the scan is
        // flat; the per-file cwd read still guards against a stale dir-name
        // collision smuggling in the wrong file.
        scan = {
            dir: join(home, '.claude', 'projects', sessionCwd.replace(/[\\/]+/g, '-')),
            recursive: false,
            extensions: ['.jsonl'],
            readCwd: readCwdFromClaudeJsonl
        };
    }
    else if (input.harness === 'codex') {
        scan = {
            dir: join(home, '.codex', 'sessions'),
            recursive: true,
            extensions: ['.jsonl'],
            readCwd: readCwdFromCodexJsonl
        };
    }
    else if (input.harness === 'opencode') {
        scan = {
            dir: join(home, '.local', 'share', 'opencode', 'storage', 'session'),
            recursive: true,
            extensions: ['.json'],
            readCwd: readCwdFromOpencodeSession
        };
    }
    else {
        return undefined;
    }
    return findFreshestMatchingTranscript({ ...scan, sinceMs, sessionCwd });
}
|
|
2548
|
+
/**
 * Walk a candidate directory and return the most recently modified file
 * whose embedded cwd (via `opts.readCwd`) equals `opts.sessionCwd`.
 * Recursive mode descends at most three levels — codex/opencode group by
 * date or project hash, never deeper. Files older than `opts.sinceMs` are
 * skipped before any content is read, keeping the scan cheap on large
 * session stores. Returns undefined when no candidate matches.
 */
function findFreshestMatchingTranscript(opts) {
    const hasWantedExt = (name) => opts.extensions.some((ext) => name.endsWith(ext));
    let winner;
    let winnerMtime = -1;
    const walk = (current, depth) => {
        let children;
        try {
            children = readdirSync(current, { withFileTypes: true });
        }
        catch {
            // Unreadable or missing directory — treat as empty.
            return;
        }
        for (const child of children) {
            const childPath = join(current, child.name);
            if (child.isDirectory()) {
                if (opts.recursive && depth < 3)
                    walk(childPath, depth + 1);
                continue;
            }
            if (!child.isFile() || !hasWantedExt(child.name))
                continue;
            let stats;
            try {
                stats = statSync(childPath);
            }
            catch {
                // File vanished between readdir and stat — skip it.
                continue;
            }
            // Order matters: both cheap mtime gates run before the readCwd
            // file read, so we only open files that could still win.
            if (stats.mtimeMs < opts.sinceMs || stats.mtimeMs <= winnerMtime)
                continue;
            if (opts.readCwd(childPath) !== opts.sessionCwd)
                continue;
            winnerMtime = stats.mtimeMs;
            winner = childPath;
        }
    };
    walk(opts.dir, 0);
    return winner;
}
|
|
2600
|
+
/**
 * Read at most `maxBytes` from the start of `path` and report whether the
 * read may have been cut short. Callers that must JSON.parse the whole
 * file (opencode's single-object session record) gate on
 * `truncated === false`; line-by-line scanners (claude/codex JSONL)
 * ignore the flag. Returns undefined on any I/O error.
 */
function readTranscriptHeader(path, maxBytes = 65536) {
    let fd;
    try {
        fd = openSync(path, 'r');
        const chunk = Buffer.alloc(maxBytes);
        const bytesRead = readSync(fd, chunk, 0, maxBytes, 0);
        const text = chunk.subarray(0, bytesRead).toString('utf8');
        // A full buffer means the file is at least maxBytes long — flag it
        // so callers can decide whether to re-read the whole file.
        return { text, truncated: bytesRead >= maxBytes };
    }
    catch {
        return undefined;
    }
    finally {
        if (fd !== undefined) {
            try {
                closeSync(fd);
            }
            catch {
                /* swallow — fd already invalid */
            }
        }
    }
}
|
|
2631
|
+
/** Claude JSONL: scan the header lines and return the first parseable `cwd` string. */
function readCwdFromClaudeJsonl(path) {
    const header = readTranscriptHeader(path);
    if (!header)
        return undefined;
    for (const line of header.text.split('\n')) {
        // Cheap substring pre-filter before paying for JSON.parse.
        if (!line.includes('"cwd"'))
            continue;
        try {
            const entry = JSON.parse(line);
            if (typeof entry.cwd === 'string')
                return entry.cwd;
        }
        catch {
            // Likely a line cut off at the read boundary — keep scanning.
        }
    }
    return undefined;
}
|
|
2650
|
+
/** Codex JSONL: the first line is a `session_meta` event carrying `payload.cwd`. */
function readCwdFromCodexJsonl(path) {
    const header = readTranscriptHeader(path);
    if (!header)
        return undefined;
    // Only the first line matters; everything after the first newline is
    // later session events.
    const [firstLine] = header.text.split('\n', 1);
    try {
        const meta = JSON.parse(firstLine);
        const cwd = meta.payload?.cwd;
        return typeof cwd === 'string' ? cwd : undefined;
    }
    catch {
        return undefined;
    }
}
|
|
2666
|
+
/**
 * Opencode session JSON: top-level `directory` field. The session is a
 * single JSON object, so a truncated header read cannot parse (the
 * closing brace is missing) — in that case fall back to reading the whole
 * file. Real opencode session records are typically small (summary,
 * directory, ids), so the fallback is rare but keeps larger records from
 * being silently missed.
 */
function readCwdFromOpencodeSession(path) {
    const header = readTranscriptHeader(path);
    if (!header)
        return undefined;
    let body;
    if (!header.truncated) {
        body = header.text;
    }
    else {
        try {
            body = readFileSync(path, 'utf8');
        }
        catch {
            return undefined;
        }
    }
    try {
        const session = JSON.parse(body);
        return typeof session.directory === 'string' ? session.directory : undefined;
    }
    catch {
        return undefined;
    }
}
|
|
2695
|
+
/**
 * Run the persona-improver in headless one-shot mode against the given
 * persona + transcript. Returns the parsed proposals file on success.
 *
 * @param args.personaFilePath     absolute path of the persona JSON to improve
 * @param args.transcriptPath      transcript of the finished session
 * @param args.proposalsOutputPath where the improver must write its proposals JSON
 *
 * Throws on: missing improver in catalog, harness binary not on PATH,
 * non-zero harness exit, or unparseable proposals JSON. Caller is expected
 * to surface the message and skip the apply step.
 */
async function runPersonaImprover(args) {
    const improverSpec = personaCatalog['persona-improvement'];
    if (!improverSpec) {
        // Name the key we actually looked up ('persona-improvement', not the
        // informal "persona-improver" alias) so the error is actionable.
        throw new Error('built-in persona "persona-improvement" is not registered in the catalog');
    }
    const tier = 'best-value';
    const selection = buildSelection(improverSpec, tier, 'repo');
    const ctx = useRunnableSelection(selection);
    // The three paths ride along both in the free-text task and as structured
    // inputs, so either delivery channel suffices for the improver.
    const taskLines = [
        'Improve this local persona from one finished session. The CLI will read your proposals JSON and walk the user through accept/deny.',
        `PERSONA_FILE_PATH=${args.personaFilePath}`,
        `SESSION_TRANSCRIPT_PATH=${args.transcriptPath}`,
        `PROPOSALS_OUTPUT_PATH=${args.proposalsOutputPath}`
    ];
    const result = await ctx.sendMessage(taskLines.join('\n'), {
        inputs: {
            PERSONA_FILE_PATH: args.personaFilePath,
            SESSION_TRANSCRIPT_PATH: args.transcriptPath,
            PROPOSALS_OUTPUT_PATH: args.proposalsOutputPath
        },
        timeoutSeconds: selection.runtime.harnessSettings.timeoutSeconds
    });
    if (result.status !== 'completed' || (result.exitCode !== null && result.exitCode !== 0)) {
        // Truncate stderr so a noisy harness doesn't flood the terminal.
        throw new Error(`improver exited with status=${result.status}, code=${result.exitCode ?? 'null'}.${result.stderr ? ` stderr: ${result.stderr.slice(0, 400)}` : ''}`);
    }
    let raw;
    try {
        raw = readFileSync(args.proposalsOutputPath, 'utf8');
    }
    catch (err) {
        throw new Error(`improver did not write proposals file at ${args.proposalsOutputPath}: ${err.message}`);
    }
    return parseProposals(raw);
}
|
|
2737
|
+
/**
 * Validate and normalize the improver's raw proposals JSON.
 *
 * Expected shape: `{ personaId?, personaFilePath?, transcriptPath?,
 * proposals: [{ id, summary, rationale, patches: [{ path, op, value }] }] }`.
 * Each patch's `op` must be "set" or "append"; each patch is additionally
 * vetted by `assertAllowedImproverPatch` (presumably a path allow-list —
 * defined elsewhere in this file).
 *
 * @param {string} raw - file contents as read from the proposals output path
 * @returns a normalized object: string metadata fields default to '' when
 *          absent or non-string; a missing/non-array `proposals` yields [].
 * @throws Error with an index-qualified message on the first malformed
 *         proposal or patch, so the user can see exactly which entry is bad.
 */
export function parseProposals(raw) {
    let parsed;
    try {
        parsed = JSON.parse(raw);
    }
    catch (err) {
        throw new Error(`proposals file is not valid JSON: ${err.message}`);
    }
    // Reject primitives and null; arrays pass this check but then yield an
    // empty proposals list below (Array.isArray(obj.proposals) is false).
    if (!parsed || typeof parsed !== 'object') {
        throw new Error('proposals file must be a JSON object');
    }
    const obj = parsed;
    // Lenient on the container: a missing proposals array means "no proposals".
    const proposalsArr = Array.isArray(obj.proposals) ? obj.proposals : [];
    const proposals = [];
    for (const [idx, item] of proposalsArr.entries()) {
        if (!item || typeof item !== 'object') {
            throw new Error(`proposals[${idx}] must be an object`);
        }
        const p = item;
        // id and summary must be non-blank; rationale may be empty but must
        // be a string (callers render it only when truthy).
        if (typeof p.id !== 'string' || !p.id.trim()) {
            throw new Error(`proposals[${idx}].id must be a non-empty string`);
        }
        if (typeof p.summary !== 'string' || !p.summary.trim()) {
            throw new Error(`proposals[${idx}].summary must be a non-empty string`);
        }
        if (typeof p.rationale !== 'string') {
            throw new Error(`proposals[${idx}].rationale must be a string`);
        }
        if (!Array.isArray(p.patches) || p.patches.length === 0) {
            throw new Error(`proposals[${idx}].patches must be a non-empty array`);
        }
        const patches = [];
        for (const [pidx, rawPatch] of p.patches.entries()) {
            if (!rawPatch || typeof rawPatch !== 'object') {
                throw new Error(`proposals[${idx}].patches[${pidx}] must be an object`);
            }
            const rp = rawPatch;
            if (typeof rp.path !== 'string' || !rp.path.trim()) {
                throw new Error(`proposals[${idx}].patches[${pidx}].path must be a non-empty string`);
            }
            if (rp.op !== 'set' && rp.op !== 'append') {
                throw new Error(`proposals[${idx}].patches[${pidx}].op must be "set" or "append"`);
            }
            // Rebuild the patch from the validated fields only — extra keys
            // on the raw object are dropped.
            const patch = { path: rp.path, op: rp.op, value: rp.value };
            assertAllowedImproverPatch(patch, `proposals[${idx}].patches[${pidx}]`);
            patches.push(patch);
        }
        proposals.push({
            id: p.id,
            summary: p.summary,
            rationale: p.rationale,
            patches
        });
    }
    return {
        personaId: typeof obj.personaId === 'string' ? obj.personaId : '',
        personaFilePath: typeof obj.personaFilePath === 'string' ? obj.personaFilePath : '',
        transcriptPath: typeof obj.transcriptPath === 'string' ? obj.transcriptPath : '',
        proposals
    };
}
|
|
2798
|
+
/**
 * Step through improver proposals over the TTY and collect the accepted
 * ones; the caller applies the patches. Choices:
 *   y — accept the current proposal      n — skip it (default on Enter)
 *   a — accept this and all remaining    q — stop reviewing (keeps earlier accepts)
 * On a non-TTY the caller shouldn't have reached this point, but if it
 * does we return an empty list so nothing is ever auto-applied.
 */
function walkProposalsInteractive(file) {
    if (!process.stdin.isTTY)
        return [];
    const accepted = [];
    const total = file.proposals.length;
    let acceptAll = false;
    for (let i = 0; i < total; i++) {
        const proposal = file.proposals[i];
        process.stderr.write(`\n[${i + 1}/${total}] ${proposal.summary}\n`);
        if (proposal.rationale) {
            process.stderr.write(` why: ${proposal.rationale}\n`);
        }
        for (const patch of proposal.patches) {
            process.stderr.write(` ${formatPatchPreview(patch)}\n`);
        }
        if (acceptAll) {
            accepted.push(proposal);
            process.stderr.write(' → accepted (accept-all)\n');
            continue;
        }
        // 'n' leads the valid list so a bare Enter defaults to skip — the
        // [y/N] default-no convention shared with promptYesNoSync. Hammering
        // Enter through a stack of proposals is a no-op, not a mutation.
        const choice = readSingleCharChoice(' accept? [y/N/a/q] ', ['n', 'y', 'a', 'q']);
        switch (choice) {
            case 'y':
                accepted.push(proposal);
                break;
            case 'a':
                accepted.push(proposal);
                acceptAll = true;
                break;
            case 'q':
                process.stderr.write(' → quit; no further proposals will be reviewed.\n');
                return accepted;
            default:
                // 'n' (and bare Enter) — nothing accepted for this proposal.
                break;
        }
    }
    return accepted;
}
|
|
2850
|
+
/**
 * Render a one-line `path op value` preview of a patch. The value is
 * condensed by formatPatchValue so a multi-paragraph systemPrompt rewrite
 * doesn't dominate the screen.
 */
function formatPatchPreview(patch) {
    const operator = patch.op === 'append' ? '+= ' : '= ';
    return `${patch.path} ${operator}${formatPatchValue(patch.value)}`;
}
|
|
2859
|
+
/**
 * Condense a patch value to a short display string: strings are
 * whitespace-collapsed, quoted, and capped at 100 chars; other values are
 * JSON-serialized and capped at 120 chars. Unserializable values (cycles,
 * BigInt) render as '<unserializable>', and values JSON.stringify drops
 * entirely render as '<undefined>'.
 */
function formatPatchValue(value) {
    if (typeof value === 'string') {
        const flat = value.replace(/\s+/g, ' ').trim();
        if (flat.length > 100)
            return `"${flat.slice(0, 97)}..."`;
        return `"${flat}"`;
    }
    let json;
    try {
        json = JSON.stringify(value);
    }
    catch {
        // JSON.stringify throws on cyclic structures and BigInt.
        return '<unserializable>';
    }
    if (json === undefined)
        return '<undefined>';
    if (json.length > 120)
        return `${json.slice(0, 117)}...`;
    return json;
}
|
|
2874
|
+
/**
 * Prompt for a single-character choice on stdin, synchronously, looping
 * until a valid character arrives. A bare Enter (empty input) returns the
 * first entry of `valid`, so callers should list the safe / default-no
 * answer first. Only the first non-space character of the reply is
 * considered, case-insensitively.
 *
 * Test seam: `opts.read` / `opts.write` can be injected so the prompt is
 * exercisable without a real TTY (mirrors `promptYesNoSync`).
 */
export function readSingleCharChoice(prompt, valid, opts = {}) {
    const write = opts.write ?? ((chunk) => {
        process.stderr.write(chunk);
    });
    while (true) {
        write(prompt);
        const reply = opts.read ? opts.read() : readLineFromStdinSync();
        const answer = (reply ?? '').trim().toLowerCase();
        if (answer.length === 0)
            return valid[0];
        const first = answer[0];
        if (valid.includes(first))
            return first;
        write(` invalid choice; expected one of: ${valid.join(', ')}\n`);
    }
}
|
|
2898
|
+
/**
 * Apply every patch from the accepted proposals to the persona JSON on
 * disk, in proposal order. The file is re-serialized with two-space
 * indent plus a trailing newline (matching existing /personas style).
 * Throws on an unreadable/unwriteable file or on an unsupported patch
 * op / path resolution.
 */
export function applyAcceptedPatches(personaFilePath, accepted) {
    const persona = JSON.parse(readFileSync(personaFilePath, 'utf8'));
    for (const patch of accepted.flatMap((proposal) => proposal.patches)) {
        applyPatchInPlace(persona, patch);
    }
    writeFileSync(personaFilePath, `${JSON.stringify(persona, null, 2)}\n`, 'utf8');
}
|
|
2914
|
+
/**
 * Mutate `root` according to one improver patch. Dotted path segments are
 * walked, auto-creating intermediate objects where the path is missing;
 * `set` assigns the leaf, `append` pushes onto an array leaf (creating a
 * one-element array when the leaf is absent). Throws when a middle
 * segment resolves to a non-object or when appending to a non-array.
 */
function applyPatchInPlace(root, patch) {
    // Re-run the allowlist + prototype-segment guard at apply time, not
    // just at parse time. Belt-and-braces: a patch list constructed by a
    // future caller that bypasses parseProposals can't smuggle a
    // disallowed path past this point either.
    assertAllowedImproverPatch(patch, `applyPatchInPlace`);
    const segments = patch.path.split('.').filter((s) => s.length > 0);
    const leaf = segments[segments.length - 1];
    let node = root;
    for (const seg of segments.slice(0, -1)) {
        const child = node[seg];
        if (child == null) {
            // Missing intermediate — materialize an empty object and descend.
            node = node[seg] = {};
        }
        else if (typeof child === 'object' && !Array.isArray(child)) {
            node = child;
        }
        else {
            throw new Error(`patch path "${patch.path}": "${seg}" is not an object`);
        }
    }
    if (patch.op === 'set') {
        node[leaf] = patch.value;
        return;
    }
    // op === 'append'
    const current = node[leaf];
    if (current === undefined) {
        node[leaf] = [patch.value];
    }
    else if (Array.isArray(current)) {
        current.push(patch.value);
    }
    else {
        throw new Error(`patch path "${patch.path}": cannot append to non-array`);
    }
}
|
|
2053
2952
|
/**
|
|
2054
2953
|
* Enumerate persona candidates for the picker. Local overrides win over the
|
|
2055
2954
|
* built-in catalog when ids collide; the picker only needs the projection
|