@spacek33z/autoauto 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +197 -0
- package/package.json +51 -0
- package/src/App.tsx +224 -0
- package/src/cli.ts +772 -0
- package/src/components/AgentPanel.tsx +254 -0
- package/src/components/Chat.test.tsx +71 -0
- package/src/components/Chat.tsx +308 -0
- package/src/components/CycleField.tsx +23 -0
- package/src/components/ModelPicker.tsx +97 -0
- package/src/components/PostUpdatePrompt.tsx +46 -0
- package/src/components/ResultsTable.tsx +172 -0
- package/src/components/RunCompletePrompt.tsx +90 -0
- package/src/components/RunSettingsOverlay.tsx +49 -0
- package/src/components/RunsTable.tsx +219 -0
- package/src/components/StatsHeader.tsx +100 -0
- package/src/daemon.ts +264 -0
- package/src/index.tsx +8 -0
- package/src/lib/agent/agent-provider.test.ts +133 -0
- package/src/lib/agent/claude-provider.ts +277 -0
- package/src/lib/agent/codex-provider.ts +413 -0
- package/src/lib/agent/default-providers.ts +10 -0
- package/src/lib/agent/index.ts +32 -0
- package/src/lib/agent/mock-provider.ts +61 -0
- package/src/lib/agent/opencode-provider.ts +424 -0
- package/src/lib/agent/types.ts +73 -0
- package/src/lib/auth.ts +11 -0
- package/src/lib/config.ts +152 -0
- package/src/lib/daemon-callbacks.ts +59 -0
- package/src/lib/daemon-client.ts +16 -0
- package/src/lib/daemon-lifecycle.ts +368 -0
- package/src/lib/daemon-spawn.ts +122 -0
- package/src/lib/daemon-status.ts +189 -0
- package/src/lib/daemon-watcher.ts +192 -0
- package/src/lib/experiment-loop.ts +679 -0
- package/src/lib/experiment.ts +356 -0
- package/src/lib/finalize.test.ts +143 -0
- package/src/lib/finalize.ts +511 -0
- package/src/lib/format.test.ts +32 -0
- package/src/lib/format.ts +44 -0
- package/src/lib/git.ts +176 -0
- package/src/lib/ideas-backlog.test.ts +54 -0
- package/src/lib/ideas-backlog.ts +109 -0
- package/src/lib/measure.ts +472 -0
- package/src/lib/model-options.ts +24 -0
- package/src/lib/programs.ts +247 -0
- package/src/lib/push-stream.ts +48 -0
- package/src/lib/run-context.ts +112 -0
- package/src/lib/run-setup.ts +34 -0
- package/src/lib/run.ts +383 -0
- package/src/lib/syntax-theme.ts +39 -0
- package/src/lib/system-prompts/experiment.ts +77 -0
- package/src/lib/system-prompts/finalize.ts +90 -0
- package/src/lib/system-prompts/index.ts +7 -0
- package/src/lib/system-prompts/setup.ts +516 -0
- package/src/lib/system-prompts/update.ts +188 -0
- package/src/lib/tool-events.ts +99 -0
- package/src/lib/validate-measurement.ts +326 -0
- package/src/lib/worktree.ts +40 -0
- package/src/screens/AuthErrorScreen.tsx +31 -0
- package/src/screens/ExecutionScreen.tsx +851 -0
- package/src/screens/FirstSetupScreen.tsx +168 -0
- package/src/screens/HomeScreen.tsx +406 -0
- package/src/screens/PreRunScreen.tsx +206 -0
- package/src/screens/SettingsScreen.tsx +189 -0
- package/src/screens/SetupScreen.tsx +226 -0
- package/src/tui.tsx +17 -0
- package/tsconfig.json +17 -0
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
import { open } from "node:fs/promises"
|
|
2
|
+
import { join, dirname } from "node:path"
|
|
3
|
+
import { fileURLToPath } from "node:url"
|
|
4
|
+
import { spawn } from "node:child_process"
|
|
5
|
+
import { randomUUID } from "node:crypto"
|
|
6
|
+
import { generateRunId } from "./run.ts"
|
|
7
|
+
import { initRunDir } from "./run-setup.ts"
|
|
8
|
+
import { getProgramDir } from "./programs.ts"
|
|
9
|
+
import type { ModelSlot } from "./config.ts"
|
|
10
|
+
import { isWorkingTreeClean, formatShellError } from "./git.ts"
|
|
11
|
+
import { createWorktree } from "./worktree.ts"
|
|
12
|
+
import {
|
|
13
|
+
acquireLock,
|
|
14
|
+
updateLockPid,
|
|
15
|
+
releaseLock,
|
|
16
|
+
writeRunConfig,
|
|
17
|
+
type DaemonJson,
|
|
18
|
+
type RunConfig,
|
|
19
|
+
} from "./daemon-lifecycle.ts"
|
|
20
|
+
|
|
21
|
+
/**
|
|
22
|
+
* Prepares and spawns a new daemon for a run. Does everything the TUI needs
|
|
23
|
+
* before handing off to the daemon:
|
|
24
|
+
* 1. Checks working tree is clean
|
|
25
|
+
* 2. Creates git worktree
|
|
26
|
+
* 3. Initializes run directory
|
|
27
|
+
* 4. Writes run-config.json
|
|
28
|
+
* 5. Acquires per-program lock
|
|
29
|
+
* 6. Spawns detached daemon process
|
|
30
|
+
* 7. Writes initial daemon.json with PID
|
|
31
|
+
*
|
|
32
|
+
* Returns run metadata for the TUI to start watching.
|
|
33
|
+
*/
|
|
34
|
+
export async function spawnDaemon(
|
|
35
|
+
mainRoot: string,
|
|
36
|
+
programSlug: string,
|
|
37
|
+
modelConfig: ModelSlot,
|
|
38
|
+
maxExperiments: number,
|
|
39
|
+
ideasBacklogEnabled = true,
|
|
40
|
+
useWorktree = true,
|
|
41
|
+
): Promise<{ runId: string; runDir: string; worktreePath: string | null; pid: number }> {
|
|
42
|
+
// 1. Check working tree
|
|
43
|
+
if (!(await isWorkingTreeClean(mainRoot))) {
|
|
44
|
+
throw new Error("Working tree has uncommitted changes. Commit or stash them before starting a run.")
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
// 2. Generate run ID + acquire lock before creating isolated work
|
|
48
|
+
const runId = generateRunId()
|
|
49
|
+
const programDir = getProgramDir(mainRoot, programSlug)
|
|
50
|
+
const worktreePath = useWorktree ? join(mainRoot, ".autoauto", "worktrees", runId) : mainRoot
|
|
51
|
+
const daemonId = randomUUID()
|
|
52
|
+
|
|
53
|
+
const locked = await acquireLock(programDir, runId, daemonId, 0, worktreePath)
|
|
54
|
+
if (!locked) {
|
|
55
|
+
throw new Error(`Another run is already active for program "${programSlug}". Stop it first.`)
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
try {
|
|
59
|
+
if (useWorktree) {
|
|
60
|
+
await createWorktree(mainRoot, runId, programSlug)
|
|
61
|
+
} else {
|
|
62
|
+
// In-place mode: create experiment branch directly in main checkout
|
|
63
|
+
const { $ } = await import("bun")
|
|
64
|
+
const branchName = `autoauto-${programSlug}-${runId}`
|
|
65
|
+
try {
|
|
66
|
+
await $`git checkout -b ${branchName}`.cwd(mainRoot).quiet()
|
|
67
|
+
} catch (err) {
|
|
68
|
+
throw new Error(formatShellError(err, `git checkout -b ${branchName}`), { cause: err })
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
// 3. Init run dir in main root + write run-config.json
|
|
73
|
+
const runDir = await initRunDir(programDir, runId)
|
|
74
|
+
const runConfig: RunConfig = {
|
|
75
|
+
provider: modelConfig.provider,
|
|
76
|
+
model: modelConfig.model,
|
|
77
|
+
effort: modelConfig.effort,
|
|
78
|
+
max_experiments: maxExperiments,
|
|
79
|
+
ideas_backlog_enabled: ideasBacklogEnabled,
|
|
80
|
+
in_place: useWorktree ? undefined : true,
|
|
81
|
+
}
|
|
82
|
+
await writeRunConfig(runDir, runConfig)
|
|
83
|
+
|
|
84
|
+
// 4. Spawn detached daemon
|
|
85
|
+
const daemonPath = join(dirname(fileURLToPath(import.meta.url)), "..", "daemon.ts")
|
|
86
|
+
const logPath = join(runDir, "daemon.log")
|
|
87
|
+
const logFd = await open(logPath, "w")
|
|
88
|
+
|
|
89
|
+
const daemonArgs = [daemonPath, "--program", programSlug, "--run-id", runId, "--main-root", mainRoot, "--worktree", worktreePath, "--daemon-id", daemonId]
|
|
90
|
+
if (!useWorktree) daemonArgs.push("--in-place")
|
|
91
|
+
|
|
92
|
+
const proc = spawn(
|
|
93
|
+
"bun",
|
|
94
|
+
daemonArgs,
|
|
95
|
+
{
|
|
96
|
+
detached: true,
|
|
97
|
+
stdio: ["ignore", logFd.fd, logFd.fd],
|
|
98
|
+
},
|
|
99
|
+
)
|
|
100
|
+
|
|
101
|
+
const pid = proc.pid!
|
|
102
|
+
|
|
103
|
+
// 5. Write initial daemon.json. The daemon waits for this stub, then adds heartbeat_at.
|
|
104
|
+
const initialDaemon: DaemonJson = {
|
|
105
|
+
run_id: runId,
|
|
106
|
+
pid,
|
|
107
|
+
started_at: new Date().toISOString(),
|
|
108
|
+
worktree_path: worktreePath,
|
|
109
|
+
daemon_id: daemonId,
|
|
110
|
+
}
|
|
111
|
+
await Bun.write(join(runDir, "daemon.json"), JSON.stringify(initialDaemon, null, 2) + "\n")
|
|
112
|
+
await updateLockPid(programDir, runId, daemonId, pid)
|
|
113
|
+
|
|
114
|
+
proc.unref()
|
|
115
|
+
await logFd.close()
|
|
116
|
+
|
|
117
|
+
return { runId, runDir, worktreePath: useWorktree ? worktreePath : null, pid }
|
|
118
|
+
} catch (err) {
|
|
119
|
+
await releaseLock(programDir)
|
|
120
|
+
throw err
|
|
121
|
+
}
|
|
122
|
+
}
|
|
@@ -0,0 +1,189 @@
|
|
|
1
|
+
import { join } from "node:path"
|
|
2
|
+
import { streamLogName } from "./daemon-callbacks.ts"
|
|
3
|
+
import type { RunState, ExperimentResult } from "./run.ts"
|
|
4
|
+
import { readAllResults, readState, getMetricHistory } from "./run.ts"
|
|
5
|
+
import type { ProgramConfig } from "./programs.ts"
|
|
6
|
+
import { loadProgramConfig } from "./programs.ts"
|
|
7
|
+
import {
|
|
8
|
+
readDaemonJson,
|
|
9
|
+
readRunConfig,
|
|
10
|
+
writeRunConfig,
|
|
11
|
+
writeControl,
|
|
12
|
+
readLock,
|
|
13
|
+
type DaemonJson,
|
|
14
|
+
} from "./daemon-lifecycle.ts"
|
|
15
|
+
|
|
16
|
+
/**
 * Liveness snapshot for a run's daemon process, as derived from daemon.json
 * plus a PID probe / heartbeat freshness check (see getDaemonStatus).
 */
export interface DaemonStatus {
  // True when the daemon appears to be running (PID responds or heartbeat is fresh).
  alive: boolean
  starting: boolean // daemon_id not yet written
  // Parsed daemon.json contents, or null when the file is absent.
  daemonJson: DaemonJson | null
}
|
|
21
|
+
|
|
22
|
+
/**
|
|
23
|
+
* Checks if a daemon is alive for a given run directory.
|
|
24
|
+
*/
|
|
25
|
+
export async function getDaemonStatus(runDir: string): Promise<DaemonStatus> {
|
|
26
|
+
const daemon = await readDaemonJson(runDir)
|
|
27
|
+
|
|
28
|
+
if (!daemon) {
|
|
29
|
+
return { alive: false, starting: false, daemonJson: null }
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
// No daemon_id = daemon hasn't started yet (TUI wrote initial stub)
|
|
33
|
+
if (!daemon.daemon_id) {
|
|
34
|
+
// Check if PID is still alive
|
|
35
|
+
try {
|
|
36
|
+
process.kill(daemon.pid, 0)
|
|
37
|
+
return { alive: true, starting: true, daemonJson: daemon }
|
|
38
|
+
} catch {
|
|
39
|
+
return { alive: false, starting: false, daemonJson: daemon }
|
|
40
|
+
}
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
if (!daemon.heartbeat_at) {
|
|
44
|
+
const startupAge = Date.now() - new Date(daemon.started_at).getTime()
|
|
45
|
+
try {
|
|
46
|
+
process.kill(daemon.pid, 0)
|
|
47
|
+
return { alive: startupAge <= 30_000, starting: startupAge <= 30_000, daemonJson: daemon }
|
|
48
|
+
} catch {
|
|
49
|
+
return { alive: false, starting: false, daemonJson: daemon }
|
|
50
|
+
}
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
// Check heartbeat staleness
|
|
54
|
+
if (daemon.heartbeat_at) {
|
|
55
|
+
const heartbeatAge = Date.now() - new Date(daemon.heartbeat_at).getTime()
|
|
56
|
+
if (heartbeatAge > 30_000) {
|
|
57
|
+
return { alive: false, starting: false, daemonJson: daemon }
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
return { alive: true, starting: false, daemonJson: daemon }
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
// --- State Reconstruction ---
|
|
65
|
+
|
|
66
|
+
/**
|
|
67
|
+
* Reconstructs the full TUI state from files on disk. Used for attach mode
|
|
68
|
+
* and reconnection after terminal close/reopen.
|
|
69
|
+
*/
|
|
70
|
+
export async function reconstructState(runDir: string, programDir: string): Promise<{
|
|
71
|
+
state: RunState
|
|
72
|
+
results: ExperimentResult[]
|
|
73
|
+
metricHistory: number[]
|
|
74
|
+
programConfig: ProgramConfig
|
|
75
|
+
streamText: string
|
|
76
|
+
ideasText: string
|
|
77
|
+
}> {
|
|
78
|
+
const [state, results, programConfig] = await Promise.all([
|
|
79
|
+
readState(runDir),
|
|
80
|
+
readAllResults(runDir),
|
|
81
|
+
loadProgramConfig(programDir),
|
|
82
|
+
])
|
|
83
|
+
const [streamText, ideasText] = await Promise.all([
|
|
84
|
+
readStreamTail(runDir, state.experiment_number),
|
|
85
|
+
Bun.file(join(runDir, "ideas.md")).text().catch(() => ""),
|
|
86
|
+
])
|
|
87
|
+
|
|
88
|
+
return {
|
|
89
|
+
state,
|
|
90
|
+
results,
|
|
91
|
+
metricHistory: getMetricHistory(results),
|
|
92
|
+
programConfig,
|
|
93
|
+
streamText,
|
|
94
|
+
ideasText,
|
|
95
|
+
}
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
async function readStreamTail(runDir: string, experimentNumber: number): Promise<string> {
|
|
99
|
+
try {
|
|
100
|
+
const filename = streamLogName(experimentNumber)
|
|
101
|
+
const content = await Bun.file(join(runDir, filename)).text()
|
|
102
|
+
// Same truncation as ExecutionScreen: keep last ~6KB
|
|
103
|
+
return content.length > 8000 ? content.slice(-6000) : content
|
|
104
|
+
} catch {
|
|
105
|
+
return ""
|
|
106
|
+
}
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
// --- Control ---
|
|
110
|
+
|
|
111
|
+
async function sendControlSignal(runDir: string, action: "stop" | "abort"): Promise<void> {
|
|
112
|
+
const daemon = await readDaemonJson(runDir)
|
|
113
|
+
if (!daemon) return
|
|
114
|
+
|
|
115
|
+
await writeControl(runDir, { action, timestamp: new Date().toISOString() })
|
|
116
|
+
|
|
117
|
+
try {
|
|
118
|
+
process.kill(daemon.pid, "SIGTERM")
|
|
119
|
+
} catch {
|
|
120
|
+
// Process may already be dead
|
|
121
|
+
}
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
/** Requests a stop: writes a "stop" control file and signals the daemon. */
export async function sendStop(runDir: string): Promise<void> {
  return sendControlSignal(runDir, "stop")
}
|
|
127
|
+
|
|
128
|
+
/** Requests an abort: writes an "abort" control file and signals the daemon. */
export async function sendAbort(runDir: string): Promise<void> {
  return sendControlSignal(runDir, "abort")
}
|
|
131
|
+
|
|
132
|
+
/**
|
|
133
|
+
* Force-kills the daemon (SIGKILL). Used as escalation after abort timeout.
|
|
134
|
+
*/
|
|
135
|
+
export async function forceKillDaemon(runDir: string): Promise<void> {
|
|
136
|
+
const daemon = await readDaemonJson(runDir)
|
|
137
|
+
if (!daemon) return
|
|
138
|
+
|
|
139
|
+
try {
|
|
140
|
+
process.kill(daemon.pid, "SIGKILL")
|
|
141
|
+
} catch {
|
|
142
|
+
// Process may already be dead
|
|
143
|
+
}
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
// --- Run Config Updates ---
|
|
147
|
+
|
|
148
|
+
/**
|
|
149
|
+
* Updates max_experiments in run-config.json. The daemon re-reads this file
|
|
150
|
+
* at each iteration boundary, so the change takes effect after the current experiment.
|
|
151
|
+
* Must be a positive integer.
|
|
152
|
+
*/
|
|
153
|
+
export async function updateMaxExperiments(runDir: string, maxExperiments: number): Promise<void> {
|
|
154
|
+
const config = await readRunConfig(runDir)
|
|
155
|
+
if (!config || config.max_experiments === maxExperiments) return
|
|
156
|
+
await writeRunConfig(runDir, { ...config, max_experiments: maxExperiments })
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
/**
|
|
160
|
+
* Reads the current max_experiments from run-config.json.
|
|
161
|
+
*/
|
|
162
|
+
export async function getMaxExperiments(runDir: string): Promise<number | null> {
|
|
163
|
+
const config = await readRunConfig(runDir)
|
|
164
|
+
return config?.max_experiments ?? null
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
// --- Active Run Detection ---
|
|
168
|
+
|
|
169
|
+
/**
|
|
170
|
+
* Finds the active run for a program, if any. Checks the lock file first,
|
|
171
|
+
* then verifies the daemon is actually alive.
|
|
172
|
+
*/
|
|
173
|
+
export async function findActiveRun(programDir: string): Promise<{
|
|
174
|
+
runId: string
|
|
175
|
+
runDir: string
|
|
176
|
+
daemonAlive: boolean
|
|
177
|
+
} | null> {
|
|
178
|
+
const lock = await readLock(programDir)
|
|
179
|
+
if (!lock) return null
|
|
180
|
+
|
|
181
|
+
const runDir = join(programDir, "runs", lock.run_id)
|
|
182
|
+
const status = await getDaemonStatus(runDir)
|
|
183
|
+
|
|
184
|
+
return {
|
|
185
|
+
runId: lock.run_id,
|
|
186
|
+
runDir,
|
|
187
|
+
daemonAlive: status.alive,
|
|
188
|
+
}
|
|
189
|
+
}
|
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
import { watch, statSync, readFileSync, type FSWatcher } from "node:fs"
|
|
2
|
+
import { join } from "node:path"
|
|
3
|
+
import { streamLogName } from "./daemon-callbacks.ts"
|
|
4
|
+
import type { RunState, ExperimentResult } from "./run.ts"
|
|
5
|
+
import { readAllResults, readState, getMetricHistory } from "./run.ts"
|
|
6
|
+
import { getDaemonStatus } from "./daemon-status.ts"
|
|
7
|
+
|
|
8
|
+
/** Callbacks invoked by watchRunDir as files in the run directory change. */
export interface WatchCallbacks {
  // Fired with the freshly parsed state when state.json changes.
  onStateChange: (state: RunState) => void
  // Fired when results.tsv grows, with all results and the recomputed metric history.
  onResultsChange: (results: ExperimentResult[], metricHistory: number[]) => void
  // Fired with newly appended stream-log text (a delta, not the whole file).
  onStreamChange: (text: string) => void
  // Fired when a new stream-NNN.log starts, before its first delta.
  onStreamReset?: () => void
  // Fired with the latest "[tool] ..." line extracted from a stream delta.
  onToolStatus?: (status: string | null) => void
  // Fired with the full contents of ideas.md when it changes.
  onIdeasChange?: (text: string) => void
  // Fired by the heartbeat timer when the daemon is neither alive nor starting.
  onDaemonDied: () => void
}
|
|
17
|
+
|
|
18
|
+
/** Handle returned by watchRunDir; stop() tears down the watcher and all timers. */
export interface DaemonWatcher {
  stop: () => void
}
|
|
21
|
+
|
|
22
|
+
/**
 * Watches the run directory for file changes and calls back with updates.
 * Uses fs.watch on the directory (not individual files) to handle atomic renames.
 * Falls back to polling if fs.watch errors.
 *
 * Several mechanisms run in parallel: the directory watcher feeds a 50ms
 * debounced read queue; a 250ms timer re-polls the current stream file; a 7s
 * timer checks daemon liveness; and a 300ms full-poll loop replaces fs.watch
 * if it fails. All share the closure state below and are torn down by stop().
 */
export function watchRunDir(
  runDir: string,
  callbacks: WatchCallbacks,
  options: { startAtEnd?: boolean } = {},
): DaemonWatcher {
  let stopped = false
  let watcher: FSWatcher | null = null

  // Track byte offsets for delta reads
  let resultsByteOffset = 0
  let streamByteOffset = 0
  let currentStreamFile = "" // e.g. "stream-001.log"

  // startAtEnd (attach mode): seed offsets to current file sizes so only
  // data written after attach is reported.
  if (options.startAtEnd) {
    try {
      resultsByteOffset = statSync(join(runDir, "results.tsv")).size
    } catch {}
    // Determine current stream file from state
    try {
      const state = JSON.parse(readFileSync(join(runDir, "state.json"), "utf-8"))
      currentStreamFile = streamLogName(state.experiment_number ?? 0)
      streamByteOffset = statSync(join(runDir, currentStreamFile)).size
    } catch {}
  }

  // Debounce: avoid reading the same file multiple times per event burst
  const pendingReads = new Set<string>()
  let flushTimer: ReturnType<typeof setTimeout> | null = null

  // Queue a filename; the pending set is drained in one batch after 50ms.
  function scheduleRead(filename: string) {
    pendingReads.add(filename)
    if (!flushTimer) {
      flushTimer = setTimeout(flushReads, 50)
    }
  }

  // Drain the pending set and dispatch each filename to its reader.
  async function flushReads() {
    flushTimer = null
    const files = [...pendingReads]
    pendingReads.clear()
    if (stopped) return

    for (const file of files) {
      try {
        if (file === "state.json") {
          const state = await readState(runDir)
          callbacks.onStateChange(state)
        } else if (file === "results.tsv") {
          await readResultsDelta()
        } else if (file.startsWith("stream-") && file.endsWith(".log")) {
          await readStreamDelta(file)
        } else if (file === "ideas.md" && callbacks.onIdeasChange) {
          const text = await Bun.file(join(runDir, "ideas.md")).text()
          callbacks.onIdeasChange(text)
        } else if (file === "daemon.json") {
          // Heartbeat check handled by backup timer
        }
      } catch {
        // File may be mid-write — ignore and catch next event
      }
    }
  }

  // Re-read results.tsv when it has grown past the last seen size.
  // NOTE(review): the offset is advanced before readAllResults; if that read
  // fails, this growth is only picked up on the next size increase — confirm
  // this best-effort behavior is intended.
  async function readResultsDelta() {
    try {
      const size = Bun.file(join(runDir, "results.tsv")).size
      if (size <= resultsByteOffset) return // no new data
      resultsByteOffset = size

      const results = await readAllResults(runDir)
      callbacks.onResultsChange(results, getMetricHistory(results))
    } catch {
      // Ignore transient errors
    }
  }

  // Read the newly appended bytes of a stream log and forward them.
  async function readStreamDelta(file: string) {
    try {
      // New experiment file → reset stream
      if (file !== currentStreamFile) {
        currentStreamFile = file
        streamByteOffset = 0
        callbacks.onStreamReset?.()
      }

      const bunFile = Bun.file(join(runDir, file))
      const size = bunFile.size
      if (size <= streamByteOffset) return

      // Read only the appended range, then advance the offset.
      const delta = await bunFile.slice(streamByteOffset, size).text()
      streamByteOffset = size

      // Extract latest tool status for WaitingIndicator
      if (callbacks.onToolStatus) {
        const toolMatch = delta.match(/\[tool\] (.+)/g)
        if (toolMatch) {
          const last = toolMatch[toolMatch.length - 1]
          callbacks.onToolStatus(last.replace("[tool] ", ""))
        }
      }

      // Pass through full text including [time:] and [tool] markers
      // AgentPanel parses and renders them with styling
      if (delta) callbacks.onStreamChange(delta)
    } catch {
      // Ignore transient errors
    }
  }

  // fs.watch on the directory (.tmp files are atomic-write intermediates — skip)
  try {
    watcher = watch(runDir, (_event, filename) => {
      if (stopped || !filename || filename.endsWith(".tmp")) return
      scheduleRead(filename)
    })
    watcher.on("error", () => {
      // Fall back to polling if watcher errors
      startPolling()
    })
  } catch {
    startPolling()
  }

  // Poll current stream file frequently to catch writes that fs.watch
  // misses (common on macOS with Bun FileSink appends).
  const streamPollTimer = setInterval(() => {
    if (stopped || !currentStreamFile) return
    scheduleRead(currentStreamFile)
  }, 250)

  // Backup heartbeat timer (5-10s)
  const heartbeatTimer = setInterval(async () => {
    if (stopped) return
    const status = await getDaemonStatus(runDir)
    if (!status.alive && !status.starting) {
      callbacks.onDaemonDied()
    }
  }, 7_000)

  // Polling fallback
  let pollTimer: ReturnType<typeof setInterval> | null = null

  // Replaces the fs.watch pipeline with a fixed-interval poll of the known
  // files. Idempotent: a second call (watch error after initial failure) is a no-op.
  function startPolling() {
    if (pollTimer || stopped) return
    watcher?.close()
    watcher = null

    pollTimer = setInterval(() => {
      if (stopped) return
      scheduleRead("state.json")
      scheduleRead("results.tsv")
      if (currentStreamFile) scheduleRead(currentStreamFile)
    }, 300)
  }

  return {
    stop: () => {
      // Set the flag first so in-flight callbacks/timers bail out.
      stopped = true
      watcher?.close()
      clearInterval(streamPollTimer)
      clearInterval(heartbeatTimer)
      if (pollTimer) clearInterval(pollTimer)
      if (flushTimer) clearTimeout(flushTimer)
    },
  }
}
|