astrocode-workflow 0.3.4 → 0.3.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,15 @@
+ /**
+ * This is the only place you should hold the repo lock.
+ * Everything that mutates the repo (tool calls, steps) runs inside this scope.
+ *
+ * Replace the internals with your actual astro/opencode driver loop.
+ */
+ export declare function runAstroWorkflow(opts: {
+ lockPath: string;
+ repoRoot: string;
+ sessionId: string;
+ owner?: string;
+ proceedOneStep: () => Promise<{
+ done: boolean;
+ }>;
+ }): Promise<void>;
@@ -0,0 +1,25 @@
+ // src/astro/workflow-runner.ts
+ import { acquireRepoLock } from "../state/repo-lock";
+ import { workflowRepoLock } from "../state/workflow-repo-lock";
+ /**
+ * This is the only place you should hold the repo lock.
+ * Everything that mutates the repo (tool calls, steps) runs inside this scope.
+ *
+ * Replace the internals with your actual astro/opencode driver loop.
+ */
+ export async function runAstroWorkflow(opts) {
+ await workflowRepoLock({ acquireRepoLock }, {
+ lockPath: opts.lockPath,
+ repoRoot: opts.repoRoot,
+ sessionId: opts.sessionId,
+ owner: opts.owner,
+ fn: async () => {
+ // ✅ Lock is held ONCE for the entire run. Tool calls can "rattle through".
+ while (true) {
+ const { done } = await opts.proceedOneStep();
+ if (done)
+ return;
+ }
+ },
+ });
+ }
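A minimal sketch of driving the new runner, assuming this snippet sits next to `src/astro/workflow-runner.ts`; the `proceedOneStep` body and the `remaining` counter are illustrative stand-ins for a real astro/opencode step driver, and the paths and session id are placeholders:

```ts
import { runAstroWorkflow } from "./astro/workflow-runner";

let remaining = 3; // pretend the workflow finishes after three steps

await runAstroWorkflow({
  lockPath: "/repo/.astro/astro.lock", // illustrative path
  repoRoot: "/repo",
  sessionId: "session-123",
  owner: "example-driver",
  proceedOneStep: async () => {
    remaining -= 1;
    // The repo lock is already held here; mutate the repo freely.
    return { done: remaining <= 0 };
  },
});
```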
@@ -4,6 +4,10 @@ type ChatMessageInput = {
  sessionID: string;
  agent: string;
  };
+ type ToolExecuteAfterInput = {
+ tool: string;
+ sessionID?: string;
+ };
  type RuntimeState = {
  db: SqliteDb | null;
  limitedMode: boolean;
@@ -18,5 +22,6 @@ export declare function createInjectProvider(opts: {
  runtime: RuntimeState;
  }): {
  onChatMessage(input: ChatMessageInput): Promise<void>;
+ onToolAfter(input: ToolExecuteAfterInput): Promise<void>;
  };
  export {};
@@ -116,5 +116,15 @@ export function createInjectProvider(opts) {
  // Inject eligible injects before processing the user's message
  await injectEligibleInjects(input.sessionID);
  },
+ async onToolAfter(input) {
+ if (!config.inject?.enabled)
+ return;
+ // Extract sessionID (same pattern as continuation enforcer)
+ const sessionId = input.sessionID ?? ctx.sessionID;
+ if (!sessionId)
+ return;
+ // Inject eligible injects after tool execution
+ await injectEligibleInjects(sessionId);
+ },
  };
  }
package/dist/src/index.js CHANGED
@@ -9,7 +9,6 @@ import { createInjectProvider } from "./hooks/inject-provider";
  import { createToastManager } from "./ui/toasts";
  import { createAstroAgents } from "./agents/registry";
  import { info, warn } from "./shared/log";
- import { acquireRepoLock } from "./state/repo-lock";
  // Safe config cloning with structuredClone preference (fallback for older Node versions)
  // CONTRACT: Config is guaranteed JSON-serializable (enforced by loadAstrocodeConfig validation)
  const cloneConfig = (v) => {
@@ -38,9 +37,12 @@ const Astrocode = async (ctx) => {
  throw new Error("Astrocode requires ctx.directory to be a string repo root.");
  }
  const repoRoot = ctx.directory;
- // Acquire exclusive repo lock to prevent multiple processes from corrupting the database
- const lockPath = `${repoRoot}/.astro/astro.lock`;
- const repoLock = acquireRepoLock(lockPath);
+ // NOTE: Repo locking is handled at the workflow level via workflowRepoLock.
+ // The workflow tool correctly acquires and holds the lock for the entire workflow execution.
+ // Plugin-level locking is unnecessary and architecturally incorrect since:
+ // - The lock would be held for the entire session lifecycle (too long)
+ // - Individual tools are designed to be called within workflow context where lock is held
+ // - Workflow-level locking with refcounting prevents lock churn during tool execution
  // Always load config first - this provides defaults even in limited mode
  let pluginConfig;
  try {
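The shape that NOTE describes — one acquisition per workflow, with tool calls nested inside — looks roughly like this sketch (the step functions and paths are hypothetical placeholders; `workflowRepoLock` and `acquireRepoLock` are the package's own exports from this release):

```ts
import { acquireRepoLock } from "./state/repo-lock";
import { workflowRepoLock } from "./state/workflow-repo-lock";

const stepOne = async () => { /* hypothetical tool call */ };
const stepTwo = async () => { /* hypothetical tool call */ };

// One acquisition per workflow: taken before the first step, released after
// the last, so tool calls inside fn never touch the lock file themselves.
await workflowRepoLock({ acquireRepoLock }, {
  lockPath: "/repo/.astro/astro.lock", // illustrative path
  repoRoot: "/repo",
  sessionId: "session-123",
  owner: "example-workflow",
  fn: async () => {
    await stepOne();
    await stepTwo();
  },
});
```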
@@ -258,6 +260,10 @@ const Astrocode = async (ctx) => {
  return { args: nextArgs };
  },
  "tool.execute.after": async (input, output) => {
+ // Inject eligible injects after tool execution (not just on chat messages)
+ if (injectProvider && hookEnabled("inject-provider")) {
+ await injectProvider.onToolAfter(input);
+ }
  // Truncate huge tool outputs to artifacts
  if (truncatorHook && hookEnabled("tool-output-truncator")) {
  await truncatorHook(input, output ?? null);
@@ -284,8 +290,7 @@ const Astrocode = async (ctx) => {
  },
  // Best-effort cleanup
  close: async () => {
- // Release repo lock first (important for process termination)
- repoLock.release();
+ // Close database connection
  if (db && typeof db.close === "function") {
  try {
  db.close();
@@ -1,3 +1,34 @@
- export declare function acquireRepoLock(lockPath: string): {
+ /**
+ * Acquire a repo-scoped lock with:
+ * - ✅ process-local caching + refcount (efficient repeated tool calls)
+ * - ✅ heartbeat lease + stale recovery
+ * - ✅ atomic create (`wx`) + portable replace fallback
+ * - ✅ dead PID eviction + stale eviction
+ * - ✅ no live takeover (even same session) to avoid concurrency stomps
+ * - ✅ ABA-safe release via lease_id fencing
+ * - ✅ exponential backoff + jitter to reduce FS churn
+ */
+ export declare function acquireRepoLock(opts: {
+ lockPath: string;
+ repoRoot: string;
+ sessionId?: string;
+ owner?: string;
+ retryMs?: number;
+ pollMs?: number;
+ pollMaxMs?: number;
+ staleMs?: number;
+ heartbeatMs?: number;
+ minWriteMs?: number;
+ }): Promise<{
  release: () => void;
- };
+ }>;
+ /**
+ * Helper wrapper: always releases lock.
+ */
+ export declare function withRepoLock<T>(opts: {
+ lockPath: string;
+ repoRoot: string;
+ sessionId?: string;
+ owner?: string;
+ fn: () => Promise<T>;
+ }): Promise<T>;
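A minimal sketch of the new `withRepoLock` helper, assuming a caller inside `src/` (the lock path, session id, and owner string are illustrative):

```ts
import { withRepoLock } from "./state/repo-lock";

// The lock is acquired before fn runs and always released afterwards,
// even if fn throws.
const result = await withRepoLock({
  lockPath: "/repo/.astro/astro.lock", // illustrative path
  repoRoot: "/repo",
  sessionId: "session-123",
  owner: "one-off-maintenance",
  fn: async () => {
    // exclusive repo access here
    return "done";
  },
});
```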
@@ -1,29 +1,502 @@
  // src/state/repo-lock.ts
  import fs from "node:fs";
  import path from "node:path";
- export function acquireRepoLock(lockPath) {
- fs.mkdirSync(path.dirname(lockPath), { recursive: true });
- let fd;
+ import crypto from "node:crypto";
+ const LOCK_VERSION = 2;
+ // Process-stable identifier for this Node process instance.
+ const PROCESS_INSTANCE_ID = crypto.randomUUID();
+ // Hard guardrails against garbage/corruption.
+ const MAX_LOCK_BYTES = 64 * 1024; // 64KB; lock file should be tiny.
+ // How many times we’ll attempt "atomic-ish replace" before giving up.
+ const ATOMIC_REPLACE_RETRIES = 3;
+ function nowISO() {
+ return new Date().toISOString();
+ }
+ function sleep(ms) {
+ return new Promise((r) => setTimeout(r, ms));
+ }
+ /**
+ * PID existence check:
+ * - EPERM => process exists but we can't signal it (treat as alive)
+ * - ESRCH => process does not exist (dead)
+ */
+ function isPidAlive(pid) {
  try {
- fd = fs.openSync(lockPath, "wx"); // exclusive create
+ process.kill(pid, 0);
+ return true;
  }
- catch (e) {
- const msg = e?.code === "EEXIST"
- ? `Astrocode lock is already held (${lockPath}). Another opencode process is running in this repo.`
- : `Failed to acquire lock (${lockPath}): ${e?.message ?? String(e)}`;
- throw new Error(msg);
+ catch (err) {
+ const code = err?.code;
+ if (code === "EPERM")
+ return true;
+ if (code === "ESRCH")
+ return false;
+ // Unknown: conservative = don't evict.
+ return true;
  }
- fs.writeFileSync(fd, `${process.pid}\n`, "utf8");
- return {
- release: () => {
- try {
- fs.closeSync(fd);
+ }
+ function parseISOToMs(iso) {
+ const t = Date.parse(iso);
+ if (Number.isNaN(t))
+ return null;
+ return t;
+ }
+ function isStaleByAge(existing, staleMs) {
+ const updatedMs = parseISOToMs(existing.updated_at);
+ if (updatedMs === null)
+ return true;
+ return Date.now() - updatedMs > staleMs;
+ }
+ function safeUnlink(p) {
+ try {
+ fs.unlinkSync(p);
+ }
+ catch {
+ // ignore
+ }
+ }
+ /**
+ * Reads & validates lock file defensively.
+ * Supports both v2 JSON format and legacy PID-only format for compatibility.
+ * Returns null on any parse/validation failure.
+ */
+ function readLock(lockPath) {
+ try {
+ const st = fs.statSync(lockPath);
+ if (!st.isFile())
+ return null;
+ if (st.size <= 0 || st.size > MAX_LOCK_BYTES)
+ return null;
+ const raw = fs.readFileSync(lockPath, "utf8").trim();
+ // Try v2 JSON first
+ try {
+ const parsed = JSON.parse(raw);
+ if (parsed && typeof parsed === "object" && parsed.v === LOCK_VERSION) {
+ if (typeof parsed.pid !== "number")
+ return null;
+ if (typeof parsed.created_at !== "string")
+ return null;
+ if (typeof parsed.updated_at !== "string")
+ return null;
+ if (typeof parsed.repo_root !== "string")
+ return null;
+ if (typeof parsed.instance_id !== "string")
+ return null;
+ if (typeof parsed.lease_id !== "string")
+ return null;
+ if (parsed.session_id !== undefined && typeof parsed.session_id !== "string")
+ return null;
+ if (parsed.owner !== undefined && typeof parsed.owner !== "string")
+ return null;
+ return parsed;
+ }
+ }
+ catch {
+ // Not JSON, try legacy format
+ }
+ // Legacy format: just PID as number string
+ const legacyPid = parseInt(raw, 10);
+ if (Number.isNaN(legacyPid) || legacyPid <= 0)
+ return null;
+ // Convert legacy to v2 format
+ const now = nowISO();
+ const leaseId = crypto.randomUUID();
+ return {
+ v: LOCK_VERSION,
+ pid: legacyPid,
+ created_at: now, // Approximate
+ updated_at: now,
+ repo_root: "", // Unknown, will be filled by caller
+ instance_id: PROCESS_INSTANCE_ID, // Assume same instance
+ session_id: undefined,
+ lease_id: leaseId,
+ owner: "legacy-lock",
+ };
+ }
+ catch {
+ return null;
+ }
+ }
+ /**
+ * Best-effort directory fsync:
+ * Helps durability on crash for some filesystems (mostly POSIX).
+ * On platforms where opening a directory fails, we ignore.
+ */
+ function fsyncDirBestEffort(dirPath) {
+ try {
+ const fd = fs.openSync(dirPath, "r");
+ try {
+ fs.fsyncSync(fd);
+ }
+ finally {
+ fs.closeSync(fd);
+ }
+ }
+ catch {
+ // ignore (not portable)
+ }
+ }
+ /**
+ * "Atomic-ish" replace:
+ * - Write temp file
+ * - Try rename over target (POSIX generally atomic)
+ * - Windows can fail if target exists/locked; fallback to unlink+rename (not atomic, but best-effort)
+ * - Best-effort directory fsync after rename
+ */
+ function writeLockAtomicish(lockPath, lock) {
+ const dir = path.dirname(lockPath);
+ fs.mkdirSync(dir, { recursive: true });
+ const tmp = `${lockPath}.${process.pid}.${Date.now()}.${crypto.randomUUID()}.tmp`;
+ const body = JSON.stringify(lock); // compact JSON to reduce IO
+ fs.writeFileSync(tmp, body, "utf8");
+ let lastErr = null;
+ for (let i = 0; i < ATOMIC_REPLACE_RETRIES; i++) {
+ try {
+ fs.renameSync(tmp, lockPath);
+ fsyncDirBestEffort(dir);
+ return;
+ }
+ catch (err) {
+ lastErr = err;
+ const code = err?.code;
+ // Common Windows-ish cases where rename over existing fails.
+ if (code === "EEXIST" || code === "EPERM" || code === "ENOTEMPTY") {
+ safeUnlink(lockPath);
+ continue;
+ }
+ // If tmp vanished somehow, stop.
+ if (code === "ENOENT")
+ break;
+ continue;
+ }
+ }
+ safeUnlink(tmp);
+ if (lastErr)
+ throw lastErr;
+ throw new Error(`Failed to replace lock file: ${lockPath}`);
+ }
+ /**
+ * Atomic "create if not exists" using exclusive open.
+ */
+ function tryCreateExclusiveFile(filePath, contentsUtf8) {
+ fs.mkdirSync(path.dirname(filePath), { recursive: true });
+ try {
+ const fd = fs.openSync(filePath, "wx");
+ try {
+ fs.writeFileSync(fd, contentsUtf8, "utf8");
+ fs.fsyncSync(fd);
+ }
+ finally {
+ fs.closeSync(fd);
+ }
+ fsyncDirBestEffort(path.dirname(filePath));
+ return true;
+ }
+ catch (err) {
+ if (err?.code === "EEXIST")
+ return false;
+ throw err;
+ }
+ }
+ function tryCreateRepoLockExclusive(lockPath, lock) {
+ return tryCreateExclusiveFile(lockPath, JSON.stringify(lock));
+ }
+ const ACTIVE_LOCKS = new Map();
+ function cacheKey(lockPath, sessionId) {
+ return `${lockPath}::${sessionId ?? ""}`;
+ }
+ /**
+ * Heartbeat loop:
+ * - setTimeout (not setInterval) to avoid backlog drift under load
+ * - Minimizes writes by enforcing minWriteMs
+ * - ABA-safe: only refreshes if lock matches our lease_id and process identity
+ * - Avoids unnecessary writes if lock already has a recent updated_at
+ */
+ function startHeartbeat(opts) {
+ let stopped = false;
+ let lastWriteAt = 0;
+ let timer = null;
+ const tick = () => {
+ if (stopped)
+ return;
+ const now = Date.now();
+ const shouldAttempt = now - lastWriteAt >= opts.minWriteMs;
+ if (shouldAttempt) {
+ const existing = readLock(opts.lockPath);
+ if (existing &&
+ existing.lease_id === opts.leaseId &&
+ existing.pid === process.pid &&
+ existing.instance_id === PROCESS_INSTANCE_ID) {
+ const updatedMs = parseISOToMs(existing.updated_at);
+ const isFresh = updatedMs !== null && now - updatedMs < opts.minWriteMs;
+ if (!isFresh) {
+ writeLockAtomicish(opts.lockPath, {
+ ...existing,
+ updated_at: nowISO(),
+ repo_root: opts.repoRoot,
+ session_id: opts.sessionId ?? existing.session_id,
+ owner: opts.owner ?? existing.owner,
+ });
+ lastWriteAt = now;
+ }
+ else {
+ lastWriteAt = now;
+ }
  }
- catch { }
+ }
+ timer = setTimeout(tick, opts.heartbeatMs);
+ timer.unref?.();
+ };
+ tick();
+ return () => {
+ stopped = true;
+ if (timer)
+ clearTimeout(timer);
+ };
+ }
+ /**
+ * Shutdown cleanup:
+ * Best-effort release on normal termination signals.
+ */
+ let EXIT_HOOK_INSTALLED = false;
+ function installExitHookOnce() {
+ if (EXIT_HOOK_INSTALLED)
+ return;
+ EXIT_HOOK_INSTALLED = true;
+ const cleanup = () => {
+ for (const [key, h] of ACTIVE_LOCKS.entries()) {
  try {
- fs.unlinkSync(lockPath);
+ ACTIVE_LOCKS.delete(key);
+ h.heartbeatStop();
+ h.releaseOnce();
+ }
+ catch {
+ // ignore
  }
- catch { }
- },
+ }
  };
+ process.once("exit", cleanup);
+ process.once("SIGINT", () => {
+ cleanup();
+ process.exit(130);
+ });
+ process.once("SIGTERM", () => {
+ cleanup();
+ process.exit(143);
+ });
+ }
+ /**
+ * Acquire a repo-scoped lock with:
+ * - ✅ process-local caching + refcount (efficient repeated tool calls)
+ * - ✅ heartbeat lease + stale recovery
+ * - ✅ atomic create (`wx`) + portable replace fallback
+ * - ✅ dead PID eviction + stale eviction
+ * - ✅ no live takeover (even same session) to avoid concurrency stomps
+ * - ✅ ABA-safe release via lease_id fencing
+ * - ✅ exponential backoff + jitter to reduce FS churn
+ */
+ export async function acquireRepoLock(opts) {
+ installExitHookOnce();
+ const { lockPath, repoRoot, sessionId, owner } = opts;
+ const retryMs = opts.retryMs ?? 8000;
+ const pollBaseMs = opts.pollMs ?? 20;
+ const pollMaxMs = opts.pollMaxMs ?? 250;
+ const heartbeatMs = opts.heartbeatMs ?? 200;
+ const minWriteMs = opts.minWriteMs ?? 800;
+ // Ensure stale is comfortably above minWriteMs to prevent false-stale under load.
+ const staleMs = Math.max(opts.staleMs ?? 2 * 60 * 1000, minWriteMs * 8);
+ // ✅ Fast path: reuse cached handle in the same process/session.
+ const key = cacheKey(lockPath, sessionId);
+ const cached = ACTIVE_LOCKS.get(key);
+ if (cached) {
+ cached.refCount += 1;
+ return {
+ release: () => {
+ cached.refCount -= 1;
+ if (cached.refCount <= 0) {
+ ACTIVE_LOCKS.delete(key);
+ cached.heartbeatStop();
+ cached.releaseOnce();
+ }
+ },
+ };
+ }
+ const myPid = process.pid;
+ const startedAt = Date.now();
+ let pollMs = pollBaseMs;
+ while (true) {
+ const existing = readLock(lockPath);
+ // No lock (or unreadable/invalid) -> try create.
+ if (!existing) {
+ const now = nowISO();
+ const leaseId = crypto.randomUUID();
+ const candidate = {
+ v: LOCK_VERSION,
+ pid: myPid,
+ created_at: now,
+ updated_at: now,
+ repo_root: repoRoot,
+ instance_id: PROCESS_INSTANCE_ID,
+ session_id: sessionId,
+ lease_id: leaseId,
+ owner,
+ };
+ const created = tryCreateRepoLockExclusive(lockPath, candidate);
+ if (created) {
+ const heartbeatStop = startHeartbeat({
+ lockPath,
+ repoRoot,
+ sessionId,
+ owner,
+ leaseId,
+ heartbeatMs,
+ minWriteMs,
+ });
+ const releaseOnce = () => {
+ const cur = readLock(lockPath);
+ if (!cur)
+ return;
+ // ABA-safe
+ if (cur.lease_id !== leaseId)
+ return;
+ // Strict identity: only exact process instance can delete.
+ if (cur.pid !== myPid)
+ return;
+ if (cur.instance_id !== PROCESS_INSTANCE_ID)
+ return;
+ safeUnlink(lockPath);
+ fsyncDirBestEffort(path.dirname(lockPath));
+ };
+ const handle = {
+ key,
+ lockPath,
+ sessionId,
+ leaseId,
+ refCount: 1,
+ heartbeatStop,
+ releaseOnce,
+ };
+ ACTIVE_LOCKS.set(key, handle);
+ return {
+ release: () => {
+ const h = ACTIVE_LOCKS.get(key);
+ if (!h)
+ return;
+ h.refCount -= 1;
+ if (h.refCount <= 0) {
+ ACTIVE_LOCKS.delete(key);
+ h.heartbeatStop();
+ h.releaseOnce();
+ }
+ },
+ };
+ }
+ // Race lost; reset backoff and loop.
+ pollMs = pollBaseMs;
+ continue;
+ }
+ // Re-entrant by SAME PROCESS IDENTITY (pid+instance), or legacy lock with same PID.
+ if (existing.pid === myPid && (existing.instance_id === PROCESS_INSTANCE_ID || existing.owner === "legacy-lock")) {
+ const leaseId = crypto.randomUUID();
+ writeLockAtomicish(lockPath, {
+ ...existing,
+ v: LOCK_VERSION,
+ updated_at: nowISO(),
+ repo_root: repoRoot,
+ instance_id: PROCESS_INSTANCE_ID, // Upgrade legacy
+ session_id: sessionId ?? existing.session_id,
+ owner: owner ?? existing.owner,
+ lease_id: leaseId,
+ });
+ const heartbeatStop = startHeartbeat({
+ lockPath,
+ repoRoot,
+ sessionId: sessionId ?? existing.session_id,
+ owner: owner ?? existing.owner,
+ leaseId,
+ heartbeatMs,
+ minWriteMs,
+ });
+ const releaseOnce = () => {
+ const cur = readLock(lockPath);
+ if (!cur)
+ return;
+ if (cur.lease_id !== leaseId)
+ return;
+ if (cur.pid !== myPid)
+ return;
+ if (cur.instance_id !== PROCESS_INSTANCE_ID)
+ return;
+ safeUnlink(lockPath);
+ fsyncDirBestEffort(path.dirname(lockPath));
+ };
+ const handle = {
+ key,
+ lockPath,
+ sessionId,
+ leaseId,
+ refCount: 1,
+ heartbeatStop,
+ releaseOnce,
+ };
+ ACTIVE_LOCKS.set(key, handle);
+ return {
+ release: () => {
+ const h = ACTIVE_LOCKS.get(key);
+ if (!h)
+ return;
+ h.refCount -= 1;
+ if (h.refCount <= 0) {
+ ACTIVE_LOCKS.delete(key);
+ h.heartbeatStop();
+ h.releaseOnce();
+ }
+ },
+ };
+ }
+ // 🚫 No live takeover (even same session).
+ // We only evict dead/stale locks.
+ const pidAlive = isPidAlive(existing.pid);
+ const staleByAge = isStaleByAge(existing, staleMs);
+ if (!pidAlive || staleByAge) {
+ safeUnlink(lockPath);
+ fsyncDirBestEffort(path.dirname(lockPath));
+ pollMs = pollBaseMs;
+ continue;
+ }
+ // Alive and not us -> bounded wait with exponential backoff + jitter.
+ if (Date.now() - startedAt > retryMs) {
+ const ownerBits = [
+ `pid=${existing.pid}`,
+ existing.session_id ? `session=${existing.session_id}` : null,
+ existing.owner ? `owner=${existing.owner}` : null,
+ `updated_at=${existing.updated_at}`,
+ sessionId && existing.session_id === sessionId ? `(same-session waiting)` : null,
+ ]
+ .filter(Boolean)
+ .join(" ");
+ throw new Error(`Astrocode lock is already held (${lockPath}). ${ownerBits}. ` +
+ `Close other opencode processes or wait.`);
+ }
+ const jitter = Math.floor(Math.random() * Math.min(12, pollMs));
+ await sleep(pollMs + jitter);
+ pollMs = Math.min(pollMaxMs, Math.floor(pollMs * 1.35));
+ }
+ }
+ /**
+ * Helper wrapper: always releases lock.
+ */
+ export async function withRepoLock(opts) {
+ const handle = await acquireRepoLock({
+ lockPath: opts.lockPath,
+ repoRoot: opts.repoRoot,
+ sessionId: opts.sessionId,
+ owner: opts.owner,
+ });
+ try {
+ return await opts.fn();
+ }
+ finally {
+ handle.release();
+ }
  }
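To illustrate the process-local refcount and lease fencing, a sketch under assumed paths (the lock-file fields shown are the v2 shape that `readLock` validates; the caller is assumed to live next to `src/state/repo-lock.ts`):

```ts
import fs from "node:fs";
import { acquireRepoLock } from "./repo-lock";

const opts = { lockPath: "/repo/.astro/astro.lock", repoRoot: "/repo", sessionId: "s1" };

// Same lockPath + sessionId in the same process hits the ACTIVE_LOCKS cache:
// the second call only bumps a refcount, no extra file I/O.
const a = await acquireRepoLock(opts);
const b = await acquireRepoLock(opts);

// The v2 lock file is compact JSON carrying the identity fields used for fencing.
const onDisk = JSON.parse(fs.readFileSync(opts.lockPath, "utf8"));
console.log(onDisk.v, onDisk.pid, onDisk.instance_id, onDisk.lease_id);

b.release(); // refcount 2 -> 1, lock file stays, heartbeat keeps running
a.release(); // refcount 1 -> 0, heartbeat stops, file unlinked (lease_id-fenced)
```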
@@ -0,0 +1,16 @@
+ import type { acquireRepoLock } from "./repo-lock";
+ type RepoLockAcquire = typeof acquireRepoLock;
+ /**
+ * Acquire ONCE per workflow/session in this process.
+ * Nested calls reuse the same held lock (no reacquire, no churn).
+ */
+ export declare function workflowRepoLock<T>(deps: {
+ acquireRepoLock: RepoLockAcquire;
+ }, opts: {
+ lockPath: string;
+ repoRoot: string;
+ sessionId?: string;
+ owner?: string;
+ fn: () => Promise<T>;
+ }): Promise<T>;
+ export {};
@@ -0,0 +1,50 @@
+ const HELD_BY_KEY = new Map();
+ function key(lockPath, sessionId) {
+ return `${lockPath}::${sessionId ?? ""}`;
+ }
+ /**
+ * Acquire ONCE per workflow/session in this process.
+ * Nested calls reuse the same held lock (no reacquire, no churn).
+ */
+ export async function workflowRepoLock(deps, opts) {
+ const k = key(opts.lockPath, opts.sessionId);
+ const existing = HELD_BY_KEY.get(k);
+ if (existing) {
+ existing.depth += 1;
+ try {
+ return await opts.fn();
+ }
+ finally {
+ existing.depth -= 1;
+ if (existing.depth <= 0) {
+ HELD_BY_KEY.delete(k);
+ existing.release();
+ }
+ }
+ }
+ // IMPORTANT: this is tuned for "hold for whole workflow".
+ const handle = await deps.acquireRepoLock({
+ lockPath: opts.lockPath,
+ repoRoot: opts.repoRoot,
+ sessionId: opts.sessionId,
+ owner: opts.owner,
+ retryMs: 30_000,
+ staleMs: 2 * 60_000,
+ heartbeatMs: 200,
+ minWriteMs: 800,
+ pollMs: 20,
+ pollMaxMs: 250,
+ });
+ const held = { release: handle.release, depth: 1 };
+ HELD_BY_KEY.set(k, held);
+ try {
+ return await opts.fn();
+ }
+ finally {
+ held.depth -= 1;
+ if (held.depth <= 0) {
+ HELD_BY_KEY.delete(k);
+ held.release();
+ }
+ }
+ }
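Nested calls keyed by the same lockPath/sessionId reuse the held lock via depth counting, so only the outermost call acquires and releases. A sketch, assuming the real `acquireRepoLock` is injected as the workflow tool does (paths and owner strings are illustrative; `owner` is not part of the reuse key):

```ts
import { acquireRepoLock } from "./repo-lock";
import { workflowRepoLock } from "./workflow-repo-lock";

const base = { lockPath: "/repo/.astro/astro.lock", repoRoot: "/repo", sessionId: "s1" };

await workflowRepoLock({ acquireRepoLock }, {
  ...base,
  owner: "outer",
  fn: async () => {
    // Inner call finds the HELD_BY_KEY entry: depth 1 -> 2, no second acquire.
    await workflowRepoLock({ acquireRepoLock }, {
      ...base,
      owner: "inner",
      fn: async () => {
        // still running under the single outer lock
      },
    });
    // depth is back to 1 here; release happens only when the outer fn returns.
  },
});
```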
@@ -31,7 +31,7 @@ function stageIcon(status) {
  }
  }
  export function createAstroStatusTool(opts) {
- const { config, db } = opts;
+ const { ctx, config, db } = opts;
  return tool({
  description: "Show a compact Astrocode status dashboard: active run/stage, pipeline, story board counts, and next action.",
  args: {
@@ -1,5 +1,6 @@
  // src/tools/workflow.ts
  import { tool } from "@opencode-ai/plugin/tool";
+ import path from "node:path";
  import { withTx } from "../state/db";
  import { buildContextSnapshot } from "../workflow/context";
  import { decideNextAction, createRunForStory, startStage, completeRun, failRun, getActiveRun, EVENT_TYPES, } from "../workflow/state-machine";
@@ -10,6 +11,7 @@ import { newEventId } from "../state/ids";
  import { debug } from "../shared/log";
  import { createToastManager } from "../ui/toasts";
  import { acquireRepoLock } from "../state/repo-lock";
+ import { workflowRepoLock } from "../state/workflow-repo-lock";
  // Agent name mapping for case-sensitive resolution
  export const STAGE_TO_AGENT_MAP = {
  frame: "Frame",
@@ -156,200 +158,201 @@ export function createAstroWorkflowProceedTool(opts) {
  max_steps: tool.schema.number().int().positive().default(config.workflow.default_max_steps),
  },
  execute: async ({ mode, max_steps }) => {
- // Acquire repo lock to ensure no concurrent workflow operations
- const lockPath = `${ctx.directory}/.astro/astro.lock`;
- const repoLock = acquireRepoLock(lockPath);
- try {
- const sessionId = ctx.sessionID;
- const steps = Math.min(max_steps, config.workflow.loop_max_steps_hard_cap);
- const actions = [];
- const warnings = [];
- const startedAt = nowISO();
- // Collect UI events emitted inside state-machine functions, then flush AFTER tx.
- const uiEvents = [];
- const emit = (e) => uiEvents.push(e);
- for (let i = 0; i < steps; i++) {
- const next = decideNextAction(db, config);
- if (next.kind === "idle") {
- actions.push("idle: no approved stories");
- break;
- }
- if (next.kind === "start_run") {
- // SINGLE tx boundary: caller owns tx, state-machine is pure.
- const { run_id } = withTx(db, () => createRunForStory(db, config, next.story_key));
- actions.push(`started run ${run_id} for story ${next.story_key}`);
- if (mode === "step")
- break;
- continue;
- }
- if (next.kind === "complete_run") {
- withTx(db, () => completeRun(db, next.run_id, emit));
- actions.push(`completed run ${next.run_id}`);
- if (mode === "step")
+ const repoRoot = ctx.directory;
+ const lockPath = path.join(repoRoot, ".astro", "astro.lock");
+ const sessionId = ctx.sessionID;
+ return workflowRepoLock({ acquireRepoLock }, {
+ lockPath,
+ repoRoot,
+ sessionId,
+ owner: "astro_workflow_proceed",
+ fn: async () => {
+ const steps = Math.min(max_steps, config.workflow.loop_max_steps_hard_cap);
+ const actions = [];
+ const warnings = [];
+ const startedAt = nowISO();
+ // Collect UI events emitted inside state-machine functions, then flush AFTER tx.
+ const uiEvents = [];
+ const emit = (e) => uiEvents.push(e);
+ for (let i = 0; i < steps; i++) {
+ const next = decideNextAction(db, config);
+ if (next.kind === "idle") {
+ actions.push("idle: no approved stories");
  break;
- continue;
- }
- if (next.kind === "failed") {
- // Ensure DB state reflects failure in one tx; emit UI event.
- withTx(db, () => failRun(db, next.run_id, next.stage_key, next.error_text, emit));
- actions.push(`failed: ${next.stage_key} — ${next.error_text}`);
- if (mode === "step")
- break;
- continue;
- }
- if (next.kind === "delegate_stage") {
- const active = getActiveRun(db);
- if (!active)
- throw new Error("Invariant: delegate_stage but no active run.");
- const run = db.prepare("SELECT * FROM runs WHERE run_id=?").get(active.run_id);
- const story = db.prepare("SELECT * FROM stories WHERE story_key=?").get(run.story_key);
- let agentName = resolveAgentName(next.stage_key, config, agents, warnings);
- const agentExists = (name) => {
- if (agents && agents[name])
- return true;
- const knownStageAgents = ["Frame", "Plan", "Spec", "Implement", "Review", "Verify", "Close", "General", "Astro", "general"];
- if (knownStageAgents.includes(name))
- return true;
- return false;
- };
- if (!agentExists(agentName)) {
- const originalAgent = agentName;
- console.warn(`[Astrocode] Agent ${agentName} not found. Falling back to orchestrator.`);
- agentName = config.agents?.orchestrator_name || "Astro";
+ }
+ if (next.kind === "start_run") {
+ // SINGLE tx boundary: caller owns tx, state-machine is pure.
+ const { run_id } = withTx(db, () => createRunForStory(db, config, next.story_key));
+ actions.push(`started run ${run_id} for story ${next.story_key}`);
+ if (mode === "step")
+ break;
+ continue;
+ }
+ if (next.kind === "complete_run") {
+ withTx(db, () => completeRun(db, next.run_id, emit));
+ actions.push(`completed run ${next.run_id}`);
+ if (mode === "step")
+ break;
+ continue;
+ }
+ if (next.kind === "failed") {
+ // Ensure DB state reflects failure in one tx; emit UI event.
+ withTx(db, () => failRun(db, next.run_id, next.stage_key, next.error_text, emit));
+ actions.push(`failed: ${next.stage_key} — ${next.error_text}`);
+ if (mode === "step")
+ break;
+ continue;
+ }
+ if (next.kind === "delegate_stage") {
+ const active = getActiveRun(db);
+ if (!active)
+ throw new Error("Invariant: delegate_stage but no active run.");
+ const run = db.prepare("SELECT * FROM runs WHERE run_id=?").get(active.run_id);
+ const story = db.prepare("SELECT * FROM stories WHERE story_key=?").get(run.story_key);
+ let agentName = resolveAgentName(next.stage_key, config, agents, warnings);
+ const agentExists = (name) => {
+ if (agents && agents[name])
+ return true;
+ const knownStageAgents = ["Frame", "Plan", "Spec", "Implement", "Review", "Verify", "Close", "General", "Astro", "general"];
+ if (knownStageAgents.includes(name))
+ return true;
+ return false;
+ };
  if (!agentExists(agentName)) {
- console.warn(`[Astrocode] Orchestrator ${agentName} not available. Falling back to General.`);
- agentName = "General";
+ const originalAgent = agentName;
+ console.warn(`[Astrocode] Agent ${agentName} not found. Falling back to orchestrator.`);
+ agentName = config.agents?.orchestrator_name || "Astro";
  if (!agentExists(agentName)) {
- throw new Error(`Critical: No agents available for delegation. Primary: ${originalAgent}, Orchestrator: ${config.agents?.orchestrator_name || "Astro"}, General: unavailable`);
+ console.warn(`[Astrocode] Orchestrator ${agentName} not available. Falling back to General.`);
+ agentName = "General";
+ if (!agentExists(agentName)) {
+ throw new Error(`Critical: No agents available for delegation. Primary: ${originalAgent}, Orchestrator: ${config.agents?.orchestrator_name || "Astro"}, General: unavailable`);
+ }
  }
  }
- }
- // NOTE: startStage owns its own tx (state-machine.ts).
- withTx(db, () => {
- startStage(db, active.run_id, next.stage_key, { subagent_type: agentName }, emit);
- });
- const context = buildContextSnapshot({
- db,
- config,
- run_id: active.run_id,
- next_action: `delegate stage ${next.stage_key}`,
- });
- const stageDirective = buildStageDirective({
- config,
- stage_key: next.stage_key,
- run_id: active.run_id,
- story_key: run.story_key,
- story_title: story?.title ?? "(missing)",
- stage_agent_name: agentName,
- stage_goal: stageGoal(next.stage_key, config),
- stage_constraints: stageConstraints(next.stage_key, config),
- context_snapshot_md: context,
- }).body;
- const delegatePrompt = buildDelegationPrompt({
- stageDirective,
- run_id: active.run_id,
- stage_key: next.stage_key,
- stage_agent_name: agentName,
- });
- // Record continuation (best-effort; no tx wrapper needed but safe either way)
- const h = directiveHash(delegatePrompt);
- const now = nowISO();
- if (sessionId) {
- // This assumes continuations table exists in vNext schema.
- db.prepare("INSERT INTO continuations (session_id, run_id, directive_hash, kind, reason, created_at) VALUES (?, ?, ?, 'stage', ?, ?)").run(sessionId, active.run_id, h, `delegate ${next.stage_key}`, now);
- }
- // Visible injection so user can see state (awaited)
- if (sessionId) {
- await injectChatPrompt({ ctx, sessionId, text: delegatePrompt, agent: "Astro" });
- const continueMessage = [
- `[SYSTEM DIRECTIVE: ASTROCODE — AWAIT_STAGE_COMPLETION]`,
- ``,
- `Stage \`${next.stage_key}\` delegated to \`${agentName}\`.`,
- ``,
- `When \`${agentName}\` completes, call:`,
- `astro_stage_complete(run_id="${active.run_id}", stage_key="${next.stage_key}", output_text="[paste subagent output here]")`,
- ``,
- `This advances the workflow.`,
- ].join("\n");
- await injectChatPrompt({ ctx, sessionId, text: continueMessage, agent: "Astro" });
- }
- actions.push(`delegated stage ${next.stage_key} via ${agentName}`);
- // Stop here; subagent needs to run.
- break;
- }
- if (next.kind === "await_stage_completion") {
- actions.push(`await stage completion: ${next.stage_key}`);
- if (sessionId) {
+ // NOTE: startStage owns its own tx (state-machine.ts).
+ withTx(db, () => {
+ startStage(db, active.run_id, next.stage_key, { subagent_type: agentName }, emit);
+ });
  const context = buildContextSnapshot({
  db,
  config,
- run_id: next.run_id,
- next_action: `complete stage ${next.stage_key}`,
+ run_id: active.run_id,
+ next_action: `delegate stage ${next.stage_key}`,
+ });
+ const stageDirective = buildStageDirective({
+ config,
+ stage_key: next.stage_key,
+ run_id: active.run_id,
+ story_key: run.story_key,
+ story_title: story?.title ?? "(missing)",
+ stage_agent_name: agentName,
+ stage_goal: stageGoal(next.stage_key, config),
+ stage_constraints: stageConstraints(next.stage_key, config),
+ context_snapshot_md: context,
+ }).body;
+ const delegatePrompt = buildDelegationPrompt({
+ stageDirective,
+ run_id: active.run_id,
+ stage_key: next.stage_key,
+ stage_agent_name: agentName,
  });
- const prompt = [
- `[SYSTEM DIRECTIVE: ASTROCODE — AWAIT_STAGE_OUTPUT]`,
- ``,
- `Run \`${next.run_id}\` is waiting for stage \`${next.stage_key}\` output.`,
- `If you have the subagent output, call astro_stage_complete with output_text=the FULL output.`,
- ``,
- `Context snapshot:`,
- context,
- ].join("\n").trim();
- const h = directiveHash(prompt);
+ // Record continuation (best-effort; no tx wrapper needed but safe either way)
+ const h = directiveHash(delegatePrompt);
  const now = nowISO();
- db.prepare("INSERT INTO continuations (session_id, run_id, directive_hash, kind, reason, created_at) VALUES (?, ?, ?, 'continue', ?, ?)").run(sessionId, next.run_id, h, `await ${next.stage_key}`, now);
- await injectChatPrompt({ ctx, sessionId, text: prompt, agent: "Astro" });
+ if (sessionId) {
+ // This assumes continuations table exists in vNext schema.
+ db.prepare("INSERT INTO continuations (session_id, run_id, directive_hash, kind, reason, created_at) VALUES (?, ?, ?, 'stage', ?, ?)").run(sessionId, active.run_id, h, `delegate ${next.stage_key}`, now);
+ }
+ // Visible injection so user can see state (awaited)
+ if (sessionId) {
+ await injectChatPrompt({ ctx, sessionId, text: delegatePrompt, agent: "Astro" });
+ const continueMessage = [
+ `[SYSTEM DIRECTIVE: ASTROCODE — AWAIT_STAGE_COMPLETION]`,
+ ``,
+ `Stage \`${next.stage_key}\` delegated to \`${agentName}\`.`,
+ ``,
+ `When \`${agentName}\` completes, call:`,
+ `astro_stage_complete(run_id="${active.run_id}", stage_key="${next.stage_key}", output_text="[paste subagent output here]")`,
+ ``,
+ `This advances the workflow.`,
+ ].join("\n");
+ await injectChatPrompt({ ctx, sessionId, text: continueMessage, agent: "Astro" });
+ }
+ actions.push(`delegated stage ${next.stage_key} via ${agentName}`);
+ // Stop here; subagent needs to run.
+ break;
  }
+ if (next.kind === "await_stage_completion") {
+ actions.push(`await stage completion: ${next.stage_key}`);
+ if (sessionId) {
+ const context = buildContextSnapshot({
+ db,
+ config,
+ run_id: next.run_id,
+ next_action: `complete stage ${next.stage_key}`,
+ });
+ const prompt = [
+ `[SYSTEM DIRECTIVE: ASTROCODE — AWAIT_STAGE_OUTPUT]`,
+ ``,
+ `Run \`${next.run_id}\` is waiting for stage \`${next.stage_key}\` output.`,
+ `If you have the subagent output, call astro_stage_complete with output_text=the FULL output.`,
+ ``,
+ `Context snapshot:`,
+ context,
+ ].join("\n").trim();
+ const h = directiveHash(prompt);
+ const now = nowISO();
+ db.prepare("INSERT INTO continuations (session_id, run_id, directive_hash, kind, reason, created_at) VALUES (?, ?, ?, 'continue', ?, ?)").run(sessionId, next.run_id, h, `await ${next.stage_key}`, now);
+ await injectChatPrompt({ ctx, sessionId, text: prompt, agent: "Astro" });
+ }
+ break;
+ }
+ actions.push(`unhandled next action: ${next.kind}`);
  break;
  }
- actions.push(`unhandled next action: ${next.kind}`);
- break;
- }
- // Flush UI events (toast + prompt) AFTER state transitions
- if (uiEvents.length > 0) {
- for (const e of uiEvents) {
- const msg = buildUiMessage(e);
- if (config.ui.toasts.enabled) {
- await toasts.show({
- title: msg.title,
- message: msg.message,
- variant: msg.variant,
- });
- }
- if (ctx?.sessionID) {
- await injectChatPrompt({
- ctx,
- sessionId: ctx.sessionID,
- text: msg.chatText,
- agent: "Astro",
- });
+ // Flush UI events (toast + prompt) AFTER state transitions
+ if (uiEvents.length > 0) {
+ for (const e of uiEvents) {
+ const msg = buildUiMessage(e);
+ if (config.ui.toasts.enabled) {
+ await toasts.show({
+ title: msg.title,
+ message: msg.message,
+ variant: msg.variant,
+ });
+ }
+ if (ctx?.sessionID) {
+ await injectChatPrompt({
+ ctx,
+ sessionId: ctx.sessionID,
+ text: msg.chatText,
+ agent: "Astro",
+ });
+ }
  }
+ actions.push(`ui: flushed ${uiEvents.length} event(s)`);
+ }
+ // Housekeeping event
+ db.prepare("INSERT INTO events (event_id, run_id, stage_key, type, body_json, created_at) VALUES (?, NULL, NULL, ?, ?, ?)").run(newEventId(), EVENT_TYPES.WORKFLOW_PROCEED, JSON.stringify({ started_at: startedAt, mode, max_steps: steps, actions }), nowISO());
+ const active = getActiveRun(db);
+ const lines = [];
+ lines.push(`# astro_workflow_proceed`);
+ lines.push(`- mode: ${mode}`);
+ lines.push(`- steps requested: ${max_steps} (cap=${steps})`);
+ if (active)
+ lines.push(`- active run: \`${active.run_id}\` (stage=${active.current_stage_key ?? "?"})`);
+ lines.push(``, `## Actions`);
+ for (const a of actions)
+ lines.push(`- ${a}`);
+ if (warnings.length > 0) {
+ lines.push(``, `## Warnings`);
+ for (const w of warnings)
+ lines.push(`⚠️ ${w}`);
  }
- actions.push(`ui: flushed ${uiEvents.length} event(s)`);
- }
- // Housekeeping event
- db.prepare("INSERT INTO events (event_id, run_id, stage_key, type, body_json, created_at) VALUES (?, NULL, NULL, ?, ?, ?)").run(newEventId(), EVENT_TYPES.WORKFLOW_PROCEED, JSON.stringify({ started_at: startedAt, mode, max_steps: steps, actions }), nowISO());
- const active = getActiveRun(db);
- const lines = [];
- lines.push(`# astro_workflow_proceed`);
- lines.push(`- mode: ${mode}`);
- lines.push(`- steps requested: ${max_steps} (cap=${steps})`);
- if (active)
- lines.push(`- active run: \`${active.run_id}\` (stage=${active.current_stage_key ?? "?"})`);
- lines.push(``, `## Actions`);
- for (const a of actions)
- lines.push(`- ${a}`);
- if (warnings.length > 0) {
- lines.push(``, `## Warnings`);
- for (const w of warnings)
- lines.push(`⚠️ ${w}`);
- }
- return lines.join("\n").trim();
- }
- finally {
- // Always release the lock
- repoLock.release();
- }
+ return lines.join("\n").trim();
+ },
+ });
  },
  });
  }
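On contention, the acquire loop the workflow tool now relies on backs off geometrically with a small jitter. With the tuning `workflowRepoLock` passes (pollMs=20, pollMaxMs=250, retryMs=30_000), the sleep schedule can be reproduced with plain arithmetic (a sketch mirroring the loop in repo-lock.ts; jitter omitted):

```ts
// Backoff schedule from acquireRepoLock: pollMs *= 1.35 (floored), capped at
// pollMaxMs, plus up to min(12, pollMs) ms of random jitter per sleep, until
// retryMs elapses. First twelve waits: 20, 27, 36, 48, 64, 86, 116, 156, 210,
// 250, 250, 250.
let pollMs = 20;
const schedule: number[] = [];
for (let elapsed = 0; elapsed < 30_000 && schedule.length < 12; elapsed += pollMs) {
  schedule.push(pollMs);
  pollMs = Math.min(250, Math.floor(pollMs * 1.35));
}
console.log(schedule.join(", "));
```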
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "astrocode-workflow",
- "version": "0.3.4",
+ "version": "0.3.5",
  "type": "module",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
@@ -9,6 +9,11 @@ type ChatMessageInput = {
  agent: string;
  };

+ type ToolExecuteAfterInput = {
+ tool: string;
+ sessionID?: string;
+ };
+
  type RuntimeState = {
  db: SqliteDb | null;
  limitedMode: boolean;
@@ -156,5 +161,16 @@ export function createInjectProvider(opts: {
  // Inject eligible injects before processing the user's message
  await injectEligibleInjects(input.sessionID);
  },
+
+ async onToolAfter(input: ToolExecuteAfterInput) {
+ if (!config.inject?.enabled) return;
+
+ // Extract sessionID (same pattern as continuation enforcer)
+ const sessionId = input.sessionID ?? (ctx as any).sessionID;
+ if (!sessionId) return;
+
+ // Inject eligible injects after tool execution
+ await injectEligibleInjects(sessionId);
+ },
  };
  }
package/src/index.ts CHANGED
@@ -13,7 +13,6 @@ import { createToastManager, type ToastOptions } from "./ui/toasts";
  import { createAstroAgents } from "./agents/registry";
  import type { AgentConfig } from "@opencode-ai/sdk";
  import { info, warn } from "./shared/log";
- import { acquireRepoLock } from "./state/repo-lock";

  // Type definitions for plugin components
  type ConfigHandler = (config: Record<string, any>) => Promise<void>;
@@ -25,6 +24,7 @@ type ContinuationEnforcer = {
  type ToolOutputTruncator = (input: any, output: any | null) => Promise<void>;
  type InjectProvider = {
  onChatMessage: (input: any) => Promise<void>;
+ onToolAfter: (input: any) => Promise<void>;
  };
  type ToastManager = {
  show: (toast: ToastOptions) => Promise<void>;
@@ -59,9 +59,12 @@ const Astrocode: Plugin = async (ctx) => {
  }
  const repoRoot = ctx.directory;

- // Acquire exclusive repo lock to prevent multiple processes from corrupting the database
- const lockPath = `${repoRoot}/.astro/astro.lock`;
- const repoLock = acquireRepoLock(lockPath);
+ // NOTE: Repo locking is handled at the workflow level via workflowRepoLock.
+ // The workflow tool correctly acquires and holds the lock for the entire workflow execution.
+ // Plugin-level locking is unnecessary and architecturally incorrect since:
+ // - The lock would be held for the entire session lifecycle (too long)
+ // - Individual tools are designed to be called within workflow context where lock is held
+ // - Workflow-level locking with refcounting prevents lock churn during tool execution

  // Always load config first - this provides defaults even in limited mode
  let pluginConfig: AstrocodeConfig;
@@ -299,6 +302,11 @@ const Astrocode: Plugin = async (ctx) => {
  },

  "tool.execute.after": async (input: any, output: any) => {
+ // Inject eligible injects after tool execution (not just on chat messages)
+ if (injectProvider && hookEnabled("inject-provider")) {
+ await injectProvider.onToolAfter(input);
+ }
+
  // Truncate huge tool outputs to artifacts
  if (truncatorHook && hookEnabled("tool-output-truncator")) {
  await truncatorHook(input, output ?? null);
@@ -330,9 +338,7 @@ const Astrocode: Plugin = async (ctx) => {

  // Best-effort cleanup
  close: async () => {
- // Release repo lock first (important for process termination)
- repoLock.release();
-
+ // Close database connection
  if (db && typeof db.close === "function") {
  try {
  db.close();