astrocode-workflow 0.3.3 → 0.3.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/astro/workflow-runner.d.ts +15 -0
- package/dist/src/astro/workflow-runner.js +25 -0
- package/dist/src/hooks/inject-provider.d.ts +5 -0
- package/dist/src/hooks/inject-provider.js +10 -0
- package/dist/src/index.js +11 -6
- package/dist/src/state/repo-lock.d.ts +33 -2
- package/dist/src/state/repo-lock.js +491 -18
- package/dist/src/state/workflow-repo-lock.d.ts +16 -0
- package/dist/src/state/workflow-repo-lock.js +50 -0
- package/dist/src/tools/status.js +1 -1
- package/dist/src/tools/workflow.js +182 -179
- package/package.json +1 -1
- package/src/hooks/inject-provider.ts +16 -0
- package/src/index.ts +13 -7
- package/src/state/repo-lock.ts +41 -16

package/dist/src/state/workflow-repo-lock.js
ADDED
@@ -0,0 +1,50 @@
+const HELD_BY_KEY = new Map();
+function key(lockPath, sessionId) {
+    return `${lockPath}::${sessionId ?? ""}`;
+}
+/**
+ * Acquire ONCE per workflow/session in this process.
+ * Nested calls reuse the same held lock (no reacquire, no churn).
+ */
+export async function workflowRepoLock(deps, opts) {
+    const k = key(opts.lockPath, opts.sessionId);
+    const existing = HELD_BY_KEY.get(k);
+    if (existing) {
+        existing.depth += 1;
+        try {
+            return await opts.fn();
+        }
+        finally {
+            existing.depth -= 1;
+            if (existing.depth <= 0) {
+                HELD_BY_KEY.delete(k);
+                existing.release();
+            }
+        }
+    }
+    // IMPORTANT: this is tuned for "hold for whole workflow".
+    const handle = await deps.acquireRepoLock({
+        lockPath: opts.lockPath,
+        repoRoot: opts.repoRoot,
+        sessionId: opts.sessionId,
+        owner: opts.owner,
+        retryMs: 30_000,
+        staleMs: 2 * 60_000,
+        heartbeatMs: 200,
+        minWriteMs: 800,
+        pollMs: 20,
+        pollMaxMs: 250,
+    });
+    const held = { release: handle.release, depth: 1 };
+    HELD_BY_KEY.set(k, held);
+    try {
+        return await opts.fn();
+    }
+    finally {
+        held.depth -= 1;
+        if (held.depth <= 0) {
+            HELD_BY_KEY.delete(k);
+            held.release();
+        }
+    }
+}
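
For orientation, a minimal usage sketch of the refcounting behavior above. workflowRepoLock and its option names come from this file; the stub acquireRepoLock, the paths, and the session id are illustrative stand-ins, not part of the package (the real acquireRepoLock lives in dist/src/state/repo-lock.js).

// Illustrative stub so the sketch is self-contained.
const deps = {
    acquireRepoLock: async ({ lockPath, owner }) => {
        console.log(`acquired ${lockPath} for ${owner}`);
        return { release: () => console.log(`released ${lockPath}`) };
    },
};
const base = { lockPath: "/repo/.astro/astro.lock", repoRoot: "/repo", sessionId: "ses_1", owner: "demo" };

// The outer call acquires once; the nested call finds the held entry and only bumps
// its depth counter, so "released" prints exactly once, after the outer fn returns.
await workflowRepoLock(deps, {
    ...base,
    fn: async () => {
        await workflowRepoLock(deps, { ...base, fn: async () => "inner work" });
        return "outer work";
    },
});
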
package/dist/src/tools/status.js
CHANGED
@@ -31,7 +31,7 @@ function stageIcon(status) {
     }
 }
 export function createAstroStatusTool(opts) {
-    const { config, db } = opts;
+    const { ctx, config, db } = opts;
     return tool({
         description: "Show a compact Astrocode status dashboard: active run/stage, pipeline, story board counts, and next action.",
         args: {

package/dist/src/tools/workflow.js
CHANGED
@@ -1,5 +1,6 @@
 // src/tools/workflow.ts
 import { tool } from "@opencode-ai/plugin/tool";
+import path from "node:path";
 import { withTx } from "../state/db";
 import { buildContextSnapshot } from "../workflow/context";
 import { decideNextAction, createRunForStory, startStage, completeRun, failRun, getActiveRun, EVENT_TYPES, } from "../workflow/state-machine";
@@ -10,6 +11,7 @@ import { newEventId } from "../state/ids";
 import { debug } from "../shared/log";
 import { createToastManager } from "../ui/toasts";
 import { acquireRepoLock } from "../state/repo-lock";
+import { workflowRepoLock } from "../state/workflow-repo-lock";
 // Agent name mapping for case-sensitive resolution
 export const STAGE_TO_AGENT_MAP = {
     frame: "Frame",
@@ -156,200 +158,201 @@ export function createAstroWorkflowProceedTool(opts) {
            max_steps: tool.schema.number().int().positive().default(config.workflow.default_max_steps),
        },
        execute: async ({ mode, max_steps }) => {
-            const lockPath =
-            const
-                        const { run_id } = withTx(db, () => createRunForStory(db, config, next.story_key));
-                        actions.push(`started run ${run_id} for story ${next.story_key}`);
-                        if (mode === "step")
-                            break;
-                        continue;
-                    }
-                    if (next.kind === "complete_run") {
-                        withTx(db, () => completeRun(db, next.run_id, emit));
-                        actions.push(`completed run ${next.run_id}`);
-                        if (mode === "step")
+            const repoRoot = ctx.directory;
+            const lockPath = path.join(repoRoot, ".astro", "astro.lock");
+            const sessionId = ctx.sessionID;
+            return workflowRepoLock({ acquireRepoLock }, {
+                lockPath,
+                repoRoot,
+                sessionId,
+                owner: "astro_workflow_proceed",
+                fn: async () => {
+                    const steps = Math.min(max_steps, config.workflow.loop_max_steps_hard_cap);
+                    const actions = [];
+                    const warnings = [];
+                    const startedAt = nowISO();
+                    // Collect UI events emitted inside state-machine functions, then flush AFTER tx.
+                    const uiEvents = [];
+                    const emit = (e) => uiEvents.push(e);
+                    for (let i = 0; i < steps; i++) {
+                        const next = decideNextAction(db, config);
+                        if (next.kind === "idle") {
+                            actions.push("idle: no approved stories");
                            break;
+                        }
+                        if (next.kind === "start_run") {
+                            // SINGLE tx boundary: caller owns tx, state-machine is pure.
+                            const { run_id } = withTx(db, () => createRunForStory(db, config, next.story_key));
+                            actions.push(`started run ${run_id} for story ${next.story_key}`);
+                            if (mode === "step")
+                                break;
+                            continue;
+                        }
+                        if (next.kind === "complete_run") {
+                            withTx(db, () => completeRun(db, next.run_id, emit));
+                            actions.push(`completed run ${next.run_id}`);
+                            if (mode === "step")
+                                break;
+                            continue;
+                        }
+                        if (next.kind === "failed") {
+                            // Ensure DB state reflects failure in one tx; emit UI event.
+                            withTx(db, () => failRun(db, next.run_id, next.stage_key, next.error_text, emit));
+                            actions.push(`failed: ${next.stage_key} — ${next.error_text}`);
+                            if (mode === "step")
+                                break;
+                            continue;
+                        }
+                        if (next.kind === "delegate_stage") {
+                            const active = getActiveRun(db);
+                            if (!active)
+                                throw new Error("Invariant: delegate_stage but no active run.");
+                            const run = db.prepare("SELECT * FROM runs WHERE run_id=?").get(active.run_id);
+                            const story = db.prepare("SELECT * FROM stories WHERE story_key=?").get(run.story_key);
+                            let agentName = resolveAgentName(next.stage_key, config, agents, warnings);
+                            const agentExists = (name) => {
+                                if (agents && agents[name])
+                                    return true;
+                                const knownStageAgents = ["Frame", "Plan", "Spec", "Implement", "Review", "Verify", "Close", "General", "Astro", "general"];
+                                if (knownStageAgents.includes(name))
+                                    return true;
+                                return false;
+                            };
                            if (!agentExists(agentName)) {
-                                agentName
+                                const originalAgent = agentName;
+                                console.warn(`[Astrocode] Agent ${agentName} not found. Falling back to orchestrator.`);
+                                agentName = config.agents?.orchestrator_name || "Astro";
                                if (!agentExists(agentName)) {
+                                    console.warn(`[Astrocode] Orchestrator ${agentName} not available. Falling back to General.`);
+                                    agentName = "General";
+                                    if (!agentExists(agentName)) {
+                                        throw new Error(`Critical: No agents available for delegation. Primary: ${originalAgent}, Orchestrator: ${config.agents?.orchestrator_name || "Astro"}, General: unavailable`);
+                                    }
                                }
                            }
-                        });
-                        const context = buildContextSnapshot({
-                            db,
-                            config,
-                            run_id: active.run_id,
-                            next_action: `delegate stage ${next.stage_key}`,
-                        });
-                        const stageDirective = buildStageDirective({
-                            config,
-                            stage_key: next.stage_key,
-                            run_id: active.run_id,
-                            story_key: run.story_key,
-                            story_title: story?.title ?? "(missing)",
-                            stage_agent_name: agentName,
-                            stage_goal: stageGoal(next.stage_key, config),
-                            stage_constraints: stageConstraints(next.stage_key, config),
-                            context_snapshot_md: context,
-                        }).body;
-                        const delegatePrompt = buildDelegationPrompt({
-                            stageDirective,
-                            run_id: active.run_id,
-                            stage_key: next.stage_key,
-                            stage_agent_name: agentName,
-                        });
-                        // Record continuation (best-effort; no tx wrapper needed but safe either way)
-                        const h = directiveHash(delegatePrompt);
-                        const now = nowISO();
-                        if (sessionId) {
-                            // This assumes continuations table exists in vNext schema.
-                            db.prepare("INSERT INTO continuations (session_id, run_id, directive_hash, kind, reason, created_at) VALUES (?, ?, ?, 'stage', ?, ?)").run(sessionId, active.run_id, h, `delegate ${next.stage_key}`, now);
-                        }
-                        // Visible injection so user can see state (awaited)
-                        if (sessionId) {
-                            await injectChatPrompt({ ctx, sessionId, text: delegatePrompt, agent: "Astro" });
-                            const continueMessage = [
-                                `[SYSTEM DIRECTIVE: ASTROCODE — AWAIT_STAGE_COMPLETION]`,
-                                ``,
-                                `Stage \`${next.stage_key}\` delegated to \`${agentName}\`.`,
-                                ``,
-                                `When \`${agentName}\` completes, call:`,
-                                `astro_stage_complete(run_id="${active.run_id}", stage_key="${next.stage_key}", output_text="[paste subagent output here]")`,
-                                ``,
-                                `This advances the workflow.`,
-                            ].join("\n");
-                            await injectChatPrompt({ ctx, sessionId, text: continueMessage, agent: "Astro" });
-                        }
-                        actions.push(`delegated stage ${next.stage_key} via ${agentName}`);
-                        // Stop here; subagent needs to run.
-                        break;
-                    }
-                    if (next.kind === "await_stage_completion") {
-                        actions.push(`await stage completion: ${next.stage_key}`);
-                        if (sessionId) {
+                            // NOTE: startStage owns its own tx (state-machine.ts).
+                            withTx(db, () => {
+                                startStage(db, active.run_id, next.stage_key, { subagent_type: agentName }, emit);
+                            });
                            const context = buildContextSnapshot({
                                db,
                                config,
-                            run_id:
-                            next_action: `
+                                run_id: active.run_id,
+                                next_action: `delegate stage ${next.stage_key}`,
+                            });
+                            const stageDirective = buildStageDirective({
+                                config,
+                                stage_key: next.stage_key,
+                                run_id: active.run_id,
+                                story_key: run.story_key,
+                                story_title: story?.title ?? "(missing)",
+                                stage_agent_name: agentName,
+                                stage_goal: stageGoal(next.stage_key, config),
+                                stage_constraints: stageConstraints(next.stage_key, config),
+                                context_snapshot_md: context,
+                            }).body;
+                            const delegatePrompt = buildDelegationPrompt({
+                                stageDirective,
+                                run_id: active.run_id,
+                                stage_key: next.stage_key,
+                                stage_agent_name: agentName,
                            });
-                                ``,
-                                `Run \`${next.run_id}\` is waiting for stage \`${next.stage_key}\` output.`,
-                                `If you have the subagent output, call astro_stage_complete with output_text=the FULL output.`,
-                                ``,
-                                `Context snapshot:`,
-                                context,
-                            ].join("\n").trim();
-                            const h = directiveHash(prompt);
+                            // Record continuation (best-effort; no tx wrapper needed but safe either way)
+                            const h = directiveHash(delegatePrompt);
                            const now = nowISO();
+                            if (sessionId) {
+                                // This assumes continuations table exists in vNext schema.
+                                db.prepare("INSERT INTO continuations (session_id, run_id, directive_hash, kind, reason, created_at) VALUES (?, ?, ?, 'stage', ?, ?)").run(sessionId, active.run_id, h, `delegate ${next.stage_key}`, now);
+                            }
+                            // Visible injection so user can see state (awaited)
+                            if (sessionId) {
+                                await injectChatPrompt({ ctx, sessionId, text: delegatePrompt, agent: "Astro" });
+                                const continueMessage = [
+                                    `[SYSTEM DIRECTIVE: ASTROCODE — AWAIT_STAGE_COMPLETION]`,
+                                    ``,
+                                    `Stage \`${next.stage_key}\` delegated to \`${agentName}\`.`,
+                                    ``,
+                                    `When \`${agentName}\` completes, call:`,
+                                    `astro_stage_complete(run_id="${active.run_id}", stage_key="${next.stage_key}", output_text="[paste subagent output here]")`,
+                                    ``,
+                                    `This advances the workflow.`,
+                                ].join("\n");
+                                await injectChatPrompt({ ctx, sessionId, text: continueMessage, agent: "Astro" });
+                            }
+                            actions.push(`delegated stage ${next.stage_key} via ${agentName}`);
+                            // Stop here; subagent needs to run.
+                            break;
                        }
+                        if (next.kind === "await_stage_completion") {
+                            actions.push(`await stage completion: ${next.stage_key}`);
+                            if (sessionId) {
+                                const context = buildContextSnapshot({
+                                    db,
+                                    config,
+                                    run_id: next.run_id,
+                                    next_action: `complete stage ${next.stage_key}`,
+                                });
+                                const prompt = [
+                                    `[SYSTEM DIRECTIVE: ASTROCODE — AWAIT_STAGE_OUTPUT]`,
+                                    ``,
+                                    `Run \`${next.run_id}\` is waiting for stage \`${next.stage_key}\` output.`,
+                                    `If you have the subagent output, call astro_stage_complete with output_text=the FULL output.`,
+                                    ``,
+                                    `Context snapshot:`,
+                                    context,
+                                ].join("\n").trim();
+                                const h = directiveHash(prompt);
+                                const now = nowISO();
+                                db.prepare("INSERT INTO continuations (session_id, run_id, directive_hash, kind, reason, created_at) VALUES (?, ?, ?, 'continue', ?, ?)").run(sessionId, next.run_id, h, `await ${next.stage_key}`, now);
+                                await injectChatPrompt({ ctx, sessionId, text: prompt, agent: "Astro" });
+                            }
+                            break;
+                        }
+                        actions.push(`unhandled next action: ${next.kind}`);
                        break;
                    }
-                                agent: "Astro",
-                            });
+                    // Flush UI events (toast + prompt) AFTER state transitions
+                    if (uiEvents.length > 0) {
+                        for (const e of uiEvents) {
+                            const msg = buildUiMessage(e);
+                            if (config.ui.toasts.enabled) {
+                                await toasts.show({
+                                    title: msg.title,
+                                    message: msg.message,
+                                    variant: msg.variant,
+                                });
+                            }
+                            if (ctx?.sessionID) {
+                                await injectChatPrompt({
+                                    ctx,
+                                    sessionId: ctx.sessionID,
+                                    text: msg.chatText,
+                                    agent: "Astro",
+                                });
+                            }
                        }
+                        actions.push(`ui: flushed ${uiEvents.length} event(s)`);
+                    }
+                    // Housekeeping event
+                    db.prepare("INSERT INTO events (event_id, run_id, stage_key, type, body_json, created_at) VALUES (?, NULL, NULL, ?, ?, ?)").run(newEventId(), EVENT_TYPES.WORKFLOW_PROCEED, JSON.stringify({ started_at: startedAt, mode, max_steps: steps, actions }), nowISO());
+                    const active = getActiveRun(db);
+                    const lines = [];
+                    lines.push(`# astro_workflow_proceed`);
+                    lines.push(`- mode: ${mode}`);
+                    lines.push(`- steps requested: ${max_steps} (cap=${steps})`);
+                    if (active)
+                        lines.push(`- active run: \`${active.run_id}\` (stage=${active.current_stage_key ?? "?"})`);
+                    lines.push(``, `## Actions`);
+                    for (const a of actions)
+                        lines.push(`- ${a}`);
+                    if (warnings.length > 0) {
+                        lines.push(``, `## Warnings`);
+                        for (const w of warnings)
+                            lines.push(`⚠️ ${w}`);
                    }
-            }
-                db.prepare("INSERT INTO events (event_id, run_id, stage_key, type, body_json, created_at) VALUES (?, NULL, NULL, ?, ?, ?)").run(newEventId(), EVENT_TYPES.WORKFLOW_PROCEED, JSON.stringify({ started_at: startedAt, mode, max_steps: steps, actions }), nowISO());
-                const active = getActiveRun(db);
-                const lines = [];
-                lines.push(`# astro_workflow_proceed`);
-                lines.push(`- mode: ${mode}`);
-                lines.push(`- steps requested: ${max_steps} (cap=${steps})`);
-                if (active)
-                    lines.push(`- active run: \`${active.run_id}\` (stage=${active.current_stage_key ?? "?"})`);
-                lines.push(``, `## Actions`);
-                for (const a of actions)
-                    lines.push(`- ${a}`);
-                if (warnings.length > 0) {
-                    lines.push(``, `## Warnings`);
-                    for (const w of warnings)
-                        lines.push(`⚠️ ${w}`);
-                }
-                return lines.join("\n").trim();
-            }
-            finally {
-                // Always release the lock
-                repoLock.release();
-            }
+                    return lines.join("\n").trim();
+                },
+            });
        },
    });
}
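
One pattern worth calling out from the reworked proceed tool: UI events are only buffered while state transitions run inside a transaction, and they are flushed to toasts and chat afterwards. Below is a stripped-down sketch of that collect-then-flush idea; the stand-in helpers (withTx, completeRun, buildUiMessage) only mimic the shape of the package's real functions and are not its actual implementations.

// Placeholders so the sketch runs standalone; the real helpers live in the package.
const withTx = (db, fn) => fn();                       // stand-in for withTx from ../state/db
const completeRun = (db, runId, emit) => emit({ type: "run_completed", runId });
const buildUiMessage = (e) => ({ title: "Astrocode", message: `${e.type}: ${e.runId}` });

const uiEvents = [];
const emit = (e) => uiEvents.push(e);

// 1) State transitions happen inside one tx; emit() only buffers, no UI side effects yet.
withTx(null, () => completeRun(null, "run_1", emit));

// 2) Flush AFTER the tx, so a rolled-back transition never leaves stray toasts or prompts.
for (const e of uiEvents) {
    const msg = buildUiMessage(e);
    console.log(`[toast] ${msg.title}: ${msg.message}`);
}
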
package/package.json
CHANGED

package/src/hooks/inject-provider.ts
CHANGED
@@ -9,6 +9,11 @@ type ChatMessageInput = {
   agent: string;
 };

+type ToolExecuteAfterInput = {
+  tool: string;
+  sessionID?: string;
+};
+
 type RuntimeState = {
   db: SqliteDb | null;
   limitedMode: boolean;
@@ -156,5 +161,16 @@ export function createInjectProvider(opts: {
      // Inject eligible injects before processing the user's message
      await injectEligibleInjects(input.sessionID);
    },
+
+    async onToolAfter(input: ToolExecuteAfterInput) {
+      if (!config.inject?.enabled) return;
+
+      // Extract sessionID (same pattern as continuation enforcer)
+      const sessionId = input.sessionID ?? (ctx as any).sessionID;
+      if (!sessionId) return;
+
+      // Inject eligible injects after tool execution
+      await injectEligibleInjects(sessionId);
+    },
  };
 }
package/src/index.ts
CHANGED
@@ -13,7 +13,6 @@ import { createToastManager, type ToastOptions } from "./ui/toasts";
 import { createAstroAgents } from "./agents/registry";
 import type { AgentConfig } from "@opencode-ai/sdk";
 import { info, warn } from "./shared/log";
-import { acquireRepoLock } from "./state/repo-lock";

 // Type definitions for plugin components
 type ConfigHandler = (config: Record<string, any>) => Promise<void>;
@@ -25,6 +24,7 @@ type ContinuationEnforcer = {
 type ToolOutputTruncator = (input: any, output: any | null) => Promise<void>;
 type InjectProvider = {
   onChatMessage: (input: any) => Promise<void>;
+  onToolAfter: (input: any) => Promise<void>;
 };
 type ToastManager = {
   show: (toast: ToastOptions) => Promise<void>;
@@ -59,9 +59,12 @@ const Astrocode: Plugin = async (ctx) => {
  }
  const repoRoot = ctx.directory;

-  //
+  // NOTE: Repo locking is handled at the workflow level via workflowRepoLock.
+  // The workflow tool correctly acquires and holds the lock for the entire workflow execution.
+  // Plugin-level locking is unnecessary and architecturally incorrect since:
+  // - The lock would be held for the entire session lifecycle (too long)
+  // - Individual tools are designed to be called within workflow context where lock is held
+  // - Workflow-level locking with refcounting prevents lock churn during tool execution

  // Always load config first - this provides defaults even in limited mode
  let pluginConfig: AstrocodeConfig;
@@ -299,6 +302,11 @@ const Astrocode: Plugin = async (ctx) => {
    },

    "tool.execute.after": async (input: any, output: any) => {
+      // Inject eligible injects after tool execution (not just on chat messages)
+      if (injectProvider && hookEnabled("inject-provider")) {
+        await injectProvider.onToolAfter(input);
+      }
+
      // Truncate huge tool outputs to artifacts
      if (truncatorHook && hookEnabled("tool-output-truncator")) {
        await truncatorHook(input, output ?? null);
@@ -330,9 +338,7 @@ const Astrocode: Plugin = async (ctx) => {

    // Best-effort cleanup
    close: async () => {
-      //
-      repoLock.release();
+      // Close database connection
      if (db && typeof db.close === "function") {
        try {
          db.close();
package/src/state/repo-lock.ts
CHANGED
@@ -83,6 +83,7 @@ function safeUnlink(p: string) {

 /**
  * Reads & validates lock file defensively.
+ * Supports both v2 JSON format and legacy PID-only format for compatibility.
  * Returns null on any parse/validation failure.
  */
 function readLock(lockPath: string): LockFile | null {
@@ -91,23 +92,46 @@ function readLock(lockPath: string): LockFile | null {
    if (!st.isFile()) return null;
    if (st.size <= 0 || st.size > MAX_LOCK_BYTES) return null;

-    const raw = fs.readFileSync(lockPath, "utf8");
-    const parsed = JSON.parse(raw) as LockFile;
+    const raw = fs.readFileSync(lockPath, "utf8").trim();

+    // Try v2 JSON first
+    try {
+      const parsed = JSON.parse(raw) as LockFile;
+      if (parsed && typeof parsed === "object" && parsed.v === LOCK_VERSION) {
+        if (typeof parsed.pid !== "number") return null;
+        if (typeof parsed.created_at !== "string") return null;
+        if (typeof parsed.updated_at !== "string") return null;
+        if (typeof parsed.repo_root !== "string") return null;
+        if (typeof parsed.instance_id !== "string") return null;
+        if (typeof parsed.lease_id !== "string") return null;
+
+        if (parsed.session_id !== undefined && typeof parsed.session_id !== "string") return null;
+        if (parsed.owner !== undefined && typeof parsed.owner !== "string") return null;
+
+        return parsed;
+      }
+    } catch {
+      // Not JSON, try legacy format
+    }

+    // Legacy format: just PID as number string
+    const legacyPid = parseInt(raw, 10);
+    if (Number.isNaN(legacyPid) || legacyPid <= 0) return null;

+    // Convert legacy to v2 format
+    const now = nowISO();
+    const leaseId = crypto.randomUUID();
+    return {
+      v: LOCK_VERSION,
+      pid: legacyPid,
+      created_at: now, // Approximate
+      updated_at: now,
+      repo_root: "", // Unknown, will be filled by caller
+      instance_id: PROCESS_INSTANCE_ID, // Assume same instance
+      session_id: undefined,
+      lease_id: leaseId,
+      owner: "legacy-lock",
+    };
  } catch {
    return null;
  }
@@ -453,8 +477,8 @@ export async function acquireRepoLock(opts: {
      continue;
    }

-    // Re-entrant by SAME PROCESS IDENTITY (pid+instance).
-    if (existing.pid === myPid && existing.instance_id === PROCESS_INSTANCE_ID) {
+    // Re-entrant by SAME PROCESS IDENTITY (pid+instance), or legacy lock with same PID.
+    if (existing.pid === myPid && (existing.instance_id === PROCESS_INSTANCE_ID || existing.owner === "legacy-lock")) {
      const leaseId = crypto.randomUUID();

      writeLockAtomicish(lockPath, {
@@ -462,6 +486,7 @@ export async function acquireRepoLock(opts: {
        v: LOCK_VERSION,
        updated_at: nowISO(),
        repo_root: repoRoot,
+        instance_id: PROCESS_INSTANCE_ID, // Upgrade legacy
        session_id: sessionId ?? existing.session_id,
        owner: owner ?? existing.owner,
        lease_id: leaseId,