@pruddiman/dispatch 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +308 -0
- package/dist/cli.js +4613 -0
- package/dist/cli.js.map +1 -0
- package/package.json +58 -0
package/dist/cli.js
ADDED
|
@@ -0,0 +1,4613 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
var __defProp = Object.defineProperty;
var __getOwnPropNames = Object.getOwnPropertyNames;
/**
 * esbuild lazy-module helper: wrap a module initializer so it runs exactly
 * once. `fn` is an object whose single method is the initializer; after the
 * first call `fn` is cleared so repeat calls just return the memoized `res`.
 */
var __esm = (fn, res) => function __init() {
  if (fn) {
    // Pull out the sole initializer before clearing `fn`, so a re-entrant
    // call during initialization is a no-op.
    const initializer = fn[__getOwnPropNames(fn)[0]];
    fn = 0;
    res = initializer();
  }
  return res;
};
/**
 * esbuild export helper: define every entry of `all` on `target` as a live,
 * enumerable getter (re-exports reflect later reassignment of the binding).
 */
var __export = (target, all) => {
  for (const name in all) {
    __defProp(target, name, { get: all[name], enumerable: true });
  }
};
|
|
11
|
+
|
|
12
|
+
// src/helpers/logger.ts
import chalk from "chalk";
// Shared CLI logger. `log` is assigned lazily inside init_logger() so the
// bundle's __esm machinery controls when the module body runs.
var MAX_CAUSE_CHAIN_DEPTH, log;
var init_logger = __esm({
  "src/helpers/logger.ts"() {
    "use strict";
    // Safety cap for formatErrorChain: stop walking `.cause` links after
    // this many hops to avoid infinite loops on cyclic cause chains.
    MAX_CAUSE_CHAIN_DEPTH = 5;
    log = {
      /** When true, `debug()` messages are printed. Set by `--verbose`. */
      verbose: false,
      // Informational line, blue "i" glyph.
      info(msg) {
        console.log(chalk.blue("\u2139"), msg);
      },
      // Success line, green check mark.
      success(msg) {
        console.log(chalk.green("\u2714"), msg);
      },
      // Warning line, yellow warning sign (still goes to stdout).
      warn(msg) {
        console.log(chalk.yellow("\u26A0"), msg);
      },
      // Error line, red cross — the only level routed to stderr.
      error(msg) {
        console.error(chalk.red("\u2716"), msg);
      },
      // Progress line like "[2/5] …"; `index` is 0-based, displayed 1-based.
      task(index, total, msg) {
        console.log(chalk.cyan(`[${index + 1}/${total}]`), msg);
      },
      // Dimmed secondary output.
      dim(msg) {
        console.log(chalk.dim(msg));
      },
      /**
       * Print a debug/verbose message. Only visible when `log.verbose` is true.
       * Messages are prefixed with a dim arrow to visually nest them under the
       * preceding info/error line.
       */
      debug(msg) {
        if (!this.verbose) return;
        console.log(chalk.dim(` \u2937 ${msg}`));
      },
      /**
       * Extract and format the full error cause chain. Node.js network errors
       * (e.g. `TypeError: fetch failed`) bury the real reason in nested `.cause`
       * properties — this helper surfaces them all.
       */
      formatErrorChain(err) {
        const parts = [];
        let current = err;
        let depth = 0;
        while (current && depth < MAX_CAUSE_CHAIN_DEPTH) {
          if (current instanceof Error) {
            const prefix = depth === 0 ? "Error" : "Cause";
            parts.push(`${prefix}: ${current.message}`);
            if (current.cause) {
              current = current.cause;
            } else {
              break;
            }
          } else {
            // Non-Error cause (string, object, …): stringify and stop walking.
            parts.push(`${depth === 0 ? "Error" : "Cause"}: ${String(current)}`);
            break;
          }
          depth++;
        }
        return parts.join("\n \u2937 ");
      },
      /**
       * Extract the raw error message string from an unknown thrown value.
       * Returns `err.message` for Error instances, `String(err)` for any other
       * non-nullish value, and `""` for null/undefined.
       */
      extractMessage(err) {
        if (err instanceof Error) return err.message;
        if (err != null) return String(err);
        return "";
      }
    };
  }
});
|
|
88
|
+
|
|
89
|
+
// src/providers/opencode.ts
|
|
90
|
+
import {
|
|
91
|
+
createOpencode,
|
|
92
|
+
createOpencodeClient
|
|
93
|
+
} from "@opencode-ai/sdk";
|
|
94
|
+
/**
 * List available OpenCode models as "provider/model" strings, sorted.
 * Connects to an existing server when `opts.url` is given, otherwise spawns
 * a throwaway local server (port 0 = OS-assigned) and shuts it down again
 * in the `finally` block.
 */
async function listModels(opts) {
  let client;
  let stopServer;
  if (opts?.url) {
    client = createOpencodeClient({ baseUrl: opts.url });
  } else {
    if (opts?.cwd) {
      // cwd cannot be forwarded at spawn time; surfaced only as a debug note.
      log.debug(`listModels: requested cwd "${opts.cwd}" \u2014 OpenCode SDK does not support spawn-level cwd`);
    }
    try {
      const oc = await createOpencode({ port: 0 });
      client = oc.client;
      stopServer = () => oc.server.close();
    } catch (err) {
      log.debug(`listModels: failed to start OpenCode server: ${log.formatErrorChain(err)}`);
      throw err;
    }
  }
  try {
    const { data } = await client.config.providers();
    if (!data) return [];
    // Only user-visible providers (env/config/custom sources) are listed.
    return data.providers.filter((p) => p.source === "env" || p.source === "config" || p.source === "custom").flatMap((p) => Object.keys(p.models).map((modelId) => `${p.id}/${modelId}`)).sort();
  } finally {
    // Tear down the locally spawned server (no-op when connecting by URL).
    stopServer?.();
  }
}
|
|
120
|
+
/**
 * Boot the OpenCode provider. Connects to an existing server (`opts.url`)
 * or spawns a local one, resolves the model to use, and returns the common
 * provider interface: { name, model, createSession, prompt, cleanup }.
 * `prompt()` resolves to the assistant's text reply or null.
 */
async function boot(opts) {
  let client;
  let stopServer;
  // Guards cleanup() against double invocation (it is both registered with
  // the global cleanup registry and called explicitly by pipelines).
  let cleaned = false;
  if (opts?.url) {
    log.debug(`Connecting to existing OpenCode server at ${opts.url}`);
    client = createOpencodeClient({ baseUrl: opts.url });
  } else {
    log.debug("No --server-url provided, spawning local OpenCode server...");
    if (opts?.cwd) {
      log.debug(`Requested cwd "${opts.cwd}" \u2014 OpenCode SDK does not support spawn-level cwd; relying on prompt-level cwd`);
    }
    try {
      const oc = await createOpencode({ port: 0 });
      client = oc.client;
      stopServer = () => oc.server.close();
      log.debug("OpenCode server started successfully");
    } catch (err) {
      log.debug(`Failed to start OpenCode server: ${log.formatErrorChain(err)}`);
      throw err;
    }
  }
  // Parse an explicit "provider/model" override; anything without a slash
  // is ignored (with a debug note) rather than rejected.
  let modelOverride;
  if (opts?.model) {
    const slash = opts.model.indexOf("/");
    if (slash > 0) {
      modelOverride = {
        providerID: opts.model.slice(0, slash),
        modelID: opts.model.slice(slash + 1)
      };
      log.debug(`Model override: ${opts.model}`);
    } else {
      log.debug(`Ignoring model override "${opts.model}": must be in "provider/model" format`);
    }
  }
  // Fall back to the server's configured default model when none was given.
  let model = opts?.model;
  if (!model) {
    try {
      const { data: config } = await client.config.get();
      if (config?.model) {
        model = config.model;
        log.debug(`Detected model: ${model}`);
      }
    } catch (err) {
      // Best-effort only: model stays undefined on failure.
      log.debug(`Failed to retrieve model from config: ${log.formatErrorChain(err)}`);
    }
  }
  return {
    name: "opencode",
    model,
    // Create a fresh server-side session; returns its id.
    async createSession() {
      log.debug("Creating OpenCode session...");
      try {
        const { data: session } = await client.session.create();
        if (!session) {
          throw new Error("Failed to create OpenCode session");
        }
        log.debug(`Session created: ${session.id}`);
        return session.id;
      } catch (err) {
        log.debug(`Session creation failed: ${log.formatErrorChain(err)}`);
        throw err;
      }
    },
    // Fire-and-forget prompt, then watch the event stream until the session
    // goes idle, then read the final assistant message back.
    async prompt(sessionId, text) {
      log.debug(`Sending async prompt to session ${sessionId} (${text.length} chars)...`);
      try {
        const { error: promptError } = await client.session.promptAsync({
          path: { id: sessionId },
          body: {
            parts: [{ type: "text", text }],
            ...modelOverride ? { model: modelOverride } : {}
          }
        });
        if (promptError) {
          throw new Error(`OpenCode promptAsync failed: ${JSON.stringify(promptError)}`);
        }
        log.debug("Async prompt accepted, subscribing to events...");
        // NOTE(review): the event subscription is opened AFTER promptAsync;
        // if the session reaches idle before the subscription is established,
        // this loop could wait indefinitely — confirm whether the SDK
        // replays/buffers events for new subscribers.
        const controller = new AbortController();
        const { stream } = await client.event.subscribe({
          signal: controller.signal
        });
        try {
          for await (const event of stream) {
            // The stream is server-wide; drop events for other sessions.
            if (!isSessionEvent(event, sessionId)) continue;
            if (event.type === "message.part.updated" && event.properties.part.type === "text") {
              const delta = event.properties.delta;
              if (delta) {
                log.debug(`Streaming text (+${delta.length} chars)...`);
              }
              continue;
            }
            if (event.type === "session.error") {
              const err = event.properties.error;
              throw new Error(
                `OpenCode session error: ${err ? JSON.stringify(err) : "unknown error"}`
              );
            }
            if (event.type === "session.idle") {
              log.debug("Session went idle, fetching result...");
              break;
            }
          }
        } finally {
          // Always detach from the event stream, even on error paths.
          controller.abort();
        }
        const { data: messages } = await client.session.messages({
          path: { id: sessionId }
        });
        if (!messages || messages.length === 0) {
          log.debug("No messages found in session");
          return null;
        }
        // Newest assistant message wins.
        const lastAssistant = [...messages].reverse().find((m) => m.info.role === "assistant");
        if (!lastAssistant) {
          log.debug("No assistant message found in session");
          return null;
        }
        if (lastAssistant.info.role === "assistant" && "error" in lastAssistant.info && lastAssistant.info.error) {
          throw new Error(
            `OpenCode assistant error: ${JSON.stringify(lastAssistant.info.error)}`
          );
        }
        const textParts = lastAssistant.parts.filter(
          (p) => p.type === "text" && "text" in p
        );
        // Empty reply collapses to null (the "no response" signal callers check).
        const result = textParts.map((p) => p.text).join("\n") || null;
        log.debug(`Prompt response received (${result?.length ?? 0} chars)`);
        return result;
      } catch (err) {
        log.debug(`Prompt failed: ${log.formatErrorChain(err)}`);
        throw err;
      }
    },
    // Idempotent teardown of the locally spawned server (if any).
    async cleanup() {
      if (cleaned) return;
      cleaned = true;
      log.debug("Cleaning up OpenCode provider...");
      try {
        stopServer?.();
      } catch (err) {
        log.debug(`Failed to stop OpenCode server: ${log.formatErrorChain(err)}`);
      }
    }
  };
}
|
|
266
|
+
/**
 * Decide whether a server event belongs to `sessionId`. The session id may
 * appear at the top level of `event.properties`, or nested inside its
 * `info` or `part` objects, depending on the event type.
 */
function isSessionEvent(event, sessionId) {
  const { properties } = event;
  if (properties.sessionID === sessionId) return true;
  // Some event shapes carry the id one level down.
  for (const key of ["info", "part"]) {
    const nested = properties[key];
    if (nested && typeof nested === "object" && nested.sessionID === sessionId) {
      return true;
    }
  }
  return false;
}
|
|
277
|
+
// Lazy module initializer for src/providers/opencode.ts: ensures the logger
// module is initialized before any provider function runs.
var init_opencode = __esm({
  "src/providers/opencode.ts"() {
    "use strict";
    init_logger();
  }
});
|
|
283
|
+
|
|
284
|
+
// src/providers/copilot.ts
|
|
285
|
+
import { CopilotClient, approveAll } from "@github/copilot-sdk";
|
|
286
|
+
/**
 * List available Copilot model ids, sorted. Starts a (possibly remote,
 * via `opts.url`) Copilot CLI client just long enough to query, then stops
 * it; stop failures are deliberately swallowed.
 */
async function listModels2(opts) {
  const client = new CopilotClient({
    ...opts?.url ? { cliUrl: opts.url } : {}
  });
  try {
    await client.start();
    const models = await client.listModels();
    return models.map((m) => m.id).sort();
  } finally {
    // Best-effort shutdown — never mask the primary result/error.
    await client.stop().catch(() => {
    });
  }
}
|
|
299
|
+
/**
 * Boot the GitHub Copilot provider. Starts (or connects to) the Copilot CLI
 * and returns the common provider interface: { name, model (getter),
 * createSession, prompt, cleanup }. All tool/permission requests are
 * auto-approved via `approveAll`.
 */
async function boot2(opts) {
  log.debug(opts?.url ? `Connecting to Copilot CLI at ${opts.url}` : "Starting Copilot CLI...");
  const client = new CopilotClient({
    ...opts?.url ? { cliUrl: opts.url } : {},
    ...opts?.cwd ? { cwd: opts.cwd } : {}
  });
  try {
    await client.start();
    log.debug("Copilot CLI started successfully");
  } catch (err) {
    log.debug(`Failed to start Copilot CLI: ${log.formatErrorChain(err)}`);
    throw err;
  }
  // Model is detected lazily from the first session (see createSession),
  // hence the getter on the returned object.
  let model;
  let modelDetected = false;
  const sessions = /* @__PURE__ */ new Map();
  return {
    name: "copilot",
    get model() {
      return model;
    },
    // Create a Copilot session, remember it by id, and (once) sniff the
    // active model from it.
    async createSession() {
      log.debug("Creating Copilot session...");
      try {
        const session = await client.createSession({
          ...opts?.model ? { model: opts.model } : {},
          ...opts?.cwd ? { workingDirectory: opts.cwd } : {},
          onPermissionRequest: approveAll
        });
        sessions.set(session.sessionId, session);
        log.debug(`Session created: ${session.sessionId}`);
        if (!modelDetected) {
          // Set the flag first so concurrent/failed detection never retries.
          modelDetected = true;
          try {
            const result = await session.rpc.model.getCurrent();
            if (result.modelId) {
              model = result.modelId;
              log.debug(`Detected model: ${model}`);
            }
          } catch (err) {
            log.debug(`Failed to detect model from session: ${log.formatErrorChain(err)}`);
          }
        }
        return session.sessionId;
      } catch (err) {
        log.debug(`Session creation failed: ${log.formatErrorChain(err)}`);
        throw err;
      }
    },
    // Send the prompt, wait for the session to go idle (or error), then
    // return the content of the last assistant message (or null).
    async prompt(sessionId, text) {
      const session = sessions.get(sessionId);
      if (!session) {
        throw new Error(`Copilot session ${sessionId} not found`);
      }
      log.debug(`Sending prompt to session ${sessionId} (${text.length} chars)...`);
      try {
        await session.send({ prompt: text });
        log.debug("Async prompt accepted, waiting for session to become idle...");
        // NOTE(review): listeners are attached after send(); if the session
        // can go idle before attachment this promise may never settle —
        // confirm the SDK's event-delivery guarantees.
        await new Promise((resolve2, reject) => {
          const unsubIdle = session.on("session.idle", () => {
            // Detach both listeners before settling to avoid double-settles.
            unsubIdle();
            unsubErr();
            resolve2();
          });
          const unsubErr = session.on("session.error", (event) => {
            unsubIdle();
            unsubErr();
            reject(new Error(`Copilot session error: ${event.data.message}`));
          });
        });
        log.debug("Session went idle, fetching result...");
        const events = await session.getMessages();
        // Newest assistant message wins.
        const last = [...events].reverse().find((e) => e.type === "assistant.message");
        const result = last?.data?.content ?? null;
        log.debug(`Prompt response received (${result?.length ?? 0} chars)`);
        return result;
      } catch (err) {
        log.debug(`Prompt failed: ${log.formatErrorChain(err)}`);
        throw err;
      }
    },
    // Destroy all sessions in parallel (best-effort), then stop the client.
    async cleanup() {
      log.debug("Cleaning up Copilot provider...");
      const destroyOps = [...sessions.values()].map(
        (s) => s.destroy().catch((err) => {
          log.debug(`Failed to destroy Copilot session: ${log.formatErrorChain(err)}`);
        })
      );
      await Promise.all(destroyOps);
      sessions.clear();
      await client.stop().catch((err) => {
        log.debug(`Failed to stop Copilot client: ${log.formatErrorChain(err)}`);
      });
    }
  };
}
|
|
395
|
+
// Lazy module initializer for src/providers/copilot.ts: pulls in the logger
// before any Copilot provider function runs.
var init_copilot = __esm({
  "src/providers/copilot.ts"() {
    "use strict";
    init_logger();
  }
});
|
|
401
|
+
|
|
402
|
+
// src/providers/claude.ts
|
|
403
|
+
import { randomUUID } from "crypto";
|
|
404
|
+
import { unstable_v2_createSession } from "@anthropic-ai/claude-agent-sdk";
|
|
405
|
+
/**
 * Return the Claude model catalog. This is a hard-coded static list (the
 * options are ignored); kept async to match the other providers' listModels
 * signatures.
 */
async function listModels3(_opts) {
  const claudeModels = [
    "claude-haiku-3-5",
    "claude-opus-4-6",
    "claude-sonnet-4",
    "claude-sonnet-4-5"
  ];
  return claudeModels;
}
|
|
413
|
+
/**
 * Boot the Claude provider (Anthropic Claude Agent SDK). No server to spawn:
 * sessions are created locally via `unstable_v2_createSession` and tracked
 * in a Map keyed by a locally generated UUID (the SDK session object itself
 * carries no id we use). Returns the common provider interface.
 */
async function boot3(opts) {
  const model = opts?.model ?? "claude-sonnet-4";
  const cwd = opts?.cwd;
  log.debug(`Booting Claude provider with model ${model}`);
  const sessions = /* @__PURE__ */ new Map();
  return {
    name: "claude",
    model,
    // Create an SDK session with edits auto-accepted; id is a local UUID.
    async createSession() {
      log.debug("Creating Claude session...");
      try {
        const sessionOpts = { model, permissionMode: "acceptEdits", ...cwd ? { cwd } : {} };
        const session = unstable_v2_createSession(sessionOpts);
        const sessionId = randomUUID();
        sessions.set(sessionId, session);
        log.debug(`Session created: ${sessionId}`);
        return sessionId;
      } catch (err) {
        log.debug(`Session creation failed: ${log.formatErrorChain(err)}`);
        throw err;
      }
    },
    // Send the prompt and drain the session's message stream, concatenating
    // the text blocks of every assistant message; null when nothing came back.
    async prompt(sessionId, text) {
      const session = sessions.get(sessionId);
      if (!session) {
        throw new Error(`Claude session ${sessionId} not found`);
      }
      log.debug(`Sending prompt to session ${sessionId} (${text.length} chars)...`);
      try {
        await session.send(text);
        const parts = [];
        for await (const msg of session.stream()) {
          if (msg.type === "assistant") {
            const msgText = msg.message.content.filter((block) => block.type === "text").map((block) => block.text).join("");
            if (msgText) parts.push(msgText);
          }
        }
        // Empty output collapses to null (the "no response" signal).
        const result = parts.join("") || null;
        log.debug(`Prompt response received (${result?.length ?? 0} chars)`);
        return result;
      } catch (err) {
        log.debug(`Prompt failed: ${log.formatErrorChain(err)}`);
        throw err;
      }
    },
    // Close every tracked session, ignoring individual close failures.
    async cleanup() {
      log.debug("Cleaning up Claude provider...");
      for (const session of sessions.values()) {
        try {
          session.close();
        } catch {
        }
      }
      sessions.clear();
    }
  };
}
|
|
470
|
+
// Lazy module initializer for src/providers/claude.ts: pulls in the logger
// before any Claude provider function runs.
var init_claude = __esm({
  "src/providers/claude.ts"() {
    "use strict";
    init_logger();
  }
});
|
|
476
|
+
|
|
477
|
+
// src/providers/codex.ts
|
|
478
|
+
import { randomUUID as randomUUID2 } from "crypto";
|
|
479
|
+
// Dynamically import @openai/codex so the (heavy, optional) dependency is
// only loaded when the codex provider is actually booted.
async function loadAgentLoop() {
  return import("@openai/codex");
}
|
|
482
|
+
/**
 * Return the Codex model catalog. Hard-coded static list (options are
 * ignored); async only to match the other providers' listModels signatures.
 */
async function listModels4(_opts) {
  const codexModels = ["codex-mini-latest", "o3-mini", "o4-mini"];
  return codexModels;
}
|
|
489
|
+
/**
 * Boot the Codex provider (@openai/codex AgentLoop). Each session is a
 * fresh AgentLoop instance tracked under a locally generated UUID. Runs in
 * "full-auto" approval mode with all command confirmations auto-approved.
 * Returns the common provider interface.
 */
async function boot4(opts) {
  const model = opts?.model ?? "o4-mini";
  log.debug(`Booting Codex provider with model ${model}`);
  // AgentLoop is loaded lazily so the dependency is optional at startup.
  const { AgentLoop } = await loadAgentLoop();
  const sessions = /* @__PURE__ */ new Map();
  return {
    name: "codex",
    model,
    async createSession() {
      log.debug("Creating Codex session...");
      try {
        const sessionId = randomUUID2();
        const agent = new AgentLoop({
          model,
          config: { model, instructions: "" },
          approvalPolicy: "full-auto",
          // Grant write access to the requested cwd (spawn-level cwd is not
          // otherwise forwarded to the loop).
          additionalWritableRoots: opts?.cwd ? [opts.cwd] : [],
          getCommandConfirmation: async () => ({ approved: true }),
          // Streaming callbacks intentionally unused; results are read from
          // the return value of agent.run() instead.
          onItem: () => {
          },
          onLoading: () => {
          },
          onLastResponseId: () => {
          }
        });
        sessions.set(sessionId, agent);
        log.debug(`Session created: ${sessionId}`);
        return sessionId;
      } catch (err) {
        log.debug(`Session creation failed: ${log.formatErrorChain(err)}`);
        throw err;
      }
    },
    // Run the prompt through the agent loop and concatenate all
    // "output_text" blocks of returned message items; null when empty.
    async prompt(sessionId, text) {
      const agent = sessions.get(sessionId);
      if (!agent) {
        throw new Error(`Codex session ${sessionId} not found`);
      }
      log.debug(`Sending prompt to session ${sessionId} (${text.length} chars)...`);
      try {
        const items = await agent.run([text]);
        const parts = [];
        for (const item of items) {
          if (item.type === "message" && "content" in item) {
            const content = item.content;
            const itemText = content.filter((block) => block.type === "output_text").map((block) => block.text ?? "").join("");
            if (itemText) parts.push(itemText);
          }
        }
        const result = parts.join("") || null;
        log.debug(`Prompt response received (${result?.length ?? 0} chars)`);
        return result;
      } catch (err) {
        log.debug(`Prompt failed: ${log.formatErrorChain(err)}`);
        throw err;
      }
    },
    // Terminate every agent loop, ignoring individual failures.
    async cleanup() {
      log.debug("Cleaning up Codex provider...");
      for (const agent of sessions.values()) {
        try {
          agent.terminate();
        } catch {
        }
      }
      sessions.clear();
    }
  };
}
|
|
558
|
+
// Lazy module initializer for src/providers/codex.ts: pulls in the logger
// before any Codex provider function runs.
var init_codex = __esm({
  "src/providers/codex.ts"() {
    "use strict";
    init_logger();
  }
});
|
|
564
|
+
|
|
565
|
+
// src/providers/detect.ts
|
|
566
|
+
import { execFile as execFile6 } from "child_process";
|
|
567
|
+
import { promisify as promisify6 } from "util";
|
|
568
|
+
/**
 * Probe whether a provider's CLI binary is installed: it counts as installed
 * when `<binary> --version` exits successfully. Resolves to a boolean and
 * never rejects.
 */
async function checkProviderInstalled(name) {
  return exec6(PROVIDER_BINARIES[name], ["--version"]).then(
    () => true,
    () => false
  );
}
|
|
576
|
+
// Module-level bindings for src/providers/detect.ts, assigned lazily below.
var exec6, PROVIDER_BINARIES;
var init_detect = __esm({
  "src/providers/detect.ts"() {
    "use strict";
    // Promisified execFile for probing CLI binaries.
    exec6 = promisify6(execFile6);
    // Maps provider name -> CLI binary name (identical today, but kept as a
    // table so they can diverge).
    PROVIDER_BINARIES = {
      opencode: "opencode",
      copilot: "copilot",
      claude: "claude",
      codex: "codex"
    };
  }
});
|
|
589
|
+
|
|
590
|
+
// src/providers/index.ts
|
|
591
|
+
/**
 * Boot a provider by name via the PROVIDERS registry.
 * Throws a descriptive Error when the name is not registered.
 */
async function bootProvider(name, opts) {
  const bootFn = PROVIDERS[name];
  if (bootFn) {
    return bootFn(opts);
  }
  throw new Error(
    `Unknown provider "${name}". Available: ${PROVIDER_NAMES.join(", ")}`
  );
}
|
|
600
|
+
/**
 * List a provider's models by name via the LIST_MODELS registry.
 * Throws a descriptive Error when the name is not registered.
 */
async function listProviderModels(name, opts) {
  const fn = LIST_MODELS[name];
  if (fn) {
    return fn(opts);
  }
  throw new Error(
    `Unknown provider "${name}". Available: ${PROVIDER_NAMES.join(", ")}`
  );
}
|
|
609
|
+
// Provider registries, assigned lazily by init_providers().
var PROVIDERS, LIST_MODELS, PROVIDER_NAMES;
var init_providers = __esm({
  "src/providers/index.ts"() {
    "use strict";
    // Initialize all provider modules (and their logger dependency) first.
    init_opencode();
    init_copilot();
    init_claude();
    init_codex();
    init_detect();
    // name -> boot function returning the common provider interface.
    PROVIDERS = {
      opencode: boot,
      copilot: boot2,
      claude: boot3,
      codex: boot4
    };
    // name -> model-listing function.
    LIST_MODELS = {
      opencode: listModels,
      copilot: listModels2,
      claude: listModels3,
      codex: listModels4
    };
    // Canonical list of supported provider names, derived from PROVIDERS.
    PROVIDER_NAMES = Object.keys(PROVIDERS);
  }
});
|
|
633
|
+
|
|
634
|
+
// src/helpers/cleanup.ts
|
|
635
|
+
// Register a cleanup handler to be run (in registration order) by
// runCleanup(), e.g. on process shutdown.
function registerCleanup(fn) {
  cleanups.push(fn);
}
|
|
638
|
+
/**
 * Run all registered cleanup handlers sequentially, in registration order.
 * The registry is drained atomically up front, so handlers registered while
 * cleanup is running are not executed by this pass, and re-entrant calls see
 * an empty list. Individual handler failures are swallowed (best-effort).
 */
async function runCleanup() {
  const pending = cleanups.splice(0);
  for (let i = 0; i < pending.length; i++) {
    try {
      await pending[i]();
    } catch {
      // Cleanup is best-effort: one failing handler must not stop the rest.
    }
  }
}
|
|
647
|
+
// Shared registry of cleanup handlers; populated via registerCleanup().
var cleanups;
var init_cleanup = __esm({
  "src/helpers/cleanup.ts"() {
    "use strict";
    cleanups = [];
  }
});
|
|
654
|
+
|
|
655
|
+
// src/orchestrator/fix-tests-pipeline.ts
|
|
656
|
+
// esbuild export shim for src/orchestrator/fix-tests-pipeline.ts: exposes
// the module's public functions as live getters on a namespace object.
var fix_tests_pipeline_exports = {};
__export(fix_tests_pipeline_exports, {
  buildFixTestsPrompt: () => buildFixTestsPrompt,
  detectTestCommand: () => detectTestCommand,
  runFixTestsPipeline: () => runFixTestsPipeline,
  runTestCommand: () => runTestCommand
});
|
|
663
|
+
import { readFile as readFile8 } from "fs/promises";
|
|
664
|
+
import { join as join10 } from "path";
|
|
665
|
+
import { execFile as execFileCb } from "child_process";
|
|
666
|
+
/**
 * Detect the project's test command by inspecting `<cwd>/package.json`.
 * Returns "npm test" when a real "test" script is configured, and null when
 * there is no package.json, it fails to parse, or the script is absent or
 * still npm's auto-generated placeholder.
 */
async function detectTestCommand(cwd) {
  // npm's default placeholder script, which means "no tests configured".
  const NPM_PLACEHOLDER = 'echo "Error: no test specified" && exit 1';
  let raw;
  try {
    raw = await readFile8(join10(cwd, "package.json"), "utf-8");
  } catch {
    // No readable package.json — nothing to detect.
    return null;
  }
  let pkg;
  try {
    pkg = JSON.parse(raw);
  } catch {
    log.debug(
      `Failed to parse package.json: ${raw.slice(0, 200)}`
    );
    return null;
  }
  const testScript = pkg?.scripts?.test;
  const hasRealTests = typeof testScript === "string" && testScript !== NPM_PLACEHOLDER;
  return hasRealTests ? "npm test" : null;
}
|
|
687
|
+
/**
 * Run a test command and capture its outcome. Never rejects: resolves with
 * { exitCode, stdout, stderr, command } even when the process fails.
 * The command is split on single spaces — fine for the simple commands this
 * tool produces (e.g. "npm test"); quoted arguments are not supported.
 */
function runTestCommand(command, cwd) {
  const [binary, ...argv] = command.split(" ");
  // 10 MiB buffer so large test logs don't abort the run.
  const options = { cwd, maxBuffer: 10 * 1024 * 1024 };
  return new Promise((settle) => {
    execFileCb(binary, argv, options, (error, stdout, stderr) => {
      let exitCode = 0;
      if (error) {
        // Prefer the child's exit code; default to 1 for spawn failures.
        exitCode = "code" in error ? error.code ?? 1 : 1;
      }
      settle({ exitCode, stdout, stderr, command });
    });
  });
}
|
|
701
|
+
/**
 * Build the AI prompt for the fix-tests pipeline from a failed test run.
 * Includes the run context (cwd, command, exit code), the combined
 * stdout/stderr in a fenced block, and guard-rail instructions for the agent.
 */
function buildFixTestsPrompt(testResult, cwd) {
  // Merge the two streams, dropping whichever is empty.
  const combinedOutput = [testResult.stdout, testResult.stderr].filter(Boolean).join("\n");
  const lines = [];
  lines.push(`You are fixing failing tests in a project.`, ``);
  lines.push(
    `**Working directory:** ${cwd}`,
    `**Test command:** ${testResult.command}`,
    `**Exit code:** ${testResult.exitCode}`,
    ``
  );
  lines.push(`## Test Output`, ``, "```", combinedOutput, "```", ``);
  lines.push(`## Instructions`, ``);
  lines.push(
    `- Read the failing test files and the source code they test.`,
    `- Understand why the tests are failing.`,
    `- Make minimal fixes \u2014 fix the tests or fix the source code, whichever is appropriate.`,
    `- Do NOT commit changes \u2014 the developer controls commits.`,
    `- Do NOT modify tests to simply skip or ignore failures.`,
    `- When finished, confirm by saying "Tests fixed."`
  );
  return lines.join("\n");
}
|
|
726
|
+
/**
 * Fix-tests pipeline: detect the test command, run it, and — if it fails —
 * dispatch an AI provider session to fix the failures, then re-run the
 * suite to verify. Always resolves with a result object
 * { mode: "fix-tests", success, error? }; errors are caught and reported,
 * not thrown.
 */
async function runFixTestsPipeline(opts) {
  const { cwd } = opts;
  // NOTE(review): `testTimeoutMs` and `start` are computed but never used in
  // this function — the test runs have no timeout enforcement and no elapsed
  // time is reported. Confirm whether this is intentional.
  const testTimeoutMs = (opts.testTimeout ?? 5) * 6e4;
  const start = Date.now();
  const testCommand = await detectTestCommand(cwd);
  if (!testCommand) {
    log.error(
      'No test command found. Ensure package.json has a "test" script.'
    );
    return { mode: "fix-tests", success: false, error: "No test command found" };
  }
  log.info(`Detected test command: ${testCommand}`);
  // Dry run: report what would happen, but execute nothing.
  if (opts.dryRun) {
    log.info(`Dry run \u2014 would execute: ${testCommand}`);
    log.dim(` Working directory: ${cwd}`);
    return { mode: "fix-tests", success: false };
  }
  try {
    log.info("Running test suite...");
    const testResult = await runTestCommand(testCommand, cwd);
    if (testResult.exitCode === 0) {
      log.success("All tests pass \u2014 nothing to fix.");
      return { mode: "fix-tests", success: true };
    }
    log.warn(
      `Tests failed (exit code ${testResult.exitCode}). Dispatching AI to fix...`
    );
    const provider = opts.provider ?? "opencode";
    const instance = await bootProvider(provider, { url: opts.serverUrl, cwd });
    // Registered globally as a safety net; also cleaned up explicitly below.
    registerCleanup(() => instance.cleanup());
    const prompt = buildFixTestsPrompt(testResult, cwd);
    log.debug(`Prompt built (${prompt.length} chars)`);
    const sessionId = await instance.createSession();
    const response = await instance.prompt(sessionId, prompt);
    if (response === null) {
      log.error("No response from AI agent.");
      await instance.cleanup();
      return { mode: "fix-tests", success: false, error: "No response from agent" };
    }
    log.success("AI agent completed fixes.");
    log.info("Re-running tests to verify fixes...");
    const verifyResult = await runTestCommand(testCommand, cwd);
    await instance.cleanup();
    if (verifyResult.exitCode === 0) {
      log.success("All tests pass after fixes!");
      return { mode: "fix-tests", success: true };
    }
    log.warn(
      `Tests still failing after fix attempt (exit code ${verifyResult.exitCode}).`
    );
    return { mode: "fix-tests", success: false, error: "Tests still failing after fix attempt" };
  } catch (err) {
    // Provider cleanup on this path is left to the registered cleanup hook.
    const message = log.extractMessage(err);
    log.error(`Fix-tests pipeline failed: ${log.formatErrorChain(err)}`);
    log.debug(log.formatErrorChain(err));
    return { mode: "fix-tests", success: false, error: message };
  }
}
|
|
784
|
+
// Lazy module initializer for src/orchestrator/fix-tests-pipeline.ts:
// initializes the providers, cleanup registry, and logger it depends on.
var init_fix_tests_pipeline = __esm({
  "src/orchestrator/fix-tests-pipeline.ts"() {
    "use strict";
    init_providers();
    init_cleanup();
    init_logger();
  }
});
|
|
792
|
+
|
|
793
|
+
// src/cli.ts
|
|
794
|
+
import { resolve, join as join11 } from "path";
|
|
795
|
+
|
|
796
|
+
// src/spec-generator.ts
|
|
797
|
+
import { cpus, freemem } from "os";
|
|
798
|
+
|
|
799
|
+
// src/datasources/index.ts
|
|
800
|
+
import { execFile as execFile4 } from "child_process";
|
|
801
|
+
import { promisify as promisify4 } from "util";
|
|
802
|
+
|
|
803
|
+
// src/datasources/github.ts
|
|
804
|
+
import { execFile } from "child_process";
|
|
805
|
+
import { promisify } from "util";
|
|
806
|
+
|
|
807
|
+
// src/helpers/slugify.ts
// Default slug length cap available to callers.
var MAX_SLUG_LENGTH = 60;
/**
 * Convert arbitrary text into a URL/branch-safe slug: lowercase, runs of
 * non-alphanumeric characters collapsed to single hyphens, and leading or
 * trailing hyphens stripped. When `maxLength` is provided (non-nullish) the
 * slug is truncated to that many characters.
 */
function slugify(input2, maxLength) {
  const lowered = input2.toLowerCase();
  const hyphenated = lowered.replace(/[^a-z0-9]+/g, "-");
  const trimmed = hyphenated.replace(/^-|-$/g, "");
  if (maxLength == null) {
    return trimmed;
  }
  return trimmed.slice(0, maxLength);
}
|
|
813
|
+
|
|
814
|
+
// src/datasources/github.ts
// Logger must be initialized before this module's top-level code runs.
init_logger();
// Promisified execFile shared by the git/gh wrappers below.
var exec = promisify(execFile);
// Run a git subcommand in `cwd` and return its captured stdout.
// Rejects (with the execFile error) on non-zero exit.
async function git(args, cwd) {
  const { stdout } = await exec("git", args, { cwd });
  return stdout;
}
|
|
821
|
+
/**
 * Run a GitHub CLI (`gh`) subcommand in `cwd` and return its captured
 * stdout. Rejects (with the execFile error) on non-zero exit.
 */
async function gh(args, cwd) {
  const result = await exec("gh", args, { cwd });
  return result.stdout;
}
|
|
825
|
+
/**
 * Build the canonical dispatch branch name:
 * `<username>/dispatch/<issueNumber>-<slugified title>`.
 * The title slug is capped at 50 characters.
 *
 * @param {string|number} issueNumber - Issue identifier.
 * @param {string} title - Issue title (slugified for the branch).
 * @param {string} [username="unknown"] - Branch namespace prefix.
 * @returns {string} Branch name.
 */
function buildBranchName(issueNumber, title, username = "unknown") {
  const titleSlug = slugify(title, 50);
  return [username, "dispatch", `${issueNumber}-${titleSlug}`].join("/");
}
|
|
829
|
+
/**
 * Determine the repository's default branch name.
 * Strategy: read `refs/remotes/origin/HEAD` and take the last path segment;
 * if that fails (e.g. no remote), fall back to "main" when a local `main`
 * ref exists, otherwise "master".
 *
 * @param {string} cwd - Repository working directory.
 * @returns {Promise<string>} Default branch name.
 */
async function getDefaultBranch(cwd) {
  try {
    const ref = await git(["symbolic-ref", "refs/remotes/origin/HEAD"], cwd);
    return ref.trim().split("/").pop();
  } catch {
    try {
      await git(["rev-parse", "--verify", "main"], cwd);
      return "main";
    } catch {
      return "master";
    }
  }
}
|
|
843
|
+
// GitHub datasource: every operation shells out to the `gh` CLI (issues/PRs)
// or `git` (branches/commits). `gh` must be installed and authenticated for
// the current repository; nothing here calls the GitHub API directly.
var datasource = {
  name: "github",
  // List open issues as lightweight IssueDetails. Comments and
  // acceptanceCriteria are intentionally left empty here — use fetch() for a
  // single issue's comments.
  // NOTE(review): no --limit flag is passed, so gh's default list cap
  // applies — confirm whether the full backlog is expected.
  async list(opts = {}) {
    const cwd = opts.cwd || process.cwd();
    const { stdout } = await exec(
      "gh",
      [
        "issue",
        "list",
        "--state",
        "open",
        "--json",
        "number,title,body,labels,state,url"
      ],
      { cwd }
    );
    let issues;
    try {
      issues = JSON.parse(stdout);
    } catch {
      // Truncate the raw output so the error stays readable.
      throw new Error(`Failed to parse GitHub CLI output: ${stdout.slice(0, 200)}`);
    }
    return issues.map(
      (issue) => ({
        number: String(issue.number),
        title: issue.title ?? "",
        body: issue.body ?? "",
        labels: (issue.labels ?? []).map((l) => l.name),
        state: issue.state ?? "OPEN",
        url: issue.url ?? "",
        comments: [],
        acceptanceCriteria: ""
      })
    );
  },
  // Fetch one issue, including its comments rendered as "**author:** body"
  // markdown strings.
  async fetch(issueId, opts = {}) {
    const cwd = opts.cwd || process.cwd();
    const { stdout } = await exec(
      "gh",
      [
        "issue",
        "view",
        issueId,
        "--json",
        "number,title,body,labels,state,url,comments"
      ],
      { cwd }
    );
    let issue;
    try {
      issue = JSON.parse(stdout);
    } catch {
      throw new Error(`Failed to parse GitHub CLI output: ${stdout.slice(0, 200)}`);
    }
    const comments = [];
    if (issue.comments && Array.isArray(issue.comments)) {
      for (const c of issue.comments) {
        const author = c.author?.login ?? "unknown";
        comments.push(`**${author}:** ${c.body}`);
      }
    }
    return {
      number: String(issue.number),
      title: issue.title ?? "",
      body: issue.body ?? "",
      labels: (issue.labels ?? []).map((l) => l.name),
      state: issue.state ?? "OPEN",
      url: issue.url ?? "",
      comments,
      acceptanceCriteria: ""
    };
  },
  // Overwrite an issue's title and body.
  async update(issueId, title, body, opts = {}) {
    const cwd = opts.cwd || process.cwd();
    await exec("gh", ["issue", "edit", issueId, "--title", title, "--body", body], { cwd });
  },
  // Close an issue.
  async close(issueId, opts = {}) {
    const cwd = opts.cwd || process.cwd();
    await exec("gh", ["issue", "close", issueId], { cwd });
  },
  // Create an issue; `gh issue create` prints the new issue URL on stdout,
  // from which the issue number is parsed ("0" when the URL is unexpected).
  async create(title, body, opts = {}) {
    const cwd = opts.cwd || process.cwd();
    const { stdout } = await exec(
      "gh",
      ["issue", "create", "--title", title, "--body", body],
      { cwd }
    );
    const url = stdout.trim();
    const match = url.match(/\/issues\/(\d+)$/);
    const number = match ? match[1] : "0";
    return {
      number,
      title,
      body,
      labels: [],
      state: "open",
      url,
      comments: [],
      acceptanceCriteria: ""
    };
  },
  // Slugified `git config user.name`, or "unknown" when git fails or the
  // slug comes out empty.
  async getUsername(opts) {
    try {
      const name = await git(["config", "user.name"], opts.cwd);
      const slug = slugify(name.trim());
      return slug || "unknown";
    } catch {
      return "unknown";
    }
  },
  // Delegates to the module-level getDefaultBranch (object literal methods do
  // not shadow outer bindings, so this resolves to the free function above).
  getDefaultBranch(opts) {
    return getDefaultBranch(opts.cwd);
  },
  buildBranchName(issueNumber, title, username) {
    return buildBranchName(issueNumber, title, username ?? "unknown");
  },
  // Create-and-checkout; if the branch already exists, just switch to it.
  async createAndSwitchBranch(branchName, opts) {
    const cwd = opts.cwd;
    try {
      await git(["checkout", "-b", branchName], cwd);
    } catch (err) {
      const message = log.extractMessage(err);
      if (message.includes("already exists")) {
        await git(["checkout", branchName], cwd);
      } else {
        throw err;
      }
    }
  },
  async switchBranch(branchName, opts) {
    await git(["checkout", branchName], opts.cwd);
  },
  async pushBranch(branchName, opts) {
    await git(["push", "--set-upstream", "origin", branchName], opts.cwd);
  },
  // Stage everything and commit; a no-op when the staged diff is empty.
  async commitAllChanges(message, opts) {
    const cwd = opts.cwd;
    await git(["add", "-A"], cwd);
    const status = await git(["diff", "--cached", "--stat"], cwd);
    if (!status.trim()) {
      return;
    }
    await git(["commit", "-m", message], cwd);
  },
  // Create a PR for the branch; when one already exists, return the existing
  // PR's URL instead of failing.
  async createPullRequest(branchName, issueNumber, title, body, opts) {
    const cwd = opts.cwd;
    const prBody = body || `Closes #${issueNumber}`;
    try {
      const url = await gh(
        [
          "pr",
          "create",
          "--title",
          title,
          "--body",
          prBody,
          "--head",
          branchName
        ],
        cwd
      );
      return url.trim();
    } catch (err) {
      const message = log.extractMessage(err);
      if (message.includes("already exists")) {
        const existing = await gh(
          ["pr", "view", branchName, "--json", "url", "--jq", ".url"],
          cwd
        );
        return existing.trim();
      }
      throw err;
    }
  }
};
|
|
1018
|
+
|
|
1019
|
+
// src/datasources/azdevops.ts
|
|
1020
|
+
import { execFile as execFile2 } from "child_process";
|
|
1021
|
+
import { promisify as promisify2 } from "util";
|
|
1022
|
+
init_logger();
var exec2 = promisify2(execFile2);
/**
 * Pick a work item type for issue creation by listing the types the Azure
 * DevOps project supports and choosing the first match from a preference
 * list ("User Story" > "Product Backlog Item" > "Requirement" > "Issue"),
 * falling back to the first type the project reports.
 *
 * @param {{project?: string, org?: string, cwd?: string}} [opts]
 * @returns {Promise<string|null>} A type name, or null when the `az` call
 *   fails or the project reports no types.
 */
async function detectWorkItemType(opts = {}) {
  try {
    const args = ["boards", "work-item", "type", "list", "--output", "json"];
    if (opts.project) args.push("--project", opts.project);
    if (opts.org) args.push("--org", opts.org);
    const { stdout } = await exec2("az", args, {
      cwd: opts.cwd || process.cwd()
    });
    const types = JSON.parse(stdout);
    if (!Array.isArray(types) || types.length === 0) return null;
    const names = types.map((t) => t.name);
    const available = new Set(names);
    const preferred = ["User Story", "Product Backlog Item", "Requirement", "Issue"];
    const match = preferred.find((p) => available.has(p));
    return match ?? names[0] ?? null;
  } catch {
    // Best-effort detection: any failure means "unknown", letting the caller
    // demand explicit configuration instead.
    return null;
  }
}
|
|
1044
|
+
// Azure DevOps datasource: work items via the `az boards` CLI, branches via
// `git`, pull requests via `az repos pr`. The `az` CLI (with the azure-devops
// extension commands used below) must be installed and logged in.
var datasource2 = {
  name: "azdevops",
  // List non-closed work items. The WIQL query returns only IDs, so each
  // item is hydrated with a follow-up fetch() call.
  // NOTE: this is one `az` invocation per work item, executed serially — a
  // large backlog will be slow.
  async list(opts = {}) {
    const wiql = "SELECT [System.Id] FROM workitems WHERE [System.State] <> 'Closed' AND [System.State] <> 'Removed' ORDER BY [System.CreatedDate] DESC";
    const args = ["boards", "query", "--wiql", wiql, "--output", "json"];
    if (opts.org) args.push("--org", opts.org);
    if (opts.project) args.push("--project", opts.project);
    const { stdout } = await exec2("az", args, {
      cwd: opts.cwd || process.cwd()
    });
    let data;
    try {
      data = JSON.parse(stdout);
    } catch {
      throw new Error(`Failed to parse Azure CLI output: ${stdout.slice(0, 200)}`);
    }
    const items = [];
    if (Array.isArray(data)) {
      for (const row of data) {
        // Rows may expose the id under either casing; skip rows without one.
        const id = String(row.id ?? row.ID ?? "");
        if (id) {
          const detail = await datasource2.fetch(id, opts);
          items.push(detail);
        }
      }
    }
    return items;
  },
  // Fetch one work item and normalize it to the shared IssueDetails shape.
  // Tags (semicolon-separated in Azure DevOps) become the labels array.
  async fetch(issueId, opts = {}) {
    const args = [
      "boards",
      "work-item",
      "show",
      "--id",
      issueId,
      "--output",
      "json"
    ];
    if (opts.org) {
      args.push("--org", opts.org);
    }
    if (opts.project) {
      args.push("--project", opts.project);
    }
    const { stdout } = await exec2("az", args, {
      cwd: opts.cwd || process.cwd()
    });
    let item;
    try {
      item = JSON.parse(stdout);
    } catch {
      throw new Error(`Failed to parse Azure CLI output: ${stdout.slice(0, 200)}`);
    }
    const fields = item.fields ?? {};
    const comments = await fetchComments(issueId, opts);
    return {
      number: String(item.id ?? issueId),
      title: fields["System.Title"] ?? "",
      body: fields["System.Description"] ?? "",
      labels: (fields["System.Tags"] ?? "").split(";").map((t) => t.trim()).filter(Boolean),
      state: fields["System.State"] ?? "",
      // Prefer the human-facing HTML link over the REST resource URL.
      url: item._links?.html?.href ?? item.url ?? "",
      comments,
      acceptanceCriteria: fields["Microsoft.VSTS.Common.AcceptanceCriteria"] ?? ""
    };
  },
  // Overwrite a work item's title and description.
  async update(issueId, title, body, opts = {}) {
    const args = [
      "boards",
      "work-item",
      "update",
      "--id",
      issueId,
      "--title",
      title,
      "--description",
      body
    ];
    if (opts.org) args.push("--org", opts.org);
    if (opts.project) args.push("--project", opts.project);
    await exec2("az", args, { cwd: opts.cwd || process.cwd() });
  },
  // Transition a work item to the "Closed" state.
  // NOTE(review): assumes the project's process has a state literally named
  // "Closed" — confirm for custom process templates.
  async close(issueId, opts = {}) {
    const args = [
      "boards",
      "work-item",
      "update",
      "--id",
      issueId,
      "--state",
      "Closed"
    ];
    if (opts.org) args.push("--org", opts.org);
    if (opts.project) args.push("--project", opts.project);
    await exec2("az", args, { cwd: opts.cwd || process.cwd() });
  },
  // Create a work item. The type comes from opts.workItemType or, failing
  // that, detectWorkItemType(); with neither, creation is refused.
  async create(title, body, opts = {}) {
    const workItemType = opts.workItemType ?? await detectWorkItemType(opts);
    if (!workItemType) {
      throw new Error(
        "Could not determine work item type. Set workItemType in your config (for example via `dispatch config`)."
      );
    }
    const args = [
      "boards",
      "work-item",
      "create",
      "--type",
      workItemType,
      "--title",
      title,
      "--description",
      body,
      "--output",
      "json"
    ];
    if (opts.org) args.push("--org", opts.org);
    if (opts.project) args.push("--project", opts.project);
    const { stdout } = await exec2("az", args, {
      cwd: opts.cwd || process.cwd()
    });
    let item;
    try {
      item = JSON.parse(stdout);
    } catch {
      throw new Error(`Failed to parse Azure CLI output: ${stdout.slice(0, 200)}`);
    }
    const fields = item.fields ?? {};
    return {
      number: String(item.id),
      // Echo back the requested values when the CLI response omits them.
      title: fields["System.Title"] ?? title,
      body: fields["System.Description"] ?? body,
      labels: (fields["System.Tags"] ?? "").split(";").map((t) => t.trim()).filter(Boolean),
      state: fields["System.State"] ?? "New",
      url: item._links?.html?.href ?? item.url ?? "",
      comments: [],
      acceptanceCriteria: fields["Microsoft.VSTS.Common.AcceptanceCriteria"] ?? ""
    };
  },
  // Same origin-HEAD / main / master fallback strategy as the github
  // datasource, but via exec2 directly.
  async getDefaultBranch(opts) {
    try {
      const { stdout } = await exec2("git", ["symbolic-ref", "refs/remotes/origin/HEAD"], { cwd: opts.cwd });
      const parts = stdout.trim().split("/");
      return parts[parts.length - 1];
    } catch {
      try {
        await exec2("git", ["rev-parse", "--verify", "main"], { cwd: opts.cwd });
        return "main";
      } catch {
        return "master";
      }
    }
  },
  // Slugified `git config user.name`, "unknown" when unavailable.
  async getUsername(opts) {
    try {
      const { stdout } = await exec2("git", ["config", "user.name"], { cwd: opts.cwd });
      const name = stdout.trim();
      if (!name) return "unknown";
      return slugify(name);
    } catch {
      return "unknown";
    }
  },
  buildBranchName(issueNumber, title, username) {
    const slug = slugify(title, 50);
    return `${username}/dispatch/${issueNumber}-${slug}`;
  },
  // Create-and-checkout; if the branch already exists, just switch to it.
  async createAndSwitchBranch(branchName, opts) {
    try {
      await exec2("git", ["checkout", "-b", branchName], { cwd: opts.cwd });
    } catch (err) {
      const message = log.extractMessage(err);
      if (message.includes("already exists")) {
        await exec2("git", ["checkout", branchName], { cwd: opts.cwd });
      } else {
        throw err;
      }
    }
  },
  async switchBranch(branchName, opts) {
    await exec2("git", ["checkout", branchName], { cwd: opts.cwd });
  },
  async pushBranch(branchName, opts) {
    await exec2("git", ["push", "--set-upstream", "origin", branchName], { cwd: opts.cwd });
  },
  // Stage everything and commit; a no-op when the staged diff is empty.
  async commitAllChanges(message, opts) {
    await exec2("git", ["add", "-A"], { cwd: opts.cwd });
    const { stdout } = await exec2("git", ["diff", "--cached", "--stat"], { cwd: opts.cwd });
    if (!stdout.trim()) {
      return;
    }
    await exec2("git", ["commit", "-m", message], { cwd: opts.cwd });
  },
  // Create a PR linked to the work item; when one already exists for the
  // source branch, return the first active PR's URL instead ("" if none).
  async createPullRequest(branchName, issueNumber, title, body, opts) {
    try {
      const { stdout } = await exec2(
        "az",
        [
          "repos",
          "pr",
          "create",
          "--title",
          title,
          "--description",
          body || `Resolves AB#${issueNumber}`,
          "--source-branch",
          branchName,
          "--work-items",
          issueNumber,
          "--output",
          "json"
        ],
        { cwd: opts.cwd }
      );
      let pr;
      try {
        pr = JSON.parse(stdout);
      } catch {
        throw new Error(`Failed to parse Azure CLI output: ${stdout.slice(0, 200)}`);
      }
      return pr.url ?? "";
    } catch (err) {
      const message = log.extractMessage(err);
      if (message.includes("already exists")) {
        const { stdout } = await exec2(
          "az",
          [
            "repos",
            "pr",
            "list",
            "--source-branch",
            branchName,
            "--status",
            "active",
            "--output",
            "json"
          ],
          { cwd: opts.cwd }
        );
        let prs;
        try {
          prs = JSON.parse(stdout);
        } catch {
          throw new Error(`Failed to parse Azure CLI output: ${stdout.slice(0, 200)}`);
        }
        if (Array.isArray(prs) && prs.length > 0) {
          return prs[0].url ?? "";
        }
        return "";
      }
      throw err;
    }
  }
};
|
|
1298
|
+
/**
 * Fetch the discussion comments of an Azure DevOps work item and render each
 * as a "**author:** text" markdown string.
 * Best-effort: any CLI or parse failure yields an empty array.
 *
 * @param {string} workItemId - Work item id.
 * @param {{org?: string, project?: string, cwd?: string}} opts
 * @returns {Promise<string[]>} Rendered comment lines (possibly empty).
 */
async function fetchComments(workItemId, opts) {
  try {
    const args = [
      "boards",
      "work-item",
      "relation",
      "list-comment",
      "--work-item-id",
      workItemId,
      "--output",
      "json"
    ];
    if (opts.org) {
      args.push("--org", opts.org);
    }
    if (opts.project) {
      args.push("--project", opts.project);
    }
    const { stdout } = await exec2("az", args, {
      cwd: opts.cwd || process.cwd()
    });
    const data = JSON.parse(stdout);
    if (!data.comments || !Array.isArray(data.comments)) {
      return [];
    }
    return data.comments.map((c) => {
      const author = c.createdBy?.displayName ?? "unknown";
      return `**${author}:** ${c.text ?? ""}`;
    });
  } catch {
    // Comments are supplementary; never fail the caller over them.
    return [];
  }
}
|
|
1333
|
+
|
|
1334
|
+
// src/datasources/md.ts
|
|
1335
|
+
import { execFile as execFile3 } from "child_process";
|
|
1336
|
+
import { readFile, writeFile, readdir, mkdir, rename } from "fs/promises";
|
|
1337
|
+
import { join, parse as parsePath } from "path";
|
|
1338
|
+
import { promisify as promisify3 } from "util";
|
|
1339
|
+
var exec3 = promisify3(execFile3);
// Specs for the markdown datasource live under this directory, relative to
// the working directory.
var DEFAULT_DIR = ".dispatch/specs";
/**
 * Resolve the absolute spec directory for the markdown datasource.
 *
 * @param {{cwd?: string}|undefined} opts - Optional; opts.cwd defaults to
 *   process.cwd().
 * @returns {string} Path to the spec directory.
 */
function resolveDir(opts) {
  const baseDir = opts?.cwd ?? process.cwd();
  return join(baseDir, DEFAULT_DIR);
}
|
|
1345
|
+
/**
 * Derive a display title for a spec file.
 * Preference order:
 *   1. The first H1 heading ("# ...") anywhere in the content.
 *   2. The first non-empty line, stripped of leading markdown markers
 *      (#, >, *, -) and truncated to 80 chars on a word boundary.
 *   3. The filename without its extension.
 *
 * @param {string} content - Markdown file contents.
 * @param {string} filename - File name used as the last-resort title.
 * @returns {string} The derived title.
 */
function extractTitle(content, filename) {
  const h1 = content.match(/^#\s+(.+)$/m);
  if (h1) {
    return h1[1].trim();
  }
  for (const rawLine of content.split("\n")) {
    const line = rawLine.trim();
    if (!line) continue;
    const candidate = line.replace(/^[#>*\-]+\s*/, "").trim();
    if (!candidate) continue;
    if (candidate.length <= 80) {
      return candidate;
    }
    // Clip long lines at 80 chars, backing up to the last full word.
    const clipped = candidate.slice(0, 80);
    const breakAt = clipped.lastIndexOf(" ");
    return breakAt > 0 ? clipped.slice(0, breakAt) : clipped;
  }
  return parsePath(filename).name;
}
|
|
1361
|
+
// Build an IssueDetails record from a spec file on disk. The filename doubles
// as the issue "number"; labels/comments/acceptanceCriteria have no file-based
// equivalent and are left empty, and `url` is the file path rather than a
// web URL.
function toIssueDetails(filename, content, dir) {
  return {
    number: filename,
    title: extractTitle(content, filename),
    body: content,
    labels: [],
    state: "open",
    url: join(dir, filename),
    comments: [],
    acceptanceCriteria: ""
  };
}
|
|
1373
|
+
// Markdown datasource: issues are plain .md files under .dispatch/specs.
// Branch/commit/PR operations are deliberate no-ops — this source is for
// local, VCS-agnostic spec files.
var datasource3 = {
  name: "md",
  // List every .md file in the spec directory (sorted by name). A missing
  // directory is treated as "no issues", not an error.
  async list(opts) {
    const dir = resolveDir(opts);
    let entries;
    try {
      entries = await readdir(dir);
    } catch {
      return [];
    }
    const mdFiles = entries.filter((f) => f.endsWith(".md")).sort();
    const results = [];
    for (const filename of mdFiles) {
      const filePath = join(dir, filename);
      const content = await readFile(filePath, "utf-8");
      results.push(toIssueDetails(filename, content, dir));
    }
    return results;
  },
  // Read one spec file; issueId may be given with or without the .md
  // extension. Propagates fs errors (e.g. ENOENT) to the caller.
  async fetch(issueId, opts) {
    const dir = resolveDir(opts);
    const filename = issueId.endsWith(".md") ? issueId : `${issueId}.md`;
    const filePath = join(dir, filename);
    const content = await readFile(filePath, "utf-8");
    return toIssueDetails(filename, content, dir);
  },
  // Overwrite the file body. The title argument is ignored: the title is
  // derived from the content (see extractTitle).
  async update(issueId, _title, body, opts) {
    const dir = resolveDir(opts);
    const filename = issueId.endsWith(".md") ? issueId : `${issueId}.md`;
    const filePath = join(dir, filename);
    await writeFile(filePath, body, "utf-8");
  },
  // "Closing" moves the file into an archive/ subdirectory.
  async close(issueId, opts) {
    const dir = resolveDir(opts);
    const filename = issueId.endsWith(".md") ? issueId : `${issueId}.md`;
    const filePath = join(dir, filename);
    const archiveDir = join(dir, "archive");
    await mkdir(archiveDir, { recursive: true });
    await rename(filePath, join(archiveDir, filename));
  },
  // Create a new spec file named after the slugified title.
  // NOTE(review): an existing file with the same slug is silently
  // overwritten — confirm that is intended.
  async create(title, body, opts) {
    const dir = resolveDir(opts);
    await mkdir(dir, { recursive: true });
    const filename = `${slugify(title)}.md`;
    const filePath = join(dir, filename);
    await writeFile(filePath, body, "utf-8");
    return toIssueDetails(filename, body, dir);
  },
  // No VCS backing: report a fixed default branch.
  async getDefaultBranch(_opts) {
    return "main";
  },
  // Slugified `git config user.name`, "local" when git is unavailable or
  // unset.
  async getUsername(opts) {
    try {
      const { stdout } = await exec3("git", ["config", "user.name"], { cwd: opts.cwd });
      const name = stdout.trim();
      if (!name) return "local";
      return slugify(name);
    } catch {
      return "local";
    }
  },
  buildBranchName(issueNumber, title, username) {
    const slug = slugify(title, 50);
    return `${username}/dispatch/${issueNumber}-${slug}`;
  },
  // Branch, push, commit and PR operations are intentional no-ops for the
  // file-based datasource.
  async createAndSwitchBranch(_branchName, _opts) {
  },
  async switchBranch(_branchName, _opts) {
  },
  async pushBranch(_branchName, _opts) {
  },
  async commitAllChanges(_message, _opts) {
  },
  async createPullRequest(_branchName, _issueNumber, _title, _body, _opts) {
    return "";
  }
};
|
|
1450
|
+
|
|
1451
|
+
// src/datasources/index.ts
|
|
1452
|
+
var exec4 = promisify4(execFile4);
// Registry of all datasource implementations, keyed by their CLI name.
var DATASOURCES = {
  github: datasource,
  azdevops: datasource2,
  md: datasource3
};
// Names of every registered datasource (used for help/error text).
var DATASOURCE_NAMES = Object.keys(DATASOURCES);
/**
 * Look up a datasource implementation by name.
 *
 * @param {string} name - Datasource key ("github", "azdevops", "md").
 * @returns {object} The datasource implementation.
 * @throws {Error} When the name is not registered.
 */
function getDatasource(name) {
  const found = DATASOURCES[name];
  if (!found) {
    throw new Error(
      `Unknown datasource "${name}". Available: ${DATASOURCE_NAMES.join(", ")}`
    );
  }
  return found;
}
|
|
1468
|
+
/**
 * Read the URL of the `origin` git remote.
 *
 * @param {string} cwd - Repository working directory.
 * @returns {Promise<string|null>} The remote URL, or null when git fails or
 *   the URL is empty.
 */
async function getGitRemoteUrl(cwd) {
  try {
    const { stdout } = await exec4("git", ["remote", "get-url", "origin"], {
      cwd
    });
    const url = stdout.trim();
    return url === "" ? null : url;
  } catch {
    return null;
  }
}
|
|
1478
|
+
// Remote-URL fingerprints that map a git hosting domain to a datasource name.
var SOURCE_PATTERNS = [
  { pattern: /github\.com/i, source: "github" },
  { pattern: /dev\.azure\.com/i, source: "azdevops" },
  { pattern: /visualstudio\.com/i, source: "azdevops" }
];
/**
 * Guess the datasource from the repository's `origin` remote URL.
 *
 * @param {string} cwd - Repository working directory.
 * @returns {Promise<string|null>} A datasource name, or null when there is
 *   no remote or no pattern matches.
 */
async function detectDatasource(cwd) {
  const url = await getGitRemoteUrl(cwd);
  if (!url) return null;
  const match = SOURCE_PATTERNS.find(({ pattern }) => pattern.test(url));
  return match ? match.source : null;
}
|
|
1493
|
+
|
|
1494
|
+
// src/spec-generator.ts
init_logger();
// Memory budget heuristic: defaultConcurrency() allows one concurrent
// generation task per this many MB of free RAM.
var MB_PER_CONCURRENT_TASK = 500;
|
|
1497
|
+
// H2 headings that belong to the spec format. extractSpecContent uses this
// set to trim trailing sections under unrecognized headings; entries must
// match the full line exactly, including the "## " prefix.
var RECOGNIZED_H2 = /* @__PURE__ */ new Set([
  "## Context",
  "## Why",
  "## Approach",
  "## Integration Points",
  "## Tasks",
  "## References",
  "## Key Guidelines"
]);
|
|
1506
|
+
/**
 * Default parallelism for spec generation: the lesser of the CPU count and a
 * free-memory budget (one task per MB_PER_CONCURRENT_TASK MB), but never
 * below 1.
 *
 * @returns {number} Concurrency level (>= 1).
 */
function defaultConcurrency() {
  const byCpu = cpus().length;
  const byMemory = Math.floor(freemem() / 1024 / 1024 / MB_PER_CONCURRENT_TASK);
  return Math.max(1, Math.min(byCpu, byMemory));
}
|
|
1509
|
+
/**
 * Does the input look like a comma-separated list of issue numbers
 * (e.g. "12" or "1, 2,3")? Arrays are never issue-number lists.
 *
 * @param {string|string[]} input2
 * @returns {boolean}
 */
function isIssueNumbers(input2) {
  if (Array.isArray(input2)) {
    return false;
  }
  const issueNumberList = /^\d+(,\s*\d+)*$/;
  return issueNumberList.test(input2);
}
|
|
1513
|
+
/**
 * Does the input look like a file path, glob pattern, or list of them?
 * Arrays are always treated as path inputs; strings are matched against
 * glob metacharacters, path separators, relative-path prefixes, and a set
 * of known file extensions.
 *
 * @param {string|string[]} input2
 * @returns {boolean}
 */
function isGlobOrFilePath(input2) {
  if (Array.isArray(input2)) {
    return true;
  }
  const pathLikePatterns = [
    /[*?\[{]/,
    /[/\\]/,
    /^\.\.?\//,
    /\.(md|txt|yaml|yml|json|ts|js|tsx|jsx)$/i
  ];
  return pathLikePatterns.some((re) => re.test(input2));
}
|
|
1521
|
+
// Normalize raw (typically LLM-generated) spec text into clean markdown:
//   1. Unwrap a surrounding ``` / ```markdown code fence; failing that, pull
//      out an inner fenced section if it contains an H1.
//   2. Drop any preamble before the first H1; when no H1 exists at all,
//      return the raw input unchanged.
//   3. Scanning bottom-up over H2 headings, cut off trailing sections whose
//      headings are not in RECOGNIZED_H2 (stopping at the last recognized
//      one), then trim trailing blank lines.
//   4. Guarantee a trailing newline.
function extractSpecContent(raw) {
  let content = raw;
  // Whole-string fence: the entire input is one fenced block.
  const fenceMatch = content.match(/^\s*```(?:markdown)?\s*\n([\s\S]*?)\n\s*```\s*$/);
  if (fenceMatch) {
    content = fenceMatch[1];
  } else {
    // Otherwise accept an embedded fence, but only when its contents look
    // like a spec (contain an H1) — avoids grabbing an example code block.
    const innerFenceMatch = content.match(/```(?:markdown)?\s*\n([\s\S]*?)\n\s*```/);
    if (innerFenceMatch && /^# /m.test(innerFenceMatch[1])) {
      content = innerFenceMatch[1];
    }
  }
  const h1Index = content.search(/^# /m);
  if (h1Index === -1) {
    // No H1 anywhere: give the caller back the original, unmodified.
    return raw;
  }
  content = content.slice(h1Index);
  const lines = content.split("\n");
  // Walk H2 headings from the bottom; unrecognized headings push the cut
  // point up, and the first recognized heading ends the scan.
  let lastRecognizedSectionEnd = lines.length;
  let foundLastRecognized = false;
  for (let i = lines.length - 1; i >= 0; i--) {
    const trimmed = lines[i].trimEnd();
    if (trimmed.startsWith("## ")) {
      if (RECOGNIZED_H2.has(trimmed)) {
        foundLastRecognized = true;
        break;
      } else {
        lastRecognizedSectionEnd = i;
      }
    }
  }
  if (foundLastRecognized || lastRecognizedSectionEnd < lines.length) {
    // Cut at the first trailing unrecognized section (if any) and strip the
    // blank lines immediately before the cut.
    let end = lastRecognizedSectionEnd;
    while (end > 0 && lines[end - 1].trim() === "") {
      end--;
    }
    content = lines.slice(0, end).join("\n");
  }
  if (!content.endsWith("\n")) {
    content += "\n";
  }
  return content;
}
|
|
1563
|
+
/**
 * Validate that generated spec content has the required structure:
 * an H1 title, a "## Tasks" section, and at least one unchecked task
 * ("- [ ]") after it. Each failure is logged via log.warn and returned as
 * { valid: false, reason }.
 *
 * @param {string} content - Spec markdown.
 * @returns {{valid: boolean, reason?: string}}
 */
function validateSpecStructure(content) {
  const fail = (reason) => {
    log.warn(reason);
    return { valid: false, reason };
  };
  if (!content.trimStart().startsWith("# ")) {
    return fail('Spec does not start with an H1 heading (expected "# ")');
  }
  const tasksIndex = content.search(/^## Tasks\s*$/m);
  if (tasksIndex === -1) {
    return fail('Spec is missing a "## Tasks" section');
  }
  if (!/- \[ \]/.test(content.slice(tasksIndex))) {
    return fail('"## Tasks" section contains no unchecked tasks (expected at least one "- [ ]")');
  }
  return { valid: true };
}
|
|
1584
|
+
/**
 * Resolve which datasource to use.
 * Precedence: an explicit issueSource wins; otherwise detect from the git
 * remote; otherwise fall back to "md" when the issues argument is a file
 * path/glob rather than issue numbers. Returns null (after logging an
 * error) only when issue numbers were given but no datasource could be
 * detected.
 *
 * @param {string|string[]} issues - Issue numbers or file paths/globs.
 * @param {string|undefined} issueSource - Explicit datasource name, if any.
 * @param {string} cwd - Repository working directory.
 * @returns {Promise<string|null>} Datasource name or null.
 */
async function resolveSource(issues, issueSource, cwd) {
  if (issueSource) {
    return issueSource;
  }
  log.info("Detecting datasource from git remote...");
  const detectedSource = await detectDatasource(cwd);
  if (detectedSource) {
    log.info(`Detected datasource: ${detectedSource}`);
    return detectedSource;
  }
  if (!isIssueNumbers(issues)) {
    // File-based input works without any remote: use the markdown source.
    return "md";
  }
  log.error(
    `Could not detect datasource from the repository remote URL.
Supported sources: ${DATASOURCE_NAMES.join(", ")}
Use --source <name> to specify explicitly, or ensure the git remote
points to a supported platform (github.com, dev.azure.com).`
  );
  return null;
}
|
|
1605
|
+
|
|
1606
|
+
// src/orchestrator/runner.ts
// init_logger() is safe to call repeatedly: __esm (file header) runs the
// module body only once.
init_logger();

// src/helpers/confirm-large-batch.ts
init_logger();
import { input } from "@inquirer/prompts";
import chalk2 from "chalk";
|
|
1613
|
+
// Batches above this size require explicit confirmation.
var LARGE_BATCH_THRESHOLD = 100;
/**
 * Ask the user to confirm a large batch operation.
 * Counts at or below the threshold are auto-approved; otherwise the user
 * must type "yes" (case-insensitive, surrounding whitespace ignored).
 *
 * @param {number} count - Number of specs about to be processed.
 * @param {number} [threshold=LARGE_BATCH_THRESHOLD] - Auto-approve limit.
 * @returns {Promise<boolean>} True when the operation may proceed.
 */
async function confirmLargeBatch(count, threshold = LARGE_BATCH_THRESHOLD) {
  if (count <= threshold) {
    return true;
  }
  log.warn(
    `This operation will process ${chalk2.bold(String(count))} specs, which exceeds the safety threshold of ${threshold}.`
  );
  const reply = await input({
    message: `Type ${chalk2.bold('"yes"')} to proceed:`
  });
  return reply.trim().toLowerCase() === "yes";
}
|
|
1624
|
+
|
|
1625
|
+
// src/helpers/prereqs.ts
|
|
1626
|
+
import { execFile as execFile5 } from "child_process";
|
|
1627
|
+
import { promisify as promisify5 } from "util";
|
|
1628
|
+
var exec5 = promisify5(execFile5);
// Minimum supported Node.js version for this CLI.
var MIN_NODE_VERSION = "20.12.0";
/**
 * Parse a dotted version string into [major, minor, patch].
 * Missing components default to 0; non-numeric components become NaN
 * (matching Number() coercion).
 *
 * @param {string} version
 * @returns {number[]} Three-element [major, minor, patch] tuple.
 */
function parseSemver(version) {
  const parts = version.split(".").map(Number);
  return [parts[0] ?? 0, parts[1] ?? 0, parts[2] ?? 0];
}
/**
 * Is `current` >= `minimum` under semver ordering (major, then minor, then
 * patch)?
 *
 * @param {string} current
 * @param {string} minimum
 * @returns {boolean}
 */
function semverGte(current, minimum) {
  const cur = parseSemver(current);
  const min = parseSemver(minimum);
  for (let i = 0; i < 2; i++) {
    if (cur[i] !== min[i]) {
      return cur[i] > min[i];
    }
  }
  return cur[2] >= min[2];
}
|
|
1641
|
+
/**
 * Check environment prerequisites and collect human-readable failure
 * messages. Always checks for `git` on PATH and the minimum Node.js
 * version; additionally checks `gh` when the github datasource is in use
 * and `az` for azdevops.
 *
 * @param {{datasource?: string}|undefined} context - Optional; selects
 *   datasource-specific CLI checks.
 * @returns {Promise<string[]>} Failure messages; empty when all checks pass.
 */
async function checkPrereqs(context) {
  const failures = [];
  // A command "exists" if `<cmd> --version` runs without throwing.
  const commandAvailable = async (cmd) => {
    try {
      await exec5(cmd, ["--version"]);
      return true;
    } catch {
      return false;
    }
  };
  if (!(await commandAvailable("git"))) {
    failures.push("git is required but was not found on PATH. Install it from https://git-scm.com");
  }
  const nodeVersion = process.versions.node;
  if (!semverGte(nodeVersion, MIN_NODE_VERSION)) {
    failures.push(
      `Node.js >= ${MIN_NODE_VERSION} is required but found ${nodeVersion}. Please upgrade Node.js`
    );
  }
  if (context?.datasource === "github" && !(await commandAvailable("gh"))) {
    failures.push(
      "gh (GitHub CLI) is required for the github datasource but was not found on PATH. Install it from https://cli.github.com/"
    );
  }
  if (context?.datasource === "azdevops" && !(await commandAvailable("az"))) {
    failures.push(
      "az (Azure CLI) is required for the azdevops datasource but was not found on PATH. Install it from https://learn.microsoft.com/en-us/cli/azure/"
    );
  }
  return failures;
}
|
|
1674
|
+
|
|
1675
|
+
// src/helpers/gitignore.ts
|
|
1676
|
+
init_logger();
|
|
1677
|
+
import { readFile as readFile2, writeFile as writeFile2 } from "fs/promises";
|
|
1678
|
+
import { join as join2 } from "path";
|
|
1679
|
+
/**
 * Ensure `.gitignore` at the repo root contains `entry`, appending it when
 * missing. Failures to write are logged as warnings, never thrown.
 *
 * Fix: duplicate detection previously matched only the exact entry and its
 * slash-stripped form, so an existing "foo/" line did not prevent appending
 * a duplicate "foo" (and vice versa). We now treat "foo" and "foo/" as the
 * same entry in both directions.
 *
 * @param {string} repoRoot - repository root directory
 * @param {string} entry - gitignore line to ensure (may end with "/")
 * @returns {Promise<void>}
 */
async function ensureGitignoreEntry(repoRoot, entry) {
  const gitignorePath = join2(repoRoot, ".gitignore");
  let contents = "";
  try {
    contents = await readFile2(gitignorePath, "utf8");
  } catch {
    // Missing/unreadable .gitignore is fine — we create it on write below.
  }
  const lines = contents.split("\n").map((l) => l.trim());
  const bare = entry.replace(/\/$/, "");
  // Consider the entry present if any equivalent form already exists.
  if (lines.includes(entry) || lines.includes(bare) || lines.includes(`${bare}/`)) {
    return;
  }
  try {
    // Only add a separating newline when the file has content without one.
    const separator = contents.length > 0 && !contents.endsWith("\n") ? "\n" : "";
    await writeFile2(gitignorePath, `${contents}${separator}${entry}\n`, "utf8");
    log.debug(`Added '${entry}' to .gitignore`);
  } catch (err) {
    // Best-effort: an unwritable .gitignore should not abort the run.
    log.warn(`Could not update .gitignore: ${String(err)}`);
  }
}
|
|
1700
|
+
|
|
1701
|
+
// src/orchestrator/cli-config.ts
|
|
1702
|
+
init_logger();
|
|
1703
|
+
import { join as join4 } from "path";
|
|
1704
|
+
import { access } from "fs/promises";
|
|
1705
|
+
import { constants } from "fs";
|
|
1706
|
+
|
|
1707
|
+
// src/config.ts
|
|
1708
|
+
init_providers();
|
|
1709
|
+
import { readFile as readFile3, writeFile as writeFile3, mkdir as mkdir2 } from "fs/promises";
|
|
1710
|
+
import { join as join3, dirname } from "path";
|
|
1711
|
+
|
|
1712
|
+
// src/config-prompts.ts
|
|
1713
|
+
init_logger();
|
|
1714
|
+
import { select, confirm } from "@inquirer/prompts";
|
|
1715
|
+
import chalk3 from "chalk";
|
|
1716
|
+
init_providers();
|
|
1717
|
+
/**
 * Interactive configuration wizard: shows any existing config, lets the user
 * pick a provider / model / datasource, then optionally persists the result
 * via saveConfig().
 *
 * @param {string | undefined} configDir - directory holding config.json (default resolved by loadConfig)
 * @returns {Promise<void>}
 */
async function runInteractiveConfigWizard(configDir) {
  console.log();
  log.info(chalk3.bold("Dispatch Configuration Wizard"));
  console.log();
  const existing = await loadConfig(configDir);
  // When a config already exists, show it and offer an early exit.
  if (Object.keys(existing).length > 0) {
    log.dim("Current configuration:");
    for (const [key, value] of Object.entries(existing)) {
      if (value === void 0) continue;
      log.dim(` ${key} = ${value}`);
    }
    console.log();
    const reconfigure = await confirm({
      message: "Do you want to reconfigure?",
      default: true
    });
    if (!reconfigure) {
      log.dim("Configuration unchanged.");
      return;
    }
    console.log();
  }
  // Probe all providers in parallel so the picker can show install status dots.
  const installStatuses = await Promise.all(
    PROVIDER_NAMES.map((name) => checkProviderInstalled(name))
  );
  const statusDot = (installed) => installed ? chalk3.green("\u25CF") : chalk3.red("\u25CF");
  const provider = await select({
    message: "Select a provider:",
    choices: PROVIDER_NAMES.map((name, i) => ({
      name: `${statusDot(installStatuses[i])} ${name}`,
      value: name
    })),
    default: existing.provider
  });
  // Model selection is best-effort: keep the existing model on any failure.
  let selectedModel = existing.model;
  try {
    log.dim("Fetching available models...");
    const models = await listProviderModels(provider);
    if (models.length === 0) {
      log.dim("No models returned by provider \u2014 skipping model selection.");
    } else {
      const modelChoice = await select({
        message: "Select a model:",
        choices: [
          { name: "default (provider decides)", value: "" },
          ...models.map((m) => ({ name: m, value: m }))
        ],
        default: existing.model ?? ""
      });
      // Empty choice means "provider default" — store no model at all.
      selectedModel = modelChoice || void 0;
    }
  } catch {
    log.dim("Could not list models (provider may not be running) \u2014 skipping model selection.");
  }
  const detectedSource = await detectDatasource(process.cwd());
  if (detectedSource) {
    log.info(
      `Detected datasource ${chalk3.cyan(detectedSource)} from git remote`
    );
  }
  const selectedSource = await select({
    message: "Select a datasource:",
    choices: [
      {
        name: "auto",
        value: "auto",
        description: "detect from git remote at runtime"
      },
      ...DATASOURCE_NAMES.map((name) => ({ name, value: name }))
    ],
    default: existing.source ?? "auto"
  });
  // "auto" is represented as an absent source in the saved config.
  const newConfig = {
    provider,
    source: selectedSource === "auto" ? void 0 : selectedSource
  };
  if (selectedModel !== void 0) {
    newConfig.model = selectedModel;
  }
  console.log();
  log.info(chalk3.bold("Configuration summary:"));
  for (const [key, value] of Object.entries(newConfig)) {
    if (value === void 0) continue;
    console.log(` ${chalk3.cyan(key)} = ${value}`);
  }
  if (selectedSource === "auto") {
    console.log(
      ` ${chalk3.cyan("source")} = auto (detect from git remote at runtime)`
    );
  }
  console.log();
  const shouldSave = await confirm({
    message: "Save this configuration?",
    default: true
  });
  if (!shouldSave) {
    log.dim("Configuration not saved.");
    return;
  }
  await saveConfig(newConfig, configDir);
  log.success("Configuration saved.");
}
|
|
1825
|
+
|
|
1826
|
+
// src/config.ts
|
|
1827
|
+
// Keys that may appear in .dispatch/config.json (see CONFIG_TO_CLI for the
// mapping onto CLI argument fields).
var CONFIG_KEYS = "provider model source testTimeout".split(" ");
/**
 * Resolve the absolute path of the JSON config file.
 *
 * @param {string | undefined} configDir - explicit config directory; defaults
 *   to `<cwd>/.dispatch` when omitted.
 * @returns {string} path to config.json
 */
function getConfigPath(configDir) {
  const baseDir = configDir ?? join3(process.cwd(), ".dispatch");
  return join3(baseDir, "config.json");
}
|
|
1832
|
+
/**
 * Load the persisted configuration object from config.json.
 * Any failure (missing file, unreadable, malformed JSON) yields `{}` —
 * callers always receive a plain object.
 *
 * @param {string | undefined} configDir - optional config directory override
 * @returns {Promise<object>} parsed configuration, or an empty object
 */
async function loadConfig(configDir) {
  const configPath = getConfigPath(configDir);
  try {
    return JSON.parse(await readFile3(configPath, "utf-8"));
  } catch {
    return {};
  }
}
|
|
1841
|
+
/**
 * Persist the configuration object as pretty-printed JSON (trailing newline
 * included), creating the config directory if needed.
 *
 * @param {object} config - configuration to write
 * @param {string | undefined} configDir - optional config directory override
 * @returns {Promise<void>}
 */
async function saveConfig(config, configDir) {
  const configPath = getConfigPath(configDir);
  await mkdir2(dirname(configPath), { recursive: true });
  const serialized = `${JSON.stringify(config, null, 2)}\n`;
  await writeFile3(configPath, serialized, "utf-8");
}
|
|
1846
|
+
/**
 * `dispatch config` entry point — a thin shim: all work happens in the
 * interactive wizard.
 *
 * @param {unknown} _argv - parsed CLI arguments (unused)
 * @param {string | undefined} configDir - optional config directory override
 * @returns {Promise<void>}
 */
async function handleConfigCommand(_argv, configDir) {
  return runInteractiveConfigWizard(configDir);
}
|
|
1849
|
+
|
|
1850
|
+
// src/orchestrator/cli-config.ts
|
|
1851
|
+
// Mapping from config.json keys to the corresponding CLI argument fields.
// Only "source" differs in name; the rest map one-to-one.
var CONFIG_TO_CLI = Object.fromEntries([
  ["provider", "provider"],
  ["model", "model"],
  ["source", "issueSource"],
  ["testTimeout", "testTimeout"]
]);
/**
 * Write a single dynamically-keyed field onto a CLI-args object.
 * Kept as a helper so call sites stay free of index-signature casts.
 *
 * @param {object} target - CLI args object to mutate
 * @param {string} key - field name
 * @param {unknown} value - field value
 */
function setCliField(target, key, value) {
  Object.assign(target, { [key]: value });
}
|
|
1860
|
+
/**
 * Merge persisted config into parsed CLI args and validate the result.
 * Precedence: explicit CLI flags > config.json values > auto-detection.
 * Exits the process (code 1) on unrecoverable configuration errors.
 *
 * @param {object} args - parsed CLI args, including `explicitFlags` (Set) and `cwd`
 * @returns {Promise<object>} the merged, validated args object
 */
async function resolveCliConfig(args) {
  const { explicitFlags } = args;
  const configDir = join4(args.cwd, ".dispatch");
  const config = await loadConfig(configDir);
  // Config values only fill fields the user did not pass explicitly.
  const merged = { ...args };
  for (const configKey of CONFIG_KEYS) {
    const cliField = CONFIG_TO_CLI[configKey];
    const configValue = config[configKey];
    if (configValue === void 0 || explicitFlags.has(cliField)) continue;
    setCliField(merged, cliField, configValue);
  }
  // A provider must come from either the CLI or the config file.
  if (!explicitFlags.has("provider") && config.provider === void 0) {
    log.error("Missing required configuration: provider");
    log.dim(" Run 'dispatch config' to configure defaults interactively.");
    log.dim(" Or pass it as a CLI flag: --provider <name>");
    process.exit(1);
  }
  // Fail fast on an unusable output directory rather than midway through a run.
  if (merged.outputDir) {
    try {
      await access(merged.outputDir, constants.W_OK);
    } catch {
      log.error(
        `--output-dir path does not exist or is not writable: ${merged.outputDir}`
      );
      process.exit(1);
    }
  }
  const sourceConfigured = explicitFlags.has("issueSource") || config.source !== void 0;
  // Spec/respec/fix-tests runs do not read issues, so no datasource is needed.
  const needsSource = !merged.fixTests && !merged.spec && !merged.respec;
  if (needsSource && !sourceConfigured) {
    const detected = await detectDatasource(merged.cwd);
    if (!detected) {
      log.error("Datasource auto-detection failed \u2014 could not determine issue source from git remote.");
      log.dim(` Available datasources: ${DATASOURCE_NAMES.join(", ")}`);
      log.dim(" Run 'dispatch config' to configure defaults interactively.");
      log.dim(" Or pass it as a CLI flag: --issue-source <name>");
      process.exit(1);
    }
    log.info(`Auto-detected datasource from git remote: ${detected}`);
    merged.issueSource = detected;
  }
  log.verbose = merged.verbose;
  return merged;
}
|
|
1907
|
+
|
|
1908
|
+
// src/orchestrator/spec-pipeline.ts
|
|
1909
|
+
import { join as join6 } from "path";
|
|
1910
|
+
import { mkdir as mkdir4, readFile as readFile5, rename as rename2, unlink as unlink2 } from "fs/promises";
|
|
1911
|
+
import { glob } from "glob";
|
|
1912
|
+
init_providers();
|
|
1913
|
+
|
|
1914
|
+
// src/agents/spec.ts
|
|
1915
|
+
import { mkdir as mkdir3, readFile as readFile4, writeFile as writeFile4, unlink } from "fs/promises";
|
|
1916
|
+
import { join as join5 } from "path";
|
|
1917
|
+
import { randomUUID as randomUUID3 } from "crypto";
|
|
1918
|
+
init_logger();
|
|
1919
|
+
/**
 * Boot the spec agent. Returns an agent object whose `generate()` drives the
 * provider to write a spec to a temp file, post-processes it, validates its
 * structure, and persists the cleaned result to `outputPath`.
 *
 * @param {{ provider: object }} opts - must include a provider instance
 * @returns {Promise<object>} spec agent with `generate()` and `cleanup()`
 * @throws {Error} when no provider instance is supplied
 */
async function boot5(opts) {
  const { provider } = opts;
  if (!provider) {
    throw new Error("Spec agent requires a provider instance in boot options");
  }
  // Shared shape for every failure exit from generate().
  const failure = (error) => ({
    content: "",
    success: false,
    error,
    valid: false
  });
  return {
    name: "spec",
    async generate(genOpts) {
      const { issue, filePath, fileContent, inlineText, cwd: workingDir, outputPath } = genOpts;
      try {
        // The agent writes into a throwaway file first; only the cleaned
        // content reaches outputPath.
        const tmpDir = join5(workingDir, ".dispatch", "tmp");
        await mkdir3(tmpDir, { recursive: true });
        const tmpPath = join5(tmpDir, `spec-${randomUUID3()}.md`);
        // Pick the prompt builder matching whichever input was supplied.
        let prompt;
        if (issue) {
          prompt = buildSpecPrompt(issue, workingDir, tmpPath);
        } else if (inlineText) {
          prompt = buildInlineTextSpecPrompt(inlineText, workingDir, tmpPath);
        } else if (filePath && fileContent !== void 0) {
          prompt = buildFileSpecPrompt(filePath, fileContent, workingDir, tmpPath);
        } else {
          return failure("Either issue, inlineText, or filePath+fileContent must be provided");
        }
        const sessionId = await provider.createSession();
        log.debug(`Spec prompt built (${prompt.length} chars)`);
        const response = await provider.prompt(sessionId, prompt);
        if (response === null) {
          return failure("AI agent returned no response");
        }
        log.debug(`Spec agent response (${response.length} chars)`);
        // The agent is instructed to write the spec file itself; a missing
        // file means it replied conversationally instead.
        let rawContent;
        try {
          rawContent = await readFile4(tmpPath, "utf-8");
        } catch {
          return failure(`Spec agent did not write the file to ${tmpPath}. Agent response: ${response.slice(0, 300)}`);
        }
        const cleanedContent = extractSpecContent(rawContent);
        log.debug(`Post-processed spec (${rawContent.length} \u2192 ${cleanedContent.length} chars)`);
        const validation = validateSpecStructure(cleanedContent);
        if (!validation.valid) {
          // Structural problems are non-fatal: warn and keep the spec.
          log.warn(`Spec validation warning for ${outputPath}: ${validation.reason}`);
        }
        await writeFile4(outputPath, cleanedContent, "utf-8");
        log.debug(`Wrote cleaned spec to ${outputPath}`);
        try {
          await unlink(tmpPath);
        } catch {
          // Best-effort cleanup; a leftover tmp file is harmless.
        }
        return {
          content: cleanedContent,
          success: true,
          valid: validation.valid,
          validationReason: validation.reason
        };
      } catch (err) {
        return failure(log.extractMessage(err));
      }
    },
    async cleanup() {
      // Nothing to release: sessions are owned by the provider.
    }
  };
}
|
|
2003
|
+
/**
 * Build the spec-agent prompt for an issue-driven run. Pure string assembly:
 * role preamble + hard output constraints, issue details, working directory,
 * instructions, and the exact markdown structure the agent must emit.
 *
 * @param {object} issue - issue with number/title/state/url/labels/body/acceptanceCriteria/comments
 * @param {string} cwd - working directory the agent operates in
 * @param {string} outputPath - temp path the agent must write the spec to
 * @returns {string} the full prompt, newline-joined
 */
function buildSpecPrompt(issue, cwd, outputPath) {
  const parts = [];
  const push = (...rows) => {
    parts.push(...rows);
  };
  // Role preamble and hard output constraints.
  push(
    `You are a **spec agent**. Your job is to explore the codebase, understand the issue below, and write a high-level **markdown spec file** to disk that will drive an automated implementation pipeline.`,
    ``,
    `**Important:** This file will be consumed by a two-stage pipeline:`,
    `1. A **planner agent** reads each task together with the prose context in this file, then explores the codebase to produce a detailed, line-level implementation plan.`,
    `2. A **coder agent** follows that detailed plan to make the actual code changes.`,
    ``,
    `Because the planner agent handles low-level details, your spec must stay **high-level and strategic**. Focus on the WHAT, WHY, and HOW \u2014 not exact code or line numbers.`,
    ``,
    `**CRITICAL \u2014 Output constraints (read carefully):**`,
    `The file you write must contain ONLY the structured spec content described below. You MUST NOT include:`,
    `- **No preamble:** Do not add any text before the H1 heading (e.g., "Here's the spec:", "I've written the spec file to...")`,
    `- **No postamble:** Do not add any text after the last spec section (e.g., "Let me know if you'd like changes", "Here's a summary of...")`,
    `- **No summaries:** Do not append a summary or recap of what you wrote`,
    `- **No code fences:** Do not wrap the spec content in \`\`\`markdown ... \`\`\` or any other code fence`,
    `- **No conversational text:** Do not include any explanations, commentary, or dialogue \u2014 the file is consumed by an automated pipeline, not a human`,
    `The file content must start with \`# \` (the H1 heading) and contain nothing before or after the structured spec sections.`,
    ``
  );
  // Issue metadata.
  push(
    `## Issue Details`,
    ``,
    `- **Number:** #${issue.number}`,
    `- **Title:** ${issue.title}`,
    `- **State:** ${issue.state}`,
    `- **URL:** ${issue.url}`
  );
  if (issue.labels.length > 0) {
    push(`- **Labels:** ${issue.labels.join(", ")}`);
  }
  if (issue.body) {
    push(``, `### Description`, ``, issue.body);
  }
  if (issue.acceptanceCriteria) {
    push(``, `### Acceptance Criteria`, ``, issue.acceptanceCriteria);
  }
  if (issue.comments.length > 0) {
    push(``, `### Discussion`, ``);
    for (const comment of issue.comments) {
      push(comment, ``);
    }
  }
  // Working directory, instructions, and the required spec structure.
  push(
    ``,
    `## Working Directory`,
    ``,
    `\`${cwd}\``,
    ``,
    `## Instructions`,
    ``,
    `1. **Explore the codebase** \u2014 read relevant files, search for symbols, understand the project structure, language, frameworks, conventions, and patterns. Identify the tech stack (languages, package managers, frameworks, test runners) so your spec aligns with the project's actual standards.`,
    ``,
    `2. **Understand the issue** \u2014 analyze the issue description, acceptance criteria, and discussion comments to fully understand what needs to be done and why.`,
    ``,
    `3. **Research the approach** \u2014 look up relevant documentation, libraries, and patterns. Consider how the change integrates with the existing architecture, standards, and technologies already in use. For example, if the project is TypeScript, do not propose a Python solution; if it uses Vitest, do not suggest Jest.`,
    ``,
    `4. **Identify integration points** \u2014 determine which existing modules, interfaces, patterns, and conventions the implementation must align with. Note the key files and modules involved, but do NOT prescribe exact code changes \u2014 the planner agent will handle that.`,
    ``,
    `5. **DO NOT make any code changes** \u2014 you are only producing a spec, not implementing.`,
    ``,
    `## Output`,
    ``,
    `Write the complete spec as a markdown file to this exact path:`,
    ``,
    `\`${outputPath}\``,
    ``,
    `Use your Write tool to save the file. The file content MUST begin with the H1 heading \u2014 no preamble, no code fences, no conversational text before it. Do not add any text after the final spec section \u2014 no postamble, no summary, no commentary. The file must follow this structure exactly:`,
    ``,
    `# <Issue title> (#<number>)`,
    ``,
    `> <One-line summary: what this issue achieves and why it matters>`,
    ``,
    `## Context`,
    ``,
    `<Describe the relevant parts of the codebase: key modules, directory structure,`,
    `language/framework, and architectural patterns. Name specific files and modules`,
    `that are involved so the planner agent knows where to look, but do not include`,
    `code snippets or line-level details.>`,
    ``,
    `## Why`,
    ``,
    `<Explain the motivation \u2014 why this change is needed, what problem it solves,`,
    `what user or system benefit it provides. Pull from the issue description,`,
    `acceptance criteria, and discussion.>`,
    ``,
    `## Approach`,
    ``,
    `<High-level description of the implementation strategy. Explain the overall`,
    `approach, which patterns to follow, what to extend vs. create new, and how`,
    `the change fits into the existing architecture. Mention relevant standards,`,
    `technologies, and conventions the implementation MUST align with.>`,
    ``,
    `## Integration Points`,
    ``,
    `<List the specific modules, interfaces, configurations, and conventions that`,
    `the implementation must integrate with. For example: existing provider`,
    `interfaces to implement, CLI argument patterns to follow, test framework`,
    `and conventions to match, build system requirements, etc.>`,
    ``,
    `## Tasks`,
    ``,
    `Each task MUST be prefixed with an execution-mode tag:`,
    ``,
    `- \`(P)\` \u2014 **Parallel-safe.** This task has no dependency on the output of a prior task and can run concurrently with other \`(P)\` tasks.`,
    `- \`(S)\` \u2014 **Serial / dependent.** This task depends on a prior task's output or modifies shared state that conflicts with concurrent work. It acts as a barrier: all preceding tasks complete before it starts, and it completes before subsequent tasks begin.`,
    `- \`(I)\` \u2014 **Isolated / barrier.** This task must run alone after all preceding tasks complete and before any subsequent tasks begin. Use for validation tasks like running tests, linting, or builds that read the output of prior tasks.`,
    ``,
    `**Default to \`(P)\`.** Most tasks are independent (e.g., adding a function in one module, writing tests in another). Only use \`(S)\` when a task genuinely depends on the result of a prior task (e.g., "refactor module X" followed by "update callers of module X"). Use \`(I)\` for validation or barrier tasks that must run alone after all prior work completes (e.g., "run tests", "run linting", "build the project").`,
    ``,
    `If a task has no \`(P)\`, \`(S)\`, or \`(I)\` prefix, the system treats it as serial, so always tag explicitly.`,
    ``,
    `Example:`,
    ``,
    `- [ ] (P) Add validation helper to the form utils module`,
    `- [ ] (P) Add unit tests for the new validation helper`,
    `- [ ] (S) Refactor the form component to use the new validation helper`,
    `- [ ] (P) Update documentation for the form utils module`,
    `- [ ] (I) Run the full test suite to verify all changes pass`,
    ``,
    ``,
    `## References`,
    ``,
    `- <Links to relevant docs, related issues, or external resources>`,
    ``,
    `## Key Guidelines`,
    ``,
    `- **Stay high-level.** Do NOT include code snippets, exact line numbers, diffs, or step-by-step coding instructions. A dedicated planner agent will produce those details for each task at execution time.`,
    `- **Respect the project's stack.** Your spec must align with the languages, frameworks, libraries, test tools, and conventions already in use. Never suggest technologies that conflict with the existing project.`,
    `- **Explain WHAT, WHY, and HOW (strategically).** Each task should say what needs to happen, why it's needed, and which part of the codebase it touches \u2014 but leave the tactical "how" to the planner agent.`,
    `- **Detail integration points.** The prose sections (Context, Approach, Integration Points) are critical \u2014 they tell the planner agent where to look and what constraints to respect.`,
    `- **Keep tasks atomic and ordered.** Each \`- [ ]\` task must be a single, clear unit of work. Order them so dependencies come first.`,
    `- **Tag every task with \`(P)\`, \`(S)\`, or \`(I)\`.** Default to \`(P)\` (parallel) unless the task depends on a prior task's output. Use \`(I)\` for validation/barrier tasks. Group related serial dependencies together and prefer parallelism to maximize throughput.`,
    `- **Embed commit instructions within task descriptions.** You control when commits happen. Instead of creating standalone commit tasks (which would fail \u2014 each task runs in an isolated agent session), include commit instructions at the end of implementation task descriptions at logical boundaries. For example: "Implement the validation helper and commit with a conventional commit message." Group related changes into a single commit where it makes logical sense, and use the project's conventional commit types: \`feat\`, \`fix\`, \`docs\`, \`refactor\`, \`test\`, \`chore\`, \`style\`, \`perf\`, \`ci\`. Not every task needs a commit instruction \u2014 use your judgment to place them at logical boundaries.`,
    `- **Keep the markdown clean** \u2014 it will be parsed by an automated tool.`
  );
  return parts.join("\n");
}
|
|
2139
|
+
/**
 * Build the spec-agent prompt for a file-driven run (e.g. `--spec <file>`).
 * Mirrors buildSpecPrompt() but sources context from a file instead of an
 * issue; by default the agent rewrites the source file in place unless an
 * explicit output path is given.
 *
 * @param {string} filePath - path of the input file
 * @param {string} content - the file's content (may be empty)
 * @param {string} cwd - working directory the agent operates in
 * @param {string | undefined} outputPath - where the agent must write the spec; defaults to filePath
 * @returns {string} the full prompt, newline-joined
 */
function buildFileSpecPrompt(filePath, content, cwd, outputPath) {
  const title = extractTitle(content, filePath);
  const writePath = outputPath ?? filePath;
  const parts = [];
  const push = (...rows) => {
    parts.push(...rows);
  };
  // Role preamble and hard output constraints.
  push(
    `You are a **spec agent**. Your job is to explore the codebase, understand the content below, and write a high-level **markdown spec file** to disk that will drive an automated implementation pipeline.`,
    ``,
    `**Important:** This file will be consumed by a two-stage pipeline:`,
    `1. A **planner agent** reads each task together with the prose context in this file, then explores the codebase to produce a detailed, line-level implementation plan.`,
    `2. A **coder agent** follows that detailed plan to make the actual code changes.`,
    ``,
    `Because the planner agent handles low-level details, your spec must stay **high-level and strategic**. Focus on the WHAT, WHY, and HOW \u2014 not exact code or line numbers.`,
    ``,
    `**CRITICAL \u2014 Output constraints (read carefully):**`,
    `The file you write must contain ONLY the structured spec content described below. You MUST NOT include:`,
    `- **No preamble:** Do not add any text before the H1 heading (e.g., "Here's the spec:", "I've written the spec file to...")`,
    `- **No postamble:** Do not add any text after the last spec section (e.g., "Let me know if you'd like changes", "Here's a summary of...")`,
    `- **No summaries:** Do not append a summary or recap of what you wrote`,
    `- **No code fences:** Do not wrap the spec content in \`\`\`markdown ... \`\`\` or any other code fence`,
    `- **No conversational text:** Do not include any explanations, commentary, or dialogue \u2014 the file is consumed by an automated pipeline, not a human`,
    `The file content must start with \`# \` (the H1 heading) and contain nothing before or after the structured spec sections.`,
    ``
  );
  // Source-file metadata and content.
  push(
    `## File Details`,
    ``,
    `- **Title:** ${title}`,
    `- **Source file:** ${filePath}`
  );
  if (content) {
    push(``, `### Content`, ``, content);
  }
  // Working directory, instructions, and the required spec structure.
  push(
    ``,
    `## Working Directory`,
    ``,
    `\`${cwd}\``,
    ``,
    `## Instructions`,
    ``,
    `1. **Explore the codebase** \u2014 read relevant files, search for symbols, understand the project structure, language, frameworks, conventions, and patterns. Identify the tech stack (languages, package managers, frameworks, test runners) so your spec aligns with the project's actual standards.`,
    ``,
    `2. **Understand the content** \u2014 analyze the file content to fully understand what needs to be done and why.`,
    ``,
    `3. **Research the approach** \u2014 look up relevant documentation, libraries, and patterns. Consider how the change integrates with the existing architecture, standards, and technologies already in use. For example, if the project is TypeScript, do not propose a Python solution; if it uses Vitest, do not suggest Jest.`,
    ``,
    `4. **Identify integration points** \u2014 determine which existing modules, interfaces, patterns, and conventions the implementation must align with. Note the key files and modules involved, but do NOT prescribe exact code changes \u2014 the planner agent will handle that.`,
    ``,
    `5. **DO NOT make any code changes** \u2014 you are only producing a spec, not implementing.`,
    ``,
    `## Output`,
    ``,
    `Write the complete spec as a markdown file to this exact path:`,
    ``,
    `\`${writePath}\``,
    ``,
    `Use your Write tool to save the file. The file content MUST begin with the H1 heading \u2014 no preamble, no code fences, no conversational text before it. Do not add any text after the final spec section \u2014 no postamble, no summary, no commentary. The file must follow this structure exactly:`,
    ``,
    `# <Title>`,
    ``,
    `> <One-line summary: what this achieves and why it matters>`,
    ``,
    `## Context`,
    ``,
    `<Describe the relevant parts of the codebase: key modules, directory structure,`,
    `language/framework, and architectural patterns. Name specific files and modules`,
    `that are involved so the planner agent knows where to look, but do not include`,
    `code snippets or line-level details.>`,
    ``,
    `## Why`,
    ``,
    `<Explain the motivation \u2014 why this change is needed, what problem it solves,`,
    `what user or system benefit it provides. Pull from the file content.>`,
    ``,
    `## Approach`,
    ``,
    `<High-level description of the implementation strategy. Explain the overall`,
    `approach, which patterns to follow, what to extend vs. create new, and how`,
    `the change fits into the existing architecture. Mention relevant standards,`,
    `technologies, and conventions the implementation MUST align with.>`,
    ``,
    `## Integration Points`,
    ``,
    `<List the specific modules, interfaces, configurations, and conventions that`,
    `the implementation must integrate with. For example: existing provider`,
    `interfaces to implement, CLI argument patterns to follow, test framework`,
    `and conventions to match, build system requirements, etc.>`,
    ``,
    `## Tasks`,
    ``,
    `Each task MUST be prefixed with an execution-mode tag:`,
    ``,
    `- \`(P)\` \u2014 **Parallel-safe.** This task has no dependency on the output of a prior task and can run concurrently with other \`(P)\` tasks.`,
    `- \`(S)\` \u2014 **Serial / dependent.** This task depends on a prior task's output or modifies shared state that conflicts with concurrent work. It acts as a barrier: all preceding tasks complete before it starts, and it completes before subsequent tasks begin.`,
    `- \`(I)\` \u2014 **Isolated / barrier.** This task must run alone after all preceding tasks complete and before any subsequent tasks begin. Use for validation tasks like running tests, linting, or builds that read the output of prior tasks.`,
    ``,
    `**Default to \`(P)\`.** Most tasks are independent (e.g., adding a function in one module, writing tests in another). Only use \`(S)\` when a task genuinely depends on the result of a prior task (e.g., "refactor module X" followed by "update callers of module X"). Use \`(I)\` for validation or barrier tasks that must run alone after all prior work completes (e.g., "run tests", "run linting", "build the project").`,
    ``,
    `If a task has no \`(P)\`, \`(S)\`, or \`(I)\` prefix, the system treats it as serial, so always tag explicitly.`,
    ``,
    `Example:`,
    ``,
    `- [ ] (P) Add validation helper to the form utils module`,
    `- [ ] (P) Add unit tests for the new validation helper`,
    `- [ ] (S) Refactor the form component to use the new validation helper`,
    `- [ ] (P) Update documentation for the form utils module`,
    `- [ ] (I) Run the full test suite to verify all changes pass`,
    ``,
    ``,
    `## References`,
    ``,
    `- <Links to relevant docs, related issues, or external resources>`,
    ``,
    `## Key Guidelines`,
    ``,
    `- **Stay high-level.** Do NOT include code snippets, exact line numbers, diffs, or step-by-step coding instructions. A dedicated planner agent will produce those details for each task at execution time.`,
    `- **Respect the project's stack.** Your spec must align with the languages, frameworks, libraries, test tools, and conventions already in use. Never suggest technologies that conflict with the existing project.`,
    `- **Explain WHAT, WHY, and HOW (strategically).** Each task should say what needs to happen, why it's needed, and which part of the codebase it touches \u2014 but leave the tactical "how" to the planner agent.`,
    `- **Detail integration points.** The prose sections (Context, Approach, Integration Points) are critical \u2014 they tell the planner agent where to look and what constraints to respect.`,
    `- **Keep tasks atomic and ordered.** Each \`- [ ]\` task must be a single, clear unit of work. Order them so dependencies come first.`,
    `- **Tag every task with \`(P)\`, \`(S)\`, or \`(I)\`.** Default to \`(P)\` (parallel) unless the task depends on a prior task's output. Use \`(I)\` for validation/barrier tasks. Group related serial dependencies together and prefer parallelism to maximize throughput.`,
    `- **Embed commit instructions within task descriptions.** You control when commits happen. Instead of creating standalone commit tasks (which would fail \u2014 each task runs in an isolated agent session), include commit instructions at the end of implementation task descriptions at logical boundaries. For example: "Implement the validation helper and commit with a conventional commit message." Group related changes into a single commit where it makes logical sense, and use the project's conventional commit types: \`feat\`, \`fix\`, \`docs\`, \`refactor\`, \`test\`, \`chore\`, \`style\`, \`perf\`, \`ci\`. Not every task needs a commit instruction \u2014 use your judgment to place them at logical boundaries.`,
    `- **Keep the markdown clean** \u2014 it will be parsed by an automated tool.`
  );
  return parts.join("\n");
}
|
|
2262
|
+
/**
 * Build the full prompt for the spec agent when the input is free-form inline
 * text (rather than a tracker issue or a file on disk).
 *
 * @param {string} text - The raw inline request text; used verbatim as the
 *   spec's Description and (truncated) as its Title.
 * @param {string} cwd - Working directory the spec agent should explore.
 * @param {string} outputPath - Exact path the agent must write the spec to.
 * @returns {string} The complete prompt, sections joined with newlines.
 */
function buildInlineTextSpecPrompt(text, cwd, outputPath) {
  // Titles longer than 80 chars are truncated with a trailing ellipsis.
  const title = text.length > 80 ? text.slice(0, 80).trimEnd() + "\u2026" : text;
  // Fixed preamble: explains the two-stage pipeline and the hard output
  // constraints (no preamble/postamble/fences), then embeds the inline request.
  const sections = [
    `You are a **spec agent**. Your job is to explore the codebase, understand the request below, and write a high-level **markdown spec file** to disk that will drive an automated implementation pipeline.`,
    ``,
    `**Important:** This file will be consumed by a two-stage pipeline:`,
    `1. A **planner agent** reads each task together with the prose context in this file, then explores the codebase to produce a detailed, line-level implementation plan.`,
    `2. A **coder agent** follows that detailed plan to make the actual code changes.`,
    ``,
    `Because the planner agent handles low-level details, your spec must stay **high-level and strategic**. Focus on the WHAT, WHY, and HOW \u2014 not exact code or line numbers.`,
    ``,
    `**CRITICAL \u2014 Output constraints (read carefully):**`,
    `The file you write must contain ONLY the structured spec content described below. You MUST NOT include:`,
    `- **No preamble:** Do not add any text before the H1 heading (e.g., "Here's the spec:", "I've written the spec file to...")`,
    `- **No postamble:** Do not add any text after the last spec section (e.g., "Let me know if you'd like changes", "Here's a summary of...")`,
    `- **No summaries:** Do not append a summary or recap of what you wrote`,
    `- **No code fences:** Do not wrap the spec content in \`\`\`markdown ... \`\`\` or any other code fence`,
    `- **No conversational text:** Do not include any explanations, commentary, or dialogue \u2014 the file is consumed by an automated pipeline, not a human`,
    `The file content must start with \`# \` (the H1 heading) and contain nothing before or after the structured spec sections.`,
    ``,
    `## Inline Text`,
    ``,
    `- **Title:** ${title}`,
    ``,
    `### Description`,
    ``,
    text
  ];
  // Second half: working directory, step-by-step instructions, and the exact
  // markdown skeleton the generated spec must follow.
  sections.push(
    ``,
    `## Working Directory`,
    ``,
    `\`${cwd}\``,
    ``,
    `## Instructions`,
    ``,
    `1. **Explore the codebase** \u2014 read relevant files, search for symbols, understand the project structure, language, frameworks, conventions, and patterns. Identify the tech stack (languages, package managers, frameworks, test runners) so your spec aligns with the project's actual standards.`,
    ``,
    `2. **Understand the request** \u2014 analyze the inline text to fully understand what needs to be done and why. Since this is a brief description rather than a detailed issue or document, you may need to infer details from the codebase.`,
    ``,
    `3. **Research the approach** \u2014 look up relevant documentation, libraries, and patterns. Consider how the change integrates with the existing architecture, standards, and technologies already in use. For example, if the project is TypeScript, do not propose a Python solution; if it uses Vitest, do not suggest Jest.`,
    ``,
    `4. **Identify integration points** \u2014 determine which existing modules, interfaces, patterns, and conventions the implementation must align with. Note the key files and modules involved, but do NOT prescribe exact code changes \u2014 the planner agent will handle that.`,
    ``,
    `5. **DO NOT make any code changes** \u2014 you are only producing a spec, not implementing.`,
    ``,
    `## Output`,
    ``,
    `Write the complete spec as a markdown file to this exact path:`,
    ``,
    `\`${outputPath}\``,
    ``,
    `Use your Write tool to save the file. The file content MUST begin with the H1 heading \u2014 no preamble, no code fences, no conversational text before it. Do not add any text after the final spec section \u2014 no postamble, no summary, no commentary. The file must follow this structure exactly:`,
    ``,
    `# <Title>`,
    ``,
    `> <One-line summary: what this achieves and why it matters>`,
    ``,
    `## Context`,
    ``,
    `<Describe the relevant parts of the codebase: key modules, directory structure,`,
    `language/framework, and architectural patterns. Name specific files and modules`,
    `that are involved so the planner agent knows where to look, but do not include`,
    `code snippets or line-level details.>`,
    ``,
    `## Why`,
    ``,
    `<Explain the motivation \u2014 why this change is needed, what problem it solves,`,
    `what user or system benefit it provides. Pull from the inline text description.>`,
    ``,
    `## Approach`,
    ``,
    `<High-level description of the implementation strategy. Explain the overall`,
    `approach, which patterns to follow, what to extend vs. create new, and how`,
    `the change fits into the existing architecture. Mention relevant standards,`,
    `technologies, and conventions the implementation MUST align with.>`,
    ``,
    `## Integration Points`,
    ``,
    `<List the specific modules, interfaces, configurations, and conventions that`,
    `the implementation must integrate with. For example: existing provider`,
    `interfaces to implement, CLI argument patterns to follow, test framework`,
    `and conventions to match, build system requirements, etc.>`,
    ``,
    `## Tasks`,
    ``,
    `Each task MUST be prefixed with an execution-mode tag:`,
    ``,
    `- \`(P)\` \u2014 **Parallel-safe.** This task has no dependency on the output of a prior task and can run concurrently with other \`(P)\` tasks.`,
    `- \`(S)\` \u2014 **Serial / dependent.** This task depends on a prior task's output or modifies shared state that conflicts with concurrent work. It acts as a barrier: all preceding tasks complete before it starts, and it completes before subsequent tasks begin.`,
    `- \`(I)\` \u2014 **Isolated / barrier.** This task must run alone after all preceding tasks complete and before any subsequent tasks begin. Use for validation tasks like running tests, linting, or builds that read the output of prior tasks.`,
    ``,
    `**Default to \`(P)\`.** Most tasks are independent (e.g., adding a function in one module, writing tests in another). Only use \`(S)\` when a task genuinely depends on the result of a prior task (e.g., "refactor module X" followed by "update callers of module X"). Use \`(I)\` for validation or barrier tasks that must run alone after all prior work completes (e.g., "run tests", "run linting", "build the project").`,
    ``,
    `If a task has no \`(P)\`, \`(S)\`, or \`(I)\` prefix, the system treats it as serial, so always tag explicitly.`,
    ``,
    `Example:`,
    ``,
    `- [ ] (P) Add validation helper to the form utils module`,
    `- [ ] (P) Add unit tests for the new validation helper`,
    `- [ ] (S) Refactor the form component to use the new validation helper`,
    `- [ ] (P) Update documentation for the form utils module`,
    `- [ ] (I) Run the full test suite to verify all changes pass`,
    ``,
    ``,
    `## References`,
    ``,
    `- <Links to relevant docs, related issues, or external resources>`,
    ``,
    `## Key Guidelines`,
    ``,
    `- **Stay high-level.** Do NOT include code snippets, exact line numbers, diffs, or step-by-step coding instructions. A dedicated planner agent will produce those details for each task at execution time.`,
    `- **Respect the project's stack.** Your spec must align with the languages, frameworks, libraries, test tools, and conventions already in use. Never suggest technologies that conflict with the existing project.`,
    `- **Explain WHAT, WHY, and HOW (strategically).** Each task should say what needs to happen, why it's needed, and which part of the codebase it touches \u2014 but leave the tactical "how" to the planner agent.`,
    `- **Detail integration points.** The prose sections (Context, Approach, Integration Points) are critical \u2014 they tell the planner agent where to look and what constraints to respect.`,
    `- **Keep tasks atomic and ordered.** Each \`- [ ]\` task must be a single, clear unit of work. Order them so dependencies come first.`,
    `- **Tag every task with \`(P)\`, \`(S)\`, or \`(I)\`.** Default to \`(P)\` (parallel) unless the task depends on a prior task's output. Use \`(I)\` for validation/barrier tasks. Group related serial dependencies together and prefer parallelism to maximize throughput.`,
    `- **Embed commit instructions within task descriptions.** You control when commits happen. Instead of creating standalone commit tasks (which would fail \u2014 each task runs in an isolated agent session), include commit instructions at the end of implementation task descriptions at logical boundaries. For example: "Implement the validation helper and commit with a conventional commit message." Group related changes into a single commit where it makes logical sense, and use the project's conventional commit types: \`feat\`, \`fix\`, \`docs\`, \`refactor\`, \`test\`, \`chore\`, \`style\`, \`perf\`, \`ci\`. Not every task needs a commit instruction \u2014 use your judgment to place them at logical boundaries.`,
    `- **Keep the markdown clean** \u2014 it will be parsed by an automated tool.`
  );
  return sections.join("\n");
}
|
|
2384
|
+
|
|
2385
|
+
// src/orchestrator/spec-pipeline.ts
|
|
2386
|
+
init_cleanup();
|
|
2387
|
+
init_logger();
|
|
2388
|
+
import chalk5 from "chalk";
|
|
2389
|
+
|
|
2390
|
+
// src/helpers/format.ts
|
|
2391
|
+
import chalk4 from "chalk";
|
|
2392
|
+
/**
 * Format a millisecond duration as a short human-readable string:
 * "Ns" under a minute, "Mm Ns" otherwise. Fractions of a second are
 * floored away.
 */
function elapsed(ms) {
  const totalSeconds = Math.floor(ms / 1e3);
  const minutes = Math.floor(totalSeconds / 60);
  const seconds = totalSeconds % 60;
  return minutes > 0 ? `${minutes}m ${seconds}s` : `${seconds}s`;
}
|
|
2399
|
+
/**
 * Render the CLI banner lines: the product name plus dimmed provider/model/
 * source metadata, each included only when present on `info`.
 *
 * @param {{provider?: string, model?: string, source?: string}} info
 * @returns {string[]} Styled lines ready to be written with console.log.
 */
function renderHeaderLines(info) {
  const lines = [];
  // Bold name with a dimmed tagline on the same line.
  lines.push(chalk4.bold.white(" \u26A1 dispatch") + chalk4.dim(` \u2014 AI task orchestration`));
  // Optional metadata lines, omitted when the field is falsy.
  if (info.provider) {
    lines.push(chalk4.dim(` provider: ${info.provider}`));
  }
  if (info.model) {
    lines.push(chalk4.dim(` model: ${info.model}`));
  }
  if (info.source) {
    lines.push(chalk4.dim(` source: ${info.source}`));
  }
  return lines;
}
|
|
2413
|
+
|
|
2414
|
+
// src/helpers/retry.ts
|
|
2415
|
+
init_logger();
|
|
2416
|
+
/**
 * Run an async operation, retrying on failure.
 *
 * Performs up to `maxRetries + 1` attempts of `fn`. On each failure before
 * the final attempt a warning (and a verbose retry notice) is logged; the
 * optional `options.label` is appended to those messages. The last error is
 * rethrown once all attempts are exhausted.
 */
async function withRetry(fn, maxRetries, options) {
  const totalAttempts = maxRetries + 1;
  const retryLabel = options?.label;
  let lastFailure;
  let attempt = 0;
  while (attempt < totalAttempts) {
    attempt += 1;
    try {
      return await fn();
    } catch (err) {
      lastFailure = err;
      const suffix = retryLabel ? ` [${retryLabel}]` : "";
      const moreAttemptsLeft = attempt < totalAttempts;
      if (moreAttemptsLeft) {
        log.warn(
          `Attempt ${attempt}/${totalAttempts} failed${suffix}: ${log.extractMessage(err)}`
        );
        log.debug(`Retrying${suffix} (attempt ${attempt + 1}/${totalAttempts})`);
      }
    }
  }
  throw lastFailure;
}
|
|
2436
|
+
|
|
2437
|
+
// src/orchestrator/spec-pipeline.ts
|
|
2438
|
+
/**
 * Run the spec-generation pipeline end to end.
 *
 * Resolves the input mode from `opts.issues` (tracker issue numbers, inline
 * text, or file/glob paths), loads the corresponding items, boots the
 * provider and spec agent, generates one spec per item (in batches of
 * `concurrency`, with retries), optionally syncs results back to the issue
 * datasource, and returns a summary of what happened.
 *
 * Fix: the dry-run preview previously slugged titles with a hard-coded `60`
 * while the real generation path uses MAX_SLUG_LENGTH, so previewed filenames
 * could differ from the files actually written; both now use MAX_SLUG_LENGTH.
 *
 * @returns {Promise<{total: number, generated: number, failed: number,
 *   files: string[], issueNumbers: string[], identifiers?: string[],
 *   durationMs: number, fileDurationsMs: Record<string, number>}>}
 */
async function runSpecPipeline(opts) {
  const {
    issues,
    provider,
    model,
    serverUrl,
    cwd: specCwd,
    outputDir = join6(specCwd, ".dispatch", "specs"),
    org,
    project,
    workItemType,
    concurrency = defaultConcurrency(),
    dryRun,
    retries = 2
  } = opts;
  const pipelineStart = Date.now();
  const source = await resolveSource(issues, opts.issueSource, specCwd);
  if (!source) {
    return { total: 0, generated: 0, failed: 0, files: [], issueNumbers: [], durationMs: Date.now() - pipelineStart, fileDurationsMs: {} };
  }
  const datasource4 = getDatasource(source);
  const fetchOpts = { cwd: specCwd, org, project, workItemType };
  // Three mutually exclusive input modes: tracker issue numbers, inline text,
  // or file/glob paths.
  const isTrackerMode = isIssueNumbers(issues);
  const isInlineText = !isTrackerMode && !isGlobOrFilePath(issues);
  let items;
  if (isTrackerMode) {
    // Tracker mode: fetch each referenced issue from the datasource in
    // batches of `concurrency`; fetch failures become null-detail items.
    const issueNumbers2 = issues.split(",").map((s) => s.trim()).filter(Boolean);
    if (issueNumbers2.length === 0) {
      log.error("No issue numbers provided. Use --spec 1,2,3");
      return { total: 0, generated: 0, failed: 0, files: [], issueNumbers: [], durationMs: 0, fileDurationsMs: {} };
    }
    const fetchStart = Date.now();
    log.info(`Fetching ${issueNumbers2.length} issue(s) from ${source} (concurrency: ${concurrency})...`);
    items = [];
    const fetchQueue = [...issueNumbers2];
    while (fetchQueue.length > 0) {
      const batch = fetchQueue.splice(0, concurrency);
      log.debug(`Fetching batch of ${batch.length}: #${batch.join(", #")}`);
      const batchResults = await Promise.all(
        batch.map(async (id) => {
          try {
            const details = await datasource4.fetch(id, fetchOpts);
            log.success(`Fetched #${id}: ${details.title}`);
            log.debug(`Body: ${details.body?.length ?? 0} chars, Labels: ${details.labels.length}, Comments: ${details.comments.length}`);
            return { id, details };
          } catch (err) {
            const message = log.extractMessage(err);
            log.error(`Failed to fetch #${id}: ${log.formatErrorChain(err)}`);
            log.debug(log.formatErrorChain(err));
            return { id, details: null, error: message };
          }
        })
      );
      items.push(...batchResults);
    }
    log.debug(`Issue fetching completed in ${elapsed(Date.now() - fetchStart)}`);
  } else if (isInlineText) {
    // Inline mode: synthesize a single issue-like item from the raw text;
    // its id doubles as the output file path.
    const text = Array.isArray(issues) ? issues.join(" ") : issues;
    const title = text.length > 80 ? text.slice(0, 80).trimEnd() + "\u2026" : text;
    const slug = slugify(text, MAX_SLUG_LENGTH);
    const filename = `${slug}.md`;
    const filepath = join6(outputDir, filename);
    const details = {
      number: filepath,
      title,
      body: text,
      labels: [],
      state: "open",
      url: filepath,
      comments: [],
      acceptanceCriteria: ""
    };
    log.info(`Inline text spec: "${title}"`);
    items = [{ id: filepath, details }];
  } else {
    // File mode: expand the glob and read each matched file into an
    // issue-like item; read failures become null-detail items.
    const files = await glob(issues, { cwd: specCwd, absolute: true });
    if (files.length === 0) {
      log.error(`No files matched the pattern "${Array.isArray(issues) ? issues.join(", ") : issues}".`);
      return { total: 0, generated: 0, failed: 0, files: [], issueNumbers: [], durationMs: 0, fileDurationsMs: {} };
    }
    log.info(`Matched ${files.length} file(s) for spec generation (concurrency: ${concurrency})...`);
    items = [];
    for (const filePath of files) {
      try {
        const content = await readFile5(filePath, "utf-8");
        const title = extractTitle(content, filePath);
        const details = {
          number: filePath,
          title,
          body: content,
          labels: [],
          state: "open",
          url: filePath,
          comments: [],
          acceptanceCriteria: ""
        };
        items.push({ id: filePath, details });
      } catch (err) {
        items.push({ id: filePath, details: null, error: log.extractMessage(err) });
      }
    }
  }
  const validItems = items.filter(
    (i) => i.details !== null
  );
  if (validItems.length === 0) {
    const noun = isTrackerMode ? "issues" : isInlineText ? "inline specs" : "files";
    log.error(`No ${noun} could be loaded. Aborting spec generation.`);
    return { total: items.length, generated: 0, failed: items.length, files: [], issueNumbers: [], durationMs: Date.now() - pipelineStart, fileDurationsMs: {} };
  }
  if (dryRun) {
    // Dry run: report the paths that would be written, then exit without
    // booting any provider.
    const mode = isTrackerMode ? "tracker" : isInlineText ? "inline" : "file";
    log.info(`[DRY RUN] Would generate ${validItems.length} spec(s) (mode: ${mode}):
`);
    for (const { id, details } of validItems) {
      let filepath;
      if (isTrackerMode) {
        // Use the same slug length as the real generation path below so the
        // preview matches the eventual filename.
        const slug = slugify(details.title, MAX_SLUG_LENGTH);
        filepath = join6(outputDir, `${id}-${slug}.md`);
      } else {
        filepath = id;
      }
      const label = isTrackerMode ? `#${id}` : filepath;
      log.info(`[DRY RUN] Would generate spec for ${label}: "${details.title}"`);
      log.dim(` \u2192 ${filepath}`);
    }
    return {
      total: items.length,
      generated: 0,
      failed: items.filter((i) => i.details === null).length,
      files: [],
      issueNumbers: [],
      durationMs: Date.now() - pipelineStart,
      fileDurationsMs: {}
    };
  }
  // Large batches require interactive confirmation before spending tokens.
  const confirmed = await confirmLargeBatch(validItems.length);
  if (!confirmed) {
    return { total: 0, generated: 0, failed: 0, files: [], issueNumbers: [], durationMs: Date.now() - pipelineStart, fileDurationsMs: {} };
  }
  const bootStart = Date.now();
  log.info(`Booting ${provider} provider...`);
  log.debug(serverUrl ? `Using server URL: ${serverUrl}` : "No --server-url, will spawn local server");
  const instance = await bootProvider(provider, { url: serverUrl, cwd: specCwd, model });
  // Ensure the provider is torn down even if the process exits early.
  registerCleanup(() => instance.cleanup());
  log.debug(`Provider booted in ${elapsed(Date.now() - bootStart)}`);
  const headerLines = renderHeaderLines({
    provider,
    model: instance.model,
    source
  });
  console.log("");
  for (const line of headerLines) {
    console.log(line);
  }
  console.log(chalk5.dim(" \u2500".repeat(24)));
  console.log("");
  const specAgent = await boot5({ provider: instance, cwd: specCwd });
  await mkdir4(outputDir, { recursive: true });
  const generatedFiles = [];
  const issueNumbers = [];
  const dispatchIdentifiers = [];
  // Items that failed to load count as failures from the start.
  let failed = items.filter((i) => i.details === null).length;
  const fileDurationsMs = {};
  const genQueue = [...validItems];
  // Some providers only report their model after the first request; log it
  // once when it appears if the banner could not show it.
  let modelLoggedInBanner = !!instance.model;
  while (genQueue.length > 0) {
    const batch = genQueue.splice(0, concurrency);
    log.info(`Generating specs for batch of ${batch.length} (${generatedFiles.length + failed}/${items.length} done)...`);
    const batchResults = await Promise.all(
      batch.map(async ({ id, details }) => {
        const specStart = Date.now();
        if (!details) {
          log.error(`Skipping item ${id}: missing issue details`);
          return null;
        }
        // Tracker items get an "<id>-<slug>.md" path under outputDir; inline
        // and file items already carry their path as the id.
        let filepath;
        if (isTrackerMode) {
          const slug = slugify(details.title, MAX_SLUG_LENGTH);
          const filename = `${id}-${slug}.md`;
          filepath = join6(outputDir, filename);
        } else if (isInlineText) {
          filepath = id;
        } else {
          filepath = id;
        }
        try {
          log.info(`Generating spec for ${isTrackerMode ? `#${id}` : filepath}: ${details.title}...`);
          const result = await withRetry(
            () => specAgent.generate({
              issue: isTrackerMode ? details : void 0,
              filePath: isTrackerMode ? void 0 : id,
              fileContent: isTrackerMode ? void 0 : details.body,
              cwd: specCwd,
              outputPath: filepath
            }),
            retries,
            { label: `specAgent.generate(${isTrackerMode ? `#${id}` : filepath})` }
          );
          if (!result.success) {
            throw new Error(result.error ?? "Spec generation failed");
          }
          if (isTrackerMode || isInlineText) {
            // Rename the file to match the H1 title the agent actually wrote.
            const h1Title = extractTitle(result.content, filepath);
            const h1Slug = slugify(h1Title, MAX_SLUG_LENGTH);
            const finalFilename = isTrackerMode ? `${id}-${h1Slug}.md` : `${h1Slug}.md`;
            const finalFilepath = join6(outputDir, finalFilename);
            if (finalFilepath !== filepath) {
              await rename2(filepath, finalFilepath);
              filepath = finalFilepath;
            }
          }
          const specDuration = Date.now() - specStart;
          fileDurationsMs[filepath] = specDuration;
          log.success(`Spec written: ${filepath} (${elapsed(specDuration)})`);
          let identifier = filepath;
          try {
            // Best-effort sync back to the datasource; failures are warned
            // about but do not fail the spec.
            if (isTrackerMode) {
              await datasource4.update(id, details.title, result.content, fetchOpts);
              log.success(`Updated issue #${id} with spec content`);
              await unlink2(filepath);
              log.success(`Deleted local spec ${filepath} (now tracked as issue #${id})`);
              identifier = id;
              issueNumbers.push(id);
            } else if (datasource4.name !== "md") {
              const created = await datasource4.create(details.title, result.content, fetchOpts);
              log.success(`Created issue #${created.number} from ${filepath}`);
              await unlink2(filepath);
              log.success(`Deleted local spec ${filepath} (now tracked as issue #${created.number})`);
              identifier = created.number;
              issueNumbers.push(created.number);
            }
          } catch (err) {
            const label = isTrackerMode ? `issue #${id}` : filepath;
            log.warn(`Could not sync ${label} to datasource: ${log.formatErrorChain(err)}`);
          }
          return { filepath, identifier };
        } catch (err) {
          log.error(`Failed to generate spec for ${isTrackerMode ? `#${id}` : filepath}: ${log.formatErrorChain(err)}`);
          log.debug(log.formatErrorChain(err));
          return null;
        }
      })
    );
    for (const result of batchResults) {
      if (result !== null) {
        generatedFiles.push(result.filepath);
        dispatchIdentifiers.push(result.identifier);
      } else {
        failed++;
      }
    }
    if (!modelLoggedInBanner && instance.model) {
      log.info(`Detected model: ${instance.model}`);
      modelLoggedInBanner = true;
    }
  }
  // Cleanup is best-effort; never let teardown mask the pipeline result.
  try {
    await specAgent.cleanup();
  } catch (err) {
    log.warn(`Spec agent cleanup failed: ${log.formatErrorChain(err)}`);
  }
  try {
    await instance.cleanup();
  } catch (err) {
    log.warn(`Provider cleanup failed: ${log.formatErrorChain(err)}`);
  }
  const totalDuration = Date.now() - pipelineStart;
  log.info(
    `Spec generation complete: ${generatedFiles.length} generated, ${failed} failed in ${elapsed(totalDuration)}`
  );
  if (generatedFiles.length > 0) {
    // Suggest the follow-up command: comma-joined issue numbers when every
    // identifier is numeric, quoted file paths otherwise.
    log.dim(`
Run these specs with:`);
    const allNumeric = dispatchIdentifiers.every((id) => /^\d+$/.test(id));
    if (allNumeric) {
      log.dim(` dispatch ${dispatchIdentifiers.join(",")}
`);
    } else {
      log.dim(` dispatch ${dispatchIdentifiers.map((f) => '"' + f + '"').join(" ")}
`);
    }
  }
  return {
    total: items.length,
    generated: generatedFiles.length,
    failed,
    files: generatedFiles,
    issueNumbers,
    identifiers: dispatchIdentifiers,
    durationMs: totalDuration,
    fileDurationsMs
  };
}
|
|
2732
|
+
|
|
2733
|
+
// src/orchestrator/dispatch-pipeline.ts
|
|
2734
|
+
import { readFile as readFile7 } from "fs/promises";
|
|
2735
|
+
|
|
2736
|
+
// src/parser.ts
|
|
2737
|
+
import { readFile as readFile6, writeFile as writeFile5 } from "fs/promises";
|
|
2738
|
+
// Matches an unchecked markdown task line: optional indent, a "-" or "*"
// bullet, then "[ ] " and the task text. Capture 1 = bullet prefix,
// capture 2 = task text.
var UNCHECKED_RE = /^(\s*[-*]\s)\[ \]\s+(.+)$/;
// Replacement template that checks the box while preserving the bullet
// prefix and task text captured by UNCHECKED_RE.
var CHECKED_SUB = "$1[x] $2";
// Matches an execution-mode tag at the start of a task's text: "(P) ",
// "(S) " or "(I) ". Capture 1 = the mode letter.
var MODE_PREFIX_RE = /^\(([PSI])\)\s+/;
|
|
2741
|
+
/**
 * Reduce a task file's content to the context relevant to one task: keep all
 * prose, headings, and completed items, but drop every *other* unchecked task
 * line so only the line belonging to `task` (by its 1-based line number)
 * remains. CRLF line endings are normalized to LF first.
 */
function buildTaskContext(content, task) {
  const lines = content.replace(/\r\n/g, "\n").split("\n");
  const kept = [];
  for (let i = 0; i < lines.length; i++) {
    const isCurrentTask = i + 1 === task.line;
    if (isCurrentTask || !UNCHECKED_RE.test(lines[i])) {
      kept.push(lines[i]);
    }
  }
  return kept.join("\n");
}
|
|
2751
|
+
/**
 * Parse markdown content into a task-file descriptor.
 *
 * Every unchecked checkbox line (per UNCHECKED_RE) becomes a task with its
 * 0-based index, trimmed text, 1-based line number, the raw (LF-normalized)
 * line, the source path, and an execution mode. A leading "(P)"/"(S)"/"(I)"
 * tag (per MODE_PREFIX_RE) selects parallel/serial/isolated and is stripped
 * from the text; untagged tasks default to "serial".
 */
function parseTaskContent(content, filePath) {
  const MODE_BY_TAG = { P: "parallel", S: "serial", I: "isolated" };
  const lines = content.replace(/\r\n/g, "\n").split("\n");
  const tasks = [];
  lines.forEach((rawLine, i) => {
    const match = rawLine.match(UNCHECKED_RE);
    if (!match) return;
    let text = match[2].trim();
    let mode = "serial";
    const modeMatch = text.match(MODE_PREFIX_RE);
    if (modeMatch) {
      mode = MODE_BY_TAG[modeMatch[1]] ?? "serial";
      text = text.slice(modeMatch[0].length);
    }
    tasks.push({
      index: tasks.length,
      text,
      line: i + 1,
      raw: rawLine,
      file: filePath,
      mode
    });
  });
  return { path: filePath, tasks, content };
}
|
|
2782
|
+
// Read a markdown task file from disk (UTF-8) and parse it into a task-file
// descriptor via parseTaskContent. Rejects if the file cannot be read.
async function parseTaskFile(filePath) {
  const content = await readFile6(filePath, "utf-8");
  return parseTaskContent(content, filePath);
}
|
|
2786
|
+
/**
 * Mark a parsed task's checkbox as complete ("[ ]" -> "[x]") in its source
 * file.
 *
 * Reads task.file, rewrites only the line identified by task.line (1-based),
 * and writes the file back. Throws when the line number is out of range or
 * when the target line no longer matches the unchecked-task pattern (e.g.
 * the file changed since parsing).
 *
 * Fix: tolerate a trailing carriage return. split("\n") leaves "\r" on each
 * line of a CRLF file, and the pattern's `(.+)$` cannot match across "\r"
 * (`.` excludes line terminators), so valid unchecked tasks in CRLF files
 * previously failed with "does not match expected unchecked pattern" even
 * though parseTaskContent accepts them (it normalizes CRLF). The "\r" is
 * stripped before matching and restored afterwards, preserving the file's
 * original line endings.
 */
async function markTaskComplete(task) {
  const content = await readFile6(task.file, "utf-8");
  const lines = content.split("\n");
  const lineIndex = task.line - 1;
  if (lineIndex < 0 || lineIndex >= lines.length) {
    throw new Error(
      `Line ${task.line} out of range in ${task.file} (${lines.length} lines)`
    );
  }
  const original = lines[lineIndex];
  // Strip a trailing \r (CRLF files) before matching; restore it afterwards.
  const hasCR = original.endsWith("\r");
  const core = hasCR ? original.slice(0, -1) : original;
  // Same pattern/substitution as the module-level UNCHECKED_RE / CHECKED_SUB.
  const updatedCore = core.replace(/^(\s*[-*]\s)\[ \]\s+(.+)$/, "$1[x] $2");
  if (core === updatedCore) {
    throw new Error(
      `Line ${task.line} in ${task.file} does not match expected unchecked pattern: "${original}"`
    );
  }
  lines[lineIndex] = hasCR ? updatedCore + "\r" : updatedCore;
  await writeFile5(task.file, lines.join("\n"), "utf-8");
}
|
|
2805
|
+
/**
 * Partition an ordered task list into sequential execution groups.
 *
 * - "parallel" tasks accumulate into the current group (run concurrently);
 * - "serial" tasks (and tasks with no mode) join the current group and then
 *   close it, acting as a barrier;
 * - "isolated" tasks close any open group and run in a group of their own.
 *
 * Returns an array of task arrays; an empty input yields an empty array.
 */
function groupTasksByMode(tasks) {
  const groups = [];
  let pending = [];
  const flush = () => {
    if (pending.length > 0) {
      groups.push(pending);
      pending = [];
    }
  };
  for (const task of tasks) {
    switch (task.mode ?? "serial") {
      case "parallel":
        pending.push(task);
        break;
      case "isolated":
        flush();
        groups.push([task]);
        break;
      default:
        pending.push(task);
        flush();
        break;
    }
  }
  flush();
  return groups;
}
|
|
2830
|
+
|
|
2831
|
+
// src/agents/planner.ts
|
|
2832
|
+
init_logger();
|
|
2833
|
+
/**
 * Boot the planner agent.
 *
 * Requires a provider instance in `opts`; throws otherwise. The returned
 * agent's plan() opens a fresh provider session, sends the planner prompt
 * built from the task (using cwdOverride when given, else the boot cwd),
 * and resolves to { prompt, success } — or { prompt: "", success: false,
 * error } on an empty plan or any thrown failure. cleanup() is a no-op.
 */
async function boot6(opts) {
  const { provider, cwd } = opts;
  if (!provider) {
    throw new Error("Planner agent requires a provider instance in boot options");
  }
  const runPlan = async (task, fileContext, cwdOverride) => {
    try {
      const sessionId = await provider.createSession();
      const plannerPrompt = buildPlannerPrompt(task, cwdOverride ?? cwd, fileContext);
      const plan = await provider.prompt(sessionId, plannerPrompt);
      if (!plan?.trim()) {
        return { prompt: "", success: false, error: "Planner returned empty plan" };
      }
      return { prompt: plan, success: true };
    } catch (err) {
      const message = log.extractMessage(err);
      return { prompt: "", success: false, error: message };
    }
  };
  return {
    name: "planner",
    plan: runPlan,
    async cleanup() {
    }
  };
}
|
|
2858
|
+
/**
 * Build the prompt sent to the planner agent for a single task.
 *
 * @param {object} task - Parsed task with `file`, `line`, and `text`.
 * @param {string} cwd - Working directory the planner should explore.
 * @param {string} [fileContext] - Optional task-file contents to embed; when
 *   present, a "Task File Contents" section with a markdown fence is added.
 * @returns {string} The complete prompt, sections joined with newlines.
 */
function buildPlannerPrompt(task, cwd, fileContext) {
  // Role statement plus the task's location and text.
  const sections = [
    `You are a **planning agent**. Your job is to explore the codebase, understand the task below, and produce a detailed execution prompt that another agent will follow to implement the changes.`,
    ``,
    `## Task`,
    `- **Working directory:** ${cwd}`,
    `- **Source file:** ${task.file}`,
    `- **Task (line ${task.line}):** ${task.text}`
  ];
  // Optional: embed the surrounding task-file prose for extra context.
  if (fileContext) {
    sections.push(
      ``,
      `## Task File Contents`,
      ``,
      `The task comes from a markdown file that may contain important implementation`,
      `details, requirements, and context in its non-task content (headings, prose,`,
      `notes). Review this carefully \u2014 it may describe conventions, constraints, or`,
      `technical details that are critical for the implementation.`,
      ``,
      `\`\`\`markdown`,
      fileContext,
      `\`\`\``
    );
  }
  // Instructions (plan only, never execute) and the required output format
  // for the executor-facing prompt the planner must produce.
  sections.push(
    ``,
    `## Instructions`,
    ``,
    `1. **Explore the codebase** \u2014 read relevant files, search for symbols, and understand the project structure, conventions, and patterns.`,
    `2. **Review the task file contents above** \u2014 pay close attention to non-task text (headings, prose, notes) as it often contains important implementation details, requirements, and constraints.`,
    `3. **Identify the files** that need to be created or modified to complete this task.`,
    `4. **Research the implementation** \u2014 understand the existing code patterns, imports, types, and APIs involved.`,
    `5. **DO NOT make any changes** \u2014 you are only planning, not executing.`,
    ``,
    `## Output Format`,
    ``,
    `Produce your response as a **system prompt for an executor agent**. The executor will receive your output verbatim as its instructions. Write it in second person ("You will...", "Modify the file...").`,
    ``,
    `Your output MUST include:`,
    ``,
    `1. **Context** \u2014 A brief summary of the relevant project structure, conventions, and patterns the executor needs to know. Include any important details from the task file's non-task content.`,
    `2. **Files to modify** \u2014 The exact file paths that need to be created or changed, with the rationale for each.`,
    `3. **Step-by-step implementation** \u2014 Precise, ordered steps the executor should follow. Include:`,
    ` - Exact file paths`,
    ` - What to add, change, or remove`,
    ` - Code snippets, type signatures, or patterns to follow (based on existing code you read)`,
    ` - Import statements needed`,
    `4. **Constraints** \u2014 Any important constraints:`,
    ` - If the task description includes a commit instruction, include a final step in the plan to commit the changes using conventional commit conventions (supported types: feat, fix, docs, refactor, test, chore, style, perf, ci). If the task does not mention committing, instruct the executor to NOT commit changes.`,
    ` - Make minimal, correct changes \u2014 do not refactor unrelated code.`,
    ` - Follow existing code style and conventions found in the project.`,
    ``,
    `Be specific and concrete. Reference actual code you found during exploration. The executor has no prior context about this codebase \u2014 your prompt is all it gets.`
  );
  return sections.join("\n");
}
|
|
2914
|
+
|
|
2915
|
+
// src/dispatcher.ts
|
|
2916
|
+
init_logger();
|
|
2917
|
+
/**
 * Dispatch a single markdown task to an agent provider and report the outcome.
 *
 * @param {object} instance - Provider with `createSession()` and `prompt(sessionId, text)`.
 * @param {object} task - Parsed task ({ file, line, text }).
 * @param {string} cwd - Working directory the agent should operate in.
 * @param {string} [plan] - Optional pre-computed execution plan; when present the
 *   executor-style prompt is used instead of the plain one.
 * @returns {Promise<{task: object, success: boolean, error?: string}>}
 */
async function dispatchTask(instance, task, cwd, plan) {
  try {
    log.debug(`Dispatching task: ${task.file}:${task.line} \u2014 ${task.text.slice(0, 80)}`);
    const session = await instance.createSession();
    // A truthy plan selects the planner-driven prompt variant.
    let builtPrompt;
    if (plan) {
      builtPrompt = buildPlannedPrompt(task, cwd, plan);
    } else {
      builtPrompt = buildPrompt(task, cwd);
    }
    log.debug(`Prompt built (${builtPrompt.length} chars, ${plan ? "with plan" : "no plan"})`);
    const reply = await instance.prompt(session, builtPrompt);
    if (reply !== null) {
      log.debug(`Task dispatch completed (${reply.length} chars response)`);
      return { task, success: true };
    }
    log.debug("Task dispatch returned null response");
    return { task, success: false, error: "No response from agent" };
  } catch (err) {
    const message = log.extractMessage(err);
    log.debug(`Task dispatch failed: ${log.formatErrorChain(err)}`);
    return { task, success: false, error: message };
  }
}
|
|
2936
|
+
/**
 * Build the plain (no-plan) prompt sent to an agent for a single task.
 *
 * @param {object} task - Parsed task ({ file, line, text }).
 * @param {string} cwd - Working directory to advertise to the agent.
 * @returns {string} The prompt text, newline-joined.
 */
function buildPrompt(task, cwd) {
  const lines = [];
  lines.push(`You are completing a task from a markdown task file.`);
  lines.push(``);
  lines.push(`**Working directory:** ${cwd}`);
  lines.push(`**Source file:** ${task.file}`);
  lines.push(`**Task (line ${task.line}):** ${task.text}`);
  lines.push(``);
  lines.push(`Instructions:`);
  lines.push(`- Complete ONLY this specific task \u2014 do not work on other tasks.`);
  lines.push(`- Make the minimal, correct changes needed.`);
  // Commit guidance depends on whether the task text itself asks for a commit.
  lines.push(buildCommitInstruction(task.text));
  lines.push(`- When finished, confirm by saying "Task complete."`);
  return lines.join("\n");
}
|
|
2951
|
+
/**
 * Build the executor-style prompt: the task metadata plus the planner's
 * pre-computed execution plan and a set of "execute, don't explore" constraints.
 *
 * @param {object} task - Parsed task ({ file, line, text }).
 * @param {string} cwd - Working directory to advertise to the agent.
 * @param {string} plan - Verbatim plan produced by the planner agent.
 * @returns {string} The prompt text, newline-joined.
 */
function buildPlannedPrompt(task, cwd, plan) {
  const lines = [];
  lines.push(`You are an **executor agent** completing a task that has been pre-planned by a planner agent.`);
  lines.push(`The planner has already explored the codebase and produced detailed instructions below.`);
  lines.push(``);
  lines.push(`**Working directory:** ${cwd}`);
  lines.push(`**Source file:** ${task.file}`);
  lines.push(`**Task (line ${task.line}):** ${task.text}`);
  lines.push(``);
  lines.push(`---`);
  lines.push(``);
  lines.push(`## Execution Plan`);
  lines.push(``);
  // The plan is embedded verbatim between the two horizontal rules.
  lines.push(plan);
  lines.push(``);
  lines.push(`---`);
  lines.push(``);
  lines.push(`## Executor Constraints`);
  lines.push(`- Follow the plan above precisely \u2014 do not deviate, skip steps, or reorder.`);
  lines.push(`- Complete ONLY this specific task \u2014 do not work on other tasks.`);
  lines.push(`- Make the minimal, correct changes needed \u2014 do not refactor unrelated code.`);
  lines.push(`- Do NOT explore the codebase. The planner has already done all necessary research. Only read or modify the files explicitly referenced in the plan.`);
  lines.push(`- Do NOT re-plan, question, or revise the plan. Trust it as given and execute it faithfully.`);
  lines.push(`- Do NOT search for additional context using grep, find, or similar tools unless the plan explicitly instructs you to.`);
  lines.push(buildCommitInstruction(task.text));
  lines.push(`- When finished, confirm by saying "Task complete."`);
  return lines.join("\n");
}
|
|
2979
|
+
/**
 * True when the task text contains the whole word "commit" (case-insensitive).
 * @param {string} taskText
 * @returns {boolean}
 */
function taskRequestsCommit(taskText) {
  const commitWord = /\bcommit\b/i;
  return commitWord.test(taskText);
}
|
|
2982
|
+
/**
 * Pick the commit-related instruction line for a prompt: tasks that mention
 * the word "commit" get conventional-commit guidance; all others are told not
 * to commit (the orchestrator owns commits).
 * @param {string} taskText
 * @returns {string} A single markdown bullet line.
 */
function buildCommitInstruction(taskText) {
  // Inlined whole-word check for "commit" (case-insensitive).
  if (/\bcommit\b/i.test(taskText)) {
    return `- The task description includes a commit instruction. After completing the implementation, stage all changes and create a conventional commit. Use one of these types: feat, fix, docs, refactor, test, chore, style, perf, ci.`;
  }
  return `- Do NOT commit changes \u2014 the orchestrator handles commits.`;
}
|
|
2988
|
+
|
|
2989
|
+
// src/agents/executor.ts
|
|
2990
|
+
init_logger();
|
|
2991
|
+
/**
 * Boot the "executor" agent: wraps the provider-driven task dispatcher behind
 * the common agent interface ({ name, execute, cleanup }).
 *
 * @param {object} opts - Boot options; must include a `provider` instance.
 * @returns {Promise<object>} The executor agent object.
 * @throws {Error} When no provider is supplied.
 */
async function boot7(opts) {
  const { provider } = opts;
  if (!provider) {
    throw new Error("Executor agent requires a provider instance in boot options");
  }
  return {
    name: "executor",
    // Run one task end-to-end; never rejects — failures are reported in the result.
    async execute(input2) {
      const { task, cwd, plan } = input2;
      const startTime = Date.now();
      try {
        // `plan ?? void 0` normalizes null to undefined for dispatchTask.
        const result = await dispatchTask(provider, task, cwd, plan ?? void 0);
        if (result.success) {
          // Only mark the markdown checkbox done when the dispatch succeeded.
          await markTaskComplete(task);
        }
        return {
          dispatchResult: result,
          success: result.success,
          error: result.error,
          elapsedMs: Date.now() - startTime
        };
      } catch (err) {
        // Convert unexpected throws into a failed result with the same shape.
        const message = log.extractMessage(err);
        return {
          dispatchResult: { task, success: false, error: message },
          success: false,
          error: message,
          elapsedMs: Date.now() - startTime
        };
      }
    },
    // No resources to release; present to satisfy the agent interface.
    async cleanup() {
    }
  };
}
|
|
3026
|
+
|
|
3027
|
+
// src/agents/commit.ts
|
|
3028
|
+
init_logger();
|
|
3029
|
+
import { mkdir as mkdir5, writeFile as writeFile6 } from "fs/promises";
|
|
3030
|
+
import { join as join7 } from "path";
|
|
3031
|
+
import { randomUUID as randomUUID4 } from "crypto";
|
|
3032
|
+
/**
 * Boot the "commit" agent: asks the provider to generate a conventional commit
 * message, PR title, and PR description from a branch diff, then persists the
 * parsed result under `.dispatch/tmp/commit-<uuid>.md`.
 *
 * @param {object} opts - Boot options; must include a `provider` instance.
 * @returns {Promise<object>} The commit agent object ({ name, generate, cleanup }).
 * @throws {Error} When no provider is supplied.
 */
async function boot8(opts) {
  const { provider } = opts;
  if (!provider) {
    throw new Error(
      "Commit agent requires a provider instance in boot options"
    );
  }
  return {
    name: "commit",
    // Never rejects — all failure modes are reported via { success: false, error }.
    async generate(genOpts) {
      try {
        // Output file lives under the project's .dispatch/tmp directory.
        const tmpDir = join7(genOpts.cwd, ".dispatch", "tmp");
        await mkdir5(tmpDir, { recursive: true });
        const tmpFilename = `commit-${randomUUID4()}.md`;
        const tmpPath = join7(tmpDir, tmpFilename);
        const prompt = buildCommitPrompt(genOpts);
        const sessionId = await provider.createSession();
        log.debug(`Commit prompt built (${prompt.length} chars)`);
        const response = await provider.prompt(sessionId, prompt);
        // Null, undefined, or whitespace-only responses are all failures.
        if (!response?.trim()) {
          return {
            commitMessage: "",
            prTitle: "",
            prDescription: "",
            success: false,
            error: "Commit agent returned empty response"
          };
        }
        log.debug(`Commit agent response (${response.length} chars)`);
        const parsed = parseCommitResponse(response);
        // At least one of commit message / PR title must have been extracted.
        if (!parsed.commitMessage && !parsed.prTitle) {
          return {
            commitMessage: "",
            prTitle: "",
            prDescription: "",
            success: false,
            error: "Failed to parse commit agent response: no commit message or PR title found"
          };
        }
        // Persist a human-readable record of what the agent produced.
        const outputContent = formatOutputFile(parsed);
        await writeFile6(tmpPath, outputContent, "utf-8");
        log.debug(`Wrote commit agent output to ${tmpPath}`);
        return {
          ...parsed,
          success: true,
          outputPath: tmpPath
        };
      } catch (err) {
        const message = log.extractMessage(err);
        return {
          commitMessage: "",
          prTitle: "",
          prDescription: "",
          success: false,
          error: message
        };
      }
    },
    // No resources to release; present to satisfy the agent interface.
    async cleanup() {
    }
  };
}
|
|
3094
|
+
/**
 * Assemble the prompt for the commit agent: conventional-commit guidelines,
 * issue context, per-task outcomes, the (possibly truncated) branch diff, and
 * a strict three-section output format the parser relies on.
 *
 * @param {object} opts - { branchDiff, issue: {number,title,body,labels}, taskResults }.
 * @returns {string} The prompt text, newline-joined.
 */
function buildCommitPrompt(opts) {
  const { branchDiff, issue, taskResults } = opts;
  const sections = [
    `You are a **commit message agent**. Your job is to analyze the git diff below and generate a meaningful, conventional-commit-compliant commit message, a PR title, and a PR description.`,
    ``,
    `## Conventional Commit Guidelines`,
    ``,
    `Follow the Conventional Commits specification (https://www.conventionalcommits.org/):`,
    `- Format: \`<type>(<optional scope>): <description>\``,
    `- Types: \`feat\`, \`fix\`, \`docs\`, \`refactor\`, \`test\`, \`chore\`, \`style\`, \`perf\`, \`ci\``,
    `- The description should be concise, imperative mood, lowercase first letter, no period at the end`,
    `- If the change includes breaking changes, add \`!\` after the type/scope (e.g., \`feat!: ...\`)`,
    ``,
    `## Issue Context`,
    ``,
    `- **Issue #${issue.number}:** ${issue.title}`
  ];
  // Issue body is clipped to 500 chars to keep the prompt small.
  if (issue.body) {
    sections.push(
      `- **Description:** ${issue.body.slice(0, 500)}${issue.body.length > 500 ? "..." : ""}`
    );
  }
  if (issue.labels.length > 0) {
    sections.push(`- **Labels:** ${issue.labels.join(", ")}`);
  }
  // Split task results so the agent can see what landed vs. what failed.
  const completed = taskResults.filter((r) => r.success);
  const failed = taskResults.filter((r) => !r.success);
  if (taskResults.length > 0) {
    sections.push(``, `## Tasks`);
    if (completed.length > 0) {
      sections.push(``, `### Completed`);
      for (const r of completed) {
        sections.push(`- ${r.task.text}`);
      }
    }
    if (failed.length > 0) {
      sections.push(``, `### Failed`);
      for (const r of failed) {
        sections.push(
          `- ${r.task.text}${r.error ? ` (error: ${r.error})` : ""}`
        );
      }
    }
  }
  // Cap the diff at 50k chars so huge branches don't blow the context window.
  const maxDiffLength = 5e4;
  const truncatedDiff = branchDiff.length > maxDiffLength ? branchDiff.slice(0, maxDiffLength) + "\n\n... (diff truncated due to size)" : branchDiff;
  sections.push(
    ``,
    `## Git Diff`,
    ``,
    `\`\`\`diff`,
    truncatedDiff,
    `\`\`\``,
    ``,
    `## Required Output Format`,
    ``,
    // These exact headers are what parseCommitResponse matches on.
    `You MUST respond with exactly the following three sections, using these exact headers:`,
    ``,
    `### COMMIT_MESSAGE`,
    `<your conventional commit message here>`,
    ``,
    `### PR_TITLE`,
    `<your PR title here>`,
    ``,
    `### PR_DESCRIPTION`,
    `<your PR description in markdown here>`,
    ``,
    `**Rules:**`,
    `- The commit message MUST follow conventional commit format`,
    `- The PR title should be a concise, descriptive summary of the overall change`,
    `- The PR description should explain what changed and why, referencing the issue context`,
    `- Do NOT include any text outside these three sections`
  );
  return sections.join("\n");
}
|
|
3169
|
+
/**
 * Extract the three sections (COMMIT_MESSAGE, PR_TITLE, PR_DESCRIPTION) from a
 * commit-agent response. Missing or empty sections yield "".
 * @param {string} response - Raw agent output.
 * @returns {{commitMessage: string, prTitle: string, prDescription: string}}
 */
function parseCommitResponse(response) {
  // Grab group 1 of a section regex, trimmed; "" when absent or empty.
  const section = (re) => {
    const m = response.match(re);
    return m?.[1] ? m[1].trim() : "";
  };
  return {
    commitMessage: section(/###\s*COMMIT_MESSAGE\s*\n([\s\S]*?)(?=###\s*PR_TITLE|$)/i),
    prTitle: section(/###\s*PR_TITLE\s*\n([\s\S]*?)(?=###\s*PR_DESCRIPTION|$)/i),
    prDescription: section(/###\s*PR_DESCRIPTION\s*\n([\s\S]*?)$/i)
  };
}
|
|
3195
|
+
/**
 * Render the parsed commit-agent output as a markdown document with three
 * headed sections. Ends with a trailing newline.
 * @param {{commitMessage: string, prTitle: string, prDescription: string}} parsed
 * @returns {string}
 */
function formatOutputFile(parsed) {
  const { commitMessage, prTitle, prDescription } = parsed;
  return `# Commit Agent Output

## Commit Message

${commitMessage}

## PR Title

${prTitle}

## PR Description

${prDescription}
`;
}
|
|
3214
|
+
|
|
3215
|
+
// src/orchestrator/dispatch-pipeline.ts
|
|
3216
|
+
init_logger();
|
|
3217
|
+
init_cleanup();
|
|
3218
|
+
|
|
3219
|
+
// src/helpers/worktree.ts
|
|
3220
|
+
import { join as join8, basename } from "path";
|
|
3221
|
+
import { execFile as execFile7 } from "child_process";
|
|
3222
|
+
import { promisify as promisify7 } from "util";
|
|
3223
|
+
init_logger();
|
|
3224
|
+
var exec7 = promisify7(execFile7);
|
|
3225
|
+
var WORKTREE_DIR = ".dispatch/worktrees";
|
|
3226
|
+
/**
 * Run a git subcommand in `cwd` and return its stdout.
 * Rejects (via execFile) on a non-zero exit.
 * @param {string[]} args - Arguments passed to `git`.
 * @param {string} cwd - Repository directory.
 * @returns {Promise<string>}
 */
async function git2(args, cwd) {
  const result = await exec7("git", args, { cwd });
  return result.stdout;
}
|
|
3230
|
+
/**
 * Derive a worktree directory name from an issue filename:
 * strip the directory and a trailing ".md", then slugify.
 * @param {string} issueFilename
 * @returns {string}
 */
function worktreeName(issueFilename) {
  const stem = basename(issueFilename).replace(/\.md$/i, "");
  return slugify(stem);
}
|
|
3235
|
+
/**
 * Create a git worktree for an issue under `.dispatch/worktrees/<slug>`,
 * on a new branch; falls back to checking out the existing branch when the
 * branch was already created by a previous run.
 *
 * @param {string} repoRoot - Root of the main repository.
 * @param {string} issueFilename - Issue file name used to derive the worktree dir.
 * @param {string} branchName - Branch to create (or reuse) for the worktree.
 * @returns {Promise<string>} Absolute path of the worktree.
 * @throws Rethrows any git failure other than "branch already exists".
 */
async function createWorktree(repoRoot, issueFilename, branchName) {
  const name = worktreeName(issueFilename);
  const worktreePath = join8(repoRoot, WORKTREE_DIR, name);
  try {
    // First attempt: create the worktree AND a fresh branch.
    await git2(["worktree", "add", worktreePath, "-b", branchName], repoRoot);
    log.debug(`Created worktree at ${worktreePath} on branch ${branchName}`);
  } catch (err) {
    const message = log.extractMessage(err);
    // Fragile but pragmatic: detect the "branch already exists" case by
    // inspecting git's error text, then retry without `-b`.
    if (message.includes("already exists")) {
      await git2(["worktree", "add", worktreePath, branchName], repoRoot);
      log.debug(`Created worktree at ${worktreePath} using existing branch ${branchName}`);
    } else {
      throw err;
    }
  }
  return worktreePath;
}
|
|
3252
|
+
/**
 * Remove an issue's git worktree, escalating to `--force` if the plain remove
 * fails (e.g. dirty worktree), then prune stale worktree records. Best-effort:
 * failures are logged as warnings, never thrown.
 *
 * @param {string} repoRoot - Root of the main repository.
 * @param {string} issueFilename - Issue file name the worktree was created from.
 * @returns {Promise<void>}
 */
async function removeWorktree(repoRoot, issueFilename) {
  const name = worktreeName(issueFilename);
  const worktreePath = join8(repoRoot, WORKTREE_DIR, name);
  try {
    await git2(["worktree", "remove", worktreePath], repoRoot);
  } catch {
    // Plain remove refused (presumably uncommitted changes) — force it.
    try {
      await git2(["worktree", "remove", "--force", worktreePath], repoRoot);
    } catch (err) {
      // Give up on this worktree; skip pruning too since removal failed.
      log.warn(`Could not remove worktree ${name}: ${log.formatErrorChain(err)}`);
      return;
    }
  }
  try {
    // Clean up any stale administrative entries left behind.
    await git2(["worktree", "prune"], repoRoot);
  } catch (err) {
    log.warn(`Could not prune worktrees: ${log.formatErrorChain(err)}`);
  }
}
|
|
3271
|
+
|
|
3272
|
+
// src/tui.ts
|
|
3273
|
+
import chalk6 from "chalk";
|
|
3274
|
+
var SPINNER_FRAMES = ["\u280B", "\u2819", "\u2839", "\u2838", "\u283C", "\u2834", "\u2826", "\u2827", "\u2807", "\u280F"];
|
|
3275
|
+
var BAR_WIDTH = 30;
|
|
3276
|
+
var spinnerIndex = 0;
|
|
3277
|
+
var interval = null;
|
|
3278
|
+
var lastLineCount = 0;
|
|
3279
|
+
/**
 * Current spinner glyph, cyan-colored. Advancing is done elsewhere by
 * incrementing the module-level `spinnerIndex`.
 * @returns {string}
 */
function spinner() {
  const frame = SPINNER_FRAMES[spinnerIndex % SPINNER_FRAMES.length];
  return chalk6.cyan(frame);
}
|
|
3282
|
+
/**
 * Render a fixed-width progress bar plus a percentage suffix.
 * An empty total yields a fully dim (empty) bar with no percentage.
 * @param {number} done - Completed count.
 * @param {number} total - Total count.
 * @returns {string}
 */
function progressBar(done, total) {
  if (total === 0) {
    return chalk6.dim("\u2591".repeat(BAR_WIDTH));
  }
  const ratio = done / total;
  const filledCount = Math.round(ratio * BAR_WIDTH);
  const percent = Math.round(ratio * 100);
  const bar =
    chalk6.green("\u2588".repeat(filledCount)) +
    chalk6.dim("\u2591".repeat(BAR_WIDTH - filledCount));
  return bar + chalk6.white(` ${percent}%`);
}
|
|
3289
|
+
/**
 * Map a task status to its one-character display icon.
 * Unknown statuses yield undefined (same as the unhandled switch case).
 * @param {string} status
 * @returns {string|undefined}
 */
function statusIcon(status) {
  if (status === "pending") return chalk6.dim("\u25CB");
  // Both in-flight states share the animated spinner glyph.
  if (status === "planning" || status === "running") return spinner();
  if (status === "done") return chalk6.green("\u25CF");
  if (status === "failed") return chalk6.red("\u2716");
}
|
|
3303
|
+
/**
 * Map a task status to its colored text label.
 * Note: "running" is displayed as "executing".
 * Unknown statuses yield undefined (same as the unhandled switch case).
 * @param {string} status
 * @returns {string|undefined}
 */
function statusLabel(status) {
  if (status === "pending") return chalk6.dim("pending");
  if (status === "planning") return chalk6.magenta("planning");
  if (status === "running") return chalk6.cyan("executing");
  if (status === "done") return chalk6.green("done");
  if (status === "failed") return chalk6.red("failed");
}
|
|
3317
|
+
/**
 * Map the pipeline phase to its header line; active phases carry a spinner.
 * Unknown phases yield undefined (same as the unhandled switch case).
 * @param {string} phase
 * @param {string} [provider] - Provider name shown while booting.
 * @returns {string|undefined}
 */
function phaseLabel(phase, provider) {
  if (phase === "discovering") return `${spinner()} Discovering task files...`;
  if (phase === "parsing") return `${spinner()} Parsing tasks...`;
  if (phase === "booting") {
    const name = provider ?? "provider";
    return `${spinner()} Connecting to ${name}...`;
  }
  if (phase === "dispatching") return `${spinner()} Dispatching tasks...`;
  if (phase === "done") return chalk6.green("\u2714") + " Complete";
}
|
|
3333
|
+
/**
 * Count how many terminal rows `text` occupies once printed: strip SGR color
 * escapes, then for each logical line count ceil(len / cols), minimum one row.
 * @param {string} text - Possibly ANSI-colored output.
 * @param {number} cols - Terminal width (clamped to at least 1).
 * @returns {number}
 */
function countVisualRows(text, cols) {
  const width = Math.max(1, cols);
  // Remove SGR sequences (ESC [ ... m) — they take no screen space.
  const plain = text.replace(/\x1B\[[0-9;]*m/g, "");
  let rows = 0;
  for (const line of plain.split("\n")) {
    rows += Math.max(1, Math.ceil(line.length / width));
  }
  return rows;
}
|
|
3340
|
+
/**
 * Render the full TUI frame as a single string: header, optional issue line,
 * phase line, and — while dispatching or done — a progress bar plus a task
 * list that is either grouped by worktree (multi-issue mode) or flat.
 *
 * Pure with respect to `state` (reads only); reads Date.now() and
 * process.stdout.columns, so output varies over time and terminal size.
 *
 * @param {object} state - TUI state ({ tasks, phase, startTime, filesFound, ... }).
 * @returns {string} The frame, newline-joined.
 */
function render(state) {
  const lines = [];
  const now = Date.now();
  const totalElapsed = elapsed(now - state.startTime);
  const done = state.tasks.filter((t) => t.status === "done").length;
  const failed = state.tasks.filter((t) => t.status === "failed").length;
  const total = state.tasks.length;
  lines.push("");
  lines.push(
    ...renderHeaderLines({
      provider: state.provider,
      model: state.model,
      source: state.source
    })
  );
  if (state.currentIssue) {
    lines.push(
      chalk6.dim(` issue: `) + chalk6.white(`#${state.currentIssue.number}`) + chalk6.dim(` \u2014 ${state.currentIssue.title}`)
    );
  }
  lines.push(chalk6.dim(" \u2500".repeat(24)));
  lines.push(` ${phaseLabel(state.phase, state.provider)}` + chalk6.dim(` ${totalElapsed}`));
  if (state.phase === "dispatching" || state.phase === "done") {
    lines.push("");
    lines.push(` ${progressBar(done + failed, total)} ${chalk6.dim(`${done + failed}/${total} tasks`)}`);
    lines.push("");
    // Grouped-by-worktree view only kicks in when more than one worktree is involved.
    const activeWorktrees = new Set(
      state.tasks.map((t) => t.worktree).filter(Boolean)
    );
    const showWorktree = activeWorktrees.size > 1;
    const cols = process.stdout.columns || 80;
    const maxTextLen = cols - 30;
    const running = state.tasks.filter((t) => t.status === "running" || t.status === "planning");
    const completed = state.tasks.filter(
      (t) => t.status === "done" || t.status === "failed"
    );
    const pending = state.tasks.filter((t) => t.status === "pending");
    if (showWorktree) {
      // Bucket tasks by worktree; tasks without one are listed individually.
      const groups = /* @__PURE__ */ new Map();
      const ungrouped = [];
      for (const ts of state.tasks) {
        if (ts.worktree) {
          const arr = groups.get(ts.worktree) ?? [];
          arr.push(ts);
          groups.set(ts.worktree, arr);
        } else {
          ungrouped.push(ts);
        }
      }
      // Split groups into fully-settled vs still-active.
      const doneGroups = [];
      const activeGroups = [];
      for (const [wt, tasks] of groups) {
        const allDone = tasks.every((t) => t.status === "done" || t.status === "failed");
        if (allDone) {
          doneGroups.push([wt, tasks]);
        } else {
          activeGroups.push([wt, tasks]);
        }
      }
      // Only the last 3 settled groups are shown; earlier ones collapse to a count.
      if (doneGroups.length > 3) {
        lines.push(chalk6.dim(` \xB7\xB7\xB7 ${doneGroups.length - 3} earlier issue(s) completed`));
      }
      for (const [wt, tasks] of doneGroups.slice(-3)) {
        // Worktree names start with the issue number (e.g. "123-slug").
        const issueNum = wt.match(/^(\d+)/)?.[1] ?? wt.slice(0, 12);
        const anyFailed = tasks.some((t) => t.status === "failed");
        const icon = anyFailed ? chalk6.red("\u2716") : chalk6.green("\u25CF");
        const doneCount = tasks.filter((t) => t.status === "done").length;
        const maxElapsed = Math.max(...tasks.map((t) => t.elapsed ?? 0));
        lines.push(` ${icon} ${chalk6.dim(`#${issueNum}`)} ${chalk6.dim(`${doneCount}/${tasks.length} tasks`)} ${chalk6.dim(elapsed(maxElapsed))}`);
      }
      for (const [wt, tasks] of activeGroups) {
        const issueNum = wt.match(/^(\d+)/)?.[1] ?? wt.slice(0, 12);
        const activeTasks = tasks.filter((t) => t.status === "running" || t.status === "planning");
        const activeCount = activeTasks.length;
        const firstActive = activeTasks[0];
        const truncLen = Math.min(cols - 26, 60);
        let text = firstActive?.task.text ?? "";
        if (text.length > truncLen) {
          text = text.slice(0, truncLen - 1) + "\u2026";
        }
        // NOTE(review): `elapsed` on an active task appears to hold a start
        // timestamp here (subtracted from `now`) — confirm against the updater.
        const earliest = Math.min(...activeTasks.map((t) => t.elapsed ?? now));
        const elapsedStr = elapsed(now - earliest);
        lines.push(` ${spinner()} ${chalk6.white(`#${issueNum}`)} ${activeCount} active ${text} ${chalk6.dim(elapsedStr)}`);
      }
      // Worktree-less tasks: show only the ones currently in flight.
      for (const ts of ungrouped) {
        if (ts.status !== "running" && ts.status !== "planning") continue;
        const icon = statusIcon(ts.status);
        const idx = chalk6.dim(`#${state.tasks.indexOf(ts) + 1}`);
        let text = ts.task.text;
        if (text.length > maxTextLen) {
          text = text.slice(0, maxTextLen - 1) + "\u2026";
        }
        const elapsedStr = chalk6.dim(` ${elapsed(now - (ts.elapsed || now))}`);
        const label = statusLabel(ts.status);
        lines.push(` ${icon} ${idx} ${text} ${label}${elapsedStr}`);
        if (ts.error) {
          lines.push(chalk6.red(` \u2514\u2500 ${ts.error}`));
        }
      }
    } else {
      // Flat view: last 3 completed, up to 8 running, first 3 pending.
      const visibleRunning = running.slice(0, 8);
      const visible = [
        ...completed.slice(-3),
        ...visibleRunning,
        ...pending.slice(0, 3)
      ];
      if (completed.length > 3) {
        lines.push(chalk6.dim(` \xB7\xB7\xB7 ${completed.length - 3} earlier task(s) completed`));
      }
      for (const ts of visible) {
        const icon = statusIcon(ts.status);
        const idx = chalk6.dim(`#${state.tasks.indexOf(ts) + 1}`);
        let text = ts.task.text;
        if (text.length > maxTextLen) {
          text = text.slice(0, maxTextLen - 1) + "\u2026";
        }
        // In-flight: elapsed holds a start time (now - elapsed);
        // done: elapsed holds a final duration, shown as-is.
        const elapsedStr = ts.status === "running" || ts.status === "planning" ? chalk6.dim(` ${elapsed(now - (ts.elapsed || now))}`) : ts.status === "done" && ts.elapsed ? chalk6.dim(` ${elapsed(ts.elapsed)}`) : "";
        const label = statusLabel(ts.status);
        lines.push(` ${icon} ${idx} ${text} ${label}${elapsedStr}`);
        if (ts.error) {
          lines.push(chalk6.red(` \u2514\u2500 ${ts.error}`));
        }
      }
      if (running.length > 8) {
        lines.push(chalk6.dim(` \xB7\xB7\xB7 ${running.length - 8} more running`));
      }
      if (pending.length > 3) {
        lines.push(chalk6.dim(` \xB7\xB7\xB7 ${pending.length - 3} more task(s) pending`));
      }
    }
    lines.push("");
    // Summary line: "<n> passed · <n> failed · <n> remaining" (non-zero parts only).
    const parts = [];
    if (done > 0) parts.push(chalk6.green(`${done} passed`));
    if (failed > 0) parts.push(chalk6.red(`${failed} failed`));
    if (total - done - failed > 0)
      parts.push(chalk6.dim(`${total - done - failed} remaining`));
    lines.push(` ${parts.join(chalk6.dim(" \xB7 "))}`);
  } else if (state.filesFound > 0) {
    lines.push(chalk6.dim(` Found ${state.filesFound} file(s)`));
  }
  lines.push("");
  return lines.join("\n");
}
|
|
3483
|
+
/**
 * Repaint the TUI in place: erase the previously drawn frame (cursor-up +
 * clear-to-end), write the new frame, and remember how many terminal rows it
 * occupied so the next repaint can erase it.
 * @param {object} state - TUI state passed through to render().
 */
function draw(state) {
  // ESC[<n>A moves up n rows; ESC[0J clears from cursor to screen end.
  if (lastLineCount > 0) {
    process.stdout.write(`\x1B[${lastLineCount}A\x1B[0J`);
  }
  const frame = render(state);
  process.stdout.write(frame);
  const terminalCols = process.stdout.columns || 80;
  lastLineCount = countVisualRows(frame, terminalCols);
}
|
|
3492
|
+
/**
 * Create the live terminal UI: initializes shared state, starts an 80ms
 * repaint timer (which also advances the spinner frame), and draws the first
 * frame immediately.
 *
 * Uses the module-level `interval` and `spinnerIndex` — only one TUI should
 * be active at a time.
 *
 * @returns {{state: object, update: () => void, stop: () => void}}
 *   `update` forces an immediate repaint; `stop` cancels the timer and
 *   paints one final frame.
 */
function createTui() {
  const state = {
    tasks: [],
    phase: "discovering",
    startTime: Date.now(),
    filesFound: 0
  };
  interval = setInterval(() => {
    // Advance the spinner so in-flight rows animate between updates.
    spinnerIndex++;
    draw(state);
  }, 80);
  const update = () => draw(state);
  const stop = () => {
    if (interval) {
      clearInterval(interval);
      interval = null;
    }
    // Final repaint so the last frame reflects terminal state.
    draw(state);
  };
  draw(state);
  return { state, update, stop };
}
|
|
3514
|
+
|
|
3515
|
+
// src/orchestrator/dispatch-pipeline.ts
|
|
3516
|
+
init_providers();
|
|
3517
|
+
|
|
3518
|
+
// src/orchestrator/datasource-helpers.ts
|
|
3519
|
+
init_logger();
|
|
3520
|
+
import { basename as basename2, join as join9 } from "path";
|
|
3521
|
+
import { mkdtemp, writeFile as writeFile7 } from "fs/promises";
|
|
3522
|
+
import { tmpdir } from "os";
|
|
3523
|
+
import { execFile as execFile8 } from "child_process";
|
|
3524
|
+
import { promisify as promisify8 } from "util";
|
|
3525
|
+
var exec8 = promisify8(execFile8);
|
|
3526
|
+
/**
 * Parse an issue file path of the form "<number>-<slug>.md".
 * @param {string} filePath - Any path; only the basename is inspected.
 * @returns {{issueId: string, slug: string}|null} null when the name doesn't match.
 */
function parseIssueFilename(filePath) {
  const match = /^(\d+)-(.+)\.md$/.exec(basename2(filePath));
  return match ? { issueId: match[1], slug: match[2] } : null;
}
|
|
3532
|
+
/**
 * Fetch issues by id from a datasource. Each entry of `issueIds` may itself be
 * a comma-separated list; ids are trimmed and blanks dropped. Fetch failures
 * are logged as warnings and skipped rather than aborting the batch.
 *
 * @param {string[]} issueIds - Raw id strings (possibly comma-separated).
 * @param {object} datasource4 - Datasource with an async `fetch(id, opts)`.
 * @param {object} fetchOpts - Options forwarded to each fetch.
 * @returns {Promise<object[]>} Successfully fetched items, in request order.
 */
async function fetchItemsById(issueIds, datasource4, fetchOpts) {
  // Expand "1,2" style entries into a flat list of trimmed, non-empty ids.
  const ids = [];
  for (const raw of issueIds) {
    for (const part of raw.split(",")) {
      const id = part.trim();
      if (id) ids.push(id);
    }
  }
  const items = [];
  for (const id of ids) {
    try {
      items.push(await datasource4.fetch(id, fetchOpts));
    } catch (err) {
      log.warn(`Could not fetch issue #${id}: ${log.formatErrorChain(err)}`);
    }
  }
  return items;
}
|
|
3547
|
+
/**
 * Materialize fetched issue items as markdown files in a fresh temp directory
 * named "dispatch-*". Files are named "<number>-<slug>.md" and sorted by their
 * leading issue number (ties broken lexicographically).
 *
 * @param {object[]} items - Issues with { number, title, body }.
 * @returns {Promise<{files: string[], issueDetailsByFile: Map<string, object>}>}
 *   Sorted file paths plus a path -> item lookup.
 */
async function writeItemsToTempDir(items) {
  const tempDir = await mkdtemp(join9(tmpdir(), "dispatch-"));
  const files = [];
  const issueDetailsByFile = new Map();
  for (const item of items) {
    const filename = `${item.number}-${slugify(item.title, MAX_SLUG_LENGTH)}.md`;
    const filepath = join9(tempDir, filename);
    await writeFile7(filepath, item.body, "utf-8");
    files.push(filepath);
    issueDetailsByFile.set(filepath, item);
  }
  // Numeric prefix of the basename; 0 when absent.
  const leadingNumber = (p) => parseInt(basename2(p).match(/^(\d+)/)?.[1] ?? "0", 10);
  files.sort((a, b) => {
    const byNumber = leadingNumber(a) - leadingNumber(b);
    return byNumber !== 0 ? byNumber : a.localeCompare(b);
  });
  return { files, issueDetailsByFile };
}
|
|
3567
|
+
/**
 * Close upstream issues whose task files are fully completed. For each task
 * file whose tasks ALL succeeded and whose name matches "<number>-<slug>.md",
 * close the corresponding issue via the datasource. Best-effort: close
 * failures are logged as warnings.
 *
 * Fix: the success log previously printed the literal text "$(unknown)"
 * instead of interpolating the computed `filename` (which was otherwise
 * unused) — it now reports the actual task file name.
 *
 * @param {object[]} taskFiles - Parsed task files ({ path, tasks }).
 * @param {object[]} results - Dispatch results ({ task, success }).
 * @param {string} cwd - Working directory for datasource detection/ops.
 * @param {string} [source] - Explicit datasource name; auto-detected when absent.
 * @param {string} [org] - Datasource org (e.g. Azure DevOps organization).
 * @param {string} [project] - Datasource project.
 * @param {string} [workItemType] - Datasource work item type.
 * @returns {Promise<void>}
 */
async function closeCompletedSpecIssues(taskFiles, results, cwd, source, org, project, workItemType) {
  let datasourceName = source;
  if (!datasourceName) {
    datasourceName = await detectDatasource(cwd) ?? void 0;
  }
  // No datasource configured or detected — nothing to close.
  if (!datasourceName) return;
  const datasource4 = getDatasource(datasourceName);
  const succeededTasks = new Set(
    results.filter((r) => r.success).map((r) => r.task)
  );
  const fetchOpts = { cwd, org, project, workItemType };
  for (const taskFile of taskFiles) {
    const fileTasks = taskFile.tasks;
    if (fileTasks.length === 0) continue;
    // Only close when every task in the file succeeded.
    const allSucceeded = fileTasks.every((t) => succeededTasks.has(t));
    if (!allSucceeded) continue;
    const parsed = parseIssueFilename(taskFile.path);
    if (!parsed) continue;
    const { issueId } = parsed;
    const filename = basename2(taskFile.path);
    try {
      await datasource4.close(issueId, fetchOpts);
      log.success(`Closed issue #${issueId} (all tasks in ${filename} completed)`);
    } catch (err) {
      log.warn(`Could not close issue #${issueId}: ${log.formatErrorChain(err)}`);
    }
  }
}
|
|
3595
|
+
/**
 * List commit subject lines on HEAD that are not on `defaultBranch`
 * (newest first, per `git log`). Returns [] on any git failure.
 * @param {string} defaultBranch - Base branch name.
 * @param {string} cwd - Repository directory.
 * @returns {Promise<string[]>}
 */
async function getCommitSummaries(defaultBranch, cwd) {
  try {
    const range = `${defaultBranch}..HEAD`;
    const result = await exec8("git", ["log", range, "--pretty=format:%s"], { cwd });
    const subjects = result.stdout.trim().split("\n");
    return subjects.filter(Boolean);
  } catch {
    // No commits, unknown branch, or not a repo — treat all the same.
    return [];
  }
}
|
|
3607
|
+
/**
 * Full textual diff of HEAD against `defaultBranch`, allowing up to 10 MiB of
 * output. Returns "" on any git failure.
 * @param {string} defaultBranch - Base branch name.
 * @param {string} cwd - Repository directory.
 * @returns {Promise<string>}
 */
async function getBranchDiff(defaultBranch, cwd) {
  try {
    const result = await exec8(
      "git",
      ["diff", `${defaultBranch}..HEAD`],
      { cwd, maxBuffer: 10 * 1024 * 1024 }
    );
    return result.stdout;
  } catch {
    return "";
  }
}
|
|
3619
|
+
/**
 * Squash every commit on the current branch (since it diverged from
 * `defaultBranch`) into a single commit with the given message.
 *
 * Destructive: performs a soft reset to the merge-base, then commits the
 * accumulated staged changes. Rejects if any git step fails (e.g. no
 * merge-base, nothing to commit).
 *
 * @param {string} defaultBranch - Base branch to compute the merge-base against.
 * @param {string} message - Commit message for the squashed commit.
 * @param {string} cwd - Repository directory.
 * @returns {Promise<void>}
 */
async function squashBranchCommits(defaultBranch, message, cwd) {
  const { stdout } = await exec8(
    "git",
    ["merge-base", defaultBranch, "HEAD"],
    { cwd }
  );
  const mergeBase = stdout.trim();
  // Soft reset keeps the work tree and index; history collapses to one commit.
  await exec8("git", ["reset", "--soft", mergeBase], { cwd });
  await exec8("git", ["commit", "-m", message], { cwd });
}
|
|
3629
|
+
/**
 * Compose a pull-request body in markdown: a commit-summary section, a task
 * checklist (checked = succeeded), optional labels, and a datasource-specific
 * issue-closing footer ("Closes #n" for GitHub, "Resolves AB#n" for Azure
 * DevOps; none otherwise).
 *
 * @param {object} details - Issue details ({ number, labels, ... }).
 * @param {object[]} tasks - Tasks belonging to this issue.
 * @param {object[]} results - All dispatch results (filtered to these tasks).
 * @param {string} defaultBranch - Base branch used to list branch commits.
 * @param {string} datasourceName - "github", "azdevops", or other.
 * @param {string} cwd - Repository directory.
 * @returns {Promise<string>} Markdown body, newline-joined.
 */
async function buildPrBody(details, tasks, results, defaultBranch, datasourceName, cwd) {
  const sections = [];
  const commits = await getCommitSummaries(defaultBranch, cwd);
  if (commits.length > 0) {
    sections.push("## Summary\n");
    for (const commit of commits) {
      sections.push(`- ${commit}`);
    }
    sections.push("");
  }
  // Map each of this issue's tasks to its dispatch result (if any).
  const taskResults = new Map(
    results.filter((r) => tasks.includes(r.task)).map((r) => [r.task, r])
  );
  const completedTasks = tasks.filter((t) => taskResults.get(t)?.success);
  // Failed = has a result and it was unsuccessful; tasks with no result are omitted.
  const failedTasks = tasks.filter((t) => {
    const r = taskResults.get(t);
    return r && !r.success;
  });
  if (completedTasks.length > 0 || failedTasks.length > 0) {
    sections.push("## Tasks\n");
    for (const task of completedTasks) {
      sections.push(`- [x] ${task.text}`);
    }
    for (const task of failedTasks) {
      sections.push(`- [ ] ${task.text}`);
    }
    sections.push("");
  }
  if (details.labels.length > 0) {
    // Template literal intentionally spans two lines to embed a trailing newline.
    sections.push(`**Labels:** ${details.labels.join(", ")}
`);
  }
  if (datasourceName === "github") {
    sections.push(`Closes #${details.number}`);
  } else if (datasourceName === "azdevops") {
    sections.push(`Resolves AB#${details.number}`);
  }
  return sections.join("\n");
}
|
|
3668
|
+
// Pick a PR title from branch history: the issue title when there are no
// commits, the lone commit subject when there is exactly one, otherwise the
// most recent subject annotated with how many additional commits exist.
async function buildPrTitle(issueTitle, defaultBranch, cwd) {
  const commits = await getCommitSummaries(defaultBranch, cwd);
  switch (commits.length) {
    case 0:
      return issueTitle;
    case 1:
      return commits[0];
    default: {
      const extra = commits.length - 1;
      return `${commits.at(-1)} (+${extra} more)`;
    }
  }
}
|
|
3678
|
+
|
|
3679
|
+
// src/helpers/timeout.ts
|
|
3680
|
+
// Error raised by withTimeout() when the wrapped promise does not settle
// within its deadline. Message format: "Timed out after <ms>ms [<label>]",
// with the bracketed suffix omitted when no label was supplied.
var TimeoutError = class extends Error {
  /** Optional label identifying the operation that timed out. */
  label;
  constructor(ms, label) {
    super(label ? `Timed out after ${ms}ms [${label}]` : `Timed out after ${ms}ms`);
    this.name = "TimeoutError";
    this.label = label;
  }
};
|
|
3690
|
+
// Race `promise` against a deadline of `ms` milliseconds. Resolves/rejects
// with the promise's own outcome if it settles first; otherwise rejects with
// a TimeoutError carrying `label`. A no-op catch is attached to the returned
// promise so an un-awaited timeout never triggers an unhandled rejection.
function withTimeout(promise, ms, label) {
  const guarded = new Promise((resolve2, reject) => {
    let done = false;
    // Settle exactly once; returns whether this call won the race.
    const settleOnce = (settle, value) => {
      if (done) return false;
      done = true;
      settle(value);
      return true;
    };
    const timer = setTimeout(() => {
      settleOnce(reject, new TimeoutError(ms, label));
    }, ms);
    promise.then(
      (value) => {
        if (settleOnce(resolve2, value)) clearTimeout(timer);
      },
      (err) => {
        if (settleOnce(reject, err)) clearTimeout(timer);
      }
    );
  });
  guarded.catch(() => {
  });
  return guarded;
}
|
|
3717
|
+
|
|
3718
|
+
// src/orchestrator/dispatch-pipeline.ts
|
|
3719
|
+
import chalk7 from "chalk";
|
|
3720
|
+
// Default planner deadline in minutes when --plan-timeout is not given.
var DEFAULT_PLAN_TIMEOUT_MIN = 10;
// Default number of planner retries after a timeout (attempts = retries + 1).
var DEFAULT_PLAN_RETRIES = 1;
|
|
3722
|
+
/**
 * Main dispatch pipeline: fetch work items from the configured datasource,
 * parse their unchecked tasks, boot an agent provider, run plan → execute →
 * commit/PR for each issue, and report aggregate results.
 *
 * @param opts - CLI-resolved options (issue ids, concurrency, flags, provider/model,
 *   datasource selection, timeouts/retries).
 * @param cwd  - Repository root to operate in.
 * @returns {Promise<{total, completed, failed, skipped, results}>} summary.
 */
async function runDispatchPipeline(opts, cwd) {
  const {
    issueIds,
    concurrency,
    dryRun,
    serverUrl,
    noPlan,
    noBranch,
    noWorktree,
    provider = "opencode",
    model,
    source,
    org,
    project,
    workItemType,
    planTimeout,
    planRetries,
    retries
  } = opts;
  // --plan-retries overrides --retries for the planner; attempts = retries + 1.
  const planTimeoutMs = (planTimeout ?? DEFAULT_PLAN_TIMEOUT_MIN) * 6e4;
  const maxPlanAttempts = (planRetries ?? retries ?? DEFAULT_PLAN_RETRIES) + 1;
  log.debug(`Plan timeout: ${planTimeout ?? DEFAULT_PLAN_TIMEOUT_MIN}m (${planTimeoutMs}ms), max attempts: ${maxPlanAttempts}`);
  if (dryRun) {
    return dryRunMode(issueIds, cwd, source, org, project, workItemType);
  }
  const verbose = log.verbose;
  // In verbose mode, replace the interactive TUI with a no-op shim so plain
  // log lines are not clobbered by redraws; otherwise use the real TUI.
  let tui;
  if (verbose) {
    const headerLines = renderHeaderLines({ provider, source });
    console.log("");
    for (const line of headerLines) console.log(line);
    console.log(chalk7.dim(" \u2500".repeat(24)));
    console.log("");
    log.info("Discovering task files...");
    const state = {
      tasks: [],
      phase: "discovering",
      startTime: Date.now(),
      filesFound: 0,
      provider,
      source
    };
    tui = { state, update: () => {
    }, stop: () => {
    } };
  } else {
    tui = createTui();
    tui.state.provider = provider;
    tui.state.source = source;
  }
  try {
    tui.state.phase = "discovering";
    if (!source) {
      tui.state.phase = "done";
      tui.stop();
      log.error("No datasource configured. Use --source or run 'dispatch config' to set up defaults.");
      return { total: 0, completed: 0, failed: 0, skipped: 0, results: [] };
    }
    const datasource4 = getDatasource(source);
    const fetchOpts = { cwd, org, project, workItemType };
    // Explicit ids fetch individually; otherwise list everything open.
    const items = issueIds.length > 0 ? await fetchItemsById(issueIds, datasource4, fetchOpts) : await datasource4.list(fetchOpts);
    if (items.length === 0) {
      tui.state.phase = "done";
      tui.stop();
      const label = issueIds.length > 0 ? `issue(s) ${issueIds.join(", ")}` : `datasource: ${source}`;
      log.warn("No work items found from " + label);
      return { total: 0, completed: 0, failed: 0, skipped: 0, results: [] };
    }
    // Materialize items as markdown task files in a temp dir for parsing.
    const { files, issueDetailsByFile } = await writeItemsToTempDir(items);
    tui.state.filesFound = files.length;
    if (verbose) log.debug(`Found ${files.length} task file(s)`);
    tui.state.phase = "parsing";
    if (verbose) log.info("Parsing tasks...");
    const taskFiles = [];
    for (const file of files) {
      const tf = await parseTaskFile(file);
      if (tf.tasks.length > 0) {
        taskFiles.push(tf);
      }
    }
    const allTasks = taskFiles.flatMap((tf) => tf.tasks);
    // Raw file contents feed planner context later.
    const fileContentMap = /* @__PURE__ */ new Map();
    for (const tf of taskFiles) {
      fileContentMap.set(tf.path, tf.content);
    }
    if (allTasks.length === 0) {
      tui.state.phase = "done";
      tui.stop();
      log.warn("No unchecked tasks found");
      return { total: 0, completed: 0, failed: 0, skipped: 0, results: [] };
    }
    tui.state.tasks = allTasks.map((task) => ({
      task,
      status: "pending"
    }));
    // Group tasks per issue file; each file is one issue lifecycle.
    const tasksByFile = /* @__PURE__ */ new Map();
    for (const task of allTasks) {
      const list = tasksByFile.get(task.file) ?? [];
      list.push(task);
      tasksByFile.set(task.file, list);
    }
    // Worktrees are only worth it with multiple issues and full branch lifecycle.
    const useWorktrees = !noWorktree && !noBranch && tasksByFile.size > 1;
    tui.state.phase = "booting";
    if (verbose) log.info(`Booting ${provider} provider...`);
    if (serverUrl) {
      tui.state.serverUrl = serverUrl;
    }
    if (verbose && serverUrl) log.debug(`Server URL: ${serverUrl}`);
    let instance;
    let planner = null;
    let executor;
    let commitAgent;
    // Without worktrees, a single shared provider + agents serve all issues;
    // with worktrees, each issue boots its own set (see processIssueFile).
    if (!useWorktrees) {
      instance = await bootProvider(provider, { url: serverUrl, cwd, model });
      registerCleanup(() => instance.cleanup());
      if (instance.model) {
        tui.state.model = instance.model;
      }
      if (verbose && instance.model) log.debug(`Model: ${instance.model}`);
      planner = noPlan ? null : await boot6({ provider: instance, cwd });
      executor = await boot7({ provider: instance, cwd });
      commitAgent = await boot8({ provider: instance, cwd });
    }
    tui.state.phase = "dispatching";
    if (verbose) log.info(`Dispatching ${allTasks.length} task(s)...`);
    const results = [];
    let completed = 0;
    let failed = 0;
    const lifecycleOpts = { cwd };
    // Username feeds branch naming; best-effort.
    let username = "";
    try {
      username = await datasource4.getUsername(lifecycleOpts);
    } catch (err) {
      log.warn(`Could not resolve git username for branch naming: ${log.formatErrorChain(err)}`);
    }
    // Full lifecycle for one issue file: branch/worktree setup, plan+execute
    // each task (batched by concurrency), commit, push, PR, teardown.
    const processIssueFile = async (file, fileTasks) => {
      const details = issueDetailsByFile.get(file);
      let defaultBranch;
      let branchName;
      let worktreePath;
      let issueCwd = cwd;
      if (!noBranch && details) {
        try {
          defaultBranch = await datasource4.getDefaultBranch(lifecycleOpts);
          branchName = datasource4.buildBranchName(details.number, details.title, username);
          if (useWorktrees) {
            // Isolate this issue in its own worktree so issues run in parallel.
            worktreePath = await createWorktree(cwd, file, branchName);
            registerCleanup(async () => {
              await removeWorktree(cwd, file);
            });
            issueCwd = worktreePath;
            log.debug(`Created worktree for issue #${details.number} at ${worktreePath}`);
            const wtName = worktreeName(file);
            for (const task of fileTasks) {
              const tuiTask = tui.state.tasks.find((t) => t.task === task);
              if (tuiTask) tuiTask.worktree = wtName;
            }
          } else {
            await datasource4.createAndSwitchBranch(branchName, lifecycleOpts);
            log.debug(`Switched to branch ${branchName}`);
          }
        } catch (err) {
          // Branch setup failure fails every task of the issue up-front.
          const errorMsg = `Branch creation failed for issue #${details.number}: ${log.extractMessage(err)}`;
          log.error(errorMsg);
          for (const task of fileTasks) {
            const tuiTask = tui.state.tasks.find((t) => t.task === task);
            if (tuiTask) {
              tuiTask.status = "failed";
              tuiTask.error = errorMsg;
            }
            results.push({ task, success: false, error: errorMsg });
          }
          failed += fileTasks.length;
          return;
        }
      }
      const issueLifecycleOpts = { cwd: issueCwd };
      let localInstance;
      let localPlanner;
      let localExecutor;
      let localCommitAgent;
      if (useWorktrees) {
        // Per-issue provider/agents rooted in the worktree.
        localInstance = await bootProvider(provider, { url: serverUrl, cwd: issueCwd, model });
        registerCleanup(() => localInstance.cleanup());
        if (localInstance.model && !tui.state.model) {
          tui.state.model = localInstance.model;
        }
        if (verbose && localInstance.model) log.debug(`Model: ${localInstance.model}`);
        localPlanner = noPlan ? null : await boot6({ provider: localInstance, cwd: issueCwd });
        localExecutor = await boot7({ provider: localInstance, cwd: issueCwd });
        localCommitAgent = await boot8({ provider: localInstance, cwd: issueCwd });
      } else {
        localInstance = instance;
        localPlanner = planner;
        localExecutor = executor;
        localCommitAgent = commitAgent;
      }
      // Groups run sequentially; tasks within a group run in batches of
      // `concurrency`.
      const groups = groupTasksByMode(fileTasks);
      const issueResults = [];
      for (const group of groups) {
        const groupQueue = [...group];
        while (groupQueue.length > 0) {
          const batch = groupQueue.splice(0, concurrency);
          const batchResults = await Promise.all(
            batch.map(async (task) => {
              const tuiTask = tui.state.tasks.find((t) => t.task === task);
              const startTime = Date.now();
              tuiTask.elapsed = startTime;
              let plan;
              if (localPlanner) {
                // --- planning phase (with timeout + retry) ---
                tuiTask.status = "planning";
                if (verbose) log.info(`Task #${tui.state.tasks.indexOf(tuiTask) + 1}: planning \u2014 "${task.text}"`);
                const rawContent = fileContentMap.get(task.file);
                const fileContext = rawContent ? buildTaskContext(rawContent, task) : void 0;
                let planResult;
                for (let attempt = 1; attempt <= maxPlanAttempts; attempt++) {
                  try {
                    planResult = await withTimeout(
                      localPlanner.plan(task, fileContext, issueCwd),
                      planTimeoutMs,
                      "planner.plan()"
                    );
                    break;
                  } catch (err) {
                    // Only timeouts are retried; other errors fail immediately.
                    if (err instanceof TimeoutError) {
                      log.warn(
                        `Planning timed out for task "${task.text}" (attempt ${attempt}/${maxPlanAttempts})`
                      );
                      if (attempt < maxPlanAttempts) {
                        log.debug(`Retrying planning (attempt ${attempt + 1}/${maxPlanAttempts})`);
                      }
                    } else {
                      planResult = {
                        prompt: "",
                        success: false,
                        error: log.extractMessage(err)
                      };
                      break;
                    }
                  }
                }
                // All attempts timed out: synthesize a failure result.
                if (!planResult) {
                  const timeoutMin = planTimeout ?? 10;
                  planResult = {
                    prompt: "",
                    success: false,
                    error: `Planning timed out after ${timeoutMin}m (${maxPlanAttempts} attempts)`
                  };
                }
                if (!planResult.success) {
                  tuiTask.status = "failed";
                  tuiTask.error = `Planning failed: ${planResult.error}`;
                  tuiTask.elapsed = Date.now() - startTime;
                  if (verbose) log.error(`Task #${tui.state.tasks.indexOf(tuiTask) + 1}: failed \u2014 ${tuiTask.error} (${elapsed(tuiTask.elapsed)})`);
                  failed++;
                  return { task, success: false, error: tuiTask.error };
                }
                plan = planResult.prompt;
              }
              // --- execution phase (retried; final rejection becomes a failure result) ---
              tuiTask.status = "running";
              if (verbose) log.info(`Task #${tui.state.tasks.indexOf(tuiTask) + 1}: executing \u2014 "${task.text}"`);
              const execRetries = 2;
              const execResult = await withRetry(
                async () => {
                  const result = await localExecutor.execute({
                    task,
                    cwd: issueCwd,
                    plan: plan ?? null
                  });
                  if (!result.success) {
                    throw new Error(result.error ?? "Execution failed");
                  }
                  return result;
                },
                execRetries,
                { label: `executor "${task.text}"` }
              ).catch((err) => ({
                dispatchResult: { task, success: false, error: log.extractMessage(err) },
                success: false,
                error: log.extractMessage(err),
                elapsedMs: 0
              }));
              if (execResult.success) {
                // Best-effort: push the now-checked task file back to the datasource.
                try {
                  const parsed = parseIssueFilename(task.file);
                  if (parsed) {
                    const updatedContent = await readFile7(task.file, "utf-8");
                    const issueDetails = issueDetailsByFile.get(task.file);
                    const title = issueDetails?.title ?? parsed.slug;
                    await datasource4.update(parsed.issueId, title, updatedContent, fetchOpts);
                    log.success(`Synced task completion to issue #${parsed.issueId}`);
                  }
                } catch (err) {
                  log.warn(`Could not sync task completion to datasource: ${log.formatErrorChain(err)}`);
                }
                tuiTask.status = "done";
                tuiTask.elapsed = Date.now() - startTime;
                if (verbose) log.success(`Task #${tui.state.tasks.indexOf(tuiTask) + 1}: done \u2014 "${task.text}" (${elapsed(tuiTask.elapsed)})`);
                completed++;
              } else {
                tuiTask.status = "failed";
                tuiTask.error = execResult.error;
                tuiTask.elapsed = Date.now() - startTime;
                if (verbose) log.error(`Task #${tui.state.tasks.indexOf(tuiTask) + 1}: failed \u2014 "${task.text}" (${elapsed(tuiTask.elapsed)})${tuiTask.error ? `: ${tuiTask.error}` : ""}`);
                failed++;
              }
              return execResult.dispatchResult;
            })
          );
          issueResults.push(...batchResults);
          if (!tui.state.model && localInstance.model) {
            tui.state.model = localInstance.model;
          }
        }
      }
      results.push(...issueResults);
      // --- post-task git lifecycle (each step best-effort) ---
      if (!noBranch && branchName && defaultBranch && details) {
        try {
          await datasource4.commitAllChanges(
            `chore: stage uncommitted changes for issue #${details.number}`,
            issueLifecycleOpts
          );
          log.debug(`Staged uncommitted changes for issue #${details.number}`);
        } catch (err) {
          log.warn(`Could not commit uncommitted changes for issue #${details.number}: ${log.formatErrorChain(err)}`);
        }
      }
      // Optionally let the commit agent rewrite the branch into one commit
      // with a generated message/PR title/description.
      let commitAgentResult;
      if (!noBranch && branchName && defaultBranch && details) {
        try {
          const branchDiff = await getBranchDiff(defaultBranch, issueCwd);
          if (branchDiff) {
            const result = await localCommitAgent.generate({
              branchDiff,
              issue: details,
              taskResults: issueResults,
              cwd: issueCwd
            });
            if (result.success) {
              commitAgentResult = result;
              try {
                await squashBranchCommits(defaultBranch, result.commitMessage, issueCwd);
                log.debug(`Rewrote commit message for issue #${details.number}`);
              } catch (err) {
                log.warn(`Could not rewrite commit message for issue #${details.number}: ${log.formatErrorChain(err)}`);
              }
            } else {
              log.warn(`Commit agent failed for issue #${details.number}: ${result.error}`);
            }
          }
        } catch (err) {
          log.warn(`Commit agent error for issue #${details.number}: ${log.formatErrorChain(err)}`);
        }
      }
      if (!noBranch && branchName && defaultBranch && details) {
        try {
          await datasource4.pushBranch(branchName, issueLifecycleOpts);
          log.debug(`Pushed branch ${branchName}`);
        } catch (err) {
          log.warn(`Could not push branch ${branchName}: ${log.formatErrorChain(err)}`);
        }
        try {
          // Prefer agent-generated title/body; fall back to commit-derived ones.
          const prTitle = commitAgentResult?.prTitle || await buildPrTitle(details.title, defaultBranch, issueLifecycleOpts.cwd);
          const prBody = commitAgentResult?.prDescription || await buildPrBody(
            details,
            fileTasks,
            issueResults,
            defaultBranch,
            datasource4.name,
            issueLifecycleOpts.cwd
          );
          const prUrl = await datasource4.createPullRequest(
            branchName,
            details.number,
            prTitle,
            prBody,
            issueLifecycleOpts
          );
          if (prUrl) {
            log.success(`Created PR for issue #${details.number}: ${prUrl}`);
          }
        } catch (err) {
          log.warn(`Could not create PR for issue #${details.number}: ${log.formatErrorChain(err)}`);
        }
        // Teardown: drop the worktree, or switch back to the default branch.
        if (useWorktrees && worktreePath) {
          try {
            await removeWorktree(cwd, file);
          } catch (err) {
            log.warn(`Could not remove worktree for issue #${details.number}: ${log.formatErrorChain(err)}`);
          }
        } else if (!useWorktrees) {
          try {
            await datasource4.switchBranch(defaultBranch, lifecycleOpts);
            log.debug(`Switched back to ${defaultBranch}`);
          } catch (err) {
            log.warn(`Could not switch back to ${defaultBranch}: ${log.formatErrorChain(err)}`);
          }
        }
      }
      // Per-issue agents/provider are torn down here; shared ones below.
      if (useWorktrees) {
        await localExecutor.cleanup();
        await localPlanner?.cleanup();
        await localInstance.cleanup();
      }
    };
    // Worktrees allow issues to proceed in parallel; otherwise serialize.
    if (useWorktrees) {
      await Promise.all(
        Array.from(tasksByFile).map(
          ([file, fileTasks]) => processIssueFile(file, fileTasks)
        )
      );
    } else {
      for (const [file, fileTasks] of tasksByFile) {
        await processIssueFile(file, fileTasks);
      }
    }
    try {
      await closeCompletedSpecIssues(taskFiles, results, cwd, source, org, project, workItemType);
    } catch (err) {
      log.warn(`Could not close completed spec issues: ${log.formatErrorChain(err)}`);
    }
    await commitAgent?.cleanup();
    await executor?.cleanup();
    await planner?.cleanup();
    await instance?.cleanup();
    tui.state.phase = "done";
    tui.stop();
    if (verbose) log.success(`Done \u2014 ${completed} completed, ${failed} failed (${elapsed(Date.now() - tui.state.startTime)})`);
    return { total: allTasks.length, completed, failed, skipped: 0, results };
  } catch (err) {
    // Always stop the TUI so the terminal is restored before rethrowing.
    tui.stop();
    throw err;
  }
}
|
|
4156
|
+
// Preview mode for --dry-run: fetch work items, enumerate their unchecked
// tasks, and print what WOULD be dispatched (including the branch each issue
// would get) without booting a provider or touching git state.
async function dryRunMode(issueIds, cwd, source, org, project, workItemType) {
  if (!source) {
    log.error("No datasource configured. Use --source or run 'dispatch config' to set up defaults.");
    return { total: 0, completed: 0, failed: 0, skipped: 0, results: [] };
  }
  const ds = getDatasource(source);
  const fetchOpts = { cwd, org, project, workItemType };
  const lifecycleOpts = { cwd };
  // Username only affects the previewed branch names; failure is ignored.
  let username = "";
  try {
    username = await ds.getUsername(lifecycleOpts);
  } catch {
  }
  const items = issueIds.length > 0 ? await fetchItemsById(issueIds, ds, fetchOpts) : await ds.list(fetchOpts);
  if (items.length === 0) {
    const label = issueIds.length > 0 ? `issue(s) ${issueIds.join(", ")}` : `datasource: ${source}`;
    log.warn("No work items found from " + label);
    return { total: 0, completed: 0, failed: 0, skipped: 0, results: [] };
  }
  const { files } = await writeItemsToTempDir(items);
  const taskFiles = [];
  for (const filePath of files) {
    const parsedFile = await parseTaskFile(filePath);
    if (parsedFile.tasks.length > 0) {
      taskFiles.push(parsedFile);
    }
  }
  const allTasks = taskFiles.flatMap((tf) => tf.tasks);
  if (allTasks.length === 0) {
    log.warn("No unchecked tasks found");
    return { total: 0, completed: 0, failed: 0, skipped: 0, results: [] };
  }
  log.info(`Dry run \u2014 ${allTasks.length} task(s) across ${taskFiles.length} file(s):\n`);
  for (const [index, task] of allTasks.entries()) {
    const parsed = parseIssueFilename(task.file);
    const details = parsed ? items.find((item) => item.number === parsed.issueId) : void 0;
    const branchInfo = details ? ` [branch: ${ds.buildBranchName(details.number, details.title, username)}]` : "";
    log.task(index, allTasks.length, `${task.file}:${task.line} \u2014 ${task.text}${branchInfo}`);
  }
  // Everything is reported as skipped — nothing was actually dispatched.
  return {
    total: allTasks.length,
    completed: 0,
    failed: 0,
    skipped: allTasks.length,
    results: []
  };
}
|
|
4204
|
+
|
|
4205
|
+
// src/orchestrator/runner.ts
|
|
4206
|
+
/**
 * Build the top-level runner object for the CLI.
 *
 * The runner exposes:
 *  - orchestrate():   run the dispatch pipeline with pre-resolved options
 *  - generateSpecs(): run the spec-generation pipeline
 *  - run():           mode-based entry point ("spec" | "fix-tests" | dispatch)
 *  - runFromCli():    resolve raw CLI args into config, validate mutually
 *                     exclusive modes, and delegate to the matching pipeline
 *
 * @param opts - `{ cwd }` working directory the runner is bound to.
 * @returns the runner object.
 */
async function boot9(opts) {
  const { cwd } = opts;
  const runner = {
    orchestrate: (runOpts) => runDispatchPipeline(runOpts, cwd),
    generateSpecs: (specOpts) => runSpecPipeline(specOpts),
    // Programmatic entry point: dispatch by `mode`, stripping the mode
    // field before forwarding the remaining options.
    async run(opts2) {
      if (opts2.mode === "spec") {
        const { mode: _2, ...rest2 } = opts2;
        return runner.generateSpecs({ ...rest2, cwd });
      }
      if (opts2.mode === "fix-tests") {
        // Lazy-load the fix-tests pipeline module (bundler __esm pattern).
        const { runFixTestsPipeline: runFixTestsPipeline2 } = await Promise.resolve().then(() => (init_fix_tests_pipeline(), fix_tests_pipeline_exports));
        return runFixTestsPipeline2({ cwd, provider: "opencode", serverUrl: void 0, verbose: false, testTimeout: opts2.testTimeout });
      }
      const { mode: _, ...rest } = opts2;
      return runner.orchestrate(rest);
    },
    // CLI entry point: resolve config, check prerequisites, validate flag
    // combinations, then route to fix-tests / spec / respec / dispatch.
    async runFromCli(args) {
      const m = await resolveCliConfig(args);
      const prereqFailures = await checkPrereqs({ datasource: m.issueSource });
      if (prereqFailures.length > 0) {
        for (const msg of prereqFailures) {
          log.error(msg);
        }
        process.exit(1);
      }
      await ensureGitignoreEntry(m.cwd, ".dispatch/worktrees/");
      // --spec / --respec / --fix-tests are mutually exclusive modes.
      const modeFlags = [
        m.spec !== void 0 && "--spec",
        m.respec !== void 0 && "--respec",
        m.fixTests && "--fix-tests"
      ].filter(Boolean);
      if (modeFlags.length > 1) {
        log.error(`${modeFlags.join(" and ")} are mutually exclusive`);
        process.exit(1);
      }
      if (m.fixTests && m.issueIds.length > 0) {
        log.error("--fix-tests cannot be combined with issue IDs");
        process.exit(1);
      }
      if (m.fixTests) {
        const { runFixTestsPipeline: runFixTestsPipeline2 } = await Promise.resolve().then(() => (init_fix_tests_pipeline(), fix_tests_pipeline_exports));
        return runFixTestsPipeline2({ cwd: m.cwd, provider: m.provider, serverUrl: m.serverUrl, verbose: m.verbose, testTimeout: m.testTimeout });
      }
      if (m.spec) {
        return this.generateSpecs({
          issues: m.spec,
          issueSource: m.issueSource,
          provider: m.provider,
          model: m.model,
          serverUrl: m.serverUrl,
          cwd: m.cwd,
          outputDir: m.outputDir,
          org: m.org,
          project: m.project,
          workItemType: m.workItemType,
          concurrency: m.concurrency,
          dryRun: m.dryRun
        });
      }
      if (m.respec) {
        const respecArgs = m.respec;
        // Bare --respec (empty array) means "regenerate every existing spec".
        const isEmpty = Array.isArray(respecArgs) && respecArgs.length === 0;
        let issues;
        if (isEmpty) {
          const source = await resolveSource(respecArgs, m.issueSource, m.cwd);
          if (!source) {
            process.exit(1);
          }
          const datasource4 = getDatasource(source);
          const existing = await datasource4.list({ cwd: m.cwd, org: m.org, project: m.project, workItemType: m.workItemType });
          if (existing.length === 0) {
            log.error("No existing specs found to regenerate");
            process.exit(1);
          }
          const identifiers = existing.map((item) => item.number);
          // Numeric ids collapse to a comma list; otherwise pass the raw array.
          const allNumeric = identifiers.every((id) => /^\d+$/.test(id));
          issues = allNumeric ? identifiers.join(",") : identifiers;
          const confirmed = await confirmLargeBatch(existing.length);
          if (!confirmed) {
            process.exit(0);
          }
        } else {
          issues = respecArgs;
        }
        return this.generateSpecs({
          issues,
          issueSource: m.issueSource,
          provider: m.provider,
          model: m.model,
          serverUrl: m.serverUrl,
          cwd: m.cwd,
          outputDir: m.outputDir,
          org: m.org,
          project: m.project,
          workItemType: m.workItemType,
          concurrency: m.concurrency,
          dryRun: m.dryRun
        });
      }
      // Default mode: dispatch the given (or all open) issues.
      return this.orchestrate({
        issueIds: m.issueIds,
        concurrency: m.concurrency ?? defaultConcurrency(),
        dryRun: m.dryRun,
        noPlan: m.noPlan,
        noBranch: m.noBranch,
        noWorktree: m.noWorktree,
        provider: m.provider,
        model: m.model,
        serverUrl: m.serverUrl,
        source: m.issueSource,
        org: m.org,
        project: m.project,
        workItemType: m.workItemType,
        planTimeout: m.planTimeout,
        planRetries: m.planRetries,
        retries: m.retries,
        force: m.force
      });
    }
  };
  return runner;
}
|
|
4329
|
+
|
|
4330
|
+
// src/cli.ts
|
|
4331
|
+
// Initialize lazily-bundled modules needed by the CLI entry point.
init_logger();
init_cleanup();
init_providers();
// Hard cap on --concurrency regardless of CPU/memory heuristics.
var MAX_CONCURRENCY = 64;
|
|
4335
|
+
// CLI usage text shown for -h/--help.
// Fix: the "Spec options" section previously listed `--spec <value>` twice
// with conflicting descriptions; the two entries are merged into one.
var HELP = `
dispatch \u2014 AI agent orchestration CLI

Usage:
  dispatch [issue-id...]         Dispatch specific issues (or all open issues if none given)
  dispatch --spec <ids>          Generate spec files from issues
  dispatch --spec <glob>         Generate specs from local markdown files in the configured datasource
  dispatch --respec              Regenerate all existing specs
  dispatch --respec <ids>        Regenerate specs for specific issues
  dispatch --respec <glob>       Regenerate specs matching a glob pattern
  dispatch --spec "description"  Generate a spec from an inline text description
  dispatch --fix-tests           Run tests and fix failures via AI agent

Dispatch options:
  --dry-run             List tasks without dispatching (also works with --spec)
  --no-plan             Skip the planner agent, dispatch directly
  --no-branch           Skip branch creation, push, and PR lifecycle
  --no-worktree         Skip git worktree isolation for parallel issues
  --force               Ignore prior run state and re-run all tasks
  --concurrency <n>     Max parallel dispatches (default: min(cpus, freeMB/500), max: 64)
  --provider <name>     Agent backend: ${PROVIDER_NAMES.join(", ")} (default: opencode)
  --server-url <url>    URL of a running provider server
  --plan-timeout <min>  Planning timeout in minutes (default: 10)
  --retries <n>         Retry attempts for all agents (default: 2)
  --plan-retries <n>    Retry attempts after planning timeout (overrides --retries for planner)
  --test-timeout <min>  Test timeout in minutes (default: 5)
  --cwd <dir>           Working directory (default: cwd)

Spec options:
  --spec <value>        Comma-separated issue numbers, glob pattern for .md files, or inline text description (creates specs in configured datasource)
  --respec [value]      Regenerate specs: issue numbers, glob, or omit to regenerate all existing specs
  --source <name>       Issue source: ${DATASOURCE_NAMES.join(", ")} (optional; auto-detected from git remote, falls back to md)
  --org <url>           Azure DevOps organization URL
  --project <name>      Azure DevOps project name
  --output-dir <dir>    Output directory for specs (default: .dispatch/specs)

General:
  --verbose             Show detailed debug output for troubleshooting
  -h, --help            Show this help
  -v, --version         Show version

Config:
  dispatch config       Launch interactive configuration wizard

Examples:
  dispatch 14
  dispatch 14,15,16
  dispatch 14 15 16
  dispatch
  dispatch 14 --dry-run
  dispatch 14 --provider copilot
  dispatch --spec 42,43,44
  dispatch --spec 42,43 --source github --provider copilot
  dispatch --spec 100,200 --source azdevops --org https://dev.azure.com/myorg --project MyProject
  dispatch --spec "drafts/*.md"
  dispatch --spec "drafts/*.md" --source github
  dispatch --spec "./my-feature.md" --provider copilot
  dispatch --respec
  dispatch --respec 42,43,44
  dispatch --respec "specs/*.md"
  dispatch --spec "add dark mode toggle to settings page"
  dispatch --spec "feature A should do x" --provider copilot
  dispatch config
`.trimStart();
|
|
4400
|
+
function parseArgs(argv) {
  // Parse raw CLI argv (process.argv minus node/script) into a structured args
  // object. Returns [args, explicitFlags], where explicitFlags records which
  // options the user set explicitly so config-file defaults can be layered in
  // without clobbering them.
  const args = {
    issueIds: [],
    dryRun: false,
    noPlan: false,
    noBranch: false,
    noWorktree: false,
    force: false,
    provider: "opencode",
    cwd: process.cwd(),
    help: false,
    version: false,
    verbose: false
  };
  const explicitFlags = /* @__PURE__ */ new Set();
  let i = 0;
  // Simple on/off switches: flag token -> args key.
  const booleanFlags = new Map([
    ["--help", "help"],
    ["-h", "help"],
    ["--version", "version"],
    ["-v", "version"],
    ["--dry-run", "dryRun"],
    ["--no-plan", "noPlan"],
    ["--no-branch", "noBranch"],
    ["--no-worktree", "noWorktree"],
    ["--force", "force"],
    ["--verbose", "verbose"],
    ["--fix-tests", "fixTests"]
  ]);
  // Consume the value that must follow a flag. Fixes a crash in the original:
  // `--cwd` or `--output-dir` as the last argument passed undefined into
  // resolve(), throwing an unhandled TypeError instead of a clean error.
  const takeValue = (flag) => {
    i++;
    if (i >= argv.length) {
      log.error(`${flag} requires a value`);
      process.exit(1);
    }
    return argv[i];
  };
  // Consume zero or more values after --spec/--respec, stopping at the next
  // `--`-prefixed token. Leaves `i` on the last consumed token (or the flag
  // itself when no values follow), matching the original i++/i-- dance.
  const collectValues = () => {
    const values = [];
    while (i + 1 < argv.length && !argv[i + 1].startsWith("--")) {
      i++;
      values.push(argv[i]);
    }
    return values;
  };
  // Parse and validate an integer flag value; exits with the original
  // error messages on bad input or when an upper bound is exceeded.
  const takeInt = (flag, min, max) => {
    const val = Number.parseInt(takeValue(flag), 10);
    if (Number.isNaN(val) || val < min) {
      log.error(
        min === 0 ? `${flag} must be a non-negative integer` : `${flag} must be a positive integer`
      );
      process.exit(1);
    }
    if (max !== void 0 && val > max) {
      log.error(`${flag} must not exceed ${max}`);
      process.exit(1);
    }
    return val;
  };
  // Parse and validate a positive float flag value (interpreted as minutes).
  const takeMinutes = (flag) => {
    const val = Number.parseFloat(takeValue(flag));
    if (Number.isNaN(val) || val <= 0) {
      log.error(`${flag} must be a positive number (minutes)`);
      process.exit(1);
    }
    return val;
  };
  while (i < argv.length) {
    const arg = argv[i];
    if (booleanFlags.has(arg)) {
      const key = booleanFlags.get(arg);
      args[key] = true;
      explicitFlags.add(key);
    } else if (arg === "--spec") {
      const specs = collectValues();
      // Single value stays a string (issue list, glob, or inline text);
      // multiple values become an array.
      args.spec = specs.length === 1 ? specs[0] : specs;
      explicitFlags.add("spec");
    } else if (arg === "--respec") {
      const respecs = collectValues();
      // Bare `--respec` (empty array) means "regenerate all existing specs".
      args.respec = respecs.length === 1 ? respecs[0] : respecs;
      explicitFlags.add("respec");
    } else if (arg === "--source") {
      const val = takeValue(arg);
      if (!DATASOURCE_NAMES.includes(val)) {
        log.error(
          `Unknown source "${val}". Available: ${DATASOURCE_NAMES.join(", ")}`
        );
        process.exit(1);
      }
      args.issueSource = val;
      explicitFlags.add("issueSource");
    } else if (arg === "--org") {
      args.org = takeValue(arg);
      explicitFlags.add("org");
    } else if (arg === "--project") {
      args.project = takeValue(arg);
      explicitFlags.add("project");
    } else if (arg === "--output-dir") {
      args.outputDir = resolve(takeValue(arg));
      explicitFlags.add("outputDir");
    } else if (arg === "--concurrency") {
      args.concurrency = takeInt(arg, 1, MAX_CONCURRENCY);
      explicitFlags.add("concurrency");
    } else if (arg === "--provider") {
      const val = takeValue(arg);
      if (!PROVIDER_NAMES.includes(val)) {
        log.error(`Unknown provider "${val}". Available: ${PROVIDER_NAMES.join(", ")}`);
        process.exit(1);
      }
      args.provider = val;
      explicitFlags.add("provider");
    } else if (arg === "--server-url") {
      args.serverUrl = takeValue(arg);
      explicitFlags.add("serverUrl");
    } else if (arg === "--plan-timeout") {
      args.planTimeout = takeMinutes(arg);
      explicitFlags.add("planTimeout");
    } else if (arg === "--retries") {
      args.retries = takeInt(arg, 0);
      explicitFlags.add("retries");
    } else if (arg === "--plan-retries") {
      args.planRetries = takeInt(arg, 0);
      explicitFlags.add("planRetries");
    } else if (arg === "--test-timeout") {
      args.testTimeout = takeMinutes(arg);
      explicitFlags.add("testTimeout");
    } else if (arg === "--cwd") {
      args.cwd = resolve(takeValue(arg));
      explicitFlags.add("cwd");
    } else if (!arg.startsWith("-")) {
      // Bare positional tokens are issue ids (e.g. `dispatch 14 15`).
      args.issueIds.push(arg);
    } else {
      log.error(`Unknown option: ${arg}`);
      process.exit(1);
    }
    i++;
  }
  return [args, explicitFlags];
}
|
|
4564
|
+
async function main() {
  // CLI entry point: handle the `config` subcommand, parse flags, install
  // signal cleanup, then hand off to the orchestrator.
  const rawArgv = process.argv.slice(2);
  if (rawArgv[0] === "config") {
    // `dispatch config` bypasses normal flag parsing; only --cwd is honored.
    let cwd = process.cwd();
    for (let idx = 1; idx < rawArgv.length; idx++) {
      if (rawArgv[idx] === "--cwd" && idx + 1 < rawArgv.length) {
        cwd = resolve(rawArgv[idx + 1]);
        break;
      }
    }
    await handleConfigCommand(rawArgv.slice(1), join11(cwd, ".dispatch"));
    process.exit(0);
  }
  const [args, explicitFlags] = parseArgs(rawArgv);
  log.verbose = args.verbose;
  // Run cleanup on termination signals, then exit with the conventional
  // 128+N codes (130 = SIGINT, 143 = SIGTERM).
  for (const [signal, exitCode] of [["SIGINT", 130], ["SIGTERM", 143]]) {
    process.on(signal, async () => {
      log.debug(`Received ${signal}, cleaning up...`);
      await runCleanup();
      process.exit(exitCode);
    });
  }
  if (args.help) {
    console.log(HELP);
    process.exit(0);
  }
  if (args.version) {
    console.log("dispatch v0.0.1");
    process.exit(0);
  }
  const orchestrator = await boot9({ cwd: args.cwd });
  // Strip CLI-only flags before passing the rest through to the orchestrator.
  const { help: _, version: __, ...rawArgs } = args;
  const summary = await orchestrator.runFromCli({ ...rawArgs, explicitFlags });
  // Summaries report either a `failed` count or a boolean `success`.
  let failed = 0;
  if ("failed" in summary) {
    failed = summary.failed;
  } else if ("success" in summary && !summary.success) {
    failed = 1;
  }
  process.exit(failed > 0 ? 1 : 0);
}
|
|
4604
|
+
// Top-level failure path: print the error (formatErrorChain walks the
// Error `cause` chain), run registered cleanup, and exit non-zero so
// shells and CI see the failure.
main().catch(async (err) => {
  log.error(log.formatErrorChain(err));
  await runCleanup();
  process.exit(1);
});
// Public exports of this bundle.
export {
  MAX_CONCURRENCY,
  parseArgs
};
//# sourceMappingURL=cli.js.map
|