@tagma/sdk 0.4.2 → 0.4.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/dist/adapters/stdin-approval.d.ts +6 -0
- package/dist/adapters/stdin-approval.d.ts.map +1 -0
- package/dist/adapters/stdin-approval.js +90 -0
- package/dist/adapters/stdin-approval.js.map +1 -0
- package/dist/adapters/websocket-approval.d.ts +28 -0
- package/dist/adapters/websocket-approval.d.ts.map +1 -0
- package/dist/adapters/websocket-approval.js +145 -0
- package/dist/adapters/websocket-approval.js.map +1 -0
- package/dist/approval.d.ts +13 -0
- package/dist/approval.d.ts.map +1 -0
- package/dist/approval.js +91 -0
- package/dist/approval.js.map +1 -0
- package/dist/bootstrap.d.ts +2 -0
- package/dist/bootstrap.d.ts.map +1 -0
- package/dist/bootstrap.js +30 -0
- package/dist/bootstrap.js.map +1 -0
- package/dist/completions/exit-code.d.ts +3 -0
- package/dist/completions/exit-code.d.ts.map +1 -0
- package/dist/completions/exit-code.js +25 -0
- package/dist/completions/exit-code.js.map +1 -0
- package/dist/completions/file-exists.d.ts +3 -0
- package/dist/completions/file-exists.d.ts.map +1 -0
- package/dist/completions/file-exists.js +58 -0
- package/dist/completions/file-exists.js.map +1 -0
- package/dist/completions/output-check.d.ts +3 -0
- package/dist/completions/output-check.d.ts.map +1 -0
- package/dist/completions/output-check.js +81 -0
- package/dist/completions/output-check.js.map +1 -0
- package/dist/config-ops.d.ts +55 -0
- package/dist/config-ops.d.ts.map +1 -0
- package/dist/config-ops.js +258 -0
- package/dist/config-ops.js.map +1 -0
- package/dist/dag.d.ts +46 -0
- package/dist/dag.d.ts.map +1 -0
- package/dist/dag.js +193 -0
- package/dist/dag.js.map +1 -0
- package/dist/drivers/claude-code.d.ts +3 -0
- package/dist/drivers/claude-code.d.ts.map +1 -0
- package/dist/drivers/claude-code.js +210 -0
- package/dist/drivers/claude-code.js.map +1 -0
- package/dist/engine.d.ts +89 -0
- package/dist/engine.d.ts.map +1 -0
- package/dist/engine.js +815 -0
- package/dist/engine.js.map +1 -0
- package/dist/hooks.d.ts +73 -0
- package/dist/hooks.d.ts.map +1 -0
- package/dist/hooks.js +103 -0
- package/dist/hooks.js.map +1 -0
- package/dist/logger.d.ts +61 -0
- package/dist/logger.d.ts.map +1 -0
- package/dist/logger.js +151 -0
- package/dist/logger.js.map +1 -0
- package/dist/middlewares/static-context.d.ts +3 -0
- package/dist/middlewares/static-context.d.ts.map +1 -0
- package/dist/middlewares/static-context.js +36 -0
- package/dist/middlewares/static-context.js.map +1 -0
- package/dist/pipeline-runner.d.ts +50 -0
- package/dist/pipeline-runner.d.ts.map +1 -0
- package/dist/pipeline-runner.js +139 -0
- package/dist/pipeline-runner.js.map +1 -0
- package/dist/registry.d.ts +43 -0
- package/dist/registry.d.ts.map +1 -0
- package/dist/registry.js +182 -0
- package/dist/registry.js.map +1 -0
- package/dist/runner.d.ts +19 -0
- package/dist/runner.d.ts.map +1 -0
- package/dist/runner.js +364 -0
- package/dist/runner.js.map +1 -0
- package/dist/schema.d.ts +27 -0
- package/dist/schema.d.ts.map +1 -0
- package/dist/schema.js +373 -0
- package/dist/schema.js.map +1 -0
- package/dist/sdk.d.ts +27 -0
- package/dist/sdk.d.ts.map +1 -0
- package/dist/sdk.js +33 -0
- package/dist/sdk.js.map +1 -0
- package/dist/templates.d.ts +20 -0
- package/dist/templates.d.ts.map +1 -0
- package/dist/templates.js +93 -0
- package/dist/templates.js.map +1 -0
- package/dist/triggers/file.d.ts +3 -0
- package/dist/triggers/file.d.ts.map +1 -0
- package/dist/triggers/file.js +123 -0
- package/dist/triggers/file.js.map +1 -0
- package/dist/triggers/manual.d.ts +3 -0
- package/dist/triggers/manual.d.ts.map +1 -0
- package/dist/triggers/manual.js +73 -0
- package/dist/triggers/manual.js.map +1 -0
- package/dist/types.d.ts +4 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +14 -0
- package/dist/types.js.map +1 -0
- package/dist/utils.d.ts +14 -0
- package/dist/utils.d.ts.map +1 -0
- package/dist/utils.js +138 -0
- package/dist/utils.js.map +1 -0
- package/dist/validate-raw.d.ts +26 -0
- package/dist/validate-raw.d.ts.map +1 -0
- package/dist/validate-raw.js +260 -0
- package/dist/validate-raw.js.map +1 -0
- package/package.json +26 -17
- package/src/approval.ts +5 -3
- package/src/registry.ts +214 -214
- package/scripts/preinstall.js +0 -38
package/dist/engine.js
ADDED
|
@@ -0,0 +1,815 @@
|
|
|
1
|
+
import { resolve, dirname } from 'path';
|
|
2
|
+
import { mkdir, readdir, rm } from 'fs/promises';
|
|
3
|
+
import { buildDag } from './dag';
|
|
4
|
+
import { getHandler, hasHandler, loadPlugins } from './registry';
|
|
5
|
+
import { runSpawn, runCommand } from './runner';
|
|
6
|
+
import { parseDuration, nowISO, generateRunId, validatePath } from './utils';
|
|
7
|
+
import { executeHook, buildPipelineStartContext, buildTaskContext, buildPipelineCompleteContext, buildPipelineErrorContext, } from './hooks';
|
|
8
|
+
import { Logger, tailLines, clip } from './logger';
|
|
9
|
+
import { InMemoryApprovalGateway } from './approval';
|
|
10
|
+
// ═══ A7: Typed trigger errors ═══
|
|
11
|
+
// Replace string-matching on error messages with structured error types so
|
|
12
|
+
// coincidental substrings don't cause misclassification.
|
|
13
|
+
// Thrown when a trigger wait ends because a user or policy explicitly
// rejected the task (e.g. an approval gateway denial). Carries a stable,
// machine-readable `code` so the engine classifies the failure structurally
// instead of string-matching on the message.
export class TriggerBlockedError extends Error {
    code = 'TRIGGER_BLOCKED';
    name = 'TriggerBlockedError';
    constructor(message) {
        super(message);
    }
}
|
|
20
|
+
// Thrown when a trigger wait expires before firing. The stable `code`
// field lets the engine distinguish a genuine wait timeout from other
// trigger failures without inspecting message text.
export class TriggerTimeoutError extends Error {
    code = 'TRIGGER_TIMEOUT';
    name = 'TriggerTimeoutError';
    constructor(message) {
        super(message);
    }
}
|
|
27
|
+
// ═══ Preflight Validation ═══
|
|
28
|
+
/**
 * Fail-fast validation of the pipeline against the plugin registry before
 * any task starts. Every problem across every task is collected and then
 * reported in a single Error, so the user can fix the whole config in one
 * pass rather than replaying the pipeline once per mistake.
 *
 * Checks per task:
 *  - the resolved driver is registered (skipped for pure command tasks);
 *  - trigger / completion / middleware types are registered;
 *  - a continue_from handoff is actually feasible: session resume, an
 *    upstream `output` file, or an upstream driver with parseResult.
 */
function preflight(config, dag) {
    const errors = [];
    for (const [, node] of dag.nodes) {
        const { task, track } = node;
        const driverName = task.driver ?? track.driver ?? config.driver ?? 'claude-code';
        // Pure command tasks don't use a driver — skip driver registration check.
        const isCommandOnly = task.command && !task.prompt;
        if (!isCommandOnly && !hasHandler('drivers', driverName)) {
            errors.push(`Task "${node.taskId}": driver "${driverName}" not registered`);
        }
        if (task.trigger && !hasHandler('triggers', task.trigger.type)) {
            errors.push(`Task "${node.taskId}": trigger type "${task.trigger.type}" not registered`);
        }
        if (task.completion && !hasHandler('completions', task.completion.type)) {
            errors.push(`Task "${node.taskId}": completion type "${task.completion.type}" not registered`);
        }
        for (const mw of task.middlewares ?? track.middlewares ?? []) {
            if (!hasHandler('middlewares', mw.type)) {
                errors.push(`Task "${node.taskId}": middleware type "${mw.type}" not registered`);
            }
        }
        // continue_from handoff feasibility — guard clauses instead of nesting.
        // Only meaningful when the driver itself is registered (an unregistered
        // driver was already reported above).
        if (!task.continue_from || !hasHandler('drivers', driverName)) {
            continue;
        }
        const driver = getHandler('drivers', driverName);
        if (driver.capabilities.sessionResume) {
            continue; // driver can resume the upstream session directly
        }
        const upstreamId = resolveRefInDag(dag, task.continue_from, track.id);
        const upstream = upstreamId ? dag.nodes.get(upstreamId) : undefined;
        if (!upstream) {
            continue;
        }
        // With session resume ruled out, a handoff needs either an output
        // file on the upstream task OR in-memory text injection through
        // normalizedMap (upstream driver implements parseResult and returns
        // normalizedOutput).
        const upstreamDriverName = upstream.task.driver ?? upstream.track.driver
            ?? config.driver ?? 'claude-code';
        const upstreamDriver = hasHandler('drivers', upstreamDriverName)
            ? getHandler('drivers', upstreamDriverName)
            : null;
        const canNormalize = typeof upstreamDriver?.parseResult === 'function';
        if (!upstream.task.output && !canNormalize) {
            errors.push(`Task "${node.taskId}" uses continue_from: "${task.continue_from}", ` +
                `but upstream task "${upstreamId}" has no "output" field and its driver ` +
                `does not implement parseResult for text-injection handoff. ` +
                `Add output to the upstream task, use a driver with parseResult, or remove continue_from.`);
        }
    }
    if (errors.length > 0) {
        throw new Error(`Preflight validation failed:\n - ${errors.join('\n - ')}`);
    }
}
|
|
82
|
+
/**
 * Resolve a task reference to a fully-qualified "<track>.<task>" id within
 * the DAG. Resolution order:
 *  1. the ref is already a qualified id present in the DAG;
 *  2. a task with that bare name in the referencing track;
 *  3. a unique bare-name match across all tracks.
 * Returns null when nothing matches or the cross-track match is ambiguous
 * (mirrors buildDag's resolveRef semantics).
 */
function resolveRefInDag(dag, ref, fromTrackId) {
    if (dag.nodes.has(ref)) {
        return ref;
    }
    const qualified = `${fromTrackId}.${ref}`;
    if (dag.nodes.has(qualified)) {
        return qualified;
    }
    const suffix = `.${ref}`;
    let found = null;
    for (const id of dag.nodes.keys()) {
        if (!id.endsWith(suffix)) {
            continue;
        }
        if (found !== null) {
            // Two or more tracks define the same bare task name — ambiguous.
            return null;
        }
        found = id;
    }
    return found;
}
|
|
103
|
+
// Poll interval when no tasks are in-flight but non-terminal tasks remain
// (e.g. tasks waiting on a file or manual trigger).
const POLL_INTERVAL_MS = 50;
// R15: cap on each normalized-output entry stored in normalizedMap so a
// runaway parseResult can't accumulate hundreds of MB across tasks. 1 MB
// is generous for any text-context handoff between AI tasks.
// NOTE(review): the cap is compared against String.length (UTF-16 code
// units), so the real byte footprint of non-ASCII text can exceed this
// nominal "byte" limit — confirm whether a byte-accurate cap is required.
const MAX_NORMALIZED_BYTES = 1_000_000;
|
|
110
|
+
export async function runPipeline(config, workDir, options = {}) {
|
|
111
|
+
const approvalGateway = options.approvalGateway ?? new InMemoryApprovalGateway();
|
|
112
|
+
const maxLogRuns = options.maxLogRuns ?? 20;
|
|
113
|
+
// Load any plugins declared in the pipeline config before preflight so that
|
|
114
|
+
// drivers, completions, and middlewares referenced in YAML are registered.
|
|
115
|
+
// Hosts that pre-load plugins from a custom path (e.g. the editor loading
|
|
116
|
+
// from the user's workspace node_modules) pass skipPluginLoading: true so
|
|
117
|
+
// we don't re-resolve via Node's cwd-based default import.
|
|
118
|
+
if (!options.skipPluginLoading && config.plugins?.length) {
|
|
119
|
+
await loadPlugins(config.plugins);
|
|
120
|
+
}
|
|
121
|
+
const dag = buildDag(config);
|
|
122
|
+
const runId = options.runId ?? generateRunId();
|
|
123
|
+
preflight(config, dag);
|
|
124
|
+
const startedAt = nowISO();
|
|
125
|
+
const pipelineInfo = { name: config.name, run_id: runId, started_at: startedAt };
|
|
126
|
+
// Forward every structured log line to subscribers as task_log events.
|
|
127
|
+
// Reading options.onEvent inside the callback (vs. capturing it once) keeps
|
|
128
|
+
// the SDK behavior correct if callers pass a fresh onEvent on each run.
|
|
129
|
+
const log = new Logger(workDir, runId, (record) => {
|
|
130
|
+
options.onEvent?.({
|
|
131
|
+
type: 'task_log',
|
|
132
|
+
runId,
|
|
133
|
+
taskId: record.taskId,
|
|
134
|
+
level: record.level,
|
|
135
|
+
timestamp: record.timestamp,
|
|
136
|
+
text: record.text,
|
|
137
|
+
});
|
|
138
|
+
});
|
|
139
|
+
try {
|
|
140
|
+
log.info('[pipeline]', `start "${config.name}" run_id=${runId}`);
|
|
141
|
+
// File-only: dump the resolved pipeline shape + DAG topology for post-mortem.
|
|
142
|
+
log.section('Pipeline configuration');
|
|
143
|
+
log.quiet(`name: ${config.name}`);
|
|
144
|
+
log.quiet(`driver: ${config.driver ?? '(default: claude-code)'}`);
|
|
145
|
+
log.quiet(`timeout: ${config.timeout ?? '(none)'}`);
|
|
146
|
+
log.quiet(`tracks: ${config.tracks.length}`);
|
|
147
|
+
log.quiet(`tasks (total): ${dag.nodes.size}`);
|
|
148
|
+
log.quiet(`plugins: ${(config.plugins ?? []).join(', ') || '(none)'}`);
|
|
149
|
+
log.quiet(`hooks: ${config.hooks ? Object.keys(config.hooks).join(', ') || '(none)' : '(none)'}`);
|
|
150
|
+
log.section('DAG topology');
|
|
151
|
+
for (const [id, node] of dag.nodes) {
|
|
152
|
+
const deps = node.dependsOn.length ? node.dependsOn.join(', ') : '(root)';
|
|
153
|
+
const kind = node.task.prompt ? 'ai' : 'cmd';
|
|
154
|
+
log.quiet(` • ${id} [${kind}] track=${node.track.id} deps=[${deps}]`);
|
|
155
|
+
}
|
|
156
|
+
log.quiet('');
|
|
157
|
+
// Initialize states (before hook, so we can return them even if blocked)
|
|
158
|
+
const states = new Map();
|
|
159
|
+
for (const [id, node] of dag.nodes) {
|
|
160
|
+
states.set(id, {
|
|
161
|
+
config: node.task,
|
|
162
|
+
trackConfig: node.track,
|
|
163
|
+
status: 'idle',
|
|
164
|
+
result: null,
|
|
165
|
+
startedAt: null,
|
|
166
|
+
finishedAt: null,
|
|
167
|
+
});
|
|
168
|
+
}
|
|
169
|
+
// Pipeline start hook (gate)
|
|
170
|
+
const startHook = await executeHook(config.hooks, 'pipeline_start', buildPipelineStartContext(pipelineInfo), workDir);
|
|
171
|
+
if (!startHook.allowed) {
|
|
172
|
+
console.error(`Pipeline blocked by pipeline_start hook (exit code ${startHook.exitCode})`);
|
|
173
|
+
await executeHook(config.hooks, 'pipeline_error', buildPipelineErrorContext(pipelineInfo, 'pipeline_blocked', 'pipeline_blocked'), workDir);
|
|
174
|
+
// All tasks stay idle — pipeline never started
|
|
175
|
+
return {
|
|
176
|
+
success: false,
|
|
177
|
+
runId,
|
|
178
|
+
logPath: log.path,
|
|
179
|
+
summary: { total: dag.nodes.size, success: 0, failed: 0, skipped: 0, timeout: 0, blocked: 0 },
|
|
180
|
+
states: freezeStates(states),
|
|
181
|
+
};
|
|
182
|
+
}
|
|
183
|
+
// Pipeline approved — transition all tasks to waiting
|
|
184
|
+
for (const [, state] of states) {
|
|
185
|
+
state.status = 'waiting';
|
|
186
|
+
}
|
|
187
|
+
// Include a full states snapshot so listeners can initialize their mirrors without missing events
|
|
188
|
+
const statesSnapshot = new Map([...states.entries()].map(([id, s]) => [id, { ...s }]));
|
|
189
|
+
options.onEvent?.({ type: 'pipeline_start', runId, states: statesSnapshot });
|
|
190
|
+
const sessionMap = new Map();
|
|
191
|
+
const outputMap = new Map();
|
|
192
|
+
const normalizedMap = new Map();
|
|
193
|
+
// Pipeline timeout
|
|
194
|
+
const pipelineTimeoutMs = config.timeout ? parseDuration(config.timeout) : 0;
|
|
195
|
+
let pipelineAborted = false;
|
|
196
|
+
const abortController = new AbortController();
|
|
197
|
+
let pipelineTimer = null;
|
|
198
|
+
if (pipelineTimeoutMs > 0) {
|
|
199
|
+
pipelineTimer = setTimeout(() => {
|
|
200
|
+
pipelineAborted = true;
|
|
201
|
+
abortController.abort();
|
|
202
|
+
}, pipelineTimeoutMs);
|
|
203
|
+
}
|
|
204
|
+
// When the pipeline is aborted (timeout, external shutdown), drain all
|
|
205
|
+
// pending approvals so waiting triggers unblock immediately.
|
|
206
|
+
abortController.signal.addEventListener('abort', () => {
|
|
207
|
+
approvalGateway.abortAll('pipeline aborted');
|
|
208
|
+
});
|
|
209
|
+
// Wire external cancel signal into the internal abort controller.
|
|
210
|
+
const externalAbortHandler = () => {
|
|
211
|
+
pipelineAborted = true;
|
|
212
|
+
abortController.abort();
|
|
213
|
+
};
|
|
214
|
+
if (options.signal) {
|
|
215
|
+
if (options.signal.aborted) {
|
|
216
|
+
externalAbortHandler();
|
|
217
|
+
}
|
|
218
|
+
else {
|
|
219
|
+
options.signal.addEventListener('abort', externalAbortHandler, { once: true });
|
|
220
|
+
}
|
|
221
|
+
}
|
|
222
|
+
// ── Helpers ──
|
|
223
|
+
// Fan a pipeline event out to the caller's listener. options.onEvent is
// re-read on every call (not captured once) so callers that swap the
// listener between runs keep working; the method-call form preserves the
// original receiver and throw-on-non-function behavior.
function emit(event) {
    if (options.onEvent != null) {
        options.onEvent(event);
    }
}
|
|
226
|
+
// Transition a task to a new status and notify subscribers.
// Terminal lock: once a task reaches a terminal state it must never be
// re-transitioned — stop_all may mark running tasks skipped, and their
// in-flight processTask promise must not later overwrite that with
// success/failed (an invalid double transition).
function setTaskStatus(taskId, newStatus) {
    const entry = states.get(taskId);
    if (isTerminal(entry.status)) {
        return;
    }
    const previous = entry.status;
    entry.status = newStatus;
    // Snapshot at emit time; for terminal statuses the caller must have set
    // result and finishedAt before invoking this function.
    const frozen = {
        config: entry.config,
        trackConfig: entry.trackConfig,
        status: entry.status,
        result: entry.result,
        startedAt: entry.startedAt,
        finishedAt: entry.finishedAt,
    };
    emit({ type: 'task_status_change', taskId, status: newStatus, prevStatus: previous, runId, state: frozen });
}
|
|
247
|
+
// Failure policy of the track owning taskId; defaults to 'skip_downstream'
// when the task or an explicit on_failure setting is absent.
function getOnFailure(taskId) {
    const node = dag.nodes.get(taskId);
    return node?.track.on_failure ?? 'skip_downstream';
}
|
|
250
|
+
// Classify a dependency edge: 'satisfied' lets the dependent run, 'skip'
// cascades a skip, 'unsatisfied' means keep waiting. A failed/timeout/
// blocked upstream still satisfies the edge when its track's on_failure
// policy is 'ignore'.
function isDependencySatisfied(depId) {
    const dep = states.get(depId);
    if (!dep) {
        return 'skip';
    }
    const { status } = dep;
    if (status === 'success') {
        return 'satisfied';
    }
    if (status === 'skipped') {
        return 'skip';
    }
    if (status === 'failed' || status === 'timeout' || status === 'blocked') {
        return getOnFailure(depId) === 'ignore' ? 'satisfied' : 'skip';
    }
    return 'unsatisfied';
}
|
|
264
|
+
/**
 * H3: stop the ENTIRE pipeline — older versions only stopped tasks inside
 * the failing track, contradicting both the name and user expectations.
 * In-flight tasks receive the shared abort signal so drivers / runner.ts
 * can cancel cooperatively (surfacing `failureKind: 'timeout'`); tasks
 * still waiting in any track are marked skipped immediately so the run
 * completes promptly. setTaskStatus's terminal lock guarantees a task
 * that finishes after this point cannot overwrite its skipped state.
 */
function applyStopAll(_failedTrackId) {
    pipelineAborted = true;
    abortController.abort();
    for (const [taskId, taskState] of states.entries()) {
        if (taskState.status !== 'waiting') {
            continue;
        }
        taskState.finishedAt = nowISO();
        setTaskStatus(taskId, 'skipped');
    }
}
|
|
286
|
+
// Snake_case task descriptor handed to hook scripts (hooks see a stable
// JSON-ish shape; fields absent from the run result surface as null).
function buildTaskInfoObj(taskId) {
    const entry = states.get(taskId);
    const runResult = entry.result;
    return {
        id: taskId,
        name: entry.config.name,
        type: entry.config.prompt ? 'ai' : 'command',
        status: entry.status,
        exit_code: runResult?.exitCode ?? null,
        duration_ms: runResult?.durationMs ?? null,
        output_path: runResult?.outputPath ?? null,
        stderr_path: runResult?.stderrPath ?? null,
        session_id: runResult?.sessionId ?? null,
        started_at: entry.startedAt,
        finished_at: entry.finishedAt,
    };
}
|
|
302
|
+
// Minimal {id, name} descriptor of the track owning taskId, for hook contexts.
function trackInfoOf(taskId) {
    const { track } = dag.nodes.get(taskId);
    return { id: track.id, name: track.name };
}
|
|
306
|
+
// Run a per-task lifecycle hook with a freshly built task context; the
// shared abort signal lets hook subprocesses be cancelled with the pipeline.
async function fireHook(taskId, event) {
    const ctx = buildTaskContext(event, pipelineInfo, trackInfoOf(taskId), buildTaskInfoObj(taskId));
    await executeHook(config.hooks, event, ctx, workDir, abortController.signal);
}
|
|
309
|
+
// ── Process a single task ──
|
|
310
|
+
async function processTask(taskId) {
|
|
311
|
+
const state = states.get(taskId);
|
|
312
|
+
const node = dag.nodes.get(taskId);
|
|
313
|
+
const task = node.task;
|
|
314
|
+
const track = node.track;
|
|
315
|
+
log.section(`Task ${taskId}`, taskId);
|
|
316
|
+
log.debug(`[task:${taskId}]`, `type=${task.prompt ? 'ai' : 'cmd'} track=${track.id} deps=[${node.dependsOn.join(', ') || '(root)'}]`);
|
|
317
|
+
// 1. Check dependencies
|
|
318
|
+
for (const depId of node.dependsOn) {
|
|
319
|
+
const result = isDependencySatisfied(depId);
|
|
320
|
+
if (result === 'skip') {
|
|
321
|
+
const depStatus = states.get(depId)?.status ?? 'unknown';
|
|
322
|
+
log.debug(`[task:${taskId}]`, `skipped (upstream "${depId}" status=${depStatus})`);
|
|
323
|
+
state.finishedAt = nowISO();
|
|
324
|
+
setTaskStatus(taskId, 'skipped');
|
|
325
|
+
return;
|
|
326
|
+
}
|
|
327
|
+
if (result === 'unsatisfied')
|
|
328
|
+
return; // still waiting
|
|
329
|
+
}
|
|
330
|
+
// 2. Check trigger
|
|
331
|
+
if (task.trigger) {
|
|
332
|
+
log.debug(`[task:${taskId}]`, `trigger wait: type=${task.trigger.type} ${JSON.stringify(task.trigger)}`);
|
|
333
|
+
try {
|
|
334
|
+
const triggerPlugin = getHandler('triggers', task.trigger.type);
|
|
335
|
+
// R6: race the plugin's watch() against the pipeline's abort signal.
|
|
336
|
+
// Third-party triggers may forget to wire up ctx.signal — without
|
|
337
|
+
// this race, an aborted pipeline would hang forever waiting for the
|
|
338
|
+
// plugin's watch promise to resolve. The race resolves on whichever
|
|
339
|
+
// path settles first, and the cleanup paths in finally never run on
|
|
340
|
+
// the orphaned plugin promise (it's allowed to leak a watcher; the
|
|
341
|
+
// pipeline is being torn down anyway).
|
|
342
|
+
await new Promise((resolve, reject) => {
|
|
343
|
+
let settled = false;
|
|
344
|
+
const onAbort = () => {
|
|
345
|
+
if (settled)
|
|
346
|
+
return;
|
|
347
|
+
settled = true;
|
|
348
|
+
abortController.signal.removeEventListener('abort', onAbort);
|
|
349
|
+
reject(new Error('Pipeline aborted'));
|
|
350
|
+
};
|
|
351
|
+
if (abortController.signal.aborted) {
|
|
352
|
+
onAbort();
|
|
353
|
+
return;
|
|
354
|
+
}
|
|
355
|
+
abortController.signal.addEventListener('abort', onAbort, { once: true });
|
|
356
|
+
triggerPlugin.watch(task.trigger, {
|
|
357
|
+
taskId: node.taskId,
|
|
358
|
+
trackId: track.id,
|
|
359
|
+
workDir: task.cwd ?? workDir,
|
|
360
|
+
signal: abortController.signal,
|
|
361
|
+
approvalGateway,
|
|
362
|
+
}).then((v) => {
|
|
363
|
+
if (settled)
|
|
364
|
+
return;
|
|
365
|
+
settled = true;
|
|
366
|
+
abortController.signal.removeEventListener('abort', onAbort);
|
|
367
|
+
resolve(v);
|
|
368
|
+
}, (e) => {
|
|
369
|
+
if (settled)
|
|
370
|
+
return;
|
|
371
|
+
settled = true;
|
|
372
|
+
abortController.signal.removeEventListener('abort', onAbort);
|
|
373
|
+
reject(e);
|
|
374
|
+
});
|
|
375
|
+
});
|
|
376
|
+
log.debug(`[task:${taskId}]`, `trigger fired`);
|
|
377
|
+
}
|
|
378
|
+
catch (err) {
|
|
379
|
+
// If pipeline was aborted while we were still waiting for the trigger,
|
|
380
|
+
// this task never entered running state → skipped, not timeout.
|
|
381
|
+
state.finishedAt = nowISO();
|
|
382
|
+
if (pipelineAborted) {
|
|
383
|
+
setTaskStatus(taskId, 'skipped');
|
|
384
|
+
}
|
|
385
|
+
else if (err instanceof TriggerBlockedError) {
|
|
386
|
+
setTaskStatus(taskId, 'blocked'); // user/policy rejection
|
|
387
|
+
}
|
|
388
|
+
else if (err instanceof TriggerTimeoutError) {
|
|
389
|
+
setTaskStatus(taskId, 'timeout'); // genuine trigger wait timeout
|
|
390
|
+
}
|
|
391
|
+
else {
|
|
392
|
+
// A7 fallback: also check message strings for backward-compat with
|
|
393
|
+
// third-party trigger plugins that don't throw typed errors yet.
|
|
394
|
+
const msg = err instanceof Error ? err.message : String(err);
|
|
395
|
+
if (msg.includes('rejected') || msg.includes('denied')) {
|
|
396
|
+
setTaskStatus(taskId, 'blocked');
|
|
397
|
+
}
|
|
398
|
+
else if (msg.includes('timeout')) {
|
|
399
|
+
setTaskStatus(taskId, 'timeout');
|
|
400
|
+
}
|
|
401
|
+
else {
|
|
402
|
+
setTaskStatus(taskId, 'failed'); // plugin error, watcher crash, etc.
|
|
403
|
+
}
|
|
404
|
+
}
|
|
405
|
+
try {
|
|
406
|
+
await fireHook(taskId, 'task_failure');
|
|
407
|
+
}
|
|
408
|
+
catch (hookErr) {
|
|
409
|
+
log.error(`[task:${taskId}]`, `hook execution failed: ${hookErr instanceof Error ? hookErr.message : String(hookErr)}`);
|
|
410
|
+
}
|
|
411
|
+
return;
|
|
412
|
+
}
|
|
413
|
+
}
|
|
414
|
+
// 3. task_start hook (gate)
|
|
415
|
+
const hookResult = await executeHook(config.hooks, 'task_start', buildTaskContext('task_start', pipelineInfo, trackInfoOf(taskId), buildTaskInfoObj(taskId)), workDir, abortController.signal);
|
|
416
|
+
if (hookResult.exitCode !== 0 || config.hooks?.task_start) {
|
|
417
|
+
log.debug(`[task:${taskId}]`, `task_start hook exit=${hookResult.exitCode} allowed=${hookResult.allowed}`);
|
|
418
|
+
}
|
|
419
|
+
if (!hookResult.allowed) {
|
|
420
|
+
state.finishedAt = nowISO();
|
|
421
|
+
setTaskStatus(taskId, 'blocked');
|
|
422
|
+
try {
|
|
423
|
+
await fireHook(taskId, 'task_failure');
|
|
424
|
+
}
|
|
425
|
+
catch (hookErr) {
|
|
426
|
+
log.error(`[task:${taskId}]`, `hook execution failed: ${hookErr instanceof Error ? hookErr.message : String(hookErr)}`);
|
|
427
|
+
}
|
|
428
|
+
return;
|
|
429
|
+
}
|
|
430
|
+
// 4. Mark running — set startedAt before emitting so subscribers see a
|
|
431
|
+
// complete snapshot (startedAt non-null) in the task_status_change event.
|
|
432
|
+
state.startedAt = nowISO();
|
|
433
|
+
setTaskStatus(taskId, 'running');
|
|
434
|
+
log.info(`[task:${taskId}]`, task.command ? `running: ${task.command}` : `running (driver task)`);
|
|
435
|
+
// File-only: resolved config for this task
|
|
436
|
+
const resolvedDriver = task.driver ?? track.driver ?? config.driver ?? 'claude-code';
|
|
437
|
+
const resolvedTier = task.model_tier ?? track.model_tier ?? '(default)';
|
|
438
|
+
const resolvedPerms = task.permissions ?? track.permissions ?? '(default)';
|
|
439
|
+
const resolvedCwd = task.cwd ?? track.cwd ?? workDir;
|
|
440
|
+
log.debug(`[task:${taskId}]`, `resolved: driver=${resolvedDriver} tier=${resolvedTier} cwd=${resolvedCwd}`);
|
|
441
|
+
log.debug(`[task:${taskId}]`, `permissions: ${JSON.stringify(resolvedPerms)}`);
|
|
442
|
+
if (task.continue_from) {
|
|
443
|
+
log.debug(`[task:${taskId}]`, `continue_from: "${task.continue_from}"`);
|
|
444
|
+
}
|
|
445
|
+
if (task.timeout) {
|
|
446
|
+
log.debug(`[task:${taskId}]`, `timeout: ${task.timeout}`);
|
|
447
|
+
}
|
|
448
|
+
try {
|
|
449
|
+
let result;
|
|
450
|
+
const timeoutMs = task.timeout ? parseDuration(task.timeout) : undefined;
|
|
451
|
+
const runOpts = { timeoutMs, signal: abortController.signal };
|
|
452
|
+
if (task.command) {
|
|
453
|
+
log.debug(`[task:${taskId}]`, `command: ${task.command}`);
|
|
454
|
+
result = await runCommand(task.command, task.cwd ?? workDir, runOpts);
|
|
455
|
+
}
|
|
456
|
+
else {
|
|
457
|
+
// AI task: apply middleware chain
|
|
458
|
+
const driverName = task.driver ?? track.driver ?? config.driver ?? 'claude-code';
|
|
459
|
+
const driver = getHandler('drivers', driverName);
|
|
460
|
+
let prompt = task.prompt;
|
|
461
|
+
const originalLen = prompt.length;
|
|
462
|
+
const mws = task.middlewares !== undefined ? task.middlewares : track.middlewares;
|
|
463
|
+
if (mws && mws.length > 0) {
|
|
464
|
+
log.debug(`[task:${taskId}]`, `middleware chain: ${mws.map(m => m.type).join(' → ')}`);
|
|
465
|
+
const mwCtx = {
|
|
466
|
+
task, track, outputMap, workDir: task.cwd ?? workDir,
|
|
467
|
+
};
|
|
468
|
+
for (const mwConfig of mws) {
|
|
469
|
+
const before = prompt.length;
|
|
470
|
+
const mwPlugin = getHandler('middlewares', mwConfig.type);
|
|
471
|
+
const next = await mwPlugin.enhance(prompt, mwConfig, mwCtx);
|
|
472
|
+
// R3: a middleware that returns undefined / null / a non-string
|
|
473
|
+
// would silently corrupt the prompt sent to the driver. Fail loud
|
|
474
|
+
// here so the user sees "middleware X.enhance returned ..." in the
|
|
475
|
+
// task log instead of "[object Object]" arriving at the model.
|
|
476
|
+
if (typeof next !== 'string') {
|
|
477
|
+
throw new Error(`middleware "${mwConfig.type}".enhance() returned ${next === null ? 'null' : typeof next}, expected string`);
|
|
478
|
+
}
|
|
479
|
+
prompt = next;
|
|
480
|
+
log.debug(`[task:${taskId}]`, ` ${mwConfig.type}: ${before} → ${prompt.length} chars`);
|
|
481
|
+
}
|
|
482
|
+
}
|
|
483
|
+
log.debug(`[task:${taskId}]`, `prompt: ${originalLen} chars (final: ${prompt.length} chars)`);
|
|
484
|
+
log.quiet(`--- prompt (final) ---\n${clip(prompt)}\n--- end prompt ---`, taskId);
|
|
485
|
+
// H1: hand the driver a continue_from that has already been
|
|
486
|
+
// qualified by dag.ts. Without this, drivers like codex/opencode/
|
|
487
|
+
// claude-code do `outputMap.get(task.continue_from)` directly with
|
|
488
|
+
// the user's raw (possibly bare) string, which races whenever two
|
|
489
|
+
// tracks share a task name. dag.ts has the only authoritative
|
|
490
|
+
// resolver, so we use its precomputed answer here.
|
|
491
|
+
const enrichedTask = {
|
|
492
|
+
...task,
|
|
493
|
+
prompt,
|
|
494
|
+
continue_from: node.resolvedContinueFrom ?? task.continue_from,
|
|
495
|
+
};
|
|
496
|
+
const driverCtx = {
|
|
497
|
+
sessionMap, outputMap, normalizedMap, workDir: task.cwd ?? workDir,
|
|
498
|
+
};
|
|
499
|
+
const spec = await driver.buildCommand(enrichedTask, track, driverCtx);
|
|
500
|
+
log.debug(`[task:${taskId}]`, `driver=${driverName}`);
|
|
501
|
+
log.debug(`[task:${taskId}]`, `spawn args: ${JSON.stringify(spec.args)}`);
|
|
502
|
+
if (spec.cwd)
|
|
503
|
+
log.debug(`[task:${taskId}]`, `spawn cwd: ${spec.cwd}`);
|
|
504
|
+
if (spec.env)
|
|
505
|
+
log.debug(`[task:${taskId}]`, `spawn env overrides: ${Object.keys(spec.env).join(', ')}`);
|
|
506
|
+
if (spec.stdin)
|
|
507
|
+
log.debug(`[task:${taskId}]`, `spawn stdin: ${spec.stdin.length} chars`);
|
|
508
|
+
result = await runSpawn(spec, driver, runOpts);
|
|
509
|
+
}
|
|
510
|
+
// 5. Write output file with RAW stdout (preserves driver output format).
|
|
511
|
+
// Done BEFORE the completion check so a `file_exists` completion pointing
|
|
512
|
+
// at `task.output` observes the AI-generated artefact. Writes happen
|
|
513
|
+
// regardless of exit code so failed/timed-out tasks still leave a
|
|
514
|
+
// debuggable artefact on disk.
|
|
515
|
+
if (task.output) {
|
|
516
|
+
// validatePath enforces no .. traversal and no absolute paths escaping workDir.
|
|
517
|
+
const outPath = validatePath(task.output, workDir);
|
|
518
|
+
await mkdir(dirname(outPath), { recursive: true });
|
|
519
|
+
await Bun.write(outPath, result.stdout);
|
|
520
|
+
result = { ...result, outputPath: outPath };
|
|
521
|
+
// H1: only write the fully-qualified taskId. The previous "also store
|
|
522
|
+
// bare id when not yet present" trick produced non-deterministic
|
|
523
|
+
// continue_from lookups when two tracks shared a task name —
|
|
524
|
+
// whichever finished first won the bare key. dag.ts now resolves
|
|
525
|
+
// continue_from to a qualified id (DagNode.resolvedContinueFrom),
|
|
526
|
+
// and the enrichedTask handed to drivers carries that qualified
|
|
527
|
+
// version, so bare keys are no longer needed.
|
|
528
|
+
outputMap.set(taskId, outPath);
|
|
529
|
+
}
|
|
530
|
+
// 6. Determine terminal status (without emitting yet — result must be complete first)
|
|
531
|
+
// H2: branch on failureKind so spawn errors no longer masquerade as
|
|
532
|
+
// timeouts. Old runners that don't set failureKind still work — we
|
|
533
|
+
// fall back to the historical `exitCode === -1 → timeout` heuristic so
|
|
534
|
+
// pre-existing third-party drivers don't regress.
|
|
535
|
+
let terminalStatus;
|
|
536
|
+
const kind = result.failureKind;
|
|
537
|
+
if (kind === 'timeout') {
|
|
538
|
+
terminalStatus = 'timeout';
|
|
539
|
+
}
|
|
540
|
+
else if (kind === 'spawn_error') {
|
|
541
|
+
terminalStatus = 'failed';
|
|
542
|
+
}
|
|
543
|
+
else if (kind === undefined && result.exitCode === -1) {
|
|
544
|
+
// Legacy path: pre-H2 driver returned -1 with no kind. Treat as
|
|
545
|
+
// timeout for backward compatibility (the previous behaviour).
|
|
546
|
+
terminalStatus = 'timeout';
|
|
547
|
+
}
|
|
548
|
+
else if (result.exitCode !== 0) {
|
|
549
|
+
terminalStatus = 'failed';
|
|
550
|
+
}
|
|
551
|
+
else if (task.completion) {
|
|
552
|
+
const plugin = getHandler('completions', task.completion.type);
|
|
553
|
+
const completionCtx = { workDir: task.cwd ?? workDir, signal: abortController.signal };
|
|
554
|
+
const passed = await plugin.check(task.completion, result, completionCtx);
|
|
555
|
+
// R4: strict boolean check. Truthy strings/numbers used to be coerced
|
|
556
|
+
// to success — a check returning "ok" would let a failing task pass.
|
|
557
|
+
if (typeof passed !== 'boolean') {
|
|
558
|
+
throw new Error(`completion "${task.completion.type}".check() returned ${passed === null ? 'null' : typeof passed}, expected boolean`);
|
|
559
|
+
}
|
|
560
|
+
terminalStatus = passed ? 'success' : 'failed';
|
|
561
|
+
}
|
|
562
|
+
else {
|
|
563
|
+
terminalStatus = 'success';
|
|
564
|
+
}
|
|
565
|
+
// Store normalized text separately (in-memory) for continue_from handoff.
|
|
566
|
+
// R15: clip oversized values so a runaway parseResult can't accumulate
|
|
567
|
+
// hundreds of MB across tasks.
|
|
568
|
+
if (result.normalizedOutput !== null) {
|
|
569
|
+
const clipped = result.normalizedOutput.length > MAX_NORMALIZED_BYTES
|
|
570
|
+
? result.normalizedOutput.slice(0, MAX_NORMALIZED_BYTES) +
|
|
571
|
+
`\n[…clipped at ${MAX_NORMALIZED_BYTES} bytes]`
|
|
572
|
+
: result.normalizedOutput;
|
|
573
|
+
// H1: qualified-only key (see comment near outputMap above).
|
|
574
|
+
normalizedMap.set(taskId, clipped);
|
|
575
|
+
}
|
|
576
|
+
if (result.stderr) {
|
|
577
|
+
const stderrPath = resolve(log.dir, `${taskId.replace(/\./g, '_')}.stderr`);
|
|
578
|
+
await Bun.write(stderrPath, result.stderr);
|
|
579
|
+
result = { ...result, stderrPath };
|
|
580
|
+
}
|
|
581
|
+
if (result.sessionId) {
|
|
582
|
+
// H1: qualified-only key (see comment near outputMap above).
|
|
583
|
+
sessionMap.set(taskId, result.sessionId);
|
|
584
|
+
}
|
|
585
|
+
// Set result and finishedAt before emitting terminal status so listeners see complete state
|
|
586
|
+
state.result = result;
|
|
587
|
+
state.finishedAt = nowISO();
|
|
588
|
+
setTaskStatus(taskId, terminalStatus);
|
|
589
|
+
// Log task outcome with relevant details
|
|
590
|
+
const durSec = (result.durationMs / 1000).toFixed(1);
|
|
591
|
+
if (terminalStatus === 'success') {
|
|
592
|
+
log.info(`[task:${taskId}]`, `success (${durSec}s)`);
|
|
593
|
+
}
|
|
594
|
+
else {
|
|
595
|
+
log.error(`[task:${taskId}]`, `${terminalStatus} exit=${result.exitCode} duration=${durSec}s`);
|
|
596
|
+
if (result.stderr) {
|
|
597
|
+
const tail = tailLines(result.stderr, 10);
|
|
598
|
+
log.error(`[task:${taskId}]`, `stderr tail:\n${tail}`);
|
|
599
|
+
}
|
|
600
|
+
}
|
|
601
|
+
// File-only: full stdout/stderr dump (clipped) + extracted metadata
|
|
602
|
+
log.debug(`[task:${taskId}]`, `stdout: ${result.stdout.length} chars, stderr: ${result.stderr.length} chars`);
|
|
603
|
+
if (result.sessionId) {
|
|
604
|
+
log.debug(`[task:${taskId}]`, `sessionId: ${result.sessionId}`);
|
|
605
|
+
}
|
|
606
|
+
if (result.outputPath) {
|
|
607
|
+
log.debug(`[task:${taskId}]`, `wrote output: ${result.outputPath}`);
|
|
608
|
+
}
|
|
609
|
+
if (result.stderrPath) {
|
|
610
|
+
log.debug(`[task:${taskId}]`, `wrote stderr: ${result.stderrPath}`);
|
|
611
|
+
}
|
|
612
|
+
if (result.stdout) {
|
|
613
|
+
log.quiet(`--- stdout (${taskId}) ---\n${clip(result.stdout)}\n--- end stdout ---`, taskId);
|
|
614
|
+
}
|
|
615
|
+
if (result.stderr) {
|
|
616
|
+
log.quiet(`--- stderr (${taskId}) ---\n${clip(result.stderr)}\n--- end stderr ---`, taskId);
|
|
617
|
+
}
|
|
618
|
+
if (task.completion) {
|
|
619
|
+
log.debug(`[task:${taskId}]`, `completion check: type=${task.completion.type} result=${terminalStatus}`);
|
|
620
|
+
}
|
|
621
|
+
}
|
|
622
|
+
catch (err) {
|
|
623
|
+
const errMsg = err instanceof Error ? (err.stack ?? err.message) : String(err);
|
|
624
|
+
log.error(`[task:${taskId}]`, `failed before execution: ${errMsg}`);
|
|
625
|
+
state.result = {
|
|
626
|
+
exitCode: -1,
|
|
627
|
+
stdout: '',
|
|
628
|
+
stderr: errMsg,
|
|
629
|
+
outputPath: null, stderrPath: null, durationMs: 0,
|
|
630
|
+
sessionId: null, normalizedOutput: null,
|
|
631
|
+
// H2: Engine-level pre-execution errors (driver throw, middleware
|
|
632
|
+
// throw, getHandler 404) classify as spawn_error — the process never
|
|
633
|
+
// ran, so calling them "timeout" was actively misleading.
|
|
634
|
+
failureKind: 'spawn_error',
|
|
635
|
+
};
|
|
636
|
+
state.finishedAt = nowISO();
|
|
637
|
+
setTaskStatus(taskId, 'failed');
|
|
638
|
+
}
|
|
639
|
+
// 7. Fire hooks
|
|
640
|
+
const finalStatus = state.status;
|
|
641
|
+
try {
|
|
642
|
+
await fireHook(taskId, finalStatus === 'success' ? 'task_success' : 'task_failure');
|
|
643
|
+
}
|
|
644
|
+
catch (hookErr) {
|
|
645
|
+
log.error(`[task:${taskId}]`, `hook execution failed: ${hookErr instanceof Error ? hookErr.message : String(hookErr)}`);
|
|
646
|
+
}
|
|
647
|
+
// 8. Handle stop_all for failure states
|
|
648
|
+
if (finalStatus !== 'success' && getOnFailure(taskId) === 'stop_all') {
|
|
649
|
+
applyStopAll(node.track.id);
|
|
650
|
+
}
|
|
651
|
+
}
|
|
652
|
+
// ── Event loop ──
|
|
653
|
+
// Each task is launched as soon as ALL its deps reach a terminal state.
|
|
654
|
+
// We track in-flight tasks in `running` so a task completing mid-batch
|
|
655
|
+
// immediately unblocks its dependents without waiting for sibling tasks.
|
|
656
|
+
const running = new Map();
|
|
657
|
+
try {
|
|
658
|
+
while (!pipelineAborted) {
|
|
659
|
+
// Launch every task whose deps are all terminal and that isn't already in-flight
|
|
660
|
+
for (const [id, state] of states) {
|
|
661
|
+
if (state.status !== 'waiting' || running.has(id))
|
|
662
|
+
continue;
|
|
663
|
+
const node = dag.nodes.get(id);
|
|
664
|
+
const allDepsTerminal = node.dependsOn.length === 0 ||
|
|
665
|
+
node.dependsOn.every(d => isTerminal(states.get(d).status));
|
|
666
|
+
if (!allDepsTerminal)
|
|
667
|
+
continue;
|
|
668
|
+
const p = processTask(id).finally(() => running.delete(id));
|
|
669
|
+
running.set(id, p);
|
|
670
|
+
}
|
|
671
|
+
// All tasks terminal — done
|
|
672
|
+
if ([...states.values()].every(s => isTerminal(s.status)))
|
|
673
|
+
break;
|
|
674
|
+
if (running.size === 0) {
|
|
675
|
+
// Nothing in-flight but non-terminal tasks exist (e.g. trigger-wait states
|
|
676
|
+
// that processTask hasn't been called for yet). Poll briefly.
|
|
677
|
+
await new Promise(r => setTimeout(r, POLL_INTERVAL_MS));
|
|
678
|
+
}
|
|
679
|
+
else {
|
|
680
|
+
// Wait for any one task to finish, then re-scan for new launchables.
|
|
681
|
+
await Promise.race(running.values());
|
|
682
|
+
}
|
|
683
|
+
}
|
|
684
|
+
if (pipelineAborted) {
|
|
685
|
+
// Wait for in-flight tasks to honour the abort signal before marking states.
|
|
686
|
+
if (running.size > 0)
|
|
687
|
+
await Promise.allSettled(running.values());
|
|
688
|
+
for (const [id, state] of states) {
|
|
689
|
+
if (!isTerminal(state.status)) {
|
|
690
|
+
// Running tasks get timeout (they were killed); waiting tasks get skipped
|
|
691
|
+
state.finishedAt = nowISO();
|
|
692
|
+
setTaskStatus(id, state.status === 'running' ? 'timeout' : 'skipped');
|
|
693
|
+
}
|
|
694
|
+
}
|
|
695
|
+
}
|
|
696
|
+
}
|
|
697
|
+
finally {
|
|
698
|
+
if (pipelineTimer)
|
|
699
|
+
clearTimeout(pipelineTimer);
|
|
700
|
+
// Clean up the external abort signal listener to prevent dead references
|
|
701
|
+
// accumulating on long-lived shared AbortControllers.
|
|
702
|
+
if (options.signal) {
|
|
703
|
+
options.signal.removeEventListener('abort', externalAbortHandler);
|
|
704
|
+
}
|
|
705
|
+
// Safety net: drain any approvals still pending at shutdown (e.g. crash path).
|
|
706
|
+
if (approvalGateway.pending().length > 0) {
|
|
707
|
+
approvalGateway.abortAll('pipeline finished');
|
|
708
|
+
}
|
|
709
|
+
}
|
|
710
|
+
// ── Summary ──
|
|
711
|
+
const summary = { total: 0, success: 0, failed: 0, skipped: 0, timeout: 0, blocked: 0 };
|
|
712
|
+
for (const [, state] of states) {
|
|
713
|
+
summary.total++;
|
|
714
|
+
switch (state.status) {
|
|
715
|
+
case 'success':
|
|
716
|
+
summary.success++;
|
|
717
|
+
break;
|
|
718
|
+
case 'failed':
|
|
719
|
+
summary.failed++;
|
|
720
|
+
break;
|
|
721
|
+
case 'skipped':
|
|
722
|
+
summary.skipped++;
|
|
723
|
+
break;
|
|
724
|
+
case 'timeout':
|
|
725
|
+
summary.timeout++;
|
|
726
|
+
break;
|
|
727
|
+
case 'blocked':
|
|
728
|
+
summary.blocked++;
|
|
729
|
+
break;
|
|
730
|
+
}
|
|
731
|
+
}
|
|
732
|
+
const finishedAt = nowISO();
|
|
733
|
+
const durationMs = new Date(finishedAt).getTime() - new Date(startedAt).getTime();
|
|
734
|
+
if (pipelineAborted) {
|
|
735
|
+
await executeHook(config.hooks, 'pipeline_error', buildPipelineErrorContext(pipelineInfo, 'Pipeline timeout exceeded'), workDir);
|
|
736
|
+
}
|
|
737
|
+
else {
|
|
738
|
+
await executeHook(config.hooks, 'pipeline_complete', buildPipelineCompleteContext({ ...pipelineInfo, finished_at: finishedAt, duration_ms: durationMs }, summary), workDir);
|
|
739
|
+
}
|
|
740
|
+
const allSuccess = !pipelineAborted
|
|
741
|
+
&& summary.failed === 0 && summary.timeout === 0 && summary.blocked === 0;
|
|
742
|
+
log.section('Pipeline summary');
|
|
743
|
+
log.quiet(`status: ${pipelineAborted ? 'aborted (timeout)' : 'completed'}`);
|
|
744
|
+
log.quiet(`duration: ${(durationMs / 1000).toFixed(1)}s`);
|
|
745
|
+
log.quiet(`counts: total=${summary.total} success=${summary.success} ` +
|
|
746
|
+
`failed=${summary.failed} skipped=${summary.skipped} ` +
|
|
747
|
+
`timeout=${summary.timeout} blocked=${summary.blocked}`);
|
|
748
|
+
log.quiet('');
|
|
749
|
+
log.quiet('per-task:');
|
|
750
|
+
for (const [id, state] of states) {
|
|
751
|
+
const dur = state.result?.durationMs != null
|
|
752
|
+
? `${(state.result.durationMs / 1000).toFixed(1)}s` : '-';
|
|
753
|
+
const exit = state.result?.exitCode ?? '-';
|
|
754
|
+
log.quiet(` ${state.status.padEnd(8)} ${id} (exit=${exit}, ${dur})`);
|
|
755
|
+
}
|
|
756
|
+
log.info('[pipeline]', `completed "${config.name}"`);
|
|
757
|
+
log.info('[pipeline]', `Total: ${summary.total} | Success: ${summary.success} | Failed: ${summary.failed} | Skipped: ${summary.skipped} | Timeout: ${summary.timeout} | Blocked: ${summary.blocked}`);
|
|
758
|
+
log.info('[pipeline]', `Duration: ${(durationMs / 1000).toFixed(1)}s`);
|
|
759
|
+
log.info('[pipeline]', `Log: ${log.path}`);
|
|
760
|
+
emit({ type: 'pipeline_end', runId, success: allSuccess });
|
|
761
|
+
return { success: allSuccess, runId, logPath: log.path, summary, states: freezeStates(states) };
|
|
762
|
+
}
|
|
763
|
+
finally {
|
|
764
|
+
// Close the persistent log file handle before pruning.
|
|
765
|
+
log.close();
|
|
766
|
+
// Prune old per-run log directories on every exit path (normal, blocked, or thrown).
|
|
767
|
+
// Exclude the current runId so a concurrent run cannot delete its own live directory.
|
|
768
|
+
if (maxLogRuns > 0) {
|
|
769
|
+
await pruneLogDirs(resolve(workDir, '.tagma', 'logs'), maxLogRuns, runId);
|
|
770
|
+
}
|
|
771
|
+
}
|
|
772
|
+
}
|
|
773
|
+
/**
 * Prune old per-run log directories under `logsDir`, retaining only the
 * `keep` most recent ones. Run IDs start with a base-36 timestamp prefix,
 * so a plain lexicographic sort yields chronological order.
 *
 * `excludeRunId` is never deleted, even when old enough to qualify — this
 * keeps a concurrent run from removing a live log directory still in use.
 */
async function pruneLogDirs(logsDir, keep, excludeRunId) {
    let names;
    try {
        names = await readdir(logsDir);
    }
    catch {
        // logsDir doesn't exist yet — nothing to prune.
        return;
    }
    // Candidates: run-ID-shaped directories (run_<...>), minus the live run.
    const candidates = names
        .filter((name) => name.startsWith('run_') && name !== excludeRunId)
        .sort();
    const surplus = candidates.length - keep;
    if (surplus <= 0)
        return;
    // Oldest first; delete everything beyond the retention budget.
    const doomed = candidates.slice(0, surplus);
    await Promise.all(doomed.map((name) => rm(resolve(logsDir, name), { recursive: true, force: true }).catch(() => {
        // Best-effort removal — a stale dir is better than a crash.
    })));
}
|
|
796
|
+
/** True when `status` is a terminal task state — no further transitions occur. */
function isTerminal(status) {
    switch (status) {
        case 'success':
        case 'failed':
        case 'timeout':
        case 'skipped':
        case 'blocked':
            return true;
        default:
            return false;
    }
}
|
|
800
|
+
/**
 * Build a caller-safe snapshot of the states map: a fresh Map whose entries
 * are new per-task objects with shallow copies of `config`, `trackConfig`
 * and `result`. Nested values inside those objects are still shared with the
 * originals — callers must treat the snapshot as read-only.
 */
function freezeStates(states) {
    const snapshot = new Map();
    for (const [id, s] of states) {
        const entry = {
            config: { ...s.config },
            trackConfig: { ...s.trackConfig },
            status: s.status,
            // A task that never produced a result stays null in the snapshot.
            result: s.result ? { ...s.result } : null,
            startedAt: s.startedAt,
            finishedAt: s.finishedAt,
        };
        snapshot.set(id, entry);
    }
    return snapshot;
}
|
|
815
|
+
//# sourceMappingURL=engine.js.map
|