@tagma/sdk 0.2.6 → 0.2.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -296,6 +296,7 @@ Options:
296
296
  - `task_status_change` — a task changed status; includes `state: TaskState` (complete snapshot at the time of change: `startedAt` is populated before the `running` event; `result` and `finishedAt` are populated before any terminal-status event)
297
297
  - `task_log` — a structured log line was written to `pipeline.log`. Mirrors every `Logger` call (info/warn/error/debug/section/quiet) and carries `{ taskId: string | null, level, timestamp, text }`. `taskId` is non-null for lines tagged with a `[task:<id>]` prefix (or passed explicitly to `section`/`quiet`) and `null` for pipeline-wide messages such as the configuration dump and DAG topology. Use this to stream the full run process into UIs without tailing the log file.
298
298
  - `pipeline_end` — pipeline finished; includes `success: boolean`
299
+ - `runId` -- caller-supplied run ID. When provided the engine uses this instead of generating its own, keeping the caller and the SDK log directories aligned on the same ID
299
300
  - `maxLogRuns` -- number of per-run log directories to keep under `<workDir>/.tagma/logs/` (default: 20)
300
301
 
301
302
  ### `PipelineRunner`
@@ -435,7 +436,7 @@ const yaml = serializePipeline(config);
435
436
  | `upsertTask(config, trackId, task)` | Insert or replace a task |
436
437
  | `removeTask(config, trackId, taskId, cleanRefs?)` | Remove a task; pass `cleanRefs: true` to also strip dangling `depends_on` / `continue_from` references. Only refs that resolve to the deleted task are removed — same-named tasks in other tracks are unaffected |
437
438
  | `moveTask(config, trackId, taskId, toIndex)` | Reorder a task within its track |
438
- | `transferTask(config, fromTrackId, taskId, toTrackId)` | Move a task across tracks |
439
+ | `transferTask(config, fromTrackId, taskId, toTrackId, qualifyRefs?)` | Move a task across tracks. When `qualifyRefs` is `true` (default), bare `depends_on` / `continue_from` references to the moved task are converted to fully-qualified form (`toTrackId.taskId`) so same-track resolution stays correct |
439
440
 
440
441
  ### `parseYaml(content: string): RawPipelineConfig`
441
442
 
@@ -469,7 +470,7 @@ Use `validateRaw` for editing raw configs in a UI; use `validateConfig` after `r
469
470
 
470
471
  Validates a raw pipeline config without resolving inheritance or executing anything. Returns a flat list of `{ path, message }` objects — empty array means valid.
471
472
 
472
- Checks: required fields, `prompt`/`command` exclusivity, `depends_on`/`continue_from` reference integrity (including ambiguous bare refs that exist in multiple tracks — use `trackId.taskId` to disambiguate), circular dependency detection.
473
+ Checks: required fields, `prompt`/`command` exclusivity, duplicate task IDs within a track, `depends_on`/`continue_from` reference integrity (including ambiguous bare refs that exist in multiple tracks — use `trackId.taskId` to disambiguate), circular dependency detection.
473
474
 
474
475
  Does **not** check plugin registration (plugins may not be loaded at edit time).
475
476
 
@@ -508,6 +509,7 @@ logger.section('Title'); // file only — visual separator
508
509
  logger.quiet(bulkText); // file only — bulk payload
509
510
  logger.path; // log file path
510
511
  logger.dir; // run artifact directory
512
+ logger.close(); // close the persistent file handle (called automatically by runPipeline at run completion)
511
513
  ```
512
514
 
513
515
  Pass an optional third argument to stream every appended line out as a
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@tagma/sdk",
3
- "version": "0.2.6",
3
+ "version": "0.2.8",
4
4
  "license": "MIT",
5
5
  "repository": {
6
6
  "type": "git",
@@ -34,6 +34,17 @@ export const OutputCheckCompletion: CompletionPlugin = {
34
34
  const controller = new AbortController();
35
35
  const timer = setTimeout(() => controller.abort(), timeoutMs);
36
36
 
37
+ // Wire pipeline abort signal into the check process so external abort
38
+ // terminates the child instead of leaving it running undetected.
39
+ const onAbort = () => controller.abort();
40
+ if (ctx.signal) {
41
+ if (ctx.signal.aborted) {
42
+ controller.abort();
43
+ } else {
44
+ ctx.signal.addEventListener('abort', onAbort, { once: true });
45
+ }
46
+ }
47
+
37
48
  const proc = Bun.spawn(shellArgs(checkCmd) as string[], {
38
49
  cwd: ctx.workDir,
39
50
  stdin: 'pipe',
@@ -69,6 +80,7 @@ export const OutputCheckCompletion: CompletionPlugin = {
69
80
  return exitCode === 0;
70
81
  } finally {
71
82
  clearTimeout(timer);
83
+ if (ctx.signal) ctx.signal.removeEventListener('abort', onAbort);
72
84
  }
73
85
  },
74
86
  };
package/src/config-ops.ts CHANGED
@@ -226,13 +226,21 @@ export function moveTask(
226
226
  /**
227
227
  * Move a task from one track to another (appends to the target track).
228
228
  * No-op if either trackId or taskId is not found.
229
+ *
230
+ * When `qualifyRefs` is true (the default), bare references (`depends_on`,
231
+ * `continue_from`) pointing to the moved task are converted to fully-qualified
232
+ * refs (`toTrackId.taskId`) so that same-track resolution doesn't silently
233
+ * break after the task changes tracks.
229
234
  */
230
235
  export function transferTask(
231
236
  config: RawPipelineConfig,
232
237
  fromTrackId: string,
233
238
  taskId: string,
234
239
  toTrackId: string,
240
+ qualifyRefs = true,
235
241
  ): RawPipelineConfig {
242
+ if (fromTrackId === toTrackId) return config;
243
+
236
244
  let task: RawTaskConfig | undefined;
237
245
  const afterRemove = {
238
246
  ...config,
@@ -245,5 +253,70 @@ export function transferTask(
245
253
  }),
246
254
  };
247
255
  if (!task) return config;
248
- return upsertTask(afterRemove, toTrackId, task);
256
+ const afterInsert = upsertTask(afterRemove, toTrackId, task);
257
+
258
+ if (!qualifyRefs) return afterInsert;
259
+
260
+ // Qualify bare references to the moved task. After the move, bare ref
261
+ // "taskId" from the old track no longer resolves via same-track priority.
262
+ // Convert it to the qualified form "toTrackId.taskId" so the dependency
263
+ // graph stays correct.
264
+ const qualId = `${toTrackId}.${taskId}`;
265
+ const oldQualId = `${fromTrackId}.${taskId}`;
266
+
267
+ // Does any track (other than the destination) still have a task with this bare id?
268
+ const bareIdSurvivesElsewhere = afterInsert.tracks.some(t =>
269
+ t.id !== toTrackId && t.tasks.some(tk => tk.id === taskId),
270
+ );
271
+
272
+ return {
273
+ ...afterInsert,
274
+ tracks: afterInsert.tracks.map(t => {
275
+ const localHasId = t.tasks.some(tk => tk.id === taskId);
276
+
277
+ const qualifyRef = (ref: string): string => {
278
+ // Already-qualified ref to old location → rewrite to new location
279
+ if (ref === oldQualId) return qualId;
280
+ // Bare ref: only needs qualifying if it would have resolved to the
281
+ // moved task before the transfer
282
+ if (ref === taskId) {
283
+ if (t.id === fromTrackId) {
284
+ // Was same-track in the old track — now the task is gone.
285
+ // If no other local task shadows it, qualify to new location.
286
+ if (!localHasId) return qualId;
287
+ }
288
+ // From a different track: bare ref resolved globally before.
289
+ // If the bare id is now ambiguous or gone from this track's
290
+ // perspective, qualify it.
291
+ if (!localHasId && !bareIdSurvivesElsewhere) return qualId;
292
+ }
293
+ return ref;
294
+ };
295
+
296
+ return {
297
+ ...t,
298
+ tasks: t.tasks.map(tk => qualifyTaskRefs(tk, qualifyRef)),
299
+ };
300
+ }),
301
+ };
302
+ }
303
+
304
+ /** Rewrite `depends_on` and `continue_from` refs using a mapping function. */
305
+ function qualifyTaskRefs(
306
+ task: RawTaskConfig,
307
+ rewrite: (ref: string) => string,
308
+ ): RawTaskConfig {
309
+ const newDeps = task.depends_on?.map(rewrite);
310
+ const newContinue = task.continue_from !== undefined ? rewrite(task.continue_from) : undefined;
311
+
312
+ const depsChanged = newDeps !== undefined && newDeps.some((d, i) => d !== task.depends_on![i]);
313
+ const continueChanged = newContinue !== undefined && newContinue !== task.continue_from;
314
+
315
+ if (!depsChanged && !continueChanged) return task;
316
+
317
+ return {
318
+ ...task,
319
+ ...(newDeps !== undefined ? { depends_on: newDeps } : {}),
320
+ ...(newContinue !== undefined ? { continue_from: newContinue } : {}),
321
+ };
249
322
  }
package/src/dag.ts CHANGED
@@ -118,8 +118,10 @@ export function buildDag(config: PipelineConfig): Dag {
118
118
  }
119
119
 
120
120
  const sorted: string[] = [];
121
- while (queue.length > 0) {
122
- const current = queue.shift()!;
121
+ // Use an index pointer instead of shift() to avoid O(n) per dequeue.
122
+ let qi = 0;
123
+ while (qi < queue.length) {
124
+ const current = queue[qi++]!;
123
125
  sorted.push(current);
124
126
  for (const child of adjacency.get(current)!) {
125
127
  const newDegree = inDegree.get(child)! - 1;
@@ -129,8 +131,13 @@ export function buildDag(config: PipelineConfig): Dag {
129
131
  }
130
132
 
131
133
  if (sorted.length !== nodes.size) {
132
- const remaining = [...nodes.keys()].filter(id => !sorted.includes(id));
133
- throw new Error(`Circular dependency detected involving tasks: ${remaining.join(', ')}`);
134
+ // Only report nodes that are actually part of cycles (in-degree > 0
135
+ // after Kahn's algorithm), not their downstream dependents.
136
+ const sortedSet = new Set(sorted);
137
+ const cycleMembers = [...nodes.keys()].filter(id =>
138
+ !sortedSet.has(id) && (inDegree.get(id) ?? 0) > 0
139
+ );
140
+ throw new Error(`Circular dependency detected involving tasks: ${cycleMembers.join(', ')}`);
134
141
  }
135
142
 
136
143
  return { nodes, sorted };
package/src/engine.ts CHANGED
@@ -88,13 +88,23 @@ function preflight(config: PipelineConfig, dag: Dag): void {
88
88
  }
89
89
 
90
90
  function resolveRefInDag(dag: Dag, ref: string, fromTrackId: string): string | null {
91
+ // Already fully qualified
91
92
  if (dag.nodes.has(ref)) return ref;
93
+ // Same-track match (preferred)
92
94
  const sameTrack = `${fromTrackId}.${ref}`;
93
95
  if (dag.nodes.has(sameTrack)) return sameTrack;
96
+ // Cross-track bare name lookup — must be unambiguous (aligned with buildDag's resolveRef)
97
+ let match: string | null = null;
94
98
  for (const [id] of dag.nodes) {
95
- if (id.endsWith(`.${ref}`)) return id;
99
+ if (id.endsWith(`.${ref}`)) {
100
+ if (match !== null) {
101
+ // Ambiguous: multiple tasks share the bare name across tracks
102
+ return null;
103
+ }
104
+ match = id;
105
+ }
96
106
  }
97
- return null;
107
+ return match;
98
108
  }
99
109
 
100
110
  // ═══ Engine ═══
@@ -132,6 +142,12 @@ export interface RunPipelineOptions {
132
142
  * Oldest directories are deleted after each run. Defaults to 20. Set to 0 to disable cleanup.
133
143
  */
134
144
  readonly maxLogRuns?: number;
145
+ /**
146
+ * Caller-supplied run ID. When provided the engine uses this instead of
147
+ * generating its own via `generateRunId()`, keeping the editor and SDK
148
+ * log directories aligned on the same ID.
149
+ */
150
+ readonly runId?: string;
135
151
  /**
136
152
  * External AbortSignal — aborting it cancels the pipeline immediately.
137
153
  * Equivalent to the pipeline timeout firing, but caller-controlled.
@@ -163,7 +179,7 @@ export async function runPipeline(
163
179
  }
164
180
 
165
181
  const dag = buildDag(config);
166
- const runId = generateRunId();
182
+ const runId = options.runId ?? generateRunId();
167
183
  preflight(config, dag);
168
184
 
169
185
  const startedAt = nowISO();
@@ -181,6 +197,9 @@ export async function runPipeline(
181
197
  text: record.text,
182
198
  });
183
199
  });
200
+
201
+ try {
202
+
184
203
  log.info('[pipeline]', `start "${config.name}" run_id=${runId}`);
185
204
 
186
205
  // File-only: dump the resolved pipeline shape + DAG topology for post-mortem.
@@ -214,8 +233,6 @@ export async function runPipeline(
214
233
  });
215
234
  }
216
235
 
217
- try {
218
-
219
236
  // Pipeline start hook (gate)
220
237
  const startHook = await executeHook(
221
238
  config.hooks, 'pipeline_start', buildPipelineStartContext(pipelineInfo), workDir,
@@ -268,15 +285,15 @@ export async function runPipeline(
268
285
  });
269
286
 
270
287
  // Wire external cancel signal into the internal abort controller.
288
+ const externalAbortHandler = () => {
289
+ pipelineAborted = true;
290
+ abortController.abort();
291
+ };
271
292
  if (options.signal) {
272
293
  if (options.signal.aborted) {
273
- pipelineAborted = true;
274
- abortController.abort();
294
+ externalAbortHandler();
275
295
  } else {
276
- options.signal.addEventListener('abort', () => {
277
- pipelineAborted = true;
278
- abortController.abort();
279
- }, { once: true });
296
+ options.signal.addEventListener('abort', externalAbortHandler, { once: true });
280
297
  }
281
298
  }
282
299
 
@@ -361,7 +378,7 @@ export async function runPipeline(
361
378
 
362
379
  async function fireHook(taskId: string, event: 'task_success' | 'task_failure'): Promise<void> {
363
380
  await executeHook(config.hooks, event,
364
- buildTaskContext(event, pipelineInfo, trackInfoOf(taskId), buildTaskInfoObj(taskId)), workDir);
381
+ buildTaskContext(event, pipelineInfo, trackInfoOf(taskId), buildTaskInfoObj(taskId)), workDir, abortController.signal);
365
382
  }
366
383
 
367
384
  // ── Process a single task ──
@@ -423,7 +440,7 @@ export async function runPipeline(
423
440
 
424
441
  // 3. task_start hook (gate)
425
442
  const hookResult = await executeHook(config.hooks, 'task_start',
426
- buildTaskContext('task_start', pipelineInfo, trackInfoOf(taskId), buildTaskInfoObj(taskId)), workDir);
443
+ buildTaskContext('task_start', pipelineInfo, trackInfoOf(taskId), buildTaskInfoObj(taskId)), workDir, abortController.signal);
427
444
  if (hookResult.exitCode !== 0 || config.hooks?.task_start) {
428
445
  log.debug(`[task:${taskId}]`,
429
446
  `task_start hook exit=${hookResult.exitCode} allowed=${hookResult.allowed}`);
@@ -515,7 +532,7 @@ export async function runPipeline(
515
532
  terminalStatus = 'failed';
516
533
  } else if (task.completion) {
517
534
  const plugin = getHandler<CompletionPlugin>('completions', task.completion.type);
518
- const completionCtx = { workDir: task.cwd ?? workDir };
535
+ const completionCtx = { workDir: task.cwd ?? workDir, signal: abortController.signal };
519
536
  const passed = await plugin.check(task.completion as Record<string, unknown>, result, completionCtx);
520
537
  terminalStatus = passed ? 'success' : 'failed';
521
538
  } else {
@@ -664,6 +681,11 @@ export async function runPipeline(
664
681
  }
665
682
  } finally {
666
683
  if (pipelineTimer) clearTimeout(pipelineTimer);
684
+ // Clean up the external abort signal listener to prevent dead references
685
+ // accumulating on long-lived shared AbortControllers.
686
+ if (options.signal) {
687
+ options.signal.removeEventListener('abort', externalAbortHandler);
688
+ }
667
689
  // Safety net: drain any approvals still pending at shutdown (e.g. crash path).
668
690
  if (approvalGateway.pending().length > 0) {
669
691
  approvalGateway.abortAll('pipeline finished');
@@ -714,15 +736,17 @@ export async function runPipeline(
714
736
  log.quiet(` ${state.status.padEnd(8)} ${id} (exit=${exit}, ${dur})`);
715
737
  }
716
738
 
717
- console.log(`\n[Pipeline "${config.name}"] completed`);
718
- console.log(` Total: ${summary.total} | Success: ${summary.success} | Failed: ${summary.failed} | Skipped: ${summary.skipped} | Timeout: ${summary.timeout} | Blocked: ${summary.blocked}`);
719
- console.log(` Duration: ${(durationMs / 1000).toFixed(1)}s`);
720
- console.log(` Log: ${log.path}`);
739
+ log.info('[pipeline]', `completed "${config.name}"`);
740
+ log.info('[pipeline]', `Total: ${summary.total} | Success: ${summary.success} | Failed: ${summary.failed} | Skipped: ${summary.skipped} | Timeout: ${summary.timeout} | Blocked: ${summary.blocked}`);
741
+ log.info('[pipeline]', `Duration: ${(durationMs / 1000).toFixed(1)}s`);
742
+ log.info('[pipeline]', `Log: ${log.path}`);
721
743
 
722
744
  emit({ type: 'pipeline_end', runId, success: allSuccess });
723
745
  return { success: allSuccess, runId, logPath: log.path, summary, states: freezeStates(states) };
724
746
 
725
747
  } finally {
748
+ // Close the persistent log file handle before pruning.
749
+ log.close();
726
750
  // Prune old per-run log directories on every exit path (normal, blocked, or thrown).
727
751
  // Exclude the current runId so a concurrent run cannot delete its own live directory.
728
752
  if (maxLogRuns > 0) {
package/src/hooks.ts CHANGED
@@ -18,42 +18,71 @@ function normalizeCommands(cmd: HookCommand | undefined): readonly string[] {
18
18
  return cmd;
19
19
  }
20
20
 
21
- async function runSingleHook(command: string, context: unknown, cwd?: string): Promise<number> {
21
+ const DEFAULT_HOOK_TIMEOUT_MS = 30_000;
22
+
23
+ async function runSingleHook(
24
+ command: string,
25
+ context: unknown,
26
+ cwd?: string,
27
+ signal?: AbortSignal,
28
+ timeoutMs: number = DEFAULT_HOOK_TIMEOUT_MS,
29
+ ): Promise<number> {
22
30
  const jsonInput = JSON.stringify(context, null, 2);
23
31
 
24
- const proc = Bun.spawn(shellArgs(command) as string[], {
25
- stdin: 'pipe',
26
- stdout: 'pipe',
27
- stderr: 'pipe',
28
- ...(cwd ? { cwd } : {}),
29
- });
30
-
31
- if (proc.stdin) {
32
- try {
33
- proc.stdin.write(jsonInput);
34
- proc.stdin.end();
35
- } catch {
36
- // Process may exit before reading stdin (e.g. `exit 1`), ignore EPIPE
32
+ const controller = new AbortController();
33
+ const timer = timeoutMs > 0
34
+ ? setTimeout(() => controller.abort(), timeoutMs)
35
+ : null;
36
+
37
+ // Wire pipeline abort signal into hook process
38
+ const onAbort = () => controller.abort();
39
+ if (signal) {
40
+ if (signal.aborted) {
41
+ controller.abort();
42
+ } else {
43
+ signal.addEventListener('abort', onAbort, { once: true });
37
44
  }
38
45
  }
39
46
 
40
- // Consume stdout and stderr concurrently with waiting for exit.
41
- // Sequential reads after proc.exited risk a pipe-buffer deadlock when
42
- // hook output exceeds the ~64 KB kernel buffer.
43
- const [exitCode, stdout, stderr] = await Promise.all([
44
- proc.exited,
45
- new Response(proc.stdout).text(),
46
- new Response(proc.stderr).text(),
47
- ]);
48
-
49
- if (stdout.trim()) {
50
- console.log(`[hook: ${command}] stdout: ${stdout.trim()}`);
51
- }
52
- if (stderr.trim()) {
53
- console.error(`[hook: ${command}] stderr: ${stderr.trim()}`);
54
- }
47
+ try {
48
+ const proc = Bun.spawn(shellArgs(command) as string[], {
49
+ stdin: 'pipe',
50
+ stdout: 'pipe',
51
+ stderr: 'pipe',
52
+ signal: controller.signal,
53
+ ...(cwd ? { cwd } : {}),
54
+ });
55
+
56
+ if (proc.stdin) {
57
+ try {
58
+ proc.stdin.write(jsonInput);
59
+ proc.stdin.end();
60
+ } catch {
61
+ // Process may exit before reading stdin (e.g. `exit 1`), ignore EPIPE
62
+ }
63
+ }
55
64
 
56
- return exitCode;
65
+ // Consume stdout and stderr concurrently with waiting for exit.
66
+ // Sequential reads after proc.exited risk a pipe-buffer deadlock when
67
+ // hook output exceeds the ~64 KB kernel buffer.
68
+ const [exitCode, stdout, stderr] = await Promise.all([
69
+ proc.exited,
70
+ new Response(proc.stdout).text(),
71
+ new Response(proc.stderr).text(),
72
+ ]);
73
+
74
+ if (stdout.trim()) {
75
+ console.log(`[hook: ${command}] stdout: ${stdout.trim()}`);
76
+ }
77
+ if (stderr.trim()) {
78
+ console.error(`[hook: ${command}] stderr: ${stderr.trim()}`);
79
+ }
80
+
81
+ return exitCode;
82
+ } finally {
83
+ if (timer) clearTimeout(timer);
84
+ if (signal) signal.removeEventListener('abort', onAbort);
85
+ }
57
86
  }
58
87
 
59
88
  export async function executeHook(
@@ -61,6 +90,7 @@ export async function executeHook(
61
90
  event: HookEvent,
62
91
  context: unknown,
63
92
  workDir?: string,
93
+ signal?: AbortSignal,
64
94
  ): Promise<HookResult> {
65
95
  if (!hooks) return { allowed: true, exitCode: 0 };
66
96
 
@@ -70,7 +100,7 @@ export async function executeHook(
70
100
  const isGate = GATE_HOOKS.has(event);
71
101
 
72
102
  for (const cmd of commands) {
73
- const exitCode = await runSingleHook(cmd, context, workDir);
103
+ const exitCode = await runSingleHook(cmd, context, workDir, signal);
74
104
 
75
105
  if (isGate && exitCode === 1) {
76
106
  // Only exit code 1 has gate semantics (block execution)
package/src/logger.ts CHANGED
@@ -1,5 +1,5 @@
1
1
  import { resolve, dirname } from 'node:path';
2
- import { mkdirSync, appendFileSync, writeFileSync } from 'node:fs';
2
+ import { mkdirSync, writeFileSync, openSync, writeSync, closeSync } from 'node:fs';
3
3
 
4
4
  /**
5
5
  * Structured record emitted for every log line. Consumers (e.g. the editor
@@ -43,18 +43,21 @@ export class Logger {
43
43
  private readonly filePath: string;
44
44
  private readonly runDir: string;
45
45
  private readonly onLine: LogListener | null;
46
+ /** Persistent file descriptor for append writes (avoids open/close per line). */
47
+ private fd: number | null;
46
48
 
47
49
  constructor(workDir: string, runId: string, onLine?: LogListener) {
48
50
  this.runDir = resolve(workDir, '.tagma', 'logs', runId);
49
51
  this.filePath = resolve(this.runDir, 'pipeline.log');
50
52
  this.onLine = onLine ?? null;
51
53
  mkdirSync(dirname(this.filePath), { recursive: true });
52
- writeFileSync(
53
- this.filePath,
54
+ const header =
54
55
  `# Pipeline run ${runId} @ ${new Date().toISOString()}\n` +
55
56
  `# Host: ${process.platform} ${process.arch} Bun: ${process.versions.bun ?? 'n/a'}\n` +
56
- `# Work dir: ${workDir}\n\n`,
57
- );
57
+ `# Work dir: ${workDir}\n\n`;
58
+ writeFileSync(this.filePath, header);
59
+ // Open once for all subsequent appends (O_APPEND is implied by 'a' flag)
60
+ this.fd = openSync(this.filePath, 'a');
58
61
  }
59
62
 
60
63
  info(prefix: string, message: string): void {
@@ -105,13 +108,23 @@ export class Logger {
105
108
  }
106
109
 
107
110
  private append(line: string): void {
111
+ if (this.fd === null) return;
108
112
  try {
109
- appendFileSync(this.filePath, line.endsWith('\n') ? line : line + '\n');
113
+ const data = line.endsWith('\n') ? line : line + '\n';
114
+ writeSync(this.fd, data);
110
115
  } catch {
111
116
  // Swallow log write failures; engine correctness shouldn't depend on logging.
112
117
  }
113
118
  }
114
119
 
120
+ /** Close the persistent file handle. Called by the engine at run completion. */
121
+ close(): void {
122
+ if (this.fd !== null) {
123
+ try { closeSync(this.fd); } catch { /* already closed */ }
124
+ this.fd = null;
125
+ }
126
+ }
127
+
115
128
  private emit(level: LogLevel, ts: string, text: string, taskId: string | null): void {
116
129
  if (!this.onLine) return;
117
130
  try {
@@ -61,6 +61,15 @@ export class PipelineRunner {
61
61
  start(): Promise<EngineResult> {
62
62
  if (this._result) return this._result;
63
63
 
64
+ // Guard: if abort() was called before start(), the signal is already
65
+ // aborted. Create a fresh controller so the pipeline doesn't terminate
66
+ // immediately. If users truly want pre-abort semantics, they call
67
+ // abort() after start().
68
+ if (this._abortController.signal.aborted) {
69
+ this._abortController = new AbortController();
70
+ this._status = 'idle';
71
+ }
72
+
64
73
  this._status = 'running';
65
74
  this._result = runPipeline(this.config, this.workDir, {
66
75
  ...this.opts,
package/src/registry.ts CHANGED
@@ -37,8 +37,20 @@ export function hasHandler(category: PluginCategory, type: string): boolean {
37
37
  return registries[category].has(type);
38
38
  }
39
39
 
40
+ // Plugin name must be a scoped npm package or a tagma-prefixed package.
41
+ // Reject absolute/relative paths and suspicious patterns to prevent
42
+ // arbitrary code execution via crafted YAML configs.
43
+ const PLUGIN_NAME_RE = /^(@[a-z0-9-]+\/[a-z0-9._-]+|tagma-plugin-[a-z0-9._-]+)$/;
44
+
40
45
  export async function loadPlugins(pluginNames: readonly string[]): Promise<void> {
41
46
  for (const name of pluginNames) {
47
+ if (!PLUGIN_NAME_RE.test(name)) {
48
+ throw new Error(
49
+ `Plugin "${name}" rejected: plugin names must be scoped npm packages ` +
50
+ `(e.g. @tagma/trigger-xyz) or tagma-plugin-* packages. ` +
51
+ `Relative/absolute paths are not allowed.`
52
+ );
53
+ }
42
54
  const mod = await import(name);
43
55
  if (!mod.pluginCategory || !mod.pluginType || !mod.default) {
44
56
  throw new Error(
package/src/runner.ts CHANGED
@@ -15,10 +15,17 @@ const SIGKILL_DELAY_MS = 3_000;
15
15
  function killProcessTree(pid: number): void {
16
16
  if (process.platform !== 'win32') return;
17
17
  try {
18
- Bun.spawnSync(['taskkill', '/F', '/T', '/PID', String(pid)], {
19
- stdout: 'ignore',
20
- stderr: 'ignore',
18
+ const result = Bun.spawnSync(['taskkill', '/F', '/T', '/PID', String(pid)], {
19
+ stdout: 'pipe',
20
+ stderr: 'pipe',
21
21
  });
22
+ if (result.exitCode !== 0) {
23
+ const stderr = new TextDecoder().decode(result.stderr);
24
+ // Exit code 128 = process not found (already exited) — not worth warning about
25
+ if (result.exitCode !== 128) {
26
+ console.error(`[killProcessTree] taskkill exited ${result.exitCode} for PID ${pid}: ${stderr.trim()}`);
27
+ }
28
+ }
22
29
  } catch {
23
30
  /* best-effort — process may have already exited */
24
31
  }
@@ -42,8 +49,18 @@ export interface RunOptions {
42
49
  * Returns the original name if resolution fails; Bun will raise the same
43
50
  * ENOENT it would have otherwise.
44
51
  */
52
+ const RESOLVED_EXE_CACHE_MAX = 128;
45
53
  const resolvedExeCache = new Map<string, string | null>();
46
54
 
55
+ /** Evict the oldest entry when the cache is at capacity. */
56
+ function evictIfFull(): void {
57
+ if (resolvedExeCache.size >= RESOLVED_EXE_CACHE_MAX) {
58
+ // Map iteration order is insertion order — delete the first (oldest) key.
59
+ const oldest = resolvedExeCache.keys().next().value;
60
+ if (oldest !== undefined) resolvedExeCache.delete(oldest);
61
+ }
62
+ }
63
+
47
64
  function resolveWindowsExe(
48
65
  args: readonly string[],
49
66
  envPath: string,
@@ -73,11 +90,13 @@ function resolveWindowsExe(
73
90
  for (const ext of exts) {
74
91
  const candidate = join(dir, cmd + ext);
75
92
  if (existsSync(candidate)) {
93
+ evictIfFull();
76
94
  resolvedExeCache.set(cacheKey, candidate);
77
95
  return [candidate, ...args.slice(1)];
78
96
  }
79
97
  }
80
98
  }
99
+ evictIfFull();
81
100
  resolvedExeCache.set(cacheKey, null);
82
101
  return args;
83
102
  }
@@ -140,6 +159,7 @@ export async function runSpawn(
140
159
 
141
160
  // ── 3. Timeout & abort handling ────────────────────────────────────────
142
161
  let killedByUs = false;
162
+ let timedOut = false;
143
163
  let timer: ReturnType<typeof setTimeout> | null = null;
144
164
  let forceTimer: ReturnType<typeof setTimeout> | null = null;
145
165
 
@@ -165,7 +185,10 @@ export async function runSpawn(
165
185
  };
166
186
 
167
187
  if (timeoutMs && timeoutMs > 0) {
168
- timer = setTimeout(killGracefully, timeoutMs);
188
+ timer = setTimeout(() => {
189
+ timedOut = true;
190
+ killGracefully();
191
+ }, timeoutMs);
169
192
  }
170
193
 
171
194
  const onAbort = () => killGracefully();
@@ -197,8 +220,10 @@ export async function runSpawn(
197
220
  // We initiated the kill (timeout or abort) — always treat as non-success
198
221
  // regardless of exit code. A process that catches SIGTERM and exits 0 still
199
222
  // hit the timeout; letting it pass as success would unblock downstream tasks
200
- // incorrectly.
201
- if (killedByUs) {
223
+ // incorrectly. The `timedOut` flag guards against the narrow race where the
224
+ // process exits naturally at the exact moment the timeout fires — even if
225
+ // killedByUs wasn't set in time, the timeout intention still applies.
226
+ if (killedByUs || timedOut) {
202
227
  return {
203
228
  exitCode: -1,
204
229
  stdout,
package/src/schema.ts CHANGED
@@ -101,7 +101,9 @@ async function loadTemplate(ref: string): Promise<TemplateConfig> {
101
101
  // Expect the module to export a template.yaml content or parsed object
102
102
  if (mod.template) return mod.template as TemplateConfig;
103
103
 
104
- // Try loading template.yaml from the package
104
+ // Try loading template.yaml from the package.
105
+ // NOTE: require.resolve is a CommonJS API. Bun supports it natively, but
106
+ // this would need import.meta.resolve() for pure ESM runtimes (e.g. Deno).
105
107
  const pkgPath = require.resolve(`${moduleName}/template.yaml`);
106
108
  const content = await Bun.file(pkgPath).text();
107
109
  const doc = yaml.load(content) as { template: TemplateConfig };
@@ -196,10 +198,15 @@ function expandTemplateTask(
196
198
  newTask.continue_from = `${instanceId}.${task.continue_from}`;
197
199
  }
198
200
 
199
- // Rewrite output path to instance namespace
201
+ // Rewrite output path to instance namespace so parallel template
202
+ // instances don't collide on the same file. Handles any relative path
203
+ // (e.g. ./tmp/foo, ./output/bar, ./build/result.json) by injecting
204
+ // the instanceId as the first directory component after `./`.
200
205
  if (task.output) {
201
206
  const original = interpolate(task.output);
202
- newTask.output = original.replace('./tmp/', `./tmp/${instanceId}/`);
207
+ newTask.output = original.startsWith('./')
208
+ ? `./${instanceId}/${original.slice(2)}`
209
+ : `${instanceId}/${original}`;
203
210
  }
204
211
 
205
212
  return newTask as unknown as RawTaskConfig;
@@ -1,5 +1,6 @@
1
1
  import { watch } from 'chokidar';
2
2
  import { resolve, dirname } from 'path';
3
+ import { mkdir } from 'fs/promises';
3
4
  import type { TriggerPlugin, TriggerContext } from '../types';
4
5
  import { parseDuration, validatePath } from '../utils';
5
6
 
@@ -35,7 +36,7 @@ export const FileTrigger: TriggerPlugin = {
35
36
  const safePath = validatePath(filePath, ctx.workDir);
36
37
  const timeoutMs = config.timeout != null ? parseDuration(String(config.timeout)) : 0;
37
38
 
38
- return new Promise((resolve_p, reject) => {
39
+ return new Promise(async (resolve_p, reject) => {
39
40
  if (ctx.signal.aborted) {
40
41
  reject(new Error('Pipeline aborted'));
41
42
  return;
@@ -44,7 +45,13 @@ export const FileTrigger: TriggerPlugin = {
44
45
  let settled = false;
45
46
  let timer: ReturnType<typeof setTimeout> | null = null;
46
47
 
48
+ // Ensure the parent directory exists so the watcher doesn't fail
49
+ // with ENOENT for nested paths like `build/output/result.json`.
47
50
  const dir = dirname(safePath);
51
+ try {
52
+ await mkdir(dir, { recursive: true });
53
+ } catch { /* best effort — dir may already exist */ }
54
+
48
55
  const watcher = watch(dir, {
49
56
  ignoreInitial: true,
50
57
  depth: 0,
@@ -41,19 +41,26 @@ export const ManualTrigger: TriggerPlugin = {
41
41
  // so instead we race against an abort promise and let engine status logic
42
42
  // fall back to pipelineAborted → skipped. abortAll() on gateway still runs
43
43
  // from engine shutdown path to clean up any truly-pending entries.
44
+ const onAbort = () => {};
44
45
  const abortPromise = new Promise<never>((_, reject) => {
45
46
  if (ctx.signal.aborted) {
46
47
  reject(new Error('Pipeline aborted'));
47
48
  return;
48
49
  }
49
- ctx.signal.addEventListener(
50
- 'abort',
51
- () => reject(new Error('Pipeline aborted')),
52
- { once: true },
53
- );
50
+ const handler = () => reject(new Error('Pipeline aborted'));
51
+ // Store reference so we can remove it after the race settles.
52
+ (onAbort as { handler?: () => void }).handler = handler;
53
+ ctx.signal.addEventListener('abort', handler, { once: true });
54
54
  });
55
55
 
56
- const decision = await Promise.race([decisionPromise, abortPromise]);
56
+ let decision: Awaited<typeof decisionPromise>;
57
+ try {
58
+ decision = await Promise.race([decisionPromise, abortPromise]);
59
+ } finally {
60
+ // Clean up the abort listener to prevent leaking on normal completion.
61
+ const handler = (onAbort as { handler?: () => void }).handler;
62
+ if (handler) ctx.signal.removeEventListener('abort', handler);
63
+ }
57
64
 
58
65
  switch (decision.outcome) {
59
66
  case 'approved':
package/src/utils.ts CHANGED
@@ -1,12 +1,12 @@
1
1
  import { resolve, relative } from 'path';
2
2
  import { randomBytes } from 'crypto';
3
3
 
4
- const DURATION_RE = /^(\d+(?:\.\d+)?)\s*(s|m|h)$/;
4
+ const DURATION_RE = /^(\d*\.?\d+)\s*(s|m|h|d)$/;
5
5
 
6
6
  export function parseDuration(input: string): number {
7
7
  const match = DURATION_RE.exec(input.trim());
8
8
  if (!match) {
9
- throw new Error(`Invalid duration format: "${input}". Expected format: <number>(s|m|h)`);
9
+ throw new Error(`Invalid duration format: "${input}". Expected format: <number>(s|m|h|d)`);
10
10
  }
11
11
  const value = parseFloat(match[1]);
12
12
  const unit = match[2];
@@ -14,6 +14,7 @@ export function parseDuration(input: string): number {
14
14
  case 's': return value * 1000;
15
15
  case 'm': return value * 60_000;
16
16
  case 'h': return value * 3_600_000;
17
+ case 'd': return value * 86_400_000;
17
18
  default: throw new Error(`Unknown duration unit: "${unit}"`);
18
19
  }
19
20
  }
@@ -136,8 +137,13 @@ export function shellArgs(command: string): readonly string[] {
136
137
  /** Quote a single argument for inclusion in a shell command string. */
137
138
  function quoteArg(arg: string): string {
138
139
  if (!/[\s"'\\<>|&;`$!^%]/.test(arg)) return arg;
139
- // Double-quote and escape embedded double quotes + backslashes
140
- return '"' + arg.replace(/\\/g, '\\\\').replace(/"/g, '\\"') + '"';
140
+ if (IS_WINDOWS) {
141
+ // On Windows (cmd.exe), double-quote and escape embedded quotes + backslashes
142
+ return '"' + arg.replace(/\\/g, '\\\\').replace(/"/g, '\\"') + '"';
143
+ }
144
+ // On Unix, use single quotes to prevent $variable expansion.
145
+ // Escape embedded single quotes via the '\'' idiom.
146
+ return "'" + arg.replace(/'/g, "'\\''") + "'";
141
147
  }
142
148
 
143
149
  /**
@@ -74,6 +74,7 @@ export function validateRaw(config: RawPipelineConfig): ValidationError[] {
74
74
  }
75
75
 
76
76
  // ── Per-task validation ──
77
+ const seenTaskIds = new Set<string>();
77
78
  for (let ki = 0; ki < track.tasks.length; ki++) {
78
79
  const task = track.tasks[ki];
79
80
  const taskPath = `${trackPath}.tasks[${ki}]`;
@@ -83,6 +84,11 @@ export function validateRaw(config: RawPipelineConfig): ValidationError[] {
83
84
  continue; // Can't check further without an id
84
85
  }
85
86
 
87
+ if (seenTaskIds.has(task.id)) {
88
+ errors.push({ path: taskPath, message: `Duplicate task id "${task.id}" in track "${track.id}"` });
89
+ }
90
+ seenTaskIds.add(task.id);
91
+
86
92
  // Template-based tasks: skip prompt/command checks (params validated at runtime)
87
93
  if (task.use) continue;
88
94
 
@@ -195,10 +201,13 @@ function detectCycles(
195
201
  // Canonical key = sorted node list joined — order-independent fingerprint.
196
202
  const seenCycles = new Set<string>();
197
203
 
198
- function dfs(id: string, path: string[]): void {
204
+ // Use one shared mutable path stack (O(1) push/pop per visit) instead of copying an O(depth) array on every recursive call.
205
+ const pathStack: string[] = [];
206
+
207
+ function dfs(id: string): void {
199
208
  if (inStack.has(id)) {
200
- const cycleStart = path.indexOf(id);
201
- const cycleNodes = [...path.slice(cycleStart), id];
209
+ const cycleStart = pathStack.indexOf(id);
210
+ const cycleNodes = [...pathStack.slice(cycleStart), id];
202
211
  const key = [...cycleNodes].sort().join(',');
203
212
  if (!seenCycles.has(key)) {
204
213
  seenCycles.add(key);
@@ -209,14 +218,16 @@ function detectCycles(
209
218
  if (visited.has(id)) return;
210
219
  visited.add(id);
211
220
  inStack.add(id);
221
+ pathStack.push(id);
212
222
  for (const dep of adj.get(id) ?? []) {
213
- dfs(dep, [...path, id]);
223
+ dfs(dep);
214
224
  }
225
+ pathStack.pop();
215
226
  inStack.delete(id);
216
227
  }
217
228
 
218
229
  for (const id of adj.keys()) {
219
- if (!visited.has(id)) dfs(id, []);
230
+ if (!visited.has(id)) dfs(id);
220
231
  }
221
232
 
222
233
  return errors;