builderman 1.1.0 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +266 -12
- package/dist/index.d.ts +2 -2
- package/dist/index.js +1 -1
- package/dist/pipeline.d.ts +18 -0
- package/dist/pipeline.js +380 -34
- package/dist/scheduler.d.ts +3 -0
- package/dist/scheduler.js +4 -12
- package/dist/task.d.ts +11 -0
- package/dist/task.js +11 -1
- package/dist/types.d.ts +163 -7
- package/package.json +1 -1
package/README.md
CHANGED
@@ -18,29 +18,34 @@ import { task, pipeline } from "builderman"
 const task1 = task({
   name: "lib:build",
   commands: {
-    dev: "tsc --watch",
     build: "tsc",
+    dev: {
+      run: "tsc --watch",
+      readyWhen: (stdout) => {
+        // mark this task as ready when the process is watching for file changes
+        return stdout.includes("Watching for file changes.")
+      },
+    },
   },
   cwd: "packages/lib",
-  isReady: (stdout) => {
-    // mark this this task as ready when the process is watching for file changes
-    return stdout.includes("Watching for file changes.")
-  },
 })

 const task2 = task({
   name: "consumer:dev",
   commands: {
-    dev: "npm run dev",
     build: "npm run build",
+    dev: "npm run dev",
+    deploy: "npm run deploy",
   },
   cwd: "packages/consumer",
   dependencies: [task1],
 })

 await pipeline([task1, task2]).run({
-
-
+  // default command is "build" if process.NODE_ENV is "production", otherwise "dev".
+  command: "deploy",
+  onTaskBegin: (taskName) => {
+    console.log(`[${taskName}] Starting...`)
   },
   onTaskComplete: (taskName) => {
     console.log(`[${taskName}] Complete!`)
@@ -54,24 +59,273 @@ await pipeline([task1, task2]).run({
 })
 ```

+## Error Handling
+
+Pipeline errors are provided as `PipelineError` instances with error codes for easier handling:
+
+```ts
+import { pipeline, PipelineError } from "builderman"
+
+await pipeline([task1, task2]).run({
+  onPipelineError: (error) => {
+    switch (error.code) {
+      case PipelineError.Aborted:
+        console.error("Pipeline was cancelled")
+        break
+      case PipelineError.TaskFailed:
+        console.error(`Task failed: ${error.message}`)
+        break
+      case PipelineError.ProcessTerminated:
+        console.error("Process was terminated")
+        break
+      case PipelineError.InvalidTask:
+        console.error(`Invalid task configuration: ${error.message}`)
+        break
+      case PipelineError.InvalidSignal:
+        console.error("Invalid abort signal")
+        break
+    }
+  },
+})
+```
+
+## Cancellation
+
+You can cancel a running pipeline by providing an `AbortSignal`:
+
+```ts
+import { pipeline, PipelineError } from "builderman"
+
+const abortController = new AbortController()
+
+const runPromise = pipeline([task1, task2]).run({
+  signal: abortController.signal,
+  onPipelineError: (error) => {
+    if (error.code === PipelineError.Aborted) {
+      console.error("Pipeline was cancelled")
+    }
+  },
+})
+
+// Cancel the pipeline after 5 seconds
+setTimeout(() => {
+  abortController.abort()
+}, 5000)
+
+try {
+  await runPromise
+} catch (error) {
+  if (error instanceof PipelineError && error.code === PipelineError.Aborted) {
+    // Pipeline was cancelled
+  }
+}
+```
+
+## Teardown
+
+Tasks can specify teardown commands that run automatically when the task completes or fails. Teardowns are executed in reverse dependency order (dependents before dependencies) to ensure proper cleanup.
+
+### Basic Teardown
+
+```ts
+const dbTask = task({
+  name: "database",
+  commands: {
+    dev: {
+      run: "docker-compose up",
+      teardown: "docker-compose down",
+    },
+    build: "echo build",
+  },
+  cwd: ".",
+})
+```
+
+### Teardown Callbacks
+
+You can monitor teardown execution with callbacks. Note that teardown failures do not cause the pipeline to fail - they are fire-and-forget cleanup operations:
+
+```ts
+await pipeline([dbTask]).run({
+  onTaskTeardown: (taskName) => {
+    console.log(`[${taskName}] Starting teardown...`)
+  },
+  onTaskTeardownError: (taskName, error) => {
+    console.error(`[${taskName}] Teardown failed: ${error.message}`)
+    // error is a regular Error instance (not a PipelineError)
+    // Teardown failures do not affect pipeline success/failure
+  },
+})
+```
+
+### Teardown Execution Rules
+
+Teardowns run when:
+
+- ✅ The command entered the running state (regardless of success or failure)
+- ✅ The pipeline completes successfully
+- ✅ The pipeline fails after tasks have started
+
+Teardowns do **not** run when:
+
+- ❌ The task was skipped (no command for the current mode)
+- ❌ The task failed before starting (spawn error)
+- ❌ The pipeline never began execution
+
+### Reverse Dependency Order
+
+Teardowns execute in reverse dependency order to ensure dependents are cleaned up before their dependencies:
+
+```ts
+const db = task({
+  name: "db",
+  commands: {
+    dev: { run: "docker-compose up", teardown: "docker-compose down" },
+    build: "echo build",
+  },
+  cwd: ".",
+})
+
+const api = task({
+  name: "api",
+  commands: {
+    dev: { run: "npm run dev", teardown: "echo stopping api" },
+    build: "echo build",
+  },
+  cwd: ".",
+  dependencies: [db], // api depends on db
+})
+
+// Teardown order: api first, then db
+await pipeline([db, api]).run()
+```
+
+## Skipping Tasks
+
+Tasks can be automatically skipped when they don't have a command for the current mode. This is useful for multi-mode pipelines where some tasks are only relevant in certain contexts.
+
+### Default Behavior
+
+If a task has no command for the current mode, it is **skipped**:
+
+- ✅ The task participates in the dependency graph
+- ✅ The task resolves immediately (satisfies dependencies)
+- ✅ Dependents are unblocked
+- ❌ No command is executed
+- ❌ No teardown is registered
+- ❌ No readiness is waited for
+
+```ts
+const dbTask = task({
+  name: "database",
+  commands: {
+    dev: "docker-compose up",
+    // No build command - will be skipped in build mode
+  },
+  cwd: ".",
+})
+
+const apiTask = task({
+  name: "api",
+  commands: {
+    dev: "npm run dev",
+    build: "npm run build",
+  },
+  cwd: ".",
+  dependencies: [dbTask], // dbTask will be skipped, but apiTask will still run
+})
+
+await pipeline([dbTask, apiTask]).run({
+  command: "build",
+  onTaskSkipped: (taskName, mode) => {
+    console.log(`[${taskName}] skipped (no command for mode "${mode}")`)
+  },
+})
+```
+
+### Strict Mode
+
+In strict mode, missing commands cause the pipeline to fail. Use this for CI/release pipelines where every task is expected to participate:
+
+```ts
+await pipeline([dbTask, apiTask]).run({
+  command: "build",
+  strict: true, // Missing commands will cause pipeline to fail
+})
+```
+
+### Task-Level Override
+
+Even with global strict mode, you can explicitly allow a task to be skipped:
+
+```ts
+const dbTask = task({
+  name: "database",
+  commands: {
+    dev: "docker-compose up",
+    // No build command, but explicitly allowed to skip
+  },
+  cwd: ".",
+  allowSkip: true, // Explicitly allow skipping even in strict mode
+})
+
+await pipeline([dbTask]).run({
+  command: "build",
+  strict: true, // Global strict mode
+  // dbTask will still be skipped because allowSkip: true
+})
+```
+
+### Nested Pipeline Behavior
+
+When a pipeline is converted to a task, skip behavior is preserved:
+
+- If **all** inner tasks are skipped → outer task is skipped
+- If **some** run, some skip → outer task is completed
+- If **any** fail → outer task fails
+
+```ts
+const innerPipeline = pipeline([
+  task({ name: "inner1", commands: { dev: "..." }, cwd: "." }),
+  task({ name: "inner2", commands: { dev: "..." }, cwd: "." }),
+])
+
+const outerTask = innerPipeline.toTask({ name: "outer" })
+
+// If all inner tasks are skipped in build mode, outer task is also skipped
+await pipeline([outerTask]).run({ command: "build" })
+```
+
 ## Pipeline Composition

 Build complex workflows by composing tasks and pipelines together.

 ### Task Chaining

-Chain tasks together using `andThen()` to create a pipeline that will run the tasks in order:
+Chain tasks together using `andThen()` to create a pipeline that will run the tasks in order, automatically adding the previous task as a dependency:

 ```ts
-import { task
+import { task } from "builderman"

 const build = task({
   name: "compile",
-  commands: {
+  commands: {
+    build: "tsc",
+    dev: {
+      run: "tsc --watch",
+      readyWhen: (output) => output.includes("Watching for file changes."),
+    },
+  },
   cwd: "packages/lib",
 }).andThen({
   name: "bundle",
-  commands: {
+  commands: {
+    build: "rollup",
+    dev: {
+      run: "rollup --watch",
+      readyWhen: (output) => output.includes("Watching for file changes."),
+    },
+  },
   cwd: "packages/lib",
 })
package/dist/index.d.ts
CHANGED
@@ -1,3 +1,3 @@
 export { task } from "./task.js";
-export { pipeline } from "./pipeline.js";
-export type { Task, Pipeline, TaskConfig } from "./types.js";
+export { pipeline, PipelineError } from "./pipeline.js";
+export type { Task, Pipeline, TaskConfig, Command, CommandConfig, Commands, PipelineRunConfig, PipelineTaskConfig, } from "./types.js";
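
For reference, the new root exports can be consumed like this (a minimal sketch; the names come straight from the declaration diff above):

```ts
import { task, pipeline, PipelineError } from "builderman"
import type { Command, CommandConfig, Commands, PipelineRunConfig } from "builderman"
```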
package/dist/index.js
CHANGED
@@ -1,2 +1,2 @@
 export { task } from "./task.js";
-export { pipeline } from "./pipeline.js";
+export { pipeline, PipelineError } from "./pipeline.js";
package/dist/pipeline.d.ts
CHANGED
@@ -1,5 +1,23 @@
 import type { Pipeline, Task } from "./types.js";
 /**
  * Creates a pipeline that manages task execution with dependency-based coordination.
+ * @param tasks - The tasks to include in the pipeline.
+ * @returns A pipeline that can be used to execute the tasks.
+ * @example
+ * const task1 = task({ name: "task1", commands: { dev: "echo task1" }, cwd: "." })
+ * const task2 = task({ name: "task2", commands: { dev: "echo task2" }, cwd: ".", dependencies: [task1] })
+ * await pipeline([task1, task2]).run()
  */
 export declare function pipeline(tasks: Task[]): Pipeline;
+type PipelineErrorCode = typeof PipelineError.Aborted | typeof PipelineError.ProcessTerminated | typeof PipelineError.TaskFailed | typeof PipelineError.InvalidSignal | typeof PipelineError.InvalidTask;
+export declare class PipelineError extends Error {
+    readonly code: PipelineErrorCode;
+    readonly taskName?: string;
+    constructor(message: string, code: PipelineErrorCode, taskName?: string);
+    static Aborted: 0;
+    static ProcessTerminated: 1;
+    static TaskFailed: 2;
+    static InvalidSignal: 3;
+    static InvalidTask: 4;
+}
+export {};
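
The declaration above exposes the error codes as numeric static members and an optional `taskName`. A hedged sketch of narrowing a rejection from `run()` (using `task1`/`task2` from the README example; whether `taskName` is populated for a given failure is not guaranteed by the declaration):

```ts
import { pipeline, PipelineError } from "builderman"

try {
  await pipeline([task1, task2]).run()
} catch (error) {
  if (error instanceof PipelineError) {
    // error.code is one of the static numeric codes; taskName is optional
    if (error.code === PipelineError.TaskFailed) {
      console.error(`Task failed${error.taskName ? ` (${error.taskName})` : ""}: ${error.message}`)
    } else if (error.code === PipelineError.Aborted) {
      console.error("Pipeline was cancelled")
    }
  }
}
```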
package/dist/pipeline.js
CHANGED
@@ -9,8 +9,16 @@ import { validateTasks } from "./util.js";
 // Module-level cache for pipeline-to-task conversions
 // Key: Pipeline, Value: Map of name -> Task
 const pipelineTaskCache = new WeakMap();
+// Store tasks for each pipeline (for nested pipeline skip tracking)
+const pipelineTasksCache = new WeakMap();
 /**
  * Creates a pipeline that manages task execution with dependency-based coordination.
+ * @param tasks - The tasks to include in the pipeline.
+ * @returns A pipeline that can be used to execute the tasks.
+ * @example
+ * const task1 = task({ name: "task1", commands: { dev: "echo task1" }, cwd: "." })
+ * const task2 = task({ name: "task2", commands: { dev: "echo task2" }, cwd: ".", dependencies: [task1] })
+ * await pipeline([task1, task2]).run()
  */
 export function pipeline(tasks) {
     const graph = createTaskGraph(tasks);
@@ -21,7 +29,7 @@ export function pipeline(tasks) {
             validateTasks(config.dependencies);
             const syntheticTask = task({
                 name: config.name,
-                commands: {
+                commands: {},
                 cwd: ".", // Dummy cwd
                 dependencies: [...(config.dependencies || [])],
             });
@@ -38,9 +46,15 @@ export function pipeline(tasks) {
         },
         async run(config) {
             const spawnFn = config?.spawn ?? spawn;
+            const signal = config?.signal;
             const runningTasks = new Map();
             const runningPipelines = new Map();
+            const teardownCommands = new Map();
             let failed = false;
+            // Check if signal is already aborted
+            if (signal?.aborted) {
+                throw new PipelineError("Aborted", PipelineError.Aborted);
+            }
             const scheduler = createScheduler(graph);
             let completionResolver = null;
             let completionRejector = null;
@@ -48,7 +62,127 @@ export function pipeline(tasks) {
                 completionResolver = resolve;
                 completionRejector = reject;
             });
-            const
+            const executeTeardown = (taskId) => {
+                const teardown = teardownCommands.get(taskId);
+                if (!teardown)
+                    return Promise.resolve();
+                // Remove from map so it doesn't run again
+                teardownCommands.delete(taskId);
+                config?.onTaskTeardown?.(teardown.taskName);
+                return new Promise((resolve) => {
+                    try {
+                        const teardownProcess = spawnFn(teardown.command, {
+                            cwd: teardown.cwd,
+                            stdio: "inherit",
+                            shell: true,
+                        });
+                        let resolved = false;
+                        const resolveOnce = () => {
+                            if (!resolved) {
+                                resolved = true;
+                                resolve();
+                            }
+                        };
+                        teardownProcess.on("error", (error) => {
+                            const teardownError = new Error(`[${teardown.taskName}] Teardown failed: ${error.message}`);
+                            config?.onTaskTeardownError?.(teardown.taskName, teardownError);
+                            resolveOnce();
+                        });
+                        teardownProcess.on("exit", (code) => {
+                            if (code !== 0) {
+                                const teardownError = new Error(`[${teardown.taskName}] Teardown failed with exit code ${code ?? 1}`);
+                                config?.onTaskTeardownError?.(teardown.taskName, teardownError);
+                            }
+                            resolveOnce();
+                        });
+                    }
+                    catch (error) {
+                        const teardownError = new Error(`[${teardown.taskName}] Teardown failed to start: ${error.message}`);
+                        config?.onTaskTeardownError?.(teardown.taskName, teardownError);
+                        resolve();
+                    }
+                });
+            };
+            const executeAllTeardowns = async () => {
+                // Execute teardowns in reverse dependency order
+                // Tasks with dependents should be torn down before their dependencies
+                const taskIdsWithTeardown = Array.from(teardownCommands.keys());
+                if (taskIdsWithTeardown.length === 0) {
+                    return;
+                }
+                // Calculate reverse topological order
+                // Tasks that have dependents should be torn down first
+                const teardownOrder = getReverseDependencyOrder(taskIdsWithTeardown, graph);
+                // Execute teardowns sequentially in reverse dependency order
+                // This ensures dependents are torn down before their dependencies
+                for (const taskId of teardownOrder) {
+                    await executeTeardown(taskId);
+                }
+            };
+            const getReverseDependencyOrder = (taskIds, graph) => {
+                // Create a set for quick lookup
+                const taskIdSet = new Set(taskIds);
+                // For reverse dependency order, we want to tear down dependents before dependencies
+                // If api depends on db, we want: api first, then db
+                // This is the reverse of normal execution order
+                // Count how many dependencies each task has (within the teardown set)
+                const dependencyCount = new Map();
+                for (const taskId of taskIds) {
+                    const node = graph.nodes.get(taskId);
+                    if (node) {
+                        let count = 0;
+                        for (const depId of node.dependencies) {
+                            if (taskIdSet.has(depId)) {
+                                count++;
+                            }
+                        }
+                        dependencyCount.set(taskId, count);
+                    }
+                }
+                // Build result in reverse order
+                // Start with tasks that have no dependencies (leaf nodes) - these go LAST
+                const result = [];
+                const visited = new Set();
+                const queue = [];
+                // Find leaf nodes (tasks with no dependencies in teardown set)
+                for (const taskId of taskIds) {
+                    if (dependencyCount.get(taskId) === 0) {
+                        queue.push(taskId);
+                    }
+                }
+                // Process in reverse topological order
+                while (queue.length > 0) {
+                    const taskId = queue.shift();
+                    if (visited.has(taskId))
+                        continue;
+                    visited.add(taskId);
+                    // Add to front (so we get reverse order: dependents before dependencies)
+                    result.unshift(taskId);
+                    // Find tasks that depend on this one (dependents)
+                    const node = graph.nodes.get(taskId);
+                    if (node) {
+                        for (const dependentId of node.dependents) {
+                            if (taskIdSet.has(dependentId) && !visited.has(dependentId)) {
+                                const currentCount = dependencyCount.get(dependentId) ?? 0;
+                                const newCount = currentCount - 1;
+                                dependencyCount.set(dependentId, newCount);
+                                // When a dependent has no more dependencies to wait for, add it to queue
+                                if (newCount === 0) {
+                                    queue.push(dependentId);
+                                }
+                            }
+                        }
+                    }
+                }
+                // Add any remaining tasks (shouldn't happen in a valid graph, but handle it)
+                for (const taskId of taskIds) {
+                    if (!visited.has(taskId)) {
+                        result.unshift(taskId);
+                    }
+                }
+                return result;
+            };
+            const failPipeline = async (error) => {
                 if (failed)
                     return;
                 failed = true;
@@ -65,17 +199,40 @@ export function pipeline(tasks) {
                     }
                     catch { }
                 }
+                // Execute all teardown commands and wait for them to complete
+                // Even if teardowns fail, we still want to reject the pipeline
+                try {
+                    await executeAllTeardowns();
+                }
+                catch (teardownError) {
+                    // Teardown errors are already reported via onTaskTeardownError
+                    // We continue to reject the pipeline with the original error
+                }
                 config?.onPipelineError?.(error);
                 completionRejector?.(error);
             };
             const startTask = (task) => {
+                // Check if signal is aborted before starting new tasks
+                if (signal?.aborted) {
+                    failPipeline(new PipelineError("Aborted", PipelineError.InvalidSignal));
+                    return;
+                }
                 const { name: taskName, [$TASK_INTERNAL]: { id: taskId, pipeline: nestedPipeline }, } = task;
                 if (runningTasks.has(taskId))
                     return;
                 // Handle pipeline tasks
                 if (nestedPipeline) {
+                    // Track nested pipeline state for skip behavior
+                    let nestedSkippedCount = 0;
+                    let nestedCompletedCount = 0;
+                    // Get total tasks from nested pipeline
+                    const nestedTasks = pipelineTasksCache.get(nestedPipeline);
+                    const nestedTotalTasks = nestedTasks
+                        ? createTaskGraph(nestedTasks).nodes.size
+                        : 0;
                     // Mark as ready immediately (pipeline entry nodes will handle their own ready state)
                     advanceScheduler({ type: "ready", taskId });
+                    config?.onTaskBegin?.(taskName);
                     // Create an abort controller to stop the nested pipeline if needed
                     let pipelineStopped = false;
                     const stopPipeline = () => {
@@ -84,58 +241,110 @@ export function pipeline(tasks) {
                         // In a more sophisticated implementation, we could propagate stop signals
                     };
                     runningPipelines.set(taskId, { stop: stopPipeline });
-                    // Run the nested pipeline
+                    // Run the nested pipeline with signal propagation
                     nestedPipeline
                         .run({
                         spawn: spawnFn,
-
+                        command: config?.command,
+                        strict: config?.strict,
+                        signal, // Pass signal to nested pipeline
+                        onTaskBegin: (nestedTaskName) => {
                             if (pipelineStopped)
                                 return;
-                            config?.
+                            config?.onTaskBegin?.(`${taskName}:${nestedTaskName}`);
                         },
                         onTaskComplete: (nestedTaskName) => {
                             if (pipelineStopped)
                                 return;
+                            nestedCompletedCount++;
                             config?.onTaskComplete?.(`${taskName}:${nestedTaskName}`);
                         },
+                        onTaskSkipped: (nestedTaskName, mode) => {
+                            if (pipelineStopped)
+                                return;
+                            nestedSkippedCount++;
+                            config?.onTaskSkipped?.(`${taskName}:${nestedTaskName}`, mode);
+                        },
                         onPipelineError: (error) => {
                             if (pipelineStopped)
                                 return;
                             runningPipelines.delete(taskId);
-
-
-                            failPipeline(e);
+                            // error is already a PipelineError
+                            failPipeline(error);
                         },
                         onPipelineComplete: () => {
                             if (pipelineStopped)
                                 return;
                             runningPipelines.delete(taskId);
-
-
+                            // Determine nested pipeline result based on skip behavior:
+                            // - If all inner tasks are skipped → outer task is skipped
+                            // - If some run, some skip → outer task is completed
+                            // - If any fail → outer task fails (handled in onPipelineError)
+                            const commandName = config?.command ?? process.env.NODE_ENV === "production"
+                                ? "build"
+                                : "dev";
+                            if (nestedSkippedCount === nestedTotalTasks &&
+                                nestedTotalTasks > 0) {
+                                // All tasks were skipped
+                                console.log(`[${taskName}] skipped (all nested tasks skipped)`);
+                                config?.onTaskSkipped?.(taskName, commandName);
+                                setImmediate(() => {
+                                    advanceScheduler({ type: "skip", taskId });
+                                });
+                            }
+                            else {
+                                // Some tasks ran (and completed successfully)
+                                config?.onTaskComplete?.(taskName);
+                                advanceScheduler({ type: "complete", taskId });
+                            }
                         },
                     })
                         .catch((error) => {
                         if (pipelineStopped)
                             return;
                         runningPipelines.delete(taskId);
-
-                        config?.onTaskError?.(taskName, e);
-                        failPipeline(e);
+                        failPipeline(error);
                     });
                     return;
                 }
                 // Regular task execution
-                const
-                ?
-                :
-                const
+                const commandName = config?.command ?? process.env.NODE_ENV === "production"
+                    ? "build"
+                    : "dev";
+                const commandConfig = task[$TASK_INTERNAL].commands[commandName];
+                // Check if command exists
+                if (commandConfig === undefined) {
+                    const allowSkip = task[$TASK_INTERNAL].allowSkip ?? false;
+                    const strict = config?.strict ?? false;
+                    // If strict mode and not explicitly allowed to skip, fail
+                    if (strict && !allowSkip) {
+                        failPipeline(new PipelineError(`[${taskName}] No command for "${commandName}" and strict mode is enabled`, PipelineError.TaskFailed));
+                        return;
+                    }
+                    // Skip the task
+                    console.log(`[${taskName}] skipped "${commandName}"`);
+                    config?.onTaskSkipped?.(taskName, commandName);
+                    // Mark as skipped - this satisfies dependencies and unblocks dependents
+                    // Use setImmediate to ensure scheduler is at idle yield before receiving skip
+                    setImmediate(() => {
+                        advanceScheduler({ type: "skip", taskId });
+                    });
+                    return;
+                }
+                const command = typeof commandConfig === "string" ? commandConfig : commandConfig.run;
+                const readyWhen = typeof commandConfig === "string"
+                    ? undefined
+                    : commandConfig.readyWhen;
+                const readyTimeout = typeof commandConfig === "string"
+                    ? Infinity
+                    : commandConfig.readyTimeout ?? Infinity;
+                const teardown = typeof commandConfig === "string" ? undefined : commandConfig.teardown;
+                const { cwd } = task[$TASK_INTERNAL];
                 const taskCwd = path.isAbsolute(cwd)
                     ? cwd
                     : path.resolve(process.cwd(), cwd);
                 if (!fs.existsSync(taskCwd)) {
-
-                    config?.onTaskError?.(taskName, e);
-                    failPipeline(e);
+                    failPipeline(new PipelineError(`[${taskName}] Working directory does not exist: ${taskCwd}`, PipelineError.InvalidTask));
                     return;
                 }
                 const accumulatedPath = [
@@ -157,17 +366,43 @@ export function pipeline(tasks) {
                     env,
                 });
                 runningTasks.set(taskId, child);
+                // Store teardown command if provided
+                if (teardown) {
+                    teardownCommands.set(taskId, {
+                        command: teardown,
+                        cwd: taskCwd,
+                        taskName,
+                    });
+                }
+                config?.onTaskBegin?.(taskName);
                 let didMarkReady = false;
-
+                let readyTimeoutId = null;
+                if (!readyWhen) {
                     advanceScheduler({ type: "ready", taskId });
                     didMarkReady = true;
                 }
+                else if (readyTimeout !== Infinity) {
+                    // Set up timeout for readyWhen condition
+                    readyTimeoutId = setTimeout(() => {
+                        if (!didMarkReady) {
+                            failPipeline(new PipelineError(`[${taskName}] Task did not become ready within ${readyTimeout}ms`, PipelineError.TaskFailed));
+                        }
+                    }, readyTimeout);
+                }
                 let output = "";
                 child.stdout?.on("data", (buf) => {
+                    // Check if signal is aborted before processing stdout
+                    if (signal?.aborted) {
+                        return;
+                    }
                    const chunk = buf.toString();
                    output += chunk;
                    process.stdout.write(chunk);
-                    if (!didMarkReady &&
+                    if (!didMarkReady && readyWhen && readyWhen(output)) {
+                        if (readyTimeoutId) {
+                            clearTimeout(readyTimeoutId);
+                            readyTimeoutId = null;
+                        }
                         advanceScheduler({ type: "ready", taskId });
                         didMarkReady = true;
                     }
@@ -176,16 +411,27 @@ export function pipeline(tasks) {
                     process.stderr.write(buf);
                 });
                 child.on("error", (error) => {
-
-
-
+                    // Task failed before entering running state, so don't execute teardown
+                    // Remove teardown from map since it was never actually running
+                    teardownCommands.delete(taskId);
+                    // Clear ready timeout if it exists
+                    if (readyTimeoutId) {
+                        clearTimeout(readyTimeoutId);
+                        readyTimeoutId = null;
+                    }
+                    failPipeline(new PipelineError(`[${taskName}] Failed to start: ${error.message}`, PipelineError.TaskFailed));
                 });
                 child.on("exit", (code) => {
                     runningTasks.delete(taskId);
+                    // Clear ready timeout if it exists
+                    if (readyTimeoutId) {
+                        clearTimeout(readyTimeoutId);
+                        readyTimeoutId = null;
+                    }
+                    // Don't execute teardown immediately - it will be executed in reverse dependency order
+                    // when the pipeline completes or fails
                     if (code !== 0) {
-
-                        config?.onTaskError?.(taskName, e);
-                        failPipeline(e);
+                        failPipeline(new PipelineError(`[${taskName}] Task failed with exit code ${code ?? 1}`, PipelineError.TaskFailed));
                         return;
                     }
                     config?.onTaskComplete?.(taskName);
@@ -194,8 +440,25 @@ export function pipeline(tasks) {
                 });
             };
             const advanceScheduler = (input) => {
+                // Check if signal is aborted before advancing scheduler
+                if (signal?.aborted) {
+                    failPipeline(new PipelineError("Aborted", PipelineError.Aborted));
+                    return;
+                }
                 let result = input ? scheduler.next(input) : scheduler.next();
+                // If we passed skip/complete input and got "idle", the generator processed it
+                // but returned the old yield. Call next() again to get the result after processing.
+                if (input &&
+                    (input.type === "skip" || input.type === "complete") &&
+                    result.value?.type === "idle") {
+                    result = scheduler.next();
+                }
                 while (true) {
+                    // Check signal again in the loop
+                    if (signal?.aborted) {
+                        failPipeline(new PipelineError("Aborted", PipelineError.Aborted));
+                        return;
+                    }
                     const event = result.value;
                     const isFinished = result.done && result.value.type === "done";
                     if (isFinished) {
@@ -214,21 +477,104 @@ export function pipeline(tasks) {
                 }
             };
             // Handle termination signals
-            const
+            const processTerminationListenerCleanups = [
+                "SIGINT",
+                "SIGTERM",
+                "SIGQUIT",
+                "SIGBREAK",
+            ].map((sig) => {
                 const handleSignal = () => {
-                    failPipeline(new
+                    failPipeline(new PipelineError(`Received ${sig}`, PipelineError.ProcessTerminated));
                 };
-                process.once(
+                process.once(sig, handleSignal);
                 return () => {
-                    process.removeListener(
+                    process.removeListener(sig, handleSignal);
                 };
             });
+            // Handle abort signal if provided
+            let signalCleanup = null;
+            if (signal) {
+                const handleAbort = () => {
+                    failPipeline(new PipelineError("Aborted", PipelineError.Aborted));
+                };
+                signal.addEventListener("abort", handleAbort);
+                signalCleanup = () => {
+                    signal.removeEventListener("abort", handleAbort);
+                };
+            }
             // 🚀 Kick off initial runnable tasks
             advanceScheduler();
-            await completionPromise
-
+            await completionPromise
+                .then(async () => {
+                // Pipeline completed successfully - execute any remaining teardowns
+                // (for tasks that completed successfully) and wait for them to complete
+                // Even if teardowns fail, the pipeline still resolves successfully
+                // (teardown errors are reported via onTaskTeardownError)
+                try {
+                    await executeAllTeardowns();
+                }
+                catch (teardownError) {
+                    // Teardown errors are already reported via onTaskTeardownError
+                    // The pipeline still resolves successfully
+                }
+            })
+                .finally(() => {
+                processTerminationListenerCleanups.forEach((cleanup) => cleanup());
+                signalCleanup?.();
             });
         },
     };
+    // Store tasks for nested pipeline skip tracking
+    pipelineTasksCache.set(pipelineImpl, tasks);
     return pipelineImpl;
 }
+export class PipelineError extends Error {
+    constructor(message, code, taskName) {
+        super(message);
+        Object.defineProperty(this, "code", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "taskName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.name = "PipelineError";
+        this.code = code;
+        this.taskName = taskName;
+    }
+}
+Object.defineProperty(PipelineError, "Aborted", {
+    enumerable: true,
+    configurable: true,
+    writable: true,
+    value: 0
+});
+Object.defineProperty(PipelineError, "ProcessTerminated", {
+    enumerable: true,
+    configurable: true,
+    writable: true,
+    value: 1
+});
+Object.defineProperty(PipelineError, "TaskFailed", {
+    enumerable: true,
+    configurable: true,
+    writable: true,
+    value: 2
+});
+Object.defineProperty(PipelineError, "InvalidSignal", {
+    enumerable: true,
+    configurable: true,
+    writable: true,
+    value: 3
+});
+Object.defineProperty(PipelineError, "InvalidTask", {
+    enumerable: true,
+    configurable: true,
+    writable: true,
+    value: 4
+});
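
Two behavioural details from this diff are not spelled out in the README: nested-pipeline tasks report their inner callbacks with a `parent:child` name, and the outer `command`, `strict`, and `signal` options are forwarded to the nested run. A small illustrative sketch (the task names are arbitrary):

```ts
import { task, pipeline } from "builderman"

const inner = pipeline([
  task({ name: "db", commands: { dev: "echo starting db" }, cwd: "." }),
])

await pipeline([inner.toTask({ name: "backend" })]).run({
  command: "dev",
  onTaskBegin: (name) => {
    // Expected to log "backend" for the wrapper task and "backend:db" for the inner task
    console.log(`[${name}] starting`)
  },
})
```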
package/dist/scheduler.d.ts
CHANGED
package/dist/scheduler.js
CHANGED
@@ -29,20 +29,12 @@ export function* createScheduler(graph) {
             continue;
         }
         const input = yield { type: "idle" };
-        if (input?.
-            markDependencyReady(input.taskId);
-            // After marking a dependency as ready, continue the loop to check for newly runnable tasks
-            // This ensures that if dependents become runnable, they are yielded immediately
+        if (input?.taskId === undefined)
             continue;
-
-
-        completed++;
-        // When a task completes, it's also ready (if it wasn't already)
-        // This handles the case where a task completes without a ready check
-        markDependencyReady(input.taskId);
-        // Continue the loop to check for newly runnable tasks
+        markDependencyReady(input.taskId);
+        if (input.type === "ready")
             continue;
-
+        completed++;
     }
     return { type: "done" };
 }
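
The simplified scheduler reads as a small generator protocol: `pipeline.js` drives it with `scheduler.next(input)`, the generator yields `{ type: "idle" }` while waiting, and any input other than `"ready"` (i.e. `"complete"` or `"skip"`) counts toward completion. Written out as types for clarity (these type names are my own shorthand, not part of the package):

```ts
type SchedulerInput = { type: "ready" | "complete" | "skip"; taskId: number }

type SchedulerEvent =
  | { type: "idle" } // nothing runnable right now; waiting for input
  | { type: "done" } // every task has completed or been skipped
// (the generator also yields events for runnable tasks, elided here)
```

Because both `"complete"` and `"skip"` advance the completed counter, a skipped task satisfies its dependents exactly like a completed one, which is what the README's skipping behaviour relies on.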
package/dist/task.d.ts
CHANGED
@@ -1,5 +1,16 @@
 import type { TaskConfig, Task } from "./types.js";
 /**
  * Creates a task configuration.
+ * @param config - The configuration for the task.
+ * @returns A task instance.
+ * @example
+ * const build = task({ name: "build", commands: { build: "npm run build" }, cwd: "." })
+ * const deploy = task({ name: "deploy", commands: { build: "npm run deploy" }, cwd: ".", dependencies: [build] })
+ * await pipeline([build, deploy]).run()
+ *
+ * // alternatively, you can use the andThen method to chain tasks together:
+ * const buildAndDeploy = task({ name: "build", commands: { build: "npm run build" }, cwd: "." })
+ *   .andThen({ name: "deploy", commands: { build: "npm run deploy" }, cwd: "." })
+ * await buildAndDeploy.run()
  */
 export declare function task(config: TaskConfig): Task;
package/dist/task.js
CHANGED
@@ -4,6 +4,17 @@ import { pipeline } from "./pipeline.js";
 let taskId = 0;
 /**
  * Creates a task configuration.
+ * @param config - The configuration for the task.
+ * @returns A task instance.
+ * @example
+ * const build = task({ name: "build", commands: { build: "npm run build" }, cwd: "." })
+ * const deploy = task({ name: "deploy", commands: { build: "npm run deploy" }, cwd: ".", dependencies: [build] })
+ * await pipeline([build, deploy]).run()
+ *
+ * // alternatively, you can use the andThen method to chain tasks together:
+ * const buildAndDeploy = task({ name: "build", commands: { build: "npm run build" }, cwd: "." })
+ *   .andThen({ name: "deploy", commands: { build: "npm run deploy" }, cwd: "." })
+ * await buildAndDeploy.run()
  */
 export function task(config) {
     validateTasks(config.dependencies);
@@ -13,7 +24,6 @@ export function task(config) {
             ...config,
             id: taskId++,
            dependencies: [...(config.dependencies || [])],
-            shouldStdoutMarkReady: config.isReady,
        },
        andThen(nextConfig) {
            // Create the next task with the current task as a dependency
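
The removed `shouldStdoutMarkReady: config.isReady` line is the flip side of the README change above: the task-level `isReady` option from 1.1.0 is gone, and readiness is now expressed per command via `readyWhen`. A migration sketch using the same task as the README example:

```ts
// 1.1.0: readiness was a task-level option
// const lib = task({
//   name: "lib:build",
//   commands: { dev: "tsc --watch", build: "tsc" },
//   cwd: "packages/lib",
//   isReady: (stdout) => stdout.includes("Watching for file changes."),
// })

// 1.2.0: readiness lives on the command config
const lib = task({
  name: "lib:build",
  commands: {
    build: "tsc",
    dev: {
      run: "tsc --watch",
      readyWhen: (stdout) => stdout.includes("Watching for file changes."),
    },
  },
  cwd: "packages/lib",
})
```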
package/dist/types.d.ts
CHANGED
@@ -1,43 +1,199 @@
 import type { $TASK_INTERNAL } from "./constants.js";
+import { PipelineError } from "./pipeline.js";
+/**
+ * Configuration for a command to be executed as part of a task.
+ */
+export interface CommandConfig {
+    /**
+     * The command string to execute (e.g., "npm run dev" or "node server.js").
+     */
+    run: string;
+    /**
+     * Optional function that determines when the command is considered "ready".
+     * The function receives the accumulated stdout output and should return true
+     * when the command has reached a ready state (e.g., server has started).
+     * If not provided, the task is marked as ready immediately after the command starts.
+     */
+    readyWhen?: (stdout: string) => boolean;
+    /**
+     * Maximum time in milliseconds to wait for the command to become ready.
+     * Only applies when `readyWhen` is provided.
+     * @default Infinity
+     */
+    readyTimeout?: number;
+    /**
+     * Optional command to run during teardown (e.g., to stop a server).
+     */
+    teardown?: string;
+}
+/**
+ * A command can be either a simple string or a CommandConfig object.
+ * When a string is provided, it's equivalent to `{ run: string }`.
+ */
+export type Command = string | CommandConfig;
+/**
+ * A map of command names to their configurations.
+ * Common command names include "dev", "build", "test", etc.
+ * The command name is selected based on the pipeline's run configuration.
+ * If a matching command is not found, the task is skipped.
+ */
 export interface Commands {
-
-    build: string;
+    [key: string]: Command;
 }
+/**
+ * Configuration for creating a task in the pipeline.
+ */
 export interface TaskConfig {
+    /**
+     * The name of the task. Used for logging and identification.
+     */
     name: string;
+    /**
+     * Map of command names to their configurations.
+     * The pipeline will select a command based on the run configuration
+     * (defaults to "dev" in development, "build" in production).
+     */
     commands: Commands;
+    /**
+     * Working directory for the task's commands.
+     * Can be absolute or relative to the current working directory.
+     */
     cwd: string;
-
+    /**
+     * Optional array of tasks that must complete before this task can start.
+     * Dependencies are executed in parallel when possible.
+     */
     dependencies?: Task[];
+    /**
+     * Allows this task to be skipped even in strict mode.
+     * Use this to explicitly mark tasks that are intentionally mode-specific.
+     */
+    allowSkip?: boolean;
 }
-interface TaskInternal extends
+interface TaskInternal extends TaskConfig {
     id: number;
     dependencies: Task[];
-    shouldStdoutMarkReady?: (stdout: string) => boolean;
     pipeline?: Pipeline;
 }
+/**
+ * A task to be executed in a pipeline. Tasks are created using the `task()` function.
+ * Tasks can have dependencies on other tasks and define commands to execute in a specific mode.
+ */
 export interface Task {
+    /**
+     * The name of the task.
+     */
     name: string;
+    /**
+     * Internal task data. This property is for internal use only.
+     * @internal
+     */
     [$TASK_INTERNAL]: TaskInternal;
+    /**
+     * Creates a new pipeline that starts after this task completes.
+     * This allows for chaining tasks together.
+     * @param config Task configuration for the next task in the chain.
+     * @returns A new pipeline starting with the configured task.
+     */
     andThen(config: Omit<TaskConfig, "dependencies">): Pipeline;
 }
+/**
+ * Configuration options for running a pipeline.
+ */
 export interface PipelineRunConfig {
+    /**
+     * Provides a custom command for the pipeline.
+     * @default process.env.NODE_ENV === "production" ? "build" : "dev"
+     */
+    command?: string;
+    /**
+     * Provides a custom abort signal for the pipeline.
+     * Aborting the signal will cause the pipeline to fail.
+     */
+    signal?: AbortSignal;
     /**
      * Provides a custom spawn function for the pipeline.
      * @default import("node:child_process").spawn
      */
     spawn?: typeof import("node:child_process").spawn;
-
+    /**
+     * If true, missing commands will cause the pipeline to fail.
+     * Use this for CI/release pipelines where every task is expected to participate.
+     */
+    strict?: boolean;
+    /**
+     * Callback invoked when a task begins execution.
+     * @param taskName The name of the task that started.
+     */
+    onTaskBegin?: (taskName: string) => void;
+    /**
+     * Callback invoked when a task completes successfully.
+     * @param taskName The name of the task that completed.
+     */
     onTaskComplete?: (taskName: string) => void;
-
+    /**
+     * Callback invoked when a task is skipped (e.g., when a command doesn't exist for the current mode).
+     * @param taskName The name of the task that was skipped.
+     * @param mode The command mode that was requested (e.g., "dev", "build").
+     */
+    onTaskSkipped?: (taskName: string, mode: string) => void;
+    /**
+     * Callback invoked when a task's teardown command begins execution.
+     * @param taskName The name of the task whose teardown is running.
+     */
+    onTaskTeardown?: (taskName: string) => void;
+    /**
+     * Callback invoked when a task's teardown command fails.
+     * Note: Teardown failures do not cause the pipeline to fail.
+     * @param taskName The name of the task whose teardown failed.
+     * @param error The error that occurred during teardown.
+     */
+    onTaskTeardownError?: (taskName: string, error: Error) => void;
+    /**
+     * Callback invoked when the pipeline encounters an error and fails.
+     * @param error The PipelineError that caused the pipeline to fail.
+     */
+    onPipelineError?: (error: PipelineError) => void;
+    /**
+     * Callback invoked when the pipeline completes successfully.
+     * This is called after all tasks have completed and all teardowns have finished.
+     */
     onPipelineComplete?: () => void;
 }
+/**
+ * Configuration for converting a pipeline into a task.
+ * This allows pipelines to be used as dependencies in other pipelines.
+ */
 export interface PipelineTaskConfig {
+    /**
+     * The name for the task that represents this pipeline.
+     */
     name: string;
+    /**
+     * Optional array of tasks that must complete before this pipeline task can start.
+     */
     dependencies?: Task[];
 }
+/**
+ * A pipeline manages the execution of tasks with dependency-based coordination.
+ * Pipelines are created using the `pipeline()` function.
+ */
 export interface Pipeline {
+    /**
+     * Runs the pipeline, executing all tasks according to their dependencies.
+     * Tasks with no dependencies start immediately, and tasks with dependencies
+     * wait for their dependencies to complete before starting.
+     * @param config Optional configuration for pipeline execution.
+     * @returns A promise that resolves when all tasks complete successfully,
+     * or rejects if any task fails or the pipeline is aborted.
+     */
     run(config?: PipelineRunConfig): Promise<void>;
+    /**
+     * Converts this pipeline into a task that can be used as a dependency
+     * in another pipeline. This enables nested pipelines.
+     * @param config Configuration for the task representation of this pipeline.
+     * @returns A task that represents this pipeline.
+     */
     toTask(config: PipelineTaskConfig): Task;
 }
 export interface TaskNode {