@livingdata/pipex 0.0.5 → 0.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +25 -4
- package/dist/cli/pipeline-loader.js +4 -0
- package/dist/cli/pipeline-runner.js +4 -0
- package/dist/cli/reporter.js +83 -40
- package/dist/engine/docker-executor.js +24 -14
- package/dist/kits/builtin/node.js +5 -6
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -129,7 +129,7 @@ steps:
|
|
|
129
129
|
inputs: [{ step: build }]
|
|
130
130
|
```
|
|
131
131
|
|
|
132
|
-
`uses` and `image`/`cmd` are mutually exclusive. All other step fields (`env`, `inputs`, `mounts`, `caches`, `timeoutSec`, `allowFailure`, `allowNetwork`) remain available and merge with kit defaults (user values take priority). The `src` parameter in `with` mounts the host directory at `/app` (see [Mounts](#mounts)).
|
|
132
|
+
`uses` and `image`/`cmd` are mutually exclusive. All other step fields (`env`, `inputs`, `mounts`, `sources`, `caches`, `timeoutSec`, `allowFailure`, `allowNetwork`) remain available and merge with kit defaults (user values take priority). The `src` parameter in `with` copies the host directory into `/app` in the container's writable layer (see [Sources](#sources)).
|
|
133
133
|
|
|
134
134
|
#### Available Kits
|
|
135
135
|
|
|
@@ -138,7 +138,7 @@ steps:
|
|
|
138
138
|
| Parameter | Default | Description |
|
|
139
139
|
|-----------|---------|-------------|
|
|
140
140
|
| `script` | *(required)* | Script to run (relative to `/app`) |
|
|
141
|
-
| `src` | -- | Host directory to mount at `/app` |
|
|
141
|
+
| `src` | -- | Host directory to copy into `/app` |
|
|
142
142
|
| `version` | `"24"` | Node.js version |
|
|
143
143
|
| `packageManager` | `"npm"` | `"npm"`, `"pnpm"`, or `"yarn"` |
|
|
144
144
|
| `install` | `true` | Run package install before script |
|
|
@@ -149,7 +149,7 @@ steps:
|
|
|
149
149
|
| Parameter | Default | Description |
|
|
150
150
|
|-----------|---------|-------------|
|
|
151
151
|
| `script` | *(required)* | Script to run (relative to `/app`) |
|
|
152
|
-
| `src` | -- | Host directory to mount at `/app` |
|
|
152
|
+
| `src` | -- | Host directory to copy into `/app` |
|
|
153
153
|
| `version` | `"3.12"` | Python version |
|
|
154
154
|
| `packageManager` | `"pip"` | `"pip"` or `"uv"` |
|
|
155
155
|
| `install` | `true` | Run dependency install before script |
|
|
@@ -161,7 +161,7 @@ steps:
|
|
|
161
161
|
|-----------|---------|-------------|
|
|
162
162
|
| `run` | *(required)* | Shell command to execute |
|
|
163
163
|
| `packages` | -- | Apt packages to install before running |
|
|
164
|
-
| `src` | -- | Host directory to mount at `/app` |
|
|
164
|
+
| `src` | -- | Host directory to mount read-only at `/app` |
|
|
165
165
|
| `image` | `"alpine:3.20"` | Docker image (defaults to `"debian:bookworm-slim"` when `packages` is set) |
|
|
166
166
|
|
|
167
167
|
When `packages` is provided, the kit automatically switches to a Debian image, enables network access, and provides an `apt-cache` cache. Without packages, it runs on a minimal Alpine image with no network.
|
|
@@ -212,6 +212,7 @@ steps:
|
|
|
212
212
|
| `env` | Record<string, string> | Environment variables |
|
|
213
213
|
| `outputPath` | string | Output mount point (default: `/output`) |
|
|
214
214
|
| `mounts` | MountSpec[] | Host directories to bind mount (read-only) |
|
|
215
|
+
| `sources` | MountSpec[] | Host directories copied into the container's writable layer |
|
|
215
216
|
| `caches` | CacheSpec[] | Persistent caches to mount |
|
|
216
217
|
| `timeoutSec` | number | Execution timeout |
|
|
217
218
|
| `allowFailure` | boolean | Continue pipeline if step fails |
|
|
@@ -250,6 +251,26 @@ mounts:
|
|
|
250
251
|
|
|
251
252
|
This means a pipeline at `/project/ci/pipeline.yaml` can only mount subdirectories of `/project/ci/`. Use `/tmp` or `/output` inside the container for writes.
|
|
252
253
|
|
|
254
|
+
### Sources
|
|
255
|
+
|
|
256
|
+
Copy host directories into the container's **writable layer**. Unlike bind mounts, copied files live inside the container so the step can create new files and subdirectories alongside them (e.g. `node_modules` after `npm install`).
|
|
257
|
+
|
|
258
|
+
```yaml
|
|
259
|
+
sources:
|
|
260
|
+
- host: src/app
|
|
261
|
+
container: /app
|
|
262
|
+
```
|
|
263
|
+
|
|
264
|
+
- Same path rules as `mounts` (`host` relative, `container` absolute, no `..`)
|
|
265
|
+
- Files are snapshotted at step start -- changes on the host during execution are not reflected
|
|
266
|
+
- The container can write next to source files without affecting the host
|
|
267
|
+
|
|
268
|
+
**When to use `sources` vs `mounts`**:
|
|
269
|
+
- Use `sources` when the step needs to write alongside the source files (install dependencies, generate build artifacts next to sources)
|
|
270
|
+
- Use `mounts` when read-only access is sufficient (config files, static data)
|
|
271
|
+
|
|
272
|
+
Kits use `sources` internally: the `node` kit's `src` parameter copies into `/app` so that `npm install` can create `node_modules`.
|
|
273
|
+
|
|
253
274
|
### Caches
|
|
254
275
|
|
|
255
276
|
Persistent read-write directories shared across steps and executions:
|
|
@@ -49,6 +49,7 @@ export class PipelineLoader {
|
|
|
49
49
|
outputPath: step.outputPath,
|
|
50
50
|
caches: mergeCaches(kitOutput.caches, step.caches),
|
|
51
51
|
mounts: mergeMounts(kitOutput.mounts, step.mounts),
|
|
52
|
+
sources: mergeMounts(kitOutput.sources, step.sources),
|
|
52
53
|
timeoutSec: step.timeoutSec,
|
|
53
54
|
allowFailure: step.allowFailure,
|
|
54
55
|
allowNetwork: step.allowNetwork ?? kitOutput.allowNetwork
|
|
@@ -70,6 +71,9 @@ export class PipelineLoader {
|
|
|
70
71
|
if (step.mounts) {
|
|
71
72
|
this.validateMounts(step.id, step.mounts);
|
|
72
73
|
}
|
|
74
|
+
if (step.sources) {
|
|
75
|
+
this.validateMounts(step.id, step.sources);
|
|
76
|
+
}
|
|
73
77
|
if (step.caches) {
|
|
74
78
|
this.validateCaches(step.id, step.caches);
|
|
75
79
|
}
|
|
@@ -102,6 +102,10 @@ export class PipelineRunner {
|
|
|
102
102
|
output,
|
|
103
103
|
caches,
|
|
104
104
|
mounts,
|
|
105
|
+
sources: step.sources?.map(m => ({
|
|
106
|
+
hostPath: resolve(pipelineRoot, m.host),
|
|
107
|
+
containerPath: m.container
|
|
108
|
+
})),
|
|
105
109
|
network: step.allowNetwork ? 'bridge' : 'none',
|
|
106
110
|
timeoutSec: step.timeoutSec
|
|
107
111
|
}, ({ stream, line }) => {
|
package/dist/cli/reporter.js
CHANGED
|
@@ -23,57 +23,100 @@ export class ConsoleReporter {
|
|
|
23
23
|
* Suitable for local development and manual execution.
|
|
24
24
|
*/
|
|
25
25
|
export class InteractiveReporter {
|
|
26
|
+
static get maxStderrLines() {
|
|
27
|
+
return 20;
|
|
28
|
+
}
|
|
26
29
|
spinner;
|
|
27
30
|
stepSpinners = new Map();
|
|
31
|
+
stderrBuffers = new Map();
|
|
28
32
|
state(workspaceId, event, step, meta) {
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
const spinner = ora({ text: step.displayName, prefixText: ' ' }).start();
|
|
35
|
-
this.stepSpinners.set(step.id, spinner);
|
|
36
|
-
}
|
|
37
|
-
if (event === 'STEP_SKIPPED' && step) {
|
|
38
|
-
const spinner = this.stepSpinners.get(step.id);
|
|
39
|
-
if (spinner) {
|
|
40
|
-
spinner.stopAndPersist({ symbol: chalk.gray('⊙'), text: chalk.gray(`${step.displayName} (cached)`) });
|
|
41
|
-
this.stepSpinners.delete(step.id);
|
|
33
|
+
switch (event) {
|
|
34
|
+
case 'PIPELINE_START': {
|
|
35
|
+
const displayName = meta?.pipelineName ?? workspaceId;
|
|
36
|
+
console.log(chalk.bold(`\n▶ Pipeline: ${chalk.cyan(displayName)}\n`));
|
|
37
|
+
break;
|
|
42
38
|
}
|
|
43
|
-
|
|
44
|
-
|
|
39
|
+
case 'STEP_STARTING': {
|
|
40
|
+
if (step) {
|
|
41
|
+
const spinner = ora({ text: step.displayName, prefixText: ' ' }).start();
|
|
42
|
+
this.stepSpinners.set(step.id, spinner);
|
|
43
|
+
}
|
|
44
|
+
break;
|
|
45
45
|
}
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
46
|
+
case 'STEP_SKIPPED': {
|
|
47
|
+
if (step) {
|
|
48
|
+
const spinner = this.stepSpinners.get(step.id);
|
|
49
|
+
if (spinner) {
|
|
50
|
+
spinner.stopAndPersist({ symbol: chalk.gray('⊙'), text: chalk.gray(`${step.displayName} (cached)`) });
|
|
51
|
+
this.stepSpinners.delete(step.id);
|
|
52
|
+
}
|
|
53
|
+
else {
|
|
54
|
+
console.log(` ${chalk.gray('⊙')} ${chalk.gray(`${step.displayName} (cached)`)}`);
|
|
55
|
+
}
|
|
56
|
+
}
|
|
57
|
+
break;
|
|
52
58
|
}
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
59
|
+
case 'STEP_FINISHED': {
|
|
60
|
+
if (step) {
|
|
61
|
+
const spinner = this.stepSpinners.get(step.id);
|
|
62
|
+
if (spinner) {
|
|
63
|
+
spinner.stopAndPersist({ symbol: chalk.green('✓'), text: chalk.green(step.displayName) });
|
|
64
|
+
this.stepSpinners.delete(step.id);
|
|
65
|
+
}
|
|
66
|
+
this.stderrBuffers.delete(step.id);
|
|
67
|
+
}
|
|
68
|
+
break;
|
|
69
|
+
}
|
|
70
|
+
case 'STEP_FAILED': {
|
|
71
|
+
if (step) {
|
|
72
|
+
this.handleStepFailed(step, meta);
|
|
73
|
+
}
|
|
74
|
+
break;
|
|
75
|
+
}
|
|
76
|
+
case 'PIPELINE_FINISHED': {
|
|
77
|
+
console.log(chalk.bold.green('\n✓ Pipeline completed\n'));
|
|
78
|
+
break;
|
|
79
|
+
}
|
|
80
|
+
case 'PIPELINE_FAILED': {
|
|
81
|
+
console.log(chalk.bold.red('\n✗ Pipeline failed\n'));
|
|
82
|
+
break;
|
|
64
83
|
}
|
|
65
|
-
}
|
|
66
|
-
if (event === 'PIPELINE_FINISHED') {
|
|
67
|
-
console.log(chalk.bold.green('\n✓ Pipeline completed\n'));
|
|
68
|
-
}
|
|
69
|
-
if (event === 'PIPELINE_FAILED') {
|
|
70
|
-
console.log(chalk.bold.red('\n✗ Pipeline failed\n'));
|
|
71
84
|
}
|
|
72
85
|
}
|
|
73
|
-
log(_workspaceId,
|
|
74
|
-
|
|
86
|
+
log(_workspaceId, step, stream, line) {
|
|
87
|
+
if (stream === 'stderr') {
|
|
88
|
+
let buffer = this.stderrBuffers.get(step.id);
|
|
89
|
+
if (!buffer) {
|
|
90
|
+
buffer = [];
|
|
91
|
+
this.stderrBuffers.set(step.id, buffer);
|
|
92
|
+
}
|
|
93
|
+
buffer.push(line);
|
|
94
|
+
if (buffer.length > InteractiveReporter.maxStderrLines) {
|
|
95
|
+
buffer.shift();
|
|
96
|
+
}
|
|
97
|
+
}
|
|
75
98
|
}
|
|
76
99
|
result(_workspaceId, _step, _result) {
|
|
77
100
|
// Results shown via state updates
|
|
78
101
|
}
|
|
102
|
+
handleStepFailed(step, meta) {
|
|
103
|
+
const spinner = this.stepSpinners.get(step.id);
|
|
104
|
+
const exitCode = meta?.exitCode;
|
|
105
|
+
if (spinner) {
|
|
106
|
+
const exitInfo = exitCode === undefined ? '' : ` (exit ${exitCode})`;
|
|
107
|
+
spinner.stopAndPersist({
|
|
108
|
+
symbol: chalk.red('✗'),
|
|
109
|
+
text: chalk.red(`${step.displayName}${exitInfo}`)
|
|
110
|
+
});
|
|
111
|
+
this.stepSpinners.delete(step.id);
|
|
112
|
+
}
|
|
113
|
+
const stderr = this.stderrBuffers.get(step.id);
|
|
114
|
+
if (stderr && stderr.length > 0) {
|
|
115
|
+
console.log(chalk.red(' ── stderr ──'));
|
|
116
|
+
for (const line of stderr) {
|
|
117
|
+
console.log(chalk.red(` ${line}`));
|
|
118
|
+
}
|
|
119
|
+
}
|
|
120
|
+
this.stderrBuffers.delete(step.id);
|
|
121
|
+
}
|
|
79
122
|
}
|
|
@@ -1,6 +1,5 @@
|
|
|
1
1
|
import process from 'node:process';
|
|
2
2
|
import { execa } from 'execa';
|
|
3
|
-
import { createInterface } from 'node:readline';
|
|
4
3
|
import { ContainerExecutor } from './executor.js';
|
|
5
4
|
/**
|
|
6
5
|
* Build a minimal environment for the Docker CLI process.
|
|
@@ -29,7 +28,10 @@ export class DockerCliExecutor extends ContainerExecutor {
|
|
|
29
28
|
}
|
|
30
29
|
async run(workspace, request, onLogLine) {
|
|
31
30
|
const startedAt = new Date();
|
|
32
|
-
|
|
31
|
+
// Use create+start instead of run: docker run cannot create mountpoints
|
|
32
|
+
// for anonymous volumes inside read-only bind mounts (shadow paths).
|
|
33
|
+
// docker create sets up the filesystem layer before readonly applies.
|
|
34
|
+
const args = ['create', '--name', request.name, '--network', request.network];
|
|
33
35
|
if (request.env) {
|
|
34
36
|
for (const [key, value] of Object.entries(request.env)) {
|
|
35
37
|
args.push('-e', `${key}=${value}`);
|
|
@@ -59,24 +61,32 @@ export class DockerCliExecutor extends ContainerExecutor {
|
|
|
59
61
|
let exitCode = 0;
|
|
60
62
|
let error;
|
|
61
63
|
try {
|
|
62
|
-
|
|
64
|
+
await execa('docker', args, { env: this.env });
|
|
65
|
+
// Copy source directories into the container's writable layer.
|
|
66
|
+
// Unlike bind mounts, copied files live in the container layer,
|
|
67
|
+
// so the container can create subdirectories (e.g. node_modules).
|
|
68
|
+
if (request.sources) {
|
|
69
|
+
for (const source of request.sources) {
|
|
70
|
+
await execa('docker', ['cp', `${source.hostPath}/.`, `${request.name}:${source.containerPath}`], { env: this.env });
|
|
71
|
+
}
|
|
72
|
+
}
|
|
73
|
+
const proc = execa('docker', ['start', '-a', request.name], {
|
|
63
74
|
env: this.env,
|
|
64
75
|
reject: false,
|
|
65
76
|
timeout: request.timeoutSec ? request.timeoutSec * 1000 : undefined
|
|
66
77
|
});
|
|
67
|
-
|
|
68
|
-
const
|
|
69
|
-
rl.on('line', line => {
|
|
78
|
+
const stdoutDone = (async () => {
|
|
79
|
+
for await (const line of proc.iterable({ from: 'stdout' })) {
|
|
70
80
|
onLogLine({ stream: 'stdout', line });
|
|
71
|
-
}
|
|
72
|
-
}
|
|
73
|
-
|
|
74
|
-
const
|
|
75
|
-
rl.on('line', line => {
|
|
81
|
+
}
|
|
82
|
+
})();
|
|
83
|
+
const stderrDone = (async () => {
|
|
84
|
+
for await (const line of proc.iterable({ from: 'stderr' })) {
|
|
76
85
|
onLogLine({ stream: 'stderr', line });
|
|
77
|
-
}
|
|
78
|
-
}
|
|
86
|
+
}
|
|
87
|
+
})();
|
|
79
88
|
const result = await proc;
|
|
89
|
+
await Promise.all([stdoutDone, stderrDone]);
|
|
80
90
|
exitCode = result.exitCode ?? 0;
|
|
81
91
|
}
|
|
82
92
|
catch (error_) {
|
|
@@ -85,7 +95,7 @@ export class DockerCliExecutor extends ContainerExecutor {
|
|
|
85
95
|
}
|
|
86
96
|
finally {
|
|
87
97
|
try {
|
|
88
|
-
await execa('docker', ['rm', '-f', request.name], { env: this.env, reject: false });
|
|
98
|
+
await execa('docker', ['rm', '-f', '-v', request.name], { env: this.env, reject: false });
|
|
89
99
|
}
|
|
90
100
|
catch {
|
|
91
101
|
// Best effort cleanup
|
|
@@ -6,13 +6,13 @@ const cacheMap = {
|
|
|
6
6
|
function buildInstallCommand(packageManager) {
|
|
7
7
|
switch (packageManager) {
|
|
8
8
|
case 'npm': {
|
|
9
|
-
return 'cd /
|
|
9
|
+
return 'cd /app && npm install --no-package-lock --no-audit --no-fund 2>&1';
|
|
10
10
|
}
|
|
11
11
|
case 'pnpm': {
|
|
12
|
-
return 'cd /
|
|
12
|
+
return 'cd /app && pnpm install --no-frozen-lockfile 2>&1';
|
|
13
13
|
}
|
|
14
14
|
case 'yarn': {
|
|
15
|
-
return 'cd /
|
|
15
|
+
return 'cd /app && yarn install 2>&1';
|
|
16
16
|
}
|
|
17
17
|
default: {
|
|
18
18
|
throw new Error(`Kit "node": unsupported packageManager "${packageManager}"`);
|
|
@@ -36,8 +36,7 @@ export const nodeKit = {
|
|
|
36
36
|
if (install) {
|
|
37
37
|
parts.push(buildInstallCommand(packageManager));
|
|
38
38
|
}
|
|
39
|
-
|
|
40
|
-
parts.push(`${nodePathPrefix}node /app/${script}`);
|
|
39
|
+
parts.push(`node /app/${script}`);
|
|
41
40
|
const cache = cacheMap[packageManager];
|
|
42
41
|
if (!cache) {
|
|
43
42
|
throw new Error(`Kit "node": unsupported packageManager "${packageManager}"`);
|
|
@@ -49,7 +48,7 @@ export const nodeKit = {
|
|
|
49
48
|
allowNetwork: true
|
|
50
49
|
};
|
|
51
50
|
if (src) {
|
|
52
|
-
output.mounts = [{ host: src, container: '/app' }];
|
|
51
|
+
output.sources = [{ host: src, container: '/app' }];
|
|
53
52
|
}
|
|
54
53
|
return output;
|
|
55
54
|
}
|