@redwoodjs/agent-ci 0.1.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +34 -14
- package/dist/cli.js +101 -11
- package/dist/output/working-directory.js +1 -1
- package/dist/runner/local-job.js +17 -2
- package/dist/runner/result-builder.js +111 -1
- package/dist/runner/result-builder.test.js +138 -0
- package/dist/runner/step-wrapper.js +69 -0
- package/dist/workflow/workflow-parser.js +246 -11
- package/dist/workflow/workflow-parser.test.js +340 -0
- package/package.json +7 -2
package/README.md
CHANGED
|
@@ -12,35 +12,34 @@ Agent CI runs on any machine that can run a container. When a step fails the run
|
|
|
12
12
|
|
|
13
13
|
<!-- TODO: Add demo video/screen recording -->
|
|
14
14
|
|
|
15
|
-
##
|
|
16
|
-
|
|
17
|
-
```bash
|
|
18
|
-
npm install -g agent-ci
|
|
19
|
-
```
|
|
20
|
-
|
|
21
|
-
### Prerequisites
|
|
15
|
+
## Prerequisites
|
|
22
16
|
|
|
23
17
|
- **Docker** — A running Docker provider:
|
|
24
18
|
- **macOS:** [OrbStack](https://orbstack.dev/) (recommended) or Docker Desktop
|
|
25
19
|
- **Linux:** Native Docker Engine
|
|
26
20
|
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
Agent CI connects to Docker via the `DOCKER_HOST` environment variable. By default it uses the local socket (`unix:///var/run/docker.sock`), but you can point it at any remote Docker daemon:
|
|
21
|
+
## Installation
|
|
30
22
|
|
|
31
23
|
```bash
|
|
32
|
-
|
|
33
|
-
DOCKER_HOST=ssh://user@remote-server agent-ci run --workflow .github/workflows/ci.yml
|
|
24
|
+
npm install -D @redwoodjs/agent-ci
|
|
34
25
|
```
|
|
35
26
|
|
|
36
27
|
## Usage
|
|
37
28
|
|
|
38
29
|
```bash
|
|
39
30
|
# Run a specific workflow
|
|
40
|
-
agent-ci run --workflow .github/workflows/ci.yml
|
|
31
|
+
npx agent-ci run --workflow .github/workflows/ci.yml
|
|
41
32
|
|
|
42
33
|
# Run all relevant workflows for the current branch
|
|
43
|
-
agent-ci run --all
|
|
34
|
+
npx agent-ci run --all
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
### Remote Docker
|
|
38
|
+
|
|
39
|
+
Agent CI connects to Docker via the `DOCKER_HOST` environment variable. By default it uses the local socket (`unix:///var/run/docker.sock`), but you can point it at any remote Docker daemon:
|
|
40
|
+
|
|
41
|
+
```bash
|
|
42
|
+
DOCKER_HOST=ssh://user@remote-server npx agent-ci run --workflow .github/workflows/ci.yml
|
|
44
43
|
```
|
|
45
44
|
|
|
46
45
|
### `agent-ci run`
|
|
@@ -77,3 +76,24 @@ Abort a paused runner and tear down its container.
|
|
|
77
76
|
## YAML Compatibility
|
|
78
77
|
|
|
79
78
|
See [compatibility.md](./compatibility.md) for detailed GitHub Actions workflow syntax support.
|
|
79
|
+
|
|
80
|
+
## Debugging
|
|
81
|
+
|
|
82
|
+
Set the `DEBUG` environment variable to enable verbose debug logging. It accepts a comma-separated list of glob patterns matching the namespaces you want to see:
|
|
83
|
+
|
|
84
|
+
| Value | What it shows |
|
|
85
|
+
| --------------------------------- | ----------------------------- |
|
|
86
|
+
| `DEBUG=agent-ci:*` | All debug output |
|
|
87
|
+
| `DEBUG=agent-ci:cli` | CLI-level logs only |
|
|
88
|
+
| `DEBUG=agent-ci:runner` | Runner/container logs only |
|
|
89
|
+
| `DEBUG=agent-ci:dtu` | DTU mock-server logs only |
|
|
90
|
+
| `DEBUG=agent-ci:boot` | Boot/startup timing logs only |
|
|
91
|
+
| `DEBUG=agent-ci:cli,agent-ci:dtu` | Multiple namespaces |
|
|
92
|
+
|
|
93
|
+
- Output goes to **stderr** so stdout stays clean for piping.
|
|
94
|
+
- If `DEBUG` is unset or empty, all debug loggers become **no-ops** (zero overhead).
|
|
95
|
+
- Pattern matching uses [minimatch](https://github.com/isaacs/minimatch) globs, so `agent-ci:*` matches all four namespaces.
|
|
96
|
+
|
|
97
|
+
```bash
|
|
98
|
+
DEBUG=agent-ci:* npx agent-ci run
|
|
99
|
+
```
|
package/dist/cli.js
CHANGED
|
@@ -6,7 +6,8 @@ import { getNextLogNum } from "./output/logger.js";
|
|
|
6
6
|
import { setWorkingDirectory, DEFAULT_WORKING_DIR, PROJECT_ROOT, } from "./output/working-directory.js";
|
|
7
7
|
import { debugCli } from "./output/debug.js";
|
|
8
8
|
import { executeLocalJob } from "./runner/local-job.js";
|
|
9
|
-
import { getWorkflowTemplate, parseWorkflowSteps, parseWorkflowServices, parseWorkflowContainer, validateSecrets, parseMatrixDef, expandMatrixCombinations, isWorkflowRelevant, getChangedFiles, } from "./workflow/workflow-parser.js";
|
|
9
|
+
import { getWorkflowTemplate, parseWorkflowSteps, parseWorkflowServices, parseWorkflowContainer, validateSecrets, parseMatrixDef, expandMatrixCombinations, isWorkflowRelevant, getChangedFiles, parseJobOutputDefs, parseJobIf, evaluateJobIf, parseFailFast, } from "./workflow/workflow-parser.js";
|
|
10
|
+
import { resolveJobOutputs } from "./runner/result-builder.js";
|
|
10
11
|
import { createConcurrencyLimiter, getDefaultMaxConcurrentJobs } from "./output/concurrency.js";
|
|
11
12
|
import { isWarmNodeModules, computeLockfileHash } from "./output/cleanup.js";
|
|
12
13
|
import { getWorkingDirectory } from "./output/working-directory.js";
|
|
@@ -497,24 +498,36 @@ async function handleWorkflow(options) {
|
|
|
497
498
|
taskId: ej.taskName,
|
|
498
499
|
};
|
|
499
500
|
};
|
|
500
|
-
const runJob = async (ej) => {
|
|
501
|
+
const runJob = async (ej, needsContext) => {
|
|
501
502
|
const { taskName, matrixContext } = ej;
|
|
502
503
|
debugCli(`Running: ${path.basename(workflowPath)} | Task: ${taskName}${matrixContext ? ` | Matrix: ${JSON.stringify(Object.fromEntries(Object.entries(matrixContext).filter(([k]) => !k.startsWith("__"))))}` : ""}`);
|
|
503
504
|
const secrets = loadMachineSecrets(repoRoot);
|
|
504
505
|
const secretsFilePath = path.join(repoRoot, ".env.agent-ci");
|
|
505
506
|
validateSecrets(workflowPath, taskName, secrets, secretsFilePath);
|
|
506
|
-
const steps = await parseWorkflowSteps(workflowPath, taskName, secrets, matrixContext);
|
|
507
|
+
const steps = await parseWorkflowSteps(workflowPath, taskName, secrets, matrixContext, needsContext);
|
|
507
508
|
const services = await parseWorkflowServices(workflowPath, taskName);
|
|
508
509
|
const container = await parseWorkflowContainer(workflowPath, taskName);
|
|
509
510
|
const job = buildJob(ej);
|
|
510
511
|
job.steps = steps;
|
|
511
512
|
job.services = services;
|
|
512
513
|
job.container = container ?? undefined;
|
|
513
|
-
|
|
514
|
+
const result = await executeLocalJob(job, { pauseOnFailure, store });
|
|
515
|
+
// result.outputs now contains raw step outputs (extracted inside executeLocalJob
|
|
516
|
+
// before workspace cleanup). Resolve them to job-level outputs using the
|
|
517
|
+
// output definitions from the workflow YAML.
|
|
518
|
+
if (result.outputs && Object.keys(result.outputs).length > 0) {
|
|
519
|
+
const outputDefs = parseJobOutputDefs(workflowPath, taskName);
|
|
520
|
+
if (Object.keys(outputDefs).length > 0) {
|
|
521
|
+
result.outputs = resolveJobOutputs(outputDefs, result.outputs);
|
|
522
|
+
}
|
|
523
|
+
}
|
|
524
|
+
return result;
|
|
514
525
|
};
|
|
515
526
|
pruneOrphanedDockerResources();
|
|
516
527
|
const limiter = createConcurrencyLimiter(maxJobs);
|
|
517
528
|
const allResults = [];
|
|
529
|
+
// Accumulate job outputs across waves for needs.*.outputs.* resolution
|
|
530
|
+
const jobOutputs = new Map();
|
|
518
531
|
// ── Dependency-aware wave scheduling ──────────────────────────────────────
|
|
519
532
|
const deps = parseJobDependencies(workflowPath);
|
|
520
533
|
const waves = topoSort(deps);
|
|
@@ -525,6 +538,73 @@ async function handleWorkflow(options) {
|
|
|
525
538
|
if (filteredWaves.length === 0) {
|
|
526
539
|
filteredWaves.push(Array.from(taskNamesInWf));
|
|
527
540
|
}
|
|
541
|
+
/** Build a needsContext for a job from its dependencies' accumulated outputs */
|
|
542
|
+
const buildNeedsContext = (jobId) => {
|
|
543
|
+
const jobDeps = deps.get(jobId);
|
|
544
|
+
if (!jobDeps || jobDeps.length === 0) {
|
|
545
|
+
return undefined;
|
|
546
|
+
}
|
|
547
|
+
const ctx = {};
|
|
548
|
+
for (const depId of jobDeps) {
|
|
549
|
+
ctx[depId] = jobOutputs.get(depId) ?? {};
|
|
550
|
+
}
|
|
551
|
+
return ctx;
|
|
552
|
+
};
|
|
553
|
+
/** Collect outputs from a completed job result */
|
|
554
|
+
const collectOutputs = (result, taskName) => {
|
|
555
|
+
if (result.outputs && Object.keys(result.outputs).length > 0) {
|
|
556
|
+
jobOutputs.set(taskName, result.outputs);
|
|
557
|
+
}
|
|
558
|
+
};
|
|
559
|
+
// Track job results for if-condition evaluation (success/failure status)
|
|
560
|
+
const jobResultStatus = new Map();
|
|
561
|
+
/** Check if a job should be skipped based on its if: condition */
|
|
562
|
+
const shouldSkipJob = (jobId) => {
|
|
563
|
+
const ifExpr = parseJobIf(workflowPath, jobId);
|
|
564
|
+
if (ifExpr === null) {
|
|
565
|
+
// No if: condition — default behavior is success() (skip if any upstream failed)
|
|
566
|
+
const jobDeps = deps.get(jobId);
|
|
567
|
+
if (jobDeps && jobDeps.length > 0) {
|
|
568
|
+
const anyFailed = jobDeps.some((d) => jobResultStatus.get(d) === "failure");
|
|
569
|
+
if (anyFailed) {
|
|
570
|
+
return true;
|
|
571
|
+
}
|
|
572
|
+
}
|
|
573
|
+
return false;
|
|
574
|
+
}
|
|
575
|
+
// Build upstream job results for the evaluator
|
|
576
|
+
const upstreamResults = {};
|
|
577
|
+
const jobDeps = deps.get(jobId) ?? [];
|
|
578
|
+
for (const depId of jobDeps) {
|
|
579
|
+
upstreamResults[depId] = jobResultStatus.get(depId) ?? "success";
|
|
580
|
+
}
|
|
581
|
+
const needsCtx = buildNeedsContext(jobId);
|
|
582
|
+
return !evaluateJobIf(ifExpr, upstreamResults, needsCtx);
|
|
583
|
+
};
|
|
584
|
+
/** Create a synthetic skipped result for a job that was skipped by if: */
|
|
585
|
+
const skippedResult = (ej) => ({
|
|
586
|
+
name: `agent-ci-skipped-${ej.taskName}`,
|
|
587
|
+
workflow: path.basename(workflowPath),
|
|
588
|
+
taskId: ej.taskName,
|
|
589
|
+
succeeded: true,
|
|
590
|
+
durationMs: 0,
|
|
591
|
+
debugLogPath: "",
|
|
592
|
+
steps: [],
|
|
593
|
+
});
|
|
594
|
+
/** Run a job or skip it based on if: condition */
|
|
595
|
+
const runOrSkipJob = async (ej) => {
|
|
596
|
+
if (shouldSkipJob(ej.taskName)) {
|
|
597
|
+
debugCli(`Skipping ${ej.taskName} (if: condition is false)`);
|
|
598
|
+
const result = skippedResult(ej);
|
|
599
|
+
jobResultStatus.set(ej.taskName, "skipped");
|
|
600
|
+
return result;
|
|
601
|
+
}
|
|
602
|
+
const ctx = buildNeedsContext(ej.taskName);
|
|
603
|
+
const result = await runJob(ej, ctx);
|
|
604
|
+
jobResultStatus.set(ej.taskName, result.succeeded ? "success" : "failure");
|
|
605
|
+
collectOutputs(result, ej.taskName);
|
|
606
|
+
return result;
|
|
607
|
+
};
|
|
528
608
|
for (let wi = 0; wi < filteredWaves.length; wi++) {
|
|
529
609
|
const waveJobIds = new Set(filteredWaves[wi]);
|
|
530
610
|
const waveJobs = expandedJobs.filter((j) => waveJobIds.has(j.taskName));
|
|
@@ -534,9 +614,9 @@ async function handleWorkflow(options) {
|
|
|
534
614
|
// ── Warm-cache serialization for the first wave ────────────────────────
|
|
535
615
|
if (!warm && wi === 0 && waveJobs.length > 1) {
|
|
536
616
|
debugCli("Cold cache — running first job to populate warm modules...");
|
|
537
|
-
const firstResult = await
|
|
617
|
+
const firstResult = await runOrSkipJob(waveJobs[0]);
|
|
538
618
|
allResults.push(firstResult);
|
|
539
|
-
const results = await Promise.allSettled(waveJobs.slice(1).map((ej) => limiter.run(() =>
|
|
619
|
+
const results = await Promise.allSettled(waveJobs.slice(1).map((ej) => limiter.run(() => runOrSkipJob(ej))));
|
|
540
620
|
for (const r of results) {
|
|
541
621
|
if (r.status === "fulfilled") {
|
|
542
622
|
allResults.push(r.value);
|
|
@@ -545,17 +625,27 @@ async function handleWorkflow(options) {
|
|
|
545
625
|
warm = true;
|
|
546
626
|
}
|
|
547
627
|
else {
|
|
548
|
-
const results = await Promise.allSettled(waveJobs.map((ej) => limiter.run(() =>
|
|
628
|
+
const results = await Promise.allSettled(waveJobs.map((ej) => limiter.run(() => runOrSkipJob(ej))));
|
|
549
629
|
for (const r of results) {
|
|
550
630
|
if (r.status === "fulfilled") {
|
|
551
631
|
allResults.push(r.value);
|
|
552
632
|
}
|
|
553
633
|
}
|
|
554
634
|
}
|
|
555
|
-
//
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
635
|
+
// Check whether to abort remaining waves on failure
|
|
636
|
+
const waveHadFailures = allResults.some((r) => !r.succeeded);
|
|
637
|
+
if (waveHadFailures && wi < filteredWaves.length - 1) {
|
|
638
|
+
// Check fail-fast setting for jobs in this wave
|
|
639
|
+
const waveFailFastSettings = waveJobs.map((ej) => parseFailFast(workflowPath, ej.taskName));
|
|
640
|
+
// Abort unless ALL jobs in the wave explicitly set fail-fast: false
|
|
641
|
+
const shouldAbort = !waveFailFastSettings.every((ff) => ff === false);
|
|
642
|
+
if (shouldAbort) {
|
|
643
|
+
debugCli(`Wave ${wi + 1} had failures — aborting remaining waves for ${path.basename(workflowPath)}`);
|
|
644
|
+
break;
|
|
645
|
+
}
|
|
646
|
+
else {
|
|
647
|
+
debugCli(`Wave ${wi + 1} had failures but fail-fast is disabled — continuing`);
|
|
648
|
+
}
|
|
559
649
|
}
|
|
560
650
|
}
|
|
561
651
|
return allResults;
|
|
@@ -2,7 +2,7 @@ import path from "node:path";
|
|
|
2
2
|
import os from "node:os";
|
|
3
3
|
import fs from "node:fs";
|
|
4
4
|
import { fileURLToPath } from "node:url";
|
|
5
|
-
// Pinned to the
|
|
5
|
+
// Pinned to the cli package root
|
|
6
6
|
export const PROJECT_ROOT = path.resolve(fileURLToPath(import.meta.url), "..", "..", "..");
|
|
7
7
|
// When running inside a container with Docker-outside-of-Docker (shared socket),
|
|
8
8
|
// /tmp is NOT visible to the Docker host. Use a project-relative directory
|
package/dist/runner/local-job.js
CHANGED
|
@@ -17,7 +17,7 @@ import { prepareWorkspace } from "./workspace.js";
|
|
|
17
17
|
import { createRunDirectories } from "./directory-setup.js";
|
|
18
18
|
import { buildContainerEnv, buildContainerBinds, buildContainerCmd, resolveDtuHost, resolveDockerApiUrl, } from "../docker/container-config.js";
|
|
19
19
|
import { buildJobResult, sanitizeStepName } from "./result-builder.js";
|
|
20
|
-
import { wrapJobSteps } from "./step-wrapper.js";
|
|
20
|
+
import { wrapJobSteps, appendOutputCaptureStep } from "./step-wrapper.js";
|
|
21
21
|
import { syncWorkspaceForRetry } from "./sync.js";
|
|
22
22
|
// ─── Docker setup ─────────────────────────────────────────────────────────────
|
|
23
23
|
const dockerHost = process.env.DOCKER_HOST || "unix:///var/run/docker.sock";
|
|
@@ -169,7 +169,8 @@ export async function executeLocalJob(job, options) {
|
|
|
169
169
|
default_branch: job.repository?.default_branch || "main",
|
|
170
170
|
}
|
|
171
171
|
: job.repository;
|
|
172
|
-
const
|
|
172
|
+
const wrappedSteps = pauseOnFailure ? wrapJobSteps(job.steps ?? [], true) : job.steps;
|
|
173
|
+
const seededSteps = appendOutputCaptureStep(wrappedSteps ?? []);
|
|
173
174
|
t0 = Date.now();
|
|
174
175
|
const seedResponse = await fetch(`${dtuUrl}/_dtu/seed`, {
|
|
175
176
|
method: "POST",
|
|
@@ -668,6 +669,19 @@ export async function executeLocalJob(job, options) {
|
|
|
668
669
|
if (fs.existsSync(hostRunnerDir)) {
|
|
669
670
|
fs.rmSync(hostRunnerDir, { recursive: true, force: true });
|
|
670
671
|
}
|
|
672
|
+
// Read step outputs captured by the DTU server via the runner's outputs API
|
|
673
|
+
let stepOutputs = {};
|
|
674
|
+
if (jobSucceeded) {
|
|
675
|
+
const outputsFile = path.join(logDir, "outputs.json");
|
|
676
|
+
try {
|
|
677
|
+
if (fs.existsSync(outputsFile)) {
|
|
678
|
+
stepOutputs = JSON.parse(fs.readFileSync(outputsFile, "utf-8"));
|
|
679
|
+
}
|
|
680
|
+
}
|
|
681
|
+
catch {
|
|
682
|
+
/* best-effort */
|
|
683
|
+
}
|
|
684
|
+
}
|
|
671
685
|
if (jobSucceeded && fs.existsSync(dirs.containerWorkDir)) {
|
|
672
686
|
fs.rmSync(dirs.containerWorkDir, { recursive: true, force: true });
|
|
673
687
|
}
|
|
@@ -682,6 +696,7 @@ export async function executeLocalJob(job, options) {
|
|
|
682
696
|
timelinePath,
|
|
683
697
|
logDir,
|
|
684
698
|
debugLogPath,
|
|
699
|
+
stepOutputs,
|
|
685
700
|
});
|
|
686
701
|
}
|
|
687
702
|
finally {
|
|
@@ -83,11 +83,117 @@ export function extractFailureDetails(timelinePath, failedStepName, logDir) {
|
|
|
83
83
|
}
|
|
84
84
|
return result;
|
|
85
85
|
}
|
|
86
|
+
// ─── Step output extraction ───────────────────────────────────────────────────
|
|
87
|
+
/**
|
|
88
|
+
* Extract step outputs from the runner's `_runner_file_commands/` directory.
|
|
89
|
+
*
|
|
90
|
+
* The GitHub Actions runner writes step outputs to files named `set_output_<uuid>`
|
|
91
|
+
* in `<workDir>/_runner_file_commands/`. Each file contains key=value pairs,
|
|
92
|
+
* with multiline values using the heredoc format:
|
|
93
|
+
* key<<DELIMITER
|
|
94
|
+
* line1
|
|
95
|
+
* line2
|
|
96
|
+
* DELIMITER
|
|
97
|
+
*
|
|
98
|
+
* @param workDir The container's work directory (bind-mounted from host)
|
|
99
|
+
* @returns Record<string, string> of all output key-value pairs
|
|
100
|
+
*/
|
|
101
|
+
export function extractStepOutputs(workDir) {
|
|
102
|
+
const outputs = {};
|
|
103
|
+
// The runner writes to _temp/_runner_file_commands/ under the work dir
|
|
104
|
+
// $GITHUB_OUTPUT = /home/runner/_work/_temp/_runner_file_commands/set_output_<uuid>
|
|
105
|
+
const candidates = [
|
|
106
|
+
path.join(workDir, "_temp", "_runner_file_commands"),
|
|
107
|
+
path.join(workDir, "_runner_file_commands"),
|
|
108
|
+
];
|
|
109
|
+
for (const fileCommandsDir of candidates) {
|
|
110
|
+
if (!fs.existsSync(fileCommandsDir)) {
|
|
111
|
+
continue;
|
|
112
|
+
}
|
|
113
|
+
let entries;
|
|
114
|
+
try {
|
|
115
|
+
entries = fs.readdirSync(fileCommandsDir).sort(); // Sort for deterministic override order
|
|
116
|
+
}
|
|
117
|
+
catch {
|
|
118
|
+
continue;
|
|
119
|
+
}
|
|
120
|
+
for (const entry of entries) {
|
|
121
|
+
if (!entry.startsWith("set_output_")) {
|
|
122
|
+
continue;
|
|
123
|
+
}
|
|
124
|
+
try {
|
|
125
|
+
const content = fs.readFileSync(path.join(fileCommandsDir, entry), "utf-8");
|
|
126
|
+
parseOutputFileContent(content, outputs);
|
|
127
|
+
}
|
|
128
|
+
catch {
|
|
129
|
+
// Best-effort: skip unreadable files
|
|
130
|
+
}
|
|
131
|
+
}
|
|
132
|
+
}
|
|
133
|
+
return outputs;
|
|
134
|
+
}
|
|
135
|
+
/**
|
|
136
|
+
* Parse the content of a single set_output file into the outputs record.
|
|
137
|
+
* Handles both `key=value` and `key<<DELIMITER\nvalue\nDELIMITER` formats.
|
|
138
|
+
*/
|
|
139
|
+
function parseOutputFileContent(content, outputs) {
|
|
140
|
+
const lines = content.split("\n");
|
|
141
|
+
let i = 0;
|
|
142
|
+
while (i < lines.length) {
|
|
143
|
+
const line = lines[i];
|
|
144
|
+
// Heredoc format: key<<DELIMITER
|
|
145
|
+
const heredocMatch = line.match(/^([^=]+)<<(.+)$/);
|
|
146
|
+
if (heredocMatch) {
|
|
147
|
+
const key = heredocMatch[1];
|
|
148
|
+
const delimiter = heredocMatch[2];
|
|
149
|
+
const valueLines = [];
|
|
150
|
+
i++;
|
|
151
|
+
while (i < lines.length && lines[i] !== delimiter) {
|
|
152
|
+
valueLines.push(lines[i]);
|
|
153
|
+
i++;
|
|
154
|
+
}
|
|
155
|
+
outputs[key] = valueLines.join("\n");
|
|
156
|
+
i++; // Skip the closing delimiter
|
|
157
|
+
continue;
|
|
158
|
+
}
|
|
159
|
+
// Simple format: key=value
|
|
160
|
+
const eqIdx = line.indexOf("=");
|
|
161
|
+
if (eqIdx > 0) {
|
|
162
|
+
const key = line.slice(0, eqIdx);
|
|
163
|
+
const value = line.slice(eqIdx + 1);
|
|
164
|
+
outputs[key] = value;
|
|
165
|
+
}
|
|
166
|
+
i++;
|
|
167
|
+
}
|
|
168
|
+
}
|
|
169
|
+
// ─── Job output resolution ────────────────────────────────────────────────────
|
|
170
|
+
/**
|
|
171
|
+
* Resolve job output definitions against actual step outputs.
|
|
172
|
+
*
|
|
173
|
+
* Job output templates reference `steps.<stepId>.outputs.<name>`. Since the
|
|
174
|
+
* runner writes all step outputs to `$GITHUB_OUTPUT` files keyed only by
|
|
175
|
+
* output name (not step ID), we resolve by matching the output name from
|
|
176
|
+
* the template against the flat step outputs map.
|
|
177
|
+
*
|
|
178
|
+
* @param outputDefs Job output definitions from parseJobOutputDefs
|
|
179
|
+
* @param stepOutputs Flat step outputs from extractStepOutputs
|
|
180
|
+
* @returns Resolved job outputs
|
|
181
|
+
*/
|
|
182
|
+
export function resolveJobOutputs(outputDefs, stepOutputs) {
|
|
183
|
+
const result = {};
|
|
184
|
+
for (const [outputName, template] of Object.entries(outputDefs)) {
|
|
185
|
+
// Replace ${{ steps.<id>.outputs.<name> }} with the actual step output value
|
|
186
|
+
result[outputName] = template.replace(/\$\{\{\s*steps\.[^.]+\.outputs\.([^\s}]+)\s*\}\}/g, (_match, outputKey) => {
|
|
187
|
+
return stepOutputs[outputKey] ?? "";
|
|
188
|
+
});
|
|
189
|
+
}
|
|
190
|
+
return result;
|
|
191
|
+
}
|
|
86
192
|
/**
|
|
87
193
|
* Build the structured `JobResult` from container exit state and timeline data.
|
|
88
194
|
*/
|
|
89
195
|
export function buildJobResult(opts) {
|
|
90
|
-
const { containerName, job, startTime, jobSucceeded, lastFailedStep, containerExitCode, timelinePath, logDir, debugLogPath, } = opts;
|
|
196
|
+
const { containerName, job, startTime, jobSucceeded, lastFailedStep, containerExitCode, timelinePath, logDir, debugLogPath, stepOutputs, } = opts;
|
|
91
197
|
const steps = parseTimelineSteps(timelinePath);
|
|
92
198
|
const result = {
|
|
93
199
|
name: containerName,
|
|
@@ -115,5 +221,9 @@ export function buildJobResult(opts) {
|
|
|
115
221
|
result.lastOutputLines = [];
|
|
116
222
|
}
|
|
117
223
|
}
|
|
224
|
+
// Attach raw step outputs (will be resolved to job outputs by cli.ts)
|
|
225
|
+
if (stepOutputs && Object.keys(stepOutputs).length > 0) {
|
|
226
|
+
result.outputs = stepOutputs;
|
|
227
|
+
}
|
|
118
228
|
return result;
|
|
119
229
|
}
|
|
@@ -175,3 +175,141 @@ describe("buildJobResult", () => {
|
|
|
175
175
|
expect(result.lastOutputLines).toContain("compile error");
|
|
176
176
|
});
|
|
177
177
|
});
|
|
178
|
+
// ── extractStepOutputs ────────────────────────────────────────────────────────
|
|
179
|
+
describe("extractStepOutputs", () => {
|
|
180
|
+
let tmpDir;
|
|
181
|
+
beforeEach(() => {
|
|
182
|
+
tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "step-outputs-test-"));
|
|
183
|
+
});
|
|
184
|
+
afterEach(() => {
|
|
185
|
+
fs.rmSync(tmpDir, { recursive: true, force: true });
|
|
186
|
+
});
|
|
187
|
+
it("extracts simple key=value outputs from set_output files", async () => {
|
|
188
|
+
const { extractStepOutputs } = await import("./result-builder.js");
|
|
189
|
+
// Simulate the runner's file_commands directory structure
|
|
190
|
+
const fileCommandsDir = path.join(tmpDir, "_runner_file_commands");
|
|
191
|
+
fs.mkdirSync(fileCommandsDir, { recursive: true });
|
|
192
|
+
fs.writeFileSync(path.join(fileCommandsDir, "set_output_abc123"), "skip=false\nshard_count=3\n");
|
|
193
|
+
const outputs = extractStepOutputs(tmpDir);
|
|
194
|
+
expect(outputs).toEqual({
|
|
195
|
+
skip: "false",
|
|
196
|
+
shard_count: "3",
|
|
197
|
+
});
|
|
198
|
+
});
|
|
199
|
+
it("extracts multiline (heredoc) values", async () => {
|
|
200
|
+
const { extractStepOutputs } = await import("./result-builder.js");
|
|
201
|
+
const fileCommandsDir = path.join(tmpDir, "_runner_file_commands");
|
|
202
|
+
fs.mkdirSync(fileCommandsDir, { recursive: true });
|
|
203
|
+
fs.writeFileSync(path.join(fileCommandsDir, "set_output_def456"), 'matrix<<EOF\n["1","2","3"]\nEOF\n');
|
|
204
|
+
const outputs = extractStepOutputs(tmpDir);
|
|
205
|
+
expect(outputs).toEqual({
|
|
206
|
+
matrix: '["1","2","3"]',
|
|
207
|
+
});
|
|
208
|
+
});
|
|
209
|
+
it("merges outputs from multiple set_output files", async () => {
|
|
210
|
+
const { extractStepOutputs } = await import("./result-builder.js");
|
|
211
|
+
const fileCommandsDir = path.join(tmpDir, "_runner_file_commands");
|
|
212
|
+
fs.mkdirSync(fileCommandsDir, { recursive: true });
|
|
213
|
+
fs.writeFileSync(path.join(fileCommandsDir, "set_output_aaa"), "key1=val1\n");
|
|
214
|
+
fs.writeFileSync(path.join(fileCommandsDir, "set_output_bbb"), "key2=val2\n");
|
|
215
|
+
const outputs = extractStepOutputs(tmpDir);
|
|
216
|
+
expect(outputs.key1).toBe("val1");
|
|
217
|
+
expect(outputs.key2).toBe("val2");
|
|
218
|
+
});
|
|
219
|
+
it("returns empty object when no _runner_file_commands directory exists", async () => {
|
|
220
|
+
const { extractStepOutputs } = await import("./result-builder.js");
|
|
221
|
+
const outputs = extractStepOutputs(tmpDir);
|
|
222
|
+
expect(outputs).toEqual({});
|
|
223
|
+
});
|
|
224
|
+
it("returns empty object when directory has no set_output files", async () => {
|
|
225
|
+
const { extractStepOutputs } = await import("./result-builder.js");
|
|
226
|
+
const fileCommandsDir = path.join(tmpDir, "_runner_file_commands");
|
|
227
|
+
fs.mkdirSync(fileCommandsDir, { recursive: true });
|
|
228
|
+
fs.writeFileSync(path.join(fileCommandsDir, "add_path_xyz"), "/usr/local/bin\n");
|
|
229
|
+
const outputs = extractStepOutputs(tmpDir);
|
|
230
|
+
expect(outputs).toEqual({});
|
|
231
|
+
});
|
|
232
|
+
it("later files override earlier ones for the same key", async () => {
|
|
233
|
+
const { extractStepOutputs } = await import("./result-builder.js");
|
|
234
|
+
const fileCommandsDir = path.join(tmpDir, "_runner_file_commands");
|
|
235
|
+
fs.mkdirSync(fileCommandsDir, { recursive: true });
|
|
236
|
+
fs.writeFileSync(path.join(fileCommandsDir, "set_output_aaa"), "key=first\n");
|
|
237
|
+
fs.writeFileSync(path.join(fileCommandsDir, "set_output_zzz"), "key=second\n");
|
|
238
|
+
const outputs = extractStepOutputs(tmpDir);
|
|
239
|
+
expect(outputs.key).toBe("second");
|
|
240
|
+
});
|
|
241
|
+
it("handles multiline heredoc with multiple lines", async () => {
|
|
242
|
+
const { extractStepOutputs } = await import("./result-builder.js");
|
|
243
|
+
const fileCommandsDir = path.join(tmpDir, "_runner_file_commands");
|
|
244
|
+
fs.mkdirSync(fileCommandsDir, { recursive: true });
|
|
245
|
+
fs.writeFileSync(path.join(fileCommandsDir, "set_output_multi"), "tests<<DELIM\ntest1.ts\ntest2.ts\ntest3.ts\nDELIM\n");
|
|
246
|
+
const outputs = extractStepOutputs(tmpDir);
|
|
247
|
+
expect(outputs.tests).toBe("test1.ts\ntest2.ts\ntest3.ts");
|
|
248
|
+
});
|
|
249
|
+
});
|
|
250
|
+
// ── resolveJobOutputs ─────────────────────────────────────────────────────────
|
|
251
|
+
describe("resolveJobOutputs", () => {
|
|
252
|
+
it("resolves step output references in job output templates", async () => {
|
|
253
|
+
const { resolveJobOutputs } = await import("./result-builder.js");
|
|
254
|
+
const outputDefs = {
|
|
255
|
+
skip: "${{ steps.check.outputs.skip }}",
|
|
256
|
+
count: "${{ steps.counter.outputs.shard_count }}",
|
|
257
|
+
};
|
|
258
|
+
const stepOutputs = {
|
|
259
|
+
skip: "false",
|
|
260
|
+
shard_count: "3",
|
|
261
|
+
};
|
|
262
|
+
const resolved = resolveJobOutputs(outputDefs, stepOutputs);
|
|
263
|
+
expect(resolved).toEqual({
|
|
264
|
+
skip: "false",
|
|
265
|
+
count: "3",
|
|
266
|
+
});
|
|
267
|
+
});
|
|
268
|
+
it("returns empty string for unresolved step outputs", async () => {
|
|
269
|
+
const { resolveJobOutputs } = await import("./result-builder.js");
|
|
270
|
+
const outputDefs = {
|
|
271
|
+
missing: "${{ steps.none.outputs.doesnt_exist }}",
|
|
272
|
+
};
|
|
273
|
+
const stepOutputs = {};
|
|
274
|
+
const resolved = resolveJobOutputs(outputDefs, stepOutputs);
|
|
275
|
+
expect(resolved).toEqual({ missing: "" });
|
|
276
|
+
});
|
|
277
|
+
it("passes through literal values unchanged", async () => {
|
|
278
|
+
const { resolveJobOutputs } = await import("./result-builder.js");
|
|
279
|
+
const outputDefs = {
|
|
280
|
+
version: "1.2.3",
|
|
281
|
+
};
|
|
282
|
+
const stepOutputs = {};
|
|
283
|
+
const resolved = resolveJobOutputs(outputDefs, stepOutputs);
|
|
284
|
+
expect(resolved).toEqual({ version: "1.2.3" });
|
|
285
|
+
});
|
|
286
|
+
it("returns empty object when no output definitions", async () => {
|
|
287
|
+
const { resolveJobOutputs } = await import("./result-builder.js");
|
|
288
|
+
const resolved = resolveJobOutputs({}, { some: "output" });
|
|
289
|
+
expect(resolved).toEqual({});
|
|
290
|
+
});
|
|
291
|
+
it("handles JSON values in step outputs", async () => {
|
|
292
|
+
const { resolveJobOutputs } = await import("./result-builder.js");
|
|
293
|
+
const outputDefs = {
|
|
294
|
+
matrix: "${{ steps.plan.outputs.matrix }}",
|
|
295
|
+
};
|
|
296
|
+
const stepOutputs = {
|
|
297
|
+
matrix: '{"shard":[1,2,3]}',
|
|
298
|
+
};
|
|
299
|
+
const resolved = resolveJobOutputs(outputDefs, stepOutputs);
|
|
300
|
+
expect(resolved).toEqual({
|
|
301
|
+
matrix: '{"shard":[1,2,3]}',
|
|
302
|
+
});
|
|
303
|
+
});
|
|
304
|
+
it("handles templates with surrounding text", async () => {
|
|
305
|
+
const { resolveJobOutputs } = await import("./result-builder.js");
|
|
306
|
+
const outputDefs = {
|
|
307
|
+
label: "shard-${{ steps.plan.outputs.index }}",
|
|
308
|
+
};
|
|
309
|
+
const stepOutputs = {
|
|
310
|
+
index: "5",
|
|
311
|
+
};
|
|
312
|
+
const resolved = resolveJobOutputs(outputDefs, stepOutputs);
|
|
313
|
+
expect(resolved).toEqual({ label: "shard-5" });
|
|
314
|
+
});
|
|
315
|
+
});
|
|
@@ -80,3 +80,72 @@ export function wrapJobSteps(steps, pauseOnFailure) {
|
|
|
80
80
|
};
|
|
81
81
|
});
|
|
82
82
|
}
|
|
83
|
+
// ─── Output capture step injection ────────────────────────────────────────────
|
|
84
|
+
//
|
|
85
|
+
// Appends a synthetic step that reads `$GITHUB_OUTPUT` files and echoes their
|
|
86
|
+
// contents to stdout with a `::agent-ci-output::` prefix. The DTU parses these
|
|
87
|
+
// lines and persists them to `outputs.json` so the CLI can resolve cross-job
|
|
88
|
+
// outputs via `needs.*.outputs.*`.
|
|
89
|
+
//
|
|
90
|
+
// This step is necessary because the runner's FinalizeJob step deletes
|
|
91
|
+
// `_temp/_runner_file_commands/` _inside_ the container before it exits,
|
|
92
|
+
// making the files unreachable from the host.
|
|
93
|
+
/**
|
|
94
|
+
* Build the shell script for the output-capture synthetic step.
|
|
95
|
+
*
|
|
96
|
+
* Reads all `set_output_*` files from `GITHUB_OUTPUT`'s directory and
|
|
97
|
+
* echoes each `key=value` line with the prefix `::agent-ci-output::`.
|
|
98
|
+
* Multiline values (heredoc format) are flattened into single-line JSON.
|
|
99
|
+
*/
|
|
100
|
+
function outputCaptureScript() {
|
|
101
|
+
return `# Agent CI: capture step outputs for cross-job passing
|
|
102
|
+
DIR="$(dirname "$GITHUB_OUTPUT")"
|
|
103
|
+
if [ -d "$DIR" ]; then
|
|
104
|
+
for f in "$DIR"/set_output_*; do
|
|
105
|
+
[ -f "$f" ] || continue
|
|
106
|
+
while IFS= read -r line || [ -n "$line" ]; do
|
|
107
|
+
if echo "$line" | grep -q '<<'; then
|
|
108
|
+
# Heredoc: key<<DELIMITER ... DELIMITER
|
|
109
|
+
KEY=$(echo "$line" | cut -d'<' -f1)
|
|
110
|
+
DELIM=$(echo "$line" | sed 's/^[^<]*<<//')
|
|
111
|
+
VAL=""
|
|
112
|
+
while IFS= read -r hline || [ -n "$hline" ]; do
|
|
113
|
+
[ "$hline" = "$DELIM" ] && break
|
|
114
|
+
[ -n "$VAL" ] && VAL="$VAL\\\\n$hline" || VAL="$hline"
|
|
115
|
+
done
|
|
116
|
+
echo "::agent-ci-output::$KEY=$VAL"
|
|
117
|
+
else
|
|
118
|
+
echo "::agent-ci-output::$line"
|
|
119
|
+
fi
|
|
120
|
+
done < "$f"
|
|
121
|
+
done
|
|
122
|
+
fi`;
|
|
123
|
+
}
|
|
124
|
+
/**
 * Create a synthetic step object for output capture.
 *
 * The step's `Condition` is `always()` so it runs even when prior steps
 * failed, ensuring outputs are still surfaced for downstream jobs.
 *
 * @returns {object} A runner step definition executing {@link outputCaptureScript}.
 */
export function createOutputCaptureStep() {
  // Name and ContextName are intentionally identical so the step is easy
  // to recognize (and filter) in runner output.
  const syntheticName = "__agent_ci_output_capture";
  const step = {
    Name: syntheticName,
    DisplayName: "Capture outputs",
    Reference: { Type: "Script" },
    Inputs: {
      script: outputCaptureScript(),
    },
    Condition: "always()",
    Environment: {},
    ContextName: syntheticName,
  };
  return step;
}
|
|
142
|
+
/**
 * Append the output-capture step to a steps array.
 *
 * Only appends when the array already contains at least one step; a missing
 * or empty array is returned unchanged. (This function does NOT check
 * whether the job defines outputs — presumably the caller gates on that;
 * verify at the call site.)
 *
 * @param {Array|undefined} steps - The job's parsed steps.
 * @returns {Array|undefined} A new array with the capture step appended, or
 *   the original value when there is nothing to append to.
 */
export function appendOutputCaptureStep(steps) {
  if (!steps || steps.length === 0) {
    return steps;
  }
  return [...steps, createOutputCaptureStep()];
}
|
|
@@ -19,7 +19,7 @@ async function loadWorkflowParser() {
|
|
|
19
19
|
* - github.sha → '0000000000000000000000000000000000000000'
|
|
20
20
|
* - (others) → empty string (safe: no commas injected)
|
|
21
21
|
*/
|
|
22
|
-
export function expandExpressions(value, repoPath, secrets, matrixContext) {
|
|
22
|
+
export function expandExpressions(value, repoPath, secrets, matrixContext, needsContext) {
|
|
23
23
|
return value.replace(/\$\{\{([\s\S]*?)\}\}/g, (_match, expr) => {
|
|
24
24
|
const trimmed = expr.trim();
|
|
25
25
|
// hashFiles('glob1', 'glob2', ...)
|
|
@@ -62,6 +62,45 @@ export function expandExpressions(value, repoPath, secrets, matrixContext) {
|
|
|
62
62
|
return "0000000000000000000000000000000000000000";
|
|
63
63
|
}
|
|
64
64
|
}
|
|
65
|
+
// fromJSON(expr) — parse JSON from a string (or inner expression)
|
|
66
|
+
const fromJsonMatch = trimmed.match(/^fromJSON\(([\s\S]+)\)$/);
|
|
67
|
+
if (fromJsonMatch) {
|
|
68
|
+
const inner = fromJsonMatch[1].trim();
|
|
69
|
+
// If the inner arg is a quoted string literal, use it directly
|
|
70
|
+
let rawValue;
|
|
71
|
+
if ((inner.startsWith("'") && inner.endsWith("'")) ||
|
|
72
|
+
(inner.startsWith('"') && inner.endsWith('"'))) {
|
|
73
|
+
rawValue = inner.slice(1, -1);
|
|
74
|
+
}
|
|
75
|
+
else {
|
|
76
|
+
// Otherwise, treat it as an expression and expand it
|
|
77
|
+
rawValue = expandExpressions(`\${{ ${inner} }}`, repoPath, secrets, matrixContext, needsContext);
|
|
78
|
+
}
|
|
79
|
+
try {
|
|
80
|
+
const parsed = JSON.parse(rawValue);
|
|
81
|
+
if (typeof parsed === "string") {
|
|
82
|
+
return parsed;
|
|
83
|
+
}
|
|
84
|
+
return JSON.stringify(parsed);
|
|
85
|
+
}
|
|
86
|
+
catch {
|
|
87
|
+
return "";
|
|
88
|
+
}
|
|
89
|
+
}
|
|
90
|
+
// toJSON(expr) — serialize a value to JSON
|
|
91
|
+
const toJsonMatch = trimmed.match(/^toJSON\(([\s\S]+)\)$/);
|
|
92
|
+
if (toJsonMatch) {
|
|
93
|
+
const inner = toJsonMatch[1].trim();
|
|
94
|
+
let rawValue;
|
|
95
|
+
if ((inner.startsWith("'") && inner.endsWith("'")) ||
|
|
96
|
+
(inner.startsWith('"') && inner.endsWith('"'))) {
|
|
97
|
+
rawValue = inner.slice(1, -1);
|
|
98
|
+
}
|
|
99
|
+
else {
|
|
100
|
+
rawValue = expandExpressions(`\${{ ${inner} }}`, repoPath, secrets, matrixContext, needsContext);
|
|
101
|
+
}
|
|
102
|
+
return JSON.stringify(rawValue);
|
|
103
|
+
}
|
|
65
104
|
// format('template {0} {1}', arg0, arg1)
|
|
66
105
|
const formatMatch = trimmed.match(/^format\(([\s\S]+)\)$/);
|
|
67
106
|
if (formatMatch) {
|
|
@@ -132,6 +171,20 @@ export function expandExpressions(value, repoPath, secrets, matrixContext) {
|
|
|
132
171
|
if (trimmed.startsWith("steps.")) {
|
|
133
172
|
return "";
|
|
134
173
|
}
|
|
174
|
+
if (trimmed.startsWith("needs.") && needsContext) {
|
|
175
|
+
// needs.<jobId>.outputs.<name> or needs.<jobId>.result
|
|
176
|
+
const parts = trimmed.split(".");
|
|
177
|
+
const jobId = parts[1];
|
|
178
|
+
const jobOutputs = needsContext[jobId];
|
|
179
|
+
if (parts[2] === "outputs" && parts[3]) {
|
|
180
|
+
return jobOutputs?.[parts[3]] ?? "";
|
|
181
|
+
}
|
|
182
|
+
if (parts[2] === "result") {
|
|
183
|
+
// If the job is in the needsContext, it completed (default to 'success')
|
|
184
|
+
return jobOutputs ? (jobOutputs["__result"] ?? "success") : "";
|
|
185
|
+
}
|
|
186
|
+
return "";
|
|
187
|
+
}
|
|
135
188
|
if (trimmed.startsWith("needs.")) {
|
|
136
189
|
return "";
|
|
137
190
|
}
|
|
@@ -224,7 +277,7 @@ export async function parseMatrixDef(filePath, jobId) {
|
|
|
224
277
|
}
|
|
225
278
|
return Object.keys(result).length > 0 ? result : null;
|
|
226
279
|
}
|
|
227
|
-
export async function parseWorkflowSteps(filePath, taskName, secrets, matrixContext) {
|
|
280
|
+
export async function parseWorkflowSteps(filePath, taskName, secrets, matrixContext, needsContext) {
|
|
228
281
|
const template = await getWorkflowTemplate(filePath);
|
|
229
282
|
const rawYaml = parseYaml(fs.readFileSync(filePath, "utf8"));
|
|
230
283
|
// Derive repoPath from filePath (.../repoPath/.github/workflows/foo.yml → repoPath)
|
|
@@ -244,10 +297,12 @@ export async function parseWorkflowSteps(filePath, taskName, secrets, matrixCont
|
|
|
244
297
|
return job.steps
|
|
245
298
|
.map((step, index) => {
|
|
246
299
|
const stepId = step.id || `step-${index + 1}`;
|
|
247
|
-
let stepName = step.name
|
|
248
|
-
? expandExpressions(step.name.toString(), repoPath, secrets, matrixContext)
|
|
249
|
-
: stepId;
|
|
250
300
|
const rawStep = rawSteps[index] || {};
|
|
301
|
+
// Prefer raw YAML name to preserve ${{ }} expressions for our own expansion
|
|
302
|
+
const rawName = rawStep.name != null ? String(rawStep.name) : step.name?.toString();
|
|
303
|
+
let stepName = rawName
|
|
304
|
+
? expandExpressions(rawName, repoPath, secrets, matrixContext, needsContext)
|
|
305
|
+
: stepId;
|
|
251
306
|
// If a step lacks an explicit name, we map it to standard GitHub Actions defaults
|
|
252
307
|
if (!step.name) {
|
|
253
308
|
if ("run" in step) {
|
|
@@ -270,7 +325,7 @@ export async function parseWorkflowSteps(filePath, taskName, secrets, matrixCont
|
|
|
270
325
|
// string is always the complete literal block scalar.
|
|
271
326
|
const rawScript = rawStep.run != null ? String(rawStep.run) : step.run.toString();
|
|
272
327
|
const inputs = {
|
|
273
|
-
script: expandExpressions(rawScript, repoPath, secrets, matrixContext),
|
|
328
|
+
script: expandExpressions(rawScript, repoPath, secrets, matrixContext, needsContext),
|
|
274
329
|
};
|
|
275
330
|
if (rawStep["working-directory"]) {
|
|
276
331
|
inputs.workingDirectory = rawStep["working-directory"];
|
|
@@ -287,7 +342,7 @@ export async function parseWorkflowSteps(filePath, taskName, secrets, matrixCont
|
|
|
287
342
|
Env: rawStep.env
|
|
288
343
|
? Object.fromEntries(Object.entries(rawStep.env).map(([k, v]) => [
|
|
289
344
|
k,
|
|
290
|
-
expandExpressions(String(v), repoPath, secrets),
|
|
345
|
+
expandExpressions(String(v), repoPath, secrets, undefined, needsContext),
|
|
291
346
|
]))
|
|
292
347
|
: undefined,
|
|
293
348
|
};
|
|
@@ -322,13 +377,13 @@ export async function parseWorkflowSteps(filePath, taskName, secrets, matrixCont
|
|
|
322
377
|
...(step.with
|
|
323
378
|
? Object.fromEntries(Object.entries(step.with).map(([k, v]) => [
|
|
324
379
|
k,
|
|
325
|
-
expandExpressions(String(v), repoPath, secrets, matrixContext),
|
|
380
|
+
expandExpressions(String(v), repoPath, secrets, matrixContext, needsContext),
|
|
326
381
|
]))
|
|
327
382
|
: {}),
|
|
328
383
|
// Merge from raw YAML (overrides parsed values), expanding expressions
|
|
329
384
|
...Object.fromEntries(Object.entries(stepWith).map(([k, v]) => [
|
|
330
385
|
k,
|
|
331
|
-
expandExpressions(String(v), repoPath, secrets, matrixContext),
|
|
386
|
+
expandExpressions(String(v), repoPath, secrets, matrixContext, needsContext),
|
|
332
387
|
])),
|
|
333
388
|
...(isCheckout
|
|
334
389
|
? {
|
|
@@ -338,7 +393,7 @@ export async function parseWorkflowSteps(filePath, taskName, secrets, matrixCont
|
|
|
338
393
|
submodules: "false",
|
|
339
394
|
...Object.fromEntries(Object.entries(stepWith).map(([k, v]) => [
|
|
340
395
|
k,
|
|
341
|
-
expandExpressions(String(v), repoPath),
|
|
396
|
+
expandExpressions(String(v), repoPath, secrets, undefined, needsContext),
|
|
342
397
|
])),
|
|
343
398
|
}
|
|
344
399
|
: {}), // Prevent actions/checkout from wiping the rsynced workspace
|
|
@@ -346,7 +401,7 @@ export async function parseWorkflowSteps(filePath, taskName, secrets, matrixCont
|
|
|
346
401
|
Env: rawStep.env
|
|
347
402
|
? Object.fromEntries(Object.entries(rawStep.env).map(([k, v]) => [
|
|
348
403
|
k,
|
|
349
|
-
expandExpressions(String(v), repoPath, secrets, matrixContext),
|
|
404
|
+
expandExpressions(String(v), repoPath, secrets, matrixContext, needsContext),
|
|
350
405
|
]))
|
|
351
406
|
: undefined,
|
|
352
407
|
};
|
|
@@ -554,3 +609,183 @@ export function validateSecrets(filePath, taskName, secrets, secretsFilePath) {
|
|
|
554
609
|
missing.map((n) => `${n}=`).join("\n") +
|
|
555
610
|
"\n");
|
|
556
611
|
}
|
|
612
|
+
/**
 * Parse `jobs.<id>.outputs` definitions from a workflow YAML file.
 *
 * @param {string} filePath - Path to the workflow YAML file.
 * @param {string} jobId - The job whose outputs should be read.
 * @returns {Record<string, string>} Map of output name → expression template
 *   (e.g. `{ skip: "${{ steps.check.outputs.skip }}" }`); `{}` when the job
 *   has no outputs or does not exist.
 */
export function parseJobOutputDefs(filePath, jobId) {
  const doc = parseYaml(fs.readFileSync(filePath, "utf8"));
  const outputs = doc?.jobs?.[jobId]?.outputs;
  if (!outputs || typeof outputs !== "object") {
    return {};
  }
  // Coerce every value to a string so callers always receive templates.
  return Object.fromEntries(
    Object.entries(outputs).map(([name, template]) => [name, String(template)]),
  );
}
|
|
629
|
+
/**
 * Parse the `if:` condition from a workflow job.
 *
 * @param {string} filePath - Path to the workflow YAML file.
 * @param {string} jobId - The job whose `if:` should be read.
 * @returns {string|null} The raw expression (with a surrounding `${{ }}`
 *   wrapper stripped when present), or null when the job has no `if:`.
 */
export function parseJobIf(filePath, jobId) {
  const doc = parseYaml(fs.readFileSync(filePath, "utf8"));
  const rawIf = doc?.jobs?.[jobId]?.if;
  if (rawIf == null) {
    return null;
  }
  const expr = String(rawIf).trim();
  // `if: ${{ expr }}` and `if: expr` are equivalent; normalize to the bare expr.
  const wrapped = expr.match(/^\$\{\{\s*([\s\S]*?)\s*\}\}$/);
  return wrapped ? wrapped[1] : expr;
}
|
|
648
|
+
/**
 * Evaluate a job-level `if:` condition.
 *
 * Recursively splits on `||` first (lowest precedence), then `&&`, then
 * delegates single atoms to `evaluateAtom`. Splitting respects parentheses
 * and string quotes via `splitOnOperator`.
 *
 * @param {string} expr - The expression string (already stripped of `${{ }}`).
 * @param {Record<string, "success"|"failure">} jobResults - Upstream job results.
 * @param {object} [needsCtx] - Needs output context (same shape as
 *   `expandExpressions`' needsContext).
 * @returns {boolean} Whether the job should run.
 */
export function evaluateJobIf(expr, jobResults, needsCtx) {
  const condition = expr.trim();
  // An empty condition behaves like the implicit default: success().
  if (condition === "") {
    return evaluateAtom("success()", jobResults, needsCtx);
  }
  // OR binds loosest: true when any clause holds.
  const orClauses = splitOnOperator(condition, "||");
  if (orClauses.length > 1) {
    return orClauses.some((clause) => evaluateJobIf(clause.trim(), jobResults, needsCtx));
  }
  // AND: true only when every clause holds.
  const andClauses = splitOnOperator(condition, "&&");
  if (andClauses.length > 1) {
    return andClauses.every((clause) => evaluateJobIf(clause.trim(), jobResults, needsCtx));
  }
  return evaluateAtom(condition, jobResults, needsCtx);
}
|
|
678
|
+
/**
 * Split an expression on a logical operator, respecting parentheses and quotes.
 *
 * Operators inside `(...)` or inside `'...'` / `"..."` string literals are
 * not split points. When the operator never appears at depth zero, the
 * result is a single-element array containing the whole expression.
 *
 * @param {string} expr - The expression to split.
 * @param {string} op - The operator token (e.g. `"||"` or `"&&"`).
 * @returns {string[]} The segments between top-level occurrences of `op`.
 */
function splitOnOperator(expr, op) {
  const segments = [];
  let buffer = "";
  let parenDepth = 0;
  let quoteChar = null;
  let i = 0;
  while (i < expr.length) {
    const ch = expr[i];
    // Inside a string literal: copy verbatim until the closing quote.
    if (quoteChar) {
      buffer += ch;
      if (ch === quoteChar) {
        quoteChar = null;
      }
      i += 1;
      continue;
    }
    if (ch === "'" || ch === '"') {
      quoteChar = ch;
      buffer += ch;
      i += 1;
      continue;
    }
    if (ch === "(") {
      parenDepth += 1;
    } else if (ch === ")") {
      parenDepth -= 1;
    }
    // Only split at top level (outside any parentheses).
    if (parenDepth === 0 && expr.startsWith(op, i)) {
      segments.push(buffer);
      buffer = "";
      i += op.length;
      continue;
    }
    buffer += ch;
    i += 1;
  }
  segments.push(buffer);
  return segments;
}
|
|
717
|
+
/**
 * Evaluate a single atomic condition (no top-level `&&` or `||`).
 *
 * Handled, in order: status-check functions (`always()`, `cancelled()`,
 * `success()`, `failure()`), a `!=` comparison, an `==` comparison, and
 * finally a bare value treated as truthy unless it is "", "false", or "0".
 *
 * NOTE(review): the comparison regexes do not respect quotes, so an operator
 * appearing inside a string literal (e.g. `'a != b' == x`) would split at
 * the wrong place — confirm whether such expressions must be supported.
 */
function evaluateAtom(expr, jobResults, needsCtx) {
  const trimmed = expr.trim();
  // Status check functions
  if (trimmed === "always()") {
    return true;
  }
  // Running locally, nothing is ever cancelled.
  if (trimmed === "cancelled()") {
    return false;
  }
  // success(): every upstream result is "success" (vacuously true when
  // jobResults is empty).
  if (trimmed === "success()") {
    return Object.values(jobResults).every((r) => r === "success");
  }
  // failure(): at least one upstream job failed.
  if (trimmed === "failure()") {
    return Object.values(jobResults).some((r) => r === "failure");
  }
  // != comparison
  const neqMatch = trimmed.match(/^(.+?)\s*!=\s*(.+)$/);
  if (neqMatch) {
    const left = resolveValue(neqMatch[1].trim(), needsCtx);
    const right = resolveValue(neqMatch[2].trim(), needsCtx);
    return left !== right;
  }
  // == comparison
  const eqMatch = trimmed.match(/^(.+?)\s*==\s*(.+)$/);
  if (eqMatch) {
    const left = resolveValue(eqMatch[1].trim(), needsCtx);
    const right = resolveValue(eqMatch[2].trim(), needsCtx);
    return left === right;
  }
  // Bare truthy value (e.g. needs.setup.outputs.run_tests)
  const val = resolveValue(trimmed, needsCtx);
  return val !== "" && val !== "false" && val !== "0";
}
|
|
753
|
+
/**
 * Resolve a value reference in a condition expression.
 *
 * Quoted literals are unwrapped; `needs.<jobId>.outputs.<name>` and
 * `needs.<jobId>.result` are looked up in `needsCtx`. Anything else is
 * returned verbatim (trimmed).
 *
 * @param {string} raw - The raw token from the expression.
 * @param {object} [needsCtx] - Map of jobId → outputs record.
 * @returns {string} The resolved value.
 */
function resolveValue(raw, needsCtx) {
  const token = raw.trim();
  // Quoted string literal → strip the surrounding quotes.
  const singleQuoted = token.startsWith("'") && token.endsWith("'");
  const doubleQuoted = token.startsWith('"') && token.endsWith('"');
  if (singleQuoted || doubleQuoted) {
    return token.slice(1, -1);
  }
  // needs.<jobId>.outputs.<name> / needs.<jobId>.result
  if (needsCtx && token.startsWith("needs.")) {
    const [, jobId, kind, outputName] = token.split(".");
    const jobOutputs = needsCtx[jobId];
    if (kind === "outputs" && outputName) {
      return jobOutputs?.[outputName] ?? "";
    }
    if (kind === "result") {
      return jobOutputs ? (jobOutputs["__result"] ?? "success") : "";
    }
  }
  return token;
}
|
|
777
|
+
/**
 * Parse `strategy.fail-fast` for a job.
 *
 * @param {string} filePath - Path to the workflow YAML file.
 * @param {string} jobId - The job whose strategy should be read.
 * @returns {boolean|undefined} true/false when explicitly set, undefined
 *   when the job has no strategy or the key is absent.
 */
export function parseFailFast(filePath, jobId) {
  const doc = parseYaml(fs.readFileSync(filePath, "utf8"));
  const strategy = doc?.jobs?.[jobId]?.strategy;
  // typeof null === "object", so guard null explicitly.
  if (strategy === null || typeof strategy !== "object") {
    return undefined;
  }
  return "fail-fast" in strategy ? Boolean(strategy["fail-fast"]) : undefined;
}
|
|
@@ -640,3 +640,343 @@ describe("isWorkflowRelevant", () => {
|
|
|
640
640
|
expect(isWorkflowRelevant(template, "feature", ["cli/src/cli.ts"])).toBe(true);
|
|
641
641
|
});
|
|
642
642
|
});
|
|
643
|
+
// ─── fromJSON / toJSON ────────────────────────────────────────────────────────
// NOTE(review): these suites call expandExpressions with omitted
// repoPath/secrets/matrixContext arguments — confirm the signature tolerates
// undefined for all four optional parameters.
describe("expandExpressions — fromJSON", () => {
  it("fromJSON parses a JSON array string and returns it", () => {
    expect(expandExpressions('${{ fromJSON(\'["a","b","c"]\') }}')).toBe('["a","b","c"]');
  });
  it("fromJSON parses a JSON string value and unwraps it", () => {
    expect(expandExpressions("${{ fromJSON('\"hello\"') }}")).toBe("hello");
  });
  it("fromJSON parses a JSON number and returns it as string", () => {
    expect(expandExpressions("${{ fromJSON('42') }}")).toBe("42");
  });
  it("fromJSON parses a JSON boolean", () => {
    expect(expandExpressions("${{ fromJSON('true') }}")).toBe("true");
    expect(expandExpressions("${{ fromJSON('false') }}")).toBe("false");
  });
  it("fromJSON parses a JSON object string", () => {
    // Compare parsed forms: key ordering in the serialized string is not pinned.
    const result = expandExpressions('${{ fromJSON(\'{"key":"val"}\') }}');
    expect(JSON.parse(result)).toEqual({ key: "val" });
  });
  it("fromJSON returns empty string for invalid JSON", () => {
    expect(expandExpressions("${{ fromJSON('not valid json') }}")).toBe("");
  });
  it("fromJSON with a nested expression resolves the inner expr first", () => {
    // Simulates fromJSON(needs.setup.outputs.matrix) — the inner expr resolves first
    const needsCtx = { setup: { matrix: '["x","y"]' } };
    expect(expandExpressions("${{ fromJSON(needs.setup.outputs.matrix) }}", undefined, undefined, undefined, needsCtx)).toBe('["x","y"]');
  });
});
describe("expandExpressions — toJSON", () => {
  it("toJSON wraps a string value in quotes", () => {
    expect(expandExpressions("${{ toJSON('hello') }}")).toBe('"hello"');
  });
});
// ─── Cross-job outputs: needs context ─────────────────────────────────────────
// NOTE(review): mid-file import — works because ESM imports are hoisted, but
// consider consolidating all "./workflow-parser.js" imports at the top.
import { parseJobOutputDefs } from "./workflow-parser.js";
describe("parseJobOutputDefs", () => {
  let tmpDir;
  // Clean up the temp workflow tree created by writeWorkflow.
  afterEach(() => {
    if (tmpDir) {
      fs.rmSync(tmpDir, { recursive: true, force: true });
    }
  });
  // Write `content` to a fresh temp workflow.yml and return its path.
  function writeWorkflow(content) {
    tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "oa-outputs-test-"));
    const filePath = path.join(tmpDir, "workflow.yml");
    fs.writeFileSync(filePath, content);
    return filePath;
  }
  it("parses job output definitions from YAML", () => {
    const filePath = writeWorkflow(`
name: Test Outputs
on: [push]
jobs:
  setup:
    runs-on: ubuntu-latest
    outputs:
      skip: \${{ steps.check.outputs.skip }}
      shard_count: \${{ steps.count.outputs.shard_count }}
    steps:
      - run: echo ok
`);
    const defs = parseJobOutputDefs(filePath, "setup");
    expect(defs).toEqual({
      skip: "${{ steps.check.outputs.skip }}",
      shard_count: "${{ steps.count.outputs.shard_count }}",
    });
  });
  it("returns empty object when job has no outputs", () => {
    const filePath = writeWorkflow(`
name: No Outputs
on: [push]
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - run: echo ok
`);
    const defs = parseJobOutputDefs(filePath, "build");
    expect(defs).toEqual({});
  });
  it("returns empty object for nonexistent job", () => {
    const filePath = writeWorkflow(`
name: Test
on: [push]
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - run: echo ok
`);
    const defs = parseJobOutputDefs(filePath, "nonexistent");
    expect(defs).toEqual({});
  });
});
describe("expandExpressions with needsContext", () => {
  it("resolves needs.build.outputs.sha to the provided value", () => {
    const needsCtx = { build: { sha: "abc123" } };
    expect(expandExpressions("sha=${{ needs.build.outputs.sha }}", undefined, undefined, undefined, needsCtx)).toBe("sha=abc123");
  });
  it("resolves needs.setup.outputs.skip to 'false'", () => {
    const needsCtx = { setup: { skip: "false" } };
    expect(expandExpressions("${{ needs.setup.outputs.skip }}", undefined, undefined, undefined, needsCtx)).toBe("false");
  });
  it("returns empty string for unknown needs output", () => {
    const needsCtx = { build: { sha: "abc123" } };
    expect(expandExpressions("${{ needs.build.outputs.unknown }}", undefined, undefined, undefined, needsCtx)).toBe("");
  });
  it("returns empty string for unknown needs job", () => {
    const needsCtx = { build: { sha: "abc123" } };
    expect(expandExpressions("${{ needs.other.outputs.sha }}", undefined, undefined, undefined, needsCtx)).toBe("");
  });
  it("resolves needs.build.result to success when not explicitly set", () => {
    const needsCtx = { build: {} };
    // needs.build.result should default to 'success' if the job succeeded
    expect(expandExpressions("${{ needs.build.result }}", undefined, undefined, undefined, needsCtx)).toBe("success");
  });
});
|
|
760
|
+
// ─── Job-level if conditions ──────────────────────────────────────────────────
// NOTE(review): mid-file import — relies on ESM import hoisting.
import { evaluateJobIf, parseJobIf } from "./workflow-parser.js";
describe("parseJobIf", () => {
  let tmpDir;
  // Clean up the temp workflow tree created by writeWorkflow.
  afterEach(() => {
    if (tmpDir) {
      fs.rmSync(tmpDir, { recursive: true, force: true });
    }
  });
  // Write `content` to a fresh temp workflow.yml and return its path.
  function writeWorkflow(content) {
    tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "oa-job-if-test-"));
    const filePath = path.join(tmpDir, "workflow.yml");
    fs.writeFileSync(filePath, content);
    return filePath;
  }
  it("returns the if expression when present", () => {
    const filePath = writeWorkflow(`
name: If Test
on: [push]
jobs:
  test:
    if: needs.setup.outputs.skip == 'false'
    runs-on: ubuntu-latest
    steps:
      - run: echo ok
`);
    expect(parseJobIf(filePath, "test")).toBe("needs.setup.outputs.skip == 'false'");
  });
  it("returns null when job has no if", () => {
    const filePath = writeWorkflow(`
name: No If
on: [push]
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - run: echo ok
`);
    expect(parseJobIf(filePath, "build")).toBeNull();
  });
  it("strips ${{ }} wrapper if present", () => {
    const filePath = writeWorkflow(`
name: If Wrapped
on: [push]
jobs:
  check:
    if: \${{ always() }}
    runs-on: ubuntu-latest
    steps:
      - run: echo ok
`);
    expect(parseJobIf(filePath, "check")).toBe("always()");
  });
});
describe("evaluateJobIf", () => {
  it("always() returns true", () => {
    expect(evaluateJobIf("always()", {})).toBe(true);
  });
  it("success() returns true when all upstream jobs succeeded", () => {
    expect(evaluateJobIf("success()", { build: "success", lint: "success" })).toBe(true);
  });
  it("success() returns false when any upstream job failed", () => {
    expect(evaluateJobIf("success()", { build: "success", lint: "failure" })).toBe(false);
  });
  it("failure() returns true when any upstream job failed", () => {
    expect(evaluateJobIf("failure()", { build: "success", lint: "failure" })).toBe(true);
  });
  it("failure() returns false when all upstream succeeded", () => {
    expect(evaluateJobIf("failure()", { build: "success" })).toBe(false);
  });
  it("cancelled() returns false (locally, nothing is ever cancelled)", () => {
    expect(evaluateJobIf("cancelled()", {})).toBe(false);
  });
  it("evaluates string equality with needs outputs", () => {
    const needsCtx = { setup: { run_tests: "true" } };
    expect(evaluateJobIf("needs.setup.outputs.run_tests == 'true'", {}, needsCtx)).toBe(true);
  });
  it("evaluates string inequality with needs outputs", () => {
    const needsCtx = { setup: { run_tests: "false" } };
    expect(evaluateJobIf("needs.setup.outputs.run_tests == 'true'", {}, needsCtx)).toBe(false);
  });
  it("evaluates != operator", () => {
    const needsCtx = { setup: { skip: "false" } };
    expect(evaluateJobIf("needs.setup.outputs.skip != 'true'", {}, needsCtx)).toBe(true);
  });
  it("evaluates compound condition with &&", () => {
    const needsCtx = { setup: { skip: "false", run_tests: "true" } };
    expect(evaluateJobIf("needs.setup.outputs.skip == 'false' && needs.setup.outputs.run_tests == 'true'", {}, needsCtx)).toBe(true);
  });
  it("evaluates compound condition with || where first is false", () => {
    expect(evaluateJobIf("failure() || always()", { build: "success" })).toBe(true);
  });
  it("defaults to success() when expression is empty", () => {
    expect(evaluateJobIf("", { build: "success" })).toBe(true);
    expect(evaluateJobIf("", { build: "failure" })).toBe(false);
  });
});
|
|
857
|
+
// ─── strategy.fail-fast ──────────────────────────────────────────────────────
// NOTE(review): mid-file import — relies on ESM import hoisting.
import { parseFailFast } from "./workflow-parser.js";
describe("parseFailFast", () => {
  let tmpDir;
  // Clean up the temp workflow tree created by writeWorkflow.
  afterEach(() => {
    if (tmpDir) {
      fs.rmSync(tmpDir, { recursive: true, force: true });
    }
  });
  // Write `content` to a fresh temp workflow.yml and return its path.
  function writeWorkflow(content) {
    tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "oa-failfast-test-"));
    const filePath = path.join(tmpDir, "workflow.yml");
    fs.writeFileSync(filePath, content);
    return filePath;
  }
  it("returns false when strategy.fail-fast is explicitly false", () => {
    const filePath = writeWorkflow(`
name: Fail Fast False
on: [push]
jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        shard: [1, 2, 3]
    steps:
      - run: echo ok
`);
    expect(parseFailFast(filePath, "test")).toBe(false);
  });
  it("returns true when strategy.fail-fast is explicitly true", () => {
    const filePath = writeWorkflow(`
name: Fail Fast True
on: [push]
jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: true
      matrix:
        shard: [1, 2]
    steps:
      - run: echo ok
`);
    expect(parseFailFast(filePath, "test")).toBe(true);
  });
  it("returns undefined when strategy has no fail-fast key", () => {
    const filePath = writeWorkflow(`
name: No Fail Fast
on: [push]
jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        shard: [1, 2]
    steps:
      - run: echo ok
`);
    expect(parseFailFast(filePath, "test")).toBeUndefined();
  });
  it("returns undefined when job has no strategy", () => {
    const filePath = writeWorkflow(`
name: No Strategy
on: [push]
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - run: echo ok
`);
    expect(parseFailFast(filePath, "build")).toBeUndefined();
  });
});
// ─── parseWorkflowSteps with needsContext ─────────────────────────────────────
import { parseWorkflowSteps } from "./workflow-parser.js";
describe("parseWorkflowSteps with needsContext", () => {
  let tmpDir;
  // Clean up the temp repo tree created by writeWorkflowTree.
  afterEach(() => {
    if (tmpDir) {
      fs.rmSync(tmpDir, { recursive: true, force: true });
    }
  });
  function writeWorkflowTree(content) {
    // Create a minimal repo structure: repoRoot/.github/workflows/test.yml
    tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "oa-steps-needs-"));
    const workflowDir = path.join(tmpDir, ".github", "workflows");
    fs.mkdirSync(workflowDir, { recursive: true });
    const filePath = path.join(workflowDir, "test.yml");
    fs.writeFileSync(filePath, content);
    return filePath;
  }
  it("resolves needs.*.outputs.* in step scripts when needsContext is provided", async () => {
    const filePath = writeWorkflowTree(`
name: Needs Test
on: [push]
jobs:
  test:
    needs: [setup]
    runs-on: ubuntu-latest
    steps:
      - run: echo \${{ needs.setup.outputs.skip }}
`);
    const needsCtx = { setup: { skip: "false" } };
    const steps = await parseWorkflowSteps(filePath, "test", undefined, undefined, needsCtx);
    // The step script should have "false" substituted in
    expect(steps[0].Inputs.script).toBe("echo false");
  });
  it("resolves needs context in step names", async () => {
    const filePath = writeWorkflowTree(`
name: Needs Name Test
on: [push]
jobs:
  test:
    needs: [setup]
    runs-on: ubuntu-latest
    steps:
      - name: "Shard \${{ needs.setup.outputs.index }}"
        run: echo hello
`);
    const needsCtx = { setup: { index: "3" } };
    const steps = await parseWorkflowSteps(filePath, "test", undefined, undefined, needsCtx);
    expect(steps[0].Name).toBe("Shard 3");
  });
});
|
package/package.json
CHANGED
|
@@ -1,10 +1,15 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@redwoodjs/agent-ci",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.3.0",
|
|
4
4
|
"description": "Local GitHub Actions runner",
|
|
5
5
|
"keywords": [],
|
|
6
6
|
"license": "FSL-1.1-MIT",
|
|
7
7
|
"author": "",
|
|
8
|
+
"repository": {
|
|
9
|
+
"type": "git",
|
|
10
|
+
"url": "https://github.com/redwoodjs/agent-ci.git",
|
|
11
|
+
"directory": "packages/cli"
|
|
12
|
+
},
|
|
8
13
|
"bin": {
|
|
9
14
|
"agent-ci": "./dist/cli.js"
|
|
10
15
|
},
|
|
@@ -22,7 +27,7 @@
|
|
|
22
27
|
"log-update": "^7.2.0",
|
|
23
28
|
"minimatch": "^10.2.1",
|
|
24
29
|
"yaml": "^2.8.2",
|
|
25
|
-
"dtu-github-actions": "0.
|
|
30
|
+
"dtu-github-actions": "0.3.0"
|
|
26
31
|
},
|
|
27
32
|
"devDependencies": {
|
|
28
33
|
"@types/dockerode": "^3.3.34",
|