@redwoodjs/agent-ci 0.1.0 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +34 -14
- package/dist/cli.js +126 -12
- package/dist/output/working-directory.js +1 -1
- package/dist/runner/job-result.js +37 -0
- package/dist/runner/job-result.test.js +100 -0
- package/dist/runner/local-job.js +17 -2
- package/dist/runner/result-builder.js +111 -1
- package/dist/runner/result-builder.test.js +138 -0
- package/dist/runner/step-wrapper.js +69 -0
- package/dist/workflow/workflow-parser.js +246 -11
- package/dist/workflow/workflow-parser.test.js +340 -0
- package/package.json +10 -2
|
@@ -175,3 +175,141 @@ describe("buildJobResult", () => {
|
|
|
175
175
|
expect(result.lastOutputLines).toContain("compile error");
|
|
176
176
|
});
|
|
177
177
|
});
|
|
178
|
+
// ── extractStepOutputs ────────────────────────────────────────────────────────
describe("extractStepOutputs", () => {
  let tempDir;

  beforeEach(() => {
    tempDir = fs.mkdtempSync(path.join(os.tmpdir(), "step-outputs-test-"));
  });

  afterEach(() => {
    fs.rmSync(tempDir, { recursive: true, force: true });
  });

  /**
   * Simulate the runner's file_commands directory structure: create
   * `_runner_file_commands/` under the temp dir and write each entry of
   * `files` ({ fileName: content }) into it.
   */
  const seedFileCommands = (files) => {
    const dir = path.join(tempDir, "_runner_file_commands");
    fs.mkdirSync(dir, { recursive: true });
    for (const [name, content] of Object.entries(files)) {
      fs.writeFileSync(path.join(dir, name), content);
    }
  };

  it("extracts simple key=value outputs from set_output files", async () => {
    const { extractStepOutputs } = await import("./result-builder.js");
    seedFileCommands({ set_output_abc123: "skip=false\nshard_count=3\n" });
    expect(extractStepOutputs(tempDir)).toEqual({
      skip: "false",
      shard_count: "3",
    });
  });

  it("extracts multiline (heredoc) values", async () => {
    const { extractStepOutputs } = await import("./result-builder.js");
    seedFileCommands({ set_output_def456: 'matrix<<EOF\n["1","2","3"]\nEOF\n' });
    expect(extractStepOutputs(tempDir)).toEqual({
      matrix: '["1","2","3"]',
    });
  });

  it("merges outputs from multiple set_output files", async () => {
    const { extractStepOutputs } = await import("./result-builder.js");
    seedFileCommands({
      set_output_aaa: "key1=val1\n",
      set_output_bbb: "key2=val2\n",
    });
    const outputs = extractStepOutputs(tempDir);
    expect(outputs.key1).toBe("val1");
    expect(outputs.key2).toBe("val2");
  });

  it("returns empty object when no _runner_file_commands directory exists", async () => {
    const { extractStepOutputs } = await import("./result-builder.js");
    expect(extractStepOutputs(tempDir)).toEqual({});
  });

  it("returns empty object when directory has no set_output files", async () => {
    const { extractStepOutputs } = await import("./result-builder.js");
    seedFileCommands({ add_path_xyz: "/usr/local/bin\n" });
    expect(extractStepOutputs(tempDir)).toEqual({});
  });

  it("later files override earlier ones for the same key", async () => {
    const { extractStepOutputs } = await import("./result-builder.js");
    seedFileCommands({
      set_output_aaa: "key=first\n",
      set_output_zzz: "key=second\n",
    });
    expect(extractStepOutputs(tempDir).key).toBe("second");
  });

  it("handles multiline heredoc with multiple lines", async () => {
    const { extractStepOutputs } = await import("./result-builder.js");
    seedFileCommands({
      set_output_multi: "tests<<DELIM\ntest1.ts\ntest2.ts\ntest3.ts\nDELIM\n",
    });
    expect(extractStepOutputs(tempDir).tests).toBe("test1.ts\ntest2.ts\ntest3.ts");
  });
});
|
|
250
|
+
// ── resolveJobOutputs ─────────────────────────────────────────────────────────
describe("resolveJobOutputs", () => {
  /** Lazily import the module under test and invoke resolveJobOutputs. */
  const resolve = async (outputDefs, stepOutputs) => {
    const { resolveJobOutputs } = await import("./result-builder.js");
    return resolveJobOutputs(outputDefs, stepOutputs);
  };

  it("resolves step output references in job output templates", async () => {
    const resolved = await resolve(
      {
        skip: "${{ steps.check.outputs.skip }}",
        count: "${{ steps.counter.outputs.shard_count }}",
      },
      {
        skip: "false",
        shard_count: "3",
      },
    );
    expect(resolved).toEqual({
      skip: "false",
      count: "3",
    });
  });

  it("returns empty string for unresolved step outputs", async () => {
    const resolved = await resolve(
      { missing: "${{ steps.none.outputs.doesnt_exist }}" },
      {},
    );
    expect(resolved).toEqual({ missing: "" });
  });

  it("passes through literal values unchanged", async () => {
    const resolved = await resolve({ version: "1.2.3" }, {});
    expect(resolved).toEqual({ version: "1.2.3" });
  });

  it("returns empty object when no output definitions", async () => {
    expect(await resolve({}, { some: "output" })).toEqual({});
  });

  it("handles JSON values in step outputs", async () => {
    const resolved = await resolve(
      { matrix: "${{ steps.plan.outputs.matrix }}" },
      { matrix: '{"shard":[1,2,3]}' },
    );
    expect(resolved).toEqual({
      matrix: '{"shard":[1,2,3]}',
    });
  });

  it("handles templates with surrounding text", async () => {
    const resolved = await resolve(
      { label: "shard-${{ steps.plan.outputs.index }}" },
      { index: "5" },
    );
    expect(resolved).toEqual({ label: "shard-5" });
  });
});
|
|
@@ -80,3 +80,72 @@ export function wrapJobSteps(steps, pauseOnFailure) {
|
|
|
80
80
|
};
|
|
81
81
|
});
|
|
82
82
|
}
|
|
83
|
+
// ─── Output capture step injection ────────────────────────────────────────────
|
|
84
|
+
//
|
|
85
|
+
// Appends a synthetic step that reads `$GITHUB_OUTPUT` files and echoes their
|
|
86
|
+
// contents to stdout with a `::agent-ci-output::` prefix. The DTU parses these
|
|
87
|
+
// lines and persists them to `outputs.json` so the CLI can resolve cross-job
|
|
88
|
+
// outputs via `needs.*.outputs.*`.
|
|
89
|
+
//
|
|
90
|
+
// This step is necessary because the runner's FinalizeJob step deletes
|
|
91
|
+
// `_temp/_runner_file_commands/` _inside_ the container before it exits,
|
|
92
|
+
// making the files unreachable from the host.
|
|
93
|
+
/**
 * Build the shell script for the output-capture synthetic step.
 *
 * Reads all `set_output_*` files from `GITHUB_OUTPUT`'s directory and
 * echoes each `key=value` line to stdout with the prefix `::agent-ci-output::`.
 * Heredoc-format values (`key<<DELIM ... DELIM`) are collapsed onto a single
 * line by joining their lines with a literal two-character `\n` escape
 * sequence, which the consumer is expected to unescape.
 *
 * Implementation notes on the generated script:
 * - The `|| [ -n "$line" ]` clauses make `read` process a final line that
 *   lacks a trailing newline.
 * - Heredoc detection is a plain `grep -q '<<'` on the line, so a simple
 *   value whose text contains `<<` would be misclassified — acceptable for
 *   the key=value format set_output files use.
 * - In this JS source, `\\\\n` is the template-literal spelling of `\\n`,
 *   which the shell's double quotes reduce to the literal characters `\n`.
 *
 * @returns {string} POSIX shell script body for the synthetic step.
 */
function outputCaptureScript() {
  return `# Agent CI: capture step outputs for cross-job passing
DIR="$(dirname "$GITHUB_OUTPUT")"
if [ -d "$DIR" ]; then
  for f in "$DIR"/set_output_*; do
    [ -f "$f" ] || continue
    while IFS= read -r line || [ -n "$line" ]; do
      if echo "$line" | grep -q '<<'; then
        # Heredoc: key<<DELIMITER ... DELIMITER
        KEY=$(echo "$line" | cut -d'<' -f1)
        DELIM=$(echo "$line" | sed 's/^[^<]*<<//')
        VAL=""
        while IFS= read -r hline || [ -n "$hline" ]; do
          [ "$hline" = "$DELIM" ] && break
          [ -n "$VAL" ] && VAL="$VAL\\\\n$hline" || VAL="$hline"
        done
        echo "::agent-ci-output::$KEY=$VAL"
      else
        echo "::agent-ci-output::$line"
      fi
    done < "$f"
  done
fi`;
}
|
|
124
|
+
/**
 * Create a synthetic step object for output capture.
 *
 * The step runs `outputCaptureScript()` as an inline script. Its
 * `Condition` is set to `always()` (i.e. `if: always()` semantics) so the
 * capture runs even when earlier steps in the job have failed.
 *
 * @returns {object} A runner step object ready to append to a job's steps.
 */
export function createOutputCaptureStep() {
  // Name and ContextName intentionally share the same identifier.
  const stepId = "__agent_ci_output_capture";
  return {
    Name: stepId,
    DisplayName: "Capture outputs",
    Reference: { Type: "Script" },
    Inputs: { script: outputCaptureScript() },
    Condition: "always()",
    Environment: {},
    ContextName: stepId,
  };
}
|
|
142
|
+
/**
 * Append the output-capture synthetic step to a job's steps array.
 *
 * Note: this function only skips empty or missing step lists — it does NOT
 * check whether the job declares outputs. Callers that want to avoid the
 * extra capture step for output-less jobs must gate on that themselves.
 *
 * @param steps Array of runner step objects, or null/undefined.
 * @returns The input reference unchanged when `steps` is null/undefined or
 *          empty; otherwise a new array (the input is not mutated) with the
 *          capture step appended.
 */
export function appendOutputCaptureStep(steps) {
  if (!steps || steps.length === 0) {
    return steps;
  }
  return [...steps, createOutputCaptureStep()];
}
|
|
@@ -19,7 +19,7 @@ async function loadWorkflowParser() {
|
|
|
19
19
|
* - github.sha → '0000000000000000000000000000000000000000'
|
|
20
20
|
* - (others) → empty string (safe: no commas injected)
|
|
21
21
|
*/
|
|
22
|
-
export function expandExpressions(value, repoPath, secrets, matrixContext) {
|
|
22
|
+
export function expandExpressions(value, repoPath, secrets, matrixContext, needsContext) {
|
|
23
23
|
return value.replace(/\$\{\{([\s\S]*?)\}\}/g, (_match, expr) => {
|
|
24
24
|
const trimmed = expr.trim();
|
|
25
25
|
// hashFiles('glob1', 'glob2', ...)
|
|
@@ -62,6 +62,45 @@ export function expandExpressions(value, repoPath, secrets, matrixContext) {
|
|
|
62
62
|
return "0000000000000000000000000000000000000000";
|
|
63
63
|
}
|
|
64
64
|
}
|
|
65
|
+
// fromJSON(expr) — parse JSON from a string (or inner expression)
|
|
66
|
+
const fromJsonMatch = trimmed.match(/^fromJSON\(([\s\S]+)\)$/);
|
|
67
|
+
if (fromJsonMatch) {
|
|
68
|
+
const inner = fromJsonMatch[1].trim();
|
|
69
|
+
// If the inner arg is a quoted string literal, use it directly
|
|
70
|
+
let rawValue;
|
|
71
|
+
if ((inner.startsWith("'") && inner.endsWith("'")) ||
|
|
72
|
+
(inner.startsWith('"') && inner.endsWith('"'))) {
|
|
73
|
+
rawValue = inner.slice(1, -1);
|
|
74
|
+
}
|
|
75
|
+
else {
|
|
76
|
+
// Otherwise, treat it as an expression and expand it
|
|
77
|
+
rawValue = expandExpressions(`\${{ ${inner} }}`, repoPath, secrets, matrixContext, needsContext);
|
|
78
|
+
}
|
|
79
|
+
try {
|
|
80
|
+
const parsed = JSON.parse(rawValue);
|
|
81
|
+
if (typeof parsed === "string") {
|
|
82
|
+
return parsed;
|
|
83
|
+
}
|
|
84
|
+
return JSON.stringify(parsed);
|
|
85
|
+
}
|
|
86
|
+
catch {
|
|
87
|
+
return "";
|
|
88
|
+
}
|
|
89
|
+
}
|
|
90
|
+
// toJSON(expr) — serialize a value to JSON
|
|
91
|
+
const toJsonMatch = trimmed.match(/^toJSON\(([\s\S]+)\)$/);
|
|
92
|
+
if (toJsonMatch) {
|
|
93
|
+
const inner = toJsonMatch[1].trim();
|
|
94
|
+
let rawValue;
|
|
95
|
+
if ((inner.startsWith("'") && inner.endsWith("'")) ||
|
|
96
|
+
(inner.startsWith('"') && inner.endsWith('"'))) {
|
|
97
|
+
rawValue = inner.slice(1, -1);
|
|
98
|
+
}
|
|
99
|
+
else {
|
|
100
|
+
rawValue = expandExpressions(`\${{ ${inner} }}`, repoPath, secrets, matrixContext, needsContext);
|
|
101
|
+
}
|
|
102
|
+
return JSON.stringify(rawValue);
|
|
103
|
+
}
|
|
65
104
|
// format('template {0} {1}', arg0, arg1)
|
|
66
105
|
const formatMatch = trimmed.match(/^format\(([\s\S]+)\)$/);
|
|
67
106
|
if (formatMatch) {
|
|
@@ -132,6 +171,20 @@ export function expandExpressions(value, repoPath, secrets, matrixContext) {
|
|
|
132
171
|
if (trimmed.startsWith("steps.")) {
|
|
133
172
|
return "";
|
|
134
173
|
}
|
|
174
|
+
if (trimmed.startsWith("needs.") && needsContext) {
|
|
175
|
+
// needs.<jobId>.outputs.<name> or needs.<jobId>.result
|
|
176
|
+
const parts = trimmed.split(".");
|
|
177
|
+
const jobId = parts[1];
|
|
178
|
+
const jobOutputs = needsContext[jobId];
|
|
179
|
+
if (parts[2] === "outputs" && parts[3]) {
|
|
180
|
+
return jobOutputs?.[parts[3]] ?? "";
|
|
181
|
+
}
|
|
182
|
+
if (parts[2] === "result") {
|
|
183
|
+
// If the job is in the needsContext, it completed (default to 'success')
|
|
184
|
+
return jobOutputs ? (jobOutputs["__result"] ?? "success") : "";
|
|
185
|
+
}
|
|
186
|
+
return "";
|
|
187
|
+
}
|
|
135
188
|
if (trimmed.startsWith("needs.")) {
|
|
136
189
|
return "";
|
|
137
190
|
}
|
|
@@ -224,7 +277,7 @@ export async function parseMatrixDef(filePath, jobId) {
|
|
|
224
277
|
}
|
|
225
278
|
return Object.keys(result).length > 0 ? result : null;
|
|
226
279
|
}
|
|
227
|
-
export async function parseWorkflowSteps(filePath, taskName, secrets, matrixContext) {
|
|
280
|
+
export async function parseWorkflowSteps(filePath, taskName, secrets, matrixContext, needsContext) {
|
|
228
281
|
const template = await getWorkflowTemplate(filePath);
|
|
229
282
|
const rawYaml = parseYaml(fs.readFileSync(filePath, "utf8"));
|
|
230
283
|
// Derive repoPath from filePath (.../repoPath/.github/workflows/foo.yml → repoPath)
|
|
@@ -244,10 +297,12 @@ export async function parseWorkflowSteps(filePath, taskName, secrets, matrixCont
|
|
|
244
297
|
return job.steps
|
|
245
298
|
.map((step, index) => {
|
|
246
299
|
const stepId = step.id || `step-${index + 1}`;
|
|
247
|
-
let stepName = step.name
|
|
248
|
-
? expandExpressions(step.name.toString(), repoPath, secrets, matrixContext)
|
|
249
|
-
: stepId;
|
|
250
300
|
const rawStep = rawSteps[index] || {};
|
|
301
|
+
// Prefer raw YAML name to preserve ${{ }} expressions for our own expansion
|
|
302
|
+
const rawName = rawStep.name != null ? String(rawStep.name) : step.name?.toString();
|
|
303
|
+
let stepName = rawName
|
|
304
|
+
? expandExpressions(rawName, repoPath, secrets, matrixContext, needsContext)
|
|
305
|
+
: stepId;
|
|
251
306
|
// If a step lacks an explicit name, we map it to standard GitHub Actions defaults
|
|
252
307
|
if (!step.name) {
|
|
253
308
|
if ("run" in step) {
|
|
@@ -270,7 +325,7 @@ export async function parseWorkflowSteps(filePath, taskName, secrets, matrixCont
|
|
|
270
325
|
// string is always the complete literal block scalar.
|
|
271
326
|
const rawScript = rawStep.run != null ? String(rawStep.run) : step.run.toString();
|
|
272
327
|
const inputs = {
|
|
273
|
-
script: expandExpressions(rawScript, repoPath, secrets, matrixContext),
|
|
328
|
+
script: expandExpressions(rawScript, repoPath, secrets, matrixContext, needsContext),
|
|
274
329
|
};
|
|
275
330
|
if (rawStep["working-directory"]) {
|
|
276
331
|
inputs.workingDirectory = rawStep["working-directory"];
|
|
@@ -287,7 +342,7 @@ export async function parseWorkflowSteps(filePath, taskName, secrets, matrixCont
|
|
|
287
342
|
Env: rawStep.env
|
|
288
343
|
? Object.fromEntries(Object.entries(rawStep.env).map(([k, v]) => [
|
|
289
344
|
k,
|
|
290
|
-
expandExpressions(String(v), repoPath, secrets),
|
|
345
|
+
expandExpressions(String(v), repoPath, secrets, undefined, needsContext),
|
|
291
346
|
]))
|
|
292
347
|
: undefined,
|
|
293
348
|
};
|
|
@@ -322,13 +377,13 @@ export async function parseWorkflowSteps(filePath, taskName, secrets, matrixCont
|
|
|
322
377
|
...(step.with
|
|
323
378
|
? Object.fromEntries(Object.entries(step.with).map(([k, v]) => [
|
|
324
379
|
k,
|
|
325
|
-
expandExpressions(String(v), repoPath, secrets, matrixContext),
|
|
380
|
+
expandExpressions(String(v), repoPath, secrets, matrixContext, needsContext),
|
|
326
381
|
]))
|
|
327
382
|
: {}),
|
|
328
383
|
// Merge from raw YAML (overrides parsed values), expanding expressions
|
|
329
384
|
...Object.fromEntries(Object.entries(stepWith).map(([k, v]) => [
|
|
330
385
|
k,
|
|
331
|
-
expandExpressions(String(v), repoPath, secrets, matrixContext),
|
|
386
|
+
expandExpressions(String(v), repoPath, secrets, matrixContext, needsContext),
|
|
332
387
|
])),
|
|
333
388
|
...(isCheckout
|
|
334
389
|
? {
|
|
@@ -338,7 +393,7 @@ export async function parseWorkflowSteps(filePath, taskName, secrets, matrixCont
|
|
|
338
393
|
submodules: "false",
|
|
339
394
|
...Object.fromEntries(Object.entries(stepWith).map(([k, v]) => [
|
|
340
395
|
k,
|
|
341
|
-
expandExpressions(String(v), repoPath),
|
|
396
|
+
expandExpressions(String(v), repoPath, secrets, undefined, needsContext),
|
|
342
397
|
])),
|
|
343
398
|
}
|
|
344
399
|
: {}), // Prevent actions/checkout from wiping the rsynced workspace
|
|
@@ -346,7 +401,7 @@ export async function parseWorkflowSteps(filePath, taskName, secrets, matrixCont
|
|
|
346
401
|
Env: rawStep.env
|
|
347
402
|
? Object.fromEntries(Object.entries(rawStep.env).map(([k, v]) => [
|
|
348
403
|
k,
|
|
349
|
-
expandExpressions(String(v), repoPath, secrets, matrixContext),
|
|
404
|
+
expandExpressions(String(v), repoPath, secrets, matrixContext, needsContext),
|
|
350
405
|
]))
|
|
351
406
|
: undefined,
|
|
352
407
|
};
|
|
@@ -554,3 +609,183 @@ export function validateSecrets(filePath, taskName, secrets, secretsFilePath) {
|
|
|
554
609
|
missing.map((n) => `${n}=`).join("\n") +
|
|
555
610
|
"\n");
|
|
556
611
|
}
|
|
612
|
+
/**
 * Parse `jobs.<id>.outputs` definitions from a workflow YAML file.
 * Returns a Record<outputName, expressionTemplate> (e.g. { skip: "${{ steps.check.outputs.skip }}" }).
 * Returns {} if the job has no outputs or doesn't exist.
 */
export function parseJobOutputDefs(filePath, jobId) {
  const doc = parseYaml(fs.readFileSync(filePath, "utf8"));
  const outputDefs = doc?.jobs?.[jobId]?.outputs;
  if (!outputDefs || typeof outputDefs !== "object") {
    return {};
  }
  // Coerce every template to a string so downstream expansion is uniform.
  return Object.fromEntries(
    Object.entries(outputDefs).map(([name, template]) => [name, String(template)]),
  );
}
|
|
629
|
+
/**
 * Parse the `if:` condition from a workflow job.
 * Returns the raw expression string (with `${{ }}` wrapper stripped if present),
 * or null if the job has no `if:`.
 */
export function parseJobIf(filePath, jobId) {
  const doc = parseYaml(fs.readFileSync(filePath, "utf8"));
  const rawIf = doc?.jobs?.[jobId]?.if;
  if (rawIf == null) {
    return null;
  }
  const expr = String(rawIf).trim();
  // An `if:` may be written with or without the ${{ }} wrapper; normalize
  // to the bare expression either way.
  const wrapped = expr.match(/^\$\{\{\s*([\s\S]*?)\s*\}\}$/);
  return wrapped ? wrapped[1] : expr;
}
|
|
648
|
+
/**
 * Evaluate a job-level `if:` condition.
 *
 * @param expr The expression string (already stripped of `${{ }}`)
 * @param jobResults Record<jobId, "success" | "failure"> for upstream jobs
 * @param needsCtx Optional needs output context (same shape as expandExpressions needsContext)
 * @returns Whether the job should run
 */
export function evaluateJobIf(expr, jobResults, needsCtx) {
  const condition = expr.trim();
  // An empty expression behaves like GitHub's default of success().
  if (condition === "") {
    return evaluateAtom("success()", jobResults, needsCtx);
  }
  // || binds loosest, so split on it first; && next. splitOnOperator only
  // splits at paren depth 0 and outside quotes, so when it yields a single
  // part we fall through to the next operator (or the atom case).
  for (const op of ["||", "&&"]) {
    if (!condition.includes(op)) {
      continue;
    }
    const operands = splitOnOperator(condition, op);
    if (operands.length > 1) {
      const evalOperand = (part) => evaluateJobIf(part.trim(), jobResults, needsCtx);
      return op === "||" ? operands.some(evalOperand) : operands.every(evalOperand);
    }
  }
  return evaluateAtom(condition, jobResults, needsCtx);
}
|
|
678
|
+
/**
 * Split an expression on a logical operator, respecting parentheses and quotes.
 *
 * Occurrences of `op` inside single/double quotes or inside parentheses are
 * not split points. The operator itself is consumed; the surrounding
 * whitespace stays attached to the returned segments.
 */
function splitOnOperator(expr, op) {
  const segments = [];
  let buffer = "";
  let parenDepth = 0;
  let quoteChar = null;
  let i = 0;
  while (i < expr.length) {
    const ch = expr[i];
    if (quoteChar !== null) {
      // Inside a quoted string: copy verbatim until the closing quote.
      buffer += ch;
      if (ch === quoteChar) {
        quoteChar = null;
      }
      i += 1;
      continue;
    }
    if (ch === "'" || ch === '"') {
      quoteChar = ch;
      buffer += ch;
      i += 1;
      continue;
    }
    if (ch === "(") {
      parenDepth += 1;
    } else if (ch === ")") {
      parenDepth -= 1;
    }
    if (parenDepth === 0 && expr.startsWith(op, i)) {
      segments.push(buffer);
      buffer = "";
      i += op.length;
      continue;
    }
    buffer += ch;
    i += 1;
  }
  segments.push(buffer);
  return segments;
}
|
|
717
|
+
/**
 * Evaluate a single atomic condition (no && or ||).
 *
 * Supports the status-check functions always()/cancelled()/success()/failure(),
 * `!=` / `==` comparisons, and bare truthiness of a resolved value.
 */
function evaluateAtom(expr, jobResults, needsCtx) {
  const atom = expr.trim();
  // Status-check functions map to fixed semantics over upstream job results.
  switch (atom) {
    case "always()":
      return true;
    case "cancelled()":
      // Local runs are never cancelled.
      return false;
    case "success()":
      return Object.values(jobResults).every((r) => r === "success");
    case "failure()":
      return Object.values(jobResults).some((r) => r === "failure");
    default:
      break;
  }
  // != is tested before == so "a != b" isn't mis-parsed by the == pattern.
  const neq = atom.match(/^(.+?)\s*!=\s*(.+)$/);
  if (neq) {
    return resolveValue(neq[1].trim(), needsCtx) !== resolveValue(neq[2].trim(), needsCtx);
  }
  const eq = atom.match(/^(.+?)\s*==\s*(.+)$/);
  if (eq) {
    return resolveValue(eq[1].trim(), needsCtx) === resolveValue(eq[2].trim(), needsCtx);
  }
  // Bare truthy value (e.g. needs.setup.outputs.run_tests): treat empty,
  // "false", and "0" as falsy, everything else as truthy.
  const value = resolveValue(atom, needsCtx);
  return value !== "" && value !== "false" && value !== "0";
}
|
|
753
|
+
/**
 * Resolve a value reference in a condition expression.
 *
 * Handles quoted string literals and `needs.<jobId>.outputs.<name>` /
 * `needs.<jobId>.result` lookups; anything else is returned verbatim
 * (trimmed).
 */
function resolveValue(raw, needsCtx) {
  const token = raw.trim();
  const first = token[0];
  const last = token[token.length - 1];
  // Quoted string literal → strip the quotes.
  if ((first === "'" && last === "'") || (first === '"' && last === '"')) {
    return token.slice(1, -1);
  }
  if (needsCtx && token.startsWith("needs.")) {
    const [, jobId, kind, outputName] = token.split(".");
    const jobOutputs = needsCtx[jobId];
    if (kind === "outputs" && outputName) {
      return jobOutputs?.[outputName] ?? "";
    }
    if (kind === "result") {
      // A job present in needsCtx completed; default its result to success.
      return jobOutputs ? (jobOutputs["__result"] ?? "success") : "";
    }
  }
  return token;
}
|
|
777
|
+
/**
 * Parse `strategy.fail-fast` for a job.
 * Returns true/false if explicitly set, undefined if not specified.
 */
export function parseFailFast(filePath, jobId) {
  const doc = parseYaml(fs.readFileSync(filePath, "utf8"));
  const strategy = doc?.jobs?.[jobId]?.strategy;
  if (strategy === null || typeof strategy !== "object") {
    // No strategy block (or a malformed scalar one) → not specified.
    return undefined;
  }
  return "fail-fast" in strategy ? Boolean(strategy["fail-fast"]) : undefined;
}
|