@kody-ade/kody-engine-lite 0.1.54 → 0.1.56
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent-runner.d.ts +4 -0
- package/dist/agent-runner.js +122 -0
- package/dist/bin/cli.js +422 -473
- package/dist/ci/parse-inputs.d.ts +6 -0
- package/dist/ci/parse-inputs.js +76 -0
- package/dist/ci/parse-safety.d.ts +6 -0
- package/dist/ci/parse-safety.js +22 -0
- package/dist/cli/args.d.ts +13 -0
- package/dist/cli/args.js +42 -0
- package/dist/cli/litellm.d.ts +2 -0
- package/dist/cli/litellm.js +85 -0
- package/dist/cli/task-resolution.d.ts +2 -0
- package/dist/cli/task-resolution.js +41 -0
- package/dist/config.d.ts +49 -0
- package/dist/config.js +72 -0
- package/dist/context.d.ts +4 -0
- package/dist/context.js +83 -0
- package/dist/definitions.d.ts +3 -0
- package/dist/definitions.js +59 -0
- package/dist/entry.d.ts +1 -0
- package/dist/entry.js +236 -0
- package/dist/git-utils.d.ts +13 -0
- package/dist/git-utils.js +174 -0
- package/dist/github-api.d.ts +14 -0
- package/dist/github-api.js +114 -0
- package/dist/kody-utils.d.ts +1 -0
- package/dist/kody-utils.js +9 -0
- package/dist/learning/auto-learn.d.ts +2 -0
- package/dist/learning/auto-learn.js +169 -0
- package/dist/logger.d.ts +14 -0
- package/dist/logger.js +51 -0
- package/dist/memory.d.ts +1 -0
- package/dist/memory.js +20 -0
- package/dist/observer.d.ts +9 -0
- package/dist/observer.js +80 -0
- package/dist/pipeline/complexity.d.ts +3 -0
- package/dist/pipeline/complexity.js +12 -0
- package/dist/pipeline/executor-registry.d.ts +3 -0
- package/dist/pipeline/executor-registry.js +20 -0
- package/dist/pipeline/hooks.d.ts +17 -0
- package/dist/pipeline/hooks.js +110 -0
- package/dist/pipeline/questions.d.ts +2 -0
- package/dist/pipeline/questions.js +44 -0
- package/dist/pipeline/runner-selection.d.ts +2 -0
- package/dist/pipeline/runner-selection.js +13 -0
- package/dist/pipeline/state.d.ts +4 -0
- package/dist/pipeline/state.js +37 -0
- package/dist/pipeline.d.ts +3 -0
- package/dist/pipeline.js +213 -0
- package/dist/preflight.d.ts +1 -0
- package/dist/preflight.js +69 -0
- package/dist/retrospective.d.ts +26 -0
- package/dist/retrospective.js +211 -0
- package/dist/stages/agent.d.ts +2 -0
- package/dist/stages/agent.js +94 -0
- package/dist/stages/gate.d.ts +2 -0
- package/dist/stages/gate.js +32 -0
- package/dist/stages/review.d.ts +2 -0
- package/dist/stages/review.js +32 -0
- package/dist/stages/ship.d.ts +3 -0
- package/dist/stages/ship.js +154 -0
- package/dist/stages/verify.d.ts +2 -0
- package/dist/stages/verify.js +94 -0
- package/dist/types.d.ts +61 -0
- package/dist/types.js +1 -0
- package/dist/validators.d.ts +8 -0
- package/dist/validators.js +42 -0
- package/dist/verify-runner.d.ts +11 -0
- package/dist/verify-runner.js +110 -0
- package/kody.config.schema.json +2 -2
- package/package.json +9 -8
- package/prompts/autofix.md +9 -27
- package/prompts/review.md +16 -83
- package/templates/kody.yml +29 -19
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import * as fs from "fs";
|
|
2
|
+
import * as path from "path";
|
|
3
|
+
import { STAGES } from "../definitions.js";
|
|
4
|
+
import { logger } from "../logger.js";
|
|
5
|
+
import { executeAgentStage } from "./agent.js";
|
|
6
|
+
/**
 * Run the review stage; if the reviewer reports a failing verdict, run the
 * review-fix agent once and then re-run the review.
 *
 * @param {object} ctx - Pipeline context (taskDir, input, runners).
 * @param {object} def - Composite stage definition (the concrete "review" /
 *   "review-fix" definitions are looked up in STAGES, not taken from def).
 * @returns {Promise<object>} StageResult-shaped object.
 */
export async function executeReviewWithFix(ctx, def) {
    if (ctx.input.dryRun) {
        return { outcome: "completed", retries: 0 };
    }
    const reviewDef = STAGES.find((s) => s.name === "review");
    const reviewFixDef = STAGES.find((s) => s.name === "review-fix");
    // Guard against a misconfigured STAGES table instead of crashing inside
    // executeAgentStage with an undefined definition.
    if (!reviewDef || !reviewFixDef) {
        return { outcome: "failed", retries: 0, error: "review/review-fix stage definition missing" };
    }
    const reviewResult = await executeAgentStage(ctx, reviewDef);
    if (reviewResult.outcome !== "completed") {
        return reviewResult;
    }
    const reviewFile = path.join(ctx.taskDir, "review.md");
    if (!fs.existsSync(reviewFile)) {
        return { outcome: "failed", retries: 0, error: "review.md not found" };
    }
    const content = fs.readFileSync(reviewFile, "utf-8");
    // Prefer the explicit verdict line (same format ship.js reads from
    // review.md). The old substring heuristic broke whenever a failing review
    // also contained the word "pass" (e.g. "Verdict: FAIL ... tests pass");
    // it is kept only as a fallback for reviews with no verdict line.
    const verdictMatch = content.match(/## Verdict:\s*(PASS|FAIL)/i);
    const hasIssues = verdictMatch
        ? verdictMatch[1].toUpperCase() === "FAIL"
        : /\bfail\b/i.test(content) && !/pass/i.test(content);
    if (!hasIssues) {
        return reviewResult;
    }
    logger.info(` review found issues, running review-fix...`);
    const fixResult = await executeAgentStage(ctx, reviewFixDef);
    if (fixResult.outcome !== "completed") {
        return fixResult;
    }
    logger.info(` re-running review after fix...`);
    return executeAgentStage(ctx, reviewDef);
}
|
|
@@ -0,0 +1,154 @@
|
|
|
1
|
+
import * as fs from "fs";
|
|
2
|
+
import * as path from "path";
|
|
3
|
+
import { execFileSync } from "child_process";
|
|
4
|
+
import { getCurrentBranch, getDefaultBranch, pushBranch, } from "../git-utils.js";
|
|
5
|
+
import { postComment, createPR, } from "../github-api.js";
|
|
6
|
+
import { getProjectConfig } from "../config.js";
|
|
7
|
+
/**
 * Assemble a PR description from the pipeline artifacts in ctx.taskDir.
 * Reads, when present: task.json (What/Scope/type line), review.md
 * (Changes + verdict badge), verify.md (verify badge), plan.md
 * (collapsible plan), and appends a "Closes #N" line plus a footer.
 *
 * @param {object} ctx - Pipeline context with taskDir and input.issueNumber.
 * @returns {string} Markdown body for the pull request.
 */
export function buildPrBody(ctx) {
    const out = [];
    const readArtifact = (name) => {
        const file = path.join(ctx.taskDir, name);
        return fs.existsSync(file) ? fs.readFileSync(file, "utf-8") : null;
    };
    // "What" / "Scope" / type line — from task.json (may be fenced LLM output).
    const taskRaw = readArtifact("task.json");
    if (taskRaw !== null) {
        try {
            const withoutFences = taskRaw.replace(/^```json\s*\n?/m, "").replace(/\n?```\s*$/m, "");
            const task = JSON.parse(withoutFences);
            if (task.description) {
                out.push(`## What\n\n${task.description}`);
            }
            if (task.scope?.length) {
                const bullets = task.scope.map((entry) => `- \`${entry}\``).join("\n");
                out.push(`\n## Scope\n\n${bullets}`);
            }
            out.push(`\n**Type:** ${task.task_type ?? "unknown"} | **Risk:** ${task.risk_level ?? "unknown"}`);
        }
        catch {
            // Unparseable task.json — omit the What/Scope sections.
        }
    }
    // "Changes" + verdict badge — from review.md.
    const review = readArtifact("review.md");
    if (review !== null) {
        const summaryMatch = review.match(/## Summary\s*\n([\s\S]*?)(?=\n## |\n*$)/);
        const summary = summaryMatch?.[1].trim();
        if (summary) {
            out.push(`\n## Changes\n\n${summary}`);
        }
        const verdict = review.match(/## Verdict:\s*(PASS|FAIL)/i);
        if (verdict) {
            const badge = verdict[1].toUpperCase() === "PASS" ? "✅ PASS" : "❌ FAIL";
            out.push(`\n**Review:** ${badge}`);
        }
    }
    // Verify badge — only shown when verify.md reports a pass.
    const verify = readArtifact("verify.md");
    if (verify !== null && /PASS/i.test(verify)) {
        out.push(`**Verify:** ✅ typecheck + tests + lint passed`);
    }
    // Implementation plan — collapsed, truncated to keep the PR body short.
    const planRaw = readArtifact("plan.md");
    const plan = planRaw === null ? "" : planRaw.trim();
    if (plan) {
        const truncated = plan.length > 800 ? plan.slice(0, 800) + "\n..." : plan;
        out.push(`\n<details><summary>📋 Implementation plan</summary>\n\n${truncated}\n</details>`);
    }
    if (ctx.input.issueNumber) {
        out.push(`\nCloses #${ctx.input.issueNumber}`);
    }
    out.push(`\n---\n🤖 Generated by Kody`);
    return out.join("\n");
}
|
|
65
|
+
// Conventional-commit prefix per task_type, used for PR titles.
const SHIP_TYPE_PREFIX = Object.freeze({
    feature: "feat",
    bugfix: "fix",
    refactor: "refactor",
    docs: "docs",
    chore: "chore",
});
/**
 * Derive a conventional-commit style PR title (max 72 chars) from task.json
 * (preferred) or the first content line of task.md (fallback).
 * @param {object} ctx - Pipeline context with taskDir.
 * @returns {string} PR title; "Update" when no source is usable.
 */
function derivePrTitle(ctx) {
    const taskJsonPath = path.join(ctx.taskDir, "task.json");
    if (fs.existsSync(taskJsonPath)) {
        try {
            const raw = fs.readFileSync(taskJsonPath, "utf-8");
            // task.json may be fenced LLM output.
            const cleaned = raw.replace(/^```json\s*\n?/m, "").replace(/\n?```\s*$/m, "");
            const task = JSON.parse(cleaned);
            const prefix = SHIP_TYPE_PREFIX[task.task_type] ?? "chore";
            const taskTitle = task.title ?? "Update";
            return `${prefix}: ${taskTitle}`.slice(0, 72);
        }
        catch { /* fall through to task.md */ }
    }
    const taskMdPath = path.join(ctx.taskDir, "task.md");
    if (fs.existsSync(taskMdPath)) {
        const content = fs.readFileSync(taskMdPath, "utf-8");
        // First non-empty line that is not a heading or bullet.
        const firstLine = content.split("\n").find((l) => l.trim() && !l.startsWith("#") && !l.startsWith("*"));
        if (firstLine) {
            return `chore: ${firstLine.trim()}`.slice(0, 72);
        }
    }
    return "Update";
}
/**
 * Ship stage: push the working branch and open a PR on GitHub.
 * Skipped (with an explanatory ship.md) in dry-run mode, and in local mode
 * when no issue number is set. Writes ship.md on every path, including failure.
 *
 * @param {object} ctx - Pipeline context.
 * @param {object} _def - Stage definition (unused).
 * @returns {object} StageResult-shaped object.
 */
export function executeShipStage(ctx, _def) {
    const shipPath = path.join(ctx.taskDir, "ship.md");
    if (ctx.input.dryRun) {
        fs.writeFileSync(shipPath, "# Ship\n\nShip stage skipped — dry run.\n");
        return { outcome: "completed", outputFile: "ship.md", retries: 0 };
    }
    // Local mode or no issue: skip git push + PR
    if (ctx.input.local && !ctx.input.issueNumber) {
        fs.writeFileSync(shipPath, "# Ship\n\nShip stage skipped — local mode, no issue number.\n");
        return { outcome: "completed", outputFile: "ship.md", retries: 0 };
    }
    try {
        const head = getCurrentBranch(ctx.projectDir);
        const base = getDefaultBranch(ctx.projectDir);
        pushBranch(ctx.projectDir);
        // Resolve owner/repo from config, falling back to the origin remote.
        // NOTE(review): owner/repo are computed but never passed to createPR
        // below — presumably createPR resolves the repo itself; confirm and
        // either wire these through or drop this resolution.
        const config = getProjectConfig();
        let owner = config.github?.owner;
        let repo = config.github?.repo;
        if (!owner || !repo) {
            try {
                const remoteUrl = execFileSync("git", ["remote", "get-url", "origin"], {
                    encoding: "utf-8",
                    cwd: ctx.projectDir,
                }).trim();
                // Matches both HTTPS and SSH remotes; stops before ".git".
                const match = remoteUrl.match(/github\.com[/:]([^/]+)\/([^/.]+)/);
                if (match) {
                    owner = match[1];
                    repo = match[2];
                }
            }
            catch {
                // Can't determine repo
            }
        }
        const title = derivePrTitle(ctx);
        // Build rich PR body
        const body = buildPrBody(ctx);
        const pr = createPR(head, base, title, body);
        if (pr) {
            if (ctx.input.issueNumber && !ctx.input.local) {
                try {
                    postComment(ctx.input.issueNumber, `🎉 PR created: ${pr.url}`);
                }
                catch {
                    // Fire and forget
                }
            }
            fs.writeFileSync(shipPath, `# Ship\n\nPR created: ${pr.url}\nPR #${pr.number}\n`);
        }
        else {
            fs.writeFileSync(shipPath, "# Ship\n\nPushed branch but failed to create PR.\n");
        }
        return { outcome: "completed", outputFile: "ship.md", retries: 0 };
    }
    catch (err) {
        const msg = err instanceof Error ? err.message : String(err);
        fs.writeFileSync(shipPath, `# Ship\n\nFailed: ${msg}\n`);
        return { outcome: "failed", retries: 0, error: msg };
    }
}
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
import * as fs from "fs";
|
|
2
|
+
import * as path from "path";
|
|
3
|
+
import { execFileSync } from "child_process";
|
|
4
|
+
import { resolveModel } from "../context.js";
|
|
5
|
+
import { getProjectConfig, FIX_COMMAND_TIMEOUT_MS } from "../config.js";
|
|
6
|
+
import { parseCommand } from "../verify-runner.js";
|
|
7
|
+
import { getRunnerForStage } from "../pipeline/runner-selection.js";
|
|
8
|
+
import { postComment } from "../github-api.js";
|
|
9
|
+
import { diagnoseFailure, getModifiedFiles } from "../observer.js";
|
|
10
|
+
import { logger } from "../logger.js";
|
|
11
|
+
import { executeAgentStage } from "./agent.js";
|
|
12
|
+
import { executeGateStage } from "./gate.js";
|
|
13
|
+
/**
 * Run the verify gate with an AI-diagnosed autofix loop.
 *
 * Each attempt runs the deterministic gate; on failure (except the final
 * attempt) the failure is classified by an LLM diagnosis and handled:
 *  - infrastructure / pre-existing → non-blocking, stage completes with a note
 *  - abort → stage fails immediately
 *  - fixable/retry → run lintFix/formatFix, then optionally an autofix agent
 *
 * @param {object} ctx - Pipeline context.
 * @param {object} def - Stage definition (uses maxRetries, retryWithAgent).
 * @returns {Promise<object>} StageResult-shaped object.
 */
export async function executeVerifyWithAutofix(ctx, def) {
    const maxAttempts = def.maxRetries ?? 2;
    for (let attempt = 0; attempt <= maxAttempts; attempt++) {
        logger.info(` verification attempt ${attempt + 1}/${maxAttempts + 1}`);
        const gateResult = executeGateStage(ctx, def);
        if (gateResult.outcome === "completed") {
            return { ...gateResult, retries: attempt };
        }
        if (attempt < maxAttempts) {
            // Read verify errors for diagnosis
            const verifyPath = path.join(ctx.taskDir, "verify.md");
            const errorOutput = fs.existsSync(verifyPath) ? fs.readFileSync(verifyPath, "utf-8") : "Unknown error";
            // AI diagnosis — classify the failure
            const modifiedFiles = getModifiedFiles(ctx.projectDir);
            const defaultRunner = getRunnerForStage(ctx, "taskify"); // use cheap model
            const diagnosis = await diagnoseFailure("verify", errorOutput, modifiedFiles, defaultRunner, resolveModel("cheap"));
            if (diagnosis.classification === "infrastructure") {
                logger.warn(` Infrastructure issue: ${diagnosis.reason}`);
                if (ctx.input.issueNumber && !ctx.input.local) {
                    try {
                        postComment(ctx.input.issueNumber, `⚠️ **Infrastructure issue detected:** ${diagnosis.reason}\n\n${diagnosis.resolution}`);
                    }
                    catch { /* fire-and-forget */ }
                }
                // Non-blocking: infra problems must not fail the pipeline.
                return { outcome: "completed", retries: attempt, error: `Skipped: ${diagnosis.reason}` };
            }
            if (diagnosis.classification === "pre-existing") {
                logger.warn(` Pre-existing issue: ${diagnosis.reason}`);
                return { outcome: "completed", retries: attempt, error: `Skipped: ${diagnosis.reason}` };
            }
            if (diagnosis.classification === "abort") {
                logger.error(` Unrecoverable: ${diagnosis.reason}`);
                return { outcome: "failed", retries: attempt, error: diagnosis.reason };
            }
            // fixable or retry — proceed with autofix
            logger.info(` Diagnosis: ${diagnosis.classification} — ${diagnosis.reason}`);
            const config = getProjectConfig();
            // Best-effort mechanical fixes (lint --fix / formatter).
            const runFix = (cmd) => {
                if (!cmd)
                    return;
                const parts = parseCommand(cmd);
                if (parts.length === 0)
                    return;
                try {
                    execFileSync(parts[0], parts.slice(1), {
                        stdio: "pipe",
                        timeout: FIX_COMMAND_TIMEOUT_MS,
                        // BUGFIX: run in the project being verified, not the
                        // orchestrator's cwd — consistent with how every other
                        // command in this pipeline is executed.
                        cwd: ctx.projectDir,
                    });
                }
                catch {
                    // Silently ignore fix failures
                }
            };
            runFix(config.quality.lintFix);
            runFix(config.quality.formatFix);
            if (def.retryWithAgent) {
                // Create new context with diagnosis guidance — don't mutate original
                const autofixCtx = {
                    ...ctx,
                    input: {
                        ...ctx.input,
                        feedback: `${diagnosis.resolution}\n\n${ctx.input.feedback ?? ""}`.trim(),
                    },
                };
                logger.info(` running ${def.retryWithAgent} agent with diagnosis guidance...`);
                await executeAgentStage(autofixCtx, {
                    ...def,
                    name: def.retryWithAgent,
                    type: "agent",
                    modelTier: "mid",
                    timeout: 300_000,
                    outputFile: undefined,
                });
            }
        }
    }
    return {
        outcome: "failed",
        retries: maxAttempts,
        error: "Verification failed after autofix attempts",
    };
}
|
package/dist/types.d.ts
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
/** Union of all stage identifiers used by the pipeline. */
export type StageName = "taskify" | "plan" | "build" | "verify" | "review" | "review-fix" | "ship";
/** Execution kind of a stage: LLM agent, quality gate, or deterministic code. */
export type StageType = "agent" | "gate" | "deterministic";
/** Lifecycle states recorded for pipeline stages. */
export type PipelineState = "pending" | "running" | "completed" | "failed" | "timeout";
/** Static configuration describing one pipeline stage. */
export interface StageDefinition {
    name: StageName;
    type: StageType;
    /** Model class the stage runs on. */
    modelTier: "cheap" | "mid" | "strong";
    /** Stage timeout in milliseconds. */
    timeout: number;
    /** Maximum number of retries allowed for this stage. */
    maxRetries: number;
    /** Artifact file the stage writes into the task dir, if any. */
    outputFile?: string;
    /** Name of an agent stage to run as a fixer when this stage fails. */
    retryWithAgent?: string;
}
/** Mutable progress record for a single stage. */
export interface StageState {
    state: PipelineState;
    /** Set when the stage starts. */
    startedAt?: string;
    /** Set when the stage finishes. */
    completedAt?: string;
    /** Number of retries consumed so far. */
    retries: number;
    /** Failure detail, when any. */
    error?: string;
    outputFile?: string;
}
/** Persisted status of a whole pipeline run. */
export interface PipelineStatus {
    taskId: string;
    state: "running" | "completed" | "failed";
    /** Per-stage progress, keyed by stage name. */
    stages: Record<StageName, StageState>;
    createdAt: string;
    updatedAt: string;
}
/** Outcome returned by a stage executor. */
export interface StageResult {
    outcome: "completed" | "failed" | "timed_out";
    outputFile?: string;
    error?: string;
    retries: number;
}
/** Outcome returned by a single agent run. */
export interface AgentResult {
    outcome: "completed" | "failed" | "timed_out";
    /** Raw agent output, when available. */
    output?: string;
    error?: string;
}
/** Optional process-level settings for an agent run. */
export interface AgentRunnerOptions {
    cwd?: string;
    env?: Record<string, string>;
}
/** Adapter that executes agent prompts and reports its own health. */
export interface AgentRunner {
    run(stageName: string, prompt: string, model: string, timeout: number, taskDir: string, options?: AgentRunnerOptions): Promise<AgentResult>;
    healthCheck(): Promise<boolean>;
}
/** Everything a stage executor needs: task identity, dirs, runners, input. */
export interface PipelineContext {
    taskId: string;
    /** Directory holding this task's artifacts (task.json, plan.md, ...). */
    taskDir: string;
    /** Root of the project being worked on. */
    projectDir: string;
    /** Available agent runners, keyed by name. */
    runners: Record<string, AgentRunner>;
    /** Run parameters supplied by the caller (CLI/CI). */
    input: {
        mode: "full" | "rerun" | "status";
        fromStage?: string;
        dryRun?: boolean;
        issueNumber?: number;
        /** Extra guidance threaded into agent prompts. */
        feedback?: string;
        local?: boolean;
        complexity?: "low" | "medium" | "high";
    };
}
|
package/dist/types.js
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// Intentionally empty: the declarations live in types.d.ts; this runtime
// module only needs to exist as a valid ES module.
export {};
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
/** Outcome of a validator: valid flag plus an explanation when invalid. */
export interface ValidationResult {
    valid: boolean;
    /** Human-readable reason; present only when valid is false. */
    error?: string;
}
/** Strip surrounding markdown code fences from LLM output. */
export declare function stripFences(content: string): string;
/** Check that content parses as JSON and carries all required task fields. */
export declare function validateTaskJson(content: string): ValidationResult;
/** Check that a plan is non-trivial and has at least one markdown h2 section. */
export declare function validatePlanMd(content: string): ValidationResult;
/** Check that a review states a pass/fail outcome somewhere in its text. */
export declare function validateReviewMd(content: string): ValidationResult;
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
// Top-level keys that must be present in a parsed task.json.
const REQUIRED_TASK_FIELDS = [
    "task_type",
    "title",
    "description",
    "scope",
    "risk_level",
];
/**
 * Strip a leading markdown code fence and a trailing fence from LLM output.
 * Generalized: accepts any fence language tag (```json, ```js, or bare ```),
 * where the original only handled ```json.
 * @param {string} content
 * @returns {string} content without surrounding fences.
 */
export function stripFences(content) {
    return content.replace(/^```\w*\s*\n?/m, "").replace(/\n?```\s*$/m, "");
}
/**
 * Validate that content parses as JSON (after fence-stripping) and carries
 * every required task field.
 * @param {string} content - Raw task.json text, possibly fenced.
 * @returns {{valid: boolean, error?: string}}
 */
export function validateTaskJson(content) {
    try {
        const parsed = JSON.parse(stripFences(content));
        for (const field of REQUIRED_TASK_FIELDS) {
            if (!(field in parsed)) {
                return { valid: false, error: `Missing field: ${field}` };
            }
        }
        return { valid: true };
    }
    catch (err) {
        return {
            valid: false,
            error: `Invalid JSON: ${err instanceof Error ? err.message : String(err)}`,
        };
    }
}
/**
 * Validate a plan.md: non-trivial length and at least one h2 section.
 * @param {string} content
 * @returns {{valid: boolean, error?: string}}
 */
export function validatePlanMd(content) {
    if (content.length < 10) {
        return { valid: false, error: "Plan is too short (< 10 chars)" };
    }
    if (!/^##\s+\w+/m.test(content)) {
        return { valid: false, error: "Plan has no markdown h2 sections" };
    }
    return { valid: true };
}
/**
 * Validate a review.md: must mention a pass/fail outcome somewhere.
 * @param {string} content
 * @returns {{valid: boolean, error?: string}}
 */
export function validateReviewMd(content) {
    if (/pass/i.test(content) || /fail/i.test(content)) {
        return { valid: true };
    }
    return { valid: false, error: "Review must contain 'pass' or 'fail'" };
}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
/** Aggregate result of running the quality gates. */
export interface VerifyResult {
    /** True when every executed command succeeded. */
    pass: boolean;
    /** Error-looking lines collected from failing commands. */
    errors: string[];
    /** Summary lines (test counts, completion notices) from all commands. */
    summary: string[];
}
/**
 * Parse a command string into [executable, ...args], respecting quoted arguments.
 * e.g., 'pnpm -s "test:unit"' → ["pnpm", "-s", "test:unit"]
 */
export declare function parseCommand(cmd: string): string[];
/** Run the configured typecheck/test/lint commands and aggregate results. */
export declare function runQualityGates(taskDir: string, projectRoot?: string): VerifyResult;
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
import { execFileSync } from "child_process";
|
|
2
|
+
import { getProjectConfig, VERIFY_COMMAND_TIMEOUT_MS } from "./config.js";
|
|
3
|
+
import { logger } from "./logger.js";
|
|
4
|
+
/**
 * Loose guard for errors thrown by execFileSync: anything object-like, so
 * callers can safely probe optional fields such as stdout/stderr/killed.
 * @param {unknown} err
 * @returns {boolean}
 */
function isExecError(err) {
    return err !== null && typeof err === "object";
}
|
|
7
|
+
/**
 * Parse a command string into [executable, ...args], respecting quoted arguments.
 * e.g., 'pnpm -s "test:unit"' → ["pnpm", "-s", "test:unit"]
 * @param {string} cmd
 * @returns {string[]} Tokens; empty array for a blank command.
 */
export function parseCommand(cmd) {
    const tokens = [];
    let buffer = "";
    let quoteChar = null;
    // Push the pending token, if any, and reset the buffer.
    const flush = () => {
        if (buffer) {
            tokens.push(buffer);
            buffer = "";
        }
    };
    for (const ch of cmd) {
        if (quoteChar !== null) {
            // Inside quotes: everything is literal until the matching quote.
            if (ch === quoteChar) {
                quoteChar = null;
            }
            else {
                buffer += ch;
            }
            continue;
        }
        if (ch === '"' || ch === "'") {
            quoteChar = ch;
            continue;
        }
        if (/\s/.test(ch)) {
            flush();
            continue;
        }
        buffer += ch;
    }
    flush();
    if (quoteChar !== null)
        logger.warn(`Unclosed quote in command: ${cmd}`);
    return tokens;
}
|
|
43
|
+
/**
 * Run a shell-free command (tokenized by parseCommand) and capture its output.
 * @param {string} cmd - Command string, e.g. "pnpm -s test".
 * @param {string} cwd - Working directory for the child process.
 * @param {number} timeout - Kill the process after this many milliseconds.
 * @returns {{success: boolean, output: string, timedOut: boolean}}
 */
function runCommand(cmd, cwd, timeout) {
    const argv = parseCommand(cmd);
    if (argv.length === 0) {
        // Nothing to run counts as a success with no output.
        return { success: true, output: "", timedOut: false };
    }
    const [executable, ...args] = argv;
    try {
        const stdout = execFileSync(executable, args, {
            cwd,
            timeout,
            encoding: "utf-8",
            stdio: ["pipe", "pipe", "pipe"],
            // Disable ANSI color so downstream log parsing sees plain text.
            env: { ...process.env, FORCE_COLOR: "0" },
        });
        return { success: true, output: stdout ?? "", timedOut: false };
    }
    catch (err) {
        const out = isExecError(err) ? err.stdout ?? "" : "";
        const errOut = isExecError(err) ? err.stderr ?? "" : "";
        // `killed` is set when the child was terminated (including on timeout).
        const wasKilled = isExecError(err) ? !!err.killed : false;
        return { success: false, output: `${out}${errOut}`, timedOut: wasKilled };
    }
}
|
|
65
|
+
/**
 * Collect lines that look like errors/failures/warnings from command output.
 * The original alternation (error|Error|ERROR|failed|Failed|FAIL|warning:|
 * Warning:|WARN) was redundant under the /i flag: "FAIL" already matches any
 * "fail" and "WARN" any "warn" case-insensitively, so /error|fail|warn/i
 * matches exactly the same set of lines.
 * @param {string} output - Combined stdout/stderr of a command.
 * @returns {string[]} Matching lines, each truncated to 500 chars.
 */
function parseErrors(output) {
    const errors = [];
    for (const line of output.split("\n")) {
        if (/error|fail|warn/i.test(line)) {
            errors.push(line.slice(0, 500));
        }
    }
    return errors;
}
|
|
74
|
+
/**
 * Pull up to the last three summary-looking lines from command output,
 * each trimmed and tagged with the command name.
 * @param {string} output - Combined output of the command.
 * @param {string} cmdName - Short name used as the line tag.
 * @returns {string[]}
 */
function extractSummary(output, cmdName) {
    const looksLikeSummary = /Test Suites|Tests|Coverage|ERRORS|FAILURES|success|completed/i;
    const tagged = [];
    for (const line of output.split("\n")) {
        if (looksLikeSummary.test(line)) {
            tagged.push(`[${cmdName}] ${line.trim()}`);
        }
    }
    return tagged.slice(-3);
}
|
|
79
|
+
/**
 * Run the configured quality commands (typecheck, unit tests, and lint when
 * configured) and aggregate pass/fail, error lines, and summary lines.
 * @param {string} taskDir - Part of the public signature; not read here.
 * @param {string} [projectRoot] - Where commands run; defaults to process.cwd().
 * @returns {{pass: boolean, errors: string[], summary: string[]}}
 */
export function runQualityGates(taskDir, projectRoot) {
    const config = getProjectConfig();
    const workDir = projectRoot ?? process.cwd();
    const errors = [];
    const summary = [];
    let pass = true;
    const gates = [
        { name: "typecheck", cmd: config.quality.typecheck },
        { name: "test", cmd: config.quality.testUnit },
    ];
    if (config.quality.lint) {
        gates.push({ name: "lint", cmd: config.quality.lint });
    }
    for (const { name, cmd } of gates) {
        if (!cmd)
            continue;
        logger.info(` Running ${name}: ${cmd}`);
        const result = runCommand(cmd, workDir, VERIFY_COMMAND_TIMEOUT_MS);
        if (result.timedOut) {
            errors.push(`${name}: timed out after ${VERIFY_COMMAND_TIMEOUT_MS / 1000}s`);
            pass = false;
            // No summary for a timed-out command.
            continue;
        }
        if (!result.success) {
            pass = false;
            for (const line of parseErrors(result.output)) {
                errors.push(`[${name}] ${line}`);
            }
        }
        summary.push(...extractSummary(result.output, name));
    }
    return { pass, errors, summary };
}
|
package/kody.config.schema.json
CHANGED
|
@@ -82,8 +82,8 @@
|
|
|
82
82
|
"properties": {
|
|
83
83
|
"taskDir": {
|
|
84
84
|
"type": "string",
|
|
85
|
-
"description": "Directory for pipeline artifacts and state (
|
|
86
|
-
"default": ".
|
|
85
|
+
"description": "Directory for pipeline artifacts and state (gitignored)",
|
|
86
|
+
"default": ".tasks"
|
|
87
87
|
}
|
|
88
88
|
},
|
|
89
89
|
"additionalProperties": false
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@kody-ade/kody-engine-lite",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.56",
|
|
4
4
|
"description": "Autonomous SDLC pipeline: Kody orchestration + Claude Code + LiteLLM",
|
|
5
5
|
"license": "MIT",
|
|
6
6
|
"type": "module",
|
|
@@ -13,6 +13,13 @@
|
|
|
13
13
|
"templates",
|
|
14
14
|
"kody.config.schema.json"
|
|
15
15
|
],
|
|
16
|
+
"scripts": {
|
|
17
|
+
"kody": "tsx src/entry.ts",
|
|
18
|
+
"build": "tsup",
|
|
19
|
+
"test": "vitest run",
|
|
20
|
+
"typecheck": "tsc --noEmit",
|
|
21
|
+
"prepublishOnly": "pnpm build"
|
|
22
|
+
},
|
|
16
23
|
"dependencies": {
|
|
17
24
|
"dotenv": "^16.4.7"
|
|
18
25
|
},
|
|
@@ -25,11 +32,5 @@
|
|
|
25
32
|
},
|
|
26
33
|
"engines": {
|
|
27
34
|
"node": ">=22"
|
|
28
|
-
},
|
|
29
|
-
"scripts": {
|
|
30
|
-
"kody": "tsx src/entry.ts",
|
|
31
|
-
"build": "tsup",
|
|
32
|
-
"test": "vitest run",
|
|
33
|
-
"typecheck": "tsc --noEmit"
|
|
34
35
|
}
|
|
35
|
-
}
|
|
36
|
+
}
|
package/prompts/autofix.md
CHANGED
|
@@ -1,39 +1,21 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: autofix
|
|
3
|
-
description:
|
|
3
|
+
description: Fix verification errors (typecheck, lint, test failures)
|
|
4
4
|
mode: primary
|
|
5
5
|
tools: [read, write, edit, bash, glob, grep]
|
|
6
6
|
---
|
|
7
7
|
|
|
8
8
|
You are an autofix agent. The verification stage failed. Fix the errors below.
|
|
9
9
|
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
4. Classify the failure pattern:
|
|
17
|
-
- **Type error**: mismatched types, missing properties, wrong generics
|
|
18
|
-
- **Test failure**: assertion mismatch, missing mock, changed behavior
|
|
19
|
-
- **Lint error**: style violation, unused import, naming convention
|
|
20
|
-
- **Runtime error**: null reference, missing dependency, config issue
|
|
21
|
-
- **Integration failure**: API contract mismatch, schema drift
|
|
22
|
-
5. Identify root cause — is this a direct error in new code, or a side effect of a change elsewhere?
|
|
23
|
-
|
|
24
|
-
## Phase 2 — Fix (only after root cause is clear)
|
|
25
|
-
1. Try quick wins first: run configured lintFix and formatFix commands via Bash
|
|
26
|
-
2. For type errors: fix the type mismatch at its source, not by adding type assertions
|
|
27
|
-
3. For test failures: fix the root cause (implementation or test), not both — determine which is correct
|
|
28
|
-
4. For lint errors: apply the specific fix the linter suggests
|
|
29
|
-
5. For integration failures: trace the contract back to its definition, fix the mismatch at source
|
|
10
|
+
STRATEGY (in order):
|
|
11
|
+
1. Try quick wins first: run `pnpm lint:fix` and `pnpm format:fix` via Bash
|
|
12
|
+
2. Read the error output carefully — understand WHAT failed and WHY
|
|
13
|
+
3. For type errors: Read the affected file, fix the type mismatch
|
|
14
|
+
4. For test failures: Read both the test and the implementation, fix the root cause
|
|
15
|
+
5. For lint errors: Apply the specific fix the linter suggests
|
|
30
16
|
6. After EACH fix, re-run the failing command to verify it passes
|
|
31
|
-
7.
|
|
32
|
-
8. Do NOT commit or push — the orchestrator handles git
|
|
17
|
+
7. Do NOT commit or push — the orchestrator handles git
|
|
33
18
|
|
|
34
|
-
|
|
35
|
-
- Fix ONLY the reported errors. Do NOT make unrelated changes.
|
|
36
|
-
- Minimal diff — use Edit for surgical changes, not Write for rewrites
|
|
37
|
-
- If the failure is pre-existing (not caused by this PR's changes), document it and move on
|
|
19
|
+
Do NOT make unrelated changes. Fix ONLY the reported errors.
|
|
38
20
|
|
|
39
21
|
{{TASK_CONTEXT}}
|