@kody-ade/kody-engine-lite 0.1.104 → 0.1.105

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75) hide show
  1. package/README.md +1 -1
  2. package/dist/bin/cli.js +759 -1037
  3. package/package.json +1 -1
  4. package/prompts/autofix.md +20 -33
  5. package/prompts/review-fix.md +8 -12
  6. package/prompts/taskify.md +2 -19
  7. package/templates/kody.yml +26 -17
  8. package/dist/agent-runner.d.ts +0 -4
  9. package/dist/agent-runner.js +0 -122
  10. package/dist/ci/parse-inputs.d.ts +0 -6
  11. package/dist/ci/parse-inputs.js +0 -76
  12. package/dist/ci/parse-safety.d.ts +0 -6
  13. package/dist/ci/parse-safety.js +0 -22
  14. package/dist/cli/args.d.ts +0 -13
  15. package/dist/cli/args.js +0 -42
  16. package/dist/cli/litellm.d.ts +0 -2
  17. package/dist/cli/litellm.js +0 -85
  18. package/dist/cli/task-resolution.d.ts +0 -2
  19. package/dist/cli/task-resolution.js +0 -41
  20. package/dist/config.d.ts +0 -49
  21. package/dist/config.js +0 -72
  22. package/dist/context.d.ts +0 -4
  23. package/dist/context.js +0 -83
  24. package/dist/definitions.d.ts +0 -3
  25. package/dist/definitions.js +0 -59
  26. package/dist/entry.d.ts +0 -1
  27. package/dist/entry.js +0 -236
  28. package/dist/git-utils.d.ts +0 -13
  29. package/dist/git-utils.js +0 -174
  30. package/dist/github-api.d.ts +0 -14
  31. package/dist/github-api.js +0 -114
  32. package/dist/kody-utils.d.ts +0 -1
  33. package/dist/kody-utils.js +0 -9
  34. package/dist/learning/auto-learn.d.ts +0 -2
  35. package/dist/learning/auto-learn.js +0 -169
  36. package/dist/logger.d.ts +0 -14
  37. package/dist/logger.js +0 -51
  38. package/dist/memory.d.ts +0 -1
  39. package/dist/memory.js +0 -20
  40. package/dist/observer.d.ts +0 -9
  41. package/dist/observer.js +0 -80
  42. package/dist/pipeline/complexity.d.ts +0 -3
  43. package/dist/pipeline/complexity.js +0 -12
  44. package/dist/pipeline/executor-registry.d.ts +0 -3
  45. package/dist/pipeline/executor-registry.js +0 -20
  46. package/dist/pipeline/hooks.d.ts +0 -17
  47. package/dist/pipeline/hooks.js +0 -110
  48. package/dist/pipeline/questions.d.ts +0 -2
  49. package/dist/pipeline/questions.js +0 -44
  50. package/dist/pipeline/runner-selection.d.ts +0 -2
  51. package/dist/pipeline/runner-selection.js +0 -13
  52. package/dist/pipeline/state.d.ts +0 -4
  53. package/dist/pipeline/state.js +0 -37
  54. package/dist/pipeline.d.ts +0 -3
  55. package/dist/pipeline.js +0 -213
  56. package/dist/preflight.d.ts +0 -1
  57. package/dist/preflight.js +0 -69
  58. package/dist/retrospective.d.ts +0 -26
  59. package/dist/retrospective.js +0 -211
  60. package/dist/stages/agent.d.ts +0 -2
  61. package/dist/stages/agent.js +0 -94
  62. package/dist/stages/gate.d.ts +0 -2
  63. package/dist/stages/gate.js +0 -32
  64. package/dist/stages/review.d.ts +0 -2
  65. package/dist/stages/review.js +0 -32
  66. package/dist/stages/ship.d.ts +0 -3
  67. package/dist/stages/ship.js +0 -154
  68. package/dist/stages/verify.d.ts +0 -2
  69. package/dist/stages/verify.js +0 -94
  70. package/dist/types.d.ts +0 -61
  71. package/dist/types.js +0 -1
  72. package/dist/validators.d.ts +0 -8
  73. package/dist/validators.js +0 -42
  74. package/dist/verify-runner.d.ts +0 -11
  75. package/dist/verify-runner.js +0 -110
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@kody-ade/kody-engine-lite",
3
- "version": "0.1.104",
3
+ "version": "0.1.105",
4
4
  "description": "Autonomous SDLC pipeline: Kody orchestration + Claude Code + LiteLLM",
5
5
  "license": "MIT",
6
6
  "type": "module",
@@ -5,48 +5,35 @@ mode: primary
5
5
  tools: [read, write, edit, bash, glob, grep]
6
6
  ---
7
7
 
8
- You are an autofix agent following the Superpowers Systematic Debugging methodology. The verification stage failed. Fix the errors below.
8
+ You are an autofix agent. The verification stage failed. Fix the errors below.
9
9
 
10
- IRON LAW: NO FIXES WITHOUT ROOT CAUSE INVESTIGATION FIRST. If you haven't completed Phase 1, you cannot propose fixes.
10
+ IRON LAW: NO FIXES WITHOUT INVESTIGATION FIRST. Do not jump to changing code. Understand the failure first.
11
11
 
12
- ## Phase 1 — Root Cause Investigation (BEFORE any edits)
13
- 1. Read the full error output — what exactly failed? Full stack traces, line numbers, error codes.
14
- 2. Identify the affected files — Read them to understand context.
15
- 3. Check recent changes: run `git diff HEAD~1` to see what changed.
16
- 4. Trace the data flow backward — find the original trigger, not just the symptom.
17
- 5. Classify the failure pattern:
12
+ ## Phase 1 — Investigate (do this BEFORE any edits)
13
+ 1. Read the full error output — what exactly failed?
14
+ 2. Identify the affected files — Read them to understand context
15
+ 3. Check recent changes: run `git diff HEAD~1` to see what changed
16
+ 4. Classify the failure pattern:
18
17
  - **Type error**: mismatched types, missing properties, wrong generics
19
18
  - **Test failure**: assertion mismatch, missing mock, changed behavior
20
19
  - **Lint error**: style violation, unused import, naming convention
21
20
  - **Runtime error**: null reference, missing dependency, config issue
22
21
  - **Integration failure**: API contract mismatch, schema drift
23
- 6. Identify root cause — is this a direct error in new code, or a side effect of a change elsewhere?
24
-
25
- ## Phase 2 — Pattern Analysis
26
- 1. Find working examples search for similar working code in the same codebase.
27
- 2. Compare against the working version what's different?
28
- 3. Form a single hypothesis: "I think X is the root cause because Y."
29
-
30
- ## Phase 3 Fix (only after root cause is clear)
31
- 1. Try quick wins first: run configured lintFix and formatFix commands via Bash.
32
- 2. Implement a single fix ONE change at a time, not multiple changes at once.
33
- 3. For type errors: fix the type mismatch at its source, not by adding type assertions.
34
- 4. For test failures: fix the root cause (implementation or test), not both — determine which is correct.
35
- 5. For lint errors: apply the specific fix the linter suggests.
36
- 6. For integration failures: trace the contract back to its definition, fix the mismatch at source.
37
- 7. After EACH fix, re-run the failing command to verify it passes.
38
- 8. If a fix introduces new failures, REVERT and try a different approach — don't pile fixes.
39
- 9. Do NOT commit or push — the orchestrator handles git.
40
-
41
- ## Red Flags — STOP and return to Phase 1 if you catch yourself:
42
- - "Quick fix for now, investigate later"
43
- - "Just try changing X and see"
44
- - "I don't fully understand but this might work"
45
- - Proposing solutions before tracing the data flow
22
+ 5. Identify root cause — is this a direct error in new code, or a side effect of a change elsewhere?
23
+
24
+ ## Phase 2 — Fix (only after root cause is clear)
25
+ 1. Try quick wins first: run configured lintFix and formatFix commands via Bash
26
+ 2. For type errors: fix the type mismatch at its source, not by adding type assertions
27
+ 3. For test failures: fix the root cause (implementation or test), not both — determine which is correct
28
+ 4. For lint errors: apply the specific fix the linter suggests
29
+ 5. For integration failures: trace the contract back to its definition, fix the mismatch at source
30
+ 6. After EACH fix, re-run the failing command to verify it passes
31
+ 7. If a fix introduces new failures, REVERT and try a different approach
32
+ 8. Do NOT commit or push — the orchestrator handles git
46
33
 
47
34
  ## Rules
48
35
  - Fix ONLY the reported errors. Do NOT make unrelated changes.
49
- - Minimal diff — use Edit for surgical changes, not Write for rewrites.
50
- - If the failure is pre-existing (not caused by this PR's changes), document it and move on.
36
+ - Minimal diff — use Edit for surgical changes, not Write for rewrites
37
+ - If the failure is pre-existing (not caused by this PR's changes), document it and move on
51
38
 
52
39
  {{TASK_CONTEXT}}
@@ -5,23 +5,19 @@ mode: primary
5
5
  tools: [read, write, edit, bash, glob, grep]
6
6
  ---
7
7
 
8
- You are a review-fix agent following the Superpowers Executing Plans methodology.
8
+ You are a review-fix agent. The code review found issues that need fixing.
9
9
 
10
- The code review found issues that need fixing. Treat each Critical/Major finding as a plan step — execute in order, verify after each one.
11
-
12
- RULES (Superpowers Executing Plans discipline):
10
+ RULES:
13
11
  1. Fix ONLY Critical and Major issues (ignore Minor findings)
14
12
  2. Use Edit for surgical changes — do NOT rewrite entire files
15
13
  3. Run tests after EACH fix to verify nothing breaks
16
- 4. If a fix introduces new issues, revert and try a different approach — don't pile fixes
17
- 5. Document any deviations from the expected fix
18
- 6. Do NOT commit or push — the orchestrator handles git
14
+ 4. If a fix introduces new issues, revert and try a different approach
15
+ 5. Do NOT commit or push — the orchestrator handles git
19
16
 
20
- For each Critical/Major finding:
17
+ Read the review findings carefully. For each Critical/Major finding:
21
18
  1. Read the affected file to understand full context
22
- 2. Understand the root cause don't just patch the symptom
23
- 3. Make the minimal change to fix the issue
24
- 4. Run tests to verify the fix
25
- 5. Move to the next finding
19
+ 2. Make the minimal change to fix the issue
20
+ 3. Run tests to verify the fix
21
+ 4. Move to the next finding
26
22
 
27
23
  {{TASK_CONTEXT}}
@@ -7,15 +7,7 @@ tools: [read, glob, grep]
7
7
 
8
8
  You are a task classification agent following the Superpowers Brainstorming methodology.
9
9
 
10
- ## MANDATORY: Explore Before Classifying
11
-
12
- Before classifying, you MUST explore the project context:
13
- 1. **Examine the codebase** — Use Read, Glob, and Grep to understand project structure, existing patterns, and affected files.
14
- 2. **Find existing solutions** — Search for how similar problems are already solved in this codebase. If a pattern exists, the task should reuse it.
15
- 3. **Challenge assumptions** — Does the task description assume an approach? Are there simpler alternatives? Apply YAGNI ruthlessly.
16
- 4. **Identify ambiguity** — Could the requirements be interpreted two ways? Are there missing edge case decisions?
17
-
18
- ## Output
10
+ Before classifying, examine the codebase to understand the project structure, existing patterns, and affected files. Use Read, Glob, and Grep to explore.
19
11
 
20
12
  Output ONLY valid JSON. No markdown fences. No explanation. No extra text before or after the JSON.
21
13
 
@@ -27,7 +19,6 @@ Required JSON format:
27
19
  "scope": ["list", "of", "exact/file/paths", "affected"],
28
20
  "risk_level": "low | medium | high",
29
21
  "hasUI": true,
30
- "existing_patterns": ["list of existing patterns found that the implementation should reuse"],
31
22
  "questions": []
32
23
  }
33
24
 
@@ -40,17 +31,9 @@ Risk level heuristics:
40
31
  - medium: multiple files, possible side effects, API changes, new dependencies, refactoring existing logic
41
32
  - high: core business logic, data migrations, security, authentication, payment processing, database schema changes
42
33
 
43
- existing_patterns rules:
44
- - List patterns found in the codebase that are relevant to this task
45
- - Include the file path and a brief description of the pattern
46
- - If no relevant patterns exist, use an empty array []
47
- - These inform the planner — reuse existing solutions, don't invent new ones
48
-
49
- Questions rules (Superpowers Brainstorming discipline):
34
+ Questions rules:
50
35
  - ONLY ask product/requirements questions — things you CANNOT determine by reading code
51
36
  - Ask about: unclear scope, missing acceptance criteria, ambiguous user behavior, missing edge case decisions
52
- - Challenge assumptions — if the task implies an approach, consider simpler alternatives
53
- - Check for ambiguity — could requirements be interpreted two ways?
54
37
  - Do NOT ask about technical implementation — that is the planner's job
55
38
  - Do NOT ask about things you can find by reading the codebase (file structure, frameworks, patterns)
56
39
  - If the task is clear and complete, leave questions as an empty array []
@@ -105,18 +105,10 @@ jobs:
105
105
  env:
106
106
  BODY: ${{ github.event.comment.body }}
107
107
  run: |
108
- # Strip carriage returns — GitHub comments may contain \r\n line endings
109
- BODY=$(printf '%s' "$BODY" | tr -d '\r')
110
-
111
108
  # Extract: @kody [mode] [task-id] [--from stage]
112
109
  KODY_ARGS=$(echo "$BODY" | grep -oP '(?:@kody|/kody)\s+\K.*' || echo "")
113
- MODE=$(echo "$KODY_ARGS" | awk '{print $1}')
114
- RAW_TASK_ID=$(echo "$KODY_ARGS" | awk '{print $2}')
115
- # Don't treat flags (--from, --feedback) as task IDs
116
- case "$RAW_TASK_ID" in
117
- --*) TASK_ID="" ;;
118
- *) TASK_ID="$RAW_TASK_ID" ;;
119
- esac
110
+
111
+ # Extract flags first (before positional parsing)
120
112
  FROM_STAGE=$(echo "$KODY_ARGS" | grep -oP '(?<=--from )\S+' || echo "")
121
113
  FEEDBACK=$(echo "$KODY_ARGS" | grep -oP '(?<=--feedback ")[^"]*' || echo "")
122
114
  COMPLEXITY=""
@@ -128,12 +120,24 @@ jobs:
128
120
  DRY_RUN="true"
129
121
  fi
130
122
 
131
- # Validate mode
123
+ # Strip flags and their values for clean positional parsing
124
+ POSITIONAL=$(echo "$KODY_ARGS" | sed -E \
125
+ -e 's/--from\s+\S+//g' \
126
+ -e 's/--feedback\s+"[^"]*"//g' \
127
+ -e 's/--complexity\s+\S+//g' \
128
+ -e 's/--dry-run//g' \
129
+ -e 's/--ci-run-id\s+\S+//g' \
130
+ -e 's/\s+/ /g' -e 's/^ //' -e 's/ $//')
131
+
132
+ MODE=$(echo "$POSITIONAL" | awk '{print $1}')
133
+ TASK_ID=$(echo "$POSITIONAL" | awk '{print $2}')
134
+
135
+ # Validate mode — after flag stripping, only positional args remain
132
136
  case "$MODE" in
133
137
  full|rerun|fix|fix-ci|status|approve|review|resolve|bootstrap) ;;
134
138
  *)
135
- # If first arg isn't a mode, it might be a task-id or nothing
136
- if [ -n "$MODE" ] && [ "$MODE" != "" ]; then
139
+ # First positional isn't a known mode — treat as task-id
140
+ if [ -n "$MODE" ]; then
137
141
  TASK_ID="$MODE"
138
142
  fi
139
143
  MODE="full"
@@ -251,6 +255,9 @@ jobs:
251
255
 
252
256
  - run: pnpm install --frozen-lockfile
253
257
 
258
+ - name: Install Kody Engine
259
+ run: npm install -g @kody-ade/kody-engine-lite
260
+
254
261
  - name: Install Claude Code CLI
255
262
  run: npm install -g @anthropic-ai/claude-code
256
263
 
@@ -280,7 +287,7 @@ jobs:
280
287
  run: |
281
288
  if [ "$MODE" = "bootstrap" ]; then
282
289
  echo "Running bootstrap..."
283
- npx kody-engine-lite bootstrap
290
+ kody-engine-lite bootstrap
284
291
  else
285
292
  CMD="run"
286
293
  [ "$MODE" = "rerun" ] && CMD="rerun"
@@ -296,7 +303,7 @@ jobs:
296
303
  # FEEDBACK is also passed via env var (avoids shell escaping issues)
297
304
  [ -n "$FEEDBACK" ] && ARGS="$ARGS --feedback \"$FEEDBACK\""
298
305
  [ "$DRY_RUN" = "true" ] && ARGS="$ARGS --dry-run"
299
- npx kody-engine-lite $CMD $ARGS
306
+ kody-engine-lite $CMD $ARGS
300
307
  fi
301
308
 
302
309
  - name: Pipeline summary
@@ -489,15 +496,17 @@ jobs:
489
496
  node-version: 22
490
497
  cache: pnpm
491
498
  - run: pnpm install --frozen-lockfile
499
+ - name: Install Kody Engine
500
+ run: npm install -g @kody-ade/kody-engine-lite
492
501
  - name: Typecheck
493
502
  run: pnpm tsc --noEmit
494
503
  - name: CLI loads
495
- run: npx kody-engine-lite --help
504
+ run: kody-engine-lite --help
496
505
  - name: Dry run
497
506
  run: |
498
507
  mkdir -p .kody/tasks/smoke-test
499
508
  echo "Smoke test task" > .kody/tasks/smoke-test/task.md
500
- npx kody-engine-lite run --task-id smoke-test --dry-run || true
509
+ kody-engine-lite run --task-id smoke-test --dry-run || true
501
510
  if [ -f ".kody/tasks/smoke-test/status.json" ]; then
502
511
  echo "✓ status.json created"
503
512
  cat .kody/tasks/smoke-test/status.json
@@ -1,4 +0,0 @@
1
- import type { AgentRunner } from "./types.js";
2
- import type { KodyConfig } from "./config.js";
3
- export declare function createClaudeCodeRunner(): AgentRunner;
4
- export declare function createRunners(config: KodyConfig): Record<string, AgentRunner>;
@@ -1,122 +0,0 @@
1
- import { spawn, execFileSync } from "child_process";
2
- const SIGKILL_GRACE_MS = 5000;
3
- const STDERR_TAIL_CHARS = 500;
4
- function writeStdin(child, prompt) {
5
- return new Promise((resolve, reject) => {
6
- if (!child.stdin) {
7
- resolve();
8
- return;
9
- }
10
- child.stdin.write(prompt, (err) => {
11
- if (err)
12
- reject(err);
13
- else {
14
- child.stdin.end();
15
- resolve();
16
- }
17
- });
18
- });
19
- }
20
- function waitForProcess(child, timeout) {
21
- return new Promise((resolve) => {
22
- const stdoutChunks = [];
23
- const stderrChunks = [];
24
- child.stdout?.on("data", (chunk) => stdoutChunks.push(chunk));
25
- child.stderr?.on("data", (chunk) => stderrChunks.push(chunk));
26
- const timer = setTimeout(() => {
27
- child.kill("SIGTERM");
28
- setTimeout(() => {
29
- if (!child.killed)
30
- child.kill("SIGKILL");
31
- }, SIGKILL_GRACE_MS);
32
- }, timeout);
33
- child.on("exit", (code) => {
34
- clearTimeout(timer);
35
- resolve({
36
- code,
37
- stdout: Buffer.concat(stdoutChunks).toString(),
38
- stderr: Buffer.concat(stderrChunks).toString(),
39
- });
40
- });
41
- child.on("error", (err) => {
42
- clearTimeout(timer);
43
- resolve({ code: -1, stdout: "", stderr: err.message });
44
- });
45
- });
46
- }
47
- async function runSubprocess(command, args, prompt, timeout, options) {
48
- const child = spawn(command, args, {
49
- cwd: options?.cwd ?? process.cwd(),
50
- env: {
51
- ...process.env,
52
- SKIP_BUILD: "1",
53
- SKIP_HOOKS: "1",
54
- ...options?.env,
55
- },
56
- stdio: ["pipe", "pipe", "pipe"],
57
- });
58
- try {
59
- await writeStdin(child, prompt);
60
- }
61
- catch (err) {
62
- return {
63
- outcome: "failed",
64
- error: `Failed to send prompt: ${err instanceof Error ? err.message : String(err)}`,
65
- };
66
- }
67
- const { code, stdout, stderr } = await waitForProcess(child, timeout);
68
- if (code === 0) {
69
- return { outcome: "completed", output: stdout };
70
- }
71
- return {
72
- outcome: code === null ? "timed_out" : "failed",
73
- error: `Exit code ${code}\n${stderr.slice(-STDERR_TAIL_CHARS)}`,
74
- };
75
- }
76
- function checkCommand(command, args) {
77
- try {
78
- execFileSync(command, args, { timeout: 10_000, stdio: "pipe" });
79
- return true;
80
- }
81
- catch {
82
- return false;
83
- }
84
- }
85
- // ─── Claude Code Runner ──────────────────────────────────────────────────────
86
- export function createClaudeCodeRunner() {
87
- return {
88
- async run(_stageName, prompt, model, timeout, _taskDir, options) {
89
- return runSubprocess("claude", [
90
- "--print",
91
- "--model", model,
92
- "--dangerously-skip-permissions",
93
- "--allowedTools", "Bash,Edit,Read,Write,Glob,Grep",
94
- ], prompt, timeout, options);
95
- },
96
- async healthCheck() {
97
- return checkCommand("claude", ["--version"]);
98
- },
99
- };
100
- }
101
- // ─── Runner Factory ──────────────────────────────────────────────────────────
102
- const RUNNER_FACTORIES = {
103
- "claude-code": createClaudeCodeRunner,
104
- };
105
- export function createRunners(config) {
106
- // New multi-runner config
107
- if (config.agent.runners && Object.keys(config.agent.runners).length > 0) {
108
- const runners = {};
109
- for (const [name, runnerConfig] of Object.entries(config.agent.runners)) {
110
- const factory = RUNNER_FACTORIES[runnerConfig.type];
111
- if (factory) {
112
- runners[name] = factory();
113
- }
114
- }
115
- return runners;
116
- }
117
- // Legacy single-runner fallback
118
- const runnerType = config.agent.runner ?? "claude-code";
119
- const factory = RUNNER_FACTORIES[runnerType];
120
- const defaultName = config.agent.defaultRunner ?? "claude";
121
- return { [defaultName]: factory ? factory() : createClaudeCodeRunner() };
122
- }
@@ -1,6 +0,0 @@
1
- /**
2
- * Parses @kody / /kody comment body into structured inputs.
3
- * Run by the parse job in GitHub Actions.
4
- * Reads from env, writes to $GITHUB_OUTPUT.
5
- */
6
- export {};
@@ -1,76 +0,0 @@
1
- /**
2
- * Parses @kody / /kody comment body into structured inputs.
3
- * Run by the parse job in GitHub Actions.
4
- * Reads from env, writes to $GITHUB_OUTPUT.
5
- */
6
- import * as fs from "fs";
7
- const outputFile = process.env.GITHUB_OUTPUT;
8
- const triggerType = process.env.TRIGGER_TYPE ?? "dispatch";
9
- function output(key, value) {
10
- if (outputFile) {
11
- fs.appendFileSync(outputFile, `${key}=${value}\n`);
12
- }
13
- console.log(`${key}=${value}`);
14
- }
15
- // For workflow_dispatch, pass through inputs
16
- if (triggerType === "dispatch") {
17
- output("task_id", process.env.INPUT_TASK_ID ?? "");
18
- output("mode", process.env.INPUT_MODE ?? "full");
19
- output("from_stage", process.env.INPUT_FROM_STAGE ?? "");
20
- output("issue_number", process.env.INPUT_ISSUE_NUMBER ?? "");
21
- output("feedback", process.env.INPUT_FEEDBACK ?? "");
22
- output("valid", process.env.INPUT_TASK_ID ? "true" : "false");
23
- output("trigger_type", "dispatch");
24
- process.exit(0);
25
- }
26
- // For issue_comment, parse the comment body
27
- const commentBody = process.env.COMMENT_BODY ?? "";
28
- const issueNumber = process.env.ISSUE_NUMBER ?? "";
29
- // Match: @kody [mode] [task-id] [--from stage] [--feedback "text"]
30
- const kodyMatch = commentBody.match(/(?:@kody|\/kody)\s*(.*)/i);
31
- if (!kodyMatch) {
32
- output("valid", "false");
33
- output("trigger_type", "comment");
34
- process.exit(0);
35
- }
36
- const parts = kodyMatch[1].trim().split(/\s+/);
37
- const validModes = ["full", "rerun", "status"];
38
- let mode = "full";
39
- let taskId = "";
40
- let fromStage = "";
41
- let feedback = "";
42
- let i = 0;
43
- // First arg: mode or task-id
44
- if (parts[i] && validModes.includes(parts[i])) {
45
- mode = parts[i];
46
- i++;
47
- }
48
- // Second arg: task-id
49
- if (parts[i] && !parts[i].startsWith("--")) {
50
- taskId = parts[i];
51
- i++;
52
- }
53
- // Named args
54
- while (i < parts.length) {
55
- if (parts[i] === "--from" && parts[i + 1]) {
56
- fromStage = parts[i + 1];
57
- i += 2;
58
- }
59
- else if (parts[i] === "--feedback" && parts[i + 1]) {
60
- // Collect quoted feedback
61
- const rest = parts.slice(i + 1).join(" ");
62
- const quoted = rest.match(/^"([^"]*)"/);
63
- feedback = quoted ? quoted[1] : parts[i + 1];
64
- break;
65
- }
66
- else {
67
- i++;
68
- }
69
- }
70
- output("task_id", taskId);
71
- output("mode", mode);
72
- output("from_stage", fromStage);
73
- output("issue_number", issueNumber);
74
- output("feedback", feedback);
75
- output("valid", taskId ? "true" : "false");
76
- output("trigger_type", "comment");
@@ -1,6 +0,0 @@
1
- /**
2
- * Validates that a comment trigger is safe to execute.
3
- * Run by the parse job in GitHub Actions.
4
- * Reads from env, writes to $GITHUB_OUTPUT.
5
- */
6
- export {};
@@ -1,22 +0,0 @@
1
- /**
2
- * Validates that a comment trigger is safe to execute.
3
- * Run by the parse job in GitHub Actions.
4
- * Reads from env, writes to $GITHUB_OUTPUT.
5
- */
6
- import * as fs from "fs";
7
- const ALLOWED_ASSOCIATIONS = ["COLLABORATOR", "MEMBER", "OWNER"];
8
- const association = process.env.COMMENT_AUTHOR_ASSOCIATION ?? "";
9
- const outputFile = process.env.GITHUB_OUTPUT;
10
- function output(key, value) {
11
- if (outputFile) {
12
- fs.appendFileSync(outputFile, `${key}=${value}\n`);
13
- }
14
- console.log(`${key}=${value}`);
15
- }
16
- if (!ALLOWED_ASSOCIATIONS.includes(association)) {
17
- output("valid", "false");
18
- output("reason", `Author association '${association}' not in allowlist: ${ALLOWED_ASSOCIATIONS.join(", ")}`);
19
- process.exit(0);
20
- }
21
- output("valid", "true");
22
- output("reason", "");
@@ -1,13 +0,0 @@
1
- export interface CliInput {
2
- command: "run" | "rerun" | "fix" | "status";
3
- taskId?: string;
4
- task?: string;
5
- fromStage?: string;
6
- dryRun?: boolean;
7
- cwd?: string;
8
- issueNumber?: number;
9
- feedback?: string;
10
- local?: boolean;
11
- complexity?: "low" | "medium" | "high";
12
- }
13
- export declare function parseArgs(): CliInput;
package/dist/cli/args.js DELETED
@@ -1,42 +0,0 @@
1
- const isCI = !!process.env.GITHUB_ACTIONS;
2
- function getArg(args, flag) {
3
- const idx = args.indexOf(flag);
4
- if (idx !== -1 && args[idx + 1] && !args[idx + 1].startsWith("--")) {
5
- return args[idx + 1];
6
- }
7
- return undefined;
8
- }
9
- function hasFlag(args, flag) {
10
- return args.includes(flag);
11
- }
12
- export function parseArgs() {
13
- const args = process.argv.slice(2);
14
- if (hasFlag(args, "--help") || hasFlag(args, "-h") || args.length === 0) {
15
- console.log(`Usage:
16
- kody run --task-id <id> [--task "<desc>"] [--cwd <path>] [--issue-number <n>] [--complexity low|medium|high] [--feedback "<text>"] [--local] [--dry-run]
17
- kody rerun --task-id <id> --from <stage> [--cwd <path>] [--issue-number <n>]
18
- kody fix --task-id <id> [--cwd <path>] [--issue-number <n>] [--feedback "<text>"]
19
- kody status --task-id <id> [--cwd <path>]
20
- kody --help`);
21
- process.exit(0);
22
- }
23
- const command = args[0];
24
- if (!["run", "rerun", "fix", "status"].includes(command)) {
25
- console.error(`Unknown command: ${command}`);
26
- process.exit(1);
27
- }
28
- const issueStr = getArg(args, "--issue-number") ?? process.env.ISSUE_NUMBER;
29
- const localFlag = hasFlag(args, "--local");
30
- return {
31
- command,
32
- taskId: getArg(args, "--task-id") ?? process.env.TASK_ID,
33
- task: getArg(args, "--task"),
34
- fromStage: getArg(args, "--from") ?? process.env.FROM_STAGE,
35
- dryRun: hasFlag(args, "--dry-run") || process.env.DRY_RUN === "true",
36
- cwd: getArg(args, "--cwd"),
37
- issueNumber: issueStr ? parseInt(issueStr, 10) : undefined,
38
- feedback: getArg(args, "--feedback") ?? process.env.FEEDBACK,
39
- local: localFlag || (!isCI && !hasFlag(args, "--no-local")),
40
- complexity: (getArg(args, "--complexity") ?? process.env.COMPLEXITY),
41
- };
42
- }
@@ -1,2 +0,0 @@
1
- export declare function checkLitellmHealth(url: string): Promise<boolean>;
2
- export declare function tryStartLitellm(url: string, projectDir: string): Promise<ReturnType<typeof import("child_process").spawn> | null>;
@@ -1,85 +0,0 @@
1
- import * as fs from "fs";
2
- import * as path from "path";
3
- import { execFileSync } from "child_process";
4
- import { logger } from "../logger.js";
5
- export async function checkLitellmHealth(url) {
6
- try {
7
- const response = await fetch(`${url}/health`, { signal: AbortSignal.timeout(3000) });
8
- return response.ok;
9
- }
10
- catch {
11
- return false;
12
- }
13
- }
14
- export async function tryStartLitellm(url, projectDir) {
15
- const configPath = path.join(projectDir, "litellm-config.yaml");
16
- if (!fs.existsSync(configPath)) {
17
- logger.warn("litellm-config.yaml not found — cannot start proxy");
18
- return null;
19
- }
20
- // Extract port from URL
21
- const portMatch = url.match(/:(\d+)/);
22
- const port = portMatch ? portMatch[1] : "4000";
23
- // Check if litellm is installed
24
- try {
25
- execFileSync("litellm", ["--version"], { timeout: 5000, stdio: "pipe" });
26
- }
27
- catch {
28
- try {
29
- execFileSync("python3", ["-m", "litellm", "--version"], { timeout: 5000, stdio: "pipe" });
30
- }
31
- catch {
32
- logger.warn("litellm not installed (pip install 'litellm[proxy]')");
33
- return null;
34
- }
35
- }
36
- logger.info(`Starting LiteLLM proxy on port ${port}...`);
37
- // Determine command
38
- let cmd;
39
- let args;
40
- try {
41
- execFileSync("litellm", ["--version"], { timeout: 5000, stdio: "pipe" });
42
- cmd = "litellm";
43
- args = ["--config", configPath, "--port", port];
44
- }
45
- catch {
46
- cmd = "python3";
47
- args = ["-m", "litellm", "--config", configPath, "--port", port];
48
- }
49
- // Load API key env vars from project .env (only *_API_KEY patterns)
50
- const dotenvPath = path.join(projectDir, ".env");
51
- const dotenvVars = {};
52
- if (fs.existsSync(dotenvPath)) {
53
- for (const line of fs.readFileSync(dotenvPath, "utf-8").split("\n")) {
54
- const match = line.match(/^([A-Z_][A-Z0-9_]*_API_KEY)=(.*)$/);
55
- if (match)
56
- dotenvVars[match[1]] = match[2];
57
- }
58
- if (Object.keys(dotenvVars).length > 0) {
59
- logger.info(` Loaded API keys: ${Object.keys(dotenvVars).join(", ")}`);
60
- }
61
- }
62
- const { spawn } = await import("child_process");
63
- const child = spawn(cmd, args, {
64
- stdio: ["ignore", "pipe", "pipe"],
65
- detached: true,
66
- env: { ...process.env, ...dotenvVars },
67
- });
68
- // Capture stderr for debugging
69
- let proxyStderr = "";
70
- child.stderr?.on("data", (chunk) => { proxyStderr += chunk.toString(); });
71
- // Wait for health
72
- for (let i = 0; i < 30; i++) {
73
- await new Promise((r) => setTimeout(r, 2000));
74
- if (await checkLitellmHealth(url)) {
75
- logger.info(`LiteLLM proxy ready at ${url}`);
76
- return child;
77
- }
78
- }
79
- if (proxyStderr) {
80
- logger.warn(`LiteLLM stderr: ${proxyStderr.slice(-1000)}`);
81
- }
82
- logger.warn("LiteLLM proxy failed to start within 60s");
83
- child.kill();
84
- return null;
85
- }
@@ -1,2 +0,0 @@
1
- export declare function findLatestTaskForIssue(issueNumber: number, projectDir: string): string | null;
2
- export declare function generateTaskId(): string;