wiggum-cli 0.16.0 → 0.17.0
This diff shows the changes between publicly released versions of this package as published to a supported registry. It is provided for informational purposes only.
- package/bin/ralph.js +0 -0
- package/dist/agent/memory/ingest.d.ts +14 -0
- package/dist/agent/memory/ingest.js +77 -0
- package/dist/agent/memory/store.d.ts +15 -0
- package/dist/agent/memory/store.js +98 -0
- package/dist/agent/memory/types.d.ts +16 -0
- package/dist/agent/memory/types.js +14 -0
- package/dist/agent/orchestrator.d.ts +7 -0
- package/dist/agent/orchestrator.js +266 -0
- package/dist/agent/resolve-config.d.ts +26 -0
- package/dist/agent/resolve-config.js +43 -0
- package/dist/agent/tools/backlog.d.ts +27 -0
- package/dist/agent/tools/backlog.js +51 -0
- package/dist/agent/tools/dry-run.d.ts +106 -0
- package/dist/agent/tools/dry-run.js +119 -0
- package/dist/agent/tools/execution.d.ts +51 -0
- package/dist/agent/tools/execution.js +256 -0
- package/dist/agent/tools/feature-state.d.ts +43 -0
- package/dist/agent/tools/feature-state.js +184 -0
- package/dist/agent/tools/introspection.d.ts +23 -0
- package/dist/agent/tools/introspection.js +40 -0
- package/dist/agent/tools/memory.d.ts +44 -0
- package/dist/agent/tools/memory.js +99 -0
- package/dist/agent/tools/preflight.d.ts +7 -0
- package/dist/agent/tools/preflight.js +137 -0
- package/dist/agent/tools/reporting.d.ts +58 -0
- package/dist/agent/tools/reporting.js +119 -0
- package/dist/agent/tools/schemas.d.ts +2 -0
- package/dist/agent/tools/schemas.js +3 -0
- package/dist/agent/types.d.ts +45 -0
- package/dist/agent/types.js +1 -0
- package/dist/ai/conversation/conversation-manager.js +8 -0
- package/dist/ai/conversation/url-fetcher.js +27 -0
- package/dist/ai/providers.js +5 -5
- package/dist/commands/agent.d.ts +17 -0
- package/dist/commands/agent.js +114 -0
- package/dist/commands/monitor.js +50 -183
- package/dist/commands/new-auto.d.ts +15 -0
- package/dist/commands/new-auto.js +237 -0
- package/dist/commands/run.js +20 -10
- package/dist/commands/sync.d.ts +15 -0
- package/dist/commands/sync.js +68 -0
- package/dist/generator/config.d.ts +1 -41
- package/dist/generator/config.js +7 -0
- package/dist/generator/index.d.ts +2 -2
- package/dist/generator/templates.d.ts +2 -0
- package/dist/generator/templates.js +9 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +115 -4
- package/dist/repl/command-parser.d.ts +5 -0
- package/dist/repl/command-parser.js +5 -0
- package/dist/templates/prompts/PROMPT.md.tmpl +13 -10
- package/dist/templates/prompts/PROMPT_e2e.md.tmpl +13 -7
- package/dist/templates/prompts/PROMPT_feature.md.tmpl +16 -3
- package/dist/templates/prompts/PROMPT_review_auto.md.tmpl +32 -12
- package/dist/templates/prompts/PROMPT_review_manual.md.tmpl +4 -1
- package/dist/templates/prompts/PROMPT_review_merge.md.tmpl +39 -14
- package/dist/templates/prompts/PROMPT_verify.md.tmpl +5 -2
- package/dist/templates/scripts/feature-loop.sh.tmpl +441 -69
- package/dist/tui/app.d.ts +19 -2
- package/dist/tui/app.js +22 -4
- package/dist/tui/components/IssuePicker.d.ts +27 -0
- package/dist/tui/components/IssuePicker.js +64 -0
- package/dist/tui/components/RunCompletionSummary.js +6 -3
- package/dist/tui/hooks/useAgentOrchestrator.d.ts +29 -0
- package/dist/tui/hooks/useAgentOrchestrator.js +453 -0
- package/dist/tui/orchestration/interview-orchestrator.d.ts +5 -1
- package/dist/tui/orchestration/interview-orchestrator.js +27 -6
- package/dist/tui/screens/AgentScreen.d.ts +21 -0
- package/dist/tui/screens/AgentScreen.js +159 -0
- package/dist/tui/screens/InitScreen.js +4 -0
- package/dist/tui/screens/InterviewScreen.d.ts +3 -1
- package/dist/tui/screens/InterviewScreen.js +146 -10
- package/dist/tui/screens/MainShell.d.ts +1 -1
- package/dist/tui/screens/MainShell.js +36 -1
- package/dist/tui/screens/RunScreen.js +38 -6
- package/dist/tui/utils/build-run-summary.d.ts +1 -1
- package/dist/tui/utils/build-run-summary.js +40 -84
- package/dist/tui/utils/clear-screen.d.ts +14 -0
- package/dist/tui/utils/clear-screen.js +16 -0
- package/dist/tui/utils/loop-status.d.ts +41 -1
- package/dist/tui/utils/loop-status.js +243 -35
- package/dist/tui/utils/pr-summary.d.ts +3 -2
- package/dist/tui/utils/pr-summary.js +41 -6
- package/dist/utils/config.d.ts +8 -0
- package/dist/utils/config.js +8 -0
- package/dist/utils/github.d.ts +32 -0
- package/dist/utils/github.js +106 -0
- package/package.json +4 -1
- package/src/templates/prompts/PROMPT.md.tmpl +13 -10
- package/src/templates/prompts/PROMPT_e2e.md.tmpl +13 -7
- package/src/templates/prompts/PROMPT_feature.md.tmpl +16 -3
- package/src/templates/prompts/PROMPT_review_auto.md.tmpl +32 -12
- package/src/templates/prompts/PROMPT_review_manual.md.tmpl +4 -1
- package/src/templates/prompts/PROMPT_review_merge.md.tmpl +39 -14
- package/src/templates/prompts/PROMPT_verify.md.tmpl +5 -2
- package/src/templates/scripts/feature-loop.sh.tmpl +441 -69
package/bin/ralph.js
CHANGED
|
File without changes
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import type { MemoryStore } from './store.js';
/**
 * Ingest strategic docs (markdown files under `.ralph/strategic`) as
 * lightweight catalog entries (filename + summary).
 * Full content is read on-demand via the readStrategicDoc tool.
 * Resolves with the number of newly ingested documents; documents already
 * recorded in the store are skipped.
 */
export declare function ingestStrategicDocs(projectRoot: string, store: MemoryStore): Promise<number>;
/**
 * List available strategic doc filenames.
 * Returns only `*.md` names; empty array when the docs directory is missing.
 */
export declare function listStrategicDocs(projectRoot: string): Promise<string[]>;
/**
 * Read the full content of a strategic doc.
 * Returns null when the file cannot be read or when `filename` would
 * resolve outside the docs directory (path traversal guard).
 */
export declare function readStrategicDoc(projectRoot: string, filename: string): Promise<string | null>;
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
import { readdir, readFile } from 'node:fs/promises';
import { join, resolve, sep } from 'node:path';
import { createMemoryEntry } from './types.js';
|
|
4
|
+
const SUMMARY_LENGTH = 300;
/**
 * Build a short summary from the first non-blank lines of a document,
 * truncated to SUMMARY_LENGTH characters with a trailing ellipsis.
 */
function summarize(content) {
    const nonBlank = [];
    for (const line of content.split('\n')) {
        if (line.trim())
            nonBlank.push(line);
    }
    const head = nonBlank.slice(0, 8).join('\n');
    if (head.length > SUMMARY_LENGTH)
        return head.slice(0, SUMMARY_LENGTH) + '…';
    return head;
}
|
|
15
|
+
/**
 * Ingest strategic docs as lightweight catalog entries (filename + summary).
 * Full content is read on-demand via the readStrategicDoc tool.
 * Returns the number of documents newly ingested.
 */
export async function ingestStrategicDocs(projectRoot, store) {
    const docsDir = join(projectRoot, '.ralph', 'strategic');
    let names;
    try {
        names = await readdir(docsDir);
    }
    catch {
        // Docs directory does not exist — nothing to ingest.
        return 0;
    }
    const markdownFiles = names.filter(name => name.endsWith('.md'));
    if (markdownFiles.length === 0)
        return 0;
    // Collect filenames already recorded via a `source:<file>` tag so that
    // re-running ingestion never creates duplicate catalog entries.
    const existing = await store.read({ type: 'strategic_context' });
    const seen = new Set();
    for (const entry of existing) {
        for (const tag of entry.tags ?? []) {
            if (tag.startsWith('source:'))
                seen.add(tag.slice('source:'.length));
        }
    }
    let ingested = 0;
    for (const name of markdownFiles) {
        if (seen.has(name))
            continue;
        const body = await readFile(join(docsDir, name), 'utf-8');
        await store.append(createMemoryEntry({
            type: 'strategic_context',
            content: `[${name}] ${summarize(body)}`,
            tags: [`source:${name}`],
        }));
        ingested += 1;
    }
    return ingested;
}
|
|
49
|
+
/**
 * List available strategic doc filenames (`*.md` under `.ralph/strategic`).
 * Returns an empty array when the directory cannot be read.
 */
export async function listStrategicDocs(projectRoot) {
    try {
        const entries = await readdir(join(projectRoot, '.ralph', 'strategic'));
        return entries.filter(name => name.endsWith('.md'));
    }
    catch {
        return [];
    }
}
|
|
62
|
+
/**
 * Read the full content of a strategic doc.
 * Returns null when the file cannot be read or when `filename` would
 * resolve outside the docs directory.
 */
export async function readStrategicDoc(projectRoot, filename) {
    const docsDir = resolve(projectRoot, '.ralph', 'strategic');
    const resolved = resolve(docsDir, filename);
    // Prevent path traversal: the resolved path must stay strictly inside
    // docsDir. Use the platform separator (`sep`) rather than '/': on
    // Windows resolve() produces backslash-separated paths, so a hard-coded
    // '/' would reject every legitimate filename.
    if (!resolved.startsWith(docsDir + sep))
        return null;
    try {
        return await readFile(resolved, 'utf-8');
    }
    catch {
        // Missing or unreadable file is treated as "not found".
        return null;
    }
}
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
import type { MemoryEntry, MemoryType } from './types.js';
/** Filters applied by {@link MemoryStore.read}. */
export interface ReadOptions {
    /** Keep only entries of this memory type. */
    type?: MemoryType;
    /** Maximum number of entries to return (most recent first). */
    limit?: number;
    /** Case-insensitive substring match against entry content. */
    search?: string;
}
/**
 * Append-only JSONL-backed store for agent memory entries.
 * Operations are serialized through an internal promise chain (`lock`)
 * so calls within one process do not interleave file access.
 */
export declare class MemoryStore {
    private readonly filePath;
    private lock;
    constructor(dirPath: string);
    private serialize;
    /** Append one entry to the store file. */
    append(entry: MemoryEntry): Promise<void>;
    /** Read entries, filtered per `options`, most recent first. */
    read(options?: ReadOptions): Promise<MemoryEntry[]>;
    /** Remove expired entries; resolves with the number removed. */
    prune(): Promise<number>;
}
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
import { readFile, writeFile, appendFile, mkdir, rename } from 'node:fs/promises';
|
|
2
|
+
import { existsSync } from 'node:fs';
|
|
3
|
+
import { join, dirname } from 'node:path';
|
|
4
|
+
const MEMORY_FILE = 'memory.jsonl';
const FILE_MODE = 0o600;
const PRUNE_AGE_MS = 30 * 24 * 60 * 60 * 1000; // 30 days
const PERMANENT_TYPES = ['decision', 'strategic_context'];
/**
 * Append-only JSONL store for agent memory entries.
 *
 * All operations are funneled through `serialize()` so that concurrent
 * calls within one process cannot interleave file access.
 */
export class MemoryStore {
    filePath;
    lock = Promise.resolve();
    constructor(dirPath) {
        this.filePath = join(dirPath, MEMORY_FILE);
    }
    /** Queue `fn` behind all previously queued operations. */
    serialize(fn) {
        const p = this.lock.then(fn, fn);
        // Keep the chain alive whether p resolves or rejects.
        this.lock = p.then(() => { }, () => { });
        return p;
    }
    /**
     * Load and parse all entries from disk. Corrupted lines are dropped.
     * Returns [] when the file is missing or empty.
     * Must only be called from within serialize() — it does not lock itself.
     */
    async loadEntries() {
        if (!existsSync(this.filePath))
            return [];
        const raw = (await readFile(this.filePath, 'utf-8')).trim();
        if (!raw)
            return [];
        return raw
            .split('\n')
            .filter(line => line.trim())
            .map(line => {
                try {
                    return JSON.parse(line);
                }
                catch {
                    return null; // skip corrupted lines
                }
            })
            .filter((e) => e !== null);
    }
    /** Append one entry as a JSON line, creating the directory if needed. */
    async append(entry) {
        return this.serialize(async () => {
            await mkdir(dirname(this.filePath), { recursive: true });
            await appendFile(this.filePath, JSON.stringify(entry) + '\n', { encoding: 'utf-8', mode: FILE_MODE });
        });
    }
    /**
     * Read entries, optionally filtered by type and by case-insensitive
     * content search, most recent first, truncated to `options.limit`.
     */
    async read(options = {}) {
        return this.serialize(async () => {
            let entries = await this.loadEntries();
            if (options.type) {
                entries = entries.filter(e => e.type === options.type);
            }
            if (options.search) {
                const term = options.search.toLowerCase();
                entries = entries.filter(e => e.content.toLowerCase().includes(term));
            }
            // Most recent first for limit
            entries.reverse();
            if (options.limit && options.limit > 0) {
                entries = entries.slice(0, options.limit);
            }
            return entries;
        });
    }
    /**
     * Drop entries older than PRUNE_AGE_MS, keeping PERMANENT_TYPES and
     * entries with unparseable timestamps. Rewrites the file atomically
     * (temp file + rename). Resolves with the number of entries removed.
     */
    async prune() {
        return this.serialize(async () => {
            const entries = await this.loadEntries();
            if (entries.length === 0)
                return 0;
            const now = Date.now();
            const kept = entries.filter(entry => {
                if (PERMANENT_TYPES.includes(entry.type))
                    return true;
                const age = now - new Date(entry.timestamp).getTime();
                if (Number.isNaN(age))
                    return true; // keep entries with corrupted timestamps
                return age < PRUNE_AGE_MS;
            });
            const pruned = entries.length - kept.length;
            if (pruned > 0) {
                // Atomic write: write to temp file then rename
                const tmpPath = this.filePath + '.tmp';
                const content = kept.map(e => JSON.stringify(e)).join('\n') + '\n';
                await writeFile(tmpPath, content, { encoding: 'utf-8', mode: FILE_MODE });
                await rename(tmpPath, this.filePath);
            }
            return pruned;
        });
    }
}
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
/** Categories of agent memory entries. */
export type MemoryType = 'work_log' | 'project_knowledge' | 'decision' | 'strategic_context';
/** A single persisted memory record (one JSONL line in the store). */
export interface MemoryEntry {
    /** Random hex identifier, generated at creation time. */
    id: string;
    /** ISO-8601 creation timestamp. */
    timestamp: string;
    type: MemoryType;
    content: string;
    /** Free-form tags; ingestion uses `source:<filename>` to mark origin. */
    tags?: string[];
    /** Issue number this entry relates to, if any. */
    relatedIssue?: number;
}
/** Input accepted by createMemoryEntry; `id` and `timestamp` are generated. */
export interface CreateMemoryInput {
    type: MemoryType;
    content: string;
    tags?: string[];
    relatedIssue?: number;
}
export declare function createMemoryEntry(input: CreateMemoryInput): MemoryEntry;
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import { randomBytes } from 'node:crypto';
|
|
2
|
+
/** Produce a 16-character hex identifier from 8 random bytes. */
function generateId() {
    return randomBytes(8).toString('hex');
}
/**
 * Build a complete MemoryEntry from partial input, stamping a fresh id
 * and an ISO-8601 timestamp. Optional fields are only present on the
 * result when they were supplied.
 */
export function createMemoryEntry(input) {
    const entry = {
        id: generateId(),
        timestamp: new Date().toISOString(),
        type: input.type,
        content: input.content,
    };
    if (input.tags)
        entry.tags = input.tags;
    if (input.relatedIssue !== undefined)
        entry.relatedIssue = input.relatedIssue;
    return entry;
}
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
import { ToolLoopAgent } from 'ai';
|
|
2
|
+
import type { AgentConfig } from './types.js';
|
|
3
|
+
export declare const AGENT_SYSTEM_PROMPT = "You are wiggum's autonomous development agent. You work through the GitHub issue backlog, shipping features one at a time.\n\n## Workflow\n\n1. Read memory to recall previous work and context\n - Use listStrategicDocs to see available project documentation\n - Use readStrategicDoc to read full documents relevant to the current task (architecture, design, implementation plans)\n2. List open issues and cross-reference with memory\n - Consider: PM priority labels (P0 > P1 > P2), dependencies, strategic context\n - **Housekeeping:** If memory says an issue was already completed (outcome \"success\" or \"skipped\") but it's still open:\n 1. Call assessFeatureState with the featureName and issueNumber\n 2. If recommendation is \"pr_merged\" or \"linked_pr_merged\": close it with closeIssue. Reflect with outcome \"skipped\". Does NOT count against maxItems.\n 3. If recommendation is anything else (e.g., \"resume_implementation\", \"start_fresh\", \"resume_pr_phase\"): the issue was NOT actually shipped. Do NOT close it. Instead, prioritize it as your next work item and follow the Feature State Decision Tree. This counts against maxItems.\n - **Retry:** If memory records a previous attempt at an issue with outcome \"failure\" or \"partial\", and it's still open, prioritize it over new issues. Bugs that caused the failure may have been fixed, and existing work (branch, spec, plan) should not be abandoned. Call assessFeatureState to determine the right action \u2014 usually resume_implementation. This counts against maxItems.\n3. For the chosen issue (one NOT already completed):\n a. Read the full issue details\n b. Derive a featureName from the issue title (lowercase, hyphens, no spaces)\n c. **Assess feature state** using assessFeatureState \u2014 MANDATORY before any action\n d. Follow the Feature State Decision Tree based on the recommendation field\n e. Monitor progress with checkLoopStatus and readLoopLog\n f. 
Report results by commenting on the issue\n\n## Feature State Decision Tree\n\nAfter calling assessFeatureState, follow the recommendation:\n\n| recommendation | action |\n|---|---|\n| start_fresh | generateSpec \u2192 runLoop (fresh) |\n| generate_plan | runLoop without resume (spec exists, needs planning) |\n| resume_implementation | runLoop with resume: true (plan has pending tasks) |\n| resume_pr_phase | runLoop with resume: true (all tasks done, needs PR) |\n| pr_exists_open | Comment on issue, do NOT re-run loop |\n| pr_merged | Verify PR is merged, close issue with closeIssue, reflect with outcome \"skipped\", move on |\n| pr_closed | Decide: restart from scratch or skip |\n| linked_pr_merged | Verify the linked PR is merged, close issue with closeIssue (comment \"shipped via PR #N\"), reflect with outcome \"skipped\", move on |\n| linked_pr_open | Work in progress under a different branch \u2014 comment \"in progress via PR #N\", do NOT re-run loop |\n\n**Critical:**\n- When recommendation is resume_implementation or resume_pr_phase, you MUST pass resume: true to runLoop\n- When recommendation is generate_plan, do NOT pass resume (fresh branch needed)\n- When recommendation is start_fresh, generate a spec first, then run the loop without resume\n- ALWAYS pass issueNumber to assessFeatureState so it can detect work shipped under a different branch name\n- Derive short, stable feature names (2-4 words, kebab-case) from the issue title \u2014 e.g. \"config-module\" not \"config-module-toml-read-write-with-secret-masking\"\n4. After the loop completes (successfully or with failure) \u2014 MANDATORY for EVERY issue, including subsequent ones:\n a. Call readLoopLog to get the actual log content\n b. Call assessFeatureState to check the actual state \u2014 do NOT rely solely on loop log output\n c. **Blocker detection (MANDATORY):** Scan the log for pre-existing test failures (lines like \"All N test failure(s) are pre-existing\"). If found:\n 1. 
Call listIssues with labels [\"bug\"] to check for existing bug issues covering these failures\n 2. If no existing issue covers them, you MUST call createIssue with title \"Fix N pre-existing test failures\", body listing the failing files, and labels [\"bug\"]. If a \"P0\" label exists on the repo you may add it; if not, just use [\"bug\"].\n 3. Do NOT skip this step just because the loop succeeded \u2014 pre-existing failures degrade CI and must be tracked\n d. Only close the issue if assessFeatureState confirms a PR was merged (recommendation: \"pr_merged\" or \"linked_pr_merged\")\n e. When closing: check off acceptance criteria with checkAllBoxes, then close with closeIssue\n f. If the loop produced code but no PR was created/merged, run the loop again with resume: true to trigger the PR phase\n g. If the loop failed and code exists on the branch without a PR, this is incomplete work \u2014 do NOT close the issue\n h. Steps 4\u20136 are MANDATORY after every runLoop \u2014 including the 2nd, 3rd, etc. issue. Do NOT summarize or stop after runLoop returns. The next tool call must be readLoopLog.\n5. Reflect on the outcome:\n - Call reflectOnWork with structured observations\n - Use outcome \"skipped\" for issues that were already complete (no real work done) \u2014 these do NOT count against maxItems\n - Use outcome \"success\"/\"partial\"/\"failure\" for issues where real work was performed\n - Note what worked, what failed, any patterns discovered\n6. Continue to next issue \u2014 MANDATORY tool call sequence:\n a. Call listIssues (with NO label filter) to get the full backlog\n b. Cross-reference with memory to avoid re-doing completed work\n c. If actionable issues remain and no stop condition is met, immediately call assessFeatureState for the next priority issue \u2014 do NOT generate text\n d. When assessFeatureState returns, follow the Feature State Decision Tree (step 3d) for that issue \u2014 e.g. start_fresh \u2192 generateSpec \u2192 runLoop. 
This begins a full new work cycle (steps 3\u20136). Do NOT stop after assessFeatureState.\n e. Only produce a text-only response (final summary) when the backlog is empty or a stop condition is met\n f. ANY text without a tool call terminates the session \u2014 there is no \"ask for permission\" step\n\n## Model forwarding\n\nWhen calling generateSpec, ALWAYS forward the model and provider so the spec generation uses the same AI model as this agent session. The values are provided in the Runtime Config section below.\n\nDo NOT forward model/provider to runLoop \u2014 the development loop uses Claude Code internally, which has its own model configuration (opus for planning, sonnet for implementation). Passing a non-Claude model would break the loop.\n\nWhen calling runLoop, pass the reviewMode from the Runtime Config below (if configured). This controls how the loop handles the PR phase:\n- 'manual': stop at PR creation (default)\n- 'auto': create PR + run automated review (no merge)\n- 'merge': create PR + review + merge if approved\n\n## Prioritization\n\nUse hybrid reasoning: respect PM labels (P0 > P1 > P2) but apply your own judgment for ordering within the same priority tier.\n\n**Ordering rules (in priority order):**\n1. PM priority labels: P0 > P1 > P2 > unlabeled\n2. Explicit dependencies: if readIssue returns a `dependsOn` array (parsed from \"depends on #N\" / \"blocked by #N\" in the issue body), complete those issues first\n3. Lower-numbered issues first: within the same priority tier, prefer lower issue numbers \u2014 they are typically more foundational (scaffolding, setup, core infrastructure)\n4. Prefer issues with existing branches: if assessFeatureState shows a branch exists with commits ahead, prefer that issue over one without a branch \u2014 existing branches diverge further from main with every merge, increasing conflict risk\n5. 
Strategic context from memory and what you learned from previous iterations\n\n## When to stop\n\nStop the loop when:\n- Backlog has no more actionable open issues\n- You've completed the maximum number of items (if configured)\n- A critical failure requires human attention\n- The user has signaled to stop\n\nIMPORTANT: Generating text without tool calls terminates the session immediately. After completing an issue, you MUST call listIssues (step 6) \u2014 never ask \"should I continue?\" or summarize before checking. After assessFeatureState returns for the next issue, you MUST follow the Feature State Decision Tree and call the next tool (e.g. generateSpec for start_fresh). Stopping after assessFeatureState is a bug \u2014 the result tells you what to do next. After runLoop returns, you MUST execute steps 4\u20136 (readLoopLog \u2192 assessFeatureState \u2192 close/comment \u2192 reflectOnWork \u2192 listIssues). Stopping after runLoop is a bug \u2014 there is always post-loop work to do. Your only text-only response is the final summary when ALL issues are processed or a stop condition is met.\n\n## Learning\n\nAfter each issue, always call reflectOnWork. Your memory entries make you progressively better at this specific codebase. Be specific and narrative in what you record. Focus on: what patterns work here, what gotchas exist, which approaches produce better specs and fewer loop iterations.\n\n## Error recovery\n\nIf spec generation fails: retry once with simplified goals. If it fails again, skip the issue and comment explaining why.\nIf a loop fails:\n1. ALWAYS call readLoopLog to get the actual log content\n2. Your issue comment MUST quote or summarize what the log says \u2014 do NOT speculate or guess the cause\n3. Call assessFeatureState to check if a PR was merged despite the loop failure\n4. If assessFeatureState shows \"pr_merged\" or \"linked_pr_merged\" \u2192 close the issue (the work shipped)\n5. 
If assessFeatureState shows \"resume_pr_phase\" \u2192 the code exists but no PR was created. Run the loop again with resume: true to create and merge the PR. Do NOT close the issue yet.\n6. If the log says \"already complete\" but no PR is merged, the work is stranded on a branch \u2014 resume the loop to ship it\n7. If runLoop returns status \"already_complete\", verify with assessFeatureState before closing\n8. Reflect on what happened, then move to the next issue\nNever close an issue without verifying the code is merged to main. Loop log evidence alone is not sufficient.\n\n## Blocker detection (additional)\n\nBesides the mandatory check in step 4c, also create bug issues for systemic blockers you discover (broken CI, missing infrastructure, flaky tests). Always check with listIssues(labels: [\"bug\"]) before creating to avoid duplicates. After creating blocker issues, continue processing the backlog \u2014 never stop due to blockers alone.";
|
|
4
|
+
/** The agent orchestrator: a tool-loop agent from the `ai` SDK. */
export type AgentOrchestrator = ToolLoopAgent<never, any, any>;
/** Render the "Runtime Config" text block from config — presumably appended to the system prompt; confirm in orchestrator.js. */
export declare function buildRuntimeConfig(config: AgentConfig): string;
/** Render the constraints text block from config — presumably appended to the system prompt; confirm in orchestrator.js. */
export declare function buildConstraints(config: AgentConfig): string;
/** Construct the configured agent orchestrator (system prompt + tools). */
export declare function createAgentOrchestrator(config: AgentConfig): AgentOrchestrator;
|
|
@@ -0,0 +1,266 @@
|
|
|
1
|
+
import { MemoryStore } from './memory/store.js';
|
|
2
|
+
import { ingestStrategicDocs } from './memory/ingest.js';
|
|
3
|
+
import { createBacklogTools } from './tools/backlog.js';
|
|
4
|
+
import { createMemoryTools, REFLECT_TOOL_NAME } from './tools/memory.js';
|
|
5
|
+
import { createExecutionTools } from './tools/execution.js';
|
|
6
|
+
import { createReportingTools } from './tools/reporting.js';
|
|
7
|
+
import { createIntrospectionTools } from './tools/introspection.js';
|
|
8
|
+
import { createDryRunExecutionTools, createDryRunReportingTools, createDryRunFeatureStateTools } from './tools/dry-run.js';
|
|
9
|
+
import { createFeatureStateTools } from './tools/feature-state.js';
|
|
10
|
+
import { join } from 'node:path';
|
|
11
|
+
import { logger } from '../utils/logger.js';
|
|
12
|
+
import { getTracedAI } from '../utils/tracing.js';
|
|
13
|
+
export const AGENT_SYSTEM_PROMPT = `You are wiggum's autonomous development agent. You work through the GitHub issue backlog, shipping features one at a time.
|
|
14
|
+
|
|
15
|
+
## Workflow
|
|
16
|
+
|
|
17
|
+
1. Read memory to recall previous work and context
|
|
18
|
+
- Use listStrategicDocs to see available project documentation
|
|
19
|
+
- Use readStrategicDoc to read full documents relevant to the current task (architecture, design, implementation plans)
|
|
20
|
+
2. List open issues and cross-reference with memory
|
|
21
|
+
- Consider: PM priority labels (P0 > P1 > P2), dependencies, strategic context
|
|
22
|
+
- **Housekeeping:** If memory says an issue was already completed (outcome "success" or "skipped") but it's still open:
|
|
23
|
+
1. Call assessFeatureState with the featureName and issueNumber
|
|
24
|
+
2. If recommendation is "pr_merged" or "linked_pr_merged": close it with closeIssue. Reflect with outcome "skipped". Does NOT count against maxItems.
|
|
25
|
+
3. If recommendation is anything else (e.g., "resume_implementation", "start_fresh", "resume_pr_phase"): the issue was NOT actually shipped. Do NOT close it. Instead, prioritize it as your next work item and follow the Feature State Decision Tree. This counts against maxItems.
|
|
26
|
+
- **Retry:** If memory records a previous attempt at an issue with outcome "failure" or "partial", and it's still open, prioritize it over new issues. Bugs that caused the failure may have been fixed, and existing work (branch, spec, plan) should not be abandoned. Call assessFeatureState to determine the right action — usually resume_implementation. This counts against maxItems.
|
|
27
|
+
3. For the chosen issue (one NOT already completed):
|
|
28
|
+
a. Read the full issue details
|
|
29
|
+
b. Derive a featureName from the issue title (lowercase, hyphens, no spaces)
|
|
30
|
+
c. **Assess feature state** using assessFeatureState — MANDATORY before any action
|
|
31
|
+
d. Follow the Feature State Decision Tree based on the recommendation field
|
|
32
|
+
e. Monitor progress with checkLoopStatus and readLoopLog
|
|
33
|
+
f. Report results by commenting on the issue
|
|
34
|
+
|
|
35
|
+
## Feature State Decision Tree
|
|
36
|
+
|
|
37
|
+
After calling assessFeatureState, follow the recommendation:
|
|
38
|
+
|
|
39
|
+
| recommendation | action |
|
|
40
|
+
|---|---|
|
|
41
|
+
| start_fresh | generateSpec → runLoop (fresh) |
|
|
42
|
+
| generate_plan | runLoop without resume (spec exists, needs planning) |
|
|
43
|
+
| resume_implementation | runLoop with resume: true (plan has pending tasks) |
|
|
44
|
+
| resume_pr_phase | runLoop with resume: true (all tasks done, needs PR) |
|
|
45
|
+
| pr_exists_open | Comment on issue, do NOT re-run loop |
|
|
46
|
+
| pr_merged | Verify PR is merged, close issue with closeIssue, reflect with outcome "skipped", move on |
|
|
47
|
+
| pr_closed | Decide: restart from scratch or skip |
|
|
48
|
+
| linked_pr_merged | Verify the linked PR is merged, close issue with closeIssue (comment "shipped via PR #N"), reflect with outcome "skipped", move on |
|
|
49
|
+
| linked_pr_open | Work in progress under a different branch — comment "in progress via PR #N", do NOT re-run loop |
|
|
50
|
+
|
|
51
|
+
**Critical:**
|
|
52
|
+
- When recommendation is resume_implementation or resume_pr_phase, you MUST pass resume: true to runLoop
|
|
53
|
+
- When recommendation is generate_plan, do NOT pass resume (fresh branch needed)
|
|
54
|
+
- When recommendation is start_fresh, generate a spec first, then run the loop without resume
|
|
55
|
+
- ALWAYS pass issueNumber to assessFeatureState so it can detect work shipped under a different branch name
|
|
56
|
+
- Derive short, stable feature names (2-4 words, kebab-case) from the issue title — e.g. "config-module" not "config-module-toml-read-write-with-secret-masking"
|
|
57
|
+
4. After the loop completes (successfully or with failure) — MANDATORY for EVERY issue, including subsequent ones:
|
|
58
|
+
a. Call readLoopLog to get the actual log content
|
|
59
|
+
b. Call assessFeatureState to check the actual state — do NOT rely solely on loop log output
|
|
60
|
+
c. **Blocker detection (MANDATORY):** Scan the log for pre-existing test failures (lines like "All N test failure(s) are pre-existing"). If found:
|
|
61
|
+
1. Call listIssues with labels ["bug"] to check for existing bug issues covering these failures
|
|
62
|
+
2. If no existing issue covers them, you MUST call createIssue with title "Fix N pre-existing test failures", body listing the failing files, and labels ["bug"]. If a "P0" label exists on the repo you may add it; if not, just use ["bug"].
|
|
63
|
+
3. Do NOT skip this step just because the loop succeeded — pre-existing failures degrade CI and must be tracked
|
|
64
|
+
d. Only close the issue if assessFeatureState confirms a PR was merged (recommendation: "pr_merged" or "linked_pr_merged")
|
|
65
|
+
e. When closing: check off acceptance criteria with checkAllBoxes, then close with closeIssue
|
|
66
|
+
f. If the loop produced code but no PR was created/merged, run the loop again with resume: true to trigger the PR phase
|
|
67
|
+
g. If the loop failed and code exists on the branch without a PR, this is incomplete work — do NOT close the issue
|
|
68
|
+
h. Steps 4–6 are MANDATORY after every runLoop — including the 2nd, 3rd, etc. issue. Do NOT summarize or stop after runLoop returns. The next tool call must be readLoopLog.
|
|
69
|
+
5. Reflect on the outcome:
|
|
70
|
+
- Call reflectOnWork with structured observations
|
|
71
|
+
- Use outcome "skipped" for issues that were already complete (no real work done) — these do NOT count against maxItems
|
|
72
|
+
- Use outcome "success"/"partial"/"failure" for issues where real work was performed
|
|
73
|
+
- Note what worked, what failed, any patterns discovered
|
|
74
|
+
6. Continue to next issue — MANDATORY tool call sequence:
|
|
75
|
+
a. Call listIssues (with NO label filter) to get the full backlog
|
|
76
|
+
b. Cross-reference with memory to avoid re-doing completed work
|
|
77
|
+
c. If actionable issues remain and no stop condition is met, immediately call assessFeatureState for the next priority issue — do NOT generate text
|
|
78
|
+
d. When assessFeatureState returns, follow the Feature State Decision Tree (step 3d) for that issue — e.g. start_fresh → generateSpec → runLoop. This begins a full new work cycle (steps 3–6). Do NOT stop after assessFeatureState.
|
|
79
|
+
e. Only produce a text-only response (final summary) when the backlog is empty or a stop condition is met
|
|
80
|
+
f. ANY text without a tool call terminates the session — there is no "ask for permission" step
|
|
81
|
+
|
|
82
|
+
## Model forwarding
|
|
83
|
+
|
|
84
|
+
When calling generateSpec, ALWAYS forward the model and provider so the spec generation uses the same AI model as this agent session. The values are provided in the Runtime Config section below.
|
|
85
|
+
|
|
86
|
+
Do NOT forward model/provider to runLoop — the development loop uses Claude Code internally, which has its own model configuration (opus for planning, sonnet for implementation). Passing a non-Claude model would break the loop.
|
|
87
|
+
|
|
88
|
+
When calling runLoop, pass the reviewMode from the Runtime Config below (if configured). This controls how the loop handles the PR phase:
|
|
89
|
+
- 'manual': stop at PR creation (default)
|
|
90
|
+
- 'auto': create PR + run automated review (no merge)
|
|
91
|
+
- 'merge': create PR + review + merge if approved
|
|
92
|
+
|
|
93
|
+
## Prioritization
|
|
94
|
+
|
|
95
|
+
Use hybrid reasoning: respect PM labels (P0 > P1 > P2) but apply your own judgment for ordering within the same priority tier.
|
|
96
|
+
|
|
97
|
+
**Ordering rules (in priority order):**
|
|
98
|
+
1. PM priority labels: P0 > P1 > P2 > unlabeled
|
|
99
|
+
2. Explicit dependencies: if readIssue returns a \`dependsOn\` array (parsed from "depends on #N" / "blocked by #N" in the issue body), complete those issues first
|
|
100
|
+
3. Lower-numbered issues first: within the same priority tier, prefer lower issue numbers — they are typically more foundational (scaffolding, setup, core infrastructure)
|
|
101
|
+
4. Prefer issues with existing branches: if assessFeatureState shows a branch exists with commits ahead, prefer that issue over one without a branch — existing branches diverge further from main with every merge, increasing conflict risk
|
|
102
|
+
5. Strategic context from memory and what you learned from previous iterations
|
|
103
|
+
|
|
104
|
+
## When to stop
|
|
105
|
+
|
|
106
|
+
Stop the loop when:
|
|
107
|
+
- Backlog has no more actionable open issues
|
|
108
|
+
- You've completed the maximum number of items (if configured)
|
|
109
|
+
- A critical failure requires human attention
|
|
110
|
+
- The user has signaled to stop
|
|
111
|
+
|
|
112
|
+
IMPORTANT: Generating text without tool calls terminates the session immediately. After completing an issue, you MUST call listIssues (step 6) — never ask "should I continue?" or summarize before checking. After assessFeatureState returns for the next issue, you MUST follow the Feature State Decision Tree and call the next tool (e.g. generateSpec for start_fresh). Stopping after assessFeatureState is a bug — the result tells you what to do next. After runLoop returns, you MUST execute steps 4–6 (readLoopLog → assessFeatureState → close/comment → reflectOnWork → listIssues). Stopping after runLoop is a bug — there is always post-loop work to do. Your only text-only response is the final summary when ALL issues are processed or a stop condition is met.
|
|
113
|
+
|
|
114
|
+
## Learning
|
|
115
|
+
|
|
116
|
+
After each issue, always call reflectOnWork. Your memory entries make you progressively better at this specific codebase. Be specific and narrative in what you record. Focus on: what patterns work here, what gotchas exist, which approaches produce better specs and fewer loop iterations.
|
|
117
|
+
|
|
118
|
+
## Error recovery
|
|
119
|
+
|
|
120
|
+
If spec generation fails: retry once with simplified goals. If it fails again, skip the issue and comment explaining why.
|
|
121
|
+
If a loop fails:
|
|
122
|
+
1. ALWAYS call readLoopLog to get the actual log content
|
|
123
|
+
2. Your issue comment MUST quote or summarize what the log says — do NOT speculate or guess the cause
|
|
124
|
+
3. Call assessFeatureState to check if a PR was merged despite the loop failure
|
|
125
|
+
4. If assessFeatureState shows "pr_merged" or "linked_pr_merged" → close the issue (the work shipped)
|
|
126
|
+
5. If assessFeatureState shows "resume_pr_phase" → the code exists but no PR was created. Run the loop again with resume: true to create and merge the PR. Do NOT close the issue yet.
|
|
127
|
+
6. If the log says "already complete" but no PR is merged, the work is stranded on a branch — resume the loop to ship it
|
|
128
|
+
7. If runLoop returns status "already_complete", verify with assessFeatureState before closing
|
|
129
|
+
8. Reflect on what happened, then move to the next issue
|
|
130
|
+
Never close an issue without verifying the code is merged to main. Loop log evidence alone is not sufficient.
|
|
131
|
+
|
|
132
|
+
## Blocker detection (additional)
|
|
133
|
+
|
|
134
|
+
Besides the mandatory check in step 4c, also create bug issues for systemic blockers you discover (broken CI, missing infrastructure, flaky tests). Always check with listIssues(labels: ["bug"]) before creating to avoid duplicates. After creating blocker issues, continue processing the backlog — never stop due to blockers alone.`;
|
|
135
|
+
/**
 * Render the "## Runtime Config" section appended to the system prompt.
 *
 * Emits one bullet per configured field (modelId → "model", provider,
 * reviewMode); falsy fields are omitted. Returns '' when nothing is
 * configured so no empty section is appended.
 */
export function buildRuntimeConfig(config) {
    // Label/value pairs in the order they should appear in the prompt.
    const fields = [
        ['model', config.modelId],
        ['provider', config.provider],
        ['reviewMode', config.reviewMode],
    ];
    const bullets = fields
        .filter(([, value]) => value)
        .map(([label, value]) => `- ${label}: ${value}`);
    if (bullets.length === 0) {
        return '';
    }
    return `\n\n## Runtime Config\n\n${bullets.join('\n')}`;
}
|
|
147
|
+
/**
 * Render the "## Constraints" section appended to the system prompt.
 *
 * Covers the max-item budget (maxItems), the label allowlist (labels), and
 * dry-run mode. Returns '' when no constraint is configured so no empty
 * section is appended.
 */
export function buildConstraints(config) {
    const rules = [];
    // != null keeps 0 as a valid (immediate-stop) budget.
    if (config.maxItems != null) {
        rules.push(`- You MUST stop after completing ${config.maxItems} issue(s). Call reflectOnWork for each, then stop.`);
    }
    // Only a non-empty label list constrains issue selection.
    if (config.labels?.length) {
        rules.push(`- Only work on issues with these labels: ${config.labels.join(', ')}. Ignore all others.`);
    }
    if (config.dryRun) {
        rules.push('- DRY RUN MODE: Plan what you would do but do NOT execute. Execution and reporting tools return simulated results.');
    }
    return rules.length === 0 ? '' : `\n\n## Constraints\n\n${rules.join('\n')}`;
}
|
|
162
|
+
/**
 * Wire up the autonomous agent and return it as a traced ToolLoopAgent.
 *
 * Assembles: the memory store under <projectRoot>/.ralph/agent, the tool set
 * (execution/reporting/feature-state tools swap to simulated dry-run variants
 * when config.dryRun is set), the full system prompt (base prompt + runtime
 * config + constraints), a step/item-budget stop condition, per-step memory
 * injection, and completed-issue tracking driven by reflectOnWork calls.
 */
export function createAgentOrchestrator(config) {
    const { model, projectRoot, owner, repo } = config;
    // Persistent agent memory lives under <project>/.ralph/agent.
    const memoryStore = new MemoryStore(join(projectRoot, '.ralph', 'agent'));
    const dryRun = config.dryRun;
    // Tool set, merged in a fixed order. Backlog, memory, and introspection
    // tools are always real; the rest have simulated dry-run counterparts.
    const tools = {
        ...createBacklogTools(owner, repo, { defaultLabels: config.labels }),
        ...createMemoryTools(memoryStore, projectRoot),
        ...(dryRun
            ? createDryRunExecutionTools()
            : createExecutionTools(projectRoot, { onProgress: config.onProgress })),
        ...(dryRun
            ? createDryRunReportingTools()
            : createReportingTools(owner, repo)),
        ...createIntrospectionTools(projectRoot),
        ...(dryRun
            ? createDryRunFeatureStateTools()
            : createFeatureStateTools(projectRoot)),
    };
    // Base prompt plus the runtime-config and constraints sections.
    const fullPrompt = AGENT_SYSTEM_PROMPT + buildRuntimeConfig(config) + buildConstraints(config);
    // Issues on which real (non-'skipped') work was reflected; drives the
    // maxItems stop condition and progress reporting.
    const completedIssues = new Set();
    const stepBudget = config.maxSteps ?? 200;
    const describeError = (err) => (err instanceof Error ? err.message : String(err));
    // Traced ToolLoopAgent so Braintrust automatically captures all LLM
    // calls, tool executions, and agent steps.
    const { ToolLoopAgent: TracedToolLoopAgent } = getTracedAI();
    return new TracedToolLoopAgent({
        model,
        instructions: fullPrompt,
        tools,
        experimental_telemetry: {
            isEnabled: true,
            functionId: 'agent-orchestrator',
            metadata: { owner, repo, dryRun: String(config.dryRun ?? false) },
        },
        // Halt once the step budget is spent or the item budget is reached.
        stopWhen: ({ steps }) => steps.length >= stepBudget ||
            (config.maxItems != null && completedIssues.size >= config.maxItems),
        prepareStep: async ({ steps }) => {
            try {
                // First step only: refresh the strategic-doc catalog and
                // prune stale memory entries.
                if (steps.length === 0) {
                    await ingestStrategicDocs(projectRoot, memoryStore);
                    await memoryStore.prune();
                }
                const entries = await memoryStore.read({ limit: 50 });
                const pick = (type, max) => entries.filter((e) => e.type === type).slice(0, max);
                // Strategic docs are injected as lightweight catalog entries
                // (filename + summary); the agent reads full content
                // on-demand via the readStrategicDoc tool.
                const memoryContext = [
                    ...pick('work_log', 5).map((e) => `[work] ${e.content}`),
                    ...pick('project_knowledge', 3).map((e) => `[knowledge] ${e.content}`),
                    ...pick('decision', 2).map((e) => `[decision] ${e.content}`),
                    ...entries
                        .filter((e) => e.type === 'strategic_context')
                        .map((e) => `[strategic-doc] ${e.content}`),
                ].join('\n');
                if (!memoryContext) {
                    return undefined;
                }
                return {
                    system: `${fullPrompt}\n\n## Current Memory\n\n${memoryContext}`,
                };
            }
            catch (err) {
                // Memory injection is best-effort; never abort the run for it.
                logger.warn(`prepareStep failed, continuing without memory: ${describeError(err)}`);
                return undefined;
            }
        },
        onStepFinish: async ({ toolCalls, toolResults }) => {
            try {
                // A reflectOnWork call with a non-'skipped' outcome marks its
                // issue as a completed work item (counts against maxItems).
                for (const call of toolCalls) {
                    if (call.toolName !== REFLECT_TOOL_NAME) {
                        continue;
                    }
                    const { issueNumber, outcome } = call.input;
                    if (issueNumber != null && outcome !== 'skipped') {
                        completedIssues.add(issueNumber);
                    }
                }
                // Surface per-step progress to the caller (e.g. a TUI), if subscribed.
                config.onStepUpdate?.({
                    toolCalls: toolCalls.map((call) => ({ toolName: call.toolName, args: call.input })),
                    toolResults: toolResults.map((res) => ({ toolName: res.toolName, result: res.output })),
                    completedItems: completedIssues.size,
                });
            }
            catch (err) {
                logger.warn(`onStepFinish failed: ${describeError(err)}`);
            }
        },
    });
}
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Shared agent environment resolution
|
|
3
|
+
*
|
|
4
|
+
* Extracts provider, model, and GitHub remote detection into a reusable
|
|
5
|
+
* function that **throws** on error (no process.exit). Used by both the
|
|
6
|
+
* headless agent command and the TUI AgentScreen.
|
|
7
|
+
*/
|
|
8
|
+
import type { LanguageModel } from 'ai';
|
|
9
|
+
import type { AIProvider } from '../ai/providers.js';
|
|
10
|
+
import type { ReviewMode } from './types.js';
|
|
11
|
+
export interface ResolvedAgentEnv {
    /** AI provider: config value when it names a known provider, else detected from env API keys. */
    provider: AIProvider;
    /** Instantiated language-model handle for the resolved provider/model. */
    model: LanguageModel;
    /** Explicit model id when one was requested; undefined means the provider default. */
    modelId: string | undefined;
    /** GitHub repository owner parsed from the project's git remote. */
    owner: string;
    /** GitHub repository name parsed from the project's git remote. */
    repo: string;
    /** Project root the agent operates on (the directory that was resolved). */
    projectRoot: string;
    /** PR review mode. NOTE(review): resolveAgentEnv's return omits this — presumably set by callers; confirm. */
    reviewMode?: ReviewMode;
}
/**
 * Resolve provider, model, and GitHub remote for agent execution.
 * Throws descriptive errors instead of calling process.exit.
 *
 * Precedence: provider = config (if valid) > env detection;
 * model = options.model > config default > provider default.
 */
export declare function resolveAgentEnv(projectRoot: string, options?: {
    model?: string;
}): Promise<ResolvedAgentEnv>;
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Shared agent environment resolution
|
|
3
|
+
*
|
|
4
|
+
* Extracts provider, model, and GitHub remote detection into a reusable
|
|
5
|
+
* function that **throws** on error (no process.exit). Used by both the
|
|
6
|
+
* headless agent command and the TUI AgentScreen.
|
|
7
|
+
*/
|
|
8
|
+
import { getAvailableProvider, getModel, } from '../ai/providers.js';
|
|
9
|
+
import { detectGitHubRemote } from '../utils/github.js';
|
|
10
|
+
import { loadConfigWithDefaults } from '../utils/config.js';
|
|
11
|
+
// Provider names the config file may legitimately select.
const VALID_PROVIDERS = new Set(['anthropic', 'openai', 'openrouter']);
/**
 * Resolve provider, model, and GitHub remote for agent execution.
 * Throws descriptive errors instead of calling process.exit.
 *
 * Precedence: provider = config value (only if it names a known provider)
 * > environment detection; model = options.model > config default >
 * provider default (undefined).
 */
export async function resolveAgentEnv(projectRoot, options) {
    const ralphConfig = await loadConfigWithDefaults(projectRoot);
    // 1. Provider: honour the configured value only when it is a known
    //    provider name; otherwise fall back to env-based detection.
    const configured = ralphConfig.agent.defaultProvider;
    let provider = VALID_PROVIDERS.has(configured) ? configured : null;
    if (!provider) {
        provider = getAvailableProvider();
    }
    if (!provider) {
        throw new Error('No AI provider configured. Run `wiggum init` or set ANTHROPIC_API_KEY, OPENAI_API_KEY, or OPENROUTER_API_KEY.');
    }
    // 2. GitHub remote: owner/repo come from the local git origin.
    const remote = await detectGitHubRemote(projectRoot);
    if (!remote) {
        throw new Error('No GitHub remote detected. Run from a repo with a GitHub origin.');
    }
    // 3. Model: CLI flag wins over config; '' / null normalize to undefined
    //    so getModel applies the provider default.
    const modelId = options?.model || ralphConfig.agent.defaultModel || undefined;
    const { model } = getModel(provider, modelId);
    return {
        provider,
        model,
        modelId,
        owner: remote.owner,
        repo: remote.repo,
        projectRoot,
    };
}
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
/** Options for constructing the backlog tool set. */
export interface BacklogToolsOptions {
    // Label filter applied by default — presumably when listIssues is called
    // without explicit labels; confirm against the implementation.
    defaultLabels?: string[];
}
/**
 * Create the GitHub-backlog tools (listIssues, readIssue) bound to a single
 * owner/repo. Both tools report failures via an `error` string in the result
 * rather than throwing (listIssues additionally returns an empty `issues`
 * array on error).
 */
export declare function createBacklogTools(owner: string, repo: string, options?: BacklogToolsOptions): {
    listIssues: import("ai").Tool<{
        limit: number;
        labels?: string[] | undefined;
        milestone?: string | undefined;
    }, {
        issues: never[];
        error: string;
    } | {
        issues: import("../../utils/github.js").GitHubIssueListItem[];
        error?: undefined;
    }>;
    readIssue: import("ai").Tool<{
        issueNumber: number;
    }, {
        error: string;
    } | {
        // dependsOn: issue numbers parsed from "depends on #N" / "blocked by #N"
        // in the issue body (per the agent system prompt's ordering rules).
        dependsOn: number[];
        title: string;
        body: string;
        labels: string[];
        error?: undefined;
    }>;
};
|