error-trace-debugger 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +22 -0
- package/README.md +96 -0
- package/bin/error-trace-debugger +4 -0
- package/package.json +33 -0
- package/src/adapters/llm_client_langchain.js +38 -0
- package/src/agents/agent_graph.js +228 -0
- package/src/agents/deep_agent_v2.js +74 -0
- package/src/cli/deep_agent.js +67 -0
- package/src/cli/run_analyze_command.js +85 -0
- package/src/cli/stdin_logs.js +44 -0
- package/src/core/Orchestrator.js +190 -0
- package/src/core/report_writer.js +161 -0
- package/src/core/state.js +33 -0
- package/src/core/write_text_file.js +8 -0
- package/src/tools/CodeSearchTool.js +170 -0
- package/src/tools/GitDiffTool.js +115 -0
- package/src/tools/LogAnalyzerTool.js +148 -0
- package/src/tools/PatchProposerTool.js +110 -0
- package/src/tools/ReadFileTool.js +98 -0
- package/src/tools/TestRunnerTool.js +98 -0
- package/src/tools/create_default_tools.js +14 -0
- package/src/util/redact_secrets.js +16 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
22
|
+
|
package/README.md
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
# error-trace-debugger
|
|
2
|
+
|
|
3
|
+
CLI agent that ingests logs/stack traces, investigates a codebase, and outputs a report with a
|
|
4
|
+
suggested fix and (when possible) a unified patch.
|
|
5
|
+
|
|
6
|
+
## Install
|
|
7
|
+
|
|
8
|
+
```bash
|
|
9
|
+
npm install -g error-trace-debugger
|
|
10
|
+
```
|
|
11
|
+
|
|
12
|
+
Or run without installing:
|
|
13
|
+
|
|
14
|
+
```bash
|
|
15
|
+
npx error-trace-debugger analyze --help
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
## Usage
|
|
19
|
+
|
|
20
|
+
### Analyze a logs file
|
|
21
|
+
|
|
22
|
+
```bash
|
|
23
|
+
error-trace-debugger analyze --logs ./error.log
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
### Analyze an inline stack trace
|
|
27
|
+
|
|
28
|
+
```bash
|
|
29
|
+
error-trace-debugger analyze --stack "TypeError: boom\n at run (/repo/src/index.js:10:5)"
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
### Pipe logs over stdin
|
|
33
|
+
|
|
34
|
+
```bash
|
|
35
|
+
cat ./error.log | error-trace-debugger analyze --stdin
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
### Target a specific repo
|
|
39
|
+
|
|
40
|
+
```bash
|
|
41
|
+
error-trace-debugger analyze --logs ./error.log --repo ../my-repo
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
## Scope (single repo)
|
|
45
|
+
|
|
46
|
+
`error-trace-debugger` analyzes **one repo/workspace at a time**.
|
|
47
|
+
By default it uses the current working directory; you can override with `--repo <path>`.
|
|
48
|
+
|
|
49
|
+
### Output formats
|
|
50
|
+
|
|
51
|
+
```bash
|
|
52
|
+
error-trace-debugger analyze --logs ./error.log --format md
|
|
53
|
+
error-trace-debugger analyze --logs ./error.log --format text
|
|
54
|
+
error-trace-debugger analyze --logs ./error.log --format json
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
If you keep the default `--format md` and do not pass `--out`, the CLI writes the full
|
|
58
|
+
Markdown report to `error-trace-debugger-report.md` in the target repo root, and the terminal shows
|
|
59
|
+
only a short “check out the file” message.
|
|
60
|
+
|
|
61
|
+
### Write report + patch to files
|
|
62
|
+
|
|
63
|
+
```bash
|
|
64
|
+
error-trace-debugger analyze --logs ./error.log --out ./report.md --patch-out ./fix.diff
|
|
65
|
+
git apply ./fix.diff
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
### Run tests (optional)
|
|
69
|
+
|
|
70
|
+
```bash
|
|
71
|
+
error-trace-debugger analyze --logs ./error.log --run-tests
|
|
72
|
+
error-trace-debugger analyze --logs ./error.log --run-tests --test-command "npm test"
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
### Engine: v1 (default) vs v2 (LangGraph)
|
|
76
|
+
|
|
77
|
+
- **v1** (default): heuristic loop (parse → search → diff → optional tests). No LLM.
|
|
78
|
+
- **v2**: LangGraph state machine with the same tools plus LLM-backed patch proposal. Use `--engine v2` and set `OPENAI_API_KEY` (or `--api-key-env VAR`). Optional: `--provider openai`, `--model <name>`, `--max-tool-calls`, `--max-files-read`, `--max-bytes-read`.
|
|
79
|
+
|
|
80
|
+
```bash
|
|
81
|
+
error-trace-debugger analyze --engine v2 --logs ./error.log --repo .
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
## What it does (v1)
|
|
85
|
+
- **Log analyzer**: parses common Node/Jest stack traces into structured frames.
|
|
86
|
+
- **Code search**: uses ripgrep if available (otherwise scans within safe budgets).
|
|
87
|
+
- **Diff generator**: emits a unified patch when it has concrete edits to apply.
|
|
88
|
+
If no edits are produced, the `Patch` section may be empty.
|
|
89
|
+
- **Test runner**: runs tests (optional) and summarizes failing output.
|
|
90
|
+
|
|
91
|
+
The orchestrator iteratively:
|
|
92
|
+
`logs → hypothesis → code search → patch proposal → (optional) tests`.
|
|
93
|
+
|
|
94
|
+
## Notes
|
|
95
|
+
- v1 focuses on investigation + reporting. Patch generation is supported but requires a fix plan with concrete edits; use `--engine v2` for the LLM-backed patch proposer.
|
|
96
|
+
- PR creation is intentionally out of scope for v1.
|
package/package.json
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "error-trace-debugger",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "CLI agent that analyzes logs, investigates a repo, suggests fixes, and outputs a patch.",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"bin": {
|
|
7
|
+
"error-trace-debugger": "bin/error-trace-debugger"
|
|
8
|
+
},
|
|
9
|
+
"files": [
|
|
10
|
+
"bin/",
|
|
11
|
+
"src/",
|
|
12
|
+
"README.md",
|
|
13
|
+
"LICENSE"
|
|
14
|
+
],
|
|
15
|
+
"engines": {
|
|
16
|
+
"node": ">=18.0.0"
|
|
17
|
+
},
|
|
18
|
+
"scripts": {
|
|
19
|
+
"test": "node --test",
|
|
20
|
+
"lint": "node ./src/cli/deep_agent.js --help"
|
|
21
|
+
},
|
|
22
|
+
"dependencies": {
|
|
23
|
+
"@langchain/core": "^1.1.34",
|
|
24
|
+
"@langchain/langgraph": "^0.2.74",
|
|
25
|
+
"@langchain/openai": "^0.3.5",
|
|
26
|
+
"commander": "^14.0.0",
|
|
27
|
+
"diff": "^8.0.0",
|
|
28
|
+
"execa": "^9.6.0",
|
|
29
|
+
"fast-glob": "^3.3.3",
|
|
30
|
+
"ignore": "^7.0.5"
|
|
31
|
+
},
|
|
32
|
+
"license": "MIT"
|
|
33
|
+
}
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import { ChatOpenAI } from "@langchain/openai";
|
|
2
|
+
|
|
3
|
+
const DEFAULT_PROVIDER = "openai";
const DEFAULT_MODEL = "gpt-4.1-mini";

/**
 * Create the LLM client adapter used by the v2 (LangGraph) engine.
 *
 * @param {object} config
 * @param {string} [config.provider="openai"] - LLM provider name; only
 *   "openai" is supported, anything else throws.
 * @param {string} [config.model="gpt-4.1-mini"] - Chat model name.
 * @param {string} [config.apiKeyEnv] - Name of the environment variable
 *   holding the API key (defaults to OPENAI_API_KEY).
 * @returns {{ generateJson(prompt: any): Promise<string> }} A thin client
 *   exposing generateJson, which returns the model's raw response text
 *   (callers are expected to parse JSON out of it themselves).
 * @throws {Error} If the provider is unsupported or the API key env var
 *   is not set.
 */
export function createLlmClient({
  provider = DEFAULT_PROVIDER,
  model = DEFAULT_MODEL,
  apiKeyEnv
}) {
  const provider_name = provider || DEFAULT_PROVIDER;
  if (provider_name !== "openai") {
    throw new Error(`Unsupported provider for v2 engine: ${provider_name}`);
  }

  // Fail fast with an actionable message when the key is missing, rather
  // than letting the SDK raise a less specific error mid-run.
  const env_var_name = apiKeyEnv || "OPENAI_API_KEY";
  const api_key = process.env[env_var_name];
  if (!api_key) {
    throw new Error(
      `Missing API key: set ${env_var_name} in the environment to use --engine v2.`
    );
  }

  // temperature 0 keeps patch proposals as deterministic as the API allows.
  const chat_model = new ChatOpenAI({
    apiKey: api_key,
    model: model || DEFAULT_MODEL,
    temperature: 0
  });

  return {
    async generateJson(prompt) {
      const response = await chat_model.invoke(prompt);
      const content = response.content;
      // LangChain message content may be a plain string or an array of
      // content parts. The previous implementation read only the first
      // part's text (content?.[0]?.text), silently dropping the remainder
      // of a multi-part reply; join every text part instead.
      if (Array.isArray(content)) {
        return content
          .map((part) => (typeof part === "string" ? part : part?.text ?? ""))
          .join("");
      }
      return String(content ?? "");
    }
  };
}
|
|
38
|
+
|
|
@@ -0,0 +1,228 @@
|
|
|
1
|
+
import { StateGraph, Annotation, START, END } from "@langchain/langgraph";
|
|
2
|
+
|
|
3
|
+
// LangGraph channel reducer for investigation_steps: folds an incoming
// update (a single step object or a list of steps) onto the accumulated
// list. Falsy updates contribute nothing; a missing accumulator starts
// from an empty list. Always returns a fresh array.
const investigationStepsReducer = (left, right) => {
  const existing = left || [];
  if (Array.isArray(right)) {
    return [...existing, ...right];
  }
  if (right) {
    return [...existing, right];
  }
  return [...existing];
};
|
|
7
|
+
|
|
8
|
+
// Shared LangGraph state schema for the deep-agent workflow. Every channel
// is a plain last-value channel except investigation_steps, which
// accumulates entries through its reducer (append-only audit trail).
export const DeepAgentState = Annotation.Root({
  // Absolute path of the repository under investigation.
  repo_path: Annotation(),
  // Raw log / stack-trace text supplied by the user.
  raw_logs: Annotation(),
  // Structured artifact produced by the log_analyzer tool.
  log_artifact: Annotation(),
  // Matches returned by the code_search tool.
  search_matches: Annotation(),
  // File excerpts collected by the read_file tool.
  file_contexts: Annotation(),
  // Current root-cause hypothesis derived from the fix plan.
  hypothesis: Annotation(),
  // Fix plan proposed by the patch_proposer tool.
  fix_plan: Annotation(),
  // Unified-diff text produced by the diff_generator tool.
  patch_text: Annotation(),
  // Result object from the optional test run (test_runner tool).
  validation_result: Annotation(),
  // Number of completed assess passes; incremented in the assess node.
  iteration_index: Annotation(),
  // Append-only log of tool invocations (see investigationStepsReducer).
  investigation_steps: Annotation({
    reducer: investigationStepsReducer,
    default: () => []
  }),
  // Whether to run tests after generating a diff (drives a conditional edge).
  run_tests: Annotation(),
  // Explicit test command; undefined lets the test runner auto-detect.
  test_command: Annotation(),
  // Iteration cap consulted by the assess node.
  max_iterations: Annotation(),
  // Optional wall-clock budget in milliseconds.
  budget_ms: Annotation(),
  // Run start timestamp (ms since epoch); compared against budget_ms.
  started_at_ms: Annotation(),
  // Set by assess: whether the graph loops back to searchCode.
  should_iterate: Annotation()
});
|
|
30
|
+
|
|
31
|
+
/**
 * Build and compile the v2 LangGraph state machine:
 * parseLogs -> searchCode -> readFiles -> proposePatch -> generateDiff
 * -> (runTests?) -> assess, with assess optionally looping back to
 * searchCode until a patch exists, tests pass, or budgets are exhausted.
 *
 * @param {object} args
 * @param {object} args.tools - Tool instances keyed by role (log_analyzer,
 *   code_search, read_file, patch_proposer, diff_generator, test_runner).
 * @param {object} [args.options] - Run options; only max_files_read is used
 *   directly here (see review note below).
 * @returns {object} A compiled LangGraph graph; invoke with an initial state.
 */
export function createAgentGraph({ tools, options }) {
  // NOTE(review): of these destructured options only max_files_read is
  // actually read in this function — the loop limits (max_iterations,
  // budget_ms, run_tests, test_command) are consumed from graph *state*
  // instead, and the caller duplicates them into the initial state.
  // Confirm whether the unused locals can be removed.
  const {
    max_iterations = 5,
    budget_ms,
    run_tests = false,
    test_command,
    max_files_read = 32
  } = options || {};

  // Node: parse the raw logs into a structured artifact.
  async function parseLogs(state) {
    const log_artifact = await tools.log_analyzer.run({
      raw_logs: state.raw_logs,
      repo_path: state.repo_path
    });
    return {
      log_artifact,
      investigation_steps: [
        { tool_name: "log_analyzer", summary: "Parsed logs.", output: log_artifact }
      ]
    };
  }

  // Derive search queries from the parsed logs: error name, tokens from the
  // error message, and function names / basenames from the top stack frames.
  // Queries shorter than 3 chars are dropped; at most 25 are returned.
  function buildSearchQueries(log_artifact) {
    const queries = new Set();
    if (log_artifact?.error_name) queries.add(log_artifact.error_name);
    if (log_artifact?.error_message) {
      String(log_artifact.error_message)
        .split(/[^a-zA-Z0-9_./-]+/g)
        .map((t) => t.trim())
        .filter(Boolean)
        .slice(0, 20)
        .forEach((t) => queries.add(t));
    }
    (log_artifact?.frames || []).slice(0, 10).forEach((frame) => {
      if (frame.function_name) queries.add(frame.function_name);
      if (frame.file_path) {
        const basename = frame.file_path.split("/").pop();
        if (basename) queries.add(basename);
      }
    });
    return [...queries].filter((q) => q && q.trim().length >= 3).slice(0, 25);
  }

  // Node: search the repository for the derived queries.
  async function searchCode(state) {
    const queries = buildSearchQueries(state.log_artifact);
    const search_result = await tools.code_search.run({
      repo_path: state.repo_path,
      queries
    });
    return {
      search_matches: search_result.matches || [],
      investigation_steps: [
        {
          tool_name: "code_search",
          summary: "Searched codebase.",
          output: { queries, match_count: (search_result.matches || []).length }
        }
      ]
    };
  }

  // Keep the first occurrence of each path, preserving order.
  // (Set.add returns the Set — truthy — so the filter keeps first sightings.)
  function dedupeFilePaths(paths) {
    const seen = new Set();
    return (paths || []).filter((p) => p && !seen.has(p) && seen.add(p));
  }

  // Node: read the first max_files_read matched files (first 200 lines each)
  // to give the patch proposer concrete context.
  async function readFiles(state) {
    const paths = dedupeFilePaths(
      (state.search_matches || []).map((m) => m.file_path)
    ).slice(0, max_files_read);
    const file_contexts = [];
    for (const file_path of paths) {
      const result = await tools.read_file.run({
        file_path,
        start_line: 1,
        end_line: 200
      });
      if (result.ok) file_contexts.push(result);
    }
    return {
      file_contexts,
      investigation_steps: [
        {
          tool_name: "read_file",
          summary: "Read relevant files.",
          output: { file_count: file_contexts.length }
        }
      ]
    };
  }

  // Node: ask the (LLM-backed) patch proposer for a fix plan and derive a
  // hypothesis from its summary. Confidence 0.7 is a fixed heuristic.
  async function proposePatch(state) {
    const fix_plan = await tools.patch_proposer.run({
      log_artifact: state.log_artifact,
      search_matches: state.search_matches || [],
      file_contexts: state.file_contexts || []
    });
    const hypothesis = {
      claim: fix_plan.summary || "LLM proposed fix.",
      confidence: 0.7,
      predicted_locations: (fix_plan.citations || []).slice(0, 5)
    };
    return {
      fix_plan,
      hypothesis,
      investigation_steps: [
        {
          tool_name: "patch_proposer",
          summary: "LLM proposed fix plan.",
          output: { edit_count: (fix_plan.edits || []).length }
        }
      ]
    };
  }

  // Node: turn the fix plan into a unified diff (empty string when the
  // generator produced no patch).
  async function generateDiff(state) {
    const diff_result = await tools.diff_generator.run({
      repo_path: state.repo_path,
      log_artifact: state.log_artifact,
      hypothesis: state.hypothesis,
      search_matches: state.search_matches || [],
      fix_plan_overrides: state.fix_plan
    });
    return {
      patch_text: diff_result.patch_text || "",
      investigation_steps: [
        {
          tool_name: "diff_generator",
          summary: "Generated patch.",
          output: { has_patch: Boolean(diff_result.patch_text) }
        }
      ]
    };
  }

  // Node: run the repository's tests (only reached when state.run_tests).
  async function runTests(state) {
    const validation_result = await tools.test_runner.run({
      repo_path: state.repo_path,
      test_command: state.test_command
    });
    return {
      validation_result,
      investigation_steps: [
        {
          tool_name: "test_runner",
          summary: "Ran tests.",
          output: {
            ok: validation_result.ok,
            exit_code: validation_result.exit_code
          }
        }
      ]
    };
  }

  // Node: decide whether to loop. Iterates only while there is no patch,
  // tests have not passed, and neither the iteration cap nor the time
  // budget has been exceeded. Limits are read from state, not from the
  // destructured options above.
  function assess(state) {
    const iteration_index = (state.iteration_index ?? 0) + 1;
    const tests_ok = state.validation_result?.ok === true;
    const has_patch =
      state.patch_text && String(state.patch_text).trim().length > 0;
    const over_iterations = iteration_index >= (state.max_iterations ?? 5);
    const over_budget =
      Number.isFinite(state.budget_ms) &&
      Date.now() - (state.started_at_ms ?? 0) >= state.budget_ms;
    const should_iterate =
      !tests_ok &&
      !over_iterations &&
      !over_budget &&
      !has_patch;
    return {
      iteration_index,
      should_iterate
    };
  }

  // Wire the nodes: a linear pipeline with two conditional branches —
  // generateDiff optionally detours through runTests, and assess either
  // loops back to searchCode or terminates the graph.
  const graph = new StateGraph(DeepAgentState)
    .addNode("parseLogs", parseLogs)
    .addNode("searchCode", searchCode)
    .addNode("readFiles", readFiles)
    .addNode("proposePatch", proposePatch)
    .addNode("generateDiff", generateDiff)
    .addNode("runTests", runTests)
    .addNode("assess", assess)
    .addEdge(START, "parseLogs")
    .addEdge("parseLogs", "searchCode")
    .addEdge("searchCode", "readFiles")
    .addEdge("readFiles", "proposePatch")
    .addEdge("proposePatch", "generateDiff")
    .addConditionalEdges("generateDiff", (state) => {
      return state.run_tests ? "runTests" : "assess";
    })
    .addEdge("runTests", "assess")
    .addConditionalEdges("assess", (state) => {
      return state.should_iterate ? "searchCode" : END;
    });

  return graph.compile();
}
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
import { LogAnalyzerTool } from "../tools/LogAnalyzerTool.js";
|
|
2
|
+
import { CodeSearchTool } from "../tools/CodeSearchTool.js";
|
|
3
|
+
import { GitDiffTool } from "../tools/GitDiffTool.js";
|
|
4
|
+
import { TestRunnerTool } from "../tools/TestRunnerTool.js";
|
|
5
|
+
import { ReadFileTool } from "../tools/ReadFileTool.js";
|
|
6
|
+
import { PatchProposerTool } from "../tools/PatchProposerTool.js";
|
|
7
|
+
import { createLlmClient } from "../adapters/llm_client_langchain.js";
|
|
8
|
+
import { createAgentGraph } from "./agent_graph.js";
|
|
9
|
+
|
|
10
|
+
/**
 * Run the v2 (LangGraph) deep-agent engine over a single repository.
 *
 * Builds the tool set, wires them into the agent graph, invokes it once,
 * and normalizes the final graph state into the report shape shared with
 * the v1 engine (hypotheses list, patch_text, has_error, ...).
 *
 * @param {object} args
 * @param {string} args.repo_path - Absolute path of the target repository.
 * @param {string} args.raw_logs - Raw log / stack-trace text.
 * @param {object} args.cli_options - Parsed commander options (camelCase).
 * @returns {Promise<object>} Final report object consumed by write_report.
 * @throws {Error} Propagated from createLlmClient when the provider is
 *   unsupported or the API key environment variable is missing.
 */
export async function run_deep_agent_v2({ repo_path, raw_logs, cli_options }) {
  // A single --max-bytes-read value caps both the per-file and the total
  // read budgets; falsy (0/undefined) falls back to tool defaults.
  const byte_budget = cli_options.maxBytesRead || undefined;

  const tools = {
    log_analyzer: new LogAnalyzerTool({ repo_path }),
    code_search: new CodeSearchTool({ repo_path }),
    read_file: new ReadFileTool({
      repo_path,
      max_bytes_per_file: byte_budget,
      max_total_bytes: byte_budget
    }),
    // createLlmClient validates provider + API key up front, so a
    // misconfigured environment fails before any graph work starts.
    patch_proposer: new PatchProposerTool({
      llm: createLlmClient({
        provider: cli_options.provider,
        model: cli_options.model,
        apiKeyEnv: cli_options.apiKeyEnv
      })
    }),
    diff_generator: new GitDiffTool({ repo_path }),
    test_runner: new TestRunnerTool({ repo_path })
  };

  const options = {
    max_iterations: cli_options.maxIterations ?? 5,
    budget_ms: cli_options.budgetMs,
    run_tests: Boolean(cli_options.runTests),
    test_command: cli_options.testCommand,
    max_files_read: cli_options.maxFilesRead ?? 32
  };

  const graph = createAgentGraph({ tools, options });

  // The loop limits are mirrored into graph state because the assess node
  // and conditional edges read them from state.
  const final_state = await graph.invoke({
    repo_path,
    raw_logs,
    run_tests: options.run_tests,
    test_command: options.test_command,
    max_iterations: options.max_iterations,
    budget_ms: options.budget_ms,
    started_at_ms: Date.now(),
    iteration_index: 0
  });

  // has_error is true only when tests actually ran and failed.
  const validation = final_state.validation_result;
  const failed_validation = Boolean(validation) && !validation.ok;

  return {
    repo_path: final_state.repo_path ?? repo_path,
    raw_logs: final_state.raw_logs ?? raw_logs,
    log_artifact: final_state.log_artifact,
    hypotheses: final_state.hypothesis ? [final_state.hypothesis] : [],
    investigation_steps: final_state.investigation_steps ?? [],
    fix_plan: final_state.fix_plan,
    patch_text: final_state.patch_text ?? "",
    validation_result: final_state.validation_result ?? null,
    has_error: failed_validation
  };
}
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
import { Command } from "commander";
|
|
2
|
+
import process from "node:process";
|
|
3
|
+
import { run_analyze_command } from "./run_analyze_command.js";
|
|
4
|
+
|
|
5
|
+
/**
 * Entry point for the error-trace-debugger CLI.
 *
 * Declares the `analyze` command with all its flags and delegates the real
 * work to run_analyze_command, injecting process streams so the command
 * implementation stays testable.
 *
 * @param {string[]} argv - Full process argv (node binary, script, args).
 */
export async function run_cli(argv) {
  const program = new Command();

  program.name("error-trace-debugger");
  program.description(
    "Analyze logs/stack traces, investigate a repo, and output suggested fixes + a patch."
  );
  // NOTE(review): version string duplicates package.json — keep in sync.
  program.version("0.1.0");

  program
    .command("analyze")
    .description("Analyze logs and propose a fix patch.")
    // Exactly one of --logs / --stack / --stdin is expected; the
    // exclusivity check lives in read_logs_input, not here.
    .option("--logs <path>", "Path to a logs file")
    .option("--stack <string>", "Inline stack trace / error text")
    .option("--stdin", "Read logs from stdin")
    .option("--repo <path>", "Repo path (default: cwd)")
    .option("--max-iterations <n>", "Max investigation iterations", to_int, 5)
    .option("--budget-ms <n>", "Time budget in ms", to_int)
    .option("--engine <engine>", "Engine: v1|v2", "v1")
    // v2-only knobs; ignored by the v1 engine.
    .option("--provider <provider>", "LLM provider for v2 (e.g. openai)")
    .option("--model <model>", "LLM model name for v2")
    .option(
      "--api-key-env <name>",
      "Environment variable name that holds the LLM API key"
    )
    .option("--max-tool-calls <n>", "Max tool calls for v2", to_int, 32)
    .option("--max-files-read <n>", "Max files the agent may read", to_int, 64)
    .option(
      "--max-bytes-read <n>",
      "Max total bytes the agent may read",
      to_int,
      5 * 1024 * 1024
    )
    .option("--format <format>", "Output format: md|json|text", "md")
    .option("--out <path>", "Write report to path")
    .option("--patch-out <path>", "Write patch (unified diff) to path")
    .option("--run-tests", "Run tests after proposing a patch", false)
    .option(
      "--test-command <cmd>",
      "Explicit test command (default: auto-detect)"
    )
    .action(async (options) => {
      const exit_code = await run_analyze_command({
        options,
        stdin: process.stdin,
        stdout: process.stdout,
        stderr: process.stderr
      });
      // Assign exitCode rather than calling process.exit() so pending
      // stdout/stderr writes are flushed before the process ends.
      process.exitCode = exit_code;
    });

  program.showHelpAfterError(true);
  await program.parseAsync(argv);
}
|
|
59
|
+
|
|
60
|
+
// Commander argument parser: convert a CLI string to a base-10 integer.
// Throws when the value does not parse at all; note that parseInt accepts
// leading digits ("12px" -> 12), matching the original behavior.
function to_int(value) {
  const parsed = Number.parseInt(value, 10);
  if (Number.isFinite(parsed)) {
    return parsed;
  }
  throw new Error(`Expected integer, got: ${value}`);
}
|
|
67
|
+
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
import path from "node:path";
|
|
2
|
+
import process from "node:process";
|
|
3
|
+
import { read_logs_input } from "./stdin_logs.js";
|
|
4
|
+
import { Orchestrator } from "../core/Orchestrator.js";
|
|
5
|
+
import { create_default_tools } from "../tools/create_default_tools.js";
|
|
6
|
+
import { write_report } from "../core/report_writer.js";
|
|
7
|
+
import { write_text_file } from "../core/write_text_file.js";
|
|
8
|
+
|
|
9
|
+
/**
 * Implementation of the `analyze` CLI command.
 *
 * Resolves the logs input, runs the selected engine (v1 heuristic loop or
 * v2 LangGraph agent), and routes the rendered report: to a default file in
 * the repo root (md + no --out), to --out, or to stdout. Optionally writes
 * the unified patch to --patch-out.
 *
 * @param {object} args
 * @param {object} args.options - Parsed commander options (camelCase).
 * @param {object} args.stdin - Readable stream for --stdin input.
 * @param {object} args.stdout - Writable stream for normal output.
 * @param {object} args.stderr - Writable stream for error output.
 * @returns {Promise<number>} Exit code: 0 ok, 1 validation failure, 2 bad input.
 */
export async function run_analyze_command({ options, stdin, stdout, stderr }) {
  const repo_path = options.repo ? path.resolve(options.repo) : process.cwd();
  const logs_result = await read_logs_input({
    logs_path: options.logs,
    stack_text: options.stack,
    use_stdin: Boolean(options.stdin),
    stdin
  });

  if (!logs_result.ok) {
    stderr.write(`${logs_result.error}\n`);
    return 2;
  }

  // v2 is imported lazily so v1 users never pay for the LangChain deps.
  const engine_name = options.engine || "v1";
  const final_report =
    engine_name === "v2"
      ? await (await import("../agents/deep_agent_v2.js")).run_deep_agent_v2({
          repo_path,
          raw_logs: logs_result.value,
          cli_options: options
        })
      : await run_v1_engine({
          repo_path,
          raw_logs: logs_result.value,
          options
        });

  const report_text = write_report(final_report, { format: options.format });

  // Default UX: with --format md and no --out, write the report to the repo
  // root and keep the terminal output short. When format is "md",
  // report_text IS the markdown render — the previous code rendered the
  // markdown report a second time unconditionally (even for json/text runs,
  // where it was then discarded); reuse report_text instead.
  const should_suppress_stdout =
    !options.out && options.format === "md" && report_text.length > 0;
  // NOTE(review): no --apply flag is declared by the CLI, so options.apply
  // is always undefined there; the check is kept for programmatic callers.
  const should_print_checkout_message =
    (should_suppress_stdout || Boolean(options.out)) && !options.apply;

  if (should_suppress_stdout) {
    const default_md_path = path.join(repo_path, "error-trace-debugger-report.md");
    await write_text_file(default_md_path, report_text);
    stdout.write(
      `Report written to: error-trace-debugger-report.md (${default_md_path})\n`
    );
    stdout.write("Please check out that file for the full output.\n");
  } else if (options.out) {
    const out_path = path.resolve(options.out);
    await write_text_file(out_path, report_text);
    if (should_print_checkout_message) {
      const file_name = path.basename(out_path);
      stdout.write(`Report written to: ${file_name} (${out_path})\n`);
      stdout.write("Please check out that file for the full output.\n");
    }
  } else {
    stdout.write(report_text);
    if (!report_text.endsWith("\n")) stdout.write("\n");
  }

  if (options.patchOut && final_report.patch_text) {
    await write_text_file(path.resolve(options.patchOut), final_report.patch_text);
  }

  return final_report.has_error ? 1 : 0;
}
|
|
71
|
+
|
|
72
|
+
// Run the heuristic v1 engine: default tool set driven by the Orchestrator
// loop (parse -> search -> diff -> optional tests). Returns the final report.
async function run_v1_engine({ repo_path, raw_logs, options }) {
  const orchestrator = new Orchestrator({
    repo_path,
    tools: create_default_tools({ repo_path }),
    max_iterations: options.maxIterations,
    budget_ms: options.budgetMs,
    run_tests: Boolean(options.runTests),
    test_command: options.testCommand
  });
  return orchestrator.run({ raw_logs });
}
|
|
85
|
+
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
import fs from "node:fs/promises";
|
|
2
|
+
|
|
3
|
+
/**
 * Resolve the logs text from exactly one of three sources: a file path,
 * an inline stack string, or stdin.
 *
 * @param {object} args
 * @param {string} [args.logs_path] - Path to a logs file (--logs).
 * @param {string} [args.stack_text] - Inline stack trace text (--stack).
 * @param {boolean} [args.use_stdin] - Read from stdin instead (--stdin).
 * @param {object} [args.stdin] - Readable stream used when use_stdin is set.
 * @returns {Promise<{ok: true, value: string} | {ok: false, error: string}>}
 *   Result object; never throws for expected input problems.
 */
export async function read_logs_input({
  logs_path,
  stack_text,
  use_stdin,
  stdin
}) {
  const has_logs_path = Boolean(logs_path);
  const has_stack_text = typeof stack_text === "string" && stack_text.trim().length > 0;
  const has_stdin = Boolean(use_stdin);

  // The three sources are mutually exclusive; exactly one must be given.
  const selected_count = Number(has_logs_path) + Number(has_stack_text) + Number(has_stdin);
  if (selected_count !== 1) {
    return {
      ok: false,
      error: "Provide exactly one of --logs, --stack, or --stdin."
    };
  }

  if (has_logs_path) {
    try {
      const raw_text = await fs.readFile(logs_path, "utf8");
      return { ok: true, value: raw_text };
    } catch (error) {
      // Previously a missing/unreadable file rejected uncaught and crashed
      // the CLI with a raw stack trace; surface it through the result
      // shape like every other input failure (caller exits with code 2).
      return {
        ok: false,
        error: `Could not read logs file "${logs_path}": ${error.message}`
      };
    }
  }

  if (has_stack_text) return { ok: true, value: stack_text };

  const stdin_text = await read_all_stdin(stdin);
  if (stdin_text.trim().length === 0) {
    return { ok: false, error: "No input read from stdin." };
  }
  return { ok: true, value: stdin_text };
}
|
|
34
|
+
|
|
35
|
+
// Drain the given readable stream and resolve with its entire contents
// decoded as a single UTF-8 string; rejects on stream error.
function read_all_stdin(stdin) {
  return new Promise((resolve, reject) => {
    let buffered_text = "";
    stdin.setEncoding("utf8");
    stdin.on("data", (piece) => {
      buffered_text += piece;
    });
    stdin.on("end", () => {
      resolve(buffered_text);
    });
    stdin.on("error", reject);
  });
}
|
|
44
|
+
|