ci-triage 0.1.0 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/flake-store.js +106 -0
- package/dist/index.js +167 -18
- package/dist/llm-analyzer.js +87 -0
- package/dist/providers/circleci.js +106 -0
- package/dist/providers/github.js +103 -0
- package/dist/providers/gitlab.js +96 -0
- package/dist/providers/index.js +36 -0
- package/dist/providers/types.js +2 -0
- package/package.json +5 -2
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
import Database from 'better-sqlite3';
|
|
2
|
+
import { mkdirSync } from 'node:fs';
|
|
3
|
+
import { homedir } from 'node:os';
|
|
4
|
+
import { join } from 'node:path';
|
|
5
|
+
// Lazily-created singleton database handle (opened by getDb, released by closeDb).
let _db = null;

/**
 * Resolve — and ensure the existence of — the on-disk location of the flake
 * database: `~/.ci-triage/flake.db`.
 * @returns {string} absolute path to the SQLite database file
 */
function dbPath() {
  const storageDir = join(homedir(), '.ci-triage');
  mkdirSync(storageDir, { recursive: true });
  return join(storageDir, 'flake.db');
}
|
|
11
|
+
/**
 * Open the SQLite connection on first use, create the schema, and memoize
 * the handle in the module-level `_db` singleton.
 * @returns the shared better-sqlite3 Database instance
 */
function getDb() {
  if (_db === null) {
    _db = new Database(dbPath());
    bootstrap(_db);
  }
  return _db;
}
|
|
18
|
+
/**
 * Create the flake-store schema (idempotent; every statement is IF NOT EXISTS).
 * Two tables: `runs` (one row per ingested CI run, keyed internally by rowid)
 * and `test_results` (one row per test outcome, FK back to runs), plus
 * indexes covering the lookups done by persistRun/getFlakes/isFlakySqlite.
 * @param db open better-sqlite3 Database handle
 */
function bootstrap(db) {
  db.exec(`
    CREATE TABLE IF NOT EXISTS runs (
      id INTEGER PRIMARY KEY AUTOINCREMENT,
      repo TEXT NOT NULL,
      run_id TEXT NOT NULL,
      created_at TEXT NOT NULL
    );

    CREATE TABLE IF NOT EXISTS test_results (
      id INTEGER PRIMARY KEY AUTOINCREMENT,
      run_id INTEGER NOT NULL,
      test_name TEXT NOT NULL,
      status TEXT NOT NULL,
      FOREIGN KEY (run_id) REFERENCES runs(id)
    );

    CREATE INDEX IF NOT EXISTS idx_runs_repo ON runs(repo);
    CREATE INDEX IF NOT EXISTS idx_test_results_run_id ON test_results(run_id);
    CREATE INDEX IF NOT EXISTS idx_test_results_test_name ON test_results(test_name);
  `);
}
|
|
40
|
+
/**
 * Persist a run and its test outcomes.
 * Idempotent: if (repo, run_id) is already stored, this is a no-op.
 * The run row and all of its test rows are inserted in one transaction.
 * @param {string} repo repository identifier (e.g. "owner/repo")
 * @param {string|number} runId provider run/pipeline id (stored as text)
 * @param {string} createdAt timestamp string for the run
 * @param {Record<string, string>} tests map of test name -> status ('pass'/'fail'/...)
 */
export function persistRun(repo, runId, createdAt, tests) {
  const db = getDb();
  const runKey = String(runId);
  // Deduplicate: skip if this run was already ingested.
  const alreadyStored = db
    .prepare('SELECT id FROM runs WHERE repo = ? AND run_id = ?')
    .get(repo, runKey);
  if (alreadyStored) {
    return;
  }
  const insertRun = db.prepare('INSERT INTO runs (repo, run_id, created_at) VALUES (?, ?, ?)');
  const insertTest = db.prepare('INSERT INTO test_results (run_id, test_name, status) VALUES (?, ?, ?)');
  db.transaction(() => {
    const { lastInsertRowid } = insertRun.run(repo, runKey, createdAt);
    for (const [testName, status] of Object.entries(tests)) {
      insertTest.run(lastInsertRowid, testName, status);
    }
  })();
}
|
|
58
|
+
/**
 * Get flake stats for all known tests in a repo.
 * A test counts as flaky only when history shows at least one pass AND at
 * least one fail/error (HAVING clause). Results are ordered by fail count.
 * @param {string} repo repository identifier
 * @returns {Array<object>} rows with test_name, fail/pass/total counts,
 *   last_seen, repo, and a derived flake_ratio in [0, 1]
 */
export function getFlakes(repo) {
  const db = getDb();
  const flakeQuery = `
    SELECT
      tr.test_name,
      SUM(CASE WHEN tr.status IN ('fail', 'error') THEN 1 ELSE 0 END) AS fail_count,
      SUM(CASE WHEN tr.status = 'pass' THEN 1 ELSE 0 END) AS pass_count,
      COUNT(*) AS total_count,
      MAX(r.created_at) AS last_seen
    FROM test_results tr
    JOIN runs r ON tr.run_id = r.id
    WHERE r.repo = ?
    GROUP BY tr.test_name
    HAVING fail_count > 0 AND pass_count > 0
    ORDER BY fail_count DESC
  `;
  const withRatio = (row) => ({
    ...row,
    repo,
    flake_ratio: row.total_count > 0 ? row.fail_count / row.total_count : 0,
  });
  return db.prepare(flakeQuery).all(repo).map(withRatio);
}
|
|
81
|
+
/**
 * Check if a specific test is flaky in a repo based on stored history.
 * Flaky = the test has been seen both passing and failing at least once.
 * @param {string} repo repository identifier
 * @param {string} testName exact stored test name
 * @returns {{is_flaky: boolean, fail_count: number, pass_count: number, flake_ratio: number}}
 */
export function isFlakySqlite(repo, testName) {
  const db = getDb();
  const statsQuery = `
    SELECT
      SUM(CASE WHEN tr.status IN ('fail', 'error') THEN 1 ELSE 0 END) AS fail_count,
      SUM(CASE WHEN tr.status = 'pass' THEN 1 ELSE 0 END) AS pass_count,
      COUNT(*) AS total_count
    FROM test_results tr
    JOIN runs r ON tr.run_id = r.id
    WHERE r.repo = ? AND tr.test_name = ?
  `;
  const row = db.prepare(statsQuery).get(repo, testName);
  // No history at all for this test -> trivially not flaky.
  if (!row || row.total_count === 0) {
    return { is_flaky: false, fail_count: 0, pass_count: 0, flake_ratio: 0 };
  }
  const { fail_count, pass_count, total_count } = row;
  return {
    is_flaky: fail_count > 0 && pass_count > 0,
    fail_count,
    pass_count,
    flake_ratio: fail_count / total_count,
  };
}
|
|
100
|
+
/** Close the DB (useful in tests). Safe to call when nothing is open. */
export function closeDb() {
  if (_db === null) {
    return;
  }
  _db.close();
  _db = null;
}
|
package/dist/index.js
CHANGED
|
@@ -1,66 +1,212 @@
|
|
|
1
1
|
#!/usr/bin/env node
import { writeFileSync } from 'node:fs';
import { createRequire } from 'node:module';
import { classify } from './classifier.js';
import { parseFailures } from './parser.js';
import { buildJsonReport, toConsoleText, toJson, toMarkdown } from './reporter.js';
import { getProvider, detectProvider } from './providers/index.js';
import { getFlakes, persistRun } from './flake-store.js';
import { analyzeLlm } from './llm-analyzer.js';
// --version support
// ESM has no __dirname, so use createRequire to load our own package.json
// relative to this module. Falls back to 'unknown' if it cannot be read
// (e.g. an unusual install layout).
const _require = createRequire(import.meta.url);
let pkgVersion = 'unknown';
try {
  const pkg = _require('../package.json');
  pkgVersion = pkg.version;
}
catch {
  // ignore
}
|
|
7
20
|
/**
 * Print CLI usage/help to stderr and exit with status 1. Never returns.
 * Uses the pkgVersion resolved at module load.
 */
function usage() {
  console.error([
    `ci-triage v${pkgVersion}`,
    '',
    'Usage:',
    ' ci-triage owner/repo [limit] [--run <id>] [--md report.md] [--json]',
    ' ci-triage flakes owner/repo',
    '',
    'Options:',
    ' --run <id> Triage a specific run ID',
    ' --md <path> Write markdown report to file',
    ' --json Output JSON instead of human text',
    ' --provider <p> Force CI provider: github | gitlab | circleci',
    ' --llm Enable LLM root-cause analysis (requires OPENAI_API_KEY)',
    ' --llm-model <model> Override LLM model (default: gpt-4.1-mini)',
    ' --version Print version and exit',
  ].join('\n'));
  process.exit(1);
}
|
|
11
39
|
// Flags whose following argv token is a value, not a positional argument.
const VALUE_FLAGS = new Set(['--run', '--md', '--provider', '--llm-model']);
// owner/repo (GitHub/CircleCI) or group/project (GitLab, possibly URL-encoded).
const REPO_PATTERN = /^[A-Za-z0-9_./%-]+\/[A-Za-z0-9_./%-]+$/;
/**
 * Parse process.argv into CLI options.
 * Exits the process directly for --version/-v, for a malformed `flakes`
 * invocation, and (via usage()) for a missing or invalid repo argument.
 * @param {string[]} argv raw process.argv (node, script, ...args)
 * @returns {{repo: string, runId?: number, limit: number, markdownPath?: string,
 *   outputJson: boolean, provider?: string, llm: boolean, llmModel?: string,
 *   subcommand?: string}}
 */
function parseArgs(argv) {
  const args = argv.slice(2);
  // --version
  if (args.includes('--version') || args.includes('-v')) {
    process.stdout.write(`ci-triage v${pkgVersion}\n`);
    process.exit(0);
  }
  // subcommand: ci-triage flakes owner/repo
  if (args[0] === 'flakes') {
    const repo = args[1];
    if (!repo || !REPO_PATTERN.test(repo)) {
      console.error('Usage: ci-triage flakes owner/repo');
      process.exit(1);
    }
    return { repo, limit: 0, outputJson: false, subcommand: 'flakes' };
  }
  const repo = args[0];
  if (!repo || !REPO_PATTERN.test(repo)) {
    usage();
  }
  // Positional args after the repo. BUG FIX: the previous filter dropped
  // `--flags` but kept their VALUE tokens, so `--run 123` made `123` the
  // limit and `--md x.md` made parsedLimit NaN. Value-taking flags now
  // consume their following token.
  const positionalArgs = [];
  for (let i = 1; i < args.length; i += 1) {
    const arg = args[i];
    if (arg.startsWith('--')) {
      if (VALUE_FLAGS.has(arg)) {
        i += 1; // skip this flag's value token
      }
      continue;
    }
    positionalArgs.push(arg);
  }
  const parsedLimit = positionalArgs[0] ? Number(positionalArgs[0]) : 10;
  // Clamp the run-list limit to [1, 100]; non-numeric input falls back to 10.
  const limit = Math.min(Math.max(Number.isFinite(parsedLimit) ? parsedLimit : 10, 1), 100);
  const runIdx = args.indexOf('--run');
  const runIdRaw = runIdx >= 0 ? args[runIdx + 1] : undefined;
  const runId = runIdRaw ? Number(runIdRaw) : undefined;
  const mdIdx = args.indexOf('--md');
  const markdownPath = mdIdx >= 0 ? args[mdIdx + 1] ?? '' : undefined;
  const providerIdx = args.indexOf('--provider');
  const provider = providerIdx >= 0 ? args[providerIdx + 1] : undefined;
  const llmModelIdx = args.indexOf('--llm-model');
  const llmModel = llmModelIdx >= 0 ? args[llmModelIdx + 1] : undefined;
  return {
    repo,
    runId: Number.isFinite(runId) ? runId : undefined,
    limit,
    markdownPath,
    outputJson: args.includes('--json'),
    provider,
    llm: args.includes('--llm'),
    llmModel,
  };
}
|
|
33
|
-
function
|
|
34
|
-
|
|
35
|
-
|
|
82
|
+
async function runFlakesCommand(repo) {
|
|
83
|
+
const flakes = getFlakes(repo);
|
|
84
|
+
if (flakes.length === 0) {
|
|
85
|
+
console.log(`No flaky tests found for ${repo} in local history.`);
|
|
86
|
+
console.log('Run ci-triage a few times to build up history.');
|
|
87
|
+
return;
|
|
36
88
|
}
|
|
37
|
-
|
|
38
|
-
|
|
89
|
+
console.log(`\nKnown flaky tests in ${repo}:\n`);
|
|
90
|
+
console.log(`${'Test Name'.padEnd(60)} ${'Fails'.padStart(6)} ${'Passes'.padStart(7)} ${'Ratio'.padStart(7)} Last Seen`);
|
|
91
|
+
console.log('-'.repeat(110));
|
|
92
|
+
for (const f of flakes) {
|
|
93
|
+
const ratio = `${(f.flake_ratio * 100).toFixed(1)}%`;
|
|
94
|
+
const lastSeen = f.last_seen.slice(0, 10);
|
|
95
|
+
console.log(`${f.test_name.slice(0, 59).padEnd(60)} ${String(f.fail_count).padStart(6)} ${String(f.pass_count).padStart(7)} ${ratio.padStart(7)} ${lastSeen}`);
|
|
96
|
+
}
|
|
97
|
+
console.log(`\nTotal flaky tests: ${flakes.length}`);
|
|
39
98
|
}
|
|
40
|
-
function main() {
|
|
99
|
+
async function main() {
|
|
41
100
|
const options = parseArgs(process.argv);
|
|
42
|
-
|
|
101
|
+
// Handle subcommands
|
|
102
|
+
if (options.subcommand === 'flakes') {
|
|
103
|
+
await runFlakesCommand(options.repo);
|
|
104
|
+
return;
|
|
105
|
+
}
|
|
106
|
+
// Resolve provider
|
|
107
|
+
const providerName = options.provider ?? detectProvider();
|
|
108
|
+
const provider = getProvider(providerName);
|
|
109
|
+
const canHandle = await provider.canHandle(options.repo);
|
|
110
|
+
if (!canHandle) {
|
|
111
|
+
process.exit(1);
|
|
112
|
+
}
|
|
113
|
+
// Resolve run
|
|
114
|
+
let run = null;
|
|
115
|
+
try {
|
|
116
|
+
run = await provider.resolveRun(options.repo, options.runId);
|
|
117
|
+
}
|
|
118
|
+
catch (err) {
|
|
119
|
+
console.error(err instanceof Error ? err.message : String(err));
|
|
120
|
+
process.exit(2);
|
|
121
|
+
}
|
|
43
122
|
if (!run) {
|
|
44
123
|
console.error('No failed runs found.');
|
|
45
124
|
process.exit(2);
|
|
46
125
|
}
|
|
47
|
-
|
|
126
|
+
// Fetch logs and metadata in parallel
|
|
127
|
+
const runRef = { provider: providerName, repo: options.repo, runId: run.id };
|
|
128
|
+
const [logBundle, metadata] = await Promise.all([
|
|
129
|
+
provider.fetchLogs(runRef),
|
|
130
|
+
provider.fetchMetadata(runRef),
|
|
131
|
+
]);
|
|
132
|
+
const rawLog = logBundle.combined;
|
|
48
133
|
const failures = parseFailures(rawLog);
|
|
49
134
|
const classified = failures.map((failure) => ({
|
|
50
135
|
...failure,
|
|
51
136
|
classification: classify(failure),
|
|
52
137
|
}));
|
|
138
|
+
// Build compat RunInfo for reporter
|
|
139
|
+
const runInfo = {
|
|
140
|
+
databaseId: Number(run.id) || 0,
|
|
141
|
+
displayTitle: run.displayTitle,
|
|
142
|
+
workflowName: run.workflowName,
|
|
143
|
+
conclusion: run.conclusion,
|
|
144
|
+
url: run.url,
|
|
145
|
+
};
|
|
53
146
|
const report = buildJsonReport({
|
|
54
147
|
repo: options.repo,
|
|
55
|
-
run,
|
|
148
|
+
run: runInfo,
|
|
56
149
|
failures: classified,
|
|
57
|
-
metadata:
|
|
150
|
+
metadata: {
|
|
151
|
+
headSha: metadata.headSha,
|
|
152
|
+
headBranch: metadata.headBranch,
|
|
153
|
+
event: metadata.event,
|
|
154
|
+
},
|
|
58
155
|
});
|
|
156
|
+
// LLM analysis (gated)
|
|
157
|
+
const llmEnabled = options.llm || !!process.env['OPENAI_API_KEY'];
|
|
158
|
+
if (llmEnabled) {
|
|
159
|
+
const failureEntries = classified.map((f) => ({
|
|
160
|
+
type: f.classification?.type ?? 'unknown',
|
|
161
|
+
error: f.error,
|
|
162
|
+
stack: f.stack?.join('\n'),
|
|
163
|
+
category: f.classification?.category ?? 'unknown',
|
|
164
|
+
severity: f.classification?.severity ?? 'low',
|
|
165
|
+
suggested_fix: f.classification?.suggestedFix ?? '',
|
|
166
|
+
flaky: { is_flaky: false, confidence: 0, pass_rate_7d: 1, last_5_runs: [] },
|
|
167
|
+
}));
|
|
168
|
+
const analysis = await analyzeLlm(failureEntries, rawLog, {
|
|
169
|
+
model: options.llmModel,
|
|
170
|
+
enabled: true,
|
|
171
|
+
});
|
|
172
|
+
report.analysis = analysis;
|
|
173
|
+
}
|
|
174
|
+
// Persist to SQLite flake store (best-effort)
|
|
175
|
+
try {
|
|
176
|
+
const testMap = {};
|
|
177
|
+
for (const f of classified) {
|
|
178
|
+
if (f.stepName)
|
|
179
|
+
testMap[f.stepName] = 'fail';
|
|
180
|
+
}
|
|
181
|
+
persistRun(options.repo, String(run.id), new Date().toISOString(), testMap);
|
|
182
|
+
}
|
|
183
|
+
catch {
|
|
184
|
+
// non-fatal
|
|
185
|
+
}
|
|
59
186
|
if (options.outputJson) {
|
|
60
187
|
process.stdout.write(toJson(report));
|
|
61
188
|
}
|
|
62
189
|
else {
|
|
63
190
|
process.stdout.write(toConsoleText(report));
|
|
191
|
+
// Print LLM analysis summary if available
|
|
192
|
+
if (report.analysis?.mode === 'llm') {
|
|
193
|
+
console.log('\n── LLM Root-Cause Analysis ─────────────────────────────');
|
|
194
|
+
console.log(`Model: ${report.analysis.model} (${report.analysis.provider})`);
|
|
195
|
+
if (report.analysis.root_cause) {
|
|
196
|
+
console.log(`Root Cause: ${report.analysis.root_cause}`);
|
|
197
|
+
}
|
|
198
|
+
if (report.analysis.fix_suggestions?.length) {
|
|
199
|
+
console.log('Fix Suggestions:');
|
|
200
|
+
for (const s of report.analysis.fix_suggestions) {
|
|
201
|
+
console.log(` • ${s}`);
|
|
202
|
+
}
|
|
203
|
+
}
|
|
204
|
+
if (report.analysis.llm?.usage) {
|
|
205
|
+
const u = report.analysis.llm.usage;
|
|
206
|
+
console.log(`Tokens: ${u.input_tokens} in / ${u.output_tokens} out — est. $${u.estimated_cost_usd.toFixed(4)}`);
|
|
207
|
+
}
|
|
208
|
+
console.log('─────────────────────────────────────────────────────────\n');
|
|
209
|
+
}
|
|
64
210
|
}
|
|
65
211
|
if (options.markdownPath) {
|
|
66
212
|
writeFileSync(options.markdownPath, toMarkdown(report), 'utf8');
|
|
@@ -69,4 +215,7 @@ function main() {
|
|
|
69
215
|
}
|
|
70
216
|
}
|
|
71
217
|
}
|
|
72
|
-
main()
|
|
218
|
+
main().catch((err) => {
|
|
219
|
+
console.error(err instanceof Error ? err.message : String(err));
|
|
220
|
+
process.exit(1);
|
|
221
|
+
});
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
import OpenAI from 'openai';
|
|
2
|
+
/** Pricing per 1M tokens (update as models change) */
const PRICING_PER_1M = {
  'gpt-4.1-mini': { input: 0.40, output: 1.60 },
  'gpt-4.1': { input: 2.00, output: 8.00 },
  'gpt-4o-mini': { input: 0.15, output: 0.60 },
  'gpt-4o': { input: 2.50, output: 10.00 },
};

/**
 * Estimate the USD cost of one request. Unknown models fall back to
 * gpt-4.1-mini pricing.
 * @param {string} model model identifier
 * @param {number} inputTokens prompt tokens consumed
 * @param {number} outputTokens completion tokens produced
 * @returns {number} estimated cost in US dollars
 */
function estimateCost(model, inputTokens, outputTokens) {
  const { input, output } = PRICING_PER_1M[model] ?? { input: 0.40, output: 1.60 };
  return (inputTokens / 1_000_000) * input + (outputTokens / 1_000_000) * output;
}
|
|
13
|
+
/**
 * Build the analyst prompt: at most 5 failure summaries (stacks truncated to
 * 300 chars) plus the last 3000 chars of the raw log, with a JSON response
 * contract appended.
 * @param {Array<{category: string, error: string, stack?: string}>} failures
 * @param {string} logExcerpt raw combined CI log
 * @returns {string} prompt text
 */
function buildPrompt(failures, logExcerpt) {
  const summaryLines = [];
  for (const f of failures.slice(0, 5)) {
    const stackPart = f.stack ? `\n  Stack: ${f.stack.slice(0, 300)}` : '';
    summaryLines.push(`- [${f.category}] ${f.error}${stackPart}`);
  }
  const failureSummary = summaryLines.join('\n');
  const logSnippet = logExcerpt.slice(-3000); // last 3k chars of log
  return `You are a CI failure analyst. Given the following CI failures and log excerpt, provide:
1. A concise root cause (1-2 sentences)
2. Up to 3 specific fix suggestions

Failures:
${failureSummary}

Log excerpt (last section):
${logSnippet}

Respond in JSON format:
{
  "root_cause": "...",
  "fix_suggestions": ["...", "..."]
}`;
}
|
|
35
|
+
/**
 * Run LLM root-cause analysis over the parsed failures.
 * Requires OPENAI_API_KEY; when it is missing, when options.enabled is
 * explicitly false, or when the API call fails, returns a heuristic-mode
 * result with a fallback_reason instead of throwing.
 * @param {Array<object>} failures normalized failure entries
 * @param {string} logExcerpt raw combined CI log
 * @param {{model?: string, enabled?: boolean}} [options]
 * @returns {Promise<object>} analysis payload ({ mode: 'llm' | 'heuristic', ... })
 */
export async function analyzeLlm(failures, logExcerpt, options = {}) {
  const model = options.model ?? 'gpt-4.1-mini';
  const apiKey = process.env['OPENAI_API_KEY'];
  const enabled = options.enabled ?? (!!apiKey);
  if (!enabled || !apiKey) {
    return { mode: 'heuristic', fallback_reason: 'OPENAI_API_KEY not set or --llm not passed' };
  }
  const client = new OpenAI({ apiKey });
  try {
    const prompt = buildPrompt(failures, logExcerpt);
    const response = await client.responses.create({ model, input: prompt });
    const inputTokens = response.usage?.input_tokens ?? 0;
    const outputTokens = response.usage?.output_tokens ?? 0;
    const text = response.output_text ?? '';
    let root_cause = '';
    let fix_suggestions = [];
    try {
      // Strip markdown code fences if present, then parse the JSON contract.
      const cleaned = text.replace(/```(?:json)?\s*/g, '').replace(/```\s*$/g, '').trim();
      const parsed = JSON.parse(cleaned);
      root_cause = parsed.root_cause ?? '';
      fix_suggestions = parsed.fix_suggestions ?? [];
    }
    catch {
      // Non-JSON reply: keep a truncated slice of the raw text as the cause.
      root_cause = text.slice(0, 500);
    }
    return {
      mode: 'llm',
      provider: 'openai',
      model,
      root_cause,
      fix_suggestions,
      llm: {
        usage: {
          input_tokens: inputTokens,
          output_tokens: outputTokens,
          estimated_cost_usd: estimateCost(model, inputTokens, outputTokens),
        },
      },
    };
  }
  catch (err) {
    const reason = err instanceof Error ? err.message : String(err);
    console.error(`[ci-triage] LLM analysis failed, falling back to heuristic: ${reason}`);
    return {
      mode: 'heuristic',
      fallback_reason: reason,
    };
  }
}
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
const CIRCLE_API = 'https://circleci.com/api/v2';

/** Read the CircleCI API token from the environment (may be undefined). */
function token() {
  return process.env['CIRCLE_TOKEN'];
}

/**
 * GET a CircleCI v2 API path and parse the JSON body.
 * @param {string} path path relative to the v2 API root
 * @throws when CIRCLE_TOKEN is unset or the response status is not ok
 */
async function circleFetch(path) {
  const apiToken = token();
  if (!apiToken) {
    throw new Error('CIRCLE_TOKEN env var is required for CircleCI provider.');
  }
  const res = await fetch(`${CIRCLE_API}${path}`, {
    headers: { 'Circle-Token': apiToken },
  });
  if (!res.ok) {
    throw new Error(`CircleCI API error ${res.status}: ${res.statusText} — ${path}`);
  }
  return res.json();
}
|
|
16
|
+
export class CircleCiProvider {
  name = 'circleci';
  // Preflight gate: CIRCLE_TOKEN must be set. Prints setup instructions to
  // stderr and returns false (the caller exits) when it is missing.
  async canHandle(_repo) {
    if (!token()) {
      console.error([
        'Error: CircleCI provider requires CIRCLE_TOKEN environment variable.',
        ' Create a personal API token at: https://app.circleci.com/settings/user/tokens',
        ' Then set: export CIRCLE_TOKEN=<your-token>',
      ].join('\n'));
      return false;
    }
    return true;
  }
  /** repo format: gh/orgname/reponame or bb/org/repo */
  async listRuns(repo, limit) {
    // Only the first page is fetched (blank page-token); per_page capped at 50.
    const data = await circleFetch(`/project/${repo}/pipeline?page-token=&per_page=${Math.min(limit, 50)}`);
    return data.items.map((p) => ({
      id: p.id,
      displayTitle: `Pipeline #${p.number}`,
      workflowName: 'CircleCI',
      // 'created' = still pending here, so no conclusion yet.
      conclusion: p.state === 'created' ? null : p.state,
      url: `https://app.circleci.com/pipelines/${repo}/${p.number}`,
    }));
  }
  // Resolve an explicit pipeline id (must be among the last 20), or the most
  // recent errored/failed pipeline.
  // NOTE(review): assumes pipeline state can be 'errored' or 'failed' —
  // confirm against the CircleCI v2 pipeline state vocabulary.
  async resolveRun(repo, runId) {
    const runs = await this.listRuns(repo, 20);
    if (runId !== undefined) {
      const run = runs.find((r) => String(r.id) === String(runId));
      if (!run)
        throw new Error(`Pipeline ${runId} not found for ${repo}.`);
      return run;
    }
    const failed = runs.find((r) => r.conclusion === 'errored' || r.conclusion === 'failed');
    if (!failed)
      throw new Error(`No failed pipelines found for ${repo} in recent history.`);
    return failed;
  }
  // Walk failed workflows -> failed jobs -> stored artifacts, collecting any
  // *.log / output.txt artifact bodies keyed by job name. Entirely
  // best-effort: errors at any level are swallowed and simply yield less log.
  async fetchLogs(ref) {
    if (!ref.runId)
      throw new Error('runId required for CircleCI provider');
    const byJob = {};
    try {
      // Get workflows for this pipeline
      const wfData = await circleFetch(`/pipeline/${ref.runId}/workflow`);
      for (const wf of wfData.items) {
        if (wf.status !== 'failed')
          continue;
        const jobData = await circleFetch(`/workflow/${wf.id}/job`);
        for (const job of jobData.items) {
          if (job.status !== 'failed' || !job.job_number)
            continue;
          try {
            const artifacts = await circleFetch(`/project/${ref.repo}/${job.job_number}/artifacts`);
            for (const artifact of artifacts.items) {
              if (artifact.path.endsWith('.log') || artifact.path.endsWith('output.txt')) {
                const logRes = await fetch(artifact.url, {
                  headers: { 'Circle-Token': token() },
                });
                // Later matching artifacts overwrite earlier ones for a job.
                if (logRes.ok)
                  byJob[job.name] = await logRes.text();
              }
            }
          }
          catch {
            // skip if artifact fetch fails
          }
        }
      }
    }
    catch {
      // return empty on any error
    }
    return { combined: Object.values(byJob).join('\n'), byJob };
  }
  // Head SHA / branch come from the pipeline's VCS info; the trigger event is
  // hard-coded to 'push' (the v2 pipeline payload is not inspected for it).
  async fetchMetadata(ref) {
    if (!ref.runId)
      return { headSha: '', headBranch: '', event: '' };
    try {
      const pipeline = await circleFetch(`/pipeline/${ref.runId}`);
      return {
        headSha: pipeline.vcs?.revision ?? '',
        headBranch: pipeline.vcs?.branch ?? '',
        event: 'push',
        url: `https://app.circleci.com/pipelines/${ref.repo}/${pipeline.number}`,
      };
    }
    catch {
      return { headSha: '', headBranch: '', event: '' };
    }
  }
}
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
import { execFileSync } from 'node:child_process';
|
|
2
|
+
function runJson(args) {
|
|
3
|
+
const out = execFileSync('gh', args, { encoding: 'utf8', stdio: ['ignore', 'pipe', 'pipe'] });
|
|
4
|
+
return JSON.parse(out);
|
|
5
|
+
}
|
|
6
|
+
function runText(args) {
|
|
7
|
+
return execFileSync('gh', args, { encoding: 'utf8', stdio: ['ignore', 'pipe', 'pipe'] });
|
|
8
|
+
}
|
|
9
|
+
function checkGhAvailable() {
|
|
10
|
+
try {
|
|
11
|
+
execFileSync('gh', ['--version'], { encoding: 'utf8', stdio: ['ignore', 'pipe', 'pipe'] });
|
|
12
|
+
return true;
|
|
13
|
+
}
|
|
14
|
+
catch {
|
|
15
|
+
return false;
|
|
16
|
+
}
|
|
17
|
+
}
|
|
18
|
+
function checkGhAuth() {
|
|
19
|
+
try {
|
|
20
|
+
execFileSync('gh', ['auth', 'status'], { encoding: 'utf8', stdio: ['ignore', 'pipe', 'pipe'] });
|
|
21
|
+
return true;
|
|
22
|
+
}
|
|
23
|
+
catch {
|
|
24
|
+
return false;
|
|
25
|
+
}
|
|
26
|
+
}
|
|
27
|
+
export class GitHubProvider {
  name = 'github';
  // Preflight gate: require a working, authenticated `gh` CLI. Prints
  // actionable setup help to stderr and returns false (the caller exits).
  async canHandle(_repo) {
    if (!checkGhAvailable()) {
      console.error([
        'Error: `gh` CLI is required for GitHub mode but was not found.',
        ' Install: https://cli.github.com/',
        ' macOS: brew install gh',
        ' After installing, run: gh auth login',
      ].join('\n'));
      return false;
    }
    if (!checkGhAuth()) {
      console.error([
        'Error: `gh` CLI is installed but not authenticated.',
        ' Run: gh auth login',
        ' If you intended to use GitLab or CircleCI, pass --provider gitlab|circleci',
      ].join('\n'));
      return false;
    }
    return true;
  }
  // List recent workflow runs via `gh run list`, normalized to the common
  // run shape shared by all providers.
  async listRuns(repo, limit) {
    const runs = runJson([
      'run', 'list', '--repo', repo, '--limit', String(limit),
      '--json', 'databaseId,displayTitle,workflowName,conclusion,url',
    ]);
    return runs.map((r) => ({
      id: r.databaseId,
      displayTitle: r.displayTitle,
      workflowName: r.workflowName,
      conclusion: r.conclusion,
      url: r.url,
    }));
  }
  // Resolve either an explicit run id (must appear within the last 100 runs)
  // or the most recent run whose conclusion is 'failure'.
  async resolveRun(repo, runId) {
    const runs = await this.listRuns(repo, 100);
    if (runId !== undefined) {
      const run = runs.find((r) => String(r.id) === String(runId));
      if (!run)
        throw new Error(`Run ${runId} not found in recent runs for ${repo}.`);
      return run;
    }
    // Most recent failed run
    const failed = runs.find((r) => r.conclusion === 'failure');
    if (!failed)
      throw new Error(`No failed runs found for ${repo} in recent history.`);
    return failed;
  }
  // Fetch only the failed-step logs (`gh run view --log-failed`).
  // Best-effort: any gh failure yields an empty combined log rather than
  // aborting the triage.
  async fetchLogs(ref) {
    if (!ref.runId)
      throw new Error('runId required for GitHub provider');
    try {
      const combined = runText([
        'run', 'view', String(ref.runId), '--repo', ref.repo, '--log-failed',
      ]);
      return { combined };
    }
    catch {
      return { combined: '' };
    }
  }
  // Fetch head SHA / branch / trigger event for the run; falls back to empty
  // strings on any gh failure.
  async fetchMetadata(ref) {
    if (!ref.runId)
      return { headSha: '', headBranch: '', event: '' };
    try {
      const result = runJson([
        'run', 'view', String(ref.runId), '--repo', ref.repo,
        '--json', 'headSha,headBranch,event',
      ]);
      return result;
    }
    catch {
      return { headSha: '', headBranch: '', event: '' };
    }
  }
}
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
const GITLAB_API = 'https://gitlab.com/api/v4';

/** Read the GitLab API token from the environment (may be undefined). */
function token() {
  return process.env['GITLAB_TOKEN'];
}

/**
 * GET a GitLab v4 API path and parse the JSON body.
 * @param {string} path path relative to the v4 API root
 * @throws when GITLAB_TOKEN is unset or the response status is not ok
 */
async function gitlabFetch(path) {
  const accessToken = token();
  if (!accessToken) {
    throw new Error('GITLAB_TOKEN env var is required for GitLab provider.');
  }
  const res = await fetch(`${GITLAB_API}${path}`, {
    headers: { 'PRIVATE-TOKEN': accessToken },
  });
  if (!res.ok) {
    throw new Error(`GitLab API error ${res.status}: ${res.statusText} — ${path}`);
  }
  return res.json();
}

/**
 * repo format: group/project — encode slashes so the whole path is a single
 * GitLab project-id path segment.
 */
function encodedRepo(repo) {
  return encodeURIComponent(repo);
}
|
|
20
|
+
export class GitLabProvider {
  name = 'gitlab';
  // Preflight gate: GITLAB_TOKEN must be set. Prints setup instructions to
  // stderr and returns false (the caller exits) when it is missing.
  async canHandle(_repo) {
    if (!token()) {
      console.error([
        'Error: GitLab provider requires GITLAB_TOKEN environment variable.',
        ' Create a personal access token at: https://gitlab.com/-/profile/personal_access_tokens',
        ' Required scopes: read_api',
        ' Then set: export GITLAB_TOKEN=<your-token>',
      ].join('\n'));
      return false;
    }
    return true;
  }
  // List recent pipelines, normalized to the common run shape.
  async listRuns(repo, limit) {
    const pipelines = await gitlabFetch(`/projects/${encodedRepo(repo)}/pipelines?per_page=${limit}`);
    return pipelines.map((p) => ({
      id: p.id,
      displayTitle: `Pipeline #${p.id} (${p.ref})`,
      workflowName: 'GitLab CI',
      // Map GitLab pipeline status onto the shared conclusion vocabulary;
      // statuses other than success/failed pass through unchanged.
      conclusion: p.status === 'success' ? 'success' : p.status === 'failed' ? 'failure' : p.status,
      url: p.web_url,
    }));
  }
  // Resolve an explicit pipeline id via a direct API lookup, or pick the
  // most recent failed pipeline among the last 20.
  async resolveRun(repo, runId) {
    if (runId !== undefined) {
      const pipeline = await gitlabFetch(`/projects/${encodedRepo(repo)}/pipelines/${runId}`);
      return {
        id: pipeline.id,
        displayTitle: `Pipeline #${pipeline.id} (${pipeline.ref})`,
        workflowName: 'GitLab CI',
        conclusion: pipeline.status === 'success' ? 'success' : pipeline.status === 'failed' ? 'failure' : pipeline.status,
        url: pipeline.web_url,
      };
    }
    const runs = await this.listRuns(repo, 20);
    const failed = runs.find((r) => r.conclusion === 'failure');
    if (!failed)
      throw new Error(`No failed pipelines found for ${repo} in recent history.`);
    return failed;
  }
  // Download the trace of every failed job in the pipeline, keyed by job
  // name. Individual job-trace failures are skipped (best-effort).
  async fetchLogs(ref) {
    if (!ref.runId)
      throw new Error('runId required for GitLab provider');
    const jobs = await gitlabFetch(`/projects/${encodedRepo(ref.repo)}/pipelines/${ref.runId}/jobs?scope[]=failed`);
    const byJob = {};
    for (const job of jobs) {
      try {
        const logRes = await fetch(`${GITLAB_API}/projects/${encodedRepo(ref.repo)}/jobs/${job.id}/trace`, { headers: { 'PRIVATE-TOKEN': token() } });
        if (logRes.ok) {
          byJob[job.name] = await logRes.text();
        }
      }
      catch {
        // skip job if log fetch fails
      }
    }
    const combined = Object.values(byJob).join('\n');
    return { combined, byJob };
  }
  // Head SHA / branch from the pipeline record; the trigger event is
  // hard-coded to 'push' (the pipeline payload is not inspected for it —
  // NOTE(review): confirm whether pipeline.source should be surfaced instead).
  async fetchMetadata(ref) {
    if (!ref.runId)
      return { headSha: '', headBranch: '', event: '' };
    try {
      const pipeline = await gitlabFetch(`/projects/${encodedRepo(ref.repo)}/pipelines/${ref.runId}`);
      return {
        headSha: pipeline.sha,
        headBranch: pipeline.ref,
        event: 'push',
        url: pipeline.web_url,
      };
    }
    catch {
      return { headSha: '', headBranch: '', event: '' };
    }
  }
}
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
import { existsSync } from 'node:fs';
|
|
2
|
+
import { join } from 'node:path';
|
|
3
|
+
import { GitHubProvider } from './github.js';
|
|
4
|
+
import { GitLabProvider } from './gitlab.js';
|
|
5
|
+
import { CircleCiProvider } from './circleci.js';
|
|
6
|
+
export { GitHubProvider } from './github.js';
|
|
7
|
+
export { GitLabProvider } from './gitlab.js';
|
|
8
|
+
export { CircleCiProvider } from './circleci.js';
|
|
9
|
+
// Factory table: one lazily-constructed provider instance per supported CI
// system. Keys are the valid values for the --provider flag.
const providers = {
  github: () => new GitHubProvider(),
  gitlab: () => new GitLabProvider(),
  circleci: () => new CircleCiProvider(),
};
|
|
14
|
+
/**
 * Detect CI provider by local repo markers.
 * Checks cwd and common project root indicators; GitLab's marker wins over
 * CircleCI's, and GitHub is the default when neither is present.
 * @param {string} [cwd] directory to inspect (defaults to process.cwd())
 * @returns {'gitlab'|'circleci'|'github'}
 */
export function detectProvider(cwd = process.cwd()) {
  const markers = [
    ['.gitlab-ci.yml', 'gitlab'],
    ['.circleci', 'circleci'],
  ];
  for (const [marker, providerName] of markers) {
    if (existsSync(join(cwd, marker))) {
      return providerName;
    }
  }
  return 'github';
}
|
|
25
|
+
/**
 * Get a provider instance by name, or auto-detect if not specified.
 * @param {string} [name] one of 'github' | 'gitlab' | 'circleci'
 * @throws when an explicit name is not a known provider
 */
export function getProvider(name) {
  if (!name) {
    // Auto-detect: detectProvider only ever returns a known key.
    return providers[detectProvider()]();
  }
  const factory = providers[name];
  if (!factory) {
    throw new Error(`Unknown provider: "${name}". Valid options: github, gitlab, circleci`);
  }
  return factory();
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "ci-triage",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.2.0",
|
|
4
4
|
"description": "Open-source CI failure triage for humans and agents — smart log parsing, flake detection, structured JSON, MCP server.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"bin": {
|
|
@@ -35,17 +35,20 @@
|
|
|
35
35
|
"license": "MIT",
|
|
36
36
|
"repository": {
|
|
37
37
|
"type": "git",
|
|
38
|
-
"url": "https://github.com/clankamode/ci-triage.git"
|
|
38
|
+
"url": "git+https://github.com/clankamode/ci-triage.git"
|
|
39
39
|
},
|
|
40
40
|
"homepage": "https://github.com/clankamode/ci-triage",
|
|
41
41
|
"devDependencies": {
|
|
42
|
+
"@types/better-sqlite3": "^7.6.13",
|
|
42
43
|
"@types/node": "^22.13.10",
|
|
43
44
|
"typescript": "^5.9.2",
|
|
44
45
|
"vitest": "^4.0.18"
|
|
45
46
|
},
|
|
46
47
|
"dependencies": {
|
|
47
48
|
"@modelcontextprotocol/sdk": "^1.27.1",
|
|
49
|
+
"better-sqlite3": "^12.6.2",
|
|
48
50
|
"fast-xml-parser": "^5.4.1",
|
|
51
|
+
"openai": "^6.25.0",
|
|
49
52
|
"zod": "^4.3.6"
|
|
50
53
|
}
|
|
51
54
|
}
|