chorus-cli 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.js +1184 -0
- package/package.json +29 -0
- package/providers/azuredevops.js +202 -0
- package/providers/github.js +144 -0
- package/providers/index.js +51 -0
- package/scripts/postinstall.js +125 -0
- package/tools/coder.py +970 -0
- package/tools/mapper.py +465 -0
- package/tools/qa.py +528 -0
- package/tools/requirements.txt +3 -0
package/index.js
ADDED
|
@@ -0,0 +1,1184 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
const _path = require('path');
|
|
3
|
+
const _os = require('os');
|
|
4
|
+
const _configDir = _path.join(_os.homedir(), '.config', 'chorus');
|
|
5
|
+
const _configEnv = _path.join(_configDir, '.env');
|
|
6
|
+
const _localEnv = _path.join(__dirname, '.env');
|
|
7
|
+
// Prefer user config dir (works when installed globally), fall back to local .env for dev
|
|
8
|
+
require('dotenv').config({ path: require('fs').existsSync(_configEnv) ? _configEnv : _localEnv });
|
|
9
|
+
const { Octokit } = require('@octokit/rest');
|
|
10
|
+
const Anthropic = require('@anthropic-ai/sdk');
|
|
11
|
+
const { createProvider } = require('./providers');
|
|
12
|
+
const { exec, execFile, spawn } = require('child_process');
|
|
13
|
+
const util = require('util');
|
|
14
|
+
const path = require('path');
|
|
15
|
+
const os = require('os');
|
|
16
|
+
const execPromise = util.promisify(exec);
|
|
17
|
+
const execFilePromise = util.promisify(execFile);
|
|
18
|
+
const fs = require('fs').promises;
|
|
19
|
+
|
|
20
|
+
// Launch coder.py as a child process; its stderr is mirrored to the terminal
// in real time so progress is visible, while stdout (a single JSON document)
// is buffered and parsed as the result.
function runCoder(prompt) {
  return new Promise((resolve, reject) => {
    // Forward proxy credentials to the child only when a proxy is configured.
    const childEnv = { ...process.env };
    if (CONFIG.ai.proxyUrl) {
      childEnv.CODER_PROXY_URL = CONFIG.ai.proxyUrl;
      childEnv.ANTHROPIC_API_KEY = CONFIG.ai.anthropicApiKey;
    }

    const child = spawn(CONFIG.ai.venvPython, [CONFIG.ai.coderPath, '--prompt', prompt], {
      cwd: process.cwd(),
      env: childEnv,
      stdio: ['ignore', 'pipe', 'pipe'],
      timeout: 600000, // hard cap of 10 minutes on a single coder run
    });

    let captured = '';

    child.stdout.on('data', (chunk) => {
      captured += chunk.toString();
    });

    // Stream progress output as it arrives instead of buffering it.
    child.stderr.on('data', (chunk) => {
      process.stderr.write(chunk);
    });

    child.on('close', (code) => {
      try {
        resolve(JSON.parse(captured));
      } catch (e) {
        reject(new Error(`Failed to parse coder output (exit ${code}): ${captured.slice(0, 500)}`));
      }
    });

    child.on('error', reject);
  });
}
|
|
57
|
+
|
|
58
|
+
// Drive a qa.py conversation: the issue context is written to the child's
// stdin as JSON, the final JSON result is read from stdout, and stderr is
// streamed live so the operator can follow the QA exchange.
function runQAChat(issue, enrichedDetails, qaName, useSuper = false) {
  return new Promise((resolve, reject) => {
    const payload = JSON.stringify({
      issue_number: issue.number,
      issue_title: issue.title,
      issue_body: issue.body || '',
      enriched_questions: enrichedDetails,
    });

    // Build the CLI flags for qa.py based on the configured messenger.
    const cliArgs = ['--messenger', CONFIG.messenger, '--qa', qaName];
    if (CONFIG.messenger === 'teams') {
      cliArgs.push('--auth', CONFIG.teams.authPath);
    }
    if (useSuper) cliArgs.push('--super');

    const childEnv = { ...process.env };
    if (CONFIG.ai.proxyUrl) {
      childEnv.CODER_PROXY_URL = CONFIG.ai.proxyUrl;
      childEnv.ANTHROPIC_API_KEY = CONFIG.ai.anthropicApiKey;
    }
    if (CONFIG.messenger === 'slack' && CONFIG.slack.botToken) {
      childEnv.SLACK_BOT_TOKEN = CONFIG.slack.botToken;
    }

    const child = spawn(CONFIG.ai.venvPython, [CONFIG.ai.qaPath, ...cliArgs], {
      cwd: process.cwd(),
      env: childEnv,
      stdio: ['pipe', 'pipe', 'pipe'],
      timeout: 1800000, // 30 min
    });

    let captured = '';

    child.stdin.write(payload);
    child.stdin.end();

    child.stdout.on('data', (chunk) => {
      captured += chunk.toString();
    });

    // Mirror qa.py's progress output to the terminal as it happens.
    child.stderr.on('data', (chunk) => {
      process.stderr.write(chunk);
    });

    child.on('close', (code) => {
      try {
        resolve(JSON.parse(captured));
      } catch (e) {
        reject(new Error(`Failed to parse qa.py output (exit ${code}): ${captured.slice(0, 500)}`));
      }
    });

    child.on('error', reject);
  });
}
|
|
114
|
+
|
|
115
|
+
// ===== CONFIGURATION =====
// All runtime settings, resolved once at startup from environment variables
// (loaded above via dotenv from ~/.config/chorus/.env or a local .env).
const CONFIG = {
  // GitHub provider: target repo and token.
  github: {
    owner: process.env.GITHUB_OWNER,
    repo: process.env.GITHUB_REPO,
    token: process.env.GITHUB_TOKEN,
  },
  // Azure DevOps provider: target repo and personal access token.
  azuredevops: {
    org: process.env.AZDO_ORG,
    project: process.env.AZDO_PROJECT,
    repo: process.env.AZDO_REPO,
    pat: process.env.AZDO_PAT,
  },
  // Messaging backend qa.py uses for the QA conversation ('teams' or 'slack').
  messenger: process.env.MESSENGER || 'teams',
  teams: {
    // Cached Teams auth state, written outside the repo in the user config dir.
    authPath: path.join(os.homedir(), '.config', 'chorus', 'teams-auth.json'),
    pollInterval: 60000 // Check for response every 60 seconds
  },
  slack: {
    botToken: process.env.SLACK_BOT_TOKEN || '',
  },
  ai: {
    enrichmentTool: 'claude', // 'claude' or 'kimi'
    codingTool: 'coder', // 'coder', 'claude', or 'kimi'
    // Bundled Python tools, executed with the venv interpreter below.
    coderPath: path.join(__dirname, 'tools', 'coder.py'),
    qaPath: path.join(__dirname, 'tools', 'qa.py'),
    mapperPath: path.join(__dirname, 'tools', 'mapper.py'),
    // Per-user virtualenv created on first run (see processTicket step 0).
    venvPython: process.platform === 'win32'
      ? path.join(os.homedir(), '.config', 'chorus', '.venv', 'Scripts', 'python.exe')
      : path.join(os.homedir(), '.config', 'chorus', '.venv', 'bin', 'python'),
    // A proxy key takes precedence over a direct Anthropic key.
    anthropicApiKey: process.env.PROXY_API_KEY || process.env.ANTHROPIC_API_KEY,
    proxyUrl: process.env.PROXY_URL,
  }
};
|
|
149
|
+
|
|
150
|
+
// ===== GITHUB / AZURE DEVOPS =====
|
|
151
|
+
// Provider-specific logic lives in ./providers/
|
|
152
|
+
// Use createProvider(CONFIG, issueArg) to get the right provider.
|
|
153
|
+
|
|
154
|
+
// ===== AI ENRICHMENT =====
/**
 * Turn a raw issue into a short list of clarifying questions for QA.
 *
 * Uses the Anthropic API when CONFIG.ai.enrichmentTool is 'claude',
 * otherwise shells out to the `kimi` CLI.
 *
 * @param {object} issue - Provider issue with number, title, body, labels.
 * @returns {Promise<string>} Clarification message to send to the QA analyst.
 * @throws {Error} When the Claude path is selected but no API key is set.
 */
async function enrichWithAI(issue) {
  const prompt = `Analyze this GitHub issue and write questions for QA clarification.

ISSUE DETAILS:
Title: ${issue.title}
Body: ${issue.body || 'No description provided'}
Labels: ${issue.labels.map(l => l.name).join(', ') || 'None'}

INSTRUCTIONS:
- Write clear, specific questions that will help a developer fix this issue
- Use simple language that a QA analyst can easily understand
- Format for a Teams message (clean, readable, no code blocks or technical formatting)
- Focus on missing details, edge cases, and expected vs actual behavior
- Ask questions that avoid back-and-forth (get all needed info upfront)
- Use pure text formatting, so it's not a big unstructured paragraph, like how this text is structured with newlines and spaces
- Maximum 3 questions
OUTPUT FORMAT:
Hi! I need clarification on issue #${issue.number}:

${issue.title}

To fix this properly, I need to know:

1. [First question]
2. [Second question]
3. [Third question]
...

Thanks!

IMPORTANT: Output ONLY the message above. Do not include any preamble, thinking process, or explanations. Start directly with "Hi! I need clarification..."`;

  console.log('🤖 Enriching issue with AI...');

  const tool = CONFIG.ai.enrichmentTool;

  if (tool === 'claude') {
    // Use Anthropic API
    if (!CONFIG.ai.anthropicApiKey) {
      throw new Error('PROXY_API_KEY or ANTHROPIC_API_KEY environment variable is required');
    }
    const clientOpts = { apiKey: CONFIG.ai.anthropicApiKey };
    if (CONFIG.ai.proxyUrl) {
      // Strip trailing slashes so the SDK doesn't build double-slash URLs.
      clientOpts.baseURL = CONFIG.ai.proxyUrl.replace(/\/+$/, '');
    }
    const anthropic = new Anthropic(clientOpts);

    const message = await anthropic.messages.create({
      // TODO: when a --super flag is threaded through here, use
      // claude-opus-4-6; otherwise default to claude-sonnet-4-20250514.
      model: 'claude-opus-4-6',
      max_tokens: 2000,
      messages: [
        {
          role: 'user',
          content: prompt
        }
      ]
    });

    if (message.usage) {
      console.log(` Enrichment tokens: ${message.usage.input_tokens} in / ${message.usage.output_tokens} out`);
    }

    return message.content[0].text.trim();
  } else {
    // Use Kimi CLI. Fix: pass the prompt as a single argv entry via execFile
    // so untrusted issue text can never be interpreted by a shell — the
    // previous string-built `kimi --prompt "..."` command escaped quotes and
    // `$` but not backticks or backslashes, allowing command injection.
    const { stdout } = await execFilePromise('kimi', ['--prompt', prompt]);
    return stdout.trim();
  }
}
|
|
227
|
+
|
|
228
|
+
// ===== CODE GENERATION =====
/**
 * Produce code changes for the issue using the configured coding tool.
 *
 * With the 'coder' tool the changes are written directly to the working
 * tree by coder.py; with the kimi fallback the raw text output is returned
 * for a downstream parser.
 *
 * @param {object} issue - Provider issue (number, title, body).
 * @param {string} enrichedDetails - AI-written technical analysis.
 * @param {string} qaResponse - Requirements gathered from the QA chat.
 * @returns {Promise<object>} Solution object: { completed, summary,
 *   files_modified, files_created, ... } (kimi adds `_raw`).
 */
async function generateCode(issue, enrichedDetails, qaResponse) {
  const tool = CONFIG.ai.codingTool;

  if (tool === 'coder') {
    const prompt = `Implement a solution for this GitHub issue in the current repository:

**Issue #${issue.number}: ${issue.title}**

Original Description:
${issue.body || 'No description provided'}

Technical Analysis:
${enrichedDetails}

QA Clarification:
${qaResponse}

Instructions:
1. Explore the codebase to understand the structure and relevant files
2. Implement all necessary code changes by writing to the actual files
3. Do not write unit tests unless the project already has substantive test coverage
4. Do not attempt to build or compile the project`;

    console.log('🔨 Generating code with Coder agent...');

    return await runCoder(prompt);
  }

  // Fallback: kimi
  const prompt = `Generate a solution for this GitHub issue:

**Issue #${issue.number}: ${issue.title}**

Original Description:
${issue.body || 'No description provided'}

Technical Analysis:
${enrichedDetails}

QA Clarification:
${qaResponse}

Please implement a complete solution with:
1. All necessary code changes
2. Tests if applicable
3. Documentation updates

Output the code with file paths clearly marked using code blocks like:
\`\`\`path/to/file.js
code here
\`\`\``;

  console.log('🔨 Generating code with Kimi...');
  // Fix: pass the prompt as argv via execFile — issue text is untrusted and
  // the previous string-built shell command was injectable via backticks
  // and unescaped backslashes.
  const { stdout } = await execFilePromise('kimi', ['--prompt', prompt]);
  return { _raw: stdout, completed: true, summary: stdout.trim(), files_modified: [], files_created: [] };
}
|
|
286
|
+
|
|
287
|
+
// ===== CODE REVIEW =====
/**
 * Run a CodeRabbit review of the coder's changes on a throwaway branch.
 *
 * The changes are committed temporarily so `coderabbit review --base main`
 * can diff them, then the commit is undone and the changes are restored to
 * the working tree on main. The review branch is deleted in all paths.
 *
 * @param {object} solution - Coder result (files_modified/files_created).
 * @param {object} issue - Provider issue; number names the branch, title
 *   names the draft commit.
 * @param {object} provider - Supplies getSolutionFiles/gitAddSolutionFiles.
 * @returns {Promise<{needsChanges: boolean, feedback: string}>}
 */
async function getCodeRabbitReview(solution, issue, provider) {
  console.log('🐰 Preparing code for CodeRabbit review...');

  const isCoder = CONFIG.ai.codingTool === 'coder';
  let filesWritten;

  if (isCoder) {
    const coderFiles = provider.getSolutionFiles(solution).length;
    filesWritten = coderFiles;
    console.log(` Coder wrote ${coderFiles} file(s) directly`);
  } else {
    filesWritten = 0; // Legacy path — files already written by provider
  }

  if (filesWritten === 0) {
    console.log('Skipping CodeRabbit review (no files to review)');
    return { needsChanges: false, feedback: 'No files to review' };
  }

  // 2. Create a temporary branch for review
  const branchName = `review/issue-${issue.number}`;

  try {
    // Clean up stale review branch from previous runs
    await execPromise(`git branch -D ${branchName}`).catch(() => {});

    await execPromise(`git checkout -b ${branchName}`);
    await provider.gitAddSolutionFiles(solution);

    // Verify there are staged changes before committing
    const { stdout: diffStat } = await execPromise(`git diff --cached --stat`);
    if (!diffStat.trim()) {
      console.warn('⚠️ No staged changes to commit — skipping CodeRabbit review');
      await execPromise(`git checkout main`);
      await execPromise(`git branch -D ${branchName}`).catch(() => {});
      return { needsChanges: false, feedback: 'No file changes to review' };
    }

    // Fix: commit via execFile with an argv array — the issue title is
    // untrusted text and was previously interpolated into a shell string
    // (`git commit -m "Draft: ..."`), allowing command injection through
    // quotes, backticks, or $().
    await execFilePromise('git', ['commit', '-m', `Draft: ${issue.title}`]);

    // 3. Run CodeRabbit CLI (plain text output, compare against main)
    console.log('🐰 Running CodeRabbit review...');
    const { stdout, stderr } = await execPromise(`coderabbit review --plain --base main`);
    const reviewOutput = (stdout + '\n' + stderr).trim();

    console.log('CodeRabbit output:', reviewOutput || '(empty)');

    // 4. Heuristic: any hint of actionable feedback triggers another pass.
    const lowered = reviewOutput.toLowerCase();
    const needsChanges = reviewOutput.length > 0 &&
      ['issue', 'refactor', 'warning', 'suggestion', 'improve', 'consider']
        .some((marker) => lowered.includes(marker));

    // 5. Clean up — undo the commit so changes stay in working tree, then go back to main
    await execPromise(`git reset --mixed HEAD~1`); // undo commit, keep changes in working tree
    await execPromise(`git stash`); // stash working tree changes
    await execPromise(`git checkout main`);
    await execPromise(`git stash pop`); // restore coder's changes on main
    await execPromise(`git branch -D ${branchName}`).catch(() => {});

    return {
      needsChanges,
      feedback: reviewOutput
    };
  } catch (error) {
    console.error('Error running CodeRabbit:', error.message);
    // Clean up on error — try to preserve working tree changes.
    // NOTE(review): the unconditional reset assumes the temp commit exists;
    // if the failure happened before the commit it rewinds the review branch
    // (not main) by one commit — the branch is deleted right after, so this
    // appears benign, but confirm before relying on it.
    await execPromise(`git reset --mixed HEAD~1`).catch(() => {});
    await execPromise(`git stash`).catch(() => {});
    await execPromise(`git checkout main`).catch(() => {});
    await execPromise(`git stash pop`).catch(() => {});
    await execPromise(`git branch -D ${branchName}`).catch(() => {});

    return {
      needsChanges: false,
      feedback: `CodeRabbit error: ${error.message}`
    };
  }
}
|
|
372
|
+
|
|
373
|
+
/**
 * Apply code-review feedback to the current solution.
 *
 * With the 'coder' tool the agent edits the previously-touched files in
 * place; with the kimi fallback the raw refined text is returned.
 *
 * @param {object} solution - Current solution (files lists, or `_raw` text).
 * @param {{feedback: string}} review - Review output to address.
 * @returns {Promise<object>} New solution object in the same shape as
 *   generateCode() returns.
 */
async function refineCode(solution, review) {
  const tool = CONFIG.ai.codingTool;

  if (tool === 'coder') {
    const filesContext = [
      ...(solution.files_modified || []),
      ...(solution.files_created || [])
    ].join(', ');

    const prompt = `Refine the code based on this code review feedback.

Files that were changed: ${filesContext}

Review Feedback:
${review.feedback}

Instructions:
1. Read the files listed above
2. Address every concern raised in the review
3. Edit the files to fix the issues
4. Do not write unit tests unless the project already has substantive test coverage
5. Do not attempt to build or compile the project`;

    console.log('🔄 Refining code with Coder agent...');

    return await runCoder(prompt);
  }

  // Fallback: kimi
  const prompt = `Please refine this code based on the review feedback:

Current Solution:
${solution._raw || solution}

Review Feedback:
${review.feedback}

Please update the code to address all concerns. Output with the same file format:
\`\`\`path/to/file.js
updated code
\`\`\``;

  console.log('🔄 Refining code with Kimi...');
  // Fix: pass the prompt as argv via execFile — review feedback and solution
  // text are untrusted, and the previous string-built shell command was
  // injectable via backticks and unescaped backslashes.
  const { stdout } = await execFilePromise('kimi', ['--prompt', prompt]);
  return { _raw: stdout, completed: true, summary: stdout.trim(), files_modified: [], files_created: [] };
}
|
|
420
|
+
|
|
421
|
+
// PR creation is handled by the provider (see ./providers/)
|
|
422
|
+
|
|
423
|
+
// ===== LINT =====
/**
 * Lint the JS/TS files the coder touched: an ESLint auto-fix pass, then a
 * check pass; if unfixable errors remain they are handed back to the coder
 * agent, and any files it touches are merged into the solution.
 *
 * @param {object} solution - Coder result; files_modified/files_created are
 *   updated in place when the lint-fix pass touches additional files.
 * @param {object} provider - Issue provider; supplies getSolutionFiles().
 * @returns {Promise<void>}
 */
async function lintAndFix(solution, provider) {
  const files = provider.getSolutionFiles(solution).filter(f => /\.(js|jsx|ts|tsx)$/.test(f));
  if (files.length === 0) {
    console.log('🔍 No JS/TS files to lint\n');
    return;
  }

  console.log(`🔍 Linting ${files.length} file(s)...`);

  // First pass: auto-fix what ESLint can.
  // NOTE(review): double quotes guard spaces but not $()/backticks on POSIX
  // shells — file names come from the coder tool and are assumed benign;
  // confirm before accepting paths from a less trusted source.
  const quoted = files.map(f => `"${f}"`).join(' ');
  try {
    await execPromise(`npx eslint --fix --quiet ${quoted}`, { cwd: process.cwd() });
    console.log(' Auto-fix applied');
  } catch (e) {
    // eslint exits non-zero when there are unfixable errors — that's expected
    if (e.stdout) console.log(e.stdout);
  }

  // Second pass: check for remaining errors
  try {
    const { stdout } = await execPromise(`npx eslint --quiet ${quoted}`, { cwd: process.cwd() });
    if (stdout.trim()) {
      console.log(' Remaining lint issues:\n', stdout);
    } else {
      console.log(' All lint issues resolved ✓\n');
      return;
    }
  } catch (e) {
    const lintOutput = (e.stdout || '') + (e.stderr || '');
    if (!lintOutput.trim()) {
      console.log(' All lint issues resolved ✓\n');
      return;
    }

    // Unfixable errors remain — send back to coder
    console.log('🔄 Lint errors remain, sending to Coder for manual fix...');
    const prompt = `Fix these ESLint errors. Do NOT disable rules or add eslint-disable comments — fix the actual code.

Files: ${files.join(', ')}

ESLint output:
${lintOutput.slice(0, 5000)}`;

    const fixResult = await runCoder(prompt);

    if (fixResult.completed === false) {
      console.warn('⚠️ Coder lint fix had errors:', fixResult.errors);
    }

    // Merge files the lint-fix pass touched into the solution so later
    // stages (review, PR) see them.
    // Fix 1: guard against a missing files_modified array — solutions from
    // the kimi path (or partial coder output) previously caused a TypeError
    // on .includes/.push here.
    // Fix 2: files the fixer *created* were previously dropped entirely.
    solution.files_modified = solution.files_modified || [];
    for (const f of fixResult.files_modified || []) {
      if (!solution.files_modified.includes(f)) solution.files_modified.push(f);
    }
    solution.files_created = solution.files_created || [];
    for (const f of fixResult.files_created || []) {
      if (!solution.files_created.includes(f)) solution.files_created.push(f);
    }

    console.log(' Lint fix pass complete\n');
  }
}
|
|
482
|
+
|
|
483
|
+
// ===== TOKEN LIMIT =====

// Detect whether an error (string or Error-like object) indicates that the
// proxy's monthly token allowance is exhausted or the API is rate-limiting.
function isTokenLimitError(err) {
  let text;
  if (typeof err === 'string') {
    text = err;
  } else {
    text = err?.message || err?.error || '';
  }
  const markers = ['token limit exceeded', 'rate_limit_error'];
  return markers.some((marker) => text.includes(marker));
}
|
|
489
|
+
|
|
490
|
+
// Resolve the proxy account's email address (used to prefill the pricing
// page link). Strictly best-effort: any failure yields null.
async function fetchAccountEmail() {
  if (!CONFIG.ai.proxyUrl || !CONFIG.ai.anthropicApiKey) return null;
  try {
    const baseUrl = CONFIG.ai.proxyUrl.replace(/\/+$/, '');
    const response = await fetch(`${baseUrl}/auth/me`, {
      headers: { 'Authorization': `Bearer ${CONFIG.ai.anthropicApiKey}` },
    });
    if (!response.ok) return null;
    const body = await response.json();
    return body.email || null;
  } catch {
    // Network/JSON errors are non-fatal — caller falls back to a bare URL.
    return null;
  }
}
|
|
503
|
+
|
|
504
|
+
// Tell the user their monthly Chorus token allowance is exhausted and point
// them at the pricing page, prefilled with their account email when it can
// be discovered from the proxy.
async function printTokenLimitMessage() {
  const base = 'https://getchorusai.com/pricing';
  const email = await fetchAccountEmail();

  let url = base;
  if (email) {
    url = `${base}?email=${encodeURIComponent(email)}`;
  }

  console.error('\n⚠️ You\'ve run out of Chorus tokens for this month.\n');
  console.error(' To keep going, purchase more tokens at:');
  console.error(` ${url}\n`);
  console.error(' Or wait for your monthly allowance to reset.\n');
}
|
|
514
|
+
|
|
515
|
+
// ===== MAIN WORKFLOW =====
|
|
516
|
+
async function processTicket(issueArg, { useSuper = false, skipQA = false, qaName: qaNameOverride } = {}) {
|
|
517
|
+
try {
|
|
518
|
+
console.log('🚀 Starting ticket processing...\n');
|
|
519
|
+
|
|
520
|
+
// 0. Ensure Python venv exists and has required dependencies
|
|
521
|
+
const reqFile = path.join(__dirname, 'tools', 'requirements.txt');
|
|
522
|
+
const { execFileSync: efs } = require('child_process');
|
|
523
|
+
const venvDir = path.join(os.homedir(), '.config', 'chorus', '.venv');
|
|
524
|
+
|
|
525
|
+
if (!require('fs').existsSync(CONFIG.ai.venvPython)) {
|
|
526
|
+
console.log('🐍 Creating Python virtual environment...');
|
|
527
|
+
efs('python3', ['-m', 'venv', venvDir], { stdio: 'inherit' });
|
|
528
|
+
}
|
|
529
|
+
|
|
530
|
+
try {
|
|
531
|
+
efs(CONFIG.ai.venvPython, ['-c', 'import anthropic'], { stdio: 'ignore' });
|
|
532
|
+
} catch {
|
|
533
|
+
console.log('📦 Installing Python dependencies (first run)...');
|
|
534
|
+
efs(CONFIG.ai.venvPython, ['-m', 'pip', 'install', '-r', reqFile], { stdio: 'inherit' });
|
|
535
|
+
}
|
|
536
|
+
|
|
537
|
+
// 0a. Verify no modified tracked files (untracked files like .coder/ are fine)
|
|
538
|
+
const { stdout: gitStatus } = await execPromise('git status --porcelain --untracked-files=no');
|
|
539
|
+
if (gitStatus.trim()) {
|
|
540
|
+
console.error('⚠️ Working directory has uncommitted changes. Commit or stash first:');
|
|
541
|
+
console.error(gitStatus);
|
|
542
|
+
//throw new Error('Dirty working directory');
|
|
543
|
+
}
|
|
544
|
+
const { stdout: currentBranch } = await execPromise('git rev-parse --abbrev-ref HEAD');
|
|
545
|
+
if (currentBranch.trim() !== 'main') {
|
|
546
|
+
console.warn(`⚠️ Not on main branch (currently on ${currentBranch.trim()}). Switch to main first.`);
|
|
547
|
+
//throw new Error('Not on main branch');
|
|
548
|
+
}
|
|
549
|
+
|
|
550
|
+
// 0b. Map the codebase so coder starts with full context
|
|
551
|
+
if (CONFIG.ai.codingTool === 'coder') {
|
|
552
|
+
console.log('🗺️ Mapping codebase...');
|
|
553
|
+
try {
|
|
554
|
+
await execFilePromise(CONFIG.ai.venvPython, [CONFIG.ai.mapperPath, process.cwd()]);
|
|
555
|
+
console.log(' Map generated\n');
|
|
556
|
+
} catch (e) {
|
|
557
|
+
console.warn(' Mapper failed, coder will explore manually:', e.message);
|
|
558
|
+
}
|
|
559
|
+
}
|
|
560
|
+
|
|
561
|
+
// 1. Fetch issue via provider
|
|
562
|
+
const provider = createProvider(CONFIG, issueArg);
|
|
563
|
+
console.log(` Provider: ${provider.name}`);
|
|
564
|
+
|
|
565
|
+
const parsed = provider.parseIssueArg(issueArg);
|
|
566
|
+
let issue;
|
|
567
|
+
if (parsed) {
|
|
568
|
+
console.log(`📥 Fetching issue #${parsed.number}...`);
|
|
569
|
+
issue = await provider.fetchIssueByNumber(parsed.number, parsed.owner, parsed.repo);
|
|
570
|
+
} else {
|
|
571
|
+
console.log('📥 Fetching latest assigned issue...');
|
|
572
|
+
issue = await provider.fetchLatestIssue();
|
|
573
|
+
}
|
|
574
|
+
console.log(`Found issue #${issue.number}: ${issue.title}\n`);
|
|
575
|
+
|
|
576
|
+
// 2. Enrich with AI
|
|
577
|
+
const enrichedDetails = await enrichWithAI(issue);
|
|
578
|
+
console.log('Enrichment complete\n', enrichedDetails);
|
|
579
|
+
|
|
580
|
+
// 3. Multi-turn QA conversation via qa.py
|
|
581
|
+
let qaResponse = '';
|
|
582
|
+
if (skipQA) {
|
|
583
|
+
console.log('⏭️ Skipping QA conversation (--skip-qa)\n');
|
|
584
|
+
} else {
|
|
585
|
+
const qaName = qaNameOverride || await provider.getUserDisplayName(issue.user.login);
|
|
586
|
+
console.log(`💬 Starting QA conversation with ${qaName?.login}...`);
|
|
587
|
+
const qaResult = await runQAChat(issue, enrichedDetails, qaName, useSuper);
|
|
588
|
+
qaResponse = qaResult.requirements;
|
|
589
|
+
|
|
590
|
+
if (!qaResult.completed) {
|
|
591
|
+
if (isTokenLimitError(qaResult.error)) {
|
|
592
|
+
await printTokenLimitMessage();
|
|
593
|
+
process.exit(1);
|
|
594
|
+
}
|
|
595
|
+
console.warn('⚠️ QA chat did not complete successfully:', qaResult.error || 'unknown');
|
|
596
|
+
}
|
|
597
|
+
console.log(`QA complete (${qaResult.conversation_rounds} rounds)\n`);
|
|
598
|
+
}
|
|
599
|
+
|
|
600
|
+
// 4. Generate code (with retry if coder doesn't actually write files)
|
|
601
|
+
const maxCodeAttempts = 3;
|
|
602
|
+
let solution;
|
|
603
|
+
|
|
604
|
+
for (let attempt = 1; attempt <= maxCodeAttempts; attempt++) {
|
|
605
|
+
if (attempt === 1) {
|
|
606
|
+
solution = await generateCode(issue, enrichedDetails, qaResponse);
|
|
607
|
+
} else {
|
|
608
|
+
// Reprompt with explicit instruction that files must be written
|
|
609
|
+
const retryPrompt = `You previously attempted to implement this issue but DID NOT write any files. Your task is NOT complete until you have actually created or modified files using write_file or edit_file.
|
|
610
|
+
|
|
611
|
+
Implement a solution for this GitHub issue in the current repository:
|
|
612
|
+
|
|
613
|
+
Issue #${issue.number}: ${issue.title}
|
|
614
|
+
|
|
615
|
+
Original Description:
|
|
616
|
+
${issue.body || 'No description provided'}
|
|
617
|
+
|
|
618
|
+
Technical Analysis:
|
|
619
|
+
${enrichedDetails}
|
|
620
|
+
|
|
621
|
+
QA Clarification:
|
|
622
|
+
${qaResponse}
|
|
623
|
+
|
|
624
|
+
CRITICAL: You MUST write code to actual files. Do not just describe changes — use write_file or edit_file to make them. If you are unsure where to make changes, explore the codebase first, then write the code.`;
|
|
625
|
+
|
|
626
|
+
console.log(`🔁 Reprompting coder (attempt ${attempt}/${maxCodeAttempts})...`);
|
|
627
|
+
solution = await runCoder(retryPrompt);
|
|
628
|
+
}
|
|
629
|
+
|
|
630
|
+
if (solution.completed === false) {
|
|
631
|
+
const errs = solution.errors || [solution.summary || ''];
|
|
632
|
+
if (errs.some(e => isTokenLimitError(e))) {
|
|
633
|
+
printTokenLimitMessage();
|
|
634
|
+
process.exit(1);
|
|
635
|
+
}
|
|
636
|
+
console.error('❌ Code generation failed:', errs);
|
|
637
|
+
throw new Error('Code generation failed');
|
|
638
|
+
}
|
|
639
|
+
|
|
640
|
+
const filesChanged = (solution.files_modified || []).length + (solution.files_created || []).length;
|
|
641
|
+
|
|
642
|
+
if (filesChanged > 0) {
|
|
643
|
+
break; // Success — coder actually wrote files
|
|
644
|
+
}
|
|
645
|
+
|
|
646
|
+
if (attempt < maxCodeAttempts) {
|
|
647
|
+
console.warn(`⚠️ Coder reported success but wrote 0 files (attempt ${attempt}/${maxCodeAttempts})`);
|
|
648
|
+
} else {
|
|
649
|
+
console.error('❌ Coder failed to write any files after', maxCodeAttempts, 'attempts');
|
|
650
|
+
throw new Error('Code generation produced no file changes');
|
|
651
|
+
}
|
|
652
|
+
}
|
|
653
|
+
|
|
654
|
+
console.log('Code generation complete\n');
|
|
655
|
+
if (solution.summary) console.log(`Summary: ${solution.summary}\n`);
|
|
656
|
+
|
|
657
|
+
// 5. Lint files the coder touched
|
|
658
|
+
await lintAndFix(solution, provider);
|
|
659
|
+
|
|
660
|
+
// 6. CodeRabbit review
|
|
661
|
+
let review = await getCodeRabbitReview(solution, issue, provider);
|
|
662
|
+
|
|
663
|
+
// 7. Iterate if needed
|
|
664
|
+
let iterations = 0;
|
|
665
|
+
const maxIterations = 3;
|
|
666
|
+
|
|
667
|
+
if (review.needsChanges) {
|
|
668
|
+
console.log('🔄 CodeRabbit found issues, refining...\n');
|
|
669
|
+
|
|
670
|
+
while (review.needsChanges && iterations < maxIterations) {
|
|
671
|
+
console.log(`Iteration ${iterations + 1}/${maxIterations}...`);
|
|
672
|
+
|
|
673
|
+
const refined = await refineCode(solution, review);
|
|
674
|
+
|
|
675
|
+
if (refined.completed === false) {
|
|
676
|
+
console.warn('⚠️ Refinement had errors:', refined.errors);
|
|
677
|
+
}
|
|
678
|
+
|
|
679
|
+
const refinedFiles = (refined.files_modified || []).length + (refined.files_created || []).length;
|
|
680
|
+
if (refinedFiles > 0) {
|
|
681
|
+
solution = refined; // Update solution only if refinement actually touched files
|
|
682
|
+
} else {
|
|
683
|
+
console.warn('⚠️ Refinement produced no file changes, keeping previous solution');
|
|
684
|
+
}
|
|
685
|
+
|
|
686
|
+
const newReview = await getCodeRabbitReview(solution, issue, provider);
|
|
687
|
+
|
|
688
|
+
if (!newReview.needsChanges) {
|
|
689
|
+
review = newReview;
|
|
690
|
+
break;
|
|
691
|
+
}
|
|
692
|
+
|
|
693
|
+
review = newReview;
|
|
694
|
+
iterations++;
|
|
695
|
+
}
|
|
696
|
+
|
|
697
|
+
if (iterations >= maxIterations && review.needsChanges) {
|
|
698
|
+
console.warn('⚠️ Max iterations reached. Creating PR with current state.');
|
|
699
|
+
}
|
|
700
|
+
}
|
|
701
|
+
|
|
702
|
+
// 8. User review loop — confirm before creating PR
|
|
703
|
+
const readline = require('readline');
|
|
704
|
+
const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
|
|
705
|
+
const maxUserIterations = 5;
|
|
706
|
+
let userIteration = 0;
|
|
707
|
+
|
|
708
|
+
while (userIteration < maxUserIterations) {
|
|
709
|
+
const allFiles = [...new Set([
|
|
710
|
+
...(solution.files_modified || []),
|
|
711
|
+
...(solution.files_created || []),
|
|
712
|
+
])];
|
|
713
|
+
console.log('FILES CHANGED:');
|
|
714
|
+
allFiles.forEach(f => console.log(` ${f}`));
|
|
715
|
+
if (solution.summary) console.log(`\nSUMMARY: ${solution.summary}`);
|
|
716
|
+
console.log();
|
|
717
|
+
|
|
718
|
+
const answer = await prompt(rl, '✅ Happy with the changes? (y/n): ');
|
|
719
|
+
if (answer.trim().toLowerCase() === 'y' || answer.trim().toLowerCase() === 'yes') {
|
|
720
|
+
break;
|
|
721
|
+
}
|
|
722
|
+
|
|
723
|
+
const feedback = await prompt(rl, '📝 What needs to change? ');
|
|
724
|
+
if (!feedback.trim()) {
|
|
725
|
+
console.log('No feedback provided, continuing to PR.\n');
|
|
726
|
+
break;
|
|
727
|
+
}
|
|
728
|
+
|
|
729
|
+
userIteration++;
|
|
730
|
+
console.log(`\n🔁 Revising (attempt ${userIteration}/${maxUserIterations})...\n`);
|
|
731
|
+
|
|
732
|
+
const filesContext = allFiles.join(', ');
|
|
733
|
+
const revisionPrompt = `Revise the code based on user feedback.
|
|
734
|
+
|
|
735
|
+
Files that were changed: ${filesContext}
|
|
736
|
+
|
|
737
|
+
User feedback:
|
|
738
|
+
${feedback.trim()}
|
|
739
|
+
|
|
740
|
+
Instructions:
|
|
741
|
+
1. Read the files listed above
|
|
742
|
+
2. Address the user's feedback
|
|
743
|
+
3. Edit the files to make the requested changes
|
|
744
|
+
4. Do not write unit tests unless the project already has substantive test coverage
|
|
745
|
+
5. Do not attempt to build or compile the project`;
|
|
746
|
+
|
|
747
|
+
const revised = await runCoder(revisionPrompt);
|
|
748
|
+
|
|
749
|
+
if (revised.completed === false) {
|
|
750
|
+
const errs = revised.errors || [revised.summary || ''];
|
|
751
|
+
if (errs.some(e => isTokenLimitError(e))) {
|
|
752
|
+
printTokenLimitMessage();
|
|
753
|
+
rl.close();
|
|
754
|
+
process.exit(1);
|
|
755
|
+
}
|
|
756
|
+
console.warn('⚠️ Revision had errors:', errs);
|
|
757
|
+
}
|
|
758
|
+
|
|
759
|
+
const revisedFiles = (revised.files_modified || []).length + (revised.files_created || []).length;
|
|
760
|
+
if (revisedFiles > 0) {
|
|
761
|
+
solution = revised;
|
|
762
|
+
} else {
|
|
763
|
+
console.warn('⚠️ Revision produced no file changes, keeping previous solution');
|
|
764
|
+
}
|
|
765
|
+
|
|
766
|
+
await lintAndFix(solution, provider);
|
|
767
|
+
console.log();
|
|
768
|
+
}
|
|
769
|
+
|
|
770
|
+
if (userIteration >= maxUserIterations) {
|
|
771
|
+
console.warn('⚠️ Max revision rounds reached. Creating PR with current state.');
|
|
772
|
+
}
|
|
773
|
+
|
|
774
|
+
rl.close();
|
|
775
|
+
|
|
776
|
+
// 9. Create PR
|
|
777
|
+
await provider.createPR(issue, solution);
|
|
778
|
+
|
|
779
|
+
console.log('\n✨ Ticket processing complete!');
|
|
780
|
+
|
|
781
|
+
} catch (error) {
|
|
782
|
+
if (isTokenLimitError(error)) {
|
|
783
|
+
await printTokenLimitMessage();
|
|
784
|
+
process.exit(1);
|
|
785
|
+
}
|
|
786
|
+
console.error('❌ Error processing ticket:', error);
|
|
787
|
+
throw error;
|
|
788
|
+
}
|
|
789
|
+
}
|
|
790
|
+
|
|
791
|
+
// ===== SETUP =====
|
|
792
|
+
|
|
793
|
+
// Ask the user a question and resolve with their answer.
//
// rl       - readline.Interface used for normal (echoed) prompts.
// question - text written before the cursor.
// hidden   - when true, read from stdin in raw mode so the input
//            (e.g. a token or password) is never echoed.
//
// Returns a Promise resolving to the entered string without the
// trailing newline.
//
// Fixes over the previous version:
//  - a single 'data' event can carry several characters (pasted text such
//    as "secret\n"); input is now processed character by character so the
//    prompt resolves as soon as a newline appears anywhere in the chunk.
//  - stdin.isRaw is undefined on non-TTY streams; coerce to boolean before
//    handing it back to setRawMode.
//  - raw mode is restored before exiting on Ctrl-C so the terminal is not
//    left in a broken state.
function prompt(rl, question, hidden = false) {
  return new Promise((resolve) => {
    if (!hidden) {
      rl.question(question, resolve);
      return;
    }

    process.stdout.write(question);
    const stdin = process.stdin;
    const wasRaw = Boolean(stdin.isRaw); // undefined when stdin is not a TTY
    stdin.setRawMode(true);
    stdin.resume();
    stdin.setEncoding('utf8');
    let input = '';

    const finish = () => {
      stdin.setRawMode(wasRaw);
      stdin.removeListener('data', onData);
      stdin.pause();
      process.stdout.write('\n');
      resolve(input);
    };

    const onData = (chunk) => {
      // Pasted input arrives as one multi-character chunk — walk it.
      for (const ch of chunk) {
        if (ch === '\n' || ch === '\r') {
          finish();
          return;
        }
        if (ch === '\u0003') { // Ctrl-C
          stdin.setRawMode(wasRaw); // leave the terminal usable
          process.exit();
        } else if (ch === '\u007F' || ch === '\b') { // backspace / delete
          input = input.slice(0, -1); // no-op on empty string
        } else {
          input += ch;
        }
      }
    };

    stdin.on('data', onData);
  });
}
|
|
824
|
+
|
|
825
|
+
// Interactively configure the GitHub provider and persist the settings to
// ~/.config/chorus/.env, then mirror them into the in-memory CONFIG.
//
// Fix: the .env upsert previously passed `${key}=${value}` as a string
// replacement, so `$`-sequences inside a token (e.g. "$&") would be
// interpreted as regex replacement patterns and corrupt the file. A
// replacer function is used instead. Also avoids writing a leading blank
// line when the .env file does not exist yet.
async function setupGitHub() {
  console.log('Setting up GitHub configuration...\n');

  const readline = require('readline');
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });

  // Empty answers fall back to any previously configured values.
  const owner = await prompt(rl, `GitHub owner/org: `) || CONFIG.github.owner;
  const repo = await prompt(rl, `GitHub repo: `) || CONFIG.github.repo;
  const token = await prompt(rl, 'GitHub token (optional, for private repos): ', true);
  rl.close();

  const configDir = path.join(os.homedir(), '.config', 'chorus');
  await fs.mkdir(configDir, { recursive: true });
  const envPath = path.join(configDir, '.env');
  let envContent = '';
  try {
    envContent = await fs.readFile(envPath, 'utf8');
  } catch { /* no .env yet */ }

  const updates = { GITHUB_OWNER: owner, GITHUB_REPO: repo };
  if (token) updates.GITHUB_TOKEN = token;

  // Upsert each KEY=value line in the env file.
  for (const [key, value] of Object.entries(updates)) {
    const regex = new RegExp(`^${key}=.*$`, 'm');
    const line = `${key}=${value}`;
    if (regex.test(envContent)) {
      // Replacer function: `$` in the value is never a replacement pattern.
      envContent = envContent.replace(regex, () => line);
    } else {
      envContent = envContent.trimEnd() === '' ? line : envContent.trimEnd() + `\n${line}`;
    }
  }

  await fs.writeFile(envPath, envContent.trimEnd() + '\n');

  // Update in-memory config so the rest of this run uses the new values.
  CONFIG.github.owner = owner;
  CONFIG.github.repo = repo;
  if (token) CONFIG.github.token = token;

  console.log(`\n✅ GitHub config saved to ${envPath} (${owner}/${repo})\n`);
}
|
|
865
|
+
|
|
866
|
+
// Authenticate against the Chorus proxy (register first, fall back to
// login) and persist the resulting API key — plus the proxy URL on first
// run — to ~/.config/chorus/.env.
//
// Fixes: the PROXY_URL / PROXY_API_KEY regexes were unanchored, so they
// could match mid-line; string replacements could misinterpret `$` in the
// value as a replacement pattern; and `res.json()` threw on non-JSON error
// bodies (e.g. an HTML 502 page), aborting setup with a raw parse error.
async function setupProxyAuth() {
  const DEFAULT_PROXY_URL = 'https://chorus-bad0f.web.app';

  if (!CONFIG.ai.proxyUrl) {
    CONFIG.ai.proxyUrl = DEFAULT_PROXY_URL;

    // Persist PROXY_URL to .env
    const configDir = path.join(os.homedir(), '.config', 'chorus');
    await fs.mkdir(configDir, { recursive: true });
    const envPath = path.join(configDir, '.env');
    let envContent = '';
    try { envContent = await fs.readFile(envPath, 'utf8'); } catch { /* no .env yet */ }
    const proxyLine = `PROXY_URL=${CONFIG.ai.proxyUrl}`;
    if (/^PROXY_URL=.*$/m.test(envContent)) {
      // Anchored regex + replacer function: replace only a whole
      // PROXY_URL line, and never treat `$` in the URL as a pattern.
      envContent = envContent.replace(/^PROXY_URL=.*$/m, () => proxyLine);
    } else {
      envContent = envContent.trimEnd() === '' ? proxyLine : envContent.trimEnd() + `\n${proxyLine}`;
    }
    await fs.writeFile(envPath, envContent.trimEnd() + '\n');
    process.env.PROXY_URL = CONFIG.ai.proxyUrl;
  }

  console.log('Setting up Chorus authentication...\n');

  const readline = require('readline');
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });

  const email = await prompt(rl, 'Email: ');
  const password = await prompt(rl, 'Password: ', true);
  rl.close();

  // Try register first, fall back to login
  let apiKey;
  for (const endpoint of ['/auth/register', '/auth/login']) {
    const res = await fetch(`${CONFIG.ai.proxyUrl}${endpoint}`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ email, password }),
    });

    // Tolerate non-JSON error bodies (e.g. a hosting-layer HTML error page).
    let data = {};
    try { data = await res.json(); } catch { /* keep data as {} */ }

    if (res.ok && data.apiKey) {
      apiKey = data.apiKey;
      console.log(`\n✅ ${endpoint === '/auth/register' ? 'Registered' : 'Logged in'} successfully`);
      break;
    }

    // If register fails with 409 (already exists), try login next
    if (res.status === 409) continue;

    // Any other error on login means bad credentials
    if (endpoint === '/auth/login' && !res.ok) {
      console.error(`\n❌ Login failed: ${data.error?.message || 'Unknown error'}`);
      return;
    }
  }

  if (!apiKey) {
    console.error('\n❌ Failed to authenticate');
    return;
  }

  // Save to ~/.config/chorus/.env
  const configDir = path.join(os.homedir(), '.config', 'chorus');
  await fs.mkdir(configDir, { recursive: true });
  const envPath = path.join(configDir, '.env');
  let envContent = '';
  try {
    envContent = await fs.readFile(envPath, 'utf8');
  } catch { /* no .env yet */ }

  const keyLine = `PROXY_API_KEY=${apiKey}`;
  if (/^PROXY_API_KEY=.*$/m.test(envContent)) {
    envContent = envContent.replace(/^PROXY_API_KEY=.*$/m, () => keyLine);
  } else {
    envContent = envContent.trimEnd() === '' ? `${keyLine}\n` : envContent.trimEnd() + `\n${keyLine}\n`;
  }
  await fs.writeFile(envPath, envContent);

  // Update in-memory config
  CONFIG.ai.anthropicApiKey = apiKey;
  process.env.PROXY_API_KEY = apiKey;

  console.log(` API key saved to .env\n`);
}
|
|
950
|
+
|
|
951
|
+
// Capture a logged-in Microsoft Teams browser session and persist its
// storage state to CONFIG.teams.authPath so later runs can reuse it.
async function setupTeamsAuth() {
  console.log('Setting up Teams authentication...\n');

  const { firefox } = require('playwright');

  // Launch a visible browser so the user can complete the login flow.
  const browser = await firefox.launch({ headless: false });
  const browserContext = await browser.newContext();
  const teamsPage = await browserContext.newPage();
  await teamsPage.goto('https://teams.microsoft.com');

  console.log('Please log in to Teams manually in the browser window...');
  console.log('Once logged in and you can see your chats, press Enter here...\n');

  // Block until the user confirms the login is complete.
  const readline = require('readline');
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  await new Promise((done) => rl.question('Press Enter when ready... ', done));
  rl.close();

  // Persist cookies/localStorage for future headless use.
  await fs.mkdir(path.dirname(CONFIG.teams.authPath), { recursive: true });
  await browserContext.storageState({ path: CONFIG.teams.authPath });
  await browser.close();

  console.log(`\n✅ Authentication state saved to ${CONFIG.teams.authPath}`);
}
|
|
975
|
+
|
|
976
|
+
// Interactively configure Slack as the QA messenger: walk the user through
// creating a bot token, validate its prefix, and persist it (plus
// MESSENGER=slack) to ~/.config/chorus/.env and the in-memory CONFIG.
//
// Fix: the .env upsert previously passed `${key}=${value}` as a string
// replacement, so `$`-sequences inside the user-supplied token would be
// interpreted as regex replacement patterns. A replacer function is used
// instead; also avoids a leading blank line when .env is new.
async function setupSlack() {
  console.log('Setting up Slack integration...\n');
  console.log('1. Create or configure your Slack app at:');
  console.log(' https://api.slack.com/apps\n');
  console.log('2. Under OAuth & Permissions, add these bot token scopes:');
  console.log(' - chat:write (send messages)');
  console.log(' - users:read (find users by name)');
  console.log(' - im:history (read DM replies)');
  console.log(' - im:write (open DM channels)\n');
  console.log('3. Install the app to your workspace and copy the Bot User OAuth Token.\n');

  const readline = require('readline');
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });

  const token = await prompt(rl, 'Slack bot token (xoxb-...): ', true);
  rl.close();

  // Bot tokens always start with "xoxb-"; reject anything else early.
  if (!token.startsWith('xoxb-')) {
    console.error('\n❌ Invalid token — Slack bot tokens start with "xoxb-"');
    return;
  }

  const configDir = path.join(os.homedir(), '.config', 'chorus');
  await fs.mkdir(configDir, { recursive: true });
  const envPath = path.join(configDir, '.env');
  let envContent = '';
  try {
    envContent = await fs.readFile(envPath, 'utf8');
  } catch { /* no .env yet */ }

  const updates = { SLACK_BOT_TOKEN: token, MESSENGER: 'slack' };
  // Upsert each KEY=value line in the env file.
  for (const [key, value] of Object.entries(updates)) {
    const regex = new RegExp(`^${key}=.*$`, 'm');
    const line = `${key}=${value}`;
    if (regex.test(envContent)) {
      // Replacer function: `$` in the token is never a replacement pattern.
      envContent = envContent.replace(regex, () => line);
    } else {
      envContent = envContent.trimEnd() === '' ? line : envContent.trimEnd() + `\n${line}`;
    }
  }

  await fs.writeFile(envPath, envContent.trimEnd() + '\n');

  CONFIG.messenger = 'slack';
  CONFIG.slack.botToken = token;

  console.log(`\n✅ Slack config saved to ${envPath}\n`);
}
|
|
1023
|
+
|
|
1024
|
+
// Interactively configure the Azure DevOps provider and persist the
// settings (org/project/repo/PAT, plus PROVIDER=azuredevops) to
// ~/.config/chorus/.env and the in-memory CONFIG.
//
// Fix: the .env upsert previously passed `${key}=${value}` as a string
// replacement, so `$`-sequences inside a PAT would be interpreted as regex
// replacement patterns and corrupt the file. A replacer function is used
// instead; also avoids a leading blank line when .env is new.
async function setupAzureDevOps() {
  console.log('Setting up Azure DevOps configuration...\n');

  const readline = require('readline');
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });

  const adoOrg = await prompt(rl, 'Azure DevOps organization: ');
  const adoProject = await prompt(rl, 'Azure DevOps project: ');
  const adoRepo = await prompt(rl, 'Azure DevOps repository name: ');
  const adoPat = await prompt(rl, 'Azure DevOps PAT (personal access token): ', true);
  rl.close();

  const configDir = path.join(os.homedir(), '.config', 'chorus');
  await fs.mkdir(configDir, { recursive: true });
  const envPath = path.join(configDir, '.env');
  let envContent = '';
  try {
    envContent = await fs.readFile(envPath, 'utf8');
  } catch { /* no .env yet */ }

  const updates = { AZDO_ORG: adoOrg, AZDO_PROJECT: adoProject, AZDO_REPO: adoRepo, PROVIDER: 'azuredevops' };
  if (adoPat) updates.AZDO_PAT = adoPat;

  // Upsert each KEY=value line in the env file.
  for (const [key, value] of Object.entries(updates)) {
    const regex = new RegExp(`^${key}=.*$`, 'm');
    const line = `${key}=${value}`;
    if (regex.test(envContent)) {
      // Replacer function: `$` in the PAT is never a replacement pattern.
      envContent = envContent.replace(regex, () => line);
    } else {
      envContent = envContent.trimEnd() === '' ? line : envContent.trimEnd() + `\n${line}`;
    }
  }

  await fs.writeFile(envPath, envContent.trimEnd() + '\n');

  CONFIG.azuredevops.org = adoOrg;
  CONFIG.azuredevops.project = adoProject;
  CONFIG.azuredevops.repo = adoRepo;
  if (adoPat) CONFIG.azuredevops.pat = adoPat;

  console.log(`\n✅ Azure DevOps config saved to ${envPath} (${adoOrg}/${adoProject}/${adoRepo})\n`);
}
|
|
1065
|
+
|
|
1066
|
+
// Top-level interactive setup: pick a ticket provider, authenticate with
// the Chorus proxy, then pick and configure a messenger for QA chat.
async function setup() {
  console.log('Chorus Setup\n============\n');

  // Ask one question on a short-lived readline interface.
  const askChoice = async (question) => {
    const readline = require('readline');
    const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
    const answer = await prompt(rl, question);
    rl.close();
    return answer;
  };

  const providerChoice = await askChoice('Which provider? (github / azuredevops) [github]: ');
  const chosenProvider = (providerChoice || 'github').toLowerCase();

  // Accept a few aliases for Azure DevOps; everything else means GitHub.
  if (['azuredevops', 'ado', 'azure'].includes(chosenProvider)) {
    await setupAzureDevOps();
  } else {
    await setupGitHub();
  }

  await setupProxyAuth();

  const messengerChoice = await askChoice('Which messenger for QA chat? (teams / slack) [teams]: ');
  const chosenMessenger = (messengerChoice || 'teams').toLowerCase();

  if (chosenMessenger === 'slack') {
    await setupSlack();
  } else {
    await setupTeamsAuth();
  }

  console.log('\n✅ Setup complete. You can now run: chorus run\n');
}
|
|
1099
|
+
|
|
1100
|
+
// ===== CLI =====

// Subcommand from the command line: "setup", "run", or undefined.
const command = process.argv[2];
// Whether the user config file already exists — used to detect a first run
// and trigger setup automatically.
const _envExists = require('fs').existsSync(path.join(os.homedir(), '.config', 'chorus', '.env'));
// Parse the arguments after "chorus run" into an options object.
//
// Recognised flags:
//   --super     use the stronger model for QA evaluation
//   --skip-qa   skip the QA conversation entirely
//   --qa NAME   QA contact name for the messenger chat (ignored if NAME is missing)
// The first non-flag argument becomes issueArg (later ones overwrite it);
// unrecognised "--" flags are silently skipped.
function parseRunArgs() {
  const opts = { useSuper: false, skipQA: false, qaName: null, issueArg: null };
  const args = process.argv.slice(3);

  let i = 0;
  while (i < args.length) {
    const arg = args[i];
    switch (arg) {
      case '--super':
        opts.useSuper = true;
        break;
      case '--skip-qa':
        opts.skipQA = true;
        break;
      case '--qa':
        // Consume the following token as the value, if there is one.
        if (i + 1 < args.length) {
          i += 1;
          opts.qaName = args[i];
        }
        break;
      default:
        if (!arg.startsWith('--')) opts.issueArg = arg;
    }
    i += 1;
  }

  return opts;
}
|
|
1121
|
+
|
|
1122
|
+
// Command dispatch:
//   chorus setup  -> interactive configuration only
//   chorus run    -> process a ticket (auto-runs setup on a fresh install)
//   anything else -> print usage help (short variant before first setup)
if (command === 'setup') {
  setup().catch(console.error);
} else if (command === 'run') {
  if (!_envExists) {
    // First run: configure everything before processing the ticket.
    console.log('It looks like this is your first time running Chorus.');
    console.log('Running setup first...\n');
    setup().then(() => {
      const { issueArg, ...opts } = parseRunArgs();
      return processTicket(issueArg, opts);
    }).catch(console.error);
  } else {
    const { issueArg, ...opts } = parseRunArgs();
    processTicket(issueArg, opts).catch(console.error);
  }
} else {
  if (!_envExists) {
    // Minimal banner for a not-yet-configured install.
    console.log(`
Chorus — AI-powered GitHub ticket automation
=============================================

It looks like Chorus hasn't been set up yet.
Run "chorus setup" to get started.
`);
  } else {
    // Full usage help once configuration exists.
    console.log(`
Chorus — AI-powered ticket automation (GitHub & Azure DevOps)
=============================================================

Usage:
  chorus setup                    - Set up provider, Chorus auth + messenger
  chorus run                      - Process latest assigned issue
  chorus run 4464                 - Process specific issue by number
  chorus run <url>                - Process issue from full URL (auto-detects provider)
  chorus run 4464 --super         - Use Opus 4.6 for QA evaluation
  chorus run 4464 --qa 'John Doe' - Specify QA contact name for chat
  chorus run 4464 --skip-qa       - Skip QA conversation, go straight to coding

GitHub examples:
  chorus run 4464
  chorus run https://github.com/shesha-io/shesha-framework/issues/4464

Azure DevOps examples:
  chorus run 456
  chorus run https://dev.azure.com/myorg/myproject/_workitems/edit/456
  PROVIDER=azuredevops chorus run 456

Slack example:
  MESSENGER=slack chorus run 4464 --qa 'John Doe'

Environment variables:
  PROVIDER        - Force provider: github (default) or azuredevops
  MESSENGER       - Messenger for QA chat: teams (default) or slack
  SLACK_BOT_TOKEN - Slack bot token (required when MESSENGER=slack)
  AZDO_ORG        - Azure DevOps organization
  AZDO_PROJECT    - Azure DevOps project
  AZDO_REPO       - Azure DevOps repository name
  AZDO_PAT        - Azure DevOps personal access token

Configuration is stored in ~/.config/chorus/.env
Run "chorus setup" to configure credentials and messenger auth.
`);
  }
}