chorus-cli 0.4.1 → 0.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.js +80 -83
- package/package.json +2 -2
- package/scripts/postinstall.js +2 -2
- package/tools/__pycache__/coder.cpython-314.pyc +0 -0
- package/tools/__pycache__/qa.cpython-314.pyc +0 -0
- package/tools/coder.py +296 -206
- package/tools/qa.py +46 -32
- package/tools/requirements.txt +1 -1
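The common thread across every file below is a migration off the Anthropic SDKs (@anthropic-ai/sdk in Node, anthropic in Python) onto OpenAI-compatible clients pointed at the Chorus proxy. A minimal sketch of the new client wiring (Python, mirroring tools/coder.py and tools/qa.py; the model id and default URL are taken from the diff, the rest is illustrative):

    import os
    from openai import OpenAI

    # New wiring used throughout this release: a standard OpenAI client,
    # but with base_url pointed at the Chorus proxy instead of api.openai.com.
    client = OpenAI(
        api_key=os.environ["CHORUS_API_KEY"],
        base_url=os.environ.get("CHORUS_API_URL", "https://chorus-bad0f.web.app/v1"),
    )

    response = client.chat.completions.create(
        model="anthropic/claude-sonnet-4",  # provider-prefixed model id, per the new defaults
        max_tokens=256,
        messages=[{"role": "user", "content": "ping"}],
    )
    print(response.choices[0].message.content)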
package/index.js
CHANGED

@@ -7,8 +7,9 @@ const _localEnv = _path.join(__dirname, '.env');
 // Prefer user config dir (works when installed globally), fall back to local .env for dev
 require('dotenv').config({ path: require('fs').existsSync(_configEnv) ? _configEnv : _localEnv });
 const { Octokit } = require('@octokit/rest');
-
+
 const { createProvider } = require('./providers');
+const OpenAI = require('openai');
 const { exec, execFile, spawn } = require('child_process');
 const util = require('util');
 const path = require('path');

@@ -21,9 +22,9 @@ const fs = require('fs').promises;
 function runCoder(prompt) {
   return new Promise((resolve, reject) => {
     const env = { ...process.env };
-    if (CONFIG.ai.
-      env.
-      env.
+    if (CONFIG.ai.chorusApiKey) {
+      env.CHORUS_API_KEY = CONFIG.ai.chorusApiKey;
+      env.CHORUS_API_URL = CONFIG.ai.chorusApiUrl;
     }
     const proc = spawn(CONFIG.ai.venvPython, [CONFIG.ai.coderPath, '--prompt', prompt], {
       cwd: process.cwd(),

@@ -72,9 +73,9 @@ function runQAChat(issue, enrichedDetails, qaName, useSuper = false) {
   if (useSuper) args.push('--super');

   const env = { ...process.env };
-  if (CONFIG.ai.
-    env.
-    env.
+  if (CONFIG.ai.chorusApiKey) {
+    env.CHORUS_API_KEY = CONFIG.ai.chorusApiKey;
+    env.CHORUS_API_URL = CONFIG.ai.chorusApiUrl;
   }
   if (CONFIG.messenger === 'slack' && CONFIG.slack.botToken) {
     env.SLACK_BOT_TOKEN = CONFIG.slack.botToken;

@@ -142,8 +143,8 @@ const CONFIG = {
     venvPython: process.platform === 'win32'
       ? path.join(os.homedir(), '.config', 'chorus', '.venv', 'Scripts', 'python.exe')
      : path.join(os.homedir(), '.config', 'chorus', '.venv', 'bin', 'python'),
-
-
+    chorusApiKey: process.env.CHORUS_API_KEY,
+    chorusApiUrl: process.env.CHORUS_API_URL || 'https://chorus-bad0f.web.app/v1',
   }
 };

@@ -189,20 +190,17 @@ IMPORTANT: Output ONLY the message above. Do not include any preamble, thinking
   const tool = CONFIG.ai.enrichmentTool;

   if (tool === 'claude') {
-    // Use
-    if (!CONFIG.ai.
-      throw new Error('
-    }
-    const clientOpts = { apiKey: CONFIG.ai.anthropicApiKey };
-    if (CONFIG.ai.proxyUrl) {
-      clientOpts.baseURL = CONFIG.ai.proxyUrl.replace(/\/+$/, '');
+    // Use Chorus proxy API
+    if (!CONFIG.ai.chorusApiKey) {
+      throw new Error('CHORUS_API_KEY environment variable is required. Run "chorus setup" to configure.');
     }
-    const
+    const openai = new OpenAI({
+      apiKey: CONFIG.ai.chorusApiKey,
+      baseURL: CONFIG.ai.chorusApiUrl,
+    });

-    const
-
-    //when --super flag is added use claude-opus-4-6, else default to claude-sonnet-4-20250514
-    model: 'claude-opus-4-6',
+    const response = await openai.chat.completions.create({
+      model: 'anthropic/claude-opus-4',
       max_tokens: 2000,
       messages: [
         {

@@ -212,11 +210,11 @@ IMPORTANT: Output ONLY the message above. Do not include any preamble, thinking
       ]
     });

-    if (
-      console.log(`  Enrichment tokens: ${
+    if (response.usage) {
+      console.log(`  Enrichment tokens: ${response.usage.prompt_tokens} in / ${response.usage.completion_tokens} out`);
     }

-    return
+    return response.choices[0].message.content.trim();
   } else {
     // Use Kimi CLI
     const escapedPrompt = prompt.replace(/"/g, '\\"').replace(/\$/g, '\\$');
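The enrichment hunk above is representative of the response-shape changes applied throughout this release. A side-by-side sketch of the field mapping (Python; the Anthropic-side names are that SDK's standard fields, since the removed lines are truncated in this view):

    def usage_line(response):
        # Anthropic SDK: response.usage.input_tokens / response.usage.output_tokens
        # OpenAI SDK:    response.usage.prompt_tokens / response.usage.completion_tokens
        return f"{response.usage.prompt_tokens} in / {response.usage.completion_tokens} out"

    def final_text(response):
        # Anthropic SDK: response.content is a list of typed content blocks
        # OpenAI SDK:    a single message string under choices[0]
        return response.choices[0].message.content.strip()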
@@ -488,16 +486,7 @@ function isTokenLimitError(err) {
 }

 async function fetchAccountEmail() {
-
-  try {
-    const res = await fetch(`${CONFIG.ai.proxyUrl.replace(/\/+$/, '')}/auth/me`, {
-      headers: { 'Authorization': `Bearer ${CONFIG.ai.anthropicApiKey}` },
-    });
-    if (res.ok) {
-      const data = await res.json();
-      return data.email || null;
-    }
-  } catch {}
+  // TODO: fetch email from Chorus proxy /auth/me endpoint
   return null;
 }

@@ -528,7 +517,7 @@ async function processTicket(issueArg, { useSuper = false, skipQA = false, qaNam
   }

   try {
-    efs(CONFIG.ai.venvPython, ['-c', 'import
+    efs(CONFIG.ai.venvPython, ['-c', 'import openai'], { stdio: 'ignore' });
   } catch {
     console.log('📦 Installing Python dependencies (first run)...');
     efs(CONFIG.ai.venvPython, ['-m', 'pip', 'install', '-r', reqFile], { stdio: 'inherit' });

@@ -864,26 +853,6 @@ async function setupGitHub() {
 }

 async function setupProxyAuth() {
-  const DEFAULT_PROXY_URL = 'https://chorus-bad0f.web.app';
-
-  if (!CONFIG.ai.proxyUrl) {
-    CONFIG.ai.proxyUrl = DEFAULT_PROXY_URL;
-
-    // Persist PROXY_URL to .env
-    const configDir = path.join(os.homedir(), '.config', 'chorus');
-    await fs.mkdir(configDir, { recursive: true });
-    const envPath = path.join(configDir, '.env');
-    let envContent = '';
-    try { envContent = await fs.readFile(envPath, 'utf8'); } catch { /* no .env yet */ }
-    if (envContent.includes('PROXY_URL=')) {
-      envContent = envContent.replace(/PROXY_URL=.*/, `PROXY_URL=${CONFIG.ai.proxyUrl}`);
-    } else {
-      envContent = envContent.trimEnd() + `\nPROXY_URL=${CONFIG.ai.proxyUrl}`;
-    }
-    await fs.writeFile(envPath, envContent.trimEnd() + '\n');
-    process.env.PROXY_URL = CONFIG.ai.proxyUrl;
-  }
-
   console.log('Setting up Chorus authentication...\n');

   const readline = require('readline');

@@ -893,35 +862,59 @@ async function setupProxyAuth() {
   const password = await prompt(rl, 'Password: ', true);
   rl.close();

-
+  if (!email || !password) {
+    console.error('\n❌ Email and password are required.');
+    return;
+  }
+
+  const baseUrl = CONFIG.ai.chorusApiUrl.replace(/\/v1\/?$/, '');
+
+  // Try register first, fall back to login if already registered
   let apiKey;
-
-  const
-
-
-
+  try {
+    const https = require('https');
+    const http = require('http');
+
+    const doPost = (url, body) => new Promise((resolve, reject) => {
+      const parsed = new URL(url);
+      const mod = parsed.protocol === 'https:' ? https : http;
+      const req = mod.request(parsed, { method: 'POST', headers: { 'Content-Type': 'application/json' } }, (res) => {
+        let data = '';
+        res.on('data', (chunk) => data += chunk);
+        res.on('end', () => {
+          try {
+            resolve({ status: res.statusCode, body: JSON.parse(data) });
+          } catch {
+            resolve({ status: res.statusCode, body: data });
+          }
+        });
+      });
+      req.on('error', reject);
+      req.write(JSON.stringify(body));
+      req.end();
     });

-
+    console.log('  Registering...');
+    let res = await doPost(`${baseUrl}/auth/register`, { email, password });

-    if (res.
-
-
-      break;
+    if (res.status === 409 || (res.body && res.body.error && res.body.error.includes('already'))) {
+      console.log('  Account exists, logging in...');
+      res = await doPost(`${baseUrl}/auth/login`, { email, password });
     }

-
-
-
-    // Any other error on login means bad credentials
-    if (endpoint === '/auth/login' && !res.ok) {
-      console.error(`\n❌ Login failed: ${data.error?.message || 'Unknown error'}`);
+    if (res.status >= 400) {
+      const errMsg = (res.body && res.body.error) || JSON.stringify(res.body);
+      console.error(`\n❌ Authentication failed: ${errMsg}`);
       return;
     }
-  }

-
-
+    apiKey = res.body.apiKey || res.body.api_key || res.body.key;
+    if (!apiKey) {
+      console.error('\n❌ No API key returned from server. Response:', JSON.stringify(res.body));
+      return;
+    }
+  } catch (err) {
+    console.error(`\n❌ Failed to connect to Chorus: ${err.message}`);
     return;
   }

@@ -934,18 +927,22 @@ async function setupProxyAuth() {
     envContent = await fs.readFile(envPath, 'utf8');
   } catch { /* no .env yet */ }

-
-
-
-
+  const updates = { CHORUS_API_KEY: apiKey, CHORUS_API_URL: CONFIG.ai.chorusApiUrl };
+  for (const [key, value] of Object.entries(updates)) {
+    const regex = new RegExp(`^${key}=.*$`, 'm');
+    if (regex.test(envContent)) {
+      envContent = envContent.replace(regex, `${key}=${value}`);
+    } else {
+      envContent = envContent.trimEnd() + `\n${key}=${value}`;
+    }
   }
-  await fs.writeFile(envPath, envContent);
+  await fs.writeFile(envPath, envContent.trimEnd() + '\n');

   // Update in-memory config
-  CONFIG.ai.
-  process.env.
+  CONFIG.ai.chorusApiKey = apiKey;
+  process.env.CHORUS_API_KEY = apiKey;

-  console.log(
+  console.log(`\n✅ Chorus API key saved to ${envPath}\n`);
 }

 async function setupTeamsAuth() {
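The new setupProxyAuth registers first and falls back to login on conflict. A rough sketch of that flow (Python with requests; the /auth/register and /auth/login paths and the apiKey/api_key/key fallbacks come from the added lines above — the server's actual response schema is not shown in this diff, and error handling is elided):

    import requests

    def obtain_api_key(base_url, email, password):
        # Register first; on conflict (409 or an "already" error), log in instead.
        res = requests.post(f"{base_url}/auth/register", json={"email": email, "password": password})
        body = res.json()
        if res.status_code == 409 or "already" in str(body.get("error", "")):
            res = requests.post(f"{base_url}/auth/login", json={"email": email, "password": password})
            body = res.json()
        res.raise_for_status()
        # The CLI accepts any of these key names from the server
        return body.get("apiKey") or body.get("api_key") or body.get("key")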
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "chorus-cli",
-  "version": "0.4.1",
+  "version": "0.4.2",
   "description": "Automated ticket resolution with AI, Teams, and Slack integration",
   "main": "index.js",
   "bin": {

@@ -18,9 +18,9 @@
     "start": "node index.js run"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.73.0",
     "@octokit/rest": "^20.0.2",
     "dotenv": "^17.2.4",
+    "openai": "^4.0.0",
     "playwright": "^1.40.0"
   },
   "engines": {
package/scripts/postinstall.js
CHANGED

@@ -93,11 +93,11 @@ run(venvPython, ['-m', 'pip', 'install', '-r', REQUIREMENTS]);

 // 3. Verify critical dependency installed
 try {
-  execFileSync(venvPython, ['-c', 'import
+  execFileSync(venvPython, ['-c', 'import openai'], { stdio: 'ignore' });
   console.log('  Dependencies installed ✓');
 } catch {
   console.error(
-    '⚠ "
+    '⚠ "openai" module is missing after pip install.\n' +
     '  Run manually: ' + venvPython + ' -m pip install -r ' + REQUIREMENTS
   );
   process.exit(0);

package/tools/__pycache__/coder.cpython-314.pyc
Binary file

package/tools/__pycache__/qa.cpython-314.pyc
Binary file
package/tools/coder.py
CHANGED

@@ -1,20 +1,20 @@
 #!/usr/bin/env python3
 """
-Coder — A terminal coding agent powered by Claude.
+Coder — A terminal coding agent powered by Claude via the Chorus proxy.

 Usage:
   coder.py                          Interactive REPL
   coder.py --prompt "do something"  Headless mode — outputs JSON to stdout

 Environment variables:
-
-
-  CODER_MODEL — Model to use (default: claude-sonnet-4
+  CHORUS_API_KEY — Required. Your Chorus API key.
+  CHORUS_API_URL — Optional. Chorus proxy base URL (default: https://chorus-bad0f.web.app/v1)
+  CODER_MODEL — Model to use (default: anthropic/claude-sonnet-4)
   CODER_MAX_TOKENS — Max response tokens (default: 16384)
   CODER_SAFE_MODE — Set to 1 to require approval for writes/edits/bash
 """

-import
+from openai import OpenAI
 import argparse
 import json
 import os

@@ -41,7 +41,7 @@ class C:

 # ── Config ──────────────────────────────────────────────────────────────────

-MODEL = os.environ.get("CODER_MODEL", "claude-sonnet-4
+MODEL = os.environ.get("CODER_MODEL", "anthropic/claude-sonnet-4")
 MAX_TOKENS = int(os.environ.get("CODER_MAX_TOKENS", "16384"))
 SAFE_MODE = os.environ.get("CODER_SAFE_MODE", "").lower() in ("1", "true", "yes")

@@ -90,83 +90,101 @@ Approach:

 TOOLS = [
     {
-        "
-        "
-
-        "
-        "
-        "
-        "
-
+        "type": "function",
+        "function": {
+            "name": "read_file",
+            "description": "Read a file's contents. Returns lines with line numbers.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "path": {"type": "string", "description": "File path (relative to cwd or absolute)"},
+                    "offset": {"type": "integer", "description": "Start line (1-indexed)"},
+                    "limit": {"type": "integer", "description": "Max lines to read"},
+                },
+                "required": ["path"],
             },
-        "required": ["path"],
         },
     },
     {
-        "
-        "
-
-        "
-        "
-        "
-        "
+        "type": "function",
+        "function": {
+            "name": "write_file",
+            "description": "Create or overwrite a file with the given content.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "path": {"type": "string", "description": "File path to write"},
+                    "content": {"type": "string", "description": "Full file content"},
+                },
+                "required": ["path", "content"],
             },
-        "required": ["path", "content"],
         },
     },
     {
-        "
-        "
-        "
-        "
-
-
-
-
-        "
-        "
-        "
-
-
+        "type": "function",
+        "function": {
+            "name": "edit_file",
+            "description": (
+                "Replace an exact string in a file with new content. "
+                "old_string must match exactly including whitespace/indentation. "
+                "Fails if old_string is not found or is ambiguous (found multiple times without replace_all)."
+            ),
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "path": {"type": "string", "description": "File path to edit"},
+                    "old_string": {"type": "string", "description": "Exact string to find"},
+                    "new_string": {"type": "string", "description": "Replacement string"},
+                    "replace_all": {"type": "boolean", "description": "Replace all occurrences (default: false)"},
+                },
+                "required": ["path", "old_string", "new_string"],
             },
-        "required": ["path", "old_string", "new_string"],
         },
     },
     {
-        "
-        "
-
-        "
-        "
-        "
-        "
+        "type": "function",
+        "function": {
+            "name": "list_files",
+            "description": "List files matching a glob pattern. Use '**/*.ext' for recursive search.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "pattern": {"type": "string", "description": "Glob pattern (e.g. '**/*.py', 'src/**/*.ts')"},
+                    "path": {"type": "string", "description": "Base directory (default: cwd)"},
+                },
+                "required": ["pattern"],
             },
-        "required": ["pattern"],
         },
     },
     {
-        "
-        "
-
-        "
-        "
-        "
-        "
-
+        "type": "function",
+        "function": {
+            "name": "search_files",
+            "description": "Search file contents with regex. Returns matching lines with file:line: prefix.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "pattern": {"type": "string", "description": "Regex pattern to search for"},
+                    "path": {"type": "string", "description": "Directory or file to search (default: cwd)"},
+                    "include": {"type": "string", "description": "Glob to filter files (e.g. '*.py')"},
+                },
+                "required": ["pattern"],
             },
-        "required": ["pattern"],
         },
     },
     {
-        "
-        "
-
-        "
-        "
-        "
-        "
+        "type": "function",
+        "function": {
+            "name": "bash",
+            "description": "Execute a shell command. Returns stdout, stderr, and exit code.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "command": {"type": "string", "description": "Shell command to run"},
+                    "timeout": {"type": "integer", "description": "Timeout in seconds (default: 120)"},
+                },
+                "required": ["command"],
             },
-        "required": ["command"],
         },
     },
 ]
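The TOOLS rewrite above re-wraps each tool definition in the OpenAI function-calling envelope. A minimal sketch of the before/after shape (the Anthropic side follows that SDK's standard input_schema layout and is reconstructed here, since the removed lines are truncated in this view):

    properties = {"path": {"type": "string", "description": "File path"}}

    # Old shape (Anthropic tools API — reconstructed, not visible in the truncated diff):
    anthropic_tool = {
        "name": "read_file",
        "description": "Read a file's contents.",
        "input_schema": {"type": "object", "properties": properties, "required": ["path"]},
    }

    # New shape (OpenAI function calling, as added above): the same JSON Schema,
    # nested under "function" with "parameters" instead of "input_schema".
    openai_tool = {
        "type": "function",
        "function": {
            "name": "read_file",
            "description": "Read a file's contents.",
            "parameters": {"type": "object", "properties": properties, "required": ["path"]},
        },
    }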
@@ -502,7 +520,9 @@ def _estimate_tokens(messages):
     """Rough token estimate: 1 token ≈ 4 chars."""
     total = 0
     for msg in messages:
-        content = msg
+        content = _get_msg_content(msg) if isinstance(msg, dict) else getattr(msg, "content", "")
+        if content is None:
+            content = ""
         if isinstance(content, str):
             total += len(content)
         elif isinstance(content, list):

@@ -547,6 +567,13 @@ def _summarize_tool_use_input(block):
             block.input["content"] = f"[file content: {line_count} lines, truncated]"


+def _get_msg_content(msg):
+    """Get content from either dict or OpenAI message object."""
+    if isinstance(msg, dict):
+        return msg.get("content")
+    return getattr(msg, "content", None)
+
+
 def prune_context(messages, token_budget=None):
     """
     Trim old tool results when conversation exceeds the token budget.

@@ -567,7 +594,7 @@ def prune_context(messages, token_budget=None):

     for i in range(1, prune_end):
         msg = messages[i]
-        content = msg
+        content = _get_msg_content(msg)

         if isinstance(content, list):
             for item in content:

@@ -586,66 +613,71 @@ def prune_context(messages, token_budget=None):
 # ── Streaming Response Handler ──────────────────────────────────────────────

 def stream_response(client, messages, system):
-    """Stream
+    """Stream LLM response via Chorus proxy, handling tool-use loops until done."""
+    openai_messages = [{"role": "system", "content": system}]
+    # Convert existing messages to OpenAI format
+    for msg in messages:
+        openai_messages.append(msg)
+
     while True:
         printed_text = False
-
-
+
+        response = client.chat.completions.create(
             model=MODEL,
             max_tokens=MAX_TOKENS,
-
+            messages=openai_messages,
             tools=TOOLS,
-
-        ) as stream:
-            for event in stream:
-                if event.type == "content_block_delta":
-                    if hasattr(event.delta, "text"):
-                        sys.stdout.write(event.delta.text)
-                        sys.stdout.flush()
-                        printed_text = True
-
-            response = stream.get_final_message()
-
-        if printed_text:
-            print()  # newline after streamed text
-
-        # Add the full assistant message to conversation
-        messages.append({"role": "assistant", "content": response.content})
+        )

-
-
+        message = response.choices[0].message
+
+        # Add assistant message to conversation history
+        openai_messages.append(message)
+        if message.content:
+            messages.append({"role": "assistant", "content": message.content})
+            print(message.content)
+            printed_text = True
+
+        # Check for tool calls
+        if message.tool_calls:
             tool_results = []
-            for
-            … (remaining removed lines truncated by the registry diff viewer)
+            for tool_call in message.tool_calls:
+                function_name = tool_call.function.name
+                import json
+                try:
+                    arguments = json.loads(tool_call.function.arguments)
+                except json.JSONDecodeError:
+                    arguments = {}
+
+                # Track what's happening
+                if function_name == "write_file":
+                    path = arguments.get("path", "")
+                    if resolve_path(path).exists():
+                        pass  # Will track in result
+
+                # Execute the tool
+                result = execute_tool(function_name, arguments)
+
+                # Truncate huge results
+                if len(result) > 15000:
+                    result = result[:15000] + "\n... (output truncated)"
+                if _should_nudge(function_name, arguments, result):
+                    result += REFLECT_NUDGE
+
+                tool_results.append({
+                    "role": "tool",
+                    "tool_call_id": tool_call.id,
+                    "content": result,
+                })
+
+            openai_messages.extend(tool_results)
+            prune_context(openai_messages)
             print()  # breathing room before next response
         else:
             # Print token usage
             if hasattr(response, "usage") and response.usage:
-                inp = response.usage.
-                out = response.usage.
+                inp = response.usage.prompt_tokens
+                out = response.usage.completion_tokens
                 print(f"{C.DIM}[{inp} in / {out} out tokens]{C.RESET}")
             break
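Note that stream_response keeps its name but no longer streams: chat.completions.create is called without stream=True, so each turn now blocks for the complete response and prints it in one piece. The essential request → tool_calls → tool-results cycle it implements is sketched below (client, MODEL, TOOLS, and execute_tool are assumed from the surrounding file):

    import json

    def tool_loop(client, msgs):
        # Sketch of the OpenAI-style tool-call loop used above.
        while True:
            response = client.chat.completions.create(model=MODEL, messages=msgs, tools=TOOLS)
            message = response.choices[0].message
            msgs.append(message)
            if not message.tool_calls:
                return message.content
            for call in message.tool_calls:
                args = json.loads(call.function.arguments or "{}")
                msgs.append({
                    "role": "tool",
                    "tool_call_id": call.id,  # ties this result to the matching call
                    "content": execute_tool(call.function.name, args),
                })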
@@ -653,7 +685,41 @@ def stream_response(client, messages, system):

 def run_prompt(client, prompt, system):
     """Run a single prompt non-interactively. Returns a JSON-serializable dict."""
-
+
+    # PHASE 1: Planning - ask the model to explain its approach first
+    print(f"\n{C.BOLD}{C.BLUE}📝 PLANNING PHASE{C.RESET}", file=sys.stderr, flush=True)
+    print(f"{C.DIM}Understanding the issue and creating a plan...{C.RESET}\n", file=sys.stderr, flush=True)
+
+    plan_messages = [
+        {"role": "system", "content": system},
+        {"role": "user", "content": f"{prompt}\n\nBefore making any code changes, please analyze this issue and explain:\n1. What is the problem/goal?\n2. What files do you need to examine first?\n3. What is your overall approach to solving this?\n4. Which files will you modify and how?\n\nDo NOT write any code yet - just explain your plan."}
+    ]
+
+    try:
+        plan_response = client.chat.completions.create(
+            model=MODEL,
+            max_tokens=MAX_TOKENS,
+            messages=plan_messages,
+        )
+        plan_text = plan_response.choices[0].message.content.strip()
+
+        # Print the plan with formatting
+        print(f"{C.CYAN}{'─' * 60}{C.RESET}", file=sys.stderr, flush=True)
+        for line in plan_text.split('\n'):
+            print(f"{C.CYAN}  {line}{C.RESET}", file=sys.stderr, flush=True)
+        print(f"{C.CYAN}{'─' * 60}{C.RESET}\n", file=sys.stderr, flush=True)
+
+    except Exception as e:
+        print(f"{C.YELLOW}Could not generate plan: {e}{C.RESET}", file=sys.stderr, flush=True)
+        plan_text = ""
+
+    # PHASE 2: Execution - proceed with the actual coding
+    print(f"{C.BOLD}{C.GREEN}🔨 EXECUTING PLAN{C.RESET}\n", file=sys.stderr, flush=True)
+
+    messages = [
+        {"role": "system", "content": system},
+        {"role": "user", "content": prompt}
+    ]
     files_modified = set()
     files_created = set()
     commands_run = []

@@ -670,14 +736,13 @@ def run_prompt(client, prompt, system):
         turn += 1

         try:
-            response = client.
+            response = client.chat.completions.create(
                 model=MODEL,
                 max_tokens=MAX_TOKENS,
-                system=system,
-                tools=TOOLS,
                 messages=messages,
+                tools=TOOLS,
             )
-        except
+        except Exception as e:
             if is_token_limit_error(e):
                 print(f"\n{C.YELLOW}Token limit reached — stopping.{C.RESET}", file=sys.stderr, flush=True)
                 errors.append(str(e))

@@ -687,66 +752,88 @@ def run_prompt(client, prompt, system):
         # Per-turn token tracking
         turn_in = turn_out = 0
         if hasattr(response, "usage") and response.usage:
-            turn_in = response.usage.
-            turn_out = response.usage.
+            turn_in = response.usage.prompt_tokens
+            turn_out = response.usage.completion_tokens
         total_input_tokens += turn_in
         total_output_tokens += turn_out

-
+        message = response.choices[0].message
+        messages.append(message)
+
+        # Show reasoning/thinking if present
+        if message.content:
+            print(f"\n{C.YELLOW}💭 {message.content}{C.RESET}\n", file=sys.stderr, flush=True)

-
+        # Check for tool calls
+        if message.tool_calls:
             tool_results = []
-            for
-            … (removed lines truncated by the registry diff viewer)
-            result
-            … (removed lines truncated by the registry diff viewer)
+            for tool_call in message.tool_calls:
+                function_name = tool_call.function.name
+                import json
+                try:
+                    arguments = json.loads(tool_call.function.arguments)
+                except json.JSONDecodeError:
+                    arguments = {}
+
+                # Track what's happening
+                if function_name == "write_file":
+                    path = arguments.get("path", "")
+                    if resolve_path(path).exists():
+                        files_modified.add(path)
+                    else:
+                        files_created.add(path)
+                elif function_name == "edit_file":
+                    pass  # tracked after execution below
+                elif function_name == "bash":
+                    commands_run.append(arguments.get("command", ""))
+
+                # Colored tool log to stderr with reasoning
+                _, color = TOOL_LABELS.get(function_name, (function_name, C.DIM))
+                header = format_tool_header(function_name, arguments)
+                print(f"  {color}{header}{C.RESET}", file=sys.stderr, flush=True)
+
+                result = execute_tool(function_name, arguments)
+
+                # Track successful edits
+                if function_name == "edit_file" and not result.startswith("Error"):
+                    files_modified.add(arguments.get("path", ""))
+
+                if result.startswith("Error"):
+                    err_msg = f"{function_name}: {result}"
+                    # Recoverable: file not found on read (exploring), edit match failures (retries)
+                    if (function_name == "read_file" and "not found" in result) or \
+                       (function_name == "edit_file" and "not found" in result):
+                        warnings.append(err_msg)
+                        print(f"  {C.YELLOW}{result.splitlines()[0]}{C.RESET}", file=sys.stderr, flush=True)
+                    else:
+                        errors.append(err_msg)
+                        print(f"  {C.RED}{result.splitlines()[0]}{C.RESET}", file=sys.stderr, flush=True)
+
+                if len(result) > 15000:
+                    result = result[:15000] + "\n... (output truncated)"
+                if _should_nudge(function_name, arguments, result):
+                    result += REFLECT_NUDGE
+
+                tool_results.append({
+                    "role": "tool",
+                    "tool_call_id": tool_call.id,
+                    "content": result,
+                })
+
+            # Show progress instead of just tokens
+            action_summary = []
+            if files_created:
+                action_summary.append(f"+{len(files_created)} files")
+            if files_modified:
+                action_summary.append(f"~{len(files_modified)} files")
+            if commands_run:
+                action_summary.append(f"{len(commands_run)} commands")
+
+            if action_summary:
+                progress = " | ".join(action_summary)
+                print(f"  {C.DIM}[Progress: {progress}]{C.RESET}", file=sys.stderr, flush=True)
+
+            messages.extend(tool_results)

         # Prune old tool results to prevent quadratic token growth
         prune_context(messages)

@@ -757,44 +844,50 @@ def run_prompt(client, prompt, system):
     errors.append(f"Hit max turns limit ({max_turns})")
     print(f"{C.RED}Max turns reached ({max_turns}), stopping{C.RESET}", file=sys.stderr, flush=True)

-    # Final
-    print(f"{C.
-    … (removed lines truncated by the registry diff viewer)
+    # Final summary
+    print(f"\n{C.BOLD}{C.GREEN}✓ Done{C.RESET}", file=sys.stderr, flush=True)
+    if files_created:
+        print(f"  {C.GREEN}Created: {', '.join(sorted(files_created))}{C.RESET}", file=sys.stderr, flush=True)
+    if files_modified:
+        print(f"  {C.YELLOW}Modified: {', '.join(sorted(files_modified))}{C.RESET}", file=sys.stderr, flush=True)
+
+    # Extract LLM's final text response from the last assistant message
+    final_text = ""
+    for msg in reversed(messages):
+        if isinstance(msg, dict) and msg.get("role") == "assistant" and msg.get("content"):
+            final_text = msg["content"]
+            break
+        elif hasattr(msg, "role") and msg.role == "assistant" and msg.content:
+            final_text = msg.content
+            break

-    # Ask
-    # Uses a standalone minimal prompt — no conversation history, system prompt, or tools.
+    # Ask LLM for a CodeRabbit-oriented summary (skip if we hit token limit)
     summary = final_text.strip()
     if not any(is_token_limit_error(e) for e in errors):
-        summary_messages = [
-            "role": "
-            "content": (
+        summary_messages = [
+            {"role": "system", "content": "You are a helpful assistant that summarizes code changes."},
+            {"role": "user", "content": (
                 f"Summarize these code changes in 2-3 sentences for a code review tool.\n\n"
                 f"Files modified: {', '.join(sorted(files_modified)) or 'none'}\n"
                 f"Files created: {', '.join(sorted(files_created)) or 'none'}\n\n"
                 f"Agent's final notes:\n{final_text[:2000]}\n\n"
                 f"Focus on what changed, what was added/fixed, and why. Be specific. No preamble."
-            ),
-
+            )},
+        ]

         try:
-            summary_response = client.
+            summary_response = client.chat.completions.create(
                 model=MODEL,
                 max_tokens=1024,
                 messages=summary_messages,
             )

             if hasattr(summary_response, "usage") and summary_response.usage:
-                total_input_tokens += summary_response.usage.
-                total_output_tokens += summary_response.usage.
+                total_input_tokens += summary_response.usage.prompt_tokens
+                total_output_tokens += summary_response.usage.completion_tokens

-            summary =
-
-            ).strip()
-        except anthropic.APIError as e:
+            summary = summary_response.choices[0].message.content.strip()
+        except Exception as e:
             if is_token_limit_error(e):
                 errors.append(str(e))
             else:

@@ -821,20 +914,17 @@ def run_prompt(client, prompt, system):
 # ── Main ────────────────────────────────────────────────────────────────────

 def main():
-    parser = argparse.ArgumentParser(description="Coder — AI coding agent powered by Claude")
+    parser = argparse.ArgumentParser(description="Coder — AI coding agent powered by Claude via Chorus")
     parser.add_argument("-p", "--prompt", help="Run a single prompt headlessly and output JSON")
     args = parser.parse_args()

-
-
-
-
+    api_key = os.environ.get("CHORUS_API_KEY")
+    if not api_key:
+        print(f"{C.RED}Error: CHORUS_API_KEY not set. Run 'chorus setup' to configure.{C.RESET}", file=sys.stderr)
+        sys.exit(1)

-
-
-        client = anthropic.Anthropic(base_url=proxy_url.rstrip('/'))
-    else:
-        client = anthropic.Anthropic()
+    base_url = os.environ.get("CHORUS_API_URL", "https://chorus-bad0f.web.app/v1")
+    client = OpenAI(api_key=api_key, base_url=base_url)
     system = SYSTEM_PROMPT.format(cwd=os.getcwd(), approach=APPROACH_BLOCK)

     # Load codebase map if available

@@ -855,7 +945,7 @@ def main():
         result = run_prompt(client, args.prompt, system)
         print(json.dumps(result, indent=2))
         sys.exit(0 if result["completed"] else 1)
-    except
+    except Exception as e:
         print(json.dumps({
             "completed": False,
             "summary": f"API error: {e}",

@@ -953,7 +1043,7 @@ def main():
     except KeyboardInterrupt:
         del messages[snapshot:]
         print(f"\n{C.DIM}(interrupted){C.RESET}\n")
-    except
+    except Exception as e:
         del messages[snapshot:]
        print(f"\n{C.RED}API error: {e}{C.RESET}\n")
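run_prompt now makes two independent passes: a plain completion (no tools) that produces a plan, then the tool-enabled execution loop. A compact sketch of that split (the model, system prompt, and client are assumed from the surrounding file):

    def plan_then_execute(client, model, system, prompt):
        # Phase 1: plain completion, no tools — produce a human-readable plan.
        plan = client.chat.completions.create(
            model=model,
            max_tokens=1024,
            messages=[
                {"role": "system", "content": system},
                {"role": "user", "content": prompt + "\n\nExplain your plan. Do NOT write code yet."},
            ],
        ).choices[0].message.content
        print(plan)
        # Phase 2: the execution loop starts from a fresh two-message history;
        # the plan text is only printed for the operator, not fed back to the model.
        return [{"role": "system", "content": system}, {"role": "user", "content": prompt}]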
package/tools/qa.py
CHANGED

@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 """
-QA Chat — Multi-turn QA conversation tool powered by Claude + pluggable messengers.
+QA Chat — Multi-turn QA conversation tool powered by Claude via the Chorus proxy + pluggable messengers.

 Supports Teams (Playwright browser automation) and Slack (API-based).

@@ -17,7 +17,7 @@ Output (JSON on stdout):
 Progress is logged to stderr.
 """

-import
+from openai import OpenAI
 import argparse
 import json
 import os

@@ -27,7 +27,7 @@ from abc import ABC, abstractmethod

 # ── Config ──────────────────────────────────────────────────────────────────

-MODEL = os.environ.get("QA_MODEL", "claude-sonnet-4
+MODEL = os.environ.get("QA_MODEL", "anthropic/claude-sonnet-4")
 MAX_ROUNDS = int(os.environ.get("QA_MAX_ROUNDS", "5"))
 POLL_INTERVAL = int(os.environ.get("QA_POLL_INTERVAL", "60"))  # seconds
 POLL_TIMEOUT = int(os.environ.get("QA_POLL_TIMEOUT", "1800"))  # 30 min

@@ -316,33 +316,45 @@ If NO: set sufficient=false and write a short, friendly follow-up message asking
     }
 ]

-
-
-
-    system
+    # Chorus proxy uses OpenAI-compatible API — no native tool use
+    # We'll simulate by asking the model to respond in a structured way
+    messages.append({
+        "role": "system",
+        "content": (
             "You are evaluating a QA conversation about a software bug/feature. "
             "Your job is to decide if there is enough concrete information to write "
             "exact developer requirements. Vague answers like 'it should work properly' "
             "are NOT sufficient — you need specifics: exact behavior, exact UI elements, "
             "exact data flows, exact error messages, etc. "
-            "
+            "Respond with a JSON object containing: sufficient (boolean), reasoning (string), "
+            "and follow_up (string, required if sufficient is false). "
             "IMPORTANT: follow_up messages are sent via chat. Use plain text only — "
             "no markdown, no **bold**, no *italic*, no bullet points. "
             "Use numbered lines (1. 2. 3.) for multiple questions. Keep it conversational."
-    )
-
+        )
+    })
+
+    response = client.chat.completions.create(
+        model=MODEL,
+        max_tokens=1024,
         messages=messages,
+        response_format={"type": "json_object"},
     )

     if hasattr(response, "usage") and response.usage:
-        log(f"  Evaluate tokens: {response.usage.
-
-    for block in response.content:
-        if block.type == "tool_use" and block.name == "evaluation":
-            return block.input
+        log(f"  Evaluate tokens: {response.usage.prompt_tokens} in / {response.usage.completion_tokens} out")

-
+    import json
+    try:
+        result = json.loads(response.choices[0].message.content)
+        return result
+    except json.JSONDecodeError:
+        # Fallback if not valid JSON
+        return {
+            "sufficient": False,
+            "reasoning": "Could not parse evaluation",
+            "follow_up": "Could you please provide more details?"
+        }


 def synthesize(client, conversation, issue_context):
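Because the proxy speaks the OpenAI chat-completions dialect, evaluate() swaps Anthropic tool-use for JSON mode. A minimal sketch of that pattern (the endpoint here is a placeholder; assumes an OpenAI-compatible server that honors response_format):

    import json
    from openai import OpenAI

    client = OpenAI(api_key="sk-...", base_url="https://example.invalid/v1")  # placeholder endpoint

    # Ask for a structured verdict instead of a tool call; json_object mode
    # instructs the server to return a single valid JSON object.
    resp = client.chat.completions.create(
        model="anthropic/claude-sonnet-4",
        max_tokens=256,
        messages=[
            {"role": "system", "content": 'Reply with JSON: {"sufficient": bool, "reasoning": str}'},
            {"role": "user", "content": "The button is sometimes broken."},
        ],
        response_format={"type": "json_object"},
    )
    verdict = json.loads(resp.choices[0].message.content)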
@@ -367,29 +379,31 @@ Write a clear numbered list of requirements. Each requirement should be specific
     }
 ]

-
-
-
-    system=(
+    messages.insert(0, {
+        "role": "system",
+        "content": (
             "You synthesize QA conversations into exact, actionable developer requirements. "
             "Be specific and concrete. No vague language. Every requirement should be testable."
-    )
+        )
+    })
+
+    response = client.chat.completions.create(
+        model=MODEL,
+        max_tokens=2048,
         messages=messages,
     )

     if hasattr(response, "usage") and response.usage:
-        log(f"  Synthesize tokens: {response.usage.
+        log(f"  Synthesize tokens: {response.usage.prompt_tokens} in / {response.usage.completion_tokens} out")

-    return
+    return response.choices[0].message.content.strip()

 # ── Main Loop ───────────────────────────────────────────────────────────────

 def run_qa_chat(issue_context, messenger, qa_name):
-
-
-
-    else:
-        client = anthropic.Anthropic()
+    api_key = os.environ.get("CHORUS_API_KEY")
+    base_url = os.environ.get("CHORUS_API_URL", "https://chorus-bad0f.web.app/v1")
+    client = OpenAI(api_key=api_key, base_url=base_url)
     conversation = []
     raw_responses = []

@@ -465,11 +479,11 @@ def main():

     if args.super:
         global MODEL
-        MODEL = "claude-opus-4
+        MODEL = "anthropic/claude-opus-4"
         log(f"Super mode: using {MODEL}")

-    if not os.environ.get("
-        log("Error:
+    if not os.environ.get("CHORUS_API_KEY"):
+        log("Error: CHORUS_API_KEY not set. Run 'chorus setup' to configure.")
         sys.exit(1)

     # Build the appropriate messenger
package/tools/requirements.txt
CHANGED