chorus-cli 0.4.1 → 0.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.js CHANGED
@@ -7,8 +7,9 @@ const _localEnv = _path.join(__dirname, '.env');
  // Prefer user config dir (works when installed globally), fall back to local .env for dev
  require('dotenv').config({ path: require('fs').existsSync(_configEnv) ? _configEnv : _localEnv });
  const { Octokit } = require('@octokit/rest');
- const Anthropic = require('@anthropic-ai/sdk');
+
  const { createProvider } = require('./providers');
+ const OpenAI = require('openai');
  const { exec, execFile, spawn } = require('child_process');
  const util = require('util');
  const path = require('path');
@@ -17,13 +18,53 @@ const execPromise = util.promisify(exec);
  const execFilePromise = util.promisify(execFile);
  const fs = require('fs').promises;
 
+ // Returns a stable hardware UUID for this machine, with a persistent fallback
+ async function getMachineId() {
+   try {
+     if (process.platform === 'darwin') {
+       const { stdout } = await execPromise(
+         "ioreg -rd1 -c IOPlatformExpertDevice | awk -F'\"' '/IOPlatformUUID/{print $4}'"
+       );
+       if (stdout.trim()) return stdout.trim();
+     } else if (process.platform === 'linux') {
+       const fsp = require('fs').promises;
+       const id = (await fsp.readFile('/etc/machine-id', 'utf8')).trim();
+       if (id) return id;
+     } else if (process.platform === 'win32') {
+       const { stdout } = await execPromise('wmic csproduct get UUID');
+       const lines = stdout.trim().split('\n');
+       if (lines.length > 1) {
+         const uuid = lines[1].trim();
+         if (uuid && uuid !== 'FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF') return uuid;
+       }
+     }
+   } catch { /* fall through to persistent fallback */ }
+
+   // Persistent fallback: generate and cache a random UUID
+   const configDir = path.join(os.homedir(), '.config', 'chorus');
+   const idPath = path.join(configDir, 'machine-id');
+   try {
+     const existing = await fs.readFile(idPath, 'utf8');
+     if (existing.trim()) return existing.trim();
+   } catch { /* no file yet */ }
+
+   const { randomUUID } = require('crypto');
+   const newId = randomUUID();
+   await fs.mkdir(configDir, { recursive: true });
+   await fs.writeFile(idPath, newId + '\n');
+   return newId;
+ }
+
  // Run coder.py with real-time stderr streaming so progress is visible
  function runCoder(prompt) {
    return new Promise((resolve, reject) => {
      const env = { ...process.env };
-     if (CONFIG.ai.proxyUrl) {
-       env.CODER_PROXY_URL = CONFIG.ai.proxyUrl;
-       env.ANTHROPIC_API_KEY = CONFIG.ai.anthropicApiKey;
+     if (CONFIG.ai.chorusApiKey) {
+       env.CHORUS_API_KEY = CONFIG.ai.chorusApiKey;
+       env.CHORUS_API_URL = CONFIG.ai.chorusApiUrl;
+     }
+     if (CONFIG.ai.machineId) {
+       env.CHORUS_MACHINE_ID = CONFIG.ai.machineId;
      }
      const proc = spawn(CONFIG.ai.venvPython, [CONFIG.ai.coderPath, '--prompt', prompt], {
        cwd: process.cwd(),
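
Note on the hunk above: the machine fingerprint prefers a stable OS identifier (IOPlatformUUID on macOS, /etc/machine-id on Linux, the WMI product UUID on Windows) and only generates a random UUID, cached under ~/.config/chorus/machine-id, when none is available. A minimal Python sketch of the same resolution order (illustrative only; get_machine_id is our name, not part of the package):

import platform, subprocess, uuid
from pathlib import Path

def get_machine_id() -> str:
    try:
        if platform.system() == "Linux":
            mid = Path("/etc/machine-id").read_text().strip()
            if mid:
                return mid
        elif platform.system() == "Darwin":
            out = subprocess.run(["ioreg", "-rd1", "-c", "IOPlatformExpertDevice"],
                                 capture_output=True, text=True).stdout
            for line in out.splitlines():
                if "IOPlatformUUID" in line:
                    return line.split('"')[-2]  # value inside the last quoted field
    except OSError:
        pass  # fall through to the cached fallback
    cache = Path.home() / ".config" / "chorus" / "machine-id"  # same path the CLI uses
    if cache.exists() and cache.read_text().strip():
        return cache.read_text().strip()
    new_id = str(uuid.uuid4())
    cache.parent.mkdir(parents=True, exist_ok=True)
    cache.write_text(new_id + "\n")
    return new_id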
@@ -72,9 +113,12 @@ function runQAChat(issue, enrichedDetails, qaName, useSuper = false) {
  if (useSuper) args.push('--super');
 
  const env = { ...process.env };
- if (CONFIG.ai.proxyUrl) {
-   env.CODER_PROXY_URL = CONFIG.ai.proxyUrl;
-   env.ANTHROPIC_API_KEY = CONFIG.ai.anthropicApiKey;
+ if (CONFIG.ai.chorusApiKey) {
+   env.CHORUS_API_KEY = CONFIG.ai.chorusApiKey;
+   env.CHORUS_API_URL = CONFIG.ai.chorusApiUrl;
+ }
+ if (CONFIG.ai.machineId) {
+   env.CHORUS_MACHINE_ID = CONFIG.ai.machineId;
  }
  if (CONFIG.messenger === 'slack' && CONFIG.slack.botToken) {
    env.SLACK_BOT_TOKEN = CONFIG.slack.botToken;
@@ -142,8 +186,8 @@ const CONFIG = {
    venvPython: process.platform === 'win32'
      ? path.join(os.homedir(), '.config', 'chorus', '.venv', 'Scripts', 'python.exe')
      : path.join(os.homedir(), '.config', 'chorus', '.venv', 'bin', 'python'),
-   anthropicApiKey: process.env.PROXY_API_KEY || process.env.ANTHROPIC_API_KEY,
-   proxyUrl: process.env.PROXY_URL,
+   chorusApiKey: process.env.CHORUS_API_KEY,
+   chorusApiUrl: process.env.CHORUS_API_URL || 'https://chorus-bad0f.web.app/v1',
  }
};
 
@@ -189,20 +233,21 @@ IMPORTANT: Output ONLY the message above. Do not include any preamble, thinking
  const tool = CONFIG.ai.enrichmentTool;
 
  if (tool === 'claude') {
-   // Use Anthropic API
-   if (!CONFIG.ai.anthropicApiKey) {
-     throw new Error('PROXY_API_KEY or ANTHROPIC_API_KEY environment variable is required');
+   // Use Chorus proxy API
+   if (!CONFIG.ai.chorusApiKey) {
+     throw new Error('CHORUS_API_KEY environment variable is required. Run "chorus setup" to configure.');
    }
-   const clientOpts = { apiKey: CONFIG.ai.anthropicApiKey };
-   if (CONFIG.ai.proxyUrl) {
-     clientOpts.baseURL = CONFIG.ai.proxyUrl.replace(/\/+$/, '');
+   const openaiOpts = {
+     apiKey: CONFIG.ai.chorusApiKey,
+     baseURL: CONFIG.ai.chorusApiUrl,
+   };
+   if (CONFIG.ai.machineId) {
+     openaiOpts.defaultHeaders = { 'X-Machine-Id': CONFIG.ai.machineId };
    }
-   const anthropic = new Anthropic(clientOpts);
+   const openai = new OpenAI(openaiOpts);
 
-   const message = await anthropic.messages.create({
-
-     // when --super flag is added use claude-opus-4-6, else default to claude-sonnet-4-20250514
-     model: 'claude-opus-4-6',
+   const response = await openai.chat.completions.create({
+     model: 'anthropic/claude-opus-4',
      max_tokens: 2000,
      messages: [
        {
@@ -212,11 +257,11 @@ IMPORTANT: Output ONLY the message above. Do not include any preamble, thinking
      ]
    });
 
-   if (message.usage) {
-     console.log(`  Enrichment tokens: ${message.usage.input_tokens} in / ${message.usage.output_tokens} out`);
+   if (response.usage) {
+     console.log(`  Enrichment tokens: ${response.usage.prompt_tokens} in / ${response.usage.completion_tokens} out`);
    }
 
-   return message.content[0].text.trim();
+   return response.choices[0].message.content.trim();
  } else {
    // Use Kimi CLI
    const escapedPrompt = prompt.replace(/"/g, '\\"').replace(/\$/g, '\\$');
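
For reference, the enrichment call above in Python via the openai SDK (an illustrative sketch, not package code; the prompt string is a placeholder). The field renames are the crux of this migration: Anthropic's usage.input_tokens/output_tokens become usage.prompt_tokens/completion_tokens, and message.content[0].text becomes response.choices[0].message.content.

import os
from openai import OpenAI

client = OpenAI(
    api_key=os.environ["CHORUS_API_KEY"],
    base_url=os.environ.get("CHORUS_API_URL", "https://chorus-bad0f.web.app/v1"),
    default_headers={"X-Machine-Id": os.environ.get("CHORUS_MACHINE_ID", "")},
)
response = client.chat.completions.create(
    model="anthropic/claude-opus-4",
    max_tokens=2000,
    messages=[{"role": "user", "content": "Enrich this ticket ..."}],  # placeholder prompt
)
print(response.usage.prompt_tokens, response.usage.completion_tokens)
print(response.choices[0].message.content)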
@@ -488,16 +533,7 @@ function isTokenLimitError(err) {
  }
 
  async function fetchAccountEmail() {
-   if (!CONFIG.ai.proxyUrl || !CONFIG.ai.anthropicApiKey) return null;
-   try {
-     const res = await fetch(`${CONFIG.ai.proxyUrl.replace(/\/+$/, '')}/auth/me`, {
-       headers: { 'Authorization': `Bearer ${CONFIG.ai.anthropicApiKey}` },
-     });
-     if (res.ok) {
-       const data = await res.json();
-       return data.email || null;
-     }
-   } catch {}
+   // TODO: fetch email from Chorus proxy /auth/me endpoint
    return null;
  }
 
@@ -517,6 +553,9 @@ async function processTicket(issueArg, { useSuper = false, skipQA = false, qaNam
  try {
    console.log('🚀 Starting ticket processing...\n');
 
+   // Resolve machine fingerprint for per-machine usage tracking
+   CONFIG.ai.machineId = await getMachineId();
+
    // 0. Ensure Python venv exists and has required dependencies
    const reqFile = path.join(__dirname, 'tools', 'requirements.txt');
    const { execFileSync: efs } = require('child_process');
@@ -528,7 +567,7 @@ async function processTicket(issueArg, { useSuper = false, skipQA = false, qaNam
  }
 
  try {
-   efs(CONFIG.ai.venvPython, ['-c', 'import anthropic'], { stdio: 'ignore' });
+   efs(CONFIG.ai.venvPython, ['-c', 'import openai'], { stdio: 'ignore' });
  } catch {
    console.log('📦 Installing Python dependencies (first run)...');
    efs(CONFIG.ai.venvPython, ['-m', 'pip', 'install', '-r', reqFile], { stdio: 'inherit' });
@@ -864,26 +903,6 @@ async function setupGitHub() {
  }
 
  async function setupProxyAuth() {
-   const DEFAULT_PROXY_URL = 'https://chorus-bad0f.web.app';
-
-   if (!CONFIG.ai.proxyUrl) {
-     CONFIG.ai.proxyUrl = DEFAULT_PROXY_URL;
-
-     // Persist PROXY_URL to .env
-     const configDir = path.join(os.homedir(), '.config', 'chorus');
-     await fs.mkdir(configDir, { recursive: true });
-     const envPath = path.join(configDir, '.env');
-     let envContent = '';
-     try { envContent = await fs.readFile(envPath, 'utf8'); } catch { /* no .env yet */ }
-     if (envContent.includes('PROXY_URL=')) {
-       envContent = envContent.replace(/PROXY_URL=.*/, `PROXY_URL=${CONFIG.ai.proxyUrl}`);
-     } else {
-       envContent = envContent.trimEnd() + `\nPROXY_URL=${CONFIG.ai.proxyUrl}`;
-     }
-     await fs.writeFile(envPath, envContent.trimEnd() + '\n');
-     process.env.PROXY_URL = CONFIG.ai.proxyUrl;
-   }
-
    console.log('Setting up Chorus authentication...\n');
 
    const readline = require('readline');
@@ -893,35 +912,62 @@ async function setupProxyAuth() {
  const password = await prompt(rl, 'Password: ', true);
  rl.close();
 
- // Try register first, fall back to login
+ if (!email || !password) {
+   console.error('\n❌ Email and password are required.');
+   return;
+ }
+
+ const baseUrl = CONFIG.ai.chorusApiUrl.replace(/\/v1\/?$/, '');
+
+ // Resolve machine fingerprint so the server can track per-machine usage
+ const machineId = await getMachineId();
+
+ // Try register first, fall back to login if already registered
  let apiKey;
- for (const endpoint of ['/auth/register', '/auth/login']) {
-   const res = await fetch(`${CONFIG.ai.proxyUrl}${endpoint}`, {
-     method: 'POST',
-     headers: { 'Content-Type': 'application/json' },
-     body: JSON.stringify({ email, password }),
+ try {
+   const https = require('https');
+   const http = require('http');
+
+   const doPost = (url, body) => new Promise((resolve, reject) => {
+     const parsed = new URL(url);
+     const mod = parsed.protocol === 'https:' ? https : http;
+     const req = mod.request(parsed, { method: 'POST', headers: { 'Content-Type': 'application/json' } }, (res) => {
+       let data = '';
+       res.on('data', (chunk) => data += chunk);
+       res.on('end', () => {
+         try {
+           resolve({ status: res.statusCode, body: JSON.parse(data) });
+         } catch {
+           resolve({ status: res.statusCode, body: data });
+         }
+       });
+     });
+     req.on('error', reject);
+     req.write(JSON.stringify(body));
+     req.end();
    });
 
-   const data = await res.json();
+   console.log('  Registering...');
+   let res = await doPost(`${baseUrl}/auth/register`, { email, password, machineId });
 
-   if (res.ok && data.apiKey) {
-     apiKey = data.apiKey;
-     console.log(`\n✅ ${endpoint === '/auth/register' ? 'Registered' : 'Logged in'} successfully`);
-     break;
+   if (res.status === 409 || (res.body && res.body.error && res.body.error.includes('already'))) {
+     console.log('  Account exists, logging in...');
+     res = await doPost(`${baseUrl}/auth/login`, { email, password, machineId });
    }
 
-   // If register fails with 409 (already exists), try login next
-   if (res.status === 409) continue;
-
-   // Any other error on login means bad credentials
-   if (endpoint === '/auth/login' && !res.ok) {
-     console.error(`\n❌ Login failed: ${data.error?.message || 'Unknown error'}`);
+   if (res.status >= 400) {
+     const errMsg = (res.body && res.body.error) || JSON.stringify(res.body);
+     console.error(`\n❌ Authentication failed: ${errMsg}`);
      return;
    }
- }
 
- if (!apiKey) {
-   console.error('\n❌ Failed to authenticate');
+   apiKey = res.body.apiKey || res.body.api_key || res.body.key;
+   if (!apiKey) {
+     console.error('\n❌ No API key returned from server. Response:', JSON.stringify(res.body));
+     return;
+   }
+ } catch (err) {
+   console.error(`\n❌ Failed to connect to Chorus: ${err.message}`);
    return;
  }
 
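
The handshake above reduces to: POST /auth/register with email, password, and machineId; if the server answers 409 (or an "already exists" error), retry /auth/login with the same payload; then pull apiKey (in one of several spellings) from the JSON body. A compact sketch, assuming the requests package and the endpoint behavior implied by the client code:

import requests

def obtain_api_key(base_url: str, email: str, password: str, machine_id: str) -> str:
    payload = {"email": email, "password": password, "machineId": machine_id}
    res = requests.post(f"{base_url}/auth/register", json=payload)
    if res.status_code == 409:  # account already exists, log in instead
        res = requests.post(f"{base_url}/auth/login", json=payload)
    res.raise_for_status()
    body = res.json()
    # the CLI accepts several key spellings, mirroring the JS above
    return body.get("apiKey") or body.get("api_key") or body.get("key")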
@@ -934,18 +980,22 @@ async function setupProxyAuth() {
    envContent = await fs.readFile(envPath, 'utf8');
  } catch { /* no .env yet */ }
 
- if (envContent.includes('PROXY_API_KEY=')) {
-   envContent = envContent.replace(/PROXY_API_KEY=.*/, `PROXY_API_KEY=${apiKey}`);
- } else {
-   envContent = envContent.trimEnd() + `\nPROXY_API_KEY=${apiKey}\n`;
+ const updates = { CHORUS_API_KEY: apiKey, CHORUS_API_URL: CONFIG.ai.chorusApiUrl };
+ for (const [key, value] of Object.entries(updates)) {
+   const regex = new RegExp(`^${key}=.*$`, 'm');
+   if (regex.test(envContent)) {
+     envContent = envContent.replace(regex, `${key}=${value}`);
+   } else {
+     envContent = envContent.trimEnd() + `\n${key}=${value}`;
+   }
  }
- await fs.writeFile(envPath, envContent);
+ await fs.writeFile(envPath, envContent.trimEnd() + '\n');
 
  // Update in-memory config
- CONFIG.ai.anthropicApiKey = apiKey;
- process.env.PROXY_API_KEY = apiKey;
+ CONFIG.ai.chorusApiKey = apiKey;
+ process.env.CHORUS_API_KEY = apiKey;
 
- console.log(`  API key saved to .env\n`);
+ console.log(`\n✅ Chorus API key saved to ${envPath}\n`);
}
 
async function setupTeamsAuth() {
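
The .env write above is an idempotent upsert: each key is replaced in place when a KEY=... line already exists, otherwise appended, so repeated runs of chorus setup never duplicate entries. The same pattern in Python (a sketch under the same replace-or-append semantics; upsert_env is our name):

import re
from pathlib import Path

def upsert_env(env_path: Path, updates: dict) -> None:
    content = env_path.read_text() if env_path.exists() else ""
    for key, value in updates.items():
        pattern = re.compile(rf"^{re.escape(key)}=.*$", re.MULTILINE)
        line = f"{key}={value}"
        if pattern.search(content):
            content = pattern.sub(line, content)      # replace the existing line
        else:
            content = content.rstrip() + "\n" + line  # append a new line
    env_path.write_text(content.rstrip() + "\n")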
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "chorus-cli",
-   "version": "0.4.1",
+   "version": "0.4.4",
    "description": "Automated ticket resolution with AI, Teams, and Slack integration",
    "main": "index.js",
    "bin": {
@@ -18,9 +18,9 @@
    "start": "node index.js run"
  },
  "dependencies": {
-   "@anthropic-ai/sdk": "^0.73.0",
    "@octokit/rest": "^20.0.2",
    "dotenv": "^17.2.4",
+   "openai": "^4.0.0",
    "playwright": "^1.40.0"
  },
  "engines": {
@@ -93,11 +93,11 @@ run(venvPython, ['-m', 'pip', 'install', '-r', REQUIREMENTS]);
 
  // 3. Verify critical dependency installed
  try {
-   execFileSync(venvPython, ['-c', 'import anthropic'], { stdio: 'ignore' });
+   execFileSync(venvPython, ['-c', 'import openai'], { stdio: 'ignore' });
    console.log('  Dependencies installed ✓');
  } catch {
    console.error(
-     '⚠ "anthropic" module is missing after pip install.\n' +
+     '⚠ "openai" module is missing after pip install.\n' +
      '  Run manually: ' + venvPython + ' -m pip install -r ' + REQUIREMENTS
    );
    process.exit(0);
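
Both this postinstall check and the venv check in index.js use the same probe: run the venv's interpreter with -c "import <module>" and treat a nonzero exit as "missing". As a standalone sketch (module_available is our name, not part of the package):

import subprocess

def module_available(python_bin: str, module: str) -> bool:
    # exit code 0 means the import succeeded inside the venv interpreter
    return subprocess.run(
        [python_bin, "-c", f"import {module}"],
        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
    ).returncode == 0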
package/tools/coder.py CHANGED
@@ -1,20 +1,20 @@
  #!/usr/bin/env python3
  """
- Coder — A terminal coding agent powered by Claude.
+ Coder — A terminal coding agent powered by Claude via the Chorus proxy.
 
  Usage:
    coder.py                          Interactive REPL
    coder.py --prompt "do something"  Headless mode — outputs JSON to stdout
 
  Environment variables:
-   ANTHROPIC_API_KEY — Required. Your Anthropic API key (or Chorus pk- key).
-   CODER_PROXY_URL   — Optional. Chorus base URL (e.g. http://localhost:8081)
-   CODER_MODEL       — Model to use (default: claude-sonnet-4-5-20250929)
+   CHORUS_API_KEY    — Required. Your Chorus API key.
+   CHORUS_API_URL    — Optional. Chorus proxy base URL (default: https://chorus-bad0f.web.app/v1)
+   CODER_MODEL       — Model to use (default: anthropic/claude-sonnet-4)
    CODER_MAX_TOKENS  — Max response tokens (default: 16384)
    CODER_SAFE_MODE   — Set to 1 to require approval for writes/edits/bash
  """
 
- import anthropic
+ from openai import OpenAI
  import argparse
  import json
  import os
@@ -41,7 +41,7 @@ class C:
 
  # ── Config ──────────────────────────────────────────────────────────────────
 
- MODEL = os.environ.get("CODER_MODEL", "claude-sonnet-4-5-20250929")
+ MODEL = os.environ.get("CODER_MODEL", "anthropic/claude-sonnet-4")
  MAX_TOKENS = int(os.environ.get("CODER_MAX_TOKENS", "16384"))
  SAFE_MODE = os.environ.get("CODER_SAFE_MODE", "").lower() in ("1", "true", "yes")
 
@@ -50,23 +50,29 @@ def is_token_limit_error(err):
  return "token limit exceeded" in msg or "rate_limit_error" in msg
 
  SYSTEM_PROMPT = """\
- You are a coding agent running in the terminal.
+ You are a coding agent running inside a CLI tool called Chorus.
+ Your output goes straight to a terminal. There is no browser, no rich renderer.
  Working directory: {cwd}
 
  You help with software engineering tasks: writing code, debugging, refactoring, \
  explaining code, running commands, and managing files.
 
  Formatting:
- - Your output is displayed raw in a terminal. Never use markdown.
- - No ## headers, **bold**, *italic*, [links](url), or bullet symbols like -.
+ - Plain text only. Never use markdown.
+ - No ## headers, **bold**, *italic*, `backticks`, [links](url), or bullet symbols like -.
  - Use blank lines, indentation, and CAPS for emphasis or section labels.
  - Use plain numbered lists (1. 2. 3.) when listing things.
- - For inline code references, just use the name directly (e.g. myFunction, not `myFunction`).
- - Keep responses short and scannable.
+ - Refer to code identifiers by name directly (e.g. myFunction, not `myFunction`).
+
+ Communication style:
+ - Be terse. Say what you are doing and why in one or two lines, then do it.
+ - No greetings, preambles, encouragement, or sign-offs.
+ - No "Great question!", "Let me", "Sure!", "I'll now", or similar filler.
+ - When explaining your plan, use short declarative sentences. Skip obvious reasoning.
+ - After completing work, state what changed and nothing else.
 
  {approach}Guidelines:
- - Be direct and concise. No filler.
- - Always use your tools. If a question can be answered by running a command (git, ls, etc.), use the bash tool — never guess.
+ - Always use your tools. If a question can be answered by running a command (git, ls, etc.), use the bash tool. Never guess.
  - Always read a file before editing it.
  - If edit_file fails with "old_string not found", re-read the file to get the actual current content before retrying. Never guess at file contents.
  - Use edit_file for targeted changes. Use write_file for new files or complete rewrites.
@@ -90,83 +96,101 @@ Approach:
 
  TOOLS = [
      {
-         "name": "read_file",
-         "description": "Read a file's contents. Returns lines with line numbers.",
-         "input_schema": {
-             "type": "object",
-             "properties": {
-                 "path": {"type": "string", "description": "File path (relative to cwd or absolute)"},
-                 "offset": {"type": "integer", "description": "Start line (1-indexed)"},
-                 "limit": {"type": "integer", "description": "Max lines to read"},
+         "type": "function",
+         "function": {
+             "name": "read_file",
+             "description": "Read a file's contents. Returns lines with line numbers.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "path": {"type": "string", "description": "File path (relative to cwd or absolute)"},
+                     "offset": {"type": "integer", "description": "Start line (1-indexed)"},
+                     "limit": {"type": "integer", "description": "Max lines to read"},
+                 },
+                 "required": ["path"],
              },
-             "required": ["path"],
          },
      },
      {
-         "name": "write_file",
-         "description": "Create or overwrite a file with the given content.",
-         "input_schema": {
-             "type": "object",
-             "properties": {
-                 "path": {"type": "string", "description": "File path to write"},
-                 "content": {"type": "string", "description": "Full file content"},
+         "type": "function",
+         "function": {
+             "name": "write_file",
+             "description": "Create or overwrite a file with the given content.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "path": {"type": "string", "description": "File path to write"},
+                     "content": {"type": "string", "description": "Full file content"},
+                 },
+                 "required": ["path", "content"],
              },
-             "required": ["path", "content"],
          },
      },
      {
-         "name": "edit_file",
-         "description": (
-             "Replace an exact string in a file with new content. "
-             "old_string must match exactly including whitespace/indentation. "
-             "Fails if old_string is not found or is ambiguous (found multiple times without replace_all)."
-         ),
-         "input_schema": {
-             "type": "object",
-             "properties": {
-                 "path": {"type": "string", "description": "File path to edit"},
-                 "old_string": {"type": "string", "description": "Exact string to find"},
-                 "new_string": {"type": "string", "description": "Replacement string"},
-                 "replace_all": {"type": "boolean", "description": "Replace all occurrences (default: false)"},
+         "type": "function",
+         "function": {
+             "name": "edit_file",
+             "description": (
+                 "Replace an exact string in a file with new content. "
+                 "old_string must match exactly including whitespace/indentation. "
+                 "Fails if old_string is not found or is ambiguous (found multiple times without replace_all)."
+             ),
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "path": {"type": "string", "description": "File path to edit"},
+                     "old_string": {"type": "string", "description": "Exact string to find"},
+                     "new_string": {"type": "string", "description": "Replacement string"},
+                     "replace_all": {"type": "boolean", "description": "Replace all occurrences (default: false)"},
+                 },
+                 "required": ["path", "old_string", "new_string"],
              },
-             "required": ["path", "old_string", "new_string"],
          },
      },
      {
-         "name": "list_files",
-         "description": "List files matching a glob pattern. Use '**/*.ext' for recursive search.",
-         "input_schema": {
-             "type": "object",
-             "properties": {
-                 "pattern": {"type": "string", "description": "Glob pattern (e.g. '**/*.py', 'src/**/*.ts')"},
-                 "path": {"type": "string", "description": "Base directory (default: cwd)"},
+         "type": "function",
+         "function": {
+             "name": "list_files",
+             "description": "List files matching a glob pattern. Use '**/*.ext' for recursive search.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "pattern": {"type": "string", "description": "Glob pattern (e.g. '**/*.py', 'src/**/*.ts')"},
+                     "path": {"type": "string", "description": "Base directory (default: cwd)"},
+                 },
+                 "required": ["pattern"],
              },
-             "required": ["pattern"],
          },
      },
      {
-         "name": "search_files",
-         "description": "Search file contents with regex. Returns matching lines with file:line: prefix.",
-         "input_schema": {
-             "type": "object",
-             "properties": {
-                 "pattern": {"type": "string", "description": "Regex pattern to search for"},
-                 "path": {"type": "string", "description": "Directory or file to search (default: cwd)"},
-                 "include": {"type": "string", "description": "Glob to filter files (e.g. '*.py')"},
+         "type": "function",
+         "function": {
+             "name": "search_files",
+             "description": "Search file contents with regex. Returns matching lines with file:line: prefix.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "pattern": {"type": "string", "description": "Regex pattern to search for"},
+                     "path": {"type": "string", "description": "Directory or file to search (default: cwd)"},
+                     "include": {"type": "string", "description": "Glob to filter files (e.g. '*.py')"},
+                 },
+                 "required": ["pattern"],
             },
-             "required": ["pattern"],
          },
      },
      {
-         "name": "bash",
-         "description": "Execute a shell command. Returns stdout, stderr, and exit code.",
-         "input_schema": {
-             "type": "object",
-             "properties": {
-                 "command": {"type": "string", "description": "Shell command to run"},
-                 "timeout": {"type": "integer", "description": "Timeout in seconds (default: 120)"},
+         "type": "function",
+         "function": {
+             "name": "bash",
+             "description": "Execute a shell command. Returns stdout, stderr, and exit code.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "command": {"type": "string", "description": "Shell command to run"},
+                     "timeout": {"type": "integer", "description": "Timeout in seconds (default: 120)"},
+                 },
+                 "required": ["command"],
              },
-             "required": ["command"],
          },
      },
  ]
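
The schema change in this hunk is mechanical: an Anthropic-style tool dict keeps name, description, and input_schema at the top level, while the OpenAI-compatible shape nests them under {"type": "function", "function": ...} and renames input_schema to parameters. The mapping as a reusable helper (a sketch; to_openai_tool is our name, not part of the package):

def to_openai_tool(anthropic_tool: dict) -> dict:
    return {
        "type": "function",
        "function": {
            "name": anthropic_tool["name"],
            "description": anthropic_tool["description"],
            # the JSON Schema body is unchanged, only the key is renamed
            "parameters": anthropic_tool["input_schema"],
        },
    }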
@@ -502,7 +526,9 @@ def _estimate_tokens(messages):
  """Rough token estimate: 1 token ≈ 4 chars."""
  total = 0
  for msg in messages:
-     content = msg.get("content", "")
+     content = _get_msg_content(msg) if isinstance(msg, dict) else getattr(msg, "content", "")
+     if content is None:
+         content = ""
      if isinstance(content, str):
          total += len(content)
      elif isinstance(content, list):
@@ -547,6 +573,13 @@ def _summarize_tool_use_input(block):
          block.input["content"] = f"[file content: {line_count} lines, truncated]"
 
 
+ def _get_msg_content(msg):
+     """Get content from either dict or OpenAI message object."""
+     if isinstance(msg, dict):
+         return msg.get("content")
+     return getattr(msg, "content", None)
+
+
  def prune_context(messages, token_budget=None):
      """
      Trim old tool results when conversation exceeds the token budget.
@@ -567,7 +600,7 @@ def prune_context(messages, token_budget=None):
 
      for i in range(1, prune_end):
          msg = messages[i]
-         content = msg.get("content")
+         content = _get_msg_content(msg)
 
          if isinstance(content, list):
              for item in content:
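
_get_msg_content exists because the history now mixes plain dict messages with ChatCompletionMessage objects appended straight from the SDK. A tiny illustration of the two shapes (SimpleNamespace stands in for the SDK object here):

from types import SimpleNamespace

history = [
    {"role": "user", "content": "fix the bug"},        # dict message
    SimpleNamespace(role="assistant", content="done"),  # SDK-style object
]
for msg in history:
    content = msg.get("content") if isinstance(msg, dict) else getattr(msg, "content", None)
    print(content or "")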
@@ -586,66 +619,71 @@ def prune_context(messages, token_budget=None):
  # ── Streaming Response Handler ──────────────────────────────────────────────
 
  def stream_response(client, messages, system):
-     """Stream Claude's response, handling tool-use loops until done."""
+     """Stream LLM response via Chorus proxy, handling tool-use loops until done."""
+     openai_messages = [{"role": "system", "content": system}]
+     # Convert existing messages to OpenAI format
+     for msg in messages:
+         openai_messages.append(msg)
+
      while True:
          printed_text = False
-
-         with client.messages.stream(
+
+         response = client.chat.completions.create(
              model=MODEL,
              max_tokens=MAX_TOKENS,
-             system=system,
+             messages=openai_messages,
              tools=TOOLS,
-             messages=messages,
-         ) as stream:
-             for event in stream:
-                 if event.type == "content_block_delta":
-                     if hasattr(event.delta, "text"):
-                         sys.stdout.write(event.delta.text)
-                         sys.stdout.flush()
-                         printed_text = True
-
-         response = stream.get_final_message()
-
-         if printed_text:
-             print()  # newline after streamed text
-
-         # Add the full assistant message to conversation
-         messages.append({"role": "assistant", "content": response.content})
+         )
 
-         # If stop reason is tool_use, execute tools and loop
-         if response.stop_reason == "tool_use":
+         message = response.choices[0].message
+
+         # Add assistant message to conversation history
+         openai_messages.append(message)
+         if message.content:
+             messages.append({"role": "assistant", "content": message.content})
+             print(message.content)
+             printed_text = True
+
+         # Check for tool calls
+         if message.tool_calls:
              tool_results = []
-             for block in response.content:
-                 if block.type == "tool_use":
-                     if SAFE_MODE and block.name in NEEDS_PERMISSION:
-                         # Safe mode: show preview and ask
-                         if request_permission(block.name, block.input):
-                             result = execute_tool(block.name, block.input)
-                         else:
-                             result = f"Permission denied: user rejected {block.name} call."
-                     else:
-                         # Default: just run it
-                         print_tool_call(block.name, block.input)
-                         result = execute_tool(block.name, block.input)
-                     # Truncate huge results
-                     if len(result) > 15000:
-                         result = result[:15000] + "\n... (output truncated)"
-                     if _should_nudge(block.name, block.input, result):
-                         result += REFLECT_NUDGE
-                     print_tool_result_summary(block.name, result)
-                     tool_results.append({
-                         "type": "tool_result",
-                         "tool_use_id": block.id,
-                         "content": result,
-                     })
-             messages.append({"role": "user", "content": tool_results})
-             prune_context(messages)
+             for tool_call in message.tool_calls:
+                 function_name = tool_call.function.name
+                 import json
+                 try:
+                     arguments = json.loads(tool_call.function.arguments)
+                 except json.JSONDecodeError:
+                     arguments = {}
+
+                 # Track what's happening
+                 if function_name == "write_file":
+                     path = arguments.get("path", "")
+                     if resolve_path(path).exists():
+                         pass  # Will track in result
+
+                 # Execute the tool
+                 result = execute_tool(function_name, arguments)
+
+                 # Truncate huge results
+                 if len(result) > 15000:
+                     result = result[:15000] + "\n... (output truncated)"
+                 if _should_nudge(function_name, arguments, result):
+                     result += REFLECT_NUDGE
+
+                 tool_results.append({
+                     "role": "tool",
+                     "tool_call_id": tool_call.id,
+                     "content": result,
+                 })
+
+             openai_messages.extend(tool_results)
+             prune_context(openai_messages)
              print()  # breathing room before next response
          else:
              # Print token usage
              if hasattr(response, "usage") and response.usage:
-                 inp = response.usage.input_tokens
-                 out = response.usage.output_tokens
+                 inp = response.usage.prompt_tokens
+                 out = response.usage.completion_tokens
                  print(f"{C.DIM}[{inp} in / {out} out tokens]{C.RESET}")
              break
 
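
Stripped of logging and truncation, the rewritten loop above is the standard chat-completions tool cycle: call the model, append the assistant message, execute any tool_calls, append role="tool" results keyed by tool_call_id, and repeat until the model stops asking for tools. A minimal sketch (dispatch stands in for this file's execute_tool):

import json

def tool_loop(client, model, messages, tools, dispatch):
    while True:
        resp = client.chat.completions.create(model=model, messages=messages, tools=tools)
        msg = resp.choices[0].message
        messages.append(msg)  # keep the assistant turn, tool_calls included
        if not msg.tool_calls:
            return msg.content  # no more tool work requested
        for call in msg.tool_calls:
            args = json.loads(call.function.arguments or "{}")
            messages.append({
                "role": "tool",
                "tool_call_id": call.id,
                "content": dispatch(call.function.name, args),
            })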
@@ -653,7 +691,41 @@ def stream_response(client, messages, system):
 
  def run_prompt(client, prompt, system):
      """Run a single prompt non-interactively. Returns a JSON-serializable dict."""
-     messages = [{"role": "user", "content": prompt}]
+
+     # PHASE 1: Planning - ask the model to explain its approach first
+     print(f"\n{C.BOLD}{C.BLUE}📝 PLANNING PHASE{C.RESET}", file=sys.stderr, flush=True)
+     print(f"{C.DIM}Understanding the issue and creating a plan...{C.RESET}\n", file=sys.stderr, flush=True)
+
+     plan_messages = [
+         {"role": "system", "content": system},
+         {"role": "user", "content": f"{prompt}\n\nBefore making any code changes, briefly state:\n1. The goal\n2. Files to examine\n3. Files to modify and how\n\nKeep it short. Do NOT write any code yet."}
+     ]
+
+     try:
+         plan_response = client.chat.completions.create(
+             model=MODEL,
+             max_tokens=MAX_TOKENS,
+             messages=plan_messages,
+         )
+         plan_text = plan_response.choices[0].message.content.strip()
+
+         # Print the plan with formatting
+         print(f"{C.CYAN}{'─' * 60}{C.RESET}", file=sys.stderr, flush=True)
+         for line in plan_text.split('\n'):
+             print(f"{C.CYAN}  {line}{C.RESET}", file=sys.stderr, flush=True)
+         print(f"{C.CYAN}{'─' * 60}{C.RESET}\n", file=sys.stderr, flush=True)
+
+     except Exception as e:
+         print(f"{C.YELLOW}Could not generate plan: {e}{C.RESET}", file=sys.stderr, flush=True)
+         plan_text = ""
+
+     # PHASE 2: Execution - proceed with the actual coding
+     print(f"{C.BOLD}{C.GREEN}🔨 EXECUTING PLAN{C.RESET}\n", file=sys.stderr, flush=True)
+
+     messages = [
+         {"role": "system", "content": system},
+         {"role": "user", "content": prompt}
+     ]
      files_modified = set()
      files_created = set()
      commands_run = []
@@ -670,14 +742,13 @@ def run_prompt(client, prompt, system):
          turn += 1
 
          try:
-             response = client.messages.create(
+             response = client.chat.completions.create(
                  model=MODEL,
                  max_tokens=MAX_TOKENS,
-                 system=system,
-                 tools=TOOLS,
                  messages=messages,
+                 tools=TOOLS,
              )
-         except anthropic.APIError as e:
+         except Exception as e:
              if is_token_limit_error(e):
                  print(f"\n{C.YELLOW}Token limit reached — stopping.{C.RESET}", file=sys.stderr, flush=True)
                  errors.append(str(e))
@@ -687,66 +758,88 @@ def run_prompt(client, prompt, system):
          # Per-turn token tracking
          turn_in = turn_out = 0
          if hasattr(response, "usage") and response.usage:
-             turn_in = response.usage.input_tokens
-             turn_out = response.usage.output_tokens
+             turn_in = response.usage.prompt_tokens
+             turn_out = response.usage.completion_tokens
          total_input_tokens += turn_in
          total_output_tokens += turn_out
 
-         messages.append({"role": "assistant", "content": response.content})
+         message = response.choices[0].message
+         messages.append(message)
+
+         # Show reasoning/thinking if present
+         if message.content:
+             print(f"\n{C.YELLOW}💭 {message.content}{C.RESET}\n", file=sys.stderr, flush=True)
 
-         if response.stop_reason == "tool_use":
+         # Check for tool calls
+         if message.tool_calls:
              tool_results = []
-             for block in response.content:
-                 if block.type == "tool_use":
-                     # Track what's happening
-                     if block.name == "write_file":
-                         path = block.input.get("path", "")
-                         if resolve_path(path).exists():
-                             files_modified.add(path)
-                         else:
-                             files_created.add(path)
-                     elif block.name == "edit_file":
-                         pass  # tracked after execution below
-                     elif block.name == "bash":
-                         commands_run.append(block.input.get("command", ""))
-
-                     # Colored tool log to stderr
-                     _, color = TOOL_LABELS.get(block.name, (block.name, C.DIM))
-                     header = format_tool_header(block.name, block.input)
-                     print(f"  {color}{header}{C.RESET}", file=sys.stderr, flush=True)
-
-                     result = execute_tool(block.name, block.input)
-
-                     # Track successful edits
-                     if block.name == "edit_file" and not result.startswith("Error"):
-                         files_modified.add(block.input.get("path", ""))
-
-                     if result.startswith("Error"):
-                         err_msg = f"{block.name}: {result}"
-                         # Recoverable: file not found on read (exploring), edit match failures (retries)
-                         if (block.name == "read_file" and "not found" in result) or \
-                            (block.name == "edit_file" and "not found" in result):
-                             warnings.append(err_msg)
-                             print(f"  {C.YELLOW}{result.splitlines()[0]}{C.RESET}", file=sys.stderr, flush=True)
-                         else:
-                             errors.append(err_msg)
-                             print(f"  {C.RED}{result.splitlines()[0]}{C.RESET}", file=sys.stderr, flush=True)
-
-                     if len(result) > 15000:
-                         result = result[:15000] + "\n... (output truncated)"
-                     if _should_nudge(block.name, block.input, result):
-                         result += REFLECT_NUDGE
-
-                     tool_results.append({
-                         "type": "tool_result",
-                         "tool_use_id": block.id,
-                         "content": result,
-                     })
-
-             # Token usage for this turn
-             print(f"  {C.DIM}[{turn_in} in / {turn_out} out]{C.RESET}", file=sys.stderr, flush=True)
-
-             messages.append({"role": "user", "content": tool_results})
+             for tool_call in message.tool_calls:
+                 function_name = tool_call.function.name
+                 import json
+                 try:
+                     arguments = json.loads(tool_call.function.arguments)
+                 except json.JSONDecodeError:
+                     arguments = {}
+
+                 # Track what's happening
+                 if function_name == "write_file":
+                     path = arguments.get("path", "")
+                     if resolve_path(path).exists():
+                         files_modified.add(path)
+                     else:
+                         files_created.add(path)
+                 elif function_name == "edit_file":
+                     pass  # tracked after execution below
+                 elif function_name == "bash":
+                     commands_run.append(arguments.get("command", ""))
+
+                 # Colored tool log to stderr with reasoning
+                 _, color = TOOL_LABELS.get(function_name, (function_name, C.DIM))
+                 header = format_tool_header(function_name, arguments)
+                 print(f"  {color}{header}{C.RESET}", file=sys.stderr, flush=True)
+
+                 result = execute_tool(function_name, arguments)
+
+                 # Track successful edits
+                 if function_name == "edit_file" and not result.startswith("Error"):
+                     files_modified.add(arguments.get("path", ""))
+
+                 if result.startswith("Error"):
+                     err_msg = f"{function_name}: {result}"
+                     # Recoverable: file not found on read (exploring), edit match failures (retries)
+                     if (function_name == "read_file" and "not found" in result) or \
+                        (function_name == "edit_file" and "not found" in result):
+                         warnings.append(err_msg)
+                         print(f"  {C.YELLOW}{result.splitlines()[0]}{C.RESET}", file=sys.stderr, flush=True)
+                     else:
+                         errors.append(err_msg)
+                         print(f"  {C.RED}{result.splitlines()[0]}{C.RESET}", file=sys.stderr, flush=True)
+
+                 if len(result) > 15000:
+                     result = result[:15000] + "\n... (output truncated)"
+                 if _should_nudge(function_name, arguments, result):
+                     result += REFLECT_NUDGE
+
+                 tool_results.append({
+                     "role": "tool",
+                     "tool_call_id": tool_call.id,
+                     "content": result,
+                 })
+
+             # Show progress instead of just tokens
+             action_summary = []
+             if files_created:
+                 action_summary.append(f"+{len(files_created)} files")
+             if files_modified:
+                 action_summary.append(f"~{len(files_modified)} files")
+             if commands_run:
+                 action_summary.append(f"{len(commands_run)} commands")
+
+             if action_summary:
+                 progress = " | ".join(action_summary)
+                 print(f"  {C.DIM}[Progress: {progress}]{C.RESET}", file=sys.stderr, flush=True)
+
+             messages.extend(tool_results)
 
          # Prune old tool results to prevent quadratic token growth
          prune_context(messages)
@@ -757,44 +850,50 @@ def run_prompt(client, prompt, system):
          errors.append(f"Hit max turns limit ({max_turns})")
          print(f"{C.RED}Max turns reached ({max_turns}), stopping{C.RESET}", file=sys.stderr, flush=True)
 
-     # Final totals
-     print(f"{C.DIM}Coder finished: {turn} turns, {total_input_tokens} in / {total_output_tokens} out tokens{C.RESET}", file=sys.stderr, flush=True)
-
-     # Extract Claude's final text response
-     final_text = "".join(
-         block.text for block in response.content if block.type == "text"
-     ) if response else ""
+     # Final summary
+     print(f"\n{C.BOLD}{C.GREEN}  Done{C.RESET}", file=sys.stderr, flush=True)
+     if files_created:
+         print(f"  {C.GREEN}Created: {', '.join(sorted(files_created))}{C.RESET}", file=sys.stderr, flush=True)
+     if files_modified:
+         print(f"  {C.YELLOW}Modified: {', '.join(sorted(files_modified))}{C.RESET}", file=sys.stderr, flush=True)
+
+     # Extract LLM's final text response from the last assistant message
+     final_text = ""
+     for msg in reversed(messages):
+         if isinstance(msg, dict) and msg.get("role") == "assistant" and msg.get("content"):
+             final_text = msg["content"]
+             break
+         elif hasattr(msg, "role") and msg.role == "assistant" and msg.content:
+             final_text = msg.content
+             break
 
-     # Ask Claude for a CodeRabbit-oriented summary (skip if we hit token limit)
-     # Uses a standalone minimal prompt — no conversation history, system prompt, or tools.
+     # Ask LLM for a CodeRabbit-oriented summary (skip if we hit token limit)
      summary = final_text.strip()
      if not any(is_token_limit_error(e) for e in errors):
-         summary_messages = [{
-             "role": "user",
-             "content": (
+         summary_messages = [
+             {"role": "system", "content": "You are a helpful assistant that summarizes code changes."},
+             {"role": "user", "content": (
                  f"Summarize these code changes in 2-3 sentences for a code review tool.\n\n"
                  f"Files modified: {', '.join(sorted(files_modified)) or 'none'}\n"
                  f"Files created: {', '.join(sorted(files_created)) or 'none'}\n\n"
                  f"Agent's final notes:\n{final_text[:2000]}\n\n"
                  f"Focus on what changed, what was added/fixed, and why. Be specific. No preamble."
-             ),
-         }]
+             )},
+         ]
 
          try:
-             summary_response = client.messages.create(
+             summary_response = client.chat.completions.create(
                  model=MODEL,
                  max_tokens=1024,
                  messages=summary_messages,
             )
 
             if hasattr(summary_response, "usage") and summary_response.usage:
-                 total_input_tokens += summary_response.usage.input_tokens
-                 total_output_tokens += summary_response.usage.output_tokens
+                 total_input_tokens += summary_response.usage.prompt_tokens
+                 total_output_tokens += summary_response.usage.completion_tokens
 
-             summary = "".join(
-                 block.text for block in summary_response.content if block.type == "text"
-             ).strip()
-         except anthropic.APIError as e:
+             summary = summary_response.choices[0].message.content.strip()
+         except Exception as e:
              if is_token_limit_error(e):
                  errors.append(str(e))
              else:
@@ -821,20 +920,21 @@ def run_prompt(client, prompt, system):
  # ── Main ────────────────────────────────────────────────────────────────────
 
  def main():
-     parser = argparse.ArgumentParser(description="Coder — AI coding agent powered by Claude")
+     parser = argparse.ArgumentParser(description="Coder — AI coding agent powered by Claude via Chorus")
      parser.add_argument("-p", "--prompt", help="Run a single prompt headlessly and output JSON")
      args = parser.parse_args()
 
-     # if not os.environ.get("ANTHROPIC_API_KEY"):
-     #     print(f"{C.RED}Error: ANTHROPIC_API_KEY not set.{C.RESET}", file=sys.stderr)
-     #     print("  export ANTHROPIC_API_KEY=sk-ant-... (or pk-... for proxy)", file=sys.stderr)
-     #     sys.exit(1)
-
-     proxy_url = os.environ.get("CODER_PROXY_URL")
-     if proxy_url:
-         client = anthropic.Anthropic(base_url=proxy_url.rstrip('/'))
-     else:
-         client = anthropic.Anthropic()
+     api_key = os.environ.get("CHORUS_API_KEY")
+     if not api_key:
+         print(f"{C.RED}Error: CHORUS_API_KEY not set. Run 'chorus setup' to configure.{C.RESET}", file=sys.stderr)
+         sys.exit(1)
+
+     base_url = os.environ.get("CHORUS_API_URL", "https://chorus-bad0f.web.app/v1")
+     machine_id = os.environ.get("CHORUS_MACHINE_ID")
+     client_kwargs = {"api_key": api_key, "base_url": base_url}
+     if machine_id:
+         client_kwargs["default_headers"] = {"X-Machine-Id": machine_id}
+     client = OpenAI(**client_kwargs)
      system = SYSTEM_PROMPT.format(cwd=os.getcwd(), approach=APPROACH_BLOCK)
 
      # Load codebase map if available
@@ -855,7 +955,7 @@ def main():
          result = run_prompt(client, args.prompt, system)
          print(json.dumps(result, indent=2))
          sys.exit(0 if result["completed"] else 1)
-     except anthropic.APIError as e:
+     except Exception as e:
          print(json.dumps({
              "completed": False,
              "summary": f"API error: {e}",
@@ -953,7 +1053,7 @@ def main():
      except KeyboardInterrupt:
          del messages[snapshot:]
          print(f"\n{C.DIM}(interrupted){C.RESET}\n")
-     except anthropic.APIError as e:
+     except Exception as e:
          del messages[snapshot:]
          print(f"\n{C.RED}API error: {e}{C.RESET}\n")
 
package/tools/qa.py CHANGED
@@ -1,6 +1,6 @@
  #!/usr/bin/env python3
  """
- QA Chat — Multi-turn QA conversation tool powered by Claude + pluggable messengers.
+ QA Chat — Multi-turn QA conversation tool powered by Claude via the Chorus proxy + pluggable messengers.
 
  Supports Teams (Playwright browser automation) and Slack (API-based).
 
@@ -17,7 +17,7 @@ Output (JSON on stdout):
  Progress is logged to stderr.
  """
 
- import anthropic
+ from openai import OpenAI
  import argparse
  import json
  import os
@@ -27,7 +27,7 @@ from abc import ABC, abstractmethod
  from abc import ABC, abstractmethod
 
  # ── Config ──────────────────────────────────────────────────────────────────
 
- MODEL = os.environ.get("QA_MODEL", "claude-sonnet-4-5-20250929")
+ MODEL = os.environ.get("QA_MODEL", "anthropic/claude-sonnet-4")
  MAX_ROUNDS = int(os.environ.get("QA_MAX_ROUNDS", "5"))
  POLL_INTERVAL = int(os.environ.get("QA_POLL_INTERVAL", "60"))  # seconds
  POLL_TIMEOUT = int(os.environ.get("QA_POLL_TIMEOUT", "1800"))  # 30 min
@@ -316,33 +316,45 @@ If NO: set sufficient=false and write a short, friendly follow-up message asking
          }
      ]
 
-     response = client.messages.create(
-         model=MODEL,
-         max_tokens=1024,
-         system=(
+     # Chorus proxy uses OpenAI-compatible API — no native tool use
+     # We'll simulate by asking the model to respond in a structured way
+     messages.append({
+         "role": "system",
+         "content": (
              "You are evaluating a QA conversation about a software bug/feature. "
              "Your job is to decide if there is enough concrete information to write "
              "exact developer requirements. Vague answers like 'it should work properly' "
              "are NOT sufficient — you need specifics: exact behavior, exact UI elements, "
              "exact data flows, exact error messages, etc. "
-             "Use the evaluation tool to return your assessment. "
+             "Respond with a JSON object containing: sufficient (boolean), reasoning (string), "
+             "and follow_up (string, required if sufficient is false). "
              "IMPORTANT: follow_up messages are sent via chat. Use plain text only — "
              "no markdown, no **bold**, no *italic*, no bullet points. "
              "Use numbered lines (1. 2. 3.) for multiple questions. Keep it conversational."
-         ),
-         tools=EVALUATE_TOOLS,
-         tool_choice={"type": "tool", "name": "evaluation"},
+         )
+     })
+
+     response = client.chat.completions.create(
+         model=MODEL,
+         max_tokens=1024,
          messages=messages,
+         response_format={"type": "json_object"},
      )
 
      if hasattr(response, "usage") and response.usage:
-         log(f"  Evaluate tokens: {response.usage.input_tokens} in / {response.usage.output_tokens} out")
-
-     for block in response.content:
-         if block.type == "tool_use" and block.name == "evaluation":
-             return block.input
+         log(f"  Evaluate tokens: {response.usage.prompt_tokens} in / {response.usage.completion_tokens} out")
 
-     raise RuntimeError("Claude did not return evaluation tool call")
+     import json
+     try:
+         result = json.loads(response.choices[0].message.content)
+         return result
+     except json.JSONDecodeError:
+         # Fallback if not valid JSON
+         return {
+             "sufficient": False,
+             "reasoning": "Could not parse evaluation",
+             "follow_up": "Could you please provide more details?"
+         }
 
 
  def synthesize(client, conversation, issue_context):
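
The hunk above replaces Anthropic's forced tool call with OpenAI-style JSON mode: describe the expected object in a system message, pass response_format={"type": "json_object"}, and keep a conservative fallback when parsing fails. Distilled into a sketch (evaluate_json is our name; per OpenAI's documented behavior, json_object mode expects the word JSON to appear in the prompt, which the system message here satisfies):

import json

def evaluate_json(client, model, messages):
    response = client.chat.completions.create(
        model=model,
        max_tokens=1024,
        messages=messages,  # must instruct the model to answer with a JSON object
        response_format={"type": "json_object"},
    )
    try:
        return json.loads(response.choices[0].message.content)
    except json.JSONDecodeError:
        # conservative default: ask for more detail rather than crash
        return {"sufficient": False, "reasoning": "Could not parse evaluation",
                "follow_up": "Could you please provide more details?"}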
@@ -367,29 +379,35 @@ Write a clear numbered list of requirements. Each requirement should be specific
          }
      ]
 
-     response = client.messages.create(
-         model=MODEL,
-         max_tokens=2048,
-         system=(
+     messages.insert(0, {
+         "role": "system",
+         "content": (
              "You synthesize QA conversations into exact, actionable developer requirements. "
              "Be specific and concrete. No vague language. Every requirement should be testable."
-         ),
+         )
+     })
+
+     response = client.chat.completions.create(
+         model=MODEL,
+         max_tokens=2048,
          messages=messages,
      )
 
      if hasattr(response, "usage") and response.usage:
-         log(f"  Synthesize tokens: {response.usage.input_tokens} in / {response.usage.output_tokens} out")
+         log(f"  Synthesize tokens: {response.usage.prompt_tokens} in / {response.usage.completion_tokens} out")
 
-     return "".join(block.text for block in response.content if block.type == "text").strip()
+     return response.choices[0].message.content.strip()
 
  # ── Main Loop ───────────────────────────────────────────────────────────────
 
  def run_qa_chat(issue_context, messenger, qa_name):
-     proxy_url = os.environ.get("CODER_PROXY_URL")
-     if proxy_url:
-         client = anthropic.Anthropic(base_url=proxy_url.rstrip('/'))
-     else:
-         client = anthropic.Anthropic()
+     api_key = os.environ.get("CHORUS_API_KEY")
+     base_url = os.environ.get("CHORUS_API_URL", "https://chorus-bad0f.web.app/v1")
+     machine_id = os.environ.get("CHORUS_MACHINE_ID")
+     client_kwargs = {"api_key": api_key, "base_url": base_url}
+     if machine_id:
+         client_kwargs["default_headers"] = {"X-Machine-Id": machine_id}
+     client = OpenAI(**client_kwargs)
      conversation = []
      raw_responses = []
 
@@ -465,11 +483,11 @@ def main():
 
      if args.super:
          global MODEL
-         MODEL = "claude-opus-4-6"
+         MODEL = "anthropic/claude-opus-4"
          log(f"Super mode: using {MODEL}")
 
-     if not os.environ.get("ANTHROPIC_API_KEY"):
-         log("Error: ANTHROPIC_API_KEY not set")
+     if not os.environ.get("CHORUS_API_KEY"):
+         log("Error: CHORUS_API_KEY not set. Run 'chorus setup' to configure.")
          sys.exit(1)
 
      # Build the appropriate messenger
package/tools/requirements.txt CHANGED
@@ -1,3 +1,3 @@
- anthropic>=0.40.0
+ openai>=1.0.0
  playwright
  slack_sdk>=3.27.0