codebot-ai 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +247 -0
- package/bin/codebot +5 -0
- package/dist/agent.d.ts +31 -0
- package/dist/agent.js +256 -0
- package/dist/banner.d.ts +19 -0
- package/dist/banner.js +148 -0
- package/dist/browser/cdp.d.ts +29 -0
- package/dist/browser/cdp.js +292 -0
- package/dist/cli.d.ts +2 -0
- package/dist/cli.js +518 -0
- package/dist/context/manager.d.ts +27 -0
- package/dist/context/manager.js +139 -0
- package/dist/context/repo-map.d.ts +5 -0
- package/dist/context/repo-map.js +100 -0
- package/dist/history.d.ts +27 -0
- package/dist/history.js +146 -0
- package/dist/index.d.ts +13 -0
- package/dist/index.js +42 -0
- package/dist/memory.d.ts +39 -0
- package/dist/memory.js +168 -0
- package/dist/parser.d.ts +8 -0
- package/dist/parser.js +79 -0
- package/dist/providers/anthropic.d.ts +9 -0
- package/dist/providers/anthropic.js +288 -0
- package/dist/providers/index.d.ts +5 -0
- package/dist/providers/index.js +13 -0
- package/dist/providers/openai.d.ts +11 -0
- package/dist/providers/openai.js +173 -0
- package/dist/providers/registry.d.ts +15 -0
- package/dist/providers/registry.js +115 -0
- package/dist/setup.d.ts +17 -0
- package/dist/setup.js +243 -0
- package/dist/tools/browser.d.ts +43 -0
- package/dist/tools/browser.js +329 -0
- package/dist/tools/edit.d.ts +26 -0
- package/dist/tools/edit.js +73 -0
- package/dist/tools/execute.d.ts +26 -0
- package/dist/tools/execute.js +52 -0
- package/dist/tools/glob.d.ts +24 -0
- package/dist/tools/glob.js +102 -0
- package/dist/tools/grep.d.ts +29 -0
- package/dist/tools/grep.js +125 -0
- package/dist/tools/index.d.ts +10 -0
- package/dist/tools/index.js +49 -0
- package/dist/tools/memory.d.ts +36 -0
- package/dist/tools/memory.js +114 -0
- package/dist/tools/read.d.ts +26 -0
- package/dist/tools/read.js +75 -0
- package/dist/tools/think.d.ts +18 -0
- package/dist/tools/think.js +20 -0
- package/dist/tools/web-fetch.d.ts +36 -0
- package/dist/tools/web-fetch.js +83 -0
- package/dist/tools/write.d.ts +22 -0
- package/dist/tools/write.js +65 -0
- package/dist/types.d.ts +82 -0
- package/dist/types.js +3 -0
- package/package.json +57 -0
package/dist/cli.js
ADDED
|
@@ -0,0 +1,518 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
35
|
+
Object.defineProperty(exports, "__esModule", { value: true });
exports.main = main;
// Compiled CommonJS requires for the CLI's collaborators.
const readline = __importStar(require("readline"));
const agent_1 = require("./agent");
const openai_1 = require("./providers/openai");
const anthropic_1 = require("./providers/anthropic");
const registry_1 = require("./providers/registry");
const history_1 = require("./history");
const setup_1 = require("./setup");
const banner_1 = require("./banner");
// Version string printed by --version and the startup banner.
const VERSION = '1.0.0';
|
|
46
|
+
// ANSI escape sequences used for terminal styling.
const C = {
    reset: '\x1b[0m',
    bold: '\x1b[1m',
    dim: '\x1b[2m',
    red: '\x1b[31m',
    green: '\x1b[32m',
    yellow: '\x1b[33m',
    blue: '\x1b[34m',
    cyan: '\x1b[36m',
};
/**
 * Wrap `text` in the ANSI escape for `style`, then reset styling.
 * @param {string} text - The text to style.
 * @param {string} style - A key of C ('red', 'bold', 'dim', ...).
 * @returns {string} The styled string.
 */
function c(text, style) {
    const open = C[style];
    return open + text + C.reset;
}
|
|
59
|
+
/**
 * CLI entry point.
 * Order of operations: flag-only exits (--help/--version/--setup), first-run
 * setup, config resolution, session selection (--continue/--resume), then one
 * of three run modes: single message from argv, piped stdin, or the
 * interactive REPL.
 */
async function main() {
    const args = parseArgs(process.argv.slice(2));
    if (args.help) {
        showHelp();
        return;
    }
    if (args.version) {
        console.log(`codebot v${VERSION}`);
        return;
    }
    // Setup wizard
    if (args.setup) {
        await (0, setup_1.runSetup)();
        return;
    }
    // First run: auto-launch setup if nothing is configured
    // (only when interactive and no one-shot message was supplied).
    if ((0, setup_1.isFirstRun)() && process.stdin.isTTY && !args.message) {
        console.log(c('Welcome! No configuration found — launching setup...', 'cyan'));
        await (0, setup_1.runSetup)();
        // If setup saved a config, continue to main flow
        // Otherwise exit
        if ((0, setup_1.isFirstRun)())
            return;
    }
    const config = await resolveConfig(args);
    const provider = createProvider(config);
    // Session management: --continue resumes the most recent session;
    // --resume <id> targets a specific one.
    let resumeId;
    if (args.continue) {
        resumeId = history_1.SessionManager.latest();
        if (!resumeId) {
            console.log(c('No previous session found.', 'yellow'));
        }
    }
    else if (typeof args.resume === 'string') {
        resumeId = args.resume;
    }
    const session = new history_1.SessionManager(config.model, resumeId);
    const sessionShort = session.getId().substring(0, 8);
    console.log((0, banner_1.banner)(VERSION, config.model, `${config.provider} @ ${config.baseUrl}`, `${sessionShort}...`, !!config.autoApprove));
    if (resumeId) {
        console.log(c(` Resuming session...`, 'green'));
    }
    console.log(c(` ${(0, banner_1.randomGreeting)()}\n`, 'dim'));
    // Every message the agent produces is persisted via the session hook.
    const agent = new agent_1.Agent({
        provider,
        model: config.model,
        maxIterations: config.maxIterations,
        autoApprove: config.autoApprove,
        onMessage: (msg) => session.save(msg),
    });
    // Resume: load previous messages
    if (resumeId) {
        const messages = session.load();
        if (messages.length > 0) {
            agent.loadMessages(messages);
            console.log(c(` Loaded ${messages.length} messages from previous session.`, 'dim'));
        }
    }
    // Non-interactive: single message from CLI args
    if (typeof args.message === 'string') {
        await runOnce(agent, args.message);
        return;
    }
    // Non-interactive: piped stdin
    if (!process.stdin.isTTY) {
        const input = await readStdin();
        if (input.trim()) {
            await runOnce(agent, input.trim());
        }
        return;
    }
    // Interactive REPL
    await repl(agent, config, session);
}
|
|
134
|
+
/**
 * Build the LLM provider for the resolved configuration.
 * Anthropic gets its native client; every other provider speaks the
 * OpenAI-compatible chat API and shares the OpenAI client.
 * @param {object} config - Resolved config (provider, baseUrl, apiKey, model).
 * @returns {LLMProvider} The provider instance.
 */
function createProvider(config) {
    const options = {
        baseUrl: config.baseUrl,
        apiKey: config.apiKey,
        model: config.model,
    };
    if (config.provider === 'anthropic') {
        return new anthropic_1.AnthropicProvider(options);
    }
    // All other providers use OpenAI-compatible format
    return new openai_1.OpenAIProvider(options);
}
|
|
149
|
+
/**
 * Interactive read–eval–print loop.
 * Lines starting with '/' dispatch slash commands; everything else runs a
 * full agent turn and renders its event stream. Closing the interface
 * (Ctrl-D) exits the process.
 *
 * NOTE(review): `session` is unused here — persistence happens via the
 * agent's onMessage hook wired up in main(); confirm before removing.
 * NOTE(review): the 'line' handler is async, so a line entered while a turn
 * is still running starts a concurrent agent.run — verify this is intended.
 */
async function repl(agent, config, session) {
    const rl = readline.createInterface({
        input: process.stdin,
        output: process.stdout,
        prompt: c('> ', 'cyan'),
    });
    rl.prompt();
    rl.on('line', async (line) => {
        const input = line.trim();
        if (!input) {
            rl.prompt();
            return;
        }
        if (input.startsWith('/')) {
            handleSlashCommand(input, agent, config);
            rl.prompt();
            return;
        }
        try {
            // Stream and render every event of this agent turn.
            for await (const event of agent.run(input)) {
                renderEvent(event);
            }
        }
        catch (err) {
            const msg = err instanceof Error ? err.message : String(err);
            console.error(c(`\nError: ${msg}`, 'red'));
        }
        console.log();
        rl.prompt();
    });
    rl.on('close', () => {
        console.log(c('\nBye!', 'dim'));
        process.exit(0);
    });
}
|
|
184
|
+
/**
 * Run a single agent turn for `message`, rendering every event, then emit a
 * trailing newline. Used for one-shot argv mode and pipe mode.
 * @param {Agent} agent - The configured agent.
 * @param {string} message - The user message to process.
 */
async function runOnce(agent, message) {
    const stream = agent.run(message);
    for await (const event of stream) {
        renderEvent(event);
    }
    console.log();
}
|
|
190
|
+
// Tracks whether a streamed "thinking" segment is currently open, so the
// next non-thinking event can terminate it with a newline.
let isThinking = false;
/**
 * Render one agent event to the terminal.
 * Thinking/text deltas stream inline; tool calls, tool results, token usage,
 * compaction notices and errors each get their own formatted line.
 * @param {object} event - An event yielded by Agent.run().
 */
function renderEvent(event) {
    // Close an open thinking segment, if any.
    const endThinking = () => {
        if (isThinking) {
            process.stdout.write('\n');
            isThinking = false;
        }
    };
    if (event.type === 'thinking') {
        if (!isThinking) {
            process.stdout.write(c('\n💭 ', 'dim'));
            isThinking = true;
        }
        process.stdout.write(c(event.text || '', 'dim'));
        return;
    }
    if (event.type === 'text') {
        endThinking();
        process.stdout.write(event.text || '');
        return;
    }
    if (event.type === 'tool_call') {
        endThinking();
        console.log(c(`\n⚡ ${event.toolCall?.name}`, 'yellow') +
            c(`(${formatArgs(event.toolCall?.args)})`, 'dim'));
        return;
    }
    if (event.type === 'tool_result') {
        if (event.toolResult?.is_error) {
            console.log(c(` ✗ ${truncate(event.toolResult.result, 200)}`, 'red'));
        }
        else {
            const result = event.toolResult?.result || '';
            const lines = result.split('\n');
            // Long results are summarized by line count instead of echoed.
            if (lines.length > 10) {
                console.log(c(` ✓ (${lines.length} lines)`, 'green'));
            }
            else {
                console.log(c(` ✓ ${truncate(result, 200)}`, 'green'));
            }
        }
        return;
    }
    if (event.type === 'usage') {
        if (event.usage) {
            const parts = [];
            if (event.usage.inputTokens)
                parts.push(`in: ${event.usage.inputTokens}`);
            if (event.usage.outputTokens)
                parts.push(`out: ${event.usage.outputTokens}`);
            if (parts.length > 0) {
                console.log(c(` [${parts.join(', ')} tokens]`, 'dim'));
            }
        }
        return;
    }
    if (event.type === 'compaction') {
        console.log(c(`\n📦 ${event.text}`, 'dim'));
        return;
    }
    if (event.type === 'error') {
        console.error(c(`\n✗ ${event.error}`, 'red'));
        return;
    }
    if (event.type === 'done') {
        endThinking();
    }
}
|
|
256
|
+
/**
 * Format a tool-call argument object as a compact `key: value` list.
 * String values are clipped to 60 characters; other values are JSON-encoded.
 * @param {object|null|undefined} args - Tool-call arguments.
 * @returns {string} Comma-separated `key: value` pairs ('' when absent).
 */
function formatArgs(args) {
    if (!args) {
        return '';
    }
    const parts = [];
    for (const [key, value] of Object.entries(args)) {
        const shown = typeof value === 'string'
            ? (value.length <= 60 ? value : value.substring(0, 60) + '...')
            : JSON.stringify(value);
        parts.push(`${key}: ${shown}`);
    }
    return parts.join(', ');
}
|
|
266
|
+
/**
 * Clip `s` to at most `max` characters, appending '...' when clipped.
 * @param {string} s - Input string.
 * @param {number} max - Maximum character count to keep.
 * @returns {string} The original or clipped string.
 */
function truncate(s, max) {
    if (s.length <= max) {
        return s;
    }
    return s.slice(0, max) + '...';
}
|
|
269
|
+
/**
 * Dispatch an interactive slash command.
 * Mutates `config` in place for /model and /auto; /quit and /exit terminate
 * the process immediately (no fallthrough to default).
 * @param {string} input - The raw input line, starting with '/'.
 * @param {Agent} agent - The active agent (for history/compaction commands).
 * @param {object} config - Mutable resolved configuration.
 */
function handleSlashCommand(input, agent, config) {
    const [cmd, ...rest] = input.split(/\s+/);
    switch (cmd) {
        case '/help':
            console.log(`${c('Commands:', 'bold')}
/help Show this help
/model Show or change model (/model <name>)
/models List all supported models
/sessions List saved sessions
/clear Clear conversation history
/compact Force context compaction
/auto Toggle autonomous mode
/config Show current config
/quit Exit`);
            break;
        case '/model':
            // With an argument: switch model and re-detect its provider.
            // Without: print the current model/provider.
            if (rest.length > 0) {
                config.model = rest.join(' ');
                const detected = (0, registry_1.detectProvider)(config.model);
                if (detected) {
                    config.provider = detected;
                    console.log(c(`Model: ${config.model} (provider: ${detected})`, 'green'));
                }
                else {
                    console.log(c(`Model: ${config.model} (local/ollama)`, 'green'));
                }
            }
            else {
                console.log(`Current model: ${config.model} (${config.provider})`);
            }
            break;
        case '/models':
            showModels();
            break;
        case '/clear':
            agent.clearHistory();
            console.log(c('Conversation cleared.', 'dim'));
            break;
        case '/compact': {
            const stats = agent.forceCompact();
            console.log(c(`Context compacted: ${stats.before} → ${stats.after} messages.`, 'dim'));
            break;
        }
        case '/auto':
            config.autoApprove = !config.autoApprove;
            console.log(c(`Autonomous mode: ${config.autoApprove ? 'ON' : 'OFF'}`, config.autoApprove ? 'yellow' : 'green'));
            break;
        case '/sessions': {
            const sessions = history_1.SessionManager.list();
            if (sessions.length === 0) {
                console.log(c('No saved sessions.', 'dim'));
            }
            else {
                console.log(c('\nSaved sessions:', 'bold'));
                for (const s of sessions) {
                    const date = s.updated ? new Date(s.updated).toLocaleString() : 'unknown';
                    console.log(` ${c(s.id.substring(0, 8), 'cyan')} ${date} ${s.messageCount} msgs ${c(s.preview || '(empty)', 'dim')}`);
                }
                console.log(c(`\nResume with: codebot --resume <id>`, 'dim'));
            }
            break;
        }
        case '/config':
            // Mask the API key before printing.
            console.log(JSON.stringify({ ...config, apiKey: config.apiKey ? '***' : undefined }, null, 2));
            break;
        case '/quit':
        case '/exit':
            process.exit(0);
        default:
            console.log(c(`Unknown command: ${cmd}. Type /help`, 'yellow'));
    }
}
|
|
341
|
+
/**
 * Print every model in the registry, grouped by provider.
 * Models without a declared provider are listed under "local/ollama".
 */
function showModels() {
    const { MODEL_REGISTRY } = require('./providers/registry');
    const byProvider = {};
    for (const [name, info] of Object.entries(MODEL_REGISTRY)) {
        const group = info.provider || 'local/ollama';
        if (!byProvider[group]) {
            byProvider[group] = [];
        }
        byProvider[group].push(name);
    }
    // Default .sort() on entries compares stringified [key, models] pairs,
    // which orders groups by provider name.
    for (const [provider, models] of Object.entries(byProvider).sort()) {
        console.log(c(`\n${provider}:`, 'bold'));
        models.forEach((model) => console.log(` ${model}`));
    }
}
|
|
357
|
+
/**
 * Resolve the effective runtime configuration.
 * Precedence per field: CLI args > environment variables > saved config file
 * > provider defaults / local auto-detection.
 *
 * NOTE(review): `saved.apiKey` is never consulted here — only CLI/env keys
 * apply; confirm whether keys saved by the setup wizard should be used.
 * NOTE(review): `saved.provider` takes precedence over the provider detected
 * from the model name — verify that is the intended precedence.
 */
async function resolveConfig(args) {
    // Load saved config (CLI args override saved config)
    const saved = (0, setup_1.loadConfig)();
    const model = args.model || process.env.CODEBOT_MODEL || saved.model || 'qwen2.5-coder:32b';
    const detected = (0, registry_1.detectProvider)(model);
    const config = {
        provider: args.provider || process.env.CODEBOT_PROVIDER || saved.provider || detected || 'openai',
        model,
        baseUrl: args['base-url'] || process.env.CODEBOT_BASE_URL || saved.baseUrl || '',
        apiKey: args['api-key'] || '',
        maxIterations: parseInt(args['max-iterations'] || String(saved.maxIterations || 50), 10),
        autoApprove: !!args['auto-approve'] || !!args.autonomous || !!args.auto || !!saved.autoApprove,
    };
    // Auto-resolve base URL and API key from provider
    if (!config.baseUrl || !config.apiKey) {
        const defaults = registry_1.PROVIDER_DEFAULTS[config.provider];
        if (defaults) {
            if (!config.baseUrl)
                config.baseUrl = defaults.baseUrl;
            if (!config.apiKey)
                config.apiKey = process.env[defaults.envKey] || process.env.CODEBOT_API_KEY || '';
        }
    }
    // Fallback: try generic env vars
    if (!config.apiKey) {
        config.apiKey = process.env.CODEBOT_API_KEY || process.env.OPENAI_API_KEY || '';
    }
    // If still no base URL, auto-detect local provider
    if (!config.baseUrl) {
        config.baseUrl = await autoDetectProvider();
    }
    return config;
}
|
|
390
|
+
/**
 * Probe common local LLM servers (Ollama, LM Studio, vLLM) and return the
 * base URL of the first one answering GET /v1/models within 2 seconds.
 * Falls back to the default Ollama URL (with a warning) when none respond.
 * @returns {Promise<string>} A base URL to use.
 */
async function autoDetectProvider() {
    const candidates = [
        { url: 'http://localhost:11434', name: 'Ollama' },
        { url: 'http://localhost:1234', name: 'LM Studio' },
        { url: 'http://localhost:8000', name: 'vLLM' },
    ];
    // Returns true when the server responds OK within the timeout.
    const isUp = async (base) => {
        try {
            const res = await fetch(`${base}/v1/models`, {
                signal: AbortSignal.timeout(2000),
            });
            return res.ok;
        }
        catch {
            return false; // not running / unreachable
        }
    };
    for (const candidate of candidates) {
        if (await isUp(candidate.url)) {
            console.log(c(` ✓ ${candidate.name} detected on ${candidate.url}`, 'green'));
            return candidate.url;
        }
    }
    console.log(c(' ⚠ No local LLM detected. Start Ollama or set --base-url', 'yellow'));
    return 'http://localhost:11434';
}
|
|
413
|
+
/**
 * Minimal argv parser.
 * Known boolean flags map to fixed keys; any other `--key` consumes the next
 * token as its value unless that token is itself a `--flag` (then the key
 * becomes boolean true). Remaining tokens are joined into `message`.
 * @param {string[]} argv - Arguments after the executable (process.argv.slice(2)).
 * @returns {object} Parsed flags plus an optional `message` string.
 */
function parseArgs(argv) {
    const parsed = {};
    const positionals = [];
    let i = 0;
    while (i < argv.length) {
        const token = argv[i];
        i += 1;
        if (token === '--help' || token === '-h') {
            parsed.help = true;
        }
        else if (token === '--version' || token === '-v') {
            parsed.version = true;
        }
        else if (token === '--auto-approve' || token === '--autonomous' || token === '--auto') {
            // All three aliases set the same trio of keys.
            parsed['auto-approve'] = true;
            parsed.autonomous = true;
            parsed.auto = true;
        }
        else if (token === '--continue' || token === '-c') {
            parsed.continue = true;
        }
        else if (token === '--setup' || token === '--init') {
            parsed.setup = true;
        }
        else if (token.startsWith('--')) {
            const key = token.slice(2);
            const value = argv[i];
            if (value && !value.startsWith('--')) {
                parsed[key] = value;
                i += 1;
            }
            else {
                parsed[key] = true;
            }
        }
        else {
            positionals.push(token);
        }
    }
    if (positionals.length > 0) {
        parsed.message = positionals.join(' ');
    }
    return parsed;
}
|
|
459
|
+
/**
 * Collect all of piped stdin into one string.
 * Resolves when the stream ends; used for non-TTY (pipe) mode.
 * @returns {Promise<string>} Everything read from stdin.
 */
function readStdin() {
    return new Promise((resolve) => {
        const chunks = [];
        process.stdin.on('data', (chunk) => {
            chunks.push(chunk.toString());
        });
        process.stdin.on('end', () => {
            resolve(chunks.join(''));
        });
    });
}
|
|
466
|
+
/** Print the full CLI usage text: quick start, flags, providers, examples. */
function showHelp() {
    console.log(`${c('CodeBot AI', 'bold')} - Local-first AI coding assistant

${c('Quick Start:', 'bold')}
codebot --setup Run interactive setup wizard
codebot Start interactive mode
codebot "fix the bug in app.ts" Single message mode
echo "explain this" | codebot Pipe mode

${c('Options:', 'bold')}
--setup Run the setup wizard (auto-runs on first use)
--model <name> Model to use (default: qwen2.5-coder:32b)
--provider <name> Provider: openai, anthropic, gemini, deepseek, groq, mistral, xai
--base-url <url> LLM API base URL (auto-detects Ollama/LM Studio/vLLM + cloud)
--api-key <key> API key (or set provider-specific env var)
--autonomous Skip ALL permission prompts — full auto mode
--auto-approve Same as --autonomous
--resume <id> Resume a previous session by ID
--continue, -c Resume the most recent session
--max-iterations <n> Max agent loop iterations (default: 50)
-h, --help Show this help
-v, --version Show version

${c('Supported Providers:', 'bold')}
Local: Ollama, LM Studio, vLLM (auto-detected)
Anthropic: Claude Opus/Sonnet/Haiku (ANTHROPIC_API_KEY)
OpenAI: GPT-4o, GPT-4.1, o1/o3/o4 (OPENAI_API_KEY)
Google: Gemini 2.5/2.0/1.5 (GEMINI_API_KEY)
DeepSeek: deepseek-chat, deepseek-reasoner (DEEPSEEK_API_KEY)
Groq: Llama, Mixtral on Groq (GROQ_API_KEY)
Mistral: mistral-large, codestral (MISTRAL_API_KEY)
xAI: Grok-3 (XAI_API_KEY)

${c('Examples:', 'bold')}
codebot --model claude-opus-4-6 Uses Anthropic API
codebot --model gpt-4o Uses OpenAI API
codebot --model gemini-2.5-pro Uses Gemini API
codebot --model deepseek-chat Uses DeepSeek API
codebot --model qwen2.5-coder:32b Uses local Ollama
codebot --autonomous "refactor src/" Full auto, no prompts

${c('Interactive Commands:', 'bold')}
/help Show commands
/model Show or change model
/models List all supported models
/sessions List saved sessions
/auto Toggle autonomous mode
/clear Clear conversation
/compact Force context compaction
/config Show configuration
/quit Exit`);
}
|
|
518
|
+
//# sourceMappingURL=cli.js.map
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
// Declarations for the conversation context manager: estimates token usage
// against the model's context window and compacts history when it overflows.
import { Message, LLMProvider } from '../types';
export declare class ContextManager {
    private contextWindow;
    private reservedForOutput;
    private reservedForSystem;
    private reservedForTools;
    private provider?;
    constructor(model: string, provider?: LLMProvider);
    /** Set the provider (for LLM-powered compaction) */
    setProvider(provider: LLMProvider): void;
    /** Conservative token estimate: ~3.5 chars per token */
    estimateTokens(text: string): number;
    /** Tokens available for conversation messages */
    availableTokens(): number;
    /** Check if messages fit within budget */
    fitsInBudget(messages: Message[]): boolean;
    /** Compact conversation by dropping old messages and inserting a summary placeholder */
    compact(messages: Message[], force?: boolean): Message[];
    /** Smart compaction: use LLM to summarize dropped messages instead of just discarding */
    compactWithSummary(messages: Message[]): Promise<{
        messages: Message[];
        summary: string;
    }>;
    private summarizeMessages;
    getContextWindow(): number;
}
//# sourceMappingURL=manager.d.ts.map
|
|
@@ -0,0 +1,139 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.ContextManager = void 0;
|
|
4
|
+
const registry_1 = require("../providers/registry");
|
|
5
|
+
/**
 * Manages the conversation's token budget for a given model and compacts
 * history when it no longer fits: either by dropping old messages behind a
 * placeholder note, or by replacing them with an LLM-generated summary.
 */
class ContextManager {
    // Total context window (tokens) reported by the model registry.
    contextWindow;
    // Token reservations subtracted from the window before budgeting messages.
    reservedForOutput = 2048;
    reservedForSystem = 1500;
    reservedForTools = 2000;
    // Optional LLM provider used for summary-based compaction.
    provider;
    constructor(model, provider) {
        this.contextWindow = (0, registry_1.getModelInfo)(model).contextWindow;
        this.provider = provider;
    }
    /** Set the provider (for LLM-powered compaction) */
    setProvider(provider) {
        this.provider = provider;
    }
    /**
     * Conservative token estimate: ~3.5 chars per token.
     * Fix: tolerates null/undefined content (e.g. assistant messages that
     * carry only tool_calls, as handled in summarizeMessages) by counting it
     * as zero tokens — previously `text.length` threw on null content.
     */
    estimateTokens(text) {
        if (!text)
            return 0;
        return Math.ceil(text.length / 3.5);
    }
    /** Tokens available for conversation messages */
    availableTokens() {
        return this.contextWindow - this.reservedForOutput - this.reservedForSystem - this.reservedForTools;
    }
    /** Check if messages fit within budget */
    fitsInBudget(messages) {
        const total = messages.reduce((sum, m) => sum + this.estimateTokens(m.content), 0);
        return total <= this.availableTokens();
    }
    /**
     * Walk backwards from the newest message, keeping as many recent messages
     * as fit within 80% of the available budget. Shared helper for compact()
     * and compactWithSummary(), which previously duplicated this loop.
     * @param {Message[]} rest - Conversation messages (system prompt excluded).
     * @returns {Message[]} The most recent messages that fit, oldest first.
     */
    keepRecentWithinBudget(rest) {
        const kept = [];
        let tokenCount = 0;
        const budget = this.availableTokens();
        for (let i = rest.length - 1; i >= 0; i--) {
            const msgTokens = this.estimateTokens(rest[i].content);
            if (tokenCount + msgTokens > budget * 0.8)
                break;
            kept.unshift(rest[i]);
            tokenCount += msgTokens;
        }
        return kept;
    }
    /**
     * Compact conversation by dropping old messages and inserting a summary
     * placeholder. Returns the original array untouched when it already fits
     * (unless `force` is set). The leading system prompt is always preserved.
     */
    compact(messages, force = false) {
        if (!force && this.fitsInBudget(messages))
            return messages;
        const system = messages[0]?.role === 'system' ? messages[0] : null;
        const rest = system ? messages.slice(1) : [...messages];
        const kept = this.keepRecentWithinBudget(rest);
        const dropped = rest.length - kept.length;
        if (dropped > 0) {
            kept.unshift({
                role: 'system',
                content: `[${dropped} earlier messages compacted. The conversation has been ongoing — continue from the recent messages below.]`,
            });
        }
        if (system)
            kept.unshift(system);
        return kept;
    }
    /**
     * Smart compaction: use the LLM to summarize dropped messages instead of
     * just discarding them. Falls back to a plain placeholder note when no
     * provider is set or summarization fails.
     * @returns {Promise<{messages: Message[], summary: string}>} Compacted
     *   messages plus the summary text ('' when nothing was dropped).
     */
    async compactWithSummary(messages) {
        const system = messages[0]?.role === 'system' ? messages[0] : null;
        const rest = system ? messages.slice(1) : [...messages];
        // Determine which messages to keep vs summarize.
        const kept = this.keepRecentWithinBudget(rest);
        const droppedCount = rest.length - kept.length;
        if (droppedCount === 0) {
            return { messages, summary: '' };
        }
        const dropped = rest.slice(0, droppedCount);
        let summary = `[${droppedCount} earlier messages compacted.]`;
        // Try LLM summarization; keep the plain placeholder on any failure.
        if (this.provider) {
            try {
                summary = await this.summarizeMessages(dropped);
            }
            catch {
                // Fall back to simple compaction
            }
        }
        kept.unshift({ role: 'system', content: summary });
        if (system)
            kept.unshift(system);
        return { messages: kept, summary };
    }
    /**
     * Summarize a slice of conversation with the configured provider.
     * Builds a condensed transcript (tool results and assistant turns are
     * clipped) and asks for a 2-4 sentence summary.
     * @throws {Error} When no provider is configured.
     */
    async summarizeMessages(messages) {
        if (!this.provider) {
            throw new Error('No provider for summarization');
        }
        // Build a condensed version of the conversation for summarization.
        // `m.content` may be null for tool-call-only assistant messages.
        const convoText = messages
            .filter(m => m.role !== 'system')
            .map(m => {
            const body = m.content ?? '';
            if (m.role === 'tool') {
                const result = body.length > 200 ? body.substring(0, 200) + '...' : body;
                return `[Tool result]: ${result}`;
            }
            if (m.role === 'assistant' && m.tool_calls?.length) {
                const tools = m.tool_calls.map(tc => tc.function.name).join(', ');
                const text = m.content ? m.content.substring(0, 200) : '';
                return `Assistant: ${text}\n[Used tools: ${tools}]`;
            }
            return `${m.role === 'user' ? 'User' : 'Assistant'}: ${body.substring(0, 500)}`;
        })
            .join('\n');
        // Keep summary request short to minimize cost
        const summaryPrompt = [
            {
                role: 'system',
                content: 'Summarize this conversation excerpt in 2-4 sentences. Focus on: what was discussed, what actions were taken, what was decided, and any important context for continuing the conversation. Be specific about file names, functions, and technical details.',
            },
            { role: 'user', content: convoText },
        ];
        let summaryText = '';
        for await (const event of this.provider.chat(summaryPrompt)) {
            if (event.type === 'text' && event.text) {
                summaryText += event.text;
            }
        }
        if (!summaryText.trim()) {
            return `[${messages.length} earlier messages compacted.]`;
        }
        return `[Conversation summary: ${summaryText.trim()}]`;
    }
    /** The model's total context window in tokens. */
    getContextWindow() {
        return this.contextWindow;
    }
}
|
|
138
|
+
exports.ContextManager = ContextManager;
|
|
139
|
+
//# sourceMappingURL=manager.js.map
|