codebot-ai 1.2.1 → 1.2.3

package/dist/agent.d.ts CHANGED
@@ -28,6 +28,10 @@ export declare class Agent {
         after: number;
     };
     getMessages(): Message[];
+    /** Ensure every assistant message with tool_calls has matching tool response messages.
+     * OpenAI returns 400 if any tool_call_id lacks a response. This can happen if
+     * a previous LLM call errored out mid-flow. */
+    private repairToolCallMessages;
     private buildSystemPrompt;
 }
 //# sourceMappingURL=agent.d.ts.map
package/dist/agent.js CHANGED
@@ -93,6 +93,9 @@ class Agent {
             yield { type: 'compaction', text: result.summary || 'Context compacted to fit budget.' };
         }
         for (let i = 0; i < this.maxIterations; i++) {
+            // Validate message integrity: ensure every tool_call has a matching tool response
+            // This prevents cascading 400 errors from OpenAI when a previous call failed
+            this.repairToolCallMessages();
             const supportsTools = (0, registry_1.getModelInfo)(this.model).supportsToolCalling;
             const toolSchemas = supportsTools ? this.tools.getSchemas() : undefined;
             let fullText = '';
@@ -211,6 +214,39 @@ class Agent {
     getMessages() {
         return [...this.messages];
     }
+    /** Ensure every assistant message with tool_calls has matching tool response messages.
+     * OpenAI returns 400 if any tool_call_id lacks a response. This can happen if
+     * a previous LLM call errored out mid-flow. */
+    repairToolCallMessages() {
+        const toolResponseIds = new Set();
+        for (const msg of this.messages) {
+            if (msg.role === 'tool' && msg.tool_call_id) {
+                toolResponseIds.add(msg.tool_call_id);
+            }
+        }
+        for (let i = 0; i < this.messages.length; i++) {
+            const msg = this.messages[i];
+            if (msg.role === 'assistant' && msg.tool_calls?.length) {
+                for (const tc of msg.tool_calls) {
+                    if (!toolResponseIds.has(tc.id)) {
+                        // Missing tool response — inject one right after the assistant message
+                        const repairMsg = {
+                            role: 'tool',
+                            content: 'Error: tool call was not executed (interrupted).',
+                            tool_call_id: tc.id,
+                        };
+                        // Find the right position: after the assistant message and any existing tool responses
+                        let insertAt = i + 1;
+                        while (insertAt < this.messages.length && this.messages[insertAt].role === 'tool') {
+                            insertAt++;
+                        }
+                        this.messages.splice(insertAt, 0, repairMsg);
+                        toolResponseIds.add(tc.id);
+                    }
+                }
+            }
+        }
+    }
     buildSystemPrompt(supportsTools) {
         let repoMap = '';
         try {
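
The new repairToolCallMessages pass scans the history for assistant messages whose tool_calls have no matching role: 'tool' response and injects a placeholder reply right after them, so an interrupted run cannot poison the next request. A minimal standalone TypeScript sketch of the same idea (the Message and ToolCall shapes below are assumptions for illustration, not the package's actual types):

```ts
type ToolCall = { id: string; type: 'function'; function: { name: string; arguments: string } };
type Message = {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string | null;
  tool_calls?: ToolCall[];
  tool_call_id?: string;
};

// Same idea as the diff: every tool_call id must be answered by a role:'tool' message,
// otherwise the next OpenAI request is rejected with a 400.
function repairToolCallMessages(messages: Message[]): Message[] {
  const answered = new Set(
    messages.filter(m => m.role === 'tool' && m.tool_call_id).map(m => m.tool_call_id!)
  );
  const out = [...messages];
  for (let i = 0; i < out.length; i++) {
    const msg = out[i];
    if (msg.role !== 'assistant' || !msg.tool_calls?.length) continue;
    for (const tc of msg.tool_calls) {
      if (answered.has(tc.id)) continue;
      // Insert the placeholder after the assistant turn and any tool replies it already has.
      let insertAt = i + 1;
      while (insertAt < out.length && out[insertAt].role === 'tool') insertAt++;
      out.splice(insertAt, 0, {
        role: 'tool',
        content: 'Error: tool call was not executed (interrupted).',
        tool_call_id: tc.id,
      });
      answered.add(tc.id);
    }
  }
  return out;
}
```
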
package/dist/cli.js CHANGED
@@ -44,7 +44,7 @@ const setup_1 = require("./setup");
 const banner_1 = require("./banner");
 const tools_1 = require("./tools");
 const scheduler_1 = require("./scheduler");
-const VERSION = '1.2.0';
+const VERSION = '1.2.3';
 // Session-wide token tracking
 let sessionTokens = { input: 0, output: 0, total: 0 };
 const C = {
@@ -221,8 +221,13 @@ class OpenAIProvider {
     }
     formatMessage(msg) {
         const formatted = { role: msg.role, content: msg.content };
-        if (msg.tool_calls)
+        if (msg.tool_calls) {
             formatted.tool_calls = msg.tool_calls;
+            // OpenAI (especially GPT-4.1) requires content: null when tool_calls are present
+            // and there's no actual text content. Empty string "" causes 400 errors.
+            if (!msg.content)
+                formatted.content = null;
+        }
         if (msg.tool_call_id)
             formatted.tool_call_id = msg.tool_call_id;
         if (msg.name)
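
The formatMessage change only matters for assistant turns that carry tool_calls and no text. A rough TypeScript sketch of the resulting payload shape (the OutgoingMessage type is assumed for illustration, not taken from the package):

```ts
type OutgoingMessage = {
  role: string;
  content: string | null;
  tool_calls?: unknown[];
  tool_call_id?: string;
  name?: string;
};

function formatMessage(msg: {
  role: string;
  content: string;
  tool_calls?: unknown[];
  tool_call_id?: string;
  name?: string;
}): OutgoingMessage {
  const formatted: OutgoingMessage = { role: msg.role, content: msg.content };
  if (msg.tool_calls) {
    formatted.tool_calls = msg.tool_calls;
    // Per the diff's comment, an assistant turn that only carries tool_calls
    // should send content: null rather than an empty string.
    if (!msg.content) formatted.content = null;
  }
  if (msg.tool_call_id) formatted.tool_call_id = msg.tool_call_id;
  if (msg.name) formatted.name = msg.name;
  return formatted;
}

// Before: { role: 'assistant', content: '',   tool_calls: [...] }
// After:  { role: 'assistant', content: null, tool_calls: [...] }
```
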
package/dist/setup.d.ts CHANGED
@@ -12,6 +12,6 @@ export declare function loadConfig(): SavedConfig;
 export declare function saveConfig(config: SavedConfig): void;
 /** Check if this is the first run (no config, no env keys) */
 export declare function isFirstRun(): boolean;
-/** Interactive setup wizard */
+/** Interactive setup wizard — model-first flow */
 export declare function runSetup(): Promise<SavedConfig>;
 //# sourceMappingURL=setup.d.ts.map
package/dist/setup.js CHANGED
@@ -60,15 +60,12 @@ function loadConfig() {
 function saveConfig(config) {
     fs.mkdirSync(CONFIG_DIR, { recursive: true });
     const safe = { ...config };
-    // Persist API key if user entered it during setup (convenience over env vars)
-    // The key is stored in the user's home directory with default permissions
     fs.writeFileSync(CONFIG_FILE, JSON.stringify(safe, null, 2) + '\n');
 }
 /** Check if this is the first run (no config, no env keys) */
 function isFirstRun() {
     if (fs.existsSync(CONFIG_FILE))
         return false;
-    // Check if any provider API keys are set
     const envKeys = [
         'ANTHROPIC_API_KEY', 'OPENAI_API_KEY', 'GEMINI_API_KEY',
         'DEEPSEEK_API_KEY', 'GROQ_API_KEY', 'MISTRAL_API_KEY',
@@ -105,24 +102,7 @@ async function detectLocalServers() {
     }
     return servers;
 }
-/** Detect which cloud API keys are available */
-function detectApiKeys() {
-    return Object.entries(registry_1.PROVIDER_DEFAULTS).map(([provider, defaults]) => ({
-        provider,
-        envVar: defaults.envKey,
-        set: !!process.env[defaults.envKey],
-    }));
-}
-/** Cloud provider display info */
-const CLOUD_PROVIDERS = [
-    { provider: 'openai', name: 'OpenAI', defaultModel: 'gpt-4o', description: 'GPT-4o, GPT-4.1, o3/o4' },
-    { provider: 'anthropic', name: 'Anthropic', defaultModel: 'claude-sonnet-4-6', description: 'Claude Opus/Sonnet/Haiku' },
-    { provider: 'gemini', name: 'Google Gemini', defaultModel: 'gemini-2.5-flash', description: 'Gemini 2.5 Pro/Flash' },
-    { provider: 'deepseek', name: 'DeepSeek', defaultModel: 'deepseek-chat', description: 'DeepSeek Chat/Reasoner' },
-    { provider: 'groq', name: 'Groq', defaultModel: 'llama-3.3-70b-versatile', description: 'Fast Llama/Mixtral inference' },
-    { provider: 'mistral', name: 'Mistral', defaultModel: 'mistral-large-latest', description: 'Mistral Large, Codestral' },
-    { provider: 'xai', name: 'xAI', defaultModel: 'grok-3', description: 'Grok-3' },
-];
+// ── ANSI helpers ─────────────────────────────────────────────────────────────
 const C = {
     reset: '\x1b[0m',
     bold: '\x1b[1m',
@@ -140,131 +120,253 @@ function ask(rl, question) {
         rl.question(question, answer => resolve(answer.trim()));
     });
 }
-/** Interactive setup wizard */
+const PROVIDER_DISPLAY = {
+    anthropic: 'Anthropic',
+    openai: 'OpenAI',
+    gemini: 'Google',
+    deepseek: 'DeepSeek',
+    groq: 'Groq',
+    mistral: 'Mistral',
+    xai: 'xAI',
+};
+/** Hand-picked cloud models for the setup menu — best 2-3 from each provider */
+const CURATED_CLOUD_MODELS = [
+    // Frontier (most capable)
+    { id: 'claude-opus-4-6', displayName: 'Claude Opus 4', provider: 'anthropic', category: 'frontier', contextK: '200K' },
+    { id: 'gpt-4.1', displayName: 'GPT-4.1', provider: 'openai', category: 'frontier', contextK: '1M' },
+    { id: 'gemini-2.5-pro', displayName: 'Gemini 2.5 Pro', provider: 'gemini', category: 'frontier', contextK: '1M' },
+    { id: 'o3', displayName: 'o3', provider: 'openai', category: 'frontier', contextK: '200K' },
+    { id: 'grok-3', displayName: 'Grok-3', provider: 'xai', category: 'frontier', contextK: '131K' },
+    // Fast & efficient
+    { id: 'claude-sonnet-4-6', displayName: 'Claude Sonnet 4', provider: 'anthropic', category: 'fast', contextK: '200K' },
+    { id: 'gpt-4o', displayName: 'GPT-4o', provider: 'openai', category: 'fast', contextK: '128K' },
+    { id: 'gemini-2.5-flash', displayName: 'Gemini 2.5 Flash', provider: 'gemini', category: 'fast', contextK: '1M' },
+    { id: 'deepseek-chat', displayName: 'DeepSeek Chat', provider: 'deepseek', category: 'fast', contextK: '65K' },
+    { id: 'mistral-large-latest', displayName: 'Mistral Large', provider: 'mistral', category: 'fast', contextK: '131K' },
+    { id: 'llama-3.3-70b-versatile', displayName: 'Llama 3.3 70B', provider: 'groq', category: 'fast', contextK: '131K' },
+    { id: 'claude-haiku-4-5-20251001', displayName: 'Claude Haiku 4.5', provider: 'anthropic', category: 'fast', contextK: '200K' },
+    // Reasoning
+    { id: 'o1', displayName: 'o1', provider: 'openai', category: 'reasoning', contextK: '200K' },
+    { id: 'o4-mini', displayName: 'o4-mini', provider: 'openai', category: 'reasoning', contextK: '200K' },
+    { id: 'deepseek-reasoner', displayName: 'DeepSeek Reasoner', provider: 'deepseek', category: 'reasoning', contextK: '65K' },
+];
+/** Format context window for display: 200000 → "200K", 1048576 → "1M" */
+function formatCtx(tokens) {
+    if (tokens >= 1000000)
+        return `${Math.round(tokens / 1048576)}M`;
+    return `${Math.round(tokens / 1024)}K`;
+}
+/** Build the unified model list: local models first, then curated cloud models */
+function buildModelList(localServers, apiKeyStatus) {
+    const entries = [];
+    // Local models (cap at 8, prioritize well-known models)
+    const localPriority = ['qwen', 'deepseek', 'llama', 'phi', 'mistral', 'codellama'];
+    for (const server of localServers) {
+        const sorted = [...server.models].sort((a, b) => {
+            const ai = localPriority.findIndex(p => a.toLowerCase().includes(p));
+            const bi = localPriority.findIndex(p => b.toLowerCase().includes(p));
+            return (ai === -1 ? 99 : ai) - (bi === -1 ? 99 : bi);
+        });
+        for (const model of sorted.slice(0, 8)) {
+            const info = (0, registry_1.getModelInfo)(model);
+            entries.push({
+                id: model,
+                displayName: model,
+                provider: 'local',
+                category: 'local',
+                contextK: formatCtx(info.contextWindow),
+                baseUrl: server.url,
+                needsKey: false,
+                serverName: server.name,
+            });
+        }
+    }
+    // Cloud models from curated list
+    for (const model of CURATED_CLOUD_MODELS) {
+        const defaults = registry_1.PROVIDER_DEFAULTS[model.provider];
+        entries.push({
+            ...model,
+            baseUrl: defaults?.baseUrl || '',
+            needsKey: !apiKeyStatus.get(model.provider),
+        });
+    }
+    return entries;
+}
+function renderCategoryHeader(category) {
+    const headers = {
+        local: 'LOCAL (free, private, runs on your machine)',
+        frontier: 'CLOUD \u2014 FRONTIER (most capable)',
+        fast: 'CLOUD \u2014 FAST & EFFICIENT',
+        reasoning: 'CLOUD \u2014 REASONING',
+    };
+    const title = headers[category] || category.toUpperCase();
+    console.log(`\n ${fmt(title, 'bold')}`);
+    console.log(` ${fmt('\u2500'.repeat(48), 'dim')}`);
+}
+function renderModelRow(index, entry) {
+    const num = fmt(String(index).padStart(3), 'cyan');
+    const name = entry.displayName.padEnd(26);
+    const prov = (entry.serverName || PROVIDER_DISPLAY[entry.provider] || entry.provider).padEnd(11);
+    const ctx = fmt((entry.contextK + ' ctx').padStart(9), 'dim');
+    let keyStatus = '';
+    if (entry.provider !== 'local') {
+        keyStatus = entry.needsKey
+            ? fmt(' needs key', 'yellow')
+            : fmt(' \u2713 key set', 'green');
+    }
+    console.log(` ${num} ${name}${prov}${ctx}${keyStatus}`);
+}
+/** Fuzzy match a typed model name against all known models */
+function fuzzyMatchModel(input, allModels) {
+    const lower = input.toLowerCase();
+    // Exact match
+    if (allModels.includes(input))
+        return input;
+    // Case-insensitive exact
+    const exact = allModels.find(m => m.toLowerCase() === lower);
+    if (exact)
+        return exact;
+    // Prefix match
+    const prefix = allModels.find(m => m.toLowerCase().startsWith(lower));
+    if (prefix)
+        return prefix;
+    // Substring match
+    const sub = allModels.find(m => m.toLowerCase().includes(lower));
+    if (sub)
+        return sub;
+    return undefined;
+}
+// ── Setup wizard ─────────────────────────────────────────────────────────────
+/** Interactive setup wizard — model-first flow */
 async function runSetup() {
     const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
-    console.log(fmt('\n CodeBot AI Setup', 'bold'));
+    console.log(fmt('\n\u26A1 CodeBot AI \u2014 Setup', 'bold'));
     console.log(fmt(' Let\'s get you configured.\n', 'dim'));
-    // Step 1: Detect local servers
+    // ── Phase A: Detection ──────────────────────────────────────────────────────
     console.log(fmt('Scanning for local LLM servers...', 'dim'));
     const localServers = await detectLocalServers();
-    // Step 2: Detect API keys
-    const apiKeys = detectApiKeys();
-    const availableKeys = apiKeys.filter(k => k.set);
-    // Show what was found
+    const apiKeyStatus = new Map();
+    for (const [provider, defaults] of Object.entries(registry_1.PROVIDER_DEFAULTS)) {
+        apiKeyStatus.set(provider, !!process.env[defaults.envKey]);
+    }
+    // Show detection results
     if (localServers.length > 0) {
         for (const server of localServers) {
-            console.log(fmt(` ${server.name} detected (${server.models.length} models)`, 'green'));
+            console.log(fmt(` \u2713 ${server.name} detected (${server.models.length} models)`, 'green'));
         }
     }
     else {
-        console.log(fmt(' No local LLM servers detected.', 'dim'));
+        console.log(fmt(' No local servers found. Start Ollama for free local models: ollama.com', 'dim'));
     }
-    if (availableKeys.length > 0) {
-        for (const key of availableKeys) {
-            console.log(fmt(` ✓ ${key.provider} API key found (${key.envVar})`, 'green'));
-        }
+    const setKeys = [...apiKeyStatus.entries()].filter(([, set]) => set);
+    for (const [prov] of setKeys) {
+        const display = PROVIDER_DISPLAY[prov] || prov;
+        console.log(fmt(` \u2713 ${display} API key found`, 'green'));
     }
-    // Step 3: Choose provider show ALL options (local + cloud)
-    console.log(fmt('\nChoose your setup:', 'bold'));
-    const options = [];
-    let idx = 1;
-    // Local options first
-    for (const server of localServers) {
-        const defaultModel = server.models[0] || 'qwen2.5-coder:32b';
-        options.push({
-            label: `${server.name} (local, free)`,
-            provider: 'openai',
-            model: defaultModel,
-            baseUrl: server.url,
-            needsKey: false,
-        });
-        console.log(` ${fmt(`${idx}`, 'cyan')} ${server.name} — ${defaultModel} ${fmt('(local, free, private)', 'green')}`);
-        idx++;
+    // ── Phase B: Build & render model list ──────────────────────────────────────
+    const modelList = buildModelList(localServers, apiKeyStatus);
+    console.log(fmt('\nChoose a model:', 'bold'));
+    let currentCategory = '';
+    modelList.forEach((entry, i) => {
+        if (entry.category !== currentCategory) {
+            currentCategory = entry.category;
+            renderCategoryHeader(currentCategory);
+        }
+        renderModelRow(i + 1, entry);
+    });
+    if (modelList.length === 0) {
+        console.log(fmt('\n No models available. Install Ollama or set a cloud API key.', 'yellow'));
+        rl.close();
+        return {};
     }
-    // Cloud options ALWAYS show all providers
-    for (const cloud of CLOUD_PROVIDERS) {
-        const keyInfo = apiKeys.find(k => k.provider === cloud.provider);
-        const hasKey = keyInfo?.set || false;
-        const defaults = registry_1.PROVIDER_DEFAULTS[cloud.provider];
-        const keyStatus = hasKey ? fmt('✓ key set', 'green') : fmt('enter key during setup', 'yellow');
-        options.push({
-            label: cloud.name,
-            provider: cloud.provider,
-            model: cloud.defaultModel,
-            baseUrl: defaults.baseUrl,
-            needsKey: !hasKey,
-            envVar: defaults.envKey,
-        });
-        console.log(` ${fmt(`${idx}`, 'cyan')} ${cloud.name} — ${cloud.description} ${fmt(`(${keyStatus})`, 'dim')}`);
-        idx++;
+    // ── Phase C: Model selection ────────────────────────────────────────────────
+    const allKnownModels = [
+        ...Object.keys(registry_1.MODEL_REGISTRY),
+        ...localServers.flatMap(s => s.models),
+    ];
+    const choice = await ask(rl, fmt(`\nSelect [1-${modelList.length}] or type a model name: `, 'cyan'));
+    let selectedModel;
+    let selectedProvider;
+    let selectedBaseUrl;
+    let isLocal = false;
+    const choiceNum = parseInt(choice, 10);
+    if (choiceNum >= 1 && choiceNum <= modelList.length) {
+        // User picked by number
+        const entry = modelList[choiceNum - 1];
+        selectedModel = entry.id;
+        selectedProvider = entry.provider === 'local' ? 'openai' : entry.provider;
+        selectedBaseUrl = entry.baseUrl;
+        isLocal = entry.provider === 'local';
     }
-    const choice = await ask(rl, fmt(`\nSelect [1-${options.length}]: `, 'cyan'));
-    const selected = options[parseInt(choice, 10) - 1] || options[0];
-    // Step 4: If cloud provider needs API key, prompt for it
-    let apiKey = '';
-    if (selected.needsKey && selected.envVar) {
-        console.log(fmt(`\n ${selected.label} requires an API key.`, 'yellow'));
-        console.log(fmt(` Get one at: ${getKeyUrl(selected.provider)}`, 'dim'));
-        apiKey = await ask(rl, fmt(`\n Enter your ${selected.label} API key: `, 'cyan'));
-        if (!apiKey) {
-            console.log(fmt(`\n No key entered. You can set it later:`, 'yellow'));
-            console.log(fmt(` export ${selected.envVar}="your-key-here"`, 'dim'));
+    else if (choice.length > 1) {
+        // User typed a model name fuzzy match
+        const matched = fuzzyMatchModel(choice, allKnownModels);
+        selectedModel = matched || choice;
+        const detected = (0, registry_1.detectProvider)(selectedModel);
+        selectedProvider = detected || 'openai';
+        isLocal = !detected;
+        if (isLocal) {
+            const server = localServers.find(s => s.models.some(m => m.toLowerCase() === selectedModel.toLowerCase() || m.toLowerCase().includes(selectedModel.toLowerCase())));
+            selectedBaseUrl = server?.url || 'http://localhost:11434';
+        }
+        else {
+            selectedBaseUrl = registry_1.PROVIDER_DEFAULTS[selectedProvider]?.baseUrl || '';
         }
     }
-    else if (selected.envVar) {
-        // Use existing env var
-        apiKey = process.env[selected.envVar] || '';
+    else {
+        // Empty or single char — default to first entry
+        const entry = modelList[0];
+        selectedModel = entry.id;
+        selectedProvider = entry.provider === 'local' ? 'openai' : entry.provider;
+        selectedBaseUrl = entry.baseUrl;
+        isLocal = entry.provider === 'local';
    }
-    // Step 5: Show available models for chosen provider
-    const matchedServer = localServers.find(s => s.url === selected.baseUrl);
-    const providerModels = matchedServer && matchedServer.models.length > 0
-        ? matchedServer.models
-        : Object.entries(registry_1.MODEL_REGISTRY)
-            .filter(([, info]) => info.provider === selected.provider)
-            .map(([name]) => name);
-    if (providerModels.length > 1) {
-        console.log(fmt(`\nAvailable models${matchedServer ? ` on ${matchedServer.name}` : ''}:`, 'bold'));
-        providerModels.slice(0, 15).forEach((m, i) => {
-            const marker = m === selected.model ? fmt(' (default)', 'green') : '';
-            console.log(` ${fmt(`${i + 1}`, 'cyan')} ${m}${marker}`);
-        });
-        const modelChoice = await ask(rl, fmt(`\nModel [Enter for ${selected.model}]: `, 'cyan'));
-        if (modelChoice) {
-            const modelIdx = parseInt(modelChoice, 10) - 1;
-            if (providerModels[modelIdx]) {
-                selected.model = providerModels[modelIdx];
-            }
-            else if (modelChoice.length > 2) {
-                // Treat as model name typed directly
-                selected.model = modelChoice;
+    console.log(fmt(` \u2713 Selected: ${selectedModel}`, 'green'));
+    // ── Phase D: API key resolution ─────────────────────────────────────────────
+    let apiKey = '';
+    if (!isLocal) {
+        const defaults = registry_1.PROVIDER_DEFAULTS[selectedProvider];
+        const envKey = defaults?.envKey;
+        const existingKey = envKey ? process.env[envKey] : undefined;
+        if (existingKey) {
+            console.log(fmt(` \u2713 Using ${envKey} from environment`, 'green'));
+            apiKey = existingKey;
+        }
+        else if (envKey) {
+            const providerName = PROVIDER_DISPLAY[selectedProvider] || selectedProvider;
+            const keyUrl = getKeyUrl(selectedProvider);
+            console.log(fmt(`\n ${selectedModel} requires a ${providerName} API key.`, 'yellow'));
+            console.log(fmt(` Get one at: ${keyUrl}`, 'dim'));
+            apiKey = await ask(rl, fmt('\n Paste your API key: ', 'cyan'));
+            if (!apiKey) {
+                console.log(fmt(`\n No key entered. Set it later:`, 'yellow'));
+                console.log(fmt(` export ${envKey}="your-key-here"`, 'dim'));
             }
         }
     }
-    // Step 6: Auto mode?
+    // ── Phase E: Autonomous mode ────────────────────────────────────────────────
     const autoChoice = await ask(rl, fmt('\nEnable autonomous mode? (skip permission prompts) [y/N]: ', 'cyan'));
     const autoApprove = autoChoice.toLowerCase().startsWith('y');
     rl.close();
-    // Save config
+    // ── Phase F: Save config + summary ──────────────────────────────────────────
     const config = {
-        model: selected.model,
-        provider: selected.provider,
-        baseUrl: selected.baseUrl,
+        model: selectedModel,
+        provider: selectedProvider,
+        baseUrl: selectedBaseUrl,
         autoApprove,
     };
-    // Save API key if user entered one
     if (apiKey) {
         config.apiKey = apiKey;
     }
     saveConfig(config);
-    console.log(fmt('\n Config saved to ~/.codebot/config.json', 'green'));
-    console.log(fmt(` Model: ${config.model}`, 'dim'));
-    console.log(fmt(` Provider: ${config.provider}`, 'dim'));
+    console.log(fmt('\n\u2713 Config saved to ~/.codebot/config.json', 'green'));
+    console.log(fmt(` Model: ${config.model}`, 'dim'));
+    console.log(fmt(` Provider: ${selectedProvider}${isLocal ? '' : ' (auto-detected)'}`, 'dim'));
     if (apiKey) {
-        console.log(fmt(` API Key: ${'*'.repeat(Math.min(apiKey.length, 20))}`, 'dim'));
+        console.log(fmt(` API Key: ${'*'.repeat(Math.min(apiKey.length, 20))}`, 'dim'));
     }
     if (autoApprove) {
-        console.log(fmt(` Mode: AUTONOMOUS`, 'yellow'));
+        console.log(fmt(` Mode: AUTONOMOUS`, 'yellow'));
     }
     console.log(fmt(`\nRun ${fmt('codebot', 'bold')} to start. Run ${fmt('codebot --setup', 'bold')} to reconfigure.\n`, 'dim'));
     return config;
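
Typed input in the new wizard goes through fuzzyMatchModel, which tries exact, case-insensitive, prefix, and substring matches in that order, and the caller falls back to the raw input when nothing matches. A small self-contained TypeScript sketch of that precedence (the model list here is invented for the example):

```ts
function fuzzyMatchModel(input: string, allModels: string[]): string | undefined {
  const lower = input.toLowerCase();
  if (allModels.includes(input)) return input;                          // exact
  const exact = allModels.find(m => m.toLowerCase() === lower);         // case-insensitive
  if (exact) return exact;
  const prefix = allModels.find(m => m.toLowerCase().startsWith(lower)); // prefix
  if (prefix) return prefix;
  return allModels.find(m => m.toLowerCase().includes(lower));          // substring
}

const models = ['gpt-4.1', 'gpt-4o', 'claude-sonnet-4-6', 'deepseek-chat'];
console.log(fuzzyMatchModel('GPT-4o', models));  // 'gpt-4o' (case-insensitive exact)
console.log(fuzzyMatchModel('claude', models));  // 'claude-sonnet-4-6' (prefix)
console.log(fuzzyMatchModel('sonnet', models));  // 'claude-sonnet-4-6' (substring)
console.log(fuzzyMatchModel('mixtral', models)); // undefined (caller keeps the raw input)
```
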
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "codebot-ai",
-  "version": "1.2.1",
+  "version": "1.2.3",
   "description": "Local-first AI coding assistant. Zero dependencies. Works with Ollama, LM Studio, vLLM, Claude, GPT, Gemini, and more.",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",