codemaxxing 0.1.9 → 0.1.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3):
  1. package/dist/index.js +143 -120
  2. package/package.json +1 -1
  3. package/src/index.tsx +146 -121
package/dist/index.js CHANGED
@@ -29,6 +29,7 @@ function formatTimeAgo(date) {
29
29
  // ── Slash Commands ──
30
30
  const SLASH_COMMANDS = [
31
31
  { cmd: "/help", desc: "show commands" },
32
+ { cmd: "/connect", desc: "retry LLM connection" },
32
33
  { cmd: "/login", desc: "set up authentication" },
33
34
  { cmd: "/map", desc: "show repository map" },
34
35
  { cmd: "/reset", desc: "clear conversation" },
@@ -127,113 +128,124 @@ function App() {
127
128
  pasteEvents.on("paste", handler);
128
129
  return () => { pasteEvents.off("paste", handler); };
129
130
  }, []);
130
- // Initialize agent
131
- useEffect(() => {
132
- (async () => {
133
- const cliArgs = parseCLIArgs();
134
- const rawConfig = loadConfig();
135
- const config = applyOverrides(rawConfig, cliArgs);
136
- let provider = config.provider;
137
- const info = [];
138
- if (provider.model === "auto" || (provider.baseUrl === "http://localhost:1234/v1" && !cliArgs.baseUrl)) {
139
- info.push("Detecting local LLM server...");
131
+ // Connect/reconnect to LLM provider
132
+ const connectToProvider = useCallback(async (isRetry = false) => {
133
+ const cliArgs = parseCLIArgs();
134
+ const rawConfig = loadConfig();
135
+ const config = applyOverrides(rawConfig, cliArgs);
136
+ let provider = config.provider;
137
+ const info = [];
138
+ if (isRetry) {
139
+ info.push("Retrying connection...");
140
+ setConnectionInfo([...info]);
141
+ }
142
+ if (provider.model === "auto" || (provider.baseUrl === "http://localhost:1234/v1" && !cliArgs.baseUrl)) {
143
+ info.push("Detecting local LLM server...");
144
+ setConnectionInfo([...info]);
145
+ const detected = await detectLocalProvider();
146
+ if (detected) {
147
+ // Keep CLI model override if specified
148
+ if (cliArgs.model)
149
+ detected.model = cliArgs.model;
150
+ provider = detected;
151
+ info.push(`✔ Connected to ${provider.baseUrl} → ${provider.model}`);
140
152
  setConnectionInfo([...info]);
141
- const detected = await detectLocalProvider();
142
- if (detected) {
143
- // Keep CLI model override if specified
144
- if (cliArgs.model)
145
- detected.model = cliArgs.model;
146
- provider = detected;
147
- info.push(`✔ Connected to ${provider.baseUrl} → ${provider.model}`);
148
- setConnectionInfo([...info]);
149
- }
150
- else {
151
- info.push("✗ No local LLM server found. Start LM Studio or Ollama.");
152
- info.push(" Use --base-url and --api-key to connect to a remote provider.");
153
- setConnectionInfo([...info]);
154
- return;
155
- }
156
153
  }
157
154
  else {
158
- info.push(`Provider: ${provider.baseUrl}`);
159
- info.push(`Model: ${provider.model}`);
160
- setConnectionInfo([...info]);
161
- }
162
- const cwd = process.cwd();
163
- // Git info
164
- if (isGitRepo(cwd)) {
165
- const branch = getBranch(cwd);
166
- const status = getStatus(cwd);
167
- info.push(`Git: ${branch} (${status})`);
155
+ info.push("✗ No local LLM server found. Start LM Studio or Ollama.");
156
+ info.push(" Use --base-url and --api-key to connect to a remote provider.");
157
+ info.push(" Type /login to authenticate, or /connect to retry.");
168
158
  setConnectionInfo([...info]);
169
- }
170
- const a = new CodingAgent({
171
- provider,
172
- cwd,
173
- maxTokens: config.defaults.maxTokens,
174
- autoApprove: config.defaults.autoApprove,
175
- onToken: (token) => {
176
- // Switch from big spinner to streaming mode
177
- setLoading(false);
178
- setStreaming(true);
179
- // Update the current streaming response in-place
180
- setMessages((prev) => {
181
- const lastIdx = prev.length - 1;
182
- const last = prev[lastIdx];
183
- if (last && last.type === "response" && last._streaming) {
184
- return [
185
- ...prev.slice(0, lastIdx),
186
- { ...last, text: last.text + token },
187
- ];
188
- }
189
- // First token of a new response
190
- return [...prev, { id: msgId++, type: "response", text: token, _streaming: true }];
191
- });
192
- },
193
- onToolCall: (name, args) => {
194
- setLoading(true);
195
- setSpinnerMsg("Executing tools...");
196
- const argStr = Object.entries(args)
197
- .map(([k, v]) => {
198
- const val = String(v);
199
- return val.length > 60 ? val.slice(0, 60) + "..." : val;
200
- })
201
- .join(", ");
202
- addMsg("tool", `${name}(${argStr})`);
203
- },
204
- onToolResult: (_name, result) => {
205
- const numLines = result.split("\n").length;
206
- const size = result.length > 1024 ? `${(result.length / 1024).toFixed(1)}KB` : `${result.length}B`;
207
- addMsg("tool-result", `└ ${numLines} lines (${size})`);
208
- },
209
- onThinking: (text) => {
210
- if (text.length > 0) {
211
- addMsg("info", `💭 Thought for ${text.split(/\s+/).length} words`);
159
+ setReady(true);
160
+ return;
161
+ }
162
+ }
163
+ else {
164
+ info.push(`Provider: ${provider.baseUrl}`);
165
+ info.push(`Model: ${provider.model}`);
166
+ setConnectionInfo([...info]);
167
+ }
168
+ const cwd = process.cwd();
169
+ // Git info
170
+ if (isGitRepo(cwd)) {
171
+ const branch = getBranch(cwd);
172
+ const status = getStatus(cwd);
173
+ info.push(`Git: ${branch} (${status})`);
174
+ setConnectionInfo([...info]);
175
+ }
176
+ const a = new CodingAgent({
177
+ provider,
178
+ cwd,
179
+ maxTokens: config.defaults.maxTokens,
180
+ autoApprove: config.defaults.autoApprove,
181
+ onToken: (token) => {
182
+ // Switch from big spinner to streaming mode
183
+ setLoading(false);
184
+ setStreaming(true);
185
+ // Update the current streaming response in-place
186
+ setMessages((prev) => {
187
+ const lastIdx = prev.length - 1;
188
+ const last = prev[lastIdx];
189
+ if (last && last.type === "response" && last._streaming) {
190
+ return [
191
+ ...prev.slice(0, lastIdx),
192
+ { ...last, text: last.text + token },
193
+ ];
212
194
  }
213
- },
214
- onGitCommit: (message) => {
215
- addMsg("info", `📝 Auto-committed: ${message}`);
216
- },
217
- onContextCompressed: (oldTokens, newTokens) => {
218
- const saved = oldTokens - newTokens;
219
- const savedStr = saved >= 1000 ? `${(saved / 1000).toFixed(1)}k` : String(saved);
220
- addMsg("info", `📦 Context compressed (~${savedStr} tokens freed)`);
221
- },
222
- contextCompressionThreshold: config.defaults.contextCompressionThreshold,
223
- onToolApproval: (name, args, diff) => {
224
- return new Promise((resolve) => {
225
- setApproval({ tool: name, args, diff, resolve });
226
- setLoading(false);
227
- });
228
- },
229
- });
230
- // Initialize async context (repo map)
231
- await a.init();
232
- setAgent(a);
233
- setModelName(provider.model);
234
- providerRef.current = { baseUrl: provider.baseUrl, apiKey: provider.apiKey };
235
- setReady(true);
236
- })();
195
+ // First token of a new response
196
+ return [...prev, { id: msgId++, type: "response", text: token, _streaming: true }];
197
+ });
198
+ },
199
+ onToolCall: (name, args) => {
200
+ setLoading(true);
201
+ setSpinnerMsg("Executing tools...");
202
+ const argStr = Object.entries(args)
203
+ .map(([k, v]) => {
204
+ const val = String(v);
205
+ return val.length > 60 ? val.slice(0, 60) + "..." : val;
206
+ })
207
+ .join(", ");
208
+ addMsg("tool", `${name}(${argStr})`);
209
+ },
210
+ onToolResult: (_name, result) => {
211
+ const numLines = result.split("\n").length;
212
+ const size = result.length > 1024 ? `${(result.length / 1024).toFixed(1)}KB` : `${result.length}B`;
213
+ addMsg("tool-result", `└ ${numLines} lines (${size})`);
214
+ },
215
+ onThinking: (text) => {
216
+ if (text.length > 0) {
217
+ addMsg("info", `💭 Thought for ${text.split(/\s+/).length} words`);
218
+ }
219
+ },
220
+ onGitCommit: (message) => {
221
+ addMsg("info", `📝 Auto-committed: ${message}`);
222
+ },
223
+ onContextCompressed: (oldTokens, newTokens) => {
224
+ const saved = oldTokens - newTokens;
225
+ const savedStr = saved >= 1000 ? `${(saved / 1000).toFixed(1)}k` : String(saved);
226
+ addMsg("info", `📦 Context compressed (~${savedStr} tokens freed)`);
227
+ },
228
+ contextCompressionThreshold: config.defaults.contextCompressionThreshold,
229
+ onToolApproval: (name, args, diff) => {
230
+ return new Promise((resolve) => {
231
+ setApproval({ tool: name, args, diff, resolve });
232
+ setLoading(false);
233
+ });
234
+ },
235
+ });
236
+ // Initialize async context (repo map)
237
+ await a.init();
238
+ setAgent(a);
239
+ setModelName(provider.model);
240
+ providerRef.current = { baseUrl: provider.baseUrl, apiKey: provider.apiKey };
241
+ setReady(true);
242
+ if (isRetry) {
243
+ addMsg("info", `✅ Connected to ${provider.model}`);
244
+ }
245
+ }, []);
246
+ // Initialize agent on mount
247
+ useEffect(() => {
248
+ connectToProvider(false);
237
249
  }, []);
238
250
  function addMsg(type, text) {
239
251
  setMessages((prev) => [...prev, { id: msgId++, type, text }]);
@@ -284,7 +296,7 @@ function App() {
284
296
  setInput("");
285
297
  setPastedChunks([]);
286
298
  setPasteCount(0);
287
- if (!trimmed || !agent)
299
+ if (!trimmed)
288
300
  return;
289
301
  addMsg("user", trimmed);
290
302
  if (trimmed === "/quit" || trimmed === "/exit") {
@@ -296,10 +308,16 @@ function App() {
296
308
  setLoginPickerIndex(0);
297
309
  return;
298
310
  }
311
+ if (trimmed === "/connect") {
312
+ addMsg("info", "🔄 Reconnecting...");
313
+ await connectToProvider(true);
314
+ return;
315
+ }
299
316
  if (trimmed === "/help") {
300
317
  addMsg("info", [
301
318
  "Commands:",
302
319
  " /help — show this",
320
+ " /connect — retry LLM connection",
303
321
  " /login — authentication setup (run codemaxxing login in terminal)",
304
322
  " /model — switch model mid-session",
305
323
  " /models — list available models",
@@ -319,6 +337,28 @@ function App() {
319
337
  ].join("\n"));
320
338
  return;
321
339
  }
340
+ if (trimmed.startsWith("/theme")) {
341
+ const themeName = trimmed.replace("/theme", "").trim();
342
+ if (!themeName) {
343
+ const themeKeys = listThemes();
344
+ const currentIdx = themeKeys.indexOf(theme.name.toLowerCase());
345
+ setThemePicker(true);
346
+ setThemePickerIndex(currentIdx >= 0 ? currentIdx : 0);
347
+ return;
348
+ }
349
+ if (!THEMES[themeName]) {
350
+ addMsg("error", `Theme "${themeName}" not found. Use /theme to see available themes.`);
351
+ return;
352
+ }
353
+ setTheme(getTheme(themeName));
354
+ addMsg("info", `✅ Switched to theme: ${THEMES[themeName].name}`);
355
+ return;
356
+ }
357
+ // Commands below require an active LLM connection
358
+ if (!agent) {
359
+ addMsg("info", "⚠ No LLM connected. Use /login to authenticate with a provider, or start a local server.");
360
+ return;
361
+ }
322
362
  if (trimmed === "/reset") {
323
363
  agent.reset();
324
364
  addMsg("info", "✅ Conversation reset.");
@@ -351,23 +391,6 @@ function App() {
351
391
  addMsg("info", `✅ Switched to model: ${newModel}`);
352
392
  return;
353
393
  }
354
- if (trimmed.startsWith("/theme")) {
355
- const themeName = trimmed.replace("/theme", "").trim();
356
- if (!themeName) {
357
- const themeKeys = listThemes();
358
- const currentIdx = themeKeys.indexOf(theme.name.toLowerCase());
359
- setThemePicker(true);
360
- setThemePickerIndex(currentIdx >= 0 ? currentIdx : 0);
361
- return;
362
- }
363
- if (!THEMES[themeName]) {
364
- addMsg("error", `Theme "${themeName}" not found. Use /theme to see available themes.`);
365
- return;
366
- }
367
- setTheme(getTheme(themeName));
368
- addMsg("info", `✅ Switched to theme: ${THEMES[themeName].name}`);
369
- return;
370
- }
371
394
  if (trimmed === "/map") {
372
395
  const map = agent.getRepoMap();
373
396
  if (map) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "codemaxxing",
3
- "version": "0.1.9",
3
+ "version": "0.1.11",
4
4
  "description": "Open-source terminal coding agent. Connect any LLM. Max your code.",
5
5
  "main": "dist/index.js",
6
6
  "bin": {
package/src/index.tsx CHANGED
@@ -29,6 +29,7 @@ function formatTimeAgo(date: Date): string {
29
29
  // ── Slash Commands ──
30
30
  const SLASH_COMMANDS = [
31
31
  { cmd: "/help", desc: "show commands" },
32
+ { cmd: "/connect", desc: "retry LLM connection" },
32
33
  { cmd: "/login", desc: "set up authentication" },
33
34
  { cmd: "/map", desc: "show repository map" },
34
35
  { cmd: "/reset", desc: "clear conversation" },
@@ -163,119 +164,132 @@ function App() {
163
164
  return () => { pasteEvents.off("paste", handler); };
164
165
  }, []);
165
166
 
166
- // Initialize agent
167
- useEffect(() => {
168
- (async () => {
169
- const cliArgs = parseCLIArgs();
170
- const rawConfig = loadConfig();
171
- const config = applyOverrides(rawConfig, cliArgs);
172
- let provider = config.provider;
173
- const info: string[] = [];
174
-
175
- if (provider.model === "auto" || (provider.baseUrl === "http://localhost:1234/v1" && !cliArgs.baseUrl)) {
176
- info.push("Detecting local LLM server...");
167
+ // Connect/reconnect to LLM provider
168
+ const connectToProvider = useCallback(async (isRetry = false) => {
169
+ const cliArgs = parseCLIArgs();
170
+ const rawConfig = loadConfig();
171
+ const config = applyOverrides(rawConfig, cliArgs);
172
+ let provider = config.provider;
173
+ const info: string[] = [];
174
+
175
+ if (isRetry) {
176
+ info.push("Retrying connection...");
177
+ setConnectionInfo([...info]);
178
+ }
179
+
180
+ if (provider.model === "auto" || (provider.baseUrl === "http://localhost:1234/v1" && !cliArgs.baseUrl)) {
181
+ info.push("Detecting local LLM server...");
182
+ setConnectionInfo([...info]);
183
+ const detected = await detectLocalProvider();
184
+ if (detected) {
185
+ // Keep CLI model override if specified
186
+ if (cliArgs.model) detected.model = cliArgs.model;
187
+ provider = detected;
188
+ info.push(`✔ Connected to ${provider.baseUrl} → ${provider.model}`);
177
189
  setConnectionInfo([...info]);
178
- const detected = await detectLocalProvider();
179
- if (detected) {
180
- // Keep CLI model override if specified
181
- if (cliArgs.model) detected.model = cliArgs.model;
182
- provider = detected;
183
- info.push(`✔ Connected to ${provider.baseUrl} → ${provider.model}`);
184
- setConnectionInfo([...info]);
185
- } else {
186
- info.push("✗ No local LLM server found. Start LM Studio or Ollama.");
187
- info.push(" Use --base-url and --api-key to connect to a remote provider.");
188
- setConnectionInfo([...info]);
189
- return;
190
- }
191
190
  } else {
192
- info.push(`Provider: ${provider.baseUrl}`);
193
- info.push(`Model: ${provider.model}`);
191
+ info.push("✗ No local LLM server found. Start LM Studio or Ollama.");
192
+ info.push(" Use --base-url and --api-key to connect to a remote provider.");
193
+ info.push(" Type /login to authenticate, or /connect to retry.");
194
194
  setConnectionInfo([...info]);
195
+ setReady(true);
196
+ return;
195
197
  }
198
+ } else {
199
+ info.push(`Provider: ${provider.baseUrl}`);
200
+ info.push(`Model: ${provider.model}`);
201
+ setConnectionInfo([...info]);
202
+ }
196
203
 
197
- const cwd = process.cwd();
204
+ const cwd = process.cwd();
198
205
 
199
- // Git info
200
- if (isGitRepo(cwd)) {
201
- const branch = getBranch(cwd);
202
- const status = getStatus(cwd);
203
- info.push(`Git: ${branch} (${status})`);
204
- setConnectionInfo([...info]);
205
- }
206
+ // Git info
207
+ if (isGitRepo(cwd)) {
208
+ const branch = getBranch(cwd);
209
+ const status = getStatus(cwd);
210
+ info.push(`Git: ${branch} (${status})`);
211
+ setConnectionInfo([...info]);
212
+ }
206
213
 
207
- const a = new CodingAgent({
208
- provider,
209
- cwd,
210
- maxTokens: config.defaults.maxTokens,
211
- autoApprove: config.defaults.autoApprove,
212
- onToken: (token) => {
213
- // Switch from big spinner to streaming mode
214
- setLoading(false);
215
- setStreaming(true);
216
-
217
- // Update the current streaming response in-place
218
- setMessages((prev) => {
219
- const lastIdx = prev.length - 1;
220
- const last = prev[lastIdx];
221
-
222
- if (last && last.type === "response" && (last as any)._streaming) {
223
- return [
224
- ...prev.slice(0, lastIdx),
225
- { ...last, text: last.text + token },
226
- ];
227
- }
228
-
229
- // First token of a new response
230
- return [...prev, { id: msgId++, type: "response" as const, text: token, _streaming: true } as any];
231
- });
232
- },
233
- onToolCall: (name, args) => {
234
- setLoading(true);
235
- setSpinnerMsg("Executing tools...");
236
- const argStr = Object.entries(args)
237
- .map(([k, v]) => {
238
- const val = String(v);
239
- return val.length > 60 ? val.slice(0, 60) + "..." : val;
240
- })
241
- .join(", ");
242
- addMsg("tool", `${name}(${argStr})`);
243
- },
244
- onToolResult: (_name, result) => {
245
- const numLines = result.split("\n").length;
246
- const size = result.length > 1024 ? `${(result.length / 1024).toFixed(1)}KB` : `${result.length}B`;
247
- addMsg("tool-result", `└ ${numLines} lines (${size})`);
248
- },
249
- onThinking: (text) => {
250
- if (text.length > 0) {
251
- addMsg("info", `💭 Thought for ${text.split(/\s+/).length} words`);
214
+ const a = new CodingAgent({
215
+ provider,
216
+ cwd,
217
+ maxTokens: config.defaults.maxTokens,
218
+ autoApprove: config.defaults.autoApprove,
219
+ onToken: (token) => {
220
+ // Switch from big spinner to streaming mode
221
+ setLoading(false);
222
+ setStreaming(true);
223
+
224
+ // Update the current streaming response in-place
225
+ setMessages((prev) => {
226
+ const lastIdx = prev.length - 1;
227
+ const last = prev[lastIdx];
228
+
229
+ if (last && last.type === "response" && (last as any)._streaming) {
230
+ return [
231
+ ...prev.slice(0, lastIdx),
232
+ { ...last, text: last.text + token },
233
+ ];
252
234
  }
253
- },
254
- onGitCommit: (message) => {
255
- addMsg("info", `📝 Auto-committed: ${message}`);
256
- },
257
- onContextCompressed: (oldTokens, newTokens) => {
258
- const saved = oldTokens - newTokens;
259
- const savedStr = saved >= 1000 ? `${(saved / 1000).toFixed(1)}k` : String(saved);
260
- addMsg("info", `📦 Context compressed (~${savedStr} tokens freed)`);
261
- },
262
- contextCompressionThreshold: config.defaults.contextCompressionThreshold,
263
- onToolApproval: (name, args, diff) => {
264
- return new Promise((resolve) => {
265
- setApproval({ tool: name, args, diff, resolve });
266
- setLoading(false);
267
- });
268
- },
269
- });
270
235
 
271
- // Initialize async context (repo map)
272
- await a.init();
236
+ // First token of a new response
237
+ return [...prev, { id: msgId++, type: "response" as const, text: token, _streaming: true } as any];
238
+ });
239
+ },
240
+ onToolCall: (name, args) => {
241
+ setLoading(true);
242
+ setSpinnerMsg("Executing tools...");
243
+ const argStr = Object.entries(args)
244
+ .map(([k, v]) => {
245
+ const val = String(v);
246
+ return val.length > 60 ? val.slice(0, 60) + "..." : val;
247
+ })
248
+ .join(", ");
249
+ addMsg("tool", `${name}(${argStr})`);
250
+ },
251
+ onToolResult: (_name, result) => {
252
+ const numLines = result.split("\n").length;
253
+ const size = result.length > 1024 ? `${(result.length / 1024).toFixed(1)}KB` : `${result.length}B`;
254
+ addMsg("tool-result", `└ ${numLines} lines (${size})`);
255
+ },
256
+ onThinking: (text) => {
257
+ if (text.length > 0) {
258
+ addMsg("info", `💭 Thought for ${text.split(/\s+/).length} words`);
259
+ }
260
+ },
261
+ onGitCommit: (message) => {
262
+ addMsg("info", `📝 Auto-committed: ${message}`);
263
+ },
264
+ onContextCompressed: (oldTokens, newTokens) => {
265
+ const saved = oldTokens - newTokens;
266
+ const savedStr = saved >= 1000 ? `${(saved / 1000).toFixed(1)}k` : String(saved);
267
+ addMsg("info", `📦 Context compressed (~${savedStr} tokens freed)`);
268
+ },
269
+ contextCompressionThreshold: config.defaults.contextCompressionThreshold,
270
+ onToolApproval: (name, args, diff) => {
271
+ return new Promise((resolve) => {
272
+ setApproval({ tool: name, args, diff, resolve });
273
+ setLoading(false);
274
+ });
275
+ },
276
+ });
277
+
278
+ // Initialize async context (repo map)
279
+ await a.init();
280
+
281
+ setAgent(a);
282
+ setModelName(provider.model);
283
+ providerRef.current = { baseUrl: provider.baseUrl, apiKey: provider.apiKey };
284
+ setReady(true);
285
+ if (isRetry) {
286
+ addMsg("info", `✅ Connected to ${provider.model}`);
287
+ }
288
+ }, []);
273
289
 
274
- setAgent(a);
275
- setModelName(provider.model);
276
- providerRef.current = { baseUrl: provider.baseUrl, apiKey: provider.apiKey };
277
- setReady(true);
278
- })();
290
+ // Initialize agent on mount
291
+ useEffect(() => {
292
+ connectToProvider(false);
279
293
  }, []);
280
294
 
281
295
  function addMsg(type: ChatMessage["type"], text: string) {
@@ -332,7 +346,7 @@ function App() {
332
346
  setInput("");
333
347
  setPastedChunks([]);
334
348
  setPasteCount(0);
335
- if (!trimmed || !agent) return;
349
+ if (!trimmed) return;
336
350
 
337
351
  addMsg("user", trimmed);
338
352
 
@@ -345,10 +359,16 @@ function App() {
345
359
  setLoginPickerIndex(0);
346
360
  return;
347
361
  }
362
+ if (trimmed === "/connect") {
363
+ addMsg("info", "🔄 Reconnecting...");
364
+ await connectToProvider(true);
365
+ return;
366
+ }
348
367
  if (trimmed === "/help") {
349
368
  addMsg("info", [
350
369
  "Commands:",
351
370
  " /help — show this",
371
+ " /connect — retry LLM connection",
352
372
  " /login — authentication setup (run codemaxxing login in terminal)",
353
373
  " /model — switch model mid-session",
354
374
  " /models — list available models",
@@ -368,6 +388,28 @@ function App() {
368
388
  ].join("\n"));
369
389
  return;
370
390
  }
391
+ if (trimmed.startsWith("/theme")) {
392
+ const themeName = trimmed.replace("/theme", "").trim();
393
+ if (!themeName) {
394
+ const themeKeys = listThemes();
395
+ const currentIdx = themeKeys.indexOf(theme.name.toLowerCase());
396
+ setThemePicker(true);
397
+ setThemePickerIndex(currentIdx >= 0 ? currentIdx : 0);
398
+ return;
399
+ }
400
+ if (!THEMES[themeName]) {
401
+ addMsg("error", `Theme "${themeName}" not found. Use /theme to see available themes.`);
402
+ return;
403
+ }
404
+ setTheme(getTheme(themeName));
405
+ addMsg("info", `✅ Switched to theme: ${THEMES[themeName].name}`);
406
+ return;
407
+ }
408
+ // Commands below require an active LLM connection
409
+ if (!agent) {
410
+ addMsg("info", "⚠ No LLM connected. Use /login to authenticate with a provider, or start a local server.");
411
+ return;
412
+ }
371
413
  if (trimmed === "/reset") {
372
414
  agent.reset();
373
415
  addMsg("info", "✅ Conversation reset.");
@@ -399,23 +441,6 @@ function App() {
399
441
  addMsg("info", `✅ Switched to model: ${newModel}`);
400
442
  return;
401
443
  }
402
- if (trimmed.startsWith("/theme")) {
403
- const themeName = trimmed.replace("/theme", "").trim();
404
- if (!themeName) {
405
- const themeKeys = listThemes();
406
- const currentIdx = themeKeys.indexOf(theme.name.toLowerCase());
407
- setThemePicker(true);
408
- setThemePickerIndex(currentIdx >= 0 ? currentIdx : 0);
409
- return;
410
- }
411
- if (!THEMES[themeName]) {
412
- addMsg("error", `Theme "${themeName}" not found. Use /theme to see available themes.`);
413
- return;
414
- }
415
- setTheme(getTheme(themeName));
416
- addMsg("info", `✅ Switched to theme: ${THEMES[themeName].name}`);
417
- return;
418
- }
419
444
  if (trimmed === "/map") {
420
445
  const map = agent.getRepoMap();
421
446
  if (map) {