codemaxxing 0.4.16 → 0.4.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -191,10 +191,35 @@ Conversations auto-save to SQLite. Pick up where you left off:
  - `/session delete` — remove a session
  - `/resume` — interactive session picker
 
+ ### 🔌 MCP Support (Model Context Protocol)
+ Connect to external tools via the industry-standard MCP protocol. Databases, GitHub, Slack, browsers — anything with an MCP server.
+ - Compatible with `.cursor/mcp.json` and `opencode.json` configs
+ - `/mcp` — show connected servers
+ - `/mcp add github npx -y @modelcontextprotocol/server-github` — add a server
+ - `/mcp tools` — list all available MCP tools
+
+ ### 🖥️ Zero-Setup Local LLM
+ First time with no LLM? Codemaxxing walks you through it:
+ 1. Detects your hardware (CPU, RAM, GPU)
+ 2. Recommends coding models that fit your machine
+ 3. Installs Ollama automatically
+ 4. Downloads the model with a progress bar
+ 5. Connects and drops you into coding mode
+
+ No googling, no config files, no decisions. Just run `codemaxxing`.
+
+ ### 🦙 Ollama Management
+ Full Ollama control from inside codemaxxing:
+ - `/ollama` — status, installed models, GPU usage
+ - `/ollama pull` — interactive model picker + download
+ - `/ollama delete` — pick and remove models
+ - `/ollama start` / `/ollama stop` — server management
+ - Exit warning when Ollama is using GPU memory
+
  ### 🔄 Multi-Provider
- Switch models mid-session without restarting:
- - `/model gpt-4o` — switch to a different model
- - `/models` — list available models from your provider
+ Switch models mid-session with an interactive picker:
+ - `/model` — browse and switch models
+ - `/model gpt-4o` — switch directly by name
  - Native Anthropic API support (not just OpenAI-compatible)
 
  ### 🎨 14 Themes
@@ -216,11 +241,15 @@ Type `/` for autocomplete suggestions. Arrow keys to navigate, Tab or Enter to s
  | `/help` | Show all commands |
  | `/connect` | Retry LLM connection |
  | `/login` | Interactive auth setup |
+ | `/model` | Browse & switch models (picker) |
  | `/architect` | Toggle architect mode / set model |
  | `/skills` | Browse, install, manage skills |
  | `/lint on/off` | Toggle auto-linting |
- | `/model <name>` | Switch model mid-session |
- | `/models` | List available models |
+ | `/mcp` | MCP server status & tools |
+ | `/ollama` | Ollama status, models & GPU |
+ | `/ollama pull` | Download a model (picker) |
+ | `/ollama delete` | Remove a model (picker) |
+ | `/ollama start/stop` | Server management |
  | `/theme` | Switch color theme |
  | `/map` | Show repository map |
  | `/sessions` | List past sessions |
@@ -295,14 +324,16 @@ Settings are stored in `~/.codemaxxing/settings.json`:
 
  ## Tools
 
- Codemaxxing gives the model these tools:
+ Built-in tools:
 
  - **read_file** — Read file contents (safe)
- - **write_file** — Write/create files (requires approval)
+ - **write_file** — Write/create files (requires approval, shows diff)
  - **list_files** — List directory contents (safe)
  - **search_files** — Search for patterns across files (safe)
  - **run_command** — Execute shell commands (requires approval)
 
+ Plus any tools from connected MCP servers (databases, APIs, GitHub, etc.)
+
  ## Project Context
 
  Drop a `CODEMAXXING.md` file in your project root to give the model extra context about your codebase, conventions, or instructions. It's automatically included in the system prompt.
@@ -311,8 +342,10 @@ Drop a `CODEMAXXING.md` file in your project root to give the model extra contex
 
  - **Runtime:** Node.js + TypeScript
  - **TUI:** [Ink](https://github.com/vadimdemedes/ink) (React for the terminal)
- - **LLM SDK:** [OpenAI SDK](https://github.com/openai/openai-node) (works with any compatible API)
+ - **LLM SDKs:** [OpenAI SDK](https://github.com/openai/openai-node) + [Anthropic SDK](https://github.com/anthropics/anthropic-sdk-typescript)
+ - **MCP:** [@modelcontextprotocol/sdk](https://github.com/modelcontextprotocol/typescript-sdk)
  - **Sessions:** [better-sqlite3](https://github.com/WiseLibs/better-sqlite3)
+ - **Local LLM:** Ollama integration (auto-install, pull, manage)
  - **Zero cloud dependencies** — everything runs locally
 
  ## Inspired By
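
Context for the README changes above: the new MCP section claims compatibility with existing `.cursor/mcp.json` / `opencode.json` configs. As a rough sketch only (assuming the common `mcpServers` layout; the `github` entry simply mirrors the `/mcp add` example shown in the README and is illustrative, not taken from the package), such a config file might look like:

```json
{
  "mcpServers": {
    "github": {
      "command": "npx",
      "args": ["-y", "@modelcontextprotocol/server-github"]
    }
  }
}
```

The in-TUI equivalent would be the `/mcp add github npx -y @modelcontextprotocol/server-github` command listed in the README section above.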
package/dist/config.d.ts CHANGED
@@ -41,6 +41,17 @@ export declare function getConfigPath(): string;
  /**
  * Auto-detect local LLM servers
  */
+ export type DetectionResult = {
+ status: "connected";
+ provider: ProviderConfig;
+ } | {
+ status: "no-models";
+ serverName: string;
+ baseUrl: string;
+ } | {
+ status: "no-server";
+ };
+ export declare function detectLocalProviderDetailed(): Promise<DetectionResult>;
  export declare function detectLocalProvider(): Promise<ProviderConfig | null>;
  /**
  * List available models from a provider endpoint
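
For readers skimming the type change: a minimal sketch of how a caller might branch on the new `DetectionResult` discriminated union. It mirrors the handling added to `index.tsx` further down in this diff; the log messages are illustrative, not part of the package.

```ts
import { detectLocalProviderDetailed } from "./config.js";

// Sketch: exhaustive narrowing over DetectionResult's `status` tag.
const detection = await detectLocalProviderDetailed();
switch (detection.status) {
  case "connected":
    // detection.provider is a ready-to-use ProviderConfig
    console.log(`Connected to ${detection.provider.baseUrl} → ${detection.provider.model}`);
    break;
  case "no-models":
    // A server answered, but its /models list was empty
    console.log(`${detection.serverName} is running at ${detection.baseUrl} but has no models`);
    break;
  case "no-server":
    console.log("No local LLM server found");
    break;
}
```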
package/dist/config.js CHANGED
@@ -149,9 +149,49 @@ export function applyOverrides(config, args) {
  export function getConfigPath() {
  return CONFIG_FILE;
  }
- /**
- * Auto-detect local LLM servers
- */
+ export async function detectLocalProviderDetailed() {
+ const endpoints = [
+ { name: "LM Studio", url: "http://localhost:1234/v1" },
+ { name: "Ollama", url: "http://localhost:11434/v1" },
+ { name: "vLLM", url: "http://localhost:8000/v1" },
+ ];
+ let serverFound = null;
+ for (const endpoint of endpoints) {
+ try {
+ const controller = new AbortController();
+ const timeout = setTimeout(() => controller.abort(), 2000);
+ const res = await fetch(`${endpoint.url}/models`, {
+ signal: controller.signal,
+ });
+ clearTimeout(timeout);
+ if (res.ok) {
+ const data = (await res.json());
+ const models = data.data ?? [];
+ if (models.length === 0) {
+ // Server is up but no models — remember it but keep looking
+ if (!serverFound)
+ serverFound = endpoint;
+ continue;
+ }
+ return {
+ status: "connected",
+ provider: {
+ baseUrl: endpoint.url,
+ apiKey: "not-needed",
+ model: models[0].id,
+ },
+ };
+ }
+ }
+ catch {
+ // Server not running, try next
+ }
+ }
+ if (serverFound) {
+ return { status: "no-models", serverName: serverFound.name, baseUrl: serverFound.url };
+ }
+ return { status: "no-server" };
+ }
  export async function detectLocalProvider() {
  const endpoints = [
  { name: "LM Studio", url: "http://localhost:1234/v1" },
@@ -169,7 +209,11 @@ export async function detectLocalProvider() {
  if (res.ok) {
  const data = (await res.json());
  const models = data.data ?? [];
- const model = models[0]?.id ?? "auto";
+ if (models.length === 0) {
+ // Server is up but no models available — don't fake a connection
+ continue;
+ }
+ const model = models[0].id;
  return {
  baseUrl: endpoint.url,
  apiKey: "not-needed",
package/dist/index.js CHANGED
@@ -5,7 +5,7 @@ import { render, Box, Text, useInput, useApp, useStdout } from "ink";
  import { EventEmitter } from "events";
  import TextInput from "ink-text-input";
  import { CodingAgent } from "./agent.js";
- import { loadConfig, saveConfig, detectLocalProvider, parseCLIArgs, applyOverrides, listModels } from "./config.js";
+ import { loadConfig, saveConfig, detectLocalProvider, detectLocalProviderDetailed, parseCLIArgs, applyOverrides, listModels } from "./config.js";
  import { listSessions, getSession, loadMessages, deleteSession } from "./utils/sessions.js";
  import { execSync } from "child_process";
  import { isGitRepo, getBranch, getStatus, getDiff, undoLastCommit } from "./utils/git.js";
@@ -220,15 +220,21 @@ function App() {
  if (provider.model === "auto" || (provider.baseUrl === "http://localhost:1234/v1" && !cliArgs.baseUrl)) {
  info.push("Detecting local LLM server...");
  setConnectionInfo([...info]);
- const detected = await detectLocalProvider();
- if (detected) {
+ const detection = await detectLocalProviderDetailed();
+ if (detection.status === "connected") {
  // Keep CLI model override if specified
  if (cliArgs.model)
- detected.model = cliArgs.model;
- provider = detected;
+ detection.provider.model = cliArgs.model;
+ provider = detection.provider;
  info.push(`✔ Connected to ${provider.baseUrl} → ${provider.model}`);
  setConnectionInfo([...info]);
  }
+ else if (detection.status === "no-models") {
+ info.push(`⚠ ${detection.serverName} is running but has no models. Use /ollama pull to download one.`);
+ setConnectionInfo([...info]);
+ setReady(true);
+ return;
+ }
  else {
  info.push("✗ No local LLM server found.");
  setConnectionInfo([...info]);
@@ -903,8 +909,9 @@ function App() {
  }
  return;
  }
- if (trimmed === "/model" || trimmed === "/models") {
+ if (trimmed === "/model") {
  // Show picker of available models
+ addMsg("info", "Fetching available models...");
  try {
  const ollamaModels = await listInstalledModelsDetailed();
  if (ollamaModels.length > 0) {
@@ -913,18 +920,25 @@ function App() {
  return;
  }
  }
- catch { }
+ catch (err) {
+ // Ollama not available or failed, try provider
+ }
  // Fallback: try provider's model list
- try {
- const providerModels = await listModels(providerRef.current?.baseUrl || "", providerRef.current?.apiKey || "");
- if (providerModels.length > 0) {
- setModelPicker(providerModels.map((m) => m.id || m));
- setModelPickerIndex(0);
- return;
+ if (providerRef.current?.baseUrl && providerRef.current.baseUrl !== "auto") {
+ try {
+ const providerModels = await listModels(providerRef.current.baseUrl, providerRef.current.apiKey || "");
+ if (providerModels.length > 0) {
+ setModelPicker(providerModels);
+ setModelPickerIndex(0);
+ return;
+ }
+ }
+ catch (err) {
+ // Provider fetch failed
  }
  }
- catch { }
- addMsg("info", `Current model: ${modelName}\n Usage: /model <model-name>`);
+ // No models found anywhere
+ addMsg("error", "No models available. Download one with /ollama pull or configure a provider.");
  return;
  }
  if (trimmed.startsWith("/model ")) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "codemaxxing",
- "version": "0.4.16",
+ "version": "0.4.17",
  "description": "Open-source terminal coding agent. Connect any LLM. Max your code.",
  "main": "dist/index.js",
  "bin": {
package/src/config.ts CHANGED
@@ -193,6 +193,57 @@ export function getConfigPath(): string {
  /**
  * Auto-detect local LLM servers
  */
+ export type DetectionResult =
+ | { status: "connected"; provider: ProviderConfig }
+ | { status: "no-models"; serverName: string; baseUrl: string }
+ | { status: "no-server" };
+
+ export async function detectLocalProviderDetailed(): Promise<DetectionResult> {
+ const endpoints = [
+ { name: "LM Studio", url: "http://localhost:1234/v1" },
+ { name: "Ollama", url: "http://localhost:11434/v1" },
+ { name: "vLLM", url: "http://localhost:8000/v1" },
+ ];
+
+ let serverFound: { name: string; url: string } | null = null;
+
+ for (const endpoint of endpoints) {
+ try {
+ const controller = new AbortController();
+ const timeout = setTimeout(() => controller.abort(), 2000);
+ const res = await fetch(`${endpoint.url}/models`, {
+ signal: controller.signal,
+ });
+ clearTimeout(timeout);
+
+ if (res.ok) {
+ const data = (await res.json()) as { data?: Array<{ id: string }> };
+ const models = data.data ?? [];
+ if (models.length === 0) {
+ // Server is up but no models — remember it but keep looking
+ if (!serverFound) serverFound = endpoint;
+ continue;
+ }
+ return {
+ status: "connected",
+ provider: {
+ baseUrl: endpoint.url,
+ apiKey: "not-needed",
+ model: models[0]!.id,
+ },
+ };
+ }
+ } catch {
+ // Server not running, try next
+ }
+ }
+
+ if (serverFound) {
+ return { status: "no-models", serverName: serverFound.name, baseUrl: serverFound.url };
+ }
+ return { status: "no-server" };
+ }
+
  export async function detectLocalProvider(): Promise<ProviderConfig | null> {
  const endpoints = [
  { name: "LM Studio", url: "http://localhost:1234/v1" },
@@ -212,7 +263,11 @@ export async function detectLocalProvider(): Promise<ProviderConfig | null> {
  if (res.ok) {
  const data = (await res.json()) as { data?: Array<{ id: string }> };
  const models = data.data ?? [];
- const model = models[0]?.id ?? "auto";
+ if (models.length === 0) {
+ // Server is up but no models available — don't fake a connection
+ continue;
+ }
+ const model = models[0]!.id;
  return {
  baseUrl: endpoint.url,
  apiKey: "not-needed",
package/src/index.tsx CHANGED
@@ -5,7 +5,7 @@ import { render, Box, Text, useInput, useApp, useStdout } from "ink";
  import { EventEmitter } from "events";
  import TextInput from "ink-text-input";
  import { CodingAgent } from "./agent.js";
- import { loadConfig, saveConfig, detectLocalProvider, parseCLIArgs, applyOverrides, listModels } from "./config.js";
+ import { loadConfig, saveConfig, detectLocalProvider, detectLocalProviderDetailed, parseCLIArgs, applyOverrides, listModels } from "./config.js";
  import { listSessions, getSession, loadMessages, deleteSession } from "./utils/sessions.js";
  import { execSync } from "child_process";
  import { isGitRepo, getBranch, getStatus, getDiff, undoLastCommit } from "./utils/git.js";
@@ -264,13 +264,18 @@ function App() {
  if (provider.model === "auto" || (provider.baseUrl === "http://localhost:1234/v1" && !cliArgs.baseUrl)) {
  info.push("Detecting local LLM server...");
  setConnectionInfo([...info]);
- const detected = await detectLocalProvider();
- if (detected) {
+ const detection = await detectLocalProviderDetailed();
+ if (detection.status === "connected") {
  // Keep CLI model override if specified
- if (cliArgs.model) detected.model = cliArgs.model;
- provider = detected;
+ if (cliArgs.model) detection.provider.model = cliArgs.model;
+ provider = detection.provider;
  info.push(`✔ Connected to ${provider.baseUrl} → ${provider.model}`);
  setConnectionInfo([...info]);
+ } else if (detection.status === "no-models") {
+ info.push(`⚠ ${detection.serverName} is running but has no models. Use /ollama pull to download one.`);
+ setConnectionInfo([...info]);
+ setReady(true);
+ return;
  } else {
  info.push("✗ No local LLM server found.");
  setConnectionInfo([...info]);
@@ -941,8 +946,9 @@ function App() {
  }
  return;
  }
- if (trimmed === "/model" || trimmed === "/models") {
+ if (trimmed === "/model") {
  // Show picker of available models
+ addMsg("info", "Fetching available models...");
  try {
  const ollamaModels = await listInstalledModelsDetailed();
  if (ollamaModels.length > 0) {
@@ -950,17 +956,26 @@ function App() {
  setModelPickerIndex(0);
  return;
  }
- } catch {}
+ } catch (err) {
+ // Ollama not available or failed, try provider
+ }
+
  // Fallback: try provider's model list
- try {
- const providerModels = await listModels(providerRef.current?.baseUrl || "", providerRef.current?.apiKey || "");
- if (providerModels.length > 0) {
- setModelPicker(providerModels.map((m: any) => m.id || m));
- setModelPickerIndex(0);
- return;
+ if (providerRef.current?.baseUrl && providerRef.current.baseUrl !== "auto") {
+ try {
+ const providerModels = await listModels(providerRef.current.baseUrl, providerRef.current.apiKey || "");
+ if (providerModels.length > 0) {
+ setModelPicker(providerModels);
+ setModelPickerIndex(0);
+ return;
+ }
+ } catch (err) {
+ // Provider fetch failed
  }
- } catch {}
- addMsg("info", `Current model: ${modelName}\n Usage: /model <model-name>`);
+ }
+
+ // No models found anywhere
+ addMsg("error", "No models available. Download one with /ollama pull or configure a provider.");
  return;
  }
  if (trimmed.startsWith("/model ")) {