codemaxxing 0.2.1 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,251 @@
1
+ /**
2
+ * MCP (Model Context Protocol) client support
3
+ * Connects to external MCP servers and exposes their tools to the LLM agent.
4
+ */
5
+ import { Client } from "@modelcontextprotocol/sdk/client/index.js";
6
+ import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
7
+ import { readFileSync, writeFileSync, existsSync, mkdirSync } from "fs";
8
+ import { join } from "path";
9
+ import { homedir } from "os";
10
// ── Config paths ──
// Global MCP config lives under the user's home directory
// (~/.codemaxxing/mcp.json); project-level configs are resolved separately
// by getProjectConfigPaths() and take priority when merged.
const GLOBAL_CONFIG_DIR = join(homedir(), ".codemaxxing");
const GLOBAL_CONFIG_PATH = join(GLOBAL_CONFIG_DIR, "mcp.json");
13
/**
 * Candidate per-project MCP config file locations for a working directory,
 * ordered from lowest to highest priority (later entries overwrite earlier
 * ones when merged by loadMCPConfig).
 * @param {string} cwd - Project working directory.
 * @returns {string[]} Absolute paths to possible config files.
 */
function getProjectConfigPaths(cwd) {
    const relativeParts = [
        [".codemaxxing", "mcp.json"],
        [".cursor", "mcp.json"],
        ["opencode.json"],
    ];
    return relativeParts.map((parts) => join(cwd, ...parts));
}
20
// ── Config loading ──
/**
 * Read and parse a single MCP config file.
 * Returns the parsed object only when it contains a `mcpServers` object;
 * a missing, unreadable, or malformed file is treated as "no config"
 * (best-effort loading — never throws).
 * @param {string} path - Absolute path to a JSON config file.
 * @returns {object|null} Parsed config with an `mcpServers` map, or null.
 */
function loadConfigFile(path) {
    if (!existsSync(path))
        return null;
    try {
        const parsed = JSON.parse(readFileSync(path, "utf-8"));
        const servers = parsed.mcpServers;
        return servers && typeof servers === "object" ? parsed : null;
    }
    catch {
        // Malformed JSON or a read error — deliberately ignored.
        return null;
    }
}
36
/**
 * Build the effective MCP configuration for a project by merging the
 * global config with any project-level configs. Project entries win over
 * global ones, and later project files win over earlier ones.
 * @param {string} cwd - Project working directory.
 * @returns {{ mcpServers: Record<string, object> }} Merged server map.
 */
export function loadMCPConfig(cwd) {
    const mcpServers = {};
    // Global config first (lowest priority), then project configs in order.
    const candidatePaths = [GLOBAL_CONFIG_PATH, ...getProjectConfigPaths(cwd)];
    for (const path of candidatePaths) {
        const config = loadConfigFile(path);
        if (config) {
            Object.assign(mcpServers, config.mcpServers);
        }
    }
    return { mcpServers };
}
52
// ── Connection management ──
// Module-level registry of live MCP server connections. Mutated by
// connectToServers()/disconnectAll(); read by the lookup/parse helpers below.
const connectedServers = [];
54
/**
 * Connect to every MCP server in the config and discover its tools.
 * Connection failures are reported via `onStatus` and skipped, so one bad
 * server does not prevent the others from connecting.
 * @param {{ mcpServers: Record<string, object> }} config - Merged MCP config.
 * @param {(name: string, status: string) => void} [onStatus] - Progress callback.
 * @returns {Promise<Array>} The module-level list of connected servers.
 */
export async function connectToServers(config, onStatus) {
    const entries = Object.entries(config.mcpServers);
    if (entries.length === 0)
        return [];
    for (const [name, serverConfig] of entries) {
        let transport = null;
        try {
            onStatus?.(name, "connecting");
            transport = new StdioClientTransport({
                command: serverConfig.command,
                args: serverConfig.args ?? [],
                env: { ...process.env, ...(serverConfig.env ?? {}) },
            });
            const client = new Client({
                name: "codemaxxing",
                version: "0.3.1", // keep in sync with package.json
            });
            await client.connect(transport);
            // Fetch available tools; default to an empty object schema when absent.
            const toolsResult = await client.listTools();
            const tools = (toolsResult.tools ?? []).map((t) => ({
                name: t.name,
                description: t.description,
                inputSchema: (t.inputSchema ?? { type: "object", properties: {} }),
            }));
            connectedServers.push({ name, client, transport, tools });
            onStatus?.(name, `connected (${tools.length} tools)`);
        }
        catch (err) {
            // Close the transport so a half-connected server's spawned child
            // process does not linger after a failed handshake or tool listing.
            try {
                await transport?.close();
            }
            catch {
                // Ignore cleanup errors.
            }
            onStatus?.(name, `failed: ${err.message}`);
        }
    }
    return connectedServers;
}
88
/**
 * Close every connected MCP client, then clear the registry.
 * Close failures are swallowed — teardown is best-effort.
 */
export async function disconnectAll() {
    for (const { client } of connectedServers) {
        try {
            await client.close();
        }
        catch {
            // Best-effort cleanup; a failed close must not abort teardown.
        }
    }
    connectedServers.length = 0;
}
99
/**
 * @returns {Array} The live array of connected MCP servers. This is the
 *   module's shared registry, not a copy — callers should treat it as
 *   read-only.
 */
export function getConnectedServers() {
    return connectedServers;
}
102
// ── Tool format conversion ──
/**
 * Convert MCP tool descriptors into OpenAI function-tool format.
 * Tool names are namespaced as `mcp_<server>_<tool>` so tool calls can be
 * routed back to the owning server by parseMCPToolName().
 * @param {Array<{name: string, tools: Array}>} servers - Connected servers.
 * @returns {Array} Tools in OpenAI `function` tool format.
 */
export function getAllMCPTools(servers) {
    return servers.flatMap((server) =>
        server.tools.map((tool) => ({
            type: "function",
            function: {
                name: `mcp_${server.name}_${tool.name}`,
                description: `[MCP: ${server.name}] ${tool.description ?? tool.name}`,
                parameters: tool.inputSchema,
            },
        })));
}
119
/**
 * Parse an MCP tool call name to extract server name and tool name.
 * Format: mcp_<serverName>_<toolName>
 * Server names can contain hyphens but not underscores (by convention).
 * Known connected servers are matched longest-name-first so that a server
 * whose name is a prefix of another's (e.g. "foo" vs "foo_bar") cannot
 * shadow the more specific one.
 * @param {string} fullName - Namespaced tool name from the model.
 * @returns {{ serverName: string, toolName: string } | null} Parsed parts,
 *   or null when the name is not an MCP tool.
 */
export function parseMCPToolName(fullName) {
    if (!fullName.startsWith("mcp_"))
        return null;
    const rest = fullName.slice(4); // Remove "mcp_"
    // Find the server by matching known connected server names, preferring
    // the longest match so ambiguous prefixes resolve to the most specific.
    const byLength = [...connectedServers].sort((a, b) => b.name.length - a.name.length);
    for (const server of byLength) {
        const prefix = server.name + "_";
        if (rest.startsWith(prefix)) {
            return { serverName: server.name, toolName: rest.slice(prefix.length) };
        }
    }
    // Fallback for unknown servers: split on first underscore
    const idx = rest.indexOf("_");
    if (idx === -1)
        return null;
    return { serverName: rest.slice(0, idx), toolName: rest.slice(idx + 1) };
}
141
// ── Tool execution ──
/**
 * Invoke a tool on a connected MCP server and flatten its result to text.
 * Failures (unknown server, tool error) are returned as strings rather
 * than thrown, so the agent loop can feed them back to the model.
 * @param {string} serverName - Name of the connected server.
 * @param {string} toolName - Tool to invoke on that server.
 * @param {Record<string, unknown>} args - Tool arguments.
 * @returns {Promise<string>} Tool output or an error description.
 */
export async function callMCPTool(serverName, toolName, args) {
    const server = connectedServers.find((s) => s.name === serverName);
    if (!server) {
        return `Error: MCP server "${serverName}" not found or not connected.`;
    }
    try {
        const result = await server.client.callTool({ name: toolName, arguments: args });
        const content = result.content;
        // MCP tool results normally carry a content array; otherwise
        // stringify whatever came back.
        if (!Array.isArray(content)) {
            return typeof content === "string" ? content : JSON.stringify(content);
        }
        const parts = content.map((c) => {
            switch (c.type) {
                case "text":
                    return c.text;
                case "image":
                    return `[image: ${c.mimeType}]`;
                default:
                    return JSON.stringify(c);
            }
        });
        return parts.join("\n");
    }
    catch (err) {
        return `Error calling MCP tool "${toolName}" on server "${serverName}": ${err.message}`;
    }
}
168
// ── Server management ──
/**
 * Persist an MCP server entry into the global config file, creating the
 * config directory and file as needed. A corrupt existing file is replaced
 * rather than aborting the add.
 * @param {string} name - Server name (key under mcpServers).
 * @param {object} config - Server launch config ({ command, args?, env? }).
 * @returns {{ ok: boolean, message: string }} Outcome description.
 */
export function addServer(name, config) {
    try {
        if (!existsSync(GLOBAL_CONFIG_DIR)) {
            mkdirSync(GLOBAL_CONFIG_DIR, { recursive: true });
        }
        let existing = { mcpServers: {} };
        if (existsSync(GLOBAL_CONFIG_PATH)) {
            try {
                existing = JSON.parse(readFileSync(GLOBAL_CONFIG_PATH, "utf-8"));
                if (!existing.mcpServers)
                    existing.mcpServers = {};
            }
            catch {
                // Corrupt config — start fresh rather than failing the add.
                existing = { mcpServers: {} };
            }
        }
        existing.mcpServers[name] = config;
        writeFileSync(GLOBAL_CONFIG_PATH, JSON.stringify(existing, null, 2) + "\n", "utf-8");
        return { ok: true, message: `Added MCP server "${name}" to global config.` };
    }
    catch (err) {
        return { ok: false, message: `Failed to add server: ${err.message}` };
    }
}
193
/**
 * Delete an MCP server entry from the global config file.
 * @param {string} name - Server name to remove.
 * @returns {{ ok: boolean, message: string }} Outcome description.
 */
export function removeServer(name) {
    try {
        if (!existsSync(GLOBAL_CONFIG_PATH)) {
            return { ok: false, message: `No global MCP config found.` };
        }
        const existing = JSON.parse(readFileSync(GLOBAL_CONFIG_PATH, "utf-8"));
        const servers = existing.mcpServers;
        if (!servers?.[name]) {
            return { ok: false, message: `Server "${name}" not found in global config.` };
        }
        delete servers[name];
        writeFileSync(GLOBAL_CONFIG_PATH, JSON.stringify(existing, null, 2) + "\n", "utf-8");
        return { ok: true, message: `Removed MCP server "${name}" from global config.` };
    }
    catch (err) {
        return { ok: false, message: `Failed to remove server: ${err.message}` };
    }
}
210
/**
 * List all configured MCP servers from global and project configs,
 * annotated with connection status and tool count. Project entries
 * override global entries of the same name (source and command are
 * replaced in place).
 * @param {string} cwd - Project working directory.
 * @returns {Array<{name: string, source: string, command: string,
 *   connected: boolean, toolCount: number}>} Display entries.
 */
export function listServers(cwd) {
    // Build one display entry; shared by the global and project passes
    // (previously duplicated verbatim three times).
    const describe = (name, cfg, source) => {
        const connected = connectedServers.find((s) => s.name === name);
        return {
            name,
            source,
            command: `${cfg.command} ${(cfg.args ?? []).join(" ")}`.trim(),
            connected: !!connected,
            toolCount: connected?.tools.length ?? 0,
        };
    };
    const result = [];
    // Gather from global config
    const globalConfig = loadConfigFile(GLOBAL_CONFIG_PATH);
    if (globalConfig) {
        for (const [name, cfg] of Object.entries(globalConfig.mcpServers)) {
            result.push(describe(name, cfg, "global"));
        }
    }
    // Gather from project configs
    for (const configPath of getProjectConfigPaths(cwd)) {
        const config = loadConfigFile(configPath);
        if (!config)
            continue;
        const source = configPath.includes(".cursor") ? "cursor" : configPath.includes("opencode") ? "opencode" : "project";
        for (const [name, cfg] of Object.entries(config.mcpServers)) {
            // If already listed from global, the project entry overrides it.
            const existing = result.find((r) => r.name === name);
            if (existing) {
                existing.source = source;
                existing.command = `${cfg.command} ${(cfg.args ?? []).join(" ")}`.trim();
                continue;
            }
            result.push(describe(name, cfg, source));
        }
    }
    return result;
}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "codemaxxing",
3
- "version": "0.2.1",
3
+ "version": "0.3.1",
4
4
  "description": "Open-source terminal coding agent. Connect any LLM. Max your code.",
5
5
  "main": "dist/index.js",
6
6
  "bin": {
@@ -27,6 +27,7 @@
27
27
  "license": "MIT",
28
28
  "dependencies": {
29
29
  "@anthropic-ai/sdk": "^0.78.0",
30
+ "@modelcontextprotocol/sdk": "^1.27.1",
30
31
  "@types/react": "^19.2.14",
31
32
  "better-sqlite3": "^12.6.2",
32
33
  "chalk": "^5.3.0",
package/src/agent.ts CHANGED
@@ -6,10 +6,12 @@ import type {
6
6
  ChatCompletionChunk,
7
7
  } from "openai/resources/chat/completions";
8
8
  import { FILE_TOOLS, executeTool, generateDiff, getExistingContent } from "./tools/files.js";
9
- import { buildProjectContext, getSystemPrompt } from "./utils/context.js";
9
+ import { detectLinter, runLinter } from "./utils/lint.js";
10
+ import { buildProjectContext, getSystemPrompt, loadProjectRules } from "./utils/context.js";
10
11
  import { isGitRepo, autoCommit } from "./utils/git.js";
11
12
  import { buildSkillPrompts, getActiveSkillCount } from "./utils/skills.js";
12
13
  import { createSession, saveMessage, updateTokenEstimate, updateSessionCost, loadMessages } from "./utils/sessions.js";
14
+ import { loadMCPConfig, connectToServers, disconnectAll, getAllMCPTools, parseMCPToolName, callMCPTool, getConnectedServers, type ConnectedServer } from "./utils/mcp.js";
13
15
  import type { ProviderConfig } from "./config.js";
14
16
 
15
17
  // Tools that can modify your project — require approval
@@ -71,6 +73,9 @@ export interface AgentOptions {
71
73
  onToolApproval?: (name: string, args: Record<string, unknown>, diff?: string) => Promise<"yes" | "no" | "always">;
72
74
  onGitCommit?: (message: string) => void;
73
75
  onContextCompressed?: (oldTokens: number, newTokens: number) => void;
76
+ onArchitectPlan?: (plan: string) => void;
77
+ onLintResult?: (file: string, errors: string) => void;
78
+ onMCPStatus?: (server: string, status: string) => void;
74
79
  contextCompressionThreshold?: number;
75
80
  }
76
81
 
@@ -101,6 +106,11 @@ export class CodingAgent {
101
106
  private systemPrompt: string = "";
102
107
  private compressionThreshold: number;
103
108
  private sessionDisabledSkills: Set<string> = new Set();
109
+ private projectRulesSource: string | null = null;
110
+ private architectModel: string | null = null;
111
+ private autoLintEnabled: boolean = true;
112
+ private detectedLinter: { command: string; name: string } | null = null;
113
+ private mcpServers: ConnectedServer[] = [];
104
114
 
105
115
  constructor(private options: AgentOptions) {
106
116
  this.providerType = options.provider.type || "openai";
@@ -131,7 +141,22 @@ export class CodingAgent {
131
141
  async init(): Promise<void> {
132
142
  const context = await buildProjectContext(this.cwd);
133
143
  const skillPrompts = buildSkillPrompts(this.cwd, this.sessionDisabledSkills);
134
- this.systemPrompt = await getSystemPrompt(context, skillPrompts);
144
+ const rules = loadProjectRules(this.cwd);
145
+ if (rules) this.projectRulesSource = rules.source;
146
+ this.systemPrompt = await getSystemPrompt(context, skillPrompts, rules?.content ?? "");
147
+
148
+ // Detect project linter
149
+ this.detectedLinter = detectLinter(this.cwd);
150
+
151
+ // Connect to MCP servers
152
+ const mcpConfig = loadMCPConfig(this.cwd);
153
+ if (Object.keys(mcpConfig.mcpServers).length > 0) {
154
+ this.mcpServers = await connectToServers(mcpConfig, this.options.onMCPStatus);
155
+ if (this.mcpServers.length > 0) {
156
+ const mcpTools = getAllMCPTools(this.mcpServers);
157
+ this.tools = [...FILE_TOOLS, ...mcpTools];
158
+ }
159
+ }
135
160
 
136
161
  this.messages = [
137
162
  { role: "system", content: this.systemPrompt },
@@ -174,6 +199,16 @@ export class CodingAgent {
174
199
  return this.repoMap;
175
200
  }
176
201
 
202
  /**
   * Send a message, routing through architect model if enabled.
   * When an architect model is configured, the request is first planned by
   * architectChat() (plan-then-execute); otherwise it goes straight to the
   * regular chat() loop.
   * @param userMessage - The user's request text.
   * @returns The assistant's final response text.
   */
  async send(userMessage: string): Promise<string> {
    if (this.architectModel) {
      return this.architectChat(userMessage);
    }
    return this.chat(userMessage);
  }
211
+
177
212
  /**
178
213
  * Stream a response from the model.
179
214
  * Assembles tool call chunks, emits tokens in real-time,
@@ -335,7 +370,14 @@ export class CodingAgent {
335
370
  }
336
371
  }
337
372
 
338
- const result = await executeTool(toolCall.name, args, this.cwd);
373
+ // Route to MCP or built-in tool
374
+ const mcpParsed = parseMCPToolName(toolCall.name);
375
+ let result: string;
376
+ if (mcpParsed) {
377
+ result = await callMCPTool(mcpParsed.serverName, mcpParsed.toolName, args);
378
+ } else {
379
+ result = await executeTool(toolCall.name, args, this.cwd);
380
+ }
339
381
  this.options.onToolResult?.(toolCall.name, result);
340
382
 
341
383
  // Auto-commit after successful write_file (only if enabled)
@@ -347,6 +389,23 @@ export class CodingAgent {
347
389
  }
348
390
  }
349
391
 
392
+ // Auto-lint after successful write_file
393
+ if (this.autoLintEnabled && this.detectedLinter && toolCall.name === "write_file" && result.startsWith("✅")) {
394
+ const filePath = String(args.path ?? "");
395
+ const lintErrors = runLinter(this.detectedLinter, filePath, this.cwd);
396
+ if (lintErrors) {
397
+ this.options.onLintResult?.(filePath, lintErrors);
398
+ const lintMsg: ChatCompletionMessageParam = {
399
+ role: "tool",
400
+ tool_call_id: toolCall.id,
401
+ content: result + `\n\nLint errors detected in ${filePath}:\n${lintErrors}\nPlease fix these issues.`,
402
+ };
403
+ this.messages.push(lintMsg);
404
+ saveMessage(this.sessionId, lintMsg);
405
+ continue; // skip the normal tool message push
406
+ }
407
+ }
408
+
350
409
  const toolMsg: ChatCompletionMessageParam = {
351
410
  role: "tool",
352
411
  tool_call_id: toolCall.id,
@@ -530,7 +589,14 @@ export class CodingAgent {
530
589
  }
531
590
  }
532
591
 
533
- const result = await executeTool(toolCall.name, args, this.cwd);
592
+ // Route to MCP or built-in tool
593
+ const mcpParsed = parseMCPToolName(toolCall.name);
594
+ let result: string;
595
+ if (mcpParsed) {
596
+ result = await callMCPTool(mcpParsed.serverName, mcpParsed.toolName, args);
597
+ } else {
598
+ result = await executeTool(toolCall.name, args, this.cwd);
599
+ }
534
600
  this.options.onToolResult?.(toolCall.name, result);
535
601
 
536
602
  // Auto-commit after successful write_file
@@ -542,6 +608,23 @@ export class CodingAgent {
542
608
  }
543
609
  }
544
610
 
611
+ // Auto-lint after successful write_file
612
+ if (this.autoLintEnabled && this.detectedLinter && toolCall.name === "write_file" && result.startsWith("✅")) {
613
+ const filePath = String(args.path ?? "");
614
+ const lintErrors = runLinter(this.detectedLinter, filePath, this.cwd);
615
+ if (lintErrors) {
616
+ this.options.onLintResult?.(filePath, lintErrors);
617
+ const lintMsg: ChatCompletionMessageParam = {
618
+ role: "tool",
619
+ tool_call_id: toolCall.id,
620
+ content: result + `\n\nLint errors detected in ${filePath}:\n${lintErrors}\nPlease fix these issues.`,
621
+ };
622
+ this.messages.push(lintMsg);
623
+ saveMessage(this.sessionId, lintMsg);
624
+ continue;
625
+ }
626
+ }
627
+
545
628
  const toolMsg: ChatCompletionMessageParam = {
546
629
  role: "tool",
547
630
  tool_call_id: toolCall.id,
@@ -712,6 +795,98 @@ export class CodingAgent {
712
795
  return this.cwd;
713
796
  }
714
797
 
798
+ getProjectRulesSource(): string | null {
799
+ return this.projectRulesSource;
800
+ }
801
+
802
+ setArchitectModel(model: string | null): void {
803
+ this.architectModel = model;
804
+ }
805
+
806
+ getArchitectModel(): string | null {
807
+ return this.architectModel;
808
+ }
809
+
810
+ setAutoLint(enabled: boolean): void {
811
+ this.autoLintEnabled = enabled;
812
+ }
813
+
814
+ isAutoLintEnabled(): boolean {
815
+ return this.autoLintEnabled;
816
+ }
817
+
818
+ getDetectedLinter(): { command: string; name: string } | null {
819
+ return this.detectedLinter;
820
+ }
821
+
822
+ setDetectedLinter(linter: { command: string; name: string } | null): void {
823
+ this.detectedLinter = linter;
824
+ }
825
+
826
+ /**
827
+ * Run the architect model to generate a plan, then feed to editor model
828
+ */
829
+ private async architectChat(userMessage: string): Promise<string> {
830
+ const architectSystemPrompt = "You are a senior software architect. Analyze the request and create a detailed implementation plan. List exactly which files to modify, what changes to make, and in what order. Do NOT write code — just plan.";
831
+
832
+ let plan = "";
833
+
834
+ if (this.providerType === "anthropic" && this.anthropicClient) {
835
+ const response = await this.anthropicClient.messages.create({
836
+ model: this.architectModel!,
837
+ max_tokens: this.maxTokens,
838
+ system: architectSystemPrompt,
839
+ messages: [{ role: "user", content: userMessage }],
840
+ });
841
+ plan = response.content
842
+ .filter((b): b is Anthropic.TextBlock => b.type === "text")
843
+ .map((b) => b.text)
844
+ .join("");
845
+ } else {
846
+ const response = await this.client.chat.completions.create({
847
+ model: this.architectModel!,
848
+ max_tokens: this.maxTokens,
849
+ messages: [
850
+ { role: "system", content: architectSystemPrompt },
851
+ { role: "user", content: userMessage },
852
+ ],
853
+ });
854
+ plan = response.choices[0]?.message?.content ?? "(no plan generated)";
855
+ }
856
+
857
+ this.options.onArchitectPlan?.(plan);
858
+
859
+ // Feed plan + original request to the editor model
860
+ const editorPrompt = `## Architect Plan\n${plan}\n\n## Original Request\n${userMessage}\n\nExecute the plan above. Follow it step by step.`;
861
+ return this.chat(editorPrompt);
862
+ }
863
+
864
+ getMCPServerCount(): number {
865
+ return this.mcpServers.length;
866
+ }
867
+
868
+ getMCPServers(): ConnectedServer[] {
869
+ return this.mcpServers;
870
+ }
871
+
872
+ async disconnectMCP(): Promise<void> {
873
+ await disconnectAll();
874
+ this.mcpServers = [];
875
+ this.tools = FILE_TOOLS;
876
+ }
877
+
878
+ async reconnectMCP(): Promise<void> {
879
+ await this.disconnectMCP();
880
+ const mcpConfig = loadMCPConfig(this.cwd);
881
+ if (Object.keys(mcpConfig.mcpServers).length > 0) {
882
+ this.mcpServers = await connectToServers(mcpConfig, this.options.onMCPStatus);
883
+ if (this.mcpServers.length > 0) {
884
+ const mcpTools = getAllMCPTools(this.mcpServers);
885
+ this.tools = [...FILE_TOOLS, ...mcpTools];
886
+ }
887
+ }
888
+ }
889
+
715
890
  reset(): void {
716
891
  const systemMsg = this.messages[0];
717
892
  this.messages = [systemMsg];
package/src/cli.ts CHANGED
@@ -2,7 +2,7 @@
2
2
 
3
3
  /**
4
4
  * Codemaxxing CLI entry point
5
- * Routes subcommands (login, auth) to auth-cli, everything else to the TUI
5
+ * Routes subcommands (login, auth, exec) to handlers, everything else to the TUI
6
6
  */
7
7
 
8
8
  import { spawn } from "node:child_process";
@@ -27,6 +27,10 @@ if (subcmd === "login" || subcmd === "auth") {
27
27
  });
28
28
 
29
29
  child.on("exit", (code) => process.exit(code ?? 0));
30
+ } else if (subcmd === "exec") {
31
+ // Headless/CI mode — no TUI
32
+ const { runExec } = await import("./exec.js");
33
+ await runExec(process.argv.slice(3));
30
34
  } else {
31
35
  // TUI mode — import directly (not spawn) to preserve raw stdin
32
36
  await import("./index.js");
package/src/config.ts CHANGED
@@ -22,6 +22,8 @@ export interface CodemaxxingConfig {
22
22
  contextFiles: number;
23
23
  maxTokens: number;
24
24
  contextCompressionThreshold?: number;
25
+ architectModel?: string;
26
+ autoLint?: boolean;
25
27
  };
26
28
  }
27
29
 
@@ -85,6 +87,7 @@ codemaxxing — your code. your model. no excuses.
85
87
 
86
88
  Usage:
87
89
  codemaxxing [options]
90
+ codemaxxing exec "prompt" [exec-options]
88
91
 
89
92
  Options:
90
93
  -m, --model <model> Model name to use
@@ -93,11 +96,19 @@ Options:
93
96
  -u, --base-url <url> Base URL for the provider API
94
97
  -h, --help Show this help
95
98
 
99
+ Exec options (headless/CI mode):
100
+ --auto-approve Skip tool approval prompts
101
+ --json Output JSON instead of streaming text
102
+ -m, --model <model> Model to use
103
+ -p, --provider <name> Provider profile
104
+
96
105
  Examples:
97
106
  codemaxxing # Auto-detect local LLM
98
107
  codemaxxing -m gpt-4o -u https://api.openai.com/v1 -k sk-...
99
108
  codemaxxing -p openrouter # Use saved provider profile
100
109
  codemaxxing -m qwen3.5-35b # Override model only
110
+ codemaxxing exec "fix the failing tests" # Headless mode
111
+ echo "explain this code" | codemaxxing exec # Pipe input
101
112
 
102
113
  Config: ~/.codemaxxing/settings.json
103
114
  `);