codemaxxing 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,209 @@
1
+ # codemaxxing 💪
2
+
3
+ > your code. your model. no excuses.
4
+
5
+ Open-source terminal coding agent. Connect **any** LLM — local or remote — and start building. Like Claude Code, but you bring your own model.
6
+
7
+ ## Why?
8
+
9
+ Every coding agent locks you into their API. Codemaxxing doesn't. Run it with LM Studio, Ollama, OpenRouter, OpenAI, or any OpenAI-compatible endpoint. Your machine, your model, your rules.
10
+
11
+ ## Quick Install (Recommended)
12
+
13
+ **Linux / macOS:**
14
+ ```bash
15
+ bash -c "$(curl -fsSL https://raw.githubusercontent.com/MarcosV6/codemaxxing/main/install.sh)"
16
+ ```
17
+
18
+ **Windows (PowerShell as Administrator):**
19
+ ```powershell
20
+ curl.exe -fsSL -o $env:TEMP\install-codemaxxing.bat https://raw.githubusercontent.com/MarcosV6/codemaxxing/main/install.bat; & $env:TEMP\install-codemaxxing.bat
21
+ ```
22
+
23
+ **Windows (CMD as Administrator):**
24
+ ```
25
+ curl -fsSL -o %TEMP%\install-codemaxxing.bat https://raw.githubusercontent.com/MarcosV6/codemaxxing/main/install.bat && %TEMP%\install-codemaxxing.bat
26
+ ```
27
+
28
+ > **Note:** Restart your terminal after installation to ensure everything works.
29
+
30
+ ## Manual Installation
31
+
32
+ **Prerequisites:** [Node.js](https://nodejs.org) 20 or later.
33
+
34
+ **NPM:**
35
+ ```bash
36
+ npm install -g codemaxxing
37
+ ```
38
+
39
+ ## Quick Start
40
+
41
+ ### 1. Start Your LLM
42
+
43
+ You need a local LLM server running. The easiest option:
44
+
45
+ 1. Download [LM Studio](https://lmstudio.ai)
46
+ 2. Search for a model (e.g. **Qwen 2.5 Coder 7B Q4_K_M** — good for testing)
47
+ 3. Load the model
48
+ 4. Click **Start Server** (it runs on port 1234 by default)
49
+
50
+ ### 2. Run It
51
+
52
+ ```bash
53
+ codemaxxing
54
+ ```
55
+
56
+ That's it. Codemaxxing auto-detects LM Studio and connects. Start coding.
57
+
58
+ ---
59
+
60
+ ## Advanced Setup
61
+
62
+ **With a remote provider (OpenAI, OpenRouter, etc.):**
63
+
64
+ ```bash
65
+ codemaxxing --base-url https://api.openai.com/v1 --api-key sk-... --model gpt-4o
66
+ ```
67
+
68
+ **With a saved provider profile:**
69
+
70
+ ```bash
71
+ codemaxxing --provider openrouter
72
+ ```
73
+
74
+ **Auto-detected local servers:** LM Studio (`:1234`), Ollama (`:11434`), vLLM (`:8000`)
75
+
76
+ ## Features
77
+
78
+ ### 🔥 Streaming Tokens
79
+ Real-time token display. See the model think, not just the final answer.
80
+
81
+ ### ⚠️ Tool Approval
82
+ Dangerous operations (file writes, shell commands) require your approval. Press `y` to allow, `n` to deny, `a` to always allow for the session.
83
+
84
+ ### 📂 Smart Context (Repo Map)
85
+ Automatically scans your codebase and builds a map of functions, classes, and types. The model knows what exists where without reading every file.
86
+
87
+ ### 🔀 Git Integration
88
+ Opt-in git commands built in:
89
+ - `/commit <message>` — stage all + commit
90
+ - `/push` — push to remote
91
+ - `/diff` — show changes
92
+ - `/undo` — revert last codemaxxing commit
93
+ - `/git on` / `/git off` — toggle auto-commits
94
+
95
+ ### 💾 Session Persistence
96
+ Conversations auto-save to SQLite. Pick up where you left off:
97
+ - `/sessions` — list past sessions
98
+ - `/resume` — interactive session picker
99
+
100
+ ### 🔄 Multi-Provider
101
+ Switch models mid-session without restarting:
102
+ - `/model gpt-4o` — switch to a different model
103
+ - `/models` — list available models from your provider
104
+
105
+ ### 📋 Smart Paste
106
+ Paste large code blocks without breaking the UI. Multi-line pastes collapse into `[Pasted text #1 +N lines]` badges (like Claude Code).
107
+
108
+ ### ⌨️ Slash Commands
109
+ Type `/` for autocomplete suggestions. Arrow keys to navigate, Tab or Enter to select.
110
+
111
+ ## Commands
112
+
113
+ | Command | Description |
114
+ |---------|-------------|
115
+ | `/help` | Show all commands |
116
+ | `/model <name>` | Switch model mid-session |
117
+ | `/models` | List available models |
118
+ | `/map` | Show repository map |
119
+ | `/sessions` | List past sessions |
120
+ | `/resume` | Resume a past session |
121
+ | `/reset` | Clear conversation |
122
+ | `/context` | Show message count + tokens |
123
+ | `/diff` | Show git changes |
124
+ | `/commit <msg>` | Stage all + commit |
125
+ | `/push` | Push to remote |
126
+ | `/undo` | Revert last codemaxxing commit |
127
+ | `/git on/off` | Toggle auto-commits |
128
+ | `/quit` | Exit |
129
+
130
+ ## CLI Flags
131
+
132
+ ```
133
+ -m, --model <model> Model name to use
134
+ -p, --provider <name> Provider profile from config
135
+ -k, --api-key <key> API key for the provider
136
+ -u, --base-url <url> Base URL for the provider API
137
+ -h, --help Show help
138
+ ```
139
+
140
+ ## Config
141
+
142
+ Settings are stored in `~/.codemaxxing/settings.json`:
143
+
144
+ ```json
145
+ {
146
+ "provider": {
147
+ "baseUrl": "http://localhost:1234/v1",
148
+ "apiKey": "not-needed",
149
+ "model": "auto"
150
+ },
151
+ "providers": {
152
+ "local": {
153
+ "name": "Local (LM Studio/Ollama)",
154
+ "baseUrl": "http://localhost:1234/v1",
155
+ "apiKey": "not-needed",
156
+ "model": "auto"
157
+ },
158
+ "openrouter": {
159
+ "name": "OpenRouter",
160
+ "baseUrl": "https://openrouter.ai/api/v1",
161
+ "apiKey": "sk-or-...",
162
+ "model": "anthropic/claude-sonnet-4"
163
+ },
164
+ "openai": {
165
+ "name": "OpenAI",
166
+ "baseUrl": "https://api.openai.com/v1",
167
+ "apiKey": "sk-...",
168
+ "model": "gpt-4o"
169
+ }
170
+ },
171
+ "defaults": {
172
+ "autoApprove": false,
173
+ "maxTokens": 8192
174
+ }
175
+ }
176
+ ```
177
+
178
+ ## Tools
179
+
180
+ Codemaxxing gives the model these tools:
181
+
182
+ - **read_file** — Read file contents (safe)
183
+ - **write_file** — Write/create files (requires approval)
184
+ - **list_files** — List directory contents (safe)
185
+ - **search_files** — Search for patterns across files (safe)
186
+ - **run_command** — Execute shell commands (requires approval)
187
+
188
+ ## Project Context
189
+
190
+ Drop a `CODEMAXXING.md` file in your project root to give the model extra context about your codebase, conventions, or instructions. It's automatically included in the system prompt.
191
+
192
+ ## Stack
193
+
194
+ - **Runtime:** Node.js + TypeScript
195
+ - **TUI:** [Ink](https://github.com/vadimdemedes/ink) (React for the terminal)
196
+ - **LLM SDK:** [OpenAI SDK](https://github.com/openai/openai-node) (works with any compatible API)
197
+ - **Sessions:** [better-sqlite3](https://github.com/WiseLibs/better-sqlite3)
198
+ - **Zero cloud dependencies** — everything runs locally
199
+
200
+ ## Inspired By
201
+
202
+ Built by studying the best:
203
+ - [Aider](https://github.com/paul-gauthier/aider) — repo map concept, auto-commit
204
+ - [Claude Code](https://docs.anthropic.com/en/docs/claude-code) — permission system, paste handling
205
+ - [OpenCode](https://github.com/opencode-ai/opencode) — multi-provider, SQLite sessions
206
+
207
+ ## License
208
+
209
+ MIT
package/dist/agent.d.ts ADDED
@@ -0,0 +1,65 @@
1
+ import type { ProviderConfig } from "./config.js";
2
+ export interface AgentOptions {
3
+ provider: ProviderConfig;
4
+ cwd: string;
5
+ maxTokens: number;
6
+ autoApprove: boolean;
7
+ onToken?: (token: string) => void;
8
+ onToolCall?: (name: string, args: Record<string, unknown>) => void;
9
+ onToolResult?: (name: string, result: string) => void;
10
+ onThinking?: (text: string) => void;
11
+ onToolApproval?: (name: string, args: Record<string, unknown>) => Promise<"yes" | "no" | "always">;
12
+ onGitCommit?: (message: string) => void;
13
+ }
14
+ export declare class CodingAgent {
15
+ private options;
16
+ private client;
17
+ private messages;
18
+ private tools;
19
+ private cwd;
20
+ private maxTokens;
21
+ private autoApprove;
22
+ private model;
23
+ private alwaysApproved;
24
+ private gitEnabled;
25
+ private autoCommitEnabled;
26
+ private repoMap;
27
+ private sessionId;
28
+ constructor(options: AgentOptions);
29
+ /**
30
+ * Initialize the agent — call this after constructor to build async context
31
+ */
32
+ init(): Promise<void>;
33
+ /**
34
+ * Resume an existing session by loading its messages
35
+ */
36
+ resume(sessionId: string): Promise<void>;
37
+ getSessionId(): string;
38
+ /**
39
+ * Get the current repo map
40
+ */
41
+ getRepoMap(): string;
42
+ /**
43
+ * Rebuild the repo map (useful after file changes)
44
+ */
45
+ refreshRepoMap(): Promise<string>;
46
+ /**
47
+ * Stream a response from the model.
48
+ * Assembles tool call chunks, emits tokens in real-time,
49
+ * and loops until the model responds with text (no more tool calls).
50
+ */
51
+ chat(userMessage: string): Promise<string>;
52
+ /**
53
+ * Switch to a different model mid-session
54
+ */
55
+ switchModel(model: string, baseUrl?: string, apiKey?: string): void;
56
+ getModel(): string;
57
+ setAutoCommit(enabled: boolean): void;
58
+ isGitEnabled(): boolean;
59
+ getContextLength(): number;
60
+ /**
61
+ * Estimate token count across all messages (~4 chars per token)
62
+ */
63
+ estimateTokens(): number;
64
+ reset(): void;
65
+ }
package/dist/agent.js ADDED
@@ -0,0 +1,269 @@
1
+ import OpenAI from "openai";
2
+ import { FILE_TOOLS, executeTool } from "./tools/files.js";
3
+ import { buildProjectContext, getSystemPrompt } from "./utils/context.js";
4
+ import { isGitRepo, autoCommit } from "./utils/git.js";
5
+ import { createSession, saveMessage, updateTokenEstimate, loadMessages } from "./utils/sessions.js";
6
+ // Tools that can modify your project — require approval
7
+ const DANGEROUS_TOOLS = new Set(["write_file", "run_command"]);
8
+ export class CodingAgent {
9
+ options;
10
+ client;
11
+ messages = [];
12
+ tools = FILE_TOOLS;
13
+ cwd;
14
+ maxTokens;
15
+ autoApprove;
16
+ model;
17
+ alwaysApproved = new Set();
18
+ gitEnabled;
19
+ autoCommitEnabled = false;
20
+ repoMap = "";
21
+ sessionId = "";
22
+ constructor(options) {
23
+ this.options = options;
24
+ this.client = new OpenAI({
25
+ baseURL: options.provider.baseUrl,
26
+ apiKey: options.provider.apiKey,
27
+ });
28
+ this.cwd = options.cwd;
29
+ this.maxTokens = options.maxTokens;
30
+ this.autoApprove = options.autoApprove;
31
+ this.model = options.provider.model;
32
+ this.gitEnabled = isGitRepo(this.cwd);
33
+ }
34
+ /**
35
+ * Initialize the agent — call this after constructor to build async context
36
+ */
37
+ async init() {
38
+ const context = await buildProjectContext(this.cwd);
39
+ const systemPrompt = await getSystemPrompt(context);
40
+ this.messages = [
41
+ { role: "system", content: systemPrompt },
42
+ ];
43
+ // Create a new session
44
+ this.sessionId = createSession(this.cwd, this.model);
45
+ saveMessage(this.sessionId, this.messages[0]);
46
+ }
47
+ /**
48
+ * Resume an existing session by loading its messages
49
+ */
50
+ async resume(sessionId) {
51
+ const messages = loadMessages(sessionId);
52
+ if (messages.length === 0) {
53
+ throw new Error(`Session ${sessionId} not found or empty`);
54
+ }
55
+ this.messages = messages;
56
+ this.sessionId = sessionId;
57
+ }
58
+ getSessionId() {
59
+ return this.sessionId;
60
+ }
61
+ /**
62
+ * Get the current repo map
63
+ */
64
+ getRepoMap() {
65
+ return this.repoMap;
66
+ }
67
+ /**
68
+ * Rebuild the repo map (useful after file changes)
69
+ */
70
+ async refreshRepoMap() {
71
+ const { buildRepoMap } = await import("./utils/repomap.js");
72
+ this.repoMap = await buildRepoMap(this.cwd);
73
+ return this.repoMap;
74
+ }
75
+ /**
76
+ * Stream a response from the model.
77
+ * Assembles tool call chunks, emits tokens in real-time,
78
+ * and loops until the model responds with text (no more tool calls).
79
+ */
80
+ async chat(userMessage) {
81
+ const userMsg = { role: "user", content: userMessage };
82
+ this.messages.push(userMsg);
83
+ saveMessage(this.sessionId, userMsg);
84
+ let iterations = 0;
85
+ const MAX_ITERATIONS = 20;
86
+ while (iterations < MAX_ITERATIONS) {
87
+ iterations++;
88
+ const stream = await this.client.chat.completions.create({
89
+ model: this.model,
90
+ messages: this.messages,
91
+ tools: this.tools,
92
+ max_tokens: this.maxTokens,
93
+ stream: true,
94
+ });
95
+ // Accumulate the streamed response
96
+ let contentText = "";
97
+ let thinkingText = "";
98
+ let inThinking = false;
99
+ const toolCalls = new Map();
100
+ for await (const chunk of stream) {
101
+ const delta = chunk.choices?.[0]?.delta;
102
+ if (!delta)
103
+ continue;
104
+ // Handle content tokens (the actual response text)
105
+ if (delta.content) {
106
+ const token = delta.content;
107
+ // Detect <think> blocks from reasoning models (Qwen, DeepSeek, etc.)
108
+ if (token.includes("<think>")) {
109
+ inThinking = true;
110
+ thinkingText = "";
111
+ continue;
112
+ }
113
+ if (inThinking) {
114
+ if (token.includes("</think>")) {
115
+ inThinking = false;
116
+ this.options.onThinking?.(thinkingText.trim());
117
+ continue;
118
+ }
119
+ thinkingText += token;
120
+ continue;
121
+ }
122
+ contentText += token;
123
+ this.options.onToken?.(token);
124
+ }
125
+ // Handle tool call chunks — they arrive in pieces
126
+ if (delta.tool_calls) {
127
+ for (const tc of delta.tool_calls) {
128
+ const idx = tc.index;
129
+ if (!toolCalls.has(idx)) {
130
+ toolCalls.set(idx, {
131
+ id: tc.id ?? "",
132
+ name: tc.function?.name ?? "",
133
+ arguments: "",
134
+ });
135
+ }
136
+ const existing = toolCalls.get(idx);
137
+ if (tc.id)
138
+ existing.id = tc.id;
139
+ if (tc.function?.name)
140
+ existing.name = tc.function.name;
141
+ if (tc.function?.arguments)
142
+ existing.arguments += tc.function.arguments;
143
+ }
144
+ }
145
+ }
146
+ // Build the assistant message for history
147
+ const assistantMessage = { role: "assistant", content: contentText || null };
148
+ if (toolCalls.size > 0) {
149
+ assistantMessage.tool_calls = Array.from(toolCalls.values()).map((tc) => ({
150
+ id: tc.id,
151
+ type: "function",
152
+ function: { name: tc.name, arguments: tc.arguments },
153
+ }));
154
+ }
155
+ this.messages.push(assistantMessage);
156
+ saveMessage(this.sessionId, assistantMessage);
157
+ // If no tool calls, we're done — return the text
158
+ if (toolCalls.size === 0) {
159
+ updateTokenEstimate(this.sessionId, this.estimateTokens());
160
+ return contentText || "(empty response)";
161
+ }
162
+ // Process tool calls
163
+ for (const toolCall of toolCalls.values()) {
164
+ let args = {};
165
+ try {
166
+ args = JSON.parse(toolCall.arguments);
167
+ }
168
+ catch {
169
+ args = {};
170
+ }
171
+ this.options.onToolCall?.(toolCall.name, args);
172
+ // Check approval for dangerous tools
173
+ if (DANGEROUS_TOOLS.has(toolCall.name) && !this.autoApprove && !this.alwaysApproved.has(toolCall.name)) {
174
+ if (this.options.onToolApproval) {
175
+ const decision = await this.options.onToolApproval(toolCall.name, args);
176
+ if (decision === "no") {
177
+ const denied = `Tool call "${toolCall.name}" was denied by the user.`;
178
+ this.options.onToolResult?.(toolCall.name, denied);
179
+ const deniedMsg = {
180
+ role: "tool",
181
+ tool_call_id: toolCall.id,
182
+ content: denied,
183
+ };
184
+ this.messages.push(deniedMsg);
185
+ saveMessage(this.sessionId, deniedMsg);
186
+ continue;
187
+ }
188
+ if (decision === "always") {
189
+ this.alwaysApproved.add(toolCall.name);
190
+ }
191
+ }
192
+ }
193
+ const result = await executeTool(toolCall.name, args, this.cwd);
194
+ this.options.onToolResult?.(toolCall.name, result);
195
+ // Auto-commit after successful write_file (only if enabled)
196
+ if (this.gitEnabled && this.autoCommitEnabled && toolCall.name === "write_file" && result.startsWith("✅")) {
197
+ const path = String(args.path ?? "unknown");
198
+ const committed = autoCommit(this.cwd, path, "write");
199
+ if (committed) {
200
+ this.options.onGitCommit?.(`write ${path}`);
201
+ }
202
+ }
203
+ const toolMsg = {
204
+ role: "tool",
205
+ tool_call_id: toolCall.id,
206
+ content: result,
207
+ };
208
+ this.messages.push(toolMsg);
209
+ saveMessage(this.sessionId, toolMsg);
210
+ }
211
+ // Reset content for next iteration (tool results → model responds again)
212
+ // The onToken callback will stream the next response too
213
+ }
214
+ return "Max iterations reached. The agent may be stuck in a loop.";
215
+ }
216
+ /**
217
+ * Switch to a different model mid-session
218
+ */
219
+ switchModel(model, baseUrl, apiKey) {
220
+ this.model = model;
221
+ if (baseUrl || apiKey) {
222
+ this.client = new OpenAI({
223
+ baseURL: baseUrl ?? this.options.provider.baseUrl,
224
+ apiKey: apiKey ?? this.options.provider.apiKey,
225
+ });
226
+ }
227
+ }
228
+ getModel() {
229
+ return this.model;
230
+ }
231
+ setAutoCommit(enabled) {
232
+ this.autoCommitEnabled = enabled;
233
+ }
234
+ isGitEnabled() {
235
+ return this.gitEnabled;
236
+ }
237
+ getContextLength() {
238
+ return this.messages.length;
239
+ }
240
+ /**
241
+ * Estimate token count across all messages (~4 chars per token)
242
+ */
243
+ estimateTokens() {
244
+ let chars = 0;
245
+ for (const msg of this.messages) {
246
+ if (typeof msg.content === "string") {
247
+ chars += msg.content.length;
248
+ }
249
+ else if (Array.isArray(msg.content)) {
250
+ for (const part of msg.content) {
251
+ if ("text" in part)
252
+ chars += part.text.length;
253
+ }
254
+ }
255
+ // Count tool call arguments too
256
+ if ("tool_calls" in msg && Array.isArray(msg.tool_calls)) {
257
+ for (const tc of msg.tool_calls) {
258
+ chars += (tc.function?.arguments?.length ?? 0);
259
+ chars += (tc.function?.name?.length ?? 0);
260
+ }
261
+ }
262
+ }
263
+ return Math.ceil(chars / 4);
264
+ }
265
+ reset() {
266
+ const systemMsg = this.messages[0];
267
+ this.messages = [systemMsg];
268
+ }
269
+ }
package/dist/config.d.ts ADDED
@@ -0,0 +1,41 @@
1
+ export interface ProviderConfig {
2
+ baseUrl: string;
3
+ apiKey: string;
4
+ model: string;
5
+ }
6
+ export interface ProviderProfile extends ProviderConfig {
7
+ name: string;
8
+ }
9
+ export interface CodemaxxingConfig {
10
+ provider: ProviderConfig;
11
+ providers?: Record<string, ProviderProfile>;
12
+ defaults: {
13
+ autoApprove: boolean;
14
+ contextFiles: number;
15
+ maxTokens: number;
16
+ };
17
+ }
18
+ export interface CLIArgs {
19
+ model?: string;
20
+ provider?: string;
21
+ apiKey?: string;
22
+ baseUrl?: string;
23
+ }
24
+ /**
25
+ * Parse CLI arguments
26
+ */
27
+ export declare function parseCLIArgs(): CLIArgs;
28
+ export declare function loadConfig(): CodemaxxingConfig;
29
+ /**
30
+ * Apply CLI overrides to a provider config
31
+ */
32
+ export declare function applyOverrides(config: CodemaxxingConfig, args: CLIArgs): CodemaxxingConfig;
33
+ export declare function getConfigPath(): string;
34
+ /**
35
+ * Auto-detect local LLM servers
36
+ */
37
+ export declare function detectLocalProvider(): Promise<ProviderConfig | null>;
38
+ /**
39
+ * List available models from a provider endpoint
40
+ */
41
+ export declare function listModels(baseUrl: string, apiKey: string): Promise<string[]>;