codemaxxing 0.1.10 → 0.1.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/dist/index.js +120 -104
  2. package/package.json +1 -1
  3. package/src/index.tsx +123 -105
package/dist/index.js CHANGED
@@ -29,6 +29,7 @@ function formatTimeAgo(date) {
29
29
  // ── Slash Commands ──
30
30
  const SLASH_COMMANDS = [
31
31
  { cmd: "/help", desc: "show commands" },
32
+ { cmd: "/connect", desc: "retry LLM connection" },
32
33
  { cmd: "/login", desc: "set up authentication" },
33
34
  { cmd: "/map", desc: "show repository map" },
34
35
  { cmd: "/reset", desc: "clear conversation" },
@@ -127,115 +128,124 @@ function App() {
127
128
  pasteEvents.on("paste", handler);
128
129
  return () => { pasteEvents.off("paste", handler); };
129
130
  }, []);
130
- // Initialize agent
131
- useEffect(() => {
132
- (async () => {
133
- const cliArgs = parseCLIArgs();
134
- const rawConfig = loadConfig();
135
- const config = applyOverrides(rawConfig, cliArgs);
136
- let provider = config.provider;
137
- const info = [];
138
- if (provider.model === "auto" || (provider.baseUrl === "http://localhost:1234/v1" && !cliArgs.baseUrl)) {
139
- info.push("Detecting local LLM server...");
131
+ // Connect/reconnect to LLM provider
132
+ const connectToProvider = useCallback(async (isRetry = false) => {
133
+ const cliArgs = parseCLIArgs();
134
+ const rawConfig = loadConfig();
135
+ const config = applyOverrides(rawConfig, cliArgs);
136
+ let provider = config.provider;
137
+ const info = [];
138
+ if (isRetry) {
139
+ info.push("Retrying connection...");
140
+ setConnectionInfo([...info]);
141
+ }
142
+ if (provider.model === "auto" || (provider.baseUrl === "http://localhost:1234/v1" && !cliArgs.baseUrl)) {
143
+ info.push("Detecting local LLM server...");
144
+ setConnectionInfo([...info]);
145
+ const detected = await detectLocalProvider();
146
+ if (detected) {
147
+ // Keep CLI model override if specified
148
+ if (cliArgs.model)
149
+ detected.model = cliArgs.model;
150
+ provider = detected;
151
+ info.push(`✔ Connected to ${provider.baseUrl} → ${provider.model}`);
140
152
  setConnectionInfo([...info]);
141
- const detected = await detectLocalProvider();
142
- if (detected) {
143
- // Keep CLI model override if specified
144
- if (cliArgs.model)
145
- detected.model = cliArgs.model;
146
- provider = detected;
147
- info.push(`✔ Connected to ${provider.baseUrl} → ${provider.model}`);
148
- setConnectionInfo([...info]);
149
- }
150
- else {
151
- info.push("✗ No local LLM server found. Start LM Studio or Ollama.");
152
- info.push(" Use --base-url and --api-key to connect to a remote provider.");
153
- info.push(" Type /login to authenticate with a cloud provider.");
154
- setConnectionInfo([...info]);
155
- setReady(true);
156
- return;
157
- }
158
153
  }
159
154
  else {
160
- info.push(`Provider: ${provider.baseUrl}`);
161
- info.push(`Model: ${provider.model}`);
162
- setConnectionInfo([...info]);
163
- }
164
- const cwd = process.cwd();
165
- // Git info
166
- if (isGitRepo(cwd)) {
167
- const branch = getBranch(cwd);
168
- const status = getStatus(cwd);
169
- info.push(`Git: ${branch} (${status})`);
155
+ info.push("✗ No local LLM server found.");
156
+ info.push(" /connect — retry after starting LM Studio or Ollama");
157
+ info.push(" /login — authenticate with a cloud provider");
170
158
  setConnectionInfo([...info]);
171
- }
172
- const a = new CodingAgent({
173
- provider,
174
- cwd,
175
- maxTokens: config.defaults.maxTokens,
176
- autoApprove: config.defaults.autoApprove,
177
- onToken: (token) => {
178
- // Switch from big spinner to streaming mode
179
- setLoading(false);
180
- setStreaming(true);
181
- // Update the current streaming response in-place
182
- setMessages((prev) => {
183
- const lastIdx = prev.length - 1;
184
- const last = prev[lastIdx];
185
- if (last && last.type === "response" && last._streaming) {
186
- return [
187
- ...prev.slice(0, lastIdx),
188
- { ...last, text: last.text + token },
189
- ];
190
- }
191
- // First token of a new response
192
- return [...prev, { id: msgId++, type: "response", text: token, _streaming: true }];
193
- });
194
- },
195
- onToolCall: (name, args) => {
196
- setLoading(true);
197
- setSpinnerMsg("Executing tools...");
198
- const argStr = Object.entries(args)
199
- .map(([k, v]) => {
200
- const val = String(v);
201
- return val.length > 60 ? val.slice(0, 60) + "..." : val;
202
- })
203
- .join(", ");
204
- addMsg("tool", `${name}(${argStr})`);
205
- },
206
- onToolResult: (_name, result) => {
207
- const numLines = result.split("\n").length;
208
- const size = result.length > 1024 ? `${(result.length / 1024).toFixed(1)}KB` : `${result.length}B`;
209
- addMsg("tool-result", `└ ${numLines} lines (${size})`);
210
- },
211
- onThinking: (text) => {
212
- if (text.length > 0) {
213
- addMsg("info", `💭 Thought for ${text.split(/\s+/).length} words`);
159
+ setReady(true);
160
+ return;
161
+ }
162
+ }
163
+ else {
164
+ info.push(`Provider: ${provider.baseUrl}`);
165
+ info.push(`Model: ${provider.model}`);
166
+ setConnectionInfo([...info]);
167
+ }
168
+ const cwd = process.cwd();
169
+ // Git info
170
+ if (isGitRepo(cwd)) {
171
+ const branch = getBranch(cwd);
172
+ const status = getStatus(cwd);
173
+ info.push(`Git: ${branch} (${status})`);
174
+ setConnectionInfo([...info]);
175
+ }
176
+ const a = new CodingAgent({
177
+ provider,
178
+ cwd,
179
+ maxTokens: config.defaults.maxTokens,
180
+ autoApprove: config.defaults.autoApprove,
181
+ onToken: (token) => {
182
+ // Switch from big spinner to streaming mode
183
+ setLoading(false);
184
+ setStreaming(true);
185
+ // Update the current streaming response in-place
186
+ setMessages((prev) => {
187
+ const lastIdx = prev.length - 1;
188
+ const last = prev[lastIdx];
189
+ if (last && last.type === "response" && last._streaming) {
190
+ return [
191
+ ...prev.slice(0, lastIdx),
192
+ { ...last, text: last.text + token },
193
+ ];
214
194
  }
215
- },
216
- onGitCommit: (message) => {
217
- addMsg("info", `📝 Auto-committed: ${message}`);
218
- },
219
- onContextCompressed: (oldTokens, newTokens) => {
220
- const saved = oldTokens - newTokens;
221
- const savedStr = saved >= 1000 ? `${(saved / 1000).toFixed(1)}k` : String(saved);
222
- addMsg("info", `📦 Context compressed (~${savedStr} tokens freed)`);
223
- },
224
- contextCompressionThreshold: config.defaults.contextCompressionThreshold,
225
- onToolApproval: (name, args, diff) => {
226
- return new Promise((resolve) => {
227
- setApproval({ tool: name, args, diff, resolve });
228
- setLoading(false);
229
- });
230
- },
231
- });
232
- // Initialize async context (repo map)
233
- await a.init();
234
- setAgent(a);
235
- setModelName(provider.model);
236
- providerRef.current = { baseUrl: provider.baseUrl, apiKey: provider.apiKey };
237
- setReady(true);
238
- })();
195
+ // First token of a new response
196
+ return [...prev, { id: msgId++, type: "response", text: token, _streaming: true }];
197
+ });
198
+ },
199
+ onToolCall: (name, args) => {
200
+ setLoading(true);
201
+ setSpinnerMsg("Executing tools...");
202
+ const argStr = Object.entries(args)
203
+ .map(([k, v]) => {
204
+ const val = String(v);
205
+ return val.length > 60 ? val.slice(0, 60) + "..." : val;
206
+ })
207
+ .join(", ");
208
+ addMsg("tool", `${name}(${argStr})`);
209
+ },
210
+ onToolResult: (_name, result) => {
211
+ const numLines = result.split("\n").length;
212
+ const size = result.length > 1024 ? `${(result.length / 1024).toFixed(1)}KB` : `${result.length}B`;
213
+ addMsg("tool-result", `└ ${numLines} lines (${size})`);
214
+ },
215
+ onThinking: (text) => {
216
+ if (text.length > 0) {
217
+ addMsg("info", `💭 Thought for ${text.split(/\s+/).length} words`);
218
+ }
219
+ },
220
+ onGitCommit: (message) => {
221
+ addMsg("info", `📝 Auto-committed: ${message}`);
222
+ },
223
+ onContextCompressed: (oldTokens, newTokens) => {
224
+ const saved = oldTokens - newTokens;
225
+ const savedStr = saved >= 1000 ? `${(saved / 1000).toFixed(1)}k` : String(saved);
226
+ addMsg("info", `📦 Context compressed (~${savedStr} tokens freed)`);
227
+ },
228
+ contextCompressionThreshold: config.defaults.contextCompressionThreshold,
229
+ onToolApproval: (name, args, diff) => {
230
+ return new Promise((resolve) => {
231
+ setApproval({ tool: name, args, diff, resolve });
232
+ setLoading(false);
233
+ });
234
+ },
235
+ });
236
+ // Initialize async context (repo map)
237
+ await a.init();
238
+ setAgent(a);
239
+ setModelName(provider.model);
240
+ providerRef.current = { baseUrl: provider.baseUrl, apiKey: provider.apiKey };
241
+ setReady(true);
242
+ if (isRetry) {
243
+ addMsg("info", `✅ Connected to ${provider.model}`);
244
+ }
245
+ }, []);
246
+ // Initialize agent on mount
247
+ useEffect(() => {
248
+ connectToProvider(false);
239
249
  }, []);
240
250
  function addMsg(type, text) {
241
251
  setMessages((prev) => [...prev, { id: msgId++, type, text }]);
@@ -298,10 +308,16 @@ function App() {
298
308
  setLoginPickerIndex(0);
299
309
  return;
300
310
  }
311
+ if (trimmed === "/connect") {
312
+ addMsg("info", "🔄 Reconnecting...");
313
+ await connectToProvider(true);
314
+ return;
315
+ }
301
316
  if (trimmed === "/help") {
302
317
  addMsg("info", [
303
318
  "Commands:",
304
319
  " /help — show this",
320
+ " /connect — retry LLM connection",
305
321
  " /login — authentication setup (run codemaxxing login in terminal)",
306
322
  " /model — switch model mid-session",
307
323
  " /models — list available models",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "codemaxxing",
3
- "version": "0.1.10",
3
+ "version": "0.1.12",
4
4
  "description": "Open-source terminal coding agent. Connect any LLM. Max your code.",
5
5
  "main": "dist/index.js",
6
6
  "bin": {
package/src/index.tsx CHANGED
@@ -29,6 +29,7 @@ function formatTimeAgo(date: Date): string {
29
29
  // ── Slash Commands ──
30
30
  const SLASH_COMMANDS = [
31
31
  { cmd: "/help", desc: "show commands" },
32
+ { cmd: "/connect", desc: "retry LLM connection" },
32
33
  { cmd: "/login", desc: "set up authentication" },
33
34
  { cmd: "/map", desc: "show repository map" },
34
35
  { cmd: "/reset", desc: "clear conversation" },
@@ -163,121 +164,132 @@ function App() {
163
164
  return () => { pasteEvents.off("paste", handler); };
164
165
  }, []);
165
166
 
166
- // Initialize agent
167
- useEffect(() => {
168
- (async () => {
169
- const cliArgs = parseCLIArgs();
170
- const rawConfig = loadConfig();
171
- const config = applyOverrides(rawConfig, cliArgs);
172
- let provider = config.provider;
173
- const info: string[] = [];
174
-
175
- if (provider.model === "auto" || (provider.baseUrl === "http://localhost:1234/v1" && !cliArgs.baseUrl)) {
176
- info.push("Detecting local LLM server...");
167
+ // Connect/reconnect to LLM provider
168
+ const connectToProvider = useCallback(async (isRetry = false) => {
169
+ const cliArgs = parseCLIArgs();
170
+ const rawConfig = loadConfig();
171
+ const config = applyOverrides(rawConfig, cliArgs);
172
+ let provider = config.provider;
173
+ const info: string[] = [];
174
+
175
+ if (isRetry) {
176
+ info.push("Retrying connection...");
177
+ setConnectionInfo([...info]);
178
+ }
179
+
180
+ if (provider.model === "auto" || (provider.baseUrl === "http://localhost:1234/v1" && !cliArgs.baseUrl)) {
181
+ info.push("Detecting local LLM server...");
182
+ setConnectionInfo([...info]);
183
+ const detected = await detectLocalProvider();
184
+ if (detected) {
185
+ // Keep CLI model override if specified
186
+ if (cliArgs.model) detected.model = cliArgs.model;
187
+ provider = detected;
188
+ info.push(`✔ Connected to ${provider.baseUrl} → ${provider.model}`);
177
189
  setConnectionInfo([...info]);
178
- const detected = await detectLocalProvider();
179
- if (detected) {
180
- // Keep CLI model override if specified
181
- if (cliArgs.model) detected.model = cliArgs.model;
182
- provider = detected;
183
- info.push(`✔ Connected to ${provider.baseUrl} → ${provider.model}`);
184
- setConnectionInfo([...info]);
185
- } else {
186
- info.push("✗ No local LLM server found. Start LM Studio or Ollama.");
187
- info.push(" Use --base-url and --api-key to connect to a remote provider.");
188
- info.push(" Type /login to authenticate with a cloud provider.");
189
- setConnectionInfo([...info]);
190
- setReady(true);
191
- return;
192
- }
193
190
  } else {
194
- info.push(`Provider: ${provider.baseUrl}`);
195
- info.push(`Model: ${provider.model}`);
191
+ info.push("✗ No local LLM server found.");
192
+ info.push(" /connect — retry after starting LM Studio or Ollama");
193
+ info.push(" /login — authenticate with a cloud provider");
196
194
  setConnectionInfo([...info]);
195
+ setReady(true);
196
+ return;
197
197
  }
198
+ } else {
199
+ info.push(`Provider: ${provider.baseUrl}`);
200
+ info.push(`Model: ${provider.model}`);
201
+ setConnectionInfo([...info]);
202
+ }
198
203
 
199
- const cwd = process.cwd();
204
+ const cwd = process.cwd();
200
205
 
201
- // Git info
202
- if (isGitRepo(cwd)) {
203
- const branch = getBranch(cwd);
204
- const status = getStatus(cwd);
205
- info.push(`Git: ${branch} (${status})`);
206
- setConnectionInfo([...info]);
207
- }
206
+ // Git info
207
+ if (isGitRepo(cwd)) {
208
+ const branch = getBranch(cwd);
209
+ const status = getStatus(cwd);
210
+ info.push(`Git: ${branch} (${status})`);
211
+ setConnectionInfo([...info]);
212
+ }
208
213
 
209
- const a = new CodingAgent({
210
- provider,
211
- cwd,
212
- maxTokens: config.defaults.maxTokens,
213
- autoApprove: config.defaults.autoApprove,
214
- onToken: (token) => {
215
- // Switch from big spinner to streaming mode
216
- setLoading(false);
217
- setStreaming(true);
218
-
219
- // Update the current streaming response in-place
220
- setMessages((prev) => {
221
- const lastIdx = prev.length - 1;
222
- const last = prev[lastIdx];
223
-
224
- if (last && last.type === "response" && (last as any)._streaming) {
225
- return [
226
- ...prev.slice(0, lastIdx),
227
- { ...last, text: last.text + token },
228
- ];
229
- }
230
-
231
- // First token of a new response
232
- return [...prev, { id: msgId++, type: "response" as const, text: token, _streaming: true } as any];
233
- });
234
- },
235
- onToolCall: (name, args) => {
236
- setLoading(true);
237
- setSpinnerMsg("Executing tools...");
238
- const argStr = Object.entries(args)
239
- .map(([k, v]) => {
240
- const val = String(v);
241
- return val.length > 60 ? val.slice(0, 60) + "..." : val;
242
- })
243
- .join(", ");
244
- addMsg("tool", `${name}(${argStr})`);
245
- },
246
- onToolResult: (_name, result) => {
247
- const numLines = result.split("\n").length;
248
- const size = result.length > 1024 ? `${(result.length / 1024).toFixed(1)}KB` : `${result.length}B`;
249
- addMsg("tool-result", `└ ${numLines} lines (${size})`);
250
- },
251
- onThinking: (text) => {
252
- if (text.length > 0) {
253
- addMsg("info", `💭 Thought for ${text.split(/\s+/).length} words`);
214
+ const a = new CodingAgent({
215
+ provider,
216
+ cwd,
217
+ maxTokens: config.defaults.maxTokens,
218
+ autoApprove: config.defaults.autoApprove,
219
+ onToken: (token) => {
220
+ // Switch from big spinner to streaming mode
221
+ setLoading(false);
222
+ setStreaming(true);
223
+
224
+ // Update the current streaming response in-place
225
+ setMessages((prev) => {
226
+ const lastIdx = prev.length - 1;
227
+ const last = prev[lastIdx];
228
+
229
+ if (last && last.type === "response" && (last as any)._streaming) {
230
+ return [
231
+ ...prev.slice(0, lastIdx),
232
+ { ...last, text: last.text + token },
233
+ ];
254
234
  }
255
- },
256
- onGitCommit: (message) => {
257
- addMsg("info", `📝 Auto-committed: ${message}`);
258
- },
259
- onContextCompressed: (oldTokens, newTokens) => {
260
- const saved = oldTokens - newTokens;
261
- const savedStr = saved >= 1000 ? `${(saved / 1000).toFixed(1)}k` : String(saved);
262
- addMsg("info", `📦 Context compressed (~${savedStr} tokens freed)`);
263
- },
264
- contextCompressionThreshold: config.defaults.contextCompressionThreshold,
265
- onToolApproval: (name, args, diff) => {
266
- return new Promise((resolve) => {
267
- setApproval({ tool: name, args, diff, resolve });
268
- setLoading(false);
269
- });
270
- },
271
- });
272
235
 
273
- // Initialize async context (repo map)
274
- await a.init();
236
+ // First token of a new response
237
+ return [...prev, { id: msgId++, type: "response" as const, text: token, _streaming: true } as any];
238
+ });
239
+ },
240
+ onToolCall: (name, args) => {
241
+ setLoading(true);
242
+ setSpinnerMsg("Executing tools...");
243
+ const argStr = Object.entries(args)
244
+ .map(([k, v]) => {
245
+ const val = String(v);
246
+ return val.length > 60 ? val.slice(0, 60) + "..." : val;
247
+ })
248
+ .join(", ");
249
+ addMsg("tool", `${name}(${argStr})`);
250
+ },
251
+ onToolResult: (_name, result) => {
252
+ const numLines = result.split("\n").length;
253
+ const size = result.length > 1024 ? `${(result.length / 1024).toFixed(1)}KB` : `${result.length}B`;
254
+ addMsg("tool-result", `└ ${numLines} lines (${size})`);
255
+ },
256
+ onThinking: (text) => {
257
+ if (text.length > 0) {
258
+ addMsg("info", `💭 Thought for ${text.split(/\s+/).length} words`);
259
+ }
260
+ },
261
+ onGitCommit: (message) => {
262
+ addMsg("info", `📝 Auto-committed: ${message}`);
263
+ },
264
+ onContextCompressed: (oldTokens, newTokens) => {
265
+ const saved = oldTokens - newTokens;
266
+ const savedStr = saved >= 1000 ? `${(saved / 1000).toFixed(1)}k` : String(saved);
267
+ addMsg("info", `📦 Context compressed (~${savedStr} tokens freed)`);
268
+ },
269
+ contextCompressionThreshold: config.defaults.contextCompressionThreshold,
270
+ onToolApproval: (name, args, diff) => {
271
+ return new Promise((resolve) => {
272
+ setApproval({ tool: name, args, diff, resolve });
273
+ setLoading(false);
274
+ });
275
+ },
276
+ });
277
+
278
+ // Initialize async context (repo map)
279
+ await a.init();
280
+
281
+ setAgent(a);
282
+ setModelName(provider.model);
283
+ providerRef.current = { baseUrl: provider.baseUrl, apiKey: provider.apiKey };
284
+ setReady(true);
285
+ if (isRetry) {
286
+ addMsg("info", `✅ Connected to ${provider.model}`);
287
+ }
288
+ }, []);
275
289
 
276
- setAgent(a);
277
- setModelName(provider.model);
278
- providerRef.current = { baseUrl: provider.baseUrl, apiKey: provider.apiKey };
279
- setReady(true);
280
- })();
290
+ // Initialize agent on mount
291
+ useEffect(() => {
292
+ connectToProvider(false);
281
293
  }, []);
282
294
 
283
295
  function addMsg(type: ChatMessage["type"], text: string) {
@@ -347,10 +359,16 @@ function App() {
347
359
  setLoginPickerIndex(0);
348
360
  return;
349
361
  }
362
+ if (trimmed === "/connect") {
363
+ addMsg("info", "🔄 Reconnecting...");
364
+ await connectToProvider(true);
365
+ return;
366
+ }
350
367
  if (trimmed === "/help") {
351
368
  addMsg("info", [
352
369
  "Commands:",
353
370
  " /help — show this",
371
+ " /connect — retry LLM connection",
354
372
  " /login — authentication setup (run codemaxxing login in terminal)",
355
373
  " /model — switch model mid-session",
356
374
  " /models — list available models",