codemaxxing 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +209 -0
- package/dist/agent.d.ts +65 -0
- package/dist/agent.js +269 -0
- package/dist/config.d.ts +41 -0
- package/dist/config.js +174 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +659 -0
- package/dist/tools/files.d.ts +9 -0
- package/dist/tools/files.js +227 -0
- package/dist/utils/context.d.ts +13 -0
- package/dist/utils/context.js +135 -0
- package/dist/utils/git.d.ts +27 -0
- package/dist/utils/git.js +113 -0
- package/dist/utils/repomap.d.ts +18 -0
- package/dist/utils/repomap.js +195 -0
- package/dist/utils/sessions.d.ts +51 -0
- package/dist/utils/sessions.js +174 -0
- package/dist/utils/treesitter.d.ts +20 -0
- package/dist/utils/treesitter.js +710 -0
- package/package.json +51 -0
- package/src/agent.ts +322 -0
- package/src/config.ts +211 -0
- package/src/index.tsx +858 -0
- package/src/tools/files.ts +247 -0
- package/src/utils/context.ts +146 -0
- package/src/utils/git.ts +117 -0
- package/src/utils/repomap.ts +220 -0
- package/src/utils/sessions.ts +222 -0
- package/tsconfig.json +16 -0
package/package.json
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "codemaxxing",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Open-source terminal coding agent. Connect any LLM. Max your code.",
|
|
5
|
+
"main": "dist/index.js",
|
|
6
|
+
"bin": {
|
|
7
|
+
"codemaxxing": "./dist/index.js"
|
|
8
|
+
},
|
|
9
|
+
"type": "module",
|
|
10
|
+
"scripts": {
|
|
11
|
+
"build": "tsc",
|
|
12
|
+
"prepare": "tsc",
|
|
13
|
+
"dev": "tsx src/index.tsx",
|
|
14
|
+
"start": "node dist/index.js"
|
|
15
|
+
},
|
|
16
|
+
"keywords": [
|
|
17
|
+
"ai",
|
|
18
|
+
"coding",
|
|
19
|
+
"agent",
|
|
20
|
+
"terminal",
|
|
21
|
+
"llm",
|
|
22
|
+
"local",
|
|
23
|
+
"openai"
|
|
24
|
+
],
|
|
25
|
+
"author": "Marcos Vallejo",
|
|
26
|
+
"license": "MIT",
|
|
27
|
+
"dependencies": {
|
|
28
|
+
"@types/react": "^19.2.14",
|
|
29
|
+
"better-sqlite3": "^12.6.2",
|
|
30
|
+
"chalk": "^5.3.0",
|
|
31
|
+
"commander": "^12.0.0",
|
|
32
|
+
"conf": "^13.0.0",
|
|
33
|
+
"ink": "^6.8.0",
|
|
34
|
+
"ink-spinner": "^5.0.0",
|
|
35
|
+
"ink-text-input": "^6.0.0",
|
|
36
|
+
"openai": "^4.80.0",
|
|
37
|
+
"ora": "^8.0.0",
|
|
38
|
+
"react": "^19.2.4"
|
|
39
|
+
},
|
|
40
|
+
"devDependencies": {
|
|
41
|
+
"@types/better-sqlite3": "^7.6.13",
|
|
42
|
+
"@types/node": "^22.0.0",
|
|
43
|
+
"typescript": "^5.5.0"
|
|
44
|
+
},
|
|
45
|
+
"files": [
|
|
46
|
+
"dist",
|
|
47
|
+
"src",
|
|
48
|
+
"tsconfig.json",
|
|
49
|
+
"README.md"
|
|
50
|
+
]
|
|
51
|
+
}
|
package/src/agent.ts
ADDED
|
@@ -0,0 +1,322 @@
|
|
|
1
|
+
import OpenAI from "openai";
|
|
2
|
+
import type {
|
|
3
|
+
ChatCompletionMessageParam,
|
|
4
|
+
ChatCompletionTool,
|
|
5
|
+
ChatCompletionChunk,
|
|
6
|
+
} from "openai/resources/chat/completions";
|
|
7
|
+
import { FILE_TOOLS, executeTool } from "./tools/files.js";
|
|
8
|
+
import { buildProjectContext, getSystemPrompt } from "./utils/context.js";
|
|
9
|
+
import { isGitRepo, autoCommit } from "./utils/git.js";
|
|
10
|
+
import { createSession, saveMessage, updateTokenEstimate, loadMessages } from "./utils/sessions.js";
|
|
11
|
+
import type { ProviderConfig } from "./config.js";
|
|
12
|
+
|
|
13
|
+
// Tools that can modify your project — require approval
const DANGEROUS_TOOLS = new Set(["write_file", "run_command"]);

/**
 * Configuration and UI callbacks for a CodingAgent instance.
 * All callbacks are optional; the agent invokes them while streaming a
 * response so the caller can render tokens, tool activity, and prompts.
 */
export interface AgentOptions {
  // Endpoint, key, and model for the OpenAI-compatible provider.
  provider: ProviderConfig;
  // Working directory the agent operates in; tool calls execute relative to it.
  cwd: string;
  // Value passed as max_tokens on each chat completion request.
  maxTokens: number;
  // When true, dangerous tools run without an approval prompt.
  autoApprove: boolean;
  // Called with each streamed content token of the response text.
  onToken?: (token: string) => void;
  // Called when the model requests a tool, before it is executed.
  onToolCall?: (name: string, args: Record<string, unknown>) => void;
  // Called with the tool's output string (or a denial notice) after execution.
  onToolResult?: (name: string, result: string) => void;
  // Called with the trimmed contents of a completed <think>…</think> block.
  onThinking?: (text: string) => void;
  // Asked before a dangerous tool runs; "always" suppresses future prompts
  // for that tool name, "no" records a denial message instead of executing.
  onToolApproval?: (name: string, args: Record<string, unknown>) => Promise<"yes" | "no" | "always">;
  // Called after a successful auto-commit with a short commit description.
  onGitCommit?: (message: string) => void;
}

// A tool call re-assembled from streamed chunks: id, name, and the
// JSON `arguments` string all arrive in pieces across deltas.
interface AssembledToolCall {
  id: string;
  name: string;
  arguments: string;
}
|
|
34
|
+
|
|
35
|
+
export class CodingAgent {
|
|
36
|
+
private client: OpenAI;
|
|
37
|
+
private messages: ChatCompletionMessageParam[] = [];
|
|
38
|
+
private tools: ChatCompletionTool[] = FILE_TOOLS;
|
|
39
|
+
private cwd: string;
|
|
40
|
+
private maxTokens: number;
|
|
41
|
+
private autoApprove: boolean;
|
|
42
|
+
private model: string;
|
|
43
|
+
private alwaysApproved: Set<string> = new Set();
|
|
44
|
+
private gitEnabled: boolean;
|
|
45
|
+
private autoCommitEnabled: boolean = false;
|
|
46
|
+
private repoMap: string = "";
|
|
47
|
+
private sessionId: string = "";
|
|
48
|
+
|
|
49
|
+
constructor(private options: AgentOptions) {
|
|
50
|
+
this.client = new OpenAI({
|
|
51
|
+
baseURL: options.provider.baseUrl,
|
|
52
|
+
apiKey: options.provider.apiKey,
|
|
53
|
+
});
|
|
54
|
+
this.cwd = options.cwd;
|
|
55
|
+
this.maxTokens = options.maxTokens;
|
|
56
|
+
this.autoApprove = options.autoApprove;
|
|
57
|
+
this.model = options.provider.model;
|
|
58
|
+
this.gitEnabled = isGitRepo(this.cwd);
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
/**
|
|
62
|
+
* Initialize the agent — call this after constructor to build async context
|
|
63
|
+
*/
|
|
64
|
+
async init(): Promise<void> {
|
|
65
|
+
const context = await buildProjectContext(this.cwd);
|
|
66
|
+
const systemPrompt = await getSystemPrompt(context);
|
|
67
|
+
|
|
68
|
+
this.messages = [
|
|
69
|
+
{ role: "system", content: systemPrompt },
|
|
70
|
+
];
|
|
71
|
+
|
|
72
|
+
// Create a new session
|
|
73
|
+
this.sessionId = createSession(this.cwd, this.model);
|
|
74
|
+
saveMessage(this.sessionId, this.messages[0]);
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
/**
|
|
78
|
+
* Resume an existing session by loading its messages
|
|
79
|
+
*/
|
|
80
|
+
async resume(sessionId: string): Promise<void> {
|
|
81
|
+
const messages = loadMessages(sessionId);
|
|
82
|
+
if (messages.length === 0) {
|
|
83
|
+
throw new Error(`Session ${sessionId} not found or empty`);
|
|
84
|
+
}
|
|
85
|
+
this.messages = messages;
|
|
86
|
+
this.sessionId = sessionId;
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
getSessionId(): string {
|
|
90
|
+
return this.sessionId;
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
/**
|
|
94
|
+
* Get the current repo map
|
|
95
|
+
*/
|
|
96
|
+
getRepoMap(): string {
|
|
97
|
+
return this.repoMap;
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
/**
|
|
101
|
+
* Rebuild the repo map (useful after file changes)
|
|
102
|
+
*/
|
|
103
|
+
async refreshRepoMap(): Promise<string> {
|
|
104
|
+
const { buildRepoMap } = await import("./utils/repomap.js");
|
|
105
|
+
this.repoMap = await buildRepoMap(this.cwd);
|
|
106
|
+
return this.repoMap;
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
/**
|
|
110
|
+
* Stream a response from the model.
|
|
111
|
+
* Assembles tool call chunks, emits tokens in real-time,
|
|
112
|
+
* and loops until the model responds with text (no more tool calls).
|
|
113
|
+
*/
|
|
114
|
+
async chat(userMessage: string): Promise<string> {
|
|
115
|
+
const userMsg: ChatCompletionMessageParam = { role: "user", content: userMessage };
|
|
116
|
+
this.messages.push(userMsg);
|
|
117
|
+
saveMessage(this.sessionId, userMsg);
|
|
118
|
+
|
|
119
|
+
let iterations = 0;
|
|
120
|
+
const MAX_ITERATIONS = 20;
|
|
121
|
+
|
|
122
|
+
while (iterations < MAX_ITERATIONS) {
|
|
123
|
+
iterations++;
|
|
124
|
+
|
|
125
|
+
const stream = await this.client.chat.completions.create({
|
|
126
|
+
model: this.model,
|
|
127
|
+
messages: this.messages,
|
|
128
|
+
tools: this.tools,
|
|
129
|
+
max_tokens: this.maxTokens,
|
|
130
|
+
stream: true,
|
|
131
|
+
});
|
|
132
|
+
|
|
133
|
+
// Accumulate the streamed response
|
|
134
|
+
let contentText = "";
|
|
135
|
+
let thinkingText = "";
|
|
136
|
+
let inThinking = false;
|
|
137
|
+
const toolCalls: Map<number, AssembledToolCall> = new Map();
|
|
138
|
+
|
|
139
|
+
for await (const chunk of stream) {
|
|
140
|
+
const delta = chunk.choices?.[0]?.delta;
|
|
141
|
+
if (!delta) continue;
|
|
142
|
+
|
|
143
|
+
// Handle content tokens (the actual response text)
|
|
144
|
+
if (delta.content) {
|
|
145
|
+
const token = delta.content;
|
|
146
|
+
|
|
147
|
+
// Detect <think> blocks from reasoning models (Qwen, DeepSeek, etc.)
|
|
148
|
+
if (token.includes("<think>")) {
|
|
149
|
+
inThinking = true;
|
|
150
|
+
thinkingText = "";
|
|
151
|
+
continue;
|
|
152
|
+
}
|
|
153
|
+
if (inThinking) {
|
|
154
|
+
if (token.includes("</think>")) {
|
|
155
|
+
inThinking = false;
|
|
156
|
+
this.options.onThinking?.(thinkingText.trim());
|
|
157
|
+
continue;
|
|
158
|
+
}
|
|
159
|
+
thinkingText += token;
|
|
160
|
+
continue;
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
contentText += token;
|
|
164
|
+
this.options.onToken?.(token);
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
// Handle tool call chunks — they arrive in pieces
|
|
168
|
+
if (delta.tool_calls) {
|
|
169
|
+
for (const tc of delta.tool_calls) {
|
|
170
|
+
const idx = tc.index;
|
|
171
|
+
if (!toolCalls.has(idx)) {
|
|
172
|
+
toolCalls.set(idx, {
|
|
173
|
+
id: tc.id ?? "",
|
|
174
|
+
name: tc.function?.name ?? "",
|
|
175
|
+
arguments: "",
|
|
176
|
+
});
|
|
177
|
+
}
|
|
178
|
+
const existing = toolCalls.get(idx)!;
|
|
179
|
+
if (tc.id) existing.id = tc.id;
|
|
180
|
+
if (tc.function?.name) existing.name = tc.function.name;
|
|
181
|
+
if (tc.function?.arguments) existing.arguments += tc.function.arguments;
|
|
182
|
+
}
|
|
183
|
+
}
|
|
184
|
+
}
|
|
185
|
+
|
|
186
|
+
// Build the assistant message for history
|
|
187
|
+
const assistantMessage: any = { role: "assistant", content: contentText || null };
|
|
188
|
+
if (toolCalls.size > 0) {
|
|
189
|
+
assistantMessage.tool_calls = Array.from(toolCalls.values()).map((tc) => ({
|
|
190
|
+
id: tc.id,
|
|
191
|
+
type: "function" as const,
|
|
192
|
+
function: { name: tc.name, arguments: tc.arguments },
|
|
193
|
+
}));
|
|
194
|
+
}
|
|
195
|
+
this.messages.push(assistantMessage);
|
|
196
|
+
saveMessage(this.sessionId, assistantMessage);
|
|
197
|
+
|
|
198
|
+
// If no tool calls, we're done — return the text
|
|
199
|
+
if (toolCalls.size === 0) {
|
|
200
|
+
updateTokenEstimate(this.sessionId, this.estimateTokens());
|
|
201
|
+
return contentText || "(empty response)";
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
// Process tool calls
|
|
205
|
+
for (const toolCall of toolCalls.values()) {
|
|
206
|
+
let args: Record<string, unknown> = {};
|
|
207
|
+
try {
|
|
208
|
+
args = JSON.parse(toolCall.arguments);
|
|
209
|
+
} catch {
|
|
210
|
+
args = {};
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
this.options.onToolCall?.(toolCall.name, args);
|
|
214
|
+
|
|
215
|
+
// Check approval for dangerous tools
|
|
216
|
+
if (DANGEROUS_TOOLS.has(toolCall.name) && !this.autoApprove && !this.alwaysApproved.has(toolCall.name)) {
|
|
217
|
+
if (this.options.onToolApproval) {
|
|
218
|
+
const decision = await this.options.onToolApproval(toolCall.name, args);
|
|
219
|
+
if (decision === "no") {
|
|
220
|
+
const denied = `Tool call "${toolCall.name}" was denied by the user.`;
|
|
221
|
+
this.options.onToolResult?.(toolCall.name, denied);
|
|
222
|
+
const deniedMsg: ChatCompletionMessageParam = {
|
|
223
|
+
role: "tool",
|
|
224
|
+
tool_call_id: toolCall.id,
|
|
225
|
+
content: denied,
|
|
226
|
+
};
|
|
227
|
+
this.messages.push(deniedMsg);
|
|
228
|
+
saveMessage(this.sessionId, deniedMsg);
|
|
229
|
+
continue;
|
|
230
|
+
}
|
|
231
|
+
if (decision === "always") {
|
|
232
|
+
this.alwaysApproved.add(toolCall.name);
|
|
233
|
+
}
|
|
234
|
+
}
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
const result = await executeTool(toolCall.name, args, this.cwd);
|
|
238
|
+
this.options.onToolResult?.(toolCall.name, result);
|
|
239
|
+
|
|
240
|
+
// Auto-commit after successful write_file (only if enabled)
|
|
241
|
+
if (this.gitEnabled && this.autoCommitEnabled && toolCall.name === "write_file" && result.startsWith("✅")) {
|
|
242
|
+
const path = String(args.path ?? "unknown");
|
|
243
|
+
const committed = autoCommit(this.cwd, path, "write");
|
|
244
|
+
if (committed) {
|
|
245
|
+
this.options.onGitCommit?.(`write ${path}`);
|
|
246
|
+
}
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
const toolMsg: ChatCompletionMessageParam = {
|
|
250
|
+
role: "tool",
|
|
251
|
+
tool_call_id: toolCall.id,
|
|
252
|
+
content: result,
|
|
253
|
+
};
|
|
254
|
+
this.messages.push(toolMsg);
|
|
255
|
+
saveMessage(this.sessionId, toolMsg);
|
|
256
|
+
}
|
|
257
|
+
|
|
258
|
+
// Reset content for next iteration (tool results → model responds again)
|
|
259
|
+
// The onToken callback will stream the next response too
|
|
260
|
+
}
|
|
261
|
+
|
|
262
|
+
return "Max iterations reached. The agent may be stuck in a loop.";
|
|
263
|
+
}
|
|
264
|
+
|
|
265
|
+
/**
|
|
266
|
+
* Switch to a different model mid-session
|
|
267
|
+
*/
|
|
268
|
+
switchModel(model: string, baseUrl?: string, apiKey?: string): void {
|
|
269
|
+
this.model = model;
|
|
270
|
+
if (baseUrl || apiKey) {
|
|
271
|
+
this.client = new OpenAI({
|
|
272
|
+
baseURL: baseUrl ?? this.options.provider.baseUrl,
|
|
273
|
+
apiKey: apiKey ?? this.options.provider.apiKey,
|
|
274
|
+
});
|
|
275
|
+
}
|
|
276
|
+
}
|
|
277
|
+
|
|
278
|
+
getModel(): string {
|
|
279
|
+
return this.model;
|
|
280
|
+
}
|
|
281
|
+
|
|
282
|
+
setAutoCommit(enabled: boolean): void {
|
|
283
|
+
this.autoCommitEnabled = enabled;
|
|
284
|
+
}
|
|
285
|
+
|
|
286
|
+
isGitEnabled(): boolean {
|
|
287
|
+
return this.gitEnabled;
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
getContextLength(): number {
|
|
291
|
+
return this.messages.length;
|
|
292
|
+
}
|
|
293
|
+
|
|
294
|
+
/**
|
|
295
|
+
* Estimate token count across all messages (~4 chars per token)
|
|
296
|
+
*/
|
|
297
|
+
estimateTokens(): number {
|
|
298
|
+
let chars = 0;
|
|
299
|
+
for (const msg of this.messages) {
|
|
300
|
+
if (typeof msg.content === "string") {
|
|
301
|
+
chars += msg.content.length;
|
|
302
|
+
} else if (Array.isArray(msg.content)) {
|
|
303
|
+
for (const part of msg.content) {
|
|
304
|
+
if ("text" in part) chars += part.text.length;
|
|
305
|
+
}
|
|
306
|
+
}
|
|
307
|
+
// Count tool call arguments too
|
|
308
|
+
if ("tool_calls" in msg && Array.isArray((msg as any).tool_calls)) {
|
|
309
|
+
for (const tc of (msg as any).tool_calls) {
|
|
310
|
+
chars += (tc.function?.arguments?.length ?? 0);
|
|
311
|
+
chars += (tc.function?.name?.length ?? 0);
|
|
312
|
+
}
|
|
313
|
+
}
|
|
314
|
+
}
|
|
315
|
+
return Math.ceil(chars / 4);
|
|
316
|
+
}
|
|
317
|
+
|
|
318
|
+
reset(): void {
|
|
319
|
+
const systemMsg = this.messages[0];
|
|
320
|
+
this.messages = [systemMsg];
|
|
321
|
+
}
|
|
322
|
+
}
|
package/src/config.ts
ADDED
|
@@ -0,0 +1,211 @@
|
|
|
1
|
+
import { readFileSync, existsSync, mkdirSync, writeFileSync } from "fs";
|
|
2
|
+
import { homedir } from "os";
|
|
3
|
+
import { join } from "path";
|
|
4
|
+
|
|
5
|
+
/** Connection details for one OpenAI-compatible endpoint. */
export interface ProviderConfig {
  baseUrl: string;
  apiKey: string;
  model: string;
}

/** A ProviderConfig stored under a key in the `providers` map, with a display name. */
export interface ProviderProfile extends ProviderConfig {
  name: string;
}

/** Shape of the on-disk settings file (~/.codemaxxing/settings.json). */
export interface CodemaxxingConfig {
  // Active provider used when no profile or CLI flags override it.
  provider: ProviderConfig;
  // Optional named profiles selectable with --provider <key>.
  providers?: Record<string, ProviderProfile>;
  defaults: {
    autoApprove: boolean;
    contextFiles: number;
    maxTokens: number;
  };
}

/** Overrides collected from the command line by parseCLIArgs(). */
export interface CLIArgs {
  model?: string;
  provider?: string;
  apiKey?: string;
  baseUrl?: string;
}

const CONFIG_DIR = join(homedir(), ".codemaxxing");
const CONFIG_FILE = join(CONFIG_DIR, "settings.json");

// Written to disk on first run; also the fallback when the settings
// file is missing or fails to parse.
const DEFAULT_CONFIG: CodemaxxingConfig = {
  provider: {
    baseUrl: "http://localhost:1234/v1",
    apiKey: "not-needed",
    model: "auto",
  },
  providers: {
    local: {
      name: "Local (LM Studio/Ollama)",
      baseUrl: "http://localhost:1234/v1",
      apiKey: "not-needed",
      model: "auto",
    },
  },
  defaults: {
    autoApprove: false,
    contextFiles: 20,
    maxTokens: 8192,
  },
};
|
|
55
|
+
|
|
56
|
+
/**
|
|
57
|
+
* Parse CLI arguments
|
|
58
|
+
*/
|
|
59
|
+
export function parseCLIArgs(): CLIArgs {
|
|
60
|
+
const args: CLIArgs = {};
|
|
61
|
+
const argv = process.argv.slice(2);
|
|
62
|
+
|
|
63
|
+
for (let i = 0; i < argv.length; i++) {
|
|
64
|
+
const arg = argv[i];
|
|
65
|
+
const next = argv[i + 1];
|
|
66
|
+
|
|
67
|
+
if ((arg === "--model" || arg === "-m") && next) {
|
|
68
|
+
args.model = next;
|
|
69
|
+
i++;
|
|
70
|
+
} else if ((arg === "--provider" || arg === "-p") && next) {
|
|
71
|
+
args.provider = next;
|
|
72
|
+
i++;
|
|
73
|
+
} else if ((arg === "--api-key" || arg === "-k") && next) {
|
|
74
|
+
args.apiKey = next;
|
|
75
|
+
i++;
|
|
76
|
+
} else if ((arg === "--base-url" || arg === "-u") && next) {
|
|
77
|
+
args.baseUrl = next;
|
|
78
|
+
i++;
|
|
79
|
+
} else if (arg === "--help" || arg === "-h") {
|
|
80
|
+
console.log(`
|
|
81
|
+
codemaxxing — your code. your model. no excuses.
|
|
82
|
+
|
|
83
|
+
Usage:
|
|
84
|
+
codemaxxing [options]
|
|
85
|
+
|
|
86
|
+
Options:
|
|
87
|
+
-m, --model <model> Model name to use
|
|
88
|
+
-p, --provider <name> Provider profile from config (e.g. local, openrouter)
|
|
89
|
+
-k, --api-key <key> API key for the provider
|
|
90
|
+
-u, --base-url <url> Base URL for the provider API
|
|
91
|
+
-h, --help Show this help
|
|
92
|
+
|
|
93
|
+
Examples:
|
|
94
|
+
codemaxxing # Auto-detect local LLM
|
|
95
|
+
codemaxxing -m gpt-4o -u https://api.openai.com/v1 -k sk-...
|
|
96
|
+
codemaxxing -p openrouter # Use saved provider profile
|
|
97
|
+
codemaxxing -m qwen3.5-35b # Override model only
|
|
98
|
+
|
|
99
|
+
Config: ~/.codemaxxing/settings.json
|
|
100
|
+
`);
|
|
101
|
+
process.exit(0);
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
return args;
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
export function loadConfig(): CodemaxxingConfig {
|
|
109
|
+
if (!existsSync(CONFIG_DIR)) {
|
|
110
|
+
mkdirSync(CONFIG_DIR, { recursive: true });
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
if (!existsSync(CONFIG_FILE)) {
|
|
114
|
+
writeFileSync(CONFIG_FILE, JSON.stringify(DEFAULT_CONFIG, null, 2));
|
|
115
|
+
return DEFAULT_CONFIG;
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
try {
|
|
119
|
+
const raw = readFileSync(CONFIG_FILE, "utf-8");
|
|
120
|
+
const parsed = JSON.parse(raw);
|
|
121
|
+
return { ...DEFAULT_CONFIG, ...parsed };
|
|
122
|
+
} catch {
|
|
123
|
+
return DEFAULT_CONFIG;
|
|
124
|
+
}
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
/**
|
|
128
|
+
* Apply CLI overrides to a provider config
|
|
129
|
+
*/
|
|
130
|
+
export function applyOverrides(config: CodemaxxingConfig, args: CLIArgs): CodemaxxingConfig {
|
|
131
|
+
const result = { ...config, provider: { ...config.provider } };
|
|
132
|
+
|
|
133
|
+
// If a named provider profile is specified, use it as the base
|
|
134
|
+
if (args.provider && config.providers?.[args.provider]) {
|
|
135
|
+
const profile = config.providers[args.provider];
|
|
136
|
+
result.provider = { ...profile };
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
// CLI flags override everything
|
|
140
|
+
if (args.model) result.provider.model = args.model;
|
|
141
|
+
if (args.apiKey) result.provider.apiKey = args.apiKey;
|
|
142
|
+
if (args.baseUrl) result.provider.baseUrl = args.baseUrl;
|
|
143
|
+
|
|
144
|
+
return result;
|
|
145
|
+
}
|
|
146
|
+
|
|
147
|
+
/** Absolute path to the settings file (~/.codemaxxing/settings.json). */
export function getConfigPath(): string {
  return CONFIG_FILE;
}
|
|
150
|
+
|
|
151
|
+
/**
|
|
152
|
+
* Auto-detect local LLM servers
|
|
153
|
+
*/
|
|
154
|
+
export async function detectLocalProvider(): Promise<ProviderConfig | null> {
|
|
155
|
+
const endpoints = [
|
|
156
|
+
{ name: "LM Studio", url: "http://localhost:1234/v1" },
|
|
157
|
+
{ name: "Ollama", url: "http://localhost:11434/v1" },
|
|
158
|
+
{ name: "vLLM", url: "http://localhost:8000/v1" },
|
|
159
|
+
];
|
|
160
|
+
|
|
161
|
+
for (const endpoint of endpoints) {
|
|
162
|
+
try {
|
|
163
|
+
const controller = new AbortController();
|
|
164
|
+
const timeout = setTimeout(() => controller.abort(), 2000);
|
|
165
|
+
const res = await fetch(`${endpoint.url}/models`, {
|
|
166
|
+
signal: controller.signal,
|
|
167
|
+
});
|
|
168
|
+
clearTimeout(timeout);
|
|
169
|
+
|
|
170
|
+
if (res.ok) {
|
|
171
|
+
const data = (await res.json()) as { data?: Array<{ id: string }> };
|
|
172
|
+
const models = data.data ?? [];
|
|
173
|
+
const model = models[0]?.id ?? "auto";
|
|
174
|
+
return {
|
|
175
|
+
baseUrl: endpoint.url,
|
|
176
|
+
apiKey: "not-needed",
|
|
177
|
+
model,
|
|
178
|
+
};
|
|
179
|
+
}
|
|
180
|
+
} catch {
|
|
181
|
+
// Server not running, try next
|
|
182
|
+
}
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
return null;
|
|
186
|
+
}
|
|
187
|
+
|
|
188
|
+
/**
|
|
189
|
+
* List available models from a provider endpoint
|
|
190
|
+
*/
|
|
191
|
+
export async function listModels(baseUrl: string, apiKey: string): Promise<string[]> {
|
|
192
|
+
try {
|
|
193
|
+
const controller = new AbortController();
|
|
194
|
+
const timeout = setTimeout(() => controller.abort(), 5000);
|
|
195
|
+
const headers: Record<string, string> = {};
|
|
196
|
+
if (apiKey && apiKey !== "not-needed") {
|
|
197
|
+
headers["Authorization"] = `Bearer ${apiKey}`;
|
|
198
|
+
}
|
|
199
|
+
const res = await fetch(`${baseUrl}/models`, {
|
|
200
|
+
signal: controller.signal,
|
|
201
|
+
headers,
|
|
202
|
+
});
|
|
203
|
+
clearTimeout(timeout);
|
|
204
|
+
|
|
205
|
+
if (res.ok) {
|
|
206
|
+
const data = (await res.json()) as { data?: Array<{ id: string }> };
|
|
207
|
+
return (data.data ?? []).map(m => m.id);
|
|
208
|
+
}
|
|
209
|
+
} catch { /* ignore */ }
|
|
210
|
+
return [];
|
|
211
|
+
}
|