codemaxxing 1.0.0 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +18 -12
- package/dist/agent.d.ts +4 -0
- package/dist/agent.js +91 -16
- package/dist/commands/git.d.ts +2 -0
- package/dist/commands/git.js +50 -0
- package/dist/commands/ollama.d.ts +27 -0
- package/dist/commands/ollama.js +171 -0
- package/dist/commands/output.d.ts +2 -0
- package/dist/commands/output.js +18 -0
- package/dist/commands/registry.d.ts +2 -0
- package/dist/commands/registry.js +8 -0
- package/dist/commands/skills.d.ts +18 -0
- package/dist/commands/skills.js +121 -0
- package/dist/commands/types.d.ts +5 -0
- package/dist/commands/types.js +1 -0
- package/dist/commands/ui.d.ts +16 -0
- package/dist/commands/ui.js +79 -0
- package/dist/config.d.ts +9 -0
- package/dist/config.js +13 -3
- package/dist/exec.js +4 -1
- package/dist/index.js +75 -401
- package/dist/tools/files.js +58 -3
- package/dist/utils/context.js +6 -0
- package/dist/utils/mcp.d.ts +7 -2
- package/dist/utils/mcp.js +34 -6
- package/package.json +8 -5
- package/src/agent.ts +0 -894
- package/src/auth-cli.ts +0 -287
- package/src/cli.ts +0 -37
- package/src/config.ts +0 -352
- package/src/exec.ts +0 -183
- package/src/index.tsx +0 -2647
- package/src/skills/registry.ts +0 -1436
- package/src/themes.ts +0 -335
- package/src/tools/files.ts +0 -374
- package/src/utils/auth.ts +0 -606
- package/src/utils/context.ts +0 -174
- package/src/utils/git.ts +0 -117
- package/src/utils/hardware.ts +0 -131
- package/src/utils/lint.ts +0 -116
- package/src/utils/mcp.ts +0 -307
- package/src/utils/models.ts +0 -218
- package/src/utils/ollama.ts +0 -352
- package/src/utils/repomap.ts +0 -220
- package/src/utils/sessions.ts +0 -254
- package/src/utils/skills.ts +0 -241
- package/tsconfig.json +0 -16
package/src/agent.ts
DELETED
|
@@ -1,894 +0,0 @@
|
|
|
1
|
-
import OpenAI from "openai";
|
|
2
|
-
import Anthropic from "@anthropic-ai/sdk";
|
|
3
|
-
import type {
|
|
4
|
-
ChatCompletionMessageParam,
|
|
5
|
-
ChatCompletionTool,
|
|
6
|
-
ChatCompletionChunk,
|
|
7
|
-
} from "openai/resources/chat/completions";
|
|
8
|
-
import { FILE_TOOLS, executeTool, generateDiff, getExistingContent } from "./tools/files.js";
|
|
9
|
-
import { detectLinter, runLinter } from "./utils/lint.js";
|
|
10
|
-
import { buildProjectContext, getSystemPrompt, loadProjectRules } from "./utils/context.js";
|
|
11
|
-
import { isGitRepo, autoCommit } from "./utils/git.js";
|
|
12
|
-
import { buildSkillPrompts, getActiveSkillCount } from "./utils/skills.js";
|
|
13
|
-
import { createSession, saveMessage, updateTokenEstimate, updateSessionCost, loadMessages } from "./utils/sessions.js";
|
|
14
|
-
import { loadMCPConfig, connectToServers, disconnectAll, getAllMCPTools, parseMCPToolName, callMCPTool, getConnectedServers, type ConnectedServer } from "./utils/mcp.js";
|
|
15
|
-
import type { ProviderConfig } from "./config.js";
|
|
16
|
-
|
|
17
|
-
// Tools that can modify your project — require approval
|
|
18
|
-
const DANGEROUS_TOOLS = new Set(["write_file", "run_command"]);
|
|
19
|
-
|
|
20
|
-
// Cost per 1M tokens (input/output) for common models
|
|
21
|
-
const MODEL_COSTS: Record<string, { input: number; output: number }> = {
|
|
22
|
-
// OpenAI
|
|
23
|
-
"gpt-4o": { input: 2.5, output: 10 },
|
|
24
|
-
"gpt-4o-mini": { input: 0.15, output: 0.6 },
|
|
25
|
-
"gpt-4-turbo": { input: 10, output: 30 },
|
|
26
|
-
"gpt-4": { input: 30, output: 60 },
|
|
27
|
-
"gpt-3.5-turbo": { input: 0.5, output: 1.5 },
|
|
28
|
-
"o1": { input: 15, output: 60 },
|
|
29
|
-
"o1-mini": { input: 3, output: 12 },
|
|
30
|
-
"o3-mini": { input: 1.1, output: 4.4 },
|
|
31
|
-
// Anthropic
|
|
32
|
-
"claude-3-5-sonnet-20241022": { input: 3, output: 15 },
|
|
33
|
-
"claude-3-5-sonnet": { input: 3, output: 15 },
|
|
34
|
-
"claude-sonnet-4-20250514": { input: 3, output: 15 },
|
|
35
|
-
"claude-3-5-haiku-20241022": { input: 0.8, output: 4 },
|
|
36
|
-
"claude-3-opus-20240229": { input: 15, output: 75 },
|
|
37
|
-
"claude-3-haiku-20240307": { input: 0.25, output: 1.25 },
|
|
38
|
-
// Qwen (typically free/cheap on local, but OpenRouter pricing)
|
|
39
|
-
"qwen/qwen-2.5-coder-32b-instruct": { input: 0.2, output: 0.2 },
|
|
40
|
-
"qwen/qwen-2.5-72b-instruct": { input: 0.35, output: 0.4 },
|
|
41
|
-
// DeepSeek
|
|
42
|
-
"deepseek/deepseek-chat": { input: 0.14, output: 0.28 },
|
|
43
|
-
"deepseek/deepseek-coder": { input: 0.14, output: 0.28 },
|
|
44
|
-
// Llama
|
|
45
|
-
"meta-llama/llama-3.1-70b-instruct": { input: 0.52, output: 0.75 },
|
|
46
|
-
"meta-llama/llama-3.1-8b-instruct": { input: 0.055, output: 0.055 },
|
|
47
|
-
// Google
|
|
48
|
-
"google/gemini-pro-1.5": { input: 1.25, output: 5 },
|
|
49
|
-
"google/gemini-flash-1.5": { input: 0.075, output: 0.3 },
|
|
50
|
-
};
|
|
51
|
-
|
|
52
|
-
function getModelCost(model: string): { input: number; output: number } {
|
|
53
|
-
// Direct match
|
|
54
|
-
if (MODEL_COSTS[model]) return MODEL_COSTS[model];
|
|
55
|
-
// Partial match (model name contains a known key)
|
|
56
|
-
const lower = model.toLowerCase();
|
|
57
|
-
for (const [key, cost] of Object.entries(MODEL_COSTS)) {
|
|
58
|
-
if (lower.includes(key) || key.includes(lower)) return cost;
|
|
59
|
-
}
|
|
60
|
-
// Default: $0 (local/unknown models)
|
|
61
|
-
return { input: 0, output: 0 };
|
|
62
|
-
}
|
|
63
|
-
|
|
64
|
-
export interface AgentOptions {
|
|
65
|
-
provider: ProviderConfig;
|
|
66
|
-
cwd: string;
|
|
67
|
-
maxTokens: number;
|
|
68
|
-
autoApprove: boolean;
|
|
69
|
-
onToken?: (token: string) => void;
|
|
70
|
-
onToolCall?: (name: string, args: Record<string, unknown>) => void;
|
|
71
|
-
onToolResult?: (name: string, result: string) => void;
|
|
72
|
-
onThinking?: (text: string) => void;
|
|
73
|
-
onToolApproval?: (name: string, args: Record<string, unknown>, diff?: string) => Promise<"yes" | "no" | "always">;
|
|
74
|
-
onGitCommit?: (message: string) => void;
|
|
75
|
-
onContextCompressed?: (oldTokens: number, newTokens: number) => void;
|
|
76
|
-
onArchitectPlan?: (plan: string) => void;
|
|
77
|
-
onLintResult?: (file: string, errors: string) => void;
|
|
78
|
-
onMCPStatus?: (server: string, status: string) => void;
|
|
79
|
-
contextCompressionThreshold?: number;
|
|
80
|
-
}
|
|
81
|
-
|
|
82
|
-
interface AssembledToolCall {
|
|
83
|
-
id: string;
|
|
84
|
-
name: string;
|
|
85
|
-
arguments: string;
|
|
86
|
-
}
|
|
87
|
-
|
|
88
|
-
export class CodingAgent {
|
|
89
|
-
private client: OpenAI;
|
|
90
|
-
private anthropicClient: Anthropic | null = null;
|
|
91
|
-
private providerType: "openai" | "anthropic";
|
|
92
|
-
private messages: ChatCompletionMessageParam[] = [];
|
|
93
|
-
private tools: ChatCompletionTool[] = FILE_TOOLS;
|
|
94
|
-
private cwd: string;
|
|
95
|
-
private maxTokens: number;
|
|
96
|
-
private autoApprove: boolean;
|
|
97
|
-
private model: string;
|
|
98
|
-
private alwaysApproved: Set<string> = new Set();
|
|
99
|
-
private gitEnabled: boolean;
|
|
100
|
-
private autoCommitEnabled: boolean = false;
|
|
101
|
-
private repoMap: string = "";
|
|
102
|
-
private sessionId: string = "";
|
|
103
|
-
private totalPromptTokens: number = 0;
|
|
104
|
-
private totalCompletionTokens: number = 0;
|
|
105
|
-
private totalCost: number = 0;
|
|
106
|
-
private systemPrompt: string = "";
|
|
107
|
-
private compressionThreshold: number;
|
|
108
|
-
private sessionDisabledSkills: Set<string> = new Set();
|
|
109
|
-
private projectRulesSource: string | null = null;
|
|
110
|
-
private architectModel: string | null = null;
|
|
111
|
-
private autoLintEnabled: boolean = true;
|
|
112
|
-
private detectedLinter: { command: string; name: string } | null = null;
|
|
113
|
-
private mcpServers: ConnectedServer[] = [];
|
|
114
|
-
|
|
115
|
-
constructor(private options: AgentOptions) {
|
|
116
|
-
this.providerType = options.provider.type || "openai";
|
|
117
|
-
this.client = new OpenAI({
|
|
118
|
-
baseURL: options.provider.baseUrl,
|
|
119
|
-
apiKey: options.provider.apiKey,
|
|
120
|
-
});
|
|
121
|
-
if (this.providerType === "anthropic") {
|
|
122
|
-
this.anthropicClient = new Anthropic({
|
|
123
|
-
apiKey: options.provider.apiKey,
|
|
124
|
-
});
|
|
125
|
-
}
|
|
126
|
-
this.cwd = options.cwd;
|
|
127
|
-
this.maxTokens = options.maxTokens;
|
|
128
|
-
this.autoApprove = options.autoApprove;
|
|
129
|
-
this.model = options.provider.model;
|
|
130
|
-
// Default model for Anthropic
|
|
131
|
-
if (this.providerType === "anthropic" && (this.model === "auto" || !this.model)) {
|
|
132
|
-
this.model = "claude-sonnet-4-20250514";
|
|
133
|
-
}
|
|
134
|
-
this.gitEnabled = isGitRepo(this.cwd);
|
|
135
|
-
this.compressionThreshold = options.contextCompressionThreshold ?? 80000;
|
|
136
|
-
}
|
|
137
|
-
|
|
138
|
-
/**
|
|
139
|
-
* Initialize the agent — call this after constructor to build async context
|
|
140
|
-
*/
|
|
141
|
-
async init(): Promise<void> {
|
|
142
|
-
const context = await buildProjectContext(this.cwd);
|
|
143
|
-
const skillPrompts = buildSkillPrompts(this.cwd, this.sessionDisabledSkills);
|
|
144
|
-
const rules = loadProjectRules(this.cwd);
|
|
145
|
-
if (rules) this.projectRulesSource = rules.source;
|
|
146
|
-
this.systemPrompt = await getSystemPrompt(context, skillPrompts, rules?.content ?? "");
|
|
147
|
-
|
|
148
|
-
// Detect project linter
|
|
149
|
-
this.detectedLinter = detectLinter(this.cwd);
|
|
150
|
-
|
|
151
|
-
// Connect to MCP servers
|
|
152
|
-
const mcpConfig = loadMCPConfig(this.cwd);
|
|
153
|
-
if (Object.keys(mcpConfig.mcpServers).length > 0) {
|
|
154
|
-
this.mcpServers = await connectToServers(mcpConfig, this.options.onMCPStatus);
|
|
155
|
-
if (this.mcpServers.length > 0) {
|
|
156
|
-
const mcpTools = getAllMCPTools(this.mcpServers);
|
|
157
|
-
this.tools = [...FILE_TOOLS, ...mcpTools];
|
|
158
|
-
}
|
|
159
|
-
}
|
|
160
|
-
|
|
161
|
-
this.messages = [
|
|
162
|
-
{ role: "system", content: this.systemPrompt },
|
|
163
|
-
];
|
|
164
|
-
|
|
165
|
-
// Create a new session
|
|
166
|
-
this.sessionId = createSession(this.cwd, this.model);
|
|
167
|
-
saveMessage(this.sessionId, this.messages[0]);
|
|
168
|
-
}
|
|
169
|
-
|
|
170
|
-
/**
|
|
171
|
-
* Resume an existing session by loading its messages
|
|
172
|
-
*/
|
|
173
|
-
async resume(sessionId: string): Promise<void> {
|
|
174
|
-
const messages = loadMessages(sessionId);
|
|
175
|
-
if (messages.length === 0) {
|
|
176
|
-
throw new Error(`Session ${sessionId} not found or empty`);
|
|
177
|
-
}
|
|
178
|
-
this.messages = messages;
|
|
179
|
-
this.sessionId = sessionId;
|
|
180
|
-
}
|
|
181
|
-
|
|
182
|
-
getSessionId(): string {
|
|
183
|
-
return this.sessionId;
|
|
184
|
-
}
|
|
185
|
-
|
|
186
|
-
/**
|
|
187
|
-
* Get the current repo map
|
|
188
|
-
*/
|
|
189
|
-
getRepoMap(): string {
|
|
190
|
-
return this.repoMap;
|
|
191
|
-
}
|
|
192
|
-
|
|
193
|
-
/**
|
|
194
|
-
* Rebuild the repo map (useful after file changes)
|
|
195
|
-
*/
|
|
196
|
-
async refreshRepoMap(): Promise<string> {
|
|
197
|
-
const { buildRepoMap } = await import("./utils/repomap.js");
|
|
198
|
-
this.repoMap = await buildRepoMap(this.cwd);
|
|
199
|
-
return this.repoMap;
|
|
200
|
-
}
|
|
201
|
-
|
|
202
|
-
/**
|
|
203
|
-
* Send a message, routing through architect model if enabled
|
|
204
|
-
*/
|
|
205
|
-
async send(userMessage: string): Promise<string> {
|
|
206
|
-
if (this.architectModel) {
|
|
207
|
-
return this.architectChat(userMessage);
|
|
208
|
-
}
|
|
209
|
-
return this.chat(userMessage);
|
|
210
|
-
}
|
|
211
|
-
|
|
212
|
-
/**
|
|
213
|
-
* Stream a response from the model.
|
|
214
|
-
* Assembles tool call chunks, emits tokens in real-time,
|
|
215
|
-
* and loops until the model responds with text (no more tool calls).
|
|
216
|
-
*/
|
|
217
|
-
async chat(userMessage: string): Promise<string> {
|
|
218
|
-
const userMsg: ChatCompletionMessageParam = { role: "user", content: userMessage };
|
|
219
|
-
this.messages.push(userMsg);
|
|
220
|
-
saveMessage(this.sessionId, userMsg);
|
|
221
|
-
|
|
222
|
-
// Check if context needs compression before sending
|
|
223
|
-
await this.maybeCompressContext();
|
|
224
|
-
|
|
225
|
-
if (this.providerType === "anthropic" && this.anthropicClient) {
|
|
226
|
-
return this.chatAnthropic(userMessage);
|
|
227
|
-
}
|
|
228
|
-
|
|
229
|
-
let iterations = 0;
|
|
230
|
-
const MAX_ITERATIONS = 20;
|
|
231
|
-
|
|
232
|
-
while (iterations < MAX_ITERATIONS) {
|
|
233
|
-
iterations++;
|
|
234
|
-
|
|
235
|
-
const stream = await this.client.chat.completions.create({
|
|
236
|
-
model: this.model,
|
|
237
|
-
messages: this.messages,
|
|
238
|
-
tools: this.tools,
|
|
239
|
-
max_tokens: this.maxTokens,
|
|
240
|
-
stream: true,
|
|
241
|
-
stream_options: { include_usage: true },
|
|
242
|
-
});
|
|
243
|
-
|
|
244
|
-
// Accumulate the streamed response
|
|
245
|
-
let contentText = "";
|
|
246
|
-
let thinkingText = "";
|
|
247
|
-
let inThinking = false;
|
|
248
|
-
const toolCalls: Map<number, AssembledToolCall> = new Map();
|
|
249
|
-
let chunkPromptTokens = 0;
|
|
250
|
-
let chunkCompletionTokens = 0;
|
|
251
|
-
|
|
252
|
-
for await (const chunk of stream) {
|
|
253
|
-
// Capture usage from the final chunk
|
|
254
|
-
if ((chunk as any).usage) {
|
|
255
|
-
chunkPromptTokens = (chunk as any).usage.prompt_tokens ?? 0;
|
|
256
|
-
chunkCompletionTokens = (chunk as any).usage.completion_tokens ?? 0;
|
|
257
|
-
}
|
|
258
|
-
const delta = chunk.choices?.[0]?.delta;
|
|
259
|
-
if (!delta) continue;
|
|
260
|
-
|
|
261
|
-
// Handle content tokens (the actual response text)
|
|
262
|
-
if (delta.content) {
|
|
263
|
-
const token = delta.content;
|
|
264
|
-
|
|
265
|
-
// Detect <think> blocks from reasoning models (Qwen, DeepSeek, etc.)
|
|
266
|
-
if (token.includes("<think>")) {
|
|
267
|
-
inThinking = true;
|
|
268
|
-
thinkingText = "";
|
|
269
|
-
continue;
|
|
270
|
-
}
|
|
271
|
-
if (inThinking) {
|
|
272
|
-
if (token.includes("</think>")) {
|
|
273
|
-
inThinking = false;
|
|
274
|
-
this.options.onThinking?.(thinkingText.trim());
|
|
275
|
-
continue;
|
|
276
|
-
}
|
|
277
|
-
thinkingText += token;
|
|
278
|
-
continue;
|
|
279
|
-
}
|
|
280
|
-
|
|
281
|
-
contentText += token;
|
|
282
|
-
this.options.onToken?.(token);
|
|
283
|
-
}
|
|
284
|
-
|
|
285
|
-
// Handle tool call chunks — they arrive in pieces
|
|
286
|
-
if (delta.tool_calls) {
|
|
287
|
-
for (const tc of delta.tool_calls) {
|
|
288
|
-
const idx = tc.index;
|
|
289
|
-
if (!toolCalls.has(idx)) {
|
|
290
|
-
toolCalls.set(idx, {
|
|
291
|
-
id: tc.id ?? "",
|
|
292
|
-
name: tc.function?.name ?? "",
|
|
293
|
-
arguments: "",
|
|
294
|
-
});
|
|
295
|
-
}
|
|
296
|
-
const existing = toolCalls.get(idx)!;
|
|
297
|
-
if (tc.id) existing.id = tc.id;
|
|
298
|
-
if (tc.function?.name) existing.name = tc.function.name;
|
|
299
|
-
if (tc.function?.arguments) existing.arguments += tc.function.arguments;
|
|
300
|
-
}
|
|
301
|
-
}
|
|
302
|
-
}
|
|
303
|
-
|
|
304
|
-
// Build the assistant message for history
|
|
305
|
-
const assistantMessage: any = { role: "assistant", content: contentText || null };
|
|
306
|
-
if (toolCalls.size > 0) {
|
|
307
|
-
assistantMessage.tool_calls = Array.from(toolCalls.values()).map((tc) => ({
|
|
308
|
-
id: tc.id,
|
|
309
|
-
type: "function" as const,
|
|
310
|
-
function: { name: tc.name, arguments: tc.arguments },
|
|
311
|
-
}));
|
|
312
|
-
}
|
|
313
|
-
this.messages.push(assistantMessage);
|
|
314
|
-
saveMessage(this.sessionId, assistantMessage);
|
|
315
|
-
|
|
316
|
-
// Track token usage and cost
|
|
317
|
-
if (chunkPromptTokens > 0 || chunkCompletionTokens > 0) {
|
|
318
|
-
this.totalPromptTokens += chunkPromptTokens;
|
|
319
|
-
this.totalCompletionTokens += chunkCompletionTokens;
|
|
320
|
-
const costs = getModelCost(this.model);
|
|
321
|
-
this.totalCost = (this.totalPromptTokens / 1_000_000) * costs.input +
|
|
322
|
-
(this.totalCompletionTokens / 1_000_000) * costs.output;
|
|
323
|
-
updateSessionCost(this.sessionId, this.totalPromptTokens, this.totalCompletionTokens, this.totalCost);
|
|
324
|
-
}
|
|
325
|
-
|
|
326
|
-
// If no tool calls, we're done — return the text
|
|
327
|
-
if (toolCalls.size === 0) {
|
|
328
|
-
updateTokenEstimate(this.sessionId, this.estimateTokens());
|
|
329
|
-
return contentText || "(empty response)";
|
|
330
|
-
}
|
|
331
|
-
|
|
332
|
-
// Process tool calls
|
|
333
|
-
for (const toolCall of toolCalls.values()) {
|
|
334
|
-
let args: Record<string, unknown> = {};
|
|
335
|
-
try {
|
|
336
|
-
args = JSON.parse(toolCall.arguments);
|
|
337
|
-
} catch {
|
|
338
|
-
args = {};
|
|
339
|
-
}
|
|
340
|
-
|
|
341
|
-
this.options.onToolCall?.(toolCall.name, args);
|
|
342
|
-
|
|
343
|
-
// Check approval for dangerous tools
|
|
344
|
-
if (DANGEROUS_TOOLS.has(toolCall.name) && !this.autoApprove && !this.alwaysApproved.has(toolCall.name)) {
|
|
345
|
-
if (this.options.onToolApproval) {
|
|
346
|
-
// Generate diff for write_file if file already exists
|
|
347
|
-
let diff: string | undefined;
|
|
348
|
-
if (toolCall.name === "write_file" && args.path && args.content) {
|
|
349
|
-
const existing = getExistingContent(String(args.path), this.cwd);
|
|
350
|
-
if (existing !== null) {
|
|
351
|
-
diff = generateDiff(existing, String(args.content), String(args.path));
|
|
352
|
-
}
|
|
353
|
-
}
|
|
354
|
-
const decision = await this.options.onToolApproval(toolCall.name, args, diff);
|
|
355
|
-
if (decision === "no") {
|
|
356
|
-
const denied = `Tool call "${toolCall.name}" was denied by the user.`;
|
|
357
|
-
this.options.onToolResult?.(toolCall.name, denied);
|
|
358
|
-
const deniedMsg: ChatCompletionMessageParam = {
|
|
359
|
-
role: "tool",
|
|
360
|
-
tool_call_id: toolCall.id,
|
|
361
|
-
content: denied,
|
|
362
|
-
};
|
|
363
|
-
this.messages.push(deniedMsg);
|
|
364
|
-
saveMessage(this.sessionId, deniedMsg);
|
|
365
|
-
continue;
|
|
366
|
-
}
|
|
367
|
-
if (decision === "always") {
|
|
368
|
-
this.alwaysApproved.add(toolCall.name);
|
|
369
|
-
}
|
|
370
|
-
}
|
|
371
|
-
}
|
|
372
|
-
|
|
373
|
-
// Route to MCP or built-in tool
|
|
374
|
-
const mcpParsed = parseMCPToolName(toolCall.name);
|
|
375
|
-
let result: string;
|
|
376
|
-
if (mcpParsed) {
|
|
377
|
-
result = await callMCPTool(mcpParsed.serverName, mcpParsed.toolName, args);
|
|
378
|
-
} else {
|
|
379
|
-
result = await executeTool(toolCall.name, args, this.cwd);
|
|
380
|
-
}
|
|
381
|
-
this.options.onToolResult?.(toolCall.name, result);
|
|
382
|
-
|
|
383
|
-
// Auto-commit after successful write_file (only if enabled)
|
|
384
|
-
if (this.gitEnabled && this.autoCommitEnabled && toolCall.name === "write_file" && result.startsWith("✅")) {
|
|
385
|
-
const path = String(args.path ?? "unknown");
|
|
386
|
-
const committed = autoCommit(this.cwd, path, "write");
|
|
387
|
-
if (committed) {
|
|
388
|
-
this.options.onGitCommit?.(`write ${path}`);
|
|
389
|
-
}
|
|
390
|
-
}
|
|
391
|
-
|
|
392
|
-
// Auto-lint after successful write_file
|
|
393
|
-
if (this.autoLintEnabled && this.detectedLinter && toolCall.name === "write_file" && result.startsWith("✅")) {
|
|
394
|
-
const filePath = String(args.path ?? "");
|
|
395
|
-
const lintErrors = runLinter(this.detectedLinter, filePath, this.cwd);
|
|
396
|
-
if (lintErrors) {
|
|
397
|
-
this.options.onLintResult?.(filePath, lintErrors);
|
|
398
|
-
const lintMsg: ChatCompletionMessageParam = {
|
|
399
|
-
role: "tool",
|
|
400
|
-
tool_call_id: toolCall.id,
|
|
401
|
-
content: result + `\n\nLint errors detected in ${filePath}:\n${lintErrors}\nPlease fix these issues.`,
|
|
402
|
-
};
|
|
403
|
-
this.messages.push(lintMsg);
|
|
404
|
-
saveMessage(this.sessionId, lintMsg);
|
|
405
|
-
continue; // skip the normal tool message push
|
|
406
|
-
}
|
|
407
|
-
}
|
|
408
|
-
|
|
409
|
-
const toolMsg: ChatCompletionMessageParam = {
|
|
410
|
-
role: "tool",
|
|
411
|
-
tool_call_id: toolCall.id,
|
|
412
|
-
content: result,
|
|
413
|
-
};
|
|
414
|
-
this.messages.push(toolMsg);
|
|
415
|
-
saveMessage(this.sessionId, toolMsg);
|
|
416
|
-
}
|
|
417
|
-
|
|
418
|
-
// Reset content for next iteration (tool results → model responds again)
|
|
419
|
-
// The onToken callback will stream the next response too
|
|
420
|
-
}
|
|
421
|
-
|
|
422
|
-
return "Max iterations reached. The agent may be stuck in a loop.";
|
|
423
|
-
}
|
|
424
|
-
|
|
425
|
-
/**
|
|
426
|
-
* Convert OpenAI-format tools to Anthropic tool format
|
|
427
|
-
*/
|
|
428
|
-
private getAnthropicTools(): Anthropic.Tool[] {
|
|
429
|
-
return this.tools.map((t) => ({
|
|
430
|
-
name: t.function.name,
|
|
431
|
-
description: t.function.description ?? "",
|
|
432
|
-
input_schema: (t.function.parameters as Anthropic.Tool.InputSchema) ?? { type: "object" as const, properties: {} },
|
|
433
|
-
}));
|
|
434
|
-
}
|
|
435
|
-
|
|
436
|
-
/**
|
|
437
|
-
* Convert messages to Anthropic format (separate system from conversation)
|
|
438
|
-
*/
|
|
439
|
-
private getAnthropicMessages(): Anthropic.MessageParam[] {
|
|
440
|
-
const msgs: Anthropic.MessageParam[] = [];
|
|
441
|
-
for (const msg of this.messages) {
|
|
442
|
-
if (msg.role === "system") continue; // system handled separately
|
|
443
|
-
if (msg.role === "user") {
|
|
444
|
-
msgs.push({ role: "user", content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content) });
|
|
445
|
-
} else if (msg.role === "assistant") {
|
|
446
|
-
const content: Anthropic.ContentBlockParam[] = [];
|
|
447
|
-
if (msg.content) {
|
|
448
|
-
content.push({ type: "text", text: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content) });
|
|
449
|
-
}
|
|
450
|
-
if ("tool_calls" in msg && Array.isArray((msg as any).tool_calls)) {
|
|
451
|
-
for (const tc of (msg as any).tool_calls) {
|
|
452
|
-
let input: Record<string, unknown> = {};
|
|
453
|
-
try { input = JSON.parse(tc.function.arguments); } catch {}
|
|
454
|
-
content.push({
|
|
455
|
-
type: "tool_use",
|
|
456
|
-
id: tc.id,
|
|
457
|
-
name: tc.function.name,
|
|
458
|
-
input,
|
|
459
|
-
});
|
|
460
|
-
}
|
|
461
|
-
}
|
|
462
|
-
if (content.length > 0) {
|
|
463
|
-
msgs.push({ role: "assistant", content });
|
|
464
|
-
}
|
|
465
|
-
} else if (msg.role === "tool") {
|
|
466
|
-
const toolCallId = (msg as any).tool_call_id;
|
|
467
|
-
const resultContent = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
|
|
468
|
-
// Anthropic expects tool results as user messages with tool_result content
|
|
469
|
-
msgs.push({
|
|
470
|
-
role: "user",
|
|
471
|
-
content: [{
|
|
472
|
-
type: "tool_result",
|
|
473
|
-
tool_use_id: toolCallId,
|
|
474
|
-
content: resultContent,
|
|
475
|
-
}],
|
|
476
|
-
});
|
|
477
|
-
}
|
|
478
|
-
}
|
|
479
|
-
return msgs;
|
|
480
|
-
}
|
|
481
|
-
|
|
482
|
-
/**
|
|
483
|
-
* Anthropic-native streaming chat
|
|
484
|
-
*/
|
|
485
|
-
private async chatAnthropic(_userMessage: string): Promise<string> {
|
|
486
|
-
const client = this.anthropicClient!;
|
|
487
|
-
let iterations = 0;
|
|
488
|
-
const MAX_ITERATIONS = 20;
|
|
489
|
-
|
|
490
|
-
while (iterations < MAX_ITERATIONS) {
|
|
491
|
-
iterations++;
|
|
492
|
-
|
|
493
|
-
const anthropicMessages = this.getAnthropicMessages();
|
|
494
|
-
const anthropicTools = this.getAnthropicTools();
|
|
495
|
-
|
|
496
|
-
const stream = client.messages.stream({
|
|
497
|
-
model: this.model,
|
|
498
|
-
max_tokens: this.maxTokens,
|
|
499
|
-
system: this.systemPrompt,
|
|
500
|
-
messages: anthropicMessages,
|
|
501
|
-
tools: anthropicTools,
|
|
502
|
-
});
|
|
503
|
-
|
|
504
|
-
let contentText = "";
|
|
505
|
-
const toolCalls: Array<{ id: string; name: string; input: Record<string, unknown> }> = [];
|
|
506
|
-
let currentToolId = "";
|
|
507
|
-
let currentToolName = "";
|
|
508
|
-
let currentToolInput = "";
|
|
509
|
-
|
|
510
|
-
stream.on("text", (text) => {
|
|
511
|
-
contentText += text;
|
|
512
|
-
this.options.onToken?.(text);
|
|
513
|
-
});
|
|
514
|
-
|
|
515
|
-
const finalMessage = await stream.finalMessage();
|
|
516
|
-
|
|
517
|
-
// Track usage
|
|
518
|
-
if (finalMessage.usage) {
|
|
519
|
-
const promptTokens = finalMessage.usage.input_tokens;
|
|
520
|
-
const completionTokens = finalMessage.usage.output_tokens;
|
|
521
|
-
this.totalPromptTokens += promptTokens;
|
|
522
|
-
this.totalCompletionTokens += completionTokens;
|
|
523
|
-
const costs = getModelCost(this.model);
|
|
524
|
-
this.totalCost = (this.totalPromptTokens / 1_000_000) * costs.input +
|
|
525
|
-
(this.totalCompletionTokens / 1_000_000) * costs.output;
|
|
526
|
-
updateSessionCost(this.sessionId, this.totalPromptTokens, this.totalCompletionTokens, this.totalCost);
|
|
527
|
-
}
|
|
528
|
-
|
|
529
|
-
// Extract tool uses from content blocks
|
|
530
|
-
for (const block of finalMessage.content) {
|
|
531
|
-
if (block.type === "tool_use") {
|
|
532
|
-
toolCalls.push({
|
|
533
|
-
id: block.id,
|
|
534
|
-
name: block.name,
|
|
535
|
-
input: block.input as Record<string, unknown>,
|
|
536
|
-
});
|
|
537
|
-
}
|
|
538
|
-
}
|
|
539
|
-
|
|
540
|
-
// Build OpenAI-format assistant message for session storage
|
|
541
|
-
const assistantMessage: any = { role: "assistant", content: contentText || null };
|
|
542
|
-
if (toolCalls.length > 0) {
|
|
543
|
-
assistantMessage.tool_calls = toolCalls.map((tc) => ({
|
|
544
|
-
id: tc.id,
|
|
545
|
-
type: "function" as const,
|
|
546
|
-
function: { name: tc.name, arguments: JSON.stringify(tc.input) },
|
|
547
|
-
}));
|
|
548
|
-
}
|
|
549
|
-
this.messages.push(assistantMessage);
|
|
550
|
-
saveMessage(this.sessionId, assistantMessage);
|
|
551
|
-
|
|
552
|
-
// If no tool calls, we're done
|
|
553
|
-
if (toolCalls.length === 0) {
|
|
554
|
-
updateTokenEstimate(this.sessionId, this.estimateTokens());
|
|
555
|
-
return contentText || "(empty response)";
|
|
556
|
-
}
|
|
557
|
-
|
|
558
|
-
// Process tool calls
|
|
559
|
-
for (const toolCall of toolCalls) {
|
|
560
|
-
const args = toolCall.input;
|
|
561
|
-
this.options.onToolCall?.(toolCall.name, args);
|
|
562
|
-
|
|
563
|
-
// Check approval for dangerous tools
|
|
564
|
-
if (DANGEROUS_TOOLS.has(toolCall.name) && !this.autoApprove && !this.alwaysApproved.has(toolCall.name)) {
|
|
565
|
-
if (this.options.onToolApproval) {
|
|
566
|
-
let diff: string | undefined;
|
|
567
|
-
if (toolCall.name === "write_file" && args.path && args.content) {
|
|
568
|
-
const existing = getExistingContent(String(args.path), this.cwd);
|
|
569
|
-
if (existing !== null) {
|
|
570
|
-
diff = generateDiff(existing, String(args.content), String(args.path));
|
|
571
|
-
}
|
|
572
|
-
}
|
|
573
|
-
const decision = await this.options.onToolApproval(toolCall.name, args, diff);
|
|
574
|
-
if (decision === "no") {
|
|
575
|
-
const denied = `Tool call "${toolCall.name}" was denied by the user.`;
|
|
576
|
-
this.options.onToolResult?.(toolCall.name, denied);
|
|
577
|
-
const deniedMsg: ChatCompletionMessageParam = {
|
|
578
|
-
role: "tool",
|
|
579
|
-
tool_call_id: toolCall.id,
|
|
580
|
-
content: denied,
|
|
581
|
-
};
|
|
582
|
-
this.messages.push(deniedMsg);
|
|
583
|
-
saveMessage(this.sessionId, deniedMsg);
|
|
584
|
-
continue;
|
|
585
|
-
}
|
|
586
|
-
if (decision === "always") {
|
|
587
|
-
this.alwaysApproved.add(toolCall.name);
|
|
588
|
-
}
|
|
589
|
-
}
|
|
590
|
-
}
|
|
591
|
-
|
|
592
|
-
// Route to MCP or built-in tool
|
|
593
|
-
const mcpParsed = parseMCPToolName(toolCall.name);
|
|
594
|
-
let result: string;
|
|
595
|
-
if (mcpParsed) {
|
|
596
|
-
result = await callMCPTool(mcpParsed.serverName, mcpParsed.toolName, args);
|
|
597
|
-
} else {
|
|
598
|
-
result = await executeTool(toolCall.name, args, this.cwd);
|
|
599
|
-
}
|
|
600
|
-
this.options.onToolResult?.(toolCall.name, result);
|
|
601
|
-
|
|
602
|
-
// Auto-commit after successful write_file
|
|
603
|
-
if (this.gitEnabled && this.autoCommitEnabled && toolCall.name === "write_file" && result.startsWith("✅")) {
|
|
604
|
-
const path = String(args.path ?? "unknown");
|
|
605
|
-
const committed = autoCommit(this.cwd, path, "write");
|
|
606
|
-
if (committed) {
|
|
607
|
-
this.options.onGitCommit?.(`write ${path}`);
|
|
608
|
-
}
|
|
609
|
-
}
|
|
610
|
-
|
|
611
|
-
// Auto-lint after successful write_file
|
|
612
|
-
if (this.autoLintEnabled && this.detectedLinter && toolCall.name === "write_file" && result.startsWith("✅")) {
|
|
613
|
-
const filePath = String(args.path ?? "");
|
|
614
|
-
const lintErrors = runLinter(this.detectedLinter, filePath, this.cwd);
|
|
615
|
-
if (lintErrors) {
|
|
616
|
-
this.options.onLintResult?.(filePath, lintErrors);
|
|
617
|
-
const lintMsg: ChatCompletionMessageParam = {
|
|
618
|
-
role: "tool",
|
|
619
|
-
tool_call_id: toolCall.id,
|
|
620
|
-
content: result + `\n\nLint errors detected in ${filePath}:\n${lintErrors}\nPlease fix these issues.`,
|
|
621
|
-
};
|
|
622
|
-
this.messages.push(lintMsg);
|
|
623
|
-
saveMessage(this.sessionId, lintMsg);
|
|
624
|
-
continue;
|
|
625
|
-
}
|
|
626
|
-
}
|
|
627
|
-
|
|
628
|
-
const toolMsg: ChatCompletionMessageParam = {
|
|
629
|
-
role: "tool",
|
|
630
|
-
tool_call_id: toolCall.id,
|
|
631
|
-
content: result,
|
|
632
|
-
};
|
|
633
|
-
this.messages.push(toolMsg);
|
|
634
|
-
saveMessage(this.sessionId, toolMsg);
|
|
635
|
-
}
|
|
636
|
-
}
|
|
637
|
-
|
|
638
|
-
return "Max iterations reached. The agent may be stuck in a loop.";
|
|
639
|
-
}
|
|
640
|
-
|
|
641
|
-
/**
|
|
642
|
-
* Switch to a different model mid-session
|
|
643
|
-
*/
|
|
644
|
-
switchModel(model: string, baseUrl?: string, apiKey?: string): void {
|
|
645
|
-
this.model = model;
|
|
646
|
-
if (baseUrl || apiKey) {
|
|
647
|
-
this.client = new OpenAI({
|
|
648
|
-
baseURL: baseUrl ?? this.options.provider.baseUrl,
|
|
649
|
-
apiKey: apiKey ?? this.options.provider.apiKey,
|
|
650
|
-
});
|
|
651
|
-
}
|
|
652
|
-
}
|
|
653
|
-
|
|
654
|
-
getModel(): string {
|
|
655
|
-
return this.model;
|
|
656
|
-
}
|
|
657
|
-
|
|
658
|
-
setAutoCommit(enabled: boolean): void {
|
|
659
|
-
this.autoCommitEnabled = enabled;
|
|
660
|
-
}
|
|
661
|
-
|
|
662
|
-
isGitEnabled(): boolean {
|
|
663
|
-
return this.gitEnabled;
|
|
664
|
-
}
|
|
665
|
-
|
|
666
|
-
getContextLength(): number {
|
|
667
|
-
return this.messages.length;
|
|
668
|
-
}
|
|
669
|
-
|
|
670
|
-
/**
|
|
671
|
-
* Estimate token count across all messages (~4 chars per token)
|
|
672
|
-
*/
|
|
673
|
-
estimateTokens(): number {
|
|
674
|
-
let chars = 0;
|
|
675
|
-
for (const msg of this.messages) {
|
|
676
|
-
if (typeof msg.content === "string") {
|
|
677
|
-
chars += msg.content.length;
|
|
678
|
-
} else if (Array.isArray(msg.content)) {
|
|
679
|
-
for (const part of msg.content) {
|
|
680
|
-
if ("text" in part) chars += part.text.length;
|
|
681
|
-
}
|
|
682
|
-
}
|
|
683
|
-
// Count tool call arguments too
|
|
684
|
-
if ("tool_calls" in msg && Array.isArray((msg as any).tool_calls)) {
|
|
685
|
-
for (const tc of (msg as any).tool_calls) {
|
|
686
|
-
chars += (tc.function?.arguments?.length ?? 0);
|
|
687
|
-
chars += (tc.function?.name?.length ?? 0);
|
|
688
|
-
}
|
|
689
|
-
}
|
|
690
|
-
}
|
|
691
|
-
return Math.ceil(chars / 4);
|
|
692
|
-
}
|
|
693
|
-
|
|
694
|
-
/**
|
|
695
|
-
* Check if context needs compression and compress if threshold exceeded
|
|
696
|
-
*/
|
|
697
|
-
private async maybeCompressContext(): Promise<void> {
|
|
698
|
-
const currentTokens = this.estimateTokens();
|
|
699
|
-
if (currentTokens < this.compressionThreshold) return;
|
|
700
|
-
|
|
701
|
-
// Keep: system prompt (index 0) + last 10 messages
|
|
702
|
-
const keepCount = 10;
|
|
703
|
-
if (this.messages.length <= keepCount + 1) return; // Not enough to compress
|
|
704
|
-
|
|
705
|
-
const systemMsg = this.messages[0];
|
|
706
|
-
const middleMessages = this.messages.slice(1, this.messages.length - keepCount);
|
|
707
|
-
const recentMessages = this.messages.slice(this.messages.length - keepCount);
|
|
708
|
-
|
|
709
|
-
if (middleMessages.length === 0) return;
|
|
710
|
-
|
|
711
|
-
// Build a summary of the middle messages
|
|
712
|
-
const summaryParts: string[] = [];
|
|
713
|
-
for (const msg of middleMessages) {
|
|
714
|
-
if (msg.role === "user" && typeof msg.content === "string") {
|
|
715
|
-
summaryParts.push(`User: ${msg.content.slice(0, 200)}`);
|
|
716
|
-
} else if (msg.role === "assistant" && typeof msg.content === "string" && msg.content) {
|
|
717
|
-
summaryParts.push(`Assistant: ${msg.content.slice(0, 200)}`);
|
|
718
|
-
} else if (msg.role === "tool") {
|
|
719
|
-
// Skip tool messages in summary to save tokens
|
|
720
|
-
}
|
|
721
|
-
}
|
|
722
|
-
|
|
723
|
-
// Use the active model to summarize
|
|
724
|
-
const summaryPrompt = `Summarize this conversation history in 2-3 concise paragraphs. Focus on: what was discussed, what files were modified, what decisions were made, and any important context for continuing the conversation.\n\n${summaryParts.join("\n")}`;
|
|
725
|
-
|
|
726
|
-
try {
|
|
727
|
-
let summary: string;
|
|
728
|
-
if (this.providerType === "anthropic" && this.anthropicClient) {
|
|
729
|
-
const response = await this.anthropicClient.messages.create({
|
|
730
|
-
model: this.model,
|
|
731
|
-
max_tokens: 500,
|
|
732
|
-
messages: [{ role: "user", content: summaryPrompt }],
|
|
733
|
-
});
|
|
734
|
-
summary = response.content
|
|
735
|
-
.filter((b): b is Anthropic.TextBlock => b.type === "text")
|
|
736
|
-
.map((b) => b.text)
|
|
737
|
-
.join("");
|
|
738
|
-
} else {
|
|
739
|
-
const response = await this.client.chat.completions.create({
|
|
740
|
-
model: this.model,
|
|
741
|
-
max_tokens: 500,
|
|
742
|
-
messages: [{ role: "user", content: summaryPrompt }],
|
|
743
|
-
});
|
|
744
|
-
summary = response.choices[0]?.message?.content ?? "Previous conversation context.";
|
|
745
|
-
}
|
|
746
|
-
|
|
747
|
-
const compressedMsg: ChatCompletionMessageParam = {
|
|
748
|
-
role: "assistant",
|
|
749
|
-
content: `[Context compressed: ${summary}]`,
|
|
750
|
-
};
|
|
751
|
-
|
|
752
|
-
const oldTokens = currentTokens;
|
|
753
|
-
this.messages = [systemMsg, compressedMsg, ...recentMessages];
|
|
754
|
-
const newTokens = this.estimateTokens();
|
|
755
|
-
|
|
756
|
-
this.options.onContextCompressed?.(oldTokens, newTokens);
|
|
757
|
-
} catch {
|
|
758
|
-
// If summarization fails, just truncate without summary
|
|
759
|
-
const compressedMsg: ChatCompletionMessageParam = {
|
|
760
|
-
role: "assistant",
|
|
761
|
-
content: "[Context compressed: Earlier conversation history was removed to stay within token limits.]",
|
|
762
|
-
};
|
|
763
|
-
const oldTokens = currentTokens;
|
|
764
|
-
this.messages = [systemMsg, compressedMsg, ...recentMessages];
|
|
765
|
-
const newTokens = this.estimateTokens();
|
|
766
|
-
this.options.onContextCompressed?.(oldTokens, newTokens);
|
|
767
|
-
}
|
|
768
|
-
}
|
|
769
|
-
|
|
770
|
-
/** Accumulated token usage and estimated spend for this session. */
getCostInfo(): { promptTokens: number; completionTokens: number; totalCost: number } {
  const { totalPromptTokens, totalCompletionTokens, totalCost } = this;
  return {
    promptTokens: totalPromptTokens,
    completionTokens: totalCompletionTokens,
    totalCost,
  };
}
|
|
777
|
-
|
|
778
|
-
disableSkill(name: string): void {
|
|
779
|
-
this.sessionDisabledSkills.add(name);
|
|
780
|
-
}
|
|
781
|
-
|
|
782
|
-
enableSkill(name: string): void {
|
|
783
|
-
this.sessionDisabledSkills.delete(name);
|
|
784
|
-
}
|
|
785
|
-
|
|
786
|
-
getSessionDisabledSkills(): Set<string> {
|
|
787
|
-
return this.sessionDisabledSkills;
|
|
788
|
-
}
|
|
789
|
-
|
|
790
|
-
/**
 * Counts skills active in the working directory, delegating to the
 * module-level helper of the same name with the session's disabled set.
 */
getActiveSkillCount(): number {
  return getActiveSkillCount(this.cwd, this.sessionDisabledSkills);
}
|
|
793
|
-
|
|
794
|
-
/** The working directory this agent operates in. */
getCwd(): string {
  return this.cwd;
}
|
|
797
|
-
|
|
798
|
-
/** Source of the loaded project rules, or null if none were found. */
getProjectRulesSource(): string | null {
  return this.projectRulesSource;
}
|
|
801
|
-
|
|
802
|
-
setArchitectModel(model: string | null): void {
|
|
803
|
-
this.architectModel = model;
|
|
804
|
-
}
|
|
805
|
-
|
|
806
|
-
/** The configured architect (planning) model, or null when unset. */
getArchitectModel(): string | null {
  return this.architectModel;
}
|
|
809
|
-
|
|
810
|
-
setAutoLint(enabled: boolean): void {
|
|
811
|
-
this.autoLintEnabled = enabled;
|
|
812
|
-
}
|
|
813
|
-
|
|
814
|
-
/** Whether automatic linting is currently enabled. */
isAutoLintEnabled(): boolean {
  return this.autoLintEnabled;
}
|
|
817
|
-
|
|
818
|
-
/** The linter detected for this project, or null if none was found. */
getDetectedLinter(): { command: string; name: string } | null {
  return this.detectedLinter;
}
|
|
821
|
-
|
|
822
|
-
setDetectedLinter(linter: { command: string; name: string } | null): void {
|
|
823
|
-
this.detectedLinter = linter;
|
|
824
|
-
}
|
|
825
|
-
|
|
826
|
-
/**
|
|
827
|
-
* Run the architect model to generate a plan, then feed to editor model
|
|
828
|
-
*/
|
|
829
|
-
private async architectChat(userMessage: string): Promise<string> {
|
|
830
|
-
const architectSystemPrompt = "You are a senior software architect. Analyze the request and create a detailed implementation plan. List exactly which files to modify, what changes to make, and in what order. Do NOT write code — just plan.";
|
|
831
|
-
|
|
832
|
-
let plan = "";
|
|
833
|
-
|
|
834
|
-
if (this.providerType === "anthropic" && this.anthropicClient) {
|
|
835
|
-
const response = await this.anthropicClient.messages.create({
|
|
836
|
-
model: this.architectModel!,
|
|
837
|
-
max_tokens: this.maxTokens,
|
|
838
|
-
system: architectSystemPrompt,
|
|
839
|
-
messages: [{ role: "user", content: userMessage }],
|
|
840
|
-
});
|
|
841
|
-
plan = response.content
|
|
842
|
-
.filter((b): b is Anthropic.TextBlock => b.type === "text")
|
|
843
|
-
.map((b) => b.text)
|
|
844
|
-
.join("");
|
|
845
|
-
} else {
|
|
846
|
-
const response = await this.client.chat.completions.create({
|
|
847
|
-
model: this.architectModel!,
|
|
848
|
-
max_tokens: this.maxTokens,
|
|
849
|
-
messages: [
|
|
850
|
-
{ role: "system", content: architectSystemPrompt },
|
|
851
|
-
{ role: "user", content: userMessage },
|
|
852
|
-
],
|
|
853
|
-
});
|
|
854
|
-
plan = response.choices[0]?.message?.content ?? "(no plan generated)";
|
|
855
|
-
}
|
|
856
|
-
|
|
857
|
-
this.options.onArchitectPlan?.(plan);
|
|
858
|
-
|
|
859
|
-
// Feed plan + original request to the editor model
|
|
860
|
-
const editorPrompt = `## Architect Plan\n${plan}\n\n## Original Request\n${userMessage}\n\nExecute the plan above. Follow it step by step.`;
|
|
861
|
-
return this.chat(editorPrompt);
|
|
862
|
-
}
|
|
863
|
-
|
|
864
|
-
/** Number of currently connected MCP servers. */
getMCPServerCount(): number {
  return this.mcpServers.length;
}
|
|
867
|
-
|
|
868
|
-
/** The currently connected MCP servers (live array, not a copy). */
getMCPServers(): ConnectedServer[] {
  return this.mcpServers;
}
|
|
871
|
-
|
|
872
|
-
async disconnectMCP(): Promise<void> {
|
|
873
|
-
await disconnectAll();
|
|
874
|
-
this.mcpServers = [];
|
|
875
|
-
this.tools = FILE_TOOLS;
|
|
876
|
-
}
|
|
877
|
-
|
|
878
|
-
async reconnectMCP(): Promise<void> {
|
|
879
|
-
await this.disconnectMCP();
|
|
880
|
-
const mcpConfig = loadMCPConfig(this.cwd);
|
|
881
|
-
if (Object.keys(mcpConfig.mcpServers).length > 0) {
|
|
882
|
-
this.mcpServers = await connectToServers(mcpConfig, this.options.onMCPStatus);
|
|
883
|
-
if (this.mcpServers.length > 0) {
|
|
884
|
-
const mcpTools = getAllMCPTools(this.mcpServers);
|
|
885
|
-
this.tools = [...FILE_TOOLS, ...mcpTools];
|
|
886
|
-
}
|
|
887
|
-
}
|
|
888
|
-
}
|
|
889
|
-
|
|
890
|
-
reset(): void {
|
|
891
|
-
const systemMsg = this.messages[0];
|
|
892
|
-
this.messages = [systemMsg];
|
|
893
|
-
}
|
|
894
|
-
}
|