acmecode 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.acmecode/config.json +6 -0
- package/README.md +124 -0
- package/dist/agent/index.js +161 -0
- package/dist/cli/bin/acmecode.js +3 -0
- package/dist/cli/package.json +25 -0
- package/dist/cli/src/index.d.ts +1 -0
- package/dist/cli/src/index.js +53 -0
- package/dist/config/index.js +92 -0
- package/dist/context/index.js +30 -0
- package/dist/core/src/agent/index.d.ts +52 -0
- package/dist/core/src/agent/index.js +476 -0
- package/dist/core/src/config/index.d.ts +83 -0
- package/dist/core/src/config/index.js +318 -0
- package/dist/core/src/context/index.d.ts +1 -0
- package/dist/core/src/context/index.js +30 -0
- package/dist/core/src/llm/provider.d.ts +27 -0
- package/dist/core/src/llm/provider.js +202 -0
- package/dist/core/src/llm/vision.d.ts +7 -0
- package/dist/core/src/llm/vision.js +37 -0
- package/dist/core/src/mcp/index.d.ts +10 -0
- package/dist/core/src/mcp/index.js +84 -0
- package/dist/core/src/prompt/anthropic.d.ts +1 -0
- package/dist/core/src/prompt/anthropic.js +32 -0
- package/dist/core/src/prompt/architect.d.ts +1 -0
- package/dist/core/src/prompt/architect.js +17 -0
- package/dist/core/src/prompt/autopilot.d.ts +1 -0
- package/dist/core/src/prompt/autopilot.js +18 -0
- package/dist/core/src/prompt/beast.d.ts +1 -0
- package/dist/core/src/prompt/beast.js +83 -0
- package/dist/core/src/prompt/gemini.d.ts +1 -0
- package/dist/core/src/prompt/gemini.js +45 -0
- package/dist/core/src/prompt/index.d.ts +18 -0
- package/dist/core/src/prompt/index.js +239 -0
- package/dist/core/src/prompt/zen.d.ts +1 -0
- package/dist/core/src/prompt/zen.js +13 -0
- package/dist/core/src/session/index.d.ts +18 -0
- package/dist/core/src/session/index.js +97 -0
- package/dist/core/src/skills/index.d.ts +6 -0
- package/dist/core/src/skills/index.js +72 -0
- package/dist/core/src/tools/batch.d.ts +2 -0
- package/dist/core/src/tools/batch.js +65 -0
- package/dist/core/src/tools/browser.d.ts +7 -0
- package/dist/core/src/tools/browser.js +86 -0
- package/dist/core/src/tools/edit.d.ts +11 -0
- package/dist/core/src/tools/edit.js +312 -0
- package/dist/core/src/tools/index.d.ts +13 -0
- package/dist/core/src/tools/index.js +980 -0
- package/dist/core/src/tools/lsp-client.d.ts +11 -0
- package/dist/core/src/tools/lsp-client.js +224 -0
- package/dist/index.js +41 -0
- package/dist/llm/provider.js +34 -0
- package/dist/mcp/index.js +84 -0
- package/dist/session/index.js +74 -0
- package/dist/skills/index.js +32 -0
- package/dist/tools/index.js +96 -0
- package/dist/tui/App.js +297 -0
- package/dist/tui/Spinner.js +16 -0
- package/dist/tui/TextInput.js +98 -0
- package/dist/tui/src/App.d.ts +11 -0
- package/dist/tui/src/App.js +1211 -0
- package/dist/tui/src/CatLogo.d.ts +10 -0
- package/dist/tui/src/CatLogo.js +99 -0
- package/dist/tui/src/OptionList.d.ts +15 -0
- package/dist/tui/src/OptionList.js +60 -0
- package/dist/tui/src/Spinner.d.ts +7 -0
- package/dist/tui/src/Spinner.js +18 -0
- package/dist/tui/src/TextInput.d.ts +28 -0
- package/dist/tui/src/TextInput.js +139 -0
- package/dist/tui/src/Tips.d.ts +2 -0
- package/dist/tui/src/Tips.js +62 -0
- package/dist/tui/src/Toast.d.ts +19 -0
- package/dist/tui/src/Toast.js +39 -0
- package/dist/tui/src/TodoItem.d.ts +7 -0
- package/dist/tui/src/TodoItem.js +21 -0
- package/dist/tui/src/i18n.d.ts +172 -0
- package/dist/tui/src/i18n.js +189 -0
- package/dist/tui/src/markdown.d.ts +6 -0
- package/dist/tui/src/markdown.js +356 -0
- package/dist/tui/src/theme.d.ts +31 -0
- package/dist/tui/src/theme.js +239 -0
- package/output.txt +0 -0
- package/package.json +44 -0
- package/packages/cli/package.json +25 -0
- package/packages/cli/src/index.ts +59 -0
- package/packages/cli/tsconfig.json +26 -0
- package/packages/core/package.json +39 -0
- package/packages/core/src/agent/index.ts +588 -0
- package/packages/core/src/config/index.ts +383 -0
- package/packages/core/src/context/index.ts +34 -0
- package/packages/core/src/llm/provider.ts +237 -0
- package/packages/core/src/llm/vision.ts +43 -0
- package/packages/core/src/mcp/index.ts +110 -0
- package/packages/core/src/prompt/anthropic.ts +32 -0
- package/packages/core/src/prompt/architect.ts +17 -0
- package/packages/core/src/prompt/autopilot.ts +18 -0
- package/packages/core/src/prompt/beast.ts +83 -0
- package/packages/core/src/prompt/gemini.ts +45 -0
- package/packages/core/src/prompt/index.ts +267 -0
- package/packages/core/src/prompt/zen.ts +13 -0
- package/packages/core/src/session/index.ts +129 -0
- package/packages/core/src/skills/index.ts +86 -0
- package/packages/core/src/tools/batch.ts +73 -0
- package/packages/core/src/tools/browser.ts +95 -0
- package/packages/core/src/tools/edit.ts +317 -0
- package/packages/core/src/tools/index.ts +1112 -0
- package/packages/core/src/tools/lsp-client.ts +303 -0
- package/packages/core/tsconfig.json +19 -0
- package/packages/tui/package.json +24 -0
- package/packages/tui/src/App.tsx +1702 -0
- package/packages/tui/src/CatLogo.tsx +134 -0
- package/packages/tui/src/OptionList.tsx +95 -0
- package/packages/tui/src/Spinner.tsx +28 -0
- package/packages/tui/src/TextInput.tsx +202 -0
- package/packages/tui/src/Tips.tsx +64 -0
- package/packages/tui/src/Toast.tsx +60 -0
- package/packages/tui/src/TodoItem.tsx +29 -0
- package/packages/tui/src/i18n.ts +203 -0
- package/packages/tui/src/markdown.ts +403 -0
- package/packages/tui/src/theme.ts +287 -0
- package/packages/tui/tsconfig.json +24 -0
- package/tsconfig.json +18 -0
- package/vscode-acmecode/.vscodeignore +11 -0
- package/vscode-acmecode/README.md +57 -0
- package/vscode-acmecode/esbuild.js +46 -0
- package/vscode-acmecode/images/button-dark.svg +5 -0
- package/vscode-acmecode/images/button-light.svg +5 -0
- package/vscode-acmecode/images/icon.png +1 -0
- package/vscode-acmecode/package-lock.json +490 -0
- package/vscode-acmecode/package.json +87 -0
- package/vscode-acmecode/src/extension.ts +128 -0
- package/vscode-acmecode/tsconfig.json +16 -0
|
@@ -0,0 +1,588 @@
|
|
|
1
|
+
import { streamText, tool as createTool } from "ai";
|
|
2
|
+
import { existsSync, readFileSync, promises as fs } from "fs";
|
|
3
|
+
import { resolve } from "path";
|
|
4
|
+
import { getModel, ProviderType } from "../llm/provider.js";
|
|
5
|
+
import { builtInTools, toolExecutors, isCommandSafe } from "../tools/index.js";
|
|
6
|
+
import { getMcpTools } from "../mcp/index.js";
|
|
7
|
+
import { getSystemPrompt } from "../prompt/index.js";
|
|
8
|
+
import { ReasoningLevel, AgentMode } from "../config/index.js";
|
|
9
|
+
|
|
10
|
+
// ── Structured event types ──
// Discriminated union of every event runAgent can yield to its consumer.
export type AgentEvent =
  // Incremental assistant text, streamed as it arrives.
  | { type: "text"; text: string }
  // A fully-parsed tool invocation about to be (or just) executed.
  | {
      type: "tool-call";
      name: string;
      args: Record<string, unknown>;
      toolCallId?: string;
    }
  // Partial tool-call arguments while the provider is still streaming them.
  | {
      type: "tool-call-delta";
      name: string;
      args: Record<string, unknown>;
      partial?: boolean;
      toolCallId?: string;
    }
  // Stringified output of a completed tool execution.
  | { type: "tool-result"; name: string; result: string; toolCallId?: string }
  // Emitted just before a locally-executed (fallback-parsed) tool runs.
  | { type: "tool-generating"; name: string; args: Record<string, unknown> }
  // Asks the caller to approve a risky command; the caller resumes the
  // generator with a truthy value to approve (see runAgent's `yield` usage).
  | {
      type: "tool-approval-required";
      name: string;
      args: Record<string, unknown>;
      riskLevel?: string;
      isLocalGuard?: boolean;
    }
  // Snapshot of the working conversation history (and system-prompt length).
  | { type: "messages"; messages: any[]; promptLength?: number }
  // Progress marker: which loop iteration out of MAX_STEPS is running.
  | { type: "step"; step: number; maxSteps: number }
  // NOTE(review): "finish" is declared but never yielded by the code in this
  // file — presumably reserved for consumers or emitted elsewhere; verify.
  | { type: "finish"; usage: any }
  // The agent switched modes mid-run via the switch_mode tool.
  | { type: "mode-changed"; mode: AgentMode; planFile?: string };
|
|
39
|
+
|
|
40
|
+
// ── Configuration ──
// Hard cap on agent loop iterations (each iteration = one LLM stream + one tool round).
const MAX_STEPS = 80;
// Per-step ceiling on accumulated streamed text before the stream is cut off.
const MAX_OUTPUT_LENGTH = 30000;
// Transient-error retries delegated to the AI SDK's streamText call.
const MAX_RETRIES = 2;
|
|
44
|
+
|
|
45
|
+
// ── Model-specific output token limits ──
|
|
46
|
+
function getMaxOutputTokens(
|
|
47
|
+
modelName: string,
|
|
48
|
+
level: ReasoningLevel,
|
|
49
|
+
): number | undefined {
|
|
50
|
+
const lower = modelName.toLowerCase();
|
|
51
|
+
const isReasoningModel =
|
|
52
|
+
lower.includes("o1") || lower.includes("o3") || lower.includes("o4");
|
|
53
|
+
|
|
54
|
+
if (isReasoningModel) {
|
|
55
|
+
if (level === "max" || level === "xhigh") return 100000;
|
|
56
|
+
if (level === "high") return 64000;
|
|
57
|
+
return 32000;
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
if (lower.includes("4-6") || lower.includes("4.6")) {
|
|
61
|
+
// User specified Opus 4.6 (relay variant) supports 16000
|
|
62
|
+
return 16000;
|
|
63
|
+
}
|
|
64
|
+
if (lower.includes("claude-3-5") || lower.includes("sonnet")) {
|
|
65
|
+
return 8192;
|
|
66
|
+
}
|
|
67
|
+
if (lower.includes("claude")) {
|
|
68
|
+
// Standard Opus and other Claude models usually have a 4096 output limit.
|
|
69
|
+
return 4096;
|
|
70
|
+
}
|
|
71
|
+
if (lower.includes("gpt-4")) return 16384;
|
|
72
|
+
if (lower.includes("gemini")) return 65536;
|
|
73
|
+
if (lower.includes("deepseek")) return 16384;
|
|
74
|
+
return 4096; // Safe default for most models
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
// ── Reasoning Attempt Mapping ──
|
|
78
|
+
function getReasoningEffort(
|
|
79
|
+
level: ReasoningLevel,
|
|
80
|
+
): "low" | "medium" | "high" | undefined {
|
|
81
|
+
switch (level) {
|
|
82
|
+
case "low":
|
|
83
|
+
return "low";
|
|
84
|
+
case "medium":
|
|
85
|
+
return "medium";
|
|
86
|
+
case "high":
|
|
87
|
+
return "high";
|
|
88
|
+
case "max":
|
|
89
|
+
return "high";
|
|
90
|
+
case "xhigh":
|
|
91
|
+
return "high";
|
|
92
|
+
default:
|
|
93
|
+
return undefined;
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
// ── Temperature defaults ──
|
|
98
|
+
function getDefaultTemperature(modelName: string): number | undefined {
|
|
99
|
+
const lower = modelName.toLowerCase();
|
|
100
|
+
// Reasoning models and Claude don't support temperature or it's better to let provider decide
|
|
101
|
+
if (lower.includes("o1") || lower.includes("o3") || lower.includes("o4"))
|
|
102
|
+
return undefined;
|
|
103
|
+
if (lower.includes("claude")) return undefined;
|
|
104
|
+
if (lower.includes("deepseek-r1") || lower.includes("deepseek-reasoner"))
|
|
105
|
+
return undefined;
|
|
106
|
+
return 0;
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
/**
 * Run the AI agent with streaming, multi-step tool calls, and structured events.
 * Inspired by opencode's LLM.stream() pattern.
 *
 * Drives up to MAX_STEPS iterations; each iteration makes exactly one
 * streamText call (maxSteps: 1) and this loop — not the SDK — executes tool
 * calls and feeds results back as a synthetic user message.
 *
 * @param provider        Provider key used by getModel/getSystemPrompt.
 * @param modelName       Model id; also drives token/temperature/effort defaults.
 * @param messages        Conversation history. The array is copied, but message
 *                        parts inside it may be normalized in place (see below).
 * @param systemPrompt    Fixed system prompt; when omitted it is built via
 *                        getSystemPrompt and rebuilt after a mode switch.
 * @param abortSignal     Cooperative cancellation, checked per stream chunk.
 * @param reasoningLevel  Mapped to provider reasoning effort and output budget.
 * @param agentMode       Starting mode; "plan" removes mutating tools. May be
 *                        changed at runtime by the switch_mode tool.
 * @param activePlanFile  Plan/context file forwarded to getSystemPrompt.
 * @param activeSkillContent Skill text forwarded to getSystemPrompt.
 * @yields AgentEvent items. A yielded "tool-approval-required" event expects
 *         the caller to resume the generator with a truthy value to approve.
 * @returns Always [] — the final history is delivered via "messages" events.
 */
export async function* runAgent(
  provider: ProviderType,
  modelName: string,
  messages: any[],
  systemPrompt?: string,
  abortSignal?: AbortSignal,
  reasoningLevel: ReasoningLevel = "medium",
  agentMode: AgentMode = "agent",
  activePlanFile?: string,
  activeSkillContent?: string,
): AsyncGenerator<AgentEvent, any[], unknown> {
  // ── Setup: model handle, MCP tools, and per-model sampling knobs ──
  const model = getModel(provider, modelName);
  const mcpTools = await getMcpTools();

  let currentMessages = [...messages];
  let stepCount = 0;
  // Dedupe key set for fallback-parsed tool calls, across ALL steps.
  const globalCalledSet = new Set<string>();

  const temperature = getDefaultTemperature(modelName);
  const maxOutputTokens = getMaxOutputTokens(modelName, reasoningLevel);
  const reasoningEffort = getReasoningEffort(reasoningLevel);

  let currentMode = agentMode;
  let currentPlanFile = activePlanFile;

  // We keep system dynamic so it updates properly if the agent switches mode mid-loop.
  let system =
    systemPrompt ||
    (await getSystemPrompt(
      provider,
      modelName,
      currentMode,
      currentPlanFile,
      activeSkillContent,
    ));
  // Surface the initial history (and prompt size) so the UI can render immediately.
  yield {
    type: "messages",
    messages: [...messages],
    promptLength: system.length,
  };

  let consecutiveErrors = 0;

  // ── Main agent loop: one LLM stream + one tool round per iteration ──
  while (stepCount < MAX_STEPS) {
    stepCount++;
    yield { type: "step", step: stepCount, maxSteps: MAX_STEPS };

    const currentTools: Record<string, any> = {
      ...builtInTools,
      ...mcpTools,
    };

    // Plan mode restricts the toolset: no shell, no edits, no LSP, no grep.
    if (currentMode === "plan") {
      delete currentTools["run_command"];
      delete currentTools["edit_file"];
      delete currentTools["lsp"];
      delete currentTools["grep_search"];
    }

    let stepText = "";
    const collectedToolCalls: {
      name: string;
      args: Record<string, unknown>;
    }[] = [];
    const collectedToolResults: { name: string; result: string }[] = [];
    let modeDidSwitch = false;

    // ── Message cleaning (especially for Anthropic) ──
    // Drops empty text parts and empty/contentless messages.
    // NOTE(review): the filter callback rewrites part.toolCallId IN PLACE on
    // the original message objects (not on copies) — looks intentional so IDs
    // stay normalized across steps, but confirm callers don't rely on raw IDs.
    const purifiedMessages = currentMessages
      .map((msg) => {
        if (typeof msg.content === "string") return msg;
        if (!Array.isArray(msg.content)) return msg;

        return {
          ...msg,
          content: msg.content.filter((part: any) => {
            if (part.type === "text")
              return part.text && part.text.trim().length > 0;
            if (part.type === "tool-call" || part.type === "tool-result") {
              // Normalize tool call IDs to alphanumeric + underscores
              if (part.toolCallId) {
                part.toolCallId = part.toolCallId.replace(
                  /[^a-zA-Z0-9_-]/g,
                  "_",
                );
              }
            }
            return true;
          }),
        };
      })
      .filter((msg) => {
        if (!msg.content) return false;
        if (Array.isArray(msg.content) && msg.content.length === 0)
          return false;
        return true;
      });

    try {
      // ── Single streaming LLM call; maxSteps: 1 keeps loop control here ──
      const result = await streamText({
        model,
        messages: purifiedMessages,
        system,
        tools: currentTools,
        maxSteps: 1,
        maxRetries: MAX_RETRIES,
        temperature,
        maxOutputTokens,
        abortSignal,
        experimental_toolCallStreaming: true,
        // Provider-specific extras: OpenAI reasoning effort, Anthropic
        // extended thinking (sonnet/opus only), Gemini thought streaming.
        providerOptions: {
          openai: reasoningEffort ? { reasoningEffort } : undefined,
          anthropic:
            provider === "anthropic" &&
            (modelName.includes("sonnet") || modelName.includes("opus"))
              ? {
                  thinking: {
                    type: "enabled",
                    budgetTokens: Math.min(maxOutputTokens || 4096, 16000),
                  },
                }
              : undefined,
          google:
            provider === "google" && modelName.includes("thinking")
              ? {
                  thinkingConfig: { includeThoughts: true },
                }
              : undefined,
        },

        // ── Tool name repair (from opencode) ──
        // First try simple lowercasing; otherwise route to the "invalid" tool
        // with the error embedded so the model can self-correct next turn.
        async experimental_repairToolCall(failed: any) {
          const lower = failed.toolCall.toolName.toLowerCase();
          if (lower !== failed.toolCall.toolName && currentTools[lower]) {
            return { ...failed.toolCall, toolName: lower };
          }
          return {
            ...failed.toolCall,
            input: JSON.stringify({
              tool: failed.toolCall.toolName,
              error: failed.error?.message || "Unknown tool",
            }),
            toolName: "invalid",
          };
        },
      } as any);

      let lastFinishReason: string | undefined;
      const yieldedToolCalls = new Set<string>();

      // ── Consume the stream chunk-by-chunk ──
      for await (const chunk of result.fullStream) {
        if (abortSignal?.aborted) {
          yield { type: "text", text: "\n[Aborted by user]\n" };
          yield { type: "messages", messages: currentMessages };
          return [];
        }

        if (chunk.type === "text-delta") {
          // Field name differs across AI SDK versions (textDelta vs text).
          const text = (chunk as any).textDelta || (chunk as any).text || "";
          if (text) {
            stepText += text;
            if (stepText.length > MAX_OUTPUT_LENGTH) {
              yield {
                type: "text",
                text: "\n[Error: Output exceeded maximum length]\n",
              };
              break;
            }
            yield { type: "text", text };
          }
        } else if (chunk.type === "tool-call") {
          const name = (chunk as any).toolName || "";
          const rawArgs = (chunk as any).input || (chunk as any).args;
          const args =
            typeof rawArgs?.toObject === "function"
              ? rawArgs.toObject()
              : rawArgs || {};
          const id = (chunk as any).toolCallId;
          // Dedupe by id: tool execution itself is done by the SDK here.
          if (name && id && !yieldedToolCalls.has(id)) {
            yieldedToolCalls.add(id);
            collectedToolCalls.push({ name, args });
            yield { type: "tool-call", name, args, toolCallId: id };
          }
        } else if ((chunk as any).type === "tool-call-delta") {
          const name = (chunk as any).toolName || "";
          const rawArgs = (chunk as any).input || (chunk as any).args;
          const args =
            typeof rawArgs?.toObject === "function"
              ? rawArgs.toObject()
              : rawArgs || {};
          const id = (chunk as any).toolCallId;
          if (name && id) {
            yield {
              type: "tool-call-delta",
              name,
              args,
              partial: !!(chunk as any).partial,
            };
          }
        } else if (chunk.type === "tool-result") {
          const name = (chunk as any).toolName || "";
          const toolCallId = (chunk as any).toolCallId;
          const rawResult =
            (chunk as any).result ?? (chunk as any).output ?? "";
          const resultStr =
            typeof rawResult === "string"
              ? rawResult
              : JSON.stringify(rawResult);
          collectedToolResults.push({ name, result: resultStr });
          yield { type: "tool-result", name, result: resultStr, toolCallId };

          // switch_mode results carry the new mode/plan file in their payload.
          if (name === "switch_mode") {
            try {
              const parsed =
                typeof rawResult === "object"
                  ? rawResult
                  : JSON.parse(resultStr);
              if (parsed.switched_mode) {
                currentMode = parsed.switched_mode;
                currentPlanFile = parsed.contextFile;
                modeDidSwitch = true;
                yield {
                  type: "mode-changed",
                  mode: currentMode as AgentMode,
                  planFile: currentPlanFile,
                };
              }
            } catch {}
          }
        } else if (chunk.type === "finish") {
          lastFinishReason = (chunk as any).finishReason || "unknown";
        }
      }

      // Proactive checks for silent stream drops (graceful socket close but incomplete thought)
      const isAbruptDrop =
        !lastFinishReason ||
        ["unknown", "error", "length"].includes(lastFinishReason);
      if (isAbruptDrop && collectedToolCalls.length === 0) {
        // Ignore abrupt drops if it's obvious a tool was meant to be used but we couldn't parse it?
        // Actually, if we couldn't parse it, we want the LLM to resume and finish the syntax.
        // Thrown error is caught below and drives the auto-recovery path.
        throw new Error(
          `Stream terminated prematurely with finish_reason: ${lastFinishReason || "No finish chunk received"}`,
        );
      }

      consecutiveErrors = 0; // Reset upon successful stream completion
    } catch (err: any) {
      // Abort is a clean exit; anything else triggers auto-recovery:
      // keep partial text, nudge the model to continue, back off 2s.
      if (err.name === "AbortError" || abortSignal?.aborted) {
        yield { type: "text", text: "\n[Aborted by user]\n" };
        yield { type: "messages", messages: currentMessages };
        return [];
      } else {
        yield {
          type: "text",
          text: `\n[Error: ${err.message}]\n[Auto-recovering from stream drop...]\n`,
        };

        if (stepText.trim()) {
          currentMessages.push({ role: "assistant", content: stepText });
        }

        consecutiveErrors++;
        if (consecutiveErrors >= 3) {
          yield {
            type: "text",
            text: `\n[Fatal Error: Stream dropped ${consecutiveErrors} times consecutively. Halting agent to prevent infinite loops.]\n`,
          };
          yield { type: "messages", messages: currentMessages };
          return [];
        }

        currentMessages.push({
          role: "user",
          content: `[System Error: The network stream disconnected prematurely with error: ${err.message || err}. Please carefully analyze the incomplete text you just generated, and continue your thought or tool execution precisely from where it was cut off without repeating yourself.]`,
        });

        await new Promise((resolve) => setTimeout(resolve, 2000));
        continue;
      }
    }

    // ── Fallback: no structured tool calls — parse <tool>…</tool> or
    // [tool(...)] tags out of the streamed text and execute them locally ──
    if (collectedToolCalls.length === 0) {
      const parseText = stepText
        .replace(/<think>[\s\S]*?<\/think>/g, "")
        .trim();

      for (const [toolName, toolFn] of Object.entries(toolExecutors)) {
        const patterns = [
          new RegExp(`<${toolName}>([\\s\\S]*?)<\\/${toolName}>`, "g"),
          new RegExp(`\\[${toolName}\\(([^\\[\\]]*)\\)\\]`, "g"),
        ];

        for (const regex of patterns) {
          regex.lastIndex = 0;
          let match;
          while ((match = regex.exec(parseText)) !== null) {
            const innerText = match[1].trim();
            const args: Record<string, string> = {};

            // Inner <key>value</key> pairs become named arguments.
            const argRegex = /<(\w+)>([\s\S]*?)<\/\1>/g;
            let argMatch;
            let foundInnerArgs = false;
            while ((argMatch = argRegex.exec(innerText)) !== null) {
              args[argMatch[1]] = argMatch[2].trim();
              foundInnerArgs = true;
            }

            // Bare body (no inner tags): interpret as the tool's main arg.
            if (!foundInnerArgs && innerText) {
              if (toolName === "list_dir" || toolName === "read_file") {
                args.path = innerText.replace(/^["']|["']$/g, "").trim();
              } else if (toolName === "run_command") {
                args.command = innerText.replace(/^["']|["']$/g, "").trim();
                if (!args.riskLevel) {
                  // Default to high if model didn't self-assess in fallback tag
                  args.riskLevel = "high";
                }
              }
            }

            const firstArgValue = Object.values(args)[0] as string | undefined;
            if (!firstArgValue) continue;

            // Skip exact repeats of a call already made in any prior step.
            const dedupKey = `${toolName}:${JSON.stringify(args)}`;
            if (globalCalledSet.has(dedupKey)) continue;
            globalCalledSet.add(dedupKey);

            collectedToolCalls.push({ name: toolName, args });

            // Safety Check for run_command
            if (toolName === "run_command") {
              const localDanger = !isCommandSafe(args.command);
              const modelRisk = args.riskLevel as string;
              const needsApproval =
                localDanger || modelRisk === "medium" || modelRisk === "high";

              if (needsApproval) {
                // Generator protocol: the caller resumes the generator with a
                // truthy value (via .next(value)) to approve the command.
                const approved = yield {
                  type: "tool-approval-required",
                  name: toolName,
                  args,
                  riskLevel: modelRisk,
                  isLocalGuard: localDanger,
                };
                if (!approved) {
                  const resultStr =
                    "Error: User denied execution of dangerous command.";
                  collectedToolResults.push({
                    name: toolName,
                    result: resultStr,
                  });
                  yield { type: "tool-call", name: toolName, args };
                  yield {
                    type: "tool-result",
                    name: toolName,
                    result: resultStr,
                  };
                  continue;
                }
              }
            }

            yield { type: "tool-call", name: toolName, args };
            yield { type: "tool-generating", name: toolName, args };

            try {
              const result = await toolFn(args as any);
              const resultStr =
                typeof result === "string" ? result : JSON.stringify(result);
              collectedToolResults.push({ name: toolName, result: resultStr });
              yield { type: "tool-result", name: toolName, result: resultStr };

              // Same mode-switch handling as the streamed tool-result path.
              if (toolName === "switch_mode") {
                try {
                  const parsed =
                    typeof result === "object" ? result : JSON.parse(resultStr);
                  if (parsed.switched_mode) {
                    currentMode = parsed.switched_mode;
                    currentPlanFile = parsed.contextFile;
                    modeDidSwitch = true;
                    yield {
                      type: "mode-changed",
                      mode: currentMode as AgentMode,
                      planFile: currentPlanFile,
                    };
                  }
                } catch {}
              }
            } catch (err: any) {
              const resultStr = `Error: ${err.message}`;
              collectedToolResults.push({ name: toolName, result: resultStr });
              yield { type: "tool-call", name: toolName, args }; // Emitted after error for history mapping
              yield { type: "tool-result", name: toolName, result: resultStr };
            }
          }
        }
      }
    }

    // ── Termination: still no tool calls means the turn is final (or empty) ──
    if (collectedToolCalls.length === 0) {
      if (!stepText.trim()) {
        // Empty response: retry up to 3 times with a nudge, then halt.
        consecutiveErrors++;
        if (consecutiveErrors < 3) {
          yield {
            type: "text",
            text: `\n[System Notice: The model returned an empty response. Auto-retrying (${consecutiveErrors}/3)...]\n`,
          };
          currentMessages.push({
            role: "user",
            content: `[System] Your previous response was empty. Please continue your task or provide a status update.`,
          });
          await new Promise((resolve) => setTimeout(resolve, 1000));
          continue;
        } else {
          yield {
            type: "text",
            text: `\n[Fatal Error: Model returned empty response 3 times consecutively. Halting.]\n`,
          };
          currentMessages.push({ role: "assistant", content: stepText });
          yield { type: "messages", messages: currentMessages };
          return [];
        }
      }

      // Plain text answer with no tool use: record it and finish.
      currentMessages.push({ role: "assistant", content: stepText });
      yield { type: "messages", messages: currentMessages };
      return [];
    }

    yield { type: "text", text: "\n" };

    // Summarize this round's tool results into one synthetic user message.
    // NOTE(review): results are paired to calls by array index; if a streamed
    // tool-call was deduped or a result arrived out of order the pairing could
    // skew — confirm against the SDK's emission order.
    const toolSummary = collectedToolResults
      .map((tr, i) => {
        const tc = collectedToolCalls[i];
        const argsStr = tc
          ? Object.entries(tc.args)
              .filter(([k]) => k !== "content")
              .map(([k, v]) => `${k}=${JSON.stringify(v)}`)
              .join(", ")
          : "";
        const resultBody =
          tr.result.length > 10000
            ? tr.result.slice(0, 10000) + "\n... (truncated)"
            : tr.result;
        return `### Tool: ${tr.name}(${argsStr})\n\`\`\`\n${resultBody}\n\`\`\``;
      })
      .join("\n\n");

    currentMessages.push({ role: "assistant", content: stepText });
    currentMessages.push({
      role: "user",
      content: `[SYSTEM] Tool results:\n\n${toolSummary}\n\nContinue with your task. Do NOT re-call the same tools.`,
    });

    // Recompute system prompt immediately if mode changed so the next loop has the right persona/length
    if (modeDidSwitch && !systemPrompt) {
      system = await getSystemPrompt(
        provider,
        modelName,
        currentMode as AgentMode,
        currentPlanFile,
        activeSkillContent,
      );
    }

    yield {
      type: "messages",
      messages: currentMessages,
      promptLength: system.length,
    };
  }

  // Loop exhausted without a final answer.
  yield { type: "text", text: "\n[Reached maximum steps]\n" };
  yield { type: "messages", messages: currentMessages };
  return [];
}
|