@acmecloud/core 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent/index.d.ts +52 -0
- package/dist/agent/index.js +476 -0
- package/dist/config/index.d.ts +83 -0
- package/dist/config/index.js +318 -0
- package/dist/context/index.d.ts +1 -0
- package/dist/context/index.js +30 -0
- package/dist/llm/provider.d.ts +27 -0
- package/dist/llm/provider.js +202 -0
- package/dist/llm/vision.d.ts +7 -0
- package/dist/llm/vision.js +37 -0
- package/dist/mcp/index.d.ts +10 -0
- package/dist/mcp/index.js +84 -0
- package/dist/prompt/anthropic.d.ts +1 -0
- package/dist/prompt/anthropic.js +32 -0
- package/dist/prompt/architect.d.ts +1 -0
- package/dist/prompt/architect.js +17 -0
- package/dist/prompt/autopilot.d.ts +1 -0
- package/dist/prompt/autopilot.js +18 -0
- package/dist/prompt/beast.d.ts +1 -0
- package/dist/prompt/beast.js +83 -0
- package/dist/prompt/gemini.d.ts +1 -0
- package/dist/prompt/gemini.js +45 -0
- package/dist/prompt/index.d.ts +18 -0
- package/dist/prompt/index.js +239 -0
- package/dist/prompt/zen.d.ts +1 -0
- package/dist/prompt/zen.js +13 -0
- package/dist/session/index.d.ts +18 -0
- package/dist/session/index.js +97 -0
- package/dist/skills/index.d.ts +6 -0
- package/dist/skills/index.js +72 -0
- package/dist/tools/batch.d.ts +2 -0
- package/dist/tools/batch.js +65 -0
- package/dist/tools/browser.d.ts +7 -0
- package/dist/tools/browser.js +86 -0
- package/dist/tools/edit.d.ts +11 -0
- package/dist/tools/edit.js +312 -0
- package/dist/tools/index.d.ts +13 -0
- package/dist/tools/index.js +980 -0
- package/dist/tools/lsp-client.d.ts +11 -0
- package/dist/tools/lsp-client.js +224 -0
- package/package.json +42 -0
- package/src/agent/index.ts +588 -0
- package/src/config/index.ts +383 -0
- package/src/context/index.ts +34 -0
- package/src/llm/provider.ts +237 -0
- package/src/llm/vision.ts +43 -0
- package/src/mcp/index.ts +110 -0
- package/src/prompt/anthropic.ts +32 -0
- package/src/prompt/architect.ts +17 -0
- package/src/prompt/autopilot.ts +18 -0
- package/src/prompt/beast.ts +83 -0
- package/src/prompt/gemini.ts +45 -0
- package/src/prompt/index.ts +267 -0
- package/src/prompt/zen.ts +13 -0
- package/src/session/index.ts +129 -0
- package/src/skills/index.ts +86 -0
- package/src/tools/batch.ts +73 -0
- package/src/tools/browser.ts +95 -0
- package/src/tools/edit.ts +317 -0
- package/src/tools/index.ts +1112 -0
- package/src/tools/lsp-client.ts +303 -0
- package/tsconfig.json +19 -0
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
import { ProviderType } from "../llm/provider.js";
import { ReasoningLevel, AgentMode } from "../config/index.js";
/**
 * Discriminated union (on `type`) of every event yielded by {@link runAgent}.
 * Consumers switch on `type` to render streaming text, tool activity,
 * bookkeeping snapshots, and mode changes.
 */
export type AgentEvent = {
    /** Incremental assistant text (a streamed delta, not the full message). */
    type: "text";
    text: string;
} | {
    /** A complete tool invocation was observed. */
    type: "tool-call";
    name: string;
    args: Record<string, unknown>;
    toolCallId?: string;
} | {
    /** Partial tool-call arguments while the model is still streaming them. */
    type: "tool-call-delta";
    name: string;
    args: Record<string, unknown>;
    partial?: boolean;
    toolCallId?: string;
} | {
    /** Stringified result of an executed tool. */
    type: "tool-result";
    name: string;
    result: string;
    toolCallId?: string;
} | {
    /** Emitted just before a fallback-parsed tool is executed. */
    type: "tool-generating";
    name: string;
    args: Record<string, unknown>;
} | {
    /**
     * Two-way event: the driver must resume the generator via `next(approved)`;
     * a falsy value denies execution of the risky command.
     */
    type: "tool-approval-required";
    name: string;
    args: Record<string, unknown>;
    riskLevel?: string;
    isLocalGuard?: boolean;
} | {
    /** Snapshot of the accumulated conversation history. */
    type: "messages";
    messages: any[];
    promptLength?: number;
} | {
    /** Progress marker: current loop iteration out of the step budget. */
    type: "step";
    step: number;
    maxSteps: number;
} | {
    /** Final usage statistics. */
    type: "finish";
    usage: any;
} | {
    /** The agent switched modes (e.g. via the switch_mode tool). */
    type: "mode-changed";
    mode: AgentMode;
    planFile?: string;
};
/**
 * Run the AI agent with streaming, multi-step tool calls, and structured events.
 * Inspired by opencode's LLM.stream() pattern.
 *
 * Returns an async generator of {@link AgentEvent}; the generator's return
 * value is an array (empty on halt/abort). The third generator parameter is
 * the value supplied via `next()` in response to `tool-approval-required`.
 */
export declare function runAgent(provider: ProviderType, modelName: string, messages: any[], systemPrompt?: string, abortSignal?: AbortSignal, reasoningLevel?: ReasoningLevel, agentMode?: AgentMode, activePlanFile?: string, activeSkillContent?: string): AsyncGenerator<AgentEvent, any[], unknown>;
|
|
@@ -0,0 +1,476 @@
|
|
|
1
|
+
import { streamText } from "ai";
|
|
2
|
+
import { getModel } from "../llm/provider.js";
|
|
3
|
+
import { builtInTools, toolExecutors, isCommandSafe } from "../tools/index.js";
|
|
4
|
+
import { getMcpTools } from "../mcp/index.js";
|
|
5
|
+
import { getSystemPrompt } from "../prompt/index.js";
|
|
6
|
+
// ── Configuration ──
const MAX_STEPS = 80; // Hard cap on agent loop iterations before a forced stop
const MAX_OUTPUT_LENGTH = 30000; // Per-step cap on streamed text; the step is cut off beyond this
const MAX_RETRIES = 2; // Retry count handed to streamText's built-in retry logic
|
|
10
|
+
// ── Model-specific output token limits ──
|
|
11
|
+
/**
 * Pick a max-output-token budget from substring heuristics on the model name.
 * Reasoning-style models (o1/o3/o4) scale their budget with the requested
 * reasoning level; all other families get a fixed per-family cap.
 */
function getMaxOutputTokens(modelName, level) {
    const id = modelName.toLowerCase();
    const hasAny = (...needles) => needles.some((needle) => id.includes(needle));
    if (hasAny("o1", "o3", "o4")) {
        if (level === "max" || level === "xhigh")
            return 100000;
        return level === "high" ? 64000 : 32000;
    }
    // User specified Opus 4.6 (relay variant) supports 16000
    if (hasAny("4-6", "4.6"))
        return 16000;
    if (hasAny("claude-3-5", "sonnet"))
        return 8192;
    // Standard Opus and other Claude models usually have a 4096 output limit.
    if (hasAny("claude"))
        return 4096;
    if (hasAny("gpt-4"))
        return 16384;
    if (hasAny("gemini"))
        return 65536;
    if (hasAny("deepseek"))
        return 16384;
    return 4096; // Safe default for most models
}
|
|
40
|
+
// ── Reasoning Attempt Mapping ──
|
|
41
|
+
/**
 * Map the config-level reasoning setting onto the provider's effort scale.
 * "max" and "xhigh" have no provider-side equivalent, so they collapse to
 * "high"; unrecognized levels yield undefined (no effort option is sent).
 */
function getReasoningEffort(level) {
    const effortByLevel = {
        low: "low",
        medium: "medium",
        high: "high",
        max: "high",
        xhigh: "high",
    };
    // Own-property guard so inherited keys ("toString", …) don't leak through.
    return Object.prototype.hasOwnProperty.call(effortByLevel, level)
        ? effortByLevel[level]
        : undefined;
}
|
|
57
|
+
// ── Temperature defaults ──
|
|
58
|
+
/**
 * Default sampling temperature for a model, by name heuristic.
 * Reasoning models and Claude don't support temperature or it's better to
 * let the provider decide (undefined omits the parameter); everything else
 * defaults to 0 for deterministic tool use.
 */
function getDefaultTemperature(modelName) {
    const id = modelName.toLowerCase();
    const providerDecides = [
        "o1",
        "o3",
        "o4",
        "claude",
        "deepseek-r1",
        "deepseek-reasoner",
    ];
    return providerDecides.some((marker) => id.includes(marker)) ? undefined : 0;
}
|
|
69
|
+
/**
 * Run the AI agent with streaming, multi-step tool calls, and structured events.
 * Inspired by opencode's LLM.stream() pattern.
 *
 * Yields AgentEvent objects (text deltas, tool calls/results, step markers,
 * message snapshots, mode changes) and returns [] when the loop ends for any
 * reason (done, aborted, fatal error, max steps). The `tool-approval-required`
 * yield is a two-way channel: the driver's `next(approved)` value decides
 * whether a risky run_command executes.
 *
 * @param provider        LLM provider id, forwarded to getModel().
 * @param modelName       Model identifier; also drives the token/temperature heuristics above.
 * @param messages        Initial chat history. The array is copied, but tool-call
 *                        part IDs inside the message objects are normalized in place.
 * @param systemPrompt    Optional fixed system prompt; when omitted it is built
 *                        (and rebuilt on mode switches) via getSystemPrompt().
 * @param abortSignal     Checked between stream chunks and in the catch path.
 * @param reasoningLevel  "low" | "medium" | "high" | "max" | "xhigh" (default "medium").
 * @param agentMode       Starting mode; "plan" strips mutating tools (default "agent").
 * @param activePlanFile  Plan file forwarded to the system prompt builder.
 * @param activeSkillContent Extra skill text forwarded to the system prompt builder.
 */
export async function* runAgent(provider, modelName, messages, systemPrompt, abortSignal, reasoningLevel = "medium", agentMode = "agent", activePlanFile, activeSkillContent) {
    const model = getModel(provider, modelName);
    const mcpTools = await getMcpTools();
    // Working copy of the chat history; grows as steps complete.
    let currentMessages = [...messages];
    let stepCount = 0;
    // Dedup set for fallback (text-parsed) tool calls, shared across ALL steps.
    const globalCalledSet = new Set();
    const temperature = getDefaultTemperature(modelName);
    const maxOutputTokens = getMaxOutputTokens(modelName, reasoningLevel);
    const reasoningEffort = getReasoningEffort(reasoningLevel);
    let currentMode = agentMode;
    let currentPlanFile = activePlanFile;
    // We keep system dynamic so it updates properly if the agent switches mode mid-loop.
    let system = systemPrompt ||
        (await getSystemPrompt(provider, modelName, currentMode, currentPlanFile, activeSkillContent));
    yield {
        type: "messages",
        messages: [...messages],
        promptLength: system.length,
    };
    let consecutiveErrors = 0;
    while (stepCount < MAX_STEPS) {
        stepCount++;
        yield { type: "step", step: stepCount, maxSteps: MAX_STEPS };
        // Rebuilt each step so mode switches take effect on the next iteration.
        const currentTools = {
            ...builtInTools,
            ...mcpTools,
        };
        // Plan mode is read-only: remove tools that can mutate the workspace.
        if (currentMode === "plan") {
            delete currentTools["run_command"];
            delete currentTools["edit_file"];
            delete currentTools["lsp"];
            delete currentTools["grep_search"];
        }
        let stepText = "";
        const collectedToolCalls = [];
        const collectedToolResults = [];
        let modeDidSwitch = false;
        // ── Message cleaning (especially for Anthropic) ──
        // Drops empty text parts and normalizes tool-call IDs (mutating the part
        // objects in place) so providers with strict ID charsets accept them.
        const purifiedMessages = currentMessages
            .map((msg) => {
            if (typeof msg.content === "string")
                return msg;
            if (!Array.isArray(msg.content))
                return msg;
            return {
                ...msg,
                content: msg.content.filter((part) => {
                    if (part.type === "text")
                        return part.text && part.text.trim().length > 0;
                    if (part.type === "tool-call" || part.type === "tool-result") {
                        // Normalize tool call IDs to alphanumeric + underscores
                        if (part.toolCallId) {
                            part.toolCallId = part.toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_");
                        }
                    }
                    return true;
                }),
            };
        })
            .filter((msg) => {
            // Messages emptied by the cleaning pass above are dropped entirely.
            if (!msg.content)
                return false;
            if (Array.isArray(msg.content) && msg.content.length === 0)
                return false;
            return true;
        });
        try {
            const result = await streamText({
                model,
                messages: purifiedMessages,
                system,
                tools: currentTools,
                // One model turn per loop iteration; the outer while-loop is the
                // multi-step driver.
                maxSteps: 1,
                maxRetries: MAX_RETRIES,
                temperature,
                maxOutputTokens,
                abortSignal,
                experimental_toolCallStreaming: true,
                providerOptions: {
                    openai: reasoningEffort ? { reasoningEffort } : undefined,
                    anthropic: provider === "anthropic" &&
                        (modelName.includes("sonnet") || modelName.includes("opus"))
                        ? {
                            thinking: {
                                type: "enabled",
                                budgetTokens: Math.min(maxOutputTokens || 4096, 16000),
                            },
                        }
                        : undefined,
                    google: provider === "google" && modelName.includes("thinking")
                        ? {
                            thinkingConfig: { includeThoughts: true },
                        }
                        : undefined,
                },
                // ── Tool name repair (from opencode) ──
                // Lowercases a mis-cased tool name when that fixes it; otherwise
                // reroutes the call to the "invalid" tool so the loop can continue.
                // NOTE(review): assumes an "invalid" tool is registered in
                // builtInTools — confirm in tools/index.
                async experimental_repairToolCall(failed) {
                    const lower = failed.toolCall.toolName.toLowerCase();
                    if (lower !== failed.toolCall.toolName && currentTools[lower]) {
                        return { ...failed.toolCall, toolName: lower };
                    }
                    return {
                        ...failed.toolCall,
                        input: JSON.stringify({
                            tool: failed.toolCall.toolName,
                            error: failed.error?.message || "Unknown tool",
                        }),
                        toolName: "invalid",
                    };
                },
            });
            let lastFinishReason;
            // Guards against duplicate tool-call chunks for the same call id.
            const yieldedToolCalls = new Set();
            for await (const chunk of result.fullStream) {
                if (abortSignal?.aborted) {
                    yield { type: "text", text: "\n[Aborted by user]\n" };
                    yield { type: "messages", messages: currentMessages };
                    return [];
                }
                if (chunk.type === "text-delta") {
                    // AI SDK versions differ on the delta field name (textDelta vs text).
                    const text = chunk.textDelta || chunk.text || "";
                    if (text) {
                        stepText += text;
                        if (stepText.length > MAX_OUTPUT_LENGTH) {
                            yield {
                                type: "text",
                                text: "\n[Error: Output exceeded maximum length]\n",
                            };
                            break;
                        }
                        yield { type: "text", text };
                    }
                }
                else if (chunk.type === "tool-call") {
                    const name = chunk.toolName || "";
                    // Older/newer SDKs expose args as `input` or `args`, sometimes
                    // wrapped in an object with toObject().
                    const rawArgs = chunk.input || chunk.args;
                    const args = typeof rawArgs?.toObject === "function"
                        ? rawArgs.toObject()
                        : rawArgs || {};
                    const id = chunk.toolCallId;
                    if (name && id && !yieldedToolCalls.has(id)) {
                        yieldedToolCalls.add(id);
                        collectedToolCalls.push({ name, args });
                        yield { type: "tool-call", name, args, toolCallId: id };
                    }
                }
                else if (chunk.type === "tool-call-delta") {
                    const name = chunk.toolName || "";
                    const rawArgs = chunk.input || chunk.args;
                    const args = typeof rawArgs?.toObject === "function"
                        ? rawArgs.toObject()
                        : rawArgs || {};
                    const id = chunk.toolCallId;
                    if (name && id) {
                        yield {
                            type: "tool-call-delta",
                            name,
                            args,
                            partial: !!chunk.partial,
                        };
                    }
                }
                else if (chunk.type === "tool-result") {
                    const name = chunk.toolName || "";
                    const toolCallId = chunk.toolCallId;
                    const rawResult = chunk.result ?? chunk.output ?? "";
                    const resultStr = typeof rawResult === "string"
                        ? rawResult
                        : JSON.stringify(rawResult);
                    collectedToolResults.push({ name, result: resultStr });
                    yield { type: "tool-result", name, result: resultStr, toolCallId };
                    // switch_mode results carry the new mode; react immediately so the
                    // rest of this step (and the next system prompt) reflect it.
                    if (name === "switch_mode") {
                        try {
                            const parsed = typeof rawResult === "object"
                                ? rawResult
                                : JSON.parse(resultStr);
                            if (parsed.switched_mode) {
                                currentMode = parsed.switched_mode;
                                currentPlanFile = parsed.contextFile;
                                modeDidSwitch = true;
                                yield {
                                    type: "mode-changed",
                                    mode: currentMode,
                                    planFile: currentPlanFile,
                                };
                            }
                        }
                        // Best-effort: malformed switch_mode payloads are ignored.
                        catch { }
                    }
                }
                else if (chunk.type === "finish") {
                    lastFinishReason = chunk.finishReason || "unknown";
                }
            }
            // Proactive checks for silent stream drops (graceful socket close but incomplete thought)
            const isAbruptDrop = !lastFinishReason ||
                ["unknown", "error", "length"].includes(lastFinishReason);
            if (isAbruptDrop && collectedToolCalls.length === 0) {
                // Ignore abrupt drops if it's obvious a tool was meant to be used but we couldn't parse it?
                // Actually, if we couldn't parse it, we want the LLM to resume and finish the syntax.
                // Throwing here routes into the catch-based auto-recovery below.
                throw new Error(`Stream terminated prematurely with finish_reason: ${lastFinishReason || "No finish chunk received"}`);
            }
            consecutiveErrors = 0; // Reset upon successful stream completion
        }
        catch (err) {
            if (err.name === "AbortError" || abortSignal?.aborted) {
                yield { type: "text", text: "\n[Aborted by user]\n" };
                yield { type: "messages", messages: currentMessages };
                return [];
            }
            else {
                // Recoverable stream drop: keep the partial text, then nudge the
                // model to resume from where it was cut off.
                yield {
                    type: "text",
                    text: `\n[Error: ${err.message}]\n[Auto-recovering from stream drop...]\n`,
                };
                if (stepText.trim()) {
                    currentMessages.push({ role: "assistant", content: stepText });
                }
                consecutiveErrors++;
                if (consecutiveErrors >= 3) {
                    yield {
                        type: "text",
                        text: `\n[Fatal Error: Stream dropped ${consecutiveErrors} times consecutively. Halting agent to prevent infinite loops.]\n`,
                    };
                    yield { type: "messages", messages: currentMessages };
                    return [];
                }
                currentMessages.push({
                    role: "user",
                    content: `[System Error: The network stream disconnected prematurely with error: ${err.message || err}. Please carefully analyze the incomplete text you just generated, and continue your thought or tool execution precisely from where it was cut off without repeating yourself.]`,
                });
                // Brief backoff before retrying the step.
                await new Promise((resolve) => setTimeout(resolve, 2000));
                continue;
            }
        }
        // ── Fallback tool-call parsing ──
        // Some models emit pseudo-XML (<tool>…</tool>) or bracket syntax instead
        // of native tool calls; parse those out of the step text and execute the
        // matching toolExecutors entries here.
        if (collectedToolCalls.length === 0) {
            const parseText = stepText
                .replace(/<think>[\s\S]*?<\/think>/g, "")
                .trim();
            for (const [toolName, toolFn] of Object.entries(toolExecutors)) {
                const patterns = [
                    new RegExp(`<${toolName}>([\\s\\S]*?)<\\/${toolName}>`, "g"),
                    new RegExp(`\\[${toolName}\\(([^\\[\\]]*)\\)\\]`, "g"),
                ];
                for (const regex of patterns) {
                    regex.lastIndex = 0;
                    let match;
                    while ((match = regex.exec(parseText)) !== null) {
                        const innerText = match[1].trim();
                        const args = {};
                        // Inner <key>value</key> tags become named arguments.
                        const argRegex = /<(\w+)>([\s\S]*?)<\/\1>/g;
                        let argMatch;
                        let foundInnerArgs = false;
                        while ((argMatch = argRegex.exec(innerText)) !== null) {
                            args[argMatch[1]] = argMatch[2].trim();
                            foundInnerArgs = true;
                        }
                        // Bare (untagged) payloads map to the tool's primary argument.
                        if (!foundInnerArgs && innerText) {
                            if (toolName === "list_dir" || toolName === "read_file") {
                                args.path = innerText.replace(/^["']|["']$/g, "").trim();
                            }
                            else if (toolName === "run_command") {
                                args.command = innerText.replace(/^["']|["']$/g, "").trim();
                                if (!args.riskLevel) {
                                    // Default to high if model didn't self-assess in fallback tag
                                    args.riskLevel = "high";
                                }
                            }
                        }
                        const firstArgValue = Object.values(args)[0];
                        if (!firstArgValue)
                            continue;
                        // Skip identical tool+args pairs already executed in any step.
                        const dedupKey = `${toolName}:${JSON.stringify(args)}`;
                        if (globalCalledSet.has(dedupKey))
                            continue;
                        globalCalledSet.add(dedupKey);
                        collectedToolCalls.push({ name: toolName, args });
                        // Safety Check for run_command
                        if (toolName === "run_command") {
                            const localDanger = !isCommandSafe(args.command);
                            const modelRisk = args.riskLevel;
                            const needsApproval = localDanger || modelRisk === "medium" || modelRisk === "high";
                            if (needsApproval) {
                                // Two-way yield: the driver resumes the generator with a
                                // truthy value to approve, falsy to deny.
                                const approved = yield {
                                    type: "tool-approval-required",
                                    name: toolName,
                                    args,
                                    riskLevel: modelRisk,
                                    isLocalGuard: localDanger,
                                };
                                if (!approved) {
                                    const resultStr = "Error: User denied execution of dangerous command.";
                                    collectedToolResults.push({
                                        name: toolName,
                                        result: resultStr,
                                    });
                                    yield { type: "tool-call", name: toolName, args };
                                    yield {
                                        type: "tool-result",
                                        name: toolName,
                                        result: resultStr,
                                    };
                                    continue;
                                }
                            }
                        }
                        yield { type: "tool-call", name: toolName, args };
                        yield { type: "tool-generating", name: toolName, args };
                        try {
                            const result = await toolFn(args);
                            const resultStr = typeof result === "string" ? result : JSON.stringify(result);
                            collectedToolResults.push({ name: toolName, result: resultStr });
                            yield { type: "tool-result", name: toolName, result: resultStr };
                            // Same mode-switch handling as the native tool-result path above.
                            if (toolName === "switch_mode") {
                                try {
                                    const parsed = typeof result === "object" ? result : JSON.parse(resultStr);
                                    if (parsed.switched_mode) {
                                        currentMode = parsed.switched_mode;
                                        currentPlanFile = parsed.contextFile;
                                        modeDidSwitch = true;
                                        yield {
                                            type: "mode-changed",
                                            mode: currentMode,
                                            planFile: currentPlanFile,
                                        };
                                    }
                                }
                                catch { }
                            }
                        }
                        catch (err) {
                            const resultStr = `Error: ${err.message}`;
                            collectedToolResults.push({ name: toolName, result: resultStr });
                            yield { type: "tool-call", name: toolName, args }; // Emitted after error for history mapping
                            yield { type: "tool-result", name: toolName, result: resultStr };
                        }
                    }
                }
            }
        }
        // ── Step wrap-up ──
        if (collectedToolCalls.length === 0) {
            if (!stepText.trim()) {
                // Empty response: retry up to 3 times with a nudge, then halt.
                consecutiveErrors++;
                if (consecutiveErrors < 3) {
                    yield {
                        type: "text",
                        text: `\n[System Notice: The model returned an empty response. Auto-retrying (${consecutiveErrors}/3)...]\n`,
                    };
                    currentMessages.push({
                        role: "user",
                        content: `[System] Your previous response was empty. Please continue your task or provide a status update.`,
                    });
                    await new Promise((resolve) => setTimeout(resolve, 1000));
                    continue;
                }
                else {
                    yield {
                        type: "text",
                        text: `\n[Fatal Error: Model returned empty response 3 times consecutively. Halting.]\n`,
                    };
                    currentMessages.push({ role: "assistant", content: stepText });
                    yield { type: "messages", messages: currentMessages };
                    return [];
                }
            }
            // Plain text answer with no tool calls: the agent is done.
            currentMessages.push({ role: "assistant", content: stepText });
            yield { type: "messages", messages: currentMessages };
            return [];
        }
        yield { type: "text", text: "\n" };
        // Summarize this step's tool results into a synthetic user message so the
        // next turn can build on them.
        // NOTE(review): pairs results to calls by array index — assumes both
        // arrays stayed aligned through the approval/denial paths above; verify.
        const toolSummary = collectedToolResults
            .map((tr, i) => {
            const tc = collectedToolCalls[i];
            const argsStr = tc
                ? Object.entries(tc.args)
                    .filter(([k]) => k !== "content")
                    .map(([k, v]) => `${k}=${JSON.stringify(v)}`)
                    .join(", ")
                : "";
            // Cap individual results so the synthetic message stays bounded.
            const resultBody = tr.result.length > 10000
                ? tr.result.slice(0, 10000) + "\n... (truncated)"
                : tr.result;
            return `### Tool: ${tr.name}(${argsStr})\n\`\`\`\n${resultBody}\n\`\`\``;
        })
            .join("\n\n");
        currentMessages.push({ role: "assistant", content: stepText });
        currentMessages.push({
            role: "user",
            content: `[SYSTEM] Tool results:\n\n${toolSummary}\n\nContinue with your task. Do NOT re-call the same tools.`,
        });
        // Recompute system prompt immediately if mode changed so the next loop has the right persona/length
        if (modeDidSwitch && !systemPrompt) {
            system = await getSystemPrompt(provider, modelName, currentMode, currentPlanFile, activeSkillContent);
        }
        yield {
            type: "messages",
            messages: currentMessages,
            promptLength: system.length,
        };
    }
    yield { type: "text", text: "\n[Reached maximum steps]\n" };
    yield { type: "messages", messages: currentMessages };
    return [];
}
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
/** Active chat-model selection, plus an optional separate vision model. */
export interface ModelConfig {
    provider: string;
    model: string;
    visionProvider?: string;
    visionModel?: string;
}
/** Per-provider connection settings stored in config.json. */
export interface ProviderSettings {
    apiKey: string;
    baseUrl?: string;
    /** True for user-defined (non-built-in) providers. */
    isCustom?: boolean;
    /** Wire protocol the provider speaks (e.g. "openai", "anthropic", "google"). */
    protocol?: string;
}
/** Operating mode of the agent loop. */
export type AgentMode = "plan" | "code" | "agent" | "zen";
/** User-configurable reasoning-effort setting. */
export type ReasoningLevel = "low" | "medium" | "high" | "max" | "xhigh";
/**
 * Load model config with priority: project .acmecode/config.json > global ~/.acmecode/config.json > defaults
 */
export declare function loadModelConfig(): ModelConfig;
/**
 * Load language config with priority: project .acmecode/config.json > global ~/.acmecode/config.json > default "en"
 */
export declare function loadLangConfig(): string;
/**
 * Load theme config with priority: project .acmecode/config.json > global ~/.acmecode/config.json > default "dark"
 */
export declare function loadThemeConfig(): string;
/**
 * Load reasoning level config with priority: project .acmecode/config.json > global ~/.acmecode/config.json > default "medium"
 */
export declare function loadReasoningLevel(): ReasoningLevel;
/**
 * Load agent mode config with priority: project .acmecode/config.json > default "agent"
 */
export declare function loadAgentModeConfig(): {
    mode: AgentMode;
    planFile?: string;
};
/**
 * Save model config to the project's .acmecode/config.json
 */
export declare function saveProjectModelConfig(provider: string, model: string, visionProvider?: string, visionModel?: string): void;
/**
 * Save agent mode config to the project's .acmecode/config.json
 */
export declare function saveAgentModeConfig(mode: AgentMode, planFile?: string): void;
/**
 * Save model config to the global ~/.acmecode/config.json
 */
export declare function saveGlobalModelConfig(provider: string, model: string): void;
/**
 * Save language config to the global ~/.acmecode/config.json
 */
export declare function saveGlobalLangConfig(lang: string): void;
/**
 * Save theme config to the global ~/.acmecode/config.json
 */
export declare function saveGlobalThemeConfig(theme: string): void;
/**
 * Save reasoning level config to the global ~/.acmecode/config.json
 */
export declare function saveGlobalReasoningLevel(level: ReasoningLevel): void;
/**
 * Save provider-specific settings to ~/.acmecode/config.json
 */
export declare function saveProviderConfig(provider: string, config: ProviderSettings): void;
/**
 * Load all custom providers from global config
 */
export declare function loadCustomProviders(): Record<string, ProviderSettings>;
// NOTE(review): declared as `any` — presumably resolves to an API key string
// (or undefined when unset); confirm against the implementation and tighten.
export declare const getProviderKey: (provider: string) => any;
/**
 * Normalizes a base URL by removing trailing slashes and ensuring consistency.
 */
export declare const normalizeBaseUrl: (url: string | undefined) => string | undefined;
/**
 * Intelligently adapts a base URL for a given protocol if version segments are missing.
 */
export declare const getAdaptedBaseUrl: (protocol: string, baseUrl: string | undefined) => string | undefined;
export declare const getProviderBaseUrl: (provider: string) => string | undefined;
/**
 * Get the protocol for a provider (e.g. 'openai', 'anthropic', 'google')
 */
export declare const getProviderProtocol: (provider: string) => string;
|