lcagent-cli 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,166 @@
1
+ # lcagent-cli
2
+
3
+ 一个从当前仓库架构中抽出来的最小独立 Agent CLI 项目。
4
+
5
+ ## 当前能力
6
+
7
+ - `lcagent -p "..."`:单次非交互执行
8
+ - `lcagent chat`:交互式聊天
9
+ - `lcagent tools`:查看当前工具
10
+ - `lcagent provider [name]`:查看或修改模型 provider
11
+ - `lcagent model [name]`:查看或修改默认模型
12
+ - `lcagent doctor`:检查当前模型配置和接口连通性
13
+ - `lcagent config show`
14
+ - `lcagent config set <key> <value>`
15
+
16
+ ## 已实现工具
17
+
18
+ - `read_file`
19
+ - `edit_file`
20
+ - `grep`
21
+ - `run_shell`
22
+
23
+ ## 安装
24
+
25
+ ```bash
26
+ cd my-agent-cli
27
+ npm install
28
+ npm run build
29
+ ```
30
+
31
+ ## 发布到公网 npm
32
+
33
+ 先登录 npm 官方仓库:
34
+
35
+ ```bash
36
+ npm login --registry https://registry.npmjs.org/
37
+ ```
38
+
39
+ 发布前先做一次本地打包验证:
40
+
41
+ ```bash
42
+ npm run build
43
+ npm pack
44
+ ```
45
+
46
+ 确认 `package.json` 中的包名还未被占用后,执行发布:
47
+
48
+ ```bash
49
+ npm publish --registry https://registry.npmjs.org/
50
+ ```
51
+
52
+ 如果后续改成 scope 包(例如 `@your-scope/lcagent-cli`),首次发布时建议使用:
53
+
54
+ ```bash
55
+ npm publish --access public --registry https://registry.npmjs.org/
56
+ ```
57
+
58
+ ## 从公网 npm 安装和执行
59
+
60
+ 全局安装:
61
+
62
+ ```bash
63
+ npm install -g lcagent-cli --registry https://registry.npmjs.org/
64
+ lcagent --help
65
+ ```
66
+
67
+ 不做全局安装时,也可以直接执行:
68
+
69
+ ```bash
70
+ npx --registry https://registry.npmjs.org/ lcagent-cli --help
71
+ npx --registry https://registry.npmjs.org/ lcagent-cli chat
72
+ ```
73
+
74
+ ## 配置
75
+
76
+ 方式一:Anthropic 环境变量
77
+
78
+ ```bash
79
+ export ANTHROPIC_API_KEY="your-key"
80
+ ```
81
+
82
+ 方式二:写入本地配置
83
+
84
+ ```bash
85
+ npm run start -- config set apiKey your-key
86
+ npm run start -- model claude-3-7-sonnet-latest
87
+ ```
88
+
89
+ ## 使用本地 Qwen / OpenAI-compatible 服务
90
+
91
+ 如果你的本地服务提供 OpenAI-compatible 接口(例如 `http://127.0.0.1:8000/v1`),可以这样配置:
92
+
93
+ ```bash
94
+ npm run start -- provider openai-compatible
95
+ npm run start -- config set baseUrl http://127.0.0.1:8000/v1
96
+ npm run start -- model Qwen3-Coder-30B-A3B-Instruct-FP8
97
+ ```
98
+
99
+ 或者直接一条命令初始化:
100
+
101
+ ```bash
102
+ npm run start -- init-local --base-url http://127.0.0.1:8000/v1 --model Qwen3-Coder-30B-A3B-Instruct-FP8
103
+ ```
104
+
105
+ 如果你的本地服务需要 token:
106
+
107
+ ```bash
108
+ npm run start -- init-local --base-url http://127.0.0.1:8000/v1 --model Qwen3-Coder-30B-A3B-Instruct-FP8 --api-key your-local-token
109
+ ```
110
+
111
+ 也可以改用环境变量为本地服务提供 Bearer Token(效果等同于上面的 `--api-key`):
112
+
113
+ ```bash
114
+ export OPENAI_API_KEY="your-local-token"
115
+ ```
116
+
117
+ 如果本地服务不需要 token,可以不配置 `apiKey`。
118
+
119
+ 建议第一版优先用代码模型:
120
+
121
+ ```text
122
+ Qwen3-Coder-30B-A3B-Instruct-FP8
123
+ ```
124
+
125
+ 配置文件默认在:
126
+
127
+ ```bash
128
+ ~/.lcagent-cli/config.json
129
+ ```
130
+
131
+ ## 运行
132
+
133
+ ```bash
134
+ npm run start -- -p "阅读 README 并总结项目结构"
135
+ npm run start -- chat
136
+ npm run start -- tools
137
+ npm run start -- provider
138
+ npm run start -- model
139
+ npm run start -- doctor
140
+ ```
141
+
142
+ ## 设计目标
143
+
144
+ - 保留 Claude Code 式的核心代理循环
145
+ - 不依赖原仓库的内部 feature gate
146
+ - 保持目录清晰,便于后续继续演进
147
+
148
+ ## 已知限制
149
+
150
+ - 当前是最小版,没有流式输出
151
+ - 工具权限策略还是简化版
152
+ - `edit_file` 只做简单字符串替换
153
+ - `grep` 是纯 Node 实现,不如 ripgrep 快
154
+ - 使用 anthropic provider 时按 Anthropic Messages API 直接调用,需要有效 API Key;openai-compatible provider 可连接本地服务
155
+
156
+ 对于 OpenAI-compatible 本地模型:
157
+
158
+ - 你的服务需要支持 `POST /chat/completions`
159
+ - 最好支持 tool calling / function calling
160
+ - 如果不支持 tool calling,Agent 的工具闭环能力会明显下降
161
+
162
+ 可以先用下面这条命令做自检:
163
+
164
+ ```bash
165
+ npm run start -- doctor
166
+ ```
@@ -0,0 +1,8 @@
1
+ import { loadConfig } from '../config/store.js';
2
+ import { AgentEngine } from '../core/engine.js';
3
+ import { getDefaultTools } from '../tools/registry.js';
4
/**
 * Wires the persisted config, model client, agent engine, and tool registry
 * into a ready-to-use application bundle for one CLI invocation.
 */
export declare function createApp(cwd: string): Promise<{
    config: Awaited<ReturnType<typeof loadConfig>>;
    engine: AgentEngine;
    tools: ReturnType<typeof getDefaultTools>;
}>;
@@ -0,0 +1,30 @@
1
+ import { createSession } from './session.js';
2
+ import { loadConfig } from '../config/store.js';
3
+ import { buildSystemPrompt } from '../core/systemPrompt.js';
4
+ import { ModelClient } from '../core/model.js';
5
+ import { AgentEngine } from '../core/engine.js';
6
+ import { getDefaultTools } from '../tools/registry.js';
7
/**
 * Builds the runtime wiring for one CLI invocation: loads the persisted
 * config, resolves an API key from config/environment, and assembles the
 * agent engine with the default tool set.
 *
 * @param cwd working directory the agent operates in
 * @throws when provider is `anthropic` and no API key can be resolved
 */
export async function createApp(cwd) {
    const config = await loadConfig();
    // Key precedence: explicit config > MODEL_API_KEY > provider-specific env var.
    const providerEnvKey = config.provider === 'anthropic'
        ? process.env.ANTHROPIC_API_KEY
        : process.env.OPENAI_API_KEY;
    const apiKey = config.apiKey ?? process.env.MODEL_API_KEY ?? providerEnvKey;
    // Anthropic always requires a key; an openai-compatible endpoint may be a
    // local, unauthenticated server, so no key is tolerated there.
    if (config.provider === 'anthropic' && !apiKey) {
        throw new Error('Missing API key for provider=anthropic. Run `lcagent config set apiKey <key>` or set ANTHROPIC_API_KEY.');
    }
    const tools = getDefaultTools();
    const modelClient = new ModelClient({
        provider: config.provider,
        apiKey,
        baseUrl: config.baseUrl,
        model: config.model,
        maxTokens: config.maxTokens,
    });
    const engine = new AgentEngine(createSession(), config, modelClient, tools, buildSystemPrompt(cwd), cwd);
    return { config, engine, tools };
}
@@ -0,0 +1,5 @@
1
import type { AgentMessage } from '../core/message.js';
/** Conversation state carried across turns: the accumulated message history. */
export type AgentSession = {
    messages: AgentMessage[];
};
/** Creates a fresh session with an empty message history. */
export declare function createSession(): AgentSession;
@@ -0,0 +1,5 @@
1
/** Returns a brand-new session whose message history starts out empty. */
export function createSession() {
    const messages = [];
    return { messages };
}
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env node
2
+ export {};
@@ -0,0 +1,268 @@
1
+ #!/usr/bin/env node
2
+ import { Command } from 'commander';
3
+ import { stdin as input, stdout as output } from 'node:process';
4
+ import { createInterface } from 'node:readline/promises';
5
+ import { createApp } from '../app/bootstrap.js';
6
+ import { agentConfigSchema } from '../config/schema.js';
7
+ import { getConfigPath, loadConfig, updateConfig } from '../config/store.js';
8
+ import { getDefaultTools } from '../tools/registry.js';
9
/** Strips every trailing `/` so endpoint paths can be appended safely. */
function trimTrailingSlash(value) {
    let end = value.length;
    while (end > 0 && value[end - 1] === '/') {
        end -= 1;
    }
    return value.slice(0, end);
}
12
/**
 * Issues a GET against `url` and reports reachability.
 * Never throws: network failures come back as `{ ok: false, detail }`.
 */
async function probeUrl(url) {
    try {
        const response = await fetch(url, {
            method: 'GET',
            headers: { accept: 'application/json,text/plain,*/*' },
        });
        const { ok, status } = response;
        return { ok, status, detail: `GET ${url} -> HTTP ${status}` };
    }
    catch (error) {
        const detail = error instanceof Error ? error.message : String(error);
        return { ok: false, detail };
    }
}
31
/**
 * Posts `body` as JSON to `endpoint` and prints an HTTP status line plus a
 * short response preview under `label`. Failures are printed, never thrown.
 */
async function postJsonProbe(label, endpoint, headers, body) {
    try {
        const response = await fetch(endpoint, {
            method: 'POST',
            headers: { 'content-type': 'application/json', ...headers },
            body: JSON.stringify(body),
        });
        const text = await response.text();
        console.log(`- ${label}: HTTP ${response.status}`);
        console.log(`- response preview: ${text.slice(0, 200) || '(empty body)'}`);
    }
    catch (error) {
        console.log(`- ${label} failed: ${error instanceof Error ? error.message : String(error)}`);
    }
}
/**
 * Prints the current provider configuration and probes the configured
 * endpoint with a tiny "pong" completion request to verify connectivity.
 * The two provider branches previously duplicated the fetch/preview logic;
 * it is now shared via postJsonProbe.
 */
async function runDoctor() {
    const config = await loadConfig();
    // Same key-resolution precedence as createApp: config > MODEL_API_KEY > provider env var.
    const apiKey = config.apiKey ??
        process.env.MODEL_API_KEY ??
        (config.provider === 'anthropic'
            ? process.env.ANTHROPIC_API_KEY
            : process.env.OPENAI_API_KEY);
    console.log('lcagent doctor');
    console.log(`- config: ${getConfigPath()}`);
    console.log(`- provider: ${config.provider}`);
    console.log(`- baseUrl: ${config.baseUrl}`);
    console.log(`- model: ${config.model}`);
    console.log(`- apiKey: ${apiKey ? 'configured' : 'not configured'}`);
    const rootProbe = await probeUrl(trimTrailingSlash(config.baseUrl));
    console.log(`- baseUrl probe: ${rootProbe.detail}`);
    if (config.provider === 'openai-compatible') {
        const endpoint = `${trimTrailingSlash(config.baseUrl)}/chat/completions`;
        console.log(`- endpoint: ${endpoint}`);
        await postJsonProbe('chat/completions probe', endpoint, apiKey ? { Authorization: `Bearer ${apiKey}` } : {}, {
            model: config.model,
            messages: [{ role: 'user', content: 'Reply with the single word pong.' }],
            max_tokens: 8,
        });
        return;
    }
    const endpoint = `${trimTrailingSlash(config.baseUrl)}/v1/messages`;
    console.log(`- endpoint: ${endpoint}`);
    await postJsonProbe('v1/messages probe', endpoint, {
        ...(apiKey ? { 'x-api-key': apiKey } : {}),
        'anthropic-version': '2023-06-01',
    }, {
        model: config.model,
        max_tokens: 8,
        messages: [{ role: 'user', content: 'Reply with the single word pong.' }],
    });
}
95
/** Runs one non-interactive prompt and streams agent events to stdout. */
async function printAgentRun(prompt) {
    const { engine } = await createApp(process.cwd());
    for await (const event of engine.submit(prompt)) {
        if (event.type === 'status') {
            console.log(`\n[status] ${event.message}`);
        }
        else if (event.type === 'assistant_text') {
            console.log(`\n${event.text}`);
        }
        else if (event.type === 'tool_call') {
            console.log(`\n[tool] ${event.toolName} ${JSON.stringify(event.input)}`);
        }
        else if (event.type === 'tool_result') {
            console.log(`\n[tool-result] ${event.toolName}${event.isError ? ' (error)' : ''}`);
            console.log(event.result);
        }
    }
}
115
/** Interactive REPL: reads prompts from stdin until `exit` or `quit`. */
async function runChat() {
    const { engine, config } = await createApp(process.cwd());
    console.log(`lcagent chat started with provider: ${config.provider}, model: ${config.model}`);
    console.log('Type `exit` or `quit` to leave.');
    const rl = createInterface({ input, output });
    // Local printer keeps the event loop below flat and readable.
    const printEvent = (event) => {
        if (event.type === 'status') {
            console.log(`[status] ${event.message}`);
        }
        else if (event.type === 'assistant_text') {
            console.log(event.text);
        }
        else if (event.type === 'tool_call') {
            console.log(`[tool] ${event.toolName} ${JSON.stringify(event.input)}`);
        }
        else if (event.type === 'tool_result') {
            console.log(`[tool-result] ${event.toolName}${event.isError ? ' (error)' : ''}`);
            console.log(event.result);
        }
    };
    try {
        for (;;) {
            const line = (await rl.question('> ')).trim();
            if (!line) {
                continue;
            }
            if (line === 'exit' || line === 'quit') {
                break;
            }
            for await (const event of engine.submit(line)) {
                printEvent(event);
            }
        }
    }
    finally {
        // Always release the readline handle, even if the engine throws.
        rl.close();
    }
}
152
// CLI surface definition. Command handlers delegate to the functions above;
// persistence and validation live in config/store.js and config/schema.js.
const program = new Command();
program
    .name('lcagent')
    .description('A minimal coding agent CLI inspired by Claude Code architecture.')
    .version('0.1.0')
    .option('-p, --prompt <text>', 'Run a single non-interactive prompt')
    .action(async (options) => {
        // Bare `lcagent` with no -p just prints usage.
        if (!options.prompt) {
            program.outputHelp();
            return;
        }
        await printAgentRun(options.prompt);
    });
program
    .command('chat')
    .description('Start interactive chat mode')
    .action(async () => {
        await runChat();
    });
program
    .command('doctor')
    .description('Check provider config and probe the configured model endpoint')
    .action(async () => {
        await runDoctor();
    });
program
    .command('tools')
    .description('List enabled tools')
    .action(async () => {
        for (const tool of getDefaultTools()) {
            console.log(`- ${tool.name}: ${tool.description}`);
        }
    });
program
    .command('provider')
    .description('Show or update the current model provider')
    .argument('[name]', 'Provider name: anthropic | openai-compatible')
    .action(async (name) => {
        if (!name) {
            const config = await loadConfig();
            console.log(config.provider);
            return;
        }
        // updateConfig re-validates through the zod schema, so an unknown
        // provider name is rejected before anything is written to disk.
        const next = await updateConfig({ provider: name });
        console.log(`Provider updated to ${next.provider}`);
    });
program
    .command('init-local')
    .description('Initialize local OpenAI-compatible model settings in one command')
    .requiredOption('--base-url <url>', 'Local model base URL, e.g. http://127.0.0.1:8000/v1')
    .requiredOption('--model <name>', 'Local model name')
    .option('--api-key <key>', 'Optional local API key / bearer token')
    .action(async (options) => {
        const current = await loadConfig();
        const next = agentConfigSchema.parse({
            ...current,
            provider: 'openai-compatible',
            baseUrl: options.baseUrl,
            model: options.model,
            // Only overwrite a previously stored key when one is provided.
            ...(options.apiKey ? { apiKey: options.apiKey } : {}),
        });
        await updateConfig(next);
        console.log('Local model config initialized:');
        console.log(`- provider: ${next.provider}`);
        console.log(`- baseUrl: ${next.baseUrl}`);
        console.log(`- model: ${next.model}`);
        console.log(`- config: ${getConfigPath()}`);
        if (!options.apiKey) {
            console.log('- apiKey: unchanged (not provided)');
        }
    });
program
    .command('model')
    .description('Show or update the default model')
    .argument('[name]', 'Optional model name to store in config')
    .action(async (name) => {
        if (!name) {
            const config = await loadConfig();
            console.log(`${config.provider}: ${config.model}`);
            return;
        }
        const next = await updateConfig({ model: name });
        console.log(`Default model updated to ${next.model}`);
    });
const configCommand = program.command('config').description('Manage local config');
configCommand
    .command('show')
    .description('Print current config')
    .action(async () => {
        const config = await loadConfig();
        console.log(JSON.stringify(config, null, 2));
        console.log(`Config path: ${getConfigPath()}`);
    });
configCommand
    .command('set')
    .description('Set a config key')
    .argument('<key>', 'Config key')
    .argument('<value>', 'Config value')
    .action(async (key, value) => {
        const current = await loadConfig();
        // Numeric keys are coerced to numbers; everything else is stored
        // verbatim. (The original had a dead ternary whose two arms were both
        // `value`; it has been removed.) The schema parse below rejects NaN,
        // unknown enum values, and unknown keys.
        const nextValue = key === 'maxTurns' || key === 'maxTokens'
            ? Number(value)
            : value;
        const next = agentConfigSchema.parse({
            ...current,
            [key]: nextValue,
        });
        await updateConfig(next);
        console.log(`Updated ${key}`);
    });
// Top-level entry point: surface errors on stderr and set a failing exit code.
program.parseAsync(process.argv).catch((error) => {
    console.error(error instanceof Error ? error.message : String(error));
    process.exitCode = 1;
});
@@ -0,0 +1,18 @@
1
import { z } from 'zod';
/** Zod schema describing the persisted CLI configuration (defaults in schema.js). */
export declare const agentConfigSchema: z.ZodObject<{
    provider: z.ZodDefault<z.ZodEnum<{
        anthropic: "anthropic";
        "openai-compatible": "openai-compatible";
    }>>;
    apiKey: z.ZodOptional<z.ZodString>;
    baseUrl: z.ZodDefault<z.ZodString>;
    model: z.ZodDefault<z.ZodString>;
    maxTurns: z.ZodDefault<z.ZodNumber>;
    maxTokens: z.ZodDefault<z.ZodNumber>;
    approvalMode: z.ZodDefault<z.ZodEnum<{
        auto: "auto";
        manual: "manual";
    }>>;
}, z.core.$strip>;
/** Configuration object type inferred from the schema. */
export type AgentConfig = z.infer<typeof agentConfigSchema>;
/** Schema defaults materialized as a concrete config object. */
export declare const defaultAgentConfig: AgentConfig;
@@ -0,0 +1,11 @@
1
import { z } from 'zod';
/**
 * Persisted CLI config schema. Every field except `apiKey` carries a
 * default, so `agentConfigSchema.parse({})` yields a fully-populated config.
 */
export const agentConfigSchema = z.object({
    provider: z.enum(['anthropic', 'openai-compatible']).default('anthropic'),
    apiKey: z.string().trim().min(1).optional(),
    baseUrl: z.string().url().default('https://api.anthropic.com'),
    model: z.string().trim().min(1).default('claude-3-7-sonnet-latest'),
    maxTurns: z.number().int().positive().default(8),
    maxTokens: z.number().int().positive().default(2048),
    approvalMode: z.enum(['auto', 'manual']).default('auto'),
});
// All defaults applied — used as the fallback when no config file exists.
export const defaultAgentConfig = agentConfigSchema.parse({});
@@ -0,0 +1,5 @@
1
import { type AgentConfig } from './schema.js';
/** Loads the persisted config, falling back to schema defaults. */
export declare function loadConfig(): Promise<AgentConfig>;
/** Validates and writes the given config to disk. */
export declare function saveConfig(config: AgentConfig): Promise<void>;
/** Merges a partial config into the stored one, persists, and returns the result. */
export declare function updateConfig(partial: Partial<AgentConfig>): Promise<AgentConfig>;
/** Absolute path of the on-disk config file. */
export declare function getConfigPath(): string;
@@ -0,0 +1,37 @@
1
+ import { mkdir, readFile, writeFile } from 'node:fs/promises';
2
+ import { dirname, join } from 'node:path';
3
+ import { homedir } from 'node:os';
4
+ import { agentConfigSchema, defaultAgentConfig } from './schema.js';
5
// Config lives at ~/.lcagent-cli/config.json (also advertised in the README).
const CONFIG_PATH = join(homedir(), '.lcagent-cli', 'config.json');
/** Creates the config directory if it does not exist yet. */
async function ensureConfigDir() {
    await mkdir(dirname(CONFIG_PATH), { recursive: true });
}
9
/**
 * Loads the persisted config, layering it over the schema defaults.
 * A missing config file is normal (first run) and silently yields defaults;
 * an unreadable or invalid file is reported on stderr before falling back,
 * instead of being silently swallowed as before.
 */
export async function loadConfig() {
    let raw;
    try {
        raw = await readFile(CONFIG_PATH, 'utf8');
    }
    catch {
        // No readable config file yet — use pure defaults.
        return { ...defaultAgentConfig };
    }
    try {
        return agentConfigSchema.parse({
            ...defaultAgentConfig,
            ...JSON.parse(raw),
        });
    }
    catch (error) {
        // A corrupt config must not crash the CLI, but the user deserves a
        // warning rather than a silent reset to defaults.
        console.error(`Warning: ignoring invalid config at ${CONFIG_PATH}: ${error instanceof Error ? error.message : String(error)}`);
        return { ...defaultAgentConfig };
    }
}
21
/** Validates `config` against the schema and writes it as pretty-printed JSON. */
export async function saveConfig(config) {
    await ensureConfigDir();
    const normalized = agentConfigSchema.parse(config);
    const serialized = `${JSON.stringify(normalized, null, 2)}\n`;
    await writeFile(CONFIG_PATH, serialized, 'utf8');
}
26
/** Merges `partial` into the stored config, persists it, and returns the result. */
export async function updateConfig(partial) {
    const current = await loadConfig();
    const merged = { ...current, ...partial };
    // Re-validate so bad partial values never reach disk.
    const next = agentConfigSchema.parse(merged);
    await saveConfig(next);
    return next;
}
35
/** Absolute path of the config file (used in diagnostic output). */
export function getConfigPath() {
    return CONFIG_PATH;
}
@@ -0,0 +1,15 @@
1
+ import type { AgentConfig } from '../config/schema.js';
2
+ import type { AgentSession } from '../app/session.js';
3
+ import { type AgentEvent } from './message.js';
4
+ import type { ModelClient } from './model.js';
5
+ import type { ToolDefinition } from '../tools/types.js';
6
/**
 * Stateful wrapper around the agent loop: owns a conversation session and
 * turns each submitted prompt into an async stream of agent events.
 */
export declare class AgentEngine {
    private readonly session;
    private readonly config;
    private readonly modelClient;
    private readonly tools;
    private readonly systemPrompt;
    private readonly cwd;
    constructor(session: AgentSession, config: AgentConfig, modelClient: ModelClient, tools: ToolDefinition[], systemPrompt: string, cwd: string);
    /** Appends the prompt to the session and yields events until the turn completes. */
    submit(prompt: string): AsyncGenerator<AgentEvent, void>;
}
@@ -0,0 +1,29 @@
1
+ import { createUserTextMessage } from './message.js';
2
+ import { runAgentLoop } from './loop.js';
3
/**
 * Thin stateful wrapper around the agent loop: owns the conversation session
 * and forwards each user prompt into `runAgentLoop`.
 */
export class AgentEngine {
    constructor(session, config, modelClient, tools, systemPrompt, cwd) {
        this.session = session;
        this.config = config;
        this.modelClient = modelClient;
        this.tools = tools;
        this.systemPrompt = systemPrompt;
        this.cwd = cwd;
    }
    /** Records `prompt` in the session, then streams loop events until the turn ends. */
    async *submit(prompt) {
        this.session.messages.push(createUserTextMessage(prompt));
        const { config, modelClient, tools, systemPrompt, cwd } = this;
        yield* runAgentLoop({
            config,
            modelClient,
            messages: this.session.messages,
            tools,
            systemPrompt,
            cwd,
        });
    }
}
@@ -0,0 +1,12 @@
1
+ import type { AgentConfig } from '../config/schema.js';
2
+ import type { AgentEvent, AgentMessage } from './message.js';
3
+ import type { ToolDefinition } from '../tools/types.js';
4
+ import type { ModelClient } from './model.js';
5
/**
 * Runs the model/tool loop over `messages`, yielding status, assistant-text,
 * tool-call, and tool-result events until the model stops requesting tools.
 */
export declare function runAgentLoop(params: {
    config: AgentConfig;
    modelClient: ModelClient;
    messages: AgentMessage[];
    tools: ToolDefinition[];
    systemPrompt: string;
    cwd: string;
}): AsyncGenerator<AgentEvent, void>;
@@ -0,0 +1,54 @@
1
+ import { executeToolCall } from '../tools/execute.js';
2
/**
 * Core agent loop: alternates model calls and tool execution until the model
 * produces a turn with no tool calls, or `maxTurns` is exhausted (throws).
 */
export async function* runAgentLoop(params) {
    const { config, modelClient, messages, tools, systemPrompt, cwd } = params;
    const toolContext = { cwd, approvalMode: config.approvalMode };
    for (let turn = 1; turn <= config.maxTurns; turn += 1) {
        yield { type: 'status', message: `Thinking (turn ${turn}/${config.maxTurns})...` };
        const response = await modelClient.createMessage({ systemPrompt, messages, tools });
        // The assistant message joins the history before any events are emitted.
        messages.push({ role: 'assistant', content: response.content });
        // Surface assistant prose first, in content order.
        for (const block of response.content) {
            if (block.type === 'text' && block.text.trim()) {
                yield { type: 'assistant_text', text: block.text };
            }
        }
        const pendingCalls = response.content.filter((block) => block.type === 'tool_use');
        if (pendingCalls.length === 0) {
            // Model finished without requesting tools — the turn is complete.
            return;
        }
        for (const call of pendingCalls) {
            yield { type: 'tool_call', toolName: call.name, input: call.input };
            const outcome = await executeToolCall(call, tools, toolContext);
            yield {
                type: 'tool_result',
                toolName: call.name,
                result: outcome.content,
                isError: outcome.isError,
            };
            // Feed the tool output back to the model as a user-role message.
            messages.push({
                role: 'user',
                content: [{
                        type: 'tool_result',
                        tool_use_id: call.id,
                        content: outcome.content,
                        is_error: outcome.isError,
                    }],
            });
        }
    }
    throw new Error(`Agent stopped after reaching maxTurns=${config.maxTurns}`);
}