lll-web-agent 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +396 -0
- package/package.json +27 -0
- package/src/agent.js +213 -0
- package/src/index.js +2 -0
- package/src/llm-client.js +138 -0
- package/src/memory.js +40 -0
- package/src/providers.js +54 -0
- package/src/tool.js +80 -0
package/README.md
ADDED
|
@@ -0,0 +1,396 @@
|
|
|
1
|
+
# lll-web-agent
|
|
2
|
+
|
|
3
|
+
开箱即用的 LLM Agent SDK。配个 API Key,定义几个工具,就能跑起一个完整的 AI Agent。
|
|
4
|
+
|
|
5
|
+
零依赖。支持 OpenAI / DeepSeek / 通义千问 / X-GROK / Moonshot / 智谱 等所有 OpenAI 兼容供应商。
|
|
6
|
+
|
|
7
|
+
## 安装
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
npm install lll-web-agent
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
要求 Node.js >= 18(使用内置 fetch API)。
|
|
14
|
+
|
|
15
|
+
## 30 秒上手
|
|
16
|
+
|
|
17
|
+
```javascript
|
|
18
|
+
import { Agent } from 'lll-web-agent'
|
|
19
|
+
|
|
20
|
+
const agent = new Agent({
|
|
21
|
+
provider: 'openai',
|
|
22
|
+
apiKey: process.env.OPENAI_API_KEY,
|
|
23
|
+
model: 'gpt-4',
|
|
24
|
+
})
|
|
25
|
+
|
|
26
|
+
const reply = await agent.chat('你好,介绍一下你自己')
|
|
27
|
+
console.log(reply)
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
就这么多。不需要配置 HTTP 客户端、不需要处理 SSE 解析、不需要理解 ReAct 循环。
|
|
31
|
+
|
|
32
|
+
---
|
|
33
|
+
|
|
34
|
+
## 注册工具
|
|
35
|
+
|
|
36
|
+
工具是 Agent 与外部世界交互的方式。LLM 会根据工具描述自主决定何时调用哪个工具。
|
|
37
|
+
|
|
38
|
+
### 基本用法
|
|
39
|
+
|
|
40
|
+
```javascript
|
|
41
|
+
import { Agent, defineTool } from 'lll-web-agent'
|
|
42
|
+
|
|
43
|
+
// 1. 用 defineTool 定义工具
|
|
44
|
+
const getCurrentTime = defineTool({
|
|
45
|
+
name: 'get_current_time', // 工具唯一名称
|
|
46
|
+
description: '获取当前时间', // 告诉 LLM 这个工具能做什么
|
|
47
|
+
parameters: { // JSON Schema 格式的参数定义
|
|
48
|
+
type: 'object',
|
|
49
|
+
properties: {}, // 无参数
|
|
50
|
+
},
|
|
51
|
+
execute: async () => { // 执行函数,返回字符串
|
|
52
|
+
return new Date().toISOString()
|
|
53
|
+
},
|
|
54
|
+
})
|
|
55
|
+
|
|
56
|
+
// 2. 传给 Agent
|
|
57
|
+
const agent = new Agent({
|
|
58
|
+
provider: 'openai',
|
|
59
|
+
apiKey: process.env.OPENAI_API_KEY,
|
|
60
|
+
model: 'gpt-4',
|
|
61
|
+
tools: [getCurrentTime],
|
|
62
|
+
})
|
|
63
|
+
|
|
64
|
+
// 3. Agent 会自动决定是否调用工具
|
|
65
|
+
const reply = await agent.chat('现在几点了?')
|
|
66
|
+
// Agent 内部流程:
|
|
67
|
+
// LLM 思考 -> 决定调用 get_current_time -> 执行 -> 拿到时间 -> 继续思考 -> 返回最终回答
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
### 带参数的工具
|
|
71
|
+
|
|
72
|
+
```javascript
|
|
73
|
+
const readFile = defineTool({
|
|
74
|
+
name: 'read_file',
|
|
75
|
+
description: '读取指定路径的文件内容',
|
|
76
|
+
parameters: {
|
|
77
|
+
type: 'object',
|
|
78
|
+
properties: {
|
|
79
|
+
path: {
|
|
80
|
+
type: 'string',
|
|
81
|
+
description: '文件路径,相对于当前工作目录',
|
|
82
|
+
},
|
|
83
|
+
},
|
|
84
|
+
required: ['path'], // 必填参数
|
|
85
|
+
},
|
|
86
|
+
execute: async ({ path }) => {
|
|
87
|
+
const fs = await import('fs/promises')
|
|
88
|
+
try {
|
|
89
|
+
return await fs.readFile(path, 'utf-8')
|
|
90
|
+
} catch (err) {
|
|
91
|
+
return `读取失败: ${err.message}` // 返回错误信息,LLM 会看到并处理
|
|
92
|
+
}
|
|
93
|
+
},
|
|
94
|
+
})
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
### 多个工具协作
|
|
98
|
+
|
|
99
|
+
Agent 可以在一次对话中调用多个工具,自主决定调用顺序:
|
|
100
|
+
|
|
101
|
+
```javascript
|
|
102
|
+
const listFiles = defineTool({
|
|
103
|
+
name: 'list_files',
|
|
104
|
+
description: '列出目录下的文件',
|
|
105
|
+
parameters: {
|
|
106
|
+
type: 'object',
|
|
107
|
+
properties: {
|
|
108
|
+
dir: { type: 'string', description: '目录路径' },
|
|
109
|
+
},
|
|
110
|
+
required: ['dir'],
|
|
111
|
+
},
|
|
112
|
+
execute: async ({ dir }) => {
|
|
113
|
+
const fs = await import('fs/promises')
|
|
114
|
+
const files = await fs.readdir(dir)
|
|
115
|
+
return files.join('\n')
|
|
116
|
+
},
|
|
117
|
+
})
|
|
118
|
+
|
|
119
|
+
const agent = new Agent({
|
|
120
|
+
provider: 'deepseek',
|
|
121
|
+
apiKey: process.env.DEEPSEEK_API_KEY,
|
|
122
|
+
model: 'deepseek-chat',
|
|
123
|
+
tools: [listFiles, readFile], // 注册多个工具
|
|
124
|
+
})
|
|
125
|
+
|
|
126
|
+
// Agent 会先调用 list_files 看有哪些文件,再调用 read_file 读取感兴趣的文件
|
|
127
|
+
const reply = await agent.chat('看看当前目录有什么文件,读一下 package.json 的内容')
|
|
128
|
+
```
|
|
129
|
+
|
|
130
|
+
### 工具的 execute 函数规范
|
|
131
|
+
|
|
132
|
+
```javascript
|
|
133
|
+
defineTool({
|
|
134
|
+
name: 'my_tool',
|
|
135
|
+
description: '...',
|
|
136
|
+
parameters: { ... },
|
|
137
|
+
execute: async (params) => {
|
|
138
|
+
// params 是 LLM 传入的参数对象,结构与 parameters 定义一致
|
|
139
|
+
// 必须返回字符串(LLM 只能理解文本)
|
|
140
|
+
// 如果返回对象,会被自动 JSON.stringify
|
|
141
|
+
// 抛出异常时,错误信息会被发送给 LLM,LLM 可能会重试或换一种方式
|
|
142
|
+
return '执行结果'
|
|
143
|
+
},
|
|
144
|
+
})
|
|
145
|
+
```
|
|
146
|
+
|
|
147
|
+
### 实用工具示例
|
|
148
|
+
|
|
149
|
+
```javascript
|
|
150
|
+
// Shell 命令执行
|
|
151
|
+
const shellExec = defineTool({
|
|
152
|
+
name: 'shell_exec',
|
|
153
|
+
description: '执行 shell 命令并返回输出',
|
|
154
|
+
parameters: {
|
|
155
|
+
type: 'object',
|
|
156
|
+
properties: {
|
|
157
|
+
command: { type: 'string', description: 'shell 命令' },
|
|
158
|
+
},
|
|
159
|
+
required: ['command'],
|
|
160
|
+
},
|
|
161
|
+
execute: async ({ command }) => {
|
|
162
|
+
const { execSync } = await import('child_process')
|
|
163
|
+
try {
|
|
164
|
+
return execSync(command, { encoding: 'utf-8', timeout: 10000 })
|
|
165
|
+
} catch (err) {
|
|
166
|
+
return `命令执行失败: ${err.message}`
|
|
167
|
+
}
|
|
168
|
+
},
|
|
169
|
+
})
|
|
170
|
+
|
|
171
|
+
// HTTP 请求
|
|
172
|
+
const httpGet = defineTool({
|
|
173
|
+
name: 'http_get',
|
|
174
|
+
description: '发送 HTTP GET 请求',
|
|
175
|
+
parameters: {
|
|
176
|
+
type: 'object',
|
|
177
|
+
properties: {
|
|
178
|
+
url: { type: 'string', description: 'URL 地址' },
|
|
179
|
+
},
|
|
180
|
+
required: ['url'],
|
|
181
|
+
},
|
|
182
|
+
execute: async ({ url }) => {
|
|
183
|
+
const res = await fetch(url)
|
|
184
|
+
return await res.text()
|
|
185
|
+
},
|
|
186
|
+
})
|
|
187
|
+
|
|
188
|
+
// 数学计算
|
|
189
|
+
const calculate = defineTool({
|
|
190
|
+
name: 'calculate',
|
|
191
|
+
description: '计算数学表达式',
|
|
192
|
+
parameters: {
|
|
193
|
+
type: 'object',
|
|
194
|
+
properties: {
|
|
195
|
+
expression: { type: 'string', description: '数学表达式' },
|
|
196
|
+
},
|
|
197
|
+
required: ['expression'],
|
|
198
|
+
},
|
|
199
|
+
execute: async ({ expression }) => {
|
|
200
|
+
return String(Function(`"use strict"; return (${expression})`)())
|
|
201
|
+
},
|
|
202
|
+
})
|
|
203
|
+
```
|
|
204
|
+
|
|
205
|
+
---
|
|
206
|
+
|
|
207
|
+
## 流式输出
|
|
208
|
+
|
|
209
|
+
通过 async generator 实时获取 Agent 的执行过程:
|
|
210
|
+
|
|
211
|
+
```javascript
|
|
212
|
+
for await (const event of agent.stream('帮我分析这个项目')) {
|
|
213
|
+
switch (event.type) {
|
|
214
|
+
case 'delta': // LLM 文本增量
|
|
215
|
+
process.stdout.write(event.content)
|
|
216
|
+
break
|
|
217
|
+
case 'tool_start': // 开始调用工具
|
|
218
|
+
console.log(`\n[调用工具: ${event.name}]`)
|
|
219
|
+
break
|
|
220
|
+
case 'tool_end': // 工具执行完成
|
|
221
|
+
console.log(`[结果: ${event.result}]`)
|
|
222
|
+
break
|
|
223
|
+
case 'done': // 对话完成
|
|
224
|
+
console.log('\n--- 完成 ---')
|
|
225
|
+
break
|
|
226
|
+
}
|
|
227
|
+
}
|
|
228
|
+
```
|
|
229
|
+
|
|
230
|
+
---
|
|
231
|
+
|
|
232
|
+
## 多轮对话
|
|
233
|
+
|
|
234
|
+
Agent 自动维护对话历史,支持多轮上下文:
|
|
235
|
+
|
|
236
|
+
```javascript
|
|
237
|
+
const agent = new Agent({
|
|
238
|
+
provider: 'openai',
|
|
239
|
+
apiKey: process.env.OPENAI_API_KEY,
|
|
240
|
+
model: 'gpt-4',
|
|
241
|
+
tools: [readFile],
|
|
242
|
+
})
|
|
243
|
+
|
|
244
|
+
await agent.chat('读一下 package.json')
|
|
245
|
+
// Agent 记住了上一轮的内容
|
|
246
|
+
await agent.chat('这个项目用了什么依赖?')
|
|
247
|
+
|
|
248
|
+
// 开始新会话
|
|
249
|
+
agent.reset()
|
|
250
|
+
await agent.chat('你好') // 不记得之前的对话了
|
|
251
|
+
```
|
|
252
|
+
|
|
253
|
+
---
|
|
254
|
+
|
|
255
|
+
## 取消请求
|
|
256
|
+
|
|
257
|
+
```javascript
|
|
258
|
+
const controller = new AbortController()
|
|
259
|
+
|
|
260
|
+
// 5 秒后取消
|
|
261
|
+
setTimeout(() => controller.abort(), 5000)
|
|
262
|
+
|
|
263
|
+
try {
|
|
264
|
+
const reply = await agent.chat('做一个很复杂的分析', {
|
|
265
|
+
signal: controller.signal,
|
|
266
|
+
})
|
|
267
|
+
} catch (err) {
|
|
268
|
+
if (err.name === 'AbortError') {
|
|
269
|
+
console.log('请求已取消')
|
|
270
|
+
}
|
|
271
|
+
}
|
|
272
|
+
```
|
|
273
|
+
|
|
274
|
+
---
|
|
275
|
+
|
|
276
|
+
## 多供应商支持
|
|
277
|
+
|
|
278
|
+
所有使用 OpenAI 兼容 API 的供应商都可以直接使用:
|
|
279
|
+
|
|
280
|
+
```javascript
|
|
281
|
+
// OpenAI
|
|
282
|
+
new Agent({ provider: 'openai', apiKey: '...', model: 'gpt-4' })
|
|
283
|
+
|
|
284
|
+
// DeepSeek
|
|
285
|
+
new Agent({ provider: 'deepseek', apiKey: '...', model: 'deepseek-chat' })
|
|
286
|
+
|
|
287
|
+
// 通义千问
|
|
288
|
+
new Agent({ provider: 'qwen', apiKey: '...', model: 'qwen-turbo' })
|
|
289
|
+
|
|
290
|
+
// X-GROK
|
|
291
|
+
new Agent({ provider: 'x-grok', apiKey: '...', model: 'grok-2' })
|
|
292
|
+
|
|
293
|
+
// Moonshot (月之暗面)
|
|
294
|
+
new Agent({ provider: 'moonshot', apiKey: '...', model: 'moonshot-v1-8k' })
|
|
295
|
+
|
|
296
|
+
// 智谱 AI
|
|
297
|
+
new Agent({ provider: 'zhipu', apiKey: '...', model: 'glm-4' })
|
|
298
|
+
|
|
299
|
+
// 任意 OpenAI 兼容供应商(自定义 URL)
|
|
300
|
+
new Agent({
|
|
301
|
+
provider: 'openai',
|
|
302
|
+
url: 'https://my-proxy.com/v1/chat/completions',
|
|
303
|
+
apiKey: '...',
|
|
304
|
+
model: 'my-model',
|
|
305
|
+
})
|
|
306
|
+
```
|
|
307
|
+
|
|
308
|
+
### 注册自定义供应商
|
|
309
|
+
|
|
310
|
+
```javascript
|
|
311
|
+
import { registerProvider } from 'lll-web-agent/src/providers.js'
|
|
312
|
+
|
|
313
|
+
registerProvider('my-llm', {
|
|
314
|
+
url: 'https://api.my-llm.com/v1/chat/completions',
|
|
315
|
+
})
|
|
316
|
+
|
|
317
|
+
const agent = new Agent({ provider: 'my-llm', apiKey: '...', model: '...' })
|
|
318
|
+
```
|
|
319
|
+
|
|
320
|
+
---
|
|
321
|
+
|
|
322
|
+
## 全部配置项
|
|
323
|
+
|
|
324
|
+
```javascript
|
|
325
|
+
new Agent({
|
|
326
|
+
// 必填
|
|
327
|
+
provider: 'openai', // 供应商名称
|
|
328
|
+
apiKey: 'sk-xxx', // API Key
|
|
329
|
+
|
|
330
|
+
// 可选
|
|
331
|
+
model: 'gpt-4', // 模型名称,默认 'gpt-4'
|
|
332
|
+
systemPrompt: '你是一个助手', // 系统提示词,默认 'You are a helpful assistant.'
|
|
333
|
+
url: 'https://...', // 自定义 API URL,覆盖供应商默认值
|
|
334
|
+
tools: [], // 工具列表
|
|
335
|
+
maxRounds: 30, // 最大 ReAct 轮次,默认 30
|
|
336
|
+
maxMessages: 40, // 记忆窗口大小(保留最近 N 条消息),默认 40
|
|
337
|
+
temperature: 1, // 温度,默认 1
|
|
338
|
+
})
|
|
339
|
+
```
|
|
340
|
+
|
|
341
|
+
---
|
|
342
|
+
|
|
343
|
+
## 工作原理
|
|
344
|
+
|
|
345
|
+
Agent 内部运行 ReAct(Reasoning + Acting)循环:
|
|
346
|
+
|
|
347
|
+
```
|
|
348
|
+
用户消息 + 工具描述 -> LLM
|
|
349
|
+
|
|
|
350
|
+
LLM 返回文本? -> 作为最终回答返回
|
|
351
|
+
|
|
|
352
|
+
LLM 返回工具调用?
|
|
353
|
+
|
|
|
354
|
+
执行工具 -> 将结果加入对话历史
|
|
355
|
+
|
|
|
356
|
+
回到顶部,再次调用 LLM
|
|
357
|
+
```
|
|
358
|
+
|
|
359
|
+
对话历史通过 SlidingWindowMemory 管理,超出窗口大小时自动丢弃最旧的消息(system prompt 始终保留)。
|
|
360
|
+
|
|
361
|
+
---
|
|
362
|
+
|
|
363
|
+
## API 参考
|
|
364
|
+
|
|
365
|
+
### `Agent`
|
|
366
|
+
|
|
367
|
+
| 方法 | 说明 |
|
|
368
|
+
|------|------|
|
|
369
|
+
| `new Agent(opts)` | 创建 Agent 实例 |
|
|
370
|
+
| `agent.chat(message, opts?)` | 同步对话,返回 `Promise<string>` |
|
|
371
|
+
| `agent.stream(message, opts?)` | 流式对话,返回 `AsyncGenerator<Event>` |
|
|
372
|
+
| `agent.reset()` | 清空对话历史,开始新会话 |
|
|
373
|
+
|
|
374
|
+
### `defineTool(def)`
|
|
375
|
+
|
|
376
|
+
| 字段 | 类型 | 必填 | 说明 |
|
|
377
|
+
|------|------|------|------|
|
|
378
|
+
| `name` | string | 是 | 工具唯一名称 |
|
|
379
|
+
| `description` | string | 是 | 工具描述(供 LLM 理解) |
|
|
380
|
+
| `parameters` | object | 否 | JSON Schema 格式的参数定义 |
|
|
381
|
+
| `execute` | async function | 是 | 执行函数,接收参数对象,返回字符串 |
|
|
382
|
+
|
|
383
|
+
### 流式事件类型
|
|
384
|
+
|
|
385
|
+
| type | 字段 | 说明 |
|
|
386
|
+
|------|------|------|
|
|
387
|
+
| `delta` | `content` | LLM 文本增量 |
|
|
388
|
+
| `tool_start` | `name`, `arguments` | 开始调用工具 |
|
|
389
|
+
| `tool_end` | `name`, `result` | 工具执行完成 |
|
|
390
|
+
| `done` | `content` | 对话完成,包含完整回复 |
|
|
391
|
+
|
|
392
|
+
---
|
|
393
|
+
|
|
394
|
+
## License
|
|
395
|
+
|
|
396
|
+
MIT
|
package/package.json
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "lll-web-agent",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "开箱即用的 LLM Agent SDK — 配个 API Key 就能跑",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "src/index.js",
|
|
7
|
+
"exports": {
|
|
8
|
+
".": "./src/index.js"
|
|
9
|
+
},
|
|
10
|
+
"files": [
|
|
11
|
+
"src/",
|
|
12
|
+
"README.md"
|
|
13
|
+
],
|
|
14
|
+
"scripts": {
|
|
15
|
+
"test": "node --test src/**/*.test.js",
|
|
16
|
+
"example": "node examples/basic.js"
|
|
17
|
+
},
|
|
18
|
+
"engines": {
|
|
19
|
+
"node": ">=18.0.0"
|
|
20
|
+
},
|
|
21
|
+
"keywords": ["agent", "llm", "openai", "deepseek", "anthropic", "qwen", "function-calling", "react-loop", "tool-use"],
|
|
22
|
+
"license": "MIT",
|
|
23
|
+
"repository": {
|
|
24
|
+
"type": "git",
|
|
25
|
+
"url": ""
|
|
26
|
+
}
|
|
27
|
+
}
|
package/src/agent.js
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Agent — 开箱即用的 LLM Agent
|
|
3
|
+
* 对应 Java 框架的 Agent + AgentBuilder + ReActStrategy
|
|
4
|
+
*
|
|
5
|
+
* 用法:
|
|
6
|
+
* const agent = new Agent({
|
|
7
|
+
* provider: 'openai',
|
|
8
|
+
* apiKey: 'sk-xxx',
|
|
9
|
+
* model: 'gpt-4',
|
|
10
|
+
* tools: [readFile, shellExec],
|
|
11
|
+
* })
|
|
12
|
+
* const reply = await agent.chat('帮我分析项目架构')
|
|
13
|
+
*/
|
|
14
|
+
|
|
15
|
+
import { streamChat, syncChat } from './llm-client.js'
|
|
16
|
+
import { formatToolsForOpenAI, parseToolCalls, formatToolResult } from './tool.js'
|
|
17
|
+
import { SlidingWindowMemory } from './memory.js'
|
|
18
|
+
import { resolveProviderUrl } from './providers.js'
|
|
19
|
+
|
|
20
|
+
export class Agent {
  /**
   * @param {object} opts
   * @param {string} opts.provider - provider name ('openai', 'deepseek', 'qwen', ...)
   * @param {string} opts.apiKey - API key
   * @param {string} [opts.model='gpt-4'] - model name
   * @param {string} [opts.systemPrompt='You are a helpful assistant.'] - system prompt
   * @param {string} [opts.url] - custom API URL (overrides the provider default)
   * @param {import('./tool.js').ToolDef[]} [opts.tools=[]] - tool list
   * @param {number} [opts.maxRounds=30] - maximum ReAct rounds per chat
   * @param {number} [opts.maxMessages=40] - memory window size
   * @param {number} [opts.temperature=1] - sampling temperature
   */
  constructor(opts) {
    if (!opts.apiKey) throw new Error('apiKey is required')
    if (!opts.provider) throw new Error('provider is required')

    this.apiKey = opts.apiKey
    this.model = opts.model ?? 'gpt-4'
    this.systemPrompt = opts.systemPrompt ?? 'You are a helpful assistant.'
    this.url = resolveProviderUrl(opts.provider, opts.url)
    this.tools = opts.tools ?? []
    this.maxRounds = opts.maxRounds ?? 30
    this.temperature = opts.temperature ?? 1
    this.memory = new SlidingWindowMemory(opts.maxMessages ?? 40)

    // Seed the conversation with the system prompt.
    this.memory.add({ role: 'system', content: this.systemPrompt })
  }

  /**
   * Synchronous chat — send a message, get the final reply text.
   * Runs the full ReAct loop internally (think -> call tools -> observe -> repeat).
   * @param {string} message - user message
   * @param {object} [opts]
   * @param {AbortSignal} [opts.signal] - cancellation signal
   * @returns {Promise<string>} the agent's final reply
   */
  async chat(message, opts = {}) {
    this.memory.add({ role: 'user', content: message })
    return this._reactLoop(opts)
  }

  /**
   * Streaming chat — yields events via an async generator while the agent works.
   * @param {string} message
   * @param {object} [opts]
   * @param {AbortSignal} [opts.signal]
   * @yields {{ type: 'delta'|'tool_start'|'tool_end'|'done', ... }}
   */
  async *stream(message, opts = {}) {
    this.memory.add({ role: 'user', content: message })
    yield* this._reactLoopStream(opts)
  }

  /** Clear the conversation history and start a new session. */
  reset() {
    this.memory.clear()
    this.memory.add({ role: 'system', content: this.systemPrompt })
  }

  // ---- internal helpers ----

  /** Build the request body for one LLM call from the current memory. */
  _requestBody() {
    const body = {
      model: this.model,
      messages: this.memory.getMessages(),
      temperature: this.temperature,
    }
    if (this.tools.length > 0) body.tools = formatToolsForOpenAI(this.tools)
    return body
  }

  /**
   * Execute one tool call, converting "not found" and thrown errors into
   * error strings so the LLM can observe the failure and react to it.
   * @returns {Promise<string|object>} the tool result (or error text)
   */
  async _runTool(toolMap, call) {
    const tool = toolMap[call.name]
    if (!tool) {
      return `Error: Tool "${call.name}" not found. Available: ${this.tools.map(t => t.name).join(', ')}`
    }
    try {
      return await tool.execute(call.arguments)
    } catch (err) {
      return `Error executing ${call.name}: ${err.message}`
    }
  }

  // ---- ReAct loop (non-streaming) ----

  async _reactLoop({ signal } = {}) {
    const toolMap = Object.fromEntries(this.tools.map(t => [t.name, t]))

    for (let round = 0; round < this.maxRounds; round++) {
      signal?.throwIfAborted()

      const response = await syncChat({ url: this.url, apiKey: this.apiKey, body: this._requestBody(), signal })

      const message = response.choices?.[0]?.message
      if (!message) throw new Error('Empty LLM response')

      const textContent = message.content ?? ''
      const toolCalls = parseToolCalls(response)

      // No tool calls means the LLM produced its final answer.
      if (toolCalls.length === 0) {
        this.memory.add({ role: 'assistant', content: textContent })
        return textContent
      }

      // Record the assistant turn (with tool_calls) in history.
      this.memory.add({
        role: 'assistant',
        content: textContent || null,
        tool_calls: message.tool_calls,
      })

      // Execute every requested tool and feed the results back.
      for (const call of toolCalls) {
        const result = await this._runTool(toolMap, call)
        this.memory.add(formatToolResult(call.id, call.name, result))
      }
    }

    return '[max rounds exceeded]'
  }

  // ---- ReAct loop (streaming) ----

  async *_reactLoopStream({ signal } = {}) {
    const toolMap = Object.fromEntries(this.tools.map(t => [t.name, t]))

    for (let round = 0; round < this.maxRounds; round++) {
      signal?.throwIfAborted()

      // streamChat consumes the SSE stream and returns a reconstructed
      // non-streaming-equivalent response. (Fix: the previous version also
      // accumulated deltas into a local variable that was never read.)
      const response = await streamChat({ url: this.url, apiKey: this.apiKey, body: this._requestBody(), signal })

      const message = response.choices?.[0]?.message
      const textContent = message?.content ?? ''
      const toolCalls = parseToolCalls(response)

      // Push whatever text the model produced this round.
      if (textContent) {
        yield { type: 'delta', content: textContent }
      }

      // No tool calls means the final answer.
      if (toolCalls.length === 0) {
        this.memory.add({ role: 'assistant', content: textContent })
        yield { type: 'done', content: textContent }
        return
      }

      this.memory.add({
        role: 'assistant',
        content: textContent || null,
        tool_calls: message.tool_calls,
      })

      for (const call of toolCalls) {
        yield { type: 'tool_start', name: call.name, arguments: call.arguments }
        const result = await this._runTool(toolMap, call)
        this.memory.add(formatToolResult(call.id, call.name, result))
        yield { type: 'tool_end', name: call.name, result }
      }
    }

    yield { type: 'done', content: '[max rounds exceeded]' }
  }
}
|
package/src/llm-client.js
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LLM 通信层 — SSE 流式请求 + 工具调用增量拼接
|
|
3
|
+
* 对应 Java 框架的 LlmClient + SseStreamProcessor + StreamingFcCollector
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
/**
 * Send an SSE streaming request and return a reconstructed
 * non-streaming-equivalent response JSON.
 * @param {object} opts
 * @param {string} opts.url - API endpoint
 * @param {string} opts.apiKey - Bearer token
 * @param {object} opts.body - request body (messages, model, tools, ...)
 * @param {AbortSignal} [opts.signal] - cancellation signal
 * @param {function} [opts.onDelta] - text-delta callback (delta: string) => void
 * @param {function} [opts.onReasoning] - reasoning-delta callback
 * @param {function} [opts.onToolCall] - tool-call-delta callback (index, toolCall) => void
 * @returns {Promise<object>} reconstructed non-streaming response JSON
 * @throws {LlmApiError} when the HTTP response is non-2xx
 */
export async function streamChat({ url, apiKey, body, signal, onDelta, onReasoning, onToolCall }) {
  const response = await fetch(url, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`,
    },
    body: JSON.stringify({ ...body, stream: true }),
    signal,
  })

  if (!response.ok) {
    const errorBody = await response.text().catch(() => '')
    throw new LlmApiError(response.status, errorBody)
  }

  const reader = response.body.getReader()
  const decoder = new TextDecoder()
  let buffer = ''

  // StreamingFcCollector equivalent: accumulates text, reasoning and
  // per-index tool-call fragments.
  const collected = { content: '', reasoning: '', toolCalls: new Map() }

  const handleLine = (line) => {
    if (!line.startsWith('data:')) return
    const data = line.slice(5).trim()
    if (data === '[DONE]' || !data) return
    try {
      processSSEChunk(JSON.parse(data), collected, { onDelta, onReasoning, onToolCall })
    } catch { /* ignore unparseable SSE lines */ }
  }

  while (true) {
    const { done, value } = await reader.read()
    if (done) break

    buffer += decoder.decode(value, { stream: true })
    const lines = buffer.split('\n')
    buffer = lines.pop() // keep the trailing partial line
    for (const line of lines) handleLine(line)
  }

  // Fix: flush the decoder's internal state and process any final data line
  // that arrived without a trailing newline — previously it was dropped.
  buffer += decoder.decode()
  if (buffer) handleLine(buffer)

  return reconstructResponse(collected)
}
|
|
63
|
+
|
|
64
|
+
/**
 * Non-streaming chat completion request.
 * @param {object} opts
 * @param {string} opts.url - API endpoint
 * @param {string} opts.apiKey - Bearer token
 * @param {object} opts.body - request body
 * @param {AbortSignal} [opts.signal] - cancellation signal
 * @returns {Promise<object>} parsed response JSON
 * @throws {LlmApiError} when the HTTP response is non-2xx
 */
export async function syncChat({ url, apiKey, body, signal }) {
  const headers = {
    'Content-Type': 'application/json',
    'Authorization': `Bearer ${apiKey}`,
  }
  const payload = JSON.stringify({ ...body, stream: false })

  const response = await fetch(url, { method: 'POST', headers, body: payload, signal })

  if (response.ok) {
    return response.json()
  }

  const errorBody = await response.text().catch(() => '')
  throw new LlmApiError(response.status, errorBody)
}
|
|
85
|
+
|
|
86
|
+
/** Fold a single SSE chunk into the accumulator — counterpart of SseStreamProcessor.processOpenAiLine. */
function processSSEChunk(json, collected, callbacks) {
  const delta = json.choices?.[0]?.delta
  if (!delta) return

  const { reasoning_content: reasoning, content, tool_calls: toolCallDeltas } = delta

  if (reasoning) {
    collected.reasoning += reasoning
    callbacks.onReasoning?.(reasoning)
  }

  if (content) {
    collected.content += content
    callbacks.onDelta?.(content)
  }

  if (!toolCallDeltas) return

  for (const tc of toolCallDeltas) {
    const idx = tc.index ?? 0
    let acc = collected.toolCalls.get(idx)
    if (acc === undefined) {
      acc = { id: '', type: 'function', name: '', arguments: '' }
      collected.toolCalls.set(idx, acc)
    }
    // id/type/name arrive once; arguments arrive as concatenable fragments.
    if (tc.id) acc.id = tc.id
    if (tc.type) acc.type = tc.type
    const fn = tc.function
    if (fn?.name) acc.name = fn.name
    if (fn?.arguments) acc.arguments += fn.arguments
    callbacks.onToolCall?.(idx, acc)
  }
}
|
|
116
|
+
|
|
117
|
+
/** Build a non-streaming-equivalent response JSON from the accumulator — counterpart of StreamingFcCollector.reconstructResponse. */
function reconstructResponse(collected) {
  const { content, reasoning, toolCalls } = collected
  const message = {}

  if (content) message.content = content
  if (reasoning) message.reasoning_content = reasoning

  if (toolCalls.size > 0) {
    const calls = []
    for (const { id, type, name, arguments: args } of toolCalls.values()) {
      calls.push({ id, type, function: { name, arguments: args } })
    }
    message.tool_calls = calls
  }

  return { choices: [{ message }] }
}
|
|
131
|
+
|
|
132
|
+
/**
 * Error thrown when the LLM HTTP API responds with a non-2xx status.
 * Exposes `status` (HTTP status code) and `body` (raw response body).
 */
export class LlmApiError extends Error {
  constructor(status, body) {
    // Fix: `body` may be undefined/non-string (e.g. a failed text() read);
    // coerce before slicing so constructing the error can never itself throw.
    const text = String(body ?? '')
    super(`LLM API error ${status}: ${text.slice(0, 200)}`)
    this.name = 'LlmApiError' // was the generic 'Error'
    this.status = status
    this.body = body
  }
}
|
package/src/memory.js
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* 对话记忆 — 对应 Java 框架的 SlidingWindowMemory
|
|
3
|
+
* 保留 system prompt + 最近 N 条消息,超出时丢弃最旧的
|
|
4
|
+
*/
|
|
5
|
+
export class SlidingWindowMemory {
  /** @param {number} maxMessages - max number of non-system messages to keep */
  constructor(maxMessages = 40) {
    this.maxMessages = maxMessages
    this.messages = []
  }

  /** Append one message, trimming if the window overflows. */
  add(message) {
    this.messages.push(message)
    this._trim()
  }

  /** Append several messages, trimming if the window overflows. */
  addAll(messages) {
    this.messages.push(...messages)
    this._trim()
  }

  /** @returns {object[]} a copy of the message list to send to the LLM */
  getMessages() {
    return [...this.messages]
  }

  /** Drop all messages (including system messages). */
  clear() {
    this.messages = []
  }

  _trim() {
    // Keep all system messages plus the most recent maxMessages non-system ones.
    const system = this.messages.filter(m => m.role === 'system')
    const rest = this.messages.filter(m => m.role !== 'system')
    if (rest.length <= this.maxMessages) return

    let window = rest.slice(-this.maxMessages)
    // Fix: trimming can separate a `role: 'tool'` result from the assistant
    // message carrying its `tool_calls`; OpenAI-compatible APIs reject such
    // orphans, so drop leading tool messages until the window is valid.
    while (window.length > 0 && window[0].role === 'tool') {
      window = window.slice(1)
    }
    this.messages = [...system, ...window]
  }
}
|
package/src/providers.js
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* 供应商适配 — 对应 Java 框架的 LlmProviderAdapterRegistry
|
|
3
|
+
* 处理不同供应商的 API URL 差异
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
const PROVIDERS = {
  openai: {
    url: 'https://api.openai.com/v1/chat/completions',
  },
  deepseek: {
    url: 'https://api.deepseek.com/chat/completions',
  },
  anthropic: {
    url: 'https://api.anthropic.com/v1/messages',
    // Anthropic uses a different auth header and message format; not
    // supported in the MVP. A dedicated protocol adapter is planned.
  },
  qwen: {
    url: 'https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions',
  },
  'x-grok': {
    url: 'https://api.x.ai/v1/chat/completions',
  },
  moonshot: {
    url: 'https://api.moonshot.cn/v1/chat/completions',
  },
  zhipu: {
    url: 'https://open.bigmodel.cn/api/paas/v4/chat/completions',
  },
}

/**
 * Resolve the provider configuration to an API URL.
 * @param {string} provider - provider name (case-insensitive)
 * @param {string} [customUrl] - custom URL (takes precedence when given)
 * @returns {string} API URL
 * @throws {Error} when the provider is unknown and no custom URL is given
 */
export function resolveProviderUrl(provider, customUrl) {
  if (customUrl) return customUrl
  const key = provider.toLowerCase()
  const config = PROVIDERS[key]
  if (!config) {
    // Unknown provider: assume OpenAI-compatible, but the caller must supply a url.
    throw new Error(
      `Unknown provider "${provider}". Either use a known provider (${Object.keys(PROVIDERS).join(', ')}) or provide a custom url.`
    )
  }
  return config.url
}

/**
 * Register a custom (OpenAI-compatible) provider.
 * @param {string} name - provider name (stored lowercase)
 * @param {{ url: string }} config - must include the endpoint URL
 * @throws {Error} when config.url is missing — fix: previously a url-less
 *   config was accepted silently and resolveProviderUrl returned undefined.
 */
export function registerProvider(name, config) {
  if (!config?.url) {
    throw new Error(`registerProvider("${name}"): config.url is required`)
  }
  PROVIDERS[name.toLowerCase()] = config
}
|
package/src/tool.js
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Tool 定义 — 对应 Java 框架的 Tool 接口
|
|
3
|
+
*
|
|
4
|
+
* 用法:
|
|
5
|
+
* const readFile = defineTool({
|
|
6
|
+
* name: 'read_file',
|
|
7
|
+
* description: '读取文件内容',
|
|
8
|
+
* parameters: {
|
|
9
|
+
* type: 'object',
|
|
10
|
+
* properties: { path: { type: 'string', description: '文件路径' } },
|
|
11
|
+
* required: ['path'],
|
|
12
|
+
* },
|
|
13
|
+
* execute: async ({ path }) => {
|
|
14
|
+
* const fs = await import('fs/promises')
|
|
15
|
+
* return await fs.readFile(path, 'utf-8')
|
|
16
|
+
* },
|
|
17
|
+
* })
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
/**
|
|
21
|
+
* @typedef {object} ToolDef
|
|
22
|
+
* @property {string} name - 工具唯一名称
|
|
23
|
+
* @property {string} description - 工具描述(供 LLM 理解)
|
|
24
|
+
* @property {object} parameters - JSON Schema 格式的参数定义
|
|
25
|
+
* @property {(params: object, context?: object) => Promise<string>} execute - 执行函数
|
|
26
|
+
*/
|
|
27
|
+
|
|
28
|
+
/**
 * Define a tool that an Agent can expose to the LLM.
 * @param {ToolDef} def
 * @returns {ToolDef} normalized tool definition (parameters default to an empty object schema)
 * @throws {Error} when name, description, or execute is missing
 */
export function defineTool(def) {
  const { name, description, parameters, execute } = def
  if (!name) throw new Error('Tool name is required')
  if (!description) throw new Error('Tool description is required')
  if (!execute) throw new Error('Tool execute function is required')
  return {
    name,
    description,
    parameters: parameters ?? { type: 'object', properties: {} },
    execute,
  }
}
|
|
44
|
+
|
|
45
|
+
/** Convert a list of tool definitions into the OpenAI `tools` request format. */
export function formatToolsForOpenAI(tools) {
  const formatted = []
  for (const { name, description, parameters } of tools) {
    formatted.push({
      type: 'function',
      function: { name, description, parameters },
    })
  }
  return formatted
}
|
|
56
|
+
|
|
57
|
+
/** Extract tool calls from an LLM response; call arguments are JSON-parsed (malformed JSON becomes {}). */
export function parseToolCalls(response) {
  const rawCalls = response.choices?.[0]?.message?.tool_calls
  if (!rawCalls?.length) return []
  return rawCalls.map((tc) => {
    let parsedArgs
    try {
      parsedArgs = JSON.parse(tc.function.arguments)
    } catch {
      parsedArgs = {}
    }
    return { id: tc.id, name: tc.function.name, arguments: parsedArgs }
  })
}
|
|
67
|
+
|
|
68
|
+
/** Wrap a tool execution result as a `role: 'tool'` conversation message (objects are stringified). */
export function formatToolResult(callId, toolName, result) {
  const content = typeof result === 'string' ? result : JSON.stringify(result)
  return {
    role: 'tool',
    tool_call_id: callId,
    name: toolName,
    content,
  }
}
|
|
77
|
+
|
|
78
|
+
/** Parse a JSON string, returning an empty object on malformed input. */
function safeParseJSON(str) {
  try {
    return JSON.parse(str)
  } catch {
    return {}
  }
}
|