@next-open-ai/openclawx 0.8.40 → 0.8.48
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -0
- package/apps/desktop/renderer/dist/assets/index-BHY1xIZQ.css +10 -0
- package/apps/desktop/renderer/dist/assets/{index-DgLpQsA-.js → index-DQxlVuBe.js} +56 -52
- package/apps/desktop/renderer/dist/index.html +2 -2
- package/dist/core/agent/agent-manager.js +9 -2
- package/dist/core/agent/proxy/adapters/local-adapter.js +1 -1
- package/dist/core/config/desktop-config.d.ts +2 -0
- package/dist/core/config/desktop-config.js +30 -9
- package/dist/core/config/provider-support-default.js +26 -0
- package/dist/core/local-llm-server/index.d.ts +32 -0
- package/dist/core/local-llm-server/index.js +126 -0
- package/dist/core/local-llm-server/llm-context.d.ts +60 -0
- package/dist/core/local-llm-server/llm-context.js +221 -0
- package/dist/core/local-llm-server/model-resolve.d.ts +20 -0
- package/dist/core/local-llm-server/model-resolve.js +58 -0
- package/dist/core/local-llm-server/server.d.ts +1 -0
- package/dist/core/local-llm-server/server.js +235 -0
- package/dist/core/memory/local-embedding.d.ts +4 -3
- package/dist/core/memory/local-embedding.js +43 -3
- package/dist/gateway/methods/agent-chat.js +71 -41
- package/dist/gateway/server.js +54 -1
- package/dist/server/agent-config/agent-config.controller.d.ts +1 -1
- package/dist/server/agent-config/agent-config.service.d.ts +2 -0
- package/dist/server/agent-config/agent-config.service.js +5 -0
- package/dist/server/config/config.controller.d.ts +58 -4
- package/dist/server/config/config.controller.js +135 -3
- package/dist/server/config/config.module.js +3 -2
- package/dist/server/config/local-models.service.d.ts +52 -0
- package/dist/server/config/local-models.service.js +211 -0
- package/package.json +1 -1
- package/presets/recommended-local-models.json +42 -0
- package/apps/desktop/renderer/dist/assets/index-BSfTiTKo.css +0 -10
package/dist/core/local-llm-server/model-resolve.js
ADDED
@@ -0,0 +1,58 @@
+/**
+ * Local model path resolution and file-existence checks (consistent with ~/.cache/llama and node-llama-cpp naming).
+ */
+import { join } from "node:path";
+import { existsSync } from "node:fs";
+import { homedir } from "node:os";
+export const LOCAL_LLM_CACHE_DIR = join(homedir(), ".cache", "llama");
+/**
+ * Take the trailing filename of a modelUri (for flexible matching against installed files: different node-llama-cpp versions may generate different prefixes).
+ * e.g. hf:Qwen/Qwen3-4B-GGUF/Qwen3-4B-Q4_K_M.gguf → Qwen3-4B-Q4_K_M.gguf
+ */
+export function modelUriBasename(modelUri) {
+    const s = (modelUri || "").trim();
+    if (!s)
+        return "";
+    const parts = s.replace(/\\/g, "/").split("/");
+    return parts[parts.length - 1] || s;
+}
+/**
+ * Convert a modelUri (hf:owner/repo/file.gguf) or a filename into the filename used in the cache directory.
+ * Matches the LocalModelsService.predictFilename logic.
+ */
+export function modelUriToFilename(modelUri) {
+    const s = (modelUri || "").trim();
+    if (!s)
+        return "";
+    if (s.startsWith("hf:")) {
+        const parts = s.slice(3).split("/");
+        return "hf_" + parts.slice(0, -1).join("_") + "_" + parts[parts.length - 1];
+    }
+    // Already a filename or path; take only the basename
+    const last = s.replace(/\\/g, "/").split("/").pop();
+    return last ?? s;
+}
+/**
+ * Check whether the given model (URI or filename) already exists in the local cache directory.
+ */
+export function isModelFileInCache(modelIdOrUri, cacheDir = LOCAL_LLM_CACHE_DIR) {
+    const filename = modelUriToFilename(modelIdOrUri);
+    if (!filename || !filename.endsWith(".gguf"))
+        return false;
+    return existsSync(join(cacheDir, filename));
+}
+/**
+ * Convert a model identifier from the frontend (an hf: URI or an installed filename) into a path or URI that node-llama-cpp accepts.
+ * For a bare filename (e.g. hf_xxx.gguf), return the absolute path under the cache directory.
+ */
+export function toModelPathForStart(uriOrFilename, cacheDir = LOCAL_LLM_CACHE_DIR) {
+    const s = (uriOrFilename || "").trim();
+    if (!s)
+        return "";
+    if (s.startsWith("hf:"))
+        return s;
+    const filename = modelUriToFilename(s);
+    if (!filename)
+        return s;
+    return join(cacheDir, filename);
+}
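The naming convention these helpers encode is easiest to see with concrete values. A hypothetical usage sketch (the URI is the example from the comments above; the expected results follow directly from the code as written):

```js
import { modelUriToFilename, isModelFileInCache, toModelPathForStart } from "./model-resolve.js";

// hf: URIs are flattened into an "hf_"-prefixed cache filename:
modelUriToFilename("hf:Qwen/Qwen3-4B-GGUF/Qwen3-4B-Q4_K_M.gguf");
// → "hf_Qwen_Qwen3-4B-GGUF_Qwen3-4B-Q4_K_M.gguf"

// Only .gguf filenames can ever be reported as cached:
isModelFileInCache("hf_Qwen_Qwen3-4B-GGUF_Qwen3-4B-Q4_K_M.gguf");
// → true iff ~/.cache/llama/hf_Qwen_Qwen3-4B-GGUF_Qwen3-4B-Q4_K_M.gguf exists

// hf: URIs pass through untouched; bare filenames become absolute cache paths:
toModelPathForStart("hf:Qwen/Qwen3-4B-GGUF/Qwen3-4B-Q4_K_M.gguf"); // the URI itself
toModelPathForStart("hf_Qwen_Qwen3-4B-GGUF_Qwen3-4B-Q4_K_M.gguf"); // ~/.cache/llama/<filename>
```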
package/dist/core/local-llm-server/server.d.ts
ADDED
@@ -0,0 +1 @@
+export declare function createOpenAICompatServer(port: number): Promise<void>;
package/dist/core/local-llm-server/server.js
ADDED
@@ -0,0 +1,235 @@
+/**
+ * OpenAI-compatible HTTP service (strictly aligned with the [OpenAI Chat Completions / Embeddings API](https://platform.openai.com/docs/api-reference)).
+ * Implements: GET /v1/models; POST /v1/chat/completions (streaming/non-streaming, tool_calls); POST /v1/embeddings.
+ * - Errors are uniformly { error: { message, type } }; streaming errors are sent as an SSE event before the stream ends.
+ * - Streaming deltas carry only spec fields: role, content (always a string), and tool_calls (spec-shaped), so clients never parse unknown types.
+ */
+import { createServer } from "node:http";
+import { randomUUID } from "node:crypto";
+import { chatCompletionStream, chatCompletion, getEmbedding, isReady, } from "./llm-context.js";
+const LLM_MODEL_ID = process.env.LOCAL_LLM_MODEL_ID ?? "local-llm";
+const EMB_MODEL_ID = process.env.LOCAL_EMB_MODEL_ID ?? "local-embedding";
+function readBody(req) {
+    return new Promise((resolve, reject) => {
+        let data = "";
+        req.on("data", (chunk) => (data += chunk));
+        req.on("end", () => {
+            try {
+                resolve(data ? JSON.parse(data) : {});
+            }
+            catch {
+                reject(new Error("Invalid JSON body"));
+            }
+        });
+        req.on("error", reject);
+    });
+}
+function sendJson(res, status, body) {
+    const json = JSON.stringify(body);
+    res.writeHead(status, { "Content-Type": "application/json" });
+    res.end(json);
+}
+/** OpenAI-spec error body: { error: { message, type } } */
+function sendError(res, status, message, type = status >= 500 ? "server_error" : "invalid_request_error") {
+    sendJson(res, status, { error: { message: String(message), type } });
+}
+/** Build an OpenAI-format chat completion response object */
+function buildCompletionResponse(content, tool_calls, finish_reason, model) {
+    const message = { role: "assistant", content: tool_calls ? null : content };
+    if (tool_calls?.length)
+        message.tool_calls = tool_calls;
+    return {
+        id: `chatcmpl-${randomUUID()}`,
+        object: "chat.completion",
+        created: Math.floor(Date.now() / 1000),
+        model,
+        choices: [{ index: 0, message, finish_reason, logprobs: null }],
+        usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
+    };
+}
+/** Build an SSE delta chunk containing only OpenAI streaming-spec fields; logprobs is omitted to avoid downstream parsing glitches */
+function buildStreamChunk(id, model, delta, finish_reason) {
+    const choice = { index: 0, delta, finish_reason };
+    const chunk = {
+        id,
+        object: "chat.completion.chunk",
+        created: Math.floor(Date.now() / 1000),
+        model,
+        choices: [choice],
+    };
+    return `data: ${JSON.stringify(chunk)}\n\n`;
+}
+async function handleChatCompletions(req, res) {
+    let body;
+    try {
+        body = await readBody(req);
+    }
+    catch {
+        return sendError(res, 400, "Invalid JSON body");
+    }
+    if (!isReady())
+        return sendError(res, 503, "Model not yet loaded, please retry shortly", "server_error");
+    if (!Array.isArray(body.messages)) {
+        return sendError(res, 400, "Missing or invalid 'messages' (must be an array)", "invalid_request_error");
+    }
+    if (body.messages.length === 0) {
+        return sendError(res, 400, "'messages' must contain at least one message", "invalid_request_error");
+    }
+    const messages = body.messages;
+    const tools = Array.isArray(body.tools) ? body.tools : [];
+    const stream = body.stream === true;
+    const model = typeof body.model === "string" && body.model.trim() ? body.model.trim() : LLM_MODEL_ID;
+    const abortCtrl = new AbortController();
+    req.on("close", () => abortCtrl.abort());
+    if (stream) {
+        res.writeHead(200, {
+            "Content-Type": "text/event-stream",
+            "Cache-Control": "no-cache",
+            Connection: "keep-alive",
+        });
+        const id = `chatcmpl-${randomUUID()}`;
+        // First chunk: role + content placeholder, consistent with DeepSeek and others, so strict downstream delta validation doesn't choke on a role-only chunk
+        res.write(buildStreamChunk(id, model, { role: "assistant", content: "" }, null));
+        let pendingToolCalls;
+        let finishReason = "stop";
+        try {
+            await chatCompletionStream(messages, tools, (chunk) => {
+                if (abortCtrl.signal.aborted)
+                    return;
+                if (chunk.content != null && chunk.content !== "") {
+                    const text = typeof chunk.content === "string" ? chunk.content : String(chunk.content);
+                    res.write(buildStreamChunk(id, model, { content: text }, null));
+                }
+                if (chunk.tool_calls?.length) {
+                    pendingToolCalls = chunk.tool_calls;
+                }
+                if (chunk.finish_reason) {
+                    finishReason = chunk.finish_reason;
+                }
+            }, abortCtrl.signal);
+        }
+        catch (e) {
+            if (!abortCtrl.signal.aborted) {
+                const errMsg = e instanceof Error ? e.message : String(e);
+                const stack = e instanceof Error ? e.stack : undefined;
+                console.error("[local-llm] stream error:", errMsg);
+                if (stack)
+                    console.error("[local-llm] stream stack:", stack);
+                res.write(`data: ${JSON.stringify({ error: { message: errMsg, type: "server_error" } })}\n\n`);
+            }
+            res.end();
+            return;
+        }
+        // If there are tool_calls, emit one delta per the OpenAI streaming spec (with index/id/type/function), consistent with DeepSeek and others
+        if (pendingToolCalls?.length) {
+            const deltaToolCalls = pendingToolCalls.map((tc, i) => ({
+                index: i,
+                id: typeof tc.id === "string" ? tc.id : `call_${i}`,
+                type: "function",
+                function: {
+                    name: typeof tc.function?.name === "string" ? tc.function.name : "",
+                    arguments: typeof tc.function?.arguments === "string" ? tc.function.arguments : "",
+                },
+            }));
+            res.write(buildStreamChunk(id, model, { tool_calls: deltaToolCalls }, null));
+            finishReason = "tool_calls";
+        }
+        res.write(buildStreamChunk(id, model, {}, finishReason));
+        res.write("data: [DONE]\n\n");
+        res.end();
+    }
+    else {
+        try {
+            const result = await chatCompletion(messages, tools, abortCtrl.signal);
+            sendJson(res, 200, buildCompletionResponse(result.content, result.tool_calls, result.finish_reason, model));
+        }
+        catch (e) {
+            const msg = e instanceof Error ? e.message : String(e);
+            sendError(res, 500, msg, "server_error");
+        }
+    }
+}
+async function handleEmbeddings(req, res) {
+    let body;
+    try {
+        body = await readBody(req);
+    }
+    catch {
+        return sendError(res, 400, "Invalid JSON body", "invalid_request_error");
+    }
+    if (!isReady())
+        return sendError(res, 503, "Model not yet loaded, please retry shortly", "server_error");
+    const input = body.input;
+    if (input === undefined || input === null) {
+        return sendError(res, 400, "Missing 'input' (string or array of strings)", "invalid_request_error");
+    }
+    const inputs = Array.isArray(input) ? input : [input];
+    if (inputs.length === 0 || inputs.some((x) => typeof x !== "string")) {
+        return sendError(res, 400, "'input' must be a non-empty string or array of strings", "invalid_request_error");
+    }
+    try {
+        const data = await Promise.all(inputs.map(async (text, i) => ({
+            object: "embedding",
+            index: i,
+            embedding: await getEmbedding(text),
+        })));
+        sendJson(res, 200, {
+            object: "list",
+            data,
+            model: body.model ?? EMB_MODEL_ID,
+            usage: { prompt_tokens: 0, total_tokens: 0 },
+        });
+    }
+    catch (e) {
+        const msg = e instanceof Error ? e.message : String(e);
+        sendError(res, 500, msg, "server_error");
+    }
+}
+function handleModels(_req, res) {
+    sendJson(res, 200, {
+        object: "list",
+        data: [
+            { id: LLM_MODEL_ID, object: "model", created: 0, owned_by: "local" },
+            { id: EMB_MODEL_ID, object: "model", created: 0, owned_by: "local" },
+        ],
+    });
+}
+export function createOpenAICompatServer(port) {
+    return new Promise((resolve, reject) => {
+        const server = createServer(async (req, res) => {
+            const url = req.url ?? "";
+            const method = req.method ?? "";
+            // CORS
+            res.setHeader("Access-Control-Allow-Origin", "*");
+            res.setHeader("Access-Control-Allow-Headers", "Content-Type, Authorization");
+            if (method === "OPTIONS") {
+                res.writeHead(204);
+                res.end();
+                return;
+            }
+            try {
+                if (method === "GET" && url === "/v1/models") {
+                    handleModels(req, res);
+                }
+                else if (method === "POST" && url === "/v1/chat/completions") {
+                    await handleChatCompletions(req, res);
+                }
+                else if (method === "POST" && url === "/v1/embeddings") {
+                    await handleEmbeddings(req, res);
+                }
+                else {
+                    sendError(res, 404, `Not found: ${method} ${url}`, "invalid_request_error");
+                }
+            }
+            catch (e) {
+                if (!res.headersSent)
+                    sendError(res, 500, String(e));
+            }
+        });
+        server.listen(port, "127.0.0.1", () => {
+            console.log(`[local-llm] OpenAI-compatible service started: http://127.0.0.1:${port}/v1`);
+            resolve();
+        });
+        server.on("error", reject);
+    });
+}
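For a sense of the wire protocol this file implements, here is a minimal hypothetical streaming client. The port is an assumption (the actual port is chosen by the launcher in index.js); everything else mirrors the handler's output format above:

```js
// Stream a chat completion from the local server and print deltas as they arrive.
const res = await fetch("http://127.0.0.1:8788/v1/chat/completions", { // port assumed
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "local-llm",
    stream: true,
    messages: [{ role: "user", content: "hello" }],
  }),
});
const decoder = new TextDecoder();
let buffer = "";
for await (const chunk of res.body) {
  buffer += decoder.decode(chunk, { stream: true });
  // SSE events are separated by a blank line; each "data:" line carries one JSON chunk.
  for (const event of buffer.split("\n\n").slice(0, -1)) {
    const data = event.replace(/^data: /, "");
    if (data === "[DONE]") continue;
    const parsed = JSON.parse(data);
    if (parsed.error) throw new Error(parsed.error.message); // streamed error event
    process.stdout.write(parsed.choices?.[0]?.delta?.content ?? "");
  }
  buffer = buffer.split("\n\n").slice(-1)[0]; // keep any incomplete trailing event
}
```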
package/dist/core/memory/local-embedding.d.ts
CHANGED
@@ -1,10 +1,11 @@
 /**
- * Local embedding
+ * Local embedding: prefers the local LLM subprocess service (/v1/embeddings),
+ * falling back to direct loading via node-llama-cpp (GGUF) when it is unavailable.
  */
 import type { IEmbeddingProvider } from "./embedding-types.js";
 export declare function getLocalEmbeddingUnavailableReason(): string | null;
 /**
- * Get the local embedding
- *
+ * Get the local embedding provider (lazy-loaded).
+ * Prefers the local LLM subprocess service; falls back to direct node-llama-cpp loading when unavailable.
  */
 export declare function getLocalEmbeddingProvider(): Promise<IEmbeddingProvider | null>;
package/dist/core/memory/local-embedding.js
CHANGED
@@ -5,19 +5,59 @@ let envLogged = false;
 export function getLocalEmbeddingUnavailableReason() {
     return getLocalEmbeddingLlamaUnavailableReason();
 }
+/** Fetch vectors via the local LLM subprocess service's /v1/embeddings endpoint */
+function createLocalServerEmbeddingProvider(baseUrl) {
+    return {
+        name: "local-llm-server",
+        async embed(text) {
+            try {
+                const res = await fetch(`${baseUrl}/embeddings`, {
+                    method: "POST",
+                    headers: { "Content-Type": "application/json", Authorization: "Bearer local" },
+                    body: JSON.stringify({ input: text }),
+                    signal: AbortSignal.timeout(30_000),
+                });
+                if (!res.ok)
+                    return null;
+                const data = await res.json();
+                const vec = data?.data?.[0]?.embedding;
+                return Array.isArray(vec) && vec.length > 0 ? vec : null;
+            }
+            catch {
+                return null;
+            }
+        },
+    };
+}
 /**
- * Get the local embedding
- *
+ * Get the local embedding provider (lazy-loaded).
+ * Prefers the local LLM subprocess service; falls back to direct node-llama-cpp loading when unavailable.
  */
 export async function getLocalEmbeddingProvider() {
     if (cached)
         return cached;
+    // Preferred: the local LLM subprocess service
+    const localBaseUrl = process.env.LOCAL_LLM_BASE_URL;
+    if (localBaseUrl) {
+        const serverProvider = createLocalServerEmbeddingProvider(localBaseUrl);
+        // Quick probe of whether the service is reachable
+        const testVec = await serverProvider.embed("test");
+        if (testVec !== null) {
+            cached = serverProvider;
+            if (!envLogged) {
+                envLogged = true;
+                console.log("[RAG embedding] using the local LLM subprocess service");
+            }
+            return cached;
+        }
+    }
+    // Fallback: direct loading via node-llama-cpp
     const provider = await getLocalEmbeddingLlamaProvider(getRagLocalModelPathSync());
     if (provider) {
         cached = provider;
         if (!envLogged) {
             envLogged = true;
-            console.
+            console.log("[RAG embedding] loading directly via node-llama-cpp (GGUF)");
         }
         return cached;
     }
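A hypothetical caller, to make the probe-and-fallback contract concrete (both functions are exported by the module in this hunk; the vector length in the comment is illustrative):

```js
const provider = await getLocalEmbeddingProvider();
if (!provider) {
  // Neither the subprocess service nor node-llama-cpp could be used.
  console.warn("local embedding unavailable:", getLocalEmbeddingUnavailableReason());
} else {
  // provider.name is "local-llm-server" when the subprocess service answered the probe.
  const vec = await provider.embed("text to index");
  console.log(provider.name, Array.isArray(vec) ? vec.length : vec); // e.g. "local-llm-server", 768
}
```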
package/dist/gateway/methods/agent-chat.js
CHANGED
@@ -9,6 +9,23 @@ import { consumePendingAgentReload } from "../../core/config/agent-reload-pendin
 import { registerProxyRunAbort } from "../proxy-run-abort.js";
 import { getSessionOutlet, sendSessionMessage } from "../../core/session-outlet/index.js";
 const COMPOSITE_KEY_SEP = "::";
+/** Normalize a delta/text value to a string, so object payloads from the SDK or upstream neither render as [object Object] nor trigger "Unknown value type" */
+function normalizeChunkText(v) {
+    if (v == null)
+        return "";
+    if (typeof v === "string")
+        return v;
+    if (typeof v.content === "string")
+        return v.content;
+    if (typeof v.text === "string")
+        return v.text;
+    try {
+        return String(JSON.stringify(v));
+    }
+    catch {
+        return String(v);
+    }
+}
 /** Current streaming subscription per session (old subscriptions are removed before cancel or a new run, to avoid duplicate broadcasts) */
 const sessionSubscriptionBySessionId = new Map();
 /**
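The normalization order — string, then `.content`, then `.text`, then JSON — is easiest to read off a few illustrative inputs (outputs follow from the function above):

```js
normalizeChunkText(null);                       // ""
normalizeChunkText("plain delta");              // "plain delta"
normalizeChunkText({ content: "from object" }); // "from object"
normalizeChunkText({ text: "alt field" });      // "alt field"
normalizeChunkText({ foo: 1 });                 // '{"foo":1}', not "[object Object]"
```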
@@ -40,7 +57,7 @@ const SYSTEM_MSG_PREFIX = "[System Message] ";
 const SYSTEM_MSG_SUFFIX = "\n";
 /**
  * Create the web-side session message consumer: converts SessionMessage from the unified outlet into Gateway events and broadcasts them.
- *
+ * System messages are delivered as a separate system_message event; the frontend shows them transiently and they never enter the session chat history. Each channel processes the raw system messages it receives via the unified outlet on its own.
  */
 function createWebSessionConsumer(_sessionId) {
     return {
@@ -48,9 +65,8 @@ function createWebSessionConsumer(_sessionId) {
         const sid = msg.sessionId;
         if (msg.type === "system" && msg.code === "command.result") {
             const raw = msg.payload?.text ?? "";
-
-
-            broadcastToSession(sid, createEvent("agent.chunk", { text, sessionId: sid }));
+            if (raw)
+                broadcastToSession(sid, createEvent("system_message", { text: raw, code: "command.result", sessionId: sid }));
             broadcastToSession(sid, createEvent("turn_end", { sessionId: sid, content: "" }));
             broadcastToSession(sid, createEvent("message_complete", { sessionId: sid, content: "" }));
             broadcastToSession(sid, createEvent("agent_end", { sessionId: sid }));
@@ -59,10 +75,8 @@ function createWebSessionConsumer(_sessionId) {
         }
         if (msg.type === "system" && msg.code === "mcp.progress") {
             const raw = msg.payload?.message ?? msg.payload?.phase ?? "";
-            if (raw)
-
-                broadcastToSession(sid, createEvent("agent.chunk", { text, sessionId: sid }));
-            }
+            if (raw)
+                broadcastToSession(sid, createEvent("system_message", { text: raw, code: "mcp.progress", sessionId: sid }));
             return;
         }
         if (msg.type === "chat") {
@@ -159,37 +173,40 @@ async function handleAgentChatInner(client, targetSessionId, message, params) {
         sendSessionMessage(targetSessionId, { type: "chat", code: "agent_end", payload: {} });
         sendSessionMessage(targetSessionId, { type: "chat", code: "conversation_end", payload: {} });
     };
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        });
-        return { status: "completed", sessionId: targetSessionId };
-    }
-    catch (error) {
+    runForChannelStream({
+        sessionId: targetSessionId,
+        message,
+        agentId: currentAgentId,
+        signal,
+    }, {
+        onChunk(delta) {
+            sendSessionMessage(targetSessionId, { type: "chat", code: "agent.chunk", payload: { text: delta } });
+        },
+        onTurnEnd() {
+            sendSessionMessage(targetSessionId, { type: "chat", code: "turn_end", payload: {} });
+            sendSessionMessage(targetSessionId, { type: "chat", code: "message_complete", payload: {} });
+        },
+        onDone() {
+            finishAndUnregister();
+        },
+    }).catch((error) => {
         const isAbort = error?.name === "AbortError" || (typeof error?.message === "string" && error.message.includes("abort"));
         if (!isAbort)
            console.error(`Error in agent chat (proxy ${runnerType}):`, error);
         finishAndUnregister();
         if (!isAbort) {
-
+            let errMsg = error?.message || String(error);
+            const needNormalize = typeof errMsg === "object" || (typeof errMsg === "string" && errMsg.includes("[object Object]"));
+            if (needNormalize) {
+                errMsg = normalizeChunkText(errMsg);
+                if (typeof errMsg === "string" && errMsg.includes("Unknown value type") && errMsg.includes("[object Object]")) {
+                    errMsg = "The model returned an unsupported data structure (such as a tool-call stream); try disabling tools or switching models.";
+                }
+            }
             sendSessionMessage(targetSessionId, { type: "chat", code: "agent.chunk", payload: { text: `Request failed: ${errMsg}` } });
         }
-
-    }
+    });
+    return { status: "streaming", sessionId: targetSessionId };
     }
     const isEphemeralSession = sessionType === "system" || sessionType === "scheduled";
     if (isEphemeralSession) {
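The callback contract this call site implies for runForChannelStream, as a JSDoc sketch (reconstructed from the call above; the real signature lives elsewhere in the package and is not part of this diff):

```js
/**
 * @typedef {Object} ChannelStreamRun
 * @property {string} sessionId
 * @property {string} message
 * @property {string} agentId
 * @property {AbortSignal} signal
 */
/**
 * @typedef {Object} ChannelStreamHandlers
 * @property {(delta: string) => void} onChunk  one streamed text delta → forwarded as agent.chunk
 * @property {() => void} onTurnEnd             end of one model turn → turn_end + message_complete
 * @property {() => void} onDone                whole run finished → finishAndUnregister()
 */
// runForChannelStream(run, handlers) returns a Promise; rejections surface
// through .catch(...), where abort errors are deliberately swallowed.
```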
@@ -250,10 +267,10 @@ async function handleAgentChatInner(client, targetSessionId, message, params) {
             const update = event;
             if (update.assistantMessageEvent && update.assistantMessageEvent.type === "text_delta") {
                 hasReceivedAnyChunk = true;
-                wsPayload = { type: "chat", code: "agent.chunk", payload: { text: update.assistantMessageEvent.delta } };
+                wsPayload = { type: "chat", code: "agent.chunk", payload: { text: normalizeChunkText(update.assistantMessageEvent.delta) } };
             }
             else if (update.assistantMessageEvent && update.assistantMessageEvent.type === "thinking_delta") {
-                wsPayload = { type: "chat", code: "agent.chunk", payload: { text: update.assistantMessageEvent.delta, isThinking: true } };
+                wsPayload = { type: "chat", code: "agent.chunk", payload: { text: normalizeChunkText(update.assistantMessageEvent.delta), isThinking: true } };
             }
             else if (update.assistantMessageEvent?.type === "error" && update.assistantMessageEvent?.error?.errorMessage) {
                 console.warn("[agent.chat] model error:", update.assistantMessageEvent.error.errorMessage);
@@ -278,9 +295,16 @@ async function handleAgentChatInner(client, targetSessionId, message, params) {
                 hasReceivedAnyChunk = true;
             }
             if (msg?.errorMessage) {
-
+                // Debug: locate the source of local-LLM streaming errors (the raw errorMessage thrown by pi-ai and similar SDKs)
+                console.error("[agent.chat] message_end errorMessage:", msg.errorMessage);
+                if (typeof msg.errorStack === "string")
+                    console.error("[agent.chat] message_end errorStack:", msg.errorStack);
+                let errText = msg.errorMessage.includes("402") || msg.errorMessage.includes("Insufficient Balance")
                     ? "Insufficient API balance; check and top up in Settings, then retry."
-                    : `Request failed: ${msg.errorMessage}`;
+                    : `Request failed: ${normalizeChunkText(msg.errorMessage)}`;
+                if (errText.includes("Unknown value type") && errText.includes("[object Object]")) {
+                    errText = "Request failed: the model returned an unsupported data structure (such as a tool-call stream); try disabling tools or switching models.";
+                }
                 sendSessionMessage(targetSessionId, { type: "chat", code: "agent.chunk", payload: { text: errText } });
             }
             wsPayload = null;
@@ -298,9 +322,16 @@ async function handleAgentChatInner(client, targetSessionId, message, params) {
             }
         }
         if (msg?.errorMessage) {
-
+            // Debug: locate the raw error the SDK passed in at turn_end
+            console.error("[agent.chat] turn_end errorMessage:", msg.errorMessage);
+            if (typeof msg.errorStack === "string")
+                console.error("[agent.chat] turn_end errorStack:", msg.errorStack);
+            let errText = msg.errorMessage.includes("402") || msg.errorMessage.includes("Insufficient Balance")
                 ? "Insufficient API balance; check and top up in Settings, then retry."
-                : `Request failed: ${msg.errorMessage}`;
+                : `Request failed: ${normalizeChunkText(msg.errorMessage)}`;
+            if (errText.includes("Unknown value type") && errText.includes("[object Object]")) {
+                errText = "Request failed: the model returned an unsupported data structure (such as a tool-call stream); try disabling tools or switching models.";
+            }
             sendSessionMessage(targetSessionId, { type: "chat", code: "agent.chunk", payload: { text: errText } });
             hasReceivedAnyChunk = true;
         }
@@ -342,9 +373,8 @@
     sessionSubscriptionBySessionId.set(targetSessionId, unsubscribe);
     try {
         await session.sendUserMessage(message, { deliverAs: "followUp" });
-
-
-        return { status: "completed", sessionId: targetSessionId };
+        // The stream has started; return immediately. The frontend uses agent_end to detect the end of the turn, and computing timeouts from the first chunk works better.
+        return { status: "streaming", sessionId: targetSessionId };
     }
     catch (error) {
         console.error(`Error in agent chat:`, error);
package/dist/gateway/server.js
CHANGED
@@ -44,7 +44,9 @@ import multer from "multer";
 import { handleInstallSkillFromPath } from "./methods/install-skill-from-path.js";
 import { handleInstallSkillFromUpload } from "./methods/install-skill-from-upload.js";
 import { setBackendBaseUrl } from "./backend-url.js";
-import { ensureDesktopConfigInitialized, getChannelsConfigSync } from "../core/config/desktop-config.js";
+import { ensureDesktopConfigInitialized, getChannelsConfigSync, loadDesktopAgentConfig } from "../core/config/desktop-config.js";
+import { startLocalLlmServer } from "../core/local-llm-server/index.js";
+import { isModelFileInCache } from "../core/local-llm-server/model-resolve.js";
 import { createNestAppEmbedded } from "../server/bootstrap.js";
 import { registerChannel, startAllChannels, stopAllChannels } from "./channel/registry.js";
 import { createFeishuChannel } from "./channel/adapters/feishu.js";
@@ -79,6 +81,57 @@ export async function startGatewayServer(port = 38080) {
     process.env.PORT = String(port);
     await ensureDesktopConfigInitialized();
     console.log(`Starting gateway server on port ${port}...`);
+    // If the default agent or an env var selects the local provider, start the local LLM subprocess in the background (without blocking main-service startup)
+    // Reading env alone is not enough: when the desktop picks the "local machine" default agent, OPENBOT_PROVIDER may be unset, so the local service would never start and a Connection error would surface
+    const envProvider = process.env.OPENBOT_PROVIDER ?? "";
+    let shouldStartLocal = envProvider === "local";
+    let defaultLocalModel;
+    let defaultAgentContextSize;
+    try {
+        const defaultAgent = await loadDesktopAgentConfig("default");
+        if (defaultAgent) {
+            defaultAgentContextSize = defaultAgent.contextSize;
+            if (!shouldStartLocal) {
+                shouldStartLocal =
+                    defaultAgent.provider === "local" &&
+                        defaultAgent.runnerType !== "coze" &&
+                        defaultAgent.runnerType !== "openclawx" &&
+                        defaultAgent.runnerType !== "opencode" &&
+                        defaultAgent.runnerType !== "claude_code";
+            }
+            if (shouldStartLocal && defaultAgent.provider === "local" && defaultAgent.model?.trim()) {
+                defaultLocalModel = defaultAgent.model.trim();
+            }
+        }
+    }
+    catch {
+        // ignore
+    }
+    if (shouldStartLocal) {
+        // If a default model is specified but its file is not in the cache, don't start the local service; mark it unavailable so the user can download the model in Settings and start it manually
+        const llmFileExists = !defaultLocalModel || isModelFileInCache(defaultLocalModel);
+        if (!llmFileExists) {
+            process.env.LOCAL_LLM_START_FAILED = `Default model file does not exist: ${defaultLocalModel}. Download it under "Model Management" or pick an installed model, then click "Start local model service"`;
+            console.warn("[local-llm] not started:", process.env.LOCAL_LLM_START_FAILED);
+        }
+        else {
+            const opts = {
+                ...(defaultLocalModel ? { llmModelPath: defaultLocalModel } : {}),
+                contextSize: defaultAgentContextSize ?? 32768,
+            };
+            startLocalLlmServer(opts)
+                .then((handle) => {
+                process.env.LOCAL_LLM_BASE_URL = handle.baseUrl;
+                delete process.env.LOCAL_LLM_START_FAILED;
+                console.log("[local-llm] ready:", handle.baseUrl);
+            })
+                .catch((e) => {
+                const msg = e instanceof Error ? e.message : String(e);
+                process.env.LOCAL_LLM_START_FAILED = msg;
+                console.warn("[local-llm] failed to start:", msg);
+            });
+        }
+    }
     setBackendBaseUrl(`http://localhost:${port}`);
     const { app: nestApp, express: nestExpress } = await createNestAppEmbedded();
     try {
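The rest of the process coordinates with this fire-and-forget startup purely through the two environment variables set above; the embedding fallback earlier in this diff reads LOCAL_LLM_BASE_URL the same way. A sketch of the consumer-side contract (the helper function itself is hypothetical; only the variables come from the diff):

```js
// After startGatewayServer() has run, at most one of these is set:
//   LOCAL_LLM_BASE_URL     — the local server is up (e.g. "http://127.0.0.1:<port>/v1")
//   LOCAL_LLM_START_FAILED — startup was skipped or failed, with a readable reason
function localLlmStatus() {
  if (process.env.LOCAL_LLM_BASE_URL) {
    return { ok: true, baseUrl: process.env.LOCAL_LLM_BASE_URL };
  }
  return { ok: false, reason: process.env.LOCAL_LLM_START_FAILED ?? "not started" };
}
```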
package/dist/server/agent-config/agent-config.controller.d.ts
CHANGED
@@ -25,7 +25,7 @@ export declare class AgentConfigController {
         success: boolean;
         data: AgentConfigItem;
     }>;
-    updateAgent(id: string, body: Partial<Pick<AgentConfigItem, 'name' | 'provider' | 'model' | 'modelItemCode' | 'mcpServers' | 'mcpMaxResultTokens' | 'systemPrompt' | 'icon' | 'runnerType' | 'coze' | 'openclawx' | 'opencode' | 'claudeCode' | 'useLongMemory' | 'webSearch'>>): Promise<{
+    updateAgent(id: string, body: Partial<Pick<AgentConfigItem, 'name' | 'provider' | 'model' | 'modelItemCode' | 'mcpServers' | 'mcpMaxResultTokens' | 'systemPrompt' | 'icon' | 'runnerType' | 'coze' | 'openclawx' | 'opencode' | 'claudeCode' | 'useLongMemory' | 'webSearch' | 'contextSize'>>): Promise<{
         success: boolean;
         data: AgentConfigItem;
     }>;
package/dist/server/agent-config/agent-config.service.d.ts
CHANGED
@@ -86,6 +86,8 @@ export interface AgentConfigItem {
         provider?: 'brave' | 'duck-duck-scrape';
         maxResultTokens?: number;
     };
+    /** Context length (token count) for local models; only takes effect when runnerType is local. Defaults to 32768 (32K) */
+    contextSize?: number;
 }
 export interface DeleteAgentOptions {
     /** Whether to also delete the workspace's on-disk directory and files; defaults to false (only the workspace-related data in the database is deleted, the directory is kept) */
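To see the new field in use, an update through AgentConfigController.updateAgent might carry a body like the following (hypothetical sketch: the route and HTTP verb are assumptions; only the body shape and the 32768-token default come from the declarations above):

```js
// Lower the default agent's local-model context window from the 32768-token
// default to 8192; contextSize only takes effect when runnerType is local.
await fetch("/api/agent-config/default", { // route is an assumption
  method: "PATCH",                         // verb is an assumption
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ contextSize: 8192 }),
});
```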