@next-open-ai/openclawx 0.8.58 → 0.9.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/README.md +14 -5
  2. package/apps/desktop/renderer/dist/assets/index-BihHYEuk.js +93 -0
  3. package/apps/desktop/renderer/dist/assets/index-Dq83-1ma.css +10 -0
  4. package/apps/desktop/renderer/dist/index.html +2 -2
  5. package/dist/cli/cli.js +2 -2
  6. package/dist/core/agent/agent-manager.js +12 -1
  7. package/dist/core/config/agent-reload-pending.js +3 -2
  8. package/dist/core/config/desktop-config.d.ts +1 -1
  9. package/dist/core/config/desktop-config.js +31 -13
  10. package/dist/core/config/provider-support-default.js +1 -0
  11. package/dist/core/local-llm-server/download-model.d.ts +1 -1
  12. package/dist/core/local-llm-server/download-model.js +1 -1
  13. package/dist/core/local-llm-server/index.d.ts +1 -1
  14. package/dist/core/local-llm-server/llm-context.d.ts +1 -0
  15. package/dist/core/local-llm-server/llm-context.js +31 -3
  16. package/dist/core/local-llm-server/model-resolve.d.ts +2 -2
  17. package/dist/core/local-llm-server/model-resolve.js +2 -2
  18. package/dist/core/local-llm-server/start-from-config.js +2 -2
  19. package/dist/core/mcp/operator.d.ts +9 -0
  20. package/dist/core/mcp/operator.js +40 -6
  21. package/dist/core/mcp/transport/stdio.js +19 -2
  22. package/dist/core/tools/windows-shell.d.ts +6 -0
  23. package/dist/core/tools/windows-shell.js +85 -0
  24. package/dist/gateway/methods/agent-chat.js +30 -3
  25. package/dist/gateway/server.js +50 -13
  26. package/dist/server/agent-config/agent-config.controller.d.ts +8 -0
  27. package/dist/server/agent-config/agent-config.controller.js +11 -0
  28. package/dist/server/agent-config/agent-config.service.d.ts +10 -0
  29. package/dist/server/agent-config/agent-config.service.js +18 -1
  30. package/dist/server/agents/agents.gateway.js +1 -1
  31. package/dist/server/bootstrap.js +16 -2
  32. package/dist/server/config/config.service.d.ts +1 -1
  33. package/dist/server/config/config.service.js +29 -23
  34. package/dist/server/config/local-models.service.js +2 -3
  35. package/package.json +1 -1
  36. package/presets/preset-agents.json +2 -2
  37. package/presets/preset-config.json +5 -5
  38. package/presets/preset-providers.json +7 -0
  39. package/presets/recommended-local-models.json +6 -12
  40. package/apps/desktop/renderer/dist/assets/index-M5VGUUpo.js +0 -93
  41. package/apps/desktop/renderer/dist/assets/index-y8oE2q_u.css +0 -10

package/dist/core/config/provider-support-default.js
@@ -28,6 +28,7 @@ export const DEFAULT_PROVIDER_SUPPORT = {
     "openai-custom": {
         name: "OpenAI (自定义)",
         models: [
+            { id: "qwen3.5:4b", name: "Qwen3.5 4B", types: ["llm"] },
             { id: "gpt-4o", name: "GPT-4o", types: ["llm"] },
             { id: "gpt-4o-mini", name: "GPT-4o Mini", types: ["llm"] },
             { id: "gpt-4-turbo", name: "GPT-4 Turbo", types: ["llm"] },

package/dist/core/local-llm-server/download-model.d.ts
@@ -1,4 +1,4 @@
-export declare const DEFAULT_LLM_MODEL_URI = "hf:Qwen/Qwen3-4B-GGUF/Qwen3-4B-Q4_K_M.gguf";
+export declare const DEFAULT_LLM_MODEL_URI = "hf:unsloth/Qwen3.5-4B-GGUF/Qwen3.5-4B-Q5_K_M.gguf";
 export interface DownloadModelOptions {
     useMirror?: boolean;
     signal?: AbortSignal;

package/dist/core/local-llm-server/download-model.js
@@ -4,7 +4,7 @@
  */
 import { basename } from "node:path";
 import { LOCAL_LLM_CACHE_DIR } from "./model-resolve.js";
-export const DEFAULT_LLM_MODEL_URI = "hf:Qwen/Qwen3-4B-GGUF/Qwen3-4B-Q4_K_M.gguf";
+export const DEFAULT_LLM_MODEL_URI = "hf:unsloth/Qwen3.5-4B-GGUF/Qwen3.5-4B-Q5_K_M.gguf";
 /**
  * Download the model into the local cache directory.
  * @returns the resolved local file path

package/dist/core/local-llm-server/index.d.ts
@@ -12,7 +12,7 @@ export interface LocalLlmServerOptions {
     port?: number;
     llmModelPath?: string;
     embeddingModelPath?: string;
-    /** Context window size in tokens, default 32768 (32K); must fit system + tools + conversation */
+    /** Context window size in tokens, default 32768 (32K); must fit system + tools + conversation; lower it in the agent config when VRAM is tight */
     contextSize?: number;
     /** Timeout in ms to wait for the child process to become ready, default 300000 (5 minutes; cold starts and large models can load slowly) */
     readyTimeoutMs?: number;

package/dist/core/local-llm-server/llm-context.d.ts
@@ -43,6 +43,7 @@ export declare function initModels(opts: LlmContextOptions): Promise<void>;
 /**
  * Streaming chat completion.
  * onChunk is called each time a new token arrives; resolves with the final finish_reason.
+ * If the local model emits <think>...</think> blocks, they are filtered out of the stream so the reasoning is never shown.
  */
 export declare function chatCompletionStream(messages: ChatMessage[], tools: ToolDefinition[], onChunk: (chunk: ChatCompletionChunk) => void, signal?: AbortSignal): Promise<void>;
 /**

package/dist/core/local-llm-server/llm-context.js
@@ -113,9 +113,35 @@ function parseToolCalls(text) {
     }
     return null;
 }
+/**
+ * Strip <think>...</think> blocks from the accumulated text, keeping only the externally visible body (hides the local model's "thinking" output).
+ * If a <think> is never closed, everything from <think> to the end is treated as thinking and withheld.
+ */
+function getVisibleWithoutThinking(text) {
+    let out = "";
+    let i = 0;
+    const openTag = "<think>";
+    const closeTag = "</think>";
+    while (i < text.length) {
+        const open = text.indexOf(openTag, i);
+        if (open === -1) {
+            out += text.slice(i);
+            break;
+        }
+        out += text.slice(i, open);
+        const close = text.indexOf(closeTag, open + openTag.length);
+        if (close === -1) {
+            // unclosed thinking block: withhold the remainder
+            break;
+        }
+        i = close + closeTag.length;
+    }
+    return out;
+}
 /**
  * Streaming chat completion.
  * onChunk is called each time a new token arrives; resolves with the final finish_reason.
+ * If the local model emits <think>...</think> blocks, they are filtered out of the stream so the reasoning is never shown.
  */
 export async function chatCompletionStream(messages, tools, onChunk, signal) {
     if (!llmModel)
@@ -150,7 +176,7 @@ export async function chatCompletionStream(messages, tools, onChunk, signal) {
         }
     }
     let fullText = "";
-    let prevSentLength = 0;
+    let prevSentVisibleLength = 0;
     let lastSent = ""; // emit identical consecutive deltas only once, mitigating "every character shows up twice" when replies are slow
     try {
         await session.prompt(userPrompt, {
@@ -168,8 +194,10 @@ export async function chatCompletionStream(messages, tools, onChunk, signal) {
             else {
                 fullText += s;
             }
-            const toSend = fullText.slice(prevSentLength);
-            prevSentLength = fullText.length;
+            // filter out <think>...</think> blocks so the thinking is never sent to the client
+            const visibleText = getVisibleWithoutThinking(fullText);
+            const toSend = visibleText.slice(prevSentVisibleLength);
+            prevSentVisibleLength = visibleText.length;
             if (toSend && toSend !== lastSent) {
                 lastSent = toSend;
                 onChunk({ content: toSend });
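
The filter operates on the accumulated text rather than on individual tokens, so a <think> tag split across chunks is still caught. A minimal sketch (a standalone copy, not the package's module) of how the visible prefix advances as tokens arrive:

    function getVisibleWithoutThinking(text: string): string {
        let out = "";
        let i = 0;
        while (i < text.length) {
            const open = text.indexOf("<think>", i);
            if (open === -1) { out += text.slice(i); break; }
            out += text.slice(i, open);
            const close = text.indexOf("</think>", open + "<think>".length);
            if (close === -1) break; // unclosed block: withhold the rest
            i = close + "</think>".length;
        }
        return out;
    }

    let fullText = "";
    let prevSentVisibleLength = 0;
    for (const token of ["<think>", "plan...", "</think>", "Hi", " there"]) {
        fullText += token;
        const visible = getVisibleWithoutThinking(fullText);
        const toSend = visible.slice(prevSentVisibleLength);
        prevSentVisibleLength = visible.length;
        if (toSend) console.log(JSON.stringify(toSend)); // "Hi", then " there"
    }
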

package/dist/core/local-llm-server/model-resolve.d.ts
@@ -1,8 +1,8 @@
 export declare const LOCAL_LLM_CACHE_DIR: string;
 /**
  * Take the trailing filename of modelUri (for flexible matching against installed files: different node-llama-cpp versions may produce different prefixes).
- * e.g. hf:Qwen/Qwen3-4B-GGUF/Qwen3-4B-Q4_K_M.gguf → Qwen3-4B-Q4_K_M.gguf
- * e.g. hf_Qwen_Qwen3-4B-GGUF_Qwen3-4B-Q4_K_M.gguf → Qwen3-4B-Q4_K_M.gguf (for the filename form, take what follows the last "_"-separated segment)
+ * e.g. hf:unsloth/Qwen3.5-4B-GGUF/Qwen3.5-4B-Q5_K_M.gguf → Qwen3.5-4B-Q5_K_M.gguf
+ * e.g. hf_unsloth_Qwen3.5-4B-GGUF_Qwen3.5-4B-Q5_K_M.gguf → Qwen3.5-4B-Q5_K_M.gguf (for the filename form, take what follows the last "_"-separated segment)
  */
 export declare function modelUriBasename(modelUri: string): string;
 /**

package/dist/core/local-llm-server/model-resolve.js
@@ -9,8 +9,8 @@ import { homedir } from "node:os";
 export const LOCAL_LLM_CACHE_DIR = join(homedir(), ".openbot", ".cached_models");
 /**
  * Take the trailing filename of modelUri (for flexible matching against installed files: different node-llama-cpp versions may produce different prefixes).
- * e.g. hf:Qwen/Qwen3-4B-GGUF/Qwen3-4B-Q4_K_M.gguf → Qwen3-4B-Q4_K_M.gguf
- * e.g. hf_Qwen_Qwen3-4B-GGUF_Qwen3-4B-Q4_K_M.gguf → Qwen3-4B-Q4_K_M.gguf (for the filename form, take what follows the last "_"-separated segment)
+ * e.g. hf:unsloth/Qwen3.5-4B-GGUF/Qwen3.5-4B-Q5_K_M.gguf → Qwen3.5-4B-Q5_K_M.gguf
+ * e.g. hf_unsloth_Qwen3.5-4B-GGUF_Qwen3.5-4B-Q5_K_M.gguf → Qwen3.5-4B-Q5_K_M.gguf (for the filename form, take what follows the last "_"-separated segment)
  */
 export function modelUriBasename(modelUri) {
     const s = (modelUri || "").trim();

package/dist/core/local-llm-server/start-from-config.js
@@ -14,8 +14,8 @@ export async function tryStartLocalModelFromSavedConfig() {
     try {
         const agent = await loadDesktopAgentConfig("default");
         if (!agent || agent.provider !== "local" || !agent.model?.trim()) {
-            process.env.LOCAL_LLM_START_FAILED =
-                "未配置默认本地模型,请在「模型配置」中选择 LLM 后点击「启动本地模型服务」";
+            // When the default agent does not use "local", just skip startup without setting LOCAL_LLM_START_FAILED, so connection failures from Ollama/openai-custom etc. are not misreported as "no local model configured"
+            delete process.env.LOCAL_LLM_START_FAILED;
             console.log("[local-llm] 提示:未配置默认本地模型,跳过启动。");
             return;
         }

package/dist/core/mcp/operator.d.ts
@@ -30,3 +30,12 @@ export declare function getMcpToolDefinitions(serverConfigs: McpServerConfig[],
  * Close and remove all cached MCP clients (called on process exit or explicit cleanup).
  */
 export declare function shutdownMcpClients(): Promise<void>;
+/**
+ * Test whether a single MCP config is usable: connect, fetch the tool list, then disconnect.
+ * Does not write to clientCache; backs the "Test" button in the config UI and can trigger uvx/npx dependency installation ahead of time.
+ */
+export declare function testMcpConnection(config: McpServerConfig, options?: GetMcpToolDefinitionsOptions): Promise<{
+    success: boolean;
+    error?: string;
+    toolsCount?: number;
+}>;

package/dist/core/mcp/operator.js
@@ -13,14 +13,13 @@ function configLabel(config) {
         return config.command;
     return config.url;
 }
-/** MCP name used in system messages: for stdio, prefer the first argument (e.g. akshare-tools), else command; for sse, the URL host or the last path segment */
+/** MCP name used in system messages: for stdio, prefer the first non-option argument (e.g. akshare-tools), else command; for sse, the URL host or the last path segment */
 function mcpDisplayName(config) {
     if (config.transport === "stdio") {
         const args = config.args;
-        const first = args?.[0];
-        if (typeof first === "string" && first.trim() && !first.includes("/") && !first.includes("\\")) {
-            return first.trim();
-        }
+        const nameArg = (args ?? []).find((a) => typeof a === "string" && a.trim() && !a.startsWith("-") && !a.includes("/") && !a.includes("\\"));
+        if (nameArg)
+            return nameArg.trim();
         return config.command;
     }
     try {
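
With the old rule, a config whose args began with a flag (e.g. ["-y", "some-server"]) surfaced the flag itself as the display name; the new rule skips options and path-like arguments. A standalone sketch of just the stdio branch (the type is illustrative):

    type StdioConfig = { transport: "stdio"; command: string; args?: string[] };

    function stdioDisplayName(config: StdioConfig): string {
        const nameArg = (config.args ?? []).find(
            (a) => typeof a === "string" && a.trim() && !a.startsWith("-") && !a.includes("/") && !a.includes("\\"),
        );
        return nameArg ? nameArg.trim() : config.command;
    }

    console.log(stdioDisplayName({ transport: "stdio", command: "uvx", args: ["akshare-tools"] }));       // akshare-tools
    console.log(stdioDisplayName({ transport: "stdio", command: "npx", args: ["-y", "akshare-tools"] })); // akshare-tools (the -y flag is now skipped)
    console.log(stdioDisplayName({ transport: "stdio", command: "node", args: ["./dist/server.js"] }));   // node (path-like args are skipped)
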
@@ -94,8 +93,13 @@ export async function getMcpToolDefinitions(serverConfigs, options = {}) {
             }
             catch { }
             if (attempt === connectRetries) {
-                console.warn(`[mcp] 连接失败 (${label}):`, err instanceof Error ? err.message : err);
                 const errMsg = err instanceof Error ? err.message : String(err);
+                console.warn(`[mcp] 连接失败 (${label}):`, errMsg);
+                const enoentHint = /ENOENT/.test(errMsg) && /uvx|npx/.test(errMsg)
+                    ? " 若在 Docker 中运行,请使用已安装 uv/npx 的镜像(Dockerfile 中需安装 uv 并设置 PATH=/root/.local/bin)并重新构建。"
+                    : "";
+                if (enoentHint)
+                    console.warn(`[mcp]`, enoentHint.trim());
                 emitProgress(`${displayName} MCP failed: ${errMsg}`, "skipped", errMsg);
             }
@@ -137,3 +141,33 @@ export async function shutdownMcpClients() {
     clientCache.clear();
     await Promise.all(closeAll);
 }
+/**
+ * Test whether a single MCP config is usable: connect, fetch the tool list, then disconnect.
+ * Does not write to clientCache; backs the "Test" button in the config UI and can trigger uvx/npx dependency installation ahead of time.
+ */
+export async function testMcpConnection(config, options = {}) {
+    const clientOptions = {
+        initTimeoutMs: options.initTimeoutMs,
+        initRetries: options.initRetries ?? 1,
+        initRetryDelayMs: options.initRetryDelayMs,
+    };
+    let client = null;
+    try {
+        client = new McpClient(config, clientOptions);
+        await client.connect();
+        const tools = await client.listTools();
+        const count = Array.isArray(tools) ? tools.length : 0;
+        await client.close();
+        return { success: true, toolsCount: count };
+    }
+    catch (err) {
+        if (client) {
+            try {
+                await client.close();
+            }
+            catch { }
+        }
+        const message = err instanceof Error ? err.message : String(err);
+        return { success: false, error: message };
+    }
+}
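
A hedged usage sketch for the new helper (the config values are illustrative, and the import path assumes the compiled module layout shown above):

    import { testMcpConnection } from "./dist/core/mcp/operator.js";

    const result = await testMcpConnection(
        { transport: "stdio", command: "uvx", args: ["akshare-tools"] },
        { initTimeoutMs: 60_000 },
    );
    console.log(result.success
        ? `OK, ${result.toolsCount} tools`
        : `failed: ${result.error}`);
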

package/dist/core/mcp/transport/stdio.js
@@ -21,7 +21,9 @@ export class StdioTransport {
     }
     constructor(config, options = {}) {
         this.config = config;
-        this.initTimeoutMs = options.initTimeoutMs ?? 20_000;
+        const isUvx = /^uvx?$/i.test((config.command || "").trim().replace(/^.*[/\\]/, ""));
+        const defaultInitMs = isUvx ? 60_000 : 20_000;
+        this.initTimeoutMs = options.initTimeoutMs ?? defaultInitMs;
         this.requestTimeoutMs = options.requestTimeoutMs ?? 30_000;
         this.initRetries = options.initRetries ?? 1;
         this.initRetryDelayMs = options.initRetryDelayMs ?? 3_000;
@@ -52,7 +54,12 @@ export class StdioTransport {
         if (env.UV_SILENT === undefined)
             env.UV_SILENT = "1";
     }
-    this.process = spawn(this.config.command, this.config.args ?? [], {
+    // uvx/uv does not accept the -y flag (unlike npx -y); strip it automatically to avoid "unexpected argument '-y' found"
+    let args = this.config.args ?? [];
+    if (cmdBase === "uvx" || cmdBase === "uv") {
+        args = args.filter((a) => a !== "-y" && a !== "--yes");
+    }
+    this.process = spawn(this.config.command, args, {
         env,
         stdio: ["pipe", "pipe", "pipe"],
     });
@@ -74,6 +81,16 @@ export class StdioTransport {
         this.rejectAll(new Error(`MCP process error: ${err.message}`));
     });
     child.on("exit", (code, signal) => {
+        if (code !== 0 && code !== null) {
+            const cmd = this.config.command;
+            const args = JSON.stringify(this.config.args ?? []);
+            // read stderr on the next tick so output still buffered in the pipe gets flushed into stderrBuffer first
+            setImmediate(() => {
+                const stderrTail = this.stderrBuffer.trim().slice(-2048) || "(无 stderr 输出)";
+                console.warn(`[mcp stdio] 子进程异常退出 command=${cmd} args=${args} code=${code} signal=${signal}`);
+                console.warn("[mcp stdio] 子进程 stderr 末尾:", stderrTail);
+            });
+        }
         this.rejectAll(new Error(`MCP process exited: code=${code} signal=${signal}`));
         this.process = null;
     });
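
The uvx detection and the -y stripping both key off the command's basename. A small sketch of the expressions involved (standalone copies; cmdBase in the real code is computed elsewhere in connect()):

    for (const command of ["uvx", "uv", "/usr/local/bin/uvx", "npx"]) {
        const base = command.trim().replace(/^.*[/\\]/, "");
        const isUvx = /^uvx?$/i.test(base);
        console.log(command, "→ default init timeout:", isUvx ? 60_000 : 20_000); // uvx/uv get 60 s
    }

    // npx-style args copied into a uvx config no longer break the spawn:
    const args = ["-y", "akshare-tools"].filter((a) => a !== "-y" && a !== "--yes");
    console.log(args); // ["akshare-tools"]
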

package/dist/core/tools/windows-shell.d.ts
@@ -0,0 +1,6 @@
+import type { BashOperations } from "@mariozechner/pi-coding-agent";
+/**
+ * Returns a BashOperations backed by cmd.exe on Windows, for use with createBashTool(..., { operations }).
+ * This lets commands like dir, cd, and folder-organizing tasks run on Windows machines without Git Bash installed.
+ */
+export declare function createWindowsShellOperations(): BashOperations;

package/dist/core/tools/windows-shell.js
@@ -0,0 +1,85 @@
+/**
+ * On Windows without Git Bash, run CLI commands through cmd.exe to avoid the "No bash shell found" error.
+ * When the packaged desktop app runs on Windows, the bash tool uses this implementation, supporting common commands such as dir, cd, and PowerShell.
+ */
+import { spawn } from "node:child_process";
+import { existsSync } from "node:fs";
+function killProcessTreeWin32(pid) {
+    try {
+        spawn("taskkill", ["/F", "/T", "/PID", String(pid)], {
+            stdio: "ignore",
+            detached: true,
+        });
+    }
+    catch {
+        // ignore
+    }
+}
+/**
+ * Returns a BashOperations backed by cmd.exe on Windows, for use with createBashTool(..., { operations }).
+ * This lets commands like dir, cd, and folder-organizing tasks run on Windows machines without Git Bash installed.
+ */
+export function createWindowsShellOperations() {
+    return {
+        exec: (command, cwd, { onData, signal, timeout, env }) => {
+            return new Promise((resolve, reject) => {
+                if (!existsSync(cwd)) {
+                    reject(new Error(`Working directory does not exist: ${cwd}\nCannot execute shell commands.`));
+                    return;
+                }
+                const child = spawn("cmd.exe", ["/c", command], {
+                    cwd,
+                    detached: true,
+                    env: env ?? process.env,
+                    stdio: ["ignore", "pipe", "pipe"],
+                    shell: false,
+                });
+                let timedOut = false;
+                let timeoutHandle;
+                if (timeout !== undefined && timeout > 0) {
+                    timeoutHandle = setTimeout(() => {
+                        timedOut = true;
+                        if (child.pid)
+                            killProcessTreeWin32(child.pid);
+                    }, timeout * 1000);
+                }
+                if (child.stdout)
+                    child.stdout.on("data", onData);
+                if (child.stderr)
+                    child.stderr.on("data", onData);
+                child.on("error", (err) => {
+                    if (timeoutHandle)
+                        clearTimeout(timeoutHandle);
+                    if (signal)
+                        signal.removeEventListener("abort", onAbort);
+                    reject(err);
+                });
+                const onAbort = () => {
+                    if (child.pid)
+                        killProcessTreeWin32(child.pid);
+                };
+                if (signal) {
+                    if (signal.aborted)
+                        onAbort();
+                    else
+                        signal.addEventListener("abort", onAbort, { once: true });
+                }
+                child.on("close", (code) => {
+                    if (timeoutHandle)
+                        clearTimeout(timeoutHandle);
+                    if (signal)
+                        signal.removeEventListener("abort", onAbort);
+                    if (signal?.aborted) {
+                        reject(new Error("aborted"));
+                        return;
+                    }
+                    if (timedOut) {
+                        reject(new Error(`timeout:${timeout}`));
+                        return;
+                    }
+                    resolve({ exitCode: code });
+                });
+            });
+        },
+    };
+}
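
A hedged usage sketch calling the returned operations directly, outside createBashTool (the working directory is illustrative; note timeout is in seconds, which exec converts to milliseconds internally):

    import { createWindowsShellOperations } from "./dist/core/tools/windows-shell.js";

    const ops = createWindowsShellOperations();
    const { exitCode } = await ops.exec("dir", "C:\\Users\\me", {
        onData: (chunk: Buffer) => process.stdout.write(chunk),
        timeout: 30,
    });
    console.log("exit code:", exitCode);
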

package/dist/gateway/methods/agent-chat.js
@@ -305,11 +305,24 @@ async function handleAgentChatInner(client, targetSessionId, message, params) {
             if (errText.includes("Unknown value type") && errText.includes("[object Object]")) {
                 errText = "请求失败:模型返回了不支持的数据结构(如工具调用流),请尝试关闭工具或更换模型。";
             }
-            // After the local-model child process exits, the SDK reports terminated/Connection error; replace it with the actionable hint stored in env
+            const isConnErr = /Connection error|ECONNREFUSED|fetch failed/i.test(msg.errorMessage);
             const localFailed = process.env.LOCAL_LLM_START_FAILED;
-            if (localFailed && (msg.errorMessage === "terminated" || /Connection error|ECONNREFUSED|fetch failed/i.test(msg.errorMessage))) {
+            const isLocalProvider = provider === "local";
+            const isModelRequired = /model is required|400.*model/i.test(msg.errorMessage);
+            if (isLocalProvider && localFailed && (msg.errorMessage === "terminated" || isConnErr)) {
                 errText = `请求失败:${localFailed}`;
             }
+            else if ((provider === "openai-custom" || provider === "ollama") && isConnErr) {
+                errText = "请求失败:无法连接到模型服务(若使用 Ollama 请确认已启动且 baseUrl 为 http://localhost:11434/v1,或改用「Ollama」Provider)。";
+            }
+            else if (isModelRequired && (provider === "openai-custom" || provider === "ollama")) {
+                errText =
+                    "请求失败:模型名称未被服务端识别。若使用 Ollama,请确保「模型配置」中的模型名与终端中 `ollama list` 显示的名称完全一致(如 qwen3:4b)。";
+            }
+            else if (provider === "local" && /context size.*too large|VRAM|显存/i.test(msg.errorMessage)) {
+                errText =
+                    "请求失败:显存/内存不足,当前上下文长度过大。请在「智能体配置」中将该智能体的「上下文长度」调小(如 8192 或 4096)后重新启动本地模型服务再试。";
+            }
             sendSessionMessage(targetSessionId, { type: "chat", code: "agent.chunk", payload: { text: errText } });
         }
         wsPayload = null;
@@ -337,10 +350,24 @@ async function handleAgentChatInner(client, targetSessionId, message, params) {
             if (errText.includes("Unknown value type") && errText.includes("[object Object]")) {
                 errText = "请求失败:模型返回了不支持的数据结构(如工具调用流),请尝试关闭工具或更换模型。";
             }
+            const isConnErr = /Connection error|ECONNREFUSED|fetch failed/i.test(msg.errorMessage);
             const localFailed = process.env.LOCAL_LLM_START_FAILED;
-            if (localFailed && (msg.errorMessage === "terminated" || /Connection error|ECONNREFUSED|fetch failed/i.test(msg.errorMessage))) {
+            const isLocalProvider = provider === "local";
+            const isModelRequired = /model is required|400.*model/i.test(msg.errorMessage);
+            if (isLocalProvider && localFailed && (msg.errorMessage === "terminated" || isConnErr)) {
                 errText = `请求失败:${localFailed}`;
             }
+            else if ((provider === "openai-custom" || provider === "ollama") && isConnErr) {
+                errText = "请求失败:无法连接到模型服务(若使用 Ollama 请确认已启动且 baseUrl 为 http://localhost:11434/v1,或改用「Ollama」Provider)。";
+            }
+            else if (isModelRequired && (provider === "openai-custom" || provider === "ollama")) {
+                errText =
+                    "请求失败:模型名称未被服务端识别。若使用 Ollama,请确保「模型配置」中的模型名与终端中 `ollama list` 显示的名称完全一致(如 qwen3:4b)。";
+            }
+            else if (provider === "local" && /context size.*too large|VRAM|显存/i.test(msg.errorMessage)) {
+                errText =
+                    "请求失败:显存/内存不足,当前上下文长度过大。请在「智能体配置」中将该智能体的「上下文长度」调小(如 8192 或 4096)后重新启动本地模型服务再试。";
+            }
             sendSessionMessage(targetSessionId, { type: "chat", code: "agent.chunk", payload: { text: errText } });
             hasReceivedAnyChunk = true;
         }
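
Both branches apply the same provider-aware classification, so here is a quick sketch of the matching alone (the sample error strings are illustrative, not captured from the SDK):

    const isConnErr = (m: string) => /Connection error|ECONNREFUSED|fetch failed/i.test(m);
    const isModelRequired = (m: string) => /model is required|400.*model/i.test(m);

    for (const m of ["Connection error.", "400 model is required", "terminated"]) {
        console.log(m, { conn: isConnErr(m), modelRequired: isModelRequired(m) });
    }
    // "Connection error."     → conn: true → connectivity hint for openai-custom/ollama
    // "400 model is required" → modelRequired: true → model-name hint for openai-custom/ollama
    // "terminated"            → neither; with provider "local" and LOCAL_LLM_START_FAILED set, the env hint is used
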

package/dist/gateway/server.js
@@ -61,6 +61,46 @@ const __dirname = dirname(fileURLToPath(import.meta.url));
 const PACKAGE_ROOT = join(__dirname, "..", "..");
 /** When embedded in Electron, the main process sets OPENBOT_STATIC_DIR to point at the bundled renderer/dist */
 const STATIC_DIR = process.env.OPENBOT_STATIC_DIR || join(PACKAGE_ROOT, "apps", "desktop", "renderer", "dist");
+/** Maximum number of ports to try in sequence when one is occupied (38080, 38081, ...) */
+const MAX_PORT_ATTEMPTS = 20;
+/**
+ * Try to bind httpServer to a port; if it is occupied, try the next one. Returns the port actually bound.
+ * @param server an HTTP server with its routes already attached
+ * @param startPort the preferred port
+ * @returns the port actually being listened on
+ */
+function listenOnPreferredOrNextPort(server, startPort) {
+    return new Promise((resolve, reject) => {
+        let tryPort = startPort;
+        const attempt = () => {
+            if (tryPort - startPort >= MAX_PORT_ATTEMPTS) {
+                reject(new Error(`No available port in range ${startPort}–${startPort + MAX_PORT_ATTEMPTS - 1}`));
+                return;
+            }
+            const onListen = () => {
+                server.off("error", onError);
+                const addr = server.address();
+                const p = typeof addr === "object" && addr && "port" in addr ? addr.port : tryPort;
+                resolve(p);
+            };
+            const onError = (err) => {
+                server.off("listening", onListen);
+                if (err?.code === "EADDRINUSE") {
+                    console.log(`Port ${tryPort} in use, trying ${tryPort + 1}...`);
+                    tryPort += 1;
+                    attempt();
+                }
+                else {
+                    reject(err);
+                }
+            };
+            server.once("listening", onListen);
+            server.once("error", onError);
+            server.listen(tryPort);
+        };
+        attempt();
+    });
+}
 const MIME_TYPES = {
     ".html": "text/html",
     ".js": "text/javascript",
64
104
  const MIME_TYPES = {
65
105
  ".html": "text/html",
66
106
  ".js": "text/javascript",
@@ -77,7 +117,6 @@ const MIME_TYPES = {
     ".eot": "application/vnd.ms-fontobject",
 };
 export async function startGatewayServer(port = 38080) {
-    process.env.PORT = String(port);
     await ensureDesktopConfigInitialized();
     console.log(`Starting gateway server on port ${port}...`);
     // On each startup, try to launch the local model service from the saved config (non-blocking, does not affect the main process; failures only log a hint)
@@ -89,7 +128,6 @@ export async function startGatewayServer(port = 38080) {
         const msg = e instanceof Error ? e.message : String(e);
         console.log("[local-llm] 提示:启动时发生异常,已跳过。", msg);
     }
-    setBackendBaseUrl(`http://localhost:${port}`);
     const { app: nestApp, express: nestExpress } = await createNestAppEmbedded();
     try {
         const agentsService = nestApp.get(AgentsService);
@@ -249,17 +287,16 @@ export async function startGatewayServer(port = 38080) {
             socket.destroy();
         }
     });
-    const actualPort = await new Promise((resolve) => {
-        httpServer.listen(port, () => {
-            const addr = httpServer.address();
-            const p = typeof addr === "object" && addr && "port" in addr ? addr.port : port;
-            console.log(`✅ Gateway server listening on ws://localhost:${p}`);
-            console.log(`   Health: http://localhost:${p}${PATHS.HEALTH}`);
-            console.log(`   API: http://localhost:${p}${PATHS.SERVER_API}`);
-            console.log(`   WS: ws://localhost:${p}${PATHS.WS}`);
-            resolve(p);
-        });
-    });
+    const actualPort = await listenOnPreferredOrNextPort(httpServer, port);
+    process.env.PORT = String(actualPort);
+    setBackendBaseUrl(`http://localhost:${actualPort}`);
+    if (actualPort !== port) {
+        console.log(`Using port ${actualPort} (preferred ${port} was in use).`);
+    }
+    console.log(`✅ Gateway server listening on ws://localhost:${actualPort}`);
+    console.log(`   Health: http://localhost:${actualPort}${PATHS.HEALTH}`);
+    console.log(`   API: http://localhost:${actualPort}${PATHS.SERVER_API}`);
+    console.log(`   WS: ws://localhost:${actualPort}${PATHS.WS}`);
     // Channels: register and start per config (Feishu WebSocket, DingTalk Stream, etc.)
     const channelsConfig = getChannelsConfigSync();
     const feishuCfg = channelsConfig.feishu;
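
The net effect: PORT and the backend base URL are now derived from the port actually bound rather than the requested one. A minimal demonstration of the fallback (assumes a standalone copy of listenOnPreferredOrNextPort, since the package keeps it module-private):

    import { createServer } from "node:http";

    const a = createServer((_req, res) => res.end("a"));
    const b = createServer((_req, res) => res.end("b"));
    a.listen(38080, async () => {
        const port = await listenOnPreferredOrNextPort(b, 38080);
        console.log(port); // 38081 (38080 is already taken by server a)
    });
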

package/dist/server/agent-config/agent-config.controller.d.ts
@@ -1,7 +1,15 @@
+import type { McpServerConfigStandardEntry } from '../../core/mcp/types.js';
 import { AgentConfigService, AgentConfigItem } from './agent-config.service.js';
 export declare class AgentConfigController {
     private readonly agentConfigService;
     constructor(agentConfigService: AgentConfigService);
+    testMcp(body: {
+        mcpServer: McpServerConfigStandardEntry;
+    }): Promise<{
+        success: boolean;
+        error: string | undefined;
+        toolsCount: number | undefined;
+    }>;
     listAgents(): Promise<{
         success: boolean;
         data: AgentConfigItem[];

package/dist/server/agent-config/agent-config.controller.js
@@ -17,6 +17,10 @@ let AgentConfigController = class AgentConfigController {
     constructor(agentConfigService) {
         this.agentConfigService = agentConfigService;
     }
+    async testMcp(body) {
+        const result = await this.agentConfigService.testMcpServer(body?.mcpServer ?? {});
+        return { success: result.success, error: result.error, toolsCount: result.toolsCount };
+    }
     async listAgents() {
         const agents = await this.agentConfigService.listAgents();
         return { success: true, data: agents };
@@ -44,6 +48,13 @@ let AgentConfigController = class AgentConfigController {
         return { success: true };
     }
 };
+__decorate([
+    Post('mcp/test'),
+    __param(0, Body()),
+    __metadata("design:type", Function),
+    __metadata("design:paramtypes", [Object]),
+    __metadata("design:returntype", Promise)
+], AgentConfigController.prototype, "testMcp", null);
 __decorate([
     Get(),
     __metadata("design:type", Function),

package/dist/server/agent-config/agent-config.service.d.ts
@@ -1,4 +1,5 @@
 import type { McpServerConfig, McpServersStandardFormat } from '../../core/mcp/index.js';
+import type { McpServerConfigStandardEntry } from '../../core/mcp/types.js';
 import { DatabaseService } from '../database/database.service.js';
 import { WorkspaceService } from '../workspace/workspace.service.js';
 /** Default agent ID / workspace name, not deletable; maps to the directory ~/.openbot/workspace/default */
@@ -136,4 +137,13 @@ export declare class AgentConfigService {
         modelItemCode?: string;
     }>;
     }): Promise<void>;
+    /**
+     * Test whether a single MCP config is usable (connect, fetch the tool list, then disconnect).
+     * Backs the "Test" button in the config UI and can trigger uvx/npx dependency installation ahead of time.
+     */
+    testMcpServer(entry: McpServerConfigStandardEntry): Promise<{
+        success: boolean;
+        error?: string;
+        toolsCount?: number;
+    }>;
 }

package/dist/server/agent-config/agent-config.service.js
@@ -12,7 +12,10 @@ import { readFile, writeFile, mkdir } from 'fs/promises';
 import { join } from 'path';
 import { existsSync } from 'fs';
 import { homedir } from 'os';
-import { addPendingAgentReload } from '../../core/config/agent-reload-pending.js';
+import { testMcpConnection } from '../../core/mcp/operator.js';
+import { standardFormatToArray } from '../../core/mcp/config.js';
+import { addPendingAgentReload, consumePendingAgentReload } from '../../core/config/agent-reload-pending.js';
+import { agentManager } from '../../core/agent/agent-manager.js';
 import { DatabaseService } from '../database/database.service.js';
 import { WorkspaceService } from '../workspace/workspace.service.js';
 /** Workspace names may contain only letters, digits, underscores, and hyphens */
@@ -238,6 +241,9 @@ let AgentConfigService = class AgentConfigService {
         }
         await this.writeAgentsFile(file);
         await addPendingAgentReload(id).catch(() => { });
+        // Immediately invalidate all running AgentSessions for this agent; the next request will create a new session with the new config (safe: compaction is persisted before removal)
+        await agentManager.deleteSessionsByAgentId(id).catch(() => { });
+        await consumePendingAgentReload(id).catch(() => { });
         return { ...agent, isDefault: agent.id === DEFAULT_AGENT_ID };
     }
     async deleteAgent(id, options) {
@@ -306,6 +312,17 @@ let AgentConfigService = class AgentConfigService {
         }
         await this.writeAgentsFile(file);
     }
+    /**
+     * Test whether a single MCP config is usable (connect, fetch the tool list, then disconnect).
+     * Backs the "Test" button in the config UI and can trigger uvx/npx dependency installation ahead of time.
+     */
+    async testMcpServer(entry) {
+        const configs = standardFormatToArray({ test: entry });
+        if (configs.length === 0) {
+            return { success: false, error: '无效配置:需 command(本地进程)或 url(远程服务)' };
+        }
+        return testMcpConnection(configs[0], { initTimeoutMs: 60_000 });
+    }
 };
 AgentConfigService = __decorate([
     Injectable(),
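
End to end, the desktop UI can now exercise an MCP entry before saving it. A hedged sketch of calling the new endpoint over HTTP (the /api/agent-config prefix is assumed; the controller excerpt above only shows Post('mcp/test')):

    const res = await fetch("http://localhost:38080/api/agent-config/mcp/test", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ mcpServer: { command: "uvx", args: ["akshare-tools"] } }),
    });
    console.log(await res.json()); // { success: boolean, error?: string, toolsCount?: number }
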

package/dist/server/agents/agents.gateway.js
@@ -108,7 +108,7 @@ __decorate([
 AgentsGateway = __decorate([
     WebSocketGateway({
         cors: {
-            origin: ['http://localhost:5173', 'http://localhost:38081'],
+            origin: ['http://localhost:5173', 'http://127.0.0.1:5173', 'http://localhost:38080', 'http://localhost:38081', 'http://127.0.0.1:38080', 'http://127.0.0.1:38081'],
             credentials: true,
         },
     }),