@next-open-ai/openbot 0.6.16 → 0.6.66

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. package/README.md +4 -4
  2. package/apps/desktop/renderer/dist/assets/index-CxDZnMBH.css +10 -0
  3. package/apps/desktop/renderer/dist/assets/index-k47Qiokg.js +93 -0
  4. package/apps/desktop/renderer/dist/index.html +2 -2
  5. package/dist/cli/cli.js +136 -0
  6. package/dist/cli/extension-cmd.d.ts +15 -0
  7. package/dist/cli/extension-cmd.js +107 -0
  8. package/dist/core/agent/agent-dir.d.ts +6 -0
  9. package/dist/core/agent/agent-dir.js +8 -0
  10. package/dist/core/agent/agent-manager.d.ts +13 -0
  11. package/dist/core/agent/agent-manager.js +88 -7
  12. package/dist/core/agent/proxy/adapters/claude-code-adapter.d.ts +2 -0
  13. package/dist/core/agent/proxy/adapters/claude-code-adapter.js +186 -0
  14. package/dist/core/agent/proxy/adapters/local-adapter.js +3 -1
  15. package/dist/core/agent/proxy/adapters/openclawx-adapter.js +3 -3
  16. package/dist/core/agent/proxy/adapters/opencode-adapter.js +65 -29
  17. package/dist/core/agent/proxy/adapters/opencode-local-runner.js +9 -0
  18. package/dist/core/agent/proxy/index.js +2 -0
  19. package/dist/core/agent/token-usage-log-extension.d.ts +14 -0
  20. package/dist/core/agent/token-usage-log-extension.js +61 -0
  21. package/dist/core/config/agent-reload-pending.js +3 -2
  22. package/dist/core/config/desktop-config.d.ts +29 -6
  23. package/dist/core/config/desktop-config.js +188 -27
  24. package/dist/core/config/provider-support-default.js +27 -0
  25. package/dist/core/extensions/index.d.ts +1 -0
  26. package/dist/core/extensions/index.js +1 -0
  27. package/dist/core/extensions/load.d.ts +11 -0
  28. package/dist/core/extensions/load.js +101 -0
  29. package/dist/core/local-llm-server/download-model.d.ts +16 -0
  30. package/dist/core/local-llm-server/download-model.js +37 -0
  31. package/dist/core/local-llm-server/index.d.ts +32 -0
  32. package/dist/core/local-llm-server/index.js +152 -0
  33. package/dist/core/local-llm-server/llm-context.d.ts +66 -0
  34. package/dist/core/local-llm-server/llm-context.js +270 -0
  35. package/dist/core/local-llm-server/model-resolve.d.ts +27 -0
  36. package/dist/core/local-llm-server/model-resolve.js +90 -0
  37. package/dist/core/local-llm-server/server.d.ts +1 -0
  38. package/dist/core/local-llm-server/server.js +234 -0
  39. package/dist/core/local-llm-server/start-from-config.d.ts +5 -0
  40. package/dist/core/local-llm-server/start-from-config.js +50 -0
  41. package/dist/core/mcp/adapter.d.ts +4 -2
  42. package/dist/core/mcp/adapter.js +10 -4
  43. package/dist/core/mcp/index.d.ts +2 -0
  44. package/dist/core/mcp/index.js +1 -0
  45. package/dist/core/mcp/operator.d.ts +11 -0
  46. package/dist/core/mcp/operator.js +41 -7
  47. package/dist/core/mcp/transport/stdio.d.ts +6 -0
  48. package/dist/core/mcp/transport/stdio.js +125 -28
  49. package/dist/core/memory/local-embedding-llama.js +8 -6
  50. package/dist/core/memory/local-embedding.d.ts +4 -3
  51. package/dist/core/memory/local-embedding.js +43 -3
  52. package/dist/core/tools/index.d.ts +1 -0
  53. package/dist/core/tools/index.js +1 -0
  54. package/dist/core/tools/truncate-result.d.ts +14 -0
  55. package/dist/core/tools/truncate-result.js +27 -0
  56. package/dist/core/tools/web-search/create-web-search-tool.d.ts +17 -0
  57. package/dist/core/tools/web-search/create-web-search-tool.js +87 -0
  58. package/dist/core/tools/web-search/index.d.ts +4 -0
  59. package/dist/core/tools/web-search/index.js +2 -0
  60. package/dist/core/tools/web-search/providers/brave.d.ts +2 -0
  61. package/dist/core/tools/web-search/providers/brave.js +87 -0
  62. package/dist/core/tools/web-search/providers/duck-duck-scrape.d.ts +2 -0
  63. package/dist/core/tools/web-search/providers/duck-duck-scrape.js +47 -0
  64. package/dist/core/tools/web-search/providers/index.d.ts +5 -0
  65. package/dist/core/tools/web-search/providers/index.js +13 -0
  66. package/dist/core/tools/web-search/types.d.ts +35 -0
  67. package/dist/core/tools/web-search/types.js +4 -0
  68. package/dist/gateway/methods/agent-chat.js +110 -42
  69. package/dist/gateway/methods/run-scheduled-task.js +2 -0
  70. package/dist/gateway/server.js +60 -13
  71. package/dist/server/agent-config/agent-config.controller.d.ts +9 -1
  72. package/dist/server/agent-config/agent-config.controller.js +11 -0
  73. package/dist/server/agent-config/agent-config.service.d.ts +29 -5
  74. package/dist/server/agent-config/agent-config.service.js +41 -1
  75. package/dist/server/agents/agents.gateway.js +1 -1
  76. package/dist/server/bootstrap.d.ts +1 -0
  77. package/dist/server/bootstrap.js +19 -2
  78. package/dist/server/config/config.controller.d.ts +107 -4
  79. package/dist/server/config/config.controller.js +185 -3
  80. package/dist/server/config/config.module.js +3 -2
  81. package/dist/server/config/config.service.d.ts +18 -1
  82. package/dist/server/config/config.service.js +68 -9
  83. package/dist/server/config/local-models.service.d.ts +67 -0
  84. package/dist/server/config/local-models.service.js +242 -0
  85. package/package.json +3 -1
  86. package/presets/preset-agents.json +125 -91
  87. package/presets/preset-config.json +24 -6
  88. package/presets/preset-providers.json +7 -0
  89. package/presets/recommended-local-models.json +36 -0
  90. package/presets/workspaces/download-assistant/skills/downloader/SKILL.md +2 -2
  91. package/presets/workspaces/office-automation/skills/rpa-helper/SKILL.md +9 -0
  92. package/presets/workspaces/self-media-bot/skills/self-media-tools/SKILL.md +9 -0
  93. package/skills/url-bookmark/SKILL.md +12 -12
  94. package/apps/desktop/renderer/dist/assets/index-BxqMW-uy.css +0 -10
  95. package/apps/desktop/renderer/dist/assets/index-DJs-wX3R.js +0 -89
package/dist/core/config/desktop-config.js
@@ -209,8 +209,8 @@ export async function loadDesktopAgentConfig(agentId) {
  }
  }
  const resolvedAgentId = agentId === "default" ? "default" : agentId;
- let provider = config.defaultProvider ?? "deepseek";
- let model = config.defaultModel ?? "deepseek-chat";
+ let provider = config.defaultProvider ?? "ollama";
+ let model = config.defaultModel ?? "qwen3:4b";
  if (config.defaultModelItemCode && Array.isArray(config.configuredModels)) {
  const configured = config.configuredModels.find((m) => m.modelItemCode === config.defaultModelItemCode);
  if (configured) {
@@ -218,10 +218,14 @@ export async function loadDesktopAgentConfig(agentId) {
  model = configured.modelId;
  }
  }
+ /** Whether the model came from this agent's own config (a modelItemCode or provider/model); if not, the global default is in use */
+ let agentHadOwnModel = false;
  let workspaceName = resolvedAgentId;
  let mcpServers;
+ let mcpMaxResultTokens;
  let systemPrompt;
  let useLongMemory = true;
+ let contextSize;
  if (existsSync(agentsPath)) {
  try {
  const raw = await readFile(agentsPath, "utf-8");
@@ -233,6 +237,12 @@ export async function loadDesktopAgentConfig(agentId) {
  workspaceName = agent.workspace;
  else if (agent.id)
  workspaceName = agent.id;
+ if (agent.mcpMaxResultTokens != null && typeof agent.mcpMaxResultTokens === "number" && agent.mcpMaxResultTokens > 0) {
+ mcpMaxResultTokens = agent.mcpMaxResultTokens;
+ }
+ if (agent.contextSize != null && typeof agent.contextSize === "number" && agent.contextSize > 0) {
+ contextSize = agent.contextSize;
+ }
  if (agent.mcpServers != null) {
  if (Array.isArray(agent.mcpServers) || (typeof agent.mcpServers === "object" && !Array.isArray(agent.mcpServers))) {
  mcpServers = agent.mcpServers;
@@ -248,19 +258,28 @@ export async function loadDesktopAgentConfig(agentId) {
  if (configured) {
  provider = configured.provider;
  model = configured.modelId;
+ agentHadOwnModel = true;
  }
  else {
- if (agent.provider)
+ if (agent.provider) {
  provider = agent.provider;
- if (agent.model)
+ agentHadOwnModel = true;
+ }
+ if (agent.model) {
  model = agent.model;
+ agentHadOwnModel = true;
+ }
  }
  }
  else {
- if (agent.provider)
+ if (agent.provider) {
  provider = agent.provider;
- if (agent.model)
+ agentHadOwnModel = true;
+ }
+ if (agent.model) {
  model = agent.model;
+ agentHadOwnModel = true;
+ }
  }
  }
  }
@@ -268,6 +287,11 @@ export async function loadDesktopAgentConfig(agentId) {
  // ignore
  }
  }
+ // When a local LLM is available and the agent has not configured its own model, default to local inference so every agent "owns" this config
+ if (!agentHadOwnModel && process.env.LOCAL_LLM_BASE_URL?.trim()) {
+ provider = "local";
+ model = "local-llm";
+ }
  const provConfig = config.providers?.[provider];
  const apiKey = provConfig?.apiKey && typeof provConfig.apiKey === "string" && provConfig.apiKey.trim()
  ? provConfig.apiKey.trim()
@@ -276,6 +300,19 @@ export async function loadDesktopAgentConfig(agentId) {
  let coze;
  let openclawx;
  let opencode;
+ let claudeCode;
+ const tw = config.tools?.webSearch;
+ const timeoutSeconds = typeof tw?.timeoutSeconds === "number" && tw.timeoutSeconds > 0 ? tw.timeoutSeconds : 15;
+ const cacheTtlMinutes = typeof tw?.cacheTtlMinutes === "number" && tw.cacheTtlMinutes >= 0 ? tw.cacheTtlMinutes : 5;
+ const maxResultsRaw = typeof tw?.maxResults === "number" ? tw.maxResults : 5;
+ const maxResults = Math.min(10, Math.max(1, maxResultsRaw));
+ let webSearch = {
+ enabled: false,
+ provider: "duck-duck-scrape",
+ timeoutSeconds,
+ cacheTtlMinutes,
+ maxResults,
+ };
  if (existsSync(agentsPath)) {
  try {
  const rawAgents = await readFile(agentsPath, "utf-8");
@@ -285,9 +322,16 @@ export async function loadDesktopAgentConfig(agentId) {
  if (agentRow) {
  if (agentRow.runnerType === "coze" ||
  agentRow.runnerType === "openclawx" ||
- agentRow.runnerType === "opencode") {
+ agentRow.runnerType === "opencode" ||
+ agentRow.runnerType === "claude_code") {
  runnerType = agentRow.runnerType;
  }
+ if (agentRow.runnerType === "claude_code") {
+ const wd = agentRow.claudeCode?.workingDirectory;
+ claudeCode = {
+ workingDirectory: typeof wd === "string" && wd.trim() ? wd.trim() : undefined,
+ };
+ }
  if (agentRow.coze) {
  const row = agentRow.coze;
  const region = row.region === "cn" || row.region === "com" ? row.region : "com";
@@ -353,6 +397,35 @@ export async function loadDesktopAgentConfig(agentId) {
  }
  }
  }
+ if (agentRow.webSearch?.enabled === true) {
+ let preferredProvider = agentRow.webSearch?.provider === "brave" || agentRow.webSearch?.provider === "duck-duck-scrape"
+ ? agentRow.webSearch.provider
+ : tw?.defaultProvider === "brave" || tw?.defaultProvider === "duck-duck-scrape"
+ ? tw.defaultProvider
+ : "duck-duck-scrape";
+ let braveKey;
+ if (preferredProvider === "brave") {
+ braveKey =
+ (typeof tw?.providers?.brave?.apiKey === "string" && tw.providers.brave.apiKey.trim()
+ ? tw.providers.brave.apiKey.trim()
+ : undefined) ??
+ (process.env.BRAVE_API_KEY && process.env.BRAVE_API_KEY.trim() ? process.env.BRAVE_API_KEY.trim() : undefined);
+ if (!braveKey)
+ preferredProvider = "duck-duck-scrape";
+ }
+ const maxResultTokens = agentRow.webSearch?.maxResultTokens != null && typeof agentRow.webSearch?.maxResultTokens === "number" && agentRow.webSearch.maxResultTokens > 0
+ ? agentRow.webSearch.maxResultTokens
+ : undefined;
+ webSearch = {
+ enabled: true,
+ provider: preferredProvider,
+ apiKey: preferredProvider === "brave" ? braveKey : undefined,
+ timeoutSeconds,
+ cacheTtlMinutes,
+ maxResults,
+ maxResultTokens,
+ };
+ }
  }
  }
  catch {
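
Note: the block above resolves web-search settings from two places. A hedged sketch of both shapes, with field names taken from the code and illustrative values only:

// Per-agent switch in agents.json (agentRow.webSearch):
const agentRowExample = {
  webSearch: { enabled: true, provider: "brave", maxResultTokens: 2000 },
};
// Global defaults in config.json (config.tools.webSearch, bound to `tw` above):
const configExample = {
  tools: {
    webSearch: {
      defaultProvider: "duck-duck-scrape",
      timeoutSeconds: 15,
      cacheTtlMinutes: 5,
      maxResults: 5, // clamped to 1..10
      providers: { brave: { apiKey: "<brave key>" } },
    },
  },
};
// Provider precedence: agentRow.webSearch.provider, then tw.defaultProvider,
// then "duck-duck-scrape"; choosing "brave" without a key (config or the
// BRAVE_API_KEY env var) falls back to "duck-duck-scrape".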
@@ -365,12 +438,16 @@ export async function loadDesktopAgentConfig(agentId) {
  apiKey: apiKey ?? undefined,
  workspace: workspaceName,
  mcpServers,
+ mcpMaxResultTokens,
  systemPrompt,
  runnerType,
  coze,
  openclawx,
  opencode,
+ claudeCode,
  useLongMemory,
+ webSearch,
+ contextSize,
  };
  }
  function ensureDesktopDir() {
@@ -568,22 +645,54 @@ export async function ensureProviderSupportFile() {
  await writeFile(path, JSON.stringify(presetProviders, null, 2), "utf-8");
  }
  }
- /** If config.json does not exist, initialize it from preset-config.json; if it exists, shallow-merge in any missing base keys */
+ /** Preinstalled local-inference default: the local filename of the first LLM in the recommended list (Qwen 3.5 4B), consistent with modelUriToFilename */
+ const DEFAULT_LOCAL_LLM_MODEL_ID = "hf_unsloth_Qwen3.5-4B-GGUF_Qwen3.5-4B-Q5_K_M.gguf";
+ const DEFAULT_LOCAL_MODEL_ITEM_CODE = "local-qwen35-4b";
+ /** Built-in code default: local provider + local Qwen 3.5 4B; guaranteed to exist on first init and on merge */
+ const BUILTIN_DEFAULT_CONFIG = {
+ defaultProvider: "local",
+ defaultModel: DEFAULT_LOCAL_LLM_MODEL_ID,
+ defaultModelItemCode: DEFAULT_LOCAL_MODEL_ITEM_CODE,
+ defaultAgentId: DEFAULT_AGENT_ID,
+ maxAgentSessions: DEFAULT_MAX_AGENT_SESSIONS,
+ providers: {
+ local: { baseUrl: "http://127.0.0.1:11435/v1" },
+ },
+ configuredModels: [
+ {
+ provider: "local",
+ modelId: DEFAULT_LOCAL_LLM_MODEL_ID,
+ type: "llm",
+ alias: "Qwen 3.5 4B Q5_K_M",
+ modelItemCode: DEFAULT_LOCAL_MODEL_ITEM_CODE,
+ },
+ {
+ provider: "local",
+ modelId: "hf_ggml-org_embeddinggemma-300M-GGUF_embeddinggemma-300M-Q8_0.gguf",
+ type: "embedding",
+ alias: "EmbeddingGemma 300M Q8 (768-dim)",
+ modelItemCode: "local-embeddinggemma-300m",
+ },
+ ],
+ };
+ /** If config.json does not exist, initialize it from preset-config.json; if it exists, shallow-merge in any missing base keys. Preinstalls the local provider + local Qwen 3.5 4B model as the default; the preset is merged with the code defaults so local is always present. */
  async function ensureConfigJsonInitialized() {
  const presetPath = join(getPresetsDir(), "preset-config.json");
- let presetConfig = {
- defaultProvider: "deepseek",
- defaultModel: "deepseek-chat",
- defaultAgentId: DEFAULT_AGENT_ID,
- maxAgentSessions: DEFAULT_MAX_AGENT_SESSIONS,
- providers: {},
- configuredModels: [],
- };
+ let presetConfig = { ...BUILTIN_DEFAULT_CONFIG };
  if (existsSync(presetPath)) {
  try {
  const data = JSON.parse(await readFile(presetPath, "utf-8"));
- if (data.config)
- presetConfig = data.config;
+ if (data.config && typeof data.config === "object") {
+ presetConfig = { ...BUILTIN_DEFAULT_CONFIG, ...data.config };
+ presetConfig.providers = { ...BUILTIN_DEFAULT_CONFIG.providers, ...(presetConfig.providers || {}) };
+ const hasLocalModel = (presetConfig.configuredModels || []).some((m) => m?.provider === "local" && (m?.modelId === DEFAULT_LOCAL_LLM_MODEL_ID || m?.modelItemCode === DEFAULT_LOCAL_MODEL_ITEM_CODE));
+ if (!hasLocalModel) {
+ presetConfig.configuredModels = [
+ ...(BUILTIN_DEFAULT_CONFIG.configuredModels || []),
+ ...(presetConfig.configuredModels || []),
+ ];
+ }
+ }
  }
  catch { }
  }
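
Note: to make the merge semantics above concrete, a sketch assuming a preset that overrides only the default model:

// preset-config.json:
//   { "config": { "defaultProvider": "deepseek", "defaultModel": "deepseek-chat" } }
// After { ...BUILTIN_DEFAULT_CONFIG, ...data.config }:
// - defaultProvider / defaultModel come from the preset (preset keys win),
// - providers.local survives via the key-wise providers merge,
// - the built-in local LLM + embedding entries are prepended to configuredModels,
//   since the preset had no local Qwen 3.5 4B entry (hasLocalModel === false).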
@@ -652,18 +761,46 @@ async function ensureAgentsJsonInitialized() {
  }
  }
  }
+ // Agents without their own model config use the config default (preinstalled as local + Qwen 3.5 4B)
+ const configPath = join(getDesktopDir(), "config.json");
+ if (existsSync(configPath)) {
+ try {
+ const configRaw = await readFile(configPath, "utf-8");
+ const configData = JSON.parse(configRaw);
+ const defProvider = configData.defaultProvider?.trim();
+ const defModel = configData.defaultModel?.trim();
+ const defCode = configData.defaultModelItemCode?.trim();
+ if (defProvider && defModel) {
+ for (const agent of currentData.agents) {
+ const hasOwn = (agent.provider && String(agent.provider).trim()) || (agent.model && String(agent.model).trim()) || (agent.modelItemCode && String(agent.modelItemCode).trim());
+ if (!hasOwn) {
+ agent.provider = defProvider;
+ agent.model = defModel;
+ if (defCode)
+ agent.modelItemCode = defCode;
+ changed = true;
+ }
+ }
+ }
+ }
+ catch { /* ignore */ }
+ }
  if (changed || !existsSync(agentsPath)) {
  await writeFile(agentsPath, JSON.stringify(currentData, null, 2), "utf-8");
  }
  }
  /**
- * Called at CLI / Gateway runtime; ensures config.json, provider-support.json, and agents.json are all initialized.
+ * Called at CLI / Gateway runtime; ensures config.json, provider-support.json, and agents.json are all initialized,
+ * then syncs to the agent directory's models.json so the pi ModelRegistry can resolve local (and other) models and credentials.
  */
  export async function ensureDesktopConfigInitialized() {
  ensureDesktopDir();
  await ensureProviderSupportFile();
  await ensureConfigJsonInitialized();
  await ensureAgentsJsonInitialized();
+ await syncDesktopConfigToModelsJson().catch((err) => {
+ console.warn("[ensureDesktopConfigInitialized] syncDesktopConfigToModelsJson failed:", err);
+ });
  }
  /**
  * Get the first llm model id of a provider from provider-support; if none, return its first model id.
@@ -714,6 +851,10 @@ const SYNC_DEFAULTS = {
  "openai-custom": { baseUrl: "", apiKey: "OPENAI_API_KEY", api: "openai-completions" },
  nvidia: { baseUrl: "https://integrate.api.nvidia.com/v1", apiKey: "NVIDIA_API_KEY", api: "openai-completions" },
  kimi: { baseUrl: "https://api.moonshot.cn/v1", apiKey: "MOONSHOT_API_KEY", api: "openai-completions" },
+ /** Local Ollama; no real API key required */
+ ollama: { baseUrl: "http://localhost:11434/v1", apiKey: "OPENAI_API_KEY", api: "openai-completions" },
+ /** Built-in local inference (node-llama-cpp); no API key required; baseUrl points at the local child-process server */
+ local: { baseUrl: "http://127.0.0.1:11435/v1", apiKey: "OPENAI_API_KEY", api: "openai-completions" },
  };
  const DEFAULT_COST = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 };
  const DEFAULT_CONTEXT_WINDOW = 64000;
@@ -738,25 +879,41 @@ function configuredModelToPi(item, displayName) {
  }
  /**
  * Generate and write the agent directory's models.json from the desktop config (configured providers + configuredModels) and provider-support.
- * Includes only providers configured in config.providers; each provider's models come from configuredModels, with reasoning, cost, etc.
+ * Includes: providers configured in config.providers, plus providers referenced by configuredModels / defaultProvider but absent from providers (filled in with the support default baseUrl, so that e.g. Ollama still connects when only a model was selected and no baseUrl entered).
  */
  export async function syncDesktopConfigToModelsJson() {
  const config = await readDesktopConfigJson();
  const configured = config.providers ?? {};
  const configuredModels = Array.isArray(config.configuredModels) ? config.configuredModels : [];
- if (Object.keys(configured).length === 0) {
+ const support = await getProviderSupport();
+ const providerIdsFromModels = new Set();
+ for (const m of configuredModels)
+ if (m?.provider)
+ providerIdsFromModels.add(m.provider);
+ if (config.defaultProvider)
+ providerIdsFromModels.add(config.defaultProvider);
+ const allProviderIds = new Set([...Object.keys(configured), ...providerIdsFromModels]);
+ if (allProviderIds.size === 0) {
  return;
  }
- const support = await getProviderSupport();
  const piProviders = {};
- for (const [providerId, userConfig] of Object.entries(configured)) {
- if (!userConfig?.apiKey?.trim())
+ for (const providerId of allProviderIds) {
+ const userConfig = configured[providerId];
+ // ollama / local need no API key; every other provider must have an apiKey
+ const isNoKeyProvider = providerId === "ollama" || providerId === "local";
+ if (!isNoKeyProvider && !userConfig?.apiKey?.trim())
  continue;
  const defaults = SYNC_DEFAULTS[providerId] ?? { baseUrl: "", apiKey: "OPENAI_API_KEY", api: "openai-completions" };
- const baseUrl = userConfig.baseUrl?.trim() || (support[providerId]?.baseUrl ?? "").trim() || defaults.baseUrl;
+ let baseUrl = userConfig?.baseUrl?.trim() || (support[providerId]?.baseUrl ?? "").trim() || defaults.baseUrl;
+ if (providerId === "ollama" && process.env.OLLAMA_BASE_URL?.trim()) {
+ const u = process.env.OLLAMA_BASE_URL.trim().replace(/\/$/, "");
+ baseUrl = u.endsWith("/v1") ? u : u + "/v1";
+ }
  if (!baseUrl)
  continue;
  const def = support[providerId];
+ if (!def)
+ continue;
  const items = configuredModels.filter((m) => m.provider === providerId);
  let models;
  if (items.length > 0) {
@@ -764,7 +921,11 @@ export async function syncDesktopConfigToModelsJson() {
  const displayName = (item.alias && item.alias.trim()) ||
  (def?.models?.find((m) => m.id === item.modelId)?.name) ||
  item.modelId;
- return configuredModelToPi(item, displayName);
+ const pi = configuredModelToPi(item, displayName);
+ // Disable thinking for local node-llama-cpp: do not send reasoning to the SDK, so thinking-related parameters are never enabled
+ if (providerId === "local")
+ return { ...pi, reasoning: false };
+ return pi;
  });
  }
  else if (def?.models?.length) {
@@ -783,7 +944,7 @@ export async function syncDesktopConfigToModelsJson() {
  continue;
  }
  piProviders[providerId] = {
- name: (userConfig.alias?.trim() || def?.name) || providerId,
+ name: (userConfig?.alias?.trim() || def?.name) || providerId,
  apiKey: defaults.apiKey,
  api: defaults.api,
  baseUrl: baseUrl.replace(/\/$/, ""),
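
Note: for orientation, a plausible piProviders entry the loop above would emit for Ollama; the models array comes from configuredModelToPi, which is outside this diff, so treat the exact shape as an assumption:

// piProviders.ollama (inferred from the code above)
const ollamaEntry = {
  name: "Ollama (local)",               // userConfig?.alias || def?.name || providerId
  apiKey: "OPENAI_API_KEY",             // env-var name from SYNC_DEFAULTS; no real key needed
  api: "openai-completions",
  baseUrl: "http://localhost:11434/v1", // trailing "/" stripped; OLLAMA_BASE_URL overrides if set
  models: [/* entries built by configuredModelToPi(...) */],
};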
package/dist/core/config/provider-support-default.js
@@ -28,6 +28,7 @@ export const DEFAULT_PROVIDER_SUPPORT = {
  "openai-custom": {
  name: "OpenAI (custom)",
  models: [
+ { id: "qwen3.5:4b", name: "Qwen3.5 4B", types: ["llm"] },
  { id: "gpt-4o", name: "GPT-4o", types: ["llm"] },
  { id: "gpt-4o-mini", name: "GPT-4o Mini", types: ["llm"] },
  { id: "gpt-4-turbo", name: "GPT-4 Turbo", types: ["llm"] },
@@ -54,4 +55,30 @@ export const DEFAULT_PROVIDER_SUPPORT = {
  { id: "moonshot-v1-128k", name: "Moonshot 128K", types: ["llm"] },
  ],
  },
+ /** Local Ollama service, compatible with the OpenAI API; baseUrl points at the default local Ollama port */
+ ollama: {
+ name: "Ollama (local)",
+ baseUrl: "http://localhost:11434/v1",
+ models: [
+ { id: "qwen3:4b", name: "Qwen3 4B", types: ["llm"] },
+ { id: "qwen3:8b", name: "Qwen3 8B", types: ["llm"] },
+ { id: "qwen3:14b", name: "Qwen3 14B", types: ["llm"] },
+ { id: "llama3.2:3b", name: "Llama 3.2 3B", types: ["llm"] },
+ { id: "llama3.2:1b", name: "Llama 3.2 1B", types: ["llm"] },
+ { id: "nomic-embed-text", name: "Nomic Embed Text", types: ["embedding"] },
+ ],
+ },
+ /**
+ * Built-in local inference (node-llama-cpp); no Ollama installation required.
+ * baseUrl points at the local LLM child-process server; the model list holds the recommended GGUF models, which can be added or removed on the local-model management page.
+ * No API key required.
+ */
+ local: {
+ name: "Local inference (node-llama-cpp)",
+ baseUrl: "http://127.0.0.1:11435/v1",
+ models: [
+ { id: "local-llm", name: "Local LLM (currently loaded)", types: ["llm"] },
+ { id: "local-embedding", name: "Local embedding (currently loaded)", types: ["embedding"] },
+ ],
+ },
  };
package/dist/core/extensions/index.d.ts
@@ -0,0 +1 @@
+ export { loadExtensionFactories, clearExtensionFactoriesCache } from "./load.js";
package/dist/core/extensions/index.js
@@ -0,0 +1 @@
+ export { loadExtensionFactories, clearExtensionFactoriesCache } from "./load.js";
package/dist/core/extensions/load.d.ts
@@ -0,0 +1,11 @@
+ import type { ExtensionFactory } from "@mariozechner/pi-coding-agent";
+ /**
+ * Scan ~/.openbot/plugins, load every installed extension package, and return an array of ExtensionFactory.
+ * Results are cached per process; call clearExtensionFactoriesCache() to reload.
+ */
+ export declare function loadExtensionFactories(): ExtensionFactory[];
+ /**
+ * Clear the extension-factory cache; the next loadExtensionFactories() call rescans and reloads.
+ * For making an install/uninstall take effect without a restart (provided the caller invokes it at the right moment).
+ */
+ export declare function clearExtensionFactoriesCache(): void;
package/dist/core/extensions/load.js
@@ -0,0 +1,101 @@
+ /**
+ * Load the npm packages installed via `openbot extension install` from the ~/.openbot/plugins directory,
+ * normalize each package's default export into an ExtensionFactory, and return them for AgentManager to inject into DefaultResourceLoader.extensionFactories.
+ */
+ import { existsSync, readFileSync } from "node:fs";
+ import { createRequire } from "node:module";
+ import { join } from "node:path";
+ import { getOpenbotPluginsDir } from "../agent/agent-dir.js";
+ let cachedFactories = null;
+ /**
+ * Read the package names listed under dependencies (and optionalDependencies) in the plugins directory's package.json.
+ * Only returns names that actually exist in node_modules.
+ */
+ function getInstalledPluginNames(pluginsDir) {
+ const pkgPath = join(pluginsDir, "package.json");
+ if (!existsSync(pkgPath))
+ return [];
+ let pkg;
+ try {
+ pkg = JSON.parse(readFileSync(pkgPath, "utf-8"));
+ }
+ catch {
+ return [];
+ }
+ const deps = {
+ ...pkg.dependencies,
+ ...pkg.optionalDependencies,
+ };
+ const names = Object.keys(deps || {});
+ return names.filter((name) => {
+ const dir = join(pluginsDir, "node_modules", name);
+ return existsSync(dir);
+ });
+ }
+ /**
+ * Normalize a package's default export into an ExtensionFactory: (pi) => void.
+ * A plugin may export (pi) => void or () => (pi) => void (a factory); both are unified into (pi) => void here.
+ */
+ function toExtensionFactory(fn) {
+ if (typeof fn !== "function")
+ return null;
+ if (fn.length === 1)
+ return fn; // (pi) => void
+ if (fn.length === 0) {
+ const result = fn();
+ if (typeof result === "function")
+ return result; // () => (pi) => void
+ }
+ return null;
+ }
+ /**
+ * Load a single plugin package; returns an ExtensionFactory or null (logs a warning and returns null on failure).
+ * Resolves via require(pkgName) against the plugins directory's node_modules so the plugin's own dependencies resolve correctly.
+ */
+ function loadOnePlugin(pluginsDir, pkgName) {
+ const require = createRequire(join(pluginsDir, "package.json"));
+ let mod;
+ try {
+ mod = require(pkgName);
+ }
+ catch (err) {
+ console.warn(`[extensions] Failed to load plugin "${pkgName}":`, err);
+ return null;
+ }
+ const def = mod && typeof mod === "object" && "default" in mod ? mod.default : mod;
+ const factory = toExtensionFactory(def);
+ if (!factory) {
+ console.warn(`[extensions] Plugin "${pkgName}" default export is not a function; skipped.`);
+ return null;
+ }
+ return factory;
+ }
+ /**
+ * Scan ~/.openbot/plugins, load every installed extension package, and return an array of ExtensionFactory.
+ * Results are cached per process; call clearExtensionFactoriesCache() to reload.
+ */
+ export function loadExtensionFactories() {
+ if (cachedFactories !== null)
+ return cachedFactories;
+ const pluginsDir = getOpenbotPluginsDir();
+ if (!existsSync(pluginsDir)) {
+ cachedFactories = [];
+ return cachedFactories;
+ }
+ const names = getInstalledPluginNames(pluginsDir);
+ const factories = [];
+ for (const name of names) {
+ const factory = loadOnePlugin(pluginsDir, name);
+ if (factory)
+ factories.push(factory);
+ }
+ cachedFactories = factories;
+ return factories;
+ }
+ /**
+ * Clear the extension-factory cache; the next loadExtensionFactories() call rescans and reloads.
+ * For making an install/uninstall take effect without a restart (provided the caller invokes it at the right moment).
+ */
+ export function clearExtensionFactoriesCache() {
+ cachedFactories = null;
+ }
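
Note: as an illustration of the export contract toExtensionFactory accepts, a plugin installed under ~/.openbot/plugins could default-export either shape; the package below is hypothetical:

// index.js of a hypothetical plugin "my-openbot-plugin"
// Shape 1: (pi) => void — fn.length === 1, used as the factory directly.
export default function myExtension(pi) {
  // register tools, hooks, etc. on the pi instance here
}
// Shape 2: () => (pi) => void — a zero-arg factory; toExtensionFactory calls
// it once and uses the returned function.
// export default () => (pi) => { /* ... */ };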
package/dist/core/local-llm-server/download-model.d.ts
@@ -0,0 +1,16 @@
+ export declare const DEFAULT_LLM_MODEL_URI = "hf:unsloth/Qwen3.5-4B-GGUF/Qwen3.5-4B-Q5_K_M.gguf";
+ export interface DownloadModelOptions {
+ useMirror?: boolean;
+ signal?: AbortSignal;
+ onProgress?: (p: {
+ downloadedSize: number;
+ totalSize: number;
+ percent: number;
+ }) => void;
+ }
+ /**
+ * Download a model into the local cache directory.
+ * @returns the resolved local file path
+ */
+ export declare function downloadModel(modelUri: string, options?: DownloadModelOptions): Promise<string>;
+ export declare function getResolvedBasename(modelUri: string): string;
package/dist/core/local-llm-server/download-model.js
@@ -0,0 +1,37 @@
+ /**
+ * Local model download (shared by the CLI and the Nest LocalModelsService).
+ * Uses node-llama-cpp resolveModelFile; the cache directory is ~/.openbot/.cached_models/.
+ */
+ import { basename } from "node:path";
+ import { LOCAL_LLM_CACHE_DIR } from "./model-resolve.js";
+ export const DEFAULT_LLM_MODEL_URI = "hf:unsloth/Qwen3.5-4B-GGUF/Qwen3.5-4B-Q5_K_M.gguf";
+ /**
+ * Download a model into the local cache directory.
+ * @returns the resolved local file path
+ */
+ export async function downloadModel(modelUri, options = {}) {
+ const { resolveModelFile } = await import("node-llama-cpp");
+ const { useMirror = false, signal, onProgress } = options;
+ const hfToken = process.env.HF_TOKEN || process.env.HUGGING_FACE_TOKEN;
+ const opts = {
+ directory: LOCAL_LLM_CACHE_DIR,
+ endpoints: {
+ huggingFace: useMirror ? "https://hf-mirror.com/" : "https://huggingface.co/",
+ },
+ };
+ if (signal)
+ opts.signal = signal;
+ if (hfToken)
+ opts.headers = { Authorization: `Bearer ${hfToken}` };
+ if (onProgress) {
+ opts.onProgress = ({ downloadedSize, totalSize }) => {
+ const percent = totalSize ? Math.round((downloadedSize / totalSize) * 100) : 0;
+ onProgress({ downloadedSize, totalSize, percent });
+ };
+ }
+ const resolved = await resolveModelFile(modelUri, opts);
+ return resolved;
+ }
+ export function getResolvedBasename(modelUri) {
+ return basename(modelUri.replace(/^hf:[^/]+\//, "").replace(/\//g, "_"));
+ }
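
Note: a minimal usage sketch of downloadModel, assuming only the exports shown above:

import { downloadModel, DEFAULT_LLM_MODEL_URI } from "./download-model.js";

const controller = new AbortController(); // controller.abort() cancels the download
const modelPath = await downloadModel(DEFAULT_LLM_MODEL_URI, {
  useMirror: false, // true routes the download through hf-mirror.com
  signal: controller.signal,
  onProgress: ({ percent }) => process.stdout.write(`\rdownloading ${percent}%`),
});
console.log(`\nmodel cached at ${modelPath}`);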
package/dist/core/local-llm-server/index.d.ts
@@ -0,0 +1,32 @@
+ /**
+ * local-llm-server entry point.
+ *
+ * Two run modes:
+ * 1. Child-process mode (--child): loads the model and starts the HTTP server directly; invoked via fork from the main process.
+ * 2. Main-process mode (default exports): forks the child process, manages its lifecycle, and hands the baseUrl to the caller.
+ *
+ * The main process starts it via startLocalLlmServer(), which returns { baseUrl, stop }.
+ * Once ready, the child notifies the main process over IPC with { type: "ready" }.
+ */
+ export interface LocalLlmServerOptions {
+ port?: number;
+ llmModelPath?: string;
+ embeddingModelPath?: string;
+ /** Context window size in tokens, default 32768 (32K); must fit system + tools + conversation; reduce it in the agent config when VRAM is tight */
+ contextSize?: number;
+ /** Timeout in ms to wait for the child process to become ready, default 300000 (5 minutes; cold starts and large-model loads can be slow) */
+ readyTimeoutMs?: number;
+ }
+ export interface LocalLlmServerHandle {
+ baseUrl: string;
+ stop: () => void;
+ }
+ /**
+ * Stop the local LLM child-process server (if it is running). Use this to stop before starting again when switching models.
+ */
+ export declare function stopLocalLlmServer(): void;
+ /**
+ * Start the local LLM child-process server.
+ * If already started, returns the existing handle (singleton). Stop first, then restart with new options.
+ */
+ export declare function startLocalLlmServer(opts?: LocalLlmServerOptions): Promise<LocalLlmServerHandle>;
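
Note: a usage sketch of the declared API; the model path is a placeholder, and the stop-before-restart rule follows the doc comments above:

import { startLocalLlmServer, stopLocalLlmServer } from "./index.js";

const { baseUrl, stop } = await startLocalLlmServer({
  port: 11435,
  llmModelPath: "/path/to/Qwen3.5-4B-Q5_K_M.gguf", // placeholder
  contextSize: 32768, // lower this if VRAM is tight
});
console.log(`OpenAI-compatible endpoint at ${baseUrl}`); // e.g. http://127.0.0.1:11435/v1
stop(); // or stopLocalLlmServer(); required before restarting with new options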