@lobehub/lobehub 2.0.0-next.186 → 2.0.0-next.188

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (140)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/models.json +89 -5
  4. package/locales/ar/plugin.json +5 -0
  5. package/locales/ar/providers.json +1 -0
  6. package/locales/bg-BG/models.json +68 -0
  7. package/locales/bg-BG/plugin.json +5 -0
  8. package/locales/bg-BG/providers.json +1 -0
  9. package/locales/de-DE/models.json +85 -0
  10. package/locales/de-DE/plugin.json +5 -0
  11. package/locales/de-DE/providers.json +1 -0
  12. package/locales/en-US/models.json +11 -10
  13. package/locales/en-US/plugin.json +5 -0
  14. package/locales/en-US/providers.json +1 -0
  15. package/locales/es-ES/models.json +72 -0
  16. package/locales/es-ES/plugin.json +5 -0
  17. package/locales/es-ES/providers.json +1 -0
  18. package/locales/fa-IR/models.json +86 -0
  19. package/locales/fa-IR/plugin.json +5 -0
  20. package/locales/fa-IR/providers.json +1 -0
  21. package/locales/fr-FR/models.json +49 -0
  22. package/locales/fr-FR/plugin.json +5 -0
  23. package/locales/fr-FR/providers.json +1 -0
  24. package/locales/it-IT/models.json +82 -0
  25. package/locales/it-IT/plugin.json +5 -0
  26. package/locales/it-IT/providers.json +1 -0
  27. package/locales/ja-JP/models.json +42 -5
  28. package/locales/ja-JP/plugin.json +5 -0
  29. package/locales/ja-JP/providers.json +1 -0
  30. package/locales/ko-KR/models.json +54 -0
  31. package/locales/ko-KR/plugin.json +5 -0
  32. package/locales/ko-KR/providers.json +1 -0
  33. package/locales/nl-NL/models.json +12 -1
  34. package/locales/nl-NL/plugin.json +5 -0
  35. package/locales/nl-NL/providers.json +1 -0
  36. package/locales/pl-PL/models.json +46 -0
  37. package/locales/pl-PL/plugin.json +5 -0
  38. package/locales/pl-PL/providers.json +1 -0
  39. package/locales/pt-BR/models.json +59 -0
  40. package/locales/pt-BR/plugin.json +5 -0
  41. package/locales/pt-BR/providers.json +1 -0
  42. package/locales/ru-RU/models.json +85 -0
  43. package/locales/ru-RU/plugin.json +5 -0
  44. package/locales/ru-RU/providers.json +1 -0
  45. package/locales/tr-TR/models.json +81 -0
  46. package/locales/tr-TR/plugin.json +5 -0
  47. package/locales/tr-TR/providers.json +1 -0
  48. package/locales/vi-VN/models.json +54 -0
  49. package/locales/vi-VN/plugin.json +5 -0
  50. package/locales/vi-VN/providers.json +1 -0
  51. package/locales/zh-CN/models.json +42 -5
  52. package/locales/zh-CN/plugin.json +5 -0
  53. package/locales/zh-CN/providers.json +1 -0
  54. package/locales/zh-TW/models.json +85 -0
  55. package/locales/zh-TW/plugin.json +5 -0
  56. package/locales/zh-TW/providers.json +1 -0
  57. package/package.json +1 -1
  58. package/packages/builtin-tool-gtd/src/manifest.ts +13 -8
  59. package/packages/builtin-tool-gtd/src/systemRole.ts +54 -19
  60. package/packages/builtin-tool-knowledge-base/package.json +1 -0
  61. package/packages/builtin-tool-knowledge-base/src/client/Inspector/ReadKnowledge/index.tsx +97 -0
  62. package/packages/builtin-tool-knowledge-base/src/client/Inspector/SearchKnowledgeBase/index.tsx +75 -0
  63. package/packages/builtin-tool-knowledge-base/src/client/Inspector/index.ts +11 -0
  64. package/packages/builtin-tool-knowledge-base/src/client/Render/ReadKnowledge/FileCard.tsx +12 -12
  65. package/packages/builtin-tool-knowledge-base/src/client/Render/ReadKnowledge/index.tsx +16 -25
  66. package/packages/builtin-tool-knowledge-base/src/client/Render/SearchKnowledgeBase/Item/index.tsx +21 -47
  67. package/packages/builtin-tool-knowledge-base/src/client/Render/SearchKnowledgeBase/index.tsx +19 -31
  68. package/packages/builtin-tool-knowledge-base/src/client/Render/index.ts +0 -5
  69. package/packages/builtin-tool-knowledge-base/src/client/index.ts +5 -1
  70. package/packages/builtin-tool-knowledge-base/src/executor/index.ts +119 -0
  71. package/packages/builtin-tool-local-system/package.json +1 -0
  72. package/packages/builtin-tool-local-system/src/client/Inspector/EditLocalFile/index.tsx +44 -29
  73. package/packages/builtin-tool-local-system/src/client/Inspector/GrepContent/index.tsx +20 -18
  74. package/packages/builtin-tool-local-system/src/client/Inspector/ListLocalFiles/index.tsx +76 -0
  75. package/packages/builtin-tool-local-system/src/client/Inspector/ReadLocalFile/index.tsx +8 -32
  76. package/packages/builtin-tool-local-system/src/client/Inspector/RenameLocalFile/index.tsx +62 -0
  77. package/packages/builtin-tool-local-system/src/client/Inspector/SearchLocalFiles/index.tsx +17 -11
  78. package/packages/builtin-tool-local-system/src/client/Inspector/WriteLocalFile/index.tsx +61 -0
  79. package/packages/builtin-tool-local-system/src/client/Inspector/index.ts +6 -0
  80. package/packages/builtin-tool-local-system/src/client/Render/EditLocalFile/index.tsx +6 -1
  81. package/packages/builtin-tool-local-system/src/client/Render/SearchFiles/SearchQuery/SearchView.tsx +19 -31
  82. package/packages/builtin-tool-local-system/src/client/Render/SearchFiles/SearchQuery/index.tsx +2 -42
  83. package/packages/builtin-tool-local-system/src/client/Render/index.ts +0 -2
  84. package/packages/builtin-tool-local-system/src/client/components/FilePathDisplay.tsx +56 -0
  85. package/packages/builtin-tool-local-system/src/client/components/index.ts +2 -0
  86. package/packages/builtin-tool-local-system/src/executor/index.ts +435 -0
  87. package/packages/builtin-tool-web-browsing/src/client/Inspector/Search/index.tsx +32 -5
  88. package/packages/fetch-sse/src/__tests__/request.test.ts +608 -0
  89. package/packages/model-bank/src/aiModels/aihubmix.ts +44 -8
  90. package/packages/model-bank/src/aiModels/google.ts +49 -17
  91. package/packages/model-bank/src/aiModels/hunyuan.ts +20 -0
  92. package/packages/model-bank/src/aiModels/infiniai.ts +48 -7
  93. package/packages/model-bank/src/aiModels/lobehub.ts +13 -11
  94. package/packages/model-bank/src/aiModels/minimax.ts +46 -2
  95. package/packages/model-bank/src/aiModels/ollamacloud.ts +40 -5
  96. package/packages/model-bank/src/aiModels/openai.ts +6 -3
  97. package/packages/model-bank/src/aiModels/qwen.ts +1 -1
  98. package/packages/model-bank/src/aiModels/siliconcloud.ts +60 -0
  99. package/packages/model-bank/src/aiModels/vertexai.ts +77 -44
  100. package/packages/model-bank/src/aiModels/volcengine.ts +111 -2
  101. package/packages/model-bank/src/aiModels/zenmux.ts +19 -13
  102. package/packages/model-bank/src/aiModels/zhipu.ts +64 -2
  103. package/packages/model-bank/src/types/aiModel.ts +3 -0
  104. package/packages/model-runtime/src/core/contextBuilders/google.test.ts +84 -0
  105. package/packages/model-runtime/src/core/contextBuilders/google.ts +37 -1
  106. package/packages/model-runtime/src/providers/volcengine/index.ts +2 -1
  107. package/packages/model-runtime/src/providers/zhipu/index.test.ts +0 -27
  108. package/packages/model-runtime/src/providers/zhipu/index.ts +1 -1
  109. package/packages/model-runtime/src/utils/modelParse.ts +26 -21
  110. package/packages/types/src/agent/chatConfig.ts +6 -2
  111. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +40 -1
  112. package/src/features/ChatInput/ActionBar/Model/GPT52ProReasoningEffortSlider.tsx +59 -0
  113. package/src/features/ChatInput/ActionBar/Model/GPT52ReasoningEffortSlider.tsx +61 -0
  114. package/src/features/ChatInput/ActionBar/Model/TextVerbositySlider.tsx +1 -1
  115. package/src/features/ChatInput/ActionBar/Model/ThinkingLevel2Slider.tsx +58 -0
  116. package/src/features/ChatInput/ActionBar/Model/ThinkingLevelSlider.tsx +10 -8
  117. package/src/helpers/toolEngineering/index.ts +1 -1
  118. package/src/locales/default/plugin.ts +6 -0
  119. package/src/server/modules/Mecha/AgentToolsEngine/__tests__/index.test.ts +1 -1
  120. package/src/server/modules/Mecha/AgentToolsEngine/index.ts +1 -1
  121. package/src/services/chat/mecha/modelParamsResolver.ts +11 -0
  122. package/src/store/chat/slices/builtinTool/actions/index.ts +1 -11
  123. package/src/store/tool/slices/builtin/executors/index.ts +4 -0
  124. package/src/styles/text.ts +1 -1
  125. package/src/tools/executionRuntimes.ts +3 -8
  126. package/src/tools/identifiers.ts +1 -1
  127. package/src/tools/index.ts +1 -1
  128. package/src/tools/inspectors.ts +5 -0
  129. package/src/tools/renders.ts +6 -12
  130. package/packages/builtin-tool-local-system/src/client/Render/RenameLocalFile/index.tsx +0 -37
  131. package/src/store/chat/slices/builtinTool/actions/__tests__/localSystem.test.ts +0 -201
  132. package/src/store/chat/slices/builtinTool/actions/knowledgeBase.ts +0 -163
  133. package/src/store/chat/slices/builtinTool/actions/localSystem.ts +0 -241
  134. package/src/tools/knowledge-base/ExecutionRuntime/index.ts +0 -25
  135. package/src/tools/knowledge-base/Render/ReadKnowledge/index.tsx +0 -29
  136. package/src/tools/knowledge-base/Render/SearchKnowledgeBase/index.tsx +0 -29
  137. package/src/tools/knowledge-base/Render/index.ts +0 -7
  138. package/src/tools/knowledge-base/index.ts +0 -12
  139. package/src/tools/local-system/ExecutionRuntime/index.ts +0 -9
  140. package/src/tools/local-system/systemRole.ts +0 -1
@@ -73,6 +73,8 @@
73
73
  "builtins.lobe-gtd.title": "Công cụ Nhiệm vụ",
74
74
  "builtins.lobe-knowledge-base.apiName.readKnowledge": "Đọc nội dung Thư viện",
75
75
  "builtins.lobe-knowledge-base.apiName.searchKnowledgeBase": "Tìm kiếm Thư viện",
76
+ "builtins.lobe-knowledge-base.inspector.andMoreFiles": "và {{count}} tệp nữa",
77
+ "builtins.lobe-knowledge-base.inspector.noResults": "Không có kết quả",
76
78
  "builtins.lobe-knowledge-base.title": "Thư viện",
77
79
  "builtins.lobe-local-system.apiName.editLocalFile": "Chỉnh sửa tệp",
78
80
  "builtins.lobe-local-system.apiName.getCommandOutput": "Lấy kết quả lệnh",
@@ -86,6 +88,8 @@
86
88
  "builtins.lobe-local-system.apiName.runCommand": "Chạy lệnh",
87
89
  "builtins.lobe-local-system.apiName.searchLocalFiles": "Tìm kiếm tệp",
88
90
  "builtins.lobe-local-system.apiName.writeLocalFile": "Ghi tệp",
91
+ "builtins.lobe-local-system.inspector.noResults": "Không có kết quả",
92
+ "builtins.lobe-local-system.inspector.rename.result": "<old>{{oldName}}</old> → <new>{{newName}}</new>",
89
93
  "builtins.lobe-local-system.title": "Hệ thống Cục bộ",
90
94
  "builtins.lobe-page-agent.apiName.batchUpdate": "Cập nhật hàng loạt nút",
91
95
  "builtins.lobe-page-agent.apiName.compareSnapshots": "So sánh ảnh chụp",
@@ -143,6 +147,7 @@
143
147
  "builtins.lobe-web-browsing.apiName.crawlMultiPages": "Đọc nhiều trang",
144
148
  "builtins.lobe-web-browsing.apiName.crawlSinglePage": "Đọc nội dung trang",
145
149
  "builtins.lobe-web-browsing.apiName.search": "Tìm kiếm trang",
150
+ "builtins.lobe-web-browsing.inspector.noResults": "Không có kết quả",
146
151
  "builtins.lobe-web-browsing.title": "Tìm kiếm Web",
147
152
  "confirm": "Xác nhận",
148
153
  "debug.arguments": "Tham số",
@@ -29,6 +29,7 @@
29
29
  "internlm.description": "Tổ chức mã nguồn mở tập trung vào nghiên cứu mô hình lớn và công cụ, cung cấp nền tảng hiệu quả, dễ sử dụng để tiếp cận các mô hình và thuật toán tiên tiến.",
30
30
  "jina.description": "Thành lập năm 2020, Jina AI là công ty hàng đầu về AI tìm kiếm. Bộ công cụ tìm kiếm của họ bao gồm mô hình vector, bộ xếp hạng lại và mô hình ngôn ngữ nhỏ để xây dựng ứng dụng tìm kiếm sinh và đa phương thức chất lượng cao.",
31
31
  "lmstudio.description": "LM Studio là ứng dụng máy tính để phát triển và thử nghiệm LLM ngay trên máy của bạn.",
32
+ "lobehub.description": "LobeHub Cloud sử dụng API chính thức để truy cập các mô hình AI và tính mức sử dụng bằng Tín dụng dựa trên số lượng token của mô hình.",
32
33
  "minimax.description": "Thành lập năm 2021, MiniMax xây dựng AI đa năng với các mô hình nền tảng đa phương thức, bao gồm mô hình văn bản MoE hàng nghìn tỷ tham số, mô hình giọng nói và thị giác, cùng các ứng dụng như Hailuo AI.",
33
34
  "mistral.description": "Mistral cung cấp các mô hình tổng quát, chuyên biệt và nghiên cứu tiên tiến cho suy luận phức tạp, tác vụ đa ngôn ngữ và tạo mã, với khả năng gọi hàm cho tích hợp tùy chỉnh.",
34
35
  "modelscope.description": "ModelScope là nền tảng mô hình dưới dạng dịch vụ của Alibaba Cloud, cung cấp nhiều mô hình AI và dịch vụ suy luận.",
@@ -268,20 +268,20 @@
268
268
  "chatgpt-4o-latest.description": "ChatGPT-4o 是一款实时更新的动态模型,结合强大的理解与生成能力,适用于客户支持、教育和技术支持等大规模应用场景。",
269
269
  "claude-2.0.description": "Claude 2 提供关键的企业级改进,包括领先的 20 万 token 上下文窗口、减少幻觉、系统提示支持,以及新测试功能:工具调用。",
270
270
  "claude-2.1.description": "Claude 2 提供关键的企业级改进,包括领先的 20 万 token 上下文窗口、减少幻觉、系统提示支持,以及新测试功能:工具调用。",
271
- "claude-3-5-haiku-20241022.description": "Claude 3.5 Haiku 是 Anthropic 推出的最快下一代模型。相比 Claude 3 Haiku,在各项能力上均有提升,并在多个智能基准测试中超越此前的旗舰 Claude 3 Opus。",
271
+ "claude-3-5-haiku-20241022.description": "Claude 3.5 Haiku 是 Anthropic 推出的下一代最快模型,在多项技能上实现了提升,并在多个基准测试中超越了上一代旗舰 Claude 3 Opus。",
272
272
  "claude-3-5-haiku-latest.description": "Claude 3.5 Haiku 提供快速响应,适用于轻量级任务。",
273
- "claude-3-7-sonnet-20250219.description": "Claude 3.7 Sonnet 是 Anthropic 最智能的模型,也是市场上首个混合推理模型。它既能提供近乎即时的响应,也能进行可视化的逐步推理,尤其擅长编程、数据科学、视觉和智能体任务。",
273
+ "claude-3-7-sonnet-20250219.description": "Claude Sonnet 3.7 是 Anthropic 最智能的模型,也是市场上首个混合推理模型,支持近乎即时响应或细致的深度思考,并提供精细化控制。",
274
274
  "claude-3-7-sonnet-latest.description": "Claude 3.7 Sonnet 是 Anthropic 最新、最强大的模型,适用于高度复杂的任务,在性能、智能、流畅性和理解力方面表现卓越。",
275
275
  "claude-3-haiku-20240307.description": "Claude 3 Haiku 是 Anthropic 推出的最快、最紧凑的模型,专为近乎即时响应而设计,具备快速且准确的性能。",
276
276
  "claude-3-opus-20240229.description": "Claude 3 Opus 是 Anthropic 最强大的模型,适用于高度复杂的任务,在性能、智能、流畅性和理解力方面表现卓越。",
277
277
  "claude-3-sonnet-20240229.description": "Claude 3 Sonnet 在智能与速度之间取得平衡,适用于企业级工作负载,提供高效能与低成本的可靠部署。",
278
- "claude-haiku-4-5-20251001.description": "Claude Haiku 4.5 是 Anthropic 推出的最快、最智能的 Haiku 模型,具备闪电般的速度和扩展推理能力。",
278
+ "claude-haiku-4-5-20251001.description": "Claude Haiku 4.5 是 Anthropic 推出的最快且最智能的 Haiku 模型,具备闪电般的速度与深度思考能力。",
279
279
  "claude-opus-4-1-20250805-thinking.description": "Claude Opus 4.1 Thinking 是一款高级变体,能够展示其推理过程。",
280
280
  "claude-opus-4-1-20250805.description": "Claude Opus 4.1 是 Anthropic 最新、最强大的模型,适用于高度复杂的任务,在性能、智能、流畅性和理解力方面表现卓越。",
281
- "claude-opus-4-20250514.description": "Claude Opus 4 是 Anthropic 最强大的模型,适用于高度复杂的任务,在性能、智能、流畅性和理解力方面表现卓越。",
281
+ "claude-opus-4-20250514.description": "Claude Opus 4 是 Anthropic 最强大的模型,专为处理高度复杂任务而设计,在性能、智能、流畅性和理解力方面表现卓越。",
282
282
  "claude-opus-4-5-20251101.description": "Claude Opus 4.5 是 Anthropic 的旗舰模型,结合卓越智能与可扩展性能,适用于需要最高质量响应与推理的复杂任务。",
283
283
  "claude-sonnet-4-20250514-thinking.description": "Claude Sonnet 4 Thinking 可生成近乎即时的响应或可视化的逐步推理过程。",
284
- "claude-sonnet-4-20250514.description": "Claude Sonnet 4 可生成近乎即时的响应或可视化的逐步推理过程。",
284
+ "claude-sonnet-4-20250514.description": "Claude Sonnet 4 是 Anthropic 迄今为止最智能的模型,支持近乎即时响应或逐步深入思考,并为 API 用户提供精细化控制。",
285
285
  "claude-sonnet-4-5-20250929.description": "Claude Sonnet 4.5 是 Anthropic 迄今为止最智能的模型。",
286
286
  "codegeex-4.description": "CodeGeeX-4 是一款强大的 AI 编程助手,支持多语言问答和代码补全,提升开发者效率。",
287
287
  "codegeex4-all-9b.description": "CodeGeeX4-ALL-9B 是一款多语言代码生成模型,支持代码补全与生成、代码解释器、网页搜索、函数调用和仓库级代码问答,覆盖广泛的软件开发场景。是 100 亿参数以下的顶级代码模型。",
@@ -296,6 +296,43 @@
296
296
  "codestral-latest.description": "Codestral 是我们最先进的代码模型;v2(2025年1月)专为低延迟、高频任务(如 FIM、代码修复和测试生成)而设计。",
297
297
  "codestral.description": "Codestral 是 Mistral AI 推出的首个代码模型,具备强大的代码生成能力。",
298
298
  "codex-mini-latest.description": "codex-mini-latest 是为 Codex CLI 微调的 o4-mini 模型。若需直接通过 API 使用,建议从 gpt-4.1 开始。",
299
+ "cogito-2.1:671b.description": "Cogito v2.1 671B 是一款美国开源大语言模型,可免费商用,性能媲美顶级模型,具备更高的 Token 推理效率、128k 长上下文能力以及强大的综合能力。",
300
+ "cogview-4.description": "CogView-4 是智谱推出的首个支持中文字符生成的开源文生图模型,提升了语义理解、图像质量和中英文文本渲染能力,支持任意长度的中英文提示词,并可在指定范围内生成任意分辨率图像。",
301
+ "cohere-command-r-plus.description": "Command R+ 是一款为企业级工作负载优化的先进 RAG 模型。",
302
+ "cohere-command-r.description": "Command R 是一款可扩展的生成模型,专为 RAG 和工具使用场景设计,支持生产级 AI 应用。",
303
+ "cohere/Cohere-command-r-plus.description": "Command R+ 是一款为企业级工作负载优化的先进 RAG 模型。",
304
+ "cohere/Cohere-command-r.description": "Command R 是一款可扩展的生成模型,专为 RAG 和工具使用场景设计,支持生产级 AI 应用。",
305
+ "cohere/command-a.description": "Command A 是 Cohere 迄今为止最强大的模型,擅长工具使用、智能体、RAG 和多语言场景。支持 256K 上下文长度,仅需两块 GPU 即可运行,吞吐量比 Command R+ 08-2024 提高 150%。",
306
+ "cohere/command-r-plus.description": "Command R+ 是 Cohere 最新的大语言模型,针对聊天和长上下文任务进行了优化,旨在实现卓越性能,助力企业从原型走向生产部署。",
307
+ "cohere/command-r.description": "Command R 针对聊天和长上下文任务进行了优化,定位为“可扩展”模型,在高性能与准确性之间实现平衡,助力企业从原型走向生产部署。",
308
+ "cohere/embed-v4.0.description": "一个可将文本、图像或混合内容分类或转换为嵌入向量的模型。",
309
+ "comfyui/flux-dev.description": "FLUX.1 Dev 是一款高质量的文生图模型(10–50 步),非常适合高端创意和艺术输出。",
310
+ "comfyui/flux-kontext-dev.description": "FLUX.1 Kontext-dev 是一款图像编辑模型,支持基于文本的局部编辑和风格迁移。",
311
+ "comfyui/flux-krea-dev.description": "FLUX.1 Krea-dev 是与 Krea 联合开发的安全增强型文生图模型,内置安全过滤机制。",
312
+ "comfyui/flux-schnell.description": "FLUX.1 Schnell 是一款超高速文生图模型,可在 1-4 步内生成高质量图像,适用于实时使用和快速原型开发。",
313
+ "comfyui/stable-diffusion-15.description": "Stable Diffusion 1.5 是一款经典的 512x512 文生图模型,适合快速原型开发和创意实验。",
314
+ "comfyui/stable-diffusion-35-inclclip.description": "Stable Diffusion 3.5 内置 CLIP/T5 编码器,无需外部编码器文件,适用于如 sd3.5_medium_incl_clips 等资源占用较低的模型。",
315
+ "comfyui/stable-diffusion-35.description": "Stable Diffusion 3.5 是下一代文生图模型,提供 Large 和 Medium 两个版本。需使用外部 CLIP 编码器文件,图像质量和提示词响应能力出色。",
316
+ "comfyui/stable-diffusion-custom-refiner.description": "自定义 SDXL 图生图模型。模型文件名应为 custom_sd_lobe.safetensors;如有 VAE,请使用 custom_sd_vae_lobe.safetensors。将模型文件放入 Comfy 指定文件夹中。",
317
+ "comfyui/stable-diffusion-custom.description": "自定义 SD 文生图模型。模型文件名应为 custom_sd_lobe.safetensors;如有 VAE,请使用 custom_sd_vae_lobe.safetensors。将模型文件放入 Comfy 指定文件夹中。",
318
+ "comfyui/stable-diffusion-refiner.description": "SDXL 图生图模型,可对输入图像进行高质量转换,支持风格迁移、图像修复和创意变体生成。",
319
+ "comfyui/stable-diffusion-xl.description": "SDXL 是一款支持 1024x1024 高分辨率生成的文生图模型,图像质量和细节表现更佳。",
320
+ "command-a-03-2025.description": "Command A 是我们迄今为止最强大的模型,擅长工具使用、智能体、RAG 和多语言场景。支持 256K 上下文窗口,仅需两块 GPU 即可运行,吞吐量比 Command R+ 08-2024 提高 150%。",
321
+ "command-light-nightly.description": "为缩短主要版本之间的发布间隔,我们提供 Command 系列的每晚构建版本。command-light-nightly 是 command-light 系列中最新、最具实验性(可能不稳定)的版本,定期更新,适合测试用途,不建议用于生产环境。",
322
+ "command-light.description": "Command 的轻量快速版本,几乎同样强大但响应更快。",
323
+ "command-nightly.description": "为缩短主要版本之间的发布间隔,我们提供 Command 系列的每晚构建版本。command-nightly 是 Command 系列中最新、最具实验性(可能不稳定)的版本,定期更新,适合测试用途,不建议用于生产环境。",
324
+ "command-r-03-2024.description": "Command R 是一款遵循指令的聊天模型,质量更高、可靠性更强、上下文窗口更长,支持代码生成、RAG、工具使用和智能体等复杂工作流。",
325
+ "command-r-08-2024.description": "command-r-08-2024 是 2024 年 8 月发布的 Command R 模型更新版本。",
326
+ "command-r-plus-04-2024.description": "command-r-plus 是 command-r-plus-04-2024 的别名,API 中使用 command-r-plus 即指向该模型。",
327
+ "command-r-plus-08-2024.description": "Command R+ 是一款遵循指令的聊天模型,质量更高、可靠性更强、上下文窗口更长,特别适用于复杂的 RAG 工作流和多步骤工具使用。",
328
+ "command-r-plus.description": "Command R+ 是一款高性能大语言模型,专为真实企业场景和复杂应用设计。",
329
+ "command-r.description": "Command R 是一款针对聊天和长上下文任务优化的大语言模型,适用于动态交互和知识管理。",
330
+ "command-r7b-12-2024.description": "command-r7b-12-2024 是 2024 年 12 月发布的小型高效更新版本,擅长 RAG、工具使用和需要复杂多步骤推理的智能体任务。",
331
+ "command.description": "一款遵循指令的聊天模型,在语言任务中提供更高质量和可靠性,拥有比基础生成模型更长的上下文窗口。",
332
+ "computer-use-preview.description": "computer-use-preview 是为“计算机使用工具”专门训练的模型,能够理解并执行与计算机相关的任务。",
333
+ "dall-e-2.description": "第二代 DALL·E 模型,图像生成更真实、准确,分辨率是第一代的 4 倍。",
334
+ "dall-e-3.description": "最新的 DALL·E 模型,于 2023 年 11 月发布,图像生成更真实、准确,细节表现更强。",
335
+ "databricks/dbrx-instruct.description": "DBRX Instruct 提供跨行业高度可靠的指令处理能力。",
299
336
  "gemini-flash-latest.description": "Latest release of Gemini Flash",
300
337
  "gemini-flash-lite-latest.description": "Latest release of Gemini Flash-Lite",
301
338
  "gemini-pro-latest.description": "Latest release of Gemini Pro",
@@ -73,6 +73,8 @@
73
73
  "builtins.lobe-gtd.title": "任务工具",
74
74
  "builtins.lobe-knowledge-base.apiName.readKnowledge": "读取资源库内容",
75
75
  "builtins.lobe-knowledge-base.apiName.searchKnowledgeBase": "搜索资源库",
76
+ "builtins.lobe-knowledge-base.inspector.andMoreFiles": "还有 {{count}} 个",
77
+ "builtins.lobe-knowledge-base.inspector.noResults": "无结果",
76
78
  "builtins.lobe-knowledge-base.title": "资源库",
77
79
  "builtins.lobe-local-system.apiName.editLocalFile": "编辑文件",
78
80
  "builtins.lobe-local-system.apiName.getCommandOutput": "获取代码输出",
@@ -86,6 +88,8 @@
86
88
  "builtins.lobe-local-system.apiName.runCommand": "执行代码",
87
89
  "builtins.lobe-local-system.apiName.searchLocalFiles": "搜索文件",
88
90
  "builtins.lobe-local-system.apiName.writeLocalFile": "写入文件",
91
+ "builtins.lobe-local-system.inspector.noResults": "无结果",
92
+ "builtins.lobe-local-system.inspector.rename.result": "<old>{{oldName}}</old> → <new>{{newName}}</new>",
89
93
  "builtins.lobe-local-system.title": "本地系统",
90
94
  "builtins.lobe-page-agent.apiName.batchUpdate": "批量更新节点",
91
95
  "builtins.lobe-page-agent.apiName.compareSnapshots": "比较快照",
@@ -143,6 +147,7 @@
143
147
  "builtins.lobe-web-browsing.apiName.crawlMultiPages": "读取多个页面内容",
144
148
  "builtins.lobe-web-browsing.apiName.crawlSinglePage": "读取页面内容",
145
149
  "builtins.lobe-web-browsing.apiName.search": "搜索页面",
150
+ "builtins.lobe-web-browsing.inspector.noResults": "无结果",
146
151
  "builtins.lobe-web-browsing.title": "联网搜索",
147
152
  "confirm": "确认",
148
153
  "debug.arguments": "调用参数",
@@ -29,6 +29,7 @@
29
29
  "internlm.description": "一个专注于大模型研究与工具链的开源组织,提供高效、易用的平台,让前沿模型与算法触手可及。",
30
30
  "jina.description": "Jina AI 成立于 2020 年,是领先的搜索 AI 公司,其搜索技术栈包括向量模型、重排序器与小型语言模型,支持构建高质量的生成式与多模态搜索应用。",
31
31
  "lmstudio.description": "LM Studio 是一款桌面应用,支持在本地开发与实验大语言模型。",
32
+ "lobehub.description": "LobeHub Cloud 使用官方 API 访问 AI 模型,并通过与模型令牌相关的积分来计量使用情况。",
32
33
  "minimax.description": "MiniMax 成立于 2021 年,致力于构建通用 AI,拥有多模态基础模型,包括万亿参数的 MoE 文本模型、语音模型与视觉模型,并推出海螺 AI 等应用。",
33
34
  "mistral.description": "Mistral 提供先进的通用、专业与研究型模型,支持复杂推理、多语言任务与代码生成,具备函数调用能力以实现定制集成。",
34
35
  "modelscope.description": "ModelScope 是阿里云的模型即服务平台,提供丰富的 AI 模型与推理服务。",
@@ -247,6 +247,91 @@
247
247
  "anthropic/claude-opus-4.description": "Opus 4 是 Anthropic 為複雜任務與企業應用設計的旗艦模型。",
248
248
  "anthropic/claude-sonnet-4.5.description": "Claude Sonnet 4.5 是 Anthropic 最新的混合推理模型,針對複雜推理與程式設計進行最佳化。",
249
249
  "anthropic/claude-sonnet-4.description": "Claude Sonnet 4 是 Anthropic 的混合推理模型,具備思考與非思考能力的結合。",
250
+ "ascend-tribe/pangu-pro-moe.description": "Pangu-Pro-MoE 72B-A16B 是一個稀疏大型語言模型,總參數為 720 億,活躍參數為 160 億,採用分組專家模型(MoGE)架構。透過分組選擇專家並限制每組啟用相同數量的專家,以平衡負載並提升在昇騰平台上的部署效率。",
251
+ "aya.description": "Aya 23 是 Cohere 推出的多語言模型,支援 23 種語言,適用於多元應用場景。",
252
+ "aya:35b.description": "Aya 23 是 Cohere 推出的多語言模型,支援 23 種語言,適用於多元應用場景。",
253
+ "azure-DeepSeek-R1-0528.description": "由微軟部署的 DeepSeek R1 已升級為 DeepSeek-R1-0528。此次更新提升了運算能力與後訓練演算法優化,顯著增強推理深度與推論表現,在數學、程式碼與邏輯基準測試中表現優異,接近 O3 與 Gemini 2.5 Pro 等領先模型。",
254
+ "baichuan-m2-32b.description": "Baichuan M2 32B 是百川智能推出的 MoE 模型,具備強大的推理能力。",
255
+ "baichuan/baichuan2-13b-chat.description": "Baichuan-13B 是百川推出的開源、可商用的 130 億參數大型語言模型,在中文與英文權威基準測試中表現同級最佳。",
256
+ "baidu/ERNIE-4.5-300B-A47B.description": "ERNIE-4.5-300B-A47B 是百度推出的 MoE 架構大型語言模型,總參數為 3000 億,每個 token 啟用 470 億參數,兼顧效能與運算效率。作為 ERNIE 4.5 核心模型之一,擅長理解、生成、推理與程式設計。採用多模態異構 MoE 預訓練方法,結合文字與視覺訓練,全面提升能力,特別是在指令遵循與世界知識方面。",
257
+ "baidu/ernie-5.0-thinking-preview.description": "ERNIE 5.0 Thinking Preview 是百度新一代原生多模態 ERNIE 模型,擅長多模態理解、指令遵循、創作、事實問答與工具調用。",
258
+ "black-forest-labs/flux-1.1-pro.description": "FLUX 1.1 Pro 是升級版的 FLUX Pro,具備更快速度、優異的圖像品質與提示遵循能力。",
259
+ "black-forest-labs/flux-dev.description": "FLUX Dev 是 FLUX 的開發版本,僅供非商業用途。",
260
+ "black-forest-labs/flux-pro.description": "FLUX Pro 是專業級 FLUX 模型,專為高品質圖像輸出設計。",
261
+ "black-forest-labs/flux-schnell.description": "FLUX Schnell 是一款針對速度優化的快速圖像生成模型。",
262
+ "c4ai-aya-expanse-32b.description": "Aya Expanse 是一款高效能的 320 億參數多語言模型,透過指令微調、資料仲裁、偏好訓練與模型融合,達到媲美單語模型的表現。支援 23 種語言。",
263
+ "c4ai-aya-expanse-8b.description": "Aya Expanse 是一款高效能的 80 億參數多語言模型,透過指令微調、資料仲裁、偏好訓練與模型融合,達到媲美單語模型的表現。支援 23 種語言。",
264
+ "c4ai-aya-vision-32b.description": "Aya Vision 是一款先進的多模態模型,在語言、文字與視覺基準測試中表現優異。此 320 億參數版本專注於頂級多語言效能,支援 23 種語言。",
265
+ "c4ai-aya-vision-8b.description": "Aya Vision 是一款先進的多模態模型,在語言、文字與視覺基準測試中表現優異。此 80 億參數版本著重於低延遲與穩定效能。",
266
+ "charglm-3.description": "CharGLM-3 專為角色扮演與情感陪伴設計,支援超長多輪記憶與個性化對話。",
267
+ "charglm-4.description": "CharGLM-4 專為角色扮演與情感陪伴設計,支援超長多輪記憶與個性化對話。",
268
+ "chatgpt-4o-latest.description": "ChatGPT-4o 是一款即時更新的動態模型,結合強大的理解與生成能力,適用於客服、教育與技術支援等大規模應用場景。",
269
+ "claude-2.0.description": "Claude 2 提供企業級關鍵改進,包括領先的 20 萬 token 上下文、降低幻覺、系統提示與新測試功能:工具調用。",
270
+ "claude-2.1.description": "Claude 2 提供企業級關鍵改進,包括領先的 20 萬 token 上下文、降低幻覺、系統提示與新測試功能:工具調用。",
271
+ "claude-3-5-haiku-20241022.description": "Claude 3.5 Haiku 是 Anthropic 推出的最快速次世代模型,在多項技能上全面提升,並在多項基準測試中超越前一代旗艦 Claude 3 Opus。",
272
+ "claude-3-5-haiku-latest.description": "Claude 3.5 Haiku 提供快速回應,適用於輕量任務。",
273
+ "claude-3-7-sonnet-20250219.description": "Claude Sonnet 3.7 是 Anthropic 最智慧的模型,也是市場上首款混合推理模型,支援即時回應或延伸思考,並提供細緻控制。",
274
+ "claude-3-7-sonnet-latest.description": "Claude 3.7 Sonnet 是 Anthropic 最新且最強大的模型,適用於高度複雜任務,具備卓越的效能、智慧、流暢度與理解力。",
275
+ "claude-3-haiku-20240307.description": "Claude 3 Haiku 是 Anthropic 推出的最快速且最精簡的模型,設計用於即時回應,具備快速且準確的表現。",
276
+ "claude-3-opus-20240229.description": "Claude 3 Opus 是 Anthropic 最強大的模型,適用於高度複雜任務,具備卓越的效能、智慧、流暢度與理解力。",
277
+ "claude-3-sonnet-20240229.description": "Claude 3 Sonnet 在智慧與速度之間取得平衡,適合企業工作負載,提供高效能與低成本的大規模部署。",
278
+ "claude-haiku-4-5-20251001.description": "Claude Haiku 4.5 是 Anthropic 推出的最快速且最智慧的 Haiku 模型,具備閃電般的速度與延伸思考能力。",
279
+ "claude-opus-4-1-20250805-thinking.description": "Claude Opus 4.1 Thinking 是一個進階版本,能夠揭示其推理過程。",
280
+ "claude-opus-4-1-20250805.description": "Claude Opus 4.1 是 Anthropic 最新且最強大的模型,適用於高度複雜任務,具備卓越的效能、智慧、流暢度與理解力。",
281
+ "claude-opus-4-20250514.description": "Claude Opus 4 是 Anthropic 最強大的模型,適用於高度複雜任務,具備卓越的效能、智慧、流暢度與理解力。",
282
+ "claude-opus-4-5-20251101.description": "Claude Opus 4.5 是 Anthropic 的旗艦模型,結合卓越智慧與可擴展效能,適合需要最高品質回應與推理的複雜任務。",
283
+ "claude-sonnet-4-20250514-thinking.description": "Claude Sonnet 4 Thinking 可產生即時回應或延伸的逐步思考,並顯示其推理過程。",
284
+ "claude-sonnet-4-20250514.description": "Claude Sonnet 4 是 Anthropic 迄今最智慧的模型,提供即時回應或延伸的逐步思考,並為 API 使用者提供細緻控制。",
285
+ "claude-sonnet-4-5-20250929.description": "Claude Sonnet 4.5 是 Anthropic 迄今最智慧的模型。",
286
+ "codegeex-4.description": "CodeGeeX-4 是一款強大的 AI 程式輔助工具,支援多語言問答與程式碼補全,能有效提升開發者的生產力。",
287
+ "codegeex4-all-9b.description": "CodeGeeX4-ALL-9B 是一個多語言程式碼生成模型,支援程式碼補全與生成、程式碼解釋器、網頁搜尋、函式呼叫與倉庫層級的程式碼問答,涵蓋多種軟體開發場景。它是參數數量低於 100 億的頂尖程式碼模型之一。",
288
+ "codegemma.description": "CodeGemma 是一款輕量級模型,適用於多樣化的程式任務,能快速迭代與整合。",
289
+ "codegemma:2b.description": "CodeGemma 是一款輕量級模型,適用於多樣化的程式任務,能快速迭代與整合。",
290
+ "codellama.description": "Code Llama 是一款專注於程式碼生成與討論的大型語言模型,支援多種語言,優化開發者的工作流程。",
291
+ "codellama/CodeLlama-34b-Instruct-hf.description": "Code Llama 是一款專注於程式碼生成與討論的大型語言模型,支援多種語言,優化開發者的工作流程。",
292
+ "codellama:13b.description": "Code Llama 是一款專注於程式碼生成與討論的大型語言模型,支援多種語言,優化開發者的工作流程。",
293
+ "codellama:34b.description": "Code Llama 是一款專注於程式碼生成與討論的大型語言模型,支援多種語言,優化開發者的工作流程。",
294
+ "codellama:70b.description": "Code Llama 是一款專注於程式碼生成與討論的大型語言模型,支援多種語言,優化開發者的工作流程。",
295
+ "codeqwen.description": "CodeQwen1.5 是一款以大量程式碼資料訓練的大型語言模型,專為處理複雜的程式任務而設計。",
296
+ "codestral-latest.description": "Codestral 是我們最先進的程式模型;v2(2025 年 1 月)針對低延遲、高頻率任務如 FIM、程式碼修正與測試生成進行優化。",
297
+ "codestral.description": "Codestral 是 Mistral AI 推出的首款程式模型,具備強大的程式碼生成能力。",
298
+ "codex-mini-latest.description": "codex-mini-latest 是針對 Codex CLI 微調的 o4-mini 模型。如需直接使用 API,建議從 gpt-4.1 開始。",
299
+ "cogito-2.1:671b.description": "Cogito v2.1 671B 是一款美國開源的大型語言模型,可商業使用,效能媲美頂尖模型,具備更高的 Token 推理效率、128k 長上下文能力與整體強大表現。",
300
+ "cogview-4.description": "CogView-4 是智譜推出的首款開源文字轉圖像模型,支援中文字符生成。它提升了語意理解、圖像品質與中英文文字渲染能力,支援任意長度的雙語提示詞,並可在指定範圍內生成任意解析度的圖像。",
301
+ "cohere-command-r-plus.description": "Command R+ 是一款針對企業工作負載優化的先進 RAG 模型。",
302
+ "cohere-command-r.description": "Command R 是一款可擴展的生成模型,設計用於 RAG 與工具使用,支援生產級 AI 應用。",
303
+ "cohere/Cohere-command-r-plus.description": "Command R+ 是一款針對企業工作負載優化的先進 RAG 模型。",
304
+ "cohere/Cohere-command-r.description": "Command R 是一款可擴展的生成模型,設計用於 RAG 與工具使用,支援生產級 AI 應用。",
305
+ "cohere/command-a.description": "Command A 是 Cohere 目前最強大的模型,擅長工具使用、代理任務、RAG 與多語言應用。具備 256K 上下文長度,僅需兩張 GPU 即可運行,吞吐量比 Command R+ 08-2024 高出 150%。",
306
+ "cohere/command-r-plus.description": "Command R+ 是 Cohere 最新的大型語言模型,針對聊天與長上下文任務進行優化,幫助企業從原型邁向生產階段。",
307
+ "cohere/command-r.description": "Command R 針對聊天與長上下文任務進行優化,定位為「可擴展」模型,在高效能與準確性之間取得平衡,協助企業從原型邁向生產階段。",
308
+ "cohere/embed-v4.0.description": "一款可將文字、圖像或混合內容分類或轉換為嵌入向量的模型。",
309
+ "comfyui/flux-dev.description": "FLUX.1 Dev 是一款高品質的文字轉圖像模型(10–50 步),非常適合創意與藝術性輸出。",
310
+ "comfyui/flux-kontext-dev.description": "FLUX.1 Kontext-dev 是一款支援文字引導編輯的圖像編輯模型,包含局部編輯與風格轉換功能。",
311
+ "comfyui/flux-krea-dev.description": "FLUX.1 Krea-dev 是與 Krea 共同開發的安全增強型文字轉圖像模型,內建安全過濾機制。",
312
+ "comfyui/flux-schnell.description": "FLUX.1 Schnell 是一款超高速文字轉圖像模型,可在 1–4 步內生成高品質圖像,適合即時應用與快速原型設計。",
313
+ "comfyui/stable-diffusion-15.description": "Stable Diffusion 1.5 是經典的 512x512 文字轉圖像模型,適合快速原型設計與創意實驗。",
314
+ "comfyui/stable-diffusion-35-inclclip.description": "Stable Diffusion 3.5 內建 CLIP/T5 編碼器,無需外部編碼器檔案,適用於如 sd3.5_medium_incl_clips 等資源使用較低的模型。",
315
+ "comfyui/stable-diffusion-35.description": "Stable Diffusion 3.5 是新一代文字轉圖像模型,提供 Large 與 Medium 版本。需搭配外部 CLIP 編碼器檔案,具備優異的圖像品質與提示詞遵循度。",
316
+ "comfyui/stable-diffusion-custom-refiner.description": "自訂 SDXL 圖像轉圖像模型。請使用 custom_sd_lobe.safetensors 作為模型檔名;若有 VAE,請使用 custom_sd_vae_lobe.safetensors。將模型檔案放入 Comfy 指定資料夾中。",
317
+ "comfyui/stable-diffusion-custom.description": "自訂 SD 文字轉圖像模型。請使用 custom_sd_lobe.safetensors 作為模型檔名;若有 VAE,請使用 custom_sd_vae_lobe.safetensors。將模型檔案放入 Comfy 指定資料夾中。",
318
+ "comfyui/stable-diffusion-refiner.description": "SDXL 圖像轉圖像模型,能從輸入圖像進行高品質轉換,支援風格轉換、修復與創意變化。",
319
+ "comfyui/stable-diffusion-xl.description": "SDXL 是一款支援 1024x1024 高解析度生成的文字轉圖像模型,具備更佳的圖像品質與細節表現。",
320
+ "command-a-03-2025.description": "Command A 是我們目前最強大的模型,擅長工具使用、代理任務、RAG 與多語言場景。具備 256K 上下文視窗,僅需兩張 GPU 即可運行,吞吐量比 Command R+ 08-2024 高出 150%。",
321
+ "command-light-nightly.description": "為縮短主要版本之間的間隔,我們提供 Command 系列的夜間版本。command-light-nightly 是 command-light 系列中最新、最具實驗性(可能不穩定)的版本,會定期更新,建議僅用於測試環境。",
322
+ "command-light.description": "Command 的輕量快速版本,功能接近但速度更快。",
323
+ "command-nightly.description": "為縮短主要版本之間的間隔,我們提供 Command 系列的夜間版本。command-nightly 是 Command 系列中最新、最具實驗性(可能不穩定)的版本,會定期更新,建議僅用於測試環境。",
324
+ "command-r-03-2024.description": "Command R 是一款遵循指令的聊天模型,品質更高、穩定性更強,並具備比早期模型更長的上下文視窗。支援程式碼生成、RAG、工具使用與代理等複雜工作流程。",
325
+ "command-r-08-2024.description": "command-r-08-2024 是 2024 年 8 月發布的 Command R 模型更新版本。",
326
+ "command-r-plus-04-2024.description": "command-r-plus 是 command-r-plus-04-2024 的別名,因此在 API 中使用 command-r-plus 即指向該模型。",
327
+ "command-r-plus-08-2024.description": "Command R+ 是一款遵循指令的聊天模型,品質更高、穩定性更強,並具備比前代模型更長的上下文視窗。最適合用於複雜的 RAG 工作流程與多步驟工具使用。",
328
+ "command-r-plus.description": "Command R+ 是一款高效能的大型語言模型,專為真實企業場景與複雜應用而設計。",
329
+ "command-r.description": "Command R 是一款針對聊天與長上下文任務優化的大型語言模型,適合動態互動與知識管理。",
330
+ "command-r7b-12-2024.description": "command-r7b-12-2024 是 2024 年 12 月發布的小型高效更新版本,擅長需要複雜多步推理的 RAG、工具使用與代理任務。",
331
+ "command.description": "一款遵循指令的聊天模型,在語言任務中提供更高品質與可靠性,具備比基礎生成模型更長的上下文視窗。",
332
+ "computer-use-preview.description": "computer-use-preview 是一款專為「電腦使用工具」訓練的模型,能理解並執行與電腦相關的任務。",
333
+ "dall-e-2.description": "第二代 DALL·E 模型,具備更真實、準確的圖像生成能力,解析度為第一代的四倍。",
334
+ "dall-e-3.description": "最新的 DALL·E 模型於 2023 年 11 月發布,支援更真實、準確的圖像生成,細節表現更強。",
250
335
  "gemini-flash-latest.description": "Gemini Flash 最新版本",
251
336
  "gemini-flash-lite-latest.description": "Gemini Flash-Lite 最新版本",
252
337
  "gemini-pro-latest.description": "Gemini Pro 最新版本",
@@ -73,6 +73,8 @@
73
73
  "builtins.lobe-gtd.title": "GTD 工具",
74
74
  "builtins.lobe-knowledge-base.apiName.readKnowledge": "讀取資源庫內容",
75
75
  "builtins.lobe-knowledge-base.apiName.searchKnowledgeBase": "搜尋資源庫",
76
+ "builtins.lobe-knowledge-base.inspector.andMoreFiles": "還有 {{count}} 個檔案",
77
+ "builtins.lobe-knowledge-base.inspector.noResults": "沒有結果",
76
78
  "builtins.lobe-knowledge-base.title": "資源庫",
77
79
  "builtins.lobe-local-system.apiName.editLocalFile": "編輯檔案",
78
80
  "builtins.lobe-local-system.apiName.getCommandOutput": "取得程式碼輸出",
@@ -86,6 +88,8 @@
86
88
  "builtins.lobe-local-system.apiName.runCommand": "執行程式碼",
87
89
  "builtins.lobe-local-system.apiName.searchLocalFiles": "搜尋檔案",
88
90
  "builtins.lobe-local-system.apiName.writeLocalFile": "寫入檔案",
91
+ "builtins.lobe-local-system.inspector.noResults": "沒有結果",
92
+ "builtins.lobe-local-system.inspector.rename.result": "<old>{{oldName}}</old> → <new>{{newName}}</new>",
89
93
  "builtins.lobe-local-system.title": "本機系統",
90
94
  "builtins.lobe-page-agent.apiName.batchUpdate": "批次更新節點",
91
95
  "builtins.lobe-page-agent.apiName.compareSnapshots": "比較快照",
@@ -143,6 +147,7 @@
143
147
  "builtins.lobe-web-browsing.apiName.crawlMultiPages": "讀取多個頁面內容",
144
148
  "builtins.lobe-web-browsing.apiName.crawlSinglePage": "讀取頁面內容",
145
149
  "builtins.lobe-web-browsing.apiName.search": "搜尋頁面",
150
+ "builtins.lobe-web-browsing.inspector.noResults": "沒有結果",
146
151
  "builtins.lobe-web-browsing.title": "網路搜尋",
147
152
  "confirm": "確定",
148
153
  "debug.arguments": "調用參數",
@@ -29,6 +29,7 @@
29
29
  "internlm.description": "一個專注於大型模型研究與工具的開源組織,提供高效、易用的平台,讓尖端模型與演算法更易於取得。",
30
30
  "jina.description": "Jina AI 成立於 2020 年,是領先的搜尋 AI 公司。其搜尋技術堆疊包含向量模型、重排序器與小型語言模型,打造可靠且高品質的生成式與多模態搜尋應用。",
31
31
  "lmstudio.description": "LM Studio 是一款桌面應用程式,可在本機開發與實驗大型語言模型。",
32
+ "lobehub.description": "LobeHub Cloud 使用官方 API 存取 AI 模型,並以與模型代幣相關的點數(Credits)來計算使用量。",
32
33
  "minimax.description": "MiniMax 成立於 2021 年,致力於打造通用 AI,擁有多模態基礎模型,包括兆級參數的 MoE 文本模型、語音模型與視覺模型,並推出如海螺 AI 等應用。",
33
34
  "mistral.description": "Mistral 提供先進的通用、專業與研究模型,支援複雜推理、多語言任務與程式碼生成,並支援函式呼叫以實現自訂整合。",
34
35
  "modelscope.description": "ModelScope 是阿里雲的模型即服務平台,提供多樣化的 AI 模型與推理服務。",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/lobehub",
3
- "version": "2.0.0-next.186",
3
+ "version": "2.0.0-next.188",
4
4
  "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -82,36 +82,41 @@ export const GTDManifest: BuiltinToolManifest = {
82
82
  },
83
83
  },
84
84
  {
85
- description:
86
- 'Update todo items with batch operations. Each operation specifies a type (add, update, remove, complete) and the relevant data.',
85
+ description: `Update todo items with batch operations. Each operation type requires specific fields:
86
+ - "add": requires "text" (the todo text to add)
87
+ - "update": requires "index", optional "newText" and/or "completed"
88
+ - "remove": requires "index" only
89
+ - "complete": requires "index" only (marks item as completed)`,
87
90
  name: GTDApiName.updateTodos,
88
91
  renderDisplayControl: 'expand',
89
92
  parameters: {
90
93
  properties: {
91
94
  operations: {
92
- description: 'Array of update operations to apply.',
95
+ description:
96
+ 'Array of update operations. IMPORTANT: For "complete" and "remove" operations, only pass "type" and "index" - no other fields needed.',
93
97
  items: {
94
98
  properties: {
95
99
  type: {
96
- description: 'Operation type: add, update, remove, or complete.',
100
+ description:
101
+ 'Operation type. "add" needs text, "update" needs index + optional newText/completed, "remove" and "complete" need index only.',
97
102
  enum: ['add', 'update', 'remove', 'complete'],
98
103
  type: 'string',
99
104
  },
100
105
  text: {
101
- description: 'For "add": the text to add.',
106
+ description: 'Required for "add" only: the text to add.',
102
107
  type: 'string',
103
108
  },
104
109
  index: {
105
110
  description:
106
- 'For "update", "remove", "complete": the index of the item (0-based).',
111
+ 'Required for "update", "remove", "complete": the item index (0-based).',
107
112
  type: 'number',
108
113
  },
109
114
  newText: {
110
- description: 'For "update": the new text.',
115
+ description: 'Optional for "update" only: the new text.',
111
116
  type: 'string',
112
117
  },
113
118
  completed: {
114
- description: 'For "update": the new completed status.',
119
+ description: 'Optional for "update" only: set completed status.',
115
120
  type: 'boolean',
116
121
  },
117
122
  },
@@ -22,35 +22,48 @@ export const systemPrompt = `You have GTD (Getting Things Done) tools to help ma
22
22
  </tool_overview>
23
23
 
24
24
  <default_workflow>
25
- **IMPORTANT: Always create a Plan first, then Todos.**
26
- When a user asks you to help with a task, goal, or project:
25
+ **CRITICAL: Most tasks do NOT need GTD tools. Only use them for complex, multi-step projects.**
26
+
27
+ **DO NOT use GTD tools for:**
28
+ - Simple one-step tasks (rename a file, send a message, search something)
29
+ - Quick questions or lookups
30
+ - Tasks that can be completed immediately with a single action
31
+ - Any request that doesn't require tracking progress over time
32
+
33
+ **ONLY use GTD tools when ALL of these are true:**
34
+ 1. The task has multiple distinct steps that need tracking
35
+ 2. The user explicitly wants to plan or organize something
36
+ 3. Progress needs to be tracked over time (not completed in one response)
37
+
38
+ **When GTD tools ARE appropriate:**
27
39
  1. **First**, use \`createPlan\` to document the goal and relevant context
28
40
  2. **Then**, use \`createTodos\` to break down the plan into actionable steps
29
41
 
30
- This "Plan-First" approach ensures:
31
- - Clear documentation of the objective before execution
32
- - Better organized and contextual todo items
33
- - Trackable progress from goal to completion
34
-
35
- **Exception**: Only skip the plan and create todos directly when the user explicitly says:
36
- - "Just give me a todo list"
37
- - "I only need action items"
38
- - "Skip the plan, just todos"
39
- - Or similar explicit requests for todos only
42
+ **Examples:**
43
+ - "Rename this file" Just do it, no GTD needed
44
+ - "What's the weather?" Just answer, no GTD needed
45
+ - "Help me write an email" → Just write it, no GTD needed
46
+ - ✅ "Help me plan a trip to Japan" → Use createPlan + createTodos
47
+ - "I want to learn Python, create a study plan" Use createPlan + createTodos
48
+ - "Help me organize my project tasks" → Use createTodos (user explicitly wants organization)
40
49
  </default_workflow>
41
50
 
42
51
  <when_to_use>
43
52
  **Use Plans when:**
44
- - User states a goal, project, or objective
45
- - There's context, constraints, or background to capture
46
- - The task requires strategic thinking before execution
47
- - You need to document the "why" behind the work
53
+ - User explicitly asks to "plan", "organize", or "break down" a complex goal
54
+ - The project spans multiple sessions or days
55
+ - There's significant context, constraints, or background worth documenting
56
+ - The task has 5+ distinct steps that benefit from strategic organization
48
57
 
49
58
  **Use Todos when:**
50
59
  - Breaking down a plan into actionable steps (after creating a plan)
51
- - User explicitly requests only action items
52
- - Capturing quick, simple tasks that don't need planning
53
- - Tracking progress on concrete deliverables
60
+ - User explicitly requests a checklist or task list
61
+ - Tracking progress on a multi-step project
62
+
63
+ **DO NOT use Plans/Todos when:**
64
+ - The task can be done in one action (rename, delete, send, search, etc.)
65
+ - The user just wants something done, not organized
66
+ - The task will be completed in this single conversation
54
67
 
55
68
  **Use Async Tasks when:**
56
69
  - **The request requires gathering external information**: User wants you to research, investigate, or find information that you don't already know. This requires web searches, reading multiple sources, and synthesizing information.
@@ -82,6 +95,28 @@ Use \`execTask\` for a single task, \`execTasks\` for multiple parallel tasks.
82
95
  - **Track progress**: Use todo completion to measure plan progress
83
96
  </best_practices>
84
97
 
98
+ <updateTodos_usage>
99
+ When using \`updateTodos\`, each operation type requires specific fields:
100
+
101
+ **Minimal required fields per operation type:**
102
+ - \`{ "type": "add", "text": "todo text" }\` - only type + text
103
+ - \`{ "type": "complete", "index": 0 }\` - only type + index
104
+ - \`{ "type": "remove", "index": 0 }\` - only type + index
105
+ - \`{ "type": "update", "index": 0, "newText": "..." }\` - type + index + optional newText/completed
106
+
107
+ **Example - mark items 0 and 1 as complete:**
108
+ \`\`\`json
109
+ {
110
+ "operations": [
111
+ { "type": "complete", "index": 0 },
112
+ { "type": "complete", "index": 1 }
113
+ ]
114
+ }
115
+ \`\`\`
116
+
117
+ **DO NOT** add extra fields like \`"completed": true\` for complete operations - they are ignored.
118
+ </updateTodos_usage>
119
+
85
120
  <todo_granularity>
86
121
  **IMPORTANT: Keep todos focused on major stages, not detailed sub-tasks.**
87
122
 
@@ -5,6 +5,7 @@
5
5
  "exports": {
6
6
  ".": "./src/index.ts",
7
7
  "./client": "./src/client/index.ts",
8
+ "./executor": "./src/executor/index.ts",
8
9
  "./executionRuntime": "./src/ExecutionRuntime/index.ts"
9
10
  },
10
11
  "main": "./src/index.ts",
@@ -0,0 +1,97 @@
1
+ 'use client';
2
+
3
+ import { type BuiltinInspectorProps } from '@lobechat/types';
4
+ import { createStaticStyles, cx } from 'antd-style';
5
+ import { memo } from 'react';
6
+ import { useTranslation } from 'react-i18next';
7
+
8
+ import { highlightTextStyles, shinyTextStyles } from '@/styles';
9
+
10
+ import { type ReadKnowledgeArgs, type ReadKnowledgeState } from '../../..';
11
+
+ // Styles for the inspector row: `root` clamps its text to a single line
+ // (-webkit-line-clamp: 1) and `moreFiles` is the muted "and N more files" suffix.
12
+ const styles = createStaticStyles(({ css, cssVar }) => ({
13
+ moreFiles: css`
14
+ margin-inline-start: 4px;
15
+ color: ${cssVar.colorTextTertiary};
16
+ `,
17
+ root: css`
18
+ overflow: hidden;
19
+ display: -webkit-box;
20
+ -webkit-box-orient: vertical;
21
+ -webkit-line-clamp: 1;
22
+
23
+ color: ${cssVar.colorTextSecondary};
24
+ `,
+ // NOTE(review): statusIcon is not referenced anywhere in this file —
+ // confirm it is consumed elsewhere or remove it.
25
+ statusIcon: css`
26
+ margin-block-end: -2px;
27
+ margin-inline-start: 4px;
28
+ `,
29
+ }));
30
+
+ /**
+  * Inline inspector row for the knowledge-base `readKnowledge` tool call.
+  *
+  * While tool arguments are still streaming it shows a shimmering label with
+  * the number of requested fileIds (filenames are not known yet). After that,
+  * once pluginState.files is populated, it shows the first filename plus an
+  * "and N more files" suffix; if filenames are still unavailable it falls
+  * back to the file count, and renders the bare label when nothing is known.
+  */
31
+ export const ReadKnowledgeInspector = memo<
32
+ BuiltinInspectorProps<ReadKnowledgeArgs, ReadKnowledgeState>
33
+ >(({ args, partialArgs, isArgumentsStreaming, isLoading, pluginState }) => {
34
+ const { t } = useTranslation('plugin');
35
+
+ // Prefer finalized args; fall back to partial (still-streaming) args.
36
+ const fileIds = args?.fileIds || partialArgs?.fileIds || [];
37
+ const fileCount = fileIds.length;
38
+ const files = pluginState?.files || [];
39
+ const firstFilename = files[0]?.filename;
+ // May be -1 when files is empty; only rendered behind the firstFilename guard.
40
+ const remainingCount = files.length - 1;
41
+
42
+ // During argument streaming - show file count since we don't have filenames yet
43
+ if (isArgumentsStreaming) {
44
+ if (fileCount === 0)
45
+ return (
46
+ <div className={cx(styles.root, shinyTextStyles.shinyText)}>
47
+ <span>{t('builtins.lobe-knowledge-base.apiName.readKnowledge')}</span>
48
+ </div>
49
+ );
50
+
51
+ return (
52
+ <div className={cx(styles.root, shinyTextStyles.shinyText)}>
53
+ <span>{t('builtins.lobe-knowledge-base.apiName.readKnowledge')}: </span>
+ {/* NOTE(review): 'file'/'files' is hardcoded English (here and in the
+ fallback below) — consider an i18n plural key with { count } */}
54
+ <span className={highlightTextStyles.gold}>
55
+ {fileCount} {fileCount === 1 ? 'file' : 'files'}
56
+ </span>
57
+ </div>
58
+ );
59
+ }
60
+
61
+ // After loading - show filename(s)
62
+ const renderFileInfo = () => {
63
+ // If we have filenames from pluginState, show them
64
+ if (firstFilename) {
65
+ return (
66
+ <>
67
+ <span className={highlightTextStyles.gold}>{firstFilename}</span>
68
+ {remainingCount > 0 && (
69
+ <span className={styles.moreFiles}>
70
+ {t('builtins.lobe-knowledge-base.inspector.andMoreFiles', { count: remainingCount })}
71
+ </span>
72
+ )}
73
+ </>
74
+ );
75
+ }
76
+ // Fallback to file count if no filenames available yet
77
+ if (fileCount > 0) {
78
+ return (
79
+ <span className={highlightTextStyles.gold}>
80
+ {fileCount} {fileCount === 1 ? 'file' : 'files'}
81
+ </span>
82
+ );
83
+ }
84
+ return null;
85
+ };
86
+
+ // Shiny (loading) styling persists while the tool call is still executing.
87
+ return (
88
+ <div className={cx(styles.root, isLoading && shinyTextStyles.shinyText)}>
89
+ <span style={{ marginInlineStart: 2 }}>
90
+ <span>{t('builtins.lobe-knowledge-base.apiName.readKnowledge')}: </span>
91
+ {renderFileInfo()}
92
+ </span>
93
+ </div>
94
+ );
95
+ });
96
+
97
+ ReadKnowledgeInspector.displayName = 'ReadKnowledgeInspector';