@lobehub/chat 1.63.2 → 1.64.0

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Files changed (151)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/models.json +25 -16
  4. package/locales/ar/plugin.json +16 -0
  5. package/locales/ar/portal.json +0 -5
  6. package/locales/ar/tool.json +18 -0
  7. package/locales/bg-BG/models.json +25 -16
  8. package/locales/bg-BG/plugin.json +16 -0
  9. package/locales/bg-BG/portal.json +0 -5
  10. package/locales/bg-BG/tool.json +18 -0
  11. package/locales/de-DE/models.json +25 -16
  12. package/locales/de-DE/plugin.json +16 -0
  13. package/locales/de-DE/portal.json +0 -5
  14. package/locales/de-DE/tool.json +18 -0
  15. package/locales/en-US/models.json +24 -15
  16. package/locales/en-US/plugin.json +16 -0
  17. package/locales/en-US/portal.json +0 -5
  18. package/locales/en-US/tool.json +18 -0
  19. package/locales/es-ES/models.json +25 -16
  20. package/locales/es-ES/plugin.json +16 -0
  21. package/locales/es-ES/portal.json +0 -5
  22. package/locales/es-ES/tool.json +18 -0
  23. package/locales/fa-IR/models.json +25 -16
  24. package/locales/fa-IR/plugin.json +16 -0
  25. package/locales/fa-IR/portal.json +0 -5
  26. package/locales/fa-IR/tool.json +18 -0
  27. package/locales/fr-FR/models.json +25 -16
  28. package/locales/fr-FR/plugin.json +16 -0
  29. package/locales/fr-FR/portal.json +0 -5
  30. package/locales/fr-FR/tool.json +18 -0
  31. package/locales/it-IT/models.json +25 -16
  32. package/locales/it-IT/plugin.json +16 -0
  33. package/locales/it-IT/portal.json +0 -5
  34. package/locales/it-IT/tool.json +18 -0
  35. package/locales/ja-JP/models.json +24 -15
  36. package/locales/ja-JP/plugin.json +16 -0
  37. package/locales/ja-JP/portal.json +0 -5
  38. package/locales/ja-JP/tool.json +18 -0
  39. package/locales/ko-KR/models.json +25 -16
  40. package/locales/ko-KR/plugin.json +16 -0
  41. package/locales/ko-KR/portal.json +0 -5
  42. package/locales/ko-KR/tool.json +18 -0
  43. package/locales/nl-NL/models.json +25 -16
  44. package/locales/nl-NL/plugin.json +16 -0
  45. package/locales/nl-NL/portal.json +0 -5
  46. package/locales/nl-NL/tool.json +18 -0
  47. package/locales/pl-PL/models.json +25 -16
  48. package/locales/pl-PL/plugin.json +16 -0
  49. package/locales/pl-PL/portal.json +0 -5
  50. package/locales/pl-PL/tool.json +18 -0
  51. package/locales/pt-BR/models.json +24 -15
  52. package/locales/pt-BR/plugin.json +16 -0
  53. package/locales/pt-BR/portal.json +0 -5
  54. package/locales/pt-BR/tool.json +18 -0
  55. package/locales/ru-RU/models.json +25 -16
  56. package/locales/ru-RU/plugin.json +16 -0
  57. package/locales/ru-RU/portal.json +0 -5
  58. package/locales/ru-RU/tool.json +18 -0
  59. package/locales/tr-TR/models.json +25 -16
  60. package/locales/tr-TR/plugin.json +16 -0
  61. package/locales/tr-TR/portal.json +0 -5
  62. package/locales/tr-TR/tool.json +18 -0
  63. package/locales/vi-VN/models.json +24 -15
  64. package/locales/vi-VN/plugin.json +16 -0
  65. package/locales/vi-VN/portal.json +0 -5
  66. package/locales/vi-VN/tool.json +18 -0
  67. package/locales/zh-CN/models.json +30 -21
  68. package/locales/zh-CN/plugin.json +16 -0
  69. package/locales/zh-CN/portal.json +1 -6
  70. package/locales/zh-CN/tool.json +19 -1
  71. package/locales/zh-TW/models.json +23 -14
  72. package/locales/zh-TW/plugin.json +16 -0
  73. package/locales/zh-TW/portal.json +0 -5
  74. package/locales/zh-TW/tool.json +18 -0
  75. package/package.json +1 -1
  76. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/Mobile/index.tsx +1 -0
  77. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/SearchTags.tsx +17 -0
  78. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Tags.tsx +8 -2
  79. package/src/config/tools.ts +16 -0
  80. package/src/database/repositories/aiInfra/index.test.ts +29 -0
  81. package/src/features/ChatInput/ActionBar/Search/index.tsx +6 -15
  82. package/src/features/Conversation/Messages/Assistant/Tool/Inspector/ToolTitle.tsx +76 -0
  83. package/src/features/Conversation/Messages/Assistant/Tool/Inspector/index.tsx +8 -21
  84. package/src/features/Conversation/Messages/Assistant/Tool/Render/CustomRender.tsx +62 -50
  85. package/src/features/PluginsUI/Render/BuiltinType/index.tsx +11 -1
  86. package/src/features/PluginsUI/Render/index.tsx +3 -0
  87. package/src/features/Portal/Plugins/Body/index.tsx +3 -7
  88. package/src/features/Portal/Plugins/Header.tsx +14 -2
  89. package/src/hooks/useAgentEnableSearch.ts +27 -0
  90. package/src/libs/agent-runtime/perplexity/index.test.ts +26 -0
  91. package/src/libs/agent-runtime/utils/streams/openai.ts +1 -1
  92. package/src/libs/trpc/client/index.ts +1 -0
  93. package/src/libs/trpc/client/tools.ts +20 -0
  94. package/src/locales/default/plugin.ts +16 -0
  95. package/src/locales/default/portal.ts +0 -5
  96. package/src/locales/default/tool.ts +18 -0
  97. package/src/server/modules/SearXNG.ts +33 -0
  98. package/src/server/routers/lambda/message.ts +11 -0
  99. package/src/server/routers/tools/__tests__/fixtures/searXNG.ts +668 -0
  100. package/src/server/routers/tools/__tests__/search.test.ts +47 -0
  101. package/src/server/routers/tools/index.ts +3 -0
  102. package/src/server/routers/tools/search.ts +38 -0
  103. package/src/services/__tests__/__snapshots__/chat.test.ts.snap +1 -0
  104. package/src/services/_auth.ts +4 -4
  105. package/src/services/chat.ts +31 -10
  106. package/src/services/message/_deprecated.ts +4 -0
  107. package/src/services/message/client.ts +4 -0
  108. package/src/services/message/server.ts +5 -5
  109. package/src/services/message/type.ts +2 -0
  110. package/src/services/search.ts +9 -0
  111. package/src/store/aiInfra/slices/aiModel/selectors.ts +12 -5
  112. package/src/store/chat/slices/builtinTool/action.ts +121 -0
  113. package/src/store/chat/slices/builtinTool/initialState.ts +2 -0
  114. package/src/store/chat/slices/builtinTool/selectors.ts +3 -0
  115. package/src/store/chat/slices/message/action.ts +11 -0
  116. package/src/store/chat/slices/plugin/action.test.ts +2 -2
  117. package/src/store/chat/slices/plugin/action.ts +2 -2
  118. package/src/store/tool/selectors/tool.ts +5 -12
  119. package/src/store/tool/slices/builtin/selectors.ts +1 -1
  120. package/src/store/user/slices/modelList/action.ts +6 -0
  121. package/src/store/user/slices/modelList/selectors/keyVaults.ts +1 -0
  122. package/src/tools/index.ts +7 -0
  123. package/src/tools/portals.ts +6 -1
  124. package/src/tools/renders.ts +3 -0
  125. package/src/{features/Portal/Plugins → tools/web-browsing/Portal}/Footer.tsx +13 -10
  126. package/src/tools/web-browsing/Portal/ResultList/SearchItem/CategoryAvatar.tsx +70 -0
  127. package/src/tools/web-browsing/Portal/ResultList/SearchItem/TitleExtra.tsx +38 -0
  128. package/src/tools/web-browsing/Portal/ResultList/SearchItem/Video.tsx +135 -0
  129. package/src/tools/web-browsing/Portal/ResultList/SearchItem/index.tsx +91 -0
  130. package/src/tools/web-browsing/Portal/ResultList/index.tsx +21 -0
  131. package/src/tools/web-browsing/Portal/index.tsx +65 -0
  132. package/src/tools/web-browsing/Render/ConfigForm/Form.tsx +110 -0
  133. package/src/tools/web-browsing/Render/ConfigForm/SearchXNGIcon.tsx +20 -0
  134. package/src/tools/web-browsing/Render/ConfigForm/index.tsx +67 -0
  135. package/src/tools/web-browsing/Render/ConfigForm/style.tsx +63 -0
  136. package/src/tools/web-browsing/Render/SearchQuery/SearchView.tsx +88 -0
  137. package/src/tools/web-browsing/Render/SearchQuery/index.tsx +61 -0
  138. package/src/tools/web-browsing/Render/SearchResult/SearchResultItem.tsx +72 -0
  139. package/src/tools/web-browsing/Render/SearchResult/ShowMore.tsx +68 -0
  140. package/src/tools/web-browsing/Render/SearchResult/index.tsx +105 -0
  141. package/src/tools/web-browsing/Render/index.tsx +57 -0
  142. package/src/tools/web-browsing/components/EngineAvatar.tsx +32 -0
  143. package/src/tools/web-browsing/components/SearchBar.tsx +134 -0
  144. package/src/tools/web-browsing/const.ts +11 -0
  145. package/src/tools/web-browsing/index.ts +102 -0
  146. package/src/types/message/chat.ts +1 -0
  147. package/src/types/message/tools.ts +10 -0
  148. package/src/types/tool/builtin.ts +2 -0
  149. package/src/types/tool/search.ts +38 -0
  150. package/src/types/user/settings/keyVaults.ts +8 -1
  151. package/src/utils/toolManifest.ts +20 -0
package/locales/zh-CN/models.json CHANGED
@@ -170,9 +170,6 @@
   "MiniMax-Text-01": {
   "description": "在 MiniMax-01系列模型中,我们做了大胆创新:首次大规模实现线性注意力机制,传统 Transformer架构不再是唯一的选择。这个模型的参数量高达4560亿,其中单次激活459亿。模型综合性能比肩海外顶尖模型,同时能够高效处理全球最长400万token的上下文,是GPT-4o的32倍,Claude-3.5-Sonnet的20倍。"
   },
- "Nous-Hermes-2-Mixtral-8x7B-DPO": {
- "description": "Hermes 2 Mixtral 8x7B DPO 是一款高度灵活的多模型合并,旨在提供卓越的创造性体验。"
- },
   "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
   "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) 是高精度的指令模型,适用于复杂计算。"
   },
@@ -321,20 +318,29 @@
   "description": "基础版本模型 (V4),32K上下文长度,灵活应用于各类场景"
   },
   "SenseChat-5": {
- "description": "最新版本模型 (V5.5),128K上下文长度,在数学推理、英文对话、指令跟随以及长文本理解等领域能力显著提升,比肩GPT-4o"
+ "description": "最新版本模型 (V5.5),128K上下文长度,在数学推理、英文对话、指令跟随以及长文本理解等领域能力显著提升,比肩GPT-4o"
+ },
+ "SenseChat-5-1202": {
+ "description": "是基于V5.5的最新版本,较上版本在中英文基础能力,聊天,理科知识, 文科知识,写作,数理逻辑,字数控制 等几个维度的表现有显著提升。"
   },
   "SenseChat-5-Cantonese": {
- "description": "32K上下文长度,在粤语的对话理解上超越了GPT-4,在知识、推理、数学及代码编写等多个领域均能与GPT-4 Turbo相媲美"
+ "description": "专门为适应香港地区的对话习惯、俚语及本地知识而设计,在粤语的对话理解上超越了GPT-4,在知识、推理、数学及代码编写等多个领域均能与GPT-4 Turbo相媲美。"
   },
   "SenseChat-Character": {
- "description": "标准版模型,8K上下文长度,高响应速度"
+ "description": "拟人对话标准版模型,8K上下文长度,高响应速度"
   },
   "SenseChat-Character-Pro": {
- "description": "高级版模型,32K上下文长度,能力全面提升,支持中/英文对话"
+ "description": "拟人对话高级版模型,32K上下文长度,能力全面提升,支持中/英文对话"
   },
   "SenseChat-Turbo": {
   "description": "适用于快速问答、模型微调场景"
   },
+ "SenseChat-Turbo-1202": {
+ "description": "是最新的轻量版本模型,达到全量模型90%以上能力,显著降低推理成本。"
+ },
+ "SenseChat-Vision": {
+ "description": "最新版本模型 (V5.5),支持多图的输入,全面实现模型基础能力优化,在对象属性识别、空间关系、动作事件识别、场景理解、情感识别、逻辑常识推理和文本理解生成上都实现了较大提升。"
+ },
   "Skylark2-lite-8k": {
   "description": "云雀(Skylark)第二代模型,Skylark2-lite模型有较高的响应速度,适用于实时性要求高、成本敏感、对模型精度要求不高的场景,上下文窗口长度为8k。"
   },
@@ -561,7 +567,7 @@
   "description": "Codestral 是 Mistral AI 的首款代码模型,为代码生成任务提供优异支持。"
   },
   "codestral-latest": {
- "description": "Codestral是专注于代码生成的尖端生成模型,优化了中间填充和代码补全任务。"
+ "description": "Codestral 是我们最先进的编码语言模型,第二个版本于2025年1月发布,专门从事低延迟、高频任务如中间填充(RST)、代码纠正和测试生成。"
   },
   "cognitivecomputations/dolphin-mixtral-8x22b": {
   "description": "Dolphin Mixtral 8x22B 是一款为指令遵循、对话和编程设计的模型。"
@@ -1019,6 +1025,12 @@
   "hunyuan-standard-vision": {
   "description": "混元最新多模态模型,支持多语种作答,中英文能力均衡。"
   },
+ "hunyuan-translation": {
+ "description": "支持中文和英语、日语、法语、葡萄牙语、西班牙语、土耳其语、俄语、阿拉伯语、韩语、意大利语、德语、越南语、马来语、印尼语15种语言互译,基于多场景翻译评测集自动化评估COMET评分,在十余种常用语种中外互译能力上整体优于市场同规模模型。"
+ },
+ "hunyuan-translation-lite": {
+ "description": "混元翻译模型支持自然语言对话式翻译;支持中文和英语、日语、法语、葡萄牙语、西班牙语、土耳其语、俄语、阿拉伯语、韩语、意大利语、德语、越南语、马来语、印尼语15种语言互译。"
+ },
   "hunyuan-turbo": {
   "description": "本版本优化:数据指令scaling,大幅提升模型通用泛化能力;大幅提升数学、代码、逻辑推理能力;优化文本理解字词理解相关能力;优化文本创作内容生成质量"
   },
@@ -1047,10 +1059,10 @@
   "description": "InternLM2 版本最大的模型,专注于高度复杂的任务"
   },
   "internlm2.5-latest": {
- "description": "我们仍在维护的老版本模型,经过多轮迭代有着极其优异且稳定的性能,包含 7B、20B 多种模型参数量可选,支持 1M 的上下文长度以及更强的指令跟随和工具调用能力。默认指向我们最新发布的 InternLM2.5 系列模型"
+ "description": "我们仍在维护的老版本模型,经过多轮迭代有着极其优异且稳定的性能,包含 7B、20B 多种模型参数量可选,支持 1M 的上下文长度以及更强的指令跟随和工具调用能力。默认指向我们最新发布的 InternLM2.5 系列模型,当前指向 internlm2.5-20b-chat。"
   },
   "internlm3-latest": {
- "description": "我们最新的模型系列,有着卓越的推理性能,领跑同量级开源模型。默认指向我们最新发布的 InternLM3 系列模型"
+ "description": "我们最新的模型系列,有着卓越的推理性能,领跑同量级开源模型。默认指向我们最新发布的 InternLM3 系列模型,当前指向 internlm3-8b-instruct。"
   },
   "jina-deepsearch-v1": {
   "description": "深度搜索结合了网络搜索、阅读和推理,可进行全面调查。您可以将其视为一个代理,接受您的研究任务 - 它会进行广泛搜索并经过多次迭代,然后才能给出答案。这个过程涉及持续的研究、推理和从各个角度解决问题。这与直接从预训练数据生成答案的标准大模型以及依赖一次性表面搜索的传统 RAG 系统有着根本的不同。"
@@ -1220,9 +1232,6 @@
   "meta-llama/llama-3-8b-instruct": {
   "description": "Llama 3 8B Instruct 优化了高质量对话场景,性能优于许多闭源模型。"
   },
- "meta-llama/llama-3.1-405b-instruct": {
- "description": "Llama 3.1 405B Instruct 是 Meta最新推出的版本,优化用于生成高质量对话,超越了许多领导闭源模型。"
- },
   "meta-llama/llama-3.1-70b-instruct": {
   "description": "Llama 3.1 70B Instruct 专为高质量对话而设计,在人类评估中表现突出,特别适合高交互场景。"
   },
@@ -1286,9 +1295,6 @@
   "microsoft/WizardLM-2-8x22B": {
   "description": "WizardLM 2 是微软AI提供的语言模型,在复杂对话、多语言、推理和智能助手领域表现尤为出色。"
   },
- "microsoft/wizardlm 2-7b": {
- "description": "WizardLM 2 7B 是微软AI最新的快速轻量化模型,性能接近于现有开源领导模型的10倍。"
- },
   "microsoft/wizardlm-2-8x22b": {
   "description": "WizardLM-2 8x22B 是微软AI最先进的Wizard模型,显示出极其竞争力的表现。"
   },
@@ -1580,10 +1586,10 @@
   "qwq-32b-preview": {
   "description": "QwQ模型是由 Qwen 团队开发的实验性研究模型,专注于增强 AI 推理能力。"
   },
- "solar-1-mini-chat": {
+ "solar-mini": {
   "description": "Solar Mini 是一种紧凑型 LLM,性能优于 GPT-3.5,具备强大的多语言能力,支持英语和韩语,提供高效小巧的解决方案。"
   },
- "solar-1-mini-chat-ja": {
+ "solar-mini-ja": {
   "description": "Solar Mini (Ja) 扩展了 Solar Mini 的能力,专注于日语,同时在英语和韩语的使用中保持高效和卓越性能。"
   },
   "solar-pro": {
@@ -1619,6 +1625,9 @@
   "step-1.5v-mini": {
   "description": "该模型拥有强大的视频理解能力。"
   },
+ "step-1o-turbo-vision": {
+ "description": "该模型拥有强大的图像理解能力,在数理、代码领域强于1o。模型比1o更小,输出速度更快。"
+ },
   "step-1o-vision-32k": {
   "description": "该模型拥有强大的图像理解能力。相比于 step-1v 系列模型,拥有更强的视觉性能。"
   },
@@ -1634,12 +1643,12 @@
   "step-2-mini": {
   "description": "基于新一代自研Attention架构MFA的极速大模型,用极低成本达到和step1类似的效果,同时保持了更高的吞吐和更快响应时延。能够处理通用任务,在代码能力上具备特长。"
   },
- "taichu2_mm": {
- "description": "融合了图像理解、知识迁移、逻辑归因等能力,在图文问答领域表现突出"
- },
   "taichu_llm": {
   "description": "基于海量高质数据训练,具有更强的文本理解、内容创作、对话问答等能力"
   },
+ "taichu_vl": {
+ "description": "融合了图像理解、知识迁移、逻辑归因等能力,在图文问答领域表现突出"
+ },
   "text-embedding-3-large": {
   "description": "最强大的向量化模型,适用于英文和非英文任务"
   },
package/locales/zh-CN/plugin.json CHANGED
@@ -134,6 +134,22 @@
   "plugin": "插件运行中..."
   },
   "pluginList": "插件列表",
+ "search": {
+ "config": {
+ "addKey": "添加秘钥",
+ "close": "删除",
+ "confirm": "已完成配置并重试"
+ },
+ "searchxng": {
+ "baseURL": "请输入",
+ "description": "请输入 SearchXNG 的网址,即可开始联网搜索",
+ "keyPlaceholder": "请输入秘钥",
+ "title": "配置 SearchXNG 搜索引擎",
+ "unconfiguredDesc": "请联系管理员完成 SearchXNG 搜索引擎配置,即可开始联网搜索",
+ "unconfiguredTitle": "暂未配置 SearchXNG 搜索引擎"
+ },
+ "title": "联网搜索"
+ },
   "setting": "插件设置",
   "settings": {
   "indexUrl": {
package/locales/zh-CN/portal.json CHANGED
@@ -7,11 +7,6 @@
   }
   },
   "Plugins": "插件",
- "actions": {
- "genAiMessage": "创建助手消息",
- "summary": "总结",
- "summaryTooltip": "总结当前内容"
- },
   "artifacts": {
   "display": {
   "code": "代码",
@@ -32,4 +27,4 @@
   "files": "文件",
   "messageDetail": "消息详情",
   "title": "工作区"
- }
+ }
package/locales/zh-CN/tool.json CHANGED
@@ -6,5 +6,23 @@
   "generating": "生成中...",
   "images": "图片:",
   "prompt": "提示词"
+ },
+ "search": {
+ "createNewSearch": "创建新的搜索记录",
+ "emptyResult": "没有搜索到结果,请修改关键词后重试",
+ "genAiMessage": "创建助手消息",
+ "includedTooltip": "当前搜索结果会进入会话的上下文中",
+ "keywords": "关键词:",
+ "scoreTooltip": "相关性分数,该分数越高说明与查询关键词越相关",
+ "searchBar": {
+ "button": "搜索",
+ "placeholder": "关键词",
+ "tooltip": "将会重新获取搜索结果,并创建一条新的总结消息"
+ },
+ "searchEngine": "搜索引擎:",
+ "searchResult": "搜索数量:",
+ "summary": "总结",
+ "summaryTooltip": "总结当前内容",
+ "viewMoreResults": "查看更多 {{results}} 个结果"
   }
- }
+ }
package/locales/zh-TW/models.json CHANGED
@@ -170,9 +170,6 @@
   "MiniMax-Text-01": {
   "description": "在 MiniMax-01系列模型中,我們做了大膽創新:首次大規模實現線性注意力機制,傳統 Transformer架構不再是唯一的選擇。這個模型的參數量高達4560億,其中單次激活459億。模型綜合性能比肩海外頂尖模型,同時能夠高效處理全球最長400萬token的上下文,是GPT-4o的32倍,Claude-3.5-Sonnet的20倍。"
   },
- "Nous-Hermes-2-Mixtral-8x7B-DPO": {
- "description": "Hermes 2 Mixtral 8x7B DPO 是一款高度靈活的多模型合併,旨在提供卓越的創造性體驗。"
- },
   "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
   "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) 是高精度的指令模型,適用於複雜計算。"
   },
@@ -323,6 +320,9 @@
   "SenseChat-5": {
   "description": "最新版本模型 (V5.5),128K上下文長度,在數學推理、英文對話、指令跟隨以及長文本理解等領域能力顯著提升,比肩GPT-4o"
   },
+ "SenseChat-5-1202": {
+ "description": "是基於V5.5的最新版本,較上版本在中英文基礎能力、聊天、理科知識、文科知識、寫作、數理邏輯、字數控制等幾個維度的表現有顯著提升。"
+ },
   "SenseChat-5-Cantonese": {
   "description": "32K上下文長度,在粵語的對話理解上超越了GPT-4,在知識、推理、數學及程式編寫等多個領域均能與GPT-4 Turbo相媲美"
   },
@@ -335,6 +335,12 @@
   "SenseChat-Turbo": {
   "description": "適用於快速問答、模型微調場景"
   },
+ "SenseChat-Turbo-1202": {
+ "description": "是最新的輕量版本模型,達到全量模型90%以上能力,顯著降低推理成本。"
+ },
+ "SenseChat-Vision": {
+ "description": "最新版本模型 (V5.5),支持多圖的輸入,全面實現模型基礎能力優化,在對象屬性識別、空間關係、動作事件識別、場景理解、情感識別、邏輯常識推理和文本理解生成上都實現了較大提升。"
+ },
   "Skylark2-lite-8k": {
   "description": "雲雀(Skylark)第二代模型,Skylark2-lite 模型有較高的回應速度,適用於實時性要求高、成本敏感、對模型精度要求不高的場景,上下文窗口長度為 8k。"
   },
@@ -1019,6 +1025,12 @@
   "hunyuan-standard-vision": {
   "description": "混元最新多模態模型,支持多語種作答,中英文能力均衡。"
   },
+ "hunyuan-translation": {
+ "description": "支持中文和英語、日語、法語、葡萄牙語、西班牙語、土耳其語、俄語、阿拉伯語、韓語、義大利語、德語、越南語、馬來語、印尼語15種語言互譯,基於多場景翻譯評測集自動化評估COMET評分,在十餘種常用語種中外互譯能力上整體優於市場同規模模型。"
+ },
+ "hunyuan-translation-lite": {
+ "description": "混元翻譯模型支持自然語言對話式翻譯;支持中文和英語、日語、法語、葡萄牙語、西班牙語、土耳其語、俄語、阿拉伯語、韓語、義大利語、德語、越南語、馬來語、印尼語15種語言互譯。"
+ },
   "hunyuan-turbo": {
   "description": "混元全新一代大語言模型的預覽版,採用全新的混合專家模型(MoE)結構,相較於 hunyuan-pro 推理效率更快,效果表現更強。"
   },
@@ -1220,9 +1232,6 @@
   "meta-llama/llama-3-8b-instruct": {
   "description": "Llama 3 8B Instruct 優化了高品質對話場景,性能優於許多閉源模型。"
   },
- "meta-llama/llama-3.1-405b-instruct": {
- "description": "Llama 3.1 405B Instruct 是 Meta 最新推出的版本,優化用於生成高品質對話,超越了許多領先的閉源模型。"
- },
   "meta-llama/llama-3.1-70b-instruct": {
   "description": "Llama 3.1 70B Instruct 專為高品質對話而設計,在人類評估中表現突出,特別適合高互動場景。"
   },
@@ -1286,9 +1295,6 @@
   "microsoft/WizardLM-2-8x22B": {
   "description": "WizardLM 2 是微軟AI提供的語言模型,在複雜對話、多語言、推理和智能助手領域表現尤為出色。"
   },
- "microsoft/wizardlm 2-7b": {
- "description": "WizardLM 2 7B 是微軟AI最新的快速輕量化模型,性能接近於現有開源領導模型的10倍。"
- },
   "microsoft/wizardlm-2-8x22b": {
   "description": "WizardLM-2 8x22B 是微軟 AI 最先進的 Wizard 模型,顯示出極其競爭力的表現。"
   },
@@ -1580,10 +1586,10 @@
   "qwq-32b-preview": {
   "description": "QwQ模型是由 Qwen 團隊開發的實驗性研究模型,專注於增強 AI 推理能力。"
   },
- "solar-1-mini-chat": {
+ "solar-mini": {
   "description": "Solar Mini 是一種緊湊型 LLM,性能優於 GPT-3.5,具備強大的多語言能力,支持英語和韓語,提供高效小巧的解決方案。"
   },
- "solar-1-mini-chat-ja": {
+ "solar-mini-ja": {
   "description": "Solar Mini (Ja) 擴展了 Solar Mini 的能力,專注於日語,同時在英語和韓語的使用中保持高效和卓越性能。"
   },
   "solar-pro": {
@@ -1619,6 +1625,9 @@
   "step-1.5v-mini": {
   "description": "該模型擁有強大的視頻理解能力。"
   },
+ "step-1o-turbo-vision": {
+ "description": "該模型擁有強大的圖像理解能力,在數理、代碼領域強於1o。模型比1o更小,輸出速度更快。"
+ },
   "step-1o-vision-32k": {
   "description": "該模型擁有強大的圖像理解能力。相比於 step-1v 系列模型,擁有更強的視覺性能。"
   },
@@ -1634,12 +1643,12 @@
   "step-2-mini": {
   "description": "基於新一代自研Attention架構MFA的極速大模型,用極低成本達到和step1類似的效果,同時保持了更高的吞吐和更快響應時延。能夠處理通用任務,在程式碼能力上具備特長。"
   },
- "taichu2_mm": {
- "description": "融合了圖像理解、知識遷移、邏輯歸因等能力,在圖文問答領域表現突出"
- },
   "taichu_llm": {
   "description": "紫東太初語言大模型具備超強語言理解能力以及文本創作、知識問答、代碼編程、數學計算、邏輯推理、情感分析、文本摘要等能力。創新性地將大數據預訓練與多源豐富知識相結合,通過持續打磨算法技術,並不斷吸收海量文本數據中詞彙、結構、語法、語義等方面的新知識,實現模型效果不斷進化。為用戶提供更加便捷的信息和服務以及更為智能化的體驗。"
   },
+ "taichu_vl": {
+ "description": "融合了圖像理解、知識遷移、邏輯歸因等能力,在圖文問答領域表現突出。"
+ },
   "text-embedding-3-large": {
   "description": "最強大的向量化模型,適用於英文和非英文任務"
   },
package/locales/zh-TW/plugin.json CHANGED
@@ -134,6 +134,22 @@
   "plugin": "外掛執行中..."
   },
   "pluginList": "外掛清單",
+ "search": {
+ "config": {
+ "addKey": "添加密鑰",
+ "close": "刪除",
+ "confirm": "已完成配置並重試"
+ },
+ "searchxng": {
+ "baseURL": "請輸入",
+ "description": "請輸入 SearchXNG 的網址,即可開始聯網搜索",
+ "keyPlaceholder": "請輸入密鑰",
+ "title": "配置 SearchXNG 搜索引擎",
+ "unconfiguredDesc": "請聯繫管理員完成 SearchXNG 搜索引擎配置,即可開始聯網搜索",
+ "unconfiguredTitle": "暫未配置 SearchXNG 搜索引擎"
+ },
+ "title": "聯網搜索"
+ },
   "setting": "插件設置",
   "settings": {
   "indexUrl": {
package/locales/zh-TW/portal.json CHANGED
@@ -7,11 +7,6 @@
   }
   },
   "Plugins": "外掛",
- "actions": {
- "genAiMessage": "生成助手訊息",
- "summary": "摘要",
- "summaryTooltip": "總結目前內容"
- },
   "artifacts": {
   "display": {
   "code": "程式碼",
package/locales/zh-TW/tool.json CHANGED
@@ -6,5 +6,23 @@
   "generating": "生成中...",
   "images": "圖片:",
   "prompt": "提示詞"
+ },
+ "search": {
+ "createNewSearch": "建立新的搜尋紀錄",
+ "emptyResult": "沒有搜尋到結果,請修改關鍵字後重試",
+ "genAiMessage": "創建助手消息",
+ "includedTooltip": "當前搜尋結果會進入會話的上下文中",
+ "keywords": "關鍵字:",
+ "scoreTooltip": "相關性分數,該分數越高說明與查詢關鍵字越相關",
+ "searchBar": {
+ "button": "搜尋",
+ "placeholder": "關鍵字",
+ "tooltip": "將會重新獲取搜尋結果,並建立一條新的總結消息"
+ },
+ "searchEngine": "搜尋引擎:",
+ "searchResult": "搜尋數量:",
+ "summary": "總結",
+ "summaryTooltip": "總結當前內容",
+ "viewMoreResults": "查看更多 {{results}} 個結果"
   }
   }
package/package.json CHANGED
@@ -1,6 +1,6 @@
   {
   "name": "@lobehub/chat",
- "version": "1.63.2",
+ "version": "1.64.0",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
   "framework",
package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/Mobile/index.tsx CHANGED
@@ -21,6 +21,7 @@ import SendButton from './Send';
 
   const defaultLeftActions: ActionKeys[] = [
   'model',
+ 'search',
   'fileUpload',
   'knowledgeBase',
   'history',
package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/SearchTags.tsx ADDED
@@ -0,0 +1,17 @@
+ import { Icon, Tag } from '@lobehub/ui';
+ import { Globe } from 'lucide-react';
+ import { memo } from 'react';
+ import { useTranslation } from 'react-i18next';
+
+ const SearchTag = memo(() => {
+ const { t } = useTranslation('chat');
+
+ return (
+ <Tag>
+ {<Icon icon={Globe} />}
+ <div>{t('search.title')}</div>
+ </Tag>
+ );
+ });
+
+ export default SearchTag;
package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Tags.tsx CHANGED
@@ -6,6 +6,7 @@ import { Flexbox } from 'react-layout-kit';
 
   import ModelSwitchPanel from '@/features/ModelSwitchPanel';
   import PluginTag from '@/features/PluginTag';
+ import { useAgentEnableSearch } from '@/hooks/useAgentEnableSearch';
   import { useModelSupportToolUse } from '@/hooks/useModelSupportToolUse';
   import { useAgentStore } from '@/store/agent';
   import { agentSelectors } from '@/store/agent/selectors';
@@ -13,20 +14,24 @@ import { useUserStore } from '@/store/user';
   import { authSelectors } from '@/store/user/selectors';
 
   import KnowledgeTag from './KnowledgeTag';
+ import SearchTags from './SearchTags';
 
   const TitleTags = memo(() => {
- const [model, provider, hasKnowledge] = useAgentStore((s) => [
+ const [model, provider, hasKnowledge, isLoading] = useAgentStore((s) => [
   agentSelectors.currentAgentModel(s),
   agentSelectors.currentAgentModelProvider(s),
   agentSelectors.hasKnowledge(s),
+ agentSelectors.isAgentConfigLoading(s),
   ]);
+
   const plugins = useAgentStore(agentSelectors.currentAgentPlugins, isEqual);
   const enabledKnowledge = useAgentStore(agentSelectors.currentEnabledKnowledge, isEqual);
 
   const showPlugin = useModelSupportToolUse(model, provider);
- const isLoading = useAgentStore(agentSelectors.isAgentConfigLoading);
   const isLogin = useUserStore(authSelectors.isLogin);
 
+ const isAgentEnableSearch = useAgentEnableSearch();
+
   return isLoading && isLogin ? (
   <Skeleton.Button active size={'small'} style={{ height: 20 }} />
   ) : (
@@ -34,6 +39,7 @@ const TitleTags = memo(() => {
   <ModelSwitchPanel>
   <ModelTag model={model} />
   </ModelSwitchPanel>
+ {isAgentEnableSearch && <SearchTags />}
   {showPlugin && plugins?.length > 0 && <PluginTag plugins={plugins} />}
   {hasKnowledge && <KnowledgeTag data={enabledKnowledge} />}
   </Flexbox>
package/src/config/tools.ts ADDED
@@ -0,0 +1,16 @@
+ import { createEnv } from '@t3-oss/env-nextjs';
+ import { z } from 'zod';
+
+ export const getToolsConfig = () => {
+ return createEnv({
+ runtimeEnv: {
+ SEARXNG_URL: process.env.SEARXNG_URL,
+ },
+
+ server: {
+ SEARXNG_URL: z.string().url().optional(),
+ },
+ });
+ };
+
+ export const toolsEnv = getToolsConfig();
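Note: the SEARXNG_URL value validated above presumably feeds the new src/server/modules/SearXNG.ts module listed in the file summary. As orientation only, here is a minimal TypeScript sketch of how a SearXNG JSON query is typically made; the searchWithSearXNG helper, the SearXNGResult shape, and the error handling are illustrative assumptions, not the module's actual code, and the request assumes the SearXNG instance has JSON output enabled in its settings.

// Hypothetical helper: query a SearXNG instance's JSON API.
interface SearXNGResult {
  content?: string;
  engine?: string;
  title: string;
  url: string;
}

const searchWithSearXNG = async (query: string): Promise<SearXNGResult[]> => {
  // Same environment variable that getToolsConfig() validates above.
  const baseURL = process.env.SEARXNG_URL;
  if (!baseURL) throw new Error('SEARXNG_URL is not configured');

  const searchURL = new URL('/search', baseURL);
  searchURL.searchParams.set('q', query);
  searchURL.searchParams.set('format', 'json');

  const res = await fetch(searchURL);
  if (!res.ok) throw new Error(`SearXNG request failed: ${res.status}`);

  const data = (await res.json()) as { results: SearXNGResult[] };
  return data.results;
};

Setting something like SEARXNG_URL=https://searxng.example.com (an example value) in the server environment would satisfy the z.string().url().optional() check above.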
package/src/database/repositories/aiInfra/index.test.ts CHANGED
@@ -216,6 +216,32 @@ describe('AiInfraRepos', () => {
   }),
   );
   });
+
+ it('should include settings property from builtin model', async () => {
+ const mockProviders = [
+ { enabled: true, id: 'openai', name: 'OpenAI', source: 'builtin' },
+ ] as AiProviderListItem[];
+ const mockAllModels: EnabledAiModel[] = [];
+ const mockSettings = { searchImpl: 'tool' as const };
+
+ vi.spyOn(repo, 'getAiProviderList').mockResolvedValue(mockProviders);
+ vi.spyOn(repo.aiModelModel, 'getAllModels').mockResolvedValue(mockAllModels);
+ vi.spyOn(repo as any, 'fetchBuiltinModels').mockResolvedValue([
+ {
+ enabled: true,
+ id: 'gpt-4',
+ settings: mockSettings,
+ type: 'chat',
+ },
+ ]);
+
+ const result = await repo.getEnabledModels();
+
+ expect(result[0]).toMatchObject({
+ id: 'gpt-4',
+ settings: mockSettings,
+ });
+ });
   });
 
   describe('getAiProviderModelList', () => {
@@ -239,6 +265,7 @@ describe('AiInfraRepos', () => {
   ]),
   );
   });
+
   it('should merge default and custom models', async () => {
   const mockCustomModels = [
   {
@@ -321,6 +348,7 @@ describe('AiInfraRepos', () => {
   runtimeConfig: expect.any(Object),
   });
   });
+
   it('should return provider runtime state', async () => {
   const mockRuntimeConfig = {
   openai: {
@@ -385,6 +413,7 @@ describe('AiInfraRepos', () => {
   enabled: true, // from mockProviderConfigs
   });
   });
+
   it('should merge provider configs correctly', async () => {
   const mockProviderDetail = {
   enabled: true,
package/src/features/ChatInput/ActionBar/Search/index.tsx CHANGED
@@ -6,32 +6,23 @@ import { memo } from 'react';
   import { useTranslation } from 'react-i18next';
   import { Flexbox } from 'react-layout-kit';
 
+ import { isDeprecatedEdition } from '@/const/version';
+ import { useAgentEnableSearch } from '@/hooks/useAgentEnableSearch';
   import { useIsMobile } from '@/hooks/useIsMobile';
   import { useAgentStore } from '@/store/agent';
   import { agentSelectors } from '@/store/agent/selectors';
- import { aiModelSelectors, aiProviderSelectors, useAiInfraStore } from '@/store/aiInfra';
 
   import AINetworkSettings from './SwitchPanel';
 
   const Search = memo(() => {
   const { t } = useTranslation('chat');
- const [isLoading, isAgentEnableSearch] = useAgentStore((s) => [
- agentSelectors.isAgentConfigLoading(s),
- agentSelectors.isAgentEnableSearch(s),
- ]);
- const [model, provider] = useAgentStore((s) => [
- agentSelectors.currentAgentModel(s),
- agentSelectors.currentAgentModelProvider(s),
- ]);
-
- const [isModelHasBuiltinSearch] = useAiInfraStore((s) => [
- aiModelSelectors.isModelHasBuiltinSearchConfig(model, provider)(s),
- aiProviderSelectors.isProviderHasBuiltinSearchConfig(provider)(s),
- ]);
+ const [isLoading] = useAgentStore((s) => [agentSelectors.isAgentConfigLoading(s)]);
+ const isAgentEnableSearch = useAgentEnableSearch();
 
   const isMobile = useIsMobile();
 
   const theme = useTheme();
+
   if (isLoading) return null;
   // <ActionIcon
   // icon={Globe}
@@ -42,7 +33,7 @@ const Search = memo(() => {
   // />
 
   return (
- isModelHasBuiltinSearch && (
+ !isDeprecatedEdition && (
   <Flexbox>
   <Popover
   arrow={false}
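The useAgentEnableSearch hook that replaces the inline selector wiring removed above ships in this release as src/hooks/useAgentEnableSearch.ts (+27 in the file summary), but its body is not part of the hunks shown here. A rough TypeScript reconstruction from the selectors the old code read inline; the selector names come from the removed lines, while the exact composition is an assumption.

import { useAgentStore } from '@/store/agent';
import { agentSelectors } from '@/store/agent/selectors';

// Hedged sketch: the real hook likely also consults the aiInfra store's
// built-in search config selectors that this component previously read inline.
export const useAgentEnableSearch = () => {
  const [isLoading, isAgentEnableSearch] = useAgentStore((s) => [
    agentSelectors.isAgentConfigLoading(s),
    agentSelectors.isAgentEnableSearch(s),
  ]);

  // Treat search as disabled while the agent config is still loading.
  return !isLoading && isAgentEnableSearch;
};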
package/src/features/Conversation/Messages/Assistant/Tool/Inspector/ToolTitle.tsx ADDED
@@ -0,0 +1,76 @@
+ import { Icon } from '@lobehub/ui';
+ import { createStyles } from 'antd-style';
+ import isEqual from 'fast-deep-equal';
+ import { Globe } from 'lucide-react';
+ import { memo } from 'react';
+ import { useTranslation } from 'react-i18next';
+ import { Flexbox } from 'react-layout-kit';
+
+ import PluginAvatar from '@/features/PluginAvatar';
+ import { useChatStore } from '@/store/chat';
+ import { chatSelectors } from '@/store/chat/selectors';
+ import { pluginHelpers, useToolStore } from '@/store/tool';
+ import { toolSelectors } from '@/store/tool/selectors';
+ import { shinyTextStylish } from '@/styles/loading';
+ import { WebBrowsingManifest } from '@/tools/web-browsing';
+
+ import Loader from './Loader';
+
+ export const useStyles = createStyles(({ css, token }) => ({
+ apiName: css`
+ overflow: hidden;
+ display: -webkit-box;
+ -webkit-box-orient: vertical;
+ -webkit-line-clamp: 1;
+
+ font-family: ${token.fontFamilyCode};
+ font-size: 12px;
+ text-overflow: ellipsis;
+ `,
+
+ shinyText: shinyTextStylish(token),
+ }));
+
+ interface ToolTitleProps {
+ apiName: string;
+ identifier: string;
+ index: number;
+ messageId: string;
+ toolCallId: string;
+ }
+
+ const ToolTitle = memo<ToolTitleProps>(({ identifier, messageId, index, apiName, toolCallId }) => {
+ const { t } = useTranslation('plugin');
+ const { styles } = useStyles();
+
+ const isLoading = useChatStore((s) => {
+ const toolMessageId = chatSelectors.getMessageByToolCallId(toolCallId)(s)?.id;
+ const isToolCallStreaming = chatSelectors.isToolCallStreaming(messageId, index)(s);
+ const isPluginApiInvoking = !toolMessageId
+ ? true
+ : chatSelectors.isPluginApiInvoking(toolMessageId)(s);
+ return isToolCallStreaming || isPluginApiInvoking;
+ });
+
+ const pluginMeta = useToolStore(toolSelectors.getMetaById(identifier), isEqual);
+
+ if (identifier === WebBrowsingManifest.identifier) {
+ return (
+ <Flexbox align={'center'} className={isLoading ? styles.shinyText : ''} gap={4} horizontal>
+ {isLoading ? <Loader /> : <Icon icon={Globe} size={{ fontSize: 13 }} />}
+ <div>{t('search.title')}</div>/<span className={styles.apiName}>{apiName}</span>
+ </Flexbox>
+ );
+ }
+
+ const pluginTitle = pluginHelpers.getPluginTitle(pluginMeta) ?? t('unknownPlugin');
+
+ return (
+ <Flexbox align={'center'} className={isLoading ? styles.shinyText : ''} gap={4} horizontal>
+ {isLoading ? <Loader /> : <PluginAvatar identifier={identifier} size={20} />}
+ <div>{pluginTitle}</div>/<span className={styles.apiName}>{apiName}</span>
+ </Flexbox>
+ );
+ });
+
+ export default ToolTitle;