@lobehub/chat 1.81.2 → 1.81.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (154) hide show
  1. package/CHANGELOG.md +59 -0
  2. package/changelog/v1.json +21 -0
  3. package/locales/ar/common.json +2 -0
  4. package/locales/ar/electron.json +32 -0
  5. package/locales/ar/models.json +126 -3
  6. package/locales/ar/plugin.json +1 -0
  7. package/locales/ar/tool.json +25 -0
  8. package/locales/bg-BG/common.json +2 -0
  9. package/locales/bg-BG/electron.json +32 -0
  10. package/locales/bg-BG/models.json +126 -3
  11. package/locales/bg-BG/plugin.json +1 -0
  12. package/locales/bg-BG/tool.json +25 -0
  13. package/locales/de-DE/common.json +2 -0
  14. package/locales/de-DE/electron.json +32 -0
  15. package/locales/de-DE/models.json +126 -3
  16. package/locales/de-DE/plugin.json +1 -0
  17. package/locales/de-DE/tool.json +25 -0
  18. package/locales/en-US/common.json +2 -0
  19. package/locales/en-US/electron.json +32 -0
  20. package/locales/en-US/models.json +126 -3
  21. package/locales/en-US/plugin.json +1 -0
  22. package/locales/en-US/tool.json +25 -0
  23. package/locales/es-ES/common.json +2 -0
  24. package/locales/es-ES/electron.json +32 -0
  25. package/locales/es-ES/models.json +126 -3
  26. package/locales/es-ES/plugin.json +1 -0
  27. package/locales/es-ES/tool.json +25 -0
  28. package/locales/fa-IR/common.json +2 -0
  29. package/locales/fa-IR/electron.json +32 -0
  30. package/locales/fa-IR/models.json +126 -3
  31. package/locales/fa-IR/plugin.json +1 -0
  32. package/locales/fa-IR/tool.json +25 -0
  33. package/locales/fr-FR/common.json +2 -0
  34. package/locales/fr-FR/electron.json +32 -0
  35. package/locales/fr-FR/models.json +126 -3
  36. package/locales/fr-FR/plugin.json +1 -0
  37. package/locales/fr-FR/tool.json +25 -0
  38. package/locales/it-IT/common.json +2 -0
  39. package/locales/it-IT/electron.json +32 -0
  40. package/locales/it-IT/models.json +126 -3
  41. package/locales/it-IT/plugin.json +1 -0
  42. package/locales/it-IT/tool.json +25 -0
  43. package/locales/ja-JP/common.json +2 -0
  44. package/locales/ja-JP/electron.json +32 -0
  45. package/locales/ja-JP/models.json +126 -3
  46. package/locales/ja-JP/plugin.json +1 -0
  47. package/locales/ja-JP/tool.json +25 -0
  48. package/locales/ko-KR/common.json +2 -0
  49. package/locales/ko-KR/electron.json +32 -0
  50. package/locales/ko-KR/models.json +126 -3
  51. package/locales/ko-KR/plugin.json +1 -0
  52. package/locales/ko-KR/tool.json +25 -0
  53. package/locales/nl-NL/common.json +2 -0
  54. package/locales/nl-NL/electron.json +32 -0
  55. package/locales/nl-NL/models.json +126 -3
  56. package/locales/nl-NL/plugin.json +1 -0
  57. package/locales/nl-NL/tool.json +25 -0
  58. package/locales/pl-PL/common.json +2 -0
  59. package/locales/pl-PL/electron.json +32 -0
  60. package/locales/pl-PL/models.json +126 -3
  61. package/locales/pl-PL/plugin.json +1 -0
  62. package/locales/pl-PL/tool.json +25 -0
  63. package/locales/pt-BR/common.json +2 -0
  64. package/locales/pt-BR/electron.json +32 -0
  65. package/locales/pt-BR/models.json +126 -3
  66. package/locales/pt-BR/plugin.json +1 -0
  67. package/locales/pt-BR/tool.json +25 -0
  68. package/locales/ru-RU/common.json +2 -0
  69. package/locales/ru-RU/electron.json +32 -0
  70. package/locales/ru-RU/models.json +126 -3
  71. package/locales/ru-RU/plugin.json +1 -0
  72. package/locales/ru-RU/tool.json +25 -0
  73. package/locales/tr-TR/common.json +2 -0
  74. package/locales/tr-TR/electron.json +32 -0
  75. package/locales/tr-TR/models.json +126 -3
  76. package/locales/tr-TR/plugin.json +1 -0
  77. package/locales/tr-TR/tool.json +25 -0
  78. package/locales/vi-VN/common.json +2 -0
  79. package/locales/vi-VN/electron.json +32 -0
  80. package/locales/vi-VN/models.json +126 -3
  81. package/locales/vi-VN/plugin.json +1 -0
  82. package/locales/vi-VN/tool.json +25 -0
  83. package/locales/zh-CN/common.json +2 -0
  84. package/locales/zh-CN/electron.json +32 -0
  85. package/locales/zh-CN/models.json +131 -8
  86. package/locales/zh-CN/plugin.json +1 -0
  87. package/locales/zh-CN/tool.json +25 -0
  88. package/locales/zh-TW/common.json +2 -0
  89. package/locales/zh-TW/electron.json +32 -0
  90. package/locales/zh-TW/models.json +126 -3
  91. package/locales/zh-TW/plugin.json +1 -0
  92. package/locales/zh-TW/tool.json +25 -0
  93. package/package.json +3 -2
  94. package/packages/electron-client-ipc/src/events/index.ts +5 -5
  95. package/packages/electron-client-ipc/src/events/localFile.ts +22 -0
  96. package/packages/electron-client-ipc/src/events/{file.ts → upload.ts} +1 -1
  97. package/packages/electron-client-ipc/src/types/index.ts +2 -1
  98. package/packages/electron-client-ipc/src/types/localFile.ts +52 -0
  99. package/scripts/prebuild.mts +5 -1
  100. package/src/app/(backend)/trpc/desktop/[trpc]/route.ts +26 -0
  101. package/src/config/aiModels/cloudflare.ts +41 -37
  102. package/src/config/aiModels/github.ts +90 -0
  103. package/src/config/aiModels/google.ts +25 -0
  104. package/src/features/Conversation/Messages/Assistant/Tool/Render/Arguments/ObjectEntity.tsx +81 -0
  105. package/src/features/Conversation/Messages/Assistant/Tool/Render/Arguments/ValueCell.tsx +43 -0
  106. package/src/features/Conversation/Messages/Assistant/Tool/Render/Arguments/index.tsx +120 -0
  107. package/src/features/Conversation/Messages/Assistant/Tool/Render/CustomRender.tsx +75 -2
  108. package/src/features/Conversation/Messages/Assistant/Tool/Render/KeyValueEditor.tsx +214 -0
  109. package/src/features/User/UserPanel/useMenu.tsx +8 -1
  110. package/src/libs/agent-runtime/google/index.ts +3 -0
  111. package/src/libs/trpc/client/desktop.ts +14 -0
  112. package/src/locales/default/common.ts +2 -0
  113. package/src/locales/default/electron.ts +34 -0
  114. package/src/locales/default/index.ts +2 -0
  115. package/src/locales/default/tool.ts +25 -0
  116. package/src/server/routers/desktop/index.ts +9 -0
  117. package/src/server/routers/desktop/pgTable.ts +43 -0
  118. package/src/services/electron/autoUpdate.ts +17 -0
  119. package/src/services/electron/file.ts +31 -0
  120. package/src/services/electron/localFileService.ts +39 -0
  121. package/src/services/electron/remoteServer.ts +40 -0
  122. package/src/store/chat/index.ts +1 -1
  123. package/src/store/chat/slices/builtinTool/actions/index.ts +3 -1
  124. package/src/store/chat/slices/builtinTool/actions/localFile.ts +129 -0
  125. package/src/store/chat/slices/builtinTool/initialState.ts +2 -0
  126. package/src/store/chat/slices/builtinTool/selectors.ts +2 -0
  127. package/src/store/chat/slices/plugin/action.ts +3 -3
  128. package/src/store/chat/store.ts +2 -0
  129. package/src/store/electron/actions/sync.ts +117 -0
  130. package/src/store/electron/index.ts +1 -0
  131. package/src/store/electron/initialState.ts +18 -0
  132. package/src/store/electron/selectors/index.ts +1 -0
  133. package/src/store/electron/selectors/sync.ts +9 -0
  134. package/src/store/electron/store.ts +29 -0
  135. package/src/tools/index.ts +8 -0
  136. package/src/tools/local-files/Render/ListFiles/Result.tsx +42 -0
  137. package/src/tools/local-files/Render/ListFiles/index.tsx +68 -0
  138. package/src/tools/local-files/Render/ReadLocalFile/ReadFileSkeleton.tsx +50 -0
  139. package/src/tools/local-files/Render/ReadLocalFile/ReadFileView.tsx +197 -0
  140. package/src/tools/local-files/Render/ReadLocalFile/index.tsx +31 -0
  141. package/src/tools/local-files/Render/ReadLocalFile/style.ts +37 -0
  142. package/src/tools/local-files/Render/SearchFiles/Result.tsx +42 -0
  143. package/src/tools/local-files/Render/SearchFiles/SearchQuery/SearchView.tsx +77 -0
  144. package/src/tools/local-files/Render/SearchFiles/SearchQuery/index.tsx +72 -0
  145. package/src/tools/local-files/Render/SearchFiles/index.tsx +32 -0
  146. package/src/tools/local-files/Render/index.tsx +36 -0
  147. package/src/tools/local-files/components/FileItem.tsx +117 -0
  148. package/src/tools/local-files/index.ts +149 -0
  149. package/src/tools/local-files/systemRole.ts +46 -0
  150. package/src/tools/local-files/type.ts +33 -0
  151. package/src/tools/renders.ts +3 -0
  152. package/packages/electron-client-ipc/src/events/search.ts +0 -4
  153. package/src/features/Conversation/Messages/Assistant/Tool/Render/Arguments.tsx +0 -165
  154. /package/packages/electron-client-ipc/src/types/{file.ts → upload.ts} +0 -0
@@ -5,9 +5,15 @@
5
5
  "01-ai/yi-1.5-9b-chat": {
6
6
  "description": "Zero One Everything, the latest open-source fine-tuned model with 9 billion parameters, supports various dialogue scenarios with high-quality training data aligned with human preferences."
7
7
  },
8
+ "360/deepseek-r1": {
9
+ "description": "[360 Deployment Version] DeepSeek-R1 extensively utilizes reinforcement learning techniques in the post-training phase, significantly enhancing model inference capabilities with minimal labeled data. It performs comparably to OpenAI's o1 official version in tasks such as mathematics, coding, and natural language reasoning."
10
+ },
8
11
  "360gpt-pro": {
9
12
  "description": "360GPT Pro, as an important member of the 360 AI model series, meets diverse natural language application scenarios with efficient text processing capabilities, supporting long text understanding and multi-turn dialogue."
10
13
  },
14
+ "360gpt-pro-trans": {
15
+ "description": "A translation-specific model, finely tuned for optimal translation results."
16
+ },
11
17
  "360gpt-turbo": {
12
18
  "description": "360GPT Turbo offers powerful computation and dialogue capabilities, with excellent semantic understanding and generation efficiency, making it an ideal intelligent assistant solution for enterprises and developers."
13
19
  },
@@ -62,6 +68,18 @@
62
68
  "DeepSeek-R1-Distill-Qwen-7B": {
63
69
  "description": "The DeepSeek-R1 distillation model based on Qwen2.5-Math-7B optimizes inference performance through reinforcement learning and cold-start data, refreshing the benchmark for open-source models across multiple tasks."
64
70
  },
71
+ "DeepSeek-V3": {
72
+ "description": "DeepSeek-V3 is a MoE model developed in-house by DeepSeek. Its performance surpasses that of other open-source models such as Qwen2.5-72B and Llama-3.1-405B in multiple assessments, and it stands on par with the world's top proprietary models like GPT-4o and Claude-3.5-Sonnet."
73
+ },
74
+ "Doubao-1.5-thinking-pro": {
75
+ "description": "Doubao-1.5 is a new deep thinking model that excels in specialized fields such as mathematics, programming, and scientific reasoning, as well as in general tasks like creative writing, achieving or nearing top-tier industry levels in authoritative benchmarks such as AIME 2024, Codeforces, and GPQA. It supports a context window of 128k and an output length of 16k."
76
+ },
77
+ "Doubao-1.5-thinking-pro-vision": {
78
+ "description": "Doubao-1.5 is a new deep thinking model that excels in specialized fields such as mathematics, programming, and scientific reasoning, as well as in general tasks like creative writing, achieving or nearing top-tier industry levels in authoritative benchmarks such as AIME 2024, Codeforces, and GPQA. It supports a context window of 128k and an output length of 16k."
79
+ },
80
+ "Doubao-1.5-vision-pro": {
81
+ "description": "Doubao-1.5-vision-pro is a newly upgraded multimodal large model that supports image recognition at any resolution and extreme aspect ratios, enhancing visual reasoning, document recognition, detail comprehension, and instruction following capabilities."
82
+ },
65
83
  "Doubao-1.5-vision-pro-32k": {
66
84
  "description": "Doubao-1.5-vision-pro is a newly upgraded multimodal large model that supports image recognition at any resolution and extreme aspect ratios, enhancing visual reasoning, document recognition, detail understanding, and instruction-following capabilities."
67
85
  },
@@ -341,6 +359,15 @@
341
359
  "SenseChat-Vision": {
342
360
  "description": "The latest version model (V5.5) supports multi-image input and fully optimizes the model's basic capabilities, achieving significant improvements in object attribute recognition, spatial relationships, action event recognition, scene understanding, emotion recognition, logical reasoning, and text understanding and generation."
343
361
  },
362
+ "SenseNova-V6-Pro": {
363
+ "description": "Achieves a native unification of image, text, and video capabilities, breaking through the limitations of traditional discrete multimodality, winning dual championships in the OpenCompass and SuperCLUE evaluations."
364
+ },
365
+ "SenseNova-V6-Reasoner": {
366
+ "description": "Balances visual and linguistic deep reasoning, enabling slow thinking and profound inference, presenting a complete chain of thought process."
367
+ },
368
+ "SenseNova-V6-Turbo": {
369
+ "description": "Achieves a native unification of image, text, and video capabilities, breaking through the limitations of traditional discrete multimodality, leading comprehensively in core dimensions such as multimodal foundational abilities and linguistic foundational abilities, excelling in both literature and science, and consistently ranking among the top tier in various assessments both domestically and internationally."
370
+ },
344
371
  "Skylark2-lite-8k": {
345
372
  "description": "Skylark 2nd generation model, Skylark2-lite model is characterized by high response speed, suitable for high real-time requirements, cost-sensitive scenarios, and situations where model accuracy is less critical, with a context window length of 8k."
346
373
  },
@@ -356,6 +383,21 @@
356
383
  "Skylark2-pro-turbo-8k": {
357
384
  "description": "Skylark 2nd generation model, Skylark2-pro-turbo-8k provides faster inference at a lower cost, with a context window length of 8k."
358
385
  },
386
+ "THUDM/GLM-4-32B-0414": {
387
+ "description": "GLM-4-32B-0414 is the next-generation open-source model in the GLM series, boasting 32 billion parameters. Its performance is comparable to OpenAI's GPT series and DeepSeek's V3/R1 series."
388
+ },
389
+ "THUDM/GLM-4-9B-0414": {
390
+ "description": "GLM-4-9B-0414 is a small model in the GLM series, with 9 billion parameters. This model inherits the technical characteristics of the GLM-4-32B series while providing a more lightweight deployment option. Despite its smaller size, GLM-4-9B-0414 still demonstrates excellent capabilities in tasks such as code generation, web design, SVG graphics generation, and search-based writing."
391
+ },
392
+ "THUDM/GLM-Z1-32B-0414": {
393
+ "description": "GLM-Z1-32B-0414 is a reasoning model with deep thinking capabilities. This model is developed based on GLM-4-32B-0414 through cold start and extended reinforcement learning, with further training in mathematics, coding, and logic tasks. Compared to the base model, GLM-Z1-32B-0414 significantly enhances mathematical abilities and the capacity to solve complex tasks."
394
+ },
395
+ "THUDM/GLM-Z1-9B-0414": {
396
+ "description": "GLM-Z1-9B-0414 is a small model in the GLM series, with only 9 billion parameters, yet it demonstrates remarkable capabilities while maintaining the open-source tradition. Despite its smaller size, this model excels in mathematical reasoning and general tasks, leading the performance among similarly sized open-source models."
397
+ },
398
+ "THUDM/GLM-Z1-Rumination-32B-0414": {
399
+ "description": "GLM-Z1-Rumination-32B-0414 is a deep reasoning model with reflective capabilities (comparable to OpenAI's Deep Research). Unlike typical deep thinking models, reflective models engage in longer periods of deep thought to tackle more open and complex problems."
400
+ },
359
401
  "THUDM/chatglm3-6b": {
360
402
  "description": "ChatGLM3-6B is an open-source model from the ChatGLM series, developed by Zhipu AI. This model retains the excellent features of its predecessor, such as smooth dialogue and low deployment barriers, while introducing new features. It utilizes more diverse training data, more extensive training steps, and more reasonable training strategies, performing exceptionally well among pre-trained models under 10B. ChatGLM3-6B supports multi-turn dialogues, tool invocation, code execution, and complex scenarios such as Agent tasks. In addition to the dialogue model, the foundational model ChatGLM-6B-Base and the long-text dialogue model ChatGLM3-6B-32K are also open-sourced. The model is fully open for academic research and allows free commercial use after registration."
361
403
  },
@@ -521,6 +563,9 @@
521
563
  "charglm-3": {
522
564
  "description": "CharGLM-3 is designed for role-playing and emotional companionship, supporting ultra-long multi-turn memory and personalized dialogue, with wide applications."
523
565
  },
566
+ "charglm-4": {
567
+ "description": "CharGLM-4 is designed for role-playing and emotional companionship, supporting ultra-long multi-turn memory and personalized dialogue, with wide-ranging applications."
568
+ },
524
569
  "chatglm3": {
525
570
  "description": "ChatGLM3 is a closed-source model released by Zhipu AI and Tsinghua KEG Lab. It has been pre-trained on a massive amount of Chinese and English identifiers and fine-tuned with human preference alignment. Compared to the first-generation model, it has achieved improvements of 16%, 36%, and 280% in MMLU, C-Eval, and GSM8K, respectively, and topped the Chinese task leaderboard C-Eval. It is suitable for scenarios that require a high level of knowledge, reasoning, and creativity, such as advertising copywriting, novel writing, knowledge-based writing, and code generation."
526
571
  },
@@ -632,9 +677,18 @@
632
677
  "command-r-plus-04-2024": {
633
678
  "description": "Command R+ is an instruction-following dialogue model that delivers higher quality and reliability in language tasks, with a longer context length than previous models. It is best suited for complex RAG workflows and multi-step tool usage."
634
679
  },
680
+ "command-r-plus-08-2024": {
681
+ "description": "Command R+ is an instruction-following conversational model that delivers higher quality and reliability in language tasks, with a longer context length compared to previous models. It is best suited for complex RAG workflows and multi-step tool usage."
682
+ },
635
683
  "command-r7b-12-2024": {
636
684
  "description": "command-r7b-12-2024 is a compact and efficient updated version, released in December 2024. It excels in tasks requiring complex reasoning and multi-step processing, such as RAG, tool usage, and agent tasks."
637
685
  },
686
+ "compound-beta": {
687
+ "description": "Compound-beta is a composite AI system supported by multiple publicly available models in GroqCloud, intelligently and selectively using tools to answer user queries."
688
+ },
689
+ "compound-beta-mini": {
690
+ "description": "Compound-beta-mini is a composite AI system supported by publicly available models in GroqCloud, intelligently and selectively using tools to answer user queries."
691
+ },
638
692
  "dall-e-2": {
639
693
  "description": "The second generation DALL·E model, supporting more realistic and accurate image generation, with a resolution four times that of the first generation."
640
694
  },
@@ -779,6 +833,18 @@
779
833
  "deepseek/deepseek-v3/community": {
780
834
  "description": "DeepSeek-V3 has achieved a significant breakthrough in inference speed compared to previous models. It ranks first among open-source models and can compete with the world's most advanced closed-source models. DeepSeek-V3 employs Multi-Head Latent Attention (MLA) and DeepSeekMoE architectures, which have been thoroughly validated in DeepSeek-V2. Additionally, DeepSeek-V3 introduces an auxiliary lossless strategy for load balancing and sets multi-label prediction training objectives for enhanced performance."
781
835
  },
836
+ "deepseek_r1": {
837
+ "description": "DeepSeek-R1 is a reinforcement learning (RL) driven reasoning model that addresses issues of repetition and readability within the model. Prior to RL, DeepSeek-R1 introduced cold start data to further optimize inference performance. It performs comparably to OpenAI-o1 in mathematics, coding, and reasoning tasks, and enhances overall effectiveness through carefully designed training methods."
838
+ },
839
+ "deepseek_r1_distill_llama_70b": {
840
+ "description": "DeepSeek-R1-Distill-Llama-70B is a model obtained through distillation training based on Llama-3.3-70B-Instruct. This model is part of the DeepSeek-R1 series and showcases excellent performance in mathematics, programming, and reasoning through fine-tuning with samples generated by DeepSeek-R1."
841
+ },
842
+ "deepseek_r1_distill_qwen_14b": {
843
+ "description": "DeepSeek-R1-Distill-Qwen-14B is a model derived from Qwen2.5-14B through knowledge distillation. This model is fine-tuned using 800,000 curated samples generated by DeepSeek-R1, showcasing excellent reasoning capabilities."
844
+ },
845
+ "deepseek_r1_distill_qwen_32b": {
846
+ "description": "DeepSeek-R1-Distill-Qwen-32B is a model derived from Qwen2.5-32B through knowledge distillation. This model is fine-tuned using 800,000 curated samples generated by DeepSeek-R1, demonstrating outstanding performance across multiple domains such as mathematics, programming, and reasoning."
847
+ },
782
848
  "doubao-1.5-lite-32k": {
783
849
  "description": "Doubao-1.5-lite is a new generation lightweight model, offering extreme response speed with performance and latency at a world-class level."
784
850
  },
@@ -788,6 +854,9 @@
788
854
  "doubao-1.5-pro-32k": {
789
855
  "description": "Doubao-1.5-pro is a new generation flagship model with comprehensive performance upgrades, excelling in knowledge, coding, reasoning, and more."
790
856
  },
857
+ "doubao-1.5-vision-lite": {
858
+ "description": "Doubao-1.5-vision-lite is a newly upgraded multimodal large model that supports image recognition at any resolution and extreme aspect ratios, enhancing visual reasoning, document recognition, detail comprehension, and instruction following capabilities. It supports a context window of 128k and an output length of up to 16k tokens."
859
+ },
791
860
  "emohaa": {
792
861
  "description": "Emohaa is a psychological model with professional counseling capabilities, helping users understand emotional issues."
793
862
  },
@@ -953,6 +1022,9 @@
953
1022
  "glm-4-air": {
954
1023
  "description": "GLM-4-Air is a cost-effective version with performance close to GLM-4, offering fast speed at an affordable price."
955
1024
  },
1025
+ "glm-4-air-250414": {
1026
+ "description": "GLM-4-Air is a cost-effective version, with performance close to GLM-4, offering fast speed at an affordable price."
1027
+ },
956
1028
  "glm-4-airx": {
957
1029
  "description": "GLM-4-AirX provides an efficient version of GLM-4-Air, with inference speeds up to 2.6 times faster."
958
1030
  },
@@ -962,6 +1034,9 @@
962
1034
  "glm-4-flash": {
963
1035
  "description": "GLM-4-Flash is the ideal choice for handling simple tasks, being the fastest and most cost-effective."
964
1036
  },
1037
+ "glm-4-flash-250414": {
1038
+ "description": "GLM-4-Flash is the ideal choice for handling simple tasks, being the fastest and free."
1039
+ },
965
1040
  "glm-4-flashx": {
966
1041
  "description": "GLM-4-FlashX is an enhanced version of Flash, featuring ultra-fast inference speed."
967
1042
  },
@@ -980,6 +1055,18 @@
980
1055
  "glm-4v-plus": {
981
1056
  "description": "GLM-4V-Plus has the ability to understand video content and multiple images, suitable for multimodal tasks."
982
1057
  },
1058
+ "glm-4v-plus-0111": {
1059
+ "description": "GLM-4V-Plus has the capability to understand video content and multiple images, making it suitable for multimodal tasks."
1060
+ },
1061
+ "glm-z1-air": {
1062
+ "description": "Reasoning model: possesses strong reasoning capabilities, suitable for tasks requiring deep reasoning."
1063
+ },
1064
+ "glm-z1-airx": {
1065
+ "description": "Ultra-fast reasoning: features extremely fast reasoning speed and powerful reasoning effects."
1066
+ },
1067
+ "glm-z1-flash": {
1068
+ "description": "The GLM-Z1 series possesses strong complex reasoning capabilities, excelling in logical reasoning, mathematics, programming, and more. The maximum context length is 32K."
1069
+ },
983
1070
  "glm-zero-preview": {
984
1071
  "description": "GLM-Zero-Preview possesses strong complex reasoning abilities, excelling in logical reasoning, mathematics, programming, and other fields."
985
1072
  },
@@ -1199,12 +1286,15 @@
1199
1286
  "hunyuan-turbos-20250226": {
1200
1287
  "description": "hunyuan-TurboS pv2.1.2 fixed version with upgraded training tokens; enhanced reasoning capabilities in mathematics, logic, and coding; improved performance in both Chinese and English across text creation, comprehension, knowledge Q&A, and casual conversation."
1201
1288
  },
1202
- "hunyuan-turbos-20250313": {
1203
- "description": "Unifies the style of mathematical problem-solving steps and enhances multi-turn Q&A in mathematics. Optimizes the response style for text creation, removing AI-like characteristics and adding literary flair."
1204
- },
1205
1289
  "hunyuan-turbos-latest": {
1206
1290
  "description": "The latest version of hunyuan-TurboS, the flagship model of Hunyuan, features enhanced reasoning capabilities and improved user experience."
1207
1291
  },
1292
+ "hunyuan-turbos-longtext-128k-20250325": {
1293
+ "description": "Specializes in handling long text tasks such as document summarization and question answering, while also capable of general text generation tasks. It excels in analyzing and generating long texts, effectively addressing complex and detailed long-form content processing needs."
1294
+ },
1295
+ "hunyuan-turbos-vision": {
1296
+ "description": "This model is suitable for image-text understanding scenarios and is based on the latest turbos from Hunyuan, focusing on tasks related to image-text understanding, including image-based entity recognition, knowledge Q&A, copywriting, and problem-solving from photos, with comprehensive improvements over the previous generation."
1297
+ },
1208
1298
  "hunyuan-vision": {
1209
1299
  "description": "The latest multimodal model from Hunyuan, supporting image + text input to generate textual content."
1210
1300
  },
@@ -1223,6 +1313,12 @@
1223
1313
  "internlm3-latest": {
1224
1314
  "description": "Our latest model series boasts exceptional inference performance, leading the pack among open-source models of similar scale. It defaults to our most recently released InternLM3 series models."
1225
1315
  },
1316
+ "jamba-large": {
1317
+ "description": "Our most powerful and advanced model, designed for handling complex enterprise-level tasks with exceptional performance."
1318
+ },
1319
+ "jamba-mini": {
1320
+ "description": "The most efficient model in its class, balancing speed and quality while maintaining a smaller size."
1321
+ },
1226
1322
  "jina-deepsearch-v1": {
1227
1323
  "description": "DeepSearch combines web search, reading, and reasoning for comprehensive investigations. You can think of it as an agent that takes on your research tasks—it conducts extensive searches and iterates multiple times before providing answers. This process involves ongoing research, reasoning, and problem-solving from various angles. This fundamentally differs from standard large models that generate answers directly from pre-trained data and traditional RAG systems that rely on one-time surface searches."
1228
1324
  },
@@ -1568,9 +1664,18 @@
1568
1664
  "o1-preview": {
1569
1665
  "description": "o1 is OpenAI's new reasoning model, suitable for complex tasks that require extensive general knowledge. This model features a 128K context and has a knowledge cutoff date of October 2023."
1570
1666
  },
1667
+ "o3": {
1668
+ "description": "o3 is a versatile and powerful model that excels across multiple domains. It sets new benchmarks for tasks in mathematics, science, programming, and visual reasoning. It is also skilled in technical writing and instruction following, allowing users to analyze text, code, and images to solve complex multi-step problems."
1669
+ },
1571
1670
  "o3-mini": {
1572
1671
  "description": "o3-mini is our latest small inference model that delivers high intelligence while maintaining the same cost and latency targets as o1-mini."
1573
1672
  },
1673
+ "o3-mini-high": {
1674
+ "description": "o3-mini high inference level version provides high intelligence under the same cost and latency targets as o1-mini."
1675
+ },
1676
+ "o4-mini": {
1677
+ "description": "o4-mini is our latest small model in the o series. It is optimized for fast and efficient inference, demonstrating high efficiency and performance in coding and visual tasks."
1678
+ },
1574
1679
  "open-codestral-mamba": {
1575
1680
  "description": "Codestral Mamba is a language model focused on code generation, providing strong support for advanced coding and reasoning tasks."
1576
1681
  },
@@ -1598,6 +1703,12 @@
1598
1703
  "openai/o1-preview": {
1599
1704
  "description": "o1 is OpenAI's new reasoning model, suitable for complex tasks that require extensive general knowledge. This model features a 128K context and has a knowledge cutoff date of October 2023."
1600
1705
  },
1706
+ "openai/o4-mini": {
1707
+ "description": "o4-mini is optimized for fast and efficient inference, demonstrating high efficiency and performance in coding and visual tasks."
1708
+ },
1709
+ "openai/o4-mini-high": {
1710
+ "description": "o4-mini high inference level version, optimized for fast and efficient inference, demonstrating high efficiency and performance in coding and visual tasks."
1711
+ },
1601
1712
  "openrouter/auto": {
1602
1713
  "description": "Based on context length, topic, and complexity, your request will be sent to Llama 3 70B Instruct, Claude 3.5 Sonnet (self-regulating), or GPT-4o."
1603
1714
  },
@@ -1793,6 +1904,9 @@
1793
1904
  "qwq-plus-latest": {
1794
1905
  "description": "The QwQ inference model is trained based on the Qwen2.5 model, significantly enhancing its reasoning capabilities through reinforcement learning. The core metrics of the model, including mathematical code (AIME 24/25, LiveCodeBench) and some general metrics (IFEval, LiveBench, etc.), reach the level of the full version of DeepSeek-R1."
1795
1906
  },
1907
+ "qwq_32b": {
1908
+ "description": "A medium-sized reasoning model in the Qwen series. Compared to traditional instruction-tuned models, QwQ, with its thinking and reasoning capabilities, significantly enhances performance in downstream tasks, especially in solving challenging problems."
1909
+ },
1796
1910
  "r1-1776": {
1797
1911
  "description": "R1-1776 is a version of the DeepSeek R1 model, fine-tuned to provide unfiltered, unbiased factual information."
1798
1912
  },
@@ -1853,12 +1967,21 @@
1853
1967
  "step-2-16k": {
1854
1968
  "description": "Supports large-scale context interactions, suitable for complex dialogue scenarios."
1855
1969
  },
1970
+ "step-2-16k-exp": {
1971
+ "description": "An experimental version of the step-2 model, featuring the latest capabilities and rolling updates. Not recommended for use in formal production environments."
1972
+ },
1856
1973
  "step-2-mini": {
1857
1974
  "description": "A high-speed large model based on the next-generation self-developed Attention architecture MFA, achieving results similar to step-1 at a very low cost, while maintaining higher throughput and faster response times. It is capable of handling general tasks and has specialized skills in coding."
1858
1975
  },
1976
+ "step-r1-v-mini": {
1977
+ "description": "This model is a powerful reasoning model with strong image understanding capabilities, able to process both image and text information, generating text content after deep reasoning. It excels in visual reasoning while also possessing first-tier capabilities in mathematics, coding, and text reasoning. The context length is 100k."
1978
+ },
1859
1979
  "taichu_llm": {
1860
1980
  "description": "The ZD Taichu language model possesses strong language understanding capabilities and excels in text creation, knowledge Q&A, code programming, mathematical calculations, logical reasoning, sentiment analysis, and text summarization. It innovatively combines large-scale pre-training with rich knowledge from multiple sources, continuously refining algorithmic techniques and absorbing new knowledge in vocabulary, structure, grammar, and semantics from vast text data, resulting in an evolving model performance. It provides users with more convenient information and services, as well as a more intelligent experience."
1861
1981
  },
1982
+ "taichu_o1": {
1983
+ "description": "taichu_o1 is a next-generation reasoning model that achieves human-like thinking chains through multimodal interaction and reinforcement learning, supporting complex decision-making scenarios while maintaining high-precision outputs and demonstrating model reasoning pathways, suitable for strategy analysis and deep thinking."
1984
+ },
1862
1985
  "taichu_vl": {
1863
1986
  "description": "Integrates capabilities in image understanding, knowledge transfer, and logical attribution, excelling in the field of image-text question answering."
1864
1987
  },
@@ -5,6 +5,7 @@
5
5
  "off": "Turn off debug",
6
6
  "on": "View plugin invocation information",
7
7
  "payload": "plugin payload",
8
+ "pluginState": "Plugin State",
8
9
  "response": "Response",
9
10
  "tool_call": "tool call request"
10
11
  },
@@ -7,6 +7,20 @@
7
7
  "images": "Images:",
8
8
  "prompt": "Prompt"
9
9
  },
10
+ "localFiles": {
11
+ "file": "File",
12
+ "folder": "Folder",
13
+ "open": "Open",
14
+ "openFile": "Open File",
15
+ "openFolder": "Open Folder",
16
+ "read": {
17
+ "more": "View More"
18
+ },
19
+ "readFile": "Read File",
20
+ "readFileError": "Failed to read file, please check if the file path is correct",
21
+ "readFiles": "Read Files",
22
+ "readFilesError": "Failed to read files, please check if the file path is correct"
23
+ },
10
24
  "search": {
11
25
  "createNewSearch": "Create a new search record",
12
26
  "emptyResult": "No results found, please modify your keywords and try again",
@@ -44,5 +58,16 @@
44
58
  "summary": "Summary",
45
59
  "summaryTooltip": "Summarize the current content",
46
60
  "viewMoreResults": "View {{results}} more results"
61
+ },
62
+ "updateArgs": {
63
+ "duplicateKeyError": "Field key must be unique",
64
+ "form": {
65
+ "add": "Add an Item",
66
+ "key": "Field Key",
67
+ "value": "Field Value"
68
+ },
69
+ "formValidationFailed": "Form validation failed, please check the parameter format",
70
+ "keyRequired": "Field key cannot be empty",
71
+ "stringifyError": "Unable to serialize parameters, please check the parameter format"
47
72
  }
48
73
  }
@@ -284,6 +284,8 @@
284
284
  "rename": "Renombrar",
285
285
  "reset": "Restablecer",
286
286
  "retry": "Reintentar",
287
+ "run": "Ejecutar",
288
+ "save": "Guardar",
287
289
  "send": "Enviar",
288
290
  "setting": "Configuración",
289
291
  "share": "Compartir",
@@ -0,0 +1,32 @@
1
+ {
2
+ "remoteServer": {
3
+ "authError": "Error de autorización: {{error}}",
4
+ "authPending": "Complete la autorización en el navegador",
5
+ "configDesc": "Conéctese al servidor LobeChat remoto para habilitar la sincronización de datos",
6
+ "configError": "Error de configuración",
7
+ "configTitle": "Configurar sincronización en la nube",
8
+ "connect": "Conectar y autorizar",
9
+ "connected": "Conectado",
10
+ "disconnect": "Desconectar",
11
+ "disconnectError": "Error al desconectar",
12
+ "disconnected": "Desconectado",
13
+ "fetchError": "Error al obtener la configuración",
14
+ "invalidUrl": "Por favor, introduzca una URL válida",
15
+ "serverUrl": "Dirección del servidor",
16
+ "statusConnected": "Conectado",
17
+ "statusDisconnected": "Desconectado",
18
+ "urlRequired": "Por favor, introduzca la dirección del servidor"
19
+ },
20
+ "updater": {
21
+ "downloadingUpdate": "Descargando actualización",
22
+ "downloadingUpdateDesc": "La actualización se está descargando, por favor espere...",
23
+ "later": "Actualizar más tarde",
24
+ "newVersionAvailable": "Nueva versión disponible",
25
+ "newVersionAvailableDesc": "Se ha encontrado una nueva versión {{version}}, ¿desea descargarla ahora?",
26
+ "restartAndInstall": "Reiniciar e instalar",
27
+ "updateError": "Error de actualización",
28
+ "updateReady": "Actualización lista",
29
+ "updateReadyDesc": "Lobe Chat {{version}} se ha descargado, reinicie la aplicación para completar la instalación.",
30
+ "upgradeNow": "Actualizar ahora"
31
+ }
32
+ }