@lobehub/chat 1.33.4 → 1.34.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (295)
  1. package/.i18nrc.js +8 -2
  2. package/.releaserc.js +10 -1
  3. package/CHANGELOG.md +51 -16624
  4. package/changelog/CHANGELOG.v0.md +16621 -0
  5. package/changelog/v0.json +6064 -0
  6. package/changelog/v1.json +3356 -0
  7. package/docs/changelog/2024-11-25-november-providers.mdx +19 -0
  8. package/docs/changelog/2024-11-25-november-providers.zh-CN.mdx +17 -0
  9. package/docs/changelog/index.json +12 -0
  10. package/docs/changelog/schema.json +70 -0
  11. package/docs/self-hosting/advanced/auth/clerk.mdx +19 -23
  12. package/docs/self-hosting/advanced/auth/clerk.zh-CN.mdx +5 -4
  13. package/docs/self-hosting/advanced/auth/next-auth/authelia.mdx +2 -4
  14. package/docs/self-hosting/advanced/auth/next-auth/authelia.zh-CN.mdx +3 -5
  15. package/docs/self-hosting/advanced/auth/next-auth/authentik.zh-CN.mdx +2 -2
  16. package/docs/self-hosting/advanced/auth/next-auth/casdoor.mdx +49 -44
  17. package/docs/self-hosting/advanced/auth/next-auth/casdoor.zh-CN.mdx +42 -41
  18. package/docs/self-hosting/advanced/auth/next-auth/logto.mdx +29 -21
  19. package/docs/self-hosting/advanced/auth/next-auth/logto.zh-CN.mdx +2 -1
  20. package/docs/self-hosting/advanced/auth.mdx +10 -10
  21. package/docs/self-hosting/advanced/auth.zh-CN.mdx +10 -10
  22. package/docs/self-hosting/advanced/feature-flags.zh-CN.mdx +1 -1
  23. package/docs/self-hosting/advanced/model-list.mdx +1 -1
  24. package/docs/self-hosting/advanced/s3/cloudflare-r2.mdx +17 -12
  25. package/docs/self-hosting/advanced/s3/tencent-cloud.mdx +33 -20
  26. package/docs/self-hosting/advanced/s3.mdx +31 -28
  27. package/docs/self-hosting/advanced/s3.zh-CN.mdx +1 -0
  28. package/docs/self-hosting/advanced/webrtc.mdx +1 -0
  29. package/docs/self-hosting/environment-variables/s3.mdx +4 -3
  30. package/docs/self-hosting/environment-variables/s3.zh-CN.mdx +1 -0
  31. package/docs/self-hosting/environment-variables.mdx +6 -9
  32. package/docs/self-hosting/platform/alibaba-cloud.mdx +1 -2
  33. package/docs/self-hosting/platform/alibaba-cloud.zh-CN.mdx +1 -2
  34. package/docs/self-hosting/platform/btpanel.mdx +7 -13
  35. package/docs/self-hosting/platform/btpanel.zh-CN.mdx +8 -13
  36. package/docs/self-hosting/platform/docker.zh-CN.mdx +2 -1
  37. package/docs/self-hosting/platform/netlify.zh-CN.mdx +2 -1
  38. package/docs/self-hosting/server-database/docker.mdx +18 -5
  39. package/docs/self-hosting/server-database/docker.zh-CN.mdx +9 -6
  40. package/docs/self-hosting/server-database/netlify.mdx +0 -1
  41. package/docs/self-hosting/server-database/railway.mdx +0 -1
  42. package/docs/self-hosting/server-database/repocloud.mdx +5 -2
  43. package/docs/self-hosting/server-database/repocloud.zh-CN.mdx +23 -3
  44. package/docs/self-hosting/server-database/sealos.mdx +3 -0
  45. package/docs/self-hosting/server-database/vercel.mdx +35 -32
  46. package/docs/self-hosting/server-database/vercel.zh-CN.mdx +25 -25
  47. package/docs/self-hosting/server-database/zeabur.mdx +2 -2
  48. package/docs/self-hosting/server-database/zeabur.zh-CN.mdx +3 -4
  49. package/docs/self-hosting/server-database.mdx +23 -8
  50. package/docs/self-hosting/start.mdx +8 -2
  51. package/docs/usage/features/database.zh-CN.mdx +1 -1
  52. package/docs/usage/foundation/text2image.mdx +1 -2
  53. package/docs/usage/foundation/text2image.zh-CN.mdx +1 -3
  54. package/docs/usage/providers/ai21.mdx +14 -15
  55. package/docs/usage/providers/ai21.zh-CN.mdx +1 -3
  56. package/docs/usage/providers/ai360.mdx +14 -15
  57. package/docs/usage/providers/ai360.zh-CN.mdx +1 -3
  58. package/docs/usage/providers/cloudflare.mdx +1 -1
  59. package/docs/usage/providers/fireworksai.mdx +19 -19
  60. package/docs/usage/providers/fireworksai.zh-CN.mdx +1 -3
  61. package/docs/usage/providers/github.mdx +22 -21
  62. package/docs/usage/providers/github.zh-CN.mdx +6 -6
  63. package/docs/usage/providers/hunyuan.mdx +17 -18
  64. package/docs/usage/providers/hunyuan.zh-CN.mdx +1 -3
  65. package/docs/usage/providers/siliconcloud.mdx +14 -15
  66. package/docs/usage/providers/siliconcloud.zh-CN.mdx +3 -5
  67. package/docs/usage/providers/spark.mdx +17 -18
  68. package/docs/usage/providers/spark.zh-CN.mdx +1 -3
  69. package/docs/usage/providers/upstage.mdx +14 -15
  70. package/docs/usage/providers/upstage.zh-CN.mdx +1 -3
  71. package/docs/usage/providers/wenxin.mdx +17 -18
  72. package/docs/usage/providers/wenxin.zh-CN.mdx +1 -3
  73. package/docs/usage/providers/zeroone.mdx +2 -2
  74. package/locales/ar/chat.json +7 -0
  75. package/locales/ar/common.json +2 -0
  76. package/locales/ar/models.json +24 -0
  77. package/locales/ar/providers.json +3 -0
  78. package/locales/ar/setting.json +5 -0
  79. package/locales/ar/thread.json +5 -0
  80. package/locales/bg-BG/chat.json +7 -0
  81. package/locales/bg-BG/common.json +2 -0
  82. package/locales/bg-BG/models.json +24 -0
  83. package/locales/bg-BG/providers.json +3 -0
  84. package/locales/bg-BG/setting.json +5 -0
  85. package/locales/bg-BG/thread.json +5 -0
  86. package/locales/de-DE/chat.json +7 -0
  87. package/locales/de-DE/common.json +2 -0
  88. package/locales/de-DE/models.json +24 -0
  89. package/locales/de-DE/providers.json +3 -0
  90. package/locales/de-DE/setting.json +5 -0
  91. package/locales/de-DE/thread.json +5 -0
  92. package/locales/en-US/chat.json +7 -0
  93. package/locales/en-US/common.json +2 -0
  94. package/locales/en-US/models.json +24 -0
  95. package/locales/en-US/providers.json +3 -0
  96. package/locales/en-US/setting.json +5 -0
  97. package/locales/en-US/thread.json +5 -0
  98. package/locales/es-ES/chat.json +7 -0
  99. package/locales/es-ES/common.json +2 -0
  100. package/locales/es-ES/models.json +24 -0
  101. package/locales/es-ES/providers.json +3 -0
  102. package/locales/es-ES/setting.json +5 -0
  103. package/locales/es-ES/thread.json +5 -0
  104. package/locales/fa-IR/chat.json +7 -0
  105. package/locales/fa-IR/common.json +2 -0
  106. package/locales/fa-IR/models.json +24 -0
  107. package/locales/fa-IR/providers.json +3 -0
  108. package/locales/fa-IR/setting.json +5 -0
  109. package/locales/fa-IR/thread.json +5 -0
  110. package/locales/fr-FR/chat.json +7 -0
  111. package/locales/fr-FR/common.json +2 -0
  112. package/locales/fr-FR/models.json +24 -0
  113. package/locales/fr-FR/providers.json +3 -0
  114. package/locales/fr-FR/setting.json +5 -0
  115. package/locales/fr-FR/thread.json +5 -0
  116. package/locales/it-IT/chat.json +7 -0
  117. package/locales/it-IT/common.json +2 -0
  118. package/locales/it-IT/models.json +24 -0
  119. package/locales/it-IT/providers.json +3 -0
  120. package/locales/it-IT/setting.json +5 -0
  121. package/locales/it-IT/thread.json +5 -0
  122. package/locales/ja-JP/chat.json +7 -0
  123. package/locales/ja-JP/common.json +2 -0
  124. package/locales/ja-JP/models.json +24 -0
  125. package/locales/ja-JP/providers.json +3 -0
  126. package/locales/ja-JP/setting.json +5 -0
  127. package/locales/ja-JP/thread.json +5 -0
  128. package/locales/ko-KR/chat.json +7 -0
  129. package/locales/ko-KR/common.json +2 -0
  130. package/locales/ko-KR/models.json +24 -0
  131. package/locales/ko-KR/providers.json +3 -0
  132. package/locales/ko-KR/setting.json +5 -0
  133. package/locales/ko-KR/thread.json +5 -0
  134. package/locales/nl-NL/chat.json +7 -0
  135. package/locales/nl-NL/common.json +2 -0
  136. package/locales/nl-NL/models.json +24 -0
  137. package/locales/nl-NL/providers.json +3 -0
  138. package/locales/nl-NL/setting.json +5 -0
  139. package/locales/nl-NL/thread.json +5 -0
  140. package/locales/pl-PL/chat.json +7 -0
  141. package/locales/pl-PL/common.json +2 -0
  142. package/locales/pl-PL/models.json +24 -0
  143. package/locales/pl-PL/providers.json +3 -0
  144. package/locales/pl-PL/setting.json +5 -0
  145. package/locales/pl-PL/thread.json +5 -0
  146. package/locales/pt-BR/chat.json +7 -0
  147. package/locales/pt-BR/common.json +2 -0
  148. package/locales/pt-BR/models.json +24 -0
  149. package/locales/pt-BR/providers.json +3 -0
  150. package/locales/pt-BR/setting.json +5 -0
  151. package/locales/pt-BR/thread.json +5 -0
  152. package/locales/ru-RU/chat.json +7 -0
  153. package/locales/ru-RU/common.json +2 -0
  154. package/locales/ru-RU/models.json +24 -0
  155. package/locales/ru-RU/providers.json +3 -0
  156. package/locales/ru-RU/setting.json +5 -0
  157. package/locales/ru-RU/thread.json +5 -0
  158. package/locales/tr-TR/chat.json +7 -0
  159. package/locales/tr-TR/common.json +2 -0
  160. package/locales/tr-TR/models.json +24 -0
  161. package/locales/tr-TR/providers.json +3 -0
  162. package/locales/tr-TR/setting.json +5 -0
  163. package/locales/tr-TR/thread.json +5 -0
  164. package/locales/vi-VN/chat.json +7 -0
  165. package/locales/vi-VN/common.json +2 -0
  166. package/locales/vi-VN/models.json +24 -0
  167. package/locales/vi-VN/providers.json +3 -0
  168. package/locales/vi-VN/setting.json +5 -0
  169. package/locales/vi-VN/thread.json +5 -0
  170. package/locales/zh-CN/chat.json +7 -0
  171. package/locales/zh-CN/common.json +2 -0
  172. package/locales/zh-CN/models.json +24 -0
  173. package/locales/zh-CN/providers.json +3 -0
  174. package/locales/zh-CN/setting.json +5 -0
  175. package/locales/zh-CN/thread.json +5 -0
  176. package/locales/zh-TW/chat.json +7 -0
  177. package/locales/zh-TW/common.json +2 -0
  178. package/locales/zh-TW/models.json +24 -0
  179. package/locales/zh-TW/providers.json +3 -0
  180. package/locales/zh-TW/setting.json +5 -0
  181. package/locales/zh-TW/thread.json +5 -0
  182. package/package.json +6 -1
  183. package/scripts/changelogWorkflow/buildStaticChangelog.ts +135 -0
  184. package/scripts/changelogWorkflow/const.ts +11 -0
  185. package/scripts/changelogWorkflow/index.ts +10 -0
  186. package/src/app/(main)/chat/(workspace)/@conversation/default.tsx +2 -0
  187. package/src/app/(main)/chat/(workspace)/@conversation/features/ChatHydration/index.tsx +11 -2
  188. package/src/{features → app/(main)/chat/(workspace)/@conversation/features}/ChatInput/Desktop/Footer/index.tsx +7 -9
  189. package/src/app/(main)/chat/(workspace)/@conversation/features/ChatInput/Desktop/index.tsx +7 -2
  190. package/src/app/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/Thread.tsx +62 -0
  191. package/src/app/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/ThreadItem.tsx +68 -0
  192. package/src/app/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/index.tsx +62 -2
  193. package/src/app/(main)/chat/(workspace)/@conversation/features/ThreadHydration.tsx +47 -0
  194. package/src/app/(main)/chat/(workspace)/@portal/_layout/Desktop.tsx +3 -2
  195. package/src/app/(main)/chat/(workspace)/@portal/_layout/Mobile.tsx +47 -6
  196. package/src/app/(main)/chat/(workspace)/@topic/features/SkeletonList.tsx +3 -2
  197. package/src/app/(main)/chat/(workspace)/@topic/features/TopicListContent/ByTimeMode/index.tsx +10 -3
  198. package/src/app/(main)/chat/(workspace)/@topic/features/TopicListContent/FlatMode/index.tsx +1 -1
  199. package/src/app/(main)/chat/(workspace)/@topic/features/TopicListContent/ThreadItem/Content.tsx +164 -0
  200. package/src/app/(main)/chat/(workspace)/@topic/features/TopicListContent/ThreadItem/index.tsx +98 -0
  201. package/src/app/(main)/chat/(workspace)/@topic/features/TopicListContent/{TopicItem.tsx → TopicItem/index.tsx} +33 -22
  202. package/src/app/(main)/chat/(workspace)/_layout/Desktop/Portal.tsx +12 -5
  203. package/src/app/(main)/chat/(workspace)/_layout/Mobile/index.tsx +1 -2
  204. package/src/const/message.ts +2 -0
  205. package/src/const/settings/systemAgent.ts +1 -0
  206. package/src/database/server/migrations/0012_add_thread.sql +39 -0
  207. package/src/database/server/migrations/meta/0012_snapshot.json +3671 -0
  208. package/src/database/server/migrations/meta/_journal.json +7 -0
  209. package/src/database/server/models/_template.ts +2 -2
  210. package/src/database/server/models/message.ts +1 -0
  211. package/src/database/server/models/thread.ts +79 -0
  212. package/src/database/server/schemas/lobechat/message.ts +2 -1
  213. package/src/database/server/schemas/lobechat/relations.ts +13 -1
  214. package/src/database/server/schemas/lobechat/topic.ts +30 -1
  215. package/src/database/server/utils/idGenerator.ts +1 -0
  216. package/src/features/ChatInput/ActionBar/Token/TokenTag.tsx +6 -4
  217. package/src/features/ChatInput/ActionBar/Token/index.tsx +24 -5
  218. package/src/features/ChatInput/ActionBar/config.ts +3 -2
  219. package/src/features/ChatInput/Desktop/index.tsx +15 -7
  220. package/src/features/ChatInput/Mobile/index.tsx +4 -4
  221. package/src/features/Conversation/Actions/Assistant.tsx +24 -5
  222. package/src/features/Conversation/Actions/User.tsx +21 -4
  223. package/src/features/Conversation/Actions/index.ts +1 -66
  224. package/src/features/Conversation/Messages/{Tool → Assistant/ToolCallItem}/Inspector/index.tsx +3 -1
  225. package/src/features/Conversation/Messages/{Tool/index.tsx → Assistant/ToolCallItem/Tool.tsx} +10 -11
  226. package/src/features/Conversation/Messages/Assistant/ToolCallItem/index.tsx +5 -3
  227. package/src/features/Conversation/Messages/Assistant/index.tsx +22 -14
  228. package/src/features/Conversation/Messages/index.ts +0 -2
  229. package/src/features/Conversation/components/AutoScroll.tsx +1 -1
  230. package/src/features/Conversation/components/ChatItem/ActionsBar.tsx +79 -5
  231. package/src/features/Conversation/components/ChatItem/InPortalThreadContext.ts +3 -0
  232. package/src/features/Conversation/components/ChatItem/index.tsx +16 -5
  233. package/src/features/Conversation/components/MarkdownElements/LobeArtifact/Render/index.tsx +9 -1
  234. package/src/features/Conversation/components/ThreadDivider/index.tsx +19 -0
  235. package/src/features/Conversation/hooks/useChatListActionsBar.tsx +19 -4
  236. package/src/features/Portal/Thread/Chat/ChatInput/Footer.tsx +90 -0
  237. package/src/features/Portal/Thread/Chat/ChatInput/TextArea.tsx +30 -0
  238. package/src/features/Portal/Thread/Chat/ChatInput/index.tsx +66 -0
  239. package/src/features/Portal/Thread/Chat/ChatInput/useSend.ts +50 -0
  240. package/src/features/Portal/Thread/Chat/ChatItem.tsx +62 -0
  241. package/src/features/Portal/Thread/Chat/ChatList.tsx +49 -0
  242. package/src/features/Portal/Thread/Chat/ThreadDivider/index.tsx +19 -0
  243. package/src/features/Portal/Thread/Chat/index.tsx +28 -0
  244. package/src/features/Portal/Thread/Header/Active.tsx +35 -0
  245. package/src/features/Portal/Thread/Header/New.tsx +37 -0
  246. package/src/features/Portal/Thread/Header/Title.tsx +18 -0
  247. package/src/features/Portal/Thread/Header/index.tsx +20 -0
  248. package/src/features/Portal/Thread/hook.ts +8 -0
  249. package/src/features/Portal/Thread/index.ts +12 -0
  250. package/src/features/Portal/router.tsx +2 -1
  251. package/src/hooks/useFetchTopics.ts +7 -1
  252. package/src/locales/default/chat.ts +8 -1
  253. package/src/locales/default/common.ts +3 -0
  254. package/src/locales/default/index.ts +2 -0
  255. package/src/locales/default/setting.ts +5 -0
  256. package/src/locales/default/thread.ts +5 -0
  257. package/src/server/routers/lambda/index.ts +2 -0
  258. package/src/server/routers/lambda/thread.ts +83 -0
  259. package/src/services/thread.ts +54 -0
  260. package/src/store/chat/initialState.ts +3 -0
  261. package/src/store/chat/selectors.ts +2 -1
  262. package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChat.test.ts +1 -1
  263. package/src/store/chat/slices/aiChat/actions/__tests__/rag.test.ts +1 -1
  264. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +31 -8
  265. package/src/store/chat/slices/aiChat/actions/rag.ts +1 -1
  266. package/src/store/chat/slices/message/selectors.test.ts +3 -3
  267. package/src/store/chat/slices/message/selectors.ts +50 -29
  268. package/src/store/chat/slices/plugin/action.ts +26 -8
  269. package/src/store/chat/slices/portal/action.ts +1 -0
  270. package/src/store/chat/slices/portal/initialState.ts +1 -0
  271. package/src/store/chat/slices/portal/selectors/thread.ts +17 -0
  272. package/src/store/chat/slices/portal/selectors.ts +2 -0
  273. package/src/store/chat/slices/thread/action.ts +326 -0
  274. package/src/store/chat/slices/thread/initialState.ts +34 -0
  275. package/src/store/chat/slices/thread/reducer.ts +48 -0
  276. package/src/store/chat/slices/thread/selectors/index.ts +202 -0
  277. package/src/store/chat/slices/thread/selectors/util.ts +22 -0
  278. package/src/store/chat/slices/topic/action.ts +5 -1
  279. package/src/store/chat/store.ts +5 -2
  280. package/src/store/global/initialState.ts +4 -0
  281. package/src/store/global/selectors.ts +4 -0
  282. package/src/store/user/slices/settings/selectors/systemAgent.ts +2 -0
  283. package/src/types/message/index.ts +17 -1
  284. package/src/types/topic/index.ts +1 -0
  285. package/src/types/topic/thread.ts +42 -0
  286. package/src/types/user/settings/systemAgent.ts +1 -0
  287. package/src/app/(main)/chat/(workspace)/@portal/features/Header.tsx +0 -11
  288. package/src/app/(main)/chat/(workspace)/_layout/Mobile/PortalModal.tsx +0 -35
  289. /package/src/{features → app/(main)/chat/(workspace)/@conversation/features}/ChatInput/Desktop/Footer/SendMore.tsx +0 -0
  290. /package/src/{features → app/(main)/chat/(workspace)/@conversation/features}/ChatInput/Desktop/Footer/ShortcutHint.tsx +0 -0
  291. /package/src/app/(main)/chat/(workspace)/@topic/features/TopicListContent/{DefaultContent.tsx → TopicItem/DefaultContent.tsx} +0 -0
  292. /package/src/app/(main)/chat/(workspace)/@topic/features/TopicListContent/{TopicContent.tsx → TopicItem/TopicContent.tsx} +0 -0
  293. /package/src/features/Conversation/Messages/{Tool → Assistant/ToolCallItem}/Inspector/PluginResultJSON.tsx +0 -0
  294. /package/src/features/Conversation/Messages/{Tool → Assistant/ToolCallItem}/Inspector/Settings.tsx +0 -0
  295. /package/src/features/Conversation/Messages/{Tool → Assistant/ToolCallItem}/Inspector/style.ts +0 -0
package/docs/usage/providers/upstage.mdx
@@ -7,13 +7,12 @@ tags:
  - API Key
  - Web UI
  ---
+
  # Using Upstage in LobeChat

  <Image
- cover
- src={
- 'https://github.com/user-attachments/assets/14696698-03f7-4856-b36c-9a53997eb12c'
- }
+ cover
+ src={'https://github.com/user-attachments/assets/14696698-03f7-4856-b36c-9a53997eb12c'}
  />

  [Upstage](https://www.upstage.ai/) is a platform that offers AI models and services, focusing on applications in natural language processing and machine learning. It allows developers to access its powerful AI capabilities through APIs, supporting various tasks such as text generation and conversational systems.
@@ -29,9 +28,9 @@ This article will guide you on how to use Upstage in LobeChat.
  - Copy and save the generated API key

  <Image
- alt={'Save the API Key'}
- inStep
- src={'https://github.com/user-attachments/assets/8a0225e0-16ed-40ce-9cd5-553dda561679'}
+ alt={'Save the API Key'}
+ inStep
+ src={'https://github.com/user-attachments/assets/8a0225e0-16ed-40ce-9cd5-553dda561679'}
  />

  ### Step 2: Configure Upstage in LobeChat
@@ -40,25 +39,25 @@ src={'https://github.com/user-attachments/assets/8a0225e0-16ed-40ce-9cd5-553dda5
  - Locate the `Upstage` settings under `Language Models`

  <Image
- alt={'Enter API Key'}
- inStep
- src={'https://github.com/user-attachments/assets/e89d2a56-4bf0-4bff-ac39-0d44789fa858'}
+ alt={'Enter API Key'}
+ inStep
+ src={'https://github.com/user-attachments/assets/e89d2a56-4bf0-4bff-ac39-0d44789fa858'}
  />

  - Enter the obtained API key
  - Select an Upstage model for your AI assistant to start the conversation

  <Image
- alt={'Select Upstage Model and Start Conversation'}
- inStep
- src={'https://github.com/user-attachments/assets/88e14294-20a6-47c6-981e-fb65453b57cd'}
+ alt={'Select Upstage Model and Start Conversation'}
+ inStep
+ src={'https://github.com/user-attachments/assets/88e14294-20a6-47c6-981e-fb65453b57cd'}
  />

  <Callout type={'warning'}>
- Please note that you may need to pay the API service provider for usage. Refer to Upstage's pricing policy for more information.
+ Please note that you may need to pay the API service provider for usage. Refer to Upstage's
+ pricing policy for more information.
  </Callout>

  </Steps>

  You can now use the models provided by Upstage for conversations in LobeChat.
-

package/docs/usage/providers/upstage.zh-CN.mdx
@@ -12,9 +12,7 @@ tags:

  <Image
  cover
- src={
- 'https://github.com/user-attachments/assets/14696698-03f7-4856-b36c-9a53997eb12c'
- }
+ src={'https://github.com/user-attachments/assets/14696698-03f7-4856-b36c-9a53997eb12c'}
  />

  [Upstage](https://www.upstage.ai/) 是一个提供 AI 模型和服务的平台,专注于自然语言处理和机器学习应用。它允许开发者通过 API 接入其强大的 AI 功能,支持多种任务,如文本生成、对话系统等。

package/docs/usage/providers/wenxin.mdx
@@ -10,13 +10,12 @@ tags:
  - API密钥
  - Web UI
  ---
+
  # Using Wenxin Qianfan in LobeChat

  <Image
- cover
- src={
- 'https://github.com/user-attachments/assets/e43dacf6-313e-499c-8888-f1065c53e424'
- }
+ cover
+ src={'https://github.com/user-attachments/assets/e43dacf6-313e-499c-8888-f1065c53e424'}
  />

  [Wenxin Qianfan](https://qianfan.cloud.baidu.com/) is an artificial intelligence large language model platform launched by Baidu, supporting a variety of application scenarios, including literary creation, commercial copywriting, and mathematical logic reasoning. The platform features deep semantic understanding and generation capabilities across modalities and languages, and it is widely utilized in fields such as search Q&A, content creation, and smart office applications.
@@ -32,18 +31,18 @@ This article will guide you on how to use Wenxin Qianfan in LobeChat.
  - Create an application

  <Image
- alt={'Create Application'}
- inStep
- src={'https://github.com/user-attachments/assets/927b1040-e23f-4919-92e2-80a400db8327'}
+ alt={'Create Application'}
+ inStep
+ src={'https://github.com/user-attachments/assets/927b1040-e23f-4919-92e2-80a400db8327'}
  />

  - Enter the `Security Authentication` -> `Access Key` management page from the user account menu
  - Copy the `Access Key` and `Secret Key`, and store them safely

  <Image
- alt={'Save Keys'}
- inStep
- src={'https://github.com/user-attachments/assets/bb9dadd3-7e9c-45fd-8c56-553ab7287453'}
+ alt={'Save Keys'}
+ inStep
+ src={'https://github.com/user-attachments/assets/bb9dadd3-7e9c-45fd-8c56-553ab7287453'}
  />

  ### Step 2: Configure Wenxin Qianfan in LobeChat
@@ -52,25 +51,25 @@ src={'https://github.com/user-attachments/assets/bb9dadd3-7e9c-45fd-8c56-553ab72
  - Locate the settings for `Wenxin Qianfan` under `Language Model`

  <Image
- alt={'Enter API Keys'}
- inStep
- src={'https://github.com/user-attachments/assets/e3995de7-38d9-489b-80a2-434477018469'}
+ alt={'Enter API Keys'}
+ inStep
+ src={'https://github.com/user-attachments/assets/e3995de7-38d9-489b-80a2-434477018469'}
  />

  - Enter the obtained `Access Key` and `Secret Key`
  - Select a Wenxin Qianfan model for your AI assistant to start interacting

  <Image
- alt={'Select Wenxin Qianfan Model and Start Chat'}
- inStep
- src={'https://github.com/user-attachments/assets/b6e6a3eb-13c6-46f0-9c7c-69a20deae30f'}
+ alt={'Select Wenxin Qianfan Model and Start Chat'}
+ inStep
+ src={'https://github.com/user-attachments/assets/b6e6a3eb-13c6-46f0-9c7c-69a20deae30f'}
  />

  <Callout type={'warning'}>
- During usage, you may need to pay the API service provider. Please refer to Wenxin Qianfan's relevant fee policy.
+ During usage, you may need to pay the API service provider. Please refer to Wenxin Qianfan's
+ relevant fee policy.
  </Callout>

  </Steps>

  You can now use the models provided by Wenxin Qianfan for conversations in LobeChat.
-

package/docs/usage/providers/wenxin.zh-CN.mdx
@@ -13,9 +13,7 @@ tags:

  <Image
  cover
- src={
- 'https://github.com/user-attachments/assets/e43dacf6-313e-499c-8888-f1065c53e424'
- }
+ src={'https://github.com/user-attachments/assets/e43dacf6-313e-499c-8888-f1065c53e424'}
  />

  [文心千帆](https://qianfan.cloud.baidu.com/)是百度推出的一个人工智能大语言模型平台,支持多种应用场景,包括文学创作、商业文案生成、数理逻辑推算等。该平台具备跨模态、跨语言的深度语义理解与生成能力,广泛应用于搜索问答、内容创作和智能办公等领域。

package/docs/usage/providers/zeroone.mdx
@@ -75,8 +75,8 @@ This document will guide you on how to use 01 AI in LobeChat:
  />

  <Callout type={'warning'}>
- During usage, you may need to pay the API service provider. Please refer to 01 AI's relevant
- fee policies.
+ During usage, you may need to pay the API service provider. Please refer to 01 AI's relevant fee
+ policies.
  </Callout>

  </Steps>

package/locales/ar/chat.json
@@ -8,6 +8,7 @@
  "agents": "مساعد",
  "artifact": {
  "generating": "جاري الإنشاء",
+ "inThread": "لا يمكن عرض الموضوعات الفرعية، يرجى التبديل إلى منطقة المحادثة الرئيسية لفتحها",
  "thinking": "جاري التفكير",
  "thought": "عملية التفكير",
  "unknownTitle": "عمل غير مسمى"
@@ -65,6 +66,7 @@
  },
  "messageAction": {
  "delAndRegenerate": "حذف وإعادة الإنشاء",
+ "deleteDisabledByThreads": "يوجد موضوعات فرعية، لا يمكن الحذف",
  "regenerate": "إعادة الإنشاء"
  },
  "newAgent": "مساعد جديد",
@@ -121,6 +123,11 @@
  "loading": "جارٍ التعرف...",
  "prettifying": "جارٍ التجميل..."
  },
+ "thread": {
+ "divider": "موضوع فرعي",
+ "threadMessageCount": "{{messageCount}} رسالة",
+ "title": "موضوع فرعي"
+ },
  "tokenDetails": {
  "chats": "رسائل المحادثة",
  "historySummary": "ملخص التاريخ",

package/locales/ar/common.json
@@ -16,6 +16,8 @@
  "back": "عودة",
  "batchDelete": "حذف دفعة",
  "blog": "مدونة المنتجات",
+ "branching": "إنشاء موضوع فرعي",
+ "branchingDisable": "ميزة \"الموضوع الفرعي\" متاحة فقط في إصدار الخادم. إذا كنت بحاجة إلى هذه الميزة، يرجى التبديل إلى وضع نشر الخادم أو استخدام LobeChat Cloud",
  "cancel": "إلغاء",
  "changelog": "سجل التغييرات",
  "close": "إغلاق",

package/locales/ar/models.json
@@ -176,6 +176,15 @@
  "Qwen/Qwen2.5-Math-72B-Instruct": {
  "description": "Qwen2.5-Math يركز على حل المشكلات في مجال الرياضيات، ويقدم إجابات احترافية للأسئلة الصعبة."
  },
+ "Qwen2-72B-Instruct": {
+ "description": "Qwen2 هو أحدث سلسلة من نموذج Qwen، ويدعم سياقًا يصل إلى 128 ألف، مقارنةً بأفضل النماذج مفتوحة المصدر الحالية، يتفوق Qwen2-72B بشكل ملحوظ في فهم اللغة الطبيعية والمعرفة والترميز والرياضيات والقدرات متعددة اللغات."
+ },
+ "Qwen2-7B-Instruct": {
+ "description": "Qwen2 هو أحدث سلسلة من نموذج Qwen، قادر على التفوق على النماذج مفتوحة المصدر ذات الحجم المماثل أو حتى النماذج الأكبر حجمًا، حقق Qwen2 7B مزايا ملحوظة في عدة تقييمات، خاصة في فهم الترميز والصينية."
+ },
+ "Qwen2.5-72B-Instruct": {
+ "description": "يدعم Qwen2.5-72B-Instruct سياقًا يصل إلى 16 ألف، وينتج نصوصًا طويلة تتجاوز 8 آلاف. يدعم استدعاء الوظائف والتفاعل السلس مع الأنظمة الخارجية، مما يعزز بشكل كبير من المرونة وقابلية التوسع. لقد زادت معرفة النموذج بشكل ملحوظ، كما تحسنت قدراته في الترميز والرياضيات بشكل كبير، ويدعم أكثر من 29 لغة."
+ },
  "SenseChat": {
  "description": "نموذج الإصدار الأساسي (V4)، بطول سياق 4K، يمتلك قدرات قوية وعامة."
  },
@@ -206,6 +215,9 @@
  "Tencent/Hunyuan-A52B-Instruct": {
  "description": "Hunyuan-Large هو أكبر نموذج MoE مفتوح المصدر في الصناعة، مع 389 مليار إجمالي عدد المعلمات و52 مليار عدد المعلمات النشطة."
  },
+ "Yi-34B-Chat": {
+ "description": "Yi-1.5-34B، مع الحفاظ على القدرات اللغوية العامة الممتازة للنموذج الأصلي، تم تدريبه بشكل إضافي على 500 مليار توكن عالي الجودة، مما أدى إلى تحسين كبير في المنطق الرياضي وقدرات الترميز."
+ },
  "abab5.5-chat": {
  "description": "موجه لمشاهد الإنتاجية، يدعم معالجة المهام المعقدة وتوليد النصوص بكفاءة، مناسب للتطبيقات في المجالات المهنية."
  },
@@ -368,6 +380,9 @@
  "codegeex-4": {
  "description": "CodeGeeX-4 هو مساعد برمجي قوي، يدعم مجموعة متنوعة من لغات البرمجة في الإجابة الذكية وإكمال الشيفرة، مما يعزز من كفاءة التطوير."
  },
+ "codegeex4-all-9b": {
+ "description": "CodeGeeX4-ALL-9B هو نموذج توليد كود متعدد اللغات، يدعم مجموعة شاملة من الوظائف بما في ذلك إكمال الشيفرات والتوليد، ومفسر الشيفرات، والبحث عبر الإنترنت، واستدعاء الوظائف، وأسئلة وأجوبة على مستوى المستودع، مما يغطي جميع سيناريوهات تطوير البرمجيات. إنه أحد أفضل نماذج توليد الشيفرات بأقل من 10 مليار معلمة."
+ },
  "codegemma": {
  "description": "CodeGemma هو نموذج لغوي خفيف الوزن مخصص لمهام البرمجة المختلفة، يدعم التكرار السريع والتكامل."
  },
@@ -422,6 +437,9 @@
  "deepseek-chat": {
  "description": "نموذج مفتوح المصدر الجديد الذي يجمع بين القدرات العامة وقدرات البرمجة، لا يحتفظ فقط بالقدرات الحوارية العامة لنموذج الدردشة الأصلي وقدرات معالجة الشيفرة القوية لنموذج Coder، بل يتماشى أيضًا بشكل أفضل مع تفضيلات البشر. بالإضافة إلى ذلك، حقق DeepSeek-V2.5 تحسينات كبيرة في مهام الكتابة، واتباع التعليمات، وغيرها من المجالات."
  },
+ "deepseek-coder-33B-instruct": {
+ "description": "DeepSeek Coder 33B هو نموذج لغة برمجية، تم تدريبه على 20 تريليون بيانات، منها 87% كود و13% لغات صينية وإنجليزية. يقدم النموذج حجم نافذة 16K ومهام ملء الفراغ، مما يوفر إكمال الشيفرات على مستوى المشروع ووظائف ملء المقاطع."
+ },
  "deepseek-coder-v2": {
  "description": "DeepSeek Coder V2 هو نموذج شيفرة مفتوح المصدر من نوع خبير مختلط، يقدم أداءً ممتازًا في مهام الشيفرة، ويضاهي GPT4-Turbo."
  },
@@ -476,6 +494,9 @@
  "gemini-exp-1114": {
  "description": "جيمني إكسب 1114 هو أحدث نموذج ذكاء اصطناعي متعدد الوسائط تجريبي من Google، يتميز بقدرة معالجة سريعة، ويدعم إدخالات النصوص والصور والفيديو، مما يجعله مناسبًا للتوسع الفعال في مهام متعددة."
  },
+ "gemini-exp-1121": {
+ "description": "جمني إكسب 1121 هو أحدث نموذج تجريبي متعدد الوسائط من جوجل، يتمتع بقدرة معالجة سريعة، ويدعم إدخال النصوص والصور والفيديو، مما يجعله مناسبًا للتوسع الفعال في مجموعة متنوعة من المهام."
+ },
  "gemma-7b-it": {
  "description": "Gemma 7B مناسب لمعالجة المهام المتوسطة والصغيرة، ويجمع بين الكفاءة من حيث التكلفة."
  },
@@ -503,6 +524,9 @@
  "glm-4-0520": {
  "description": "GLM-4-0520 هو أحدث إصدار من النموذج، مصمم للمهام المعقدة والمتنوعة، ويظهر أداءً ممتازًا."
  },
+ "glm-4-9b-chat": {
+ "description": "يظهر GLM-4-9B-Chat أداءً عاليًا في مجالات متعددة مثل الدلالات والرياضيات والاستدلال والترميز والمعرفة. كما أنه مزود بقدرات تصفح الويب وتنفيذ الشيفرات واستدعاء الأدوات المخصصة واستدلال النصوص الطويلة. يدعم 26 لغة بما في ذلك اليابانية والكورية والألمانية."
+ },
  "glm-4-air": {
  "description": "GLM-4-Air هو إصدار ذو قيمة عالية، يتمتع بأداء قريب من GLM-4، ويقدم سرعة عالية وسعرًا معقولًا."
  },

package/locales/ar/providers.json
@@ -22,6 +22,9 @@
  "fireworksai": {
  "description": "Fireworks AI هي شركة رائدة في تقديم خدمات نماذج اللغة المتقدمة، تركز على استدعاء الوظائف والمعالجة متعددة الوسائط. نموذجها الأحدث Firefunction V2 مبني على Llama-3، مُحسّن لاستدعاء الوظائف، والحوار، واتباع التعليمات. يدعم نموذج اللغة البصرية FireLLaVA-13B إدخال الصور والنصوص المختلطة. تشمل النماذج البارزة الأخرى سلسلة Llama وسلسلة Mixtral، مما يوفر دعمًا فعالًا لاتباع التعليمات وتوليدها بلغات متعددة."
  },
+ "giteeai": {
+ "description": "خادم واجهات برمجة التطبيقات gitee منظمة العفو الدولية يوفر نموذج كبير المنطق API خدمة منظمة العفو الدولية للمطورين ."
+ },
  "github": {
  "description": "مع نماذج GitHub، يمكن للمطورين أن يصبحوا مهندسي ذكاء اصطناعي ويبنون باستخدام نماذج الذكاء الاصطناعي الرائدة في الصناعة."
  },

package/locales/ar/setting.json
@@ -389,6 +389,11 @@
  "modelDesc": "نموذج مخصص لتحسين أسئلة المستخدمين",
  "title": "إعادة صياغة سؤال قاعدة المعرفة"
  },
+ "thread": {
+ "label": "نموذج تسمية الموضوعات الفرعية",
+ "modelDesc": "نموذج مخصص لإعادة تسمية الموضوعات الفرعية تلقائيًا",
+ "title": "تسمية الموضوعات الفرعية تلقائيًا"
+ },
  "title": "مساعد النظام",
  "topic": {
  "label": "نموذج تسمية الموضوع",

package/locales/ar/thread.json
@@ -0,0 +1,5 @@
+ {
+ "actions": {
+ "confirmRemoveThread": "سيتم حذف هذا الموضوع الفرعي، ولن يمكن استعادته بعد الحذف، يرجى توخي الحذر."
+ }
+ }

package/locales/bg-BG/chat.json
@@ -8,6 +8,7 @@
  "agents": "Асистент",
  "artifact": {
  "generating": "Генериране",
+ "inThread": "Не можете да видите в подтемата, моля, превключете към основната дискусия.",
  "thinking": "В процес на мислене",
  "thought": "Процес на мислене",
  "unknownTitle": "Неназован артефакт"
@@ -65,6 +66,7 @@
  },
  "messageAction": {
  "delAndRegenerate": "Изтрий и прегенерирай",
+ "deleteDisabledByThreads": "Съществуват подтеми, не можете да изтриете.",
  "regenerate": "Прегенерирай"
  },
  "newAgent": "Нов агент",
@@ -121,6 +123,11 @@
  "loading": "Разпознаване...",
  "prettifying": "Изглаждане..."
  },
+ "thread": {
+ "divider": "Подтема",
+ "threadMessageCount": "{{messageCount}} съобщения",
+ "title": "Подтема"
+ },
  "tokenDetails": {
  "chats": "Чат съобщения",
  "historySummary": "Историческо резюме",

package/locales/bg-BG/common.json
@@ -16,6 +16,8 @@
  "back": "Назад",
  "batchDelete": "Пакетно изтриване",
  "blog": "Продуктов блог",
+ "branching": "Създаване на подтема",
+ "branchingDisable": "Функцията „подтема“ е налична само в сървърната версия. Ако искате да използвате тази функция, моля, превключете на режим на сървърно разполагане или използвайте LobeChat Cloud.",
  "cancel": "Отказ",
  "changelog": "Дневник на промените",
  "close": "Затвори",

package/locales/bg-BG/models.json
@@ -176,6 +176,15 @@
  "Qwen/Qwen2.5-Math-72B-Instruct": {
  "description": "Qwen2.5-Math се фокусира върху решаването на математически проблеми, предоставяйки професионални отговори на трудни задачи."
  },
+ "Qwen2-72B-Instruct": {
+ "description": "Qwen2 е най-новата серия на модела Qwen, поддържаща 128k контекст. В сравнение с текущите най-добри отворени модели, Qwen2-72B значително надминава водещите модели в области като разбиране на естествен език, знания, код, математика и многоезичност."
+ },
+ "Qwen2-7B-Instruct": {
+ "description": "Qwen2 е най-новата серия на модела Qwen, способен да надмине оптималните отворени модели с равен размер или дори по-големи модели. Qwen2 7B постига значителни предимства в множество тестове, особено в разбирането на код и китайския език."
+ },
+ "Qwen2.5-72B-Instruct": {
+ "description": "Qwen2.5-72B-Instruct поддържа 16k контекст, генерира дълги текстове над 8K. Поддържа функция за извикване и безпроблемна интеграция с външни системи, значително увеличаваща гъвкавостта и разширяемостта. Моделът има значително увеличени знания и значително подобрени способности в кодиране и математика, с поддръжка на над 29 езика."
+ },
  "SenseChat": {
  "description": "Основна версия на модела (V4), с контекстна дължина 4K, с мощни общи способности."
  },
@@ -206,6 +215,9 @@
  "Tencent/Hunyuan-A52B-Instruct": {
  "description": "Hunyuan-Large е най-голямата отворена трансформаторна архитектура MoE в индустрията, с общо 3890 милиарда параметри и 52 милиарда активни параметри."
  },
+ "Yi-34B-Chat": {
+ "description": "Yi-1.5-34B значително подобрява математическата логика и способностите в кодирането, като запазва отличните общи езикови способности на оригиналната серия модели, чрез инкрементално обучение с 500 милиарда висококачествени токени."
+ },
  "abab5.5-chat": {
  "description": "Насочена към производствени сценарии, поддържаща обработка на сложни задачи и ефективно генериране на текст, подходяща за професионални приложения."
  },
@@ -368,6 +380,9 @@
  "codegeex-4": {
  "description": "CodeGeeX-4 е мощен AI помощник за програмиране, който поддържа интелигентни въпроси и отговори и автоматично допълване на код за различни програмни езици, повишавайки ефективността на разработката."
  },
+ "codegeex4-all-9b": {
+ "description": "CodeGeeX4-ALL-9B е многоезичен модел за генериране на код, който предлага пълни функции, включително попълване и генериране на код, интерпретатор на код, уеб търсене, извикване на функции и въпроси и отговори на ниво хранилище, обхващащ различни сценарии на софтуерна разработка. Това е водещ модел за генериране на код с по-малко от 10B параметри."
+ },
  "codegemma": {
  "description": "CodeGemma е лек езиков модел, специализиран в различни програмни задачи, поддържащ бърза итерация и интеграция."
  },
@@ -422,6 +437,9 @@
  "deepseek-chat": {
  "description": "Новооткритият отворен модел, който съчетава общи и кодови способности, не само запазва общата диалогова способност на оригиналния Chat модел и мощната способност за обработка на код на Coder модела, но също така по-добре се съгласува с човешките предпочитания. Освен това, DeepSeek-V2.5 постигна значителни подобрения в писателските задачи, следването на инструкции и много други области."
  },
+ "deepseek-coder-33B-instruct": {
+ "description": "DeepSeek Coder 33B е модел за кодови езици, обучен на 20 трилиона данни, от които 87% са код и 13% са на китайски и английски. Моделът въвежда размер на прозореца от 16K и задачи за попълване, предоставяйки функции за попълване на код на проектно ниво и попълване на фрагменти."
+ },
  "deepseek-coder-v2": {
  "description": "DeepSeek Coder V2 е отворен хибриден експертен кодов модел, който се представя отлично в кодовите задачи, сравним с GPT4-Turbo."
  },
@@ -476,6 +494,9 @@
  "gemini-exp-1114": {
  "description": "Gemini Exp 1114 е най-новият експериментален многомодален AI модел на Google, който предлага бърза обработка и поддържа вход от текст, изображения и видео, подходящ за ефективно разширение на множество задачи."
  },
+ "gemini-exp-1121": {
+ "description": "Gemini Exp 1121 е най-новият експериментален мултимодален AI модел на Google, който предлага бърза обработка и поддържа текстови, изображенчески и видео входове, подходящ за ефективно разширяване на множество задачи."
+ },
  "gemma-7b-it": {
  "description": "Gemma 7B е подходяща за обработка на средни и малки задачи, съчетаваща икономичност."
  },
@@ -503,6 +524,9 @@
  "glm-4-0520": {
  "description": "GLM-4-0520 е най-новата версия на модела, проектирана за високо сложни и разнообразни задачи, с отлични резултати."
  },
+ "glm-4-9b-chat": {
+ "description": "GLM-4-9B-Chat показва висока производителност в множество области, включително семантика, математика, логическо разсъждение, код и знания. Също така предлага уеб браузинг, изпълнение на код, извикване на персонализирани инструменти и разсъждение върху дълги текстове. Поддържа 26 езика, включително японски, корейски и немски."
+ },
  "glm-4-air": {
  "description": "GLM-4-Air е икономичен вариант, с производителност близка до GLM-4, предлагаща бързина и достъпна цена."
  },

package/locales/bg-BG/providers.json
@@ -22,6 +22,9 @@
  "fireworksai": {
  "description": "Fireworks AI е водещ доставчик на напреднали езикови модели, фокусирайки се върху извикване на функции и мултимодална обработка. Най-новият им модел Firefunction V2, базиран на Llama-3, е оптимизиран за извикване на функции, диалози и следване на инструкции. Визуалният езиков модел FireLLaVA-13B поддържа смесени входове от изображения и текст. Други забележителни модели включват серията Llama и серията Mixtral, предлагащи ефективна поддръжка за многоезично следване на инструкции и генериране."
  },
+ "giteeai": {
+ "description": "Безсървърният API на Гити ИИ предоставя на разработчиците ИИ услугата за извеждане на голям модел."
+ },
  "github": {
  "description": "С моделите на GitHub разработчиците могат да станат AI инженери и да изграждат с водещите AI модели в индустрията."
  },

package/locales/bg-BG/setting.json
@@ -389,6 +389,11 @@
  "modelDesc": "Определя модел за оптимизиране на запитванията на потребителите",
  "title": "Пренаписване на въпроси от базата данни"
  },
+ "thread": {
+ "label": "Модел за именуване на подтеми",
+ "modelDesc": "Модел, предназначен за автоматично преименуване на подтеми",
+ "title": "Автоматично именуване на подтеми"
+ },
  "title": "Системен асистент",
  "topic": {
  "label": "Модел за именуване на теми",

package/locales/bg-BG/thread.json
@@ -0,0 +1,5 @@
+ {
+ "actions": {
+ "confirmRemoveThread": "Ще изтриете тази подтема. След изтриването ѝ няма да може да бъде възстановена, моля, бъдете внимателни."
+ }
+ }

package/locales/de-DE/chat.json
@@ -8,6 +8,7 @@
  "agents": "Assistent",
  "artifact": {
  "generating": "Wird generiert",
+ "inThread": "In Unterthemen kann nicht angezeigt werden, bitte wechseln Sie zum Hauptdiskussionsbereich.",
  "thinking": "Denken",
  "thought": "Denkenprozess",
  "unknownTitle": "Unbenanntes Werk"
@@ -65,6 +66,7 @@
  },
  "messageAction": {
  "delAndRegenerate": "Löschen und neu generieren",
+ "deleteDisabledByThreads": "Es gibt Unterthemen, die Löschung ist nicht möglich.",
  "regenerate": "Neu generieren"
  },
  "newAgent": "Neuer Assistent",
@@ -121,6 +123,11 @@
  "loading": "Erkenne...",
  "prettifying": "Verschönern..."
  },
+ "thread": {
+ "divider": "Unterthema",
+ "threadMessageCount": "{{messageCount}} Nachrichten",
+ "title": "Unterthema"
+ },
  "tokenDetails": {
  "chats": "Chats",
  "historySummary": "Historische Zusammenfassung",

package/locales/de-DE/common.json
@@ -16,6 +16,8 @@
  "back": "Zurück",
  "batchDelete": "Massenlöschung",
  "blog": "Produkt-Blog",
+ "branching": "Unterthema erstellen",
+ "branchingDisable": "Die Funktion „Unterthema“ ist nur in der Serverversion verfügbar. Wenn Sie diese Funktion benötigen, wechseln Sie bitte in den Serverbereitstellungsmodus oder verwenden Sie LobeChat Cloud.",
  "cancel": "Abbrechen",
  "changelog": "Änderungsprotokoll",
  "close": "Schließen",

package/locales/de-DE/models.json
@@ -176,6 +176,15 @@
  "Qwen/Qwen2.5-Math-72B-Instruct": {
  "description": "Qwen2.5-Math konzentriert sich auf die Problemlösung im Bereich Mathematik und bietet professionelle Lösungen für schwierige Aufgaben."
  },
+ "Qwen2-72B-Instruct": {
+ "description": "Qwen2 ist die neueste Reihe des Qwen-Modells, das 128k Kontext unterstützt. Im Vergleich zu den derzeit besten Open-Source-Modellen übertrifft Qwen2-72B in den Bereichen natürliche Sprachverständnis, Wissen, Code, Mathematik und Mehrsprachigkeit deutlich die führenden Modelle."
+ },
+ "Qwen2-7B-Instruct": {
+ "description": "Qwen2 ist die neueste Reihe des Qwen-Modells, das in der Lage ist, die besten Open-Source-Modelle ähnlicher Größe oder sogar größerer Modelle zu übertreffen. Qwen2 7B hat in mehreren Bewertungen signifikante Vorteile erzielt, insbesondere im Bereich Code und Verständnis der chinesischen Sprache."
+ },
+ "Qwen2.5-72B-Instruct": {
+ "description": "Qwen2.5-72B-Instruct unterstützt 16k Kontext und generiert lange Texte über 8K. Es unterstützt Funktionsaufrufe und nahtlose Interaktionen mit externen Systemen, was die Flexibilität und Skalierbarkeit erheblich verbessert. Das Wissen des Modells hat deutlich zugenommen, und die Codierungs- und mathematischen Fähigkeiten wurden erheblich verbessert, mit Unterstützung für über 29 Sprachen."
+ },
  "SenseChat": {
  "description": "Basisversion des Modells (V4) mit 4K Kontextlänge, die über starke allgemeine Fähigkeiten verfügt."
  },
@@ -206,6 +215,9 @@
  "Tencent/Hunyuan-A52B-Instruct": {
  "description": "Hunyuan-Large ist das größte Open-Source-Transformer-Architektur MoE-Modell der Branche mit insgesamt 389 Milliarden Parametern und 52 Milliarden aktiven Parametern."
  },
+ "Yi-34B-Chat": {
+ "description": "Yi-1.5-34B hat die hervorragenden allgemeinen Sprachfähigkeiten des ursprünglichen Modells beibehalten und durch inkrementelles Training von 500 Milliarden hochwertigen Tokens die mathematische Logik und Codierungsfähigkeiten erheblich verbessert."
+ },
  "abab5.5-chat": {
  "description": "Für produktivitätsorientierte Szenarien konzipiert, unterstützt es die Verarbeitung komplexer Aufgaben und die effiziente Textgenerierung, geeignet für professionelle Anwendungen."
  },
@@ -368,6 +380,9 @@
  "codegeex-4": {
  "description": "CodeGeeX-4 ist ein leistungsstarker AI-Programmierassistent, der intelligente Fragen und Codevervollständigung in verschiedenen Programmiersprachen unterstützt und die Entwicklungseffizienz steigert."
  },
+ "codegeex4-all-9b": {
+ "description": "CodeGeeX4-ALL-9B ist ein mehrsprachiges Code-Generierungsmodell, das umfassende Funktionen unterstützt, darunter Code-Vervollständigung und -Generierung, Code-Interpreter, Websuche, Funktionsaufrufe und repository-weite Codefragen und -antworten, und deckt verschiedene Szenarien der Softwareentwicklung ab. Es ist das führende Code-Generierungsmodell mit weniger als 10B Parametern."
+ },
  "codegemma": {
  "description": "CodeGemma ist ein leichtgewichtiges Sprachmodell, das speziell für verschiedene Programmieraufgaben entwickelt wurde und schnelle Iterationen und Integrationen unterstützt."
  },
@@ -422,6 +437,9 @@
  "deepseek-chat": {
  "description": "Ein neues Open-Source-Modell, das allgemeine und Codefähigkeiten kombiniert. Es bewahrt nicht nur die allgemeinen Dialogfähigkeiten des ursprünglichen Chat-Modells und die leistungsstarken Codeverarbeitungsfähigkeiten des Coder-Modells, sondern stimmt auch besser mit menschlichen Präferenzen überein. Darüber hinaus hat DeepSeek-V2.5 in mehreren Bereichen wie Schreibaufgaben und Befolgung von Anweisungen erhebliche Verbesserungen erzielt."
  },
+ "deepseek-coder-33B-instruct": {
+ "description": "DeepSeek Coder 33B ist ein Code-Sprachmodell, das auf 20 Billionen Daten trainiert wurde, von denen 87 % Code und 13 % in Chinesisch und Englisch sind. Das Modell führt eine Fenstergröße von 16K und Aufgaben zur Lückenergänzung ein und bietet projektbezogene Code-Vervollständigung und Fragmentfüllfunktionen."
+ },
  "deepseek-coder-v2": {
  "description": "DeepSeek Coder V2 ist ein Open-Source-Mischexperten-Code-Modell, das in Codeaufgaben hervorragende Leistungen erbringt und mit GPT4-Turbo vergleichbar ist."
  },
@@ -476,6 +494,9 @@
  "gemini-exp-1114": {
  "description": "Gemini Exp 1114 ist Googles neuestes experimentelles multimodales KI-Modell, das über eine schnelle Verarbeitungskapazität verfügt und Texte, Bilder und Videoeingaben unterstützt, um eine effiziente Skalierung für verschiedene Aufgaben zu ermöglichen."
  },
+ "gemini-exp-1121": {
+ "description": "Gemini Exp 1121 ist Googles neuestes experimentelles multimodales KI-Modell, das über eine schnelle Verarbeitungskapazität verfügt und Texte, Bilder und Videoeingaben unterstützt, um eine effiziente Skalierung für verschiedene Aufgaben zu ermöglichen."
+ },
  "gemma-7b-it": {
  "description": "Gemma 7B eignet sich für die Verarbeitung von mittelgroßen Aufgaben und bietet ein gutes Kosten-Nutzen-Verhältnis."
  },
@@ -503,6 +524,9 @@
  "glm-4-0520": {
  "description": "GLM-4-0520 ist die neueste Modellversion, die für hochkomplexe und vielfältige Aufgaben konzipiert wurde und hervorragende Leistungen zeigt."
  },
+ "glm-4-9b-chat": {
+ "description": "GLM-4-9B-Chat zeigt in den Bereichen Semantik, Mathematik, Schlussfolgerungen, Code und Wissen eine hohe Leistung. Es verfügt auch über Funktionen wie Web-Browsing, Code-Ausführung, benutzerdefinierte Toolaufrufe und langes Textverständnis. Es unterstützt 26 Sprachen, darunter Japanisch, Koreanisch und Deutsch."
+ },
  "glm-4-air": {
  "description": "GLM-4-Air ist eine kosteneffiziente Version, die in der Leistung nahe am GLM-4 liegt und schnelle Geschwindigkeiten zu einem erschwinglichen Preis bietet."
  },

package/locales/de-DE/providers.json
@@ -22,6 +22,9 @@
  "fireworksai": {
  "description": "Fireworks AI ist ein führender Anbieter von fortschrittlichen Sprachmodellen, der sich auf Funktionsaufrufe und multimodale Verarbeitung spezialisiert hat. Ihr neuestes Modell, Firefunction V2, basiert auf Llama-3 und ist für Funktionsaufrufe, Dialoge und Befehlsbefolgung optimiert. Das visuelle Sprachmodell FireLLaVA-13B unterstützt gemischte Eingaben von Bildern und Text. Weitere bemerkenswerte Modelle sind die Llama-Serie und die Mixtral-Serie, die effiziente mehrsprachige Befehlsbefolgung und Generierungsunterstützung bieten."
  },
+ "giteeai": {
+ "description": "Die serverlose API von Gitee AI bietet KI-Entwicklern einen sofort einsatzbereiten großen Modell-Inferenz-API-Service."
+ },
  "github": {
  "description": "Mit GitHub-Modellen können Entwickler zu KI-Ingenieuren werden und mit den führenden KI-Modellen der Branche arbeiten."
  },

package/locales/de-DE/setting.json
@@ -389,6 +389,11 @@
  "modelDesc": "Modell zur Optimierung der Benutzeranfragen",
  "title": "Wiederformulierung von Fragen aus der Wissensdatenbank"
  },
+ "thread": {
+ "label": "Unterthema-Namensmodell",
+ "modelDesc": "Modell zur automatischen Umbenennung von Unterthemen",
+ "title": "Automatische Benennung von Unterthemen"
+ },
  "title": "Systemassistent",
  "topic": {
  "label": "Themenbenennungsmodell",

package/locales/de-DE/thread.json
@@ -0,0 +1,5 @@
+ {
+ "actions": {
+ "confirmRemoveThread": "Sie sind dabei, dieses Unterthema zu löschen. Nach dem Löschen kann es nicht wiederhergestellt werden. Bitte seien Sie vorsichtig."
+ }
+ }

package/locales/en-US/chat.json
@@ -8,6 +8,7 @@
  "agents": "Assistants",
  "artifact": {
  "generating": "Generating",
+ "inThread": "Cannot view in subtopic, please switch to the main conversation area to open",
  "thinking": "Thinking",
  "thought": "Thought Process",
  "unknownTitle": "Untitled Work"
@@ -65,6 +66,7 @@
  },
  "messageAction": {
  "delAndRegenerate": "Delete and Regenerate",
+ "deleteDisabledByThreads": "There are subtopics, deletion is not allowed",
  "regenerate": "Regenerate"
  },
  "newAgent": "New Assistant",
@@ -121,6 +123,11 @@
  "loading": "Recognizing...",
  "prettifying": "Polishing..."
  },
+ "thread": {
+ "divider": "Subtopic",
+ "threadMessageCount": "{{messageCount}} messages",
+ "title": "Subtopic"
+ },
  "tokenDetails": {
  "chats": "Chat Messages",
  "historySummary": "History Summary",