@lobehub/chat 1.91.0 → 1.91.2

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (292)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/docs/changelog/2023-09-09-plugin-system.mdx +2 -3
  4. package/docs/changelog/2023-11-14-gpt4-vision.mdx +4 -6
  5. package/docs/changelog/2023-11-19-tts-stt.mdx +2 -3
  6. package/docs/changelog/2023-12-22-dalle-3.mdx +2 -5
  7. package/docs/changelog/2023-12-22-dalle-3.zh-CN.mdx +2 -2
  8. package/docs/changelog/2024-02-08-sso-oauth.mdx +2 -2
  9. package/docs/changelog/2024-06-19-lobe-chat-v1.mdx +2 -3
  10. package/docs/changelog/2024-06-19-lobe-chat-v1.zh-CN.mdx +2 -2
  11. package/docs/changelog/2024-07-19-gpt-4o-mini.mdx +2 -3
  12. package/docs/changelog/2024-07-19-gpt-4o-mini.zh-CN.mdx +2 -2
  13. package/docs/changelog/2024-08-02-lobe-chat-database-docker.mdx +2 -3
  14. package/docs/changelog/2024-08-21-file-upload-and-knowledge-base.mdx +4 -5
  15. package/docs/changelog/2024-09-13-openai-o1-models.mdx +2 -2
  16. package/docs/changelog/2024-09-20-artifacts.mdx +2 -3
  17. package/docs/changelog/2024-09-20-artifacts.zh-CN.mdx +2 -2
  18. package/docs/changelog/2024-10-27-pin-assistant.mdx +2 -3
  19. package/docs/changelog/2024-11-06-share-text-json.mdx +2 -4
  20. package/docs/changelog/2024-11-06-share-text-json.zh-CN.mdx +2 -2
  21. package/docs/changelog/2024-11-25-november-providers.mdx +2 -2
  22. package/docs/changelog/2024-11-27-forkable-chat.mdx +2 -2
  23. package/docs/changelog/2025-01-03-user-profile.mdx +2 -2
  24. package/docs/changelog/2025-01-22-new-ai-provider.mdx +2 -2
  25. package/docs/changelog/2025-02-02-deepseek-r1.mdx +4 -4
  26. package/docs/development/basic/add-new-authentication-providers.zh-CN.mdx +1 -2
  27. package/docs/development/basic/chat-api.mdx +2 -4
  28. package/docs/development/basic/chat-api.zh-CN.mdx +2 -4
  29. package/docs/development/internationalization/internationalization-implementation.mdx +10 -10
  30. package/docs/development/internationalization/internationalization-implementation.zh-CN.mdx +10 -10
  31. package/docs/self-hosting/advanced/analytics.mdx +2 -2
  32. package/docs/self-hosting/advanced/auth/clerk.mdx +2 -2
  33. package/docs/self-hosting/advanced/auth/next-auth/auth0.mdx +2 -3
  34. package/docs/self-hosting/advanced/auth/next-auth/authelia.mdx +2 -3
  35. package/docs/self-hosting/advanced/auth/next-auth/authentik.mdx +2 -3
  36. package/docs/self-hosting/advanced/auth/next-auth/casdoor.mdx +4 -7
  37. package/docs/self-hosting/advanced/auth/next-auth/casdoor.zh-CN.mdx +0 -3
  38. package/docs/self-hosting/advanced/auth/next-auth/cloudflare-zero-trust.mdx +2 -3
  39. package/docs/self-hosting/advanced/auth/next-auth/cloudflare-zero-trust.zh-CN.mdx +2 -2
  40. package/docs/self-hosting/advanced/auth/next-auth/github.mdx +2 -3
  41. package/docs/self-hosting/advanced/auth/next-auth/keycloak.mdx +12 -6
  42. package/docs/self-hosting/advanced/auth/next-auth/keycloak.zh-CN.mdx +5 -1
  43. package/docs/self-hosting/advanced/auth/next-auth/logto.mdx +8 -14
  44. package/docs/self-hosting/advanced/auth/next-auth/logto.zh-CN.mdx +6 -12
  45. package/docs/self-hosting/advanced/auth/next-auth/microsoft-entra-id.mdx +2 -3
  46. package/docs/self-hosting/advanced/auth/next-auth/wechat.mdx +2 -2
  47. package/docs/self-hosting/advanced/auth/next-auth/zitadel.mdx +2 -3
  48. package/docs/self-hosting/advanced/auth.mdx +2 -3
  49. package/docs/self-hosting/advanced/desktop.mdx +2 -1
  50. package/docs/self-hosting/advanced/desktop.zh-CN.mdx +1 -3
  51. package/docs/self-hosting/advanced/feature-flags.mdx +2 -3
  52. package/docs/self-hosting/advanced/knowledge-base.mdx +4 -3
  53. package/docs/self-hosting/advanced/model-list.mdx +11 -10
  54. package/docs/self-hosting/advanced/model-list.zh-CN.mdx +10 -9
  55. package/docs/self-hosting/advanced/observability/langfuse.mdx +2 -3
  56. package/docs/self-hosting/advanced/online-search.mdx +11 -10
  57. package/docs/self-hosting/advanced/online-search.zh-CN.mdx +7 -7
  58. package/docs/self-hosting/advanced/s3/tencent-cloud.mdx +2 -2
  59. package/docs/self-hosting/advanced/settings-url-share.mdx +2 -3
  60. package/docs/self-hosting/advanced/upstream-sync.mdx +2 -3
  61. package/docs/self-hosting/advanced/webrtc.mdx +2 -2
  62. package/docs/self-hosting/environment-variables/analytics.mdx +2 -3
  63. package/docs/self-hosting/environment-variables/auth.mdx +2 -3
  64. package/docs/self-hosting/environment-variables/basic.mdx +4 -5
  65. package/docs/self-hosting/environment-variables/basic.zh-CN.mdx +2 -2
  66. package/docs/self-hosting/environment-variables/model-provider.mdx +2 -3
  67. package/docs/self-hosting/environment-variables/s3.mdx +3 -5
  68. package/docs/self-hosting/environment-variables.mdx +2 -2
  69. package/docs/self-hosting/examples/azure-openai.mdx +2 -2
  70. package/docs/self-hosting/examples/ollama.mdx +2 -3
  71. package/docs/self-hosting/faq/no-v1-suffix.mdx +4 -4
  72. package/docs/self-hosting/faq/proxy-with-unable-to-verify-leaf-signature.mdx +2 -3
  73. package/docs/self-hosting/platform/alibaba-cloud.mdx +2 -3
  74. package/docs/self-hosting/platform/btpanel.mdx +5 -5
  75. package/docs/self-hosting/platform/btpanel.zh-CN.mdx +4 -3
  76. package/docs/self-hosting/platform/docker-compose.mdx +2 -3
  77. package/docs/self-hosting/platform/docker-compose.zh-CN.mdx +0 -2
  78. package/docs/self-hosting/platform/docker.mdx +2 -2
  79. package/docs/self-hosting/platform/netlify.mdx +2 -4
  80. package/docs/self-hosting/platform/netlify.zh-CN.mdx +2 -2
  81. package/docs/self-hosting/platform/railway.mdx +2 -3
  82. package/docs/self-hosting/platform/repocloud.mdx +2 -3
  83. package/docs/self-hosting/platform/sealos.mdx +2 -2
  84. package/docs/self-hosting/platform/tencentcloud-lighthouse.mdx +2 -3
  85. package/docs/self-hosting/platform/vercel.mdx +2 -3
  86. package/docs/self-hosting/platform/zeabur.mdx +2 -2
  87. package/docs/self-hosting/server-database/docker-compose.mdx +65 -44
  88. package/docs/self-hosting/server-database/docker-compose.zh-CN.mdx +48 -55
  89. package/docs/self-hosting/server-database/docker.mdx +2 -2
  90. package/docs/self-hosting/server-database/docker.zh-CN.mdx +2 -2
  91. package/docs/self-hosting/server-database/dokploy.mdx +4 -5
  92. package/docs/self-hosting/server-database/dokploy.zh-CN.mdx +137 -138
  93. package/docs/self-hosting/server-database/netlify.mdx +2 -2
  94. package/docs/self-hosting/server-database/netlify.zh-CN.mdx +2 -2
  95. package/docs/self-hosting/server-database/railway.mdx +2 -2
  96. package/docs/self-hosting/server-database/repocloud.mdx +2 -2
  97. package/docs/self-hosting/server-database/sealos.mdx +4 -5
  98. package/docs/self-hosting/server-database/sealos.zh-CN.mdx +18 -20
  99. package/docs/self-hosting/server-database/vercel.mdx +5 -3
  100. package/docs/self-hosting/server-database/vercel.zh-CN.mdx +2 -2
  101. package/docs/self-hosting/server-database/zeabur.mdx +2 -2
  102. package/docs/self-hosting/server-database.mdx +1 -1
  103. package/docs/self-hosting/server-database.zh-CN.mdx +2 -1
  104. package/docs/self-hosting/start.mdx +2 -2
  105. package/docs/self-hosting/start.zh-CN.mdx +2 -2
  106. package/docs/usage/agents/agent-organization.mdx +2 -2
  107. package/docs/usage/agents/concepts.mdx +4 -5
  108. package/docs/usage/agents/concepts.zh-CN.mdx +2 -2
  109. package/docs/usage/agents/custom-agent.mdx +3 -4
  110. package/docs/usage/agents/custom-agent.zh-CN.mdx +1 -1
  111. package/docs/usage/agents/model.mdx +5 -5
  112. package/docs/usage/agents/model.zh-CN.mdx +3 -5
  113. package/docs/usage/agents/prompt.mdx +4 -5
  114. package/docs/usage/agents/topics.mdx +3 -4
  115. package/docs/usage/agents/topics.zh-CN.mdx +1 -1
  116. package/docs/usage/features/agent-market.mdx +3 -11
  117. package/docs/usage/features/agent-market.zh-CN.mdx +2 -7
  118. package/docs/usage/features/artifacts.mdx +2 -2
  119. package/docs/usage/features/auth.mdx +2 -3
  120. package/docs/usage/features/cot.mdx +2 -2
  121. package/docs/usage/features/database.mdx +2 -2
  122. package/docs/usage/features/knowledge-base.mdx +4 -3
  123. package/docs/usage/features/knowledge-base.zh-CN.mdx +2 -1
  124. package/docs/usage/features/local-llm.mdx +2 -3
  125. package/docs/usage/features/mobile.mdx +2 -2
  126. package/docs/usage/features/more.mdx +2 -3
  127. package/docs/usage/features/multi-ai-providers.mdx +2 -3
  128. package/docs/usage/features/plugin-system.mdx +3 -11
  129. package/docs/usage/features/plugin-system.zh-CN.mdx +1 -8
  130. package/docs/usage/features/pwa.mdx +4 -4
  131. package/docs/usage/features/pwa.zh-CN.mdx +2 -1
  132. package/docs/usage/features/text-to-image.mdx +3 -11
  133. package/docs/usage/features/text-to-image.zh-CN.mdx +3 -10
  134. package/docs/usage/features/theme.mdx +2 -3
  135. package/docs/usage/features/tts.mdx +3 -11
  136. package/docs/usage/features/tts.zh-CN.mdx +1 -8
  137. package/docs/usage/features/vision.mdx +3 -11
  138. package/docs/usage/features/vision.zh-CN.mdx +1 -8
  139. package/docs/usage/foundation/basic.mdx +2 -3
  140. package/docs/usage/foundation/share.mdx +2 -3
  141. package/docs/usage/foundation/text2image.mdx +2 -2
  142. package/docs/usage/foundation/translate.mdx +2 -2
  143. package/docs/usage/foundation/tts-stt.mdx +2 -2
  144. package/docs/usage/foundation/vision.mdx +2 -3
  145. package/docs/usage/plugins/basic-usage.mdx +2 -3
  146. package/docs/usage/plugins/custom-plugin.mdx +2 -2
  147. package/docs/usage/plugins/development.mdx +2 -4
  148. package/docs/usage/plugins/store.mdx +2 -2
  149. package/docs/usage/providers/ai21.mdx +2 -2
  150. package/docs/usage/providers/anthropic.mdx +2 -3
  151. package/docs/usage/providers/anthropic.zh-CN.mdx +2 -2
  152. package/docs/usage/providers/azure.mdx +2 -3
  153. package/docs/usage/providers/azureai.mdx +4 -2
  154. package/docs/usage/providers/azureai.zh-CN.mdx +2 -1
  155. package/docs/usage/providers/baichuan.mdx +2 -3
  156. package/docs/usage/providers/bedrock.mdx +2 -3
  157. package/docs/usage/providers/cloudflare.mdx +3 -2
  158. package/docs/usage/providers/deepseek.mdx +2 -2
  159. package/docs/usage/providers/fireworksai.mdx +2 -2
  160. package/docs/usage/providers/giteeai.mdx +2 -2
  161. package/docs/usage/providers/github.mdx +1 -1
  162. package/docs/usage/providers/github.zh-CN.mdx +1 -1
  163. package/docs/usage/providers/google.mdx +2 -3
  164. package/docs/usage/providers/groq.mdx +2 -2
  165. package/docs/usage/providers/hunyuan.mdx +2 -2
  166. package/docs/usage/providers/infiniai.zh-CN.mdx +3 -1
  167. package/docs/usage/providers/internlm.mdx +2 -2
  168. package/docs/usage/providers/jina.mdx +4 -3
  169. package/docs/usage/providers/jina.zh-CN.mdx +2 -2
  170. package/docs/usage/providers/lmstudio.mdx +2 -2
  171. package/docs/usage/providers/lmstudio.zh-CN.mdx +2 -4
  172. package/docs/usage/providers/minimax.mdx +2 -3
  173. package/docs/usage/providers/minimax.zh-CN.mdx +2 -2
  174. package/docs/usage/providers/mistral.mdx +2 -3
  175. package/docs/usage/providers/modelscope.mdx +4 -0
  176. package/docs/usage/providers/modelscope.zh-CN.mdx +4 -0
  177. package/docs/usage/providers/moonshot.mdx +2 -3
  178. package/docs/usage/providers/novita.mdx +2 -3
  179. package/docs/usage/providers/novita.zh-CN.mdx +2 -2
  180. package/docs/usage/providers/nvidia.mdx +3 -2
  181. package/docs/usage/providers/ollama/gemma.mdx +2 -3
  182. package/docs/usage/providers/ollama/gemma.zh-CN.mdx +2 -2
  183. package/docs/usage/providers/ollama.mdx +2 -2
  184. package/docs/usage/providers/openai.mdx +5 -5
  185. package/docs/usage/providers/openai.zh-CN.mdx +3 -3
  186. package/docs/usage/providers/openrouter.mdx +2 -3
  187. package/docs/usage/providers/perplexity.mdx +2 -2
  188. package/docs/usage/providers/ppio.mdx +5 -6
  189. package/docs/usage/providers/ppio.zh-CN.mdx +6 -6
  190. package/docs/usage/providers/qiniu.mdx +6 -6
  191. package/docs/usage/providers/qiniu.zh-CN.mdx +2 -1
  192. package/docs/usage/providers/qwen.mdx +2 -4
  193. package/docs/usage/providers/sambanova.mdx +2 -1
  194. package/docs/usage/providers/sensenova.mdx +2 -2
  195. package/docs/usage/providers/siliconcloud.mdx +2 -2
  196. package/docs/usage/providers/stepfun.mdx +2 -3
  197. package/docs/usage/providers/taichu.mdx +2 -3
  198. package/docs/usage/providers/togetherai.mdx +2 -2
  199. package/docs/usage/providers/vllm.mdx +15 -12
  200. package/docs/usage/providers/vllm.zh-CN.mdx +9 -7
  201. package/docs/usage/providers/volcengine.mdx +16 -14
  202. package/docs/usage/providers/wenxin.mdx +2 -2
  203. package/docs/usage/providers/xai.mdx +2 -2
  204. package/docs/usage/providers/zeroone.mdx +2 -3
  205. package/docs/usage/providers/zeroone.zh-CN.mdx +2 -2
  206. package/docs/usage/providers/zhipu.mdx +2 -3
  207. package/docs/usage/providers/zhipu.zh-CN.mdx +1 -1
  208. package/docs/usage/providers.mdx +2 -3
  209. package/docs/usage/start.mdx +2 -3
  210. package/docs/usage/tools-calling/anthropic.mdx +2 -2
  211. package/docs/usage/tools-calling/anthropic.zh-CN.mdx +2 -2
  212. package/docs/usage/tools-calling/google.mdx +2 -2
  213. package/docs/usage/tools-calling/google.zh-CN.mdx +4 -4
  214. package/docs/usage/tools-calling/groq.zh-CN.mdx +2 -2
  215. package/docs/usage/tools-calling/openai.mdx +2 -2
  216. package/docs/usage/tools-calling/openai.zh-CN.mdx +2 -2
  217. package/package.json +2 -2
  218. package/src/app/(backend)/middleware/auth/utils.ts +2 -1
  219. package/src/app/(backend)/webapi/user/avatar/[id]/[image]/route.ts +1 -1
  220. package/src/app/[variants]/(main)/repos/[id]/_layout/Mobile.tsx +7 -7
  221. package/src/config/aiModels/deepseek.ts +1 -0
  222. package/src/config/aiModels/hunyuan.ts +1 -0
  223. package/src/config/aiModels/mistral.ts +1 -2
  224. package/src/config/aiModels/novita.ts +23 -22
  225. package/src/config/aiModels/openrouter.ts +1 -0
  226. package/src/config/aiModels/qwen.ts +11 -11
  227. package/src/config/aiModels/siliconcloud.ts +7 -6
  228. package/src/config/aiModels/vertexai.ts +2 -2
  229. package/src/config/aiModels/wenxin.ts +1 -2
  230. package/src/config/modelProviders/baichuan.ts +3 -0
  231. package/src/config/modelProviders/fireworksai.ts +3 -0
  232. package/src/config/modelProviders/giteeai.ts +3 -0
  233. package/src/config/modelProviders/github.ts +1 -2
  234. package/src/config/modelProviders/groq.ts +0 -3
  235. package/src/config/modelProviders/hunyuan.ts +3 -0
  236. package/src/config/modelProviders/infiniai.ts +0 -3
  237. package/src/config/modelProviders/internlm.ts +3 -0
  238. package/src/config/modelProviders/minimax.ts +3 -4
  239. package/src/config/modelProviders/moonshot.ts +0 -7
  240. package/src/config/modelProviders/novita.ts +3 -0
  241. package/src/config/modelProviders/openrouter.ts +0 -4
  242. package/src/config/modelProviders/perplexity.ts +0 -3
  243. package/src/config/modelProviders/qiniu.ts +0 -3
  244. package/src/config/modelProviders/qwen.ts +0 -3
  245. package/src/config/modelProviders/sensenova.ts +3 -0
  246. package/src/config/modelProviders/siliconcloud.ts +0 -3
  247. package/src/config/modelProviders/spark.ts +0 -5
  248. package/src/config/modelProviders/stepfun.ts +3 -6
  249. package/src/config/modelProviders/taichu.ts +3 -0
  250. package/src/config/modelProviders/tencentcloud.ts +3 -0
  251. package/src/config/modelProviders/togetherai.ts +3 -0
  252. package/src/config/modelProviders/upstage.ts +3 -0
  253. package/src/config/modelProviders/wenxin.ts +3 -4
  254. package/src/config/modelProviders/xai.ts +0 -3
  255. package/src/config/modelProviders/zhipu.ts +3 -0
  256. package/src/database/migrations/meta/0014_snapshot.json +182 -539
  257. package/src/database/migrations/meta/0016_snapshot.json +182 -539
  258. package/src/database/repositories/dataImporter/__tests__/fixtures/with-client-id.json +13 -58
  259. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +1 -8
  260. package/src/features/ChatInput/ActionBar/Model/index.tsx +8 -16
  261. package/src/features/ChatInput/ActionBar/Search/Controls.tsx +4 -12
  262. package/src/features/ChatInput/ActionBar/Search/FCSearchModel.tsx +1 -7
  263. package/src/features/ChatInput/ActionBar/Search/index.tsx +2 -4
  264. package/src/features/ModelSwitchPanel/index.tsx +1 -4
  265. package/src/libs/model-runtime/anthropic/index.test.ts +4 -2
  266. package/src/libs/model-runtime/google/index.ts +30 -40
  267. package/src/libs/model-runtime/novita/__snapshots__/index.test.ts.snap +19 -1
  268. package/src/libs/model-runtime/novita/index.ts +14 -15
  269. package/src/libs/model-runtime/nvidia/index.ts +2 -21
  270. package/src/libs/model-runtime/openai/__snapshots__/index.test.ts.snap +39 -11
  271. package/src/libs/model-runtime/openai/index.ts +3 -38
  272. package/src/libs/model-runtime/openrouter/__snapshots__/index.test.ts.snap +3 -0
  273. package/src/libs/model-runtime/openrouter/index.ts +45 -54
  274. package/src/libs/model-runtime/qwen/index.ts +2 -45
  275. package/src/libs/model-runtime/siliconcloud/index.ts +2 -51
  276. package/src/libs/model-runtime/utils/modelParse.test.ts +761 -0
  277. package/src/libs/model-runtime/utils/modelParse.ts +186 -0
  278. package/src/libs/model-runtime/utils/streams/anthropic.ts +12 -11
  279. package/src/libs/model-runtime/utils/streams/openai.ts +6 -4
  280. package/src/libs/model-runtime/utils/streams/protocol.ts +1 -1
  281. package/src/libs/model-runtime/utils/streams/spark.test.ts +1 -1
  282. package/src/libs/model-runtime/utils/streams/spark.ts +1 -2
  283. package/src/libs/model-runtime/volcengine/index.ts +11 -0
  284. package/src/libs/model-runtime/zeroone/index.ts +2 -23
  285. package/src/libs/model-runtime/zhipu/index.ts +7 -34
  286. package/src/middleware.ts +1 -1
  287. package/src/server/services/user/index.ts +3 -4
  288. package/src/services/__tests__/assistant.test.ts +4 -6
  289. package/src/services/__tests__/tool.test.ts +3 -1
  290. package/src/store/user/slices/auth/selectors.ts +1 -1
  291. package/src/store/user/slices/common/action.test.ts +1 -1
  292. package/src/tools/web-browsing/index.ts +1 -7
package/src/libs/model-runtime/nvidia/index.ts

@@ -1,6 +1,5 @@
- import type { ChatModelCard } from '@/types/llm';
-
  import { ModelProvider } from '../types';
+ import { processMultiProviderModelList } from '../utils/modelParse';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface NvidiaModelCard {
@@ -13,28 +12,10 @@ export const LobeNvidiaAI = createOpenAICompatibleRuntime({
      chatCompletion: () => process.env.DEBUG_NVIDIA_CHAT_COMPLETION === '1',
    },
    models: async ({ client }) => {
-     const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
-
      const modelsPage = (await client.models.list()) as any;
      const modelList: NvidiaModelCard[] = modelsPage.data;

-     return modelList
-       .map((model) => {
-         const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
-           (m) => model.id.toLowerCase() === m.id.toLowerCase(),
-         );
-
-         return {
-           contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
-           displayName: knownModel?.displayName ?? undefined,
-           enabled: knownModel?.enabled || false,
-           functionCall: knownModel?.abilities?.functionCall || false,
-           id: model.id,
-           reasoning: knownModel?.abilities?.reasoning || false,
-           vision: knownModel?.abilities?.vision || false,
-         };
-       })
-       .filter(Boolean) as ChatModelCard[];
+     return processMultiProviderModelList(modelList);
    },
    provider: ModelProvider.Nvidia,
  });
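Across these provider runtimes the hand-rolled model mapping is replaced by a shared `processMultiProviderModelList` helper from the new `package/src/libs/model-runtime/utils/modelParse.ts` (+186 lines in this release, with its own tests). The helper's source is not part of the hunks shown here, so the following is only a rough sketch of the shape such a consolidated parser could take, inferred from the per-provider logic being removed (keyword-based capability detection plus a lookup against `LOBE_DEFAULT_MODEL_LIST`). The keyword tables and field defaults below are illustrative assumptions, not the package's actual code.

// Hypothetical sketch of a consolidated model-list parser; not the real modelParse.ts.
import type { ChatModelCard } from '@/types/llm';

interface RawModelCard {
  id: string;
}

// Illustrative capability keywords; the real helper may keep separate tables per provider.
const FUNCTION_CALL_KEYWORDS = ['gpt-4', '4o', 'qwen-max', 'glm-4'];
const VISION_KEYWORDS = ['vision', '-vl', '4o'];
const REASONING_KEYWORDS = ['o1', 'o3', 'r1', 'qwq', 'thinking'];

const hasKeyword = (id: string, keywords: string[]) =>
  keywords.some((keyword) => id.toLowerCase().includes(keyword));

export const processMultiProviderModelList = async (
  modelList: RawModelCard[],
): Promise<ChatModelCard[]> => {
  // Fall back to the built-in default model list for metadata such as display
  // names and context windows, as the removed per-provider code did.
  const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');

  return modelList.map((model) => {
    const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
      (m) => m.id.toLowerCase() === model.id.toLowerCase(),
    );

    return {
      contextWindowTokens: knownModel?.contextWindowTokens,
      // The updated snapshots suggest the display name now falls back to the raw id.
      displayName: knownModel?.displayName ?? model.id,
      enabled: knownModel?.enabled || false,
      functionCall:
        hasKeyword(model.id, FUNCTION_CALL_KEYWORDS) ||
        knownModel?.abilities?.functionCall ||
        false,
      id: model.id,
      maxOutput: knownModel?.maxOutput,
      reasoning:
        hasKeyword(model.id, REASONING_KEYWORDS) || knownModel?.abilities?.reasoning || false,
      vision: hasKeyword(model.id, VISION_KEYWORDS) || knownModel?.abilities?.vision || false,
    };
  });
};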
package/src/libs/model-runtime/openai/__snapshots__/index.test.ts.snap

@@ -8,15 +8,17 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": false,
    "id": "whisper-1",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
  {
    "contextWindowTokens": undefined,
-   "displayName": undefined,
+   "displayName": "davinci-002",
    "enabled": false,
    "functionCall": false,
    "id": "davinci-002",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -26,6 +28,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": true,
    "id": "gpt-3.5-turbo",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -35,24 +38,27 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": false,
    "id": "dall-e-2",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
  {
    "contextWindowTokens": undefined,
-   "displayName": undefined,
+   "displayName": "gpt-3.5-turbo-16k",
    "enabled": false,
    "functionCall": false,
    "id": "gpt-3.5-turbo-16k",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
  {
    "contextWindowTokens": undefined,
-   "displayName": undefined,
+   "displayName": "tts-1-hd-1106",
    "enabled": false,
    "functionCall": false,
    "id": "tts-1-hd-1106",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -62,15 +68,17 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": false,
    "id": "tts-1-hd",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
  {
    "contextWindowTokens": undefined,
-   "displayName": undefined,
+   "displayName": "gpt-3.5-turbo-16k-0613",
    "enabled": false,
    "functionCall": false,
    "id": "gpt-3.5-turbo-16k-0613",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -80,24 +88,27 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": false,
    "id": "text-embedding-3-large",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
  {
    "contextWindowTokens": undefined,
-   "displayName": undefined,
+   "displayName": "gpt-4-1106-vision-preview",
    "enabled": false,
    "functionCall": false,
    "id": "gpt-4-1106-vision-preview",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
  {
    "contextWindowTokens": undefined,
-   "displayName": undefined,
+   "displayName": "gpt-3.5-turbo-instruct-0914",
    "enabled": false,
    "functionCall": false,
    "id": "gpt-3.5-turbo-instruct-0914",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -107,6 +118,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": true,
    "id": "gpt-4-0125-preview",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -116,6 +128,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": true,
    "id": "gpt-4-turbo-preview",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -125,24 +138,27 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": false,
    "id": "gpt-3.5-turbo-instruct",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
  {
    "contextWindowTokens": undefined,
-   "displayName": undefined,
+   "displayName": "gpt-3.5-turbo-0301",
    "enabled": false,
    "functionCall": false,
    "id": "gpt-3.5-turbo-0301",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
  {
    "contextWindowTokens": undefined,
-   "displayName": undefined,
+   "displayName": "gpt-3.5-turbo-0613",
    "enabled": false,
    "functionCall": false,
    "id": "gpt-3.5-turbo-0613",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -152,6 +168,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": false,
    "id": "tts-1",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -161,6 +178,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": false,
    "id": "dall-e-3",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -170,6 +188,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": true,
    "id": "gpt-3.5-turbo-1106",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -179,24 +198,27 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": true,
    "id": "gpt-4-1106-preview",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
  {
    "contextWindowTokens": undefined,
-   "displayName": undefined,
+   "displayName": "babbage-002",
    "enabled": false,
    "functionCall": false,
    "id": "babbage-002",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
  {
    "contextWindowTokens": undefined,
-   "displayName": undefined,
+   "displayName": "tts-1-1106",
    "enabled": false,
    "functionCall": false,
    "id": "tts-1-1106",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -206,6 +228,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": false,
    "id": "gpt-4-vision-preview",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": true,
  },
@@ -215,6 +238,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": false,
    "id": "text-embedding-3-small",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -224,15 +248,17 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": true,
    "id": "gpt-4",
+   "maxOutput": 4096,
    "reasoning": false,
    "vision": true,
  },
  {
    "contextWindowTokens": undefined,
-   "displayName": undefined,
+   "displayName": "text-embedding-ada-002",
    "enabled": false,
    "functionCall": false,
    "id": "text-embedding-ada-002",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -242,6 +268,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": true,
    "id": "gpt-3.5-turbo-0125",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
@@ -251,6 +278,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
    "enabled": false,
    "functionCall": true,
    "id": "gpt-4-0613",
+   "maxOutput": undefined,
    "reasoning": false,
    "vision": false,
  },
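Two behavioral changes show up in the updated snapshot: every card now carries a `maxOutput` field (`undefined` for most entries, `4096` for `gpt-4`), and `displayName` falls back to the raw model id instead of staying `undefined` for models with no entry in the built-in list. A minimal sketch of that fallback, with illustrative field handling rather than the actual `modelParse.ts` source:

// Illustrative only: how the displayName/maxOutput defaults seen in the snapshot
// could be derived for a model without a known-model entry.
const toCard = (id: string, knownModel?: { displayName?: string; maxOutput?: number }) => ({
  contextWindowTokens: undefined,
  displayName: knownModel?.displayName ?? id, // e.g. "davinci-002" instead of undefined
  enabled: false,
  functionCall: false,
  id,
  maxOutput: knownModel?.maxOutput, // undefined unless the known model defines one
  reasoning: false,
  vision: false,
});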
package/src/libs/model-runtime/openai/index.ts

@@ -1,8 +1,7 @@
- import type { ChatModelCard } from '@/types/llm';
-
  import { ModelProvider } from '../types';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { pruneReasoningPayload } from '../utils/openaiHelpers';
+ import { processMultiProviderModelList } from '../utils/modelParse';

  export interface OpenAIModelCard {
    id: string;
@@ -45,45 +44,11 @@ export const LobeOpenAI = createOpenAICompatibleRuntime({
      chatCompletion: () => process.env.DEBUG_OPENAI_CHAT_COMPLETION === '1',
    },
    models: async ({ client }) => {
-     const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
-
-     const functionCallKeywords = ['4o', '4.1', 'o3', 'o4'];
-
-     const visionKeywords = ['4o', '4.1', 'o4'];
-
-     const reasoningKeywords = ['o1', 'o3', 'o4'];
-
      const modelsPage = (await client.models.list()) as any;
      const modelList: OpenAIModelCard[] = modelsPage.data;

-     return modelList
-       .map((model) => {
-         const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
-           (m) => model.id.toLowerCase() === m.id.toLowerCase(),
-         );
-
-         return {
-           contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
-           displayName: knownModel?.displayName ?? undefined,
-           enabled: knownModel?.enabled || false,
-           functionCall:
-             (functionCallKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) &&
-               !model.id.toLowerCase().includes('audio')) ||
-             knownModel?.abilities?.functionCall ||
-             false,
-           id: model.id,
-           reasoning:
-             reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
-             knownModel?.abilities?.reasoning ||
-             false,
-           vision:
-             (visionKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) &&
-               !model.id.toLowerCase().includes('audio')) ||
-             knownModel?.abilities?.vision ||
-             false,
-         };
-       })
-       .filter(Boolean) as ChatModelCard[];
+     // Automatically detect the model's provider and apply the matching configuration
+     return processMultiProviderModelList(modelList);
    },
    provider: ModelProvider.OpenAI,
  });
package/src/libs/model-runtime/openrouter/__snapshots__/index.test.ts.snap

@@ -13,6 +13,7 @@ _These are free, rate-limited endpoints for [Reflection 70B](/models/mattshumer/
    "enabled": false,
    "functionCall": true,
    "id": "mattshumer/reflection-70b:free",
+   "maxOutput": undefined,
    "maxTokens": 4096,
    "pricing": {
      "input": 0,
@@ -38,6 +39,7 @@ _These are free, rate-limited endpoints for [Reflection 70B](/models/mattshumer/
    "enabled": false,
    "functionCall": false,
    "id": "mattshumer/reflection-70b:free",
+   "maxOutput": undefined,
    "maxTokens": 4096,
    "pricing": {
      "input": 0,
@@ -63,6 +65,7 @@ _These are free, rate-limited endpoints for [Reflection 70B](/models/mattshumer/
    "enabled": false,
    "functionCall": false,
    "id": "mattshumer/reflection-70b:free",
+   "maxOutput": undefined,
    "maxTokens": 4096,
    "pricing": {
      "input": 0,
package/src/libs/model-runtime/openrouter/index.ts

@@ -1,6 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
+ import { processMultiProviderModelList } from '../utils/modelParse';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { OpenRouterModelCard, OpenRouterModelExtraInfo, OpenRouterReasoning } from './type';

@@ -40,20 +41,9 @@ export const LobeOpenRouterAI = createOpenAICompatibleRuntime({
      chatCompletion: () => process.env.DEBUG_OPENROUTER_CHAT_COMPLETION === '1',
    },
    models: async ({ client }) => {
-     const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
-
-     const reasoningKeywords = [
-       'deepseek/deepseek-r1',
-       'openai/o1',
-       'openai/o3',
-       'qwen/qvq',
-       'qwen/qwq',
-       'thinking',
-     ];
-
      const modelsPage = (await client.models.list()) as any;
      const modelList: OpenRouterModelCard[] = modelsPage.data;
-
+
      const modelsExtraInfo: OpenRouterModelExtraInfo[] = [];
      try {
        const response = await fetch('https://openrouter.ai/api/frontend/models');
@@ -62,50 +52,51 @@ export const LobeOpenRouterAI = createOpenAICompatibleRuntime({
          modelsExtraInfo.push(...data['data']);
        }
      } catch (error) {
-       // Ignore fetch errors and continue with an empty modelsExtraInfo array
        console.error('Failed to fetch OpenRouter frontend models:', error);
      }
-
-     return modelList
-       .map((model) => {
-         const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
-           (m) => model.id.toLowerCase() === m.id.toLowerCase(),
-         );
-         const extraInfo = modelsExtraInfo.find(
-           (m) => m.slug.toLowerCase() === model.id.toLowerCase(),
-         );
-
-         return {
-           contextWindowTokens: model.context_length,
-           description: model.description,
-           displayName: model.name,
-           enabled: knownModel?.enabled || false,
-           functionCall:
-             model.description.includes('function calling') ||
-             model.description.includes('tools') ||
-             extraInfo?.endpoint?.supports_tool_parameters ||
-             knownModel?.abilities?.functionCall ||
-             false,
-           id: model.id,
-           maxTokens:
-             typeof model.top_provider.max_completion_tokens === 'number'
-               ? model.top_provider.max_completion_tokens
-               : undefined,
-           pricing: {
-             input: formatPrice(model.pricing.prompt),
-             output: formatPrice(model.pricing.completion),
-           },
-           reasoning:
-             reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
-             extraInfo?.endpoint?.supports_reasoning ||
-             knownModel?.abilities?.reasoning ||
-             false,
-           releasedAt: new Date(model.created * 1000).toISOString().split('T')[0],
-           vision:
-             model.architecture.modality.includes('image') || knownModel?.abilities?.vision || false,
-         };
-       })
-       .filter(Boolean) as ChatModelCard[];
+
+     // Parse model capabilities
+     const baseModels = await processMultiProviderModelList(modelList);
+
+     // Merge in the model info fetched from OpenRouter
+     return baseModels.map((baseModel) => {
+       const model = modelList.find(m => m.id === baseModel.id);
+       const extraInfo = modelsExtraInfo.find(
+         (m) => m.slug.toLowerCase() === baseModel.id.toLowerCase(),
+       );
+
+       if (!model) return baseModel;
+
+       return {
+         ...baseModel,
+         contextWindowTokens: model.context_length,
+         description: model.description,
+         displayName: model.name,
+         functionCall:
+           baseModel.functionCall ||
+           model.description.includes('function calling') ||
+           model.description.includes('tools') ||
+           extraInfo?.endpoint?.supports_tool_parameters ||
+           false,
+         maxTokens:
+           typeof model.top_provider.max_completion_tokens === 'number'
+             ? model.top_provider.max_completion_tokens
+             : undefined,
+         pricing: {
+           input: formatPrice(model.pricing.prompt),
+           output: formatPrice(model.pricing.completion),
+         },
+         reasoning:
+           baseModel.reasoning ||
+           extraInfo?.endpoint?.supports_reasoning ||
+           false,
+         releasedAt: new Date(model.created * 1000).toISOString().split('T')[0],
+         vision:
+           baseModel.vision ||
+           model.architecture.modality.includes('image') ||
+           false,
+       };
+     }).filter(Boolean) as ChatModelCard[];
    },
    provider: ModelProvider.OpenRouter,
  });
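The merge step above reads only a few fields from the `OpenRouterModelExtraInfo` records returned by `https://openrouter.ai/api/frontend/models`. That type is declared in `./type`, which is not part of this diff; a rough shape inferred purely from the fields the hunk accesses (the real definition likely contains more) would be:

// Inferred from usage in the hunk above; the actual ./type definition may differ.
interface OpenRouterModelExtraInfo {
  endpoint?: {
    supports_reasoning?: boolean;
    supports_tool_parameters?: boolean;
  };
  slug: string;
}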
package/src/libs/model-runtime/qwen/index.ts

@@ -1,6 +1,5 @@
- import type { ChatModelCard } from '@/types/llm';
-
  import { ModelProvider } from '../types';
+ import { processMultiProviderModelList } from '../utils/modelParse';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { QwenAIStream } from '../utils/streams';

@@ -78,52 +77,10 @@ export const LobeQwenAI = createOpenAICompatibleRuntime({
      chatCompletion: () => process.env.DEBUG_QWEN_CHAT_COMPLETION === '1',
    },
    models: async ({ client }) => {
-     const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
-
-     const functionCallKeywords = [
-       'qwen-max',
-       'qwen-plus',
-       'qwen-turbo',
-       'qwen-long',
-       'qwen1.5',
-       'qwen2',
-       'qwen2.5',
-       'qwen3',
-     ];
-
-     const visionKeywords = ['qvq', 'vl'];
-
-     const reasoningKeywords = ['qvq', 'qwq', 'deepseek-r1', 'qwen3'];
-
      const modelsPage = (await client.models.list()) as any;
      const modelList: QwenModelCard[] = modelsPage.data;

-     return modelList
-       .map((model) => {
-         const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
-           (m) => model.id.toLowerCase() === m.id.toLowerCase(),
-         );
-
-         return {
-           contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
-           displayName: knownModel?.displayName ?? undefined,
-           enabled: knownModel?.enabled || false,
-           functionCall:
-             functionCallKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
-             knownModel?.abilities?.functionCall ||
-             false,
-           id: model.id,
-           reasoning:
-             reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
-             knownModel?.abilities?.reasoning ||
-             false,
-           vision:
-             visionKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
-             knownModel?.abilities?.vision ||
-             false,
-         };
-       })
-       .filter(Boolean) as ChatModelCard[];
+     return processMultiProviderModelList(modelList);
    },
    provider: ModelProvider.Qwen,
  });
package/src/libs/model-runtime/siliconcloud/index.ts

@@ -1,7 +1,6 @@
- import type { ChatModelCard } from '@/types/llm';
-
  import { AgentRuntimeErrorType } from '../error';
  import { ChatCompletionErrorPayload, ModelProvider } from '../types';
+ import { processMultiProviderModelList } from '../utils/modelParse';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface SiliconCloudModelCard {
@@ -69,58 +68,10 @@ export const LobeSiliconCloudAI = createOpenAICompatibleRuntime({
      invalidAPIKey: AgentRuntimeErrorType.InvalidProviderAPIKey,
    },
    models: async ({ client }) => {
-     const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
-
-     const functionCallKeywords = [
-       'qwen/qwen3',
-       'qwen/qwen2.5',
-       'thudm/glm-4',
-       'deepseek-ai/deepseek',
-       'internlm/internlm2_5',
-       'meta-llama/meta-llama-3.1',
-       'meta-llama/meta-llama-3.3',
-     ];
-
-     const visionKeywords = [
-       'opengvlab/internvl',
-       'qwen/qvq',
-       'qwen/qwen2-vl',
-       'teleai/telemm',
-       'deepseek-ai/deepseek-vl',
-     ];
-
-     const reasoningKeywords = ['deepseek-ai/deepseek-r1', 'qwen/qvq', 'qwen/qwq', 'qwen/qwen3'];
-
      const modelsPage = (await client.models.list()) as any;
      const modelList: SiliconCloudModelCard[] = modelsPage.data;

-     return modelList
-       .map((model) => {
-         const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
-           (m) => model.id.toLowerCase() === m.id.toLowerCase(),
-         );
-
-         return {
-           contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
-           displayName: knownModel?.displayName ?? undefined,
-           enabled: knownModel?.enabled || false,
-           functionCall:
-             (functionCallKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) &&
-               !model.id.toLowerCase().includes('deepseek-r1')) ||
-             knownModel?.abilities?.functionCall ||
-             false,
-           id: model.id,
-           reasoning:
-             reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
-             knownModel?.abilities?.reasoning ||
-             false,
-           vision:
-             visionKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
-             knownModel?.abilities?.vision ||
-             false,
-         };
-       })
-       .filter(Boolean) as ChatModelCard[];
+     return processMultiProviderModelList(modelList);
    },
    provider: ModelProvider.SiliconCloud,
  });