@lobehub/chat 1.90.2 → 1.90.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. package/.cursor/rules/backend-architecture.mdc +12 -9
  2. package/.cursor/rules/cursor-ux-optimize.mdc +1 -1
  3. package/.cursor/rules/define-database-model.mdc +1 -1
  4. package/.cursor/rules/drizzle-schema-style-guide.mdc +1 -1
  5. package/.cursor/rules/i18n/i18n.mdc +1 -1
  6. package/.cursor/rules/project-introduce.mdc +2 -1
  7. package/.cursor/rules/system-role.mdc +42 -0
  8. package/.cursor/rules/zustand-action-patterns.mdc +318 -0
  9. package/.cursor/rules/zustand-slice-organization.mdc +300 -0
  10. package/CHANGELOG.md +58 -0
  11. package/README.md +2 -2
  12. package/README.zh-CN.md +2 -2
  13. package/changelog/v1.json +21 -0
  14. package/docs/self-hosting/advanced/model-list.mdx +1 -1
  15. package/docs/self-hosting/advanced/model-list.zh-CN.mdx +1 -1
  16. package/docs/self-hosting/environment-variables/model-provider.mdx +2 -2
  17. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +2 -2
  18. package/package.json +44 -44
  19. package/src/config/aiModels/qwen.ts +64 -42
  20. package/src/config/modelProviders/qwen.ts +2 -5
  21. package/src/config/modelProviders/xai.ts +1 -1
  22. package/src/features/PluginsUI/Render/utils/iframeOnReady.test.ts +1 -1
  23. package/src/features/PluginsUI/Render/utils/pluginSettings.test.ts +1 -1
  24. package/src/features/PluginsUI/Render/utils/pluginState.test.ts +1 -1
  25. package/src/libs/model-runtime/BaseAI.ts +3 -3
  26. package/src/libs/model-runtime/ModelRuntime.ts +2 -2
  27. package/src/libs/model-runtime/UniformRuntime/index.ts +2 -2
  28. package/src/libs/model-runtime/ai21/index.ts +2 -2
  29. package/src/libs/model-runtime/ai360/index.ts +2 -2
  30. package/src/libs/model-runtime/anthropic/index.ts +15 -11
  31. package/src/libs/model-runtime/azureOpenai/index.ts +2 -2
  32. package/src/libs/model-runtime/azureai/index.ts +4 -4
  33. package/src/libs/model-runtime/baichuan/index.ts +2 -2
  34. package/src/libs/model-runtime/bedrock/index.ts +4 -4
  35. package/src/libs/model-runtime/cloudflare/index.ts +2 -2
  36. package/src/libs/model-runtime/cohere/index.ts +2 -2
  37. package/src/libs/model-runtime/deepseek/index.ts +2 -2
  38. package/src/libs/model-runtime/fireworksai/index.ts +2 -2
  39. package/src/libs/model-runtime/giteeai/index.ts +2 -2
  40. package/src/libs/model-runtime/github/index.ts +2 -2
  41. package/src/libs/model-runtime/google/index.ts +7 -5
  42. package/src/libs/model-runtime/groq/index.ts +2 -2
  43. package/src/libs/model-runtime/higress/index.ts +2 -2
  44. package/src/libs/model-runtime/huggingface/index.ts +2 -2
  45. package/src/libs/model-runtime/hunyuan/index.ts +2 -2
  46. package/src/libs/model-runtime/index.ts +1 -1
  47. package/src/libs/model-runtime/infiniai/index.ts +2 -2
  48. package/src/libs/model-runtime/internlm/index.ts +7 -9
  49. package/src/libs/model-runtime/jina/index.ts +2 -2
  50. package/src/libs/model-runtime/lmstudio/index.ts +2 -2
  51. package/src/libs/model-runtime/minimax/index.ts +2 -2
  52. package/src/libs/model-runtime/mistral/index.ts +2 -2
  53. package/src/libs/model-runtime/modelscope/index.ts +2 -3
  54. package/src/libs/model-runtime/moonshot/index.ts +2 -2
  55. package/src/libs/model-runtime/novita/index.ts +2 -2
  56. package/src/libs/model-runtime/nvidia/index.ts +2 -2
  57. package/src/libs/model-runtime/ollama/index.ts +2 -2
  58. package/src/libs/model-runtime/openai/index.ts +3 -3
  59. package/src/libs/model-runtime/openrouter/index.ts +2 -2
  60. package/src/libs/model-runtime/perplexity/index.ts +2 -2
  61. package/src/libs/model-runtime/ppio/index.ts +2 -2
  62. package/src/libs/model-runtime/qiniu/index.ts +2 -2
  63. package/src/libs/model-runtime/qwen/index.ts +2 -2
  64. package/src/libs/model-runtime/sambanova/index.ts +2 -2
  65. package/src/libs/model-runtime/search1api/index.ts +2 -2
  66. package/src/libs/model-runtime/sensenova/index.ts +2 -2
  67. package/src/libs/model-runtime/siliconcloud/index.ts +2 -2
  68. package/src/libs/model-runtime/spark/index.ts +15 -13
  69. package/src/libs/model-runtime/stepfun/index.ts +2 -2
  70. package/src/libs/model-runtime/taichu/index.ts +2 -2
  71. package/src/libs/model-runtime/tencentcloud/index.ts +2 -2
  72. package/src/libs/model-runtime/togetherai/index.ts +2 -2
  73. package/src/libs/model-runtime/types/chat.ts +1 -1
  74. package/src/libs/model-runtime/upstage/index.ts +2 -2
  75. package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.test.ts +7 -7
  76. package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts +3 -3
  77. package/src/libs/model-runtime/vllm/index.ts +2 -2
  78. package/src/libs/model-runtime/volcengine/index.ts +2 -2
  79. package/src/libs/model-runtime/wenxin/index.ts +2 -2
  80. package/src/libs/model-runtime/xai/index.ts +6 -3
  81. package/src/libs/model-runtime/xinference/index.ts +2 -2
  82. package/src/libs/model-runtime/zeroone/index.ts +2 -2
  83. package/src/libs/model-runtime/zhipu/index.ts +2 -2
  84. package/src/middleware.ts +3 -1
  85. package/src/server/globalConfig/index.ts +3 -0
  86. package/src/server/routers/tools/search.test.ts +2 -4
  87. package/src/services/chat.ts +1 -0
  88. package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChat.test.ts +4 -2
  89. package/src/store/chat/slices/message/action.test.ts +2 -1
  90. package/src/store/chat/slices/topic/action.test.ts +3 -2
  91. package/src/types/aiProvider.ts +1 -0
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.90.2",
3
+ "version": "1.90.4",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -123,15 +123,15 @@
123
123
  "@ant-design/pro-components": "^2.8.7",
124
124
  "@anthropic-ai/sdk": "^0.51.0",
125
125
  "@auth/core": "^0.38.0",
126
- "@aws-sdk/client-bedrock-runtime": "^3.812.0",
127
- "@aws-sdk/client-s3": "^3.812.0",
128
- "@aws-sdk/s3-request-presigner": "^3.812.0",
126
+ "@aws-sdk/client-bedrock-runtime": "^3.821.0",
127
+ "@aws-sdk/client-s3": "^3.821.0",
128
+ "@aws-sdk/s3-request-presigner": "^3.821.0",
129
129
  "@azure-rest/ai-inference": "1.0.0-beta.5",
130
130
  "@azure/core-auth": "^1.9.0",
131
131
  "@cfworker/json-schema": "^4.1.1",
132
- "@clerk/localizations": "^3.16.0",
133
- "@clerk/nextjs": "^6.19.4",
134
- "@clerk/themes": "^2.2.45",
132
+ "@clerk/localizations": "^3.16.3",
133
+ "@clerk/nextjs": "^6.20.2",
134
+ "@clerk/themes": "^2.2.48",
135
135
  "@codesandbox/sandpack-react": "^2.20.0",
136
136
  "@cyntler/react-doc-viewer": "^1.17.0",
137
137
  "@electric-sql/pglite": "0.2.17",
@@ -140,7 +140,7 @@
140
140
  "@huggingface/inference": "^2.8.1",
141
141
  "@icons-pack/react-simple-icons": "9.6.0",
142
142
  "@khmyznikov/pwa-install": "0.3.9",
143
- "@langchain/community": "^0.3.43",
143
+ "@langchain/community": "^0.3.45",
144
144
  "@lobechat/electron-client-ipc": "workspace:*",
145
145
  "@lobechat/electron-server-ipc": "workspace:*",
146
146
  "@lobechat/file-loaders": "workspace:*",
@@ -148,21 +148,21 @@
148
148
  "@lobehub/charts": "^2.0.0",
149
149
  "@lobehub/chat-plugin-sdk": "^1.32.4",
150
150
  "@lobehub/chat-plugins-gateway": "^1.9.0",
151
- "@lobehub/icons": "^2.0.0",
151
+ "@lobehub/icons": "^2.2.0",
152
152
  "@lobehub/tts": "^2.0.1",
153
- "@lobehub/ui": "^2.1.7",
154
- "@modelcontextprotocol/sdk": "^1.11.4",
153
+ "@lobehub/ui": "^2.1.15",
154
+ "@modelcontextprotocol/sdk": "^1.12.1",
155
155
  "@neondatabase/serverless": "^1.0.0",
156
- "@next/third-parties": "^15.3.2",
156
+ "@next/third-parties": "^15.3.3",
157
157
  "@react-spring/web": "^9.7.5",
158
158
  "@sentry/nextjs": "^7.120.3",
159
159
  "@serwist/next": "^9.0.14",
160
160
  "@t3-oss/env-nextjs": "^0.12.0",
161
- "@tanstack/react-query": "^5.76.1",
162
- "@trpc/client": "^11.1.2",
163
- "@trpc/next": "^11.1.2",
164
- "@trpc/react-query": "^11.1.2",
165
- "@trpc/server": "^11.1.2",
161
+ "@tanstack/react-query": "^5.79.0",
162
+ "@trpc/client": "^11.2.0",
163
+ "@trpc/next": "^11.1.4",
164
+ "@trpc/react-query": "^11.2.0",
165
+ "@trpc/server": "^11.2.0",
166
166
  "@vercel/analytics": "^1.5.0",
167
167
  "@vercel/edge-config": "^1.4.0",
168
168
  "@vercel/functions": "^2.1.0",
@@ -170,7 +170,7 @@
170
170
  "@xterm/xterm": "^5.5.0",
171
171
  "ahooks": "^3.8.5",
172
172
  "ai": "^3.4.33",
173
- "antd": "^5.25.2",
173
+ "antd": "^5.25.4",
174
174
  "antd-style": "^3.7.1",
175
175
  "brotli-wasm": "^3.0.1",
176
176
  "chroma-js": "^3.1.2",
@@ -184,7 +184,7 @@
184
184
  "epub2": "^3.0.2",
185
185
  "fast-deep-equal": "^3.1.3",
186
186
  "file-type": "^20.5.0",
187
- "framer-motion": "^12.12.1",
187
+ "framer-motion": "^12.15.0",
188
188
  "gpt-tokenizer": "^2.9.0",
189
189
  "html-to-text": "^9.0.5",
190
190
  "i18next": "^24.2.3",
@@ -193,19 +193,19 @@
193
193
  "idb-keyval": "^6.2.2",
194
194
  "immer": "^10.1.1",
195
195
  "jose": "^5.10.0",
196
- "js-sha256": "^0.11.0",
196
+ "js-sha256": "^0.11.1",
197
197
  "jsonl-parse-stringify": "^1.0.3",
198
198
  "keyv": "^4.5.4",
199
- "langchain": "^0.3.26",
200
- "langfuse": "^3.37.2",
201
- "langfuse-core": "^3.37.2",
199
+ "langchain": "^0.3.27",
200
+ "langfuse": "^3.37.3",
201
+ "langfuse-core": "^3.37.3",
202
202
  "lodash-es": "^4.17.21",
203
203
  "lucide-react": "^0.509.0",
204
- "mammoth": "^1.9.0",
204
+ "mammoth": "^1.9.1",
205
205
  "mdast-util-to-markdown": "^2.1.2",
206
206
  "modern-screenshot": "^4.6.0",
207
207
  "nanoid": "^5.1.5",
208
- "next": "^15.3.2",
208
+ "next": "^15.3.3",
209
209
  "next-auth": "5.0.0-beta.25",
210
210
  "next-mdx-remote": "^5.0.0",
211
211
  "nextjs-toploader": "^3.8.16",
@@ -213,8 +213,8 @@
213
213
  "nuqs": "^2.4.3",
214
214
  "officeparser": "^5.1.1",
215
215
  "oidc-provider": "^8.8.1",
216
- "ollama": "^0.5.15",
217
- "openai": "^4.100.0",
216
+ "ollama": "^0.5.16",
217
+ "openai": "^4.104.0",
218
218
  "openapi-fetch": "^0.9.8",
219
219
  "partial-json": "^0.1.7",
220
220
  "path-browserify-esm": "^1.0.6",
@@ -224,22 +224,22 @@
224
224
  "pino": "^9.7.0",
225
225
  "plaiceholder": "^3.0.0",
226
226
  "polished": "^4.3.1",
227
- "posthog-js": "^1.245.0",
227
+ "posthog-js": "^1.249.0",
228
228
  "pwa-install-handler": "^2.6.2",
229
- "query-string": "^9.1.2",
229
+ "query-string": "^9.2.0",
230
230
  "random-words": "^2.0.1",
231
231
  "react": "^19.1.0",
232
232
  "react-confetti": "^6.4.0",
233
233
  "react-dom": "^19.1.0",
234
234
  "react-fast-marquee": "^1.6.5",
235
235
  "react-hotkeys-hook": "^4.6.2",
236
- "react-i18next": "^15.5.1",
236
+ "react-i18next": "^15.5.2",
237
237
  "react-layout-kit": "^1.9.1",
238
238
  "react-lazy-load": "^4.0.1",
239
239
  "react-pdf": "^9.2.1",
240
240
  "react-rnd": "^10.5.2",
241
241
  "react-scan": "^0.3.4",
242
- "react-virtuoso": "^4.12.7",
242
+ "react-virtuoso": "^4.12.8",
243
243
  "react-wrap-balancer": "^1.1.1",
244
244
  "remark": "^15.0.1",
245
245
  "remark-gfm": "^4.0.1",
@@ -252,7 +252,7 @@
252
252
  "shiki": "^3.4.2",
253
253
  "stripe": "^16.12.0",
254
254
  "superjson": "^2.2.2",
255
- "svix": "^1.65.0",
255
+ "svix": "^1.66.0",
256
256
  "swr": "^2.3.3",
257
257
  "systemjs": "^6.15.1",
258
258
  "tokenx": "^0.4.1",
@@ -268,7 +268,7 @@
268
268
  "y-webrtc": "^10.3.0",
269
269
  "yaml": "^2.8.0",
270
270
  "yjs": "^13.6.27",
271
- "zod": "^3.25.7",
271
+ "zod": "^3.25.48",
272
272
  "zustand": "5.0.4",
273
273
  "zustand-utils": "^2.1.0"
274
274
  },
@@ -276,11 +276,11 @@
276
276
  "@commitlint/cli": "^19.8.1",
277
277
  "@edge-runtime/vm": "^5.0.0",
278
278
  "@huggingface/tasks": "^0.15.9",
279
- "@lobehub/i18n-cli": "^1.20.3",
280
- "@lobehub/lint": "^1.26.1",
281
- "@lobehub/seo-cli": "^1.4.3",
282
- "@next/bundle-analyzer": "^15.3.2",
283
- "@next/eslint-plugin-next": "^15.3.2",
279
+ "@lobehub/i18n-cli": "^1.22.0",
280
+ "@lobehub/lint": "^1.26.2",
281
+ "@lobehub/seo-cli": "^1.6.0",
282
+ "@next/bundle-analyzer": "^15.3.3",
283
+ "@next/eslint-plugin-next": "^15.3.3",
284
284
  "@peculiar/webcrypto": "^1.5.0",
285
285
  "@semantic-release/exec": "^6.0.3",
286
286
  "@testing-library/jest-dom": "^6.6.3",
@@ -293,13 +293,13 @@
293
293
  "@types/fs-extra": "^11.0.4",
294
294
  "@types/ip": "^1.1.3",
295
295
  "@types/json-schema": "^7.0.15",
296
- "@types/lodash": "^4.17.16",
296
+ "@types/lodash": "^4.17.17",
297
297
  "@types/lodash-es": "^4.17.12",
298
- "@types/node": "^22.15.19",
298
+ "@types/node": "^22.15.29",
299
299
  "@types/numeral": "^2.0.5",
300
300
  "@types/oidc-provider": "^8.8.1",
301
- "@types/pg": "^8.15.2",
302
- "@types/react": "^19.1.4",
301
+ "@types/pg": "^8.15.4",
302
+ "@types/react": "^19.1.6",
303
303
  "@types/react-dom": "^19.1.5",
304
304
  "@types/rtl-detect": "^1.0.3",
305
305
  "@types/semver": "^7.7.0",
@@ -325,7 +325,7 @@
325
325
  "fs-extra": "^11.3.0",
326
326
  "glob": "^11.0.2",
327
327
  "gray-matter": "^4.0.3",
328
- "happy-dom": "^17.4.7",
328
+ "happy-dom": "^17.5.6",
329
329
  "husky": "^9.1.7",
330
330
  "just-diff": "^6.0.2",
331
331
  "lint-staged": "^15.5.2",
@@ -351,7 +351,7 @@
351
351
  "unified": "^11.0.5",
352
352
  "unist-util-visit": "^5.0.0",
353
353
  "vite": "^5.4.19",
354
- "vitest": "^3.1.4",
354
+ "vitest": "^3.2.0",
355
355
  "vitest-canvas-mock": "^0.3.3"
356
356
  },
357
357
  "packageManager": "pnpm@10.10.0",
@@ -196,12 +196,15 @@ const qwenChatModels: AIChatModelCard[] = [
196
196
  reasoning: true,
197
197
  search: true,
198
198
  },
199
+ config: {
200
+ deploymentName: 'qwq-plus-latest', // expired on 2025-09-02
201
+ },
199
202
  contextWindowTokens: 131_072,
200
203
  description:
201
204
  '基于 Qwen2.5 模型训练的 QwQ 推理模型,通过强化学习大幅度提升了模型推理能力。模型数学代码等核心指标(AIME 24/25、LiveCodeBench)以及部分通用指标(IFEval、LiveBench等)达到DeepSeek-R1 满血版水平。',
202
205
  displayName: 'QwQ Plus',
203
206
  enabled: true,
204
- id: 'qwq-plus-latest',
207
+ id: 'qwq-plus',
205
208
  maxOutput: 8192,
206
209
  organization: 'Qwen',
207
210
  pricing: {
@@ -221,11 +224,14 @@ const qwenChatModels: AIChatModelCard[] = [
221
224
  reasoning: true,
222
225
  search: true,
223
226
  },
227
+ config: {
228
+ deploymentName: 'qwen-turbo-2025-04-28', // expired on 2025-10-26
229
+ },
224
230
  contextWindowTokens: 1_000_000,
225
231
  description: '通义千问超大规模语言模型,支持中文、英文等不同语言输入。',
226
232
  displayName: 'Qwen Turbo',
227
233
  enabled: true,
228
- id: 'qwen-turbo-latest',
234
+ id: 'qwen-turbo',
229
235
  maxOutput: 8192,
230
236
  organization: 'Qwen',
231
237
  pricing: {
@@ -246,11 +252,14 @@ const qwenChatModels: AIChatModelCard[] = [
246
252
  reasoning: true,
247
253
  search: true,
248
254
  },
255
+ config: {
256
+ deploymentName: 'qwen-plus-2025-04-28', // expired on 2025-10-26
257
+ },
249
258
  contextWindowTokens: 131_072,
250
259
  description: '通义千问超大规模语言模型增强版,支持中文、英文等不同语言输入。',
251
260
  displayName: 'Qwen Plus',
252
261
  enabled: true,
253
- id: 'qwen-plus-latest',
262
+ id: 'qwen-plus',
254
263
  maxOutput: 8192,
255
264
  organization: 'Qwen',
256
265
  pricing: {
@@ -270,12 +279,15 @@ const qwenChatModels: AIChatModelCard[] = [
270
279
  functionCall: true,
271
280
  search: true,
272
281
  },
282
+ config: {
283
+ deploymentName: 'qwen-max-2025-01-25',
284
+ },
273
285
  contextWindowTokens: 131_072,
274
286
  description:
275
287
  '通义千问千亿级别超大规模语言模型,支持中文、英文等不同语言输入,当前通义千问2.5产品版本背后的API模型。',
276
288
  displayName: 'Qwen Max',
277
289
  enabled: true,
278
- id: 'qwen-max-latest',
290
+ id: 'qwen-max',
279
291
  maxOutput: 8192,
280
292
  organization: 'Qwen',
281
293
  pricing: {
@@ -292,6 +304,9 @@ const qwenChatModels: AIChatModelCard[] = [
292
304
  abilities: {
293
305
  functionCall: true,
294
306
  },
307
+ config: {
308
+ deploymentName: 'qwen-long-latest',
309
+ },
295
310
  contextWindowTokens: 10_000_000,
296
311
  description:
297
312
  '通义千问超大规模语言模型,支持长文本上下文,以及基于长文档、多文档等多个场景的对话功能。',
@@ -311,12 +326,15 @@ const qwenChatModels: AIChatModelCard[] = [
311
326
  abilities: {
312
327
  vision: true,
313
328
  },
329
+ config: {
330
+ deploymentName: 'qwen-omni-turbo-latest',
331
+ },
314
332
  contextWindowTokens: 32_768,
315
333
  description:
316
334
  'Qwen-Omni 系列模型支持输入多种模态的数据,包括视频、音频、图片、文本,并输出音频与文本。',
317
335
  displayName: 'Qwen Omni Turbo',
318
336
  enabled: true,
319
- id: 'qwen-omni-turbo-latest',
337
+ id: 'qwen-omni-turbo',
320
338
  maxOutput: 2048,
321
339
  organization: 'Qwen',
322
340
  pricing: {
@@ -348,11 +366,14 @@ const qwenChatModels: AIChatModelCard[] = [
348
366
  abilities: {
349
367
  vision: true,
350
368
  },
369
+ config: {
370
+ deploymentName: 'qwen-vl-plus-2025-01-25',
371
+ },
351
372
  contextWindowTokens: 131_072,
352
373
  description:
353
374
  '通义千问大规模视觉语言模型增强版。大幅提升细节识别能力和文字识别能力,支持超百万像素分辨率和任意长宽比规格的图像。',
354
375
  displayName: 'Qwen VL Plus',
355
- id: 'qwen-vl-plus-latest',
376
+ id: 'qwen-vl-plus',
356
377
  maxOutput: 8192,
357
378
  organization: 'Qwen',
358
379
  pricing: {
@@ -366,12 +387,15 @@ const qwenChatModels: AIChatModelCard[] = [
366
387
  abilities: {
367
388
  vision: true,
368
389
  },
390
+ config: {
391
+ deploymentName: 'qwen-vl-max-2025-04-08',
392
+ },
369
393
  contextWindowTokens: 131_072,
370
394
  description:
371
395
  '通义千问超大规模视觉语言模型。相比增强版,再次提升视觉推理能力和指令遵循能力,提供更高的视觉感知和认知水平。',
372
396
  displayName: 'Qwen VL Max',
373
397
  enabled: true,
374
- id: 'qwen-vl-max-latest',
398
+ id: 'qwen-vl-max',
375
399
  maxOutput: 8192,
376
400
  organization: 'Qwen',
377
401
  pricing: {
@@ -385,11 +409,14 @@ const qwenChatModels: AIChatModelCard[] = [
385
409
  abilities: {
386
410
  vision: true,
387
411
  },
412
+ config: {
413
+ deploymentName: 'qwen-vl-ocr-2025-04-13',
414
+ },
388
415
  contextWindowTokens: 34_096,
389
416
  description:
390
417
  '通义千问OCR是文字提取专有模型,专注于文档、表格、试题、手写体文字等类型图像的文字提取能力。它能够识别多种文字,目前支持的语言有:汉语、英语、法语、日语、韩语、德语、俄语、意大利语、越南语、阿拉伯语。',
391
418
  displayName: 'Qwen VL OCR',
392
- id: 'qwen-vl-ocr-latest',
419
+ id: 'qwen-vl-ocr',
393
420
  maxOutput: 4096,
394
421
  organization: 'Qwen',
395
422
  pricing: {
@@ -400,10 +427,13 @@ const qwenChatModels: AIChatModelCard[] = [
400
427
  type: 'chat',
401
428
  },
402
429
  {
430
+ config: {
431
+ deploymentName: 'qwen-math-turbo-latest',
432
+ },
403
433
  contextWindowTokens: 4096,
404
434
  description: '通义千问数学模型是专门用于数学解题的语言模型。',
405
435
  displayName: 'Qwen Math Turbo',
406
- id: 'qwen-math-turbo-latest',
436
+ id: 'qwen-math-turbo',
407
437
  maxOutput: 3072,
408
438
  organization: 'Qwen',
409
439
  pricing: {
@@ -414,10 +444,13 @@ const qwenChatModels: AIChatModelCard[] = [
414
444
  type: 'chat',
415
445
  },
416
446
  {
447
+ config: {
448
+ deploymentName: 'qwen-math-plus-latest',
449
+ },
417
450
  contextWindowTokens: 4096,
418
451
  description: '通义千问数学模型是专门用于数学解题的语言模型。',
419
452
  displayName: 'Qwen Math Plus',
420
- id: 'qwen-math-plus-latest',
453
+ id: 'qwen-math-plus',
421
454
  maxOutput: 3072,
422
455
  organization: 'Qwen',
423
456
  pricing: {
@@ -428,10 +461,13 @@ const qwenChatModels: AIChatModelCard[] = [
428
461
  type: 'chat',
429
462
  },
430
463
  {
464
+ config: {
465
+ deploymentName: 'qwen-coder-turbo-latest',
466
+ },
431
467
  contextWindowTokens: 131_072,
432
468
  description: '通义千问代码模型。',
433
469
  displayName: 'Qwen Coder Turbo',
434
- id: 'qwen-coder-turbo-latest',
470
+ id: 'qwen-coder-turbo',
435
471
  maxOutput: 8192,
436
472
  organization: 'Qwen',
437
473
  pricing: {
@@ -442,10 +478,13 @@ const qwenChatModels: AIChatModelCard[] = [
442
478
  type: 'chat',
443
479
  },
444
480
  {
481
+ config: {
482
+ deploymentName: 'qwen-coder-plus-latest',
483
+ },
445
484
  contextWindowTokens: 131_072,
446
485
  description: '通义千问代码模型。',
447
486
  displayName: 'Qwen Coder Plus',
448
- id: 'qwen-coder-plus-latest',
487
+ id: 'qwen-coder-plus',
449
488
  maxOutput: 8192,
450
489
  organization: 'Qwen',
451
490
  pricing: {
@@ -501,11 +540,14 @@ const qwenChatModels: AIChatModelCard[] = [
501
540
  reasoning: true,
502
541
  vision: true,
503
542
  },
543
+ config: {
544
+ deploymentName: 'qvq-max-latest',
545
+ },
504
546
  contextWindowTokens: 122_880,
505
547
  description:
506
548
  '通义千问QVQ视觉推理模型,支持视觉输入及思维链输出,在数学、编程、视觉分析、创作以及通用任务上都表现了更强的能力。',
507
549
  displayName: 'QVQ Max',
508
- id: 'qvq-max-latest',
550
+ id: 'qvq-max',
509
551
  maxOutput: 8192,
510
552
  organization: 'Qwen',
511
553
  pricing: {
@@ -667,8 +709,8 @@ const qwenChatModels: AIChatModelCard[] = [
667
709
  {
668
710
  contextWindowTokens: 131_072,
669
711
  description: '通义千问代码模型开源版。',
670
- displayName: 'Qwen2.5 Coder 32B',
671
- id: 'qwen2.5-coder-32b-instruct',
712
+ displayName: 'Qwen2.5 Coder 14B',
713
+ id: 'qwen2.5-coder-14b-instruct',
672
714
  maxOutput: 8192,
673
715
  organization: 'Qwen',
674
716
  pricing: {
@@ -679,36 +721,16 @@ const qwenChatModels: AIChatModelCard[] = [
679
721
  type: 'chat',
680
722
  },
681
723
  {
682
- abilities: {
683
- vision: true,
684
- },
685
- contextWindowTokens: 8000,
686
- description: '以 Qwen-7B 语言模型初始化,添加图像模型,图像输入分辨率为448的预训练模型。',
687
- displayName: 'Qwen VL',
688
- id: 'qwen-vl-v1',
689
- maxOutput: 1500,
690
- organization: 'Qwen',
691
- pricing: {
692
- currency: 'CNY',
693
- input: 0,
694
- output: 0,
695
- },
696
- type: 'chat',
697
- },
698
- {
699
- abilities: {
700
- vision: true,
701
- },
702
- contextWindowTokens: 8000,
703
- description: '通义千问VL支持灵活的交互方式,包括多图、多轮问答、创作等能力的模型。',
704
- displayName: 'Qwen VL Chat',
705
- id: 'qwen-vl-chat-v1',
706
- maxOutput: 1500,
724
+ contextWindowTokens: 131_072,
725
+ description: '通义千问代码模型开源版。',
726
+ displayName: 'Qwen2.5 Coder 32B',
727
+ id: 'qwen2.5-coder-32b-instruct',
728
+ maxOutput: 8192,
707
729
  organization: 'Qwen',
708
730
  pricing: {
709
731
  currency: 'CNY',
710
- input: 0,
711
- output: 0,
732
+ input: 2,
733
+ output: 6,
712
734
  },
713
735
  type: 'chat',
714
736
  },
@@ -419,7 +419,7 @@ const Qwen: ModelProviderCard = {
419
419
  id: 'qwen',
420
420
  modelList: { showModelFetcher: true },
421
421
  modelsUrl: 'https://help.aliyun.com/zh/dashscope/developer-reference/api-details',
422
- name: 'Qwen',
422
+ name: 'Aliyun Bailian',
423
423
  proxyUrl: {
424
424
  placeholder: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
425
425
  },
@@ -429,16 +429,13 @@ const Qwen: ModelProviderCard = {
429
429
  placeholder: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
430
430
  },
431
431
  sdkType: 'openai',
432
+ showDeployName: true,
432
433
  showModelFetcher: true,
433
434
  smoothing: {
434
435
  speed: 2,
435
436
  text: true,
436
437
  },
437
438
  },
438
- smoothing: {
439
- speed: 2,
440
- text: true,
441
- },
442
439
  url: 'https://www.aliyun.com/product/bailian',
443
440
  };
444
441
 
@@ -62,7 +62,7 @@ const XAI: ModelProviderCard = {
62
62
  id: 'xai',
63
63
  modelList: { showModelFetcher: true },
64
64
  modelsUrl: 'https://docs.x.ai/docs#models',
65
- name: 'xAI',
65
+ name: 'xAI (Grok)',
66
66
  proxyUrl: {
67
67
  placeholder: 'https://api.x.ai/v1',
68
68
  },
@@ -9,7 +9,7 @@ describe('useOnPluginReadyForInteraction', () => {
9
9
 
10
10
  afterEach(() => {
11
11
  mockOnReady.mockReset();
12
- window.removeEventListener('message', expect.any(Function));
12
+ window.removeEventListener('message', () => {});
13
13
  });
14
14
 
15
15
  it('sets readyForRender to true when a PluginChannel.pluginReadyForRender message is received', async () => {
@@ -9,7 +9,7 @@ describe('useOnPluginSettingsUpdate', () => {
9
9
 
10
10
  afterEach(() => {
11
11
  mockCallback.mockReset();
12
- window.removeEventListener('message', expect.any(Function));
12
+ window.removeEventListener('message', () => {});
13
13
  });
14
14
 
15
15
  it('calls the callback when a PluginChannel updatePluginSettings message is received', () => {
@@ -12,7 +12,7 @@ describe('useOnPluginStateUpdate', () => {
12
12
  // Reset the mock callback after each test
13
13
  mockCallback.mockReset();
14
14
  // Ensure no event listeners are left hanging after each test
15
- window.removeEventListener('message', expect.any(Function));
15
+ window.removeEventListener('message', () => {});
16
16
  });
17
17
 
18
18
  it('calls the callback when a PluginChannel update message is received', () => {
@@ -3,7 +3,7 @@ import OpenAI from 'openai';
3
3
  import { ChatModelCard } from '@/types/llm';
4
4
 
5
5
  import {
6
- ChatCompetitionOptions,
6
+ ChatMethodOptions,
7
7
  ChatStreamPayload,
8
8
  Embeddings,
9
9
  EmbeddingsOptions,
@@ -18,7 +18,7 @@ import {
18
18
  /* eslint-disable sort-keys-fix/sort-keys-fix , typescript-sort-keys/interface */
19
19
  export interface LobeRuntimeAI {
20
20
  baseURL?: string;
21
- chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions): Promise<Response>;
21
+ chat(payload: ChatStreamPayload, options?: ChatMethodOptions): Promise<Response>;
22
22
 
23
23
  embeddings?(payload: EmbeddingsPayload, options?: EmbeddingsOptions): Promise<Embeddings[]>;
24
24
 
@@ -40,7 +40,7 @@ export abstract class LobeOpenAICompatibleRuntime {
40
40
  abstract baseURL: string;
41
41
  abstract client: OpenAI;
42
42
 
43
- abstract chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions): Promise<Response>;
43
+ abstract chat(payload: ChatStreamPayload, options?: ChatMethodOptions): Promise<Response>;
44
44
 
45
45
  abstract models(): Promise<ChatModelCard[]>;
46
46
 
@@ -8,7 +8,7 @@ import { LobeCloudflareParams } from './cloudflare';
8
8
  import { LobeOpenAI } from './openai';
9
9
  import { providerRuntimeMap } from './runtimeMap';
10
10
  import {
11
- ChatCompetitionOptions,
11
+ ChatMethodOptions,
12
12
  ChatStreamPayload,
13
13
  EmbeddingsOptions,
14
14
  EmbeddingsPayload,
@@ -60,7 +60,7 @@ class ModelRuntime {
60
60
  * }));
61
61
  * ```
62
62
  */
63
- async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
63
+ async chat(payload: ChatStreamPayload, options?: ChatMethodOptions) {
64
64
  return this._runtime.chat(payload, options);
65
65
  }
66
66
 
@@ -2,8 +2,8 @@ import { LobeRuntimeAI } from '../BaseAI';
2
2
  import { LobeOpenAI } from '../openai';
3
3
  import { providerRuntimeMap } from '../runtimeMap';
4
4
  import {
5
- ChatCompetitionOptions,
6
5
  type ChatCompletionErrorPayload,
6
+ ChatMethodOptions,
7
7
  ChatStreamPayload,
8
8
  EmbeddingsOptions,
9
9
  EmbeddingsPayload,
@@ -70,7 +70,7 @@ class UniformRuntime {
70
70
  return runtimeItem.runtime;
71
71
  }
72
72
 
73
- async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
73
+ async chat(payload: ChatStreamPayload, options?: ChatMethodOptions) {
74
74
  try {
75
75
  const runtime = this.getRuntimeByModel(payload.model);
76
76
 
@@ -1,7 +1,7 @@
1
1
  import { ModelProvider } from '../types';
2
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
2
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
3
3
 
4
- export const LobeAi21AI = LobeOpenAICompatibleFactory({
4
+ export const LobeAi21AI = createOpenAICompatibleRuntime({
5
5
  baseURL: 'https://api.ai21.com/studio/v1',
6
6
  chatCompletion: {
7
7
  handlePayload: (payload) => {
@@ -1,7 +1,7 @@
1
1
  import type { ChatModelCard } from '@/types/llm';
2
2
 
3
3
  import { ModelProvider } from '../types';
4
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
4
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
5
5
 
6
6
  export interface Ai360ModelCard {
7
7
  id: string;
@@ -9,7 +9,7 @@ export interface Ai360ModelCard {
9
9
  total_tokens: number;
10
10
  }
11
11
 
12
- export const LobeAi360AI = LobeOpenAICompatibleFactory({
12
+ export const LobeAi360AI = createOpenAICompatibleRuntime({
13
13
  baseURL: 'https://api.360.cn/v1',
14
14
  chatCompletion: {
15
15
  handlePayload: (payload) => {