@lobehub/chat 1.85.10 → 1.86.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. package/.env.example +5 -0
  2. package/CHANGELOG.md +25 -0
  3. package/Dockerfile +2 -0
  4. package/Dockerfile.database +2 -0
  5. package/Dockerfile.pglite +2 -0
  6. package/README.md +1 -0
  7. package/README.zh-CN.md +1 -0
  8. package/changelog/v1.json +9 -0
  9. package/docs/self-hosting/environment-variables/model-provider.mdx +23 -0
  10. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +23 -1
  11. package/docs/usage/providers/qiniu.mdx +58 -0
  12. package/docs/usage/providers/qiniu.zh-CN.mdx +55 -0
  13. package/locales/ar/providers.json +3 -0
  14. package/locales/bg-BG/providers.json +3 -0
  15. package/locales/de-DE/providers.json +3 -0
  16. package/locales/en-US/providers.json +3 -0
  17. package/locales/es-ES/providers.json +3 -0
  18. package/locales/fa-IR/providers.json +3 -0
  19. package/locales/fr-FR/providers.json +3 -0
  20. package/locales/it-IT/providers.json +3 -0
  21. package/locales/ja-JP/providers.json +3 -0
  22. package/locales/ko-KR/providers.json +3 -0
  23. package/locales/nl-NL/providers.json +3 -0
  24. package/locales/pl-PL/providers.json +3 -0
  25. package/locales/pt-BR/providers.json +3 -0
  26. package/locales/ru-RU/providers.json +3 -0
  27. package/locales/tr-TR/providers.json +3 -0
  28. package/locales/vi-VN/providers.json +3 -0
  29. package/locales/zh-CN/providers.json +3 -0
  30. package/locales/zh-TW/providers.json +3 -0
  31. package/package.json +1 -1
  32. package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +2 -0
  33. package/src/app/[variants]/(main)/settings/llm/components/ProviderModelList/Option.tsx +4 -1
  34. package/src/config/aiModels/index.ts +3 -0
  35. package/src/config/aiModels/qiniu.ts +34 -0
  36. package/src/config/llm.ts +6 -0
  37. package/src/config/modelProviders/index.ts +4 -0
  38. package/src/config/modelProviders/qiniu.ts +42 -0
  39. package/src/libs/agent-runtime/qiniu/index.test.ts +16 -0
  40. package/src/libs/agent-runtime/qiniu/index.ts +47 -0
  41. package/src/libs/agent-runtime/runtimeMap.ts +2 -0
  42. package/src/libs/agent-runtime/types/type.ts +1 -0
  43. package/src/server/modules/AgentRuntime/index.test.ts +1 -0
  44. package/src/store/user/slices/modelList/selectors/modelProvider.ts +5 -2
  45. package/src/types/user/settings/keyVaults.ts +1 -0
package/.env.example CHANGED
@@ -109,6 +109,11 @@ OPENAI_API_KEY=sk-xxxxxxxxx
109
109
  # DEEPSEEK_PROXY_URL=https://api.deepseek.com/v1
110
110
  # DEEPSEEK_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
111
111
 
112
+ ### Qiniu AI ####
113
+
114
+ # QINIU_PROXY_URL=https://api.qnaigc.com/v1
115
+ # QINIU_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
116
+
112
117
  ### Qwen AI ####
113
118
 
114
119
  # QWEN_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ## [Version 1.86.0](https://github.com/lobehub/lobe-chat/compare/v1.85.10...v1.86.0)
6
+
7
+ <sup>Released on **2025-05-15**</sup>
8
+
9
+ #### ✨ Features
10
+
11
+ - **misc**: Add Qiniu Provider.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's improved
19
+
20
+ - **misc**: Add Qiniu Provider, closes [#7649](https://github.com/lobehub/lobe-chat/issues/7649) ([c9b8e9f](https://github.com/lobehub/lobe-chat/commit/c9b8e9f))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
5
30
  ### [Version 1.85.10](https://github.com/lobehub/lobe-chat/compare/v1.85.9...v1.85.10)
6
31
 
7
32
  <sup>Released on **2025-05-14**</sup>
package/Dockerfile CHANGED
@@ -202,6 +202,8 @@ ENV \
202
202
  PERPLEXITY_API_KEY="" PERPLEXITY_MODEL_LIST="" PERPLEXITY_PROXY_URL="" \
203
203
  # PPIO
204
204
  PPIO_API_KEY="" PPIO_MODEL_LIST="" \
205
+ # Qiniu
206
+ QINIU_API_KEY="" QINIU_MODEL_LIST="" QINIU_PROXY_URL="" \
205
207
  # Qwen
206
208
  QWEN_API_KEY="" QWEN_MODEL_LIST="" QWEN_PROXY_URL="" \
207
209
  # SambaNova
package/Dockerfile.database CHANGED
@@ -246,6 +246,8 @@ ENV \
246
246
  PERPLEXITY_API_KEY="" PERPLEXITY_MODEL_LIST="" PERPLEXITY_PROXY_URL="" \
247
247
  # PPIO
248
248
  PPIO_API_KEY="" PPIO_MODEL_LIST="" \
249
+ # Qiniu
250
+ QINIU_API_KEY="" QINIU_MODEL_LIST="" QINIU_PROXY_URL="" \
249
251
  # Qwen
250
252
  QWEN_API_KEY="" QWEN_MODEL_LIST="" QWEN_PROXY_URL="" \
251
253
  # SambaNova
package/Dockerfile.pglite CHANGED
@@ -202,6 +202,8 @@ ENV \
202
202
  OPENROUTER_API_KEY="" OPENROUTER_MODEL_LIST="" \
203
203
  # Perplexity
204
204
  PERPLEXITY_API_KEY="" PERPLEXITY_MODEL_LIST="" PERPLEXITY_PROXY_URL="" \
205
+ # Qiniu
206
+ QINIU_API_KEY="" QINIU_MODEL_LIST="" QINIU_PROXY_URL="" \
205
207
  # Qwen
206
208
  QWEN_API_KEY="" QWEN_MODEL_LIST="" QWEN_PROXY_URL="" \
207
209
  # SambaNova
package/README.md CHANGED
@@ -209,6 +209,7 @@ We have implemented support for the following model service providers:
209
209
  - **[Ai21Labs](https://lobechat.com/discover/provider/ai21)**: AI21 Labs builds foundational models and AI systems for enterprises, accelerating the application of generative AI in production.
210
210
  - **[Upstage](https://lobechat.com/discover/provider/upstage)**: Upstage focuses on developing AI models for various business needs, including Solar LLM and document AI, aiming to achieve artificial general intelligence (AGI) for work. It allows for the creation of simple conversational agents through Chat API and supports functional calling, translation, embedding, and domain-specific applications.
211
211
  - **[xAI](https://lobechat.com/discover/provider/xai)**: xAI is a company dedicated to building artificial intelligence to accelerate human scientific discovery. Our mission is to advance our collective understanding of the universe.
212
+ - **[Qiniu](https://lobechat.com/discover/provider/qiniu)**: Qiniu, as a long-established cloud service provider, delivers cost-effective and reliable AI inference services for both real-time and batch processing, with a simple and user-friendly experience.
212
213
  - **[Qwen](https://lobechat.com/discover/provider/qwen)**: Tongyi Qianwen is a large-scale language model independently developed by Alibaba Cloud, featuring strong natural language understanding and generation capabilities. It can answer various questions, create written content, express opinions, and write code, playing a role in multiple fields.
213
214
  - **[Wenxin](https://lobechat.com/discover/provider/wenxin)**: An enterprise-level one-stop platform for large model and AI-native application development and services, providing the most comprehensive and user-friendly toolchain for the entire process of generative artificial intelligence model development and application development.
214
215
  - **[Hunyuan](https://lobechat.com/discover/provider/hunyuan)**: A large language model developed by Tencent, equipped with powerful Chinese creative capabilities, logical reasoning abilities in complex contexts, and reliable task execution skills.
package/README.zh-CN.md CHANGED
@@ -209,6 +209,7 @@ LobeChat 支持文件上传与知识库功能,你可以上传文件、图片
209
209
  - **[Ai21Labs](https://lobechat.com/discover/provider/ai21)**: AI21 Labs 为企业构建基础模型和人工智能系统,加速生成性人工智能在生产中的应用。
210
210
  - **[Upstage](https://lobechat.com/discover/provider/upstage)**: Upstage 专注于为各种商业需求开发 AI 模型,包括 Solar LLM 和文档 AI,旨在实现工作的人造通用智能(AGI)。通过 Chat API 创建简单的对话代理,并支持功能调用、翻译、嵌入以及特定领域应用。
211
211
  - **[xAI](https://lobechat.com/discover/provider/xai)**: xAI 是一家致力于构建人工智能以加速人类科学发现的公司。我们的使命是推动我们对宇宙的共同理解。
212
+ - **[Qiniu](https://lobechat.com/discover/provider/qiniu)**: 七牛作为老牌云服务厂商,提供高性价比稳定的实时、批量 AI 推理服务,简单易用。
212
213
  - **[Qwen](https://lobechat.com/discover/provider/qwen)**: 通义千问是阿里云自主研发的超大规模语言模型,具有强大的自然语言理解和生成能力。它可以回答各种问题、创作文字内容、表达观点看法、撰写代码等,在多个领域发挥作用。
213
214
  - **[Wenxin](https://lobechat.com/discover/provider/wenxin)**: 企业级一站式大模型与 AI 原生应用开发及服务平台,提供最全面易用的生成式人工智能模型开发、应用开发全流程工具链
214
215
  - **[Hunyuan](https://lobechat.com/discover/provider/hunyuan)**: 由腾讯研发的大语言模型,具备强大的中文创作能力,复杂语境下的逻辑推理能力,以及可靠的任务执行能力
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "features": [
5
+ "Add Qiniu Provider."
6
+ ]
7
+ },
8
+ "date": "2025-05-15",
9
+ "version": "1.86.0"
10
+ },
2
11
  {
3
12
  "children": {
4
13
  "improvements": [
package/docs/self-hosting/environment-variables/model-provider.mdx CHANGED
@@ -423,6 +423,29 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
423
423
  - Default: `-`
424
424
  - Example: `-all,+yi-large,+yi-large-rag`
425
425
 
426
+ ## Qiniu
427
+
428
+ ### `QINIU_API_KEY`
429
+
430
+ - Type: Required
431
+ - Description: This is the API key you can obtain from Qiniu AI service
432
+ - Default: -
433
+ - Example:`sk-xxxxx...xxxxx`
434
+
435
+ ### `QINIU_MODEL_LIST`
436
+
437
+ - Type: Optional
438
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
439
+ - Default: `-`
440
+ - Example: `-all,+deepseek-r1,+deepseek-v3`
441
+
442
+ ### `QINIU_PROXY_URL`
443
+
444
+ - Type: Optional
445
+ - Description: If you manually configure the Qiniu API proxy, you can use this configuration item to override the default Qiniu API request base URL
446
+ - Default: `https://api.qnaigc.com/v1`
447
+ - Example: `https://my-qnaigc.com/v1`
448
+
426
449
  ## Qwen
427
450
 
428
451
  ### `QWEN_API_KEY`
package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx CHANGED
@@ -325,7 +325,6 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
325
325
  - 默认值:`https://api.moonshot.cn/v1`
326
326
  - 示例:`https://my-moonshot-proxy.com/v1`
327
327
 
328
-
329
328
  ## Perplexity AI
330
329
 
331
330
  ### `PERPLEXITY_API_KEY`
@@ -422,6 +421,29 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
422
421
  - 默认值:`-`
423
422
  - 示例:`-all,+yi-large,+yi-large-rag`
424
423
 
424
+ ## 七牛云
425
+
426
+ ### `QINIU_API_KEY`
427
+
428
+ - 类型:必选
429
+ - 描述:这是你在七牛云上获取的 API 密钥
430
+ - 默认值:-
431
+ - 示例:`sk-xxxxx...xxxxx`
432
+
433
+ ### `QINIU_MODEL_LIST`
434
+
435
+ - 类型:可选
436
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
437
+ - 默认值:`-`
438
+ - 示例:`-all,+deepseek-r1,+deepseek-v3`
439
+
440
+ ### `QINIU_PROXY_URL`
441
+
442
+ - 类型:可选
443
+ - 描述:如果你手动配置了 Qiniu 接口代理,可以使用此配置项来覆盖默认的 Qiniu API 请求基础 URL
444
+ - 默认值:`https://api.qnaigc.com/v1`
445
+ - 示例:`https://my-qnaigc.com/v1`
446
+
425
447
  ## 通义千问
426
448
 
427
449
  ### `QWEN_API_KEY`
package/docs/usage/providers/qiniu.mdx ADDED
@@ -0,0 +1,58 @@
1
+ ---
2
+ title: Using Qiniu API Key in LobeChat
3
+ description: >-
4
+ Learn how to integrate and utilize powerful language models developed by Qiniu
5
+ into LobeChat for various tasks. Follow the steps to obtain an API key
6
+ and configure it for seamless interaction.
7
+ tags:
8
+ - API key
9
+ - Web UI
10
+ - 七牛
11
+ - 七牛云
12
+ - 七牛智能
13
+ - Qiniu
14
+ - DeepSeek
15
+ ---
16
+
17
+ # Using Qiniu's AI Models in LobeChat
18
+
19
+ <Image alt={'Using Qiniu\'s AI Models in LobeChat'} cover src={'https://github.com/user-attachments/assets/3ad2655e-dd20-4534-bf6d-080b3677df86'} />
20
+
21
+ [Qiniu](https://www.qiniu.com), as a long-established cloud service provider, delivers cost-effective and reliable AI inference services for both real-time and batch processing, with a simple and user-friendly experience.
22
+
23
+ This document will guide you on how to use Qiniu's AI Models in LobeChat:
24
+
25
+ <Steps>
26
+ ### Step 1: [Obtain AI Model API Key](https://developer.qiniu.com/aitokenapi/12884/how-to-get-api-key)
27
+
28
+ - Method 1: Using Console
29
+ 1. [Register a Qiniu account](https://s.qiniu.com/umqq6n?ref=developer.qiniu.com\&s_path=%2Faitokenapi%2F12884%2Fhow-to-get-api-key)
30
+ 2. [Go to the console to obtain your API Key](https://portal.qiniu.com/ai-inference/api-key)
31
+ <Image alt={'Obtain your API Key'} inStep src={'https://static.sufy.com/lobehub/439040511-a014769f-262c-4ee4-a727-2c3c45111574.png'} />
32
+
33
+ - Method 2: Using Mini Program
34
+ 1. Open the Qiniu mini program
35
+ 2. Quick login to your account
36
+ 3. Click the \[Me] tab in the bottom navigation bar
37
+ 4. Click \[My Console]
38
+ 5. Navigate to \[AI Inference]
39
+ 6. View and copy your API key
40
+
41
+ ### Step 2: Configure Qiniu's AI Model Service in LobeChat
42
+
43
+ - Visit the `Settings` interface in LobeChat
44
+ - Find the setting for `Qiniu` under `Language Model`
45
+
46
+ <Image alt={'Enter API key'} inStep src={'https://static.sufy.com/lobehub/439047682-40bd5ec0-c2fe-4397-9ae1-f6d0b9e55287.png'} />
47
+
48
+ - Open Qiniu and enter the obtained API key.
49
+ - Choose a Qiniu's model for your AI assistant to start the conversation.
50
+
51
+ <Image alt={'Select a Qiniu\'s model and start conversation'} inStep src={'https://static.sufy.com/lobehub/439048211-eadae11f-86e8-4a8d-944d-2f984e257356.png'} />
52
+
53
+ <Callout type={'warning'}>
54
+ During usage, you may need to pay the API service provider. Please refer to [Qiniu's relevant pricing policies](https://developer.qiniu.com/aitokenapi/12898/ai-token-api-pricing).
55
+ </Callout>
56
+ </Steps>
57
+
58
+ You can now engage in conversations using the models provided by Qiniu in LobeChat.
package/docs/usage/providers/qiniu.zh-CN.mdx ADDED
@@ -0,0 +1,55 @@
1
+ ---
2
+ title: 在 LobeChat 中使用七牛云大模型 API Key
3
+ description: 学习如何在 LobeChat 中配置和使用七牛云的大模型,提供强大的自然语言理解和生成能力。
4
+ tags:
5
+ - API key
6
+ - Web UI
7
+ - 七牛
8
+ - 七牛云
9
+ - 七牛智能
10
+ - Qiniu
11
+ - DeepSeek
12
+ ---
13
+
14
+ # 在 LobeChat 中使用七牛云大模型
15
+
16
+ <Image alt={'在 LobeChat 中使用七牛云大模型'} cover src={'https://github.com/user-attachments/assets/3ad2655e-dd20-4534-bf6d-080b3677df86'} />
17
+
18
+ [七牛云](https://www.qiniu.com)作为老牌云服务厂商,提供高性价比稳定的实时、批量 AI 推理服务,简单易用。
19
+
20
+ 本文档将指导你如何在 LobeChat 中使用七牛云大模型:
21
+
22
+ <Steps>
23
+ ### 步骤一:[获取 AI 大模型 API 密钥](https://developer.qiniu.com/aitokenapi/12884/how-to-get-api-key)
24
+
25
+ - 方法一:使用控制台获取
26
+ 1. [注册七牛账号](https://s.qiniu.com/umqq6n?ref=developer.qiniu.com\&s_path=%2Faitokenapi%2F12884%2Fhow-to-get-api-key)
27
+ 2. [前往控制台获取 API Key](https://portal.qiniu.com/ai-inference/api-key)
28
+ <Image alt={'获取 API Key'} inStep src={'https://static.sufy.com/lobehub/438758098-119239c1-8552-420a-9906-de2eab739fc6.png'} />
29
+
30
+ - 方法二:使用小程序获取
31
+ 1. 打开七牛小程序
32
+ 2. 快速登录账号
33
+ 3. 点击【我的】底部导航栏
34
+ 4. 点击【我的控制台】
35
+ 5. 进入【AI 推理】
36
+ 6. 查看和复制你的 API 密钥
37
+
38
+ ### 步骤二:在 LobeChat 中配置七牛云大模型服务
39
+
40
+ - 访问 LobeChat 的`设置`界面
41
+ - 在`语言模型`下找到`七牛云`的设置项
42
+
43
+ <Image alt={'填写 API 密钥'} inStep src={'https://static.sufy.com/lobehub/439049319-6ae44f36-bf48-492a-a6aa-7be72f4a29d8.png'} />
44
+
45
+ - 打开七牛云并填入获得的 API 密钥
46
+ - 为你的 AI 助手选择一个七牛云的大模型即可开始对话
47
+
48
+ <Image alt={'选择七牛云大模型并开始对话'} inStep src={'https://static.sufy.com/lobehub/439048945-c608eb9e-6ee1-4611-9df7-2075e95d069b.png'} />
49
+
50
+ <Callout type={'warning'}>
51
+ 在使用过程中你可能需要向 API 服务提供商付费,请参考[七牛云的相关费用政策](https://developer.qiniu.com/aitokenapi/12898/ai-token-api-pricing)。
52
+ </Callout>
53
+ </Steps>
54
+
55
+ 至此你已经可以在 LobeChat 中使用七牛云提供的大模型进行对话了。
package/locales/ar/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "تقدم PPIO بايو السحابية خدمات واجهة برمجة التطبيقات لنماذج مفتوحة المصدر مستقرة وذات تكلفة فعالة، تدعم جميع سلسلة DeepSeek، وLlama، وQwen، وغيرها من النماذج الكبيرة الرائدة في الصناعة."
97
97
  },
98
+ "qiniu": {
99
+ "description": "كشركة رائدة في خدمات السحابة، تقدم Qiniu خدمات استدلال ذكاء اصطناعي في الوقت الفعلي ومجموعة كبيرة بتكلفة فعالة وموثوقة، سهلة الاستخدام."
100
+ },
98
101
  "qwen": {
99
102
  "description": "Qwen هو نموذج لغة ضخم تم تطويره ذاتيًا بواسطة Alibaba Cloud، يتمتع بقدرات قوية في فهم وتوليد اللغة الطبيعية. يمكنه الإجابة على مجموعة متنوعة من الأسئلة، وكتابة المحتوى، والتعبير عن الآراء، وكتابة الشيفرات، ويؤدي دورًا في مجالات متعددة."
100
103
  },
package/locales/bg-BG/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO ПайОу облак предлага стабилни и икономически изгодни API услуги за отворени модели, поддържащи цялата серия DeepSeek, Llama, Qwen и други водещи модели в индустрията."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu е водещ доставчик на облачни услуги, предлагащ бързи и ефективни API за извикване на големи модели, включително и тези на Alibaba, с гъвкави възможности за изграждане и развитие на AI приложения."
100
+ },
98
101
  "qwen": {
99
102
  "description": "Qwen е самостоятелно разработен свръхголям езиков модел на Alibaba Cloud, с мощни способности за разбиране и генериране на естествен език. Може да отговаря на различни въпроси, да създава текстово съдържание, да изразява мнения и да пише код, играейки роля в множество области."
100
103
  },
package/locales/de-DE/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO Paiou Cloud bietet stabile und kosteneffiziente Open-Source-Modell-API-Dienste und unterstützt die gesamte DeepSeek-Serie, Llama, Qwen und andere führende große Modelle der Branche."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu ist ein führender Anbieter von Cloud-Diensten, der schnelle und effiziente API-Dienste für große Modelle bereitstellt, einschließlich der von Alibaba, mit flexiblen Optionen für das Entwickeln und Anwenden von AI-Anwendungen."
100
+ },
98
101
  "qwen": {
99
102
  "description": "Tongyi Qianwen ist ein von Alibaba Cloud selbst entwickeltes, groß angelegtes Sprachmodell mit starken Fähigkeiten zur Verarbeitung und Generierung natürlicher Sprache. Es kann eine Vielzahl von Fragen beantworten, Texte erstellen, Meinungen äußern und Code schreiben und spielt in mehreren Bereichen eine Rolle."
100
103
  },
package/locales/en-US/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO supports stable and cost-efficient open-source LLM APIs, such as DeepSeek, Llama, Qwen etc."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu, as a long-established cloud service provider, delivers cost-effective and reliable AI inference services for both real-time and batch processing, with a simple and user-friendly experience."
100
+ },
98
101
  "qwen": {
99
102
  "description": "Tongyi Qianwen is a large-scale language model independently developed by Alibaba Cloud, featuring strong natural language understanding and generation capabilities. It can answer various questions, create written content, express opinions, and write code, playing a role in multiple fields."
100
103
  },
package/locales/es-ES/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO Paiouyun ofrece servicios de API de modelos de código abierto estables y de alto rendimiento, que admiten toda la serie DeepSeek, Llama, Qwen y otros modelos grandes líderes en la industria."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu es un proveedor líder de servicios de nube, ofreciendo API de IA de alta velocidad y eficiencia, incluyendo modelos de Alibaba, con opciones flexibles para construir y aplicar aplicaciones de IA."
100
+ },
98
101
  "qwen": {
99
102
  "description": "Tongyi Qianwen es un modelo de lenguaje de gran escala desarrollado de forma independiente por Alibaba Cloud, con potentes capacidades de comprensión y generación de lenguaje natural. Puede responder a diversas preguntas, crear contenido escrito, expresar opiniones y redactar código, desempeñando un papel en múltiples campos."
100
103
  },
package/locales/fa-IR/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO پایو کلود خدمات API مدل‌های متن باز با ثبات و با قیمت مناسب را ارائه می‌دهد و از تمام سری‌های DeepSeek، Llama، Qwen و سایر مدل‌های بزرگ پیشرو در صنعت پشتیبانی می‌کند."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu یک شرکت پیشرو در خدمات سحابی است که API های سریع و کارآمد برای فراخوانی مدل‌های بزرگ، از جمله مدل‌های Alibaba، را ارائه می‌دهد و با امکانات پیشرفته برای ساخت و استفاده از برنامه‌های AI پشتیبانی می‌کند."
100
+ },
98
101
  "qwen": {
99
102
  "description": "چوان یی چیان ون یک مدل زبان بسیار بزرگ است که توسط علی‌کلود به‌طور مستقل توسعه یافته و دارای توانایی‌های قدرتمند درک و تولید زبان طبیعی است. این مدل می‌تواند به انواع سوالات پاسخ دهد، محتوای متنی خلق کند، نظرات و دیدگاه‌ها را بیان کند، کد بنویسد و در حوزه‌های مختلف نقش ایفا کند."
100
103
  },
package/locales/fr-FR/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO Paiouyun offre des services API de modèles open source stables et rentables, prenant en charge toute la gamme DeepSeek, Llama, Qwen et d'autres grands modèles de pointe dans l'industrie."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu est un fournisseur de services de cloud, offrant des API de IA de haute vitesse et d'efficacité, incluant des modèles Alibaba, avec des options flexibles pour la construction et l'application d'applications IA."
100
+ },
98
101
  "qwen": {
99
102
  "description": "Tongyi Qianwen est un modèle de langage à grande échelle développé de manière autonome par Alibaba Cloud, doté de puissantes capacités de compréhension et de génération du langage naturel. Il peut répondre à diverses questions, créer du contenu écrit, exprimer des opinions, rédiger du code, etc., jouant un rôle dans plusieurs domaines."
100
103
  },
package/locales/it-IT/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO Paeou Cloud offre servizi API per modelli open source stabili e ad alto rapporto qualità-prezzo, supportando l'intera gamma di DeepSeek, Llama, Qwen e altri modelli di grandi dimensioni leader del settore."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu è un fornitore di servizi cloud leader, offrendo API di IA ad alta velocità e efficienza, incluso il modello Alibaba, con opzioni flessibili per costruire e applicare applicazioni di IA."
100
+ },
98
101
  "qwen": {
99
102
  "description": "Qwen è un modello di linguaggio di grande scala sviluppato autonomamente da Alibaba Cloud, con potenti capacità di comprensione e generazione del linguaggio naturale. Può rispondere a varie domande, creare contenuti testuali, esprimere opinioni e scrivere codice, svolgendo un ruolo in vari settori."
100
103
  },
package/locales/ja-JP/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO パイオ云は、安定した高コストパフォーマンスのオープンソースモデル API サービスを提供し、DeepSeek の全シリーズ、Llama、Qwen などの業界をリードする大規模モデルをサポートしています。"
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniuは、老舗のクラウドサービスプロバイダーであり、高品質で安価なリアルタイムおよびバッチAI推論サービスを提供し、シンプルな使い方を提供します。"
100
+ },
98
101
  "qwen": {
99
102
  "description": "通義千問は、アリババクラウドが独自に開発した超大規模言語モデルであり、強力な自然言語理解と生成能力を持っています。さまざまな質問に答えたり、文章を創作したり、意見を表現したり、コードを執筆したりすることができ、さまざまな分野で活躍しています。"
100
103
  },
package/locales/ko-KR/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO 파이오 클라우드는 안정적이고 비용 효율적인 오픈 소스 모델 API 서비스를 제공하며, DeepSeek 전 시리즈, Llama, Qwen 등 업계 선도 대모델을 지원합니다."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu는 대형 모델 서비스를 제공하는 대형 모델 플랫폼으로, 안정적이고 비용 효율적인 오픈 소스 모델 API 서비스를 제공하며, DeepSeek 전 시리즈, Llama, Qwen 등 업계 선도 대모델을 지원합니다."
100
+ },
98
101
  "qwen": {
99
102
  "description": "통의천문은 알리바바 클라우드가 자주 개발한 초대형 언어 모델로, 강력한 자연어 이해 및 생성 능력을 갖추고 있습니다. 다양한 질문에 답변하고, 텍스트 콘텐츠를 창작하며, 의견을 표현하고, 코드를 작성하는 등 여러 분야에서 활용됩니다."
100
103
  },
package/locales/nl-NL/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO biedt stabiele en kosteneffectieve open source model API-diensten, die ondersteuning bieden voor de volledige DeepSeek-serie, Llama, Qwen en andere toonaangevende grote modellen in de industrie."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu is een leidende cloudserviceprovider die hoogwaardige, stabiele en kosteneffectieve real-time en batch AI-inferentie-API's biedt, inclusief modellen van Alibaba, met flexibele opties voor het bouwen en toepassen van AI-toepassingen."
100
+ },
98
101
  "qwen": {
99
102
  "description": "Tongyi Qianwen is een door Alibaba Cloud zelf ontwikkeld grootschalig taalmodel met krachtige mogelijkheden voor natuurlijke taalbegrip en -generatie. Het kan verschillende vragen beantwoorden, tekstinhoud creëren, meningen uiten, code schrijven, en speelt een rol in verschillende domeinen."
100
103
  },
package/locales/pl-PL/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO Paiou Cloud oferuje stabilne i opłacalne usługi API modeli open source, wspierające pełną gamę DeepSeek, Llama, Qwen i inne wiodące modele w branży."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu to wiodący dostawca usług chmurowych, oferujący API do dużych modeli AI, w tym DeepSeek, Llama i Qwen, z elastycznymi opcjami do tworzenia i stosowania aplikacji AI."
100
+ },
98
101
  "qwen": {
99
102
  "description": "Tongyi Qianwen to samodzielnie opracowany przez Alibaba Cloud model językowy o dużej skali, charakteryzujący się silnymi zdolnościami rozumienia i generowania języka naturalnego. Może odpowiadać na różnorodne pytania, tworzyć treści pisemne, wyrażać opinie, pisać kod i działać w wielu dziedzinach."
100
103
  },
package/locales/pt-BR/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "O PPIO Paiouyun oferece serviços de API de modelos de código aberto estáveis e com alto custo-benefício, suportando toda a linha DeepSeek, Llama, Qwen e outros grandes modelos líderes da indústria."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu é um fornecedor líder de serviços de cloud, oferecendo APIs de IA de alta velocidade e eficiência, incluindo modelos da Alibaba, com opções flexíveis para construir e aplicar aplicações de IA."
100
+ },
98
101
  "qwen": {
99
102
  "description": "Qwen é um modelo de linguagem de grande escala desenvolvido pela Alibaba Cloud, com forte capacidade de compreensão e geração de linguagem natural. Ele pode responder a várias perguntas, criar conteúdo escrito, expressar opiniões e escrever código, atuando em vários campos."
100
103
  },
package/locales/ru-RU/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO Paiouyun предоставляет стабильные и высокоэффективные API-сервисы для открытых моделей, поддерживающие всю серию DeepSeek, Llama, Qwen и другие ведущие модели в отрасли."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu — это ведущий поставщик облачных услуг, предлагающий API для больших моделей AI, включая DeepSeek, Llama и Qwen, с гибкими вариантами для создания и применения приложений AI."
100
+ },
98
101
  "qwen": {
99
102
  "description": "Qwen — это сверхбольшая языковая модель, разработанная Alibaba Cloud, обладающая мощными возможностями понимания и генерации естественного языка. Она может отвечать на различные вопросы, создавать текстовый контент, выражать мнения и писать код, играя важную роль в различных областях."
100
103
  },
package/locales/tr-TR/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO Paiou Cloud, istikrarlı ve yüksek maliyet etkinliğe sahip açık kaynak model API hizmeti sunar, DeepSeek'in tüm serisi, Llama, Qwen gibi sektörün önde gelen büyük modellerini destekler."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu, esnek seçeneklerle büyük model API hizmetleri sunan lider bulut hizmeti sağlayıcısıdır, DeepSeek, Llama ve Qwen gibi sektörün önde gelen büyük modellerini destekler."
100
+ },
98
101
  "qwen": {
99
102
  "description": "Tongyi Qianwen, Alibaba Cloud tarafından geliştirilen büyük ölçekli bir dil modelidir ve güçlü doğal dil anlama ve üretme yeteneklerine sahiptir. Çeşitli soruları yanıtlayabilir, metin içeriği oluşturabilir, görüşlerini ifade edebilir ve kod yazabilir. Birçok alanda etkili bir şekilde kullanılmaktadır."
100
103
  },
package/locales/vi-VN/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO派欧云 cung cấp dịch vụ API mô hình mã nguồn mở ổn định, hiệu quả chi phí cao, hỗ trợ toàn bộ dòng sản phẩm DeepSeek, Llama, Qwen và các mô hình lớn hàng đầu trong ngành."
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu là nhà cung cấp dịch vụ cloud hàng đầu, cung cấp API cho các mô hình AI lớn, bao gồm DeepSeek, Llama và Qwen, với các tùy chọn linh hoạt để xây dựng và áp dụng các ứng dụng AI."
100
+ },
98
101
  "qwen": {
99
102
  "description": "Qwen là mô hình ngôn ngữ quy mô lớn tự phát triển của Alibaba Cloud, có khả năng hiểu và tạo ngôn ngữ tự nhiên mạnh mẽ. Nó có thể trả lời nhiều câu hỏi, sáng tác nội dung văn bản, bày tỏ quan điểm, viết mã, v.v., hoạt động trong nhiều lĩnh vực."
100
103
  },
package/locales/zh-CN/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO 派欧云提供稳定、高性价比的开源模型 API 服务,支持 DeepSeek 全系列、Llama、Qwen 等行业领先大模型。"
97
97
  },
98
+ "qiniu": {
99
+ "description": "七牛作为老牌云服务厂商,提供高性价比稳定的实时、批量 AI 推理服务,简单易用。"
100
+ },
98
101
  "qwen": {
99
102
  "description": "通义千问是阿里云自主研发的超大规模语言模型,具有强大的自然语言理解和生成能力。它可以回答各种问题、创作文字内容、表达观点看法、撰写代码等,在多个领域发挥作用。"
100
103
  },
package/locales/zh-TW/providers.json CHANGED
@@ -95,6 +95,9 @@
95
95
  "ppio": {
96
96
  "description": "PPIO 派歐雲提供穩定、高性價比的開源模型 API 服務,支持 DeepSeek 全系列、Llama、Qwen 等行業領先的大模型。"
97
97
  },
98
+ "qiniu": {
99
+ "description": "Qiniu 是領先的雲服務提供商,提供高性價比、穩定的實時和批量 AI 推理服務,包括 DeepSeek、Llama 和 Qwen 等行業領先的大模型。"
100
+ },
98
101
  "qwen": {
99
102
  "description": "通義千問是阿里雲自主研發的超大規模語言模型,具有強大的自然語言理解和生成能力。它可以回答各種問題、創作文字內容、表達觀點看法、撰寫代碼等,在多個領域發揮作用。"
100
103
  },
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.85.10",
3
+ "version": "1.86.0",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx CHANGED
@@ -24,6 +24,7 @@ import {
24
24
  OpenRouterProviderCard,
25
25
  PPIOProviderCard,
26
26
  PerplexityProviderCard,
27
+ QiniuProviderCard,
27
28
  QwenProviderCard,
28
29
  SambaNovaProviderCard,
29
30
  Search1APIProviderCard,
@@ -89,6 +90,7 @@ export const useProviderList = (): ProviderItem[] => {
89
90
  SambaNovaProviderCard,
90
91
  Search1APIProviderCard,
91
92
  CohereProviderCard,
93
+ QiniuProviderCard,
92
94
  QwenProviderCard,
93
95
  WenxinProviderCard,
94
96
  HunyuanProviderCard,
package/src/app/[variants]/(main)/settings/llm/components/ProviderModelList/Option.tsx CHANGED
@@ -23,7 +23,10 @@ interface OptionRenderProps {
23
23
  removed?: boolean;
24
24
  }
25
25
  const OptionRender = memo<OptionRenderProps>(({ displayName, id, provider, isAzure, removed }) => {
26
- const model = useUserStore((s) => modelProviderSelectors.getModelCardById(id)(s), isEqual);
26
+ const model = useUserStore(
27
+ (s) => modelProviderSelectors.getModelCardById(id, provider)(s),
28
+ isEqual,
29
+ );
27
30
  const { t } = useTranslation('components');
28
31
  const theme = useTheme();
29
32
  // if there is isCustom, it means it is a user defined custom model
package/src/config/aiModels/index.ts CHANGED
@@ -32,6 +32,7 @@ import { default as openai } from './openai';
32
32
  import { default as openrouter } from './openrouter';
33
33
  import { default as perplexity } from './perplexity';
34
34
  import { default as ppio } from './ppio';
35
+ import { default as qiniu } from './qiniu';
35
36
  import { default as qwen } from './qwen';
36
37
  import { default as sambanova } from './sambanova';
37
38
  import { default as search1api } from './search1api';
@@ -104,6 +105,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
104
105
  openrouter,
105
106
  perplexity,
106
107
  ppio,
108
+ qiniu,
107
109
  qwen,
108
110
  sambanova,
109
111
  search1api,
@@ -157,6 +159,7 @@ export { default as openai } from './openai';
157
159
  export { default as openrouter } from './openrouter';
158
160
  export { default as perplexity } from './perplexity';
159
161
  export { default as ppio } from './ppio';
162
+ export { default as qiniu } from './qiniu';
160
163
  export { default as qwen } from './qwen';
161
164
  export { default as sambanova } from './sambanova';
162
165
  export { default as search1api } from './search1api';
package/src/config/aiModels/qiniu.ts ADDED
@@ -0,0 +1,34 @@
1
+ import { AIChatModelCard } from '@/types/aiModel';
2
+
3
+ // https://developer.qiniu.com/aitokenapi
4
+
5
+ const qiniuChatModels: AIChatModelCard[] = [
6
+ {
7
+ abilities: {
8
+ functionCall: true,
9
+ },
10
+ contextWindowTokens: 131_072,
11
+ description:
12
+ '推理速度大幅提升,位居开源模型之首,媲美顶尖闭源模型。采用负载均衡辅助策略和多标记预测训练,性能显著增强。',
13
+ displayName: 'DeepSeek V3',
14
+ enabled: true,
15
+ id: 'deepseek-v3',
16
+ type: 'chat',
17
+ },
18
+ {
19
+ abilities: {
20
+ reasoning: true,
21
+ },
22
+ contextWindowTokens: 65_536,
23
+ description:
24
+ 'DeepSeek R1 是 DeepSeek 团队发布的最新开源模型,具备非常强悍的推理性能,尤其在数学、编程和推理任务上达到了与 OpenAI 的 o1 模型相当的水平。',
25
+ displayName: 'DeepSeek R1',
26
+ enabled: true,
27
+ id: 'deepseek-r1',
28
+ type: 'chat',
29
+ },
30
+ ];
31
+
32
+ export const allModels = [...qiniuChatModels];
33
+
34
+ export default allModels;
package/src/config/llm.ts CHANGED
@@ -78,6 +78,9 @@ export const getLLMConfig = () => {
78
78
  ENABLED_XINFERENCE: z.boolean(),
79
79
  XINFERENCE_API_KEY: z.string().optional(),
80
80
 
81
+ ENABLED_QINIU: z.boolean(),
82
+ QINIU_API_KEY: z.string().optional(),
83
+
81
84
  ENABLED_QWEN: z.boolean(),
82
85
  QWEN_API_KEY: z.string().optional(),
83
86
 
@@ -237,6 +240,9 @@ export const getLLMConfig = () => {
237
240
  ENABLED_XINFERENCE: !!process.env.XINFERENCE_API_KEY,
238
241
  XINFERENCE_API_KEY: process.env.XINFERENCE_API_KEY,
239
242
 
243
+ ENABLED_QINIU: !!process.env.QINIU_API_KEY,
244
+ QINIU_API_KEY: process.env.QINIU_API_KEY,
245
+
240
246
  ENABLED_QWEN: !!process.env.QWEN_API_KEY,
241
247
  QWEN_API_KEY: process.env.QWEN_API_KEY,
242
248
 
@@ -32,6 +32,7 @@ import OpenAIProvider from './openai';
32
32
  import OpenRouterProvider from './openrouter';
33
33
  import PerplexityProvider from './perplexity';
34
34
  import PPIOProvider from './ppio';
35
+ import QiniuProvider from './qiniu';
35
36
  import QwenProvider from './qwen';
36
37
  import SambaNovaProvider from './sambanova';
37
38
  import Search1APIProvider from './search1api';
@@ -101,6 +102,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
101
102
  PPIOProvider.chatModels,
102
103
  Search1APIProvider.chatModels,
103
104
  InfiniAIProvider.chatModels,
105
+ QiniuProvider.chatModels,
104
106
  ].flat();
105
107
 
106
108
  export const DEFAULT_MODEL_PROVIDER_LIST = [
@@ -156,6 +158,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
156
158
  Ai360Provider,
157
159
  Search1APIProvider,
158
160
  InfiniAIProvider,
161
+ QiniuProvider,
159
162
  ];
160
163
 
161
164
  export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -199,6 +202,7 @@ export { default as OpenAIProviderCard } from './openai';
199
202
  export { default as OpenRouterProviderCard } from './openrouter';
200
203
  export { default as PerplexityProviderCard } from './perplexity';
201
204
  export { default as PPIOProviderCard } from './ppio';
205
+ export { default as QiniuProviderCard } from './qiniu';
202
206
  export { default as QwenProviderCard } from './qwen';
203
207
  export { default as SambaNovaProviderCard } from './sambanova';
204
208
  export { default as Search1APIProviderCard } from './search1api';
@@ -0,0 +1,42 @@
1
+ import { ModelProviderCard } from '@/types/llm';
2
+
3
+ // ref: https://developer.qiniu.com/aitokenapi
4
+ const Qiniu: ModelProviderCard = {
5
+ chatModels: [
6
+ {
7
+ contextWindowTokens: 131_072,
8
+ description:
9
+ '推理速度大幅提升,位居开源模型之首,媲美顶尖闭源模型。采用负载均衡辅助策略和多标记预测训练,性能显著增强。',
10
+ displayName: 'DeepSeek V3',
11
+ enabled: true,
12
+ id: 'deepseek-v3',
13
+ },
14
+ {
15
+ contextWindowTokens: 65_536,
16
+ description:
17
+ 'DeepSeek R1 是 DeepSeek 团队发布的最新开源模型,具备非常强悍的推理性能,尤其在数学、编程和推理任务上达到了与 OpenAI 的 o1 模型相当的水平',
18
+ displayName: 'DeepSeek R1',
19
+ enabled: true,
20
+ id: 'deepseek-r1',
21
+ },
22
+ ],
23
+ checkModel: 'deepseek-r1',
24
+ description: '七牛作为老牌云服务厂商,提供高性价比稳定的实时、批量 AI 推理服务,简单易用。',
25
+ id: 'qiniu',
26
+ modelList: { showModelFetcher: true },
27
+ modelsUrl: 'https://developer.qiniu.com/aitokenapi/12882/ai-inference-api',
28
+ name: 'Qiniu',
29
+ proxyUrl: {
30
+ placeholder: 'https://api.qnaigc.com/v1',
31
+ },
32
+ settings: {
33
+ proxyUrl: {
34
+ placeholder: 'https://api.qnaigc.com/v1',
35
+ },
36
+ sdkType: 'openai',
37
+ showModelFetcher: true,
38
+ },
39
+ url: 'https://www.qiniu.com',
40
+ };
41
+
42
+ export default Qiniu;
@@ -0,0 +1,16 @@
1
+ // @vitest-environment node
2
+ import { ModelProvider } from '@/libs/agent-runtime';
3
+ import { testProvider } from '@/libs/agent-runtime/providerTestUtils';
4
+
5
+ import { LobeQiniuAI } from './index';
6
+
7
+ const provider = ModelProvider.Qiniu;
8
+ const defaultBaseURL = 'https://api.qnaigc.com/v1';
9
+
10
+ testProvider({
11
+ Runtime: LobeQiniuAI,
12
+ provider,
13
+ defaultBaseURL,
14
+ chatDebugEnv: 'DEBUG_QINIU_CHAT_COMPLETION',
15
+ chatModel: 'deepseek-r1',
16
+ });
@@ -0,0 +1,47 @@
1
+ import type { ChatModelCard } from '@/types/llm';
2
+
3
+ import { ModelProvider } from '../types';
4
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
5
+
6
+ export interface QiniuModelCard {
7
+ id: string;
8
+ }
9
+
10
+ export const LobeQiniuAI = LobeOpenAICompatibleFactory({
11
+ apiKey: 'placeholder-to-avoid-error',
12
+ baseURL: 'https://api.qnaigc.com/v1',
13
+ debug: {
14
+ chatCompletion: () => process.env.DEBUG_QINIU_CHAT_COMPLETION === '1',
15
+ },
16
+ models: async ({ client }) => {
17
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
18
+ const { DEFAULT_MODEL_PROVIDER_LIST } = await import('@/config/modelProviders');
19
+
20
+ const modelsPage = (await client.models.list()) as any;
21
+ const modelList: QiniuModelCard[] = modelsPage.data;
22
+
23
+ return modelList
24
+ .map((model) => {
25
+ const knownModelProvlder = DEFAULT_MODEL_PROVIDER_LIST.find(
26
+ (mp) => mp.id.toLowerCase() === ModelProvider.Qiniu.toLowerCase(),
27
+ );
28
+
29
+ const knownModel = (knownModelProvlder?.chatModels ?? LOBE_DEFAULT_MODEL_LIST).find(
30
+ (m) => model.id.toLowerCase() === m.id.toLowerCase(),
31
+ );
32
+
33
+ const abilities = knownModel && 'abilities' in knownModel ? knownModel.abilities : {};
34
+ return {
35
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
36
+ displayName: knownModel?.displayName ?? undefined,
37
+ enabled: knownModel?.enabled || false,
38
+ functionCall: abilities?.functionCall || false,
39
+ id: model.id,
40
+ reasoning: abilities?.reasoning || false,
41
+ vision: abilities?.vision || false,
42
+ };
43
+ })
44
+ .filter(Boolean) as ChatModelCard[];
45
+ },
46
+ provider: ModelProvider.Qiniu,
47
+ });
@@ -30,6 +30,7 @@ import { LobeOpenAI } from './openai';
30
30
  import { LobeOpenRouterAI } from './openrouter';
31
31
  import { LobePerplexityAI } from './perplexity';
32
32
  import { LobePPIOAI } from './ppio';
33
+ import { LobeQiniuAI } from './qiniu';
33
34
  import { LobeQwenAI } from './qwen';
34
35
  import { LobeSambaNovaAI } from './sambanova';
35
36
  import { LobeSearch1API } from './search1api';
@@ -82,6 +83,7 @@ export const providerRuntimeMap = {
82
83
  openrouter: LobeOpenRouterAI,
83
84
  perplexity: LobePerplexityAI,
84
85
  ppio: LobePPIOAI,
86
+ qiniu: LobeQiniuAI,
85
87
  qwen: LobeQwenAI,
86
88
  sambanova: LobeSambaNovaAI,
87
89
  search1api: LobeSearch1API,
@@ -54,6 +54,7 @@ export enum ModelProvider {
54
54
  OpenRouter = 'openrouter',
55
55
  PPIO = 'ppio',
56
56
  Perplexity = 'perplexity',
57
+ Qiniu = 'qiniu',
57
58
  Qwen = 'qwen',
58
59
  SambaNova = 'sambanova',
59
60
  Search1API = 'search1api',
@@ -52,6 +52,7 @@ vi.mock('@/config/llm', () => ({
52
52
  MISTRAL_API_KEY: 'test-mistral-key',
53
53
  OPENROUTER_API_KEY: 'test-openrouter-key',
54
54
  TOGETHERAI_API_KEY: 'test-togetherai-key',
55
+ QINIU_API_KEY: 'test-qiniu-key',
55
56
  QWEN_API_KEY: 'test-qwen-key',
56
57
  STEPFUN_API_KEY: 'test-stepfun-key',
57
58
  })),
@@ -108,10 +108,13 @@ const modelProviderListForModelSelect = (s: UserStore): EnabledProviderWithModel
108
108
  source: 'builtin',
109
109
  }));
110
110
 
111
- const getModelCardById = (id: string) => (s: UserStore) => {
111
+ const getModelCardById = (id: string, provider?: GlobalLLMProviderKey) => (s: UserStore) => {
112
112
  const list = modelProviderList(s);
113
113
 
114
- return list.flatMap((i) => i.chatModels).find((m) => m.id === id);
114
+ return list
115
+ .filter((i) => !provider || i.id === provider)
116
+ .flatMap((i) => i.chatModels)
117
+ .find((m) => m.id === id);
115
118
  };
116
119
 
117
120
  const isModelEnabledFunctionCall = (id: string) => (s: UserStore) =>
@@ -67,6 +67,7 @@ export interface UserKeyVaults extends SearchEngineKeyVaults {
67
67
  password?: string;
68
68
  perplexity?: OpenAICompatibleKeyVault;
69
69
  ppio?: OpenAICompatibleKeyVault;
70
+ qiniu?: OpenAICompatibleKeyVault;
70
71
  qwen?: OpenAICompatibleKeyVault;
71
72
  sambanova?: OpenAICompatibleKeyVault;
72
73
  search1api?: OpenAICompatibleKeyVault;