@lobehub/chat 1.85.10 → 1.86.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +5 -0
- package/CHANGELOG.md +50 -0
- package/Dockerfile +2 -0
- package/Dockerfile.database +2 -0
- package/Dockerfile.pglite +2 -0
- package/README.md +3 -2
- package/README.zh-CN.md +3 -2
- package/changelog/v1.json +18 -0
- package/docs/self-hosting/environment-variables/model-provider.mdx +23 -0
- package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +23 -1
- package/docs/usage/providers/qiniu.mdx +58 -0
- package/docs/usage/providers/qiniu.zh-CN.mdx +55 -0
- package/locales/ar/providers.json +3 -0
- package/locales/bg-BG/providers.json +3 -0
- package/locales/de-DE/providers.json +3 -0
- package/locales/en-US/providers.json +3 -0
- package/locales/es-ES/providers.json +3 -0
- package/locales/fa-IR/providers.json +3 -0
- package/locales/fr-FR/providers.json +3 -0
- package/locales/it-IT/providers.json +3 -0
- package/locales/ja-JP/providers.json +3 -0
- package/locales/ko-KR/providers.json +3 -0
- package/locales/nl-NL/providers.json +3 -0
- package/locales/pl-PL/providers.json +3 -0
- package/locales/pt-BR/providers.json +3 -0
- package/locales/ru-RU/providers.json +3 -0
- package/locales/tr-TR/providers.json +3 -0
- package/locales/vi-VN/providers.json +3 -0
- package/locales/zh-CN/providers.json +3 -0
- package/locales/zh-TW/providers.json +3 -0
- package/package.json +1 -1
- package/src/app/(backend)/middleware/auth/index.test.ts +1 -1
- package/src/app/(backend)/middleware/auth/index.ts +1 -1
- package/src/app/(backend)/middleware/auth/utils.ts +1 -1
- package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +1 -1
- package/src/app/(backend)/webapi/chat/[provider]/route.ts +1 -1
- package/src/app/(backend)/webapi/chat/vertexai/route.ts +2 -2
- package/src/app/(backend)/webapi/models/[provider]/pull/route.ts +1 -1
- package/src/app/(backend)/webapi/models/[provider]/route.ts +1 -1
- package/src/app/(backend)/webapi/plugin/gateway/route.ts +1 -1
- package/src/app/(backend)/webapi/text-to-image/[provider]/route.ts +2 -2
- package/src/app/[variants]/(main)/settings/llm/ProviderList/Azure/index.tsx +1 -1
- package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +2 -0
- package/src/app/[variants]/(main)/settings/llm/components/ProviderModelList/Option.tsx +4 -1
- package/src/app/[variants]/(main)/settings/provider/(detail)/azure/page.tsx +1 -1
- package/src/app/[variants]/(main)/settings/provider/(detail)/azureai/page.tsx +1 -1
- package/src/config/aiModels/index.ts +3 -0
- package/src/config/aiModels/qiniu.ts +34 -0
- package/src/config/llm.ts +6 -0
- package/src/config/modelProviders/index.ts +4 -0
- package/src/config/modelProviders/qiniu.ts +42 -0
- package/src/const/settings/llm.ts +1 -1
- package/src/database/models/__tests__/aiProvider.test.ts +1 -1
- package/src/database/models/aiProvider.ts +1 -1
- package/src/features/Conversation/Error/APIKeyForm/Bedrock.tsx +1 -1
- package/src/features/Conversation/Error/APIKeyForm/index.tsx +1 -1
- package/src/features/Conversation/Error/index.tsx +1 -1
- package/src/libs/{agent-runtime → model-runtime}/AgentRuntime.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/ai21/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/ai360/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/anthropic/index.test.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/baichuan/index.test.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/bedrock/index.test.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/cloudflare/index.test.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/deepseek/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/fireworksai/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/giteeai/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/github/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/google/index.test.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/groq/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/hunyuan/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/internlm/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/lmstudio/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/mistral/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/moonshot/index.test.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/novita/index.test.ts +3 -3
- package/src/libs/{agent-runtime → model-runtime}/ollama/index.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/openai/index.test.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/openrouter/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/perplexity/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/ppio/index.test.ts +4 -4
- package/src/libs/{agent-runtime → model-runtime}/providerTestUtils.ts +1 -1
- package/src/libs/model-runtime/qiniu/index.test.ts +16 -0
- package/src/libs/model-runtime/qiniu/index.ts +47 -0
- package/src/libs/{agent-runtime → model-runtime}/qwen/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/runtimeMap.ts +2 -0
- package/src/libs/{agent-runtime → model-runtime}/sensenova/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/siliconcloud/index.ts +1 -6
- package/src/libs/{agent-runtime → model-runtime}/spark/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/stepfun/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/taichu/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/tencentcloud/index.test.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/togetherai/index.test.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/types/type.ts +1 -0
- package/src/libs/{agent-runtime → model-runtime}/upstage/index.test.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/utils/openaiCompatibleFactory/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/utils/openaiHelpers.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/utils/streams/ollama.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/utils/streams/openai.test.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/utils/streams/qwen.test.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/wenxin/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/xai/index.test.ts +2 -2
- package/src/libs/{agent-runtime → model-runtime}/zeroone/index.test.ts +1 -1
- package/src/libs/{agent-runtime → model-runtime}/zhipu/index.test.ts +2 -2
- package/src/libs/trpc/client/lambda.ts +1 -1
- package/src/server/globalConfig/_deprecated.test.ts +1 -1
- package/src/server/globalConfig/_deprecated.ts +1 -1
- package/src/server/globalConfig/genServerAiProviderConfig.ts +1 -1
- package/src/server/modules/AgentRuntime/index.test.ts +5 -4
- package/src/server/modules/AgentRuntime/index.ts +1 -1
- package/src/server/modules/AgentRuntime/trace.ts +1 -1
- package/src/server/routers/async/ragEval.ts +1 -1
- package/src/services/__tests__/_auth.test.ts +1 -1
- package/src/services/__tests__/chat.test.ts +3 -3
- package/src/services/_auth.ts +1 -1
- package/src/services/chat.ts +1 -1
- package/src/services/textToImage.ts +1 -1
- package/src/services/upload.ts +1 -1
- package/src/store/user/slices/modelList/action.ts +1 -1
- package/src/store/user/slices/modelList/selectors/modelProvider.ts +5 -2
- package/src/types/fetch.ts +1 -1
- package/src/types/message/chat.ts +1 -1
- package/src/types/user/settings/keyVaults.ts +1 -0
- package/src/types/user/settings/modelProvider.ts +1 -1
- package/src/utils/errorResponse.test.ts +1 -1
- package/src/utils/errorResponse.ts +1 -1
- package/src/utils/fetch/fetchSSE.ts +1 -1
- package/src/utils/genUserLLMConfig.test.ts +1 -1
- package/src/utils/genUserLLMConfig.ts +1 -1
- /package/src/libs/{agent-runtime → model-runtime}/AgentRuntime.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/BaseAI.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/UniformRuntime/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/ai21/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/ai360/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/anthropic/handleAnthropicError.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/anthropic/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/azureOpenai/index.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/azureOpenai/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/azureai/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/baichuan/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/bedrock/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/cloudflare/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/cohere/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/deepseek/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/error.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/fireworksai/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/giteeai/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/github/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/google/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/groq/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/helpers/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/helpers/parseToolCalls.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/helpers/parseToolCalls.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/higress/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/huggingface/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/hunyuan/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/infiniai/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/internlm/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/jina/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/lmstudio/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/minimax/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/mistral/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/moonshot/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/novita/__snapshots__/index.test.ts.snap +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/novita/fixtures/models.json +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/novita/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/novita/type.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/nvidia/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/ollama/index.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/ollama/type.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/openai/__snapshots__/index.test.ts.snap +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/openai/fixtures/openai-models.json +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/openai/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/openrouter/__snapshots__/index.test.ts.snap +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/openrouter/fixtures/frontendModels.json +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/openrouter/fixtures/models.json +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/openrouter/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/openrouter/type.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/perplexity/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/ppio/__snapshots__/index.test.ts.snap +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/ppio/fixtures/models.json +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/ppio/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/ppio/type.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/providerTestUtils.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/qwen/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/sambanova/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/search1api/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/sensenova/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/spark/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/stepfun/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/taichu/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/tencentcloud/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/togetherai/fixtures/models.json +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/togetherai/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/togetherai/type.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/types/chat.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/types/embeddings.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/types/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/types/model.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/types/textToImage.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/types/tts.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/upstage/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/anthropicHelpers.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/anthropicHelpers.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/cloudflareHelpers.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/cloudflareHelpers.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/createError.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/debugStream.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/debugStream.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/desensitizeUrl.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/desensitizeUrl.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/handleOpenAIError.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/openaiCompatibleFactory/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/openaiHelpers.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/response.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/sensenovaHelpers.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/sensenovaHelpers.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/__snapshots__/protocol.test.ts.snap +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/anthropic.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/anthropic.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/bedrock/claude.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/bedrock/common.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/bedrock/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/bedrock/llama.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/bedrock/llama.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/google-ai.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/google-ai.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/model.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/ollama.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/openai.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/protocol.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/protocol.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/qwen.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/spark.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/spark.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/vertex-ai.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/streams/vertex-ai.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/uriParser.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/uriParser.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/usageConverter.test.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/utils/usageConverter.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/vertexai/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/vllm/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/volcengine/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/wenxin/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/xai/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/xinference/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/zeroone/index.ts +0 -0
- /package/src/libs/{agent-runtime → model-runtime}/zhipu/index.ts +0 -0
package/.env.example
CHANGED
@@ -109,6 +109,11 @@ OPENAI_API_KEY=sk-xxxxxxxxx
|
|
109
109
|
# DEEPSEEK_PROXY_URL=https://api.deepseek.com/v1
|
110
110
|
# DEEPSEEK_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
111
111
|
|
112
|
+
### Qiniu AI ####
|
113
|
+
|
114
|
+
# QINIU_PROXY_URL=https://api.qnaigc.com/v1
|
115
|
+
# QINIU_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
116
|
+
|
112
117
|
### Qwen AI ####
|
113
118
|
|
114
119
|
# QWEN_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,56 @@
|
|
2
2
|
|
3
3
|
# Changelog
|
4
4
|
|
5
|
+
### [Version 1.86.1](https://github.com/lobehub/lobe-chat/compare/v1.86.0...v1.86.1)
|
6
|
+
|
7
|
+
<sup>Released on **2025-05-15**</sup>
|
8
|
+
|
9
|
+
#### ♻ Code Refactoring
|
10
|
+
|
11
|
+
- **misc**: Refactor agent runtime to model runtime.
|
12
|
+
|
13
|
+
<br/>
|
14
|
+
|
15
|
+
<details>
|
16
|
+
<summary><kbd>Improvements and Fixes</kbd></summary>
|
17
|
+
|
18
|
+
#### Code refactoring
|
19
|
+
|
20
|
+
- **misc**: Refactor agent runtime to model runtime, closes [#7846](https://github.com/lobehub/lobe-chat/issues/7846) ([a3b9448](https://github.com/lobehub/lobe-chat/commit/a3b9448))
|
21
|
+
|
22
|
+
</details>
|
23
|
+
|
24
|
+
<div align="right">
|
25
|
+
|
26
|
+
[](#readme-top)
|
27
|
+
|
28
|
+
</div>
|
29
|
+
|
30
|
+
## [Version 1.86.0](https://github.com/lobehub/lobe-chat/compare/v1.85.10...v1.86.0)
|
31
|
+
|
32
|
+
<sup>Released on **2025-05-15**</sup>
|
33
|
+
|
34
|
+
#### ✨ Features
|
35
|
+
|
36
|
+
- **misc**: Add Qiniu Provider.
|
37
|
+
|
38
|
+
<br/>
|
39
|
+
|
40
|
+
<details>
|
41
|
+
<summary><kbd>Improvements and Fixes</kbd></summary>
|
42
|
+
|
43
|
+
#### What's improved
|
44
|
+
|
45
|
+
- **misc**: Add Qiniu Provider, closes [#7649](https://github.com/lobehub/lobe-chat/issues/7649) ([c9b8e9f](https://github.com/lobehub/lobe-chat/commit/c9b8e9f))
|
46
|
+
|
47
|
+
</details>
|
48
|
+
|
49
|
+
<div align="right">
|
50
|
+
|
51
|
+
[](#readme-top)
|
52
|
+
|
53
|
+
</div>
|
54
|
+
|
5
55
|
### [Version 1.85.10](https://github.com/lobehub/lobe-chat/compare/v1.85.9...v1.85.10)
|
6
56
|
|
7
57
|
<sup>Released on **2025-05-14**</sup>
|
package/Dockerfile
CHANGED
@@ -202,6 +202,8 @@ ENV \
|
|
202
202
|
PERPLEXITY_API_KEY="" PERPLEXITY_MODEL_LIST="" PERPLEXITY_PROXY_URL="" \
|
203
203
|
# PPIO
|
204
204
|
PPIO_API_KEY="" PPIO_MODEL_LIST="" \
|
205
|
+
# Qiniu
|
206
|
+
QINIU_API_KEY="" QINIU_MODEL_LIST="" QINIU_PROXY_URL="" \
|
205
207
|
# Qwen
|
206
208
|
QWEN_API_KEY="" QWEN_MODEL_LIST="" QWEN_PROXY_URL="" \
|
207
209
|
# SambaNova
|
package/Dockerfile.database
CHANGED
@@ -246,6 +246,8 @@ ENV \
|
|
246
246
|
PERPLEXITY_API_KEY="" PERPLEXITY_MODEL_LIST="" PERPLEXITY_PROXY_URL="" \
|
247
247
|
# PPIO
|
248
248
|
PPIO_API_KEY="" PPIO_MODEL_LIST="" \
|
249
|
+
# Qiniu
|
250
|
+
QINIU_API_KEY="" QINIU_MODEL_LIST="" QINIU_PROXY_URL="" \
|
249
251
|
# Qwen
|
250
252
|
QWEN_API_KEY="" QWEN_MODEL_LIST="" QWEN_PROXY_URL="" \
|
251
253
|
# SambaNova
|
package/Dockerfile.pglite
CHANGED
@@ -202,6 +202,8 @@ ENV \
|
|
202
202
|
OPENROUTER_API_KEY="" OPENROUTER_MODEL_LIST="" \
|
203
203
|
# Perplexity
|
204
204
|
PERPLEXITY_API_KEY="" PERPLEXITY_MODEL_LIST="" PERPLEXITY_PROXY_URL="" \
|
205
|
+
# Qiniu
|
206
|
+
QINIU_API_KEY="" QINIU_MODEL_LIST="" QINIU_PROXY_URL="" \
|
205
207
|
# Qwen
|
206
208
|
QWEN_API_KEY="" QWEN_MODEL_LIST="" QWEN_PROXY_URL="" \
|
207
209
|
# SambaNova
|
package/README.md
CHANGED
@@ -196,7 +196,7 @@ We have implemented support for the following model service providers:
|
|
196
196
|
- **[OpenRouter](https://lobechat.com/discover/provider/openrouter)**: OpenRouter is a service platform providing access to various cutting-edge large model interfaces, supporting OpenAI, Anthropic, LLaMA, and more, suitable for diverse development and application needs. Users can flexibly choose the optimal model and pricing based on their requirements, enhancing the AI experience.
|
197
197
|
- **[Cloudflare Workers AI](https://lobechat.com/discover/provider/cloudflare)**: Run serverless GPU-powered machine learning models on Cloudflare's global network.
|
198
198
|
|
199
|
-
<details><summary><kbd>See more providers (+
|
199
|
+
<details><summary><kbd>See more providers (+31)</kbd></summary>
|
200
200
|
|
201
201
|
- **[GitHub](https://lobechat.com/discover/provider/github)**: With GitHub Models, developers can become AI engineers and leverage the industry's leading AI models.
|
202
202
|
- **[Novita](https://lobechat.com/discover/provider/novita)**: Novita AI is a platform providing a variety of large language models and AI image generation API services, flexible, reliable, and cost-effective. It supports the latest open-source models like Llama3 and Mistral, offering a comprehensive, user-friendly, and auto-scaling API solution for generative AI application development, suitable for the rapid growth of AI startups.
|
@@ -228,10 +228,11 @@ We have implemented support for the following model service providers:
|
|
228
228
|
- **[360 AI](https://lobechat.com/discover/provider/ai360)**: 360 AI is an AI model and service platform launched by 360 Company, offering various advanced natural language processing models, including 360GPT2 Pro, 360GPT Pro, 360GPT Turbo, and 360GPT Turbo Responsibility 8K. These models combine large-scale parameters and multimodal capabilities, widely applied in text generation, semantic understanding, dialogue systems, and code generation. With flexible pricing strategies, 360 AI meets diverse user needs, supports developer integration, and promotes the innovation and development of intelligent applications.
|
229
229
|
- **[Search1API](https://lobechat.com/discover/provider/search1api)**: Search1API provides access to the DeepSeek series of models that can connect to the internet as needed, including standard and fast versions, supporting a variety of model sizes.
|
230
230
|
- **[InfiniAI](https://lobechat.com/discover/provider/infiniai)**: Provides high-performance, easy-to-use, and secure large model services for application developers, covering the entire process from large model development to service deployment.
|
231
|
+
- **[Qiniu](https://lobechat.com/discover/provider/qiniu)**: Qiniu, as a long-established cloud service provider, delivers cost-effective and reliable AI inference services for both real-time and batch processing, with a simple and user-friendly experience.
|
231
232
|
|
232
233
|
</details>
|
233
234
|
|
234
|
-
> 📊 Total providers: [<kbd>**
|
235
|
+
> 📊 Total providers: [<kbd>**41**</kbd>](https://lobechat.com/discover/providers)
|
235
236
|
|
236
237
|
<!-- PROVIDER LIST -->
|
237
238
|
|
package/README.zh-CN.md
CHANGED
@@ -196,7 +196,7 @@ LobeChat 支持文件上传与知识库功能,你可以上传文件、图片
|
|
196
196
|
- **[OpenRouter](https://lobechat.com/discover/provider/openrouter)**: OpenRouter 是一个提供多种前沿大模型接口的服务平台,支持 OpenAI、Anthropic、LLaMA 及更多,适合多样化的开发和应用需求。用户可根据自身需求灵活选择最优的模型和价格,助力 AI 体验的提升。
|
197
197
|
- **[Cloudflare Workers AI](https://lobechat.com/discover/provider/cloudflare)**: 在 Cloudflare 的全球网络上运行由无服务器 GPU 驱动的机器学习模型。
|
198
198
|
|
199
|
-
<details><summary><kbd>See more providers (+
|
199
|
+
<details><summary><kbd>See more providers (+31)</kbd></summary>
|
200
200
|
|
201
201
|
- **[GitHub](https://lobechat.com/discover/provider/github)**: 通过 GitHub 模型,开发人员可以成为 AI 工程师,并使用行业领先的 AI 模型进行构建。
|
202
202
|
- **[Novita](https://lobechat.com/discover/provider/novita)**: Novita AI 是一个提供多种大语言模型与 AI 图像生成的 API 服务的平台,灵活、可靠且具有成本效益。它支持 Llama3、Mistral 等最新的开源模型,并为生成式 AI 应用开发提供了全面、用户友好且自动扩展的 API 解决方案,适合 AI 初创公司的快速发展。
|
@@ -228,10 +228,11 @@ LobeChat 支持文件上传与知识库功能,你可以上传文件、图片
|
|
228
228
|
- **[360 AI](https://lobechat.com/discover/provider/ai360)**: 360 AI 是 360 公司推出的 AI 模型和服务平台,提供多种先进的自然语言处理模型,包括 360GPT2 Pro、360GPT Pro、360GPT Turbo 和 360GPT Turbo Responsibility 8K。这些模型结合了大规模参数和多模态能力,广泛应用于文本生成、语义理解、对话系统与代码生成等领域。通过灵活的定价策略,360 AI 满足多样化用户需求,支持开发者集成,推动智能化应用的革新和发展。
|
229
229
|
- **[Search1API](https://lobechat.com/discover/provider/search1api)**: Search1API 提供可根据需要自行联网的 DeepSeek 系列模型的访问,包括标准版和快速版本,支持多种参数规模的模型选择。
|
230
230
|
- **[InfiniAI](https://lobechat.com/discover/provider/infiniai)**: 为应用开发者提供高性能、易上手、安全可靠的大模型服务,覆盖从大模型开发到大模型服务化部署的全流程。
|
231
|
+
- **[Qiniu](https://lobechat.com/discover/provider/qiniu)**: 七牛作为老牌云服务厂商,提供高性价比稳定的实时、批量 AI 推理服务,简单易用。
|
231
232
|
|
232
233
|
</details>
|
233
234
|
|
234
|
-
> 📊 Total providers: [<kbd>**
|
235
|
+
> 📊 Total providers: [<kbd>**41**</kbd>](https://lobechat.com/discover/providers)
|
235
236
|
|
236
237
|
<!-- PROVIDER LIST -->
|
237
238
|
|
package/changelog/v1.json
CHANGED
@@ -1,4 +1,22 @@
|
|
1
1
|
[
|
2
|
+
{
|
3
|
+
"children": {
|
4
|
+
"improvements": [
|
5
|
+
"Refactor agent runtime to model runtime."
|
6
|
+
]
|
7
|
+
},
|
8
|
+
"date": "2025-05-15",
|
9
|
+
"version": "1.86.1"
|
10
|
+
},
|
11
|
+
{
|
12
|
+
"children": {
|
13
|
+
"features": [
|
14
|
+
"Add Qiniu Provider."
|
15
|
+
]
|
16
|
+
},
|
17
|
+
"date": "2025-05-15",
|
18
|
+
"version": "1.86.0"
|
19
|
+
},
|
2
20
|
{
|
3
21
|
"children": {
|
4
22
|
"improvements": [
|
@@ -423,6 +423,29 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
|
|
423
423
|
- Default: `-`
|
424
424
|
- Example: `-all,+yi-large,+yi-large-rag`
|
425
425
|
|
426
|
+
## Qiniu
|
427
|
+
|
428
|
+
### `QINIU_API_KEY`
|
429
|
+
|
430
|
+
- Type: Required
|
431
|
+
- Description: This is the API key you can obtain from Qiniu AI service
|
432
|
+
- Default: -
|
433
|
+
- Example:`sk-xxxxx...xxxxx`
|
434
|
+
|
435
|
+
### `QINIU_MODEL_LIST`
|
436
|
+
|
437
|
+
- Type: Optional
|
438
|
+
- Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
|
439
|
+
- Default: `-`
|
440
|
+
- Example: `-all,+deepseek-r1,+deepseek-v3`
|
441
|
+
|
442
|
+
### `QINIU_PROXY_URL`
|
443
|
+
|
444
|
+
- Type: Optional
|
445
|
+
- Description: If you manually configure the Qiniu API proxy, you can use this configuration item to override the default Qiniu API request base URL
|
446
|
+
- Default: `https://api.qnaigc.com/v1`
|
447
|
+
- Example: `https://my-qnaigc.com/v1`
|
448
|
+
|
426
449
|
## Qwen
|
427
450
|
|
428
451
|
### `QWEN_API_KEY`
|
@@ -325,7 +325,6 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
|
|
325
325
|
- 默认值:`https://api.moonshot.cn/v1`
|
326
326
|
- 示例:`https://my-moonshot-proxy.com/v1`
|
327
327
|
|
328
|
-
|
329
328
|
## Perplexity AI
|
330
329
|
|
331
330
|
### `PERPLEXITY_API_KEY`
|
@@ -422,6 +421,29 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
|
|
422
421
|
- 默认值:`-`
|
423
422
|
- 示例:`-all,+yi-large,+yi-large-rag`
|
424
423
|
|
424
|
+
## 七牛云
|
425
|
+
|
426
|
+
### `QINIU_API_KEY`
|
427
|
+
|
428
|
+
- 类型:必选
|
429
|
+
- 描述:这是你在七牛云上获取的 API 密钥
|
430
|
+
- 默认值:-
|
431
|
+
- 示例:`sk-xxxxx...xxxxx`
|
432
|
+
|
433
|
+
### `QINIU_MODEL_LIST`
|
434
|
+
|
435
|
+
- 类型:可选
|
436
|
+
- 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
|
437
|
+
- 默认值:`-`
|
438
|
+
- 示例:`-all,+deepseek-r1,+deepseek-v3`
|
439
|
+
|
440
|
+
### `QINIU_PROXY_URL`
|
441
|
+
|
442
|
+
- 类型:可选
|
443
|
+
- 描述:如果你手动配置了 Qiniu 接口代理,可以使用此配置项来覆盖默认的 Qiniu API 请求基础 URL
|
444
|
+
- 默认值:`https://api.qnaigc.com/v1`
|
445
|
+
- 示例:`https://my-qnaigc.com/v1`
|
446
|
+
|
425
447
|
## 通义千问
|
426
448
|
|
427
449
|
### `QWEN_API_KEY`
|
@@ -0,0 +1,58 @@
|
|
1
|
+
---
|
2
|
+
title: Using Qiniu API Key in LobeChat
|
3
|
+
description: >-
|
4
|
+
Learn how to integrate and utilize powerful language models developed by Qiniu
|
5
|
+
into LobeChat for various tasks. Follow the steps to obtain an API key
|
6
|
+
and configure it for seamless interaction.
|
7
|
+
tags:
|
8
|
+
- API key
|
9
|
+
- Web UI
|
10
|
+
- 七牛
|
11
|
+
- 七牛云
|
12
|
+
- 七牛智能
|
13
|
+
- Qiniu
|
14
|
+
- DeepSeek
|
15
|
+
---
|
16
|
+
|
17
|
+
# Using Qiniu's AI Models in LobeChat
|
18
|
+
|
19
|
+
<Image alt={'Using Qiniu\'s AI Models in LobeChat'} cover src={'https://github.com/user-attachments/assets/3ad2655e-dd20-4534-bf6d-080b3677df86'} />
|
20
|
+
|
21
|
+
[Qiniu](https://www.qiniu.com), as a long-established cloud service provider, delivers cost-effective and reliable AI inference services for both real-time and batch processing, with a simple and user-friendly experience.
|
22
|
+
|
23
|
+
This document will guide you on how to use Qiniu's AI Models in LobeChat:
|
24
|
+
|
25
|
+
<Steps>
|
26
|
+
### Step 1: [Obtain AI Model API Key](https://developer.qiniu.com/aitokenapi/12884/how-to-get-api-key)
|
27
|
+
|
28
|
+
- Method 1: Using Console
|
29
|
+
1. [Register a Qiniu account](https://s.qiniu.com/umqq6n?ref=developer.qiniu.com\&s_path=%2Faitokenapi%2F12884%2Fhow-to-get-api-key)
|
30
|
+
2. [Go to the console to obtain your API Key](https://portal.qiniu.com/ai-inference/api-key)
|
31
|
+
<Image alt={'Obtain your API Key'} inStep src={'https://static.sufy.com/lobehub/439040511-a014769f-262c-4ee4-a727-2c3c45111574.png'} />
|
32
|
+
|
33
|
+
- Method 2: Using Mini Program
|
34
|
+
1. Open the Qiniu mini program
|
35
|
+
2. Quick login to your account
|
36
|
+
3. Click the \[Me] tab in the bottom navigation bar
|
37
|
+
4. Click \[My Console]
|
38
|
+
5. Navigate to \[AI Inference]
|
39
|
+
6. View and copy your API key
|
40
|
+
|
41
|
+
### Step 2: Configure Qiniu's AI Model Service in LobeChat
|
42
|
+
|
43
|
+
- Visit the `Settings` interface in LobeChat
|
44
|
+
- Find the setting for `Qiniu` under `Language Model`
|
45
|
+
|
46
|
+
<Image alt={'Enter API key'} inStep src={'https://static.sufy.com/lobehub/439047682-40bd5ec0-c2fe-4397-9ae1-f6d0b9e55287.png'} />
|
47
|
+
|
48
|
+
- Open Qiniu and enter the obtained API key.
|
49
|
+
- Choose a Qiniu's model for your AI assistant to start the conversation.
|
50
|
+
|
51
|
+
<Image alt={'Select a Qiniu\'s model and start conversation'} inStep src={'https://static.sufy.com/lobehub/439048211-eadae11f-86e8-4a8d-944d-2f984e257356.png'} />
|
52
|
+
|
53
|
+
<Callout type={'warning'}>
|
54
|
+
During usage, you may need to pay the API service provider. Please refer to [Qiniu's relevant pricing policies](https://developer.qiniu.com/aitokenapi/12898/ai-token-api-pricing).
|
55
|
+
</Callout>
|
56
|
+
</Steps>
|
57
|
+
|
58
|
+
You can now engage in conversations using the models provided by Qiniu in LobeChat.
|
@@ -0,0 +1,55 @@
|
|
1
|
+
---
|
2
|
+
title: 在 LobeChat 中使用七牛云大模型 API Key
|
3
|
+
description: 学习如何在 LobeChat 中配置和使用七牛云的大模型,提供强大的自然语言理解和生成能力。
|
4
|
+
tags:
|
5
|
+
- API key
|
6
|
+
- Web UI
|
7
|
+
- 七牛
|
8
|
+
- 七牛云
|
9
|
+
- 七牛智能
|
10
|
+
- Qiniu
|
11
|
+
- DeepSeek
|
12
|
+
---
|
13
|
+
|
14
|
+
# 在 LobeChat 中使用七牛云大模型
|
15
|
+
|
16
|
+
<Image alt={'在 LobeChat 中使用七牛云大模型'} cover src={'https://github.com/user-attachments/assets/3ad2655e-dd20-4534-bf6d-080b3677df86'} />
|
17
|
+
|
18
|
+
[七牛云](https://www.qiniu.com)作为老牌云服务厂商,提供高性价比稳定的实时、批量 AI 推理服务,简单易用。
|
19
|
+
|
20
|
+
本文档将指导你如何在 LobeChat 中使用七牛云大模型:
|
21
|
+
|
22
|
+
<Steps>
|
23
|
+
### 步骤一:[获取 AI 大模型 API 密钥](https://developer.qiniu.com/aitokenapi/12884/how-to-get-api-key)
|
24
|
+
|
25
|
+
- 方法一:使用控制台获取
|
26
|
+
1. [注册七牛账号](https://s.qiniu.com/umqq6n?ref=developer.qiniu.com\&s_path=%2Faitokenapi%2F12884%2Fhow-to-get-api-key)
|
27
|
+
2. [前往控制台获取 API Key](https://portal.qiniu.com/ai-inference/api-key)
|
28
|
+
<Image alt={'获取 API Key'} inStep src={'https://static.sufy.com/lobehub/438758098-119239c1-8552-420a-9906-de2eab739fc6.png'} />
|
29
|
+
|
30
|
+
- 方法二:使用小程序获取
|
31
|
+
1. 打开七牛小程序
|
32
|
+
2. 快速登录账号
|
33
|
+
3. 点击【我的】底部导航栏
|
34
|
+
4. 点击【我的控制台】
|
35
|
+
5. 进入【AI 推理】
|
36
|
+
6. 查看和复制你的 API 密钥
|
37
|
+
|
38
|
+
### 步骤二:在 LobeChat 中配置七牛云大模型服务
|
39
|
+
|
40
|
+
- 访问 LobeChat 的`设置`界面
|
41
|
+
- 在`语言模型`下找到`七牛云`的设置项
|
42
|
+
|
43
|
+
<Image alt={'填写 API 密钥'} inStep src={'https://static.sufy.com/lobehub/439049319-6ae44f36-bf48-492a-a6aa-7be72f4a29d8.png'} />
|
44
|
+
|
45
|
+
- 打开七牛云并填入获得的 API 密钥
|
46
|
+
- 为你的 AI 助手选择一个七牛云的大模型即可开始对话
|
47
|
+
|
48
|
+
<Image alt={'选择七牛云大模型并开始对话'} inStep src={'https://static.sufy.com/lobehub/439048945-c608eb9e-6ee1-4611-9df7-2075e95d069b.png'} />
|
49
|
+
|
50
|
+
<Callout type={'warning'}>
|
51
|
+
在使用过程中你可能需要向 API 服务提供商付费,请参考[七牛云的相关费用政策](https://developer.qiniu.com/aitokenapi/12898/ai-token-api-pricing)。
|
52
|
+
</Callout>
|
53
|
+
</Steps>
|
54
|
+
|
55
|
+
至此你已经可以在 LobeChat 中使用七牛云提供的大模型进行对话了。
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "تقدم PPIO بايو السحابية خدمات واجهة برمجة التطبيقات لنماذج مفتوحة المصدر مستقرة وذات تكلفة فعالة، تدعم جميع سلسلة DeepSeek، وLlama، وQwen، وغيرها من النماذج الكبيرة الرائدة في الصناعة."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "كشركة رائدة في خدمات السحابة، تقدم Qiniu خدمات استدلال ذكاء اصطناعي في الوقت الفعلي ومجموعة كبيرة بتكلفة فعالة وموثوقة، سهلة الاستخدام."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "Qwen هو نموذج لغة ضخم تم تطويره ذاتيًا بواسطة Alibaba Cloud، يتمتع بقدرات قوية في فهم وتوليد اللغة الطبيعية. يمكنه الإجابة على مجموعة متنوعة من الأسئلة، وكتابة المحتوى، والتعبير عن الآراء، وكتابة الشيفرات، ويؤدي دورًا في مجالات متعددة."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO ПайОу облак предлага стабилни и икономически изгодни API услуги за отворени модели, поддържащи цялата серия DeepSeek, Llama, Qwen и други водещи модели в индустрията."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu е утвърден доставчик на облачни услуги, предлагащ икономически изгодни и надеждни услуги за AI инференция в реално време и на партиди, прости и лесни за използване."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "Qwen е самостоятелно разработен свръхголям езиков модел на Alibaba Cloud, с мощни способности за разбиране и генериране на естествен език. Може да отговаря на различни въпроси, да създава текстово съдържание, да изразява мнения и да пише код, играейки роля в множество области."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO Paiou Cloud bietet stabile und kosteneffiziente Open-Source-Modell-API-Dienste und unterstützt die gesamte DeepSeek-Serie, Llama, Qwen und andere führende große Modelle der Branche."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu ist ein führender Anbieter von Cloud-Diensten, der schnelle und effiziente API-Dienste für große Modelle bereitstellt, einschließlich der von Alibaba, mit flexiblen Optionen für das Entwickeln und Anwenden von AI-Anwendungen."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "Tongyi Qianwen ist ein von Alibaba Cloud selbst entwickeltes, groß angelegtes Sprachmodell mit starken Fähigkeiten zur Verarbeitung und Generierung natürlicher Sprache. Es kann eine Vielzahl von Fragen beantworten, Texte erstellen, Meinungen äußern und Code schreiben und spielt in mehreren Bereichen eine Rolle."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO supports stable and cost-efficient open-source LLM APIs, such as DeepSeek, Llama, Qwen etc."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu, as a long-established cloud service provider, delivers cost-effective and reliable AI inference services for both real-time and batch processing, with a simple and user-friendly experience."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "Tongyi Qianwen is a large-scale language model independently developed by Alibaba Cloud, featuring strong natural language understanding and generation capabilities. It can answer various questions, create written content, express opinions, and write code, playing a role in multiple fields."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO Paiouyun ofrece servicios de API de modelos de código abierto estables y de alto rendimiento, que admiten toda la serie DeepSeek, Llama, Qwen y otros modelos grandes líderes en la industria."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu es un proveedor líder de servicios de nube, ofreciendo API de IA de alta velocidad y eficiencia, incluyendo modelos de Alibaba, con opciones flexibles para construir y aplicar aplicaciones de IA."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "Tongyi Qianwen es un modelo de lenguaje de gran escala desarrollado de forma independiente por Alibaba Cloud, con potentes capacidades de comprensión y generación de lenguaje natural. Puede responder a diversas preguntas, crear contenido escrito, expresar opiniones y redactar código, desempeñando un papel en múltiples campos."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO پایو کلود خدمات API مدلهای متن باز با ثبات و با قیمت مناسب را ارائه میدهد و از تمام سریهای DeepSeek، Llama، Qwen و سایر مدلهای بزرگ پیشرو در صنعت پشتیبانی میکند."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu یک شرکت پیشرو در خدمات سحابی است که API های سریع و کارآمد برای فراخوانی مدلهای بزرگ، از جمله مدلهای Alibaba، را ارائه میدهد و با امکانات پیشرفته برای ساخت و استفاده از برنامههای AI پشتیبانی میکند."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "چوان یی چیان ون یک مدل زبان بسیار بزرگ است که توسط علیکلود بهطور مستقل توسعه یافته و دارای تواناییهای قدرتمند درک و تولید زبان طبیعی است. این مدل میتواند به انواع سوالات پاسخ دهد، محتوای متنی خلق کند، نظرات و دیدگاهها را بیان کند، کد بنویسد و در حوزههای مختلف نقش ایفا کند."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO Paiouyun offre des services API de modèles open source stables et rentables, prenant en charge toute la gamme DeepSeek, Llama, Qwen et d'autres grands modèles de pointe dans l'industrie."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu est un fournisseur de services de cloud, offrant des API de IA de haute vitesse et d'efficacité, incluant des modèles Alibaba, avec des options flexibles pour la construction et l'application d'applications IA."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "Tongyi Qianwen est un modèle de langage à grande échelle développé de manière autonome par Alibaba Cloud, doté de puissantes capacités de compréhension et de génération du langage naturel. Il peut répondre à diverses questions, créer du contenu écrit, exprimer des opinions, rédiger du code, etc., jouant un rôle dans plusieurs domaines."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO Paeou Cloud offre servizi API per modelli open source stabili e ad alto rapporto qualità-prezzo, supportando l'intera gamma di DeepSeek, Llama, Qwen e altri modelli di grandi dimensioni leader del settore."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu è un fornitore di servizi cloud leader, offrendo API di IA ad alta velocità e efficienza, incluso il modello Alibaba, con opzioni flessibili per costruire e applicare applicazioni di IA."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "Qwen è un modello di linguaggio di grande scala sviluppato autonomamente da Alibaba Cloud, con potenti capacità di comprensione e generazione del linguaggio naturale. Può rispondere a varie domande, creare contenuti testuali, esprimere opinioni e scrivere codice, svolgendo un ruolo in vari settori."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO パイオ云は、安定した高コストパフォーマンスのオープンソースモデル API サービスを提供し、DeepSeek の全シリーズ、Llama、Qwen などの業界をリードする大規模モデルをサポートしています。"
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniuは、老舗のクラウドサービスプロバイダーであり、高品質で安価なリアルタイムおよびバッチAI推論サービスを提供し、シンプルな使い方を提供します。"
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "通義千問は、アリババクラウドが独自に開発した超大規模言語モデルであり、強力な自然言語理解と生成能力を持っています。さまざまな質問に答えたり、文章を創作したり、意見を表現したり、コードを執筆したりすることができ、さまざまな分野で活躍しています。"
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO 파이오 클라우드는 안정적이고 비용 효율적인 오픈 소스 모델 API 서비스를 제공하며, DeepSeek 전 시리즈, Llama, Qwen 등 업계 선도 대모델을 지원합니다."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu는 대형 모델 서비스를 제공하는 대형 모델 플랫폼으로, 안정적이고 비용 효율적인 오픈 소스 모델 API 서비스를 제공하며, DeepSeek 전 시리즈, Llama, Qwen 등 업계 선도 대모델을 지원합니다."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "통의천문은 알리바바 클라우드가 자주 개발한 초대형 언어 모델로, 강력한 자연어 이해 및 생성 능력을 갖추고 있습니다. 다양한 질문에 답변하고, 텍스트 콘텐츠를 창작하며, 의견을 표현하고, 코드를 작성하는 등 여러 분야에서 활용됩니다."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO biedt stabiele en kosteneffectieve open source model API-diensten, die ondersteuning bieden voor de volledige DeepSeek-serie, Llama, Qwen en andere toonaangevende grote modellen in de industrie."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu is een leidende cloudserviceprovider die hoogwaardige, stabiele en kosteneffectieve real-time en batch AI-inferentie-API's biedt, inclusief modellen van Alibaba, met flexibele opties voor het bouwen en toepassen van AI-toepassingen."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "Tongyi Qianwen is een door Alibaba Cloud zelf ontwikkeld grootschalig taalmodel met krachtige mogelijkheden voor natuurlijke taalbegrip en -generatie. Het kan verschillende vragen beantwoorden, tekstinhoud creëren, meningen uiten, code schrijven, en speelt een rol in verschillende domeinen."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO Paiou Cloud oferuje stabilne i opłacalne usługi API modeli open source, wspierające pełną gamę DeepSeek, Llama, Qwen i inne wiodące modele w branży."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu to wiodący dostawca usług chmurowych, oferujący API do dużych modeli AI, w tym DeepSeek, Llama i Qwen, z elastycznymi opcjami do tworzenia i stosowania aplikacji AI."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "Tongyi Qianwen to samodzielnie opracowany przez Alibaba Cloud model językowy o dużej skali, charakteryzujący się silnymi zdolnościami rozumienia i generowania języka naturalnego. Może odpowiadać na różnorodne pytania, tworzyć treści pisemne, wyrażać opinie, pisać kod i działać w wielu dziedzinach."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "O PPIO Paiouyun oferece serviços de API de modelos de código aberto estáveis e com alto custo-benefício, suportando toda a linha DeepSeek, Llama, Qwen e outros grandes modelos líderes da indústria."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu é um fornecedor de serviços de cloud leader, oferecendo API de IA de alta velocidade e eficiência, incluindo modelos Alibaba, com opções flexíveis para construir e aplicar aplicações de IA."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "Qwen é um modelo de linguagem de grande escala desenvolvido pela Alibaba Cloud, com forte capacidade de compreensão e geração de linguagem natural. Ele pode responder a várias perguntas, criar conteúdo escrito, expressar opiniões e escrever código, atuando em vários campos."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO Paiouyun предоставляет стабильные и высокоэффективные API-сервисы для открытых моделей, поддерживающие всю серию DeepSeek, Llama, Qwen и другие ведущие модели в отрасли."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu — это ведущий поставщик облачных услуг, предлагающий API для больших моделей AI, включая DeepSeek, Llama и Qwen, с гибкими вариантами для создания и применения приложений AI."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "Qwen — это сверхбольшая языковая модель, разработанная Alibaba Cloud, обладающая мощными возможностями понимания и генерации естественного языка. Она может отвечать на различные вопросы, создавать текстовый контент, выражать мнения и писать код, играя важную роль в различных областях."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO Paiou Cloud, istikrarlı ve yüksek maliyet etkinliğe sahip açık kaynak model API hizmeti sunar, DeepSeek'in tüm serisi, Llama, Qwen gibi sektörün önde gelen büyük modellerini destekler."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu, esnek seçeneklerle büyük model API hizmetleri sunan lider bulut hizmeti sağlayıcısıdır, DeepSeek, Llama ve Qwen gibi sektörün önde gelen büyük modellerini destekler."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "Tongyi Qianwen, Alibaba Cloud tarafından geliştirilen büyük ölçekli bir dil modelidir ve güçlü doğal dil anlama ve üretme yeteneklerine sahiptir. Çeşitli soruları yanıtlayabilir, metin içeriği oluşturabilir, görüşlerini ifade edebilir ve kod yazabilir. Birçok alanda etkili bir şekilde kullanılmaktadır."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO派欧云 cung cấp dịch vụ API mô hình mã nguồn mở ổn định, hiệu quả chi phí cao, hỗ trợ toàn bộ dòng sản phẩm DeepSeek, Llama, Qwen và các mô hình lớn hàng đầu trong ngành."
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu là nhà cung cấp dịch vụ cloud hàng đầu, cung cấp API cho các mô hình AI lớn, bao gồm DeepSeek, Llama và Qwen, với các tùy chọn linh hoạt để xây dựng và áp dụng các ứng dụng AI."
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "Qwen là mô hình ngôn ngữ quy mô lớn tự phát triển của Alibaba Cloud, có khả năng hiểu và tạo ngôn ngữ tự nhiên mạnh mẽ. Nó có thể trả lời nhiều câu hỏi, sáng tác nội dung văn bản, bày tỏ quan điểm, viết mã, v.v., hoạt động trong nhiều lĩnh vực."
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO 派欧云提供稳定、高性价比的开源模型 API 服务,支持 DeepSeek 全系列、Llama、Qwen 等行业领先大模型。"
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "七牛作为老牌云服务厂商,提供高性价比稳定的实时、批量 AI 推理服务,简单易用。"
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "通义千问是阿里云自主研发的超大规模语言模型,具有强大的自然语言理解和生成能力。它可以回答各种问题、创作文字内容、表达观点看法、撰写代码等,在多个领域发挥作用。"
|
100
103
|
},
|
@@ -95,6 +95,9 @@
|
|
95
95
|
"ppio": {
|
96
96
|
"description": "PPIO 派歐雲提供穩定、高性價比的開源模型 API 服務,支持 DeepSeek 全系列、Llama、Qwen 等行業領先的大模型。"
|
97
97
|
},
|
98
|
+
"qiniu": {
|
99
|
+
"description": "Qiniu 是領先的雲服務提供商,提供高性價比、穩定的實時和批量 AI 推理服務,包括 DeepSeek、Llama 和 Qwen 等行業領先的大模型。"
|
100
|
+
},
|
98
101
|
"qwen": {
|
99
102
|
"description": "通義千問是阿里雲自主研發的超大規模語言模型,具有強大的自然語言理解和生成能力。它可以回答各種問題、創作文字內容、表達觀點看法、撰寫代碼等,在多個領域發揮作用。"
|
100
103
|
},
|
package/package.json
CHANGED
@@ -1,6 +1,6 @@
|
|
1
1
|
{
|
2
2
|
"name": "@lobehub/chat",
|
3
|
-
"version": "1.
|
3
|
+
"version": "1.86.1",
|
4
4
|
"description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
|
5
5
|
"keywords": [
|
6
6
|
"framework",
|
@@ -1,6 +1,6 @@
|
|
1
1
|
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
2
2
|
|
3
|
-
import { AgentRuntimeError } from '@/libs/
|
3
|
+
import { AgentRuntimeError } from '@/libs/model-runtime';
|
4
4
|
import { ChatErrorType } from '@/types/fetch';
|
5
5
|
import { createErrorResponse } from '@/utils/errorResponse';
|
6
6
|
import { getJWTPayload } from '@/utils/server/jwt';
|
@@ -2,8 +2,8 @@ import { AuthObject } from '@clerk/backend';
|
|
2
2
|
import { NextRequest } from 'next/server';
|
3
3
|
|
4
4
|
import { JWTPayload, LOBE_CHAT_AUTH_HEADER, OAUTH_AUTHORIZED, enableClerk } from '@/const/auth';
|
5
|
-
import { AgentRuntime, AgentRuntimeError, ChatCompletionErrorPayload } from '@/libs/agent-runtime';
|
6
5
|
import { ClerkAuth } from '@/libs/clerk-auth';
|
6
|
+
import { AgentRuntime, AgentRuntimeError, ChatCompletionErrorPayload } from '@/libs/model-runtime';
|
7
7
|
import { ChatErrorType } from '@/types/fetch';
|
8
8
|
import { createErrorResponse } from '@/utils/errorResponse';
|
9
9
|
import { getJWTPayload } from '@/utils/server/jwt';
|
@@ -2,7 +2,7 @@ import { type AuthObject } from '@clerk/backend';
|
|
2
2
|
|
3
3
|
import { getAppConfig } from '@/config/app';
|
4
4
|
import { enableClerk, enableNextAuth } from '@/const/auth';
|
5
|
-
import { AgentRuntimeError } from '@/libs/
|
5
|
+
import { AgentRuntimeError } from '@/libs/model-runtime';
|
6
6
|
import { ChatErrorType } from '@/types/fetch';
|
7
7
|
|
8
8
|
interface CheckAuthParams {
|
@@ -4,7 +4,7 @@ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
|
4
4
|
|
5
5
|
import { checkAuthMethod } from '@/app/(backend)/middleware/auth/utils';
|
6
6
|
import { LOBE_CHAT_AUTH_HEADER, OAUTH_AUTHORIZED } from '@/const/auth';
|
7
|
-
import { AgentRuntime, LobeRuntimeAI } from '@/libs/
|
7
|
+
import { AgentRuntime, LobeRuntimeAI } from '@/libs/model-runtime';
|
8
8
|
import { ChatErrorType } from '@/types/fetch';
|
9
9
|
import { getJWTPayload } from '@/utils/server/jwt';
|
10
10
|
|
@@ -3,7 +3,7 @@ import {
|
|
3
3
|
AGENT_RUNTIME_ERROR_SET,
|
4
4
|
AgentRuntime,
|
5
5
|
ChatCompletionErrorPayload,
|
6
|
-
} from '@/libs/
|
6
|
+
} from '@/libs/model-runtime';
|
7
7
|
import { createTraceOptions, initAgentRuntimeWithUserPayload } from '@/server/modules/AgentRuntime';
|
8
8
|
import { ChatErrorType } from '@/types/fetch';
|
9
9
|
import { ChatStreamPayload } from '@/types/openai/chat';
|