@lobehub/chat 1.56.5 → 1.57.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/ISSUE_TEMPLATE/1_bug_report.yml +11 -2
- package/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml +9 -0
- package/CHANGELOG.md +25 -0
- package/Dockerfile +2 -0
- package/Dockerfile.database +2 -0
- package/changelog/v1.json +9 -0
- package/package.json +1 -1
- package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +2 -0
- package/src/config/aiModels/index.ts +3 -0
- package/src/config/aiModels/jina.ts +23 -0
- package/src/config/llm.ts +6 -0
- package/src/config/modelProviders/index.ts +4 -0
- package/src/config/modelProviders/jina.ts +21 -0
- package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
- package/src/libs/agent-runtime/jina/index.ts +49 -0
- package/src/libs/agent-runtime/types/type.ts +1 -0
- package/src/types/user/settings/keyVaults.ts +1 -0
package/.github/ISSUE_TEMPLATE/1_bug_report.yml
CHANGED
@@ -5,7 +5,7 @@ labels: ['🐛 Bug']
 body:
   - type: dropdown
     attributes:
-      label: '📦
+      label: '📦 Platform'
       multiple: true
       options:
         - 'Official Preview'
@@ -13,10 +13,19 @@ body:
         - 'Zeabur'
         - 'Sealos'
         - 'Netlify'
-        - 'Docker'
+        - 'Self hosting Docker'
         - 'Other'
     validations:
       required: true
+  - type: dropdown
+    attributes:
+      label: '📦 Deploymenet mode'
+      multiple: true
+      options:
+        - 'client db (lobe-chat image)'
+        - 'server db(lobe-chat-database image)'
+    validations:
+      required: true
   - type: input
     attributes:
       label: '📌 Version'
package/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml
CHANGED
@@ -17,6 +17,15 @@ body:
         - 'Other'
     validations:
       required: true
+  - type: dropdown
+    attributes:
+      label: '📦 部署模式'
+      multiple: true
+      options:
+        - '客户端模式(lobe-chat 镜像)'
+        - '服务端模式(lobe-chat-database 镜像)'
+    validations:
+      required: true
   - type: input
     attributes:
       label: '📌 软件版本'
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+## [Version 1.57.0](https://github.com/lobehub/lobe-chat/compare/v1.56.5...v1.57.0)
+
+<sup>Released on **2025-02-16**</sup>
+
+#### ✨ Features
+
+- **misc**: Add Jina AI model provider support.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **misc**: Add Jina AI model provider support, closes [#6140](https://github.com/lobehub/lobe-chat/issues/6140) ([6b4c15b](https://github.com/lobehub/lobe-chat/commit/6b4c15b))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.56.5](https://github.com/lobehub/lobe-chat/compare/v1.56.4...v1.56.5)
 
 <sup>Released on **2025-02-16**</sup>
package/Dockerfile
CHANGED
@@ -183,6 +183,8 @@ ENV \
     HUNYUAN_API_KEY="" HUNYUAN_MODEL_LIST="" \
     # InternLM
     INTERNLM_API_KEY="" INTERNLM_MODEL_LIST="" \
+    # Jina
+    JINA_API_KEY="" JINA_MODEL_LIST="" JINA_PROXY_URL="" \
     # Minimax
     MINIMAX_API_KEY="" MINIMAX_MODEL_LIST="" \
     # Mistral
package/Dockerfile.database
CHANGED
@@ -220,6 +220,8 @@ ENV \
    HUNYUAN_API_KEY="" HUNYUAN_MODEL_LIST="" \
    # InternLM
    INTERNLM_API_KEY="" INTERNLM_MODEL_LIST="" \
+   # Jina
+   JINA_API_KEY="" JINA_MODEL_LIST="" JINA_PROXY_URL="" \
    # Minimax
    MINIMAX_API_KEY="" MINIMAX_MODEL_LIST="" \
    # Mistral
package/changelog/v1.json
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.56.5",
+  "version": "1.57.0",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx
CHANGED
@@ -13,6 +13,7 @@ import {
   HigressProviderCard,
   HunyuanProviderCard,
   InternLMProviderCard,
+  JinaProviderCard,
   MinimaxProviderCard,
   MistralProviderCard,
   MoonshotProviderCard,
@@ -77,6 +78,7 @@ export const useProviderList = (): ProviderItem[] => {
       Ai21ProviderCard,
       UpstageProviderCard,
       XAIProviderCard,
+      JinaProviderCard,
       QwenProviderCard,
       WenxinProviderCard,
       HunyuanProviderCard,
package/src/config/aiModels/index.ts
CHANGED
@@ -18,6 +18,7 @@ import { default as higress } from './higress';
 import { default as huggingface } from './huggingface';
 import { default as hunyuan } from './hunyuan';
 import { default as internlm } from './internlm';
+import { default as jina } from './jina';
 import { default as lmstudio } from './lmstudio';
 import { default as minimax } from './minimax';
 import { default as mistral } from './mistral';
@@ -81,6 +82,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
   huggingface,
   hunyuan,
   internlm,
+  jina,
   lmstudio,
   minimax,
   mistral,
@@ -125,6 +127,7 @@ export { default as higress } from './higress';
 export { default as huggingface } from './huggingface';
 export { default as hunyuan } from './hunyuan';
 export { default as internlm } from './internlm';
+export { default as jina } from './jina';
 export { default as lmstudio } from './lmstudio';
 export { default as minimax } from './minimax';
 export { default as mistral } from './mistral';
package/src/config/aiModels/jina.ts
ADDED
@@ -0,0 +1,23 @@
+import { AIChatModelCard } from '@/types/aiModel';
+
+const jinaChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 64_000,
+    description: '深度搜索结合了网络搜索、阅读和推理,可进行全面调查。您可以将其视为一个代理,接受您的研究任务 - 它会进行广泛搜索并经过多次迭代,然后才能给出答案。这个过程涉及持续的研究、推理和从各个角度解决问题。这与直接从预训练数据生成答案的标准大模型以及依赖一次性表面搜索的传统 RAG 系统有着根本的不同。',
+    displayName: 'Jina DeepSearch v1',
+    enabled: true,
+    id: 'jina-deepsearch-v1',
+    pricing: {
+      input: 0.02,
+      output: 0.02
+    },
+    type: 'chat'
+  }
+]
+
+export const allModels = [...jinaChatModels];
+
+export default allModels;
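The card above is what the rest of the app reads for model capabilities and display metadata. A rough usage sketch (the import path assumes the `@/` alias used throughout the diff; the file's default export is the `allModels` array):

```ts
import jinaModels from '@/config/aiModels/jina';

// List the Jina models whose card declares the reasoning ability.
const reasoningModels = jinaModels
  .filter((model) => model.abilities?.reasoning)
  .map((model) => model.displayName ?? model.id);

console.log(reasoningModels); // expected: ['Jina DeepSearch v1']
```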
package/src/config/llm.ts
CHANGED
@@ -134,6 +134,9 @@ export const getLLMConfig = () => {
 
       ENABLED_TENCENT_CLOUD: z.boolean(),
       TENCENT_CLOUD_API_KEY: z.string().optional(),
+
+      ENABLED_JINA: z.boolean(),
+      JINA_API_KEY: z.string().optional(),
     },
     runtimeEnv: {
       API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -266,6 +269,9 @@ export const getLLMConfig = () => {
 
       ENABLED_TENCENT_CLOUD: !!process.env.TENCENT_CLOUD_API_KEY,
       TENCENT_CLOUD_API_KEY: process.env.TENCENT_CLOUD_API_KEY,
+
+      ENABLED_JINA: !!process.env.JINA_API_KEY,
+      JINA_API_KEY: process.env.JINA_API_KEY,
     },
   });
 };
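As with the other providers, `ENABLED_JINA` is not set directly; it is derived from whether `JINA_API_KEY` is present in the environment. A minimal standalone sketch of that pattern with plain `zod`, outside the project's `createEnv` wrapper (variable names here are illustrative):

```ts
import { z } from 'zod';

// Mirrors the shape added to the server schema in llm.ts.
const jinaEnvSchema = z.object({
  ENABLED_JINA: z.boolean(),
  JINA_API_KEY: z.string().optional(),
});

// The enabled flag is simply "is an API key set?".
const jinaEnv = jinaEnvSchema.parse({
  ENABLED_JINA: !!process.env.JINA_API_KEY,
  JINA_API_KEY: process.env.JINA_API_KEY,
});

console.log(jinaEnv.ENABLED_JINA ? 'Jina provider enabled' : 'Jina provider disabled');
```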
package/src/config/modelProviders/index.ts
CHANGED
@@ -18,6 +18,7 @@ import HigressProvider from './higress';
 import HuggingFaceProvider from './huggingface';
 import HunyuanProvider from './hunyuan';
 import InternLMProvider from './internlm';
+import JinaProvider from './jina';
 import LMStudioProvider from './lmstudio';
 import MinimaxProvider from './minimax';
 import MistralProvider from './mistral';
@@ -67,6 +68,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   AnthropicProvider.chatModels,
   HuggingFaceProvider.chatModels,
   XAIProvider.chatModels,
+  JinaProvider.chatModels,
   ZeroOneProvider.chatModels,
   StepfunProvider.chatModels,
   NovitaProvider.chatModels,
@@ -110,6 +112,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   Ai21Provider,
   UpstageProvider,
   XAIProvider,
+  JinaProvider,
   QwenProvider,
   WenxinProvider,
   TencentcloudProvider,
@@ -159,6 +162,7 @@ export { default as HigressProviderCard } from './higress';
 export { default as HuggingFaceProviderCard } from './huggingface';
 export { default as HunyuanProviderCard } from './hunyuan';
 export { default as InternLMProviderCard } from './internlm';
+export { default as JinaProviderCard } from './jina';
 export { default as LMStudioProviderCard } from './lmstudio';
 export { default as MinimaxProviderCard } from './minimax';
 export { default as MistralProviderCard } from './mistral';
package/src/config/modelProviders/jina.ts
ADDED
@@ -0,0 +1,21 @@
+import { ModelProviderCard } from '@/types/llm';
+
+const Jina: ModelProviderCard = {
+  chatModels: [],
+  checkModel: 'jina-deepsearch-v1',
+  description: 'Jina AI 成立于 2020 年,是一家领先的搜索 AI 公司。我们的搜索底座平台包含了向量模型、重排器和小语言模型,可帮助企业构建可靠且高质量的生成式AI和多模态的搜索应用。',
+  id: 'jina',
+  modelList: { showModelFetcher: true },
+  modelsUrl: 'https://jina.ai/models',
+  name: 'Jina',
+  settings: {
+    proxyUrl: {
+      placeholder: 'https://deepsearch.jina.ai/v1',
+    },
+    sdkType: 'openai',
+    showModelFetcher: true,
+  },
+  url: 'https://jina.ai',
+};
+
+export default Jina;
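Because the card declares `sdkType: 'openai'` and points at `https://deepsearch.jina.ai/v1`, the endpoint can be exercised with any OpenAI-compatible client. A connectivity-check sketch against the declared `checkModel`, assuming the `openai` npm package and a `JINA_API_KEY` environment variable:

```ts
import OpenAI from 'openai';

// OpenAI-compatible client pointed at the Jina DeepSearch endpoint.
const client = new OpenAI({
  apiKey: process.env.JINA_API_KEY,
  baseURL: 'https://deepsearch.jina.ai/v1',
});

// Ping the model the provider card uses for its connectivity check.
const completion = await client.chat.completions.create({
  messages: [{ content: 'ping', role: 'user' }],
  model: 'jina-deepsearch-v1',
});

console.log(completion.choices[0]?.message?.content);
```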
package/src/libs/agent-runtime/AgentRuntime.ts
CHANGED
@@ -21,6 +21,7 @@ import { LobeHigressAI } from './higress';
 import { LobeHuggingFaceAI } from './huggingface';
 import { LobeHunyuanAI } from './hunyuan';
 import { LobeInternLMAI } from './internlm';
+import { LobeJinaAI } from './jina';
 import { LobeLMStudioAI } from './lmstudio';
 import { LobeMinimaxAI } from './minimax';
 import { LobeMistralAI } from './mistral';
@@ -154,6 +155,7 @@ class AgentRuntime {
       huggingface: { apiKey?: string; baseURL?: string };
       hunyuan: Partial<ClientOptions>;
       internlm: Partial<ClientOptions>;
+      jina: Partial<ClientOptions>;
       lmstudio: Partial<ClientOptions>;
       minimax: Partial<ClientOptions>;
       mistral: Partial<ClientOptions>;
@@ -369,6 +371,11 @@ class AgentRuntime {
         break;
       }
 
+      case ModelProvider.Jina: {
+        runtimeModel = new LobeJinaAI(params.jina ?? {});
+        break;
+      }
+
       case ModelProvider.Cloudflare: {
         runtimeModel = new LobeCloudflareAI(params.cloudflare ?? {});
         break;
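The new `case ModelProvider.Jina` branch simply forwards `params.jina` (typed `Partial<ClientOptions>`) to `LobeJinaAI`, so the runtime can also be constructed directly. A sketch under that assumption; the chat payload shape below follows the OpenAI-compatible convention used by the other factory-based providers and is not spelled out in this diff:

```ts
import { LobeJinaAI } from '@/libs/agent-runtime/jina';

// apiKey/baseURL are the ClientOptions fields the Jina case passes through;
// JINA_PROXY_URL mirrors the env var added to the Dockerfiles above and, when
// unset, the factory's default https://deepsearch.jina.ai/v1 endpoint applies.
const jina = new LobeJinaAI({
  apiKey: process.env.JINA_API_KEY,
  baseURL: process.env.JINA_PROXY_URL,
});

// Assumed OpenAI-style chat payload; field names are illustrative.
const response = await jina.chat({
  messages: [{ content: 'What changed in LobeChat 1.57.0?', role: 'user' }],
  model: 'jina-deepsearch-v1',
  temperature: 0.6,
});
```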
package/src/libs/agent-runtime/jina/index.ts
ADDED
@@ -0,0 +1,49 @@
+import { ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+import type { ChatModelCard } from '@/types/llm';
+
+export interface JinaModelCard {
+  id: string;
+}
+
+export const LobeJinaAI = LobeOpenAICompatibleFactory({
+  baseURL: 'https://deepsearch.jina.ai/v1',
+  debug: {
+    chatCompletion: () => process.env.DEBUG_JINA_CHAT_COMPLETION === '1',
+  },
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
+    const reasoningKeywords = [
+      'deepsearch',
+    ];
+
+    const modelsPage = await client.models.list() as any;
+    const modelList: JinaModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
+  },
+  provider: ModelProvider.Jina,
+});
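The `models` hook above does the real work: it fetches the provider's `/v1/models` list, matches each id against `LOBE_DEFAULT_MODEL_LIST`, and tags reasoning support when the id contains `deepsearch`. A self-contained sketch of that transform, with the types reduced to the fields the logic actually touches:

```ts
interface RawModel { id: string }
interface KnownModel {
  id: string;
  displayName?: string;
  abilities?: { functionCall?: boolean; reasoning?: boolean; vision?: boolean };
}

const REASONING_KEYWORDS = ['deepsearch'];

// Same mapping rule as the factory's `models` hook, expressed as a pure function.
function mapJinaModels(raw: RawModel[], known: KnownModel[]) {
  return raw.map((model) => {
    const knownModel = known.find((m) => m.id.toLowerCase() === model.id.toLowerCase());
    return {
      displayName: knownModel?.displayName,
      id: model.id,
      reasoning:
        REASONING_KEYWORDS.some((k) => model.id.toLowerCase().includes(k)) ||
        knownModel?.abilities?.reasoning ||
        false,
    };
  });
}

// e.g. mapJinaModels([{ id: 'jina-deepsearch-v1' }], [])
//   → [{ displayName: undefined, id: 'jina-deepsearch-v1', reasoning: true }]
```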
package/src/types/user/settings/keyVaults.ts
CHANGED
@@ -44,6 +44,7 @@ export interface UserKeyVaults {
   huggingface?: OpenAICompatibleKeyVault;
   hunyuan?: OpenAICompatibleKeyVault;
   internlm?: OpenAICompatibleKeyVault;
+  jina?: OpenAICompatibleKeyVault;
   lmstudio?: OpenAICompatibleKeyVault;
   lobehub?: any;
   minimax?: OpenAICompatibleKeyVault;
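This one-line addition gives user settings a slot for Jina credentials alongside the other OpenAI-compatible providers. A sketch of the resulting shape, assuming `OpenAICompatibleKeyVault` is the usual optional `apiKey`/`baseURL` pair (the key value below is a placeholder):

```ts
import type { UserKeyVaults } from '@/types/user/settings/keyVaults';

// Hypothetical vault entry for the new provider; both fields are optional.
const keyVaults: UserKeyVaults = {
  jina: {
    apiKey: 'jina_xxxxxxxxxxxxxxxx', // placeholder, not a real key
    baseURL: 'https://deepsearch.jina.ai/v1',
  },
};

export default keyVaults;
```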