@lobehub/chat 1.56.4 → 1.57.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,7 +5,7 @@ labels: ['🐛 Bug']
5
5
  body:
6
6
  - type: dropdown
7
7
  attributes:
8
- label: '📦 Environment'
8
+ label: '📦 Platform'
9
9
  multiple: true
10
10
  options:
11
11
  - 'Official Preview'
@@ -13,10 +13,19 @@ body:
13
13
  - 'Zeabur'
14
14
  - 'Sealos'
15
15
  - 'Netlify'
16
- - 'Docker'
16
+ - 'Self hosting Docker'
17
17
  - 'Other'
18
18
  validations:
19
19
  required: true
20
+ - type: dropdown
21
+ attributes:
22
+ label: '📦 Deployment mode'
23
+ multiple: true
24
+ options:
25
+ - 'client db (lobe-chat image)'
26
+ - 'server db (lobe-chat-database image)'
27
+ validations:
28
+ required: true
20
29
  - type: input
21
30
  attributes:
22
31
  label: '📌 Version'
@@ -17,6 +17,15 @@ body:
17
17
  - 'Other'
18
18
  validations:
19
19
  required: true
20
+ - type: dropdown
21
+ attributes:
22
+ label: '📦 部署模式'
23
+ multiple: true
24
+ options:
25
+ - '客户端模式(lobe-chat 镜像)'
26
+ - '服务端模式(lobe-chat-database 镜像)'
27
+ validations:
28
+ required: true
20
29
  - type: input
21
30
  attributes:
22
31
  label: '📌 软件版本'
package/CHANGELOG.md CHANGED
@@ -2,6 +2,57 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ## [Version 1.57.0](https://github.com/lobehub/lobe-chat/compare/v1.56.5...v1.57.0)
6
+
7
+ <sup>Released on **2025-02-16**</sup>
8
+
9
+ #### ✨ Features
10
+
11
+ - **misc**: Add Jina AI model provider support.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's improved
19
+
20
+ - **misc**: Add Jina AI model provider support, closes [#6140](https://github.com/lobehub/lobe-chat/issues/6140) ([6b4c15b](https://github.com/lobehub/lobe-chat/commit/6b4c15b))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ### [Version 1.56.5](https://github.com/lobehub/lobe-chat/compare/v1.56.4...v1.56.5)
31
+
32
+ <sup>Released on **2025-02-16**</sup>
33
+
34
+ #### 🐛 Bug Fixes
35
+
36
+ - **misc**: Match o1 series models more robustly in Azure OpenAI provider, set max_completion_tokens to null for Azure OpenAI.
37
+
38
+ <br/>
39
+
40
+ <details>
41
+ <summary><kbd>Improvements and Fixes</kbd></summary>
42
+
43
+ #### What's fixed
44
+
45
+ - **misc**: Match o1 series models more robustly in Azure OpenAI provider, closes [#6193](https://github.com/lobehub/lobe-chat/issues/6193) ([f444e66](https://github.com/lobehub/lobe-chat/commit/f444e66))
46
+ - **misc**: Set max_completion_tokens to null for Azure OpenAI, closes [#6198](https://github.com/lobehub/lobe-chat/issues/6198) ([e9e8da4](https://github.com/lobehub/lobe-chat/commit/e9e8da4))
47
+
48
+ </details>
49
+
50
+ <div align="right">
51
+
52
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
53
+
54
+ </div>
55
+
5
56
  ### [Version 1.56.4](https://github.com/lobehub/lobe-chat/compare/v1.56.3...v1.56.4)
6
57
 
7
58
  <sup>Released on **2025-02-16**</sup>
package/Dockerfile CHANGED
@@ -183,6 +183,8 @@ ENV \
183
183
  HUNYUAN_API_KEY="" HUNYUAN_MODEL_LIST="" \
184
184
  # InternLM
185
185
  INTERNLM_API_KEY="" INTERNLM_MODEL_LIST="" \
186
+ # Jina
187
+ JINA_API_KEY="" JINA_MODEL_LIST="" JINA_PROXY_URL="" \
186
188
  # Minimax
187
189
  MINIMAX_API_KEY="" MINIMAX_MODEL_LIST="" \
188
190
  # Mistral
@@ -220,6 +220,8 @@ ENV \
220
220
  HUNYUAN_API_KEY="" HUNYUAN_MODEL_LIST="" \
221
221
  # InternLM
222
222
  INTERNLM_API_KEY="" INTERNLM_MODEL_LIST="" \
223
+ # Jina
224
+ JINA_API_KEY="" JINA_MODEL_LIST="" JINA_PROXY_URL="" \
223
225
  # Minimax
224
226
  MINIMAX_API_KEY="" MINIMAX_MODEL_LIST="" \
225
227
  # Mistral
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "features": [
5
+ "Add Jina AI model provider support."
6
+ ]
7
+ },
8
+ "date": "2025-02-16",
9
+ "version": "1.57.0"
10
+ },
11
+ {
12
+ "children": {
13
+ "fixes": [
14
+ "Match o1 series models more robustly in Azure OpenAI provider, set max_completion_tokens to null for Azure OpenAI."
15
+ ]
16
+ },
17
+ "date": "2025-02-16",
18
+ "version": "1.56.5"
19
+ },
2
20
  {
3
21
  "children": {
4
22
  "fixes": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.56.4",
3
+ "version": "1.57.0",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -13,6 +13,7 @@ import {
13
13
  HigressProviderCard,
14
14
  HunyuanProviderCard,
15
15
  InternLMProviderCard,
16
+ JinaProviderCard,
16
17
  MinimaxProviderCard,
17
18
  MistralProviderCard,
18
19
  MoonshotProviderCard,
@@ -77,6 +78,7 @@ export const useProviderList = (): ProviderItem[] => {
77
78
  Ai21ProviderCard,
78
79
  UpstageProviderCard,
79
80
  XAIProviderCard,
81
+ JinaProviderCard,
80
82
  QwenProviderCard,
81
83
  WenxinProviderCard,
82
84
  HunyuanProviderCard,
@@ -18,6 +18,7 @@ import { default as higress } from './higress';
18
18
  import { default as huggingface } from './huggingface';
19
19
  import { default as hunyuan } from './hunyuan';
20
20
  import { default as internlm } from './internlm';
21
+ import { default as jina } from './jina';
21
22
  import { default as lmstudio } from './lmstudio';
22
23
  import { default as minimax } from './minimax';
23
24
  import { default as mistral } from './mistral';
@@ -81,6 +82,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
81
82
  huggingface,
82
83
  hunyuan,
83
84
  internlm,
85
+ jina,
84
86
  lmstudio,
85
87
  minimax,
86
88
  mistral,
@@ -125,6 +127,7 @@ export { default as higress } from './higress';
125
127
  export { default as huggingface } from './huggingface';
126
128
  export { default as hunyuan } from './hunyuan';
127
129
  export { default as internlm } from './internlm';
130
+ export { default as jina } from './jina';
128
131
  export { default as lmstudio } from './lmstudio';
129
132
  export { default as minimax } from './minimax';
130
133
  export { default as mistral } from './mistral';
@@ -0,0 +1,23 @@
1
+ import { AIChatModelCard } from '@/types/aiModel';
2
+
3
+ const jinaChatModels: AIChatModelCard[] = [
4
+ {
5
+ abilities: {
6
+ reasoning: true,
7
+ },
8
+ contextWindowTokens: 64_000,
9
+ description: '深度搜索结合了网络搜索、阅读和推理,可进行全面调查。您可以将其视为一个代理,接受您的研究任务 - 它会进行广泛搜索并经过多次迭代,然后才能给出答案。这个过程涉及持续的研究、推理和从各个角度解决问题。这与直接从预训练数据生成答案的标准大模型以及依赖一次性表面搜索的传统 RAG 系统有着根本的不同。',
10
+ displayName: 'Jina DeepSearch v1',
11
+ enabled: true,
12
+ id: 'jina-deepsearch-v1',
13
+ pricing: {
14
+ input: 0.02,
15
+ output: 0.02
16
+ },
17
+ type: 'chat'
18
+ }
19
+ ]
20
+
21
+ export const allModels = [...jinaChatModels];
22
+
23
+ export default allModels;
package/src/config/llm.ts CHANGED
@@ -134,6 +134,9 @@ export const getLLMConfig = () => {
134
134
 
135
135
  ENABLED_TENCENT_CLOUD: z.boolean(),
136
136
  TENCENT_CLOUD_API_KEY: z.string().optional(),
137
+
138
+ ENABLED_JINA: z.boolean(),
139
+ JINA_API_KEY: z.string().optional(),
137
140
  },
138
141
  runtimeEnv: {
139
142
  API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -266,6 +269,9 @@ export const getLLMConfig = () => {
266
269
 
267
270
  ENABLED_TENCENT_CLOUD: !!process.env.TENCENT_CLOUD_API_KEY,
268
271
  TENCENT_CLOUD_API_KEY: process.env.TENCENT_CLOUD_API_KEY,
272
+
273
+ ENABLED_JINA: !!process.env.JINA_API_KEY,
274
+ JINA_API_KEY: process.env.JINA_API_KEY,
269
275
  },
270
276
  });
271
277
  };
@@ -18,6 +18,7 @@ import HigressProvider from './higress';
18
18
  import HuggingFaceProvider from './huggingface';
19
19
  import HunyuanProvider from './hunyuan';
20
20
  import InternLMProvider from './internlm';
21
+ import JinaProvider from './jina';
21
22
  import LMStudioProvider from './lmstudio';
22
23
  import MinimaxProvider from './minimax';
23
24
  import MistralProvider from './mistral';
@@ -67,6 +68,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
67
68
  AnthropicProvider.chatModels,
68
69
  HuggingFaceProvider.chatModels,
69
70
  XAIProvider.chatModels,
71
+ JinaProvider.chatModels,
70
72
  ZeroOneProvider.chatModels,
71
73
  StepfunProvider.chatModels,
72
74
  NovitaProvider.chatModels,
@@ -110,6 +112,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
110
112
  Ai21Provider,
111
113
  UpstageProvider,
112
114
  XAIProvider,
115
+ JinaProvider,
113
116
  QwenProvider,
114
117
  WenxinProvider,
115
118
  TencentcloudProvider,
@@ -159,6 +162,7 @@ export { default as HigressProviderCard } from './higress';
159
162
  export { default as HuggingFaceProviderCard } from './huggingface';
160
163
  export { default as HunyuanProviderCard } from './hunyuan';
161
164
  export { default as InternLMProviderCard } from './internlm';
165
+ export { default as JinaProviderCard } from './jina';
162
166
  export { default as LMStudioProviderCard } from './lmstudio';
163
167
  export { default as MinimaxProviderCard } from './minimax';
164
168
  export { default as MistralProviderCard } from './mistral';
@@ -0,0 +1,21 @@
1
+ import { ModelProviderCard } from '@/types/llm';
2
+
3
+ const Jina: ModelProviderCard = {
4
+ chatModels: [],
5
+ checkModel: 'jina-deepsearch-v1',
6
+ description: 'Jina AI 成立于 2020 年,是一家领先的搜索 AI 公司。我们的搜索底座平台包含了向量模型、重排器和小语言模型,可帮助企业构建可靠且高质量的生成式AI和多模态的搜索应用。',
7
+ id: 'jina',
8
+ modelList: { showModelFetcher: true },
9
+ modelsUrl: 'https://jina.ai/models',
10
+ name: 'Jina',
11
+ settings: {
12
+ proxyUrl: {
13
+ placeholder: 'https://deepsearch.jina.ai/v1',
14
+ },
15
+ sdkType: 'openai',
16
+ showModelFetcher: true,
17
+ },
18
+ url: 'https://jina.ai',
19
+ };
20
+
21
+ export default Jina;
@@ -21,6 +21,7 @@ import { LobeHigressAI } from './higress';
21
21
  import { LobeHuggingFaceAI } from './huggingface';
22
22
  import { LobeHunyuanAI } from './hunyuan';
23
23
  import { LobeInternLMAI } from './internlm';
24
+ import { LobeJinaAI } from './jina';
24
25
  import { LobeLMStudioAI } from './lmstudio';
25
26
  import { LobeMinimaxAI } from './minimax';
26
27
  import { LobeMistralAI } from './mistral';
@@ -154,6 +155,7 @@ class AgentRuntime {
154
155
  huggingface: { apiKey?: string; baseURL?: string };
155
156
  hunyuan: Partial<ClientOptions>;
156
157
  internlm: Partial<ClientOptions>;
158
+ jina: Partial<ClientOptions>;
157
159
  lmstudio: Partial<ClientOptions>;
158
160
  minimax: Partial<ClientOptions>;
159
161
  mistral: Partial<ClientOptions>;
@@ -369,6 +371,11 @@ class AgentRuntime {
369
371
  break;
370
372
  }
371
373
 
374
+ case ModelProvider.Jina: {
375
+ runtimeModel = new LobeJinaAI(params.jina ?? {});
376
+ break;
377
+ }
378
+
372
379
  case ModelProvider.Cloudflare: {
373
380
  runtimeModel = new LobeCloudflareAI(params.cloudflare ?? {});
374
381
  break;
@@ -32,13 +32,13 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
32
32
  async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
33
33
  const { messages, model, ...params } = payload;
34
34
  // o1 series models on Azure OpenAI does not support streaming currently
35
- const enableStreaming = model.startsWith('o1') ? false : (params.stream ?? true);
35
+ const enableStreaming = model.includes('o1') ? false : (params.stream ?? true);
36
36
  try {
37
37
  const response = await this.client.chat.completions.create({
38
38
  messages: messages as OpenAI.ChatCompletionMessageParam[],
39
39
  model,
40
40
  ...params,
41
- max_completion_tokens: 2048,
41
+ max_completion_tokens: null,
42
42
  stream: enableStreaming,
43
43
  tool_choice: params.tools ? 'auto' : undefined,
44
44
  });
@@ -0,0 +1,49 @@
1
+ import { ModelProvider } from '../types';
2
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
+
4
+ import type { ChatModelCard } from '@/types/llm';
5
+
6
+ export interface JinaModelCard {
7
+ id: string;
8
+ }
9
+
10
+ export const LobeJinaAI = LobeOpenAICompatibleFactory({
11
+ baseURL: 'https://deepsearch.jina.ai/v1',
12
+ debug: {
13
+ chatCompletion: () => process.env.DEBUG_JINA_CHAT_COMPLETION === '1',
14
+ },
15
+ models: async ({ client }) => {
16
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
17
+
18
+ const reasoningKeywords = [
19
+ 'deepsearch',
20
+ ];
21
+
22
+ const modelsPage = await client.models.list() as any;
23
+ const modelList: JinaModelCard[] = modelsPage.data;
24
+
25
+ return modelList
26
+ .map((model) => {
27
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
28
+
29
+ return {
30
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
31
+ displayName: knownModel?.displayName ?? undefined,
32
+ enabled: knownModel?.enabled || false,
33
+ functionCall:
34
+ knownModel?.abilities?.functionCall
35
+ || false,
36
+ id: model.id,
37
+ reasoning:
38
+ reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
39
+ || knownModel?.abilities?.reasoning
40
+ || false,
41
+ vision:
42
+ knownModel?.abilities?.vision
43
+ || false,
44
+ };
45
+ })
46
+ .filter(Boolean) as ChatModelCard[];
47
+ },
48
+ provider: ModelProvider.Jina,
49
+ });
@@ -40,6 +40,7 @@ export enum ModelProvider {
40
40
  HuggingFace = 'huggingface',
41
41
  Hunyuan = 'hunyuan',
42
42
  InternLM = 'internlm',
43
+ Jina = 'jina',
43
44
  LMStudio = 'lmstudio',
44
45
  Minimax = 'minimax',
45
46
  Mistral = 'mistral',
@@ -44,6 +44,7 @@ export interface UserKeyVaults {
44
44
  huggingface?: OpenAICompatibleKeyVault;
45
45
  hunyuan?: OpenAICompatibleKeyVault;
46
46
  internlm?: OpenAICompatibleKeyVault;
47
+ jina?: OpenAICompatibleKeyVault;
47
48
  lmstudio?: OpenAICompatibleKeyVault;
48
49
  lobehub?: any;
49
50
  minimax?: OpenAICompatibleKeyVault;