@lobehub/chat 1.32.9 → 1.33.0

This diff reflects the published contents of two package versions as they appear in their respective public registries. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
  
  # Changelog
  
+ ## [Version 1.33.0](https://github.com/lobehub/lobe-chat/compare/v1.32.9...v1.33.0)
+ 
+ <sup>Released on **2024-11-25**</sup>
+ 
+ #### ✨ Features
+ 
+ - **misc**: Add Gitee AI model provider.
+ 
+ <br/>
+ 
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+ 
+ #### What's improved
+ 
+ - **misc**: Add Gitee AI model provider, closes [#4716](https://github.com/lobehub/lobe-chat/issues/4716) ([f1cb0af](https://github.com/lobehub/lobe-chat/commit/f1cb0af))
+ 
+ </details>
+ 
+ <div align="right">
+ 
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+ 
+ </div>
+ 
  ### [Version 1.32.9](https://github.com/lobehub/lobe-chat/compare/v1.32.8...v1.32.9)
  
  <sup>Released on **2024-11-24**</sup>
package/Dockerfile CHANGED
@@ -158,6 +158,8 @@ ENV \
  DEEPSEEK_API_KEY="" DEEPSEEK_MODEL_LIST="" \
  # Fireworks AI
  FIREWORKSAI_API_KEY="" FIREWORKSAI_MODEL_LIST="" \
+ # Gitee AI
+ GITEE_AI_API_KEY="" GITEE_AI_MODEL_LIST="" \
  # GitHub
  GITHUB_TOKEN="" GITHUB_MODEL_LIST="" \
  # Google
@@ -193,6 +193,8 @@ ENV \
  DEEPSEEK_API_KEY="" DEEPSEEK_MODEL_LIST="" \
  # Fireworks AI
  FIREWORKSAI_API_KEY="" FIREWORKSAI_MODEL_LIST="" \
+ # Gitee AI
+ GITEE_AI_API_KEY="" GITEE_AI_MODEL_LIST="" \
  # GitHub
  GITHUB_TOKEN="" GITHUB_MODEL_LIST="" \
  # Google
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.32.9",
+   "version": "1.33.0",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -7,6 +7,7 @@ import {
    BaichuanProviderCard,
    DeepSeekProviderCard,
    FireworksAIProviderCard,
+   GiteeAIProviderCard,
    GoogleProviderCard,
    GroqProviderCard,
    HunyuanProviderCard,
@@ -88,6 +89,7 @@ export const useProviderList = (): ProviderItem[] => {
      TaichuProviderCard,
      InternLMProviderCard,
      SiliconCloudProviderCard,
+     GiteeAIProviderCard,
    ],
    [
      AzureProvider,
package/src/config/llm.ts CHANGED
@@ -94,6 +94,9 @@ export const getLLMConfig = () => {
      ENABLED_SILICONCLOUD: z.boolean(),
      SILICONCLOUD_API_KEY: z.string().optional(),
  
+     ENABLED_GITEE_AI: z.boolean(),
+     GITEE_AI_API_KEY: z.string().optional(),
+ 
      ENABLED_UPSTAGE: z.boolean(),
      UPSTAGE_API_KEY: z.string().optional(),
  
@@ -210,6 +213,9 @@ export const getLLMConfig = () => {
      ENABLED_SILICONCLOUD: !!process.env.SILICONCLOUD_API_KEY,
      SILICONCLOUD_API_KEY: process.env.SILICONCLOUD_API_KEY,
  
+     ENABLED_GITEE_AI: !!process.env.GITEE_AI_API_KEY,
+     GITEE_AI_API_KEY: process.env.GITEE_AI_API_KEY,
+ 
      ENABLED_UPSTAGE: !!process.env.UPSTAGE_API_KEY,
      UPSTAGE_API_KEY: process.env.UPSTAGE_API_KEY,
  
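As with the other providers in this schema, Gitee AI is switched on purely by the presence of its key: `ENABLED_GITEE_AI` is computed as `!!process.env.GITEE_AI_API_KEY`. A minimal sketch of how a caller might consume this (the import path matches this file; the call site itself is hypothetical):

```ts
import { getLLMConfig } from '@/config/llm';

// ENABLED_GITEE_AI is derived from GITEE_AI_API_KEY, so exporting the key
// in the environment is all that is needed to enable the provider.
const { ENABLED_GITEE_AI, GITEE_AI_API_KEY } = getLLMConfig();

if (ENABLED_GITEE_AI) {
  // The key stays server-side; the provider card below sets
  // disableBrowserRequest: true, so requests go through the server.
  console.log('Gitee AI enabled, key length:', GITEE_AI_API_KEY?.length);
}
```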
@@ -0,0 +1,66 @@
+ import { ModelProviderCard } from '@/types/llm';
+ 
+ // ref: https://ai.gitee.com/serverless-api/packages/1493
+ const GiteeAI: ModelProviderCard = {
+   chatModels: [
+     {
+       description: 'Qwen2.5-72B-Instruct supports a 16k context and can generate long texts of more than 8K tokens. It supports function calling for seamless interaction with external systems, greatly improving flexibility and extensibility. The model has noticeably broader knowledge, much stronger coding and math abilities, and supports more than 29 languages.',
+       displayName: 'Qwen2.5 72B Instruct',
+       enabled: true,
+       functionCall: true,
+       id: 'Qwen2.5-72B-Instruct',
+       tokens: 16_000,
+     },
+     {
+       description: 'Qwen2 is the latest series of the Qwen model, supporting a 128k context. Compared with the best current open-source models, Qwen2-72B significantly surpasses the leading models in natural language understanding, knowledge, code, math, and multilingual capabilities.',
+       displayName: 'Qwen2 72B Instruct',
+       id: 'Qwen2-72B-Instruct',
+       tokens: 6000,
+     },
+     {
+       description: 'Qwen2 is the latest series of the Qwen model, able to outperform the best open-source models of comparable size and even larger ones. Qwen2 7B achieves significant advantages on multiple benchmarks, especially in code and Chinese comprehension.',
+       displayName: 'Qwen2 7B Instruct',
+       id: 'Qwen2-7B-Instruct',
+       tokens: 32_000,
+     },
+     {
+       description: 'GLM-4-9B-Chat shows high performance across semantics, math, reasoning, code, and knowledge. It also offers web browsing, code execution, custom tool calling, and long-text reasoning, and supports 26 languages including Japanese, Korean, and German.',
+       displayName: 'GLM4 9B Chat',
+       enabled: true,
+       id: 'glm-4-9b-chat',
+       tokens: 32_000,
+     },
+     {
+       description: 'While preserving the excellent general language abilities of the original series, Yi-1.5-34B greatly improves math, logic, and coding skills through incremental training on 500 billion high-quality tokens.',
+       displayName: 'Yi 34B Chat',
+       enabled: true,
+       id: 'Yi-34B-Chat',
+       tokens: 4000,
+     },
+     {
+       description: 'DeepSeek Coder 33B is a code language model trained on 2 trillion tokens, of which 87% is code and 13% is Chinese and English text. The model introduces a 16K window size and a fill-in-the-blank task, providing project-level code completion and snippet infilling.',
+       displayName: 'DeepSeek Coder 33B Instruct',
+       enabled: true,
+       id: 'deepseek-coder-33B-instruct',
+       tokens: 8000,
+     },
+     {
+       description: 'CodeGeeX4-ALL-9B is a multilingual code-generation model that supports comprehensive features including code completion and generation, a code interpreter, web search, function calling, and repository-level code Q&A, covering all kinds of software development scenarios. It is a top code-generation model with fewer than 10B parameters.',
+       displayName: 'CodeGeeX4 All 9B',
+       enabled: true,
+       id: 'codegeex4-all-9b',
+       tokens: 40_000,
+     },
+   ],
+   checkModel: 'Qwen2-7B-Instruct',
+   description:
+     'The Serverless API from Gitee AI provides AI developers with an out-of-the-box large-model inference API service.',
+   disableBrowserRequest: true,
+   id: 'giteeai',
+   modelList: { showModelFetcher: true },
+   modelsUrl: 'https://ai.gitee.com/docs/openapi/v1#tag/serverless/POST/chat/completions',
+   name: 'Gitee AI',
+   url: 'https://ai.gitee.com',
+ };
+ 
+ export default GiteeAI;
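Only the models flagged `enabled: true` surface by default; `Qwen2-72B-Instruct` and `Qwen2-7B-Instruct`, for instance, are listed but not pre-enabled. A rough sketch of how the card's flags play out (the relative import path is assumed from the `./giteeai` imports in the next hunk):

```ts
import GiteeAIProviderCard from './giteeai';

// Collect the model ids that ship enabled by default.
const defaultEnabledIds = GiteeAIProviderCard.chatModels
  .filter((model) => model.enabled)
  .map((model) => model.id);

console.log(defaultEnabledIds);
// -> ['Qwen2.5-72B-Instruct', 'glm-4-9b-chat', 'Yi-34B-Chat',
//     'deepseek-coder-33B-instruct', 'codegeex4-all-9b']
```

The `filterEnabledModels` helper exported later in this diff performs essentially this filtering for every provider card.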
@@ -9,6 +9,7 @@ import BedrockProvider from './bedrock';
  import CloudflareProvider from './cloudflare';
  import DeepSeekProvider from './deepseek';
  import FireworksAIProvider from './fireworksai';
+ import GiteeAIProvider from './giteeai';
  import GithubProvider from './github';
  import GoogleProvider from './google';
  import GroqProvider from './groq';
@@ -64,6 +65,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
    CloudflareProvider.chatModels,
    Ai360Provider.chatModels,
    SiliconCloudProvider.chatModels,
+   GiteeAIProvider.chatModels,
    UpstageProvider.chatModels,
    SparkProvider.chatModels,
    Ai21Provider.chatModels,
@@ -109,6 +111,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
    TaichuProvider,
    InternLMProvider,
    SiliconCloudProvider,
+   GiteeAIProvider,
  ];
  
  export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -129,6 +132,7 @@ export { default as BedrockProviderCard } from './bedrock';
  export { default as CloudflareProviderCard } from './cloudflare';
  export { default as DeepSeekProviderCard } from './deepseek';
  export { default as FireworksAIProviderCard } from './fireworksai';
+ export { default as GiteeAIProviderCard } from './giteeai';
  export { default as GithubProviderCard } from './github';
  export { default as GoogleProviderCard } from './google';
  export { default as GroqProviderCard } from './groq';
@@ -12,6 +12,7 @@ import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
  import { LobeCloudflareAI, LobeCloudflareParams } from './cloudflare';
  import { LobeDeepSeekAI } from './deepseek';
  import { LobeFireworksAI } from './fireworksai';
+ import { LobeGiteeAI } from './giteeai';
  import { LobeGithubAI } from './github';
  import { LobeGoogleAI } from './google';
  import { LobeGroq } from './groq';
@@ -137,6 +138,7 @@ class AgentRuntime {
      cloudflare: Partial<LobeCloudflareParams>;
      deepseek: Partial<ClientOptions>;
      fireworksai: Partial<ClientOptions>;
+     giteeai: Partial<ClientOptions>;
      github: Partial<ClientOptions>;
      google: { apiKey?: string; baseURL?: string };
      groq: Partial<ClientOptions>;
@@ -303,6 +305,11 @@ class AgentRuntime {
          break;
        }
  
+       case ModelProvider.GiteeAI: {
+         runtimeModel = new LobeGiteeAI(params.giteeai);
+         break;
+       }
+ 
        case ModelProvider.Upstage: {
          runtimeModel = new LobeUpstageAI(params.upstage);
          break;
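The new `case` wires the `giteeai` key of the params union to the dedicated runtime class. A hedged sketch of constructing it (the `initializeWithProviderOptions` entry point is assumed from lobe-chat's AgentRuntime API and is not part of this hunk):

```ts
import { AgentRuntime, ModelProvider } from '@/libs/agent-runtime';

// The switch above routes the `giteeai` params to `new LobeGiteeAI(...)`.
const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.GiteeAI, {
  giteeai: { apiKey: process.env.GITEE_AI_API_KEY },
});
```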
@@ -0,0 +1,255 @@
+ // @vitest-environment node
+ import OpenAI from 'openai';
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+ 
+ import {
+   ChatStreamCallbacks,
+   LobeOpenAICompatibleRuntime,
+   ModelProvider,
+ } from '@/libs/agent-runtime';
+ 
+ import * as debugStreamModule from '../utils/debugStream';
+ import { LobeGiteeAI } from './index';
+ 
+ const provider = ModelProvider.GiteeAI;
+ const defaultBaseURL = 'https://ai.gitee.com/v1';
+ 
+ const bizErrorType = 'ProviderBizError';
+ const invalidErrorType = 'InvalidProviderAPIKey';
+ 
+ // Mock console.error to avoid polluting test output
+ vi.spyOn(console, 'error').mockImplementation(() => {});
+ 
+ let instance: LobeOpenAICompatibleRuntime;
+ 
+ beforeEach(() => {
+   instance = new LobeGiteeAI({ apiKey: 'test' });
+ 
+   // Use vi.spyOn to mock the chat.completions.create method
+   vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+     new ReadableStream() as any,
+   );
+ });
+ 
+ afterEach(() => {
+   vi.clearAllMocks();
+ });
+ 
+ describe('LobeGiteeAI', () => {
+   describe('init', () => {
+     it('should correctly initialize with an API key', async () => {
+       const instance = new LobeGiteeAI({ apiKey: 'test_api_key' });
+       expect(instance).toBeInstanceOf(LobeGiteeAI);
+       expect(instance.baseURL).toEqual(defaultBaseURL);
+     });
+   });
+ 
+   describe('chat', () => {
+     describe('Error', () => {
+       it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+         // Arrange
+         const apiError = new OpenAI.APIError(
+           400,
+           {
+             status: 400,
+             error: {
+               message: 'Bad Request',
+             },
+           },
+           'Error message',
+           {},
+         );
+ 
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+ 
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'Qwen2-7B-Instruct',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: {
+               error: { message: 'Bad Request' },
+               status: 400,
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+ 
+       it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+         try {
+           new LobeGiteeAI({});
+         } catch (e) {
+           expect(e).toEqual({ errorType: invalidErrorType });
+         }
+       });
+ 
+       it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+         // Arrange
+         const errorInfo = {
+           stack: 'abc',
+           cause: {
+             message: 'api is undefined',
+           },
+         };
+         const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+ 
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+ 
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'Qwen2-7B-Instruct',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: {
+               cause: { message: 'api is undefined' },
+               stack: 'abc',
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+ 
+       it('should return OpenAIBizError with a cause response with a desensitized URL', async () => {
+         // Arrange
+         const errorInfo = {
+           stack: 'abc',
+           cause: { message: 'api is undefined' },
+         };
+         const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+ 
+         instance = new LobeGiteeAI({
+           apiKey: 'test',
+ 
+           baseURL: 'https://api.abc.com/v1',
+         });
+ 
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+ 
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'Qwen2-7B-Instruct',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: 'https://api.***.com/v1',
+             error: {
+               cause: { message: 'api is undefined' },
+               stack: 'abc',
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+ 
+       it('should throw an InvalidGiteeAIAPIKey error type on 401 status code', async () => {
+         // Mock the API call to simulate a 401 error
+         const error = new Error('Unauthorized') as any;
+         error.status = 401;
+         vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+ 
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'Qwen2-7B-Instruct',
+             temperature: 0,
+           });
+         } catch (e) {
+           // Expect the chat method to throw an error with InvalidGiteeAIAPIKey
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: new Error('Unauthorized'),
+             errorType: invalidErrorType,
+             provider,
+           });
+         }
+       });
+ 
+       it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+         // Arrange
+         const genericError = new Error('Generic Error');
+ 
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+ 
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'Qwen2-7B-Instruct',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             errorType: 'AgentRuntimeError',
+             provider,
+             error: {
+               name: genericError.name,
+               cause: genericError.cause,
+               message: genericError.message,
+               stack: genericError.stack,
+             },
+           });
+         }
+       });
+     });
+ 
+     describe('DEBUG', () => {
+       it('should call debugStream and return StreamingTextResponse when DEBUG_GITEE_AI_CHAT_COMPLETION is 1', async () => {
+         // Arrange
+         const mockProdStream = new ReadableStream() as any; // mocked production stream
+         const mockDebugStream = new ReadableStream({
+           start(controller) {
+             controller.enqueue('Debug stream content');
+             controller.close();
+           },
+         }) as any;
+         mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
+ 
+         // Mock the chat.completions.create return value, including a mocked tee method
+         (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+           tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+         });
+ 
+         // Save the original environment variable value
+         const originalDebugValue = process.env.DEBUG_GITEE_AI_CHAT_COMPLETION;
+ 
+         // Mock the environment variable
+         process.env.DEBUG_GITEE_AI_CHAT_COMPLETION = '1';
+         vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+ 
+         // Run the test
+         // Invoke the function under test so it calls debugStream when the condition is met
+         // Adjust this call as needed for the actual setup
+         await instance.chat({
+           messages: [{ content: 'Hello', role: 'user' }],
+           model: 'Qwen2-7B-Instruct',
+           stream: true,
+           temperature: 0,
+         });
+ 
+         // Verify that debugStream was called
+         expect(debugStreamModule.debugStream).toHaveBeenCalled();
+ 
+         // Restore the original environment variable value
+         process.env.DEBUG_GITEE_AI_CHAT_COMPLETION = originalDebugValue;
+       });
+     });
+   });
+ });
@@ -0,0 +1,10 @@
+ import { ModelProvider } from '../types';
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ 
+ export const LobeGiteeAI = LobeOpenAICompatibleFactory({
+   baseURL: 'https://ai.gitee.com/v1',
+   debug: {
+     chatCompletion: () => process.env.DEBUG_GITEE_AI_CHAT_COMPLETION === '1',
+   },
+   provider: ModelProvider.GiteeAI,
+ });
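The entire runtime is ten lines because Gitee AI exposes an OpenAI-compatible endpoint: `LobeOpenAICompatibleFactory` supplies the chat implementation, error mapping, and debug-stream plumbing exercised by the test suite above. A minimal usage sketch (the import path is an assumption; the `chat` signature mirrors the calls in the tests):

```ts
import { LobeGiteeAI } from '@/libs/agent-runtime/giteeai';

async function main() {
  // Setting DEBUG_GITEE_AI_CHAT_COMPLETION=1 additionally tees the
  // response stream through debugStream, as the DEBUG test verifies.
  const runtime = new LobeGiteeAI({ apiKey: process.env.GITEE_AI_API_KEY });

  const response = await runtime.chat({
    messages: [{ content: 'Hello', role: 'user' }],
    model: 'Qwen2-7B-Instruct',
    temperature: 0,
  });

  console.log(response);
}
```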
@@ -565,7 +565,7 @@ exports[`LobeTogetherAI > models > should get models 1`] = `
    {
      "description": "The Yi series models are large language models trained from scratch by developers at 01.AI",
      "displayName": "01-ai Yi Chat (34B)",
-     "enabled": false,
+     "enabled": true,
      "functionCall": false,
      "id": "zero-one-ai/Yi-34B-Chat",
      "maxOutput": 4096,
@@ -31,6 +31,7 @@ export enum ModelProvider {
    Cloudflare = 'cloudflare',
    DeepSeek = 'deepseek',
    FireworksAI = 'fireworksai',
+   GiteeAI = 'giteeai',
    Github = 'github',
    Google = 'google',
    Groq = 'groq',
@@ -29,6 +29,10 @@ export const getServerGlobalConfig = () => {
      enabledKey: 'ENABLED_AWS_BEDROCK',
      modelListKey: 'AWS_BEDROCK_MODEL_LIST',
    },
+   giteeai: {
+     enabledKey: 'ENABLED_GITEE_AI',
+     modelListKey: 'GITEE_AI_MODEL_LIST',
+   },
    ollama: {
      fetchOnClient: !process.env.OLLAMA_PROXY_URL,
    },
@@ -233,6 +233,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
  
        return { apiKey, baseURL };
      }
+     case ModelProvider.GiteeAI: {
+       const { GITEE_AI_API_KEY } = getLLMConfig();
+ 
+       const apiKey = apiKeyManager.pick(payload?.apiKey || GITEE_AI_API_KEY);
+ 
+       return { apiKey };
+     }
  
      case ModelProvider.HuggingFace: {
        const { HUGGINGFACE_API_KEY } = getLLMConfig();
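Key resolution follows the established precedence: a key supplied in the user's JWT payload wins, otherwise the server-side `GITEE_AI_API_KEY` is used; `apiKeyManager.pick` is the same helper the other cases call (elsewhere in lobe-chat it also rotates among comma-separated keys, which is assumed rather than shown here). In sketch form:

```ts
// Hypothetical illustration of the precedence in the case above;
// apiKeyManager.pick then selects one concrete key from the result.
const resolveGiteeAIKey = (payloadApiKey: string | undefined, serverKey: string | undefined) =>
  payloadApiKey || serverKey;

resolveGiteeAIKey('user-key', 'server-key'); // -> 'user-key'
resolveGiteeAIKey(undefined, 'server-key'); // -> 'server-key'
```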
@@ -41,6 +41,7 @@ export interface UserKeyVaults {
    cloudflare?: CloudflareKeyVault;
    deepseek?: OpenAICompatibleKeyVault;
    fireworksai?: OpenAICompatibleKeyVault;
+   giteeai?: OpenAICompatibleKeyVault;
    github?: OpenAICompatibleKeyVault;
    google?: OpenAICompatibleKeyVault;
    groq?: OpenAICompatibleKeyVault;