@lobehub/chat 1.55.0 → 1.55.1

This diff shows the changes between publicly available package versions as published to their respective public registries; it is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,39 @@

  # Changelog

+ ### [Version 1.55.1](https://github.com/lobehub/lobe-chat/compare/v1.55.0...v1.55.1)
+
+ <sup>Released on **2025-02-15**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Fix Azure OpenAI O1 models and refactor the Azure OpenAI implementation.
+
+ #### 💄 Styles
+
+ - **misc**: Update openrouter model list and descriptions.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Fix Azure OpenAI O1 models and refactor the Azure OpenAI implementation, closes [#6079](https://github.com/lobehub/lobe-chat/issues/6079) ([6a89a8c](https://github.com/lobehub/lobe-chat/commit/6a89a8c))
+
+ #### Styles
+
+ - **misc**: Update openrouter model list and descriptions, closes [#6160](https://github.com/lobehub/lobe-chat/issues/6160) ([3ce0485](https://github.com/lobehub/lobe-chat/commit/3ce0485))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ## [Version 1.55.0](https://github.com/lobehub/lobe-chat/compare/v1.54.0...v1.55.0)

  <sup>Released on **2025-02-14**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,16 @@
  [
+   {
+     "children": {
+       "fixes": [
+         "Fix Azure OpenAI O1 models and refactor the Azure OpenAI implementation."
+       ],
+       "improvements": [
+         "Update openrouter model list and descriptions."
+       ]
+     },
+     "date": "2025-02-15",
+     "version": "1.55.1"
+   },
    {
      "children": {
        "features": [
@@ -0,0 +1,33 @@
+ ---
+ title: Deploy LobeChat on TencentCloud Lighthouse
+ description: Learn how to deploy the LobeChat application on TencentCloud Lighthouse, including preparing the large model API Key, clicking the deploy button, and other operations.
+ tags:
+   - TencentCloud Lighthouse
+   - TencentCloud
+   - LobeChat
+   - API Key
+ ---
+
+ # Deploy LobeChat with TencentCloud Lighthouse
+
+ If you want to deploy LobeChat on TencentCloud Lighthouse, you can follow the steps below:
+
+ ## Tencent Cloud Deployment Process
+
+ <Steps>
+
+ ### Prepare your API Key
+
+ Go to [OpenAI API Key](https://platform.openai.com/account/api-keys) to get your OpenAI API Key.
+
+
+ ### One-click to deploy
+
+ [![][deploy-button-image]][deploy-link]
+
+ ### Once deployed, you can start using it
+
+ </Steps>
+
+ [deploy-button-image]: https://cloudcache.tencent-cloud.com/qcloud/ui/static/static_source_business/d65fb782-4fb0-4348-ad85-f2943d6bee8f.svg
+ [deploy-link]: https://buy.tencentcloud.com/lighthouse?blueprintType=APP_OS&blueprintOfficialId=lhbp-6u0ti132&regionId=9&zone=ap-singapore-3&bundleId=bundle_starter_nmc_lin_med2_01&loginSet=AUTO&rule=true&from=lobechat
@@ -0,0 +1,33 @@
+ ---
+ title: Deploy LobeChat on TencentCloud Lighthouse
+ description: Learn how to quickly deploy the LobeChat application on TencentCloud Lighthouse, including preparing the large model API Key and clicking the deploy button.
+ tags:
+   - Tencent Cloud
+   - TencentCloud Lighthouse
+   - LobeChat
+   - Deployment Process
+   - Large Model API Key
+ ---
+
+ # Deploy with TencentCloud Lighthouse
+
+ If you want to deploy LobeChat on Tencent Cloud, you can follow the steps below:
+
+ ## TencentCloud Lighthouse Deployment Process
+
+ <Steps>
+
+ ### Prepare your API Key
+
+ Go to [OpenAI API Key](https://platform.openai.com/account/api-keys) to get your OpenAI API Key.
+
+ ### Click the button below to deploy
+
+ [![][deploy-button-image]][deploy-link]
+
+ ### Once deployed, you can start using it
+
+ </Steps>
+
+ [deploy-button-image]: https://cloudcache.tencent-cloud.com/qcloud/ui/static/static_source_business/d65fb782-4fb0-4348-ad85-f2943d6bee8f.svg
+ [deploy-link]: https://buy.cloud.tencent.com/lighthouse?blueprintType=APP_OS&blueprintOfficialId=lhbp-6u0ti132&regionId=8&zone=ap-beijing-3&bundleId=bundle_starter_mc_med2_01&loginSet=AUTO&rule=true&from=lobechat
@@ -11,11 +11,13 @@ tags:
    - Vercel
    - Alibaba Cloud Compute Nest
    - Personalization
+   - Tencent Cloud
+   - TencentCloud Lighthouse
  ---

  # Build Your Own Lobe Chat

- LobeChat supports deployment on multiple platforms, including Vercel, Docker, Docker Compose, and Alibaba Cloud Compute Nest. You can choose the platform that suits you to deploy and build your own Lobe Chat.
+ LobeChat supports deployment on multiple platforms, including Vercel, Docker, Docker Compose, Alibaba Cloud Compute Nest, and TencentCloud Lighthouse. You can choose the platform that suits you to deploy and build your own Lobe Chat.

  ## Quick Deployment

package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.55.0",
+   "version": "1.55.1",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -109,8 +109,6 @@
      "@aws-sdk/client-bedrock-runtime": "^3.723.0",
      "@aws-sdk/client-s3": "^3.723.0",
      "@aws-sdk/s3-request-presigner": "^3.723.0",
-     "@azure/core-rest-pipeline": "1.16.0",
-     "@azure/openai": "1.0.0-beta.12",
      "@cfworker/json-schema": "^4.1.0",
      "@clerk/localizations": "^3.9.6",
      "@clerk/nextjs": "^6.10.6",
@@ -229,6 +229,36 @@ const openrouterChatModels: AIChatModelCard[] = [
    releasedAt: '2024-09-05',
    type: 'chat',
  },
+ {
+   abilities: {
+     reasoning: true,
+   },
+   contextWindowTokens: 163_840,
+   description:
+     'DeepSeek-R1 greatly improves model reasoning ability with only minimal labeled data. Before producing its final answer, the model first outputs a chain-of-thought passage to improve the accuracy of the final response.',
+   displayName: 'DeepSeek R1',
+   enabled: true,
+   id: 'deepseek/deepseek-r1',
+   pricing: {
+     input: 3,
+     output: 8,
+   },
+   releasedAt: '2025-01-20',
+   type: 'chat',
+ },
+ {
+   abilities: {
+     reasoning: true,
+   },
+   contextWindowTokens: 163_840,
+   description:
+     'DeepSeek-R1 greatly improves model reasoning ability with only minimal labeled data. Before producing its final answer, the model first outputs a chain-of-thought passage to improve the accuracy of the final response.',
+   displayName: 'DeepSeek R1 (Free)',
+   enabled: true,
+   id: 'deepseek/deepseek-r1:free',
+   releasedAt: '2025-01-20',
+   type: 'chat',
+ },
  {
    abilities: {
      vision: true,
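
For orientation, the new entries above exercise a model-card shape roughly like the sketch below. The interface is inferred from the fields used in this diff, not taken from the package's actual `AIChatModelCard` declaration, so field names and optionality are assumptions:

```ts
// Shape implied by the DeepSeek R1 entries above; inferred from usage,
// not the package's real AIChatModelCard type.
interface AIChatModelCardSketch {
  abilities?: { reasoning?: boolean; vision?: boolean };
  contextWindowTokens: number;
  description: string;
  displayName: string;
  enabled?: boolean;
  id: string;
  pricing?: { input: number; output: number }; // seemingly USD per million tokens
  releasedAt?: string; // e.g. '2025-01-20'
  type: 'chat';
}
```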
@@ -228,6 +228,15 @@ const OpenRouter: ModelProviderCard = {
      },
      releasedAt: '2025-01-20',
    },
+   {
+     contextWindowTokens: 163_840,
+     description: 'DeepSeek-R1',
+     displayName: 'DeepSeek R1 (Free)',
+     enabled: true,
+     functionCall: false,
+     id: 'deepseek/deepseek-r1:free',
+     releasedAt: '2025-01-20',
+   },
    {
      contextWindowTokens: 131_072,
      description:
@@ -107,6 +107,7 @@ describe('AgentRuntime', () => {
      const jwtPayload = {
        apiKey: 'user-azure-key',
        baseURL: 'user-azure-endpoint',
+       apiVersion: '2024-06-01',
      };
      const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Azure, {
        azure: jwtPayload,
@@ -1,9 +1,9 @@
  // @vitest-environment node
- import { AzureKeyCredential, OpenAIClient } from '@azure/openai';
- import OpenAI from 'openai';
+ import { AzureOpenAI } from 'openai';
  import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

  import * as debugStreamModule from '../utils/debugStream';
+ import * as openaiCompatibleFactoryModule from '../utils/openaiCompatibleFactory';
  import { LobeAzureOpenAI } from './index';

  const bizErrorType = 'ProviderBizError';
@@ -23,7 +23,7 @@ describe('LobeAzureOpenAI', () => {
      );

      // Use vi.spyOn to mock the streamChatCompletions method
-     vi.spyOn(instance['client'], 'streamChatCompletions').mockResolvedValue(
+     vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
        new ReadableStream() as any,
      );
    });
@@ -48,7 +48,7 @@ describe('LobeAzureOpenAI', () => {

      const instance = new LobeAzureOpenAI(endpoint, apikey, apiVersion);

-     expect(instance.client).toBeInstanceOf(OpenAIClient);
+     expect(instance.client).toBeInstanceOf(AzureOpenAI);
      expect(instance.baseURL).toBe(endpoint);
    });
  });
@@ -59,7 +59,7 @@ describe('LobeAzureOpenAI', () => {
      const mockStream = new ReadableStream();
      const mockResponse = Promise.resolve(mockStream);

-     (instance['client'].streamChatCompletions as Mock).mockResolvedValue(mockResponse);
+     (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);

      // Act
      const result = await instance.chat({
@@ -164,7 +164,9 @@ describe('LobeAzureOpenAI', () => {
          controller.close();
        },
      });
-     vi.spyOn(instance['client'], 'streamChatCompletions').mockResolvedValue(mockStream as any);
+     vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+       mockStream as any,
+     );

      const result = await instance.chat({
        stream: true,
@@ -204,6 +206,42 @@ describe('LobeAzureOpenAI', () => {
        ].map((item) => `${item}\n`),
      );
    });
+
+   it('should handle non-streaming response', async () => {
+     vi.spyOn(openaiCompatibleFactoryModule, 'transformResponseToStream').mockImplementation(
+       () => {
+         return new ReadableStream();
+       },
+     );
+     // Act
+     await instance.chat({
+       stream: false,
+       temperature: 0.6,
+       model: 'gpt-35-turbo-16k',
+       messages: [{ role: 'user', content: '你好' }],
+     });
+
+     // Assert
+     expect(openaiCompatibleFactoryModule.transformResponseToStream).toHaveBeenCalled();
+   });
+ });
+
+ it('should handle o1 series models without streaming', async () => {
+   vi.spyOn(openaiCompatibleFactoryModule, 'transformResponseToStream').mockImplementation(
+     () => {
+       return new ReadableStream();
+     },
+   );
+
+   // Act
+   await instance.chat({
+     temperature: 0.6,
+     model: 'o1-preview',
+     messages: [{ role: 'user', content: '你好' }],
+   });
+
+   // Assert
+   expect(openaiCompatibleFactoryModule.transformResponseToStream).toHaveBeenCalled();
  });

  describe('Error', () => {
@@ -214,7 +252,7 @@ describe('LobeAzureOpenAI', () => {
        message: 'Deployment not found',
      };

-     (instance['client'].streamChatCompletions as Mock).mockRejectedValue(error);
+     (instance['client'].chat.completions.create as Mock).mockRejectedValue(error);

      // Act
      try {
@@ -242,7 +280,7 @@ describe('LobeAzureOpenAI', () => {
      // Arrange
      const genericError = new Error('Generic Error');

-     (instance['client'].streamChatCompletions as Mock).mockRejectedValue(genericError);
+     (instance['client'].chat.completions.create as Mock).mockRejectedValue(genericError);

      // Act
      try {
@@ -279,7 +317,7 @@ describe('LobeAzureOpenAI', () => {
      }) as any;
      mockDebugStream.toReadableStream = () => mockDebugStream;

-     (instance['client'].streamChatCompletions as Mock).mockResolvedValue({
+     (instance['client'].chat.completions.create as Mock).mockResolvedValue({
        tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
      });

@@ -1,26 +1,28 @@
- import {
-   AzureKeyCredential,
-   ChatRequestMessage,
-   GetChatCompletionsOptions,
-   OpenAIClient,
- } from '@azure/openai';
+ import OpenAI, { AzureOpenAI } from 'openai';
+ import type { Stream } from 'openai/streaming';

  import { LobeRuntimeAI } from '../BaseAI';
  import { AgentRuntimeErrorType } from '../error';
  import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
  import { AgentRuntimeError } from '../utils/createError';
  import { debugStream } from '../utils/debugStream';
+ import { transformResponseToStream } from '../utils/openaiCompatibleFactory';
  import { StreamingResponse } from '../utils/response';
- import { AzureOpenAIStream } from '../utils/streams';
+ import { OpenAIStream } from '../utils/streams';

  export class LobeAzureOpenAI implements LobeRuntimeAI {
-   client: OpenAIClient;
+   client: AzureOpenAI;

    constructor(endpoint?: string, apikey?: string, apiVersion?: string) {
      if (!apikey || !endpoint)
        throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);

-     this.client = new OpenAIClient(endpoint, new AzureKeyCredential(apikey), { apiVersion });
+     this.client = new AzureOpenAI({
+       apiKey: apikey,
+       apiVersion,
+       dangerouslyAllowBrowser: true,
+       endpoint,
+     });

      this.baseURL = endpoint;
    }
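
Taken together, these import and constructor changes drop the deprecated `@azure/openai` beta SDK in favor of the `AzureOpenAI` class that ships with the mainline `openai` package. A minimal standalone sketch of the new wiring, using placeholder credentials rather than anything from the diff:

```ts
import { AzureOpenAI } from 'openai';

// Placeholder endpoint, key, and API version for illustration only.
const client = new AzureOpenAI({
  apiKey: 'my-azure-api-key',
  apiVersion: '2024-06-01',
  endpoint: 'https://my-resource.openai.azure.com',
});

// Requests now go through the standard OpenAI surface; on Azure, the
// model field names the deployment.
const completion = await client.chat.completions.create({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'gpt-35-turbo-16k',
});
console.log(completion.choices[0]?.message?.content);
```

The runtime's constructor additionally passes `dangerouslyAllowBrowser: true`, which the SDK requires whenever the client may be instantiated in a browser context.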
@@ -28,28 +30,33 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
    baseURL: string;

    async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
-     // ============ 1. preprocess messages ============ //
-     const camelCasePayload = this.camelCaseKeys(payload);
-     const { messages, model, maxTokens = 2048, ...params } = camelCasePayload;
-
-     // ============ 2. send api ============ //
-
+     const { messages, model, ...params } = payload;
+     // o1 series models on Azure OpenAI do not currently support streaming
+     const enableStreaming = model.startsWith('o1') ? false : (params.stream ?? true);
      try {
-       const response = await this.client.streamChatCompletions(
+       const response = await this.client.chat.completions.create({
+         messages: messages as OpenAI.ChatCompletionMessageParam[],
          model,
-         messages as ChatRequestMessage[],
-         { ...params, abortSignal: options?.signal, maxTokens } as GetChatCompletionsOptions,
-       );
-
-       const [debug, prod] = response.tee();
-
-       if (process.env.DEBUG_AZURE_CHAT_COMPLETION === '1') {
-         debugStream(debug).catch(console.error);
-       }
-
-       return StreamingResponse(AzureOpenAIStream(prod, options?.callback), {
-         headers: options?.headers,
+         ...params,
+         max_completion_tokens: 2048,
+         stream: enableStreaming,
+         tool_choice: params.tools ? 'auto' : undefined,
        });
+       if (enableStreaming) {
+         const stream = response as Stream<OpenAI.ChatCompletionChunk>;
+         const [prod, debug] = stream.tee();
+         if (process.env.DEBUG_AZURE_CHAT_COMPLETION === '1') {
+           debugStream(debug.toReadableStream()).catch(console.error);
+         }
+         return StreamingResponse(OpenAIStream(prod, { callbacks: options?.callback }), {
+           headers: options?.headers,
+         });
+       } else {
+         const stream = transformResponseToStream(response as OpenAI.ChatCompletion);
+         return StreamingResponse(OpenAIStream(stream, { callbacks: options?.callback }), {
+           headers: options?.headers,
+         });
+       }
      } catch (e) {
        let error = e as { [key: string]: any; code: string; message: string };

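
`transformResponseToStream` is imported from the shared OpenAI-compatible factory and its body is not part of this diff. Conceptually it adapts a completed `ChatCompletion` into the same chunk stream the streaming branch yields, so both paths can feed `OpenAIStream`. A hypothetical simplification of that idea (the package's real helper may differ):

```ts
import OpenAI from 'openai';

// Hypothetical sketch: wrap a finished completion in a one-shot stream of
// chunk-shaped objects so the non-streaming branch can reuse the streaming
// pipeline. Not the package's actual implementation.
const toStreamSketch = (response: OpenAI.ChatCompletion) =>
  new ReadableStream<OpenAI.ChatCompletionChunk>({
    start(controller) {
      controller.enqueue({
        choices: response.choices.map((choice) => ({
          delta: { content: choice.message.content, role: choice.message.role },
          finish_reason: choice.finish_reason,
          index: choice.index,
        })),
        created: response.created,
        id: response.id,
        model: response.model,
        object: 'chat.completion.chunk',
      } as OpenAI.ChatCompletionChunk);
      controller.close();
    },
  });
```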
@@ -1,5 +1,4 @@
  export * from './anthropic';
- export * from './azureOpenai';
  export * from './bedrock';
  export * from './google-ai';
  export * from './ollama';
@@ -223,7 +223,9 @@ describe('initAgentRuntimeWithUserPayload method', () => {
    });

    it('Azure AI Provider: without apikey', async () => {
-     const jwtPayload: JWTPayload = {};
+     const jwtPayload: JWTPayload = {
+       azureApiVersion: 'test-azure-api-version',
+     };
      const runtime = await initAgentRuntimeWithUserPayload(ModelProvider.Azure, jwtPayload);

      expect(runtime['_runtime']).toBeInstanceOf(LobeAzureOpenAI);