@lobehub/chat 1.142.9 → 1.143.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83) hide show
  1. package/.env.example +11 -0
  2. package/CHANGELOG.md +25 -0
  3. package/changelog/v1.json +9 -0
  4. package/docs/self-hosting/environment-variables/basic.mdx +49 -3
  5. package/docs/self-hosting/environment-variables/basic.zh-CN.mdx +49 -4
  6. package/package.json +2 -1
  7. package/packages/fetch-sse/package.json +29 -0
  8. package/packages/{utils/src/fetch → fetch-sse/src}/__tests__/fetchSSE.test.ts +4 -4
  9. package/packages/{utils/src/fetch → fetch-sse/src}/__tests__/parseError.test.ts +7 -4
  10. package/packages/{utils/src/fetch → fetch-sse/src}/fetchSSE.ts +2 -2
  11. package/packages/{utils/src/fetch → fetch-sse/src}/parseError.ts +3 -3
  12. package/packages/model-runtime/src/core/contextBuilders/anthropic.test.ts +17 -11
  13. package/packages/model-runtime/src/core/contextBuilders/anthropic.ts +1 -1
  14. package/packages/model-runtime/src/core/contextBuilders/google.test.ts +1 -1
  15. package/packages/model-runtime/src/core/contextBuilders/google.ts +3 -6
  16. package/packages/model-runtime/src/core/contextBuilders/openai.test.ts +93 -7
  17. package/packages/model-runtime/src/core/contextBuilders/openai.ts +45 -14
  18. package/packages/model-runtime/src/core/openaiCompatibleFactory/createImage.test.ts +1 -1
  19. package/packages/model-runtime/src/core/openaiCompatibleFactory/createImage.ts +1 -1
  20. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.test.ts +3 -6
  21. package/packages/model-runtime/src/core/streams/openai/responsesStream.test.ts +1 -1
  22. package/packages/model-runtime/src/helpers/mergeChatMethodOptions.ts +2 -1
  23. package/packages/model-runtime/src/providers/aihubmix/index.test.ts +1 -1
  24. package/packages/model-runtime/src/providers/anthropic/generateObject.test.ts +1 -1
  25. package/packages/model-runtime/src/providers/anthropic/index.test.ts +1 -1
  26. package/packages/model-runtime/src/providers/baichuan/index.test.ts +1 -1
  27. package/packages/model-runtime/src/providers/bedrock/index.test.ts +1 -1
  28. package/packages/model-runtime/src/providers/bfl/createImage.test.ts +4 -4
  29. package/packages/model-runtime/src/providers/bfl/createImage.ts +1 -1
  30. package/packages/model-runtime/src/providers/cloudflare/index.test.ts +1 -1
  31. package/packages/model-runtime/src/providers/cohere/index.test.ts +1 -1
  32. package/packages/model-runtime/src/providers/deepseek/index.test.ts +86 -0
  33. package/packages/model-runtime/src/providers/deepseek/index.ts +24 -0
  34. package/packages/model-runtime/src/providers/google/createImage.test.ts +2 -2
  35. package/packages/model-runtime/src/providers/google/createImage.ts +1 -1
  36. package/packages/model-runtime/src/providers/google/generateObject.test.ts +1 -1
  37. package/packages/model-runtime/src/providers/google/index.test.ts +1 -4
  38. package/packages/model-runtime/src/providers/groq/index.test.ts +1 -1
  39. package/packages/model-runtime/src/providers/hunyuan/index.test.ts +1 -1
  40. package/packages/model-runtime/src/providers/minimax/createImage.test.ts +1 -1
  41. package/packages/model-runtime/src/providers/mistral/index.test.ts +1 -1
  42. package/packages/model-runtime/src/providers/moonshot/index.test.ts +1 -1
  43. package/packages/model-runtime/src/providers/novita/index.test.ts +1 -1
  44. package/packages/model-runtime/src/providers/ollama/index.test.ts +43 -32
  45. package/packages/model-runtime/src/providers/ollama/index.ts +31 -7
  46. package/packages/model-runtime/src/providers/openrouter/index.test.ts +1 -1
  47. package/packages/model-runtime/src/providers/perplexity/index.test.ts +1 -1
  48. package/packages/model-runtime/src/providers/ppio/index.test.ts +1 -1
  49. package/packages/model-runtime/src/providers/qwen/createImage.test.ts +1 -1
  50. package/packages/model-runtime/src/providers/search1api/index.test.ts +1 -1
  51. package/packages/model-runtime/src/providers/siliconcloud/createImage.ts +1 -1
  52. package/packages/model-runtime/src/providers/taichu/index.test.ts +1 -1
  53. package/packages/model-runtime/src/providers/wenxin/index.test.ts +1 -1
  54. package/packages/model-runtime/src/providers/zhipu/index.test.ts +1 -1
  55. package/packages/model-runtime/src/types/chat.ts +4 -0
  56. package/packages/model-runtime/src/utils/errorResponse.test.ts +1 -1
  57. package/packages/ssrf-safe-fetch/index.browser.ts +14 -0
  58. package/packages/ssrf-safe-fetch/package.json +8 -1
  59. package/packages/utils/src/imageToBase64.ts +17 -10
  60. package/packages/utils/src/index.ts +1 -1
  61. package/src/features/AgentSetting/AgentTTS/SelectWithTTSPreview.tsx +1 -1
  62. package/src/features/AgentSetting/store/action.ts +1 -1
  63. package/src/features/ChatInput/ActionBar/STT/browser.tsx +1 -1
  64. package/src/features/ChatInput/ActionBar/STT/openai.tsx +1 -1
  65. package/src/features/Conversation/components/Extras/TTS/InitPlayer.tsx +1 -1
  66. package/src/libs/mcp/__tests__/__snapshots__/index.test.ts.snap +0 -1
  67. package/src/server/globalConfig/genServerAiProviderConfig.test.ts +5 -5
  68. package/src/server/globalConfig/genServerAiProviderConfig.ts +1 -1
  69. package/src/services/chat/chat.test.ts +5 -5
  70. package/src/services/chat/clientModelRuntime.test.ts +1 -1
  71. package/src/services/chat/index.ts +6 -6
  72. package/src/services/chat/types.ts +1 -1
  73. package/src/services/models.ts +2 -1
  74. package/{packages/utils/src → src/utils}/electron/desktopRemoteRPCFetch.ts +1 -1
  75. package/{packages/utils/src → src/utils/server}/parseModels.ts +1 -2
  76. package/vitest.config.mts +2 -0
  77. package/packages/model-runtime/src/utils/imageToBase64.test.ts +0 -91
  78. package/packages/model-runtime/src/utils/imageToBase64.ts +0 -62
  79. /package/packages/{utils/src/fetch → fetch-sse/src}/headers.ts +0 -0
  80. /package/packages/{utils/src/fetch → fetch-sse/src}/index.ts +0 -0
  81. /package/packages/{utils/src/fetch → fetch-sse/src}/request.ts +0 -0
  82. /package/{packages/utils/src → src/utils/server}/__snapshots__/parseModels.test.ts.snap +0 -0
  83. /package/{packages/utils/src → src/utils/server}/parseModels.test.ts +0 -0
package/.env.example CHANGED
@@ -13,6 +13,17 @@
13
13
  # Default is '0' (enabled)
14
14
  # ENABLED_CSP=1
15
15
 
16
+ # SSRF Protection Settings
17
+ # Set to '1' to allow connections to private IP addresses (disables SSRF protection)
18
+ # WARNING: Only enable this in trusted environments
19
+ # Default is '0' (SSRF protection enabled)
20
+ # SSRF_ALLOW_PRIVATE_IP_ADDRESS=0
21
+
22
+ # Whitelist of allowed private IP addresses (comma-separated)
23
+ # Only takes effect when SSRF_ALLOW_PRIVATE_IP_ADDRESS is '0'
24
+ # Example: Allow specific internal servers while keeping SSRF protection
25
+ # SSRF_ALLOW_IP_ADDRESS_LIST=192.168.1.100,10.0.0.50
26
+
16
27
  ########################################
17
28
  ########## AI Provider Service #########
18
29
  ########################################
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ## [Version 1.143.0](https://github.com/lobehub/lobe-chat/compare/v1.142.9...v1.143.0)
6
+
7
+ <sup>Released on **2025-12-01**</sup>
8
+
9
+ #### ✨ Features
10
+
11
+ - **misc**: Support DeepSeek Interleaved thinking.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's improved
19
+
20
+ - **misc**: Support DeepSeek Interleaved thinking, closes [#10478](https://github.com/lobehub/lobe-chat/issues/10478) [#10219](https://github.com/lobehub/lobe-chat/issues/10219) [#10152](https://github.com/lobehub/lobe-chat/issues/10152) ([aee5d71](https://github.com/lobehub/lobe-chat/commit/aee5d71))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
5
30
  ### [Version 1.142.9](https://github.com/lobehub/lobe-chat/compare/v1.142.8...v1.142.9)
6
31
 
7
32
  <sup>Released on **2025-11-02**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "features": [
5
+ "Support DeepSeek Interleaved thinking."
6
+ ]
7
+ },
8
+ "date": "2025-12-01",
9
+ "version": "1.143.0"
10
+ },
2
11
  {
3
12
  "children": {
4
13
  "fixes": [
@@ -127,16 +127,62 @@ For specific content, please refer to the [Feature Flags](/docs/self-hosting/adv
127
127
  ### `SSRF_ALLOW_PRIVATE_IP_ADDRESS`
128
128
 
129
129
  - Type: Optional
130
- - Description: Allow to connect private IP address. In a trusted environment, it can be set to true to turn off SSRF protection.
130
+ - Description: Controls whether to allow connections to private IP addresses. Set to `1` to disable SSRF protection and allow all private IP addresses. In a trusted environment (e.g., internal network), this can be enabled to allow access to internal resources.
131
131
  - Default: `0`
132
132
  - Example: `1` or `0`
133
133
 
134
+ <Callout type="warning">
135
+ **Security Notice**: Enabling this option will disable SSRF protection and allow connections to private
136
+ IP addresses (127.0.0.0/8, 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, etc.). Only enable this in
137
+ trusted environments where you need to access internal network resources.
138
+ </Callout>
139
+
140
+ **Use Cases**:
141
+
142
+ LobeChat performs SSRF security checks in the following scenarios:
143
+
144
+ 1. **Image/Video URL to Base64 Conversion**: When processing media messages (e.g., vision models, multimodal models), LobeChat converts image and video URLs to base64 format. This check prevents malicious users from accessing internal network resources.
145
+
146
+ Examples:
147
+
148
+ - Image: A user sends an image message with URL `http://192.168.1.100/admin/secrets.png`
149
+ - Video: A user sends a video message with URL `http://10.0.0.50/internal/meeting.mp4`
150
+
151
+ Without SSRF protection, these requests could expose internal network resources.
152
+
153
+ 2. **Web Crawler**: When using web crawling features to fetch external content.
154
+
155
+ 3. **Proxy Requests**: When proxying external API requests.
156
+
157
+ **Configuration Examples**:
158
+
159
+ ```bash
160
+ # Scenario 1: Public deployment (recommended)
161
+ # Block all private IP addresses for security
162
+ SSRF_ALLOW_PRIVATE_IP_ADDRESS=0
163
+
164
+ # Scenario 2: Internal deployment
165
+ # Allow all private IP addresses to access internal image servers
166
+ SSRF_ALLOW_PRIVATE_IP_ADDRESS=1
167
+
168
+ # Scenario 3: Hybrid deployment (most common)
169
+ # Block private IPs by default, but allow specific trusted internal servers
170
+ SSRF_ALLOW_PRIVATE_IP_ADDRESS=0
171
+ SSRF_ALLOW_IP_ADDRESS_LIST=192.168.1.100,10.0.0.50
172
+ ```
173
+
134
174
  ### `SSRF_ALLOW_IP_ADDRESS_LIST`
135
175
 
136
176
  - Type: Optional
137
- - Description: Allow private IP address list, multiple IP addresses are separated by commas. Only when `SSRF_ALLOW_PRIVATE_IP_ADDRESS` is `0`, it takes effect.
177
+ - Description: Whitelist of allowed IP addresses, separated by commas. Only takes effect when `SSRF_ALLOW_PRIVATE_IP_ADDRESS` is `0`. Use this to allow specific internal IP addresses while keeping SSRF protection enabled for other private IPs.
138
178
  - Default: -
139
- - Example: `198.18.1.62,224.0.0.3`
179
+ - Example: `192.168.1.100,10.0.0.50,172.16.0.10`
180
+
181
+ **Common Use Cases**:
182
+
183
+ - Allow access to internal image storage server: `192.168.1.100`
184
+ - Allow access to internal API gateway: `10.0.0.50`
185
+ - Allow access to internal documentation server: `172.16.0.10`
140
186
 
141
187
  ### `ENABLE_AUTH_PROTECTION`
142
188
 
@@ -123,16 +123,61 @@ LobeChat 在部署时提供了一些额外的配置项,你可以使用环境
123
123
  ### `SSRF_ALLOW_PRIVATE_IP_ADDRESS`
124
124
 
125
125
  - 类型:可选
126
- - 描述:是否允许连接私有 IP 地址。在可信环境中可以设置为 true 来关闭 SSRF 防护。
126
+ - 描述:控制是否允许连接私有 IP 地址。设置为 `1` 时将关闭 SSRF 防护并允许所有私有 IP 地址。在可信环境(如内网部署)中,可以启用此选项以访问内部资源。
127
127
  - 默认值:`0`
128
- - 示例:`1` or `0`
128
+ - 示例:`1` 或 `0`
129
+
130
+ <Callout type="warning">
131
+ **安全提示**:启用此选项将关闭 SSRF 防护,允许连接私有 IP 地址段(127.0.0.0/8、10.0.0.0/8、172.16.0.0/12、192.168.0.0/16
132
+ 等)。仅在需要访问内网资源的可信环境中启用。
133
+ </Callout>
134
+
135
+ **应用场景**:
136
+
137
+ LobeChat 会在以下场景执行 SSRF 安全检查:
138
+
139
+ 1. **图片 / 视频 URL 转 Base64**:在处理媒体消息时(例如视觉模型、多模态模型),LobeChat 会将图片和视频 URL 转换为 base64 格式。此检查可防止恶意用户通过媒体 URL 访问内网资源。
140
+
141
+ 举例:
142
+
143
+ - 图片:用户发送图片消息,URL 为 `http://192.168.1.100/admin/secrets.png`
144
+ - 视频:用户发送视频消息,URL 为 `http://10.0.0.50/internal/meeting.mp4`
145
+
146
+ 若无 SSRF 防护,这些请求可能导致内网资源泄露。
147
+
148
+ 2. **网页爬取**:使用网页爬取功能获取外部内容时。
149
+
150
+ 3. **代理请求**:代理外部 API 请求时。
151
+
152
+ **配置示例**:
153
+
154
+ ```bash
155
+ # 场景 1:公网部署(推荐)
156
+ # 阻止所有私有 IP 访问,保证安全
157
+ SSRF_ALLOW_PRIVATE_IP_ADDRESS=0
158
+
159
+ # 场景 2:内网部署
160
+ # 允许所有私有 IP,可访问内网图片服务器等资源
161
+ SSRF_ALLOW_PRIVATE_IP_ADDRESS=1
162
+
163
+ # 场景 3:混合部署(最常见)
164
+ # 默认阻止私有 IP,但允许特定可信的内网服务器
165
+ SSRF_ALLOW_PRIVATE_IP_ADDRESS=0
166
+ SSRF_ALLOW_IP_ADDRESS_LIST=192.168.1.100,10.0.0.50
167
+ ```
129
168
 
130
169
  ### `SSRF_ALLOW_IP_ADDRESS_LIST`
131
170
 
132
171
  - 类型:可选
133
- - 说明:允许的私有 IP 地址列表,多个 IP 地址用逗号分隔。仅在 `SSRF_ALLOW_PRIVATE_IP_ADDRESS` 为 `0` 时生效。
172
+ - 描述:允许访问的 IP 地址白名单,多个 IP 地址用逗号分隔。仅在 `SSRF_ALLOW_PRIVATE_IP_ADDRESS` 为 `0` 时生效。使用此选项可以在保持 SSRF 防护的同时,允许访问特定的内网 IP 地址。
134
173
  - 默认值:-
135
- - 示例:`198.18.1.62,224.0.0.3`
174
+ - 示例:`192.168.1.100,10.0.0.50,172.16.0.10`
175
+
176
+ **常见使用场景**:
177
+
178
+ - 允许访问内网图片存储服务器:`192.168.1.100`
179
+ - 允许访问内网 API 网关:`10.0.0.50`
180
+ - 允许访问内网文档服务器:`172.16.0.10`
136
181
 
137
182
  ### `ENABLE_AUTH_PROTECTION`
138
183
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.142.9",
3
+ "version": "1.143.0",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -156,6 +156,7 @@
156
156
  "@lobechat/database": "workspace:*",
157
157
  "@lobechat/electron-client-ipc": "workspace:*",
158
158
  "@lobechat/electron-server-ipc": "workspace:*",
159
+ "@lobechat/fetch-sse": "workspace:*",
159
160
  "@lobechat/file-loaders": "workspace:*",
160
161
  "@lobechat/model-runtime": "workspace:*",
161
162
  "@lobechat/observability-otel": "workspace:*",
@@ -0,0 +1,29 @@
1
+ {
2
+ "name": "@lobechat/fetch-sse",
3
+ "version": "1.0.0",
4
+ "private": true,
5
+ "description": "SSE fetch utilities with streaming support",
6
+ "exports": {
7
+ ".": {
8
+ "types": "./src/index.ts",
9
+ "default": "./src/index.ts"
10
+ },
11
+ "./parseError": {
12
+ "types": "./src/parseError.ts",
13
+ "default": "./src/parseError.ts"
14
+ }
15
+ },
16
+ "main": "./src/index.ts",
17
+ "types": "./src/index.ts",
18
+ "scripts": {
19
+ "test": "vitest",
20
+ "test:coverage": "vitest --coverage --silent='passed-only'"
21
+ },
22
+ "dependencies": {
23
+ "@lobechat/const": "workspace:*",
24
+ "@lobechat/model-runtime": "workspace:*",
25
+ "@lobechat/types": "workspace:*",
26
+ "@lobechat/utils": "workspace:*",
27
+ "i18next": "^24.2.1"
28
+ }
29
+ }
@@ -1,10 +1,10 @@
1
1
  import { MESSAGE_CANCEL_FLAT } from '@lobechat/const';
2
2
  import { ChatMessageError } from '@lobechat/types';
3
+ import { FetchEventSourceInit } from '@lobechat/utils/client/fetchEventSource/index';
4
+ import { fetchEventSource } from '@lobechat/utils/client/fetchEventSource/index';
5
+ import { sleep } from '@lobechat/utils/sleep';
3
6
  import { afterEach, describe, expect, it, vi } from 'vitest';
4
7
 
5
- import { FetchEventSourceInit } from '../../client/fetchEventSource';
6
- import { fetchEventSource } from '../../client/fetchEventSource';
7
- import { sleep } from '../../sleep';
8
8
  import { fetchSSE } from '../fetchSSE';
9
9
 
10
10
  // 模拟 i18next
@@ -12,7 +12,7 @@ vi.mock('i18next', () => ({
12
12
  t: vi.fn((key) => `translated_${key}`),
13
13
  }));
14
14
 
15
- vi.mock('../../client/fetchEventSource', () => ({
15
+ vi.mock('@lobechat/utils/client/fetchEventSource/index', () => ({
16
16
  fetchEventSource: vi.fn(),
17
17
  }));
18
18
 
@@ -1,14 +1,14 @@
1
1
  import { ErrorResponse } from '@lobechat/types';
2
- import { afterEach, describe, expect, it, vi } from 'vitest';
2
+ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
3
3
 
4
4
  import { getMessageError } from '../parseError';
5
5
 
6
- // 模拟 i18next
6
+ // Mock i18next
7
7
  vi.mock('i18next', () => ({
8
8
  t: vi.fn((key) => `translated_${key}`),
9
9
  }));
10
10
 
11
- // 模拟 Response
11
+ // Mock Response
12
12
  const createMockResponse = (body: any, ok: boolean, status: number = 200) => ({
13
13
  ok,
14
14
  status,
@@ -38,11 +38,14 @@ const createMockResponse = (body: any, ok: boolean, status: number = 200) => ({
38
38
  },
39
39
  });
40
40
 
41
- // 在每次测试后清理所有模拟
42
41
  afterEach(() => {
43
42
  vi.restoreAllMocks();
44
43
  });
45
44
 
45
+ beforeEach(() => {
46
+ vi.clearAllMocks();
47
+ });
48
+
46
49
  describe('getMessageError', () => {
47
50
  it('should handle business error correctly', async () => {
48
51
  const mockErrorResponse: ErrorResponse = {
@@ -12,9 +12,9 @@ import {
12
12
  ResponseAnimation,
13
13
  ResponseAnimationStyle,
14
14
  } from '@lobechat/types';
15
+ import { fetchEventSource } from '@lobechat/utils/client/fetchEventSource/index';
16
+ import { nanoid } from '@lobechat/utils/uuid';
15
17
 
16
- import { fetchEventSource } from '../client/fetchEventSource';
17
- import { nanoid } from '../uuid';
18
18
  import { getMessageError } from './parseError';
19
19
 
20
20
  type SSEFinishType = 'done' | 'error' | 'abort';
@@ -1,7 +1,7 @@
1
1
  import { ChatMessageError, ErrorResponse, ErrorType } from '@lobechat/types';
2
2
  import { t } from 'i18next';
3
3
 
4
- export const getMessageError = async (response: Response) => {
4
+ export const getMessageError = async (response: Response): Promise<ChatMessageError> => {
5
5
  let chatMessageError: ChatMessageError;
6
6
 
7
7
  // try to get the biz error
@@ -9,13 +9,13 @@ export const getMessageError = async (response: Response) => {
9
9
  const data = (await response.json()) as ErrorResponse;
10
10
  chatMessageError = {
11
11
  body: data.body,
12
- message: t(`response.${data.errorType}` as any, { ns: 'error' }),
12
+ message: t(`response.${data.errorType}`, { ns: 'error' }),
13
13
  type: data.errorType,
14
14
  };
15
15
  } catch {
16
16
  // if not return, then it's a common error
17
17
  chatMessageError = {
18
- message: t(`response.${response.status}` as any, { ns: 'error' }),
18
+ message: t(`response.${response.status}`, { ns: 'error' }),
19
19
  type: response.status as ErrorType,
20
20
  };
21
21
  }
@@ -1,8 +1,8 @@
1
+ import { imageUrlToBase64 } from '@lobechat/utils';
1
2
  import { OpenAI } from 'openai';
2
- import { describe, expect, it, vi } from 'vitest';
3
+ import { beforeEach, describe, expect, it, vi } from 'vitest';
3
4
 
4
5
  import { OpenAIChatMessage, UserMessageContentPart } from '../../types/chat';
5
- import { imageUrlToBase64 } from '../../utils/imageToBase64';
6
6
  import { parseDataUri } from '../../utils/uriParser';
7
7
  import {
8
8
  buildAnthropicBlock,
@@ -12,16 +12,22 @@ import {
12
12
  } from './anthropic';
13
13
 
14
14
  // Mock the parseDataUri function since it's an implementation detail
15
- vi.mock('../../utils/uriParser', () => ({
16
- parseDataUri: vi.fn().mockReturnValue({
17
- mimeType: 'image/jpeg',
18
- base64: 'base64EncodedString',
19
- type: 'base64',
20
- }),
15
+ vi.mock('../../utils/uriParser');
16
+ vi.mock('@lobechat/utils', () => ({
17
+ imageUrlToBase64: vi.fn(),
21
18
  }));
22
- vi.mock('../../utils/imageToBase64');
23
19
 
24
20
  describe('anthropicHelpers', () => {
21
+ beforeEach(() => {
22
+ vi.resetAllMocks();
23
+ // Set default mock implementation for parseDataUri
24
+ vi.mocked(parseDataUri).mockReturnValue({
25
+ mimeType: 'image/jpeg',
26
+ base64: 'base64EncodedString',
27
+ type: 'base64',
28
+ });
29
+ });
30
+
25
31
  describe('buildAnthropicBlock', () => {
26
32
  it('should return the content as is for text type', async () => {
27
33
  const content: UserMessageContentPart = { type: 'text', text: 'Hello!' };
@@ -52,7 +58,7 @@ describe('anthropicHelpers', () => {
52
58
  base64: null,
53
59
  type: 'url',
54
60
  });
55
- vi.mocked(imageUrlToBase64).mockResolvedValue({
61
+ vi.mocked(imageUrlToBase64).mockResolvedValueOnce({
56
62
  base64: 'convertedBase64String',
57
63
  mimeType: 'image/jpg',
58
64
  });
@@ -82,7 +88,7 @@ describe('anthropicHelpers', () => {
82
88
  base64: null,
83
89
  type: 'url',
84
90
  });
85
- vi.mocked(imageUrlToBase64).mockResolvedValue({
91
+ vi.mocked(imageUrlToBase64).mockResolvedValueOnce({
86
92
  base64: 'convertedBase64String',
87
93
  mimeType: 'image/png',
88
94
  });
@@ -1,8 +1,8 @@
1
1
  import Anthropic from '@anthropic-ai/sdk';
2
+ import { imageUrlToBase64 } from '@lobechat/utils';
2
3
  import OpenAI from 'openai';
3
4
 
4
5
  import { OpenAIChatMessage, UserMessageContentPart } from '../../types';
5
- import { imageUrlToBase64 } from '../../utils/imageToBase64';
6
6
  import { parseDataUri } from '../../utils/uriParser';
7
7
 
8
8
  export const buildAnthropicBlock = async (
@@ -1,9 +1,9 @@
1
1
  // @vitest-environment node
2
2
  import { Type as SchemaType } from '@google/genai';
3
+ import * as imageToBase64Module from '@lobechat/utils';
3
4
  import { describe, expect, it, vi } from 'vitest';
4
5
 
5
6
  import { ChatCompletionTool, OpenAIChatMessage, UserMessageContentPart } from '../../types';
6
- import * as imageToBase64Module from '../../utils/imageToBase64';
7
7
  import { parseDataUri } from '../../utils/uriParser';
8
8
  import {
9
9
  buildGoogleMessage,
@@ -5,9 +5,9 @@ import {
5
5
  Part,
6
6
  Type as SchemaType,
7
7
  } from '@google/genai';
8
+ import { imageUrlToBase64 } from '@lobechat/utils';
8
9
 
9
10
  import { ChatCompletionTool, OpenAIChatMessage, UserMessageContentPart } from '../../types';
10
- import { imageUrlToBase64 } from '../../utils/imageToBase64';
11
11
  import { safeParseJSON } from '../../utils/safeParseJSON';
12
12
  import { parseDataUri } from '../../utils/uriParser';
13
13
 
@@ -64,12 +64,9 @@ export const buildGooglePart = async (
64
64
  }
65
65
 
66
66
  if (type === 'url') {
67
- // For video URLs, we need to fetch and convert to base64
67
+ // Use imageUrlToBase64 for SSRF protection (works for any binary data including videos)
68
68
  // Note: This might need size/duration limits for practical use
69
- const response = await fetch(content.video_url.url);
70
- const arrayBuffer = await response.arrayBuffer();
71
- const base64 = Buffer.from(arrayBuffer).toString('base64');
72
- const mimeType = response.headers.get('content-type') || 'video/mp4';
69
+ const { base64, mimeType } = await imageUrlToBase64(content.video_url.url);
73
70
 
74
71
  return {
75
72
  inlineData: { data: base64, mimeType },
@@ -1,7 +1,8 @@
1
+ import { imageUrlToBase64 } from '@lobechat/utils';
1
2
  import OpenAI from 'openai';
2
3
  import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
3
4
 
4
- import { imageUrlToBase64 } from '../../utils/imageToBase64';
5
+ import { OpenAIChatMessage } from '../../types';
5
6
  import { parseDataUri } from '../../utils/uriParser';
6
7
  import {
7
8
  convertImageUrlToFile,
@@ -11,7 +12,9 @@ import {
11
12
  } from './openai';
12
13
 
13
14
  // 模拟依赖
14
- vi.mock('../../utils/imageToBase64');
15
+ vi.mock('@lobechat/utils', () => ({
16
+ imageUrlToBase64: vi.fn(),
17
+ }));
15
18
  vi.mock('../../utils/uriParser');
16
19
 
17
20
  describe('convertMessageContent', () => {
@@ -147,11 +150,71 @@ describe('convertOpenAIMessages', () => {
147
150
 
148
151
  expect(Promise.all).toHaveBeenCalledTimes(2); // 一次用于消息数组,一次用于内容数组
149
152
  });
153
+
154
+ it('should filter out reasoning field from messages', async () => {
155
+ const messages = [
156
+ {
157
+ role: 'assistant',
158
+ content: 'Hello',
159
+ reasoning: { content: 'some reasoning', duration: 100 },
160
+ },
161
+ { role: 'user', content: 'Hi' },
162
+ ] as any;
163
+
164
+ const result = await convertOpenAIMessages(messages);
165
+
166
+ expect(result).toEqual([
167
+ { role: 'assistant', content: 'Hello' },
168
+ { role: 'user', content: 'Hi' },
169
+ ]);
170
+ // Ensure reasoning field is removed
171
+ expect((result[0] as any).reasoning).toBeUndefined();
172
+ });
173
+
174
+ it('should preserve reasoning_content field from messages (for DeepSeek compatibility)', async () => {
175
+ const messages = [
176
+ {
177
+ role: 'assistant',
178
+ content: 'Hello',
179
+ reasoning_content: 'some reasoning content',
180
+ },
181
+ { role: 'user', content: 'Hi' },
182
+ ] as any;
183
+
184
+ const result = await convertOpenAIMessages(messages);
185
+
186
+ expect(result).toEqual([
187
+ { role: 'assistant', content: 'Hello', reasoning_content: 'some reasoning content' },
188
+ { role: 'user', content: 'Hi' },
189
+ ]);
190
+ // Ensure reasoning_content field is preserved
191
+ expect((result[0] as any).reasoning_content).toBe('some reasoning content');
192
+ });
193
+
194
+ it('should filter out reasoning but preserve reasoning_content field', async () => {
195
+ const messages = [
196
+ {
197
+ role: 'assistant',
198
+ content: 'Hello',
199
+ reasoning: { content: 'some reasoning', duration: 100 },
200
+ reasoning_content: 'some reasoning content',
201
+ },
202
+ ] as any;
203
+
204
+ const result = await convertOpenAIMessages(messages);
205
+
206
+ expect(result).toEqual([
207
+ { role: 'assistant', content: 'Hello', reasoning_content: 'some reasoning content' },
208
+ ]);
209
+ // Ensure reasoning object is removed but reasoning_content is preserved
210
+ expect((result[0] as any).reasoning).toBeUndefined();
211
+ expect((result[0] as any).reasoning_content).toBe('some reasoning content');
212
+ });
150
213
  });
151
214
 
152
215
  describe('convertOpenAIResponseInputs', () => {
153
216
  it('应该正确转换普通文本消息', async () => {
154
- const messages: OpenAI.ChatCompletionMessageParam[] = [
217
+ const messages: OpenAIChatMessage[] = [
155
218
  { role: 'user', content: 'Hello' },
156
219
  { role: 'assistant', content: 'Hi there!' },
157
220
  ];
@@ -165,7 +228,7 @@ describe('convertOpenAIResponseInputs', () => {
165
228
  });
166
229
 
167
230
  it('应该正确转换带有工具调用的消息', async () => {
168
- const messages: OpenAI.ChatCompletionMessageParam[] = [
231
+ const messages: OpenAIChatMessage[] = [
169
232
  {
170
233
  role: 'assistant',
171
234
  content: '',
@@ -195,7 +258,7 @@ describe('convertOpenAIResponseInputs', () => {
195
258
  });
196
259
 
197
260
  it('应该正确转换工具响应消息', async () => {
198
- const messages: OpenAI.ChatCompletionMessageParam[] = [
261
+ const messages: OpenAIChatMessage[] = [
199
262
  {
200
263
  role: 'tool',
201
264
  content: 'Function result',
@@ -215,7 +278,7 @@ describe('convertOpenAIResponseInputs', () => {
215
278
  });
216
279
 
217
280
  it('应该正确转换包含图片的消息', async () => {
218
- const messages: OpenAI.ChatCompletionMessageParam[] = [
281
+ const messages: OpenAIChatMessage[] = [
219
282
  {
220
283
  role: 'user',
221
284
  content: [
@@ -247,7 +310,7 @@ describe('convertOpenAIResponseInputs', () => {
247
310
  });
248
311
 
249
312
  it('应该正确处理混合类型的消息序列', async () => {
250
- const messages: OpenAI.ChatCompletionMessageParam[] = [
313
+ const messages: OpenAIChatMessage[] = [
251
314
  { role: 'user', content: 'I need help with a function' },
252
315
  {
253
316
  role: 'assistant',
@@ -287,6 +350,29 @@ describe('convertOpenAIResponseInputs', () => {
287
350
  },
288
351
  ]);
289
352
  });
353
+
354
+ it('should extract reasoning.content into a separate reasoning item', async () => {
355
+ const messages: OpenAIChatMessage[] = [
356
+ { content: 'system prompts', role: 'system' },
357
+ { content: '你好', role: 'user' },
358
+ {
359
+ content: 'hello',
360
+ role: 'assistant',
361
+ reasoning: { content: 'reasoning content', duration: 2706 },
362
+ },
363
+ { content: '杭州天气如何', role: 'user' },
364
+ ];
365
+
366
+ const result = await convertOpenAIResponseInputs(messages);
367
+
368
+ expect(result).toEqual([
369
+ { content: 'system prompts', role: 'developer' },
370
+ { content: '你好', role: 'user' },
371
+ { summary: [{ text: 'reasoning content', type: 'summary_text' }], type: 'reasoning' },
372
+ { content: 'hello', role: 'assistant' },
373
+ { content: '杭州天气如何', role: 'user' },
374
+ ]);
375
+ });
290
376
  });
291
377
 
292
378
  describe('convertImageUrlToFile', () => {