@lobehub/chat 1.120.7 → 1.121.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursor/rules/project-structure.mdc +54 -42
- package/.cursor/rules/testing-guide/testing-guide.mdc +28 -17
- package/.env.development +122 -0
- package/.vscode/settings.json +0 -1
- package/CHANGELOG.md +51 -0
- package/CLAUDE.md +3 -4
- package/apps/desktop/package.json +1 -0
- package/apps/desktop/src/main/modules/networkProxy/dispatcher.ts +24 -2
- package/changelog/v1.json +18 -0
- package/docker-compose/local/init_data.json +981 -1024
- package/docker-compose.development.yml +40 -0
- package/docs/development/basic/work-with-server-side-database.mdx +77 -0
- package/docs/development/basic/work-with-server-side-database.zh-CN.mdx +77 -0
- package/docs/self-hosting/advanced/s3/cloudflare-r2.mdx +1 -1
- package/docs/self-hosting/advanced/s3/cloudflare-r2.zh-CN.mdx +2 -2
- package/locales/zh-CN/common.json +7 -0
- package/package.json +2 -1
- package/packages/database/src/repositories/aiInfra/index.ts +3 -1
- package/packages/model-runtime/src/RouterRuntime/createRuntime.test.ts +6 -91
- package/packages/model-runtime/src/RouterRuntime/createRuntime.ts +6 -28
- package/packages/model-runtime/src/openrouter/index.ts +15 -12
- package/packages/model-runtime/src/openrouter/type.ts +10 -0
- package/packages/model-runtime/src/utils/modelParse.test.ts +66 -0
- package/packages/model-runtime/src/utils/modelParse.ts +15 -3
- package/packages/model-runtime/src/utils/postProcessModelList.ts +1 -0
- package/packages/utils/src/detectChinese.test.ts +37 -0
- package/packages/utils/src/detectChinese.ts +12 -0
- package/packages/utils/src/index.ts +1 -0
- package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/Desktop/TextArea.test.tsx +33 -18
- package/src/app/[variants]/(main)/chat/(workspace)/@topic/features/TopicListContent/ByTimeMode/index.tsx +3 -3
- package/src/app/[variants]/(main)/image/features/PromptInput/index.tsx +12 -0
- package/src/features/ChatInput/useSend.ts +14 -2
- package/src/hooks/useGeminiChineseWarning.tsx +91 -0
- package/src/locales/default/common.ts +7 -0
- package/src/store/global/initialState.ts +2 -0
package/docker-compose.development.yml

```diff
@@ -0,0 +1,40 @@
+name: lobe-chat-development
+services:
+  network-service:
+    image: alpine
+    container_name: lobe-network
+    restart: always
+    ports:
+      - '${MINIO_PORT}:${MINIO_PORT}' # MinIO API
+      - '9001:9001' # MinIO Console
+      - '${CASDOOR_PORT}:${CASDOOR_PORT}' # Casdoor
+    command: tail -f /dev/null
+    networks:
+      - lobe-network
+
+  postgresql:
+    extends:
+      file: docker-compose/local/docker-compose.yml
+      service: postgresql
+  minio:
+    extends:
+      file: docker-compose/local/docker-compose.yml
+      service: minio
+  casdoor:
+    extends:
+      file: docker-compose/local/docker-compose.yml
+      service: casdoor
+  searxng:
+    extends:
+      file: docker-compose/local/docker-compose.yml
+      service: searxng
+
+volumes:
+  data:
+    driver: local
+  s3_data:
+    driver: local
+
+networks:
+  lobe-network:
+    driver: bridge
```
package/docs/development/basic/work-with-server-side-database.mdx

````diff
@@ -0,0 +1,77 @@
+# Work with Server-Side Database
+
+LobeChat provides a battery-included experience with its client-side database.
+However, some features you may care about are only available with a server-side setup.
+
+To work with the server-side database,
+you can set up all the prerequisites by following the [Deploying Server-Side Database](https://lobehub.com/docs/self-hosting/server-database) guide.
+But here is an easier approach that can reduce the pain.
+
+## Quick Setup
+
+### Environment Configuration
+
+The project already includes a `.env.development` file with all necessary environment variables for server-side database mode. This file configures:
+
+- **Service Mode**: `NEXT_PUBLIC_SERVICE_MODE=server`
+- **Database**: PostgreSQL with connection string
+- **Authentication**: NextAuth with Casdoor SSO
+- **Storage**: MinIO S3-compatible storage
+- **Search**: SearXNG search engine
+
+### Start Docker Services
+
+Start all required services using Docker Compose:
+
+```bash
+docker-compose -f docker-compose.development.yml up -d
+```
+
+This will start the following services:
+
+- PostgreSQL database (port 5432)
+- MinIO storage (port 9000)
+- Casdoor authentication (port 8000)
+- SearXNG search (port 8080)
+
+### Run Database Migrations
+
+Execute the database migration script to create all necessary tables:
+
+```bash
+pnpm db:migrate
+```
+
+You should see: `✅ database migration pass.`
+
+### Start Development Server
+
+Launch the LobeChat development server:
+
+```bash
+pnpm dev
+```
+
+The server will start on `http://localhost:3010`.
+
+You can check that all Docker services are running with:
+
+```bash
+docker-compose -f docker-compose.development.yml ps
+```
+
+### Reset Services
+
+If you encounter issues, you can reset the entire stack:
+
+```bash
+# Stop and remove all containers
+docker-compose -f docker-compose.development.yml down
+
+# Remove volumes (this will delete all data)
+docker-compose -f docker-compose.development.yml down -v
+
+# Start fresh
+docker-compose -f docker-compose.development.yml up -d
+pnpm db:migrate
+```
````
package/docs/development/basic/work-with-server-side-database.zh-CN.mdx

````diff
@@ -0,0 +1,77 @@
+# 使用服务端数据库
+
+LobeChat 提供了内置的客户端数据库体验。
+但某些重要功能仅在服务端开发中可用。
+
+为了使用服务端数据库功能,
+需要参考 [部署服务端数据库](https://lobehub.com/docs/self-hosting/server-database) 的说明来配置所有前置条件。
+本文档提供了一个更简化的配置方法,能够在本地开发时快速启动简化的服务端环境。
+
+## 快速设置
+
+### 环境配置
+
+项目已经包含了一个 `.env.development` 文件,其中包含服务端数据库模式所需的所有环境变量。此文件配置了:
+
+- **服务模式**: `NEXT_PUBLIC_SERVICE_MODE=server`
+- **数据库**: 带连接字符串的 PostgreSQL
+- **身份验证**: 带 Casdoor SSO 的 NextAuth
+- **存储**: MinIO S3 兼容存储
+- **搜索**: SearXNG 搜索引擎
+
+### 启动 Docker 服务
+
+使用 Docker Compose 启动所有必需的服务:
+
+```bash
+docker-compose -f docker-compose.development.yml up -d
+```
+
+这将启动以下服务:
+
+- PostgreSQL 数据库(端口 5432)
+- MinIO 存储(端口 9000)
+- Casdoor 身份验证(端口 8000)
+- SearXNG 搜索(端口 8080)
+
+### 运行数据库迁移
+
+执行数据库迁移脚本以创建所有必要的表:
+
+```bash
+pnpm db:migrate
+```
+
+预期输出:`✅ database migration pass.`
+
+### 启动开发服务器
+
+启动 LobeChat 开发服务器:
+
+```bash
+pnpm dev
+```
+
+服务器将在 `http://localhost:3010` 上启动
+
+可以通过运行以下命令检查所有 Docker 服务运行状态:
+
+```bash
+docker-compose -f docker-compose.development.yml ps
+```
+
+### 重置服务
+
+如遇到问题,可以重置整个服务堆栈:
+
+```bash
+# 停止并删除所有容器
+docker-compose -f docker-compose.development.yml down
+
+# 删除卷(这将删除所有数据)
+docker-compose -f docker-compose.development.yml down -v
+
+# 重新启动
+docker-compose -f docker-compose.development.yml up -d
+pnpm db:migrate
+```
````
package/docs/self-hosting/advanced/s3/cloudflare-r2.mdx

```diff
@@ -85,7 +85,7 @@ We need to configure an S3 storage service in the server-side database to store
 <Image alt={'Configure allowed site domain'} src={'https://github.com/lobehub/lobe-chat/assets/28616219/dfcc2cb3-2958-4498-a8a4-51bec584fe7d'} />
 
 <Callout type={'info'}>
-  If you also plan to use the desktop client, add <code>http://localhost:3015</code> to <code>AllowedOrigins</code> so the desktop client (running locally) can access R2.
+  If you also plan to use the desktop client, add <code>[http://localhost:3015](http://localhost:3015)</code> to <code>AllowedOrigins</code> so the desktop client (running locally) can access R2.
 </Callout>
 
 Example configuration is as follows:
```
package/docs/self-hosting/advanced/s3/cloudflare-r2.zh-CN.mdx

````diff
@@ -82,11 +82,11 @@ tags:
 添加跨域规则,允许你的域名(在上文是 `https://your-project.vercel.app`)来源的请求:
 
 <Image alt={'配置允许你的站点域名'} src={'https://github.com/lobehub/lobe-chat/assets/28616219/dfcc2cb3-2958-4498-a8a4-51bec584fe7d'} />
+
 <Callout type={'info'}>
-  如果你还需要在桌面端使用,请在 <code>AllowedOrigins</code> 中额外添加 <code>http://localhost:3015</code>,以便桌面端(本地运行)能够访问 R2。
+  如果你还需要在桌面端使用,请在 <code>AllowedOrigins</code> 中额外添加 <code>[http://localhost:3015](http://localhost:3015)</code>,以便桌面端(本地运行)能够访问 R2。
 </Callout>
 
-
 示例配置如下:
 
 ```json
````
package/locales/zh-CN/common.json

```diff
@@ -182,6 +182,13 @@
     "title": "喜欢我们的产品?"
   },
   "fullscreen": "全屏模式",
+  "geminiImageChineseWarning": {
+    "content": "Nano Banana 使用中文有概率性生成图片失败。建议使用英文以获得更好的效果。",
+    "continueGenerate": "继续生成",
+    "continueSend": "继续发送",
+    "doNotShowAgain": "不再提示",
+    "title": "中文输入提示"
+  },
   "historyRange": "历史范围",
   "import": "导入",
   "importData": "导入数据",
```
package/package.json CHANGED

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.120.7",
+  "version": "1.121.1",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -327,6 +327,7 @@
     "crypto-js": "^4.2.0",
     "dbdocs": "^0.16.0",
     "dotenv": "^17.0.0",
+    "dotenv-expand": "^12.0.2",
     "dpdm-fast": "^1.0.13",
     "drizzle-dbml-generator": "^0.10.0",
     "drizzle-kit": "^0.31.4",
```
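The new `dotenv-expand` dependency plausibly supports the `${MINIO_PORT}` / `${CASDOOR_PORT}` style references used by `.env.development` and `docker-compose.development.yml`; the wiring itself is not shown in this diff. A minimal, hedged usage sketch of the library:

```ts
import { config } from 'dotenv';
import { expand } from 'dotenv-expand';

// Load .env.development and expand nested references such as ${MINIO_PORT}
// into final values before other tooling reads process.env.
// (Illustrative only; the diff shown here only adds the dependency.)
expand(config({ path: '.env.development' }));
```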
package/packages/database/src/repositories/aiInfra/index.ts

```diff
@@ -208,7 +208,9 @@ export class AiInfraRepos {
     const providerModels = modules[providerId];
 
     // use the serverModelLists as the defined server model list
-
+    // fallback to empty array for custom provider
+    const presetList = this.providerConfigs[providerId]?.serverModelLists || providerModels || [];
+
     return (presetList as AIChatModelCard[]).map<AiProviderModelListItem>((m) => ({
       ...m,
       enabled: m.enabled || false,
```
package/packages/model-runtime/src/RouterRuntime/createRuntime.test.ts

```diff
@@ -96,7 +96,7 @@ describe('createRouterRuntime', () => {
       });
 
       const runtime = new Runtime();
-      const models = await runtime['
+      const models = await runtime['getRouterMatchModels']({
         id: 'test',
         models: ['model-1', 'model-2'],
         runtime: mockRuntime,
@@ -105,7 +105,7 @@ describe('createRouterRuntime', () => {
       expect(models).toEqual(['model-1', 'model-2']);
     });
 
-    it('should call
+    it('should call asynchronous models function', async () => {
       const mockRuntime = {
         chat: vi.fn(),
       } as unknown as LobeRuntimeAI;
@@ -131,14 +131,9 @@ describe('createRouterRuntime', () => {
         runtime: mockRuntime,
       };
 
-      //
-      const
-      expect(
-      expect(mockModelsFunction).toHaveBeenCalledTimes(1);
-
-      // Second call should use cache
-      const models2 = await runtime['getModels'](runtimeItem);
-      expect(models2).toEqual(['async-model-1', 'async-model-2']);
+      // Call the function
+      const models = await runtime['getRouterMatchModels'](runtimeItem);
+      expect(models).toEqual(['async-model-1', 'async-model-2']);
       expect(mockModelsFunction).toHaveBeenCalledTimes(1);
     });
 
@@ -159,7 +154,7 @@ describe('createRouterRuntime', () => {
       });
 
       const runtime = new Runtime();
-      const models = await runtime['
+      const models = await runtime['getRouterMatchModels']({
         id: 'test',
         runtime: mockRuntime,
       });
@@ -455,84 +450,4 @@ describe('createRouterRuntime', () => {
       expect(mockTextToSpeech).toHaveBeenCalledWith(payload, options);
     });
   });
-
-  describe('clearModelCache method', () => {
-    it('should clear specific runtime cache when runtimeId provided', async () => {
-      const mockModelsFunction = vi.fn().mockResolvedValue(['model-1']);
-
-      const Runtime = createRouterRuntime({
-        id: 'test-runtime',
-        routers: [
-          {
-            apiType: 'openai',
-            options: {},
-            runtime: vi.fn() as any,
-            models: mockModelsFunction,
-          },
-        ],
-      });
-
-      const runtime = new Runtime();
-      const runtimeItem = {
-        id: 'test-id',
-        models: mockModelsFunction,
-        runtime: {} as any,
-      };
-
-      // Build cache
-      await runtime['getModels'](runtimeItem);
-      expect(mockModelsFunction).toHaveBeenCalledTimes(1);
-
-      // Clear specific cache
-      runtime.clearModelCache('test-id');
-
-      // Should call function again
-      await runtime['getModels'](runtimeItem);
-      expect(mockModelsFunction).toHaveBeenCalledTimes(2);
-    });
-
-    it('should clear all cache when no runtimeId provided', async () => {
-      const mockModelsFunction1 = vi.fn().mockResolvedValue(['model-1']);
-      const mockModelsFunction2 = vi.fn().mockResolvedValue(['model-2']);
-
-      const Runtime = createRouterRuntime({
-        id: 'test-runtime',
-        routers: [
-          {
-            apiType: 'openai',
-            options: {},
-            runtime: vi.fn() as any,
-            models: mockModelsFunction1,
-          },
-        ],
-      });
-
-      const runtime = new Runtime();
-      const runtimeItem1 = {
-        id: 'test-id-1',
-        models: mockModelsFunction1,
-        runtime: {} as any,
-      };
-      const runtimeItem2 = {
-        id: 'test-id-2',
-        models: mockModelsFunction2,
-        runtime: {} as any,
-      };
-
-      // Build cache for both items
-      await runtime['getModels'](runtimeItem1);
-      await runtime['getModels'](runtimeItem2);
-      expect(mockModelsFunction1).toHaveBeenCalledTimes(1);
-      expect(mockModelsFunction2).toHaveBeenCalledTimes(1);
-
-      // Clear all cache
-      runtime.clearModelCache();
-
-      // Should call functions again
-      await runtime['getModels'](runtimeItem1);
-      await runtime['getModels'](runtimeItem2);
-      expect(mockModelsFunction1).toHaveBeenCalledTimes(2);
-      expect(mockModelsFunction2).toHaveBeenCalledTimes(2);
-    });
-  });
 });
```
package/packages/model-runtime/src/RouterRuntime/createRuntime.ts

```diff
@@ -117,7 +117,6 @@ export const createRouterRuntime = ({
   return class UniformRuntime implements LobeRuntimeAI {
     private _runtimes: RuntimeItem[];
     private _options: ClientOptions & Record<string, any>;
-    private _modelCache = new Map<string, string[]>();
 
     constructor(options: ClientOptions & Record<string, any> = {}) {
       const _options = {
@@ -143,30 +142,21 @@ export const createRouterRuntime = ({
       this._options = _options;
     }
 
-    // Get runtime's models list, supporting both synchronous arrays and asynchronous functions
-    private async
-
-
-    // If it's a synchronous array, return directly without caching
+    // Get runtime's models list, supporting both synchronous arrays and asynchronous functions
+    private async getRouterMatchModels(runtimeItem: RuntimeItem): Promise<string[]> {
+      // If it's a synchronous array, return directly
       if (typeof runtimeItem.models !== 'function') {
         return runtimeItem.models || [];
       }
 
-      //
-
-      return this._modelCache.get(cacheKey)!;
-      }
-
-      // Get model list and cache result
-      const models = await runtimeItem.models();
-      this._modelCache.set(cacheKey, models);
-      return models;
+      // Get model list
+      return await runtimeItem.models();
     }
 
     // Check if it can match a specific model, otherwise default to using the last runtime
     async getRuntimeByModel(model: string) {
       for (const runtimeItem of this._runtimes) {
-        const models = await this.
+        const models = await this.getRouterMatchModels(runtimeItem);
         if (models.includes(model)) {
           return runtimeItem.runtime;
         }
@@ -226,17 +216,5 @@ export const createRouterRuntime = ({
 
       return runtime.textToSpeech!(payload, options);
     }
-
-    /**
-     * Clear model list cache, forcing reload on next access
-     * @param runtimeId - Optional, specify to clear cache for a specific runtime, omit to clear all caches
-     */
-    clearModelCache(runtimeId?: string) {
-      if (runtimeId) {
-        this._modelCache.delete(runtimeId);
-      } else {
-        this._modelCache.clear();
-      }
-    }
   };
 };
```
package/packages/model-runtime/src/openrouter/index.ts

```diff
@@ -71,29 +71,32 @@ export const LobeOpenRouterAI = createOpenAICompatibleRuntime({
 
     // 处理前端获取的模型信息,转换为标准格式
     const formattedModels = modelList.map((model) => {
+      const { endpoint } = model;
+      const endpointModel = endpoint?.model;
+
       const displayName = model.slug?.toLowerCase().includes('deepseek')
         ? (model.name ?? model.slug)
         : (model.short_name ?? model.name ?? model.slug);
 
+      const inputModalities = endpointModel?.input_modalities || model.input_modalities;
+
       return {
-        contextWindowTokens: model.context_length,
-        description: model.description,
+        contextWindowTokens: endpoint?.context_length || model.context_length,
+        description: endpointModel?.description || model.description,
         displayName,
-        functionCall:
-        id: model.slug,
+        functionCall: endpoint?.supports_tool_parameters || false,
+        id: endpoint?.model_variant_slug || model.slug,
         maxOutput:
-          typeof
-          ?
+          typeof endpoint?.max_completion_tokens === 'number'
+            ? endpoint.max_completion_tokens
             : undefined,
         pricing: {
-          input: formatPrice(
-          output: formatPrice(
+          input: formatPrice(endpoint?.pricing?.prompt),
+          output: formatPrice(endpoint?.pricing?.completion),
         },
-        reasoning:
+        reasoning: endpoint?.supports_reasoning || false,
         releasedAt: new Date(model.created_at).toISOString().split('T')[0],
-        vision:
-          (Array.isArray(model.input_modalities) && model.input_modalities.includes('image')) ||
-          false,
+        vision: Array.isArray(inputModalities) && inputModalities.includes('image'),
       };
     });
 
```
package/packages/model-runtime/src/openrouter/type.ts

```diff
@@ -19,11 +19,21 @@ export interface OpenRouterModelCard {
 }
 
 interface OpenRouterModelEndpoint {
+  context_length?: number;
   max_completion_tokens: number | null;
+  model?: {
+    description?: string;
+    input_modalities?: string[];
+    name?: string;
+    short_name?: string;
+    slug: string;
+  };
+  model_variant_slug?: string;
   pricing: ModelPricing;
   supported_parameters: string[];
   supports_reasoning?: boolean;
   supports_tool_parameters?: boolean;
+  variant?: 'free' | 'standard' | 'unknown';
 }
 
 interface OpenRouterOpenAIReasoning {
```
package/packages/model-runtime/src/utils/modelParse.test.ts

```diff
@@ -758,4 +758,70 @@
       expect(modelConfigKeys.sort()).toEqual(providerDetectionKeys.sort());
     });
   });
+
+  describe('displayName processing', () => {
+    it('should replace "Gemini 2.5 Flash Image Preview" with "Nano Banana"', async () => {
+      const modelList = [
+        {
+          id: 'gemini-2.5-flash-image-preview',
+          displayName: 'Gemini 2.5 Flash Image Preview',
+        },
+        {
+          id: 'some-other-model',
+          displayName: 'Some Other Model',
+        },
+        {
+          id: 'partial-gemini-model',
+          displayName: 'Custom Gemini 2.5 Flash Image Preview Enhanced',
+        },
+        {
+          id: 'gemini-free-model',
+          displayName: 'Gemini 2.5 Flash Image Preview (free)',
+        },
+      ];
+
+      const result = await processModelList(modelList, MODEL_LIST_CONFIGS.google);
+
+      expect(result).toHaveLength(4);
+
+      // First model should have "Nano Banana" as displayName
+      const geminiModel = result.find((m) => m.id === 'gemini-2.5-flash-image-preview');
+      expect(geminiModel?.displayName).toBe('Nano Banana');
+
+      // Second model should keep original displayName
+      const otherModel = result.find((m) => m.id === 'some-other-model');
+      expect(otherModel?.displayName).toBe('Some Other Model');
+
+      // Third model (partial match) should replace only the matching part
+      const partialModel = result.find((m) => m.id === 'partial-gemini-model');
+      expect(partialModel?.displayName).toBe('Custom Nano Banana Enhanced');
+
+      // Fourth model should preserve the (free) suffix
+      const freeModel = result.find((m) => m.id === 'gemini-free-model');
+      expect(freeModel?.displayName).toBe('Nano Banana (free)');
+    });
+
+    it('should keep original displayName when not matching Gemini 2.5 Flash Image Preview', async () => {
+      const modelList = [
+        {
+          id: 'gpt-4',
+          displayName: 'GPT-4',
+        },
+        {
+          id: 'gemini-pro',
+          displayName: 'Gemini Pro',
+        },
+      ];
+
+      const result = await processModelList(modelList, MODEL_LIST_CONFIGS.google);
+
+      expect(result).toHaveLength(2);
+
+      const gptModel = result.find((m) => m.id === 'gpt-4');
+      expect(gptModel?.displayName).toBe('GPT-4');
+
+      const geminiProModel = result.find((m) => m.id === 'gemini-pro');
+      expect(geminiProModel?.displayName).toBe('Gemini Pro');
+    });
+  });
 });
```
package/packages/model-runtime/src/utils/modelParse.ts

```diff
@@ -264,6 +264,20 @@ const processReleasedAt = (model: any, knownModel?: any): string | undefined =>
   return model.releasedAt ?? knownModel?.releasedAt ?? undefined;
 };
 
+/**
+ * 处理模型显示名称
+ * @param displayName 原始显示名称
+ * @returns 处理后的显示名称
+ */
+const processDisplayName = (displayName: string): string => {
+  // 如果包含 "Gemini 2.5 Flash Image Preview",替换对应部分为 "Nano Banana"
+  if (displayName.includes('Gemini 2.5 Flash Image Preview')) {
+    return displayName.replace('Gemini 2.5 Flash Image Preview', 'Nano Banana');
+  }
+
+  return displayName;
+};
+
 /**
  * 处理模型卡片的通用逻辑
  */
@@ -331,9 +345,7 @@ const processModelCard = (
   return {
     contextWindowTokens: model.contextWindowTokens ?? knownModel?.contextWindowTokens ?? undefined,
     description: model.description ?? knownModel?.description ?? '',
-    displayName: (model.displayName ?? knownModel?.displayName ?? model.id)
-      .replaceAll(/\s*[((][^))]*[))]\s*/g, '')
-      .trim(), // 去除括号内容
+    displayName: processDisplayName(model.displayName ?? knownModel?.displayName ?? model.id),
     enabled: model?.enabled || false,
     functionCall:
       model.functionCall ??
```
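The net effect of this change: parenthesized suffixes are no longer stripped from display names; only the "Gemini 2.5 Flash Image Preview" name is rewritten. A quick standalone illustration, using inputs taken from the test hunk above:

```ts
// Standalone copy of the new helper, for illustration only (mirrors the diff above).
const processDisplayName = (displayName: string): string =>
  displayName.includes('Gemini 2.5 Flash Image Preview')
    ? displayName.replace('Gemini 2.5 Flash Image Preview', 'Nano Banana')
    : displayName;

processDisplayName('Gemini 2.5 Flash Image Preview (free)'); // => 'Nano Banana (free)'
processDisplayName('Custom Gemini 2.5 Flash Image Preview Enhanced'); // => 'Custom Nano Banana Enhanced'
processDisplayName('Gemini Pro'); // => 'Gemini Pro' (unchanged)
```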
package/packages/model-runtime/src/utils/postProcessModelList.ts

```diff
@@ -5,6 +5,7 @@ import type { ChatModelCard } from '@/types/llm';
 // Whitelist for automatic image model generation
 export const IMAGE_GENERATION_MODEL_WHITELIST = [
   'gemini-2.5-flash-image-preview',
+  'gemini-2.5-flash-image-preview:free',
   // More models can be added in the future
 ] as const;
 
```
package/packages/utils/src/detectChinese.test.ts

```diff
@@ -0,0 +1,37 @@
+import { describe, expect, it } from 'vitest';
+
+import { containsChinese } from './detectChinese';
+
+describe('containsChinese', () => {
+  it('should return true for text containing Chinese characters', () => {
+    expect(containsChinese('你好世界')).toBe(true);
+    expect(containsChinese('Hello 世界')).toBe(true);
+    expect(containsChinese('测试 test')).toBe(true);
+    expect(containsChinese('这是一个测试')).toBe(true);
+  });
+
+  it('should return false for text without Chinese characters', () => {
+    expect(containsChinese('Hello World')).toBe(false);
+    expect(containsChinese('123456')).toBe(false);
+    expect(containsChinese('!@#$%^&*()')).toBe(false);
+    expect(containsChinese('')).toBe(false);
+    expect(containsChinese('English only text')).toBe(false);
+  });
+
+  it('should handle mixed content correctly', () => {
+    expect(containsChinese('Hello 中国')).toBe(true);
+    expect(containsChinese('English and 数字 123')).toBe(true);
+    expect(containsChinese('Japanese こんにちは and English')).toBe(false);
+    expect(containsChinese('Korean 안녕하세요 and English')).toBe(false);
+  });
+
+  it('should detect extended Chinese character ranges', () => {
+    // Test CJK Unified Ideographs Extension A (U+3400-U+4DBF)
+    expect(containsChinese('㐀㑇㒯')).toBe(true);
+    // Test CJK Compatibility Ideographs (U+F900-U+FAFF)
+    expect(containsChinese('豈更車')).toBe(true);
+    // Test traditional Chinese characters
+    expect(containsChinese('繁體中文')).toBe(true);
+    expect(containsChinese('學習語言')).toBe(true);
+  });
+});
```
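The implementation in `package/packages/utils/src/detectChinese.ts` (+12 lines) is not shown in this diff. A minimal sketch consistent with the tests above — the exact ranges and shape are assumptions, not the published source:

```ts
/**
 * Returns true when the text contains at least one Chinese character.
 * Covers CJK Unified Ideographs (U+4E00–U+9FFF), Extension A (U+3400–U+4DBF),
 * and CJK Compatibility Ideographs (U+F900–U+FAFF); kana and hangul do not match.
 */
export const containsChinese = (text: string): boolean =>
  /[\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff]/.test(text);
```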
|