@lobehub/chat 1.131.3 → 1.132.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/package.json +1 -1
  4. package/packages/context-engine/src/processors/MessageContent.ts +45 -10
  5. package/packages/context-engine/src/processors/__tests__/MessageContent.test.ts +179 -1
  6. package/packages/database/src/models/message.ts +9 -1
  7. package/packages/model-bank/src/aiModels/akashchat.ts +14 -13
  8. package/packages/model-bank/src/aiModels/google.ts +7 -0
  9. package/packages/model-bank/src/aiModels/mistral.ts +37 -22
  10. package/packages/model-runtime/src/providers/akashchat/index.ts +15 -1
  11. package/packages/model-runtime/src/providers/google/index.ts +31 -8
  12. package/packages/model-runtime/src/types/chat.ts +6 -0
  13. package/packages/prompts/src/prompts/files/index.test.ts +148 -3
  14. package/packages/prompts/src/prompts/files/index.ts +17 -5
  15. package/packages/prompts/src/prompts/files/video.ts +17 -0
  16. package/packages/types/src/agent/index.ts +1 -1
  17. package/packages/types/src/message/chat.ts +2 -4
  18. package/packages/types/src/message/index.ts +1 -0
  19. package/packages/types/src/message/video.ts +5 -0
  20. package/packages/utils/src/client/index.ts +1 -0
  21. package/packages/utils/src/client/videoValidation.test.ts +53 -0
  22. package/packages/utils/src/client/videoValidation.ts +21 -0
  23. package/packages/utils/src/parseModels.ts +4 -0
  24. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/useSend.ts +9 -4
  25. package/src/components/ModelSelect/index.tsx +14 -2
  26. package/src/features/ChatInput/ActionBar/Upload/ClientMode.tsx +7 -0
  27. package/src/features/ChatInput/ActionBar/Upload/ServerMode.tsx +29 -3
  28. package/src/features/ChatInput/components/UploadDetail/UploadStatus.tsx +1 -1
  29. package/src/features/Conversation/Messages/Assistant/index.tsx +4 -1
  30. package/src/features/Conversation/Messages/User/VideoFileListViewer.tsx +31 -0
  31. package/src/features/Conversation/Messages/User/index.tsx +3 -1
  32. package/src/hooks/useModelSupportVideo.ts +10 -0
  33. package/src/locales/default/chat.ts +4 -0
  34. package/src/locales/default/components.ts +1 -0
  35. package/src/services/chat/contextEngineering.test.ts +0 -1
  36. package/src/services/chat/contextEngineering.ts +3 -1
  37. package/src/services/chat/helper.ts +4 -0
  38. package/src/services/upload.ts +1 -1
  39. package/src/store/aiInfra/slices/aiModel/selectors.ts +7 -0
  40. package/src/store/chat/slices/aiChat/actions/generateAIChatV2.ts +22 -0
  41. package/src/store/chat/slices/message/action.ts +15 -14
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ## [Version 1.132.0](https://github.com/lobehub/lobe-chat/compare/v1.131.4...v1.132.0)
6
+
7
+ <sup>Released on **2025-09-21**</sup>
8
+
9
+ #### ✨ Features
10
+
11
+ - **misc**: Support google video understanding.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's improved
19
+
20
+ - **misc**: Support google video understanding, closes [#8761](https://github.com/lobehub/lobe-chat/issues/8761) ([f02d43b](https://github.com/lobehub/lobe-chat/commit/f02d43b))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ### [Version 1.131.4](https://github.com/lobehub/lobe-chat/compare/v1.131.3...v1.131.4)
31
+
32
+ <sup>Released on **2025-09-21**</sup>
33
+
34
+ #### 💄 Styles
35
+
36
+ - **misc**: Enhanced AkashChat experience.
37
+
38
+ <br/>
39
+
40
+ <details>
41
+ <summary><kbd>Improvements and Fixes</kbd></summary>
42
+
43
+ #### Styles
44
+
45
+ - **misc**: Enhanced AkashChat experience, closes [#9330](https://github.com/lobehub/lobe-chat/issues/9330) ([47ec2d8](https://github.com/lobehub/lobe-chat/commit/47ec2d8))
46
+
47
+ </details>
48
+
49
+ <div align="right">
50
+
51
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
52
+
53
+ </div>
54
+
5
55
  ### [Version 1.131.3](https://github.com/lobehub/lobe-chat/compare/v1.131.2...v1.131.3)
6
56
 
7
57
  <sup>Released on **2025-09-21**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "features": [
5
+ "Support google video understanding."
6
+ ]
7
+ },
8
+ "date": "2025-09-21",
9
+ "version": "1.132.0"
10
+ },
11
+ {
12
+ "children": {
13
+ "improvements": [
14
+ "Enhanced AkashChat experience."
15
+ ]
16
+ },
17
+ "date": "2025-09-21",
18
+ "version": "1.131.4"
19
+ },
2
20
  {
3
21
  "children": {
4
22
  "fixes": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.131.3",
3
+ "version": "1.132.0",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -17,6 +17,8 @@ export interface FileContextConfig {
17
17
  export interface MessageContentConfig {
18
18
  /** File context configuration */
19
19
  fileContext?: FileContextConfig;
20
+ /** Function to check if video is supported */
21
+ isCanUseVideo?: (model: string, provider: string) => boolean | undefined;
20
22
  /** Function to check if vision is supported */
21
23
  isCanUseVision?: (model: string, provider: string) => boolean | undefined;
22
24
  /** Model name */
@@ -33,7 +35,10 @@ export interface UserMessageContentPart {
33
35
  signature?: string;
34
36
  text?: string;
35
37
  thinking?: string;
36
- type: 'text' | 'image_url' | 'thinking';
38
+ type: 'text' | 'image_url' | 'thinking' | 'video_url';
39
+ video_url?: {
40
+ url: string;
41
+ };
37
42
  }
38
43
 
39
44
  /**
@@ -104,12 +109,13 @@ export class MessageContentProcessor extends BaseProcessor {
104
109
  * Process user message content
105
110
  */
106
111
  private async processUserMessage(message: any): Promise<any> {
107
- // Check if images or files need processing
112
+ // Check if images, videos or files need processing
108
113
  const hasImages = message.imageList && message.imageList.length > 0;
114
+ const hasVideos = message.videoList && message.videoList.length > 0;
109
115
  const hasFiles = message.fileList && message.fileList.length > 0;
110
116
 
111
- // If no images and files, return plain text content directly
112
- if (!hasImages && !hasFiles) {
117
+ // If no images, videos and files, return plain text content directly
118
+ if (!hasImages && !hasVideos && !hasFiles) {
113
119
  return {
114
120
  ...message,
115
121
  content: message.content,
@@ -121,12 +127,13 @@ export class MessageContentProcessor extends BaseProcessor {
121
127
  // Add text content
122
128
  let textContent = message.content || '';
123
129
 
124
- // Add file context (if file context is enabled and has files or images)
125
- if ((hasFiles || hasImages) && this.config.fileContext?.enabled) {
130
+ // Add file context (if file context is enabled and has files, images or videos)
131
+ if ((hasFiles || hasImages || hasVideos) && this.config.fileContext?.enabled) {
126
132
  const filesContext = filesPrompts({
127
133
  addUrl: this.config.fileContext.includeFileUrl ?? true,
128
134
  fileList: message.fileList,
129
- imageList: message.imageList,
135
+ imageList: message.imageList || [],
136
+ videoList: message.videoList || [],
130
137
  });
131
138
 
132
139
  if (filesContext) {
@@ -148,17 +155,26 @@ export class MessageContentProcessor extends BaseProcessor {
148
155
  contentParts.push(...imageContentParts);
149
156
  }
150
157
 
158
+ // Process video content
159
+ if (hasVideos && this.config.isCanUseVideo?.(this.config.model, this.config.provider)) {
160
+ const videoContentParts = await this.processVideoList(message.videoList || []);
161
+ contentParts.push(...videoContentParts);
162
+ }
163
+
151
164
  // 明确返回的字段,只保留必要的消息字段
152
- const hasFileContext = (hasFiles || hasImages) && this.config.fileContext?.enabled;
165
+ const hasFileContext = (hasFiles || hasImages || hasVideos) && this.config.fileContext?.enabled;
153
166
  const hasVisionContent =
154
167
  hasImages && this.config.isCanUseVision?.(this.config.model, this.config.provider);
168
+ const hasVideoContent =
169
+ hasVideos && this.config.isCanUseVideo?.(this.config.model, this.config.provider);
155
170
 
156
- // 如果只有文本内容且没有添加文件上下文也没有视觉内容,返回纯文本
171
+ // 如果只有文本内容且没有添加文件上下文也没有视觉/视频内容,返回纯文本
157
172
  if (
158
173
  contentParts.length === 1 &&
159
174
  contentParts[0].type === 'text' &&
160
175
  !hasFileContext &&
161
- !hasVisionContent
176
+ !hasVisionContent &&
177
+ !hasVideoContent
162
178
  ) {
163
179
  return {
164
180
  content: contentParts[0].text,
@@ -274,6 +290,22 @@ export class MessageContentProcessor extends BaseProcessor {
274
290
  );
275
291
  }
276
292
 
293
+ /**
294
+ * 处理视频列表
295
+ */
296
+ private async processVideoList(videoList: any[]): Promise<UserMessageContentPart[]> {
297
+ if (!videoList || videoList.length === 0) {
298
+ return [];
299
+ }
300
+
301
+ return videoList.map((video) => {
302
+ return {
303
+ type: 'video_url',
304
+ video_url: { url: video.url },
305
+ } as UserMessageContentPart;
306
+ });
307
+ }
308
+
277
309
  /**
278
310
  * 验证内容部分格式
279
311
  */
@@ -290,6 +322,9 @@ export class MessageContentProcessor extends BaseProcessor {
290
322
  case 'thinking': {
291
323
  return !!(part.thinking && part.signature);
292
324
  }
325
+ case 'video_url': {
326
+ return !!(part.video_url && part.video_url.url);
327
+ }
293
328
  default: {
294
329
  return false;
295
330
  }
@@ -1,4 +1,4 @@
1
- import { ChatImageItem, ChatMessage } from '@lobechat/types';
1
+ import { ChatImageItem, ChatMessage, ChatVideoItem } from '@lobechat/types';
2
2
  import { describe, expect, it, vi } from 'vitest';
3
3
 
4
4
  import type { PipelineContext } from '../../types';
@@ -26,6 +26,7 @@ const createContext = (messages: ChatMessage[]): PipelineContext => ({
26
26
  });
27
27
 
28
28
  const mockIsCanUseVision = vi.fn();
29
+ const mockIsCanUseVideo = vi.fn();
29
30
 
30
31
  describe('MessageContentProcessor', () => {
31
32
  describe('Image processing functionality', () => {
@@ -391,4 +392,181 @@ describe('MessageContentProcessor', () => {
391
392
  expect(result.metadata.assistantMessagesProcessed).toBe(1);
392
393
  });
393
394
  });
395
+
396
+ describe('Video processing functionality', () => {
397
+ it('should return empty video content parts if model cannot use video', async () => {
398
+ mockIsCanUseVideo.mockReturnValue(false);
399
+
400
+ const processor = new MessageContentProcessor({
401
+ model: 'any-model',
402
+ provider: 'any-provider',
403
+ isCanUseVideo: mockIsCanUseVideo,
404
+ fileContext: { enabled: false },
405
+ });
406
+
407
+ const messages: ChatMessage[] = [
408
+ {
409
+ id: 'test',
410
+ role: 'user',
411
+ content: 'Hello',
412
+ videoList: [{ url: 'video_url', alt: 'test video', id: 'test' } as ChatVideoItem],
413
+ createdAt: Date.now(),
414
+ updatedAt: Date.now(),
415
+ meta: {},
416
+ },
417
+ ];
418
+
419
+ const result = await processor.process(createContext(messages));
420
+
421
+ // Should return plain text when video is not supported
422
+ expect(result.messages[0].content).toBe('Hello');
423
+ });
424
+
425
+ it('should process videos if model can use video', async () => {
426
+ mockIsCanUseVideo.mockReturnValue(true);
427
+
428
+ const processor = new MessageContentProcessor({
429
+ model: 'gpt-4-vision',
430
+ provider: 'openai',
431
+ isCanUseVideo: mockIsCanUseVideo,
432
+ fileContext: { enabled: false },
433
+ });
434
+
435
+ const messages: ChatMessage[] = [
436
+ {
437
+ id: 'test',
438
+ role: 'user',
439
+ content: 'Hello',
440
+ videoList: [
441
+ { url: 'http://example.com/video.mp4', alt: 'test video', id: 'test1' },
442
+ { url: 'http://example.com/video2.mp4', alt: 'test video 2', id: 'test2' },
443
+ ] as ChatVideoItem[],
444
+ createdAt: Date.now(),
445
+ updatedAt: Date.now(),
446
+ meta: {},
447
+ },
448
+ ];
449
+
450
+ const result = await processor.process(createContext(messages));
451
+
452
+ const content = result.messages[0].content as any[];
453
+ expect(content).toHaveLength(3); // text + 2 videos
454
+ expect(content[0].type).toBe('text');
455
+ expect(content[0].text).toBe('Hello');
456
+ expect(content[1].type).toBe('video_url');
457
+ expect(content[1].video_url.url).toBe('http://example.com/video.mp4');
458
+ expect(content[2].type).toBe('video_url');
459
+ expect(content[2].video_url.url).toBe('http://example.com/video2.mp4');
460
+ });
461
+
462
+ it('should handle video disabled scenario correctly', async () => {
463
+ mockIsCanUseVideo.mockReturnValue(false);
464
+
465
+ const processor = new MessageContentProcessor({
466
+ model: 'text-model',
467
+ provider: 'openai',
468
+ isCanUseVideo: mockIsCanUseVideo,
469
+ fileContext: { enabled: false },
470
+ });
471
+
472
+ const messages: ChatMessage[] = [
473
+ {
474
+ id: 'test',
475
+ role: 'user',
476
+ content: 'Analyze this video',
477
+ videoList: [
478
+ { url: 'http://example.com/video.mp4', alt: 'test video', id: 'test' },
479
+ ] as ChatVideoItem[],
480
+ createdAt: Date.now(),
481
+ updatedAt: Date.now(),
482
+ meta: {},
483
+ },
484
+ ];
485
+
486
+ const result = await processor.process(createContext(messages));
487
+
488
+ // Should return plain text only when video not supported
489
+ expect(result.messages[0].content).toBe('Analyze this video');
490
+ });
491
+
492
+ it('should include videos in file context when enabled', async () => {
493
+ mockIsCanUseVideo.mockReturnValue(false); // Video processing disabled but file context enabled
494
+
495
+ const processor = new MessageContentProcessor({
496
+ model: 'gpt-4',
497
+ provider: 'openai',
498
+ isCanUseVideo: mockIsCanUseVideo,
499
+ fileContext: { enabled: true, includeFileUrl: true },
500
+ });
501
+
502
+ const messages: ChatMessage[] = [
503
+ {
504
+ id: 'test',
505
+ role: 'user',
506
+ content: 'Hello',
507
+ videoList: [
508
+ {
509
+ id: 'video1',
510
+ url: 'http://example.com/video.mp4',
511
+ alt: 'Test video',
512
+ },
513
+ ] as ChatVideoItem[],
514
+ createdAt: Date.now(),
515
+ updatedAt: Date.now(),
516
+ meta: {},
517
+ },
518
+ ];
519
+
520
+ const result = await processor.process(createContext(messages));
521
+
522
+ // Should return structured content when has videos and file context enabled
523
+ expect(Array.isArray(result.messages[0].content)).toBe(true);
524
+ const content = result.messages[0].content as any[];
525
+ expect(content).toHaveLength(1);
526
+ expect(content[0].type).toBe('text');
527
+ expect(content[0].text).toContain('SYSTEM CONTEXT');
528
+ expect(content[0].text).toContain('Hello');
529
+ });
530
+
531
+ it('should handle mixed images and videos correctly', async () => {
532
+ mockIsCanUseVision.mockReturnValue(true);
533
+ mockIsCanUseVideo.mockReturnValue(true);
534
+
535
+ const processor = new MessageContentProcessor({
536
+ model: 'gpt-4-vision',
537
+ provider: 'openai',
538
+ isCanUseVideo: mockIsCanUseVideo,
539
+ isCanUseVision: mockIsCanUseVision,
540
+ fileContext: { enabled: false },
541
+ });
542
+
543
+ const messages: ChatMessage[] = [
544
+ {
545
+ id: 'test',
546
+ role: 'user',
547
+ content: 'Analyze these media files',
548
+ imageList: [
549
+ { url: 'http://example.com/image.jpg', alt: 'test image', id: 'img1' },
550
+ ] as ChatImageItem[],
551
+ videoList: [
552
+ { url: 'http://example.com/video.mp4', alt: 'test video', id: 'vid1' },
553
+ ] as ChatVideoItem[],
554
+ createdAt: Date.now(),
555
+ updatedAt: Date.now(),
556
+ meta: {},
557
+ },
558
+ ];
559
+
560
+ const result = await processor.process(createContext(messages));
561
+
562
+ const content = result.messages[0].content as any[];
563
+ expect(content).toHaveLength(3); // text + image + video
564
+ expect(content[0].type).toBe('text');
565
+ expect(content[0].text).toBe('Analyze these media files');
566
+ expect(content[1].type).toBe('image_url');
567
+ expect(content[1].image_url.url).toBe('http://example.com/image.jpg');
568
+ expect(content[2].type).toBe('video_url');
569
+ expect(content[2].video_url.url).toBe('http://example.com/video.mp4');
570
+ });
571
+ });
394
572
  });
@@ -5,6 +5,7 @@ import {
5
5
  ChatTTS,
6
6
  ChatToolPayload,
7
7
  ChatTranslate,
8
+ ChatVideoItem,
8
9
  CreateMessageParams,
9
10
  MessageItem,
10
11
  ModelRankItem,
@@ -175,7 +176,10 @@ export class MessageModel {
175
176
  }
176
177
 
177
178
  const imageList = relatedFileList.filter((i) => (i.fileType || '').startsWith('image'));
178
- const fileList = relatedFileList.filter((i) => !(i.fileType || '').startsWith('image'));
179
+ const videoList = relatedFileList.filter((i) => (i.fileType || '').startsWith('video'));
180
+ const fileList = relatedFileList.filter(
181
+ (i) => !(i.fileType || '').startsWith('image') && !(i.fileType || '').startsWith('video'),
182
+ );
179
183
 
180
184
  // 3. get relative file chunks
181
185
  const chunksList = await this.db
@@ -251,6 +255,10 @@ export class MessageModel {
251
255
  ragQuery: messageQuery?.rewriteQuery,
252
256
  ragQueryId: messageQuery?.id,
253
257
  ragRawQuery: messageQuery?.userQuery,
258
+ videoList: videoList
259
+ .filter((relation) => relation.messageId === item.id)
260
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
261
+ .map<ChatVideoItem>(({ id, url, name }) => ({ alt: name!, id, url })),
254
262
  } as unknown as ChatMessage;
255
263
  },
256
264
  );
@@ -12,16 +12,9 @@ const akashChatModels: AIChatModelCard[] = [
12
12
  displayName: 'DeepSeek V3.1',
13
13
  enabled: true,
14
14
  id: 'DeepSeek-V3-1',
15
- type: 'chat',
16
- },
17
- {
18
- abilities: {
19
- functionCall: true,
20
- reasoning: true,
15
+ settings: {
16
+ extendParams: ['enableReasoning'],
21
17
  },
22
- contextWindowTokens: 65_536,
23
- displayName: 'DeepSeek R1 Distill Qwen 32B',
24
- id: 'DeepSeek-R1-Distill-Qwen-32B',
25
18
  type: 'chat',
26
19
  },
27
20
  {
@@ -34,12 +27,12 @@ const akashChatModels: AIChatModelCard[] = [
34
27
  displayName: 'GPT-OSS-120B',
35
28
  enabled: true,
36
29
  id: 'gpt-oss-120b',
30
+ settings: {
31
+ extendParams: ['reasoningEffort'],
32
+ },
37
33
  type: 'chat',
38
34
  },
39
35
  {
40
- abilities: {
41
- functionCall: true,
42
- },
43
36
  contextWindowTokens: 262_144,
44
37
  description:
45
38
  'Qwen3 235B A22B Instruct 2507:面向高级推理与对话指令优化的模型,混合专家架构以在大规模参数下保持推理效率。',
@@ -49,7 +42,15 @@ const akashChatModels: AIChatModelCard[] = [
49
42
  },
50
43
  {
51
44
  abilities: {
52
- functionCall: true,
45
+ reasoning: true,
46
+ },
47
+ contextWindowTokens: 65_536,
48
+ displayName: 'DeepSeek R1 Distill Qwen 32B',
49
+ id: 'DeepSeek-R1-Distill-Qwen-32B',
50
+ type: 'chat',
51
+ },
52
+ {
53
+ abilities: {
53
54
  vision: true,
54
55
  },
55
56
  contextWindowTokens: 131_072,
@@ -7,6 +7,7 @@ const googleChatModels: AIChatModelCard[] = [
7
7
  functionCall: true,
8
8
  reasoning: true,
9
9
  search: true,
10
+ video: true,
10
11
  vision: true,
11
12
  },
12
13
  contextWindowTokens: 1_048_576 + 65_536,
@@ -60,6 +61,7 @@ const googleChatModels: AIChatModelCard[] = [
60
61
  functionCall: true,
61
62
  reasoning: true,
62
63
  search: true,
64
+ video: true,
63
65
  vision: true,
64
66
  },
65
67
  contextWindowTokens: 1_048_576 + 65_536,
@@ -112,6 +114,7 @@ const googleChatModels: AIChatModelCard[] = [
112
114
  functionCall: true,
113
115
  reasoning: true,
114
116
  search: true,
117
+ video: true,
115
118
  vision: true,
116
119
  },
117
120
  contextWindowTokens: 1_048_576 + 65_536,
@@ -163,6 +166,7 @@ const googleChatModels: AIChatModelCard[] = [
163
166
  functionCall: true,
164
167
  reasoning: true,
165
168
  search: true,
169
+ video: true,
166
170
  vision: true,
167
171
  },
168
172
  contextWindowTokens: 1_048_576 + 65_536,
@@ -191,6 +195,7 @@ const googleChatModels: AIChatModelCard[] = [
191
195
  functionCall: true,
192
196
  reasoning: true,
193
197
  search: true,
198
+ video: true,
194
199
  vision: true,
195
200
  },
196
201
  contextWindowTokens: 1_048_576 + 65_536,
@@ -240,6 +245,7 @@ const googleChatModels: AIChatModelCard[] = [
240
245
  functionCall: true,
241
246
  reasoning: true,
242
247
  search: true,
248
+ video: true,
243
249
  vision: true,
244
250
  },
245
251
  contextWindowTokens: 1_048_576 + 65_536,
@@ -267,6 +273,7 @@ const googleChatModels: AIChatModelCard[] = [
267
273
  functionCall: true,
268
274
  reasoning: true,
269
275
  search: true,
276
+ video: true,
270
277
  vision: true,
271
278
  },
272
279
  contextWindowTokens: 1_048_576 + 65_536,
@@ -1,7 +1,7 @@
1
1
  import { AIChatModelCard } from '../types/aiModel';
2
2
 
3
3
  // https://docs.mistral.ai/getting-started/models/models_overview/
4
- // https://mistral.ai/products/la-plateforme#pricing
4
+ // https://mistral.ai/pricing#api-pricing
5
5
 
6
6
  const mistralChatModels: AIChatModelCard[] = [
7
7
  {
@@ -10,7 +10,7 @@ const mistralChatModels: AIChatModelCard[] = [
10
10
  },
11
11
  contextWindowTokens: 128_000,
12
12
  description: 'Mistral Medium 3 以 8 倍的成本提供最先进的性能,并从根本上简化了企业部署。',
13
- displayName: 'Mistral Medium 3',
13
+ displayName: 'Mistral Medium 3.1',
14
14
  enabled: true,
15
15
  id: 'mistral-medium-latest',
16
16
  pricing: {
@@ -21,6 +21,41 @@ const mistralChatModels: AIChatModelCard[] = [
21
21
  },
22
22
  type: 'chat',
23
23
  },
24
+ {
25
+ abilities: {
26
+ reasoning: true,
27
+ vision: true,
28
+ },
29
+ contextWindowTokens: 128_000,
30
+ description: 'Magistral Medium 1.2 是Mistral AI于2025年9月发布的前沿级推理模型,具有视觉支持。',
31
+ displayName: 'Magistral Medium 1.2',
32
+ enabled: true,
33
+ id: 'magistral-medium-latest',
34
+ pricing: {
35
+ units: [
36
+ { name: 'textInput', rate: 2, strategy: 'fixed', unit: 'millionTokens' },
37
+ { name: 'textOutput', rate: 5, strategy: 'fixed', unit: 'millionTokens' },
38
+ ],
39
+ },
40
+ type: 'chat',
41
+ },
42
+ {
43
+ abilities: {
44
+ reasoning: true,
45
+ vision: true,
46
+ },
47
+ contextWindowTokens: 128_000,
48
+ description: 'Magistral Small 1.2 是Mistral AI于2025年9月发布的开源小型推理模型,具有视觉支持。',
49
+ displayName: 'Magistral Small 1.2',
50
+ id: 'magistral-small-2509',
51
+ pricing: {
52
+ units: [
53
+ { name: 'textInput', rate: 0.5, strategy: 'fixed', unit: 'millionTokens' },
54
+ { name: 'textOutput', rate: 1.5, strategy: 'fixed', unit: 'millionTokens' },
55
+ ],
56
+ },
57
+ type: 'chat',
58
+ },
24
59
  {
25
60
  abilities: {
26
61
  functionCall: true,
@@ -46,7 +81,6 @@ const mistralChatModels: AIChatModelCard[] = [
46
81
  contextWindowTokens: 128_000,
47
82
  description: 'Mistral Small是成本效益高、快速且可靠的选项,适用于翻译、摘要和情感分析等用例。',
48
83
  displayName: 'Mistral Small 3.2',
49
- enabled: true,
50
84
  id: 'mistral-small-latest',
51
85
  pricing: {
52
86
  units: [
@@ -65,7 +99,6 @@ const mistralChatModels: AIChatModelCard[] = [
65
99
  description:
66
100
  'Mistral Large是旗舰大模型,擅长多语言任务、复杂推理和代码生成,是高端应用的理想选择。',
67
101
  displayName: 'Mistral Large 2.1',
68
- enabled: true,
69
102
  id: 'mistral-large-latest',
70
103
  pricing: {
71
104
  units: [
@@ -75,23 +108,6 @@ const mistralChatModels: AIChatModelCard[] = [
75
108
  },
76
109
  type: 'chat',
77
110
  },
78
- {
79
- abilities: {
80
- reasoning: true,
81
- },
82
- contextWindowTokens: 128_000,
83
- description: 'Magistral Medium 1.1 是 Mistral AI 于2025年7月发布的前沿级推理模型。',
84
- displayName: 'Magistral Medium 1.1',
85
- enabled: true,
86
- id: 'magistral-medium-latest',
87
- pricing: {
88
- units: [
89
- { name: 'textInput', rate: 2, strategy: 'fixed', unit: 'millionTokens' },
90
- { name: 'textOutput', rate: 5, strategy: 'fixed', unit: 'millionTokens' },
91
- ],
92
- },
93
- type: 'chat',
94
- },
95
111
  {
96
112
  abilities: {
97
113
  functionCall: true,
@@ -119,7 +135,6 @@ const mistralChatModels: AIChatModelCard[] = [
119
135
  description:
120
136
  'Pixtral Large 是一款拥有 1240 亿参数的开源多模态模型,基于 Mistral Large 2 构建。这是我们多模态家族中的第二款模型,展现了前沿水平的图像理解能力。',
121
137
  displayName: 'Pixtral Large',
122
- enabled: true,
123
138
  id: 'pixtral-large-latest',
124
139
  pricing: {
125
140
  units: [
@@ -2,6 +2,10 @@ import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactor
2
2
  import { ModelProvider } from '../../types';
3
3
  import { processMultiProviderModelList } from '../../utils/modelParse';
4
4
 
5
+ const THINKING_MODELS = [
6
+ 'DeepSeek-V3-1',
7
+ ];
8
+
5
9
  export interface AkashChatModelCard {
6
10
  id: string;
7
11
  }
@@ -10,12 +14,22 @@ export const LobeAkashChatAI = createOpenAICompatibleRuntime({
10
14
  baseURL: 'https://chatapi.akash.network/api/v1',
11
15
  chatCompletion: {
12
16
  handlePayload: (payload) => {
13
- const { model, ...rest } = payload;
17
+ const { model, thinking, ...rest } = payload;
18
+
19
+ const thinkingFlag =
20
+ thinking?.type === 'enabled' ? true : thinking?.type === 'disabled' ? false : undefined;
14
21
 
15
22
  return {
16
23
  ...rest,
24
+ allowed_openai_params: ['reasoning_effort'],
25
+ cache: { 'no-cache': true },
17
26
  model,
18
27
  stream: true,
28
+ ...(THINKING_MODELS.some((keyword) => model.includes(keyword))
29
+ ? {
30
+ chat_template_kwargs: { thinking: thinkingFlag },
31
+ }
32
+ : {}),
19
33
  } as any;
20
34
  },
21
35
  },