@lobehub/chat 1.65.0 → 1.65.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/CHANGELOG.md +51 -0
  2. package/changelog/v1.json +18 -0
  3. package/docker-compose/local/docker-compose.yml +14 -0
  4. package/docker-compose/local/searxng-settings.yml +2582 -0
  5. package/docker-compose/setup.sh +3 -1
  6. package/docs/self-hosting/advanced/model-list.mdx +4 -2
  7. package/docs/self-hosting/advanced/model-list.zh-CN.mdx +4 -2
  8. package/package.json +7 -7
  9. package/src/app/(backend)/middleware/auth/index.ts +6 -0
  10. package/src/config/aiModels/google.ts +3 -3
  11. package/src/config/aiModels/groq.ts +10 -0
  12. package/src/config/aiModels/qwen.ts +43 -26
  13. package/src/const/message.ts +3 -0
  14. package/src/features/Conversation/Messages/Assistant/Tool/Render/CustomRender.tsx +7 -7
  15. package/src/features/MobileSwitchLoading/index.tsx +0 -1
  16. package/src/libs/agent-runtime/google/index.test.ts +8 -0
  17. package/src/libs/agent-runtime/google/index.ts +18 -5
  18. package/src/libs/agent-runtime/types/chat.ts +9 -1
  19. package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts +113 -0
  20. package/src/libs/agent-runtime/utils/anthropicHelpers.ts +7 -4
  21. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +339 -94
  22. package/src/libs/agent-runtime/utils/streams/anthropic.ts +54 -34
  23. package/src/libs/agent-runtime/utils/streams/openai.test.ts +181 -0
  24. package/src/libs/agent-runtime/utils/streams/openai.ts +40 -30
  25. package/src/libs/agent-runtime/utils/streams/protocol.ts +4 -0
  26. package/src/services/__tests__/chat.test.ts +89 -50
  27. package/src/services/chat.ts +13 -1
  28. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +1 -1
  29. package/src/types/message/base.ts +1 -0
  30. package/src/utils/fetch/__tests__/fetchSSE.test.ts +113 -10
  31. package/src/utils/fetch/fetchSSE.ts +12 -3
  32. package/src/utils/parseModels.test.ts +14 -0
  33. package/src/utils/parseModels.ts +4 -0
@@ -394,6 +394,7 @@ SUB_DIR="docker-compose/local"
394
394
  FILES=(
395
395
  "$SUB_DIR/docker-compose.yml"
396
396
  "$SUB_DIR/init_data.json"
397
+ "$SUB_DIR/searxng-settings.yml"
397
398
  )
398
399
  ENV_EXAMPLES=(
399
400
  "$SUB_DIR/.env.zh-CN.example"
@@ -434,6 +435,7 @@ section_download_files(){
434
435
 
435
436
  download_file "$SOURCE_URL/${FILES[0]}" "docker-compose.yml"
436
437
  download_file "$SOURCE_URL/${FILES[1]}" "init_data.json"
438
+ download_file "$SOURCE_URL/${FILES[2]}" "searxng-settings.yml"
437
439
 
438
440
  # Download .env.example with the specified language
439
441
  if [ "$LANGUAGE" = "zh_CN" ]; then
@@ -657,4 +659,4 @@ section_display_configurated_report() {
657
659
  printf "\n%s" "$(show_message "tips_show_documentation")"
658
660
  printf "%s\n" $(show_message "tips_show_documentation_url")
659
661
  }
660
- section_display_configurated_report
662
+ section_display_configurated_report
@@ -17,7 +17,7 @@ LobeChat supports customizing the model list during deployment. This configurati
17
17
  You can use `+` to add a model, `-` to hide a model, and use `model name=display name<extension configuration>` to customize the display name of a model, separated by English commas. The basic syntax is as follows:
18
18
 
19
19
  ```text
20
- id=displayName<maxToken:vision:reasoning:fc:file>,model2,model3
20
+ id=displayName<maxToken:vision:reasoning:search:fc:file>,model2,model3
21
21
  ```
22
22
 
23
23
  For example: `+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo`
@@ -29,7 +29,7 @@ In the above example, it adds `qwen-7b-chat` and `glm-6b` to the model list, rem
29
29
  Considering the diversity of model capabilities, we started to add extension configuration in version `0.147.8`, with the following rules:
30
30
 
31
31
  ```shell
32
- id=displayName<maxToken:vision:reasoning:fc:file>
32
+ id=displayName<maxToken:vision:reasoning:search:fc:file>
33
33
  ```
34
34
 
35
35
  The first value in angle brackets is designated as the `maxToken` for this model. The second value and beyond are the model's extension capabilities, separated by colons `:`, and the order is not important.
@@ -40,6 +40,7 @@ Examples are as follows:
40
40
  - `spark-v3.5=讯飞星火 v3.5<8192:fc>`: Xunfei Spark 3.5 model, maximum context of 8k, supports Function Call;
41
41
  - `gemini-1.5-flash-latest=Gemini 1.5 Flash<16000:vision>`: Google Vision model, maximum context of 16k, supports image recognition;
42
42
  - `o3-mini=OpenAI o3-mini<200000:reasoning:fc>`: OpenAI o3-mini model, maximum context of 200k, supports reasoning and Function Call;
43
+ - `qwen-max-latest=Qwen Max<32768:search:fc>`: Qwen 2.5 Max model, maximum context of 32k, supports web search and Function Call;
43
44
  - `gpt-4-all=ChatGPT Plus<128000:fc:vision:file>`, hacked version of ChatGPT Plus web, context of 128k, supports image recognition, Function Call, file upload.
44
45
 
45
46
  Currently supported extension capabilities are:
@@ -49,4 +50,5 @@ Currently supported extension capabilities are:
49
50
  | `fc` | Function Calling |
50
51
  | `vision` | Image Recognition |
51
52
  | `reasoning` | Support Reasoning |
53
+ | `search` | Support Web Search |
52
54
  | `file` | File Upload (a bit hacky, not recommended for daily use) |
@@ -16,7 +16,7 @@ LobeChat 支持在部署时自定义模型列表,详情请参考 [模型提供
16
16
  你可以使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。通过 `<>` 来添加扩展配置。基本语法如下:
17
17
 
18
18
  ```text
19
- id=displayName<maxToken:vision:reasoning:fc:file>,model2,model3
19
+ id=displayName<maxToken:vision:reasoning:search:fc:file>,model2,model3
20
20
  ```
21
21
 
22
22
  例如: `+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo`
@@ -28,7 +28,7 @@ id=displayName<maxToken:vision:reasoning:fc:file>,model2,model3
28
28
  考虑到模型的能力多样性,我们在 `0.147.8` 版本开始增加扩展性配置,它的规则如下:
29
29
 
30
30
  ```shell
31
- id=displayName<maxToken:vision:reasoning:fc:file>
31
+ id=displayName<maxToken:vision:reasoning:search:fc:file>
32
32
  ```
33
33
 
34
34
  尖括号第一个值约定为这个模型的 `maxToken` 。第二个及以后作为模型的扩展能力,能力与能力之间用冒号 `:` 作为分隔符,顺序不重要。
@@ -39,6 +39,7 @@ id=displayName<maxToken:vision:reasoning:fc:file>
39
39
  - `spark-v3.5=讯飞星火 v3.5<8192:fc>`:讯飞星火 3.5 模型,最大上下文 8k,支持 Function Call;
40
40
  - `gemini-1.5-flash-latest=Gemini 1.5 Flash<16000:vision>`:Google 视觉模型,最大上下文 16k,支持图像识别;
41
41
  - `o3-mini=OpenAI o3-mini<200000:reasoning:fc>`:OpenAI o3-mini 模型,最大上下文 200k,支持推理及 Function Call;
42
+ - `qwen-max-latest=Qwen Max<32768:search:fc>`:通义千问 2.5 Max 模型,最大上下文 32k,支持联网搜索及 Function Call;
42
43
  - `gpt-4-all=ChatGPT Plus<128000:fc:vision:file>`,hack 的 ChatGPT Plus 网页版,上下 128k ,支持图像识别、Function Call、文件上传
43
44
 
44
45
  目前支持的扩展能力有:
@@ -48,4 +49,5 @@ id=displayName<maxToken:vision:reasoning:fc:file>
48
49
  | `fc` | 函数调用(function calling) |
49
50
  | `vision` | 视觉识别 |
50
51
  | `reasoning` | 支持推理 |
52
+ | `search` | 支持联网搜索 |
51
53
  | `file` | 文件上传(比较 hack,不建议日常使用) |
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.65.0",
3
+ "version": "1.65.2",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -131,7 +131,7 @@
131
131
  "@lobehub/tts": "^1.28.0",
132
132
  "@lobehub/ui": "^1.165.2",
133
133
  "@neondatabase/serverless": "^0.10.4",
134
- "@next/third-parties": "^15.1.4",
134
+ "@next/third-parties": "^15.2.0",
135
135
  "@react-spring/web": "^9.7.5",
136
136
  "@sentry/nextjs": "^7.120.2",
137
137
  "@serwist/next": "^9.0.11",
@@ -179,7 +179,7 @@
179
179
  "mdast-util-to-markdown": "^2.1.2",
180
180
  "modern-screenshot": "^4.5.5",
181
181
  "nanoid": "^5.0.9",
182
- "next": "^15.1.4",
182
+ "next": "^15.2.0",
183
183
  "next-auth": "beta",
184
184
  "next-mdx-remote": "^5.0.0",
185
185
  "nextjs-toploader": "^3.7.15",
@@ -250,8 +250,8 @@
250
250
  "@lobehub/i18n-cli": "^1.20.3",
251
251
  "@lobehub/lint": "^1.25.5",
252
252
  "@lobehub/seo-cli": "^1.4.3",
253
- "@next/bundle-analyzer": "^15.1.4",
254
- "@next/eslint-plugin-next": "^15.1.4",
253
+ "@next/bundle-analyzer": "^15.2.0",
254
+ "@next/eslint-plugin-next": "^15.2.0",
255
255
  "@peculiar/webcrypto": "^1.5.0",
256
256
  "@semantic-release/exec": "^6.0.3",
257
257
  "@testing-library/jest-dom": "^6.6.3",
@@ -269,8 +269,8 @@
269
269
  "@types/node": "^22.10.5",
270
270
  "@types/numeral": "^2.0.5",
271
271
  "@types/pg": "^8.11.10",
272
- "@types/react": "^19.0.3",
273
- "@types/react-dom": "^19.0.2",
272
+ "@types/react": "^19.0.10",
273
+ "@types/react-dom": "^19.0.4",
274
274
  "@types/rtl-detect": "^1.0.3",
275
275
  "@types/semver": "^7.5.8",
276
276
  "@types/systemjs": "^6.15.1",
@@ -23,6 +23,12 @@ export type RequestHandler = (
23
23
 
24
24
  export const checkAuth =
25
25
  (handler: RequestHandler) => async (req: Request, options: RequestOptions) => {
26
+ // we have a special header to debug the api endpoint in development mode
27
+ const isDebugApi = req.headers.get('lobe-auth-dev-backend-api') === '1';
28
+ if (process.env.NODE_ENV === 'development' && isDebugApi) {
29
+ return handler(req, { ...options, jwtPayload: { userId: 'DEV_USER' } });
30
+ }
31
+
26
32
  let jwtPayload: JWTPayload;
27
33
 
28
34
  try {
@@ -80,9 +80,9 @@ const googleChatModels: AIChatModelCard[] = [
80
80
  vision: true,
81
81
  },
82
82
  contextWindowTokens: 1_048_576 + 8192,
83
- description: '一个 Gemini 2.0 Flash 模型,针对成本效益和低延迟等目标进行了优化。',
84
- displayName: 'Gemini 2.0 Flash-Lite Preview 02-05',
85
- id: 'gemini-2.0-flash-lite-preview-02-05',
83
+ description: 'Gemini 2.0 Flash 模型变体,针对成本效益和低延迟等目标进行了优化。',
84
+ displayName: 'Gemini 2.0 Flash-Lite 001',
85
+ id: 'gemini-2.0-flash-lite-001',
86
86
  maxOutput: 8192,
87
87
  pricing: {
88
88
  cachedInput: 0.018_75,
@@ -176,6 +176,16 @@ const groqChatModels: AIChatModelCard[] = [
176
176
  },
177
177
  type: 'chat',
178
178
  },
179
+ {
180
+ contextWindowTokens: 32_768,
181
+ displayName: 'Mixtral Saba 24B',
182
+ id: 'mistral-saba-24b',
183
+ pricing: {
184
+ input: 0.79,
185
+ output: 0.79,
186
+ },
187
+ type: 'chat',
188
+ },
179
189
  {
180
190
  abilities: {
181
191
  functionCall: true,
@@ -62,8 +62,8 @@ const qwenChatModels: AIChatModelCard[] = [
62
62
  organization: 'Qwen',
63
63
  pricing: {
64
64
  currency: 'CNY',
65
- input: 20,
66
- output: 60,
65
+ input: 2.4,
66
+ output: 9.6,
67
67
  },
68
68
  settings: {
69
69
  searchImpl: 'params',
@@ -410,6 +410,23 @@ const qwenChatModels: AIChatModelCard[] = [
410
410
  },
411
411
  type: 'chat',
412
412
  },
413
+ {
414
+ abilities: {
415
+ vision: true,
416
+ },
417
+ contextWindowTokens: 32_768,
418
+ description: 'Qwen-Omni 系列模型支持输入多种模态的数据,包括视频、音频、图片、文本,并输出音频与文本。',
419
+ displayName: 'Qwen Omni Turbo',
420
+ id: 'qwen-omni-turbo-latest',
421
+ maxOutput: 2048,
422
+ organization: 'Qwen',
423
+ pricing: {
424
+ currency: 'CNY',
425
+ input: 0,
426
+ output: 0,
427
+ },
428
+ type: 'chat',
429
+ },
413
430
  {
414
431
  abilities: {
415
432
  vision: true,
@@ -452,7 +469,7 @@ const qwenChatModels: AIChatModelCard[] = [
452
469
  abilities: {
453
470
  reasoning: true,
454
471
  },
455
- contextWindowTokens: 131_072,
472
+ contextWindowTokens: 65_792,
456
473
  description:
457
474
  'DeepSeek-R1 在后训练阶段大规模使用了强化学习技术,在仅有极少标注数据的情况下,极大提升了模型推理能力。在数学、代码、自然语言推理等任务上,性能较高,能力较强。',
458
475
  displayName: 'DeepSeek R1',
@@ -462,8 +479,8 @@ const qwenChatModels: AIChatModelCard[] = [
462
479
  organization: 'DeepSeek',
463
480
  pricing: {
464
481
  currency: 'CNY',
465
- input: 0,
466
- output: 0,
482
+ input: 4,
483
+ output: 16,
467
484
  },
468
485
  releasedAt: '2025-01-27',
469
486
  type: 'chat',
@@ -472,7 +489,7 @@ const qwenChatModels: AIChatModelCard[] = [
472
489
  abilities: {
473
490
  functionCall: true,
474
491
  },
475
- contextWindowTokens: 131_072,
492
+ contextWindowTokens: 65_792,
476
493
  description:
477
494
  'DeepSeek-V3 为自研 MoE 模型,671B 参数,激活 37B,在 14.8T token 上进行了预训练,在长文本、代码、数学、百科、中文能力上表现优秀。',
478
495
  displayName: 'DeepSeek V3',
@@ -482,8 +499,8 @@ const qwenChatModels: AIChatModelCard[] = [
482
499
  organization: 'DeepSeek',
483
500
  pricing: {
484
501
  currency: 'CNY',
485
- input: 0,
486
- output: 0,
502
+ input: 2,
503
+ output: 8,
487
504
  },
488
505
  releasedAt: '2025-01-27',
489
506
  type: 'chat',
@@ -492,12 +509,12 @@ const qwenChatModels: AIChatModelCard[] = [
492
509
  abilities: {
493
510
  reasoning: true,
494
511
  },
495
- contextWindowTokens: 131_072,
512
+ contextWindowTokens: 32_768,
496
513
  description:
497
514
  'DeepSeek-R1-Distill-Qwen-1.5B 是一个基于 Qwen2.5-Math-1.5B 的蒸馏大型语言模型,使用了 DeepSeek R1 的输出。',
498
515
  displayName: 'DeepSeek R1 Distill Qwen 1.5B',
499
516
  id: 'deepseek-r1-distill-qwen-1.5b',
500
- maxOutput: 8192,
517
+ maxOutput: 16_384,
501
518
  organization: 'DeepSeek',
502
519
  pricing: {
503
520
  currency: 'CNY',
@@ -510,17 +527,17 @@ const qwenChatModels: AIChatModelCard[] = [
510
527
  abilities: {
511
528
  reasoning: true,
512
529
  },
513
- contextWindowTokens: 131_072,
530
+ contextWindowTokens: 32_768,
514
531
  description:
515
532
  'DeepSeek-R1-Distill-Qwen-7B 是一个基于 Qwen2.5-Math-7B 的蒸馏大型语言模型,使用了 DeepSeek R1 的输出。',
516
533
  displayName: 'DeepSeek R1 Distill Qwen 7B',
517
534
  id: 'deepseek-r1-distill-qwen-7b',
518
- maxOutput: 8192,
535
+ maxOutput: 16_384,
519
536
  organization: 'DeepSeek',
520
537
  pricing: {
521
538
  currency: 'CNY',
522
- input: 0,
523
- output: 0,
539
+ input: 0.5,
540
+ output: 1,
524
541
  },
525
542
  type: 'chat',
526
543
  },
@@ -528,17 +545,17 @@ const qwenChatModels: AIChatModelCard[] = [
528
545
  abilities: {
529
546
  reasoning: true,
530
547
  },
531
- contextWindowTokens: 131_072,
548
+ contextWindowTokens: 32_768,
532
549
  description:
533
550
  'DeepSeek-R1-Distill-Qwen-14B 是一个基于 Qwen2.5-14B 的蒸馏大型语言模型,使用了 DeepSeek R1 的输出。',
534
551
  displayName: 'DeepSeek R1 Distill Qwen 14B',
535
552
  id: 'deepseek-r1-distill-qwen-14b',
536
- maxOutput: 8192,
553
+ maxOutput: 16_384,
537
554
  organization: 'DeepSeek',
538
555
  pricing: {
539
556
  currency: 'CNY',
540
- input: 0,
541
- output: 0,
557
+ input: 1,
558
+ output: 3,
542
559
  },
543
560
  type: 'chat',
544
561
  },
@@ -546,17 +563,17 @@ const qwenChatModels: AIChatModelCard[] = [
546
563
  abilities: {
547
564
  reasoning: true,
548
565
  },
549
- contextWindowTokens: 131_072,
566
+ contextWindowTokens: 32_768,
550
567
  description:
551
568
  'DeepSeek-R1-Distill-Qwen-32B 是一个基于 Qwen2.5-32B 的蒸馏大型语言模型,使用了 DeepSeek R1 的输出。',
552
569
  displayName: 'DeepSeek R1 Distill Qwen 32B',
553
570
  id: 'deepseek-r1-distill-qwen-32b',
554
- maxOutput: 8192,
571
+ maxOutput: 16_384,
555
572
  organization: 'DeepSeek',
556
573
  pricing: {
557
574
  currency: 'CNY',
558
- input: 0,
559
- output: 0,
575
+ input: 2,
576
+ output: 6,
560
577
  },
561
578
  type: 'chat',
562
579
  },
@@ -564,12 +581,12 @@ const qwenChatModels: AIChatModelCard[] = [
564
581
  abilities: {
565
582
  reasoning: true,
566
583
  },
567
- contextWindowTokens: 131_072,
584
+ contextWindowTokens: 32_768,
568
585
  description:
569
586
  'DeepSeek-R1-Distill-Llama-8B 是一个基于 Llama-3.1-8B 的蒸馏大型语言模型,使用了 DeepSeek R1 的输出。',
570
587
  displayName: 'DeepSeek R1 Distill Llama 8B',
571
588
  id: 'deepseek-r1-distill-llama-8b',
572
- maxOutput: 8192,
589
+ maxOutput: 16_384,
573
590
  organization: 'DeepSeek',
574
591
  pricing: {
575
592
  currency: 'CNY',
@@ -582,12 +599,12 @@ const qwenChatModels: AIChatModelCard[] = [
582
599
  abilities: {
583
600
  reasoning: true,
584
601
  },
585
- contextWindowTokens: 131_072,
602
+ contextWindowTokens: 32_768,
586
603
  description:
587
604
  'DeepSeek-R1-Distill-Llama-70B 是一个基于 Llama-3.3-70B-Instruct 的蒸馏大型语言模型,使用了 DeepSeek R1 的输出。',
588
605
  displayName: 'DeepSeek R1 Distill Llama 70B',
589
606
  id: 'deepseek-r1-distill-llama-70b',
590
- maxOutput: 8192,
607
+ maxOutput: 16_384,
591
608
  organization: 'DeepSeek',
592
609
  pricing: {
593
610
  currency: 'CNY',
@@ -7,3 +7,6 @@ export const MESSAGE_THREAD_DIVIDER_ID = '__THREAD_DIVIDER__';
7
7
  export const MESSAGE_WELCOME_GUIDE_ID = 'welcome';
8
8
 
9
9
  export const THREAD_DRAFT_ID = '__THREAD_DRAFT_ID__';
10
+
11
+
12
+ export const MESSAGE_FLAGGED_THINKING='FLAGGED_THINKING'
@@ -13,13 +13,13 @@ import { ChatMessage } from '@/types/message';
13
13
 
14
14
  import Arguments from './Arguments';
15
15
 
16
- const CustomRender = memo<
17
- ChatMessage & {
18
- requestArgs?: string;
19
- setShowPluginRender: (show: boolean) => void;
20
- showPluginRender: boolean;
21
- }
22
- >(
16
+ interface CustomRenderProps extends ChatMessage {
17
+ requestArgs?: string;
18
+ setShowPluginRender: (value: boolean) => void;
19
+ showPluginRender: boolean;
20
+ }
21
+
22
+ const CustomRender = memo<CustomRenderProps>(
23
23
  ({
24
24
  id,
25
25
  content,
@@ -7,7 +7,6 @@ import { Center, Flexbox } from 'react-layout-kit';
7
7
 
8
8
  import { ProductLogo } from '@/components/Branding';
9
9
 
10
- // @ts-expect-error
11
10
  const MobileSwitchLoading: DynamicOptions['loading'] = memo(() => {
12
11
  const { t } = useTranslation('common');
13
12
  return (
@@ -449,6 +449,14 @@ describe('LobeGoogleAI', () => {
449
449
  });
450
450
  expect(result).toEqual({ text: 'Hello' });
451
451
  });
452
+ it('should handle thinking type messages', async () => {
453
+ const result = await instance['convertContentToGooglePart']({
454
+ type: 'thinking',
455
+ thinking: 'Hello',
456
+ signature: 'abc',
457
+ });
458
+ expect(result).toEqual(undefined);
459
+ });
452
460
 
453
461
  it('should handle base64 type images', async () => {
454
462
  const base64Image =
@@ -208,11 +208,18 @@ export class LobeGoogleAI implements LobeRuntimeAI {
208
208
  system: system_message?.content,
209
209
  };
210
210
  }
211
- private convertContentToGooglePart = async (content: UserMessageContentPart): Promise<Part> => {
211
+ private convertContentToGooglePart = async (
212
+ content: UserMessageContentPart,
213
+ ): Promise<Part | undefined> => {
212
214
  switch (content.type) {
215
+ default: {
216
+ return undefined;
217
+ }
218
+
213
219
  case 'text': {
214
220
  return { text: content.text };
215
221
  }
222
+
216
223
  case 'image_url': {
217
224
  const { mimeType, base64, type } = parseDataUri(content.image_url.url);
218
225
 
@@ -261,11 +268,17 @@ export class LobeGoogleAI implements LobeRuntimeAI {
261
268
  };
262
269
  }
263
270
 
271
+ const getParts = async () => {
272
+ if (typeof content === 'string') return [{ text: content }];
273
+
274
+ const parts = await Promise.all(
275
+ content.map(async (c) => await this.convertContentToGooglePart(c)),
276
+ );
277
+ return parts.filter(Boolean) as Part[];
278
+ };
279
+
264
280
  return {
265
- parts:
266
- typeof content === 'string'
267
- ? [{ text: content }]
268
- : await Promise.all(content.map(async (c) => await this.convertContentToGooglePart(c))),
281
+ parts: await getParts(),
269
282
  role: message.role === 'assistant' ? 'model' : 'user',
270
283
  };
271
284
  };
@@ -2,6 +2,11 @@ import { MessageToolCall } from '@/types/message';
2
2
 
3
3
  export type LLMRoleType = 'user' | 'system' | 'assistant' | 'function' | 'tool';
4
4
 
5
+ interface UserMessageContentPartThinking {
6
+ signature: string;
7
+ thinking: string;
8
+ type: 'thinking';
9
+ }
5
10
  interface UserMessageContentPartText {
6
11
  text: string;
7
12
  type: 'text';
@@ -15,7 +20,10 @@ interface UserMessageContentPartImage {
15
20
  type: 'image_url';
16
21
  }
17
22
 
18
- export type UserMessageContentPart = UserMessageContentPartText | UserMessageContentPartImage;
23
+ export type UserMessageContentPart =
24
+ | UserMessageContentPartText
25
+ | UserMessageContentPartImage
26
+ | UserMessageContentPartThinking;
19
27
 
20
28
  export interface OpenAIChatMessage {
21
29
  /**
@@ -383,6 +383,119 @@ describe('anthropicHelpers', () => {
383
383
  { content: '继续', role: 'user' },
384
384
  ]);
385
385
  });
386
+
387
+ it('should correctly handle thinking content part', async () => {
388
+ const messages: OpenAIChatMessage[] = [
389
+ {
390
+ content: '告诉我杭州和北京的天气,先回答我好的',
391
+ role: 'user',
392
+ },
393
+ {
394
+ content: [
395
+ { thinking: '经过一番思考', type: 'thinking', signature: '123' },
396
+ {
397
+ type: 'text',
398
+ text: '好的,我会为您查询杭州和北京的天气信息。我现在就开始查询这两个城市的当前天气情况。',
399
+ },
400
+ ],
401
+ role: 'assistant',
402
+ tool_calls: [
403
+ {
404
+ function: {
405
+ arguments: '{"city": "\\u676d\\u5dde"}',
406
+ name: 'realtime-weather____fetchCurrentWeather',
407
+ },
408
+ id: 'toolu_018PNQkH8ChbjoJz4QBiFVod',
409
+ type: 'function',
410
+ },
411
+ {
412
+ function: {
413
+ arguments: '{"city": "\\u5317\\u4eac"}',
414
+ name: 'realtime-weather____fetchCurrentWeather',
415
+ },
416
+ id: 'toolu_018VQTQ6fwAEC3eppuEfMxPp',
417
+ type: 'function',
418
+ },
419
+ ],
420
+ },
421
+ {
422
+ content:
423
+ '[{"city":"杭州市","adcode":"330100","province":"浙江","reporttime":"2024-06-24 17:02:14","casts":[{"date":"2024-06-24","week":"1","dayweather":"小雨","nightweather":"中雨","daytemp":"26","nighttemp":"20","daywind":"西","nightwind":"西","daypower":"1-3","nightpower":"1-3","daytemp_float":"26.0","nighttemp_float":"20.0"},{"date":"2024-06-25","week":"2","dayweather":"大雨","nightweather":"中雨","daytemp":"23","nighttemp":"19","daywind":"东","nightwind":"东","daypower":"1-3","nightpower":"1-3","daytemp_float":"23.0","nighttemp_float":"19.0"},{"date":"2024-06-26","week":"3","dayweather":"中雨","nightweather":"中雨","daytemp":"24","nighttemp":"21","daywind":"东南","nightwind":"东南","daypower":"1-3","nightpower":"1-3","daytemp_float":"24.0","nighttemp_float":"21.0"},{"date":"2024-06-27","week":"4","dayweather":"中雨-大雨","nightweather":"中雨","daytemp":"24","nighttemp":"22","daywind":"南","nightwind":"南","daypower":"1-3","nightpower":"1-3","daytemp_float":"24.0","nighttemp_float":"22.0"}]}]',
424
+ name: 'realtime-weather____fetchCurrentWeather',
425
+ role: 'tool',
426
+ tool_call_id: 'toolu_018PNQkH8ChbjoJz4QBiFVod',
427
+ },
428
+ {
429
+ content:
430
+ '[{"city":"北京市","adcode":"110000","province":"北京","reporttime":"2024-06-24 17:03:11","casts":[{"date":"2024-06-24","week":"1","dayweather":"晴","nightweather":"晴","daytemp":"33","nighttemp":"20","daywind":"北","nightwind":"北","daypower":"1-3","nightpower":"1-3","daytemp_float":"33.0","nighttemp_float":"20.0"},{"date":"2024-06-25","week":"2","dayweather":"晴","nightweather":"晴","daytemp":"35","nighttemp":"21","daywind":"东南","nightwind":"东南","daypower":"1-3","nightpower":"1-3","daytemp_float":"35.0","nighttemp_float":"21.0"},{"date":"2024-06-26","week":"3","dayweather":"晴","nightweather":"晴","daytemp":"35","nighttemp":"23","daywind":"西南","nightwind":"西南","daypower":"1-3","nightpower":"1-3","daytemp_float":"35.0","nighttemp_float":"23.0"},{"date":"2024-06-27","week":"4","dayweather":"多云","nightweather":"多云","daytemp":"35","nighttemp":"23","daywind":"西南","nightwind":"西南","daypower":"1-3","nightpower":"1-3","daytemp_float":"35.0","nighttemp_float":"23.0"}]}]',
431
+ name: 'realtime-weather____fetchCurrentWeather',
432
+ role: 'tool',
433
+ tool_call_id: 'toolu_018VQTQ6fwAEC3eppuEfMxPp',
434
+ },
435
+ {
436
+ content: '继续',
437
+ role: 'user',
438
+ },
439
+ ];
440
+
441
+ const contents = await buildAnthropicMessages(messages);
442
+
443
+ expect(contents).toEqual([
444
+ { content: '告诉我杭州和北京的天气,先回答我好的', role: 'user' },
445
+ {
446
+ content: [
447
+ {
448
+ signature: '123',
449
+ thinking: '经过一番思考',
450
+ type: 'thinking',
451
+ },
452
+ {
453
+ text: '好的,我会为您查询杭州和北京的天气信息。我现在就开始查询这两个城市的当前天气情况。',
454
+ type: 'text',
455
+ },
456
+ {
457
+ id: 'toolu_018PNQkH8ChbjoJz4QBiFVod',
458
+ input: { city: '杭州' },
459
+ name: 'realtime-weather____fetchCurrentWeather',
460
+ type: 'tool_use',
461
+ },
462
+ {
463
+ id: 'toolu_018VQTQ6fwAEC3eppuEfMxPp',
464
+ input: { city: '北京' },
465
+ name: 'realtime-weather____fetchCurrentWeather',
466
+ type: 'tool_use',
467
+ },
468
+ ],
469
+ role: 'assistant',
470
+ },
471
+ {
472
+ content: [
473
+ {
474
+ content: [
475
+ {
476
+ text: '[{"city":"杭州市","adcode":"330100","province":"浙江","reporttime":"2024-06-24 17:02:14","casts":[{"date":"2024-06-24","week":"1","dayweather":"小雨","nightweather":"中雨","daytemp":"26","nighttemp":"20","daywind":"西","nightwind":"西","daypower":"1-3","nightpower":"1-3","daytemp_float":"26.0","nighttemp_float":"20.0"},{"date":"2024-06-25","week":"2","dayweather":"大雨","nightweather":"中雨","daytemp":"23","nighttemp":"19","daywind":"东","nightwind":"东","daypower":"1-3","nightpower":"1-3","daytemp_float":"23.0","nighttemp_float":"19.0"},{"date":"2024-06-26","week":"3","dayweather":"中雨","nightweather":"中雨","daytemp":"24","nighttemp":"21","daywind":"东南","nightwind":"东南","daypower":"1-3","nightpower":"1-3","daytemp_float":"24.0","nighttemp_float":"21.0"},{"date":"2024-06-27","week":"4","dayweather":"中雨-大雨","nightweather":"中雨","daytemp":"24","nighttemp":"22","daywind":"南","nightwind":"南","daypower":"1-3","nightpower":"1-3","daytemp_float":"24.0","nighttemp_float":"22.0"}]}]',
477
+ type: 'text',
478
+ },
479
+ ],
480
+ tool_use_id: 'toolu_018PNQkH8ChbjoJz4QBiFVod',
481
+ type: 'tool_result',
482
+ },
483
+ {
484
+ content: [
485
+ {
486
+ text: '[{"city":"北京市","adcode":"110000","province":"北京","reporttime":"2024-06-24 17:03:11","casts":[{"date":"2024-06-24","week":"1","dayweather":"晴","nightweather":"晴","daytemp":"33","nighttemp":"20","daywind":"北","nightwind":"北","daypower":"1-3","nightpower":"1-3","daytemp_float":"33.0","nighttemp_float":"20.0"},{"date":"2024-06-25","week":"2","dayweather":"晴","nightweather":"晴","daytemp":"35","nighttemp":"21","daywind":"东南","nightwind":"东南","daypower":"1-3","nightpower":"1-3","daytemp_float":"35.0","nighttemp_float":"21.0"},{"date":"2024-06-26","week":"3","dayweather":"晴","nightweather":"晴","daytemp":"35","nighttemp":"23","daywind":"西南","nightwind":"西南","daypower":"1-3","nightpower":"1-3","daytemp_float":"35.0","nighttemp_float":"23.0"},{"date":"2024-06-27","week":"4","dayweather":"多云","nightweather":"多云","daytemp":"35","nighttemp":"23","daywind":"西南","nightwind":"西南","daypower":"1-3","nightpower":"1-3","daytemp_float":"35.0","nighttemp_float":"23.0"}]}]',
487
+ type: 'text',
488
+ },
489
+ ],
490
+ tool_use_id: 'toolu_018VQTQ6fwAEC3eppuEfMxPp',
491
+ type: 'tool_result',
492
+ },
493
+ ],
494
+ role: 'user',
495
+ },
496
+ { content: '继续', role: 'user' },
497
+ ]);
498
+ });
386
499
  });
387
500
 
388
501
  describe('buildAnthropicTools', () => {
@@ -10,6 +10,7 @@ export const buildAnthropicBlock = async (
10
10
  content: UserMessageContentPart,
11
11
  ): Promise<Anthropic.ContentBlock | Anthropic.ImageBlockParam> => {
12
12
  switch (content.type) {
13
+ case 'thinking':
13
14
  case 'text': {
14
15
  // just pass-through the content
15
16
  return content as any;
@@ -83,13 +84,15 @@ export const buildAnthropicMessage = async (
83
84
  // if there is tool_calls , we need to covert the tool_calls to tool_use content block
84
85
  // refs: https://docs.anthropic.com/claude/docs/tool-use#tool-use-and-tool-result-content-blocks
85
86
  if (message.tool_calls) {
87
+ const messageContent =
88
+ typeof content === 'string'
89
+ ? [{ text: message.content, type: 'text' }]
90
+ : await Promise.all(content.map(async (c) => await buildAnthropicBlock(c)));
91
+
86
92
  return {
87
93
  content: [
88
94
  // avoid empty text content block
89
- !!message.content && {
90
- text: message.content as string,
91
- type: 'text',
92
- },
95
+ ...messageContent,
93
96
  ...(message.tool_calls.map((tool) => ({
94
97
  id: tool.id,
95
98
  input: JSON.parse(tool.function.arguments),