@lobehub/chat 1.95.0 → 1.96.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. package/CHANGELOG.md +58 -0
  2. package/Dockerfile +2 -0
  3. package/Dockerfile.database +2 -0
  4. package/Dockerfile.pglite +2 -0
  5. package/changelog/v1.json +21 -0
  6. package/locales/ar/setting.json +1 -1
  7. package/locales/bg-BG/setting.json +1 -1
  8. package/locales/de-DE/setting.json +1 -1
  9. package/locales/en-US/setting.json +1 -1
  10. package/locales/es-ES/setting.json +1 -1
  11. package/locales/fa-IR/setting.json +1 -1
  12. package/locales/fr-FR/setting.json +1 -1
  13. package/locales/it-IT/setting.json +1 -1
  14. package/locales/ja-JP/setting.json +1 -1
  15. package/locales/ko-KR/setting.json +1 -1
  16. package/locales/nl-NL/setting.json +1 -1
  17. package/locales/pl-PL/setting.json +1 -1
  18. package/locales/pt-BR/setting.json +1 -1
  19. package/locales/ru-RU/setting.json +1 -1
  20. package/locales/tr-TR/setting.json +1 -1
  21. package/locales/vi-VN/setting.json +1 -1
  22. package/locales/zh-CN/setting.json +1 -1
  23. package/locales/zh-TW/setting.json +1 -1
  24. package/package.json +1 -1
  25. package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +2 -0
  26. package/src/config/aiModels/index.ts +3 -0
  27. package/src/config/aiModels/minimax.ts +1 -38
  28. package/src/config/aiModels/v0.ts +63 -0
  29. package/src/config/llm.ts +6 -0
  30. package/src/config/modelProviders/index.ts +4 -0
  31. package/src/config/modelProviders/v0.ts +17 -0
  32. package/src/libs/model-runtime/runtimeMap.ts +2 -0
  33. package/src/libs/model-runtime/types/type.ts +1 -0
  34. package/src/libs/model-runtime/utils/modelParse.ts +6 -0
  35. package/src/libs/model-runtime/v0/index.ts +21 -0
  36. package/src/locales/default/setting.ts +1 -1
  37. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +25 -3
  38. package/src/types/user/settings/keyVaults.ts +1 -0
  39. package/src/utils/client/parserPlaceholder.test.ts +0 -21
  40. package/src/utils/client/parserPlaceholder.ts +2 -15
package/CHANGELOG.md CHANGED
@@ -2,6 +2,64 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.96.1](https://github.com/lobehub/lobe-chat/compare/v1.96.0...v1.96.1)
6
+
7
+ <sup>Released on **2025-06-21**</sup>
8
+
9
+ #### 🐛 Bug Fixes
10
+
11
+ - **misc**: Fix `MiniMax-M1` reasoning tag missing.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's fixed
19
+
20
+ - **misc**: Fix `MiniMax-M1` reasoning tag missing, closes [#8240](https://github.com/lobehub/lobe-chat/issues/8240) ([ea76c11](https://github.com/lobehub/lobe-chat/commit/ea76c11))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ## [Version 1.96.0](https://github.com/lobehub/lobe-chat/compare/v1.95.0...v1.96.0)
31
+
32
+ <sup>Released on **2025-06-20**</sup>
33
+
34
+ #### ✨ Features
35
+
36
+ - **misc**: Add v0 (Vercel) provider support.
37
+
38
+ #### 🐛 Bug Fixes
39
+
40
+ - **misc**: Fix inputTemplate behavior.
41
+
42
+ <br/>
43
+
44
+ <details>
45
+ <summary><kbd>Improvements and Fixes</kbd></summary>
46
+
47
+ #### What's improved
48
+
49
+ - **misc**: Add v0 (Vercel) provider support, closes [#8235](https://github.com/lobehub/lobe-chat/issues/8235) ([5842a18](https://github.com/lobehub/lobe-chat/commit/5842a18))
50
+
51
+ #### What's fixed
52
+
53
+ - **misc**: Fix inputTemplate behavior, closes [#8204](https://github.com/lobehub/lobe-chat/issues/8204) ([61c2c3c](https://github.com/lobehub/lobe-chat/commit/61c2c3c))
54
+
55
+ </details>
56
+
57
+ <div align="right">
58
+
59
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
60
+
61
+ </div>
62
+
5
63
  ## [Version 1.95.0](https://github.com/lobehub/lobe-chat/compare/v1.94.17...v1.95.0)
6
64
 
7
65
  <sup>Released on **2025-06-20**</sup>
package/Dockerfile CHANGED
@@ -226,6 +226,8 @@ ENV \
226
226
  TOGETHERAI_API_KEY="" TOGETHERAI_MODEL_LIST="" \
227
227
  # Upstage
228
228
  UPSTAGE_API_KEY="" UPSTAGE_MODEL_LIST="" \
229
+ # v0 (Vercel)
230
+ V0_API_KEY="" V0_MODEL_LIST="" \
229
231
  # vLLM
230
232
  VLLM_API_KEY="" VLLM_MODEL_LIST="" VLLM_PROXY_URL="" \
231
233
  # Wenxin
package/Dockerfile.database CHANGED
@@ -270,6 +270,8 @@ ENV \
270
270
  TOGETHERAI_API_KEY="" TOGETHERAI_MODEL_LIST="" \
271
271
  # Upstage
272
272
  UPSTAGE_API_KEY="" UPSTAGE_MODEL_LIST="" \
273
+ # v0 (Vercel)
274
+ V0_API_KEY="" V0_MODEL_LIST="" \
273
275
  # vLLM
274
276
  VLLM_API_KEY="" VLLM_MODEL_LIST="" VLLM_PROXY_URL="" \
275
277
  # Wenxin
package/Dockerfile.pglite CHANGED
@@ -224,6 +224,8 @@ ENV \
224
224
  TOGETHERAI_API_KEY="" TOGETHERAI_MODEL_LIST="" \
225
225
  # Upstage
226
226
  UPSTAGE_API_KEY="" UPSTAGE_MODEL_LIST="" \
227
+ # v0 (Vercel)
228
+ V0_API_KEY="" V0_MODEL_LIST="" \
227
229
  # vLLM
228
230
  VLLM_API_KEY="" VLLM_MODEL_LIST="" VLLM_PROXY_URL="" \
229
231
  # Wenxin
package/changelog/v1.json CHANGED
@@ -1,4 +1,25 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "fixes": [
5
+ "Fix MiniMax-M1 reasoning tag missing."
6
+ ]
7
+ },
8
+ "date": "2025-06-21",
9
+ "version": "1.96.1"
10
+ },
11
+ {
12
+ "children": {
13
+ "features": [
14
+ "Add v0 (Vercel) provider support."
15
+ ],
16
+ "fixes": [
17
+ "Fix inputTemplate behavior."
18
+ ]
19
+ },
20
+ "date": "2025-06-20",
21
+ "version": "1.96.0"
22
+ },
2
23
  {
3
24
  "children": {
4
25
  "features": [
package/locales/ar/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "سيتم ملء أحدث رسالة من المستخدم في هذا القالب",
223
- "placeholder": "القالب المُعالج مسبقًا {{input_template}} سيتم استبداله بالمعلومات المُدخلة في الوقت الحقيقي",
223
+ "placeholder": "القالب المُعالج مسبقًا {{text}} سيتم استبداله بالمعلومات المُدخلة في الوقت الحقيقي",
224
224
  "title": "معالجة مُدخلات المستخدم"
225
225
  },
226
226
  "submit": "تحديث تفضيلات الدردشة",
package/locales/bg-BG/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "Последното съобщение на потребителя ще бъде попълнено в този шаблон",
223
- "placeholder": "Шаблонът за предварителна обработка {{input_template}} ще бъде заменен с информация за въвеждане в реално време",
223
+ "placeholder": "Шаблонът за предварителна обработка {{text}} ще бъде заменен с информация за въвеждане в реално време",
224
224
  "title": "Предварителна обработка на потребителския вход"
225
225
  },
226
226
  "submit": "Актуализиране на предпочитанията за чат",
package/locales/de-DE/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "Die neueste Benutzernachricht wird in dieses Template eingefügt",
223
- "placeholder": "Vorlagen-{{input_template}} werden durch Echtzeit-Eingabeinformationen ersetzt",
223
+ "placeholder": "Vorlagen-{{text}} werden durch Echtzeit-Eingabeinformationen ersetzt",
224
224
  "title": "Benutzereingabe-Vorverarbeitung"
225
225
  },
226
226
  "submit": "Chat-Präferenzen aktualisieren",
package/locales/en-US/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "The user's latest message will be filled into this template",
223
- "placeholder": "Preprocessing template {{input_template}} will be replaced with real-time input information",
223
+ "placeholder": "Preprocessing template {{text}} will be replaced with real-time input information",
224
224
  "title": "User Input Preprocessing"
225
225
  },
226
226
  "submit": "Update Chat Preferences",
package/locales/es-ES/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "El último mensaje del usuario se completará en esta plantilla",
223
- "placeholder": "La plantilla de preprocesamiento {{input_template}} se reemplazará por la información de entrada en tiempo real",
223
+ "placeholder": "La plantilla de preprocesamiento {{text}} se reemplazará por la información de entrada en tiempo real",
224
224
  "title": "Preprocesamiento de entrada del usuario"
225
225
  },
226
226
  "submit": "Actualizar preferencias de chat",
package/locales/fa-IR/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "آخرین پیام کاربر در این قالب پر می‌شود",
223
- "placeholder": "قالب پیش‌پردازش {{input_template}} با اطلاعات ورودی لحظه‌ای جایگزین می‌شود",
223
+ "placeholder": "قالب پیش‌پردازش {{text}} با اطلاعات ورودی لحظه‌ای جایگزین می‌شود",
224
224
  "title": "پیش‌پردازش ورودی کاربر"
225
225
  },
226
226
  "submit": "به‌روزرسانی ترجیحات چت",
package/locales/fr-FR/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "Le dernier message de l'utilisateur sera rempli dans ce modèle",
223
- "placeholder": "Le modèle de prétraitement {{input_template}} sera remplacé par les informations d'entrée en temps réel",
223
+ "placeholder": "Le modèle de prétraitement {{text}} sera remplacé par les informations d'entrée en temps réel",
224
224
  "title": "Modèle de prétraitement de l'entrée utilisateur"
225
225
  },
226
226
  "submit": "Mettre à jour les préférences de chat",
package/locales/it-IT/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "Il template verrà popolato con l'ultimo messaggio dell'utente",
223
- "placeholder": "Il modello di input {{input_template}} verrà sostituito con le informazioni in tempo reale",
223
+ "placeholder": "Il modello di input {{text}} verrà sostituito con le informazioni in tempo reale",
224
224
  "title": "Pre-elaborazione dell'input dell'utente"
225
225
  },
226
226
  "submit": "Aggiorna preferenze chat",
package/locales/ja-JP/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "ユーザーの最新メッセージがこのテンプレートに埋め込まれます",
223
- "placeholder": "入力テンプレート {{input_template}} はリアルタイムの入力情報に置き換えられます",
223
+ "placeholder": "入力テンプレート {{text}} はリアルタイムの入力情報に置き換えられます",
224
224
  "title": "ユーザー入力のプリプロセス"
225
225
  },
226
226
  "submit": "チャットの好みを更新",
package/locales/ko-KR/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "사용자의 최신 메시지가이 템플릿에 채워집니다",
223
- "placeholder": "입력 템플릿 {{input_template}}은 실시간 입력 정보로 대체됩니다",
223
+ "placeholder": "입력 템플릿 {{text}}은 실시간 입력 정보로 대체됩니다",
224
224
  "title": "사용자 입력 전처리"
225
225
  },
226
226
  "submit": "채팅 선호도 업데이트",
package/locales/nl-NL/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "De meest recente gebruikersboodschap wordt ingevuld in dit sjabloon",
223
- "placeholder": "Voorbewerkingssjabloon {{input_template}} wordt vervangen door realtime invoer",
223
+ "placeholder": "Voorbewerkingssjabloon {{text}} wordt vervangen door realtime invoer",
224
224
  "title": "Voorbewerking van gebruikersinvoer"
225
225
  },
226
226
  "submit": "Chatvoorkeuren bijwerken",
package/locales/pl-PL/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "Ostatnia wiadomość użytkownika zostanie wypełniona w tym szablonie",
223
- "placeholder": "Szablon wejściowy {{input_template}} zostanie zastąpiony rzeczywistą wiadomością",
223
+ "placeholder": "Szablon wejściowy {{text}} zostanie zastąpiony rzeczywistą wiadomością",
224
224
  "title": "Szablon wejściowy"
225
225
  },
226
226
  "submit": "Zaktualizuj preferencje czatu",
package/locales/pt-BR/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "A última mensagem do usuário será preenchida neste modelo",
223
- "placeholder": "O modelo de pré-processamento {{input_template}} será substituído pela entrada em tempo real",
223
+ "placeholder": "O modelo de pré-processamento {{text}} será substituído pela entrada em tempo real",
224
224
  "title": "Pré-processamento de entrada do usuário"
225
225
  },
226
226
  "submit": "Atualizar preferências de chat",
package/locales/ru-RU/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "Последнее сообщение пользователя будет использовано в этом шаблоне",
223
- "placeholder": "Шаблон ввода {{input_template}} будет заменен на реальные данные",
223
+ "placeholder": "Шаблон ввода {{text}} будет заменен на реальные данные",
224
224
  "title": "Шаблон ввода пользователя"
225
225
  },
226
226
  "submit": "Обновить предпочтения чата",
package/locales/tr-TR/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "Kullanıcının son mesajı bu şablona doldurulur",
223
- "placeholder": "Ön işleme şablonu {{input_template}}, gerçek zamanlı giriş bilgileri ile değiştirilir",
223
+ "placeholder": "Ön işleme şablonu {{text}}, gerçek zamanlı giriş bilgileri ile değiştirilir",
224
224
  "title": "Kullanıcı Girişi Ön İşleme"
225
225
  },
226
226
  "submit": "Sohbet tercihlerini güncelle",
package/locales/vi-VN/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "Tin nhắn mới nhất của người dùng sẽ được điền vào mẫu này",
223
- "placeholder": "Mẫu xử lý trước {{input_template}} sẽ được thay thế bằng thông tin nhập thời gian thực",
223
+ "placeholder": "Mẫu xử lý trước {{text}} sẽ được thay thế bằng thông tin nhập thời gian thực",
224
224
  "title": "Mẫu xử lý đầu vào của người dùng"
225
225
  },
226
226
  "submit": "Cập nhật sở thích trò chuyện",
package/locales/zh-CN/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "用户最新的一条消息会填充到此模板",
223
- "placeholder": "预处理模版 {{input_template}} 将替换为实时输入信息",
223
+ "placeholder": "预处理模版 {{text}} 将替换为实时输入信息",
224
224
  "title": "用户输入预处理"
225
225
  },
226
226
  "submit": "更新聊天偏好",
package/locales/zh-TW/setting.json CHANGED
@@ -220,7 +220,7 @@
220
220
  },
221
221
  "inputTemplate": {
222
222
  "desc": "使用者最新的一條訊息會填充到此模板",
223
- "placeholder": "預處理模板 {{input_template}} 將替換為實時輸入資訊",
223
+ "placeholder": "預處理模板 {{text}} 將替換為實時輸入資訊",
224
224
  "title": "使用者輸入預處理"
225
225
  },
226
226
  "submit": "更新聊天偏好",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.95.0",
3
+ "version": "1.96.1",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx CHANGED
@@ -35,6 +35,7 @@ import {
35
35
  TaichuProviderCard,
36
36
  TogetherAIProviderCard,
37
37
  UpstageProviderCard,
38
+ V0ProviderCard,
38
39
  VLLMProviderCard,
39
40
  WenxinProviderCard,
40
41
  XAIProviderCard,
@@ -90,6 +91,7 @@ export const useProviderList = (): ProviderItem[] => {
90
91
  SambaNovaProviderCard,
91
92
  Search1APIProviderCard,
92
93
  CohereProviderCard,
94
+ V0ProviderCard,
93
95
  QiniuProviderCard,
94
96
  QwenProviderCard,
95
97
  WenxinProviderCard,
package/src/config/aiModels/index.ts CHANGED
@@ -45,6 +45,7 @@ import { default as taichu } from './taichu';
45
45
  import { default as tencentcloud } from './tencentcloud';
46
46
  import { default as togetherai } from './togetherai';
47
47
  import { default as upstage } from './upstage';
48
+ import { default as v0 } from './v0';
48
49
  import { default as vertexai } from './vertexai';
49
50
  import { default as vllm } from './vllm';
50
51
  import { default as volcengine } from './volcengine';
@@ -119,6 +120,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
119
120
  tencentcloud,
120
121
  togetherai,
121
122
  upstage,
123
+ v0,
122
124
  vertexai,
123
125
  vllm,
124
126
  volcengine,
@@ -174,6 +176,7 @@ export { default as taichu } from './taichu';
174
176
  export { default as tencentcloud } from './tencentcloud';
175
177
  export { default as togetherai } from './togetherai';
176
178
  export { default as upstage } from './upstage';
179
+ export { default as v0 } from './v0';
177
180
  export { default as vertexai } from './vertexai';
178
181
  export { default as vllm } from './vllm';
179
182
  export { default as volcengine } from './volcengine';
package/src/config/aiModels/minimax.ts CHANGED
@@ -4,6 +4,7 @@ const minimaxChatModels: AIChatModelCard[] = [
4
4
  {
5
5
  abilities: {
6
6
  functionCall: true,
7
+ reasoning: true,
7
8
  search: true,
8
9
  },
9
10
  contextWindowTokens: 1_000_192,
@@ -48,44 +49,6 @@ const minimaxChatModels: AIChatModelCard[] = [
48
49
  },
49
50
  type: 'chat',
50
51
  },
51
- {
52
- abilities: {
53
- functionCall: true,
54
- search: true,
55
- vision: true,
56
- },
57
- contextWindowTokens: 245_760,
58
- description: '适用于广泛的自然语言处理任务,包括文本生成、对话系统等。',
59
- displayName: 'abab6.5s',
60
- id: 'abab6.5s-chat',
61
- maxOutput: 245_760,
62
- pricing: {
63
- currency: 'CNY',
64
- input: 1,
65
- output: 1,
66
- },
67
- settings: {
68
- searchImpl: 'params',
69
- },
70
- type: 'chat',
71
- },
72
- {
73
- abilities: {
74
- reasoning: true,
75
- },
76
- contextWindowTokens: 64_000,
77
- description:
78
- 'DeepSeek 推出的推理模型。在输出最终回答之前,模型会先输出一段思维链内容,以提升最终答案的准确性。',
79
- displayName: 'DeepSeek R1',
80
- id: 'DeepSeek-R1',
81
- maxOutput: 64_000,
82
- pricing: {
83
- currency: 'CNY',
84
- input: 4,
85
- output: 16,
86
- },
87
- type: 'chat',
88
- },
89
52
  ];
90
53
 
91
54
  export const allModels = [...minimaxChatModels];
package/src/config/aiModels/v0.ts ADDED
@@ -0,0 +1,63 @@
1
+ import { AIChatModelCard } from '@/types/aiModel';
2
+
3
+ const v0ChatModels: AIChatModelCard[] = [
4
+ {
5
+ abilities: {
6
+ functionCall: true,
7
+ reasoning: true,
8
+ vision: true,
9
+ },
10
+ contextWindowTokens: 512_000,
11
+ description:
12
+ 'v0-1.5-lg 模型适用于高级思考或推理任务',
13
+ displayName: 'v0-1.5-lg',
14
+ enabled: true,
15
+ id: 'v0-1.5-lg',
16
+ maxOutput: 32_000,
17
+ pricing: {
18
+ input: 15,
19
+ output: 75,
20
+ },
21
+ type: 'chat',
22
+ },
23
+ {
24
+ abilities: {
25
+ functionCall: true,
26
+ reasoning: true,
27
+ vision: true,
28
+ },
29
+ contextWindowTokens: 128_000,
30
+ description:
31
+ 'v0-1.5-md 模型适用于日常任务和用户界面(UI)生成',
32
+ displayName: 'v0-1.5-md',
33
+ enabled: true,
34
+ id: 'v0-1.5-md',
35
+ maxOutput: 32_000,
36
+ pricing: {
37
+ input: 3,
38
+ output: 15,
39
+ },
40
+ type: 'chat',
41
+ },
42
+ {
43
+ abilities: {
44
+ functionCall: true,
45
+ vision: true,
46
+ },
47
+ contextWindowTokens: 128_000,
48
+ description:
49
+ 'v0-1.0-md 模型是通过 v0 API 提供服务的旧版模型',
50
+ displayName: 'v0-1.0-md',
51
+ id: 'v0-1.0-md',
52
+ maxOutput: 32_000,
53
+ pricing: {
54
+ input: 3,
55
+ output: 15,
56
+ },
57
+ type: 'chat',
58
+ },
59
+ ];
60
+
61
+ export const allModels = [...v0ChatModels];
62
+
63
+ export default allModels;
package/src/config/llm.ts CHANGED
@@ -165,6 +165,9 @@ export const getLLMConfig = () => {
165
165
 
166
166
  ENABLED_MODELSCOPE: z.boolean(),
167
167
  MODELSCOPE_API_KEY: z.string().optional(),
168
+
169
+ ENABLED_V0: z.boolean(),
170
+ V0_API_KEY: z.string().optional(),
168
171
  },
169
172
  runtimeEnv: {
170
173
  API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -328,6 +331,9 @@ export const getLLMConfig = () => {
328
331
 
329
332
  ENABLED_MODELSCOPE: !!process.env.MODELSCOPE_API_KEY,
330
333
  MODELSCOPE_API_KEY: process.env.MODELSCOPE_API_KEY,
334
+
335
+ ENABLED_V0: !!process.env.V0_API_KEY,
336
+ V0_API_KEY: process.env.V0_API_KEY,
331
337
  },
332
338
  });
333
339
  };
package/src/config/modelProviders/index.ts CHANGED
@@ -45,6 +45,7 @@ import TaichuProvider from './taichu';
45
45
  import TencentcloudProvider from './tencentcloud';
46
46
  import TogetherAIProvider from './togetherai';
47
47
  import UpstageProvider from './upstage';
48
+ import V0Provider from './v0';
48
49
  import VertexAIProvider from './vertexai';
49
50
  import VLLMProvider from './vllm';
50
51
  import VolcengineProvider from './volcengine';
@@ -83,6 +84,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
83
84
  JinaProvider.chatModels,
84
85
  SambaNovaProvider.chatModels,
85
86
  CohereProvider.chatModels,
87
+ V0Provider.chatModels,
86
88
  ZeroOneProvider.chatModels,
87
89
  StepfunProvider.chatModels,
88
90
  NovitaProvider.chatModels,
@@ -139,6 +141,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
139
141
  JinaProvider,
140
142
  SambaNovaProvider,
141
143
  CohereProvider,
144
+ V0Provider,
142
145
  QwenProvider,
143
146
  WenxinProvider,
144
147
  TencentcloudProvider,
@@ -218,6 +221,7 @@ export { default as TaichuProviderCard } from './taichu';
218
221
  export { default as TencentCloudProviderCard } from './tencentcloud';
219
222
  export { default as TogetherAIProviderCard } from './togetherai';
220
223
  export { default as UpstageProviderCard } from './upstage';
224
+ export { default as V0ProviderCard } from './v0';
221
225
  export { default as VertexAIProviderCard } from './vertexai';
222
226
  export { default as VLLMProviderCard } from './vllm';
223
227
  export { default as VolcengineProviderCard } from './volcengine';
package/src/config/modelProviders/v0.ts ADDED
@@ -0,0 +1,17 @@
1
+ import { ModelProviderCard } from '@/types/llm';
2
+
3
+ const V0: ModelProviderCard = {
4
+ chatModels: [],
5
+ checkModel: 'v0-1.5-md',
6
+ description:
7
+ 'v0 是一个配对编程助手,你只需用自然语言描述想法,它就能为你的项目生成代码和用户界面(UI)',
8
+ id: 'v0',
9
+ modelsUrl: 'https://vercel.com/docs/v0/api#models',
10
+ name: 'Vercel (v0)',
11
+ settings: {
12
+ sdkType: 'openai',
13
+ },
14
+ url: 'https://v0.dev',
15
+ };
16
+
17
+ export default V0;
package/src/libs/model-runtime/runtimeMap.ts CHANGED
@@ -43,6 +43,7 @@ import { LobeTaichuAI } from './taichu';
43
43
  import { LobeTencentCloudAI } from './tencentcloud';
44
44
  import { LobeTogetherAI } from './togetherai';
45
45
  import { LobeUpstageAI } from './upstage';
46
+ import { LobeV0AI } from './v0';
46
47
  import { LobeVLLMAI } from './vllm';
47
48
  import { LobeVolcengineAI } from './volcengine';
48
49
  import { LobeWenxinAI } from './wenxin';
@@ -97,6 +98,7 @@ export const providerRuntimeMap = {
97
98
  tencentcloud: LobeTencentCloudAI,
98
99
  togetherai: LobeTogetherAI,
99
100
  upstage: LobeUpstageAI,
101
+ v0: LobeV0AI,
100
102
  vllm: LobeVLLMAI,
101
103
  volcengine: LobeVolcengineAI,
102
104
  wenxin: LobeWenxinAI,
package/src/libs/model-runtime/types/type.ts CHANGED
@@ -67,6 +67,7 @@ export enum ModelProvider {
67
67
  TencentCloud = 'tencentcloud',
68
68
  TogetherAI = 'togetherai',
69
69
  Upstage = 'upstage',
70
+ V0 = 'v0',
70
71
  VLLM = 'vllm',
71
72
  VertexAI = 'vertexai',
72
73
  Volcengine = 'volcengine',
package/src/libs/model-runtime/utils/modelParse.ts CHANGED
@@ -48,6 +48,11 @@ export const MODEL_LIST_CONFIGS = {
48
48
  reasoningKeywords: ['qvq', 'qwq', 'qwen3'],
49
49
  visionKeywords: ['qvq', 'vl'],
50
50
  },
51
+ v0: {
52
+ functionCallKeywords: ['v0'],
53
+ reasoningKeywords: ['v0-1.5'],
54
+ visionKeywords: ['v0'],
55
+ },
51
56
  volcengine: {
52
57
  functionCallKeywords: ['doubao-1.5'],
53
58
  reasoningKeywords: ['thinking', '-r1'],
@@ -72,6 +77,7 @@ export const PROVIDER_DETECTION_CONFIG = {
72
77
  llama: ['llama'],
73
78
  openai: ['o1', 'o3', 'o4', 'gpt-'],
74
79
  qwen: ['qwen', 'qwq', 'qvq'],
80
+ v0: ['v0'],
75
81
  volcengine: ['doubao'],
76
82
  zeroone: ['yi-'],
77
83
  zhipu: ['glm'],
package/src/libs/model-runtime/v0/index.ts ADDED
@@ -0,0 +1,21 @@
1
+ import { ModelProvider } from '../types';
2
+ import { processMultiProviderModelList } from '../utils/modelParse';
3
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
4
+
5
+ export interface V0ModelCard {
6
+ id: string;
7
+ }
8
+
9
+ export const LobeV0AI = createOpenAICompatibleRuntime({
10
+ baseURL: 'https://api.v0.dev/v1',
11
+ debug: {
12
+ chatCompletion: () => process.env.DEBUG_V0_CHAT_COMPLETION === '1',
13
+ },
14
+ models: async ({ client }) => {
15
+ const modelsPage = (await client.models.list()) as any;
16
+ const modelList: V0ModelCard[] = modelsPage.data;
17
+
18
+ return processMultiProviderModelList(modelList);
19
+ },
20
+ provider: ModelProvider.V0,
21
+ });
package/src/locales/default/setting.ts CHANGED
@@ -224,7 +224,7 @@ export default {
224
224
  },
225
225
  inputTemplate: {
226
226
  desc: '用户最新的一条消息会填充到此模板',
227
- placeholder: '预处理模版 {{input_template}} 将替换为实时输入信息',
227
+ placeholder: '预处理模版 {{text}} 将替换为实时输入信息',
228
228
  title: '用户输入预处理',
229
229
  },
230
230
  submit: '更新聊天偏好',
package/src/store/chat/slices/aiChat/actions/generateAIChat.ts CHANGED
@@ -1,6 +1,7 @@
1
1
  /* eslint-disable sort-keys-fix/sort-keys-fix, typescript-sort-keys/interface */
2
2
  // Disable the auto sort key eslint rule to make the code more logic and readable
3
3
  import { produce } from 'immer';
4
+ import { template } from 'lodash-es';
4
5
  import { StateCreator } from 'zustand/vanilla';
5
6
 
6
7
  import { LOADING_FLAT, MESSAGE_CANCEL_FLAT } from '@/const/message';
@@ -507,6 +508,10 @@ export const generateAIChat: StateCreator<
507
508
  const agentConfig = agentSelectors.currentAgentConfig(getAgentStoreState());
508
509
  const chatConfig = agentChatConfigSelectors.currentChatConfig(getAgentStoreState());
509
510
 
511
+ const compiler = template(chatConfig.inputTemplate, {
512
+ interpolate: /{{\s*(text)\s*}}/g
513
+ });
514
+
510
515
  // ================================== //
511
516
  // messages uniformly preprocess //
512
517
  // ================================== //
@@ -521,17 +526,34 @@ export const generateAIChat: StateCreator<
521
526
  historyCount,
522
527
  });
523
528
 
524
- // 2. add systemRole
529
+ // 2. replace inputMessage template
530
+ preprocessMsgs = !chatConfig.inputTemplate
531
+ ? preprocessMsgs
532
+ : preprocessMsgs.map((m) => {
533
+ if (m.role === 'user') {
534
+ try {
535
+ return { ...m, content: compiler({ text: m.content }) };
536
+ } catch (error) {
537
+ console.error(error);
538
+
539
+ return m;
540
+ }
541
+ }
542
+
543
+ return m;
544
+ });
545
+
546
+ // 3. add systemRole
525
547
  if (agentConfig.systemRole) {
526
548
  preprocessMsgs.unshift({ content: agentConfig.systemRole, role: 'system' } as ChatMessage);
527
549
  }
528
550
 
529
- // 3. handle max_tokens
551
+ // 4. handle max_tokens
530
552
  agentConfig.params.max_tokens = chatConfig.enableMaxTokens
531
553
  ? agentConfig.params.max_tokens
532
554
  : undefined;
533
555
 
534
- // 4. handle reasoning_effort
556
+ // 5. handle reasoning_effort
535
557
  agentConfig.params.reasoning_effort = chatConfig.enableReasoningEffort
536
558
  ? agentConfig.params.reasoning_effort
537
559
  : undefined;
package/src/types/user/settings/keyVaults.ts CHANGED
@@ -80,6 +80,7 @@ export interface UserKeyVaults extends SearchEngineKeyVaults {
80
80
  tencentcloud?: OpenAICompatibleKeyVault;
81
81
  togetherai?: OpenAICompatibleKeyVault;
82
82
  upstage?: OpenAICompatibleKeyVault;
83
+ v0?: OpenAICompatibleKeyVault;
83
84
  vertexai?: OpenAICompatibleKeyVault;
84
85
  vllm?: OpenAICompatibleKeyVault;
85
86
  volcengine?: OpenAICompatibleKeyVault;
package/src/utils/client/parserPlaceholder.test.ts CHANGED
@@ -21,18 +21,6 @@ vi.mock('@/store/user/selectors', () => ({
21
21
  },
22
22
  }));
23
23
 
24
- vi.mock('@/store/agent/store', () => ({
25
- getAgentStoreState: () => ({}),
26
- }));
27
-
28
- vi.mock('@/store/agent/selectors', () => ({
29
- agentChatConfigSelectors: {
30
- currentChatConfig: () => ({
31
- inputTemplate: 'Hello {{username}}!',
32
- }),
33
- },
34
- }));
35
-
36
24
  describe('parsePlaceholderVariablesMessages', () => {
37
25
  beforeEach(() => {
38
26
  // Mock Date for consistent testing
@@ -238,15 +226,6 @@ describe('parsePlaceholderVariablesMessages', () => {
238
226
  // Unknown variables should remain unchanged
239
227
  expect(result[0].content).toBe('Hello {{unknown_variable}}!');
240
228
  });
241
-
242
- it('should handle nested variables (input_template)', () => {
243
- const messages = [{ id: '1', content: 'Template: {{input_template}}' }];
244
-
245
- const result = parsePlaceholderVariablesMessages(messages);
246
-
247
- // Should resolve nested variables in input_template
248
- expect(result[0].content).toBe('Template: Hello testuser!');
249
- });
250
229
  });
251
230
 
252
231
  describe('specific variable types', () => {
package/src/utils/client/parserPlaceholder.ts CHANGED
@@ -5,9 +5,6 @@ import { uuid } from '@/utils/uuid';
5
5
  import { useUserStore } from '@/store/user';
6
6
  import { userProfileSelectors } from '@/store/user/selectors';
7
7
 
8
- import { getAgentStoreState } from '@/store/agent/store';
9
- import { agentChatConfigSelectors } from '@/store/agent/selectors';
10
-
11
8
  const placeholderVariablesRegex = /{{(.*?)}}/g;
12
9
 
13
10
  /* eslint-disable sort-keys-fix/sort-keys-fix */
@@ -108,16 +105,6 @@ export const VARIABLE_GENERATORS = {
108
105
  language: () => typeof navigator !== 'undefined' ? navigator.language : '',
109
106
  platform: () => typeof navigator !== 'undefined' ? navigator.platform : '',
110
107
  user_agent: () => typeof navigator !== 'undefined' ? navigator.userAgent : '',
111
-
112
- /**
113
- * LobeChat 模板变量
114
- *
115
- * | Value | Example |
116
- * |-------|---------|
117
- * | `{{input_template}}` | Some contents |
118
- *
119
- */
120
- input_template: () => agentChatConfigSelectors.currentChatConfig(getAgentStoreState()).inputTemplate || '',
121
108
  } as Record<string, () => string>;
122
109
 
123
110
  /**
@@ -133,13 +120,13 @@ const extractPlaceholderVariables = (text: string): string[] => {
133
120
  /**
134
121
  * 将模板变量替换为实际值,并支持递归解析嵌套变量
135
122
  * @param text - 含变量的原始文本
136
- * @param depth - 递归深度,默认 1,设置更高可支持 {{input_template}} 中的 {{date}} 等
123
+ * @param depth - 递归深度,默认 1,设置更高可支持 {{text}} 中的 {{date}} 等
137
124
  * @returns 替换后的文本
138
125
  */
139
126
  export const parsePlaceholderVariables = (text: string, depth = 2): string => {
140
127
  let result = text;
141
128
 
142
- // 递归解析,用于处理如 {{input_template}} 存在额外预设变量
129
+ // 递归解析,用于处理如 {{text}} 存在额外预设变量
143
130
  for (let i = 0; i < depth; i++) {
144
131
  try {
145
132
  const variables = Object.fromEntries(