@lobehub/chat 1.95.0 → 1.96.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +33 -0
- package/Dockerfile +2 -0
- package/Dockerfile.database +2 -0
- package/Dockerfile.pglite +2 -0
- package/changelog/v1.json +12 -0
- package/locales/ar/setting.json +1 -1
- package/locales/bg-BG/setting.json +1 -1
- package/locales/de-DE/setting.json +1 -1
- package/locales/en-US/setting.json +1 -1
- package/locales/es-ES/setting.json +1 -1
- package/locales/fa-IR/setting.json +1 -1
- package/locales/fr-FR/setting.json +1 -1
- package/locales/it-IT/setting.json +1 -1
- package/locales/ja-JP/setting.json +1 -1
- package/locales/ko-KR/setting.json +1 -1
- package/locales/nl-NL/setting.json +1 -1
- package/locales/pl-PL/setting.json +1 -1
- package/locales/pt-BR/setting.json +1 -1
- package/locales/ru-RU/setting.json +1 -1
- package/locales/tr-TR/setting.json +1 -1
- package/locales/vi-VN/setting.json +1 -1
- package/locales/zh-CN/setting.json +1 -1
- package/locales/zh-TW/setting.json +1 -1
- package/package.json +1 -1
- package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +2 -0
- package/src/config/aiModels/index.ts +3 -0
- package/src/config/aiModels/v0.ts +63 -0
- package/src/config/llm.ts +6 -0
- package/src/config/modelProviders/index.ts +4 -0
- package/src/config/modelProviders/v0.ts +17 -0
- package/src/libs/model-runtime/runtimeMap.ts +2 -0
- package/src/libs/model-runtime/types/type.ts +1 -0
- package/src/libs/model-runtime/utils/modelParse.ts +6 -0
- package/src/libs/model-runtime/v0/index.ts +21 -0
- package/src/locales/default/setting.ts +1 -1
- package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +25 -3
- package/src/types/user/settings/keyVaults.ts +1 -0
- package/src/utils/client/parserPlaceholder.test.ts +0 -21
- package/src/utils/client/parserPlaceholder.ts +2 -15
package/CHANGELOG.md
CHANGED
```diff
@@ -2,6 +2,39 @@
 
 # Changelog
 
+## [Version 1.96.0](https://github.com/lobehub/lobe-chat/compare/v1.95.0...v1.96.0)
+
+<sup>Released on **2025-06-20**</sup>
+
+#### ✨ Features
+
+- **misc**: Add v0 (Vercel) provider support.
+
+#### 🐛 Bug Fixes
+
+- **misc**: Fix inputTemplate behavior.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **misc**: Add v0 (Vercel) provider support, closes [#8235](https://github.com/lobehub/lobe-chat/issues/8235) ([5842a18](https://github.com/lobehub/lobe-chat/commit/5842a18))
+
+#### What's fixed
+
+- **misc**: Fix inputTemplate behavior, closes [#8204](https://github.com/lobehub/lobe-chat/issues/8204) ([61c2c3c](https://github.com/lobehub/lobe-chat/commit/61c2c3c))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ## [Version 1.95.0](https://github.com/lobehub/lobe-chat/compare/v1.94.17...v1.95.0)
 
 <sup>Released on **2025-06-20**</sup>
```
package/Dockerfile
CHANGED
```diff
@@ -226,6 +226,8 @@ ENV \
     TOGETHERAI_API_KEY="" TOGETHERAI_MODEL_LIST="" \
     # Upstage
     UPSTAGE_API_KEY="" UPSTAGE_MODEL_LIST="" \
+    # v0 (Vercel)
+    V0_API_KEY="" V0_MODEL_LIST="" \
     # vLLM
     VLLM_API_KEY="" VLLM_MODEL_LIST="" VLLM_PROXY_URL="" \
     # Wenxin
```
package/Dockerfile.database
CHANGED
```diff
@@ -270,6 +270,8 @@ ENV \
     TOGETHERAI_API_KEY="" TOGETHERAI_MODEL_LIST="" \
     # Upstage
     UPSTAGE_API_KEY="" UPSTAGE_MODEL_LIST="" \
+    # v0 (Vercel)
+    V0_API_KEY="" V0_MODEL_LIST="" \
     # vLLM
     VLLM_API_KEY="" VLLM_MODEL_LIST="" VLLM_PROXY_URL="" \
     # Wenxin
```
package/Dockerfile.pglite
CHANGED
```diff
@@ -224,6 +224,8 @@ ENV \
     TOGETHERAI_API_KEY="" TOGETHERAI_MODEL_LIST="" \
     # Upstage
     UPSTAGE_API_KEY="" UPSTAGE_MODEL_LIST="" \
+    # v0 (Vercel)
+    V0_API_KEY="" V0_MODEL_LIST="" \
     # vLLM
     VLLM_API_KEY="" VLLM_MODEL_LIST="" VLLM_PROXY_URL="" \
     # Wenxin
```
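All three images add the same pair of environment variables, defaulted to empty strings. A minimal sketch of how they are meant to be supplied and consumed (the gating mirrors the `src/config/llm.ts` diff further below):

```ts
// Sketch only: supply a real key at container runtime, e.g.
//   docker run -e V0_API_KEY=... lobehub/lobe-chat
const apiKey = process.env.V0_API_KEY; // "" by default in the image
const enabled = !!apiKey;              // provider stays hidden until a key is set
```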
package/changelog/v1.json
CHANGED
package/locales/ar/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "سيتم ملء أحدث رسالة من المستخدم في هذا القالب",
-    "placeholder": "القالب المُعالج مسبقًا {{
+    "placeholder": "القالب المُعالج مسبقًا {{text}} سيتم استبداله بالمعلومات المُدخلة في الوقت الحقيقي",
     "title": "معالجة مُدخلات المستخدم"
   },
   "submit": "تحديث تفضيلات الدردشة",
```
package/locales/bg-BG/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "Последното съобщение на потребителя ще бъде попълнено в този шаблон",
-    "placeholder": "Шаблонът за предварителна обработка {{
+    "placeholder": "Шаблонът за предварителна обработка {{text}} ще бъде заменен с информация за въвеждане в реално време",
     "title": "Предварителна обработка на потребителския вход"
   },
   "submit": "Актуализиране на предпочитанията за чат",
```
package/locales/de-DE/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "Die neueste Benutzernachricht wird in dieses Template eingefügt",
-    "placeholder": "Vorlagen-{{
+    "placeholder": "Vorlagen-{{text}} werden durch Echtzeit-Eingabeinformationen ersetzt",
     "title": "Benutzereingabe-Vorverarbeitung"
   },
   "submit": "Chat-Präferenzen aktualisieren",
```
package/locales/en-US/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "The user's latest message will be filled into this template",
-    "placeholder": "Preprocessing template {{
+    "placeholder": "Preprocessing template {{text}} will be replaced with real-time input information",
     "title": "User Input Preprocessing"
   },
   "submit": "Update Chat Preferences",
```
package/locales/es-ES/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "El último mensaje del usuario se completará en esta plantilla",
-    "placeholder": "La plantilla de preprocesamiento {{
+    "placeholder": "La plantilla de preprocesamiento {{text}} se reemplazará por la información de entrada en tiempo real",
     "title": "Preprocesamiento de entrada del usuario"
   },
   "submit": "Actualizar preferencias de chat",
```
package/locales/fa-IR/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "آخرین پیام کاربر در این قالب پر میشود",
-    "placeholder": "قالب پیشپردازش {{
+    "placeholder": "قالب پیشپردازش {{text}} با اطلاعات ورودی لحظهای جایگزین میشود",
     "title": "پیشپردازش ورودی کاربر"
   },
   "submit": "بهروزرسانی ترجیحات چت",
```
package/locales/fr-FR/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "Le dernier message de l'utilisateur sera rempli dans ce modèle",
-    "placeholder": "Le modèle de prétraitement {{
+    "placeholder": "Le modèle de prétraitement {{text}} sera remplacé par les informations d'entrée en temps réel",
     "title": "Modèle de prétraitement de l'entrée utilisateur"
   },
   "submit": "Mettre à jour les préférences de chat",
```
package/locales/it-IT/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "Il template verrà popolato con l'ultimo messaggio dell'utente",
-    "placeholder": "Il modello di input {{
+    "placeholder": "Il modello di input {{text}} verrà sostituito con le informazioni in tempo reale",
     "title": "Pre-elaborazione dell'input dell'utente"
   },
   "submit": "Aggiorna preferenze chat",
```
package/locales/ja-JP/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "ユーザーの最新メッセージがこのテンプレートに埋め込まれます",
-    "placeholder": "入力テンプレート {{
+    "placeholder": "入力テンプレート {{text}} はリアルタイムの入力情報に置き換えられます",
     "title": "ユーザー入力のプリプロセス"
   },
   "submit": "チャットの好みを更新",
```
package/locales/ko-KR/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "사용자의 최신 메시지가이 템플릿에 채워집니다",
-    "placeholder": "입력 템플릿 {{
+    "placeholder": "입력 템플릿 {{text}}은 실시간 입력 정보로 대체됩니다",
     "title": "사용자 입력 전처리"
   },
   "submit": "채팅 선호도 업데이트",
```
package/locales/nl-NL/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "De meest recente gebruikersboodschap wordt ingevuld in dit sjabloon",
-    "placeholder": "Voorbewerkingssjabloon {{
+    "placeholder": "Voorbewerkingssjabloon {{text}} wordt vervangen door realtime invoer",
     "title": "Voorbewerking van gebruikersinvoer"
   },
   "submit": "Chatvoorkeuren bijwerken",
```
package/locales/pl-PL/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "Ostatnia wiadomość użytkownika zostanie wypełniona w tym szablonie",
-    "placeholder": "Szablon wejściowy {{
+    "placeholder": "Szablon wejściowy {{text}} zostanie zastąpiony rzeczywistą wiadomością",
     "title": "Szablon wejściowy"
   },
   "submit": "Zaktualizuj preferencje czatu",
```
package/locales/pt-BR/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "A última mensagem do usuário será preenchida neste modelo",
-    "placeholder": "O modelo de pré-processamento {{
+    "placeholder": "O modelo de pré-processamento {{text}} será substituído pela entrada em tempo real",
     "title": "Pré-processamento de entrada do usuário"
   },
   "submit": "Atualizar preferências de chat",
```
package/locales/ru-RU/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "Последнее сообщение пользователя будет использовано в этом шаблоне",
-    "placeholder": "Шаблон ввода {{
+    "placeholder": "Шаблон ввода {{text}} будет заменен на реальные данные",
     "title": "Шаблон ввода пользователя"
   },
   "submit": "Обновить предпочтения чата",
```
package/locales/tr-TR/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "Kullanıcının son mesajı bu şablona doldurulur",
-    "placeholder": "Ön işleme şablonu {{
+    "placeholder": "Ön işleme şablonu {{text}}, gerçek zamanlı giriş bilgileri ile değiştirilir",
     "title": "Kullanıcı Girişi Ön İşleme"
   },
   "submit": "Sohbet tercihlerini güncelle",
```
package/locales/vi-VN/setting.json
CHANGED
```diff
@@ -220,7 +220,7 @@
   },
   "inputTemplate": {
     "desc": "Tin nhắn mới nhất của người dùng sẽ được điền vào mẫu này",
-    "placeholder": "Mẫu xử lý trước {{
+    "placeholder": "Mẫu xử lý trước {{text}} sẽ được thay thế bằng thông tin nhập thời gian thực",
     "title": "Mẫu xử lý đầu vào của người dùng"
   },
   "submit": "Cập nhật sở thích trò chuyện",
```
package/package.json
CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.95.0",
+  "version": "1.96.0",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
```
package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx
CHANGED
```diff
@@ -35,6 +35,7 @@ import {
   TaichuProviderCard,
   TogetherAIProviderCard,
   UpstageProviderCard,
+  V0ProviderCard,
   VLLMProviderCard,
   WenxinProviderCard,
   XAIProviderCard,
@@ -90,6 +91,7 @@ export const useProviderList = (): ProviderItem[] => {
   SambaNovaProviderCard,
   Search1APIProviderCard,
   CohereProviderCard,
+  V0ProviderCard,
   QiniuProviderCard,
   QwenProviderCard,
   WenxinProviderCard,
```
package/src/config/aiModels/index.ts
CHANGED
```diff
@@ -45,6 +45,7 @@ import { default as taichu } from './taichu';
 import { default as tencentcloud } from './tencentcloud';
 import { default as togetherai } from './togetherai';
 import { default as upstage } from './upstage';
+import { default as v0 } from './v0';
 import { default as vertexai } from './vertexai';
 import { default as vllm } from './vllm';
 import { default as volcengine } from './volcengine';
@@ -119,6 +120,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
   tencentcloud,
   togetherai,
   upstage,
+  v0,
   vertexai,
   vllm,
   volcengine,
@@ -174,6 +176,7 @@ export { default as taichu } from './taichu';
 export { default as tencentcloud } from './tencentcloud';
 export { default as togetherai } from './togetherai';
 export { default as upstage } from './upstage';
+export { default as v0 } from './v0';
 export { default as vertexai } from './vertexai';
 export { default as vllm } from './vllm';
 export { default as volcengine } from './volcengine';
```
package/src/config/aiModels/v0.ts
ADDED
```diff
@@ -0,0 +1,63 @@
+import { AIChatModelCard } from '@/types/aiModel';
+
+const v0ChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    contextWindowTokens: 512_000,
+    description:
+      'v0-1.5-lg 模型适用于高级思考或推理任务',
+    displayName: 'v0-1.5-lg',
+    enabled: true,
+    id: 'v0-1.5-lg',
+    maxOutput: 32_000,
+    pricing: {
+      input: 15,
+      output: 75,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    contextWindowTokens: 128_000,
+    description:
+      'v0-1.5-md 模型适用于日常任务和用户界面(UI)生成',
+    displayName: 'v0-1.5-md',
+    enabled: true,
+    id: 'v0-1.5-md',
+    maxOutput: 32_000,
+    pricing: {
+      input: 3,
+      output: 15,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 128_000,
+    description:
+      'v0-1.0-md 模型是通过 v0 API 提供服务的旧版模型',
+    displayName: 'v0-1.0-md',
+    id: 'v0-1.0-md',
+    maxOutput: 32_000,
+    pricing: {
+      input: 3,
+      output: 15,
+    },
+    type: 'chat',
+  },
+];
+
+export const allModels = [...v0ChatModels];
+
+export default allModels;
```
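These cards end up in `LOBE_DEFAULT_MODEL_LIST` via the `aiModels/index.ts` change above. A minimal lookup sketch, assuming `buildDefaultModelList` preserves the fields shown here:

```ts
import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';

// Read the flagship v0 card's advertised capabilities.
const card = LOBE_DEFAULT_MODEL_LIST.find((m) => m.id === 'v0-1.5-lg');
console.log(card?.abilities);           // { functionCall: true, reasoning: true, vision: true }
console.log(card?.contextWindowTokens); // 512000
```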
package/src/config/llm.ts
CHANGED
```diff
@@ -165,6 +165,9 @@ export const getLLMConfig = () => {
 
       ENABLED_MODELSCOPE: z.boolean(),
       MODELSCOPE_API_KEY: z.string().optional(),
+
+      ENABLED_V0: z.boolean(),
+      V0_API_KEY: z.string().optional(),
     },
     runtimeEnv: {
       API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -328,6 +331,9 @@ export const getLLMConfig = () => {
 
       ENABLED_MODELSCOPE: !!process.env.MODELSCOPE_API_KEY,
       MODELSCOPE_API_KEY: process.env.MODELSCOPE_API_KEY,
+
+      ENABLED_V0: !!process.env.V0_API_KEY,
+      V0_API_KEY: process.env.V0_API_KEY,
     },
   });
 };
```
package/src/config/modelProviders/index.ts
CHANGED
```diff
@@ -45,6 +45,7 @@ import TaichuProvider from './taichu';
 import TencentcloudProvider from './tencentcloud';
 import TogetherAIProvider from './togetherai';
 import UpstageProvider from './upstage';
+import V0Provider from './v0';
 import VertexAIProvider from './vertexai';
 import VLLMProvider from './vllm';
 import VolcengineProvider from './volcengine';
@@ -83,6 +84,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   JinaProvider.chatModels,
   SambaNovaProvider.chatModels,
   CohereProvider.chatModels,
+  V0Provider.chatModels,
   ZeroOneProvider.chatModels,
   StepfunProvider.chatModels,
   NovitaProvider.chatModels,
@@ -139,6 +141,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   JinaProvider,
   SambaNovaProvider,
   CohereProvider,
+  V0Provider,
   QwenProvider,
   WenxinProvider,
   TencentcloudProvider,
@@ -218,6 +221,7 @@ export { default as TaichuProviderCard } from './taichu';
 export { default as TencentCloudProviderCard } from './tencentcloud';
 export { default as TogetherAIProviderCard } from './togetherai';
 export { default as UpstageProviderCard } from './upstage';
+export { default as V0ProviderCard } from './v0';
 export { default as VertexAIProviderCard } from './vertexai';
 export { default as VLLMProviderCard } from './vllm';
 export { default as VolcengineProviderCard } from './volcengine';
```
package/src/config/modelProviders/v0.ts
ADDED
```diff
@@ -0,0 +1,17 @@
+import { ModelProviderCard } from '@/types/llm';
+
+const V0: ModelProviderCard = {
+  chatModels: [],
+  checkModel: 'v0-1.5-md',
+  description:
+    'v0 是一个配对编程助手,你只需用自然语言描述想法,它就能为你的项目生成代码和用户界面(UI)',
+  id: 'v0',
+  modelsUrl: 'https://vercel.com/docs/v0/api#models',
+  name: 'Vercel (v0)',
+  settings: {
+    sdkType: 'openai',
+  },
+  url: 'https://v0.dev',
+};
+
+export default V0;
```
package/src/libs/model-runtime/runtimeMap.ts
CHANGED
```diff
@@ -43,6 +43,7 @@ import { LobeTaichuAI } from './taichu';
 import { LobeTencentCloudAI } from './tencentcloud';
 import { LobeTogetherAI } from './togetherai';
 import { LobeUpstageAI } from './upstage';
+import { LobeV0AI } from './v0';
 import { LobeVLLMAI } from './vllm';
 import { LobeVolcengineAI } from './volcengine';
 import { LobeWenxinAI } from './wenxin';
@@ -97,6 +98,7 @@ export const providerRuntimeMap = {
   tencentcloud: LobeTencentCloudAI,
   togetherai: LobeTogetherAI,
   upstage: LobeUpstageAI,
+  v0: LobeV0AI,
   vllm: LobeVLLMAI,
   volcengine: LobeVolcengineAI,
   wenxin: LobeWenxinAI,
```
package/src/libs/model-runtime/utils/modelParse.ts
CHANGED
```diff
@@ -48,6 +48,11 @@ export const MODEL_LIST_CONFIGS = {
     reasoningKeywords: ['qvq', 'qwq', 'qwen3'],
     visionKeywords: ['qvq', 'vl'],
   },
+  v0: {
+    functionCallKeywords: ['v0'],
+    reasoningKeywords: ['v0-1.5'],
+    visionKeywords: ['v0'],
+  },
   volcengine: {
     functionCallKeywords: ['doubao-1.5'],
     reasoningKeywords: ['thinking', '-r1'],
@@ -72,6 +77,7 @@ export const PROVIDER_DETECTION_CONFIG = {
   llama: ['llama'],
   openai: ['o1', 'o3', 'o4', 'gpt-'],
   qwen: ['qwen', 'qwq', 'qvq'],
+  v0: ['v0'],
   volcengine: ['doubao'],
   zeroone: ['yi-'],
   zhipu: ['glm'],
```
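The new `v0` entries feed the keyword-based capability detection used when a provider's model list is fetched. A sketch of the assumed semantics (substring matching on the model id; the real logic lives in `processMultiProviderModelList`):

```ts
const v0Config = {
  functionCallKeywords: ['v0'],
  reasoningKeywords: ['v0-1.5'],
  visionKeywords: ['v0'],
};

// Tag a raw model id with abilities based on keyword hits.
const detectAbilities = (id: string) => ({
  functionCall: v0Config.functionCallKeywords.some((k) => id.includes(k)),
  reasoning: v0Config.reasoningKeywords.some((k) => id.includes(k)),
  vision: v0Config.visionKeywords.some((k) => id.includes(k)),
});

detectAbilities('v0-1.5-lg'); // { functionCall: true, reasoning: true, vision: true }
detectAbilities('v0-1.0-md'); // { functionCall: true, reasoning: false, vision: true }
```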
package/src/libs/model-runtime/v0/index.ts
ADDED
```diff
@@ -0,0 +1,21 @@
+import { ModelProvider } from '../types';
+import { processMultiProviderModelList } from '../utils/modelParse';
+import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
+
+export interface V0ModelCard {
+  id: string;
+}
+
+export const LobeV0AI = createOpenAICompatibleRuntime({
+  baseURL: 'https://api.v0.dev/v1',
+  debug: {
+    chatCompletion: () => process.env.DEBUG_V0_CHAT_COMPLETION === '1',
+  },
+  models: async ({ client }) => {
+    const modelsPage = (await client.models.list()) as any;
+    const modelList: V0ModelCard[] = modelsPage.data;
+
+    return processMultiProviderModelList(modelList);
+  },
+  provider: ModelProvider.V0,
+});
```
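The runtime is a thin OpenAI-compatible wrapper pointed at `https://api.v0.dev/v1`. A hypothetical usage sketch; the constructor and `chat` signature come from `createOpenAICompatibleRuntime` and may differ in detail:

```ts
// Assumed API shape, not part of the diff.
const v0 = new LobeV0AI({ apiKey: process.env.V0_API_KEY });

const demo = async () => {
  // Streams an OpenAI-compatible chat completion (assumed payload shape).
  return v0.chat({
    messages: [{ content: 'Generate a pricing page component', role: 'user' }],
    model: 'v0-1.5-md',
    stream: true,
  });
};
```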
package/src/store/chat/slices/aiChat/actions/generateAIChat.ts
CHANGED
```diff
@@ -1,6 +1,7 @@
 /* eslint-disable sort-keys-fix/sort-keys-fix, typescript-sort-keys/interface */
 // Disable the auto sort key eslint rule to make the code more logic and readable
 import { produce } from 'immer';
+import { template } from 'lodash-es';
 import { StateCreator } from 'zustand/vanilla';
 
 import { LOADING_FLAT, MESSAGE_CANCEL_FLAT } from '@/const/message';
@@ -507,6 +508,10 @@ export const generateAIChat: StateCreator<
     const agentConfig = agentSelectors.currentAgentConfig(getAgentStoreState());
     const chatConfig = agentChatConfigSelectors.currentChatConfig(getAgentStoreState());
 
+    const compiler = template(chatConfig.inputTemplate, {
+      interpolate: /{{\s*(text)\s*}}/g
+    });
+
     // ================================== //
     //   messages uniformly preprocess   //
     // ================================== //
@@ -521,17 +526,34 @@ export const generateAIChat: StateCreator<
       historyCount,
     });
 
-    // 2.
+    // 2. replace inputMessage template
+    preprocessMsgs = !chatConfig.inputTemplate
+      ? preprocessMsgs
+      : preprocessMsgs.map((m) => {
+          if (m.role === 'user') {
+            try {
+              return { ...m, content: compiler({ text: m.content }) };
+            } catch (error) {
+              console.error(error);
+
+              return m;
+            }
+          }
+
+          return m;
+        });
+
+    // 3. add systemRole
     if (agentConfig.systemRole) {
       preprocessMsgs.unshift({ content: agentConfig.systemRole, role: 'system' } as ChatMessage);
     }
 
-    //
+    // 4. handle max_tokens
     agentConfig.params.max_tokens = chatConfig.enableMaxTokens
       ? agentConfig.params.max_tokens
       : undefined;
 
-    //
+    // 5. handle reasoning_effort
     agentConfig.params.reasoning_effort = chatConfig.enableReasoningEffort
       ? agentConfig.params.reasoning_effort
       : undefined;
```
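The fix compiles `inputTemplate` with lodash's `template` and a custom `interpolate` regex, so only `{{text}}` (optionally with surrounding whitespace) is substituted; any other `{{...}}` placeholder survives for the separate placeholder-variable pass. For example:

```ts
import { template } from 'lodash-es';

const compiler = template('Translate to French: {{ text }} ({{date}})', {
  interpolate: /{{\s*(text)\s*}}/g,
});

// Only {{ text }} is interpolated; {{date}} is left for later resolution.
compiler({ text: 'good morning' });
// -> 'Translate to French: good morning ({{date}})'
```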
package/src/types/user/settings/keyVaults.ts
CHANGED
```diff
@@ -80,6 +80,7 @@ export interface UserKeyVaults extends SearchEngineKeyVaults {
   tencentcloud?: OpenAICompatibleKeyVault;
   togetherai?: OpenAICompatibleKeyVault;
   upstage?: OpenAICompatibleKeyVault;
+  v0?: OpenAICompatibleKeyVault;
   vertexai?: OpenAICompatibleKeyVault;
   vllm?: OpenAICompatibleKeyVault;
   volcengine?: OpenAICompatibleKeyVault;
```
package/src/utils/client/parserPlaceholder.test.ts
CHANGED
```diff
@@ -21,18 +21,6 @@ vi.mock('@/store/user/selectors', () => ({
   },
 }));
 
-vi.mock('@/store/agent/store', () => ({
-  getAgentStoreState: () => ({}),
-}));
-
-vi.mock('@/store/agent/selectors', () => ({
-  agentChatConfigSelectors: {
-    currentChatConfig: () => ({
-      inputTemplate: 'Hello {{username}}!',
-    }),
-  },
-}));
-
 describe('parsePlaceholderVariablesMessages', () => {
   beforeEach(() => {
     // Mock Date for consistent testing
@@ -238,15 +226,6 @@
     // Unknown variables should remain unchanged
     expect(result[0].content).toBe('Hello {{unknown_variable}}!');
   });
-
-  it('should handle nested variables (input_template)', () => {
-    const messages = [{ id: '1', content: 'Template: {{input_template}}' }];
-
-    const result = parsePlaceholderVariablesMessages(messages);
-
-    // Should resolve nested variables in input_template
-    expect(result[0].content).toBe('Template: Hello testuser!');
-  });
 });
 
 describe('specific variable types', () => {
```
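The deleted tests exercised the old `{{input_template}}` generator, which the fix removes. Under the new behavior that token should fall through like any unknown placeholder; a hypothetical replacement test:

```ts
it('leaves {{input_template}} unresolved now that its generator is gone', () => {
  const messages = [{ id: '1', content: 'Template: {{input_template}}' }];

  const result = parsePlaceholderVariablesMessages(messages);

  // Unknown variables remain unchanged (same rule as the test above).
  expect(result[0].content).toBe('Template: {{input_template}}');
});
```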
package/src/utils/client/parserPlaceholder.ts
CHANGED
```diff
@@ -5,9 +5,6 @@ import { uuid } from '@/utils/uuid';
 import { useUserStore } from '@/store/user';
 import { userProfileSelectors } from '@/store/user/selectors';
 
-import { getAgentStoreState } from '@/store/agent/store';
-import { agentChatConfigSelectors } from '@/store/agent/selectors';
-
 const placeholderVariablesRegex = /{{(.*?)}}/g;
 
 /* eslint-disable sort-keys-fix/sort-keys-fix */
@@ -108,16 +105,6 @@
   language: () => typeof navigator !== 'undefined' ? navigator.language : '',
   platform: () => typeof navigator !== 'undefined' ? navigator.platform : '',
   user_agent: () => typeof navigator !== 'undefined' ? navigator.userAgent : '',
-
-  /**
-   * LobeChat 模板变量
-   *
-   * | Value | Example |
-   * |-------|---------|
-   * | `{{input_template}}` | Some contents |
-   *
-   */
-  input_template: () => agentChatConfigSelectors.currentChatConfig(getAgentStoreState()).inputTemplate || '',
 } as Record<string, () => string>;
 
 /**
@@ -133,13 +120,13 @@ const extractPlaceholderVariables = (text: string): string[] => {
 /**
  * 将模板变量替换为实际值,并支持递归解析嵌套变量
  * @param text - 含变量的原始文本
- * @param depth - 递归深度,默认 1,设置更高可支持 {{
+ * @param depth - 递归深度,默认 1,设置更高可支持 {{text}} 中的 {{date}} 等
  * @returns 替换后的文本
  */
 export const parsePlaceholderVariables = (text: string, depth = 2): string => {
   let result = text;
 
-  // 递归解析,用于处理如 {{
+  // 递归解析,用于处理如 {{text}} 存在额外预设变量
   for (let i = 0; i < depth; i++) {
     try {
       const variables = Object.fromEntries(
```
|