@lobehub/chat 1.61.6 → 1.62.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml +8 -0
- package/.github/ISSUE_TEMPLATE/config.yml +4 -1
- package/CHANGELOG.md +58 -0
- package/changelog/v1.json +21 -0
- package/locales/ar/components.json +1 -0
- package/locales/bg-BG/components.json +1 -0
- package/locales/de-DE/components.json +1 -0
- package/locales/en-US/components.json +4 -3
- package/locales/es-ES/components.json +1 -0
- package/locales/fa-IR/components.json +1 -0
- package/locales/fr-FR/components.json +1 -0
- package/locales/it-IT/components.json +1 -0
- package/locales/ja-JP/components.json +1 -0
- package/locales/ko-KR/components.json +1 -0
- package/locales/nl-NL/components.json +1 -0
- package/locales/pl-PL/components.json +1 -0
- package/locales/pt-BR/components.json +1 -0
- package/locales/ru-RU/components.json +1 -0
- package/locales/tr-TR/components.json +1 -0
- package/locales/vi-VN/components.json +1 -0
- package/locales/zh-CN/components.json +2 -1
- package/locales/zh-TW/components.json +1 -0
- package/package.json +2 -2
- package/src/components/ModelSelect/index.tsx +24 -2
- package/src/components/Thinking/index.tsx +7 -2
- package/src/config/aiModels/jina.ts +7 -5
- package/src/config/aiModels/perplexity.ts +8 -0
- package/src/config/llm.ts +8 -0
- package/src/config/modelProviders/sambanova.ts +4 -1
- package/src/database/client/migrations.json +12 -8
- package/src/database/migrations/0015_add_message_search_metadata.sql +2 -0
- package/src/database/migrations/meta/0015_snapshot.json +3616 -0
- package/src/database/migrations/meta/_journal.json +7 -0
- package/src/database/schemas/message.ts +3 -1
- package/src/database/server/models/message.ts +2 -0
- package/src/features/Conversation/components/ChatItem/index.tsx +10 -1
- package/src/features/Conversation/components/MarkdownElements/Thinking/Render.tsx +5 -1
- package/src/features/Conversation/components/MarkdownElements/remarkPlugins/createRemarkCustomTagPlugin.ts +1 -0
- package/src/features/Conversation/components/MarkdownElements/remarkPlugins/getNodeContent.test.ts +107 -0
- package/src/features/Conversation/components/MarkdownElements/remarkPlugins/getNodeContent.ts +6 -0
- package/src/libs/agent-runtime/perplexity/index.test.ts +156 -12
- package/src/libs/agent-runtime/utils/streams/anthropic.ts +3 -3
- package/src/libs/agent-runtime/utils/streams/bedrock/claude.ts +6 -2
- package/src/libs/agent-runtime/utils/streams/bedrock/llama.ts +3 -3
- package/src/libs/agent-runtime/utils/streams/google-ai.ts +3 -3
- package/src/libs/agent-runtime/utils/streams/ollama.ts +3 -3
- package/src/libs/agent-runtime/utils/streams/openai.ts +26 -8
- package/src/libs/agent-runtime/utils/streams/protocol.ts +33 -8
- package/src/libs/agent-runtime/utils/streams/vertex-ai.ts +3 -3
- package/src/locales/default/components.ts +1 -0
- package/src/server/services/nextAuthUser/index.test.ts +109 -0
- package/src/services/user/client.test.ts +10 -0
- package/src/services/user/server.test.ts +149 -0
- package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +17 -6
- package/src/store/chat/slices/message/action.ts +12 -7
- package/src/types/aiModel.ts +5 -0
- package/src/types/message/base.ts +13 -0
- package/src/types/message/chat.ts +3 -2
- package/src/utils/fetch/fetchSSE.ts +17 -1
package/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml CHANGED
@@ -2,7 +2,15 @@ name: '🐛 反馈缺陷'
 description: '反馈一个问题缺陷'
 title: '[Bug] '
 labels: ['🐛 Bug']
+type: Bug
 body:
+  - type: markdown
+    attributes:
+      value: |
+        在创建新的 Issue 之前,请先[搜索已有问题](https://github.com/lobehub/lobe-chat/issues),如果发现已有类似的问题,请给它 **👍 点赞**,这样可以帮助我们更快地解决问题。
+        如果你在使用过程中遇到问题,可以尝试以下方式获取帮助:
+        - 在 [GitHub Discussions](https://github.com/lobehub/lobe-chat/discussions) 的版块发起讨论。
+        - 在 [LobeChat 社区](https://discord.gg/AYFPHvv2jT) 提问,与其他用户交流。
   - type: dropdown
     attributes:
       label: '📦 部署环境'
package/.github/ISSUE_TEMPLATE/config.yml CHANGED
@@ -1,4 +1,7 @@
 contact_links:
-  - name:
+  - name: Ask a question for self-hosting | 咨询自部署问题
+    url: https://github.com/lobehub/lobe-chat/discussions/new?category=self-hosting-%E7%A7%81%E6%9C%89%E5%8C%96%E9%83%A8%E7%BD%B2
+    about: Please post questions, and ideas in discussions. | 请在讨论区发布问题和想法。
+  - name: Questions and ideas | 其他问题和想法
     url: https://github.com/lobehub/lobe-chat/discussions/new/choose
     about: Please post questions, and ideas in discussions. | 请在讨论区发布问题和想法。
package/CHANGELOG.md CHANGED
@@ -2,6 +2,64 @@
 
 # Changelog
 
+### [Version 1.62.1](https://github.com/lobehub/lobe-chat/compare/v1.62.0...v1.62.1)
+
+<sup>Released on **2025-02-20**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Add sambanova proxy url.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Add sambanova proxy url, closes [#6348](https://github.com/lobehub/lobe-chat/issues/6348) ([c9cb7d9](https://github.com/lobehub/lobe-chat/commit/c9cb7d9))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+## [Version 1.62.0](https://github.com/lobehub/lobe-chat/compare/v1.61.6...v1.62.0)
+
+<sup>Released on **2025-02-20**</sup>
+
+#### ✨ Features
+
+- **misc**: Support pplx search grounding.
+
+#### 🐛 Bug Fixes
+
+- **misc**: Azure AI env var configuration issue..
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **misc**: Support pplx search grounding, closes [#6331](https://github.com/lobehub/lobe-chat/issues/6331) ([ccb0003](https://github.com/lobehub/lobe-chat/commit/ccb0003))
+
+#### What's fixed
+
+- **misc**: Azure AI env var configuration issue., closes [#6346](https://github.com/lobehub/lobe-chat/issues/6346) ([3fc61bb](https://github.com/lobehub/lobe-chat/commit/3fc61bb))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.61.6](https://github.com/lobehub/lobe-chat/compare/v1.61.5...v1.61.6)
 
 <sup>Released on **2025-02-20**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,25 @@
 [
+  {
+    "children": {
+      "fixes": [
+        "Add sambanova proxy url."
+      ]
+    },
+    "date": "2025-02-20",
+    "version": "1.62.1"
+  },
+  {
+    "children": {
+      "features": [
+        "Support pplx search grounding."
+      ],
+      "fixes": [
+        "Azure AI env var configuration issue.."
+      ]
+    },
+    "date": "2025-02-20",
+    "version": "1.62.0"
+  },
   {
     "children": {
       "fixes": [
package/locales/ar/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "يدعم هذا النموذج قراءة وتعرف الملفات المرفوعة",
     "functionCall": "يدعم هذا النموذج استدعاء الوظائف",
     "reasoning": "يدعم هذا النموذج التفكير العميق",
+    "search": "يدعم هذا النموذج البحث عبر الإنترنت",
     "tokens": "يدعم هذا النموذج حتى {{tokens}} رمزًا في جلسة واحدة",
     "vision": "يدعم هذا النموذج التعرف البصري"
   },
package/locales/bg-BG/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "Този модел поддържа качване на файлове и разпознаване",
     "functionCall": "Този модел поддържа функционални обаждания (Function Call)",
     "reasoning": "Този модел поддържа дълбочинно мислене",
+    "search": "Този модел поддържа търсене в мрежата",
     "tokens": "Този модел поддържа до {{tokens}} токена за една сесия",
     "vision": "Този модел поддържа визуално разпознаване"
   },
package/locales/de-DE/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "Dieses Modell unterstützt das Hochladen von Dateien und deren Erkennung.",
     "functionCall": "Dieses Modell unterstützt Funktionsaufrufe.",
     "reasoning": "Dieses Modell unterstützt tiefes Denken",
+    "search": "Dieses Modell unterstützt die Online-Suche",
     "tokens": "Dieses Modell unterstützt maximal {{tokens}} Tokens pro Sitzung.",
     "vision": "Dieses Modell unterstützt die visuelle Erkennung."
   },
package/locales/en-US/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "This model supports file upload for reading and recognition.",
     "functionCall": "This model supports function call.",
     "reasoning": "This model supports deep thinking",
+    "search": "This model supports online search",
     "tokens": "This model supports up to {{tokens}} tokens in a single session.",
     "vision": "This model supports visual recognition."
   },
@@ -111,8 +112,8 @@
     }
   },
   "Thinking": {
-    "thinking": "Deep
-    "thought": "Deeply
-    "thoughtWithDuration": "Deeply
+    "thinking": "Deep Thinking...",
+    "thought": "Deeply Thought (in {{duration}} seconds)",
+    "thoughtWithDuration": "Deeply Thought"
   }
 }
package/locales/es-ES/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "Este modelo admite la carga y reconocimiento de archivos.",
     "functionCall": "Este modelo admite llamadas de función.",
     "reasoning": "Este modelo admite un pensamiento profundo",
+    "search": "Este modelo admite búsqueda en línea",
     "tokens": "Este modelo admite un máximo de {{tokens}} tokens por sesión.",
     "vision": "Este modelo admite el reconocimiento visual."
   },
package/locales/fa-IR/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "این مدل از بارگذاری و شناسایی فایلها پشتیبانی میکند",
     "functionCall": "این مدل از فراخوانی توابع (Function Call) پشتیبانی میکند",
     "reasoning": "این مدل از تفکر عمیق پشتیبانی میکند",
+    "search": "این مدل از جستجوی آنلاین پشتیبانی میکند",
     "tokens": "این مدل در هر جلسه حداکثر از {{tokens}} توکن پشتیبانی میکند",
     "vision": "این مدل از تشخیص بصری پشتیبانی میکند"
   },
package/locales/fr-FR/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "Ce modèle prend en charge la lecture et la reconnaissance de fichiers téléchargés.",
     "functionCall": "Ce modèle prend en charge les appels de fonction.",
     "reasoning": "Ce modèle prend en charge une réflexion approfondie",
+    "search": "Ce modèle prend en charge la recherche en ligne",
     "tokens": "Ce modèle prend en charge jusqu'à {{tokens}} jetons par session.",
     "vision": "Ce modèle prend en charge la reconnaissance visuelle."
   },
package/locales/it-IT/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "Questo modello supporta il caricamento e il riconoscimento di file.",
     "functionCall": "Questo modello supporta la chiamata di funzioni.",
     "reasoning": "Questo modello supporta un pensiero profondo",
+    "search": "Questo modello supporta la ricerca online",
     "tokens": "Questo modello supporta un massimo di {{tokens}} token per sessione.",
     "vision": "Questo modello supporta il riconoscimento visivo."
   },
package/locales/ja-JP/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "このモデルはファイルのアップロードと認識をサポートしています。",
     "functionCall": "このモデルは関数呼び出し(Function Call)をサポートしています。",
     "reasoning": "このモデルは深い思考をサポートしています",
+    "search": "このモデルはオンライン検索をサポートしています",
     "tokens": "このモデルは1つのセッションあたり最大{{tokens}}トークンをサポートしています。",
     "vision": "このモデルはビジョン認識をサポートしています。"
   },
package/locales/nl-NL/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "This model supports file upload for reading and recognition.",
     "functionCall": "This model supports function call.",
     "reasoning": "Dit model ondersteunt diepgaand denken",
+    "search": "Dit model ondersteunt online zoeken",
     "tokens": "This model supports up to {{tokens}} tokens in a single session.",
     "vision": "This model supports visual recognition."
   },
package/locales/pl-PL/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "Ten model obsługuje wczytywanie plików i rozpoznawanie",
     "functionCall": "Ten model obsługuje wywołania funkcji (Function Call).",
     "reasoning": "Ten model wspiera głębokie myślenie",
+    "search": "Ten model wspiera wyszukiwanie w sieci",
     "tokens": "Ten model obsługuje maksymalnie {{tokens}} tokenów w pojedynczej sesji.",
     "vision": "Ten model obsługuje rozpoznawanie wizualne."
   },
package/locales/pt-BR/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "Este modelo suporta leitura e reconhecimento de arquivos enviados.",
     "functionCall": "Este modelo suporta chamadas de função.",
     "reasoning": "Este modelo suporta pensamento profundo",
+    "search": "Este modelo suporta pesquisa online",
     "tokens": "Este modelo suporta no máximo {{tokens}} tokens por sessão.",
     "vision": "Este modelo suporta reconhecimento visual."
   },
package/locales/ru-RU/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "Эта модель поддерживает загрузку и распознавание файлов",
     "functionCall": "Эта модель поддерживает вызов функций",
     "reasoning": "Эта модель поддерживает глубокое мышление",
+    "search": "Эта модель поддерживает поиск в интернете",
     "tokens": "Эта модель поддерживает до {{tokens}} токенов в одной сессии",
     "vision": "Эта модель поддерживает распознавание изображений"
   },
package/locales/tr-TR/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "Bu model dosya yükleme ve tanımayı destekler",
     "functionCall": "Bu model fonksiyon çağrısını destekler",
     "reasoning": "Bu model derin düşünmeyi destekler",
+    "search": "Bu model çevrimiçi aramayı destekler",
     "tokens": "Bu model tek bir oturumda en fazla {{tokens}} Token destekler",
     "vision": "Bu model görüntü tanımıyı destekler"
   },
package/locales/vi-VN/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "Mô hình này hỗ trợ tải lên và nhận diện tệp",
     "functionCall": "Mô hình này hỗ trợ cuộc gọi hàm (Function Call)",
     "reasoning": "Mô hình này hỗ trợ tư duy sâu sắc",
+    "search": "Mô hình này hỗ trợ tìm kiếm trực tuyến",
     "tokens": "Mỗi phiên của mô hình này hỗ trợ tối đa {{tokens}} Tokens",
     "vision": "Mô hình này hỗ trợ nhận diện hình ảnh"
   },
package/locales/zh-CN/components.json CHANGED
@@ -77,6 +77,7 @@
     "file": "该模型支持上传文件读取与识别",
     "functionCall": "该模型支持函数调用(Function Call)",
     "reasoning": "该模型支持深度思考",
+    "search": "该模型支持联网搜索",
     "tokens": "该模型单个会话最多支持 {{tokens}} Tokens",
     "vision": "该模型支持视觉识别"
   },
@@ -115,4 +116,4 @@
     "thought": "已深度思考(用时 {{duration}} 秒)",
     "thoughtWithDuration": "已深度思考"
   }
-}
+}
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.61.6",
+  "version": "1.62.1",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -129,7 +129,7 @@
     "@lobehub/chat-plugins-gateway": "^1.9.0",
     "@lobehub/icons": "^1.73.1",
     "@lobehub/tts": "^1.28.0",
-    "@lobehub/ui": "^1.
+    "@lobehub/ui": "^1.165.0",
     "@neondatabase/serverless": "^0.10.4",
     "@next/third-parties": "^15.1.4",
     "@react-spring/web": "^9.7.5",
package/src/components/ModelSelect/index.tsx CHANGED
@@ -2,7 +2,14 @@ import { IconAvatarProps, ModelIcon, ProviderIcon } from '@lobehub/icons';
 import { Avatar, Icon, Tooltip } from '@lobehub/ui';
 import { Typography } from 'antd';
 import { createStyles } from 'antd-style';
-import {
+import {
+  Infinity,
+  AtomIcon,
+  LucideEye,
+  LucideGlobe,
+  LucidePaperclip,
+  ToyBrick,
+} from 'lucide-react';
 import numeral from 'numeral';
 import { rgba } from 'polished';
 import { FC, memo } from 'react';
@@ -14,7 +21,7 @@ import { AiProviderSourceType } from '@/types/aiProvider';
 import { ChatModelCard } from '@/types/llm';
 import { formatTokenNumber } from '@/utils/format';
 
-const useStyles = createStyles(({ css, token }) => ({
+const useStyles = createStyles(({ css, token, isDarkMode }) => ({
   custom: css`
     width: 36px;
     height: 20px;
@@ -41,6 +48,10 @@ const useStyles = createStyles(({ css, token }) => ({
     color: ${token.geekblue};
     background: ${token.geekblue1};
   `,
+  tagCyan: css`
+    color: ${isDarkMode ? token.cyan7 : token.cyan10};
+    background: ${isDarkMode ? token.cyan1 : token.cyan2};
+  `,
   tagGreen: css`
     color: ${token.green};
     background: ${token.green1};
@@ -122,6 +133,17 @@ export const ModelInfoTags = memo<ModelInfoTagsProps>(
           </div>
         </Tooltip>
       )}
+      {model.search && (
+        <Tooltip
+          placement={placement}
+          styles={{ root: { pointerEvents: 'none' } }}
+          title={t('ModelSelect.featureTag.search')}
+        >
+          <div className={cx(styles.tag, styles.tagCyan)} style={{ cursor: 'pointer' }} title="">
+            <Icon icon={LucideGlobe} />
+          </div>
+        </Tooltip>
+      )}
       {typeof model.contextWindowTokens === 'number' && (
         <Tooltip
           placement={placement}
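Note: the tag added above is gated on a boolean `search` flag of the model card passed to `ModelInfoTags`; the aiModels hunks further down (jina, perplexity) opt in via `abilities: { search: true }`. A minimal sketch of a card object that would surface the new tag — the field set below is illustrative, not the full `ChatModelCard` type from `@/types/llm`:

```ts
// Illustrative card shape only: it exercises the new `model.search` branch in ModelInfoTags.
const searchCapableCard = {
  contextWindowTokens: 127_072, // still renders the existing token badge
  displayName: 'Sonar Pro',
  id: 'sonar-pro',
  search: true, // truthy => cyan LucideGlobe tag with the ModelSelect.featureTag.search tooltip
};
```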
package/src/components/Thinking/index.tsx CHANGED
@@ -7,6 +7,8 @@ import { CSSProperties, memo, useEffect, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 import { Flexbox } from 'react-layout-kit';
 
+import { CitationItem } from '@/types/message';
+
 const useStyles = createStyles(({ css, token, isDarkMode }) => ({
   container: css`
     width: fit-content;
@@ -59,13 +61,14 @@ const useStyles = createStyles(({ css, token, isDarkMode }) => ({
 }));
 
 interface ThinkingProps {
+  citations?: CitationItem[];
   content?: string;
   duration?: number;
   style?: CSSProperties;
   thinking?: boolean;
 }
 
-const Thinking = memo<ThinkingProps>(({ content, duration, thinking, style }) => {
+const Thinking = memo<ThinkingProps>(({ content, duration, thinking, style, citations }) => {
   const { t } = useTranslation(['components', 'common']);
   const { styles, cx } = useStyles();
 
@@ -135,7 +138,9 @@ const Thinking = memo<ThinkingProps>(({ content, duration, thinking, style }) =>
         }}
       >
         {typeof content === 'string' ? (
-          <Markdown variant={'chat'}>
+          <Markdown citations={citations} variant={'chat'}>
+            {content}
+          </Markdown>
         ) : (
           content
         )}
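Note: the new `citations` prop is simply forwarded to `<Markdown />`, so reasoning blocks can link the sources returned by search-grounded models. A usage sketch — the `CitationItem` fields and the default export are assumptions; only the prop name comes from the hunk above:

```tsx
import Thinking from '@/components/Thinking';

// Hypothetical data: the real CitationItem shape is defined in '@/types/message'.
const citations = [{ title: 'Search result', url: 'https://example.com/source' }];

export const ReasoningPreview = () => (
  <Thinking citations={citations} content={'…reasoning text…'} thinking={false} />
);
```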
package/src/config/aiModels/jina.ts CHANGED
@@ -4,19 +4,21 @@ const jinaChatModels: AIChatModelCard[] = [
   {
     abilities: {
       reasoning: true,
+      search: true,
     },
     contextWindowTokens: 1_000_000,
-    description:
+    description:
+      '深度搜索结合了网络搜索、阅读和推理,可进行全面调查。您可以将其视为一个代理,接受您的研究任务 - 它会进行广泛搜索并经过多次迭代,然后才能给出答案。这个过程涉及持续的研究、推理和从各个角度解决问题。这与直接从预训练数据生成答案的标准大模型以及依赖一次性表面搜索的传统 RAG 系统有着根本的不同。',
     displayName: 'Jina DeepSearch v1',
     enabled: true,
     id: 'jina-deepsearch-v1',
     pricing: {
       input: 0.02,
-      output: 0.02
+      output: 0.02,
     },
-    type: 'chat'
-  }
-]
+    type: 'chat',
+  },
+];
 
 export const allModels = [...jinaChatModels];
 
package/src/config/aiModels/perplexity.ts CHANGED
@@ -4,6 +4,7 @@ const perplexityChatModels: AIChatModelCard[] = [
   {
     abilities: {
       reasoning: true,
+      search: true,
     },
     contextWindowTokens: 127_072,
     description: '由 DeepSeek 推理模型提供支持的新 API 产品。',
@@ -16,6 +17,7 @@
   {
     abilities: {
       reasoning: true,
+      search: true,
     },
     contextWindowTokens: 127_072,
     description: '由 DeepSeek 推理模型提供支持的新 API 产品。',
@@ -26,6 +28,9 @@
     type: 'chat',
   },
   {
+    abilities: {
+      search: true,
+    },
     contextWindowTokens: 200_000,
     description: '支持搜索上下文的高级搜索产品,支持高级查询和跟进。',
     displayName: 'Sonar Pro',
@@ -34,6 +39,9 @@
     type: 'chat',
   },
   {
+    abilities: {
+      search: true,
+    },
     contextWindowTokens: 127_072,
     description: '基于搜索上下文的轻量级搜索产品,比 Sonar Pro 更快、更便宜。',
     displayName: 'Sonar',
package/src/config/llm.ts CHANGED
@@ -15,6 +15,10 @@ export const getLLMConfig = () => {
       AZURE_API_VERSION: z.string().optional(),
       AZURE_ENDPOINT: z.string().optional(),
 
+      ENABLED_AZUREAI: z.boolean(),
+      AZUREAI_ENDPOINT: z.string().optional(),
+      AZUREAI_ENDPOINT_KEY: z.string().optional(),
+
       ENABLED_ZHIPU: z.boolean(),
       ZHIPU_API_KEY: z.string().optional(),
 
@@ -155,6 +159,10 @@ export const getLLMConfig = () => {
       AZURE_API_VERSION: process.env.AZURE_API_VERSION,
       AZURE_ENDPOINT: process.env.AZURE_ENDPOINT,
 
+      ENABLED_AZUREAI: !!process.env.AZUREAI_ENDPOINT_KEY,
+      AZUREAI_ENDPOINT_KEY: process.env.AZUREAI_ENDPOINT_KEY,
+      AZUREAI_ENDPOINT: process.env.AZUREAI_ENDPOINT,
+
       ENABLED_ZHIPU: !!process.env.ZHIPU_API_KEY,
       ZHIPU_API_KEY: process.env.ZHIPU_API_KEY,
 
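Note: the AZUREAI_* entries follow the same enable-by-key convention as the other providers — setting `AZUREAI_ENDPOINT_KEY` is what turns the provider on. A standalone sketch of that pattern; in the repo these entries live inside `getLLMConfig()` rather than in a separate helper like the hypothetical `getAzureAIConfig` below:

```ts
import { z } from 'zod';

// Sketch of the enable-by-key convention shown above; variable names come from the hunk,
// the helper itself is hypothetical.
const azureAISchema = z.object({
  AZUREAI_ENDPOINT: z.string().optional(),
  AZUREAI_ENDPOINT_KEY: z.string().optional(),
  ENABLED_AZUREAI: z.boolean(),
});

export const getAzureAIConfig = () =>
  azureAISchema.parse({
    AZUREAI_ENDPOINT: process.env.AZUREAI_ENDPOINT,
    AZUREAI_ENDPOINT_KEY: process.env.AZUREAI_ENDPOINT_KEY,
    // The provider is considered enabled as soon as an endpoint key is present.
    ENABLED_AZUREAI: !!process.env.AZUREAI_ENDPOINT_KEY,
  });
```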
package/src/config/modelProviders/sambanova.ts CHANGED
@@ -10,9 +10,12 @@ const SambaNova: ModelProviderCard = {
   name: 'SambaNova',
   settings: {
     disableBrowserRequest: true,
+    proxyUrl: {
+      placeholder: 'https://api.sambanova.ai/v1',
+    },
     sdkType: 'openai',
   },
   url: 'https://cloud.sambanova.ai',
 };
 
-export default SambaNova;
+export default SambaNova;
package/src/database/client/migrations.json CHANGED
@@ -223,10 +223,7 @@
     "hash": "9646161fa041354714f823d726af27247bcd6e60fa3be5698c0d69f337a5700b"
   },
   {
-    "sql": [
-      "DROP TABLE \"user_budgets\";",
-      "\nDROP TABLE \"user_subscriptions\";"
-    ],
+    "sql": ["DROP TABLE \"user_budgets\";", "\nDROP TABLE \"user_subscriptions\";"],
     "bps": true,
     "folderMillis": 1729699958471,
     "hash": "7dad43a2a25d1aec82124a4e53f8d82f8505c3073f23606c1dc5d2a4598eacf9"
@@ -298,11 +295,18 @@
     "hash": "845a692ceabbfc3caf252a97d3e19a213bc0c433df2689900135f9cfded2cf49"
   },
   {
-    "sql": [
-      "ALTER TABLE \"messages\" ADD COLUMN \"reasoning\" jsonb;"
-    ],
+    "sql": ["ALTER TABLE \"messages\" ADD COLUMN \"reasoning\" jsonb;"],
     "bps": true,
     "folderMillis": 1737609172353,
     "hash": "2cb36ae4fcdd7b7064767e04bfbb36ae34518ff4bb1b39006f2dd394d1893868"
+  },
+  {
+    "sql": [
+      "ALTER TABLE \"messages\" ADD COLUMN \"search\" jsonb;",
+      "\nALTER TABLE \"messages\" ADD COLUMN \"metadata\" jsonb;"
+    ],
+    "bps": true,
+    "folderMillis": 1739901891891,
+    "hash": "78d8fefd8c58938d7bc3da2295a73b35ce2e8d7cb2820f8e817acdb8dd5bebb2"
   }
-]
+]