@lobehub/chat 1.74.10 → 1.75.0
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
- package/CHANGELOG.md +60 -0
- package/Dockerfile +2 -0
- package/Dockerfile.database +2 -0
- package/Dockerfile.pglite +2 -0
- package/changelog/v1.json +21 -0
- package/locales/ar/models.json +9 -0
- package/locales/bg-BG/models.json +9 -0
- package/locales/de-DE/models.json +9 -0
- package/locales/en-US/models.json +9 -0
- package/locales/es-ES/models.json +9 -0
- package/locales/fa-IR/models.json +9 -0
- package/locales/fr-FR/models.json +9 -0
- package/locales/it-IT/models.json +9 -0
- package/locales/ja-JP/models.json +9 -0
- package/locales/ko-KR/models.json +9 -0
- package/locales/nl-NL/models.json +9 -0
- package/locales/pl-PL/models.json +9 -0
- package/locales/pt-BR/models.json +9 -0
- package/locales/ru-RU/models.json +9 -0
- package/locales/tr-TR/models.json +9 -0
- package/locales/vi-VN/models.json +9 -0
- package/locales/zh-CN/models.json +9 -0
- package/locales/zh-TW/models.json +9 -0
- package/package.json +1 -1
- package/packages/web-crawler/src/crawImpl/__tests__/browserless.test.ts +41 -0
- package/packages/web-crawler/src/crawImpl/search1api.ts +2 -2
- package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/index.tsx +1 -1
- package/src/app/[variants]/(main)/settings/_layout/Desktop/Header.tsx +11 -9
- package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +2 -0
- package/src/config/aiModels/google.ts +26 -0
- package/src/config/aiModels/index.ts +3 -0
- package/src/config/aiModels/xinference.ts +171 -0
- package/src/config/llm.ts +6 -0
- package/src/config/modelProviders/index.ts +4 -0
- package/src/config/modelProviders/xinference.ts +18 -0
- package/src/features/Conversation/components/AutoScroll.tsx +2 -1
- package/src/features/Conversation/components/ChatItem/ActionsBar.tsx +7 -2
- package/src/features/Conversation/components/ChatItem/index.tsx +6 -1
- package/src/features/Conversation/components/VirtualizedList/VirtuosoContext.ts +4 -0
- package/src/features/Conversation/components/VirtualizedList/index.tsx +34 -31
- package/src/features/Portal/Thread/Chat/ChatItem.tsx +1 -1
- package/src/libs/agent-runtime/runtimeMap.ts +2 -0
- package/src/libs/agent-runtime/types/type.ts +1 -0
- package/src/libs/agent-runtime/xinference/index.ts +53 -0
- package/src/store/chat/slices/message/selectors.test.ts +42 -0
- package/src/store/chat/slices/message/selectors.ts +4 -0
- package/src/types/user/settings/keyVaults.ts +1 -0
package/packages/web-crawler/src/crawImpl/__tests__/browserless.test.ts
CHANGED
@@ -73,6 +73,47 @@ describe('browserless', () => {
     });
   });
 
+  it('should include rejectRequestPattern in request payload', async () => {
+    process.env.BROWSERLESS_TOKEN = 'test-token';
+    const fetchMock = vi.fn().mockResolvedValue({
+      text: vi.fn().mockResolvedValue('<html><title>Test</title></html>'),
+    });
+    global.fetch = fetchMock;
+
+    await browserless('https://example.com', { filterOptions: {} });
+
+    const requestPayload = JSON.parse(fetchMock.mock.calls[0][1].body);
+    expect(requestPayload.rejectRequestPattern).toEqual([
+      '.*\\.(?!(html|css|js|json|xml|webmanifest|txt|md)(\\?|#|$))[\\w-]+(?:[\\?#].*)?$',
+    ]);
+  });
+
+  it('should allow requests to permitted file types', async () => {
+    const allowedExtensions = ['html', 'css', 'js', 'json', 'xml', 'webmanifest', 'txt', 'md'];
+    const pattern = new RegExp(
+      '.*\\.(?!(html|css|js|json|xml|webmanifest|txt|md)(\\?|#|$))[\\w-]+(?:[\\?#].*)?$',
+    );
+
+    allowedExtensions.forEach((ext) => {
+      expect(`file.${ext}`).not.toMatch(pattern);
+      expect(`file.${ext}?param=value`).not.toMatch(pattern);
+      expect(`file.${ext}#hash`).not.toMatch(pattern);
+    });
+  });
+
+  it('should reject requests to non-permitted file types', async () => {
+    const rejectedExtensions = ['jpg', 'png', 'gif', 'pdf', 'doc', 'mp4', 'wav'];
+    const pattern = new RegExp(
+      '.*\\.(?!(html|css|js|json|xml|webmanifest|txt|md)(\\?|#|$))[\\w-]+(?:[\\?#].*)?$',
+    );
+
+    rejectedExtensions.forEach((ext) => {
+      expect(`file.${ext}`).toMatch(pattern);
+      expect(`file.${ext}?param=value`).toMatch(pattern);
+      expect(`file.${ext}#hash`).toMatch(pattern);
+    });
+  });
+
   it('should use correct URL when BROWSERLESS_URL is provided', async () => {
     const customUrl = 'https://custom.browserless.io';
     const originalEnv = { ...process.env };
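The pattern these tests pin down rejects any request whose path ends in a file extension outside the allowlist: `.*\.` consumes up to an extension boundary, the negative lookahead refuses the match when the extension is one of the permitted document types (followed by `?`, `#`, or end of string), and the tail tolerates query strings and fragments. A standalone sanity check (plain TypeScript, nothing assumed beyond the regex itself):

    // Same regex as in the diff; logs which sample paths the pattern rejects.
    const pattern = new RegExp(
      '.*\\.(?!(html|css|js|json|xml|webmanifest|txt|md)(\\?|#|$))[\\w-]+(?:[\\?#].*)?$',
    );

    for (const file of ['page.html', 'style.css?v=2', 'photo.jpg', 'movie.mp4#t=10']) {
      // 'photo.jpg' and 'movie.mp4#t=10' match (rejected); the first two do not.
      console.log(file, pattern.test(file) ? 'rejected' : 'allowed');
    }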
package/packages/web-crawler/src/crawImpl/search1api.ts
CHANGED
@@ -13,7 +13,7 @@ interface Search1ApiResponse {
   };
 }
 
-export const search1api: CrawlImpl = async (url, { filterOptions }) => {
+export const search1api: CrawlImpl = async (url) => {
   // Get API key from environment variable
   const apiKey = process.env.SEARCH1API_API_KEY;
 
@@ -80,4 +80,4 @@ export const search1api: CrawlImpl = async (url, { filterOptions }) => {
   }
 
   return;
-};
+};
package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/index.tsx
CHANGED
@@ -64,7 +64,7 @@ const MainChatItem = memo<ThreadChatItemProps>(({ id, index }) => {
 
   const placement = displayMode === 'chat' && userRole === 'user' ? 'end' : 'start';
 
-  const actionBar = useMemo(() => <ActionsBar id={id} />, [id]);
+  const actionBar = useMemo(() => <ActionsBar id={id} index={index} />, [id]);
 
   return (
     <ChatItem
package/src/app/[variants]/(main)/settings/_layout/Desktop/Header.tsx
CHANGED
@@ -53,16 +53,7 @@ const Header = memo<HeaderProps>(({ children, getContainer, title }) => {
         }
       />
       <Drawer
-        bodyStyle={{
-          display: 'flex',
-          flexDirection: 'column',
-          gap: 20,
-          justifyContent: 'space-between',
-          padding: 16,
-        }}
         getContainer={getContainer}
-        headerStyle={{ display: 'none' }}
-        maskStyle={{ background: 'transparent' }}
         onClick={() => setOpen(false)}
         onClose={() => setOpen(false)}
         open={open}
@@ -72,6 +63,17 @@ const Header = memo<HeaderProps>(({ children, getContainer, title }) => {
           background: theme.colorBgContainer,
           borderRight: `1px solid ${theme.colorSplit}`,
         }}
+        styles={{
+          body: {
+            display: 'flex',
+            flexDirection: 'column',
+            gap: 20,
+            justifyContent: 'space-between',
+            padding: 16,
+          },
+          header: { display: 'none' },
+          mask: { background: 'transparent' },
+        }}
         width={260}
         zIndex={10}
       >
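This is a mechanical migration: antd 5.10+ deprecates the per-part `bodyStyle`/`headerStyle`/`maskStyle` props on Drawer in favor of a single semantic `styles` object. The same pattern in isolation (a minimal sketch, component name is illustrative):

    import { Drawer } from 'antd';

    // One `styles` object keyed by semantic part replaces the deprecated
    // bodyStyle/headerStyle/maskStyle props.
    const ExampleDrawer = () => (
      <Drawer
        open
        styles={{
          body: { display: 'flex', flexDirection: 'column', gap: 20, padding: 16 },
          header: { display: 'none' },
          mask: { background: 'transparent' },
        }}
      />
    );

    export default ExampleDrawer;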
package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx
CHANGED
@@ -37,6 +37,7 @@ import {
   VLLMProviderCard,
   WenxinProviderCard,
   XAIProviderCard,
+  XinferenceProviderCard,
   ZeroOneProviderCard,
   ZhiPuProviderCard,
 } from '@/config/modelProviders';
@@ -65,6 +66,7 @@ export const useProviderList = (): ProviderItem[] => {
   AzureProvider,
   OllamaProvider,
   VLLMProviderCard,
+  XinferenceProviderCard,
   AnthropicProviderCard,
   BedrockProvider,
   GoogleProviderCard,
package/src/config/aiModels/google.ts
CHANGED
@@ -26,6 +26,32 @@ const googleChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 1_048_576 + 8192,
+    description:
+      'Gemini 2.5 Pro Experimental 是 Google 最先进的思维模型,能够对代码、数学和STEM领域的复杂问题进行推理,还能利用长上下文来分析大型数据集、代码库和文档。',
+    displayName: 'Gemini 2.5 Pro Experimental 03-25',
+    enabled: true,
+    id: 'gemini-2.5-pro-exp-03-25',
+    maxOutput: 8192,
+    pricing: {
+      cachedInput: 0,
+      input: 0,
+      output: 0,
+    },
+    releasedAt: '2025-03-25',
+    settings: {
+      searchImpl: 'params',
+      searchProvider: 'google',
+    },
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
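Note how the new card expresses its context window: the 1,048,576-token input window plus the 8,192-token `maxOutput` budget, summed in source rather than pre-computed (reading the sum that way is an interpretation, not stated in the diff):

    // contextWindowTokens for gemini-2.5-pro-exp-03-25, as written in the card:
    const contextWindowTokens = 1_048_576 + 8192;
    console.log(contextWindowTokens); // 1056768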
package/src/config/aiModels/index.ts
CHANGED
@@ -49,6 +49,7 @@ import { default as vllm } from './vllm';
 import { default as volcengine } from './volcengine';
 import { default as wenxin } from './wenxin';
 import { default as xai } from './xai';
+import { default as xinference } from './xinference';
 import { default as zeroone } from './zeroone';
 import { default as zhipu } from './zhipu';
 
@@ -121,6 +122,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
   volcengine,
   wenxin,
   xai,
+  xinference,
   zeroone,
   zhipu,
 });
@@ -174,5 +176,6 @@ export { default as vllm } from './vllm';
 export { default as volcengine } from './volcengine';
 export { default as wenxin } from './wenxin';
 export { default as xai } from './xai';
+export { default as xinference } from './xinference';
 export { default as zeroone } from './zeroone';
 export { default as zhipu } from './zhipu';
package/src/config/aiModels/xinference.ts
ADDED
@@ -0,0 +1,171 @@
+import { AIChatModelCard } from '@/types/aiModel';
+
+const xinferenceChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 163_840,
+    description:
+      'DeepSeek-V3 是一个强大的专家混合(MoE)语言模型,拥有总计 6710 亿参数,每个 token 激活 370 亿参数。',
+    displayName: 'DeepSeek V3',
+    enabled: true,
+    id: 'deepseek-v3',
+    type: 'chat'
+  },
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 163_840,
+    description:
+      'DeepSeek-R1 在强化学习(RL)之前引入了冷启动数据,在数学、代码和推理任务上表现可与 OpenAI-o1 相媲美。',
+    displayName: 'DeepSeek R1',
+    enabled: true,
+    id: 'deepseek-r1',
+    type: 'chat'
+  },
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      'deepseek-r1-distill-llama 是基于 Llama 从 DeepSeek-R1 蒸馏而来的模型。',
+    displayName: 'DeepSeek R1 Distill Llama',
+    enabled: true,
+    id: 'deepseek-r1-distill-llama',
+    type: 'chat'
+  },
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      'deepseek-r1-distill-qwen 是基于 Qwen 从 DeepSeek-R1 蒸馏而来的模型。',
+    displayName: 'DeepSeek R1 Distill Qwen',
+    enabled: true,
+    id: 'deepseek-r1-distill-qwen',
+    type: 'chat'
+  },
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 32_768,
+    description:
+      'QwQ 是 Qwen 系列的推理模型。与传统的指令微调模型相比,QwQ 具备思考和推理能力,在下游任务中,尤其是复杂问题上,能够实现显著增强的性能。QwQ-32B 是一款中型推理模型,其性能可与最先进的推理模型(如 DeepSeek-R1、o1-mini)相媲美。',
+    displayName: 'QwQ 32B',
+    enabled: true,
+    id: 'qwq-32b',
+    type: 'chat'
+  },
+  {
+    abilities: {
+      reasoning: true,
+      vision: true,
+    },
+    contextWindowTokens: 32_768,
+    description:
+      'QVQ-72B-Preview 是由 Qwen 团队开发的实验性研究模型,专注于提升视觉推理能力。',
+    displayName: 'QVQ 72B Preview',
+    enabled: true,
+    id: 'qvq-72b-preview',
+    type: 'chat'
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 32_768,
+    description:
+      'Qwen2.5 是 Qwen 大型语言模型的最新系列。对于 Qwen2.5,我们发布了多个基础语言模型和指令微调语言模型,参数范围从 5 亿到 72 亿不等。',
+    displayName: 'Qwen2.5 Instruct',
+    enabled: true,
+    id: 'qwen2.5-instruct',
+    type: 'chat'
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 32_768,
+    description:
+      'Qwen2.5-Coder 是 Qwen 系列中最新的代码专用大型语言模型(前身为 CodeQwen)。',
+    displayName: 'Qwen2.5 Coder Instruct',
+    enabled: true,
+    id: 'qwen2.5-coder-instruct',
+    type: 'chat'
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 128_000,
+    description:
+      'Qwen2.5-VL 是 Qwen 模型家族中视觉语言模型的最新版本。',
+    displayName: 'Qwen2.5 VL Instruct',
+    enabled: true,
+    id: 'qwen2.5-vl-instruct',
+    type: 'chat'
+  },
+  {
+    contextWindowTokens: 1_024_000,
+    description:
+      'Mistral-Nemo-Instruct-2407 大型语言模型(LLM)是 Mistral-Nemo-Base-2407 的指令微调版本。',
+    displayName: 'Mistral Nemo Instruct',
+    enabled: true,
+    id: 'mistral-nemo-instruct',
+    type: 'chat'
+  },
+  {
+    contextWindowTokens: 131_072,
+    description:
+      'Mistral-Large-Instruct-2407 是一款先进的稠密大型语言模型(LLM),拥有 1230 亿参数,具备最先进的推理、知识和编码能力。',
+    displayName: 'Mistral Large Instruct',
+    enabled: true,
+    id: 'mistral-large-instruct',
+    type: 'chat'
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      'Llama 3.3 指令微调模型针对对话场景进行了优化,在常见的行业基准测试中,超越了许多现有的开源聊天模型。',
+    displayName: 'Llama 3.3 Instruct',
+    enabled: true,
+    id: 'llama-3.3-instruct',
+    type: 'chat'
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 163_840,
+    description:
+      'Llama 3.2-Vision 指令微调模型针对视觉识别、图像推理、图像描述和回答与图像相关的常规问题进行了优化。',
+    displayName: 'Llama 3.2 Vision Instruct',
+    enabled: true,
+    id: 'llama-3.2-vision-instruct',
+    type: 'chat'
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      'Llama 3.1 指令微调模型针对对话场景进行了优化,在常见的行业基准测试中,超越了许多现有的开源聊天模型。',
+    displayName: 'Llama 3.1 Instruct',
+    enabled: true,
+    id: 'llama-3.1-instruct',
+    type: 'chat'
+  },
+]
+
+export const allModels = [...xinferenceChatModels];
+
+export default allModels;
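These cards flow into LOBE_DEFAULT_MODEL_LIST through the index.ts change above. A quick way to inspect what shipped, for example the reasoning-capable subset (a sketch, assuming the repo's `@` path alias resolves as usual):

    import xinferenceModels from '@/config/aiModels/xinference';

    // List the reasoning-capable Xinference models added in this release.
    const reasoningIds = xinferenceModels
      .filter((m) => m.abilities?.reasoning)
      .map((m) => m.id);

    console.log(reasoningIds);
    // -> ['deepseek-r1', 'deepseek-r1-distill-llama', 'deepseek-r1-distill-qwen',
    //     'qwq-32b', 'qvq-72b-preview']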
package/src/config/llm.ts
CHANGED
@@ -75,6 +75,9 @@ export const getLLMConfig = () => {
       ENABLED_VLLM: z.boolean(),
       VLLM_API_KEY: z.string().optional(),
 
+      ENABLED_XINFERENCE: z.boolean(),
+      XINFERENCE_API_KEY: z.string().optional(),
+
       ENABLED_QWEN: z.boolean(),
       QWEN_API_KEY: z.string().optional(),
 
@@ -234,6 +237,9 @@ export const getLLMConfig = () => {
       ENABLED_VLLM: !!process.env.VLLM_API_KEY,
       VLLM_API_KEY: process.env.VLLM_API_KEY,
 
+      ENABLED_XINFERENCE: !!process.env.XINFERENCE_API_KEY,
+      XINFERENCE_API_KEY: process.env.XINFERENCE_API_KEY,
+
       ENABLED_QWEN: !!process.env.QWEN_API_KEY,
       QWEN_API_KEY: process.env.QWEN_API_KEY,
 
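As with the other providers in this file, there is no separate on/off switch: any non-empty XINFERENCE_API_KEY flips ENABLED_XINFERENCE to true. The mechanics in isolation (the placeholder value is an assumption, not from the diff; a local Xinference server with auth disabled does not check the key):

    // Mirrors the pattern used throughout getLLMConfig(): presence of the key
    // implies the provider is enabled.
    process.env.XINFERENCE_API_KEY ??= 'placeholder-key'; // hypothetical value

    const ENABLED_XINFERENCE = !!process.env.XINFERENCE_API_KEY;
    console.log(ENABLED_XINFERENCE); // true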
package/src/config/modelProviders/index.ts
CHANGED
@@ -49,6 +49,7 @@ import VLLMProvider from './vllm';
 import VolcengineProvider from './volcengine';
 import WenxinProvider from './wenxin';
 import XAIProvider from './xai';
+import XinferenceProvider from './xinference';
 import ZeroOneProvider from './zeroone';
 import ZhiPuProvider from './zhipu';
 
@@ -69,6 +70,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   MoonshotProvider.chatModels,
   OllamaProvider.chatModels,
   VLLMProvider.chatModels,
+  XinferenceProvider.chatModels,
   OpenRouterProvider.chatModels,
   TogetherAIProvider.chatModels,
   FireworksAIProvider.chatModels,
@@ -108,6 +110,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   AzureAIProvider,
   OllamaProvider,
   VLLMProvider,
+  XinferenceProvider,
   AnthropicProvider,
   BedrockProvider,
   GoogleProvider,
@@ -215,5 +218,6 @@ export { default as VLLMProviderCard } from './vllm';
 export { default as VolcengineProviderCard } from './volcengine';
 export { default as WenxinProviderCard } from './wenxin';
 export { default as XAIProviderCard } from './xai';
+export { default as XinferenceProviderCard } from './xinference';
 export { default as ZeroOneProviderCard } from './zeroone';
 export { default as ZhiPuProviderCard } from './zhipu';
package/src/config/modelProviders/xinference.ts
ADDED
@@ -0,0 +1,18 @@
+import { ModelProviderCard } from '@/types/llm';
+
+const Xinference: ModelProviderCard = {
+  chatModels: [],
+  description: 'Xorbits Inference (Xinference) 是一个开源平台,用于简化各种 AI 模型的运行和集成。借助 Xinference,您可以使用任何开源 LLM、嵌入模型和多模态模型在云端或本地环境中运行推理,并创建强大的 AI 应用。',
+  id: 'xinference',
+  modelsUrl: 'https://inference.readthedocs.io/zh-cn/latest/models/builtin/index.html',
+  name: 'Xinference',
+  settings: {
+    proxyUrl: {
+      placeholder: 'http://localhost:9997/v1',
+    },
+    sdkType: 'openai',
+  },
+  url: 'https://inference.readthedocs.io/zh-cn/v0.12.3/index.html',
+};
+
+export default Xinference;
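The card's description (Chinese in the source) says Xinference is an open-source platform that simplifies running and integrating AI models: any open-source LLM, embedding model, or multimodal model can serve inference locally or in the cloud. Xinference exposes an OpenAI-compatible API, which is why the card sets `sdkType: 'openai'` and suggests `http://localhost:9997/v1` as the proxy URL. Talking to such a server directly (a hedged sketch; the model id must match one you have launched in Xinference):

    // Minimal chat call against a local Xinference server's OpenAI-compatible API.
    const chat = async () => {
      const res = await fetch('http://localhost:9997/v1/chat/completions', {
        body: JSON.stringify({
          messages: [{ content: 'Hello!', role: 'user' }],
          model: 'qwen2.5-instruct', // one of the built-in ids from the diff
        }),
        headers: { 'Content-Type': 'application/json' },
        method: 'POST',
      });
      const data = await res.json();
      console.log(data.choices[0].message.content);
    };

    chat();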
package/src/features/Conversation/components/AutoScroll.tsx
CHANGED
@@ -13,12 +13,13 @@ interface AutoScrollProps {
 const AutoScroll = memo<AutoScrollProps>(({ atBottom, isScrolling, onScrollToBottom }) => {
   const trackVisibility = useChatStore(chatSelectors.isAIGenerating);
   const str = useChatStore(chatSelectors.mainAIChatsMessageString);
+  const reasoningStr = useChatStore(chatSelectors.mainAILatestMessageReasoningContent);
 
   useEffect(() => {
     if (atBottom && trackVisibility && !isScrolling) {
       onScrollToBottom?.('auto');
     }
-  }, [atBottom, trackVisibility, str]);
+  }, [atBottom, trackVisibility, str, reasoningStr]);
 
   return <BackBottom onScrollToBottom={() => onScrollToBottom('click')} visible={!atBottom} />;
 });
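Reasoning models stream their thinking before the answer body; without `reasoningStr` in the dependency array, the effect would only re-fire on answer tokens, so the view would drift during the reasoning phase. The idea factored out (a sketch; the hook name is hypothetical):

    import { useEffect } from 'react';

    // The effect re-runs whenever either streamed string grows, keeping the
    // view pinned during both the reasoning and the answer phases.
    export const useFollowStreams = (
      shouldFollow: boolean,
      scrollToBottom: () => void,
      answer: string,
      reasoning: string,
    ) => {
      useEffect(() => {
        if (shouldFollow) scrollToBottom();
        // `answer` and `reasoning` are deliberate extra deps: their growth is the trigger.
        // eslint-disable-next-line react-hooks/exhaustive-deps
      }, [shouldFollow, answer, reasoning]);
    };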
package/src/features/Conversation/components/ChatItem/ActionsBar.tsx
CHANGED
@@ -1,9 +1,10 @@
 import { ActionEvent, ActionIconGroup, type ActionIconGroupProps } from '@lobehub/ui';
 import { App } from 'antd';
 import isEqual from 'fast-deep-equal';
-import { memo, useCallback } from 'react';
+import { memo, use, useCallback } from 'react';
 import { useTranslation } from 'react-i18next';
 
+import { VirtuosoContext } from '@/features/Conversation/components/VirtualizedList/VirtuosoContext';
 import { useChatStore } from '@/store/chat';
 import { chatSelectors } from '@/store/chat/selectors';
 import { MessageRoleType } from '@/types/message';
@@ -29,9 +30,10 @@ const ActionsBar = memo<ActionsBarProps>((props) => {
 interface ActionsProps {
   id: string;
   inPortalThread?: boolean;
+  index: number;
 }
 
-const Actions = memo<ActionsProps>(({ id, inPortalThread }) => {
+const Actions = memo<ActionsProps>(({ id, inPortalThread, index }) => {
   const item = useChatStore(chatSelectors.getMessageById(id), isEqual);
   const { t } = useTranslation('common');
   const [
@@ -58,12 +60,15 @@ const Actions = memo<ActionsProps>(({ id, inPortalThread }) => {
     s.toggleMessageEditing,
   ]);
   const { message } = App.useApp();
+  const virtuosoRef = use(VirtuosoContext);
 
   const handleActionClick = useCallback(
     async (action: ActionEvent) => {
       switch (action.key) {
         case 'edit': {
           toggleMessageEditing(id, true);
+
+          virtuosoRef?.current?.scrollIntoView({ align: 'start', behavior: 'auto', index });
         }
       }
       if (!item) return;
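Two things changed here: the context is read with React 19's `use`, which behaves like `useContext` but may also be called conditionally, and the new `index` prop lets the edit action scroll the edited row into view through the shared Virtuoso handle. Reading a context with `use` in isolation (hypothetical context, for illustration only):

    import { createContext, use } from 'react';

    // Hypothetical context for illustration.
    const ThemeContext = createContext<'light' | 'dark'>('light');

    const Badge = ({ visible }: { visible: boolean }) => {
      // Unlike useContext, `use` may sit behind a condition or early return.
      if (!visible) return null;
      const theme = use(ThemeContext);
      return <span data-theme={theme}>on</span>;
    };

    export default Badge;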
package/src/features/Conversation/components/ChatItem/index.tsx
CHANGED
@@ -3,10 +3,11 @@
 import { ChatItem } from '@lobehub/ui';
 import { createStyles } from 'antd-style';
 import isEqual from 'fast-deep-equal';
-import { MouseEventHandler, ReactNode, memo, useCallback, useMemo } from 'react';
+import { MouseEventHandler, ReactNode, memo, use, useCallback, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
 import { Flexbox } from 'react-layout-kit';
 
+import { VirtuosoContext } from '@/features/Conversation/components/VirtualizedList/VirtuosoContext';
 import { useAgentStore } from '@/store/agent';
 import { agentChatConfigSelectors } from '@/store/agent/selectors';
 import { useChatStore } from '@/store/chat';
@@ -64,6 +65,7 @@ const Item = memo<ChatListItemProps>(
     endRender,
     disableEditing,
     inPortalThread = false,
+    index,
   }) => {
     const { t } = useTranslation('common');
     const { styles, cx } = useStyles();
@@ -188,6 +190,7 @@ const Item = memo<ChatListItemProps>(
     );
 
     const onChange = useCallback((value: string) => updateMessageContent(id, value), [id]);
+    const virtuosoRef = use(VirtuosoContext);
 
     const onDoubleClick = useCallback<MouseEventHandler<HTMLDivElement>>(
       (e) => {
@@ -195,6 +198,8 @@ const Item = memo<ChatListItemProps>(
         if (item.id === 'default' || item.error) return;
         if (item.role && ['assistant', 'user'].includes(item.role) && e.altKey) {
           toggleMessageEditing(id, true);
+
+          virtuosoRef?.current?.scrollIntoView({ align: 'start', behavior: 'auto', index });
         }
       },
       [item, disableEditing],
package/src/features/Conversation/components/VirtualizedList/index.tsx
CHANGED
@@ -13,6 +13,7 @@ import { chatSelectors } from '@/store/chat/selectors';
 
 import AutoScroll from '../AutoScroll';
 import SkeletonList from '../SkeletonList';
+import { VirtuosoContext } from './VirtuosoContext';
 
 interface VirtualizedListProps {
   dataSource: string[];
@@ -69,38 +70,40 @@ const VirtualizedList = memo<VirtualizedListProps>(({ mobile, dataSource, itemContent }) => {
   );
 
   return (
+    <VirtuosoContext value={virtuosoRef}>
+      <Flexbox height={'100%'}>
+        <Virtuoso
+          atBottomStateChange={setAtBottom}
+          atBottomThreshold={50 * (mobile ? 2 : 1)}
+          computeItemKey={(_, item) => item}
+          data={dataSource}
+          followOutput={getFollowOutput}
+          increaseViewportBy={overscan}
+          initialTopMostItemIndex={dataSource?.length - 1}
+          isScrolling={setIsScrolling}
+          itemContent={itemContent}
+          overscan={overscan}
+          ref={virtuosoRef}
+        />
+        <AutoScroll
+          atBottom={atBottom}
+          isScrolling={isScrolling}
+          onScrollToBottom={(type) => {
+            const virtuoso = virtuosoRef.current;
+            switch (type) {
+              case 'auto': {
+                virtuoso?.scrollToIndex({ align: 'end', behavior: 'auto', index: 'LAST' });
+                break;
+              }
+              case 'click': {
+                virtuoso?.scrollToIndex({ align: 'end', behavior: 'smooth', index: 'LAST' });
+                break;
+              }
             }
+          }}
+        />
+      </Flexbox>
+    </VirtuosoContext>
   );
 });
 
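The 4-line VirtuosoContext.ts that this file and the two ChatItem changes import is listed in the file summary but not shown in this extract. Given how it is consumed, rendered directly as a provider (`<VirtuosoContext value={virtuosoRef}>`, a React 19 form) and read via `use(VirtuosoContext)` to call `scrollIntoView`/`scrollToIndex`, a plausible shape is (a sketch, not the verbatim file):

    import { RefObject, createContext } from 'react';
    import type { VirtuosoHandle } from 'react-virtuoso';

    // Shared handle to the chat list's Virtuoso instance, so distant components
    // (action bars, chat items) can request scrolls without prop drilling.
    export const VirtuosoContext = createContext<RefObject<VirtuosoHandle | null> | null>(null);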
package/src/features/Portal/Thread/Chat/ChatItem.tsx
CHANGED
@@ -31,7 +31,7 @@ const ThreadChatItem = memo<ThreadChatItemProps>(({ id, index }) => {
   const isParentMessage = index <= threadStartMessageIndex;
 
   const actionBar = useMemo(
-    () => !isParentMessage && <ActionsBar id={id} inPortalThread />,
+    () => !isParentMessage && <ActionsBar id={id} inPortalThread index={index} />,
     [id, isParentMessage],
   );
 
package/src/libs/agent-runtime/runtimeMap.ts
CHANGED
@@ -45,6 +45,7 @@ import { LobeVLLMAI } from './vllm';
 import { LobeVolcengineAI } from './volcengine';
 import { LobeWenxinAI } from './wenxin';
 import { LobeXAI } from './xai';
+import { LobeXinferenceAI } from './xinference';
 import { LobeZeroOneAI } from './zeroone';
 import { LobeZhipuAI } from './zhipu';
 
@@ -97,6 +98,7 @@ export const providerRuntimeMap = {
   volcengine: LobeVolcengineAI,
   wenxin: LobeWenxinAI,
   xai: LobeXAI,
+  xinference: LobeXinferenceAI,
   zeroone: LobeZeroOneAI,
   zhipu: LobeZhipuAI,
 };
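The runtime file itself (package/src/libs/agent-runtime/xinference/index.ts, +53 lines) is not part of this extract. Since the provider card declares `sdkType: 'openai'`, the runtime is presumably built on the repo's OpenAI-compatible factory, roughly like the other such providers; the paths and option shape below follow the repo's conventions and should be treated as assumptions, not the verbatim file:

    import { ModelProvider } from '../types';
    import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';

    // Hedged sketch: an OpenAI-compatible runtime pointed at the default
    // local Xinference endpoint suggested by the provider card.
    export const LobeXinferenceAI = LobeOpenAICompatibleFactory({
      baseURL: 'http://localhost:9997/v1',
      provider: ModelProvider.Xinference,
    });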