@lobehub/chat 1.94.3 → 1.94.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/scripts/create-failure-issue.js +256 -0
- package/.github/workflows/auto-i18n.yml +359 -0
- package/CHANGELOG.md +25 -0
- package/changelog/v1.json +9 -0
- package/locales/ar/setting.json +13 -1
- package/locales/bg-BG/setting.json +13 -1
- package/locales/de-DE/setting.json +13 -1
- package/locales/en-US/setting.json +13 -1
- package/locales/es-ES/setting.json +13 -1
- package/locales/fa-IR/setting.json +13 -1
- package/locales/fr-FR/setting.json +13 -1
- package/locales/it-IT/setting.json +13 -1
- package/locales/ja-JP/setting.json +13 -1
- package/locales/ko-KR/setting.json +13 -1
- package/locales/nl-NL/setting.json +13 -1
- package/locales/pl-PL/setting.json +13 -1
- package/locales/pt-BR/setting.json +13 -1
- package/locales/ru-RU/setting.json +13 -1
- package/locales/tr-TR/setting.json +13 -1
- package/locales/vi-VN/setting.json +13 -1
- package/locales/zh-CN/setting.json +13 -1
- package/locales/zh-TW/setting.json +13 -1
- package/package.json +1 -1
- package/src/app/[variants]/(main)/settings/common/features/ChatAppearance/ChatTransitionPreview.tsx +111 -0
- package/src/app/[variants]/(main)/settings/common/features/ChatAppearance/index.tsx +50 -3
- package/src/components/Thinking/index.tsx +4 -2
- package/src/config/modelProviders/anthropic.ts +1 -6
- package/src/config/modelProviders/baichuan.ts +4 -8
- package/src/config/modelProviders/google.ts +4 -4
- package/src/config/modelProviders/lmstudio.ts +4 -4
- package/src/config/modelProviders/minimax.ts +3 -3
- package/src/config/modelProviders/moonshot.ts +4 -4
- package/src/config/modelProviders/openai.ts +1 -3
- package/src/config/modelProviders/perplexity.ts +3 -3
- package/src/config/modelProviders/qwen.ts +4 -4
- package/src/config/modelProviders/search1api.ts +4 -4
- package/src/config/modelProviders/spark.ts +4 -4
- package/src/config/modelProviders/stepfun.ts +4 -4
- package/src/config/modelProviders/vertexai.ts +1 -3
- package/src/config/modelProviders/volcengine.ts +4 -4
- package/src/config/modelProviders/wenxin.ts +3 -3
- package/src/const/settings/common.ts +1 -0
- package/src/features/Conversation/Messages/Assistant/Reasoning/index.tsx +11 -1
- package/src/features/Conversation/components/ChatItem/index.tsx +6 -2
- package/src/features/Conversation/components/MarkdownElements/LobeThinking/Render.tsx +4 -0
- package/src/features/Conversation/components/MarkdownElements/Thinking/Render.tsx +12 -1
- package/src/locales/default/setting.ts +12 -0
- package/src/services/chat.ts +15 -6
- package/src/store/user/slices/settings/selectors/general.test.ts +1 -0
- package/src/store/user/slices/settings/selectors/general.ts +2 -0
- package/src/types/aiProvider.ts +11 -11
- package/src/types/llm.ts +8 -10
- package/src/types/user/settings/general.ts +3 -0
- package/src/utils/fetch/__tests__/fetchSSE.test.ts +57 -12
- package/src/utils/fetch/fetchSSE.ts +22 -15
package/src/config/modelProviders/lmstudio.ts
CHANGED
@@ -12,12 +12,12 @@ const LMStudio: ModelProviderCard = {
     proxyUrl: {
       placeholder: 'http://127.0.0.1:1234/v1',
     },
-    showApiKey: false,
-    showModelFetcher: true,
-    smoothing: {
+    responseAnimation: {
       speed: 2,
-      text: true,
+      text: 'smooth',
     },
+    showApiKey: false,
+    showModelFetcher: true,
   },
   url: 'https://lmstudio.ai',
 };
package/src/config/modelProviders/minimax.ts
CHANGED
@@ -50,11 +50,11 @@ const Minimax: ModelProviderCard = {
     proxyUrl: {
       placeholder: 'https://api.minimax.chat/v1',
     },
-    sdkType: 'openai',
-    smoothing: {
+    responseAnimation: {
       speed: 2,
-      text: true,
+      text: 'smooth',
     },
+    sdkType: 'openai',
   },
   url: 'https://www.minimaxi.com',
 };
package/src/config/modelProviders/moonshot.ts
CHANGED
@@ -42,12 +42,12 @@ const Moonshot: ModelProviderCard = {
     proxyUrl: {
       placeholder: 'https://api.moonshot.cn/v1',
     },
-    sdkType: 'openai',
-    showModelFetcher: true,
-    smoothing: {
+    responseAnimation: {
       speed: 2,
-      text: true,
+      text: 'smooth',
     },
+    sdkType: 'openai',
+    showModelFetcher: true,
   },
   url: 'https://www.moonshot.cn',
 };
package/src/config/modelProviders/openai.ts
CHANGED
@@ -332,10 +332,8 @@ const OpenAI: ModelProviderCard = {
   modelsUrl: 'https://platform.openai.com/docs/models',
   name: 'OpenAI',
   settings: {
+    responseAnimation: 'smooth',
     showModelFetcher: true,
-    smoothing: {
-      text: true,
-    },
     supportResponsesApi: true,
   },
   url: 'https://openai.com',
package/src/config/modelProviders/perplexity.ts
CHANGED
@@ -59,11 +59,11 @@ const Perplexity: ModelProviderCard = {
     proxyUrl: {
       placeholder: 'https://api.perplexity.ai',
     },
-    sdkType: 'openai',
-    smoothing: {
+    responseAnimation: {
       speed: 2,
-      text: true,
+      text: 'smooth',
     },
+    sdkType: 'openai',
   },
   url: 'https://www.perplexity.ai',
 };
package/src/config/modelProviders/qwen.ts
CHANGED
@@ -425,13 +425,13 @@ const Qwen: ModelProviderCard = {
     proxyUrl: {
       placeholder: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
     },
+    responseAnimation: {
+      speed: 2,
+      text: 'smooth',
+    },
     sdkType: 'openai',
     showDeployName: true,
     showModelFetcher: true,
-    smoothing: {
-      speed: 2,
-      text: true,
-    },
   },
   url: 'https://www.aliyun.com/product/bailian',
 };
package/src/config/modelProviders/search1api.ts
CHANGED
@@ -45,12 +45,12 @@ const Search1API: ModelProviderCard = {
     proxyUrl: {
       placeholder: 'https://api.search1api.com/v1',
     },
-    sdkType: 'openai',
-    showModelFetcher: true,
-    smoothing: {
+    responseAnimation: {
       speed: 2,
-      text: true,
+      text: 'smooth',
     },
+    sdkType: 'openai',
+    showModelFetcher: true,
   },
   url: 'https://www.search1api.com',
 };
package/src/config/modelProviders/spark.ts
CHANGED
@@ -73,12 +73,12 @@ const Spark: ModelProviderCard = {
     proxyUrl: {
       placeholder: 'https://spark-api-open.xf-yun.com/v1',
     },
-    sdkType: 'openai',
-    showModelFetcher: false,
-    smoothing: {
+    responseAnimation: {
       speed: 2,
-      text: true,
+      text: 'smooth',
     },
+    sdkType: 'openai',
+    showModelFetcher: false,
   },
   url: 'https://www.xfyun.cn',
 };
package/src/config/modelProviders/stepfun.ts
CHANGED
@@ -176,12 +176,12 @@ const Stepfun: ModelProviderCard = {
     proxyUrl: {
       placeholder: 'https://api.stepfun.com/v1',
     },
-    sdkType: 'openai',
-    showModelFetcher: true,
-    smoothing: {
+    responseAnimation: {
       speed: 2,
-      text: true,
+      text: 'smooth',
     },
+    sdkType: 'openai',
+    showModelFetcher: true,
   },
   url: 'https://stepfun.com',
 };
package/src/config/modelProviders/vertexai.ts
CHANGED
@@ -11,10 +11,8 @@ const VertexAI: ModelProviderCard = {
   name: 'Vertex AI',
   settings: {
     disableBrowserRequest: true,
+    responseAnimation: 'smooth',
     showModelFetcher: false,
-    smoothing: {
-      text: true,
-    },
   },
   url: 'https://cloud.google.com/vertex-ai',
 };
package/src/config/modelProviders/volcengine.ts
CHANGED
@@ -13,12 +13,12 @@ const Doubao: ModelProviderCard = {
     proxyUrl: {
       placeholder: 'https://ark.cn-beijing.volces.com/api/v3',
     },
-    sdkType: 'openai',
-    showDeployName: true,
-    smoothing: {
+    responseAnimation: {
       speed: 2,
-      text: true,
+      text: 'smooth',
     },
+    sdkType: 'openai',
+    showDeployName: true,
   },
   url: 'https://www.volcengine.com/product/ark',
 };
package/src/config/modelProviders/wenxin.ts
CHANGED
@@ -242,11 +242,11 @@ const BaiduWenxin: ModelProviderCard = {
     proxyUrl: {
       placeholder: 'https://qianfan.baidubce.com/v2',
     },
-    sdkType: 'openai',
-    smoothing: {
+    responseAnimation: {
       speed: 2,
-      text: true,
+      text: 'smooth',
     },
+    sdkType: 'openai',
   },
   url: 'https://cloud.baidu.com/wenxin.html',
 };
package/src/features/Conversation/Messages/Assistant/Reasoning/index.tsx
CHANGED
@@ -3,6 +3,8 @@ import { memo } from 'react';
 import Thinking from '@/components/Thinking';
 import { useChatStore } from '@/store/chat';
 import { aiChatSelectors } from '@/store/chat/selectors';
+import { useUserStore } from '@/store/user';
+import { userGeneralSettingsSelectors } from '@/store/user/selectors';
 
 interface ReasoningProps {
   content?: string;
@@ -12,8 +14,16 @@ interface ReasoningProps {
 
 const Reasoning = memo<ReasoningProps>(({ content = '', duration, id }) => {
   const isReasoning = useChatStore(aiChatSelectors.isMessageInReasoning(id));
+  const transitionMode = useUserStore(userGeneralSettingsSelectors.transitionMode);
 
-  return <Thinking content={content} duration={duration} thinking={isReasoning} />;
+  return (
+    <Thinking
+      content={content}
+      duration={duration}
+      thinking={isReasoning}
+      thinkingAnimated={transitionMode === 'fadeIn' && isReasoning}
+    />
+  );
 });
 
 export default Reasoning;
package/src/features/Conversation/components/ChatItem/index.tsx
CHANGED
@@ -12,6 +12,8 @@ import { useAgentStore } from '@/store/agent';
 import { agentChatConfigSelectors } from '@/store/agent/selectors';
 import { useChatStore } from '@/store/chat';
 import { chatSelectors } from '@/store/chat/selectors';
+import { useUserStore } from '@/store/user';
+import { userGeneralSettingsSelectors } from '@/store/user/selectors';
 import { ChatMessage } from '@/types/message';
 
 import ErrorMessageExtra, { useErrorContent } from '../../Error';
@@ -70,6 +72,7 @@ const Item = memo<ChatListItemProps>(
 
     const type = useAgentStore(agentChatConfigSelectors.displayMode);
     const item = useChatStore(chatSelectors.getMessageById(id), isEqual);
+    const transitionMode = useUserStore(userGeneralSettingsSelectors.transitionMode);
 
     const [
       isMessageLoading,
@@ -89,6 +92,7 @@ const Item = memo<ChatListItemProps>(
 
     // when the message is in RAG flow or the AI generating, it should be in loading state
     const isProcessing = isInRAGFlow || generating;
+    const animated = transitionMode === 'fadeIn' && generating;
 
     const onAvatarsClick = useAvatarsClick(item?.role);
 
@@ -168,7 +172,7 @@ const Item = memo<ChatListItemProps>(
 
     const markdownProps = useMemo(
      () => ({
-        animated
+        animated,
        citations: item?.role === 'user' ? undefined : item?.search?.citations,
        components,
        customRender: markdownCustomRender,
@@ -184,7 +188,7 @@ const Item = memo<ChatListItemProps>(
 
        // if the citations's url and title are all the same, we should not show the citations
        item?.search?.citations.every((item) => item.title !== item.url),
      }),
-      [
+      [animated, components, markdownCustomRender, item?.role, item?.search],
    );
 
    const onChange = useCallback((value: string) => updateMessageContent(id, value), [id]);
package/src/features/Conversation/components/MarkdownElements/LobeThinking/Render.tsx
CHANGED
@@ -4,6 +4,8 @@ import Thinking from '@/components/Thinking';
 import { ARTIFACT_THINKING_TAG } from '@/const/plugin';
 import { useChatStore } from '@/store/chat';
 import { chatSelectors } from '@/store/chat/selectors';
+import { useUserStore } from '@/store/user';
+import { userGeneralSettingsSelectors } from '@/store/user/selectors';
 
 import { MarkdownElementProps } from '../type';
 import { isTagClosed } from '../utils';
@@ -13,12 +15,14 @@ const Render = memo<MarkdownElementProps>(({ children, id }) => {
     const message = chatSelectors.getMessageById(id)(s);
     return [!isTagClosed(ARTIFACT_THINKING_TAG, message?.content)];
   });
+  const transitionMode = useUserStore(userGeneralSettingsSelectors.transitionMode);
 
   return (
     <Thinking
       content={children as string}
       style={{ width: isGenerating ? '100%' : undefined }}
       thinking={isGenerating}
+      thinkingAnimated={transitionMode === 'fadeIn' && isGenerating}
     />
   );
 });
package/src/features/Conversation/components/MarkdownElements/Thinking/Render.tsx
CHANGED
@@ -3,6 +3,8 @@ import { memo } from 'react';
 import Thinking from '@/components/Thinking';
 import { useChatStore } from '@/store/chat';
 import { chatSelectors } from '@/store/chat/selectors';
+import { useUserStore } from '@/store/user';
+import { userGeneralSettingsSelectors } from '@/store/user/selectors';
 
 import { MarkdownElementProps } from '../type';
 
@@ -23,9 +25,18 @@ const Render = memo<MarkdownElementProps>(({ children, id }) => {
     return message?.search?.citations;
   });
 
+  const transitionMode = useUserStore(userGeneralSettingsSelectors.transitionMode);
+
   if (!isGenerating && !children) return;
 
-  return <Thinking citations={citations} content={children as string} thinking={isGenerating} />;
+  return (
+    <Thinking
+      citations={citations}
+      content={children as string}
+      thinking={isGenerating}
+      thinkingAnimated={transitionMode === 'fadeIn' && isGenerating}
+    />
+  );
 });
 
 export default Render;
package/src/locales/default/setting.ts
CHANGED
@@ -245,6 +245,18 @@ export default {
       title: 'Mermaid 主题',
     },
     title: '聊天外观',
+    transitionMode: {
+      desc: '聊天消息的过渡动画',
+      options: {
+        fadeIn: '淡入',
+        none: {
+          desc: '这取决于模型的响应输出方式,请自行测试。',
+          value: '无',
+        },
+        smooth: '平滑',
+      },
+      title: '过渡动画',
+    },
   },
   settingCommon: {
     lang: {
package/src/services/chat.ts
CHANGED
@@ -29,6 +29,7 @@ import {
   modelConfigSelectors,
   modelProviderSelectors,
   preferenceSelectors,
+  userGeneralSettingsSelectors,
   userProfileSelectors,
 } from '@/store/user/selectors';
 import { WebBrowsingManifest } from '@/tools/web-browsing';
@@ -39,7 +40,12 @@ import type { ChatStreamPayload, OpenAIChatMessage } from '@/types/openai/chat';
 import { UserMessageContentPart } from '@/types/openai/chat';
 import { parsePlaceholderVariablesMessages } from '@/utils/client/parserPlaceholder';
 import { createErrorResponse } from '@/utils/errorResponse';
-import { FetchSSEOptions, fetchSSE, getMessageError } from '@/utils/fetch';
+import {
+  FetchSSEOptions,
+  fetchSSE,
+  getMessageError,
+  standardizeAnimationStyle,
+} from '@/utils/fetch';
 import { genToolCallingName } from '@/utils/toolCall';
 import { createTraceHeader, getTraceId } from '@/utils/trace';
 
@@ -295,7 +301,7 @@ class ChatService {
   };
 
   getChatCompletion = async (params: Partial<ChatStreamPayload>, options?: FetchOptions) => {
-    const { signal } = options ?? {};
+    const { signal, responseAnimation } = options ?? {};
 
     const { provider = ModelProvider.OpenAI, ...res } = params;
 
@@ -379,6 +385,9 @@ class ChatService {
       sdkType = providerConfig?.settings.sdkType || 'openai';
     }
 
+    const userPreferTransitionMode =
+      userGeneralSettingsSelectors.transitionMode(getUserStoreState());
+
     return fetchSSE(API_ENDPOINTS.chat(sdkType), {
       body: JSON.stringify(payload),
      fetcher: fetcher,
@@ -388,11 +397,11 @@ class ChatService {
       onErrorHandle: options?.onErrorHandle,
       onFinish: options?.onFinish,
       onMessageHandle: options?.onMessageHandle,
+      responseAnimation: [userPreferTransitionMode, responseAnimation].reduce(
+        (acc, cur) => merge(acc, standardizeAnimationStyle(cur)),
+        providerConfig?.settings?.responseAnimation ?? {},
+      ),
       signal,
-      smoothing:
-        providerConfig?.settings?.smoothing ||
-        // @deprecated in V2
-        providerConfig?.smoothing,
     });
   };
 
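The reduce above layers three animation sources, later ones overriding earlier ones: the provider's default responseAnimation, the user's transitionMode preference, and any per-call override passed through FetchOptions. A minimal standalone sketch of that precedence, assuming standardizeAnimationStyle does nothing more than expand the string shorthand into the object form (the real helper lives in @/utils/fetch and may differ), and using lodash's merge for the deep merge:

import { merge } from 'lodash-es';

type ResponseAnimationStyle = 'smooth' | 'fadeIn' | 'none';
interface AnimationConfig {
  speed?: number;
  text?: ResponseAnimationStyle;
  toolsCalling?: ResponseAnimationStyle;
}
type ResponseAnimation = ResponseAnimationStyle | AnimationConfig;

// Assumed stand-in for standardizeAnimationStyle: expand the string shorthand
// into the object form and pass objects (or undefined) through untouched.
const standardize = (animation?: ResponseAnimation): AnimationConfig =>
  typeof animation === 'string' ? { text: animation, toolsCalling: animation } : (animation ?? {});

// Provider default first, then the user preference, then the per-call override.
const resolveResponseAnimation = (
  provider?: ResponseAnimation,
  userPreference?: ResponseAnimation,
  perCall?: ResponseAnimation,
): AnimationConfig =>
  [userPreference, perCall].reduce<AnimationConfig>(
    (acc, cur) => merge(acc, standardize(cur)),
    standardize(provider),
  );

// Provider ships 'smooth' but the user picked 'fadeIn': the user preference wins.
resolveResponseAnimation('smooth', 'fadeIn');
// => { text: 'fadeIn', toolsCalling: 'fadeIn' }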
package/src/store/user/slices/settings/selectors/general.ts
CHANGED
@@ -8,6 +8,7 @@ const primaryColor = (s: UserStore) => generalConfig(s).primaryColor;
 const fontSize = (s: UserStore) => generalConfig(s).fontSize;
 const highlighterTheme = (s: UserStore) => generalConfig(s).highlighterTheme;
 const mermaidTheme = (s: UserStore) => generalConfig(s).mermaidTheme;
+const transitionMode = (s: UserStore) => generalConfig(s).transitionMode;
 
 export const userGeneralSettingsSelectors = {
   config: generalConfig,
@@ -16,4 +17,5 @@ export const userGeneralSettingsSelectors = {
   mermaidTheme,
   neutralColor,
   primaryColor,
+  transitionMode,
 };
package/src/types/aiProvider.ts
CHANGED
@@ -1,7 +1,7 @@
 import { z } from 'zod';
 
 import { AiModelForSelect, EnabledAiModel, ModelSearchImplementType } from '@/types/aiModel';
-import { SmoothingParams } from '@/types/llm';
+import { ResponseAnimation } from '@/types/llm';
 
 export const AiProviderSourceEnum = {
   Builtin: 'builtin',
@@ -58,6 +58,7 @@ export interface AiProviderSettings {
       }
     | false;
 
+  responseAnimation?: ResponseAnimation;
   /**
   * default openai
   */
@@ -75,13 +76,11 @@ export interface AiProviderSettings {
   showChecker?: boolean;
   showDeployName?: boolean;
   showModelFetcher?: boolean;
-  /**
-   * whether to smoothing the output
-   */
-  smoothing?: SmoothingParams;
   supportResponsesApi?: boolean;
 }
 
+const ResponseAnimationType = z.enum(['smooth', 'fadeIn', 'none']);
+
 const AiProviderSettingsSchema = z.object({
   defaultShowBrowserRequest: z.boolean().optional(),
   disableBrowserRequest: z.boolean().optional(),
@@ -94,6 +93,13 @@ const AiProviderSettingsSchema = z.object({
     })
     .or(z.literal(false))
    .optional(),
+  responseAnimation: z
+    .object({
+      text: ResponseAnimationType.optional(),
+      toolsCalling: ResponseAnimationType.optional(),
+    })
+    .or(ResponseAnimationType)
+    .optional(),
   sdkType: z.enum(['anthropic', 'openai', 'ollama']).optional(),
   searchMode: z.enum(['params', 'internal']).optional(),
   showAddNewModel: z.boolean().optional(),
@@ -101,12 +107,6 @@ const AiProviderSettingsSchema = z.object({
   showChecker: z.boolean().optional(),
   showDeployName: z.boolean().optional(),
   showModelFetcher: z.boolean().optional(),
-  smoothing: z
-    .object({
-      text: z.boolean().optional(),
-      toolsCalling: z.boolean().optional(),
-    })
-    .optional(),
   supportResponsesApi: z.boolean().optional(),
 });
 
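The reworked schema accepts responseAnimation either as a single style string or as a per-channel object, mirroring the ResponseAnimation union in types/llm.ts. A small sketch of the shapes that pass validation, using the same zod combinators the diff introduces:

import { z } from 'zod';

const ResponseAnimationType = z.enum(['smooth', 'fadeIn', 'none']);

// Object form with optional per-channel styles, or the bare string shorthand.
const ResponseAnimationSchema = z
  .object({
    text: ResponseAnimationType.optional(),
    toolsCalling: ResponseAnimationType.optional(),
  })
  .or(ResponseAnimationType)
  .optional();

ResponseAnimationSchema.parse('fadeIn'); // string shorthand
ResponseAnimationSchema.parse({ text: 'smooth', toolsCalling: 'none' }); // per-channel object
ResponseAnimationSchema.parse(undefined); // optional, so it can be omitted entirely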
package/src/types/llm.ts
CHANGED
@@ -59,11 +59,14 @@ export interface ChatModelCard {
   vision?: boolean;
 }
 
-export interface SmoothingParams {
-  speed?: number;
-  text?: boolean;
-  toolsCalling?: boolean;
-}
+export type ResponseAnimationStyle = 'smooth' | 'fadeIn' | 'none';
+export type ResponseAnimation =
+  | {
+      speed?: number;
+      text?: ResponseAnimationStyle;
+      toolsCalling?: ResponseAnimationStyle;
+    }
+  | ResponseAnimationStyle;
 
 export interface ModelProviderCard {
   /**
@@ -137,11 +140,6 @@ export interface ModelProviderCard {
   * whether to show the provider config
   */
  showConfig?: boolean;
-  /**
-   * whether to smoothing the output
-   * @deprecated
-   */
-  smoothing?: SmoothingParams;
  /**
   * provider's website url
   */
package/src/types/user/settings/general.ts
CHANGED
@@ -1,9 +1,12 @@
 import type { HighlighterProps, MermaidProps, NeutralColors, PrimaryColors } from '@lobehub/ui';
 
+import { ResponseAnimationStyle } from '@/types/llm';
+
 export interface UserGeneralConfig {
   fontSize: number;
   highlighterTheme?: HighlighterProps['theme'];
   mermaidTheme?: MermaidProps['theme'];
   neutralColor?: NeutralColors;
   primaryColor?: PrimaryColors;
+  transitionMode?: ResponseAnimationStyle;
 }