@lobehub/chat 1.94.3 → 1.94.5

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (67)
  1. package/.github/scripts/create-failure-issue.js +256 -0
  2. package/.github/workflows/auto-i18n.yml +359 -0
  3. package/CHANGELOG.md +58 -0
  4. package/README.md +1 -1
  5. package/README.zh-CN.md +1 -1
  6. package/changelog/v1.json +18 -0
  7. package/locales/ar/setting.json +13 -1
  8. package/locales/bg-BG/setting.json +13 -1
  9. package/locales/de-DE/setting.json +13 -1
  10. package/locales/en-US/setting.json +13 -1
  11. package/locales/es-ES/setting.json +13 -1
  12. package/locales/fa-IR/setting.json +13 -1
  13. package/locales/fr-FR/setting.json +13 -1
  14. package/locales/it-IT/setting.json +13 -1
  15. package/locales/ja-JP/setting.json +13 -1
  16. package/locales/ko-KR/setting.json +13 -1
  17. package/locales/nl-NL/setting.json +13 -1
  18. package/locales/pl-PL/setting.json +13 -1
  19. package/locales/pt-BR/setting.json +13 -1
  20. package/locales/ru-RU/setting.json +13 -1
  21. package/locales/tr-TR/setting.json +13 -1
  22. package/locales/vi-VN/setting.json +13 -1
  23. package/locales/zh-CN/setting.json +13 -1
  24. package/locales/zh-TW/setting.json +13 -1
  25. package/package.json +3 -2
  26. package/scripts/i18nWorkflow/genDefaultLocale.ts +2 -2
  27. package/scripts/i18nWorkflow/genDiff.ts +8 -9
  28. package/scripts/i18nWorkflow/utils.ts +14 -1
  29. package/src/app/[variants]/(main)/settings/common/features/ChatAppearance/ChatTransitionPreview.tsx +111 -0
  30. package/src/app/[variants]/(main)/settings/common/features/ChatAppearance/index.tsx +50 -3
  31. package/src/components/Thinking/index.tsx +4 -2
  32. package/src/config/aiModels/openai.ts +120 -0
  33. package/src/config/modelProviders/anthropic.ts +1 -6
  34. package/src/config/modelProviders/baichuan.ts +4 -8
  35. package/src/config/modelProviders/google.ts +4 -4
  36. package/src/config/modelProviders/lmstudio.ts +4 -4
  37. package/src/config/modelProviders/minimax.ts +3 -3
  38. package/src/config/modelProviders/moonshot.ts +4 -4
  39. package/src/config/modelProviders/openai.ts +1 -3
  40. package/src/config/modelProviders/perplexity.ts +3 -3
  41. package/src/config/modelProviders/qwen.ts +4 -4
  42. package/src/config/modelProviders/search1api.ts +4 -4
  43. package/src/config/modelProviders/spark.ts +4 -4
  44. package/src/config/modelProviders/stepfun.ts +4 -4
  45. package/src/config/modelProviders/vertexai.ts +1 -3
  46. package/src/config/modelProviders/volcengine.ts +4 -4
  47. package/src/config/modelProviders/wenxin.ts +3 -3
  48. package/src/const/models.ts +26 -1
  49. package/src/const/settings/common.ts +1 -0
  50. package/src/features/Conversation/Messages/Assistant/Reasoning/index.tsx +11 -1
  51. package/src/features/Conversation/components/ChatItem/index.tsx +6 -2
  52. package/src/features/Conversation/components/MarkdownElements/LobeThinking/Render.tsx +4 -0
  53. package/src/features/Conversation/components/MarkdownElements/Thinking/Render.tsx +12 -1
  54. package/src/libs/model-runtime/openai/index.ts +33 -11
  55. package/src/libs/model-runtime/types/chat.ts +2 -0
  56. package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts +10 -8
  57. package/src/libs/model-runtime/utils/streams/openai/__snapshots__/responsesStream.test.ts.snap +6 -6
  58. package/src/libs/model-runtime/utils/streams/openai/responsesStream.ts +38 -2
  59. package/src/locales/default/setting.ts +12 -0
  60. package/src/services/chat.ts +19 -6
  61. package/src/store/user/slices/settings/selectors/general.test.ts +1 -0
  62. package/src/store/user/slices/settings/selectors/general.ts +2 -0
  63. package/src/types/aiProvider.ts +11 -11
  64. package/src/types/llm.ts +8 -10
  65. package/src/types/user/settings/general.ts +3 -0
  66. package/src/utils/fetch/__tests__/fetchSSE.test.ts +57 -12
  67. package/src/utils/fetch/fetchSSE.ts +22 -15
package/src/features/Conversation/components/ChatItem/index.tsx CHANGED
@@ -12,6 +12,8 @@ import { useAgentStore } from '@/store/agent';
 import { agentChatConfigSelectors } from '@/store/agent/selectors';
 import { useChatStore } from '@/store/chat';
 import { chatSelectors } from '@/store/chat/selectors';
+import { useUserStore } from '@/store/user';
+import { userGeneralSettingsSelectors } from '@/store/user/selectors';
 import { ChatMessage } from '@/types/message';

 import ErrorMessageExtra, { useErrorContent } from '../../Error';
@@ -70,6 +72,7 @@ const Item = memo<ChatListItemProps>(

     const type = useAgentStore(agentChatConfigSelectors.displayMode);
     const item = useChatStore(chatSelectors.getMessageById(id), isEqual);
+    const transitionMode = useUserStore(userGeneralSettingsSelectors.transitionMode);

     const [
       isMessageLoading,
@@ -89,6 +92,7 @@ const Item = memo<ChatListItemProps>(

     // when the message is in RAG flow or the AI generating, it should be in loading state
     const isProcessing = isInRAGFlow || generating;
+    const animated = transitionMode === 'fadeIn' && generating;

     const onAvatarsClick = useAvatarsClick(item?.role);

@@ -168,7 +172,7 @@ const Item = memo<ChatListItemProps>(

     const markdownProps = useMemo(
       () => ({
-        animated: generating,
+        animated,
         citations: item?.role === 'user' ? undefined : item?.search?.citations,
         components,
         customRender: markdownCustomRender,
@@ -184,7 +188,7 @@ const Item = memo<ChatListItemProps>(
          // if the citations's url and title are all the same, we should not show the citations
          item?.search?.citations.every((item) => item.title !== item.url),
       }),
-      [generating, components, markdownCustomRender, item?.role, item?.search],
+      [animated, components, markdownCustomRender, item?.role, item?.search],
     );

     const onChange = useCallback((value: string) => updateMessageContent(id, value), [id]);
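The net effect: the Markdown fade-in now runs only while tokens are still streaming and the user has chosen the fadeIn transition. A minimal sketch of the gate all the call sites in this release share — the `useFadeInAnimated` helper is hypothetical, written here only to name the pattern:

    // Hypothetical helper; the diff inlines this expression at each call site.
    import { useUserStore } from '@/store/user';
    import { userGeneralSettingsSelectors } from '@/store/user/selectors';

    export const useFadeInAnimated = (generating: boolean) => {
      // transitionMode is 'smooth' | 'fadeIn' | 'none' (see ResponseAnimationStyle below)
      const transitionMode = useUserStore(userGeneralSettingsSelectors.transitionMode);
      // Only animate while the message is still generating and the user opted in.
      return transitionMode === 'fadeIn' && generating;
    };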
package/src/features/Conversation/components/MarkdownElements/LobeThinking/Render.tsx CHANGED
@@ -4,6 +4,8 @@ import Thinking from '@/components/Thinking';
 import { ARTIFACT_THINKING_TAG } from '@/const/plugin';
 import { useChatStore } from '@/store/chat';
 import { chatSelectors } from '@/store/chat/selectors';
+import { useUserStore } from '@/store/user';
+import { userGeneralSettingsSelectors } from '@/store/user/selectors';

 import { MarkdownElementProps } from '../type';
 import { isTagClosed } from '../utils';
@@ -13,12 +15,14 @@ const Render = memo<MarkdownElementProps>(({ children, id }) => {
     const message = chatSelectors.getMessageById(id)(s);
     return [!isTagClosed(ARTIFACT_THINKING_TAG, message?.content)];
   });
+  const transitionMode = useUserStore(userGeneralSettingsSelectors.transitionMode);

   return (
     <Thinking
       content={children as string}
       style={{ width: isGenerating ? '100%' : undefined }}
       thinking={isGenerating}
+      thinkingAnimated={transitionMode === 'fadeIn' && isGenerating}
     />
   );
 });
package/src/features/Conversation/components/MarkdownElements/Thinking/Render.tsx CHANGED
@@ -3,6 +3,8 @@ import { memo } from 'react';
 import Thinking from '@/components/Thinking';
 import { useChatStore } from '@/store/chat';
 import { chatSelectors } from '@/store/chat/selectors';
+import { useUserStore } from '@/store/user';
+import { userGeneralSettingsSelectors } from '@/store/user/selectors';

 import { MarkdownElementProps } from '../type';

@@ -23,9 +25,18 @@ const Render = memo<MarkdownElementProps>(({ children, id }) => {
     return message?.search?.citations;
   });

+  const transitionMode = useUserStore(userGeneralSettingsSelectors.transitionMode);
+
   if (!isGenerating && !children) return;

-  return <Thinking citations={citations} content={children as string} thinking={isGenerating} />;
+  return (
+    <Thinking
+      citations={citations}
+      content={children as string}
+      thinking={isGenerating}
+      thinkingAnimated={transitionMode === 'fadeIn' && isGenerating}
+    />
+  );
 });

 export default Render;
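Both reasoning render paths now forward the same `thinkingAnimated` flag to `Thinking` (which this release also touches, +4/−2 in src/components/Thinking/index.tsx). A sketch of the prop surface as inferred from the call sites above — this is an assumption, not copied from the component's source:

    // Assumed prop surface, reconstructed from the three call sites in this diff.
    import type { CSSProperties } from 'react';
    import type { CitationItem } from '@/types/message';

    interface ThinkingProps {
      citations?: CitationItem[];
      content?: string;
      style?: CSSProperties;
      thinking?: boolean;         // the reasoning block is still streaming
      thinkingAnimated?: boolean; // new: fade in streamed text only when the user opted in
    }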
package/src/libs/model-runtime/openai/index.ts CHANGED
@@ -2,21 +2,24 @@ import { ChatStreamPayload, ModelProvider } from '../types';
 import { processMultiProviderModelList } from '../utils/modelParse';
 import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
 import { pruneReasoningPayload } from '../utils/openaiHelpers';
+import { responsesAPIModels } from '@/const/models';

 export interface OpenAIModelCard {
   id: string;
 }

-const prunePrefixes = ['o1', 'o3', 'o4'];
+const prunePrefixes = ['o1', 'o3', 'o4', 'codex', 'computer-use'];
+
+const oaiSearchContextSize = process.env.OPENAI_SEARCH_CONTEXT_SIZE; // low, medium, high

 export const LobeOpenAI = createOpenAICompatibleRuntime({
   baseURL: 'https://api.openai.com/v1',
   chatCompletion: {
     handlePayload: (payload) => {
-      const { model } = payload;
+      const { enabledSearch, model, ...rest } = payload;

-      if (model === 'o1-pro') {
-        return { ...payload, apiMode: 'responses' } as ChatStreamPayload;
+      if (responsesAPIModels.has(model) || enabledSearch) {
+        return { ...rest, apiMode: 'responses', enabledSearch, model } as ChatStreamPayload;
       }

       if (prunePrefixes.some((prefix) => model.startsWith(prefix))) {
@@ -24,11 +27,10 @@ export const LobeOpenAI = createOpenAICompatibleRuntime({
       }

       if (model.includes('-search-')) {
-        const oaiSearchContextSize = process.env.OPENAI_SEARCH_CONTEXT_SIZE; // low, medium, high
-
         return {
-          ...payload,
+          ...rest,
           frequency_penalty: undefined,
+          model,
           presence_penalty: undefined,
           stream: payload.stream ?? true,
           temperature: undefined,
@@ -41,7 +43,7 @@ export const LobeOpenAI = createOpenAICompatibleRuntime({
         } as any;
       }

-      return { ...payload, stream: payload.stream ?? true };
+      return { ...rest, model, stream: payload.stream ?? true };
     },
   },
   debug: {
@@ -57,17 +59,37 @@ export const LobeOpenAI = createOpenAICompatibleRuntime({
   },
   provider: ModelProvider.OpenAI,
   responses: {
-    handlePayload: (payload: ChatStreamPayload) => {
-      const { model } = payload;
+    handlePayload: (payload) => {
+      const { enabledSearch, model, tools, ...rest } = payload;
+
+      const openaiTools = enabledSearch
+        ? [
+            ...(tools || []),
+            {
+              type: 'web_search_preview',
+              ...(oaiSearchContextSize && {
+                search_context_size: oaiSearchContextSize,
+              }),
+            },
+          ]
+        : tools;
+
       if (prunePrefixes.some((prefix) => model.startsWith(prefix))) {
         if (!payload.reasoning) {
           payload.reasoning = { summary: 'auto' };
         } else {
           payload.reasoning.summary = 'auto';
         }
+
+        // computer-use series must set truncation as auto
+        if (model.startsWith('computer-use')) {
+          payload.truncation = 'auto';
+        }
+
+        return pruneReasoningPayload(payload) as any;
       }

-      return { ...payload, stream: payload.stream ?? true };
+      return { ...rest, model, stream: payload.stream ?? true, tools: openaiTools } as any;
     },
   },
 });
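In short, `chatCompletion.handlePayload` now redirects any model registered in `responsesAPIModels`, plus any request with `enabledSearch`, to the Responses API by tagging the payload with `apiMode: 'responses'`; previously only the hardcoded `'o1-pro'` took that path. An illustrative trace — model ids here are examples, the authoritative set lives in `responsesAPIModels` from '@/const/models':

    // Illustrative only; handlePayload is invoked internally by the runtime.
    handlePayload({ enabledSearch: true, messages, model: 'gpt-4o-mini' });
    // -> { apiMode: 'responses', enabledSearch: true, messages, model: 'gpt-4o-mini' }

    handlePayload({ messages, model: 'o1-pro' });
    // -> routed the same way when 'o1-pro' is registered in responsesAPIModels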
package/src/libs/model-runtime/types/chat.ts CHANGED
@@ -107,6 +107,7 @@ export interface ChatStreamPayload {
     effort?: string;
     summary?: string;
   };
+  reasoning_effort?: 'low' | 'medium' | 'high';
   responseMode?: 'stream' | 'json';
   /**
    * @title 是否开启流式请求
@@ -132,6 +133,7 @@ export interface ChatStreamPayload {
    * @default 1
    */
   top_p?: number;
+  truncation?: 'auto' | 'disabled';
 }

 export interface ChatMethodOptions {
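Both new fields correspond to Responses API parameters: `reasoning_effort` is re-nested as `{ reasoning: { effort } }` by the factory (see below), and `truncation` is what the openai handler force-sets for the computer-use series. A sketch of a partial payload using them — the model id is illustrative:

    // Sketch: the two new optional ChatStreamPayload fields in use.
    const payload: Partial<ChatStreamPayload> = {
      model: 'computer-use-preview', // illustrative model id
      reasoning_effort: 'medium',    // forwarded as { reasoning: { effort: 'medium' } }
      truncation: 'auto',            // required for the computer-use series
    };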
package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts CHANGED
@@ -209,14 +209,9 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
   }

   async chat(
-    { responseMode, apiMode, ...payload }: ChatStreamPayload,
+    { responseMode, ...payload }: ChatStreamPayload,
     options?: ChatMethodOptions,
   ) {
-    // new openai Response API
-    if (apiMode === 'responses') {
-      return this.handleResponseAPIMode(payload, options);
-    }
-
     try {
       const inputStartAt = Date.now();
       const postPayload = chatCompletion?.handlePayload
@@ -226,6 +221,11 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
             stream: payload.stream ?? true,
           } as OpenAI.ChatCompletionCreateParamsStreaming);

+      // new openai Response API
+      if ((postPayload as any).apiMode === 'responses') {
+        return this.handleResponseAPIMode(payload, options);
+      }
+
       const messages = await convertOpenAIMessages(postPayload.messages);

       let response: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>;
@@ -478,11 +478,12 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
   ): Promise<Response> {
     const inputStartAt = Date.now();

-    const { messages, ...res } = responses?.handlePayload
+    const { messages, reasoning_effort, tools, ...res } = responses?.handlePayload
       ? (responses?.handlePayload(payload, this._options) as ChatStreamPayload)
       : payload;

     // remove penalty params
+    delete res.apiMode;
     delete res.frequency_penalty;
     delete res.presence_penalty;

@@ -490,9 +491,10 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an

     const postPayload = {
       ...res,
+      ...(reasoning_effort ? { reasoning: { effort: reasoning_effort } } : {}),
       input,
       store: false,
-      tools: payload.tools?.map((tool) => this.convertChatCompletionToolToResponseTool(tool)),
+      tools: tools?.map((tool) => this.convertChatCompletionToolToResponseTool(tool)),
     } as OpenAI.Responses.ResponseCreateParamsStreaming;

     if (debug?.responses?.()) {
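The subtle but important change in `chat()`: the `apiMode === 'responses'` check moved below `chatCompletion.handlePayload`, so a provider's handler can opt a request into the Responses API itself (as LobeOpenAI now does), not just the caller. A condensed, free-standing sketch of the reordered flow, names as in the diff:

    // Condensed control flow after this change; internals elided.
    const postPayload = chatCompletion?.handlePayload
      ? chatCompletion.handlePayload(payload, options)
      : { ...payload, stream: payload.stream ?? true };

    // A handler-set apiMode now triggers the Responses API path; before this
    // change the check ran first, so handlers never got the chance.
    if ((postPayload as any).apiMode === 'responses') return handleResponseAPIMode(payload, options);
    // ...otherwise continue with the chat.completions request.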
package/src/libs/model-runtime/utils/streams/openai/__snapshots__/responsesStream.test.ts.snap CHANGED
@@ -86,11 +86,11 @@ exports[`OpenAIResponsesStream > Reasoning > summary 1`] = `
 "data: " analyzing"

 ",
-"id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+"id: rs_684313b9774481908ee856625f82fb8c0b502bf083132d0d
 ",
-"event: data
+"event: text
 ",
-"data: {"type":"response.output_item.done","output_index":0,"item":{"id":"rs_684313b9774481908ee856625f82fb8c0b502bf083132d0d","type":"reasoning","summary":[{"type":"summary_text","text":"**Answering a numeric comparison**\\n\\nThe user is asking in Chinese which number is larger: 9.1 or 9.92. This is straightforward since 9.92 is clearly larger, as it's greater than 9.1. We can respond with \\"9.92大于9.1\\" without needing to search for more information. It's simple comparison, but I could also add a little explanation, noting that 9.92 is indeed 0.82 more than 9.1. However, keeping it simple with \\"9.92 > 9.1\\" is perfectly fine!"}]}}
+"data: null

 ",
 "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
@@ -128,11 +128,11 @@ exports[`OpenAIResponsesStream > Reasoning > summary 1`] = `
 "data: {"type":"response.content_part.done","item_id":"msg_684313bee2c88190b0f4b09621ad7dc60b502bf083132d0d","output_index":1,"content_index":0,"part":{"type":"output_text","annotations":[],"text":"9.92 比 9.1 大。"}}

 ",
-"id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+"id: msg_684313bee2c88190b0f4b09621ad7dc60b502bf083132d0d
 ",
-"event: data
+"event: text
 ",
-"data: {"type":"response.output_item.done","output_index":1,"item":{"id":"msg_684313bee2c88190b0f4b09621ad7dc60b502bf083132d0d","type":"message","status":"completed","content":[{"type":"output_text","annotations":[],"text":"9.92 比 9. 大。"}],"role":"assistant"}}
+"data: null

 ",
 "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
package/src/libs/model-runtime/utils/streams/openai/responsesStream.ts CHANGED
@@ -1,7 +1,7 @@
 import OpenAI from 'openai';
 import type { Stream } from 'openai/streaming';

-import { ChatMessageError } from '@/types/message';
+import { ChatMessageError, CitationItem } from '@/types/message';

 import { AgentRuntimeErrorType } from '../../../error';
 import { convertResponseUsage } from '../../usageConverter';
@@ -20,7 +20,17 @@ import {
 import { OpenAIStreamOptions } from './openai';

 const transformOpenAIStream = (
-  chunk: OpenAI.Responses.ResponseStreamEvent,
+  chunk: OpenAI.Responses.ResponseStreamEvent | {
+    annotation: {
+      end_index: number;
+      start_index: number;
+      title: string;
+      type: 'url_citation';
+      url: string;
+    };
+    item_id: string;
+    type: 'response.output_text.annotation.added';
+  },
   streamContext: StreamContext,
 ): StreamProtocolChunk | StreamProtocolChunk[] => {
   // handle the first chunk error
@@ -42,6 +52,7 @@ const transformOpenAIStream = (
   switch (chunk.type) {
     case 'response.created': {
       streamContext.id = chunk.response.id;
+      streamContext.returnedCitationArray = [];

       return { data: chunk.response.status, id: streamContext.id, type: 'data' };
     }
@@ -106,6 +117,31 @@ const transformOpenAIStream = (
       return { data: chunk.delta, id: chunk.item_id, type: 'reasoning' };
     }

+    case 'response.output_text.annotation.added': {
+      const citations = chunk.annotation;
+
+      if (streamContext.returnedCitationArray) {
+        streamContext.returnedCitationArray.push({
+          title: citations.title,
+          url: citations.url,
+        } as CitationItem);
+      }
+
+      return { data: null, id: chunk.item_id, type: 'text' };
+    }
+
+    case 'response.output_item.done': {
+      if (streamContext.returnedCitationArray?.length) {
+        return {
+          data: { citations: streamContext.returnedCitationArray },
+          id: chunk.item.id,
+          type: 'grounding',
+        }
+      }
+
+      return { data: null, id: chunk.item.id, type: 'text' };
+    }
+
     case 'response.completed': {
       if (chunk.response.usage) {
         return {
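This is also what the snapshot changes above reflect: `response.output_item.done` no longer echoes the raw event as a `data` chunk; it either flushes buffered citations as a `grounding` chunk or emits a no-op `text` chunk. A conceptual trace — `transformOpenAIStream` is module-internal, so the ids and events here are fabricated to match the shapes above:

    // Fabricated trace of the citation flow.
    const ctx: StreamContext = { id: 'resp_example' };
    ctx.returnedCitationArray = []; // done by the 'response.created' case

    // 'response.output_text.annotation.added' buffers the citation and emits a no-op text chunk:
    ctx.returnedCitationArray.push({ title: 'Example', url: 'https://example.com' } as CitationItem);
    // -> { data: null, id: 'msg_example', type: 'text' }

    // 'response.output_item.done' then flushes everything as a single grounding chunk:
    // -> { data: { citations: ctx.returnedCitationArray }, id: 'msg_example', type: 'grounding' }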
package/src/locales/default/setting.ts CHANGED
@@ -245,6 +245,18 @@ export default {
       title: 'Mermaid 主题',
     },
     title: '聊天外观',
+    transitionMode: {
+      desc: '聊天消息的过渡动画',
+      options: {
+        fadeIn: '淡入',
+        none: {
+          desc: '这取决于模型的响应输出方式,请自行测试。',
+          value: '无',
+        },
+        smooth: '平滑',
+      },
+      title: '过渡动画',
+    },
   },
   settingCommon: {
     lang: {
package/src/services/chat.ts CHANGED
@@ -29,6 +29,7 @@ import {
   modelConfigSelectors,
   modelProviderSelectors,
   preferenceSelectors,
+  userGeneralSettingsSelectors,
   userProfileSelectors,
 } from '@/store/user/selectors';
 import { WebBrowsingManifest } from '@/tools/web-browsing';
@@ -39,7 +40,12 @@ import type { ChatStreamPayload, OpenAIChatMessage } from '@/types/openai/chat';
 import { UserMessageContentPart } from '@/types/openai/chat';
 import { parsePlaceholderVariablesMessages } from '@/utils/client/parserPlaceholder';
 import { createErrorResponse } from '@/utils/errorResponse';
-import { FetchSSEOptions, fetchSSE, getMessageError } from '@/utils/fetch';
+import {
+  FetchSSEOptions,
+  fetchSSE,
+  getMessageError,
+  standardizeAnimationStyle,
+} from '@/utils/fetch';
 import { genToolCallingName } from '@/utils/toolCall';
 import { createTraceHeader, getTraceId } from '@/utils/trace';

@@ -295,7 +301,7 @@ class ChatService {
   };

   getChatCompletion = async (params: Partial<ChatStreamPayload>, options?: FetchOptions) => {
-    const { signal } = options ?? {};
+    const { signal, responseAnimation } = options ?? {};

     const { provider = ModelProvider.OpenAI, ...res } = params;

@@ -379,6 +385,16 @@ class ChatService {
       sdkType = providerConfig?.settings.sdkType || 'openai';
     }

+    const userPreferTransitionMode =
+      userGeneralSettingsSelectors.transitionMode(getUserStoreState());
+
+    // The order of the array is very important.
+    const mergedResponseAnimation = [
+      providerConfig?.settings?.responseAnimation || {},
+      userPreferTransitionMode,
+      responseAnimation,
+    ].reduce((acc, cur) => merge(acc, standardizeAnimationStyle(cur)), {});
+
     return fetchSSE(API_ENDPOINTS.chat(sdkType), {
       body: JSON.stringify(payload),
       fetcher: fetcher,
@@ -388,11 +404,8 @@ class ChatService {
       onErrorHandle: options?.onErrorHandle,
       onFinish: options?.onFinish,
       onMessageHandle: options?.onMessageHandle,
+      responseAnimation: mergedResponseAnimation,
       signal,
-      smoothing:
-        providerConfig?.settings?.smoothing ||
-        // @deprecated in V2
-        providerConfig?.smoothing,
     });
   };
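The merge runs left to right with later sources winning: provider defaults, then the user's `transitionMode` preference, then any per-call `responseAnimation` override. The body of `standardizeAnimationStyle` is not part of this diff; a plausible sketch, consistent with how it is used above (string shorthand expanded to the object form), might be:

    // Assumed shape, inferred from usage above — not the package's actual implementation.
    import type { ResponseAnimation, ResponseAnimationStyle } from '@/types/llm';

    const standardizeAnimationStyle = (
      animation?: ResponseAnimation,
    ): Exclude<ResponseAnimation, ResponseAnimationStyle> => {
      // Expand the 'smooth' | 'fadeIn' | 'none' shorthand to both channels.
      if (typeof animation === 'string') return { text: animation, toolsCalling: animation };
      return animation ?? {};
    };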
package/src/store/user/slices/settings/selectors/general.test.ts CHANGED
@@ -19,6 +19,7 @@ describe('settingsSelectors', () => {
       fontSize: 12,
       highlighterTheme: 'lobe-theme',
       mermaidTheme: 'lobe-theme',
+      transitionMode: 'fadeIn',
     });
   });
 });
package/src/store/user/slices/settings/selectors/general.ts CHANGED
@@ -8,6 +8,7 @@ const primaryColor = (s: UserStore) => generalConfig(s).primaryColor;
 const fontSize = (s: UserStore) => generalConfig(s).fontSize;
 const highlighterTheme = (s: UserStore) => generalConfig(s).highlighterTheme;
 const mermaidTheme = (s: UserStore) => generalConfig(s).mermaidTheme;
+const transitionMode = (s: UserStore) => generalConfig(s).transitionMode;

 export const userGeneralSettingsSelectors = {
   config: generalConfig,
@@ -16,4 +17,5 @@ export const userGeneralSettingsSelectors = {
   mermaidTheme,
   neutralColor,
   primaryColor,
+  transitionMode,
 };
package/src/types/aiProvider.ts CHANGED
@@ -1,7 +1,7 @@
 import { z } from 'zod';

 import { AiModelForSelect, EnabledAiModel, ModelSearchImplementType } from '@/types/aiModel';
-import { SmoothingParams } from '@/types/llm';
+import { ResponseAnimation } from '@/types/llm';

 export const AiProviderSourceEnum = {
   Builtin: 'builtin',
@@ -58,6 +58,7 @@ export interface AiProviderSettings {
       }
     | false;

+  responseAnimation?: ResponseAnimation;
   /**
    * default openai
    */
@@ -75,13 +76,11 @@ export interface AiProviderSettings {
   showChecker?: boolean;
   showDeployName?: boolean;
   showModelFetcher?: boolean;
-  /**
-   * whether to smoothing the output
-   */
-  smoothing?: SmoothingParams;
   supportResponsesApi?: boolean;
 }

+const ResponseAnimationType = z.enum(['smooth', 'fadeIn', 'none']);
+
 const AiProviderSettingsSchema = z.object({
   defaultShowBrowserRequest: z.boolean().optional(),
   disableBrowserRequest: z.boolean().optional(),
@@ -94,6 +93,13 @@ const AiProviderSettingsSchema = z.object({
     })
     .or(z.literal(false))
     .optional(),
+  responseAnimation: z
+    .object({
+      text: ResponseAnimationType.optional(),
+      toolsCalling: ResponseAnimationType.optional(),
+    })
+    .or(ResponseAnimationType)
+    .optional(),
   sdkType: z.enum(['anthropic', 'openai', 'ollama']).optional(),
   searchMode: z.enum(['params', 'internal']).optional(),
   showAddNewModel: z.boolean().optional(),
@@ -101,12 +107,6 @@ const AiProviderSettingsSchema = z.object({
   showChecker: z.boolean().optional(),
   showDeployName: z.boolean().optional(),
   showModelFetcher: z.boolean().optional(),
-  smoothing: z
-    .object({
-      text: z.boolean().optional(),
-      toolsCalling: z.boolean().optional(),
-    })
-    .optional(),
   supportResponsesApi: z.boolean().optional(),
 });
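The schema accepts either the shorthand string or the per-channel object, mirroring the ResponseAnimation union in llm.ts below. A quick illustration with the standard zod parse API (the schema itself is module-internal, so this is conceptual):

    // Both accepted shapes parse; anything else is rejected.
    AiProviderSettingsSchema.parse({ responseAnimation: 'fadeIn' });
    AiProviderSettingsSchema.parse({ responseAnimation: { text: 'smooth', toolsCalling: 'none' } });
    // AiProviderSettingsSchema.parse({ responseAnimation: 'bounce' }); // -> throws ZodError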
package/src/types/llm.ts CHANGED
@@ -59,11 +59,14 @@ export interface ChatModelCard {
   vision?: boolean;
 }

-export interface SmoothingParams {
-  speed?: number;
-  text?: boolean;
-  toolsCalling?: boolean;
-}
+export type ResponseAnimationStyle = 'smooth' | 'fadeIn' | 'none';
+export type ResponseAnimation =
+  | {
+      speed?: number;
+      text?: ResponseAnimationStyle;
+      toolsCalling?: ResponseAnimationStyle;
+    }
+  | ResponseAnimationStyle;

 export interface ModelProviderCard {
   /**
@@ -137,11 +140,6 @@ export interface ModelProviderCard {
    * whether to show the provider config
    */
   showConfig?: boolean;
-  /**
-   * whether to smoothing the output
-   * @deprecated
-   */
-  smoothing?: SmoothingParams;
   /**
    * provider's website url
    */
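Both members of the union are valid assignments; the string shorthand applies one style across the board, while the object form tunes text and tool-calling output independently (values below are illustrative):

    import type { ResponseAnimation } from '@/types/llm';

    // Shorthand: one style for everything.
    const shorthand: ResponseAnimation = 'smooth';
    // Object form: per-channel styles, plus the optional speed carried over from SmoothingParams.
    const perChannel: ResponseAnimation = { speed: 2, text: 'fadeIn', toolsCalling: 'none' };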
package/src/types/user/settings/general.ts CHANGED
@@ -1,9 +1,12 @@
 import type { HighlighterProps, MermaidProps, NeutralColors, PrimaryColors } from '@lobehub/ui';

+import { ResponseAnimationStyle } from '@/types/llm';
+
 export interface UserGeneralConfig {
   fontSize: number;
   highlighterTheme?: HighlighterProps['theme'];
   mermaidTheme?: MermaidProps['theme'];
   neutralColor?: NeutralColors;
   primaryColor?: PrimaryColors;
+  transitionMode?: ResponseAnimationStyle;
 }