@lobehub/chat 1.97.11 → 1.97.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/CHANGELOG.md +51 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/chat.json +3 -0
  4. package/locales/ar/models.json +8 -8
  5. package/locales/bg-BG/chat.json +3 -0
  6. package/locales/bg-BG/models.json +6 -6
  7. package/locales/de-DE/chat.json +3 -0
  8. package/locales/de-DE/models.json +4 -4
  9. package/locales/en-US/chat.json +3 -0
  10. package/locales/en-US/models.json +4 -4
  11. package/locales/es-ES/chat.json +3 -0
  12. package/locales/es-ES/models.json +5 -5
  13. package/locales/fa-IR/chat.json +3 -0
  14. package/locales/fa-IR/models.json +6 -6
  15. package/locales/fr-FR/chat.json +3 -0
  16. package/locales/fr-FR/models.json +3 -3
  17. package/locales/it-IT/chat.json +3 -0
  18. package/locales/it-IT/models.json +3 -3
  19. package/locales/ja-JP/chat.json +3 -0
  20. package/locales/ja-JP/models.json +6 -6
  21. package/locales/ko-KR/chat.json +3 -0
  22. package/locales/ko-KR/models.json +7 -7
  23. package/locales/nl-NL/chat.json +3 -0
  24. package/locales/nl-NL/models.json +4 -4
  25. package/locales/pl-PL/chat.json +3 -0
  26. package/locales/pl-PL/models.json +6 -6
  27. package/locales/pt-BR/chat.json +3 -0
  28. package/locales/pt-BR/models.json +2 -20
  29. package/locales/ru-RU/chat.json +3 -0
  30. package/locales/ru-RU/models.json +5 -5
  31. package/locales/tr-TR/chat.json +3 -0
  32. package/locales/tr-TR/models.json +7 -7
  33. package/locales/vi-VN/chat.json +3 -0
  34. package/locales/vi-VN/models.json +4 -4
  35. package/locales/zh-CN/chat.json +3 -0
  36. package/locales/zh-CN/models.json +1 -1
  37. package/locales/zh-TW/chat.json +3 -0
  38. package/locales/zh-TW/models.json +1 -1
  39. package/package.json +1 -5
  40. package/src/config/aiModels/google.ts +5 -40
  41. package/src/config/aiModels/openai.ts +50 -41
  42. package/src/config/aiModels/volcengine.ts +58 -53
  43. package/src/config/aiModels/xai.ts +1 -1
  44. package/src/database/migrations/0026_add_autovacuum_tuning.sql +12 -0
  45. package/src/database/migrations/meta/0026_snapshot.json +5703 -0
  46. package/src/database/migrations/meta/_journal.json +7 -0
  47. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +11 -0
  48. package/src/features/ChatInput/ActionBar/Model/ThinkingSlider.tsx +57 -0
  49. package/src/libs/model-runtime/bedrock/index.ts +32 -1
  50. package/src/libs/model-runtime/utils/streams/bedrock/common.ts +2 -1
  51. package/src/libs/model-runtime/utils/streams/protocol.ts +16 -0
  52. package/src/libs/model-runtime/utils/streams/qwen.ts +4 -2
  53. package/src/libs/model-runtime/volcengine/index.ts +9 -5
  54. package/src/libs/model-runtime/xai/index.ts +6 -3
  55. package/src/locales/default/chat.ts +3 -0
  56. package/src/services/chat.ts +4 -0
  57. package/src/types/agent/chatConfig.ts +1 -0
  58. package/src/types/aiModel.ts +4 -0
package/src/database/migrations/meta/_journal.json
@@ -182,6 +182,13 @@
       "when": 1749309388370,
       "tag": "0025_add_provider_config",
       "breakpoints": true
+    },
+    {
+      "idx": 26,
+      "version": "7",
+      "when": 1752212281564,
+      "tag": "0026_add_autovacuum_tuning",
+      "breakpoints": true
     }
   ],
   "version": "6"
package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx
@@ -14,6 +14,7 @@ import ContextCachingSwitch from './ContextCachingSwitch';
 import ReasoningEffortSlider from './ReasoningEffortSlider';
 import ReasoningTokenSlider from './ReasoningTokenSlider';
 import ThinkingBudgetSlider from './ThinkingBudgetSlider';
+import ThinkingSlider from './ThinkingSlider';
 
 const ControlsForm = memo(() => {
   const { t } = useTranslation('chat');
@@ -105,6 +106,16 @@ const ControlsForm = memo(() => {
       },
       tag: 'thinkingBudget',
     },
+    {
+      children: <ThinkingSlider />,
+      label: t('extendParams.thinking.title'),
+      layout: 'horizontal',
+      minWidth: undefined,
+      name: 'thinking',
+      style: {
+        paddingBottom: 0,
+      },
+    },
   ].filter(Boolean) as FormItemProps[];
 
   return (
package/src/features/ChatInput/ActionBar/Model/ThinkingSlider.tsx (new file)
@@ -0,0 +1,57 @@
+import { Slider } from 'antd';
+import { memo, useCallback } from 'react';
+import { Flexbox } from 'react-layout-kit';
+
+import { useAgentStore } from '@/store/agent';
+import { agentChatConfigSelectors } from '@/store/agent/selectors';
+
+const ThinkingSlider = memo(() => {
+  const [config, updateAgentChatConfig] = useAgentStore((s) => [
+    agentChatConfigSelectors.currentChatConfig(s),
+    s.updateAgentChatConfig,
+  ]);
+
+  const thinking = config.thinking || 'auto'; // Default to 'auto' if not set
+
+  const marks = {
+    0: 'OFF',
+    1: 'Auto',
+    2: 'ON',
+  };
+
+  const thinkingValues = ['disabled', 'auto', 'enabled'];
+  const indexValue = thinkingValues.indexOf(thinking);
+  const currentValue = indexValue === -1 ? 1 : indexValue;
+
+  const updateThinking = useCallback(
+    (value: number) => {
+      const thinkingMode = thinkingValues[value] as 'disabled' | 'auto' | 'enabled';
+      updateAgentChatConfig({ thinking: thinkingMode });
+    },
+    [updateAgentChatConfig],
+  );
+
+  return (
+    <Flexbox
+      align={'center'}
+      gap={12}
+      horizontal
+      paddingInline={'0 20px'}
+      style={{ minWidth: 200, width: '100%' }}
+    >
+      <Flexbox flex={1}>
+        <Slider
+          marks={marks}
+          max={2}
+          min={0}
+          onChange={updateThinking}
+          step={1}
+          tooltip={{ open: false }}
+          value={currentValue}
+        />
+      </Flexbox>
+    </Flexbox>
+  );
+});
+
+export default ThinkingSlider;
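
Note: the slider's three positions map one-to-one onto the `thinking` values persisted in the agent chat config, and any unset or unrecognized value falls back to index 1, i.e. 'auto'. A minimal sketch of that mapping, reusing the array from the component above:

  // slider index -> chatConfig.thinking (marks label)
  const thinkingValues = ['disabled', 'auto', 'enabled'];
  thinkingValues.indexOf('enabled');  // 2 -> slider at 'ON'
  thinkingValues.indexOf('bogus');    // -1 -> clamped to 1, i.e. 'Auto'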
package/src/libs/model-runtime/bedrock/index.ts
@@ -3,7 +3,6 @@ import {
   InvokeModelCommand,
   InvokeModelWithResponseStreamCommand,
 } from '@aws-sdk/client-bedrock-runtime';
-import { experimental_buildLlama2Prompt } from 'ai/prompts';
 
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
@@ -25,6 +24,38 @@ import {
   createBedrockStream,
 } from '../utils/streams';
 
+/**
+ * A prompt constructor for HuggingFace LLama 2 chat models.
+ * Does not support `function` messages.
+ * @see https://huggingface.co/meta-llama/Llama-2-70b-chat-hf and https://huggingface.co/blog/llama2#how-to-prompt-llama-2
+ */
+export function experimental_buildLlama2Prompt(messages: { content: string; role: string }[]) {
+  const startPrompt = `<s>[INST] `;
+  const endPrompt = ` [/INST]`;
+  const conversation = messages.map(({ content, role }, index) => {
+    switch (role) {
+      case 'user': {
+        return content.trim();
+      }
+      case 'assistant': {
+        return ` [/INST] ${content}</s><s>[INST] `;
+      }
+      case 'function': {
+        throw new Error('Llama 2 does not support function calls.');
+      }
+      default: {
+        if (role === 'system' && index === 0) {
+          return `<<SYS>>\n${content}\n<</SYS>>\n\n`;
+        } else {
+          throw new Error(`Invalid message role: ${role}`);
+        }
+      }
+    }
+  });
+
+  return startPrompt + conversation.join('') + endPrompt;
+}
+
 export interface LobeBedrockAIParams {
   accessKeyId?: string;
   accessKeySecret?: string;
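
The helper above is vendored in place of the removed `ai/prompts` import and reproduces the Llama 2 chat template. A quick sketch of what it returns for a system + user exchange (the message contents are made up):

  const prompt = experimental_buildLlama2Prompt([
    { content: 'You are a helpful assistant.', role: 'system' },
    { content: 'Hello!', role: 'user' },
  ]);
  // => '<s>[INST] <<SYS>>\nYou are a helpful assistant.\n<</SYS>>\n\nHello! [/INST]'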
package/src/libs/model-runtime/utils/streams/bedrock/common.ts
@@ -2,7 +2,8 @@ import {
   InvokeModelWithResponseStreamResponse,
   ResponseStream,
 } from '@aws-sdk/client-bedrock-runtime';
-import { readableFromAsyncIterable } from 'ai';
+
+import { readableFromAsyncIterable } from '../protocol';
 
 const chatStreamable = async function* (stream: AsyncIterable<ResponseStream>) {
   for await (const response of stream) {
package/src/libs/model-runtime/utils/streams/protocol.ts
@@ -108,6 +108,22 @@ const chatStreamable = async function* <T>(stream: AsyncIterable<T>) {
 };
 
 const ERROR_CHUNK_PREFIX = '%FIRST_CHUNK_ERROR%: ';
+
+export function readableFromAsyncIterable<T>(iterable: AsyncIterable<T>) {
+  let it = iterable[Symbol.asyncIterator]();
+  return new ReadableStream<T>({
+    async cancel(reason) {
+      await it.return?.(reason);
+    },
+
+    async pull(controller) {
+      const { done, value } = await it.next();
+      if (done) controller.close();
+      else controller.enqueue(value);
+    },
+  });
+}
+
 // make the response to the streamable format
 export const convertIterableToStream = <T>(stream: AsyncIterable<T>) => {
   const iterable = chatStreamable(stream);
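
Like the Bedrock prompt helper, `readableFromAsyncIterable` is vendored from the `ai` package: a plain adapter from any `AsyncIterable` to a Web `ReadableStream`. A minimal usage sketch with a made-up generator:

  async function* chunks() {
    yield 'hello';
    yield 'world';
  }

  const stream = readableFromAsyncIterable(chunks());
  const reader = stream.getReader();
  await reader.read();             // { done: false, value: 'hello' }
  await reader.cancel('stopping'); // forwarded to the generator's return(), so it can clean up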
package/src/libs/model-runtime/utils/streams/qwen.ts
@@ -1,6 +1,8 @@
-import { ChatCompletionContentPartText } from 'ai/prompts';
 import OpenAI from 'openai';
-import { ChatCompletionContentPart } from 'openai/resources/index.mjs';
+import {
+  ChatCompletionContentPart,
+  ChatCompletionContentPartText,
+} from 'openai/resources/index.mjs';
 import type { Stream } from 'openai/streaming';
 
 import { ChatStreamCallbacks } from '../../types';
package/src/libs/model-runtime/volcengine/index.ts
@@ -2,6 +2,13 @@ import { ModelProvider } from '../types';
 import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
 import { MODEL_LIST_CONFIGS, processModelList } from '../utils/modelParse';
 
+const THINKING_MODELS = [
+  'thinking-vision-pro',
+  'thinking-pro-m',
+  'doubao-seed-1-6',
+  'doubao-1-5-ui-tars'
+];
+
 export interface VolcengineModelCard {
   id: string;
 }
@@ -15,12 +22,9 @@ export const LobeVolcengineAI = createOpenAICompatibleRuntime({
     return {
       ...rest,
       model,
-      ...(['thinking-vision-pro'].some((keyword) => model.toLowerCase().includes(keyword))
+      ...(THINKING_MODELS.some((keyword) => model.toLowerCase().includes(keyword))
         ? {
-            thinking:
-              thinking !== undefined && thinking.type === 'enabled'
-                ? { type: 'enabled' }
-                : { type: 'disabled' },
+            thinking: { type: thinking?.type }
          }
        : {}),
    } as any;
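
Two behavioral notes on this hunk: matching is by substring over the lowercased model id, so dated variants of the listed families match as well, and the payload now forwards `thinking?.type` verbatim (including 'auto' or undefined) instead of coercing everything to enabled/disabled. A sketch, using a hypothetical dated model id:

  const model = 'doubao-seed-1-6-250615'; // hypothetical dated variant
  THINKING_MODELS.some((keyword) => model.toLowerCase().includes(keyword));
  // => true, via the 'doubao-seed-1-6' entry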
package/src/libs/model-runtime/xai/index.ts
@@ -9,9 +9,12 @@ export interface XAIModelCard {
 
 export const GrokReasoningModels = new Set([
   'grok-3-mini',
-  'grok-4-0709',
+  'grok-4',
 ]);
 
+export const isGrokReasoningModel = (model: string) =>
+  Array.from(GrokReasoningModels).some((id) => model.includes(id));
+
 export const LobeXAI = createOpenAICompatibleRuntime({
   baseURL: 'https://api.x.ai/v1',
   chatCompletion: {
@@ -20,9 +23,9 @@ export const LobeXAI = createOpenAICompatibleRuntime({
 
     return {
       ...rest,
-      frequency_penalty: GrokReasoningModels.has(model) ? undefined : frequency_penalty,
+      frequency_penalty: isGrokReasoningModel(model) ? undefined : frequency_penalty,
       model,
-      presence_penalty: GrokReasoningModels.has(model) ? undefined : presence_penalty,
+      presence_penalty: isGrokReasoningModel(model) ? undefined : presence_penalty,
       stream: true,
       ...(enabledSearch && {
         search_parameters: {
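
The switch from exact `Set.has` lookups to substring matching means the shortened 'grok-4' entry still covers dated ids such as the removed 'grok-4-0709'. A quick sketch:

  isGrokReasoningModel('grok-4-0709');  // true, contains 'grok-4'
  isGrokReasoningModel('grok-3-mini');  // true, exact entry
  isGrokReasoningModel('grok-2-1212');  // false, matches no entry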
package/src/locales/default/chat.ts
@@ -47,6 +47,9 @@ export default {
     reasoningEffort: {
       title: '推理强度',
     },
+    thinking: {
+      title: '深度思考开关',
+    },
     title: '模型扩展功能',
   },
   history: {
package/src/services/chat.ts
@@ -264,6 +264,10 @@ class ChatService {
       extendParams.reasoning_effort = chatConfig.reasoningEffort;
     }
 
+    if (modelExtendParams!.includes('thinking') && chatConfig.thinking) {
+      extendParams.thinking = { type: chatConfig.thinking };
+    }
+
     if (
       modelExtendParams!.includes('thinkingBudget') &&
       chatConfig.thinkingBudget !== undefined
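
Taken together with the UI and runtime hunks, the new setting flows end to end roughly like this (a sketch, assuming a Volcengine model whose extendParams list includes 'thinking'):

  // ThinkingSlider       -> chatConfig.thinking = 'enabled'
  // ChatService (above)  -> extendParams.thinking = { type: 'enabled' }
  // volcengine runtime   -> request payload carries thinking: { type: 'enabled' }

Note that the guard `chatConfig.thinking` is truthy for all three values ('disabled', 'auto', 'enabled'), so an explicit 'disabled' is still forwarded to the provider rather than being dropped.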
package/src/types/agent/chatConfig.ts
@@ -26,6 +26,7 @@ export interface LobeAgentChatConfig {
   enableReasoningEffort?: boolean;
   reasoningBudgetToken?: number;
   reasoningEffort?: 'low' | 'medium' | 'high';
+  thinking?: 'disabled' | 'auto' | 'enabled';
   thinkingBudget?: number;
   /**
    * 禁用上下文缓存
package/src/types/aiModel.ts
@@ -121,6 +121,7 @@ export interface AIBaseModelCard {
    * whether model is legacy (deprecated but not removed yet)
    */
   legacy?: boolean;
+  maxOutput?: number;
   /**
    * who create this model
    */
@@ -148,6 +149,7 @@ export type ExtendParamsType =
   | 'enableReasoning'
   | 'disableContextCaching'
   | 'reasoningEffort'
+  | 'thinking'
   | 'thinkingBudget';
 
 export interface AiModelSettings {
@@ -207,6 +209,7 @@ export interface AITTSModelCard extends AIBaseModelCard {
      * the input pricing, e.g. $1 / 1M tokens
      */
     input?: number;
+    output?: number;
   };
   type: 'tts';
 }
@@ -222,6 +225,7 @@ export interface AISTTModelCard extends AIBaseModelCard {
      * the input pricing, e.g. $1 / 1M tokens
      */
     input?: number;
+    output?: number;
   };
   type: 'stt';
 }