@lobehub/chat 1.97.11 → 1.97.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +51 -0
- package/changelog/v1.json +18 -0
- package/locales/ar/chat.json +3 -0
- package/locales/ar/models.json +8 -8
- package/locales/bg-BG/chat.json +3 -0
- package/locales/bg-BG/models.json +6 -6
- package/locales/de-DE/chat.json +3 -0
- package/locales/de-DE/models.json +4 -4
- package/locales/en-US/chat.json +3 -0
- package/locales/en-US/models.json +4 -4
- package/locales/es-ES/chat.json +3 -0
- package/locales/es-ES/models.json +5 -5
- package/locales/fa-IR/chat.json +3 -0
- package/locales/fa-IR/models.json +6 -6
- package/locales/fr-FR/chat.json +3 -0
- package/locales/fr-FR/models.json +3 -3
- package/locales/it-IT/chat.json +3 -0
- package/locales/it-IT/models.json +3 -3
- package/locales/ja-JP/chat.json +3 -0
- package/locales/ja-JP/models.json +6 -6
- package/locales/ko-KR/chat.json +3 -0
- package/locales/ko-KR/models.json +7 -7
- package/locales/nl-NL/chat.json +3 -0
- package/locales/nl-NL/models.json +4 -4
- package/locales/pl-PL/chat.json +3 -0
- package/locales/pl-PL/models.json +6 -6
- package/locales/pt-BR/chat.json +3 -0
- package/locales/pt-BR/models.json +2 -20
- package/locales/ru-RU/chat.json +3 -0
- package/locales/ru-RU/models.json +5 -5
- package/locales/tr-TR/chat.json +3 -0
- package/locales/tr-TR/models.json +7 -7
- package/locales/vi-VN/chat.json +3 -0
- package/locales/vi-VN/models.json +4 -4
- package/locales/zh-CN/chat.json +3 -0
- package/locales/zh-CN/models.json +1 -1
- package/locales/zh-TW/chat.json +3 -0
- package/locales/zh-TW/models.json +1 -1
- package/package.json +1 -5
- package/src/config/aiModels/google.ts +5 -40
- package/src/config/aiModels/openai.ts +50 -41
- package/src/config/aiModels/volcengine.ts +58 -53
- package/src/config/aiModels/xai.ts +1 -1
- package/src/database/migrations/0026_add_autovacuum_tuning.sql +12 -0
- package/src/database/migrations/meta/0026_snapshot.json +5703 -0
- package/src/database/migrations/meta/_journal.json +7 -0
- package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +11 -0
- package/src/features/ChatInput/ActionBar/Model/ThinkingSlider.tsx +57 -0
- package/src/libs/model-runtime/bedrock/index.ts +32 -1
- package/src/libs/model-runtime/utils/streams/bedrock/common.ts +2 -1
- package/src/libs/model-runtime/utils/streams/protocol.ts +16 -0
- package/src/libs/model-runtime/utils/streams/qwen.ts +4 -2
- package/src/libs/model-runtime/volcengine/index.ts +9 -5
- package/src/libs/model-runtime/xai/index.ts +6 -3
- package/src/locales/default/chat.ts +3 -0
- package/src/services/chat.ts +4 -0
- package/src/types/agent/chatConfig.ts +1 -0
- package/src/types/aiModel.ts +4 -0
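Beyond the locale and model-catalog refreshes, the functional changes in this range are: a new three-state `thinking` extend param (OFF / Auto / ON), wired from a new `ThinkingSlider` control through the agent chat config into the Volcengine runtime; dropped imports from the `ai` package, whose two helpers used here (`experimental_buildLlama2Prompt`, `readableFromAsyncIterable`) are now vendored into the Bedrock runtime and the stream protocol utils; xAI runtime fixes (a corrected `grok-4` set entry plus penalty stripping for Grok reasoning models); optional `maxOutput` and TTS/STT `output` pricing fields on model cards; and a `0026_add_autovacuum_tuning` database migration.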
package/src/database/migrations/meta/_journal.json
CHANGED

```diff
@@ -182,6 +182,13 @@
       "when": 1749309388370,
       "tag": "0025_add_provider_config",
       "breakpoints": true
+    },
+    {
+      "idx": 26,
+      "version": "7",
+      "when": 1752212281564,
+      "tag": "0026_add_autovacuum_tuning",
+      "breakpoints": true
     }
   ],
   "version": "6"
```
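The journal entry registers the new `0026_add_autovacuum_tuning` migration (the 12-line SQL file listed above) with the Drizzle migration runner so it applies after `0025_add_provider_config`; `when` is an epoch-millisecond timestamp (July 2025).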
package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx
CHANGED

```diff
@@ -14,6 +14,7 @@ import ContextCachingSwitch from './ContextCachingSwitch';
 import ReasoningEffortSlider from './ReasoningEffortSlider';
 import ReasoningTokenSlider from './ReasoningTokenSlider';
 import ThinkingBudgetSlider from './ThinkingBudgetSlider';
+import ThinkingSlider from './ThinkingSlider';
 
 const ControlsForm = memo(() => {
   const { t } = useTranslation('chat');
@@ -105,6 +106,16 @@ const ControlsForm = memo(() => {
       },
       tag: 'thinkingBudget',
     },
+    {
+      children: <ThinkingSlider />,
+      label: t('extendParams.thinking.title'),
+      layout: 'horizontal',
+      minWidth: undefined,
+      name: 'thinking',
+      style: {
+        paddingBottom: 0,
+      },
+    },
   ].filter(Boolean) as FormItemProps[];
 
   return (
```
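The new form item mounts the `ThinkingSlider` shown next under `name: 'thinking'`; like the neighbouring reasoning controls, it presumably renders only for models that declare `'thinking'` in their `extendParams` — the same gate `chat.ts` applies below when building the request.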
package/src/features/ChatInput/ActionBar/Model/ThinkingSlider.tsx
ADDED

```diff
@@ -0,0 +1,57 @@
+import { Slider } from 'antd';
+import { memo, useCallback } from 'react';
+import { Flexbox } from 'react-layout-kit';
+
+import { useAgentStore } from '@/store/agent';
+import { agentChatConfigSelectors } from '@/store/agent/selectors';
+
+const ThinkingSlider = memo(() => {
+  const [config, updateAgentChatConfig] = useAgentStore((s) => [
+    agentChatConfigSelectors.currentChatConfig(s),
+    s.updateAgentChatConfig,
+  ]);
+
+  const thinking = config.thinking || 'auto'; // Default to 'auto' if not set
+
+  const marks = {
+    0: 'OFF',
+    1: 'Auto',
+    2: 'ON',
+  };
+
+  const thinkingValues = ['disabled', 'auto', 'enabled'];
+  const indexValue = thinkingValues.indexOf(thinking);
+  const currentValue = indexValue === -1 ? 1 : indexValue;
+
+  const updateThinking = useCallback(
+    (value: number) => {
+      const thinkingMode = thinkingValues[value] as 'disabled' | 'auto' | 'enabled';
+      updateAgentChatConfig({ thinking: thinkingMode });
+    },
+    [updateAgentChatConfig],
+  );
+
+  return (
+    <Flexbox
+      align={'center'}
+      gap={12}
+      horizontal
+      paddingInline={'0 20px'}
+      style={{ minWidth: 200, width: '100%' }}
+    >
+      <Flexbox flex={1}>
+        <Slider
+          marks={marks}
+          max={2}
+          min={0}
+          onChange={updateThinking}
+          step={1}
+          tooltip={{ open: false }}
+          value={currentValue}
+        />
+      </Flexbox>
+    </Flexbox>
+  );
+});
+
+export default ThinkingSlider;
```
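The component encodes the three modes positionally (0 = OFF, 1 = Auto, 2 = ON) and falls back to Auto for unset or unrecognised values. A standalone sketch of that mapping, outside React:

```ts
// Position <-> mode mapping as used by ThinkingSlider above.
const thinkingValues = ['disabled', 'auto', 'enabled'] as const;
type ThinkingMode = (typeof thinkingValues)[number];

// mode -> slider position, defaulting to 1 ('auto') for unknown input
const toPosition = (mode: string): number => {
  const index = thinkingValues.indexOf(mode as ThinkingMode);
  return index === -1 ? 1 : index;
};

console.log(toPosition('enabled')); // 2 (labelled ON)
console.log(toPosition('bogus')); // 1 (falls back to Auto)
console.log(thinkingValues[toPosition('disabled')]); // 'disabled' (round-trips)
```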
package/src/libs/model-runtime/bedrock/index.ts
CHANGED

```diff
@@ -3,7 +3,6 @@ import {
   InvokeModelCommand,
   InvokeModelWithResponseStreamCommand,
 } from '@aws-sdk/client-bedrock-runtime';
-import { experimental_buildLlama2Prompt } from 'ai/prompts';
 
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
@@ -25,6 +24,38 @@ import {
   createBedrockStream,
 } from '../utils/streams';
 
+/**
+ * A prompt constructor for HuggingFace LLama 2 chat models.
+ * Does not support `function` messages.
+ * @see https://huggingface.co/meta-llama/Llama-2-70b-chat-hf and https://huggingface.co/blog/llama2#how-to-prompt-llama-2
+ */
+export function experimental_buildLlama2Prompt(messages: { content: string; role: string }[]) {
+  const startPrompt = `<s>[INST] `;
+  const endPrompt = ` [/INST]`;
+  const conversation = messages.map(({ content, role }, index) => {
+    switch (role) {
+      case 'user': {
+        return content.trim();
+      }
+      case 'assistant': {
+        return ` [/INST] ${content}</s><s>[INST] `;
+      }
+      case 'function': {
+        throw new Error('Llama 2 does not support function calls.');
+      }
+      default: {
+        if (role === 'system' && index === 0) {
+          return `<<SYS>>\n${content}\n<</SYS>>\n\n`;
+        } else {
+          throw new Error(`Invalid message role: ${role}`);
+        }
+      }
+    }
+  });
+
+  return startPrompt + conversation.join('') + endPrompt;
+}
+
 export interface LobeBedrockAIParams {
   accessKeyId?: string;
   accessKeySecret?: string;
```
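The helper is vendored from `ai/prompts` so the import above can go. For reference, a system message followed by one user turn yields the standard Llama 2 template (a sketch; the import path is illustrative):

```ts
import { experimental_buildLlama2Prompt } from '@/libs/model-runtime/bedrock'; // path illustrative

const prompt = experimental_buildLlama2Prompt([
  { content: 'You are a helpful assistant.', role: 'system' },
  { content: 'Hello!', role: 'user' },
]);

// Produces:
// <s>[INST] <<SYS>>
// You are a helpful assistant.
// <</SYS>>
//
// Hello! [/INST]
console.log(prompt);
```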
package/src/libs/model-runtime/utils/streams/bedrock/common.ts
CHANGED

```diff
@@ -2,7 +2,8 @@ import {
   InvokeModelWithResponseStreamResponse,
   ResponseStream,
 } from '@aws-sdk/client-bedrock-runtime';
-
+
+import { readableFromAsyncIterable } from '../protocol';
 
 const chatStreamable = async function* (stream: AsyncIterable<ResponseStream>) {
   for await (const response of stream) {
```
package/src/libs/model-runtime/utils/streams/protocol.ts
CHANGED

```diff
@@ -108,6 +108,22 @@ const chatStreamable = async function* <T>(stream: AsyncIterable<T>) {
 };
 
 const ERROR_CHUNK_PREFIX = '%FIRST_CHUNK_ERROR%: ';
+
+export function readableFromAsyncIterable<T>(iterable: AsyncIterable<T>) {
+  let it = iterable[Symbol.asyncIterator]();
+  return new ReadableStream<T>({
+    async cancel(reason) {
+      await it.return?.(reason);
+    },
+
+    async pull(controller) {
+      const { done, value } = await it.next();
+      if (done) controller.close();
+      else controller.enqueue(value);
+    },
+  });
+}
+
 // make the response to the streamable format
 export const convertIterableToStream = <T>(stream: AsyncIterable<T>) => {
   const iterable = chatStreamable(stream);
```
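A minimal usage sketch: any async iterable becomes a Web `ReadableStream`, and cancelling the stream forwards to the iterator's `return`, so generator cleanup still runs:

```ts
import { readableFromAsyncIterable } from './protocol'; // path illustrative

async function* numbers() {
  try {
    yield 1;
    yield 2;
  } finally {
    console.log('generator cleaned up'); // runs on cancel() too
  }
}

const main = async () => {
  const reader = readableFromAsyncIterable(numbers()).getReader();

  // Each pull() advances the iterator exactly once.
  console.log(await reader.read()); // { done: false, value: 1 }

  // cancel() calls it.return?.(reason), triggering the finally block above.
  await reader.cancel('no longer needed');
};

void main();
```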
package/src/libs/model-runtime/utils/streams/qwen.ts
CHANGED

```diff
@@ -1,6 +1,8 @@
-import { ChatCompletionContentPartText } from 'ai/prompts';
 import OpenAI from 'openai';
-import { ChatCompletionContentPart } from 'openai/resources/index.mjs';
+import {
+  ChatCompletionContentPart,
+  ChatCompletionContentPartText,
+} from 'openai/resources/index.mjs';
 import type { Stream } from 'openai/streaming';
 
 import { ChatStreamCallbacks } from '../../types';
```
package/src/libs/model-runtime/volcengine/index.ts
CHANGED

```diff
@@ -2,6 +2,13 @@ import { ModelProvider } from '../types';
 import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
 import { MODEL_LIST_CONFIGS, processModelList } from '../utils/modelParse';
 
+const THINKING_MODELS = [
+  'thinking-vision-pro',
+  'thinking-pro-m',
+  'doubao-seed-1-6',
+  'doubao-1-5-ui-tars'
+];
+
 export interface VolcengineModelCard {
   id: string;
 }
@@ -15,12 +22,9 @@ export const LobeVolcengineAI = createOpenAICompatibleRuntime({
       return {
         ...rest,
         model,
-        ...(
+        ...(THINKING_MODELS.some((keyword) => model.toLowerCase().includes(keyword))
           ? {
-              thinking:
-                thinking !== undefined && thinking.type === 'enabled'
-                  ? { type: 'enabled' }
-                  : { type: 'disabled' },
+              thinking: { type: thinking?.type }
             }
           : {}),
       } as any;
```
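Besides hoisting the keyword list, this changes behaviour: the old branch collapsed anything that was not explicitly `enabled` into `{ type: 'disabled' }`, while the new one forwards `thinking?.type` verbatim — which is what lets the UI's new `auto` mode reach the API. A sketch of the transform (the dated model id is made up):

```ts
const THINKING_MODELS = ['thinking-vision-pro', 'thinking-pro-m', 'doubao-seed-1-6', 'doubao-1-5-ui-tars'];

// Hypothetical payload; the id matches 'doubao-seed-1-6' by substring.
const payload = { model: 'doubao-seed-1-6-250615', thinking: { type: 'auto' as const } };

const matches = THINKING_MODELS.some((keyword) => payload.model.toLowerCase().includes(keyword));
const body = {
  model: payload.model,
  // Old code would have sent { type: 'disabled' } here, losing 'auto'.
  ...(matches ? { thinking: { type: payload.thinking?.type } } : {}),
};
console.log(body); // { model: 'doubao-seed-1-6-250615', thinking: { type: 'auto' } }
```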
package/src/libs/model-runtime/xai/index.ts
CHANGED

```diff
@@ -9,9 +9,12 @@ export interface XAIModelCard {
 
 export const GrokReasoningModels = new Set([
   'grok-3-mini',
-  'grok-4'
+  'grok-4',
 ]);
 
+export const isGrokReasoningModel = (model: string) =>
+  Array.from(GrokReasoningModels).some((id) => model.includes(id));
+
 export const LobeXAI = createOpenAICompatibleRuntime({
   baseURL: 'https://api.x.ai/v1',
   chatCompletion: {
@@ -20,9 +23,9 @@ export const LobeXAI = createOpenAICompatibleRuntime({
 
     return {
       ...rest,
-      frequency_penalty:
+      frequency_penalty: isGrokReasoningModel(model) ? undefined : frequency_penalty,
       model,
-      presence_penalty:
+      presence_penalty: isGrokReasoningModel(model) ? undefined : presence_penalty,
       stream: true,
       ...(enabledSearch && {
         search_parameters: {
```
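`isGrokReasoningModel` matches by substring, so dated variants of the two ids qualify as well, and for those models the two sampling penalties are dropped from the payload (presumably because the API rejects them for reasoning models). A quick check — the dated ids below are illustrative:

```ts
const GrokReasoningModels = new Set(['grok-3-mini', 'grok-4']);
const isGrokReasoningModel = (model: string) =>
  Array.from(GrokReasoningModels).some((id) => model.includes(id));

console.log(isGrokReasoningModel('grok-4-0709')); // true ('grok-4' is a substring)
console.log(isGrokReasoningModel('grok-2-1212')); // false (penalties pass through)
```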
package/src/services/chat.ts
CHANGED

```diff
@@ -264,6 +264,10 @@ class ChatService {
       extendParams.reasoning_effort = chatConfig.reasoningEffort;
     }
 
+    if (modelExtendParams!.includes('thinking') && chatConfig.thinking) {
+      extendParams.thinking = { type: chatConfig.thinking };
+    }
+
     if (
       modelExtendParams!.includes('thinkingBudget') &&
       chatConfig.thinkingBudget !== undefined
```
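The new branch mirrors the `reasoning_effort` one above it: the UI value ships only when the model declares the `'thinking'` extend param, and it is wrapped into the `{ type }` shape the Volcengine runtime expects. A sketch of the resulting fragment:

```ts
// Illustrative: chatConfig.thinking === 'auto' on a model whose
// extendParams include 'thinking'.
const chatConfig = { thinking: 'auto' as 'disabled' | 'auto' | 'enabled' };
const extendParams: { thinking?: { type: string } } = {};

extendParams.thinking = { type: chatConfig.thinking };
console.log(extendParams); // { thinking: { type: 'auto' } }
```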
package/src/types/aiModel.ts
CHANGED

```diff
@@ -121,6 +121,7 @@ export interface AIBaseModelCard {
    * whether model is legacy (deprecated but not removed yet)
    */
   legacy?: boolean;
+  maxOutput?: number;
   /**
    * who create this model
    */
@@ -148,6 +149,7 @@ export type ExtendParamsType =
   | 'enableReasoning'
   | 'disableContextCaching'
   | 'reasoningEffort'
+  | 'thinking'
   | 'thinkingBudget';
 
 export interface AiModelSettings {
@@ -207,6 +209,7 @@ export interface AITTSModelCard extends AIBaseModelCard {
      * the input pricing, e.g. $1 / 1M tokens
      */
     input?: number;
+    output?: number;
   };
   type: 'tts';
 }
@@ -222,6 +225,7 @@ export interface AISTTModelCard extends AIBaseModelCard {
      * the input pricing, e.g. $1 / 1M tokens
      */
     input?: number;
+    output?: number;
   };
   type: 'stt';
 }
```
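Together these additions let catalog entries declare the new control and the extra metadata. An illustrative card — the `AIChatModelCard` name is the chat-model card type assumed to live alongside these interfaces, and every value is made up:

```ts
import type { AIChatModelCard } from '@/types/aiModel'; // assumed export

const exampleModel: AIChatModelCard = {
  contextWindowTokens: 256_000,
  displayName: 'Example Thinking Model',
  id: 'example-thinking-model',
  maxOutput: 32_000, // new optional field on AIBaseModelCard
  settings: {
    extendParams: ['thinking', 'thinkingBudget'], // 'thinking' is the new ExtendParamsType
  },
  type: 'chat',
};
```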