@lobehub/chat 1.47.7 → 1.47.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+### [Version 1.47.8](https://github.com/lobehub/lobe-chat/compare/v1.47.7...v1.47.8)
+
+<sup>Released on **2025-01-20**</sup>
+
+#### 💄 Styles
+
+- **misc**: Add deepseek r1 model.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Add deepseek r1 model, closes [#5520](https://github.com/lobehub/lobe-chat/issues/5520) ([414477f](https://github.com/lobehub/lobe-chat/commit/414477f))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.47.7](https://github.com/lobehub/lobe-chat/compare/v1.47.6...v1.47.7)
 
 <sup>Released on **2025-01-20**</sup>
package/changelog/v1.json
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.47.7",
+  "version": "1.47.8",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -20,6 +20,22 @@ const deepseekChatModels: AIChatModelCard[] = [
     releasedAt: '2024-12-26',
     type: 'chat',
   },
+  {
+    contextWindowTokens: 65_536,
+    description:
+      'DeepSeek 推出的推理模型。在输出最终回答之前,模型会先输出一段思维链内容,以提升最终答案的准确性。',
+    displayName: 'DeepSeek R1',
+    enabled: true,
+    id: 'deepseek-reasoner',
+    pricing: {
+      cachedInput: 1,
+      currency: 'CNY',
+      input: 4,
+      output: 16,
+    },
+    releasedAt: '2025-01-20',
+    type: 'chat',
+  },
 ];
 
 export const allModels = [...deepseekChatModels];
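The new card registers `deepseek-reasoner` (DeepSeek R1) with a 65,536-token context window and CNY pricing; the Chinese description roughly translates to: "A reasoning model released by DeepSeek. Before giving its final answer, the model first outputs a chain-of-thought to improve the accuracy of the final response." As a rough illustration of how such a pricing block could be consumed, assuming the values are per-million-token rates in the listed currency (a reading that matches DeepSeek's published R1 prices), here is a minimal sketch; the `estimateCostCNY` helper is hypothetical and not part of the package:

// Hypothetical helper, not part of the diff: estimates a request's cost from a
// model card's pricing block, assuming each value is CNY per million tokens.
interface ModelPricing {
  cachedInput?: number;
  currency?: string;
  input?: number;
  output?: number;
}

const estimateCostCNY = (
  pricing: ModelPricing,
  usage: { cachedInputTokens: number; inputTokens: number; outputTokens: number },
): number =>
  ((pricing.cachedInput ?? 0) * usage.cachedInputTokens +
    (pricing.input ?? 0) * usage.inputTokens +
    (pricing.output ?? 0) * usage.outputTokens) /
  1_000_000;

// Example with the DeepSeek R1 card above: 10k uncached prompt tokens plus 2k
// completion tokens => (4 * 10_000 + 16 * 2_000) / 1e6 = 0.072 CNY.
console.log(
  estimateCostCNY(
    { cachedInput: 1, currency: 'CNY', input: 4, output: 16 },
    { cachedInputTokens: 0, inputTokens: 10_000, outputTokens: 2_000 },
  ),
);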
@@ -11,14 +11,29 @@ const DeepSeek: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'deepseek-chat',
-      pricing: {
-        cachedInput: 0.
+      pricing: { // 2025.2.9 之后涨价
+        cachedInput: 0.1,
         currency: 'CNY',
-        input:
-        output:
+        input: 1,
+        output: 2,
       },
       releasedAt: '2024-12-26',
     },
+    {
+      contextWindowTokens: 65_536,
+      description:
+        'DeepSeek 推出的推理模型。在输出最终回答之前,模型会先输出一段思维链内容,以提升最终答案的准确性。',
+      displayName: 'DeepSeek R1',
+      enabled: true,
+      id: 'deepseek-reasoner',
+      pricing: {
+        cachedInput: 1,
+        currency: 'CNY',
+        input: 4,
+        output: 16,
+      },
+      releasedAt: '2025-01-20',
+    },
   ],
   checkModel: 'deepseek-chat',
   description:
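The same DeepSeek R1 entry is added to the provider card, and the deepseek-chat pricing block gains a comment (`// 2025.2.9 之后涨价`, i.e. prices rise after 2025-02-09). Unlike `deepseek-chat`, the new `deepseek-reasoner` entry does not set `functionCall: true`. Below is a hypothetical sketch of how a client might use that flag to decide whether tool definitions can be attached; the `supportsTools` helper and the trimmed card shape are assumptions, not lobe-chat APIs:

// Hypothetical, for illustration only: a trimmed-down card shape and a helper
// that checks the functionCall flag before attaching tools to a request.
interface ProviderChatModelCard {
  enabled?: boolean;
  functionCall?: boolean;
  id: string;
}

const deepSeekChatModels: ProviderChatModelCard[] = [
  { enabled: true, functionCall: true, id: 'deepseek-chat' },
  { enabled: true, id: 'deepseek-reasoner' }, // no functionCall flag in the new card
];

const supportsTools = (modelId: string): boolean =>
  deepSeekChatModels.find((card) => card.id === modelId)?.functionCall === true;

console.log(supportsTools('deepseek-chat')); // true
console.log(supportsTools('deepseek-reasoner')); // false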
@@ -1,8 +1,30 @@
-import
+import OpenAI from 'openai';
+
+import { ChatStreamPayload, ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
 export const LobeDeepSeekAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.deepseek.com/v1',
+  chatCompletion: {
+    handlePayload: ({ frequency_penalty, model, presence_penalty, temperature, top_p, ...payload }: ChatStreamPayload) =>
+      ({
+        ...payload,
+        model,
+        ...(model === 'deepseek-reasoner'
+          ? {
+              frequency_penalty: undefined,
+              presence_penalty: undefined,
+              temperature: undefined,
+              top_p: undefined,
+            }
+          : {
+              frequency_penalty,
+              presence_penalty,
+              temperature,
+              top_p,
+            }),
+      }) as OpenAI.ChatCompletionCreateParamsStreaming,
+  },
   debug: {
     chatCompletion: () => process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION === '1',
   },
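The new `chatCompletion.handlePayload` hook drops `frequency_penalty`, `presence_penalty`, `temperature`, and `top_p` when the model is `deepseek-reasoner`, and forwards them unchanged for every other DeepSeek model, presumably because the R1 endpoint does not honor those sampling parameters. Here is a standalone sketch of the same transform; the simplified payload type and the `buildDeepSeekPayload` name are illustrative, not part of the package:

// Illustrative sketch that mirrors the branch added in the diff above.
// SketchPayload is a simplified stand-in for lobe-chat's ChatStreamPayload.
interface SketchPayload {
  frequency_penalty?: number;
  messages: { content: string; role: 'assistant' | 'system' | 'user' }[];
  model: string;
  presence_penalty?: number;
  temperature?: number;
  top_p?: number;
}

const buildDeepSeekPayload = ({
  frequency_penalty,
  model,
  presence_penalty,
  temperature,
  top_p,
  ...payload
}: SketchPayload) => ({
  ...payload,
  model,
  // For deepseek-reasoner (R1) the sampling parameters are sent as undefined;
  // every other model keeps whatever the caller provided.
  ...(model === 'deepseek-reasoner'
    ? { frequency_penalty: undefined, presence_penalty: undefined, temperature: undefined, top_p: undefined }
    : { frequency_penalty, presence_penalty, temperature, top_p }),
});

// deepseek-chat keeps temperature: 0.7; deepseek-reasoner gets temperature: undefined.
console.log(buildDeepSeekPayload({ messages: [], model: 'deepseek-chat', temperature: 0.7 }));
console.log(buildDeepSeekPayload({ messages: [], model: 'deepseek-reasoner', temperature: 0.7 }));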