@lobehub/chat 1.94.15 → 1.94.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/README.md +8 -8
- package/changelog/v1.json +9 -0
- package/package.json +1 -1
- package/src/app/[variants]/layout.tsx +7 -0
- package/src/config/aiModels/google.ts +5 -5
- package/src/config/aiModels/groq.ts +9 -41
- package/src/config/aiModels/xai.ts +4 -0
- package/src/libs/model-runtime/openai/index.ts +2 -1
- package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts +10 -6
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,31 @@

  # Changelog

+ ### [Version 1.94.16](https://github.com/lobehub/lobe-chat/compare/v1.94.15...v1.94.16)
+
+ <sup>Released on **2025-06-19**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Correctly pass `reasoning.summary`.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Correctly pass `reasoning.summary`, closes [#8221](https://github.com/lobehub/lobe-chat/issues/8221) ([da79815](https://github.com/lobehub/lobe-chat/commit/da79815))
+
+ </details>
+
+ <div align="right">
+
+ [](#readme-top)
+
+ </div>
+
  ### [Version 1.94.15](https://github.com/lobehub/lobe-chat/compare/v1.94.14...v1.94.15)

  <sup>Released on **2025-06-19**</sup>
package/README.md
CHANGED
@@ -367,14 +367,14 @@ Our marketplace is not just a showcase platform but also a collaborative space.

  <!-- AGENT LIST -->

- | Recent Submits
- |
- | [
- | [
- | [
- | [
-
- > 📊 Total agents: [<kbd>**
+ | Recent Submits | Description |
+ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+ | [Turtle Soup Host](https://lobechat.com/discover/assistant/lateral-thinking-puzzle)<br/><sup>By **[CSY2022](https://github.com/CSY2022)** on **2025-06-19**</sup> | A turtle soup host needs to provide the scenario, the complete story (truth of the event), and the key point (the condition for guessing correctly).<br/>`turtle-soup` `reasoning` `interaction` `puzzle` `role-playing` |
+ | [Gourmet Reviewer🍟](https://lobechat.com/discover/assistant/food-reviewer)<br/><sup>By **[renhai-lab](https://github.com/renhai-lab)** on **2025-06-17**</sup> | Food critique expert<br/>`gourmet` `review` `writing` |
+ | [Academic Writing Assistant](https://lobechat.com/discover/assistant/academic-writing-assistant)<br/><sup>By **[swarfte](https://github.com/swarfte)** on **2025-06-17**</sup> | Expert in academic research paper writing and formal documentation<br/>`academic-writing` `research` `formal-style` |
+ | [Minecraft Senior Developer](https://lobechat.com/discover/assistant/java-development)<br/><sup>By **[iamyuuk](https://github.com/iamyuuk)** on **2025-06-17**</sup> | Expert in advanced Java development and Minecraft mod and server plugin development<br/>`development` `programming` `minecraft` `java` |
+
+ > 📊 Total agents: [<kbd>**505**</kbd> ](https://lobechat.com/discover/assistants)

  <!-- AGENT LIST -->
package/changelog/v1.json
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.94.15",
+   "version": "1.94.16",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
package/src/app/[variants]/layout.tsx
CHANGED
@@ -78,6 +78,13 @@ export const generateViewport = async (props: DynamicLayoutProps): ResolvingView
  };

  export const generateStaticParams = () => {
+   // if in dev mode or in vercel preview mode, use ISR to speed up
+   const isVercelPreview = process.env.VERCEL === '1' && process.env.VERCEL_ENV !== 'production';
+
+   if (process.env.NODE_ENV !== 'production' || isVercelPreview) {
+     return [];
+   }
+
    const themes: ThemeAppearance[] = ['dark', 'light'];
    const mobileOptions = isDesktop ? [false] : [true, false];
    // only static for serveral page, other go to dynamtic
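The new guard works because, in the Next.js App Router, returning an empty array from `generateStaticParams` skips build-time prerendering entirely and lets each page be generated on first request instead. A minimal standalone sketch of the same pattern; the `variant` param shape here is illustrative, not lobe-chat's actual one:

```ts
// Sketch of the guard added above, assuming a Next.js App Router route
// with a single dynamic segment. Not the actual lobe-chat param shape.
export const generateStaticParams = (): Array<{ variant: string }> => {
  // Vercel sets VERCEL=1 on its builds; VERCEL_ENV tells preview from production.
  const isVercelPreview =
    process.env.VERCEL === '1' && process.env.VERCEL_ENV !== 'production';

  // Returning an empty list prerenders nothing; pages render on demand instead,
  // which is what makes dev and preview builds faster.
  if (process.env.NODE_ENV !== 'production' || isVercelPreview) {
    return [];
  }

  // Full production builds still enumerate every static combination.
  return [{ variant: 'dark' }, { variant: 'light' }];
};
```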
package/src/config/aiModels/google.ts
CHANGED
@@ -113,6 +113,7 @@ const googleChatModels: AIChatModelCard[] = [
    id: 'gemini-2.5-flash',
    maxOutput: 65_536,
    pricing: {
+     cachedInput: 0.075,
      input: 0.3,
      output: 2.5,
    },
@@ -134,7 +135,6 @@ const googleChatModels: AIChatModelCard[] = [
    contextWindowTokens: 1_048_576 + 65_536,
    description: 'Gemini 2.5 Flash Preview 是 Google 性价比最高的模型,提供全面的功能。',
    displayName: 'Gemini 2.5 Flash Preview 05-20',
-   enabled: true,
    id: 'gemini-2.5-flash-preview-05-20',
    maxOutput: 65_536,
    pricing: {
@@ -202,18 +202,18 @@ const googleChatModels: AIChatModelCard[] = [
      search: true,
      vision: true,
    },
-   contextWindowTokens:
+   contextWindowTokens: 65_536 + 65_536,
    description:
      'Gemini 2.5 Flash-Lite Preview 是 Google 最小、性价比最高的模型,专为大规模使用而设计。',
    displayName: 'Gemini 2.5 Flash-Lite Preview 06-17',
-   enabled: true,
    id: 'gemini-2.5-flash-lite-preview-06-17',
-   maxOutput:
+   maxOutput: 65_536,
    pricing: {
+     cachedInput: 0.025,
      input: 0.1,
      output: 0.4,
    },
-   releasedAt: '2025-06-
+   releasedAt: '2025-06-11',
    settings: {
      extendParams: ['enableReasoning', 'reasoningBudgetToken'],
      searchImpl: 'params',
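The `cachedInput` field slots in next to `input` and `output`, giving a third rate for prompt tokens served from the provider's cache. Assuming all three rates are USD per million tokens (consistent with the values above), a cost estimate could use it as sketched below; `PricingInfo` and `estimateCost` are illustrative names, not lobe-chat APIs:

```ts
// Hypothetical cost helper showing how a cachedInput rate would be applied.
interface PricingInfo {
  cachedInput?: number; // rate for cache-hit prompt tokens, USD per 1M (assumed)
  input: number; // rate for uncached prompt tokens
  output: number; // rate for completion tokens
}

const estimateCost = (
  pricing: PricingInfo,
  usage: { cachedTokens: number; inputTokens: number; outputTokens: number },
): number => {
  // Fall back to the plain input rate when no cached rate is published.
  const cachedRate = pricing.cachedInput ?? pricing.input;
  return (
    (usage.cachedTokens * cachedRate +
      usage.inputTokens * pricing.input +
      usage.outputTokens * pricing.output) /
    1_000_000
  );
};

// With the gemini-2.5-flash rates added above:
console.log(
  estimateCost(
    { cachedInput: 0.075, input: 0.3, output: 2.5 },
    { cachedTokens: 50_000, inputTokens: 10_000, outputTokens: 2000 },
  ),
); // ≈ 0.01175
```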
package/src/config/aiModels/groq.ts
CHANGED
@@ -56,6 +56,7 @@ const groqChatModels: AIChatModelCard[] = [
    displayName: 'Qwen QwQ 32B',
    enabled: true,
    id: 'qwen-qwq-32b',
+   maxOutput: 131_072,
    pricing: {
      input: 0.29,
      output: 0.39,
@@ -69,7 +70,7 @@ const groqChatModels: AIChatModelCard[] = [
    contextWindowTokens: 131_072,
    displayName: 'Qwen3 32B',
    id: 'qwen/qwen3-32b',
-   maxOutput:
+   maxOutput: 40_960,
    pricing: {
      input: 0.29,
      output: 0.59,
@@ -84,6 +85,7 @@ const groqChatModels: AIChatModelCard[] = [
    contextWindowTokens: 131_072,
    displayName: 'DeepSeek R1 Distill Llama 70B',
    id: 'deepseek-r1-distill-llama-70b',
+   maxOutput: 131_072,
    pricing: {
      input: 0.75, // 0.75 - 5.00
      output: 0.99, // 0.99 - 5.00
@@ -98,6 +100,7 @@ const groqChatModels: AIChatModelCard[] = [
    description: 'Gemma 2 9B 是一款优化用于特定任务和工具整合的模型。',
    displayName: 'Gemma 2 9B',
    id: 'gemma2-9b-it',
+   maxOutput: 8192,
    pricing: {
      input: 0.2,
      output: 0.2,
@@ -113,7 +116,7 @@ const groqChatModels: AIChatModelCard[] = [
      'Llama 3.1 8B 是一款高效能模型,提供了快速的文本生成能力,非常适合需要大规模效率和成本效益的应用场景。',
    displayName: 'Llama 3.1 8B Instant',
    id: 'llama-3.1-8b-instant',
-   maxOutput:
+   maxOutput: 131_072,
    pricing: {
      input: 0.05,
      output: 0.08,
@@ -136,32 +139,11 @@ const groqChatModels: AIChatModelCard[] = [
    },
    type: 'chat',
  },
- {
-   contextWindowTokens: 8192,
-   description: 'Meta Llama 3 70B 提供无与伦比的复杂性处理能力,为高要求项目量身定制。',
-   displayName: 'Llama 3 70B',
-   id: 'llama3-70b-8192',
-   pricing: {
-     input: 0.59,
-     output: 0.79,
-   },
-   type: 'chat',
- },
- {
-   contextWindowTokens: 8192,
-   description: 'Meta Llama 3 8B 带来优质的推理效能,适合多场景应用需求。',
-   displayName: 'Llama 3 8B',
-   id: 'llama3-8b-8192',
-   pricing: {
-     input: 0.05,
-     output: 0.08,
-   },
-   type: 'chat',
- },
  {
    contextWindowTokens: 32_768,
    displayName: 'Mistral Saba 24B',
    id: 'mistral-saba-24b',
+   maxOutput: 32_768,
    pricing: {
      input: 0.79,
      output: 0.79,
@@ -172,39 +154,25 @@ const groqChatModels: AIChatModelCard[] = [
    contextWindowTokens: 131_072,
    displayName: 'Llama Guard 4 12B',
    id: 'meta-llama/llama-guard-4-12b',
-   maxOutput:
+   maxOutput: 1024,
    pricing: {
      input: 0.2,
      output: 0.2,
    },
    type: 'chat',
  },
- {
-   contextWindowTokens: 8192,
-   displayName: 'Llama Guard 3 8B',
-   id: 'llama-guard-3-8b',
-   pricing: {
-     input: 0.2,
-     output: 0.2,
-   },
-   type: 'chat',
- },
- {
-   contextWindowTokens: 4096,
-   displayName: 'ALLaM 2 7B',
-   id: 'allam-2-7b',
-   type: 'chat',
- },
  {
    contextWindowTokens: 512,
    displayName: 'Llama Prompt Guard 2 22M',
    id: 'meta-llama/llama-prompt-guard-2-22m',
+   maxOutput: 512,
    type: 'chat',
  },
  {
    contextWindowTokens: 512,
    displayName: 'Llama Prompt Guard 2 86M',
    id: 'meta-llama/llama-prompt-guard-2-86m',
+   maxOutput: 512,
    type: 'chat',
  },
];
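Beyond removing the retired Llama 3 and guard models, this file now records `maxOutput` for each entry. One plausible use of that metadata is clamping a requested completion budget to the model's documented limit, sketched below with hypothetical names (`ModelCard` and `clampMaxTokens` are not lobe-chat APIs):

```ts
// Illustrative clamp using the per-model maxOutput recorded above.
interface ModelCard {
  id: string;
  maxOutput?: number; // provider-documented completion-token ceiling
}

const clampMaxTokens = (card: ModelCard, requested?: number): number | undefined => {
  if (card.maxOutput === undefined) return requested; // nothing to clamp against
  return requested === undefined ? card.maxOutput : Math.min(requested, card.maxOutput);
};

// Llama Guard 4 12B now reports maxOutput: 1024, so a 4096-token request is capped:
console.log(clampMaxTokens({ id: 'meta-llama/llama-guard-4-12b', maxOutput: 1024 }, 4096)); // 1024
```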
package/src/config/aiModels/xai.ts
CHANGED
@@ -14,6 +14,7 @@ const xaiChatModels: AIChatModelCard[] = [
    enabled: true,
    id: 'grok-3',
    pricing: {
+     cachedInput: 0.75,
      input: 3,
      output: 15,
    },
@@ -34,6 +35,7 @@ const xaiChatModels: AIChatModelCard[] = [
    displayName: 'Grok 3 (Fast mode)',
    id: 'grok-3-fast',
    pricing: {
+     cachedInput: 1.25,
      input: 5,
      output: 25,
    },
@@ -56,6 +58,7 @@ const xaiChatModels: AIChatModelCard[] = [
    enabled: true,
    id: 'grok-3-mini',
    pricing: {
+     cachedInput: 0.075,
      input: 0.3,
      output: 0.5,
    },
@@ -78,6 +81,7 @@ const xaiChatModels: AIChatModelCard[] = [
    displayName: 'Grok 3 Mini (Fast mode)',
    id: 'grok-3-mini-fast',
    pricing: {
+     cachedInput: 0.15,
      input: 0.6,
      output: 4,
    },
package/src/libs/model-runtime/openai/index.ts
CHANGED
@@ -1,8 +1,9 @@
+ import { responsesAPIModels } from '@/const/models';
+
  import { ChatStreamPayload, ModelProvider } from '../types';
  import { processMultiProviderModelList } from '../utils/modelParse';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { pruneReasoningPayload } from '../utils/openaiHelpers';
- import { responsesAPIModels } from '@/const/models';

  export interface OpenAIModelCard {
    id: string;
package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts
CHANGED
@@ -208,10 +208,7 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
    this.id = options.id || provider;
  }

- async chat(
-   { responseMode, ...payload }: ChatStreamPayload,
-   options?: ChatMethodOptions,
- ) {
+ async chat({ responseMode, ...payload }: ChatStreamPayload, options?: ChatMethodOptions) {
    try {
      const inputStartAt = Date.now();
      const postPayload = chatCompletion?.handlePayload
@@ -478,7 +475,7 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
  ): Promise<Response> {
    const inputStartAt = Date.now();

- const { messages, reasoning_effort, tools, ...res } = responses?.handlePayload
+ const { messages, reasoning_effort, tools, reasoning, ...res } = responses?.handlePayload
    ? (responses?.handlePayload(payload, this._options) as ChatStreamPayload)
    : payload;

@@ -491,7 +488,14 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an

  const postPayload = {
    ...res,
-   ...(
+   ...(reasoning || reasoning_effort
+     ? {
+         reasoning: {
+           ...reasoning,
+           ...(reasoning_effort && { effort: reasoning_effort }),
+         },
+       }
+     : {}),
    input,
    store: false,
    tools: tools?.map((tool) => this.convertChatCompletionToolToResponseTool(tool)),