@lobehub/chat 1.94.5 → 1.94.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+### [Version 1.94.6](https://github.com/lobehub/lobe-chat/compare/v1.94.5...v1.94.6)
+
+<sup>Released on **2025-06-12**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Abort the Gemini request correctly & Add openai o3-pro.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Abort the Gemini request correctly & Add openai o3-pro, closes [#8135](https://github.com/lobehub/lobe-chat/issues/8135) ([c79f1b9](https://github.com/lobehub/lobe-chat/commit/c79f1b9))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.94.5](https://github.com/lobehub/lobe-chat/compare/v1.94.4...v1.94.5)
 
 <sup>Released on **2025-06-12**</sup>
package/changelog/v1.json
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.94.5",
+  "version": "1.94.6",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -62,6 +62,20 @@ const groqChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 131_072,
+    displayName: 'Qwen3 32B',
+    id: 'qwen/qwen3-32b',
+    maxOutput: 16_384,
+    pricing: {
+      input: 0.29,
+      output: 0.59,
+    },
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
@@ -8,6 +8,28 @@ import {
 } from '@/types/aiModel';
 
 export const openaiChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'The o3-pro model uses more compute to think more deeply and consistently deliver better answers; it is only available via the Responses API.',
+    displayName: 'o3-pro',
+    id: 'o3-pro',
+    maxOutput: 100_000,
+    pricing: {
+      input: 20,
+      output: 80,
+    },
+    releasedAt: '2025-06-10',
+    settings: {
+      extendParams: ['reasoningEffort'],
+    },
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
@@ -22,11 +44,11 @@ export const openaiChatModels: AIChatModelCard[] = [
     id: 'o3',
     maxOutput: 100_000,
     pricing: {
-      cachedInput: 2.5,
-      input: 10,
-      output: 40,
+      cachedInput: 0.5,
+      input: 2,
+      output: 8,
     },
-    releasedAt: '2025-04-
+    releasedAt: '2025-04-16',
     settings: {
       extendParams: ['reasoningEffort'],
     },
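For scale, these pricing fields read as USD per million tokens (an assumption based on the published o3 rates, not stated in the diff itself). A hypothetical helper — `estimateCost` is illustrative, not part of the package — shows what the new o3 numbers mean per request:

```ts
// Hypothetical cost estimator, assuming pricing values are USD per 1M tokens.
interface Pricing {
  cachedInput?: number;
  input: number;
  output: number;
}

interface Usage {
  cachedInputTokens?: number;
  inputTokens: number;
  outputTokens: number;
}

const estimateCost = (pricing: Pricing, usage: Usage): number =>
  ((usage.cachedInputTokens ?? 0) * (pricing.cachedInput ?? pricing.input) +
    usage.inputTokens * pricing.input +
    usage.outputTokens * pricing.output) /
  1_000_000;

// o3 after this change: 10k prompt tokens + 2k completion tokens
// => 10_000 * 2 / 1e6 + 2_000 * 8 / 1e6 = $0.036
console.log(
  estimateCost({ cachedInput: 0.5, input: 2, output: 8 }, { inputTokens: 10_000, outputTokens: 2_000 }),
);
```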
@@ -323,7 +323,7 @@ const OpenAI: ModelProviderCard = {
       },
     },
   ],
-  checkModel: 'gpt-
+  checkModel: 'gpt-4.1-nano',
   description:
    'OpenAI is a world-leading AI research organization whose models, such as the GPT series, push the frontier of natural language processing. OpenAI is committed to transforming multiple industries through innovative and efficient AI solutions. Its products offer notable performance and cost-effectiveness and are widely used in research, business, and innovative applications.',
   enabled: true,
@@ -25,11 +25,7 @@ import {
 import { AgentRuntimeError } from '../utils/createError';
 import { debugStream } from '../utils/debugStream';
 import { StreamingResponse } from '../utils/response';
-import {
-  GoogleGenerativeAIStream,
-  VertexAIStream,
-  convertIterableToStream,
-} from '../utils/streams';
+import { GoogleGenerativeAIStream, VertexAIStream } from '../utils/streams';
 import { parseDataUri } from '../utils/uriParser';
 
 const modelsOffSafetySettings = new Set(['gemini-2.0-flash-exp']);
@@ -91,6 +87,17 @@ interface GoogleAIThinkingConfig {
   thinkingBudget?: number;
 }
 
+const isAbortError = (error: Error): boolean => {
+  const message = error.message.toLowerCase();
+  return (
+    message.includes('aborted') ||
+    message.includes('cancelled') ||
+    message.includes('error reading from the stream') ||
+    message.includes('abort') ||
+    error.name === 'AbortError'
+  );
+};
+
 export class LobeGoogleAI implements LobeRuntimeAI {
   private client: GoogleGenerativeAI;
   private isVertexAi: boolean;
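Worth noting: `isAbortError` is a string-matching heuristic (and `includes('abort')` already subsumes the `'aborted'` check). A standalone sketch of how it classifies typical errors — the function is copied from the hunk above, the inputs are illustrative:

```ts
// Standalone copy of the heuristic, for illustration.
const isAbortError = (error: Error): boolean => {
  const message = error.message.toLowerCase();
  return (
    message.includes('aborted') ||
    message.includes('cancelled') ||
    message.includes('error reading from the stream') ||
    message.includes('abort') ||
    error.name === 'AbortError'
  );
};

// fetch/AbortController reject with a DOMException named 'AbortError'.
console.log(isAbortError(new DOMException('This operation was aborted', 'AbortError'))); // true
console.log(isAbortError(new Error('Request was cancelled')));          // true
console.log(isAbortError(new Error('Error reading from the stream'))); // true
console.log(isAbortError(new Error('429: rate limit exceeded')));      // false
```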
@@ -140,6 +147,20 @@ export class LobeGoogleAI implements LobeRuntimeAI {
     const contents = await this.buildGoogleMessages(payload.messages);
 
     const inputStartAt = Date.now();
+
+    const controller = new AbortController();
+    const originalSignal = options?.signal;
+
+    if (originalSignal) {
+      if (originalSignal.aborted) {
+        controller.abort();
+      } else {
+        originalSignal.addEventListener('abort', () => {
+          controller.abort();
+        });
+      }
+    }
+
     const geminiStreamResult = await this.client
       .getGenerativeModel(
         {
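This is the standard pattern for mirroring a caller's `AbortSignal` onto a locally owned controller, so the request can be cancelled either by the caller or by the runtime itself. A generic sketch of the same idea (`linkAbortSignal` is an illustrative name, not from the package; on Node ≥ 20 and recent browsers, `AbortSignal.any([...])` does the same job):

```ts
// Illustrative helper (not from the package): propagate an optional
// upstream signal to a controller the callee owns.
const linkAbortSignal = (upstream?: AbortSignal): AbortController => {
  const controller = new AbortController();
  if (upstream) {
    if (upstream.aborted) {
      controller.abort(); // caller cancelled before the request even started
    } else {
      upstream.addEventListener('abort', () => controller.abort(), { once: true });
    }
  }
  return controller;
};

// Usage: hand controller.signal to the downstream SDK call.
const upstream = new AbortController();
const local = linkAbortSignal(upstream.signal);
upstream.abort();
console.log(local.signal.aborted); // true
```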
@@ -177,15 +198,20 @@ export class LobeGoogleAI implements LobeRuntimeAI {
         },
         { apiVersion: 'v1beta', baseUrl: this.baseURL },
       )
-      .generateContentStream({
-        contents,
-        systemInstruction: modelsDisableInstuction.has(model)
-          ? undefined
-          : (payload.system as string),
-        tools: this.buildGoogleTools(payload.tools, payload),
-      });
+      .generateContentStream(
+        {
+          contents,
+          systemInstruction: modelsDisableInstuction.has(model)
+            ? undefined
+            : (payload.system as string),
+          tools: this.buildGoogleTools(payload.tools, payload),
+        },
+        {
+          signal: controller.signal,
+        },
+      );
 
-    const googleStream = convertIterableToStream(geminiStreamResult.stream);
+    const googleStream = this.createEnhancedStream(geminiStreamResult.stream, controller.signal);
     const [prod, useForDebug] = googleStream.tee();
 
     const key = this.isVertexAi
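With the signal now passed as `generateContentStream`'s second (request-options) argument, an abort tears down the underlying HTTP stream instead of leaving it running. Caller-side, the pattern looks roughly like this — a sketch in which `chat` stands in for however the runtime's chat method is invoked, not an exact API:

```ts
// Sketch: cancel a streaming chat call after a timeout. `chat` is a stand-in
// callback; only the AbortSignal plumbing is the point here.
async function chatWithTimeout(
  chat: (options: { signal: AbortSignal }) => Promise<Response>,
  timeoutMs = 5_000,
): Promise<Response | undefined> {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    return await chat({ signal: controller.signal });
  } catch (err) {
    // After this change, an abort surfaces as a 'Request was cancelled'
    // runtime error rather than being silently swallowed.
    console.log('chat aborted or failed:', err);
    return undefined;
  } finally {
    clearTimeout(timer);
  }
}
```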
@@ -205,6 +231,16 @@ export class LobeGoogleAI implements LobeRuntimeAI {
     } catch (e) {
       const err = e as Error;
 
+      // Drop the previous silent handling and surface the error consistently
+      if (isAbortError(err)) {
+        console.log('Request was cancelled');
+        throw AgentRuntimeError.chat({
+          error: { message: 'Request was cancelled' },
+          errorType: AgentRuntimeErrorType.ProviderBizError,
+          provider: this.provider,
+        });
+      }
+
       console.log(err);
       const { errorType, error } = this.parseErrorMessage(err.message);
 
@@ -212,24 +248,75 @@ export class LobeGoogleAI implements LobeRuntimeAI {
     }
   }
 
-  async models() {
+  private createEnhancedStream(originalStream: any, signal: AbortSignal): ReadableStream {
+    return new ReadableStream({
+      async start(controller) {
+        let hasData = false;
+
+        try {
+          for await (const chunk of originalStream) {
+            if (signal.aborted) {
+              // If some output has already been produced, close the stream gracefully instead of throwing
+              if (hasData) {
+                console.log('Stream cancelled gracefully, preserving existing output');
+                controller.close();
+                return;
+              } else {
+                // No output yet, so raise a cancellation error
+                throw new Error('Stream cancelled');
+              }
+            }
+
+            hasData = true;
+            controller.enqueue(chunk);
+          }
+        } catch (error) {
+          const err = error as Error;
+
+          // Handle all errors in one place, including abort errors
+          if (isAbortError(err) || signal.aborted) {
+            // If some output has already been produced, close the stream gracefully
+            if (hasData) {
+              console.log('Stream reading cancelled gracefully, preserving existing output');
+              controller.close();
+              return;
+            } else {
+              console.log('Stream reading cancelled before any output');
+              controller.error(new Error('Stream cancelled'));
+              return;
+            }
+          } else {
+            // Handle other stream-parsing errors
+            console.error('Stream parsing error:', err);
+            controller.error(err);
+            return;
+          }
+        }
+
+        controller.close();
+      },
+    });
+  }
+
+  async models(options?: { signal?: AbortSignal }) {
     try {
       const url = `${this.baseURL}/v1beta/models?key=${this.apiKey}`;
       const response = await fetch(url, {
         method: 'GET',
+        signal: options?.signal,
       });
-
+
       if (!response.ok) {
         throw new Error(`HTTP error! status: ${response.status}`);
       }
-
+
       const json = await response.json();
-
+
       const modelList: GoogleModelCard[] = json.models;
-
+
       const processedModels = modelList.map((model) => {
         const id = model.name.replace(/^models\//, '');
-
+
         return {
           contextWindowTokens: (model.inputTokenLimit || 0) + (model.outputTokenLimit || 0),
           displayName: model.displayName || id,
@@ -237,9 +324,9 @@ export class LobeGoogleAI implements LobeRuntimeAI {
           maxOutput: model.outputTokenLimit || undefined,
         };
       });
-
+
       const { MODEL_LIST_CONFIGS, processModelList } = await import('../utils/modelParse');
-
+
       return processModelList(processedModels, MODEL_LIST_CONFIGS.google);
     } catch (error) {
       console.error('Failed to fetch Google models:', error);
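Net behavior of `createEnhancedStream`: an abort before any chunk has been emitted surfaces as an error, while an abort after output has started closes the stream cleanly so partial text is preserved. A self-contained re-creation of that logic over a mock chunk source (a sketch, assuming a runtime where `ReadableStream` is async-iterable, e.g. Node ≥ 18 ESM):

```ts
// Standalone re-creation of the wrapper's abort semantics, for illustration.
const wrapStream = <T>(source: AsyncIterable<T>, signal: AbortSignal): ReadableStream<T> =>
  new ReadableStream<T>({
    async start(controller) {
      let hasData = false;
      try {
        for await (const chunk of source) {
          if (signal.aborted) {
            if (hasData) return controller.close(); // keep the partial output
            throw new Error('Stream cancelled'); // nothing emitted yet
          }
          hasData = true;
          controller.enqueue(chunk);
        }
        controller.close();
      } catch (err) {
        // Graceful close only for aborts that happen after some output.
        if (signal.aborted && hasData) controller.close();
        else controller.error(err as Error);
      }
    },
  });

// Mock source: one chunk every 10 ms; abort mid-stream at ~35 ms.
async function* ticks() {
  for (let i = 0; ; i++) {
    await new Promise((r) => setTimeout(r, 10));
    yield `chunk-${i}`;
  }
}

const ac = new AbortController();
setTimeout(() => ac.abort(), 35);
for await (const chunk of wrapStream(ticks(), ac.signal)) {
  console.log(chunk); // prints chunk-0..2, then the stream closes cleanly
}
```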