@lobehub/lobehub 2.0.0-next.67 → 2.0.0-next.68

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+## [Version 2.0.0-next.68](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.67...v2.0.0-next.68)
+
+<sup>Released on **2025-11-16**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: The tool to fail execution on ollama when a message contains b….
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: The tool to fail execution on ollama when a message contains b…, closes [#10259](https://github.com/lobehub/lobe-chat/issues/10259) ([1ad8080](https://github.com/lobehub/lobe-chat/commit/1ad8080))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
 ## [Version 2.0.0-next.67](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.66...v2.0.0-next.67)
 
 <sup>Released on **2025-11-16**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
 [
+  {
+    "children": {
+      "fixes": [
+        "The tool to fail execution on ollama when a message contains b…."
+      ]
+    },
+    "date": "2025-11-16",
+    "version": "2.0.0-next.68"
+  },
   {
     "children": {
       "improvements": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/lobehub",
-  "version": "2.0.0-next.67",
+  "version": "2.0.0-next.68",
   "description": "LobeHub - an open-source, comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -122,9 +122,6 @@
       "eslint --fix"
     ]
   },
-  "overrides": {
-    "eta": "4.0.1"
-  },
   "dependencies": {
     "@ant-design/icons": "^5.6.1",
     "@ant-design/pro-components": "^2.8.10",
@@ -398,9 +395,6 @@
   "pnpm": {
     "onlyBuiltDependencies": [
       "@vercel/speed-insights"
-    ],
-    "overrides": {
-      "eta": "4.0.1"
-    }
+    ]
   }
 }
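
For context, the two removed blocks were the same pin expressed for both package managers: npm reads the top-level `overrides` field, while pnpm reads `pnpm.overrides`, and each forced every transitive `eta` dependency to exactly `4.0.1`. Removing them lets `eta` resolve from dependents' own semver ranges again. A minimal sketch of the pattern that was dropped (field names exactly as in the diff above):

```json
{
  "overrides": { "eta": "4.0.1" },
  "pnpm": {
    "overrides": { "eta": "4.0.1" }
  }
}
```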
@@ -235,6 +235,73 @@ describe('OllamaStream', () => {
     expect(onToolCall).toHaveBeenCalledTimes(1);
     expect(onCompletionMock).toHaveBeenCalledTimes(1);
   });
+
+  it('tools use with a done', async () => {
+    vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1').mockReturnValueOnce('abcd1234');
+
+    const mockOllamaStream = new ReadableStream<ChatResponse>({
+      start(controller) {
+        controller.enqueue({
+          model: 'qwen2.5',
+          created_at: new Date('2024-12-01T03:34:55.166692Z'),
+          message: {
+            role: 'assistant',
+            content: '',
+            tool_calls: [
+              {
+                function: {
+                  name: 'realtime-weather____fetchCurrentWeather',
+                  arguments: { city: '杭州' },
+                },
+              },
+            ],
+          },
+          done_reason: 'stop',
+          done: true,
+          total_duration: 1122415333,
+          load_duration: 26178333,
+          prompt_eval_count: 221,
+          prompt_eval_duration: 507000000,
+          eval_count: 26,
+          eval_duration: 583000000,
+        } as unknown as ChatResponse);
+
+        controller.close();
+      },
+    });
+    const onStartMock = vi.fn();
+    const onTextMock = vi.fn();
+    const onToolCall = vi.fn();
+    const onCompletionMock = vi.fn();
+
+    const protocolStream = OllamaStream(mockOllamaStream, {
+      onStart: onStartMock,
+      onText: onTextMock,
+      onCompletion: onCompletionMock,
+      onToolsCalling: onToolCall,
+    });
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual(
+      [
+        'id: chat_1',
+        'event: tool_calls',
+        `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-weather____fetchCurrentWeather_0_abcd1234","index":0,"type":"function"}]\n`,
+      ].map((i) => `${i}\n`),
+    );
+
+    expect(onTextMock).toHaveBeenCalledTimes(0);
+    expect(onStartMock).toHaveBeenCalledTimes(1);
+    expect(onToolCall).toHaveBeenCalledTimes(1);
+    expect(onCompletionMock).toHaveBeenCalledTimes(1);
+  });
 });
 
 it('should handle empty stream', async () => {
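
The new test (its file path is not shown in this diff) enqueues a single Ollama chunk that is simultaneously final (`done: true`) and carries a `tool_calls` entry, then asserts that the protocol stream emits a `tool_calls` event rather than a bare stop. The expected chunks also show the wire framing: each event is an `id:` line, an `event:` line, and a `data:` line of JSON, each terminated by `\n`. A minimal sketch of a consumer for that framing (the `parseProtocolEvent` helper is hypothetical, not part of the package):

```ts
// Hypothetical helper: recover the fields of one serialized protocol event.
// Assumes the three-line `id:` / `event:` / `data:` framing asserted above.
interface ProtocolEvent {
  id: string;
  event: string;
  data: unknown;
}

function parseProtocolEvent(raw: string): ProtocolEvent {
  const lines = raw.split('\n').filter(Boolean);
  const field = (prefix: string) =>
    lines.find((l) => l.startsWith(prefix))?.slice(prefix.length).trim() ?? '';
  return {
    id: field('id:'),
    event: field('event:'),
    data: JSON.parse(field('data:') || 'null'),
  };
}
```

Applied to the test's output, `parseProtocolEvent(chunks.join(''))` would yield `{ id: 'chat_1', event: 'tool_calls', data: [...] }`.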
@@ -11,11 +11,6 @@ import {
 } from './protocol';
 
 const transformOllamaStream = (chunk: ChatResponse, stack: StreamContext): StreamProtocolChunk => {
-  // maybe need another structure to add support for multiple choices
-  if (chunk.done && !chunk.message.content) {
-    return { data: 'finished', id: stack.id, type: 'stop' };
-  }
-
   if (chunk.message.thinking) {
     return { data: chunk.message.thinking, id: stack.id, type: 'reasoning' };
   }
@@ -36,6 +31,11 @@ const transformOllamaStream = (chunk: ChatResponse, stack: StreamContext): Strea
     };
   }
 
+  // maybe need another structure to add support for multiple choices
+  if (chunk.done && !chunk.message.content) {
+    return { data: 'finished', id: stack.id, type: 'stop' };
+  }
+
   // Check for <think> or </think> tags and update the thinkingInContent state
   if (chunk.message.content.includes('<think>')) {
     stack.thinkingInContent = true;
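
This reordering is the substance of the fix: Ollama can set `done: true` on the same chunk that carries `tool_calls`, and because the old code checked `done && !content` first, such a chunk (whose `content` is empty) was emitted as a bare `stop` and the tool call never ran — the failure tracked in #10259. Moving the check below the `tool_calls` branch lets the tool call win. A condensed sketch of the resulting control flow, not the package's verbatim source (it omits the reasoning and `<think>`-tag handling shown above):

```ts
// Simplified sketch of the fixed ordering. A final chunk may carry both
// `done: true` and `tool_calls`; the tool_calls branch must run first or
// the call is swallowed by the `stop` event.
type SketchChunk = {
  done: boolean;
  message: { content: string; tool_calls?: unknown[] };
};

function transform(chunk: SketchChunk, id: string) {
  if (chunk.message.tool_calls?.length) {
    return { data: chunk.message.tool_calls, id, type: 'tool_calls' as const };
  }
  // Only after tool calls are handled may an empty final chunk become `stop`.
  if (chunk.done && !chunk.message.content) {
    return { data: 'finished', id, type: 'stop' as const };
  }
  return { data: chunk.message.content, id, type: 'text' as const };
}
```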