@lobehub/chat 1.17.6 → 1.17.7

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Note: this release of @lobehub/chat has been flagged as potentially problematic.

package/CHANGELOG.md CHANGED
@@ -2,6 +2,39 @@
 
 # Changelog
 
+### [Version 1.17.7](https://github.com/lobehub/lobe-chat/compare/v1.17.6...v1.17.7)
+
+<sup>Released on **2024-09-16**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Fix a corner case of `tools_call` with empty object.
+
+#### 💄 Styles
+
+- **misc**: Delete duplicate models in ollama.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Fix a corner case of `tools_call` with empty object, closes [#3955](https://github.com/lobehub/lobe-chat/issues/3955) ([d3fabdc](https://github.com/lobehub/lobe-chat/commit/d3fabdc))
+
+#### Styles
+
+- **misc**: Delete duplicate models in ollama, closes [#3989](https://github.com/lobehub/lobe-chat/issues/3989) ([ece60ee](https://github.com/lobehub/lobe-chat/commit/ece60ee))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
 ### [Version 1.17.6](https://github.com/lobehub/lobe-chat/compare/v1.17.5...v1.17.6)
 
 <sup>Released on **2024-09-15**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.17.6",
+  "version": "1.17.7",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -274,14 +274,6 @@ const Ollama: ModelProviderCard = {
       tokens: 4096,
       vision: true,
     },
-    {
-      description: 'MiniCPM-V 是 OpenBMB 推出的新一代多模态大模型,具备卓越的 OCR 识别和多模态理解能力,支持广泛的应用场景。',
-      displayName: 'MiniCPM-V 8B',
-      enabled: true,
-      id: 'minicpm-v:8b',
-      tokens: 128_000,
-      vision: true,
-    },
     {
       description: 'MiniCPM-V 是 OpenBMB 推出的新一代多模态大模型,具备卓越的 OCR 识别和多模态理解能力,支持广泛的应用场景。',
      displayName: 'MiniCPM-V 8B',
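
The removed object is identical to the MiniCPM-V 8B entry that immediately follows it, so the model card was listed twice in the Ollama provider. A lightweight guard against this class of regression is to assert that model ids are unique per provider. The sketch below is illustrative only: `ChatModelCard` is a stand-in shape inferred from this hunk, and `assertUniqueModelIds` is a hypothetical helper, not part of the lobe-chat codebase.

```ts
// Hypothetical duplicate-id guard for a provider's model list (sketch).
interface ChatModelCard {
  displayName?: string;
  id: string;
}

const assertUniqueModelIds = (models: ChatModelCard[]): void => {
  const seen = new Set<string>();
  for (const model of models) {
    // Two entries sharing an id (e.g. 'minicpm-v:8b') indicate a duplicate.
    if (seen.has(model.id)) throw new Error(`Duplicate model id: ${model.id}`);
    seen.add(model.id);
  }
};
```

The remaining hunks implement the `tools_call` fix in the OpenAI stream handling.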
@@ -162,6 +162,49 @@ describe('OpenAIStream', () => {
     );
   });
 
+  it('should handle content with tool_calls but is an empty object', async () => {
+    // data: {"id":"chatcmpl-A7pokGUqSov0JuMkhiHhWU9GRtAgJ", "object":"chat.completion.chunk", "created":1726430846, "model":"gpt-4o-2024-05-13", "choices":[{"index":0, "delta":{"content":" today", "role":"", "tool_calls":[]}, "finish_reason":"", "logprobs":""}], "prompt_annotations":[{"prompt_index":0, "content_filter_results":null}]}
+    const mockOpenAIStream = new ReadableStream({
+      start(controller) {
+        controller.enqueue({
+          choices: [
+            {
+              "index": 0,
+              "delta": {
+                "content": "Some contents",
+                "role": "",
+                "tool_calls": []
+              },
+              "finish_reason": "",
+              "logprobs": ""
+            }
+          ],
+          id: '456',
+        });
+
+        controller.close();
+      },
+    });
+
+    const onToolCallMock = vi.fn();
+
+    const protocolStream = OpenAIStream(mockOpenAIStream, {
+      onToolCall: onToolCallMock,
+    });
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual(
+      ['id: 456', 'event: text', `data: "Some contents"\n`].map((i) => `${i}\n`),
+    );
+  });
+
   it('should handle other delta data', async () => {
     const mockOpenAIStream = new ReadableStream({
       start(controller) {
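
The new test pins down the corner case from [#3955](https://github.com/lobehub/lobe-chat/issues/3955): some OpenAI-compatible endpoints stream a delta that carries ordinary text content together with an empty `tool_calls` array, and since an empty array is truthy in JavaScript, the old guard routed the chunk down the tool-call path and the text was lost. A minimal standalone sketch of the pitfall (not lobe-chat code):

```ts
// An empty array is truthy, so a bare `if (delta.tool_calls)` check
// misclassifies a plain text chunk as a tool-call chunk.
const delta = { content: 'Some contents', tool_calls: [] as unknown[] };

if (delta.tool_calls) {
  // This branch runs even though there is no tool call to process.
  console.log('old check: tool-call path');
}

if (Array.isArray(delta.tool_calls) && delta.tool_calls.length > 0) {
  console.log('stricter check: tool-call path');
} else {
  // With a length check in place, the chunk falls through to the text
  // branch and the content is emitted as an `event: text` chunk.
  console.log('stricter check: text path');
}
```

The hunk that follows shows the corresponding fix in the stream transformer.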
@@ -27,7 +27,7 @@ export const transformOpenAIStream = (
     return { data: chunk, id: chunk.id, type: 'data' };
   }
 
-  if (item.delta?.tool_calls) {
+  if (typeof item.delta?.tool_calls === 'object' && item.delta.tool_calls?.length > 0) {
     return {
       data: item.delta.tool_calls.map((value, index): StreamToolCallChunkData => {
         if (stack && !stack.tool) {
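
The committed guard works because `typeof [] === 'object'`, and the optional-chained `?.length` safely handles `null` and `undefined`, so the tool-call branch is now taken only for a non-empty array. An equivalent, slightly more self-documenting formulation would lean on `Array.isArray`; the helper below is a sketch under that assumption, and `hasToolCalls` is a hypothetical name rather than anything in the lobe-chat source:

```ts
// Sketch of an equivalent guard built on Array.isArray (hypothetical helper).
type Delta = { tool_calls?: unknown };

const hasToolCalls = (delta?: Delta): boolean => {
  const calls = delta?.tool_calls;
  return Array.isArray(calls) && calls.length > 0;
};

// hasToolCalls({ tool_calls: [] })            -> false (the fixed corner case)
// hasToolCalls({ tool_calls: [{ id: '1' }] }) -> true
// hasToolCalls({})                            -> false
// hasToolCalls(undefined)                     -> false
```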