@lobehub/chat 1.26.7 → 1.26.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,48 @@
 
  # Changelog
 
+ ### [Version 1.26.9](https://github.com/lobehub/lobe-chat/compare/v1.26.8...v1.26.9)
+
+ <sup>Released on **2024-10-29**</sup>
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.26.8](https://github.com/lobehub/lobe-chat/compare/v1.26.7...v1.26.8)
+
+ <sup>Released on **2024-10-29**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Update zhipu param process.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Update zhipu param process, closes [#4523](https://github.com/lobehub/lobe-chat/issues/4523) ([3317fbd](https://github.com/lobehub/lobe-chat/commit/3317fbd))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.26.7](https://github.com/lobehub/lobe-chat/compare/v1.26.6...v1.26.7)
 
  <sup>Released on **2024-10-29**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/chat",
- "version": "1.26.7",
+ "version": "1.26.9",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -107,6 +107,26 @@ describe('LobeZhipuAI', () => {
  { content: [{ type: 'text', text: 'Hello again' }], role: 'user' },
  ],
  model: 'glm-4',
+ temperature: 1.6,
+ top_p: 1,
+ });
+
+ const calledWithParams = spyOn.mock.calls[0][0];
+
+ expect(calledWithParams.messages[1].content).toEqual([{ type: 'text', text: 'Hello again' }]);
+ expect(calledWithParams.temperature).toBe(0.8); // temperature should be divided by two
+ expect(calledWithParams.top_p).toEqual(1);
+ });
+
+ it('should pass parameters correctly', async () => {
+ const spyOn = vi.spyOn(instance['client'].chat.completions, 'create');
+
+ await instance.chat({
+ messages: [
+ { content: 'Hello', role: 'user' },
+ { content: [{ type: 'text', text: 'Hello again' }], role: 'user' },
+ ],
+ model: 'glm-4-alltools',
  temperature: 0,
  top_p: 1,
  });
@@ -114,9 +134,8 @@ describe('LobeZhipuAI', () => {
  const calledWithParams = spyOn.mock.calls[0][0];
 
  expect(calledWithParams.messages[1].content).toEqual([{ type: 'text', text: 'Hello again' }]);
- expect(calledWithParams.temperature).toBe(0); // temperature 0 should be undefined
- expect((calledWithParams as any).do_sample).toBeTruthy(); // temperature 0 should be undefined
- expect(calledWithParams.top_p).toEqual(1); // top_p should be transformed correctly
+ expect(calledWithParams.temperature).toBe(0.01);
+ expect(calledWithParams.top_p).toEqual(0.99);
  });
 
  describe('Error', () => {
@@ -6,12 +6,24 @@ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
  export const LobeZhipuAI = LobeOpenAICompatibleFactory({
  baseURL: 'https://open.bigmodel.cn/api/paas/v4',
  chatCompletion: {
- handlePayload: ({ temperature, ...payload }: ChatStreamPayload) =>
+ handlePayload: ({ model, temperature, top_p, ...payload }: ChatStreamPayload) =>
  ({
  ...payload,
- do_sample: temperature === 0,
+ model,
  stream: true,
- temperature,
+ ...(model === "glm-4-alltools" ? {
+ temperature: temperature !== undefined
+ ? Math.max(0.01, Math.min(0.99, temperature / 2))
+ : undefined,
+ top_p: top_p !== undefined
+ ? Math.max(0.01, Math.min(0.99, top_p))
+ : undefined,
+ } : {
+ temperature: temperature !== undefined
+ ? temperature / 2
+ : undefined,
+ top_p,
+ }),
  }) as OpenAI.ChatCompletionCreateParamsStreaming,
  },
  debug: {
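
The net effect of this hunk: Zhipu requests now always halve the OpenAI-style `temperature` (mapping the 0–2 range onto a 0–1 range) and, for `glm-4-alltools` only, clamp both `temperature` and `top_p` into [0.01, 0.99], replacing the old `do_sample: temperature === 0` special case. A minimal standalone sketch of the same mapping — `normalizeZhipuParams` and its types are illustrative names, not part of the package:

```ts
interface SamplingParams {
  temperature?: number;
  top_p?: number;
}

// clamp into [0.01, 0.99], presumably because glm-4-alltools rejects boundary values
const clamp = (value: number) => Math.max(0.01, Math.min(0.99, value));

const normalizeZhipuParams = (model: string, { temperature, top_p }: SamplingParams): SamplingParams =>
  model === 'glm-4-alltools'
    ? {
        // halved temperature and raw top_p are both clamped for this model
        temperature: temperature !== undefined ? clamp(temperature / 2) : undefined,
        top_p: top_p !== undefined ? clamp(top_p) : undefined,
      }
    : {
        // other Zhipu models: halve temperature, pass top_p through unchanged
        temperature: temperature !== undefined ? temperature / 2 : undefined,
        top_p,
      };

// Values matching the assertions in the updated tests:
console.log(normalizeZhipuParams('glm-4', { temperature: 1.6, top_p: 1 })); // { temperature: 0.8, top_p: 1 }
console.log(normalizeZhipuParams('glm-4-alltools', { temperature: 0, top_p: 1 })); // { temperature: 0.01, top_p: 0.99 }
```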
@@ -10,6 +10,7 @@ export const LANGCHAIN_SUPPORT_TEXT_LIST = [
 
  'sh',
  'patch',
+ 'log',
  // js
  'js',
  'jsx',
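
This one-line change adds `log` to the plain-text allowlist, presumably so `.log` uploads can be parsed as text. A minimal sketch of how such an extension allowlist is typically consumed — the `isSupportedTextFile` helper and the truncated list are illustrative; only the `'log'` entry comes from this diff:

```ts
// illustrative, truncated copy of the allowlist; only 'log' is new in this diff
const LANGCHAIN_SUPPORT_TEXT_LIST = ['sh', 'patch', 'log', 'js', 'jsx' /* ... */];

// hypothetical helper: check a filename's extension against the allowlist
const isSupportedTextFile = (filename: string): boolean => {
  const ext = filename.split('.').pop()?.toLowerCase() ?? '';
  return LANGCHAIN_SUPPORT_TEXT_LIST.includes(ext);
};

console.log(isSupportedTextFile('build.log')); // true now that 'log' is allowlisted
```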
@@ -0,0 +1,15 @@
+ import { ChatFileItem } from '@/types/message';
+
+ const filePrompt = (item: ChatFileItem) =>
+ `<file id="${item.id}" name="${item.name}" type="${item.fileType}" size="${item.size}" url="${item.url}"></file>`;
+
+ export const filePrompts = (fileList: ChatFileItem[]) => {
+ if (fileList.length === 0) return '';
+
+ const prompt = `<files>
+ <files_docstring>here are user upload files you can refer to</files_docstring>
+ ${fileList.map((item) => filePrompt(item)).join('\n')}
+ </files>`;
+
+ return prompt.trim();
+ };
@@ -0,0 +1,14 @@
+ import { ChatImageItem } from '@/types/message';
+
+ const imagePrompt = (item: ChatImageItem) => `<image name="${item.alt}" url="${item.url}"></image>`;
+
+ export const imagesPrompts = (imageList: ChatImageItem[]) => {
+ if (imageList.length === 0) return '';
+
+ const prompt = `<images>
+ <images_docstring>here are user upload images you can refer to</images_docstring>
+ ${imageList.map((item) => imagePrompt(item)).join('\n')}
+ </images>`;
+
+ return prompt.trim();
+ };
@@ -0,0 +1,19 @@
+ import { ChatFileItem, ChatImageItem } from '@/types/message';
+
+ import { filePrompts } from './file';
+ import { imagesPrompts } from './image';
+
+ export const filesPrompts = ({
+ imageList,
+ fileList,
+ }: {
+ fileList?: ChatFileItem[];
+ imageList: ChatImageItem[];
+ }) => {
+ const prompt = `<files_info>
+ ${imagesPrompts(imageList)}
+ ${fileList ? filePrompts(fileList) : ''}
+ </files_info>`;
+
+ return prompt.trim();
+ };
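
Together, these three new modules render message attachments into an XML-style `<files_info>` block. A usage sketch with made-up attachment data; the expected output follows directly from the template strings above:

```ts
import { filesPrompts } from '@/prompts/files';

const prompt = filesPrompts({
  // hypothetical sample data, shaped like ChatImageItem / ChatFileItem
  imageList: [{ alt: 'cat.png', id: 'img1', url: 'https://example.com/cat.png' }],
  fileList: [
    { fileType: 'text/plain', id: 'file1', name: 'notes.txt', size: 2048, url: 'https://example.com/notes.txt' },
  ],
});

// prompt now contains (modulo whitespace):
// <files_info>
// <images>
// <images_docstring>here are user upload images you can refer to</images_docstring>
// <image name="cat.png" url="https://example.com/cat.png"></image>
// </images>
// <files>
// <files_docstring>here are user upload files you can refer to</files_docstring>
// <file id="file1" name="notes.txt" type="text/plain" size="2048" url="https://example.com/notes.txt"></file>
// </files>
// </files_info>
```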
@@ -126,6 +126,92 @@ describe('ChatService', () => {
  );
  });
 
+ describe('handle with files content', () => {
+ it('should include files', async () => {
131
+ const messages = [
132
+ {
133
+ content: 'Hello',
134
+ role: 'user',
135
+ imageList: [
136
+ {
137
+ id: 'imagecx1',
138
+ url: 'http://example.com/xxx0asd-dsd.png',
139
+ alt: 'ttt.png',
140
+ },
141
+ ],
142
+ fileList: [
143
+ {
144
+ fileType: 'plain/txt',
145
+ size: 100000,
146
+ id: 'file1',
147
+ url: 'http://abc.com/abc.txt',
148
+ name: 'abc.png',
149
+ },
150
+ {
151
+ id: 'file_oKMve9qySLMI',
152
+ name: '2402.16667v1.pdf',
153
+ type: 'application/pdf',
154
+ size: 11256078,
155
+ url: 'https://xxx.com/ppp/480497/5826c2b8-fde0-4de1-a54b-a224d5e3d898.pdf',
156
+ },
157
+ ],
158
+ }, // Message with files
159
+ { content: 'Hi', role: 'tool', plugin: { identifier: 'plugin1', apiName: 'api1' } }, // Message with tool role
160
+ { content: 'Hey', role: 'assistant' }, // Regular assistant message
+ ] as ChatMessage[];
+
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
+ await chatService.createAssistantMessage({
+ messages,
+ plugins: [],
+ model: 'gpt-4o',
+ });
+
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
+ {
+ messages: [
+ {
+ content: [
+ {
+ text: `Hello
+
+ <files_info>
+ <images>
+ <images_docstring>here are user upload images you can refer to</images_docstring>
+ <image name="ttt.png" url="http://example.com/xxx0asd-dsd.png"></image>
+ </images>
+ <files>
+ <files_docstring>here are user upload files you can refer to</files_docstring>
+ <file id="file1" name="abc.png" type="plain/txt" size="100000" url="http://abc.com/abc.txt"></file>
+ <file id="file_oKMve9qySLMI" name="2402.16667v1.pdf" type="undefined" size="11256078" url="https://xxx.com/ppp/480497/5826c2b8-fde0-4de1-a54b-a224d5e3d898.pdf"></file>
+ </files>
+ </files_info>`,
+ type: 'text',
+ },
+ {
+ image_url: { detail: 'auto', url: 'http://example.com/xxx0asd-dsd.png' },
+ type: 'image_url',
+ },
+ ],
+ role: 'user',
+ },
+ {
+ content: 'Hi',
+ name: 'plugin1____api1',
+ role: 'tool',
+ },
+ {
+ content: 'Hey',
+ role: 'assistant',
+ },
+ ],
+ model: 'gpt-4o',
+ },
+ undefined,
+ );
+ });
+ });
+
  describe('should handle content correctly for vision models', () => {
  it('should include image content when with vision model', async () => {
  const messages = [
@@ -156,7 +242,18 @@ describe('ChatService', () => {
  messages: [
  {
  content: [
- { text: 'Hello', type: 'text' },
+ {
+ text: `Hello
+
+ <files_info>
+ <images>
+ <images_docstring>here are user upload images you can refer to</images_docstring>
+ <image name="abc.png" url="http://example.com/image.jpg"></image>
+ </images>
+
+ </files_info>`,
+ type: 'text',
+ },
  {
  image_url: { detail: 'auto', url: 'http://example.com/image.jpg' },
  type: 'image_url',
@@ -8,6 +8,7 @@ import { INBOX_SESSION_ID } from '@/const/session';
  import { DEFAULT_AGENT_CONFIG } from '@/const/settings';
  import { TracePayload, TraceTagMap } from '@/const/trace';
  import { AgentRuntime, ChatCompletionErrorPayload, ModelProvider } from '@/libs/agent-runtime';
+ import { filesPrompts } from '@/prompts/files';
  import { useSessionStore } from '@/store/session';
  import { sessionMetaSelectors } from '@/store/session/selectors';
  import { useToolStore } from '@/store/tool';
@@ -413,22 +414,15 @@ class ChatService {
  // for the models with visual ability, add image url to content
  // refs: https://platform.openai.com/docs/guides/vision/quick-start
  const getContent = (m: ChatMessage) => {
- if (!m.imageList) return m.content;
-
- const imageList = m.imageList;
-
- if (imageList.length === 0) return m.content;
-
- const canUploadFile = modelProviderSelectors.isModelEnabledUpload(model)(
- useUserStore.getState(),
- );
-
- if (!canUploadFile) {
+ // if the message has neither images nor files, return the plain content
+ if ((!m.imageList || m.imageList.length === 0) && (!m.fileList || m.fileList.length === 0))
  return m.content;
- }
 
+ const imageList = m.imageList || [];
+
+ const filesContext = filesPrompts({ fileList: m.fileList, imageList });
  return [
- { text: m.content, type: 'text' },
+ { text: m.content + '\n\n' + filesContext, type: 'text' },
  ...imageList.map(
  (i) => ({ image_url: { detail: 'auto', url: i.url }, type: 'image_url' }) as const,
  ),
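
The rewritten `getContent` drops the old early return for models without upload support (`isModelEnabledUpload`) and instead branches purely on message contents: a plain string when there are no attachments, otherwise a text part with the `<files_info>` block appended plus OpenAI-style `image_url` parts. A self-contained sketch of the new control flow, with simplified stand-ins for `ChatMessage` and `filesPrompts`:

```ts
interface ImageItem { alt: string; url: string }
interface FileItem { fileType: string; id: string; name: string; size: number; url: string }
interface Message { content: string; fileList?: FileItem[]; imageList?: ImageItem[] }

// stand-in for the real '@/prompts/files' implementation shown earlier
const filesPrompts = ({ fileList, imageList }: { fileList?: FileItem[]; imageList: ImageItem[] }) =>
  `<files_info>${imageList.length} image(s), ${fileList?.length ?? 0} file(s)</files_info>`;

const getContent = (m: Message) => {
  // no attachments at all: keep the plain string content
  if ((!m.imageList || m.imageList.length === 0) && (!m.fileList || m.fileList.length === 0))
    return m.content;

  const imageList = m.imageList || [];

  // render the <files_info> block and append it after the user text
  const filesContext = filesPrompts({ fileList: m.fileList, imageList });

  return [
    { text: m.content + '\n\n' + filesContext, type: 'text' as const },
    // images are additionally emitted as OpenAI vision image_url parts
    ...imageList.map((i) => ({ image_url: { detail: 'auto', url: i.url }, type: 'image_url' as const })),
  ];
};

// usage: a message with one image becomes [text part, image_url part]
console.log(getContent({ content: 'Hello', imageList: [{ alt: 'a.png', url: 'http://x/a.png' }] }));
```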