@lobehub/chat 1.26.7 → 1.26.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.26.8](https://github.com/lobehub/lobe-chat/compare/v1.26.7...v1.26.8)
6
+
7
+ <sup>Released on **2024-10-29**</sup>
8
+
9
+ #### 🐛 Bug Fixes
10
+
11
+ - **misc**: Update zhipu param process.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's fixed
19
+
20
+ - **misc**: Update zhipu param process, closes [#4523](https://github.com/lobehub/lobe-chat/issues/4523) ([3317fbd](https://github.com/lobehub/lobe-chat/commit/3317fbd))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
5
30
  ### [Version 1.26.7](https://github.com/lobehub/lobe-chat/compare/v1.26.6...v1.26.7)
6
31
 
7
32
  <sup>Released on **2024-10-29**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.26.7",
3
+ "version": "1.26.8",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -107,6 +107,26 @@ describe('LobeZhipuAI', () => {
107
107
  { content: [{ type: 'text', text: 'Hello again' }], role: 'user' },
108
108
  ],
109
109
  model: 'glm-4',
110
+ temperature: 1.6,
111
+ top_p: 1,
112
+ });
113
+
114
+ const calledWithParams = spyOn.mock.calls[0][0];
115
+
116
+ expect(calledWithParams.messages[1].content).toEqual([{ type: 'text', text: 'Hello again' }]);
117
+ expect(calledWithParams.temperature).toBe(0.8); // temperature should be divided by two
118
+ expect(calledWithParams.top_p).toEqual(1);
119
+ });
120
+
121
+ it('should pass parameters correctly', async () => {
122
+ const spyOn = vi.spyOn(instance['client'].chat.completions, 'create');
123
+
124
+ await instance.chat({
125
+ messages: [
126
+ { content: 'Hello', role: 'user' },
127
+ { content: [{ type: 'text', text: 'Hello again' }], role: 'user' },
128
+ ],
129
+ model: 'glm-4-alltools',
110
130
  temperature: 0,
111
131
  top_p: 1,
112
132
  });
@@ -114,9 +134,8 @@ describe('LobeZhipuAI', () => {
114
134
  const calledWithParams = spyOn.mock.calls[0][0];
115
135
 
116
136
  expect(calledWithParams.messages[1].content).toEqual([{ type: 'text', text: 'Hello again' }]);
117
- expect(calledWithParams.temperature).toBe(0); // temperature 0 should be undefined
118
- expect((calledWithParams as any).do_sample).toBeTruthy(); // temperature 0 should be undefined
119
- expect(calledWithParams.top_p).toEqual(1); // top_p should be transformed correctly
137
+ expect(calledWithParams.temperature).toBe(0.01);
138
+ expect(calledWithParams.top_p).toEqual(0.99);
120
139
  });
121
140
 
122
141
  describe('Error', () => {
@@ -6,12 +6,24 @@ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
6
6
  export const LobeZhipuAI = LobeOpenAICompatibleFactory({
7
7
  baseURL: 'https://open.bigmodel.cn/api/paas/v4',
8
8
  chatCompletion: {
9
- handlePayload: ({ temperature, ...payload }: ChatStreamPayload) =>
9
+ handlePayload: ({ model, temperature, top_p, ...payload }: ChatStreamPayload) =>
10
10
  ({
11
11
  ...payload,
12
- do_sample: temperature === 0,
12
+ model,
13
13
  stream: true,
14
- temperature,
14
+ ...(model === "glm-4-alltools" ? {
15
+ temperature: temperature !== undefined
16
+ ? Math.max(0.01, Math.min(0.99, temperature / 2))
17
+ : undefined,
18
+ top_p: top_p !== undefined
19
+ ? Math.max(0.01, Math.min(0.99, top_p))
20
+ : undefined,
21
+ } : {
22
+ temperature: temperature !== undefined
23
+ ? temperature / 2
24
+ : undefined,
25
+ top_p,
26
+ }),
15
27
  }) as OpenAI.ChatCompletionCreateParamsStreaming,
16
28
  },
17
29
  debug: {