@lobehub/chat 1.56.3 → 1.56.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,57 @@
|
|
2
2
|
|
3
3
|
# Changelog
|
4
4
|
|
5
|
+
### [Version 1.56.5](https://github.com/lobehub/lobe-chat/compare/v1.56.4...v1.56.5)
|
6
|
+
|
7
|
+
<sup>Released on **2025-02-16**</sup>
|
8
|
+
|
9
|
+
#### 🐛 Bug Fixes
|
10
|
+
|
11
|
+
- **misc**: Match o1 series models more robust in Azure OpenAI provider, set max_completion_tokens to null for Azure OpenAI.
|
12
|
+
|
13
|
+
<br/>
|
14
|
+
|
15
|
+
<details>
|
16
|
+
<summary><kbd>Improvements and Fixes</kbd></summary>
|
17
|
+
|
18
|
+
#### What's fixed
|
19
|
+
|
20
|
+
- **misc**: Match o1 series models more robust in Azure OpenAI provider, closes [#6193](https://github.com/lobehub/lobe-chat/issues/6193) ([f444e66](https://github.com/lobehub/lobe-chat/commit/f444e66))
|
21
|
+
- **misc**: Set max_completion_tokens to null for Azure OpenAI, closes [#6198](https://github.com/lobehub/lobe-chat/issues/6198) ([e9e8da4](https://github.com/lobehub/lobe-chat/commit/e9e8da4))
|
22
|
+
|
23
|
+
</details>
|
24
|
+
|
25
|
+
<div align="right">
|
26
|
+
|
27
|
+
[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
|
28
|
+
|
29
|
+
</div>
|
30
|
+
|
31
|
+
### [Version 1.56.4](https://github.com/lobehub/lobe-chat/compare/v1.56.3...v1.56.4)
|
32
|
+
|
33
|
+
<sup>Released on **2025-02-16**</sup>
|
34
|
+
|
35
|
+
#### 🐛 Bug Fixes
|
36
|
+
|
37
|
+
- **misc**: Fix ai provider description not show correctly.
|
38
|
+
|
39
|
+
<br/>
|
40
|
+
|
41
|
+
<details>
|
42
|
+
<summary><kbd>Improvements and Fixes</kbd></summary>
|
43
|
+
|
44
|
+
#### What's fixed
|
45
|
+
|
46
|
+
- **misc**: Fix ai provider description not show correctly, closes [#6199](https://github.com/lobehub/lobe-chat/issues/6199) ([3e8d9c5](https://github.com/lobehub/lobe-chat/commit/3e8d9c5))
|
47
|
+
|
48
|
+
</details>
|
49
|
+
|
50
|
+
<div align="right">
|
51
|
+
|
52
|
+
[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
|
53
|
+
|
54
|
+
</div>
|
55
|
+
|
5
56
|
### [Version 1.56.3](https://github.com/lobehub/lobe-chat/compare/v1.56.2...v1.56.3)
|
6
57
|
|
7
58
|
<sup>Released on **2025-02-16**</sup>
|
package/changelog/v1.json
CHANGED
@@ -1,4 +1,22 @@
|
|
1
1
|
[
|
2
|
+
{
|
3
|
+
"children": {
|
4
|
+
"fixes": [
|
5
|
+
"Match o1 series models more robust in Azure OpenAI provider, set max_completion_tokens to null for Azure OpenAI."
|
6
|
+
]
|
7
|
+
},
|
8
|
+
"date": "2025-02-16",
|
9
|
+
"version": "1.56.5"
|
10
|
+
},
|
11
|
+
{
|
12
|
+
"children": {
|
13
|
+
"fixes": [
|
14
|
+
"Fix ai provider description not show correctly."
|
15
|
+
]
|
16
|
+
},
|
17
|
+
"date": "2025-02-16",
|
18
|
+
"version": "1.56.4"
|
19
|
+
},
|
2
20
|
{
|
3
21
|
"children": {
|
4
22
|
"improvements": [
|
package/package.json
CHANGED
@@ -1,6 +1,6 @@
|
|
1
1
|
{
|
2
2
|
"name": "@lobehub/chat",
|
3
|
-
"version": "1.56.3",
|
3
|
+
"version": "1.56.5",
|
4
4
|
"description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
|
5
5
|
"keywords": [
|
6
6
|
"framework",
|
@@ -170,6 +170,7 @@ export class AiProviderModel {
|
|
170
170
|
const query = this.db
|
171
171
|
.select({
|
172
172
|
checkModel: aiProviders.checkModel,
|
173
|
+
description: aiProviders.description,
|
173
174
|
enabled: aiProviders.enabled,
|
174
175
|
fetchOnClient: aiProviders.fetchOnClient,
|
175
176
|
id: aiProviders.id,
|
@@ -32,13 +32,13 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
|
|
32
32
|
async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
|
33
33
|
const { messages, model, ...params } = payload;
|
34
34
|
// o1 series models on Azure OpenAI does not support streaming currently
|
35
|
-
const enableStreaming = model.startsWith('o1') ? false : (params.stream ?? true);
|
35
|
+
const enableStreaming = model.includes('o1') ? false : (params.stream ?? true);
|
36
36
|
try {
|
37
37
|
const response = await this.client.chat.completions.create({
|
38
38
|
messages: messages as OpenAI.ChatCompletionMessageParam[],
|
39
39
|
model,
|
40
40
|
...params,
|
41
|
-
max_completion_tokens:
|
41
|
+
max_completion_tokens: null,
|
42
42
|
stream: enableStreaming,
|
43
43
|
tool_choice: params.tools ? 'auto' : undefined,
|
44
44
|
});
|