@lobehub/chat 1.111.3 → 1.111.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md
CHANGED
```diff
@@ -2,6 +2,31 @@
 
 # Changelog
 
+### [Version 1.111.4](https://github.com/lobehub/lobe-chat/compare/v1.111.3...v1.111.4)
+
+<sup>Released on **2025-08-09**</sup>
+
+#### 🐛 Bug Fixes
+
+- **pricing**: Adjust cachedInput values for GPT-5 models.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **pricing**: Adjust cachedInput values for GPT-5 models, closes [#8723](https://github.com/lobehub/lobe-chat/issues/8723) ([652bf08](https://github.com/lobehub/lobe-chat/commit/652bf08))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.111.3](https://github.com/lobehub/lobe-chat/compare/v1.111.2...v1.111.3)
 
 <sup>Released on **2025-08-09**</sup>
```
package/changelog/v1.json
CHANGED
````diff
@@ -84,12 +84,16 @@ We need to configure an S3 storage service in the server-side database to store
 
 <Image alt={'Configure allowed site domain'} src={'https://github.com/lobehub/lobe-chat/assets/28616219/dfcc2cb3-2958-4498-a8a4-51bec584fe7d'} />
 
+<Callout type={'info'}>
+  If you also plan to use the desktop client, add <code>http://localhost:3015</code> to <code>AllowedOrigins</code> so the desktop client (running locally) can access R2.
+</Callout>
+
 Example configuration is as follows:
 
 ```json
 [
   {
-    "AllowedOrigins": ["https://your-project.vercel.app"],
+    "AllowedOrigins": ["https://your-project.vercel.app", "http://localhost:3015"],
     "AllowedMethods": ["GET", "PUT", "HEAD", "POST", "DELETE"],
     "AllowedHeaders": ["*"]
   }
````
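To verify that the updated rule actually covers the desktop client, a minimal sketch like the one below (TypeScript, Node 18+; the bucket URL is a placeholder, not something shipped with this package) issues the same CORS preflight the desktop client would send from `http://localhost:3015` and prints what R2 allows:

```ts
// Sketch only: replace <account-id> and <bucket> with your own R2 values.
const bucketUrl = 'https://<account-id>.r2.cloudflarestorage.com/<bucket>/probe';

async function checkDesktopCors(): Promise<void> {
  // Same preflight a browser or the desktop client sends before an upload (PUT).
  const res = await fetch(bucketUrl, {
    method: 'OPTIONS',
    headers: {
      Origin: 'http://localhost:3015',
      'Access-Control-Request-Method': 'PUT',
    },
  });

  // With the AllowedOrigins rule above in place, this header should echo
  // http://localhost:3015 (a missing header means the origin is not allowed).
  console.log(res.status, res.headers.get('access-control-allow-origin'));
}

checkDesktopCors().catch(console.error);
```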
````diff
@@ -82,13 +82,17 @@ tags:
 Add a CORS rule that allows requests from your domain (in the example above, `https://your-project.vercel.app`):
 
 <Image alt={'Configure your allowed site domain'} src={'https://github.com/lobehub/lobe-chat/assets/28616219/dfcc2cb3-2958-4498-a8a4-51bec584fe7d'} />
+<Callout type={'info'}>
+  If you also plan to use the desktop client, add <code>http://localhost:3015</code> to <code>AllowedOrigins</code> so the desktop client (running locally) can access R2.
+</Callout>
+
 
 An example configuration is as follows:
 
 ```json
 [
   {
-    "AllowedOrigins": ["https://your-project.vercel.app"],
+    "AllowedOrigins": ["https://your-project.vercel.app", "http://localhost:3015"],
     "AllowedMethods": ["GET", "PUT", "HEAD", "POST", "DELETE"],
     "AllowedHeaders": ["*"]
   }
````
package/package.json
CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.111.3",
+  "version": "1.111.4",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
```
```diff
@@ -37,7 +37,7 @@ export const openaiChatModels: AIChatModelCard[] = [
       units: [
         { name: 'textInput', rate: 1.25, strategy: 'fixed', unit: 'millionTokens' },
         { name: 'textOutput', rate: 10, strategy: 'fixed', unit: 'millionTokens' },
-        { name: 'textInput_cacheRead', rate: 0.
+        { name: 'textInput_cacheRead', rate: 0.125, strategy: 'fixed', unit: 'millionTokens' },
       ],
     },
     releasedAt: '2025-08-07',
@@ -65,7 +65,7 @@ export const openaiChatModels: AIChatModelCard[] = [
       units: [
         { name: 'textInput', rate: 0.25, strategy: 'fixed', unit: 'millionTokens' },
         { name: 'textOutput', rate: 2, strategy: 'fixed', unit: 'millionTokens' },
-        { name: 'textInput_cacheRead', rate: 0.
+        { name: 'textInput_cacheRead', rate: 0.025, strategy: 'fixed', unit: 'millionTokens' },
       ],
     },
     releasedAt: '2025-08-07',
@@ -90,7 +90,7 @@ export const openaiChatModels: AIChatModelCard[] = [
       units: [
         { name: 'textInput', rate: 0.05, strategy: 'fixed', unit: 'millionTokens' },
         { name: 'textOutput', rate: 0.4, strategy: 'fixed', unit: 'millionTokens' },
-        { name: 'textInput_cacheRead', rate: 0.
+        { name: 'textInput_cacheRead', rate: 0.005, strategy: 'fixed', unit: 'millionTokens' },
       ],
     },
     releasedAt: '2025-08-07',
@@ -111,7 +111,7 @@ export const openaiChatModels: AIChatModelCard[] = [
       units: [
         { name: 'textInput', rate: 1.25, strategy: 'fixed', unit: 'millionTokens' },
         { name: 'textOutput', rate: 10, strategy: 'fixed', unit: 'millionTokens' },
-        { name: 'textInput_cacheRead', rate: 0.
+        { name: 'textInput_cacheRead', rate: 0.125, strategy: 'fixed', unit: 'millionTokens' },
       ],
     },
     releasedAt: '2025-08-07',
```
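The hunks above are the GPT-5 family pricing entries referenced by the changelog fix; each `rate` is USD per million tokens. As a rough illustration of what the corrected `textInput_cacheRead` values mean in practice, here is a hypothetical helper (a sketch, not code from this package) that estimates request cost from these units:

```ts
// Hypothetical cost estimator built on the unit shape shown in the diff above.
type PricingUnit = {
  name: 'textInput' | 'textOutput' | 'textInput_cacheRead';
  rate: number; // USD per million tokens
  strategy: 'fixed';
  unit: 'millionTokens';
};

// GPT-5 rates as they stand after this change.
const gpt5Units: PricingUnit[] = [
  { name: 'textInput', rate: 1.25, strategy: 'fixed', unit: 'millionTokens' },
  { name: 'textOutput', rate: 10, strategy: 'fixed', unit: 'millionTokens' },
  { name: 'textInput_cacheRead', rate: 0.125, strategy: 'fixed', unit: 'millionTokens' },
];

function estimateCostUSD(
  units: PricingUnit[],
  tokens: { uncachedInput: number; cachedInput: number; output: number },
): number {
  const rate = (name: PricingUnit['name']) => units.find((u) => u.name === name)?.rate ?? 0;
  return (
    (tokens.uncachedInput * rate('textInput') +
      tokens.cachedInput * rate('textInput_cacheRead') +
      tokens.output * rate('textOutput')) /
    1_000_000
  );
}

// Example: 100k prompt tokens of which 80k are cache hits, plus 5k output tokens.
// 20_000 * 1.25/1e6 + 80_000 * 0.125/1e6 + 5_000 * 10/1e6 ≈ $0.085
console.log(estimateCostUSD(gpt5Units, { uncachedInput: 20_000, cachedInput: 80_000, output: 5_000 }));
```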