@lobehub/chat 1.104.2 → 1.104.3

This diff covers the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
  
  # Changelog
  
+ ### [Version 1.104.3](https://github.com/lobehub/lobe-chat/compare/v1.104.2...v1.104.3)
+
+ <sup>Released on **2025-07-26**</sup>
+
+ #### 💄 Styles
+
+ - **misc**: Add Gemini 2.5 Flash-Lite GA model.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Styles
+
+ - **misc**: Add Gemini 2.5 Flash-Lite GA model, closes [#8539](https://github.com/lobehub/lobe-chat/issues/8539) ([404ac21](https://github.com/lobehub/lobe-chat/commit/404ac21))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.104.2](https://github.com/lobehub/lobe-chat/compare/v1.104.1...v1.104.2)
  
  <sup>Released on **2025-07-26**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
  [
+   {
+     "children": {
+       "improvements": [
+         "Add Gemini 2.5 Flash-Lite GA model."
+       ]
+     },
+     "date": "2025-07-26",
+     "version": "1.104.3"
+   },
    {
      "children": {
        "fixes": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.104.2",
+   "version": "1.104.3",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -153,7 +153,7 @@
    "@lobehub/icons": "^2.17.0",
    "@lobehub/market-sdk": "^0.22.7",
    "@lobehub/tts": "^2.0.1",
-   "@lobehub/ui": "^2.7.4",
+   "@lobehub/ui": "^2.7.5",
    "@modelcontextprotocol/sdk": "^1.16.0",
    "@neondatabase/serverless": "^1.0.1",
    "@next/third-parties": "^15.4.3",
@@ -241,7 +241,7 @@
    "react-fast-marquee": "^1.6.5",
    "react-hotkeys-hook": "^5.1.0",
    "react-i18next": "^15.6.1",
-   "react-layout-kit": "^1.9.2",
+   "react-layout-kit": "^2.0.0",
    "react-lazy-load": "^4.0.1",
    "react-pdf": "^9.2.1",
    "react-rnd": "^10.5.2",
@@ -32,6 +32,11 @@ const RootLayout = async ({ children, params, modal }: RootLayoutProps) => {
  
    return (
      <html dir={direction} lang={locale} suppressHydrationWarning>
+       <head>
+         {process.env.DEBUG_REACT_SCAN === '1' && (
+           <script crossOrigin="anonymous" src="https://unpkg.com/react-scan/dist/auto.global.js" />
+         )}
+       </head>
        <body>
          <NuqsAdapter>
            <GlobalProvider
@@ -17,6 +17,7 @@ const googleChatModels: AIChatModelCard[] = [
      id: 'gemini-2.5-pro',
      maxOutput: 65_536,
      pricing: {
+       cachedInput: 0.31, // prompts <= 200k tokens
        input: 1.25, // prompts <= 200k tokens
        output: 10, // prompts <= 200k tokens
      },
@@ -177,6 +178,32 @@ const googleChatModels: AIChatModelCard[] = [
      },
      type: 'chat',
    },
+   {
+     abilities: {
+       functionCall: true,
+       reasoning: true,
+       search: true,
+       vision: true,
+     },
+     contextWindowTokens: 1_048_576 + 65_536,
+     description: 'Gemini 2.5 Flash-Lite 是 Google 最小、性价比最高的模型,专为大规模使用而设计。',
+     displayName: 'Gemini 2.5 Flash-Lite',
+     enabled: true,
+     id: 'gemini-2.5-flash-lite',
+     maxOutput: 65_536,
+     pricing: {
+       cachedInput: 0.025,
+       input: 0.1,
+       output: 0.4,
+     },
+     releasedAt: '2025-07-22',
+     settings: {
+       extendParams: ['thinkingBudget'],
+       searchImpl: 'params',
+       searchProvider: 'google',
+     },
+     type: 'chat',
+   },
    {
      abilities: {
        functionCall: true,
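
The added description string translates to "Gemini 2.5 Flash-Lite is Google's smallest, most cost-effective model, built for large-scale use." The pricing fields appear to be USD per million tokens (input: 0.1 reads as $0.10 per 1M input tokens, consistent with the gemini-2.5-pro values above). A minimal cost-estimation sketch under that assumption; estimateCostUSD is a hypothetical helper, not part of the package:

    // Hypothetical helper: estimates request cost from a pricing card,
    // assuming each rate is USD per 1,000,000 tokens.
    interface ModelPricing {
      cachedInput?: number;
      input?: number;
      output?: number;
    }

    const estimateCostUSD = (
      pricing: ModelPricing,
      usage: { cachedInputTokens?: number; inputTokens: number; outputTokens: number },
    ): number => {
      const perToken = (rate = 0) => rate / 1_000_000;
      return (
        (usage.cachedInputTokens ?? 0) * perToken(pricing.cachedInput) +
        usage.inputTokens * perToken(pricing.input) +
        usage.outputTokens * perToken(pricing.output)
      );
    };

    // Example for gemini-2.5-flash-lite: 10,000 input + 2,000 output tokens
    // => 10_000 * (0.1 / 1e6) + 2_000 * (0.4 / 1e6) = $0.0018
    estimateCostUSD({ cachedInput: 0.025, input: 0.1, output: 0.4 }, { inputTokens: 10_000, outputTokens: 2_000 });
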
@@ -17,6 +17,7 @@ const vertexaiChatModels: AIChatModelCard[] = [
      id: 'gemini-2.5-pro',
      maxOutput: 65_536,
      pricing: {
+       cachedInput: 0.31, // prompts <= 200k tokens
        input: 1.25, // prompts <= 200k tokens
        output: 10, // prompts <= 200k tokens
      },
@@ -80,6 +81,7 @@ const vertexaiChatModels: AIChatModelCard[] = [
      id: 'gemini-2.5-flash',
      maxOutput: 65_536,
      pricing: {
+       cachedInput: 0.075,
        input: 0.3,
        output: 2.5,
      },
@@ -109,6 +111,31 @@ const vertexaiChatModels: AIChatModelCard[] = [
      releasedAt: '2025-04-17',
      type: 'chat',
    },
+   {
+     abilities: {
+       functionCall: true,
+       reasoning: true,
+       search: true,
+       vision: true,
+     },
+     contextWindowTokens: 1_000_000 + 64_000,
+     description: 'Gemini 2.5 Flash-Lite 是 Google 最小、性价比最高的模型,专为大规模使用而设计。',
+     displayName: 'Gemini 2.5 Flash-Lite',
+     enabled: true,
+     id: 'gemini-2.5-flash-lite',
+     maxOutput: 64_000,
+     pricing: {
+       cachedInput: 0.025,
+       input: 0.1,
+       output: 0.4,
+     },
+     releasedAt: '2025-07-22',
+     settings: {
+       searchImpl: 'params',
+       searchProvider: 'google',
+     },
+     type: 'chat',
+   },
    {
      abilities: {
        functionCall: true,
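
Note that the two Flash-Lite entries size their context windows differently: the Google AI entry above uses 1_048_576 + 65_536 = 1,114,112 tokens with maxOutput: 65_536, while this Vertex AI entry uses 1_000_000 + 64_000 = 1,064,000 tokens with maxOutput: 64_000, and it omits the thinkingBudget extend parameter.
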
@@ -13,7 +13,6 @@ import AppTheme from './AppTheme';
  import ImportSettings from './ImportSettings';
  import Locale from './Locale';
  import QueryProvider from './Query';
- import ReactScan from './ReactScan';
  import StoreInitialization from './StoreInitialization';
  import StyleRegistry from './StyleRegistry';
  
@@ -61,7 +60,6 @@ const GlobalLayout = async ({
          <StoreInitialization />
          <Suspense>
            <ImportSettings />
-           <ReactScan />
            {process.env.NODE_ENV === 'development' && <DevPanel />}
          </Suspense>
        </ServerConfigStoreProvider>
@@ -1,15 +0,0 @@
- 'use client';
-
- import Script from 'next/script';
- import { useQueryState } from 'nuqs';
- import React, { memo } from 'react';
-
- import { withSuspense } from '@/components/withSuspense';
-
- const ReactScan = memo(() => {
-   const [debug] = useQueryState('debug', { clearOnDefault: true, defaultValue: '' });
-
-   return !!debug && <Script src="https://unpkg.com/react-scan/dist/auto.global.js" />;
- });
-
- export default withSuspense(ReactScan);
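
Taken together with the RootLayout and GlobalLayout hunks above, this deletion replaces the query-param-gated client-side react-scan loader with a server-rendered <head> script gated by an environment variable. A minimal sketch of the new gating under that reading (the ReactScanScript name is illustrative, not from the package):

    // Illustrative only: mirrors the conditional added to RootLayout above.
    // react-scan now loads only when DEBUG_REACT_SCAN=1 is set in the environment,
    // instead of when a ?debug= query parameter is present at runtime.
    const ReactScanScript = () =>
      process.env.DEBUG_REACT_SCAN === '1' ? (
        <script crossOrigin="anonymous" src="https://unpkg.com/react-scan/dist/auto.global.js" />
      ) : null;

    export default ReactScanScript;
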