@lobehub/chat 1.122.0 → 1.122.2

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,48 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.122.2](https://github.com/lobehub/lobe-chat/compare/v1.122.1...v1.122.2)
6
+
7
+ <sup>Released on **2025-09-04**</sup>
8
+
9
+ #### 🐛 Bug Fixes
10
+
11
+ - **modelProvider**: Add lmstudio to provider whitelist to enable fetchOnClient toggle.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's fixed
19
+
20
+ - **modelProvider**: Add lmstudio to provider whitelist to enable fetchOnClient toggle, closes [#9067](https://github.com/lobehub/lobe-chat/issues/9067) ([e58864f](https://github.com/lobehub/lobe-chat/commit/e58864f))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ### [Version 1.122.1](https://github.com/lobehub/lobe-chat/compare/v1.122.0...v1.122.1)
31
+
32
+ <sup>Released on **2025-09-04**</sup>
33
+
34
+ <br/>
35
+
36
+ <details>
37
+ <summary><kbd>Improvements and Fixes</kbd></summary>
38
+
39
+ </details>
40
+
41
+ <div align="right">
42
+
43
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
44
+
45
+ </div>
46
+
5
47
  ## [Version 1.122.0](https://github.com/lobehub/lobe-chat/compare/v1.121.1...v1.122.0)
6
48
 
7
49
  <sup>Released on **2025-09-04**</sup>
package/Dockerfile CHANGED
@@ -66,7 +66,7 @@ ENV NEXT_PUBLIC_ANALYTICS_UMAMI="${NEXT_PUBLIC_ANALYTICS_UMAMI}" \
66
66
  NEXT_PUBLIC_UMAMI_WEBSITE_ID="${NEXT_PUBLIC_UMAMI_WEBSITE_ID}"
67
67
 
68
68
  # Node
69
- ENV NODE_OPTIONS="--max-old-space-size=8192"
69
+ ENV NODE_OPTIONS="--max-old-space-size=6144"
70
70
 
71
71
  WORKDIR /app
72
72
 
@@ -74,7 +74,7 @@ ENV NEXT_PUBLIC_ANALYTICS_UMAMI="${NEXT_PUBLIC_ANALYTICS_UMAMI}" \
74
74
  NEXT_PUBLIC_UMAMI_WEBSITE_ID="${NEXT_PUBLIC_UMAMI_WEBSITE_ID}"
75
75
 
76
76
  # Node
77
- ENV NODE_OPTIONS="--max-old-space-size=8192"
77
+ ENV NODE_OPTIONS="--max-old-space-size=6144"
78
78
 
79
79
  WORKDIR /app
80
80
 
package/Dockerfile.pglite CHANGED
@@ -68,7 +68,7 @@ ENV NEXT_PUBLIC_ANALYTICS_UMAMI="${NEXT_PUBLIC_ANALYTICS_UMAMI}" \
68
68
  NEXT_PUBLIC_UMAMI_WEBSITE_ID="${NEXT_PUBLIC_UMAMI_WEBSITE_ID}"
69
69
 
70
70
  # Node
71
- ENV NODE_OPTIONS="--max-old-space-size=8192"
71
+ ENV NODE_OPTIONS="--max-old-space-size=6144"
72
72
 
73
73
  WORKDIR /app
74
74
 
package/README.md CHANGED
@@ -150,7 +150,7 @@ From productivity tools to development environments, discover new ways to extend
150
150
 
151
151
  **Peak Performance, Zero Distractions**
152
152
 
153
- Get the full LobeChat experience without browser limitations—lightweight, focused, and always ready to go. Our desktop application provides a dedicated environment for your AI interactions, ensuring optimal performance and minimal distractions.
153
+ Get the full LobeChat experience without browser limitations—comprehensive, focused, and always ready to go. Our desktop application provides a dedicated environment for your AI interactions, ensuring optimal performance and minimal distractions.
154
154
 
155
155
  Experience faster response times, better resource management, and a more stable connection to your AI assistant. The desktop app is designed for users who demand the best performance from their AI tools.
156
156
 
@@ -481,7 +481,7 @@ We deeply understand the importance of providing a seamless experience for users
481
481
  Therefore, we have adopted Progressive Web Application ([PWA](https://support.google.com/chrome/answer/9658361)) technology,
482
482
  a modern web technology that elevates web applications to an experience close to that of native apps.
483
483
 
484
- Through PWA, LobeChat can offer a highly optimized user experience on both desktop and mobile devices while maintaining its lightweight and high-performance characteristics.
484
+ Through PWA, LobeChat can offer a highly optimized user experience on both desktop and mobile devices while maintaining high-performance characteristics.
485
485
  Visually and in terms of feel, we have also meticulously designed the interface to ensure it is indistinguishable from native apps,
486
486
  providing smooth animations, responsive layouts, and adapting to different device screen resolutions.
487
487
 
package/changelog/v1.json CHANGED
@@ -1,4 +1,14 @@
1
1
  [
2
+ {
3
+ "children": {},
4
+ "date": "2025-09-04",
5
+ "version": "1.122.2"
6
+ },
7
+ {
8
+ "children": {},
9
+ "date": "2025-09-04",
10
+ "version": "1.122.1"
11
+ },
2
12
  {
3
13
  "children": {
4
14
  "features": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.122.0",
3
+ "version": "1.122.2",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -35,7 +35,7 @@
35
35
  "build-migrate-db": "bun run db:migrate",
36
36
  "build-sitemap": "tsx ./scripts/buildSitemapIndex/index.ts",
37
37
  "build:analyze": "NODE_OPTIONS=--max-old-space-size=6144 ANALYZE=true next build",
38
- "build:docker": "npm run prebuild && DOCKER=true next build && npm run build-sitemap",
38
+ "build:docker": "npm run prebuild && NODE_OPTIONS=--max-old-space-size=6144 DOCKER=true next build && npm run build-sitemap",
39
39
  "prebuild:electron": "cross-env NEXT_PUBLIC_IS_DESKTOP_APP=1 tsx scripts/prebuild.mts",
40
40
  "build:electron": "cross-env NODE_OPTIONS=--max-old-space-size=6144 NEXT_PUBLIC_IS_DESKTOP_APP=1 NEXT_PUBLIC_SERVICE_MODE=server next build",
41
41
  "db:generate": "drizzle-kit generate && npm run db:generate-client && npm run workflow:dbml",
@@ -25,7 +25,7 @@ const activeProviderConfig = (s: AIProviderStoreState) => s.aiProviderDetail;
25
25
  const isAiProviderConfigLoading = (id: string) => (s: AIProviderStoreState) =>
26
26
  s.activeAiProvider !== id;
27
27
 
28
- const providerWhitelist = new Set(['ollama']);
28
+ const providerWhitelist = new Set(['ollama', 'lmstudio']);
29
29
 
30
30
  const activeProviderKeyVaults = (s: AIProviderStoreState) => activeProviderConfig(s)?.keyVaults;
31
31
 
@@ -9,7 +9,7 @@ import { keyVaultsConfigSelectors } from './keyVaults';
9
9
  const isProviderEnabled = (provider: GlobalLLMProviderKey) => (s: UserStore) =>
10
10
  getProviderConfigById(provider)(s)?.enabled || false;
11
11
 
12
- const providerWhitelist = new Set(['ollama']);
12
+ const providerWhitelist = new Set(['ollama', 'lmstudio']);
13
13
  /**
14
14
  * @description The conditions to enable client fetch
15
15
  * 1. If no baseUrl and apikey input, force on Server.