@lobehub/chat 1.139.0 → 1.139.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52):
  1. package/.github/ISSUE_TEMPLATE/1_bug_report.yml +63 -19
  2. package/CHANGELOG.md +58 -0
  3. package/changelog/v1.json +18 -0
  4. package/locales/ar/models.json +12 -0
  5. package/locales/ar/welcome.json +0 -17
  6. package/locales/bg-BG/models.json +12 -0
  7. package/locales/bg-BG/welcome.json +0 -17
  8. package/locales/de-DE/models.json +12 -0
  9. package/locales/de-DE/welcome.json +0 -17
  10. package/locales/en-US/models.json +12 -0
  11. package/locales/en-US/welcome.json +0 -17
  12. package/locales/es-ES/models.json +12 -0
  13. package/locales/es-ES/welcome.json +0 -17
  14. package/locales/fa-IR/models.json +12 -0
  15. package/locales/fa-IR/welcome.json +0 -17
  16. package/locales/fr-FR/models.json +12 -0
  17. package/locales/fr-FR/welcome.json +0 -17
  18. package/locales/it-IT/models.json +12 -0
  19. package/locales/it-IT/welcome.json +0 -17
  20. package/locales/ja-JP/models.json +12 -0
  21. package/locales/ja-JP/welcome.json +0 -17
  22. package/locales/ko-KR/chat.json +1 -1
  23. package/locales/ko-KR/models.json +12 -0
  24. package/locales/ko-KR/welcome.json +0 -17
  25. package/locales/nl-NL/models.json +12 -0
  26. package/locales/nl-NL/welcome.json +0 -17
  27. package/locales/pl-PL/models.json +12 -0
  28. package/locales/pl-PL/welcome.json +0 -17
  29. package/locales/pt-BR/models.json +12 -0
  30. package/locales/pt-BR/welcome.json +0 -17
  31. package/locales/ru-RU/models.json +12 -0
  32. package/locales/ru-RU/welcome.json +0 -17
  33. package/locales/tr-TR/models.json +12 -0
  34. package/locales/tr-TR/welcome.json +0 -17
  35. package/locales/vi-VN/models.json +12 -0
  36. package/locales/vi-VN/welcome.json +0 -17
  37. package/locales/zh-CN/models.json +12 -0
  38. package/locales/zh-CN/welcome.json +0 -17
  39. package/locales/zh-TW/models.json +12 -0
  40. package/locales/zh-TW/welcome.json +0 -17
  41. package/next.config.ts +1 -0
  42. package/package.json +4 -2
  43. package/packages/model-runtime/package.json +1 -0
  44. package/packages/obervability-otel/package.json +2 -2
  45. package/src/app/[variants]/(main)/_layout/Desktop/SideBar/index.tsx +11 -7
  46. package/src/layout/GlobalProvider/StoreInitialization.tsx +4 -1
  47. package/src/libs/swr/index.ts +13 -0
  48. package/src/libs/trpc/client/lambda.ts +13 -3
  49. package/src/locales/default/welcome.ts +0 -17
  50. package/src/store/agent/slices/chat/action.ts +4 -2
  51. package/src/store/aiInfra/slices/aiProvider/action.ts +5 -3
  52. package/src/store/chat/slices/message/action.ts +0 -4
@@ -345,23 +345,6 @@
345
345
  "title": "写作圈"
346
346
  }
347
347
  },
348
- "qa": {
349
- "q01": "LobeHub 是什么?",
350
- "q02": "{{appName}} 是什么?",
351
- "q03": "{{appName}} 是否有社区支持?",
352
- "q04": "{{appName}} 支持哪些功能?",
353
- "q05": "{{appName}} 如何部署和使用?",
354
- "q06": "{{appName}} 的定价是如何的?",
355
- "q07": "{{appName}} 是否免费?",
356
- "q08": "是否有云端服务版?",
357
- "q09": "是否支持本地语言模型?",
358
- "q10": "是否支持图像识别和生成?",
359
- "q11": "是否支持语音合成和语音识别?",
360
- "q12": "是否支持插件系统?",
361
- "q13": "是否有自己的市场来获取 GPTs?",
362
- "q14": "是否支持多种 AI 服务提供商?",
363
- "q15": "我在使用时遇到问题应该怎么办?"
364
- },
365
348
  "questions": {
366
349
  "moreBtn": "了解更多",
367
350
  "title": "试着问问:"
@@ -311,6 +311,12 @@
311
311
  "Qwen/QwQ-32B-Preview": {
312
312
  "description": "QwQ-32B-Preview是Qwen 最新的實驗性研究模型,專注於提升AI推理能力。通過探索語言混合、遞歸推理等複雜機制,主要優勢包括強大的推理分析能力、數學和編程能力。與此同時,也存在語言切換問題、推理循環、安全性考量、其他能力方面的差異。"
313
313
  },
314
+ "Qwen/Qwen-Image": {
315
+ "description": "Qwen-Image 是由阿里巴巴通義千問團隊發布的圖像生成基礎模型,擁有 200 億參數。該模型在複雜的文字渲染與精確的圖像編輯方面取得了顯著進展,特別擅長生成包含高保真中英文文字的圖像。Qwen-Image 不僅能處理多行排版與段落級文字,還能在生成圖像時維持排版的一致性與上下文的協調性。除了卓越的文字渲染能力,該模型還支援多樣的藝術風格,從寫實攝影到動漫美學,能靈活滿足各種創作需求。同時,它也具備強大的圖像編輯與理解能力,支援風格轉換、物件增刪、細節增強、文字編輯甚至人體姿勢操控等進階操作,旨在成為一個融合語言、排版與圖像的綜合性智慧視覺創作與處理基礎模型。"
316
+ },
317
+ "Qwen/Qwen-Image-Edit-2509": {
318
+ "description": "Qwen-Image-Edit-2509 是由阿里巴巴通義千問團隊發布的 Qwen-Image 圖像編輯最新版本。該模型基於 200 億參數的 Qwen-Image 模型進行深度訓練,成功將其獨特的文字渲染能力擴展至圖像編輯領域,實現對圖片中文字的精準編輯。此外,Qwen-Image-Edit 採用創新的架構,將輸入圖像同時送入 Qwen2.5-VL(用於視覺語意控制)與 VAE 編碼器(用於視覺外觀控制),從而具備語意與外觀的雙重編輯能力。這意味著它不僅支援元素的新增、刪除或修改等局部外觀編輯,還支援如 IP 創作、風格轉換等需保持語意一致性的高階視覺語意編輯。該模型在多個公開基準測試中展現出頂尖(SOTA)表現,使其成為一個強大的圖像編輯基礎模型。"
319
+ },
314
320
  "Qwen/Qwen2-72B-Instruct": {
315
321
  "description": "Qwen2 是先進的通用語言模型,支持多種指令類型。"
316
322
  },
@@ -392,6 +398,12 @@
392
398
  "Qwen/Qwen3-Next-80B-A3B-Thinking": {
393
399
  "description": "Qwen3-Next-80B-A3B-Thinking 是由阿里巴巴通義千問團隊發布的、專為複雜推理任務設計的下一代基礎模型。它基於創新的 Qwen3-Next 架構,該架構融合了混合注意力機制(Gated DeltaNet 與 Gated Attention)和高稀疏度混合專家(MoE)結構,旨在實現極致的訓練與推理效率。作為一個總參數達 800 億的稀疏模型,它在推理時僅啟動約 30 億參數,大幅降低了計算成本,在處理超過 32K tokens 的長上下文任務時,吞吐量比 Qwen3-32B 模型高出 10 倍以上。此“Thinking”版本專為執行數學證明、程式碼綜合、邏輯分析和規劃等高難度多步任務而優化,並預設以結構化的“思維鏈”形式輸出推理過程。在性能上,它不僅超越了 Qwen3-32B-Thinking 等成本更高的模型,還在多個基準測試中優於 Gemini-2.5-Flash-Thinking。"
394
400
  },
401
+ "Qwen/Qwen3-VL-8B-Instruct": {
402
+ "description": "Qwen3-VL-8B-Instruct 是 Qwen3 系列的視覺語言模型,基於 Qwen3-8B-Instruct 開發,並在大量圖文資料上進行訓練,擅長通用視覺理解、以視覺為核心的對話以及圖像中的多語言文字識別。適用於視覺問答、圖像描述、多模態指令跟隨與工具調用等場景。"
403
+ },
404
+ "Qwen/Qwen3-VL-8B-Thinking": {
405
+ "description": "Qwen3-VL-8B-Thinking 是 Qwen3 系列的視覺思考版本,針對複雜多步推理任務進行優化,預設在回答前生成逐步思考(thinking chain),以提升推理準確性。適合需要深度推理的視覺問答、審閱圖像內容並提供詳細分析的場景。"
406
+ },
395
407
  "Qwen2-72B-Instruct": {
396
408
  "description": "Qwen2 是 Qwen 模型的最新系列,支持 128k 上下文,對比當前最優的開源模型,Qwen2-72B 在自然語言理解、知識、代碼、數學及多語言等多項能力上均顯著超越當前領先的模型。"
397
409
  },
@@ -340,23 +340,6 @@
340
340
  "title": "寫作圈"
341
341
  }
342
342
  },
343
- "qa": {
344
- "q01": "LobeHub 是什麼?",
345
- "q02": "{{appName}} 是什麼?",
346
- "q03": "{{appName}} 是否有社群支持?",
347
- "q04": "{{appName}} 支持哪些功能?",
348
- "q05": "{{appName}} 如何部署和使用?",
349
- "q06": "{{appName}} 的定價是如何的?",
350
- "q07": "{{appName}} 是否免費?",
351
- "q08": "是否有雲端服務版?",
352
- "q09": "是否支持本地語言模型?",
353
- "q10": "是否支持圖像識別和生成?",
354
- "q11": "是否支持語音合成和語音識別?",
355
- "q12": "是否支持插件系統?",
356
- "q13": "是否有自己的市場來獲取 GPTs?",
357
- "q14": "是否支持多種 AI 服務提供商?",
358
- "q15": "我在使用時遇到問題應該怎麼辦?"
359
- },
360
343
  "questions": {
361
344
  "moreBtn": "了解更多",
362
345
  "title": "試著問問:"
package/next.config.ts CHANGED
@@ -47,6 +47,7 @@ const nextConfig: NextConfig = {
47
47
  // refs: https://github.com/lobehub/lobe-chat/pull/7430
48
48
  serverMinification: false,
49
49
  webVitalsAttribution: ['CLS', 'LCP'],
50
+ webpackBuildWorker: true,
50
51
  webpackMemoryOptimizations: true,
51
52
  },
52
53
  async headers() {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.139.0",
3
+ "version": "1.139.2",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -126,7 +126,7 @@
126
126
  "dependencies": {
127
127
  "@ant-design/icons": "^5.6.1",
128
128
  "@ant-design/pro-components": "^2.8.10",
129
- "@anthropic-ai/sdk": "^0.65.0",
129
+ "@anthropic-ai/sdk": "^0.67.0",
130
130
  "@auth/core": "^0.40.0",
131
131
  "@aws-sdk/client-s3": "~3.893.0",
132
132
  "@aws-sdk/s3-request-presigner": "~3.893.0",
@@ -172,6 +172,8 @@
172
172
  "@modelcontextprotocol/sdk": "^1.20.0",
173
173
  "@neondatabase/serverless": "^1.0.2",
174
174
  "@next/third-parties": "^15.5.4",
175
+ "@opentelemetry/exporter-jaeger": "^2.1.0",
176
+ "@opentelemetry/winston-transport": "^0.17.0",
175
177
  "@react-spring/web": "^9.7.5",
176
178
  "@serwist/next": "^9.2.1",
177
179
  "@t3-oss/env-nextjs": "^0.13.8",
@@ -13,6 +13,7 @@
13
13
  },
14
14
  "dependencies": {
15
15
  "@aws-sdk/client-bedrock-runtime": "^3.862.0",
16
+ "@huggingface/inference": "^4.11.3",
16
17
  "@lobechat/const": "workspace:*",
17
18
  "@lobechat/types": "workspace:*",
18
19
  "@lobechat/utils": "workspace:*",
@@ -10,12 +10,12 @@
10
10
  "@opentelemetry/auto-instrumentations-node": "^0.65.0",
11
11
  "@opentelemetry/exporter-metrics-otlp-http": "^0.206.0",
12
12
  "@opentelemetry/exporter-trace-otlp-http": "^0.206.0",
13
- "@opentelemetry/instrumentation": "^0.205.0",
13
+ "@opentelemetry/instrumentation": "^0.206.0",
14
14
  "@opentelemetry/instrumentation-http": "^0.205.0",
15
15
  "@opentelemetry/instrumentation-pg": "^0.59.0",
16
16
  "@opentelemetry/resources": "^2.0.1",
17
17
  "@opentelemetry/sdk-metrics": "^2.0.1",
18
- "@opentelemetry/sdk-node": "^0.205.0",
18
+ "@opentelemetry/sdk-node": "^0.206.0",
19
19
  "@opentelemetry/sdk-trace-node": "^2.0.1",
20
20
  "@opentelemetry/semantic-conventions": "^1.36.0",
21
21
  "@vercel/otel": "^1.13.0",
@@ -27,12 +27,13 @@ const Top = () => {
27
27
 
28
28
  const Nav = memo(() => {
29
29
  const theme = useTheme();
30
- const isSingleMode = useIsSingleMode()
30
+ const isSingleMode = useIsSingleMode();
31
31
  const inZenMode = useGlobalStore(systemStatusSelectors.inZenMode);
32
32
  const { showPinList } = useServerConfigStore(featureFlagsSelectors);
33
33
 
34
34
  return (
35
- !inZenMode && !isSingleMode && (
35
+ !inZenMode &&
36
+ !isSingleMode && (
36
37
  <SideNav
37
38
  avatar={
38
39
  <div className={electronStylish.nodrag}>
@@ -54,11 +55,14 @@ const Nav = memo(() => {
54
55
  }}
55
56
  topActions={
56
57
  <Suspense>
57
- <div className={electronStylish.nodrag} style={{
58
- display: 'flex',
59
- flexDirection: 'column',
60
- maxHeight: "calc(100vh - 150px)"
61
- }}>
58
+ <div
59
+ className={electronStylish.nodrag}
60
+ style={{
61
+ display: 'flex',
62
+ flexDirection: 'column',
63
+ maxHeight: isDesktop ? 'calc(100vh - 180px)' : 'calc(100vh - 150px)',
64
+ }}
65
+ >
62
66
  <Top />
63
67
  {showPinList && <PinList />}
64
68
  </div>
@@ -50,9 +50,12 @@ const StoreInitialization = memo(() => {
50
50
  * The store function of `isLogin` will both consider the values of `enableAuth` and `isSignedIn`.
51
51
  * But during initialization, the value of `enableAuth` might be incorrect cause of the async fetch.
52
52
  * So we need to use `isSignedIn` only to determine whether request for the default agent config and user state.
53
+ *
54
+ * IMPORTANT: Explicitly convert to boolean to avoid passing null/undefined downstream,
55
+ * which would cause unnecessary API requests with invalid login state.
53
56
  */
54
57
  const isDBInited = useGlobalStore(systemStatusSelectors.isDBInited);
55
- const isLoginOnInit = isDBInited && (enableNextAuth ? isSignedIn : isLogin);
58
+ const isLoginOnInit = isDBInited ? Boolean(enableNextAuth ? isSignedIn : isLogin) : false;
56
59
 
57
60
  // init inbox agent and default agent config
58
61
  useInitAgentStore(isLoginOnInit, serverConfig.defaultAgent?.config);
@@ -34,6 +34,19 @@ export const useClientDataSWR: SWRHook = (key, fetch, config) =>
34
34
  ? 1500
35
35
  : // web 300s
36
36
  5 * 60 * 1000,
37
+ // Custom error retry logic: don't retry on 401 errors
38
+ onErrorRetry: (error: any, key: any, config: any, revalidate: any, { retryCount }: any) => {
39
+ // Check if error is marked as non-retryable (e.g., 401 authentication errors)
40
+ if (error?.meta?.shouldRetry === false) {
41
+ return;
42
+ }
43
+ // For other errors, use default SWR retry behavior
44
+ // Default: exponential backoff, max 5 retries
45
+ if (retryCount >= 5) return;
46
+ const exponentialDelay = 1000 * Math.pow(2, Math.min(retryCount, 10));
47
+ const timeout = Math.min(exponentialDelay, 30_000);
48
+ setTimeout(() => revalidate({ retryCount }), timeout);
49
+ },
37
50
  refreshWhenOffline: false,
38
51
  revalidateOnFocus: true,
39
52
  revalidateOnReconnect: true,
@@ -10,6 +10,10 @@ import type { LambdaRouter } from '@/server/routers/lambda';
10
10
 
11
11
  const log = debug('lobe-image:lambda-client');
12
12
 
13
+ // 401 error debouncing: prevent showing multiple login notifications in short time
14
+ let last401Time = 0;
15
+ const MIN_401_INTERVAL = 5000; // 5 seconds
16
+
13
17
  // handle error
14
18
  const errorHandlingLink: TRPCLink<LambdaRouter> = () => {
15
19
  return ({ op, next }) =>
@@ -18,10 +22,9 @@ const errorHandlingLink: TRPCLink<LambdaRouter> = () => {
18
22
  complete: () => observer.complete(),
19
23
  error: async (err) => {
20
24
  const showError = (op.context?.showNotification as boolean) ?? true;
25
+ const status = err.data?.httpStatus as number;
21
26
 
22
27
  if (showError) {
23
- const status = err.data?.httpStatus as number;
24
-
25
28
  const { loginRequired } = await import('@/components/Error/loginRequiredNotification');
26
29
  const { fetchErrorNotification } = await import(
27
30
  '@/components/Error/fetchErrorNotification'
@@ -29,7 +32,14 @@ const errorHandlingLink: TRPCLink<LambdaRouter> = () => {
29
32
 
30
33
  switch (status) {
31
34
  case 401: {
32
- loginRequired.redirect();
35
+ // Debounce: only show login notification once every 5 seconds
36
+ const now = Date.now();
37
+ if (now - last401Time > MIN_401_INTERVAL) {
38
+ last401Time = now;
39
+ loginRequired.redirect();
40
+ }
41
+ // Mark error as non-retryable to prevent SWR infinite retry loop
42
+ err.meta = { ...err.meta, shouldRetry: false };
33
43
  break;
34
44
  }
35
45
 
@@ -362,23 +362,6 @@ export default {
362
362
  title: '写作圈',
363
363
  },
364
364
  },
365
- qa: {
366
- q01: 'LobeHub 是什么?',
367
- q02: '{{appName}} 是什么?',
368
- q03: '{{appName}} 是否有社区支持?',
369
- q04: '{{appName}} 支持哪些功能?',
370
- q05: '{{appName}} 如何部署和使用?',
371
- q06: '{{appName}} 的定价是如何的?',
372
- q07: '{{appName}} 是否免费?',
373
- q08: '是否有云端服务版?',
374
- q09: '是否支持本地语言模型?',
375
- q10: '是否支持图像识别和生成?',
376
- q11: '是否支持语音合成和语音识别?',
377
- q12: '是否支持插件系统?',
378
- q13: '是否有自己的市场来获取 GPTs?',
379
- q14: '是否支持多种 AI 服务提供商?',
380
- q15: '我在使用时遇到问题应该怎么办?',
381
- },
382
365
  questions: {
383
366
  moreBtn: '了解更多',
384
367
  title: '试着问问:',
@@ -160,7 +160,8 @@ export const createChatSlice: StateCreator<
160
160
  },
161
161
  useFetchAgentConfig: (isLogin, sessionId) =>
162
162
  useClientDataSWR<LobeAgentConfig>(
163
- isLogin && !sessionId.startsWith('cg_')
163
+ // Only fetch when login status is explicitly true (not null/undefined)
164
+ isLogin === true && !sessionId.startsWith('cg_')
164
165
  ? ([FETCH_AGENT_CONFIG_KEY, sessionId] as const)
165
166
  : null,
166
167
  ([, id]: readonly [string, string]) => sessionService.getSessionConfig(id),
@@ -192,7 +193,8 @@ export const createChatSlice: StateCreator<
192
193
 
193
194
  useInitInboxAgentStore: (isLogin, defaultAgentConfig) =>
194
195
  useOnlyFetchOnceSWR<PartialDeep<LobeAgentConfig>>(
195
- !!isLogin ? 'fetchInboxAgentConfig' : null,
196
+ // Only fetch when login status is explicitly true (not null/undefined/false)
197
+ isLogin === true ? 'fetchInboxAgentConfig' : null,
196
198
  () => sessionService.getSessionConfig(INBOX_SESSION_ID),
197
199
  {
198
200
  onSuccess: (data) => {
@@ -237,10 +237,12 @@ export const createAiProviderSlice: StateCreator<
237
237
 
238
238
  useFetchAiProviderRuntimeState: (isLogin) => {
239
239
  const isAuthLoaded = authSelectors.isLoaded(useUserStore.getState());
240
+ // Only fetch when auth is loaded and login status is explicitly defined (true or false)
241
+ // Prevents unnecessary requests when login state is null/undefined
242
+ const shouldFetch =
243
+ isAuthLoaded && !isDeprecatedEdition && isLogin !== null && isLogin !== undefined;
240
244
  return useClientDataSWR<AiProviderRuntimeStateWithBuiltinModels | undefined>(
241
- isAuthLoaded && !isDeprecatedEdition
242
- ? [AiProviderSwrKey.fetchAiProviderRuntimeState, isLogin]
243
- : null,
245
+ shouldFetch ? [AiProviderSwrKey.fetchAiProviderRuntimeState, isLogin] : null,
244
246
  async ([, isLogin]) => {
245
247
  const [{ LOBE_DEFAULT_MODEL_LIST: builtinAiModelList }, { DEFAULT_MODEL_PROVIDER_LIST }] =
246
248
  await Promise.all([import('model-bank'), import('@/config/modelProviders')]);
@@ -428,11 +428,7 @@ export const chatMessage: StateCreator<
428
428
  internal_toggleMessageLoading(true, tempId);
429
429
  }
430
430
 
431
- console.log('internal_createMessafe', message);
432
-
433
431
  try {
434
- console.log('internal_createMessage: Trying to call messageService.createMessage', message);
435
-
436
432
  const id = await messageService.createMessage(message);
437
433
  if (!context?.skipRefresh) {
438
434
  internal_toggleMessageLoading(true, tempId);