@lobehub/lobehub 2.0.0-next.301 → 2.0.0-next.302

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/CHANGELOG.md +33 -0
  2. package/changelog/v1.json +12 -0
  3. package/package.json +1 -1
  4. package/packages/builtin-tool-agent-builder/src/client/Streaming/UpdatePrompt/index.tsx +27 -0
  5. package/packages/builtin-tool-agent-builder/src/client/Streaming/index.ts +16 -0
  6. package/packages/builtin-tool-agent-builder/src/client/index.ts +3 -0
  7. package/packages/model-bank/src/aiModels/infiniai.ts +2 -2
  8. package/packages/model-bank/src/aiModels/ollamacloud.ts +2 -2
  9. package/packages/model-bank/src/aiModels/siliconcloud.ts +1 -1
  10. package/packages/model-bank/src/aiModels/volcengine.ts +2 -2
  11. package/packages/model-bank/src/aiModels/zhipu.ts +2 -2
  12. package/packages/model-bank/src/modelProviders/comfyui.ts +8 -8
  13. package/packages/model-bank/src/modelProviders/moonshot.ts +1 -1
  14. package/packages/model-bank/src/modelProviders/stepfun.ts +1 -1
  15. package/packages/model-bank/src/modelProviders/zeroone.ts +1 -1
  16. package/packages/model-runtime/src/core/streams/anthropic.ts +1 -1
  17. package/packages/model-runtime/src/core/streams/google/index.ts +4 -4
  18. package/packages/model-runtime/src/core/streams/model.ts +26 -26
  19. package/packages/model-runtime/src/core/streams/ollama.ts +2 -2
  20. package/packages/model-runtime/src/core/streams/openai/openai.ts +7 -7
  21. package/packages/model-runtime/src/core/streams/protocol.ts +8 -8
  22. package/packages/model-runtime/src/core/streams/spark.ts +5 -5
  23. package/packages/model-runtime/src/core/streams/vertex-ai.ts +3 -3
  24. package/src/app/[variants]/(main)/agent/_layout/AgentIdSync.tsx +6 -2
  25. package/src/app/[variants]/(main)/agent/features/Conversation/Header/ShareButton/index.tsx +2 -1
  26. package/src/app/[variants]/(main)/agent/features/Conversation/index.tsx +16 -9
  27. package/src/app/[variants]/(main)/group/profile/{features/ProfileHydration.tsx → StoreSync.tsx} +17 -8
  28. package/src/app/[variants]/(main)/group/profile/features/Header/index.tsx +6 -1
  29. package/src/app/[variants]/(main)/group/profile/index.tsx +2 -2
  30. package/src/features/AgentBuilder/TopicSelector.tsx +3 -1
  31. package/src/tools/streamings.ts +5 -0
  32. package/vercel.json +1 -1
package/CHANGELOG.md CHANGED
@@ -2,6 +2,39 @@
 
  # Changelog
 
+ ## [Version 2.0.0-next.302](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.301...v2.0.0-next.302)
+
+ <sup>Released on **2026-01-17**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Try to fix group supervisor id not sync successful.
+
+ #### 💄 Styles
+
+ - **misc**: Fix left panel on group page.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Try to fix group supervisor id not sync successful, closes [#11570](https://github.com/lobehub/lobe-chat/issues/11570) ([ef51c17](https://github.com/lobehub/lobe-chat/commit/ef51c17))
+
+ #### Styles
+
+ - **misc**: Fix left panel on group page, closes [#11571](https://github.com/lobehub/lobe-chat/issues/11571) ([de81a42](https://github.com/lobehub/lobe-chat/commit/de81a42))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ## [Version 2.0.0-next.301](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.300...v2.0.0-next.301)
 
  <sup>Released on **2026-01-17**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,16 @@
  [
+   {
+     "children": {
+       "fixes": [
+         "Try to fix group supervisor id not sync successful."
+       ],
+       "improvements": [
+         "Fix left panel on group page."
+       ]
+     },
+     "date": "2026-01-17",
+     "version": "2.0.0-next.302"
+   },
    {
      "children": {},
      "date": "2026-01-17",
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/lobehub",
-   "version": "2.0.0-next.301",
+   "version": "2.0.0-next.302",
    "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
package/packages/builtin-tool-agent-builder/src/client/Streaming/UpdatePrompt/index.tsx ADDED
@@ -0,0 +1,27 @@
+ 'use client';
+
+ import type { BuiltinStreamingProps } from '@lobechat/types';
+ import { Block, Markdown } from '@lobehub/ui';
+ import { memo } from 'react';
+
+ import type { UpdatePromptParams } from '../../../types';
+
+ export const UpdatePromptStreaming = memo<BuiltinStreamingProps<UpdatePromptParams>>(
+   ({ args }) => {
+     const { prompt } = args || {};
+
+     if (!prompt) return null;
+
+     return (
+       <Block paddingBlock={8} paddingInline={12} variant={'outlined'} width="100%">
+         <Markdown animated variant={'chat'}>
+           {prompt}
+         </Markdown>
+       </Block>
+     );
+   },
+ );
+
+ UpdatePromptStreaming.displayName = 'UpdatePromptStreaming';
+
+ export default UpdatePromptStreaming;
package/packages/builtin-tool-agent-builder/src/client/Streaming/index.ts ADDED
@@ -0,0 +1,16 @@
+ import type { BuiltinStreaming } from '@lobechat/types';
+
+ import { AgentBuilderApiName } from '../../types';
+ import { UpdatePromptStreaming } from './UpdatePrompt';
+
+ /**
+  * Agent Builder Streaming Components Registry
+  *
+  * Streaming components render tool calls while they are
+  * still executing, allowing real-time feedback to users.
+  */
+ export const AgentBuilderStreamings: Record<string, BuiltinStreaming> = {
+   [AgentBuilderApiName.updatePrompt]: UpdatePromptStreaming as BuiltinStreaming,
+ };
+
+ export { UpdatePromptStreaming } from './UpdatePrompt';
package/packages/builtin-tool-agent-builder/src/client/index.ts CHANGED
@@ -14,6 +14,9 @@ export { AgentBuilderInterventions } from './Intervention';
  // Render components (read-only snapshots)
  export { AgentBuilderRenders } from './Render';
 
+ // Streaming components (real-time tool execution feedback)
+ export { AgentBuilderStreamings } from './Streaming';
+
  // Re-export types and manifest for convenience
  export { AgentBuilderManifest } from '../manifest';
  export * from '../types';
package/packages/model-bank/src/aiModels/infiniai.ts CHANGED
@@ -51,7 +51,7 @@ const infiniaiChatModels: AIChatModelCard[] = [
    },
    contextWindowTokens: 200_000,
    description:
-     'MiniMax-M2.1 MiniMax 系列的最新版本,专为多语言编程和真实世界复杂任务优化。作为一款 AI 原生模型,MiniMax-M2.1 在模型性能、智能体框架支持以及多场景适配方面实现了显著提升,旨在帮助企业和个人更快地找到 AI 原生的工作与生活方式。',
+     'MiniMax-M2.1 is the latest version of the MiniMax series, optimized for multilingual programming and real-world complex tasks. As an AI-native model, MiniMax-M2.1 achieves significant improvements in model performance, agent framework support, and multi-scenario adaptation, aiming to help enterprises and individuals find AI-native work and lifestyle more quickly.',
    displayName: 'MiniMax M2.1',
    enabled: true,
    id: 'minimax-m2.1',
@@ -72,7 +72,7 @@ const infiniaiChatModels: AIChatModelCard[] = [
    },
    contextWindowTokens: 200_000,
    description:
-     'MiniMax-M2 是一款专为编码与智能体工作流优化的专家混合(MoE)语言模型,具有约 230B 总参数与约 10B 活跃参数。它在保持强通用智能的同时,针对多文件编辑、代码-运行-修复闭环、测试校验修复等开发者场景进行深度增强,在终端、IDE CI 等真实环境中表现稳定、高效。',
+     'MiniMax-M2 is a Mixture of Experts (MoE) language model optimized for coding and agent workflows, with approximately 230B total parameters and approximately 10B active parameters. While maintaining strong general intelligence, it is deeply enhanced for developer scenarios such as multi-file editing, code-run-fix loops, and test validation fixes, demonstrating stable and efficient performance in real environments like terminals, IDEs, and CI.',
    displayName: 'MiniMax M2',
    id: 'minimax-m2',
    maxOutput: 200_000,
package/packages/model-bank/src/aiModels/ollamacloud.ts CHANGED
@@ -82,7 +82,7 @@ const ollamaCloudModels: AIChatModelCard[] = [
    },
    contextWindowTokens: 200_000,
    description:
-     'GLM-4.7 是智谱最新旗舰模型,面向 Agentic Coding 场景强化了编码能力、长程任务规划与工具协同,并在多个公开基准的当期榜单中取得开源模型中的领先表现。通用能力提升,回复更简洁自然,写作更具沉浸感。在执行复杂智能体任务,在工具调用时指令遵循更强,Artifacts Agentic Coding 的前端美感和长程任务完成效率进一步提升。',
+     "GLM-4.7 is Zhipu's latest flagship model, enhanced for Agentic Coding scenarios with improved coding capabilities, long-term task planning, and tool collaboration. It achieves leading performance among open-source models on multiple public benchmarks. General capabilities are improved with more concise and natural responses and more immersive writing. For complex agent tasks, instruction following during tool calls is stronger, and the frontend aesthetics and long-term task completion efficiency of Artifacts and Agentic Coding are further enhanced.",
    displayName: 'GLM-4.7',
    enabled: true,
    id: 'glm-4.7',
@@ -95,7 +95,7 @@ const ollamaCloudModels: AIChatModelCard[] = [
    },
    contextWindowTokens: 200_000,
    description:
-     '智谱旗舰模型 GLM-4.6 (355B) 在高级编码、长文本处理、推理与智能体能力上全面超越前代,尤其在编程能力上对齐 Claude Sonnet 4,成为国内顶尖的 Coding 模型。',
+     "Zhipu's flagship model GLM-4.6 (355B) fully surpasses its predecessors in advanced coding, long-text processing, reasoning, and agent capabilities. It particularly aligns with Claude Sonnet 4 in programming ability, becoming China's top Coding model.",
    displayName: 'GLM-4.6',
    id: 'glm-4.6',
    type: 'chat',
package/packages/model-bank/src/aiModels/siliconcloud.ts CHANGED
@@ -9,7 +9,7 @@ const siliconcloudChatModels: AIChatModelCard[] = [
    },
    contextWindowTokens: 200_000,
    description:
-     'GLM-4.7 是智谱新一代旗舰模型,总参数量 355B,激活参数量 32B,在通用对话、推理和智能体能力方面全面升级。GLM-4.7 增强了 Interleaved Thinking(交错思考),并引入了 Preserved Thinking(保留思考)和 Turn-level Thinking(轮级思考)。',
+     "GLM-4.7 is Zhipu's new generation flagship model with 355B total parameters and 32B active parameters, fully upgraded in general dialogue, reasoning, and agent capabilities. GLM-4.7 enhances Interleaved Thinking and introduces Preserved Thinking and Turn-level Thinking.",
    displayName: 'GLM-4.7 (Pro)',
    id: 'Pro/zai-org/glm-4.7',
    pricing: {
package/packages/model-bank/src/aiModels/volcengine.ts CHANGED
@@ -14,7 +14,7 @@ const doubaoChatModels: AIChatModelCard[] = [
    },
    contextWindowTokens: 256_000,
    description:
-     'Doubao-Seed-1.8 有着更强的多模态理解能力和 Agent 能力,支持文本/图片/视频输入与上下文缓存,可在复杂任务中提供更出色的表现。',
+     'Doubao-Seed-1.8 has stronger multimodal understanding and Agent capabilities, supports text/image/video input and context caching, and can deliver excellent performance in complex tasks.',
    displayName: 'Doubao Seed 1.8',
    enabled: true,
    id: 'doubao-seed-1.8',
@@ -127,7 +127,7 @@ const doubaoChatModels: AIChatModelCard[] = [
    },
    contextWindowTokens: 131_072,
    description:
-     'DeepSeek-V3.2 是深度求索推出的首个将思考融入工具使用的混合推理模型,用高效架构省算力、大规模强化学习提能力、大规模合成任务数据强泛化,三者结合性能媲美 GPT-5-High,输出长度大幅降低,显著减少了计算开销与用户等待时间。',
+     'DeepSeek-V3.2 is the first hybrid reasoning model from DeepSeek that integrates thinking into tool usage. It uses efficient architecture to save computation, large-scale reinforcement learning to enhance capabilities, and large-scale synthetic task data to strengthen generalization. The combination of these three achieves performance comparable to GPT-5-High, with significantly reduced output length, notably decreasing computational overhead and user wait times.',
    displayName: 'DeepSeek V3.2',
    enabled: true,
    id: 'deepseek-v3.2',
package/packages/model-bank/src/aiModels/zhipu.ts CHANGED
@@ -12,7 +12,7 @@ const zhipuChatModels: AIChatModelCard[] = [
    },
    contextWindowTokens: 200_000,
    description:
-     'GLM-4.7 是智谱最新旗舰模型,面向 Agentic Coding 场景强化了编码能力、长程任务规划与工具协同,并在多个公开基准的当期榜单中取得开源模型中的领先表现。通用能力提升,回复更简洁自然,写作更具沉浸感。在执行复杂智能体任务,在工具调用时指令遵循更强,Artifacts Agentic Coding 的前端美感和长程任务完成效率进一步提升。',
+     "GLM-4.7 is Zhipu's latest flagship model, enhanced for Agentic Coding scenarios with improved coding capabilities, long-term task planning, and tool collaboration. It achieves leading performance among open-source models on multiple public benchmarks. General capabilities are improved with more concise and natural responses and more immersive writing. For complex agent tasks, instruction following during tool calls is stronger, and the frontend aesthetics and long-term task completion efficiency of Artifacts and Agentic Coding are further enhanced.",
    displayName: 'GLM-4.7',
    enabled: true,
    id: 'glm-4.7',
@@ -75,7 +75,7 @@ const zhipuChatModels: AIChatModelCard[] = [
    },
    contextWindowTokens: 200_000,
    description:
-     '智谱最新旗舰模型 GLM-4.6 (355B) 在高级编码、长文本处理、推理与智能体能力上全面超越前代,尤其在编程能力上对齐 Claude Sonnet 4,成为国内顶尖的 Coding 模型。',
+     "Zhipu's latest flagship model GLM-4.6 (355B) fully surpasses its predecessors in advanced coding, long-text processing, reasoning, and agent capabilities. It particularly aligns with Claude Sonnet 4 in programming ability, becoming China's top Coding model.",
    displayName: 'GLM-4.6',
    id: 'glm-4.6',
    maxOutput: 131_072,
package/packages/model-bank/src/modelProviders/comfyui.ts CHANGED
@@ -3,8 +3,8 @@ import { type ModelProviderCard } from '@/types/llm';
  /**
   * ComfyUI Provider Configuration
   *
-  * 支持本地和远程 ComfyUI 服务器连接
-  * 提供 FLUX 系列模型的图像生成能力
+  * Supports local and remote ComfyUI server connections
+  * Provides image generation capabilities for FLUX series models
   *
   * @see https://www.comfy.org/
   */
@@ -16,22 +16,22 @@ const ComfyUI: ModelProviderCard = {
    id: 'comfyui',
    name: 'ComfyUI',
    settings: {
-     // 禁用浏览器直接请求,通过服务端代理
+     // Disable direct browser requests, proxy through server
      disableBrowserRequest: true,
 
-     // SDK 类型标识
+     // SDK type identifier
      sdkType: 'comfyui',
 
-     // 不显示添加新模型按钮(模型通过配置管理)
+     // Hide add new model button (models managed via configuration)
      showAddNewModel: false,
 
-     // 显示 API 密钥配置(用于认证配置)
+     // Show API key configuration (for authentication setup)
      showApiKey: true,
 
-     // 不显示连通性检查(图像生成不支持聊天接口检查)
+     // Hide connectivity check (image generation doesn't support chat interface checks)
      showChecker: false,
 
-     // 不显示模型获取器(使用预定义模型)
+     // Hide model fetcher (use predefined models)
      showModelFetcher: false,
    },
    url: 'https://www.comfy.org/',
package/packages/model-bank/src/modelProviders/moonshot.ts CHANGED
@@ -1,6 +1,6 @@
  import { type ModelProviderCard } from '@/types/llm';
 
- // ref: https://platform.moonshot.cn/docs/intro#模型列表
+ // ref: https://platform.moonshot.cn/docs/intro#model-list
  const Moonshot: ModelProviderCard = {
    chatModels: [],
    checkModel: 'kimi-latest',
package/packages/model-bank/src/modelProviders/stepfun.ts CHANGED
@@ -1,7 +1,7 @@
  import { type ModelProviderCard } from '@/types/llm';
 
  // ref: https://platform.stepfun.com/docs/llm/text
- // 根据文档,阶级星辰大模型的上下文长度,其 k 的含义均为 1000
+ // According to the documentation, for Stepfun models, the 'k' in context length refers to 1000
  const Stepfun: ModelProviderCard = {
    chatModels: [],
    checkModel: 'step-2-mini',
package/packages/model-bank/src/modelProviders/zeroone.ts CHANGED
@@ -8,7 +8,7 @@ const ZeroOne: ModelProviderCard = {
      '01.AI drives a human-centered AI 2.0 revolution, using LLMs to create economic and social value and build new AI ecosystems and business models.',
    id: 'zeroone',
    modelList: { showModelFetcher: true },
-   modelsUrl: 'https://platform.lingyiwanwu.com/docs#模型与计费',
+   modelsUrl: 'https://platform.lingyiwanwu.com/docs#models-and-pricing',
    name: '01.AI',
    settings: {
      proxyUrl: {
package/packages/model-runtime/src/core/streams/anthropic.ts CHANGED
@@ -223,7 +223,7 @@ export const transformAnthropicStream = (
 
  export interface AnthropicStreamOptions {
    callbacks?: ChatStreamCallbacks;
-   enableStreaming?: boolean; // 选择 TPS 计算方式(非流式时传 false)
+   enableStreaming?: boolean; // Select TPS calculation method (pass false for non-streaming)
    inputStartAt?: number;
    payload?: ChatPayloadForTransformStream;
  }
package/packages/model-runtime/src/core/streams/google/index.ts CHANGED
@@ -241,8 +241,8 @@ const transformGoogleGenerativeAIStream = (
    {
      data: {
        citations: groundingChunks?.map((chunk) => ({
-         // google 返回的 uri 是经过 google 自己处理过的 url,因此无法展现真实的 favicon
-         // 需要使用 title 作为替换
+         // Google returns a uri processed by Google itself, so it cannot display the real favicon
+         // Need to use title as a replacement
          favicon: chunk.web?.title,
          title: chunk.web?.title,
          url: chunk.web?.uri,
@@ -293,7 +293,7 @@ const transformGoogleGenerativeAIStream = (
        ...usageChunks,
      ].filter(Boolean) as StreamProtocolChunk[];
    }
-   // 当有 finishReason 但没有 text 内容时,发送一个空的 text 块以停止加载动画
+   // When there is finishReason but no text content, send an empty text chunk to stop the loading animation
    return [
      { data: '', id: context?.id, type: 'text' },
      { data: candidate.finishReason, id: context?.id, type: 'stop' },
@@ -312,7 +312,7 @@ const transformGoogleGenerativeAIStream = (
 
  export interface GoogleAIStreamOptions {
    callbacks?: ChatStreamCallbacks;
-   enableStreaming?: boolean; // 选择 TPS 计算方式(非流式时传 false)
+   enableStreaming?: boolean; // Select TPS calculation method (pass false for non-streaming)
    inputStartAt?: number;
    payload?: ChatPayloadForTransformStream;
  }
package/packages/model-runtime/src/core/streams/model.ts CHANGED
@@ -1,5 +1,5 @@
  /**
-  * 将异步迭代器转换为 JSON 格式的 ReadableStream
+  * Convert async iterator to JSON format ReadableStream
   */
  export const createModelPullStream = <
    T extends { completed?: number; digest?: string; status: string; total?: number },
@@ -7,48 +7,48 @@ export const createModelPullStream = <
    iterable: AsyncIterable<T>,
    model: string,
    {
-     onCancel, // 新增:取消时调用的回调函数
+     onCancel, // Added: callback function to call on cancellation
    }: {
-     onCancel?: (reason?: any) => void; // 回调函数签名
+     onCancel?: (reason?: any) => void; // Callback function signature
    } = {},
  ): ReadableStream => {
-   let iterator: AsyncIterator<T>; // 在外部跟踪迭代器以便取消时可以调用 return
+   let iterator: AsyncIterator<T>; // Track iterator externally so we can call return on cancellation
 
    return new ReadableStream({
-     // 实现 cancel 方法
+     // Implement cancel method
      cancel(reason) {
-       // 调用传入的 onCancel 回调,执行外部的清理逻辑(如 client.abort())
+       // Call the onCancel callback to execute external cleanup logic (e.g., client.abort())
        if (onCancel) {
          onCancel(reason);
        }
 
-       // 尝试优雅地终止迭代器
-       // 注意:这依赖于 AsyncIterable 的实现是否支持 return/throw
+       // Attempt to gracefully terminate the iterator
+       // Note: This depends on whether the AsyncIterable implementation supports return/throw
        if (iterator && typeof iterator.return === 'function') {
-         // 不需要 await,让它在后台执行清理
+         // No need to await, let it execute cleanup in the background
          iterator.return().catch();
        }
      },
     async start(controller) {
-       iterator = iterable[Symbol.asyncIterator](); // 获取迭代器
+       iterator = iterable[Symbol.asyncIterator](); // Get iterator
 
        const encoder = new TextEncoder();
 
        try {
          // eslint-disable-next-line no-constant-condition
          while (true) {
-           // 等待下一个数据块或迭代完成
+           // Wait for the next data chunk or iteration completion
            const { value: progress, done } = await iterator.next();
 
-           // 如果迭代完成,跳出循环
+           // If iteration is complete, break the loop
            if (done) {
              break;
            }
 
-           // 忽略 'pulling manifest' 状态,因为它不包含进度
+           // Ignore 'pulling manifest' status as it does not contain progress
            if (progress.status === 'pulling manifest') continue;
 
-           // 格式化为标准格式并写入流
+           // Format to standard format and write to stream
            const progressData =
              JSON.stringify({
                completed: progress.completed,
@@ -61,24 +61,24 @@
            controller.enqueue(encoder.encode(progressData));
          }
 
-         // 正常完成
+         // Normal completion
          controller.close();
        } catch (error) {
-         // 处理错误
+         // Handle errors
 
-         // 如果错误是由于中止操作引起的,则静默处理或记录日志,然后尝试关闭流
+         // If error is caused by abort operation, handle silently or log, then try to close stream
          if (error instanceof DOMException && error.name === 'AbortError') {
-           // 不需要再 enqueue 错误信息,因为连接可能已断开
-           // 尝试正常关闭,如果已经取消,controller 可能已关闭或出错
+           // No need to enqueue error message as connection may already be disconnected
+           // Try to close normally; if already cancelled, controller may be closed or errored
            try {
             controller.enqueue(new TextEncoder().encode(JSON.stringify({ status: 'cancelled' })));
             controller.close();
           } catch {
-             // 忽略关闭错误,可能流已经被取消机制处理了
+             // Ignore close errors, stream may already be handled by cancellation mechanism
           }
         } else {
           console.error('[createModelPullStream] model download stream error:', error);
-           // 对于其他错误,尝试将错误信息发送给客户端
+           // For other errors, try to send error message to client
           const errorMessage = error instanceof Error ? error.message : String(error);
           const errorData =
             JSON.stringify({
@@ -88,20 +88,20 @@
             }) + '\n';
 
           try {
-             // 只有在流还期望数据时才尝试 enqueue
+             // Only try to enqueue if stream is still expecting data
             if (controller.desiredSize !== null && controller.desiredSize > 0) {
               controller.enqueue(encoder.encode(errorData));
             }
           } catch (enqueueError) {
             console.error('[createModelPullStream] Error enqueueing error message:', enqueueError);
-             // 如果这里也失败,很可能连接已断开
+             // If this also fails, connection is likely disconnected
           }
 
-           // 尝试关闭流或标记为错误状态
+           // Try to close stream or mark as error state
           try {
-             controller.close(); // 尝试正常关闭
+             controller.close(); // Try to close normally
           } catch {
-             controller.error(error); // 如果关闭失败,则将流置于错误状态
+             controller.error(error); // If closing fails, put stream in error state
           }
         }
       }
     }
package/packages/model-runtime/src/core/streams/ollama.ts CHANGED
@@ -36,14 +36,14 @@ const transformOllamaStream = (chunk: ChatResponse, stack: StreamContext): Strea
    return { data: 'finished', id: stack.id, type: 'stop' };
  }
 
- // 判断是否有 <think> </think> 标签,更新 thinkingInContent 状态
+ // Check for <think> or </think> tags and update thinkingInContent state
  if (chunk.message.content.includes('<think>')) {
    stack.thinkingInContent = true;
  } else if (chunk.message.content.includes('</think>')) {
    stack.thinkingInContent = false;
  }
 
- // 清除 <think> </think> 标签,并根据当前思考模式确定返回类型
+ // Remove <think> and </think> tags, and determine return type based on current thinking mode
  return {
    data: chunk.message.content.replaceAll(/<\/?think>/g, ''),
    id: stack.id,
package/packages/model-runtime/src/core/streams/openai/openai.ts CHANGED
@@ -88,8 +88,8 @@ const transformOpenAIStream = (
    return { data: errorData, id: 'first_chunk_error', type: 'error' };
  }
 
- // MiniMax 会在 base_resp 中返回业务错误(如余额不足),但不走 FIRST_CHUNK_ERROR_KEY
- // 典型返回:{ id: '...', choices: null, base_resp: { status_code: 1008, status_msg: 'insufficient balance' }, usage: {...} }
+ // MiniMax returns business errors (e.g., insufficient balance) in base_resp, but not through FIRST_CHUNK_ERROR_KEY
+ // Typical response: { id: '...', choices: null, base_resp: { status_code: 1008, status_msg: 'insufficient balance' }, usage: {...} }
  if ((chunk as any).base_resp && typeof (chunk as any).base_resp.status_code === 'number') {
    const baseResp = (chunk as any).base_resp as {
      message?: string;
@@ -98,29 +98,29 @@
    };
 
    if (baseResp.status_code !== 0) {
-     // 根据 MiniMax 错误码映射到对应的错误类型
+     // Map MiniMax error codes to corresponding error types
      let errorType: ILobeAgentRuntimeErrorType = AgentRuntimeErrorType.ProviderBizError;
 
      switch (baseResp.status_code) {
-       // 1004 - 未授权 / Token 不匹配 / 2049 - 无效的 API Key
+       // 1004 - Unauthorized / Token mismatch / 2049 - Invalid API Key
        case 1004:
        case 2049: {
          errorType = AgentRuntimeErrorType.InvalidProviderAPIKey;
          break;
        }
-       // 1008 - 余额不足
+       // 1008 - Insufficient balance
        case 1008: {
          errorType = AgentRuntimeErrorType.InsufficientQuota;
          break;
        }
-       // 1002 - 请求频率超限 / 1041 - 连接数限制 / 2045 - 请求频率增长超限
+       // 1002 - Request rate limit exceeded / 1041 - Connection limit / 2045 - Request rate growth limit exceeded
        case 1002:
        case 1041:
        case 2045: {
          errorType = AgentRuntimeErrorType.QuotaLimitReached;
          break;
        }
-       // 1039 - Token 限制
+       // 1039 - Token limit
        case 1039: {
          errorType = AgentRuntimeErrorType.ExceededContextWindow;
          break;
package/packages/model-runtime/src/core/streams/protocol.ts CHANGED
@@ -402,25 +402,25 @@ export const createFirstErrorHandleTransformer = (
  export const createSSEDataExtractor = () =>
    new TransformStream({
      transform(chunk: Uint8Array, controller) {
-       // Uint8Array 转换为字符串
+       // Convert Uint8Array to string
        const text = new TextDecoder().decode(chunk, { stream: true });
 
-       // 处理多行数据的情况
+       // Handle multi-line data case
        const lines = text.split('\n');
 
        for (const line of lines) {
-         // 只处理以 "data: " 开头的行
+         // Only process lines starting with "data: "
          if (line.startsWith('data: ')) {
-           // 提取 "data: " 后面的实际数据
+           // Extract the actual data after "data: "
            const jsonText = line.slice(6);
 
-           // 跳过心跳消息
+           // Skip heartbeat messages
            if (jsonText === '[DONE]') continue;
 
            try {
-             // 解析 JSON 数据
+             // Parse JSON data
              const data = JSON.parse(jsonText);
-             // 将解析后的数据传递给下一个处理器
+             // Pass parsed data to the next processor
              controller.enqueue(data);
            } catch {
              console.warn('Failed to parse SSE data:', jsonText);
@@ -441,7 +441,7 @@ export const createTokenSpeedCalculator = (
    {
      inputStartAt,
      streamStack,
-     enableStreaming = true, // 选择 TPS 计算方式(非流式时传 false)
+     enableStreaming = true, // Select TPS calculation method (pass false for non-streaming)
    }: { enableStreaming?: boolean; inputStartAt?: number; streamStack?: StreamContext } = {},
  ) => {
    let outputStartAt: number | undefined;
package/packages/model-runtime/src/core/streams/spark.ts CHANGED
@@ -23,7 +23,7 @@ export function transformSparkResponseToStream(data: OpenAI.ChatCompletion) {
      ? Array.isArray(choice.message.tool_calls)
        ? choice.message.tool_calls
        : [choice.message.tool_calls]
-     : []; // 如果不是数组,包装成数组
+     : []; // If not an array, wrap it in an array
 
    return {
      delta: {
@@ -91,7 +91,7 @@ export const transformSparkStream = (chunk: OpenAI.ChatCompletionChunk): StreamP
    if (item.delta?.tool_calls) {
      const toolCallsArray = Array.isArray(item.delta.tool_calls)
        ? item.delta.tool_calls
-       : [item.delta.tool_calls]; // 如果不是数组,包装成数组
+       : [item.delta.tool_calls]; // If not an array, wrap it in an array
 
      if (toolCallsArray.length > 0) {
        return {
@@ -108,7 +108,7 @@ export const transformSparkStream = (chunk: OpenAI.ChatCompletionChunk): StreamP
    }
 
    if (item.finish_reason) {
-     // one-api 的流式接口,会出现既有 finish_reason ,也有 content 的情况
+     // one-api's streaming interface can have both finish_reason and content
      // {"id":"demo","model":"deepl-en","choices":[{"index":0,"delta":{"role":"assistant","content":"Introduce yourself."},"finish_reason":"stop"}]}
 
      if (typeof item.delta?.content === 'string' && !!item.delta.content) {
@@ -129,7 +129,7 @@ export const transformSparkStream = (chunk: OpenAI.ChatCompletionChunk): StreamP
 
    if (typeof item.delta?.content === 'string') {
      /*
-       处理 v1 endpoint usage,混合在最后一个 content 内容中
+       Handle v1 endpoint usage, mixed in the last content
        {"code":0,"message":"Success","sid":"cha000d05ef@dx196553ae415b80a432","id":"cha000d05ef@dx196553ae415b80a432","created":1745186655,"choices":[{"delta":{"role":"assistant","content":"😊"},"index":0}],"usage":{"prompt_tokens":1,"completion_tokens":418,"total_tokens":419}}
      */
      if (chunk.usage) {
@@ -146,7 +146,7 @@ export const transformSparkStream = (chunk: OpenAI.ChatCompletionChunk): StreamP
      return { data: item.delta, id: chunk.id, type: 'data' };
    }
 
-   // 处理 v2 endpoint usage
+   // Handle v2 endpoint usage
    if (chunk.usage) {
      return { data: convertOpenAIUsage(chunk.usage), id: chunk.id, type: 'usage' };
    }
package/packages/model-runtime/src/core/streams/vertex-ai.ts CHANGED
@@ -35,7 +35,7 @@ const transformVertexAIStream = (
  }
 
  if (
-   candidate && // 首先检查是否为 reasoning 内容 (thought: true)
+   candidate && // First check if this is reasoning content (thought: true)
    Array.isArray(candidate.content?.parts) &&
    candidate.content.parts.length > 0
  ) {
@@ -88,8 +88,8 @@
    {
      data: {
        citations: groundingChunks?.map((chunk) => ({
-         // google 返回的 uri 是经过 google 自己处理过的 url,因此无法展现真实的 favicon
-         // 需要使用 title 作为替换
+         // Google returns a uri processed by Google itself, so it cannot display the real favicon
+         // Need to use title as a replacement
          favicon: chunk.web?.title,
          title: chunk.web?.title,
          url: chunk.web?.uri,
package/src/app/[variants]/(main)/agent/_layout/AgentIdSync.tsx CHANGED
@@ -26,8 +26,12 @@ const AgentIdSync = () => {
 
    // Clear activeAgentId when unmounting (leaving chat page)
    useUnmount(() => {
-     useAgentStore.setState({ activeAgentId: undefined });
-     useChatStore.setState({ activeAgentId: undefined, activeTopicId: undefined });
+     useAgentStore.setState({ activeAgentId: undefined }, false, 'AgentIdSync/unmountAgentId');
+     useChatStore.setState(
+       { activeAgentId: undefined, activeTopicId: undefined },
+       false,
+       'AgentIdSync/unmountAgentId',
+     );
    });
 
    return null;
package/src/app/[variants]/(main)/agent/features/Conversation/Header/ShareButton/index.tsx CHANGED
@@ -6,6 +6,7 @@ import dynamic from 'next/dynamic';
  import { memo } from 'react';
  import { useTranslation } from 'react-i18next';
 
+ import { withSuspense } from '@/components/withSuspense';
  import { DESKTOP_HEADER_ICON_SIZE, MOBILE_HEADER_ICON_SIZE } from '@/const/layoutTokens';
  import { useWorkspaceModal } from '@/hooks/useWorkspaceModal';
  import { useChatStore } from '@/store/chat';
@@ -54,4 +55,4 @@ const ShareButton = memo<ShareButtonProps>(({ mobile, setOpen, open }) => {
    );
  });
 
- export default ShareButton;
+ export default withSuspense(ShareButton);
package/src/app/[variants]/(main)/agent/features/Conversation/index.tsx CHANGED
@@ -1,7 +1,8 @@
  import { Flexbox, TooltipGroup } from '@lobehub/ui';
- import React, { memo } from 'react';
+ import React, { Suspense, memo } from 'react';
 
  import DragUploadZone, { useUploadFiles } from '@/components/DragUploadZone';
+ import Loading from '@/components/Loading/BrandTextLoading';
  import { useAgentStore } from '@/store/agent';
  import { agentSelectors } from '@/store/agent/selectors';
  import { useGlobalStore } from '@/store/global';
@@ -25,14 +26,20 @@ const ChatConversation = memo(() => {
    const { handleUploadFiles } = useUploadFiles({ model, provider });
 
    return (
-     <DragUploadZone onUploadFiles={handleUploadFiles} style={wrapperStyle}>
-       <Flexbox height={'100%'} style={{ overflow: 'hidden', position: 'relative' }} width={'100%'}>
-         {showHeader && <ChatHeader />}
-         <TooltipGroup>
-           <ConversationArea />
-         </TooltipGroup>
-       </Flexbox>
-     </DragUploadZone>
+     <Suspense fallback={<Loading debugId="Agent > ChatConversation" />}>
+       <DragUploadZone onUploadFiles={handleUploadFiles} style={wrapperStyle}>
+         <Flexbox
+           height={'100%'}
+           style={{ overflow: 'hidden', position: 'relative' }}
+           width={'100%'}
+         >
+           {showHeader && <ChatHeader />}
+           <TooltipGroup>
+             <ConversationArea />
+           </TooltipGroup>
+         </Flexbox>
+       </DragUploadZone>
+     </Suspense>
    );
  });
 
package/src/app/[variants]/(main)/group/profile/{features/ProfileHydration.tsx → StoreSync.tsx} RENAMED
@@ -10,7 +10,7 @@ import { parseAsString, useQueryState } from '@/hooks/useQueryParam';
  import { useChatStore } from '@/store/chat';
  import { useGroupProfileStore } from '@/store/groupProfile';
 
- const ProfileHydration = memo(() => {
+ const StoreSync = memo(() => {
    const editor = useEditor();
    const editorState = useEditorState(editor);
    const flushSave = useGroupProfileStore((s) => s.flushSave);
@@ -32,6 +32,11 @@ const ProfileHydration = memo(() => {
    useEffect(() => {
      const urlTopicId = builderTopicId ?? undefined;
      useChatStore.setState({ activeTopicId: urlTopicId });
+
+     return () => {
+       // Clear activeTopicId when unmounting (leaving group profile page)
+       useChatStore.setState({ activeTopicId: undefined }, false, 'GroupProfileUnmounted');
+     };
    }, [builderTopicId]);
 
    // Register hotkeys
@@ -40,15 +45,19 @@
 
    // Clear state when unmounting
    useUnmount(() => {
-     useGroupProfileStore.setState({
-       activeTabId: 'group',
-       editor: undefined,
-       editorState: undefined,
-       saveStateMap: {},
-     });
+     useGroupProfileStore.setState(
+       {
+         activeTabId: 'group',
+         editor: undefined,
+         editorState: undefined,
+         saveStateMap: {},
+       },
+       false,
+       'GroupProfileUnmounted',
+     );
    });
 
    return null;
  });
 
- export default ProfileHydration;
+ export default StoreSync;
package/src/app/[variants]/(main)/group/profile/features/Header/index.tsx CHANGED
@@ -7,9 +7,12 @@ import { memo, useMemo, useState } from 'react';
  import { useTranslation } from 'react-i18next';
 
  import AddGroupMemberModal from '@/app/[variants]/(main)/group/_layout/Sidebar/AddGroupMemberModal';
+ import ToggleLeftPanelButton from '@/features/NavPanel/ToggleLeftPanelButton';
  import { parseAsString, useQueryState } from '@/hooks/useQueryParam';
  import { useAgentGroupStore } from '@/store/agentGroup';
  import { agentGroupSelectors } from '@/store/agentGroup/selectors';
+ import { useGlobalStore } from '@/store/global';
+ import { systemStatusSelectors } from '@/store/global/selectors';
 
  import AgentBuilderToggle from './AgentBuilderToggle';
  import ChromeTabs, { type ChromeTabItem } from './ChromeTabs';
@@ -46,6 +49,7 @@ const Header = memo(() => {
    const members = useAgentGroupStore(agentGroupSelectors.currentGroupAgents);
    const activeGroupId = useAgentGroupStore(agentGroupSelectors.activeGroupId);
    const addAgentsToGroup = useAgentGroupStore((s) => s.addAgentsToGroup);
+   const showLeftPanel = useGlobalStore(systemStatusSelectors.showLeftPanel);
 
    // Use URL query param for selected tab
    const [selectedTabId, setSelectedTabId] = useQueryState(
@@ -86,7 +90,8 @@
 
    return (
      <>
-       <Flexbox align="center" className={styles.header} horizontal justify="space-between">
+       <Flexbox align="center" className={styles.header} gap={4} horizontal justify="space-between">
+         {!showLeftPanel && <ToggleLeftPanelButton />}
        <div className={styles.tabsWrapper}>
          <ChromeTabs
            activeId={selectedTabId}
package/src/app/[variants]/(main)/group/profile/index.tsx CHANGED
@@ -9,11 +9,11 @@ import { useAgentGroupStore } from '@/store/agentGroup';
  import { agentGroupSelectors } from '@/store/agentGroup/selectors';
  import { useGroupProfileStore } from '@/store/groupProfile';
 
+ import StoreSync from './StoreSync';
  import AgentBuilder from './features/AgentBuilder';
  import GroupProfileSettings from './features/GroupProfile';
  import Header from './features/Header';
  import MemberProfile from './features/MemberProfile';
- import ProfileHydration from './features/ProfileHydration';
 
  const ProfileArea = memo(() => {
    const editor = useGroupProfileStore((s) => s.editor);
@@ -51,7 +51,7 @@ const ProfileArea = memo(() => {
  const GroupProfile: FC = () => {
    return (
      <Suspense fallback={<Loading debugId="GroupProfile" />}>
-       <ProfileHydration />
+       <StoreSync />
        <Flexbox height={'100%'} horizontal width={'100%'}>
          <ProfileArea />
          <AgentBuilder />
package/src/features/AgentBuilder/TopicSelector.tsx CHANGED
@@ -37,7 +37,9 @@ const TopicSelector = memo<TopicSelectorProps>(({ agentId }) => {
    const { t } = useTranslation('topic');
 
    // Fetch topics for the agent builder
-   useChatStore((s) => s.useFetchTopics)(true, { agentId });
+   const useFetchTopics = useChatStore((s) => s.useFetchTopics);
+
+   useFetchTopics(true, { agentId });
 
    const [activeTopicId, switchTopic, topics] = useChatStore((s) => [
      s.activeTopicId,
package/src/tools/streamings.ts CHANGED
@@ -1,3 +1,7 @@
+ import {
+   AgentBuilderManifest,
+   AgentBuilderStreamings,
+ } from '@lobechat/builtin-tool-agent-builder/client';
  import {
    CloudSandboxManifest,
    CloudSandboxStreamings,
@@ -28,6 +32,7 @@ import { type BuiltinStreaming } from '@lobechat/types';
   * The component should fetch streaming content from store internally.
   */
  const BuiltinToolStreamings: Record<string, Record<string, BuiltinStreaming>> = {
+   [AgentBuilderManifest.identifier]: AgentBuilderStreamings as Record<string, BuiltinStreaming>,
    [CloudSandboxManifest.identifier]: CloudSandboxStreamings as Record<string, BuiltinStreaming>,
    [GroupAgentBuilderManifest.identifier]: GroupAgentBuilderStreamings as Record<
      string,
package/vercel.json CHANGED
@@ -1,4 +1,4 @@
  {
    "buildCommand": "bun run build:vercel",
-   "installCommand": "npx bun@1.2.23 install"
+   "installCommand": "npx pnpm@10.26.2 install"
  }
  }