@lobehub/lobehub 2.0.0-next.301 → 2.0.0-next.303
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +58 -0
- package/changelog/v1.json +21 -0
- package/package.json +1 -1
- package/packages/builtin-tool-agent-builder/src/client/Streaming/UpdatePrompt/index.tsx +27 -0
- package/packages/builtin-tool-agent-builder/src/client/Streaming/index.ts +16 -0
- package/packages/builtin-tool-agent-builder/src/client/index.ts +3 -0
- package/packages/model-bank/src/aiModels/infiniai.ts +2 -2
- package/packages/model-bank/src/aiModels/ollamacloud.ts +2 -2
- package/packages/model-bank/src/aiModels/siliconcloud.ts +1 -1
- package/packages/model-bank/src/aiModels/volcengine.ts +2 -2
- package/packages/model-bank/src/aiModels/zhipu.ts +2 -2
- package/packages/model-bank/src/modelProviders/comfyui.ts +8 -8
- package/packages/model-bank/src/modelProviders/moonshot.ts +1 -1
- package/packages/model-bank/src/modelProviders/stepfun.ts +1 -1
- package/packages/model-bank/src/modelProviders/zeroone.ts +1 -1
- package/packages/model-runtime/src/core/streams/anthropic.ts +1 -1
- package/packages/model-runtime/src/core/streams/google/index.ts +4 -4
- package/packages/model-runtime/src/core/streams/model.ts +26 -26
- package/packages/model-runtime/src/core/streams/ollama.ts +2 -2
- package/packages/model-runtime/src/core/streams/openai/openai.ts +7 -7
- package/packages/model-runtime/src/core/streams/protocol.ts +8 -8
- package/packages/model-runtime/src/core/streams/spark.ts +5 -5
- package/packages/model-runtime/src/core/streams/vertex-ai.ts +3 -3
- package/src/app/[variants]/(main)/agent/_layout/AgentIdSync.tsx +6 -2
- package/src/app/[variants]/(main)/agent/features/Conversation/Header/ShareButton/index.tsx +2 -1
- package/src/app/[variants]/(main)/agent/features/Conversation/index.tsx +16 -9
- package/src/app/[variants]/(main)/group/profile/{features/ProfileHydration.tsx → StoreSync.tsx} +17 -8
- package/src/app/[variants]/(main)/group/profile/features/Header/index.tsx +6 -1
- package/src/app/[variants]/(main)/group/profile/index.tsx +2 -2
- package/src/features/AgentBuilder/TopicSelector.tsx +3 -1
- package/src/features/Conversation/Messages/Assistant/index.tsx +1 -1
- package/src/features/Conversation/Messages/AssistantGroup/components/MessageContent.tsx +3 -3
- package/src/features/Conversation/Messages/Supervisor/components/MessageContent.tsx +2 -2
- package/src/features/Conversation/Messages/components/ContentLoading.tsx +5 -3
- package/src/store/chat/slices/operation/__tests__/selectors.test.ts +165 -0
- package/src/store/chat/slices/operation/selectors.ts +23 -0
- package/src/tools/streamings.ts +5 -0
- package/vercel.json +1 -1
package/CHANGELOG.md
CHANGED

@@ -2,6 +2,64 @@
 
 # Changelog
 
+## [Version 2.0.0-next.303](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.302...v2.0.0-next.303)
+
+<sup>Released on **2026-01-18**</sup>
+
+#### 💄 Styles
+
+- **misc**: Improve operation hint and fix scroll issue.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Improve operation hint and fix scroll issue, closes [#11573](https://github.com/lobehub/lobe-chat/issues/11573) ([8505d14](https://github.com/lobehub/lobe-chat/commit/8505d14))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+## [Version 2.0.0-next.302](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.301...v2.0.0-next.302)
+
+<sup>Released on **2026-01-17**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Try to fix group supervisor id not sync successful.
+
+#### 💄 Styles
+
+- **misc**: Fix left panel on group page.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Try to fix group supervisor id not sync successful, closes [#11570](https://github.com/lobehub/lobe-chat/issues/11570) ([ef51c17](https://github.com/lobehub/lobe-chat/commit/ef51c17))
+
+#### Styles
+
+- **misc**: Fix left panel on group page, closes [#11571](https://github.com/lobehub/lobe-chat/issues/11571) ([de81a42](https://github.com/lobehub/lobe-chat/commit/de81a42))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ## [Version 2.0.0-next.301](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.300...v2.0.0-next.301)
 
 <sup>Released on **2026-01-17**</sup>
package/changelog/v1.json
CHANGED

@@ -1,4 +1,25 @@
 [
+  {
+    "children": {
+      "improvements": [
+        "Improve operation hint and fix scroll issue."
+      ]
+    },
+    "date": "2026-01-18",
+    "version": "2.0.0-next.303"
+  },
+  {
+    "children": {
+      "fixes": [
+        "Try to fix group supervisor id not sync successful."
+      ],
+      "improvements": [
+        "Fix left panel on group page."
+      ]
+    },
+    "date": "2026-01-17",
+    "version": "2.0.0-next.302"
+  },
   {
     "children": {},
     "date": "2026-01-17",
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/lobehub",
-  "version": "2.0.0-next.301",
+  "version": "2.0.0-next.303",
   "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/packages/builtin-tool-agent-builder/src/client/Streaming/UpdatePrompt/index.tsx
ADDED

@@ -0,0 +1,27 @@
+'use client';
+
+import type { BuiltinStreamingProps } from '@lobechat/types';
+import { Block, Markdown } from '@lobehub/ui';
+import { memo } from 'react';
+
+import type { UpdatePromptParams } from '../../../types';
+
+export const UpdatePromptStreaming = memo<BuiltinStreamingProps<UpdatePromptParams>>(
+  ({ args }) => {
+    const { prompt } = args || {};
+
+    if (!prompt) return null;
+
+    return (
+      <Block paddingBlock={8} paddingInline={12} variant={'outlined'} width="100%">
+        <Markdown animated variant={'chat'}>
+          {prompt}
+        </Markdown>
+      </Block>
+    );
+  },
+);
+
+UpdatePromptStreaming.displayName = 'UpdatePromptStreaming';
+
+export default UpdatePromptStreaming;
package/packages/builtin-tool-agent-builder/src/client/Streaming/index.ts
ADDED

@@ -0,0 +1,16 @@
+import type { BuiltinStreaming } from '@lobechat/types';
+
+import { AgentBuilderApiName } from '../../types';
+import { UpdatePromptStreaming } from './UpdatePrompt';
+
+/**
+ * Agent Builder Streaming Components Registry
+ *
+ * Streaming components render tool calls while they are
+ * still executing, allowing real-time feedback to users.
+ */
+export const AgentBuilderStreamings: Record<string, BuiltinStreaming> = {
+  [AgentBuilderApiName.updatePrompt]: UpdatePromptStreaming as BuiltinStreaming,
+};
+
+export { UpdatePromptStreaming } from './UpdatePrompt';
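The registry above maps tool API names to streaming React components. A minimal sketch (hypothetical consumer code, not part of this diff) of how such a registry is resolved at render time — look up by tool identifier first, then by API name:

```ts
import type { BuiltinStreaming } from '@lobechat/types';

// Nested registries: tool identifier → API name → streaming component.
type StreamingRegistry = Record<string, Record<string, BuiltinStreaming>>;

// Returns the streaming component for a running tool call, or undefined
// when the tool has no streaming renderer registered.
const resolveStreaming = (
  registries: StreamingRegistry,
  identifier: string,
  apiName: string,
): BuiltinStreaming | undefined => registries[identifier]?.[apiName];
```

This mirrors how `BuiltinToolStreamings` in `package/src/tools/streamings.ts` (changed later in this diff) keys the `AgentBuilderStreamings` map by `AgentBuilderManifest.identifier`.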
package/packages/builtin-tool-agent-builder/src/client/index.ts
CHANGED

@@ -14,6 +14,9 @@ export { AgentBuilderInterventions } from './Intervention';
 // Render components (read-only snapshots)
 export { AgentBuilderRenders } from './Render';
 
+// Streaming components (real-time tool execution feedback)
+export { AgentBuilderStreamings } from './Streaming';
+
 // Re-export types and manifest for convenience
 export { AgentBuilderManifest } from '../manifest';
 export * from '../types';
package/packages/model-bank/src/aiModels/infiniai.ts
CHANGED

@@ -51,7 +51,7 @@ const infiniaiChatModels: AIChatModelCard[] = [
   },
   contextWindowTokens: 200_000,
   description:
-    'MiniMax-M2.1
+    'MiniMax-M2.1 is the latest version of the MiniMax series, optimized for multilingual programming and real-world complex tasks. As an AI-native model, MiniMax-M2.1 achieves significant improvements in model performance, agent framework support, and multi-scenario adaptation, aiming to help enterprises and individuals find AI-native work and lifestyle more quickly.',
   displayName: 'MiniMax M2.1',
   enabled: true,
   id: 'minimax-m2.1',
@@ -72,7 +72,7 @@ const infiniaiChatModels: AIChatModelCard[] = [
   },
   contextWindowTokens: 200_000,
   description:
-    'MiniMax-M2
+    'MiniMax-M2 is a Mixture of Experts (MoE) language model optimized for coding and agent workflows, with approximately 230B total parameters and approximately 10B active parameters. While maintaining strong general intelligence, it is deeply enhanced for developer scenarios such as multi-file editing, code-run-fix loops, and test validation fixes, demonstrating stable and efficient performance in real environments like terminals, IDEs, and CI.',
   displayName: 'MiniMax M2',
   id: 'minimax-m2',
   maxOutput: 200_000,
package/packages/model-bank/src/aiModels/ollamacloud.ts
CHANGED

@@ -82,7 +82,7 @@ const ollamaCloudModels: AIChatModelCard[] = [
   },
   contextWindowTokens: 200_000,
   description:
-
+    "GLM-4.7 is Zhipu's latest flagship model, enhanced for Agentic Coding scenarios with improved coding capabilities, long-term task planning, and tool collaboration. It achieves leading performance among open-source models on multiple public benchmarks. General capabilities are improved with more concise and natural responses and more immersive writing. For complex agent tasks, instruction following during tool calls is stronger, and the frontend aesthetics and long-term task completion efficiency of Artifacts and Agentic Coding are further enhanced.",
   displayName: 'GLM-4.7',
   enabled: true,
   id: 'glm-4.7',
@@ -95,7 +95,7 @@ const ollamaCloudModels: AIChatModelCard[] = [
   },
   contextWindowTokens: 200_000,
   description:
-    '
+    "Zhipu's flagship model GLM-4.6 (355B) fully surpasses its predecessors in advanced coding, long-text processing, reasoning, and agent capabilities. It particularly aligns with Claude Sonnet 4 in programming ability, becoming China's top Coding model.",
   displayName: 'GLM-4.6',
   id: 'glm-4.6',
   type: 'chat',
package/packages/model-bank/src/aiModels/siliconcloud.ts
CHANGED

@@ -9,7 +9,7 @@ const siliconcloudChatModels: AIChatModelCard[] = [
   },
   contextWindowTokens: 200_000,
   description:
-
+    "GLM-4.7 is Zhipu's new generation flagship model with 355B total parameters and 32B active parameters, fully upgraded in general dialogue, reasoning, and agent capabilities. GLM-4.7 enhances Interleaved Thinking and introduces Preserved Thinking and Turn-level Thinking.",
   displayName: 'GLM-4.7 (Pro)',
   id: 'Pro/zai-org/glm-4.7',
   pricing: {
package/packages/model-bank/src/aiModels/volcengine.ts
CHANGED

@@ -14,7 +14,7 @@ const doubaoChatModels: AIChatModelCard[] = [
   },
   contextWindowTokens: 256_000,
   description:
-    'Doubao-Seed-1.8
+    'Doubao-Seed-1.8 has stronger multimodal understanding and Agent capabilities, supports text/image/video input and context caching, and can deliver excellent performance in complex tasks.',
   displayName: 'Doubao Seed 1.8',
   enabled: true,
   id: 'doubao-seed-1.8',
@@ -127,7 +127,7 @@ const doubaoChatModels: AIChatModelCard[] = [
   },
   contextWindowTokens: 131_072,
   description:
-    'DeepSeek-V3.2
+    'DeepSeek-V3.2 is the first hybrid reasoning model from DeepSeek that integrates thinking into tool usage. It uses efficient architecture to save computation, large-scale reinforcement learning to enhance capabilities, and large-scale synthetic task data to strengthen generalization. The combination of these three achieves performance comparable to GPT-5-High, with significantly reduced output length, notably decreasing computational overhead and user wait times.',
   displayName: 'DeepSeek V3.2',
   enabled: true,
   id: 'deepseek-v3.2',
package/packages/model-bank/src/aiModels/zhipu.ts
CHANGED

@@ -12,7 +12,7 @@ const zhipuChatModels: AIChatModelCard[] = [
   },
   contextWindowTokens: 200_000,
   description:
-
+    "GLM-4.7 is Zhipu's latest flagship model, enhanced for Agentic Coding scenarios with improved coding capabilities, long-term task planning, and tool collaboration. It achieves leading performance among open-source models on multiple public benchmarks. General capabilities are improved with more concise and natural responses and more immersive writing. For complex agent tasks, instruction following during tool calls is stronger, and the frontend aesthetics and long-term task completion efficiency of Artifacts and Agentic Coding are further enhanced.",
   displayName: 'GLM-4.7',
   enabled: true,
   id: 'glm-4.7',
@@ -75,7 +75,7 @@ const zhipuChatModels: AIChatModelCard[] = [
   },
   contextWindowTokens: 200_000,
   description:
-    '
+    "Zhipu's latest flagship model GLM-4.6 (355B) fully surpasses its predecessors in advanced coding, long-text processing, reasoning, and agent capabilities. It particularly aligns with Claude Sonnet 4 in programming ability, becoming China's top Coding model.",
   displayName: 'GLM-4.6',
   id: 'glm-4.6',
   maxOutput: 131_072,
package/packages/model-bank/src/modelProviders/comfyui.ts
CHANGED

@@ -3,8 +3,8 @@ import { type ModelProviderCard } from '@/types/llm';
 /**
  * ComfyUI Provider Configuration
  *
- *
- *
+ * Supports local and remote ComfyUI server connections
+ * Provides image generation capabilities for FLUX series models
  *
  * @see https://www.comfy.org/
  */
@@ -16,22 +16,22 @@ const ComfyUI: ModelProviderCard = {
   id: 'comfyui',
   name: 'ComfyUI',
   settings: {
-    //
+    // Disable direct browser requests, proxy through server
     disableBrowserRequest: true,
 
-    // SDK
+    // SDK type identifier
     sdkType: 'comfyui',
 
-    //
+    // Hide add new model button (models managed via configuration)
     showAddNewModel: false,
 
-    //
+    // Show API key configuration (for authentication setup)
     showApiKey: true,
 
-    //
+    // Hide connectivity check (image generation doesn't support chat interface checks)
     showChecker: false,
 
-    //
+    // Hide model fetcher (use predefined models)
     showModelFetcher: false,
   },
   url: 'https://www.comfy.org/',
package/packages/model-bank/src/modelProviders/stepfun.ts
CHANGED

@@ -1,7 +1,7 @@
 import { type ModelProviderCard } from '@/types/llm';
 
 // ref: https://platform.stepfun.com/docs/llm/text
-//
+// According to the documentation, for Stepfun models, the 'k' in context length refers to 1000
 const Stepfun: ModelProviderCard = {
   chatModels: [],
   checkModel: 'step-2-mini',
package/packages/model-bank/src/modelProviders/zeroone.ts
CHANGED

@@ -8,7 +8,7 @@ const ZeroOne: ModelProviderCard = {
     '01.AI drives a human-centered AI 2.0 revolution, using LLMs to create economic and social value and build new AI ecosystems and business models.',
   id: 'zeroone',
   modelList: { showModelFetcher: true },
-  modelsUrl: 'https://platform.lingyiwanwu.com/docs
+  modelsUrl: 'https://platform.lingyiwanwu.com/docs#models-and-pricing',
   name: '01.AI',
   settings: {
     proxyUrl: {
package/packages/model-runtime/src/core/streams/anthropic.ts
CHANGED

@@ -223,7 +223,7 @@ export const transformAnthropicStream = (
 
 export interface AnthropicStreamOptions {
   callbacks?: ChatStreamCallbacks;
-  enableStreaming?: boolean; //
+  enableStreaming?: boolean; // Select TPS calculation method (pass false for non-streaming)
   inputStartAt?: number;
   payload?: ChatPayloadForTransformStream;
 }
package/packages/model-runtime/src/core/streams/google/index.ts
CHANGED

@@ -241,8 +241,8 @@ const transformGoogleGenerativeAIStream = (
       {
         data: {
           citations: groundingChunks?.map((chunk) => ({
-            //
-            //
+            // Google returns a uri processed by Google itself, so it cannot display the real favicon
+            // Need to use title as a replacement
             favicon: chunk.web?.title,
             title: chunk.web?.title,
             url: chunk.web?.uri,
@@ -293,7 +293,7 @@ const transformGoogleGenerativeAIStream = (
       ...usageChunks,
     ].filter(Boolean) as StreamProtocolChunk[];
   }
-  //
+  // When there is finishReason but no text content, send an empty text chunk to stop the loading animation
   return [
     { data: '', id: context?.id, type: 'text' },
     { data: candidate.finishReason, id: context?.id, type: 'stop' },
@@ -312,7 +312,7 @@ const transformGoogleGenerativeAIStream = (
 
 export interface GoogleAIStreamOptions {
   callbacks?: ChatStreamCallbacks;
-  enableStreaming?: boolean; //
+  enableStreaming?: boolean; // Select TPS calculation method (pass false for non-streaming)
   inputStartAt?: number;
   payload?: ChatPayloadForTransformStream;
 }
package/packages/model-runtime/src/core/streams/model.ts
CHANGED

@@ -1,5 +1,5 @@
 /**
- *
+ * Convert async iterator to JSON format ReadableStream
  */
 export const createModelPullStream = <
   T extends { completed?: number; digest?: string; status: string; total?: number },
@@ -7,48 +7,48 @@ export const createModelPullStream = <
   iterable: AsyncIterable<T>,
   model: string,
   {
-    onCancel, //
+    onCancel, // Added: callback function to call on cancellation
   }: {
-    onCancel?: (reason?: any) => void; //
+    onCancel?: (reason?: any) => void; // Callback function signature
   } = {},
 ): ReadableStream => {
-  let iterator: AsyncIterator<T>; //
+  let iterator: AsyncIterator<T>; // Track iterator externally so we can call return on cancellation
 
   return new ReadableStream({
-    //
+    // Implement cancel method
    cancel(reason) {
-      //
+      // Call the onCancel callback to execute external cleanup logic (e.g., client.abort())
      if (onCancel) {
        onCancel(reason);
      }
 
-      //
-      //
+      // Attempt to gracefully terminate the iterator
+      // Note: This depends on whether the AsyncIterable implementation supports return/throw
      if (iterator && typeof iterator.return === 'function') {
-        //
+        // No need to await, let it execute cleanup in the background
        iterator.return().catch();
      }
    },
    async start(controller) {
-      iterator = iterable[Symbol.asyncIterator](); //
+      iterator = iterable[Symbol.asyncIterator](); // Get iterator
 
      const encoder = new TextEncoder();
 
      try {
        // eslint-disable-next-line no-constant-condition
        while (true) {
-          //
+          // Wait for the next data chunk or iteration completion
          const { value: progress, done } = await iterator.next();
 
-          //
+          // If iteration is complete, break the loop
          if (done) {
            break;
          }
 
-          //
+          // Ignore 'pulling manifest' status as it does not contain progress
          if (progress.status === 'pulling manifest') continue;
 
-          //
+          // Format to standard format and write to stream
          const progressData =
            JSON.stringify({
              completed: progress.completed,
@@ -61,24 +61,24 @@ export const createModelPullStream = <
          controller.enqueue(encoder.encode(progressData));
        }
 
-        //
+        // Normal completion
        controller.close();
      } catch (error) {
-        //
+        // Handle errors
 
-        //
+        // If error is caused by abort operation, handle silently or log, then try to close stream
        if (error instanceof DOMException && error.name === 'AbortError') {
-          //
-          //
+          // No need to enqueue error message as connection may already be disconnected
+          // Try to close normally; if already cancelled, controller may be closed or errored
          try {
            controller.enqueue(new TextEncoder().encode(JSON.stringify({ status: 'cancelled' })));
            controller.close();
          } catch {
-            //
+            // Ignore close errors, stream may already be handled by cancellation mechanism
          }
        } else {
          console.error('[createModelPullStream] model download stream error:', error);
-          //
+          // For other errors, try to send error message to client
          const errorMessage = error instanceof Error ? error.message : String(error);
          const errorData =
            JSON.stringify({
@@ -88,20 +88,20 @@ export const createModelPullStream = <
            }) + '\n';
 
          try {
-            //
+            // Only try to enqueue if stream is still expecting data
            if (controller.desiredSize !== null && controller.desiredSize > 0) {
              controller.enqueue(encoder.encode(errorData));
            }
          } catch (enqueueError) {
            console.error('[createModelPullStream] Error enqueueing error message:', enqueueError);
-            //
+            // If this also fails, connection is likely disconnected
          }
 
-          //
+          // Try to close stream or mark as error state
          try {
-            controller.close(); //
+            controller.close(); // Try to close normally
          } catch {
-            controller.error(error); //
+            controller.error(error); // If closing fails, put stream in error state
          }
        }
      }
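The stream produced by `createModelPullStream` writes one JSON progress object per line (the error path above appends `'\n'`; the progress path is assumed to do the same). A hedged consumer sketch using only standard web-stream APIs:

```ts
// Reads newline-delimited JSON progress from the pull stream. The progress
// shape ({ completed?, total?, status }) is assumed from the code above.
async function readPullProgress(stream: ReadableStream<Uint8Array>) {
  const reader = stream.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });

    const lines = buffer.split('\n');
    buffer = lines.pop() ?? ''; // keep any trailing partial line for the next chunk
    for (const line of lines) {
      if (!line.trim()) continue;
      const progress = JSON.parse(line) as { completed?: number; status: string; total?: number };
      console.log(progress.status, progress.completed, progress.total);
    }
  }
}
```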
package/packages/model-runtime/src/core/streams/ollama.ts
CHANGED

@@ -36,14 +36,14 @@ const transformOllamaStream = (chunk: ChatResponse, stack: StreamContext): Strea
     return { data: 'finished', id: stack.id, type: 'stop' };
   }
 
-  //
+  // Check for <think> or </think> tags and update thinkingInContent state
   if (chunk.message.content.includes('<think>')) {
     stack.thinkingInContent = true;
   } else if (chunk.message.content.includes('</think>')) {
     stack.thinkingInContent = false;
   }
 
-  //
+  // Remove <think> and </think> tags, and determine return type based on current thinking mode
   return {
     data: chunk.message.content.replaceAll(/<\/?think>/g, ''),
     id: stack.id,
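The hunk above is cut off before the returned `type` field. A small standalone sketch of the tag-handling rule it describes, with assumed shapes — the `'reasoning'`/`'text'` split is an assumption about what the thinking flag selects, not code from this package:

```ts
interface ThinkStateSketch {
  id: string;
  thinkingInContent?: boolean;
}

// Toggle the thinking flag on tag boundaries, strip the tags from the payload,
// and classify the remaining text by the current flag.
const classifyThinkChunk = (content: string, stack: ThinkStateSketch) => {
  if (content.includes('<think>')) stack.thinkingInContent = true;
  else if (content.includes('</think>')) stack.thinkingInContent = false;

  return {
    data: content.replaceAll(/<\/?think>/g, ''),
    id: stack.id,
    type: stack.thinkingInContent ? 'reasoning' : 'text',
  } as const;
};
```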
package/packages/model-runtime/src/core/streams/openai/openai.ts
CHANGED

@@ -88,8 +88,8 @@ const transformOpenAIStream = (
     return { data: errorData, id: 'first_chunk_error', type: 'error' };
   }
 
-  // MiniMax
-  //
+  // MiniMax returns business errors (e.g., insufficient balance) in base_resp, but not through FIRST_CHUNK_ERROR_KEY
+  // Typical response: { id: '...', choices: null, base_resp: { status_code: 1008, status_msg: 'insufficient balance' }, usage: {...} }
   if ((chunk as any).base_resp && typeof (chunk as any).base_resp.status_code === 'number') {
     const baseResp = (chunk as any).base_resp as {
       message?: string;
@@ -98,29 +98,29 @@ const transformOpenAIStream = (
     };
 
     if (baseResp.status_code !== 0) {
-      //
+      // Map MiniMax error codes to corresponding error types
       let errorType: ILobeAgentRuntimeErrorType = AgentRuntimeErrorType.ProviderBizError;
 
       switch (baseResp.status_code) {
-        // 1004 -
+        // 1004 - Unauthorized / Token mismatch / 2049 - Invalid API Key
        case 1004:
        case 2049: {
          errorType = AgentRuntimeErrorType.InvalidProviderAPIKey;
          break;
        }
-        // 1008 -
+        // 1008 - Insufficient balance
        case 1008: {
          errorType = AgentRuntimeErrorType.InsufficientQuota;
          break;
        }
-        // 1002 -
+        // 1002 - Request rate limit exceeded / 1041 - Connection limit / 2045 - Request rate growth limit exceeded
        case 1002:
        case 1041:
        case 2045: {
          errorType = AgentRuntimeErrorType.QuotaLimitReached;
          break;
        }
-        // 1039 - Token
+        // 1039 - Token limit
        case 1039: {
          errorType = AgentRuntimeErrorType.ExceededContextWindow;
          break;
package/packages/model-runtime/src/core/streams/protocol.ts
CHANGED

@@ -402,25 +402,25 @@ export const createFirstErrorHandleTransformer = (
 export const createSSEDataExtractor = () =>
   new TransformStream({
     transform(chunk: Uint8Array, controller) {
-      //
+      // Convert Uint8Array to string
       const text = new TextDecoder().decode(chunk, { stream: true });
 
-      //
+      // Handle multi-line data case
       const lines = text.split('\n');
 
       for (const line of lines) {
-        //
+        // Only process lines starting with "data: "
        if (line.startsWith('data: ')) {
-          //
+          // Extract the actual data after "data: "
          const jsonText = line.slice(6);
 
-          //
+          // Skip heartbeat messages
          if (jsonText === '[DONE]') continue;
 
          try {
-            //
+            // Parse JSON data
            const data = JSON.parse(jsonText);
-            //
+            // Pass parsed data to the next processor
            controller.enqueue(data);
          } catch {
            console.warn('Failed to parse SSE data:', jsonText);
@@ -441,7 +441,7 @@ export const createTokenSpeedCalculator = (
   {
     inputStartAt,
     streamStack,
-    enableStreaming = true, //
+    enableStreaming = true, // Select TPS calculation method (pass false for non-streaming)
   }: { enableStreaming?: boolean; inputStartAt?: number; streamStack?: StreamContext } = {},
 ) => {
   let outputStartAt: number | undefined;
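A standalone restatement of the `data: ` extraction rule in `createSSEDataExtractor` above, usable for eyeballing the parsing behavior outside a stream pipeline (same logic, plain function):

```ts
// Extracts JSON payloads from raw SSE text: keep only "data: " lines,
// skip the [DONE] terminator, and JSON-parse the remainder.
const extractSSEPayloads = (text: string): unknown[] => {
  const payloads: unknown[] = [];
  for (const line of text.split('\n')) {
    if (!line.startsWith('data: ')) continue;
    const jsonText = line.slice(6);
    if (jsonText === '[DONE]') continue;
    try {
      payloads.push(JSON.parse(jsonText));
    } catch {
      console.warn('Failed to parse SSE data:', jsonText);
    }
  }
  return payloads;
};

// Example: extractSSEPayloads('data: {"delta":"hi"}\ndata: [DONE]\n') → [{ delta: 'hi' }]
```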
package/packages/model-runtime/src/core/streams/spark.ts
CHANGED

@@ -23,7 +23,7 @@ export function transformSparkResponseToStream(data: OpenAI.ChatCompletion) {
       ? Array.isArray(choice.message.tool_calls)
         ? choice.message.tool_calls
         : [choice.message.tool_calls]
-      : []; //
+      : []; // If not an array, wrap it in an array
 
     return {
       delta: {
@@ -91,7 +91,7 @@ export const transformSparkStream = (chunk: OpenAI.ChatCompletionChunk): StreamP
   if (item.delta?.tool_calls) {
     const toolCallsArray = Array.isArray(item.delta.tool_calls)
       ? item.delta.tool_calls
-      : [item.delta.tool_calls]; //
+      : [item.delta.tool_calls]; // If not an array, wrap it in an array
 
     if (toolCallsArray.length > 0) {
       return {
@@ -108,7 +108,7 @@ export const transformSparkStream = (chunk: OpenAI.ChatCompletionChunk): StreamP
   }
 
   if (item.finish_reason) {
-    // one-api
+    // one-api's streaming interface can have both finish_reason and content
     // {"id":"demo","model":"deepl-en","choices":[{"index":0,"delta":{"role":"assistant","content":"Introduce yourself."},"finish_reason":"stop"}]}
 
     if (typeof item.delta?.content === 'string' && !!item.delta.content) {
@@ -129,7 +129,7 @@ export const transformSparkStream = (chunk: OpenAI.ChatCompletionChunk): StreamP
 
   if (typeof item.delta?.content === 'string') {
     /*
-
+      Handle v1 endpoint usage, mixed in the last content
       {"code":0,"message":"Success","sid":"cha000d05ef@dx196553ae415b80a432","id":"cha000d05ef@dx196553ae415b80a432","created":1745186655,"choices":[{"delta":{"role":"assistant","content":"😊"},"index":0}],"usage":{"prompt_tokens":1,"completion_tokens":418,"total_tokens":419}}
     */
     if (chunk.usage) {
@@ -146,7 +146,7 @@ export const transformSparkStream = (chunk: OpenAI.ChatCompletionChunk): StreamP
     return { data: item.delta, id: chunk.id, type: 'data' };
   }
 
-  //
+  // Handle v2 endpoint usage
   if (chunk.usage) {
     return { data: convertOpenAIUsage(chunk.usage), id: chunk.id, type: 'usage' };
   }
package/packages/model-runtime/src/core/streams/vertex-ai.ts
CHANGED

@@ -35,7 +35,7 @@ const transformVertexAIStream = (
   }
 
   if (
-    candidate && //
+    candidate && // First check if this is reasoning content (thought: true)
     Array.isArray(candidate.content?.parts) &&
     candidate.content.parts.length > 0
   ) {
@@ -88,8 +88,8 @@ const transformVertexAIStream = (
       {
         data: {
           citations: groundingChunks?.map((chunk) => ({
-            //
-            //
+            // Google returns a uri processed by Google itself, so it cannot display the real favicon
+            // Need to use title as a replacement
            favicon: chunk.web?.title,
            title: chunk.web?.title,
            url: chunk.web?.uri,
package/src/app/[variants]/(main)/agent/_layout/AgentIdSync.tsx
CHANGED

@@ -26,8 +26,12 @@ const AgentIdSync = () => {
 
   // Clear activeAgentId when unmounting (leaving chat page)
   useUnmount(() => {
-    useAgentStore.setState({ activeAgentId: undefined });
-    useChatStore.setState(
+    useAgentStore.setState({ activeAgentId: undefined }, false, 'AgentIdSync/unmountAgentId');
+    useChatStore.setState(
+      { activeAgentId: undefined, activeTopicId: undefined },
+      false,
+      'AgentIdSync/unmountAgentId',
+    );
   });
 
   return null;
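The extra `false` and string arguments follow zustand's devtools middleware signature, `setState(partial, replace?, action?)`, where the action label is what appears in Redux DevTools traces. A minimal self-contained sketch, assuming a devtools-wrapped store (which the three-argument `setState` in this diff implies):

```ts
import { create } from 'zustand';
import { devtools } from 'zustand/middleware';

interface AgentSyncState {
  activeAgentId?: string;
}

// Toy store wrapped in devtools so setState gains the extra arguments.
const useAgentStoreSketch = create<AgentSyncState>()(devtools(() => ({ activeAgentId: undefined })));

useAgentStoreSketch.setState(
  { activeAgentId: undefined }, // partial update
  false, // merge with existing state rather than replacing it
  'AgentIdSync/unmountAgentId', // action name surfaced in devtools
);
```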
package/src/app/[variants]/(main)/agent/features/Conversation/Header/ShareButton/index.tsx
CHANGED

@@ -6,6 +6,7 @@ import dynamic from 'next/dynamic';
 import { memo } from 'react';
 import { useTranslation } from 'react-i18next';
 
+import { withSuspense } from '@/components/withSuspense';
 import { DESKTOP_HEADER_ICON_SIZE, MOBILE_HEADER_ICON_SIZE } from '@/const/layoutTokens';
 import { useWorkspaceModal } from '@/hooks/useWorkspaceModal';
 import { useChatStore } from '@/store/chat';
@@ -54,4 +55,4 @@ const ShareButton = memo<ShareButtonProps>(({ mobile, setOpen, open }) => {
   );
 });
 
-export default ShareButton;
+export default withSuspense(ShareButton);
package/src/app/[variants]/(main)/agent/features/Conversation/index.tsx
CHANGED

@@ -1,7 +1,8 @@
 import { Flexbox, TooltipGroup } from '@lobehub/ui';
-import React, { memo } from 'react';
+import React, { Suspense, memo } from 'react';
 
 import DragUploadZone, { useUploadFiles } from '@/components/DragUploadZone';
+import Loading from '@/components/Loading/BrandTextLoading';
 import { useAgentStore } from '@/store/agent';
 import { agentSelectors } from '@/store/agent/selectors';
 import { useGlobalStore } from '@/store/global';
@@ -25,14 +26,20 @@ const ChatConversation = memo(() => {
   const { handleUploadFiles } = useUploadFiles({ model, provider });
 
   return (
-    <
-    <
-
-
-
-
-
-
+    <Suspense fallback={<Loading debugId="Agent > ChatConversation" />}>
+      <DragUploadZone onUploadFiles={handleUploadFiles} style={wrapperStyle}>
+        <Flexbox
+          height={'100%'}
+          style={{ overflow: 'hidden', position: 'relative' }}
+          width={'100%'}
+        >
+          {showHeader && <ChatHeader />}
+          <TooltipGroup>
+            <ConversationArea />
+          </TooltipGroup>
+        </Flexbox>
+      </DragUploadZone>
+    </Suspense>
   );
 });
 
package/src/app/[variants]/(main)/group/profile/{features/ProfileHydration.tsx → StoreSync.tsx}
RENAMED

@@ -10,7 +10,7 @@ import { parseAsString, useQueryState } from '@/hooks/useQueryParam';
 import { useChatStore } from '@/store/chat';
 import { useGroupProfileStore } from '@/store/groupProfile';
 
-const ProfileHydration = memo(() => {
+const StoreSync = memo(() => {
   const editor = useEditor();
   const editorState = useEditorState(editor);
   const flushSave = useGroupProfileStore((s) => s.flushSave);
@@ -32,6 +32,11 @@ const ProfileHydration = memo(() => {
   useEffect(() => {
     const urlTopicId = builderTopicId ?? undefined;
     useChatStore.setState({ activeTopicId: urlTopicId });
+
+    return () => {
+      // Clear activeTopicId when unmounting (leaving group profile page)
+      useChatStore.setState({ activeTopicId: undefined }, false, 'GroupProfileUnmounted');
+    };
   }, [builderTopicId]);
 
   // Register hotkeys
@@ -40,15 +45,19 @@ const ProfileHydration = memo(() => {
 
   // Clear state when unmounting
   useUnmount(() => {
-    useGroupProfileStore.setState(
-
-
-
-
-
+    useGroupProfileStore.setState(
+      {
+        activeTabId: 'group',
+        editor: undefined,
+        editorState: undefined,
+        saveStateMap: {},
+      },
+      false,
+      'GroupProfileUnmounted',
+    );
   });
 
   return null;
 });
 
-export default ProfileHydration;
+export default StoreSync;
package/src/app/[variants]/(main)/group/profile/features/Header/index.tsx
CHANGED

@@ -7,9 +7,12 @@ import { memo, useMemo, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 
 import AddGroupMemberModal from '@/app/[variants]/(main)/group/_layout/Sidebar/AddGroupMemberModal';
+import ToggleLeftPanelButton from '@/features/NavPanel/ToggleLeftPanelButton';
 import { parseAsString, useQueryState } from '@/hooks/useQueryParam';
 import { useAgentGroupStore } from '@/store/agentGroup';
 import { agentGroupSelectors } from '@/store/agentGroup/selectors';
+import { useGlobalStore } from '@/store/global';
+import { systemStatusSelectors } from '@/store/global/selectors';
 
 import AgentBuilderToggle from './AgentBuilderToggle';
 import ChromeTabs, { type ChromeTabItem } from './ChromeTabs';
@@ -46,6 +49,7 @@ const Header = memo(() => {
   const members = useAgentGroupStore(agentGroupSelectors.currentGroupAgents);
   const activeGroupId = useAgentGroupStore(agentGroupSelectors.activeGroupId);
   const addAgentsToGroup = useAgentGroupStore((s) => s.addAgentsToGroup);
+  const showLeftPanel = useGlobalStore(systemStatusSelectors.showLeftPanel);
 
   // Use URL query param for selected tab
   const [selectedTabId, setSelectedTabId] = useQueryState(
@@ -86,7 +90,8 @@ const Header = memo(() => {
 
   return (
     <>
-      <Flexbox align="center" className={styles.header} horizontal justify="space-between">
+      <Flexbox align="center" className={styles.header} gap={4} horizontal justify="space-between">
+        {!showLeftPanel && <ToggleLeftPanelButton />}
       <div className={styles.tabsWrapper}>
         <ChromeTabs
           activeId={selectedTabId}
package/src/app/[variants]/(main)/group/profile/index.tsx
CHANGED

@@ -9,11 +9,11 @@ import { useAgentGroupStore } from '@/store/agentGroup';
 import { agentGroupSelectors } from '@/store/agentGroup/selectors';
 import { useGroupProfileStore } from '@/store/groupProfile';
 
+import StoreSync from './StoreSync';
 import AgentBuilder from './features/AgentBuilder';
 import GroupProfileSettings from './features/GroupProfile';
 import Header from './features/Header';
 import MemberProfile from './features/MemberProfile';
-import ProfileHydration from './features/ProfileHydration';
 
 const ProfileArea = memo(() => {
   const editor = useGroupProfileStore((s) => s.editor);
@@ -51,7 +51,7 @@ const ProfileArea = memo(() => {
 const GroupProfile: FC = () => {
   return (
     <Suspense fallback={<Loading debugId="GroupProfile" />}>
-      <ProfileHydration />
+      <StoreSync />
       <Flexbox height={'100%'} horizontal width={'100%'}>
         <ProfileArea />
         <AgentBuilder />
package/src/features/AgentBuilder/TopicSelector.tsx
CHANGED

@@ -37,7 +37,9 @@ const TopicSelector = memo<TopicSelectorProps>(({ agentId }) => {
   const { t } = useTranslation('topic');
 
   // Fetch topics for the agent builder
-  useChatStore((s) => s.useFetchTopics)
+  const useFetchTopics = useChatStore((s) => s.useFetchTopics);
+
+  useFetchTopics(true, { agentId });
 
   const [activeTopicId, switchTopic, topics] = useChatStore((s) => [
     s.activeTopicId,
package/src/features/Conversation/Messages/Assistant/index.tsx
CHANGED

@@ -58,7 +58,7 @@ const AssistantMessage = memo<AssistantMessageProps>(
   const editing = useConversationStore(messageStateSelectors.isMessageEditing(id));
   const generating = useConversationStore(messageStateSelectors.isMessageGenerating(id));
   const creating = useConversationStore(messageStateSelectors.isMessageCreating(id));
-  const newScreen = useNewScreen({ creating, isLatestItem });
+  const newScreen = useNewScreen({ creating: creating || generating, isLatestItem });
 
   const errorContent = useErrorContent(error);
 
package/src/features/Conversation/Messages/AssistantGroup/components/MessageContent.tsx
CHANGED

@@ -1,9 +1,9 @@
 import { createStaticStyles, cx } from 'antd-style';
 import { memo } from 'react';
 
-import BubblesLoading from '@/components/BubblesLoading';
 import { LOADING_FLAT } from '@/const/message';
 import MarkdownMessage from '@/features/Conversation/Markdown';
+import ContentLoading from '@/features/Conversation/Messages/components/ContentLoading';
 
 import { normalizeThinkTags, processWithArtifact } from '../../../utils/markdown';
 import { useMarkdown } from '../useMarkdown';
@@ -25,12 +25,12 @@ const MessageContent = memo<ContentBlockProps>(({ content, hasTools, id }) => {
   const message = normalizeThinkTags(processWithArtifact(content));
   const markdownProps = useMarkdown(id);
 
-  if (!content && !hasTools) return <BubblesLoading />;
+  if (!content && !hasTools) return <ContentLoading id={id} />;
 
   if (content === LOADING_FLAT) {
     if (hasTools) return null;
 
-    return <BubblesLoading />;
+    return <ContentLoading id={id} />;
   }
 
   return (
package/src/features/Conversation/Messages/Supervisor/components/MessageContent.tsx
CHANGED

@@ -1,11 +1,11 @@
 import { memo } from 'react';
 
-import BubblesLoading from '@/components/BubblesLoading';
 import { LOADING_FLAT } from '@/const/message';
 import MarkdownMessage from '@/features/Conversation/Markdown';
 
 import { normalizeThinkTags, processWithArtifact } from '../../../utils/markdown';
 import { useMarkdown } from '../../AssistantGroup/useMarkdown';
+import ContentLoading from '../../components/ContentLoading';
 
 interface ContentBlockProps {
   content: string;
@@ -20,7 +20,7 @@ const MessageContent = memo<ContentBlockProps>(({ content, id, hasTools }) => {
   if (!content || content === LOADING_FLAT) {
     if (hasTools) return null;
 
-    return <BubblesLoading />;
+    return <ContentLoading id={id} />;
   }
 
   return content && <MarkdownMessage {...markdownProps}>{message}</MarkdownMessage>;
package/src/features/Conversation/Messages/components/ContentLoading.tsx
CHANGED

@@ -9,17 +9,17 @@ import type { OperationType } from '@/store/chat/slices/operation/types';
 
 const ELAPSED_TIME_THRESHOLD = 2100; // Show elapsed time after 2 seconds
 
+const NO_NEED_SHOW_DOT_OP_TYPES = new Set<OperationType>(['reasoning']);
+
 interface ContentLoadingProps {
   id: string;
 }
 
 const ContentLoading = memo<ContentLoadingProps>(({ id }) => {
   const { t } = useTranslation('chat');
-  const
+  const runningOp = useChatStore(operationSelectors.getDeepestRunningOperationByMessage(id));
   const [elapsedSeconds, setElapsedSeconds] = useState(0);
 
-  // Get the running operation
-  const runningOp = operations.find((op) => op.status === 'running');
   const operationType = runningOp?.type as OperationType | undefined;
   const startTime = runningOp?.metadata?.startTime;
 
@@ -48,6 +48,8 @@ const ContentLoading = memo<ContentLoadingProps>(({ id }) => {
 
   const showElapsedTime = elapsedSeconds >= ELAPSED_TIME_THRESHOLD / 1000;
 
+  if (operationType && NO_NEED_SHOW_DOT_OP_TYPES.has(operationType)) return null;
+
   return (
     <Flexbox align={'center'} horizontal>
       <BubblesLoading />
package/src/store/chat/slices/operation/__tests__/selectors.test.ts
CHANGED

@@ -188,6 +188,171 @@ describe('Operation Selectors', () => {
     });
   });
 
+  describe('getDeepestRunningOperationByMessage', () => {
+    it('should return undefined when no operations exist', () => {
+      const { result } = renderHook(() => useChatStore());
+
+      const deepestOp = operationSelectors.getDeepestRunningOperationByMessage('msg1')(
+        result.current,
+      );
+
+      expect(deepestOp).toBeUndefined();
+    });
+
+    it('should return undefined when no running operations exist', () => {
+      const { result } = renderHook(() => useChatStore());
+
+      let opId: string;
+      act(() => {
+        opId = result.current.startOperation({
+          type: 'execAgentRuntime',
+          context: { agentId: 'session1', messageId: 'msg1' },
+        }).operationId;
+        result.current.associateMessageWithOperation('msg1', opId);
+        result.current.completeOperation(opId);
+      });
+
+      const deepestOp = operationSelectors.getDeepestRunningOperationByMessage('msg1')(
+        result.current,
+      );
+
+      expect(deepestOp).toBeUndefined();
+    });
+
+    it('should return the only running operation when there is one', () => {
+      const { result } = renderHook(() => useChatStore());
+
+      let opId: string;
+      act(() => {
+        opId = result.current.startOperation({
+          type: 'execAgentRuntime',
+          context: { agentId: 'session1', messageId: 'msg1' },
+        }).operationId;
+        result.current.associateMessageWithOperation('msg1', opId);
+      });
+
+      const deepestOp = operationSelectors.getDeepestRunningOperationByMessage('msg1')(
+        result.current,
+      );
+
+      expect(deepestOp).toBeDefined();
+      expect(deepestOp?.type).toBe('execAgentRuntime');
+    });
+
+    it('should return the leaf operation in a parent-child tree', () => {
+      const { result } = renderHook(() => useChatStore());
+
+      let parentOpId: string;
+      let childOpId: string;
+
+      act(() => {
+        // Start parent operation
+        parentOpId = result.current.startOperation({
+          type: 'execAgentRuntime',
+          context: { agentId: 'session1', messageId: 'msg1' },
+        }).operationId;
+        result.current.associateMessageWithOperation('msg1', parentOpId);
+
+        // Start child operation
+        childOpId = result.current.startOperation({
+          type: 'reasoning',
+          context: { agentId: 'session1', messageId: 'msg1' },
+          parentOperationId: parentOpId,
+        }).operationId;
+        result.current.associateMessageWithOperation('msg1', childOpId);
+      });
+
+      const deepestOp = operationSelectors.getDeepestRunningOperationByMessage('msg1')(
+        result.current,
+      );
+
+      // Should return the child (reasoning) not the parent (execAgentRuntime)
+      expect(deepestOp).toBeDefined();
+      expect(deepestOp?.type).toBe('reasoning');
+      expect(deepestOp?.id).toBe(childOpId!);
+    });
+
+    it('should return the deepest leaf in a multi-level tree', () => {
+      const { result } = renderHook(() => useChatStore());
+
+      let rootOpId: string;
+      let level1OpId: string;
+      let level2OpId: string;
+
+      act(() => {
+        // Level 0: root operation
+        rootOpId = result.current.startOperation({
+          type: 'execAgentRuntime',
+          context: { agentId: 'session1', messageId: 'msg1' },
+        }).operationId;
+        result.current.associateMessageWithOperation('msg1', rootOpId);
+
+        // Level 1: child of root
+        level1OpId = result.current.startOperation({
+          type: 'callLLM',
+          context: { agentId: 'session1', messageId: 'msg1' },
+          parentOperationId: rootOpId,
+        }).operationId;
+        result.current.associateMessageWithOperation('msg1', level1OpId);
+
+        // Level 2: grandchild (deepest)
+        level2OpId = result.current.startOperation({
+          type: 'reasoning',
+          context: { agentId: 'session1', messageId: 'msg1' },
+          parentOperationId: level1OpId,
+        }).operationId;
+        result.current.associateMessageWithOperation('msg1', level2OpId);
+      });
+
+      const deepestOp = operationSelectors.getDeepestRunningOperationByMessage('msg1')(
+        result.current,
+      );
+
+      // Should return the deepest leaf (reasoning at level 2)
+      expect(deepestOp).toBeDefined();
+      expect(deepestOp?.type).toBe('reasoning');
+      expect(deepestOp?.id).toBe(level2OpId!);
+    });
+
+    it('should return parent when child operation completes', () => {
+      const { result } = renderHook(() => useChatStore());
+
+      let parentOpId: string;
+      let childOpId: string;
+
+      act(() => {
+        parentOpId = result.current.startOperation({
+          type: 'execAgentRuntime',
+          context: { agentId: 'session1', messageId: 'msg1' },
+        }).operationId;
+        result.current.associateMessageWithOperation('msg1', parentOpId);
+
+        childOpId = result.current.startOperation({
+          type: 'reasoning',
+          context: { agentId: 'session1', messageId: 'msg1' },
+          parentOperationId: parentOpId,
+        }).operationId;
+        result.current.associateMessageWithOperation('msg1', childOpId);
+      });
+
+      // Before completing child
+      let deepestOp = operationSelectors.getDeepestRunningOperationByMessage('msg1')(
+        result.current,
+      );
+      expect(deepestOp?.type).toBe('reasoning');
+
+      // Complete child operation
+      act(() => {
+        result.current.completeOperation(childOpId);
+      });
+
+      // After completing child, parent should be the deepest running
+      deepestOp = operationSelectors.getDeepestRunningOperationByMessage('msg1')(result.current);
+      expect(deepestOp?.type).toBe('execAgentRuntime');
+      expect(deepestOp?.id).toBe(parentOpId!);
+    });
+  });
+
 describe('isMessageProcessing', () => {
   it('should return true if message has running operations', () => {
     const { result } = renderHook(() => useChatStore());
package/src/store/chat/slices/operation/selectors.ts
CHANGED

@@ -355,6 +355,28 @@ const isAnyMessageLoading =
     return messageIds.some((id) => isMessageProcessing(id)(s));
   };
 
+/**
+ * Get the deepest running operation for a message (leaf node in operation tree)
+ * Operations form a tree structure via parentOperationId/childOperationIds
+ * This returns the most specific (deepest) running operation for UI display
+ */
+const getDeepestRunningOperationByMessage =
+  (messageId: string) =>
+  (s: ChatStoreState): Operation | undefined => {
+    const operations = getOperationsByMessage(messageId)(s);
+    const runningOps = operations.filter((op) => op.status === 'running');
+
+    if (runningOps.length === 0) return undefined;
+
+    const runningOpIds = new Set(runningOps.map((op) => op.id));
+
+    // A leaf running operation has no running children
+    return runningOps.find((op) => {
+      const childIds = op.childOperationIds || [];
+      return !childIds.some((childId) => runningOpIds.has(childId));
+    });
+  };
+
 /**
  * Check if a specific message is being regenerated
 */
@@ -440,6 +462,7 @@ export const operationSelectors = {
   getCurrentContextOperations,
   getCurrentOperationLabel,
   getCurrentOperationProgress,
+  getDeepestRunningOperationByMessage,
   getOperationById,
   getOperationContextFromMessage,
   getOperationsByContext,
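A standalone sketch of the leaf-search rule behind `getDeepestRunningOperationByMessage` above, with minimal assumed shapes (the real `Operation` type carries more fields): a running operation is "deepest" when none of its children are still running.

```ts
interface OpSketch {
  childOperationIds?: string[];
  id: string;
  status: 'running' | 'completed';
  type: string;
}

const findDeepestRunning = (ops: OpSketch[]): OpSketch | undefined => {
  const running = ops.filter((op) => op.status === 'running');
  if (running.length === 0) return undefined;
  const runningIds = new Set(running.map((op) => op.id));
  // Keep the first running op none of whose children are running.
  return running.find((op) => !(op.childOperationIds ?? []).some((id) => runningIds.has(id)));
};

// Example matching the tests above: a running execAgentRuntime parent with a
// running reasoning child → the reasoning child is returned.
const ops: OpSketch[] = [
  { childOperationIds: ['b'], id: 'a', status: 'running', type: 'execAgentRuntime' },
  { id: 'b', status: 'running', type: 'reasoning' },
];
console.log(findDeepestRunning(ops)?.type); // 'reasoning'
```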
package/src/tools/streamings.ts
CHANGED

@@ -1,3 +1,7 @@
+import {
+  AgentBuilderManifest,
+  AgentBuilderStreamings,
+} from '@lobechat/builtin-tool-agent-builder/client';
 import {
   CloudSandboxManifest,
   CloudSandboxStreamings,
@@ -28,6 +32,7 @@ import { type BuiltinStreaming } from '@lobechat/types';
  * The component should fetch streaming content from store internally.
  */
 const BuiltinToolStreamings: Record<string, Record<string, BuiltinStreaming>> = {
+  [AgentBuilderManifest.identifier]: AgentBuilderStreamings as Record<string, BuiltinStreaming>,
   [CloudSandboxManifest.identifier]: CloudSandboxStreamings as Record<string, BuiltinStreaming>,
   [GroupAgentBuilderManifest.identifier]: GroupAgentBuilderStreamings as Record<
     string,
package/vercel.json
CHANGED