@lobehub/chat 0.156.1 → 0.157.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. package/CHANGELOG.md +42 -0
  2. package/Dockerfile +4 -1
  3. package/package.json +3 -2
  4. package/src/config/modelProviders/anthropic.ts +3 -0
  5. package/src/config/modelProviders/google.ts +3 -0
  6. package/src/config/modelProviders/groq.ts +5 -1
  7. package/src/config/modelProviders/minimax.ts +10 -7
  8. package/src/config/modelProviders/mistral.ts +1 -0
  9. package/src/config/modelProviders/moonshot.ts +3 -0
  10. package/src/config/modelProviders/zhipu.ts +2 -6
  11. package/src/config/server/provider.ts +1 -1
  12. package/src/database/client/core/db.ts +32 -0
  13. package/src/database/client/core/schemas.ts +9 -0
  14. package/src/database/client/models/__tests__/message.test.ts +2 -2
  15. package/src/database/client/schemas/message.ts +8 -1
  16. package/src/features/AgentSetting/store/action.ts +15 -6
  17. package/src/features/Conversation/Actions/Tool.tsx +16 -0
  18. package/src/features/Conversation/Actions/index.ts +2 -2
  19. package/src/features/Conversation/Messages/Assistant/ToolCalls/index.tsx +78 -0
  20. package/src/features/Conversation/Messages/Assistant/ToolCalls/style.ts +25 -0
  21. package/src/features/Conversation/Messages/Assistant/index.tsx +47 -0
  22. package/src/features/Conversation/Messages/Default.tsx +4 -1
  23. package/src/features/Conversation/{Plugins → Messages/Tool}/Inspector/index.tsx +34 -35
  24. package/src/features/Conversation/Messages/Tool/index.tsx +44 -0
  25. package/src/features/Conversation/Messages/index.ts +3 -2
  26. package/src/features/Conversation/Plugins/Render/StandaloneType/Iframe.tsx +1 -1
  27. package/src/features/Conversation/components/SkeletonList.tsx +2 -2
  28. package/src/features/Conversation/index.tsx +2 -3
  29. package/src/libs/agent-runtime/BaseAI.ts +2 -9
  30. package/src/libs/agent-runtime/anthropic/index.test.ts +195 -0
  31. package/src/libs/agent-runtime/anthropic/index.ts +71 -15
  32. package/src/libs/agent-runtime/azureOpenai/index.ts +12 -13
  33. package/src/libs/agent-runtime/bedrock/index.ts +24 -18
  34. package/src/libs/agent-runtime/google/index.test.ts +154 -0
  35. package/src/libs/agent-runtime/google/index.ts +91 -10
  36. package/src/libs/agent-runtime/groq/index.test.ts +41 -72
  37. package/src/libs/agent-runtime/groq/index.ts +7 -0
  38. package/src/libs/agent-runtime/minimax/index.test.ts +2 -2
  39. package/src/libs/agent-runtime/minimax/index.ts +14 -37
  40. package/src/libs/agent-runtime/mistral/index.test.ts +0 -53
  41. package/src/libs/agent-runtime/mistral/index.ts +1 -0
  42. package/src/libs/agent-runtime/moonshot/index.test.ts +1 -71
  43. package/src/libs/agent-runtime/ollama/index.test.ts +197 -0
  44. package/src/libs/agent-runtime/ollama/index.ts +3 -3
  45. package/src/libs/agent-runtime/openai/index.test.ts +0 -53
  46. package/src/libs/agent-runtime/openrouter/index.test.ts +1 -53
  47. package/src/libs/agent-runtime/perplexity/index.test.ts +0 -71
  48. package/src/libs/agent-runtime/perplexity/index.ts +2 -3
  49. package/src/libs/agent-runtime/togetherai/__snapshots__/index.test.ts.snap +886 -0
  50. package/src/libs/agent-runtime/togetherai/fixtures/models.json +8111 -0
  51. package/src/libs/agent-runtime/togetherai/index.test.ts +16 -54
  52. package/src/libs/agent-runtime/types/chat.ts +19 -3
  53. package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts +120 -1
  54. package/src/libs/agent-runtime/utils/anthropicHelpers.ts +67 -4
  55. package/src/libs/agent-runtime/utils/debugStream.test.ts +70 -0
  56. package/src/libs/agent-runtime/utils/debugStream.ts +39 -9
  57. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +521 -0
  58. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +76 -5
  59. package/src/libs/agent-runtime/utils/response.ts +12 -0
  60. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +197 -0
  61. package/src/libs/agent-runtime/utils/streams/anthropic.ts +91 -0
  62. package/src/libs/agent-runtime/utils/streams/bedrock/claude.ts +21 -0
  63. package/src/libs/agent-runtime/utils/streams/bedrock/common.ts +32 -0
  64. package/src/libs/agent-runtime/utils/streams/bedrock/index.ts +3 -0
  65. package/src/libs/agent-runtime/utils/streams/bedrock/llama.test.ts +196 -0
  66. package/src/libs/agent-runtime/utils/streams/bedrock/llama.ts +51 -0
  67. package/src/libs/agent-runtime/utils/streams/google-ai.test.ts +97 -0
  68. package/src/libs/agent-runtime/utils/streams/google-ai.ts +68 -0
  69. package/src/libs/agent-runtime/utils/streams/index.ts +7 -0
  70. package/src/libs/agent-runtime/utils/streams/minimax.ts +39 -0
  71. package/src/libs/agent-runtime/utils/streams/ollama.test.ts +77 -0
  72. package/src/libs/agent-runtime/utils/streams/ollama.ts +38 -0
  73. package/src/libs/agent-runtime/utils/streams/openai.test.ts +263 -0
  74. package/src/libs/agent-runtime/utils/streams/openai.ts +79 -0
  75. package/src/libs/agent-runtime/utils/streams/protocol.ts +100 -0
  76. package/src/libs/agent-runtime/zeroone/index.test.ts +1 -53
  77. package/src/libs/agent-runtime/zhipu/index.test.ts +1 -1
  78. package/src/libs/agent-runtime/zhipu/index.ts +3 -2
  79. package/src/locales/default/plugin.ts +3 -4
  80. package/src/migrations/FromV4ToV5/fixtures/from-v1-to-v5-output.json +245 -0
  81. package/src/migrations/FromV4ToV5/fixtures/function-input-v4.json +96 -0
  82. package/src/migrations/FromV4ToV5/fixtures/function-output-v5.json +120 -0
  83. package/src/migrations/FromV4ToV5/index.ts +58 -0
  84. package/src/migrations/FromV4ToV5/migrations.test.ts +49 -0
  85. package/src/migrations/FromV4ToV5/types/v4.ts +21 -0
  86. package/src/migrations/FromV4ToV5/types/v5.ts +27 -0
  87. package/src/migrations/index.ts +8 -1
  88. package/src/services/__tests__/chat.test.ts +10 -20
  89. package/src/services/chat.ts +78 -65
  90. package/src/store/chat/slices/enchance/action.ts +15 -10
  91. package/src/store/chat/slices/message/action.test.ts +36 -86
  92. package/src/store/chat/slices/message/action.ts +70 -79
  93. package/src/store/chat/slices/message/reducer.ts +18 -1
  94. package/src/store/chat/slices/message/selectors.test.ts +38 -68
  95. package/src/store/chat/slices/message/selectors.ts +1 -22
  96. package/src/store/chat/slices/plugin/action.test.ts +147 -203
  97. package/src/store/chat/slices/plugin/action.ts +96 -82
  98. package/src/store/chat/slices/share/action.test.ts +3 -3
  99. package/src/store/chat/slices/share/action.ts +1 -1
  100. package/src/store/chat/slices/topic/action.ts +7 -2
  101. package/src/store/tool/selectors/tool.ts +6 -24
  102. package/src/store/tool/slices/builtin/action.test.ts +90 -0
  103. package/src/types/llm.ts +1 -1
  104. package/src/types/message/index.ts +9 -4
  105. package/src/types/message/tools.ts +57 -0
  106. package/src/types/openai/chat.ts +6 -0
  107. package/src/utils/fetch.test.ts +245 -1
  108. package/src/utils/fetch.ts +120 -44
  109. package/src/utils/toolCall.ts +21 -0
  110. package/src/features/Conversation/Messages/Assistant.tsx +0 -26
  111. package/src/features/Conversation/Messages/Function.tsx +0 -35
  112. package/src/libs/agent-runtime/ollama/stream.ts +0 -31
  113. /package/src/features/Conversation/{Plugins → Messages/Tool}/Inspector/PluginResultJSON.tsx +0 -0
  114. /package/src/features/Conversation/{Plugins → Messages/Tool}/Inspector/Settings.tsx +0 -0
  115. /package/src/features/Conversation/{Plugins → Messages/Tool}/Inspector/style.ts +0 -0
package/CHANGELOG.md CHANGED
@@ -2,6 +2,48 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ## [Version 0.157.0](https://github.com/lobehub/lobe-chat/compare/v0.156.2...v0.157.0)
6
+
7
+ <sup>Released on **2024-05-11**</sup>
8
+
9
+ #### ✨ Features
10
+
11
+ - **misc**: upgrade to the new `tool calls` mode.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's improved
19
+
20
+ - **misc**: upgrade to the new `tool calls` mode, closes [#2414](https://github.com/lobehub/lobe-chat/issues/2414) ([7404f3b](https://github.com/lobehub/lobe-chat/commit/7404f3b))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ### [Version 0.156.2](https://github.com/lobehub/lobe-chat/compare/v0.156.1...v0.156.2)
31
+
32
+ <sup>Released on **2024-05-10**</sup>
33
+
34
+ <br/>
35
+
36
+ <details>
37
+ <summary><kbd>Improvements and Fixes</kbd></summary>
38
+
39
+ </details>
40
+
41
+ <div align="right">
42
+
43
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
44
+
45
+ </div>
46
+
5
47
  ### [Version 0.156.1](https://github.com/lobehub/lobe-chat/compare/v0.156.0...v0.156.1)
6
48
 
7
49
  <sup>Released on **2024-05-10**</sup>
package/Dockerfile CHANGED
@@ -44,8 +44,11 @@ ENV NEXT_PUBLIC_ANALYTICS_UMAMI ""
44
44
  ENV NEXT_PUBLIC_UMAMI_SCRIPT_URL ""
45
45
  ENV NEXT_PUBLIC_UMAMI_WEBSITE_ID ""
46
46
 
47
+ # Node
48
+ ENV NODE_OPTIONS "--max-old-space-size=8192"
47
49
 
48
- RUN npm run build:docker # run build standalone for docker version
50
+ # run build standalone for docker version
51
+ RUN npm run build:docker
49
52
 
50
53
  ## Production image, copy all the files and run next
51
54
  FROM base AS runner
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "0.156.1",
3
+ "version": "0.157.0",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -82,7 +82,7 @@
82
82
  },
83
83
  "dependencies": {
84
84
  "@ant-design/icons": "^5.3.6",
85
- "@anthropic-ai/sdk": "^0.18.0",
85
+ "@anthropic-ai/sdk": "^0.20.9",
86
86
  "@auth/core": "0.28.0",
87
87
  "@aws-sdk/client-bedrock-runtime": "^3.565.0",
88
88
  "@azure/openai": "^1.0.0-beta.12",
@@ -97,6 +97,7 @@
97
97
  "@lobehub/icons": "latest",
98
98
  "@lobehub/tts": "latest",
99
99
  "@lobehub/ui": "^1.138.17",
100
+ "@microsoft/fetch-event-source": "^2.0.1",
100
101
  "@next/third-parties": "^14.2.3",
101
102
  "@sentry/nextjs": "^7.112.2",
102
103
  "@t3-oss/env-nextjs": "^0.10.1",
@@ -8,6 +8,7 @@ const Anthropic: ModelProviderCard = {
8
8
  'Ideal balance of intelligence and speed for enterprise workloads. Maximum utility at a lower price, dependable, balanced for scaled deployments',
9
9
  displayName: 'Claude 3 Sonnet',
10
10
  enabled: true,
11
+ functionCall: true,
11
12
  id: 'claude-3-sonnet-20240229',
12
13
  maxOutput: 4096,
13
14
  tokens: 200_000,
@@ -18,6 +19,7 @@ const Anthropic: ModelProviderCard = {
18
19
  'Most powerful model for highly complex tasks. Top-level performance, intelligence, fluency, and understanding',
19
20
  displayName: 'Claude 3 Opus',
20
21
  enabled: true,
22
+ functionCall: true,
21
23
  id: 'claude-3-opus-20240229',
22
24
  maxOutput: 4096,
23
25
  tokens: 200_000,
@@ -28,6 +30,7 @@ const Anthropic: ModelProviderCard = {
28
30
  'Fastest and most compact model for near-instant responsiveness. Quick and accurate targeted performance',
29
31
  displayName: 'Claude 3 Haiku',
30
32
  enabled: true,
33
+ functionCall: true,
31
34
  id: 'claude-3-haiku-20240307',
32
35
  maxOutput: 4096,
33
36
  tokens: 200_000,
@@ -22,6 +22,7 @@ const Google: ModelProviderCard = {
22
22
  description: 'The best model for scaling across a wide range of tasks',
23
23
  displayName: 'Gemini 1.0 Pro',
24
24
  enabled: true,
25
+ functionCall: true,
25
26
  id: 'gemini-pro',
26
27
  maxOutput: 2048,
27
28
  tokens: 30_720 + 2048,
@@ -47,6 +48,7 @@ const Google: ModelProviderCard = {
47
48
  description:
48
49
  'The best model for scaling across a wide range of tasks. This is a stable model that supports tuning.',
49
50
  displayName: 'Gemini 1.0 Pro 001 (Tuning)',
51
+ functionCall: true,
50
52
  id: 'gemini-1.0-pro-001',
51
53
  maxOutput: 2048,
52
54
  tokens: 30_720 + 2048,
@@ -71,6 +73,7 @@ const Google: ModelProviderCard = {
71
73
  description: 'Mid-size multimodal model that supports up to 1 million tokens',
72
74
  displayName: 'Gemini 1.5 Pro',
73
75
  enabled: true,
76
+ functionCall: true,
74
77
  id: 'gemini-1.5-pro-latest',
75
78
  maxOutput: 8192,
76
79
  tokens: 1_048_576 + 8192,
@@ -6,24 +6,28 @@ const Groq: ModelProviderCard = {
6
6
  {
7
7
  displayName: 'LLaMA3-3-70B',
8
8
  enabled: true,
9
+ functionCall: true,
9
10
  id: 'llama3-70b-8192',
10
11
  tokens: 8192,
11
12
  },
12
13
  {
13
- displayName: 'Mixtral-8x7b-Instruct-v0.1',
14
+ displayName: 'Mixtral-8x7b',
14
15
  enabled: true,
16
+ functionCall: true,
15
17
  id: 'mixtral-8x7b-32768',
16
18
  tokens: 32_768,
17
19
  },
18
20
  {
19
21
  displayName: 'Gemma-7b-it',
20
22
  enabled: true,
23
+ functionCall: true,
21
24
  id: 'gemma-7b-it',
22
25
  tokens: 8192,
23
26
  },
24
27
  {
25
28
  displayName: 'LLaMA3-3-8B',
26
29
  enabled: true,
30
+ functionCall: true,
27
31
  id: 'llama3-8b-8192',
28
32
  tokens: 8192,
29
33
  },
@@ -3,24 +3,27 @@ import { ModelProviderCard } from '@/types/llm';
3
3
  // ref https://www.minimaxi.com/document/guides/chat-model/pro/api
4
4
  const Minimax: ModelProviderCard = {
5
5
  chatModels: [
6
- {
7
- description: '复杂场景,例如应用题计算、科学计算等场景',
8
- displayName: 'abab6.5',
9
- enabled: true,
10
- id: 'abab6.5-chat',
11
- tokens: 8192,
12
- },
13
6
  {
14
7
  description: '通用场景',
15
8
  displayName: 'abab6.5s',
16
9
  enabled: true,
10
+ functionCall: true,
17
11
  id: 'abab6.5s-chat',
18
12
  tokens: 245_760,
19
13
  },
14
+ {
15
+ description: '复杂场景,例如应用题计算、科学计算等场景',
16
+ displayName: 'abab6.5',
17
+ enabled: true,
18
+ functionCall: true,
19
+ id: 'abab6.5-chat',
20
+ tokens: 8192,
21
+ },
20
22
  {
21
23
  description: '更复杂的格式化文本生成',
22
24
  displayName: 'abab6',
23
25
  enabled: true,
26
+ functionCall: true,
24
27
  id: 'abab6-chat',
25
28
  tokens: 32_768,
26
29
  },
@@ -18,6 +18,7 @@ const Mistral: ModelProviderCard = {
18
18
  {
19
19
  displayName: 'Mixtral 8x22B',
20
20
  enabled: true,
21
+ functionCall: true,
21
22
  id: 'open-mixtral-8x22b',
22
23
  tokens: 65_536,
23
24
  },
@@ -6,18 +6,21 @@ const Moonshot: ModelProviderCard = {
6
6
  {
7
7
  displayName: 'Moonshot V1 8K',
8
8
  enabled: true,
9
+ functionCall: true,
9
10
  id: 'moonshot-v1-8k',
10
11
  tokens: 8192,
11
12
  },
12
13
  {
13
14
  displayName: 'Moonshot V1 32K',
14
15
  enabled: true,
16
+ functionCall: true,
15
17
  id: 'moonshot-v1-32k',
16
18
  tokens: 32_768,
17
19
  },
18
20
  {
19
21
  displayName: 'Moonshot V1 128K',
20
22
  enabled: true,
23
+ functionCall: true,
21
24
  id: 'moonshot-v1-128k',
22
25
  tokens: 128_000,
23
26
  },
@@ -1,9 +1,5 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
- // TODO: 等待 ZhiPu 修复 API 问题后开启 functionCall
4
- // 暂时不透出 GLM 系列的 function_call 功能
5
- // refs https://github.com/lobehub/lobe-chat/discussions/737#discussioncomment-8315815
6
-
7
3
  // ref https://open.bigmodel.cn/dev/howuse/model
8
4
  const ZhiPu: ModelProviderCard = {
9
5
  chatModels: [
@@ -11,7 +7,7 @@ const ZhiPu: ModelProviderCard = {
11
7
  description: '最新的 GLM-4 、最大支持 128k 上下文、支持 Function Call 、Retreival',
12
8
  displayName: 'GLM-4',
13
9
  enabled: true,
14
- // functionCall: true,
10
+ functionCall: true,
15
11
  id: 'glm-4',
16
12
  tokens: 128_000,
17
13
  },
@@ -28,7 +24,7 @@ const ZhiPu: ModelProviderCard = {
28
24
  description: '最新的glm-3-turbo、最大支持 128k上下文、支持Function Call、Retreival',
29
25
  displayName: 'GLM-3 Turbo',
30
26
  enabled: true,
31
- // functionCall: true,
27
+ functionCall: true,
32
28
  id: 'glm-3-turbo',
33
29
  tokens: 128_000,
34
30
  },
@@ -211,7 +211,7 @@ export const getProviderConfig = () => {
211
211
  AWS_ACCESS_KEY_ID: AWS_ACCESS_KEY_ID,
212
212
  AWS_SECRET_ACCESS_KEY: process.env.AWS_SECRET_ACCESS_KEY || '',
213
213
 
214
- ENABLE_OLLAMA: process.env.ENABLE_OLLAMA as unknown as boolean,
214
+ ENABLE_OLLAMA: Boolean(process.env.ENABLE_OLLAMA),
215
215
  OLLAMA_PROXY_URL: process.env.OLLAMA_PROXY_URL || '',
216
216
  OLLAMA_MODEL_LIST: process.env.OLLAMA_MODEL_LIST || process.env.OLLAMA_CUSTOM_MODELS,
217
217
  };
@@ -19,6 +19,7 @@ import {
19
19
  dbSchemaV5,
20
20
  dbSchemaV6,
21
21
  dbSchemaV7,
22
+ dbSchemaV9,
22
23
  } from './schemas';
23
24
  import { DBModel, LOBE_CHAT_LOCAL_DB_NAME } from './types/db';
24
25
 
@@ -67,6 +68,10 @@ export class BrowserDB extends Dexie {
67
68
  .stores(dbSchemaV7)
68
69
  .upgrade((trans) => this.upgradeToV8(trans));
69
70
 
71
+ this.version(9)
72
+ .stores(dbSchemaV9)
73
+ .upgrade((trans) => this.upgradeToV9(trans));
74
+
70
75
  this.files = this.table('files');
71
76
  this.sessions = this.table('sessions');
72
77
  this.messages = this.table('messages');
@@ -153,6 +158,33 @@ export class BrowserDB extends Dexie {
153
158
  }
154
159
  });
155
160
  };
161
+
162
+ upgradeToV9 = async (trans: Transaction) => {
163
+ const messages = trans.table('messages');
164
+ await messages.toCollection().modify(async (message: DBModel<DB_Message>) => {
165
+ if ((message.role as string) === 'function') {
166
+ const origin = Object.assign({}, message);
167
+
168
+ const toolCallId = `tool_call_${message.id}`;
169
+ const assistantMessageId = `tool_calls_${message.id}`;
170
+
171
+ message.role = 'tool';
172
+ message.tool_call_id = toolCallId;
173
+ message.parentId = assistantMessageId;
174
+
175
+ await messages.add({
176
+ ...origin,
177
+ content: '',
178
+ createdAt: message.createdAt - 10,
179
+ error: undefined,
180
+ id: assistantMessageId,
181
+ role: 'assistant',
182
+ tools: [{ ...message.plugin!, id: toolCallId }],
183
+ updatedAt: message.updatedAt - 10,
184
+ } as DBModel<DB_Message>);
185
+ }
186
+ });
187
+ };
156
188
  }
157
189
 
158
190
  export const browserDB = new BrowserDB();
@@ -76,3 +76,12 @@ export const dbSchemaV7 = {
76
76
  plugins:
77
77
  '&identifier, id, type, manifest.type, manifest.meta.title, manifest.meta.description, manifest.meta.author, createdAt, updatedAt',
78
78
  };
79
+ // ************************************** //
80
+ // ******* Version 9 - 2024-03-14 ******* //
81
+ // ************************************** //
82
+ // - Added id to `plugins` table
83
+ export const dbSchemaV9 = {
84
+ ...dbSchemaV7,
85
+ messages:
86
+ '&id, role, content, fromModel, favorite, tool_call_id, plugin.identifier, plugin.apiName, translate.content, createdAt, updatedAt, sessionId, topicId, quotaId, parentId, [sessionId+topicId], traceId',
87
+ };
@@ -264,14 +264,14 @@ describe('MessageModel', () => {
264
264
  it('should update a role and plugins', async () => {
265
265
  const createdMessage = await MessageModel.create(messageData);
266
266
  const updateData = {
267
- role: 'function' as const,
267
+ role: 'tool' as const,
268
268
  plugin: { apiName: 'a', identifier: 'b', arguments: 'abc' },
269
269
  };
270
270
 
271
271
  await MessageModel.update(createdMessage.id, updateData);
272
272
  const updatedMessage = await MessageModel.findById(createdMessage.id);
273
273
 
274
- expect(updatedMessage).toHaveProperty('role', 'function');
274
+ expect(updatedMessage).toHaveProperty('role', 'tool');
275
275
  });
276
276
  });
277
277
 
@@ -14,13 +14,20 @@ const PluginSchema = z.object({
14
14
  type: z.enum(['default', 'markdown', 'standalone', 'builtin']).default('default'),
15
15
  });
16
16
 
17
+ const ToolCallSchema = PluginSchema.extend({
18
+ id: z.string(),
19
+ });
20
+
17
21
  export const DB_MessageSchema = z.object({
18
- role: z.enum(['user', 'system', 'assistant', 'function']),
22
+ role: z.enum(['user', 'system', 'assistant', 'tool']),
19
23
  content: z.string(),
20
24
  files: z.array(z.string()).optional(),
21
25
  favorite: z.number().int().min(0).max(1).optional(),
22
26
  error: z.any().optional(),
23
27
 
28
+ tools: z.array(ToolCallSchema).optional(),
29
+ tool_call_id: z.string().optional(),
30
+
24
31
  plugin: PluginSchema.optional(),
25
32
  pluginState: z.any().optional(),
26
33
  fromModel: z.string().optional(),
@@ -8,6 +8,7 @@ import { TraceNameMap, TracePayload, TraceTopicType } from '@/const/trace';
8
8
  import { chatService } from '@/services/chat';
9
9
  import { LobeAgentConfig } from '@/types/agent';
10
10
  import { MetaData } from '@/types/meta';
11
+ import { MessageTextChunk } from '@/utils/fetch';
11
12
  import { setNamespace } from '@/utils/storeDebug';
12
13
 
13
14
  import { SessionLoadingState } from '../store/initialState';
@@ -246,17 +247,25 @@ export const store: StateCreator<Store, [['zustand/devtools', never]]> = (set, g
246
247
 
247
248
  streamUpdateMetaArray: (key: keyof MetaData) => {
248
249
  let value = '';
249
- return (text: string) => {
250
- value += text;
251
- get().dispatchMeta({ type: 'update', value: { [key]: value.split(',') } });
250
+ return (chunk: MessageTextChunk) => {
251
+ switch (chunk.type) {
252
+ case 'text': {
253
+ value += chunk.text;
254
+ get().dispatchMeta({ type: 'update', value: { [key]: value.split(',') } });
255
+ }
256
+ }
252
257
  };
253
258
  },
254
259
 
255
260
  streamUpdateMetaString: (key: keyof MetaData) => {
256
261
  let value = '';
257
- return (text: string) => {
258
- value += text;
259
- get().dispatchMeta({ type: 'update', value: { [key]: value } });
262
+ return (chunk: MessageTextChunk) => {
263
+ switch (chunk.type) {
264
+ case 'text': {
265
+ value += chunk.text;
266
+ get().dispatchMeta({ type: 'update', value: { [key]: value } });
267
+ }
268
+ }
260
269
  };
261
270
  },
262
271
 
@@ -0,0 +1,16 @@
1
+ import { memo } from 'react';
2
+
3
+ import { RenderAction } from '../types';
4
+
5
+ export const ToolActionsBar: RenderAction = memo(() => {
6
+ return undefined;
7
+ // const { regenerate } = useChatListActionsBar();
8
+ // return (
9
+ // <ActionIconGroup
10
+ // dropdownMenu={[regenerate]}
11
+ // items={[regenerate]}
12
+ // onActionClick={onActionClick}
13
+ // type="ghost"
14
+ // />
15
+ // );
16
+ });
@@ -8,13 +8,13 @@ import { LLMRoleType } from '@/types/llm';
8
8
  import { OnActionsClick, RenderAction } from '../types';
9
9
  import { AssistantActionsBar } from './Assistant';
10
10
  import { DefaultActionsBar } from './Fallback';
11
- import { FunctionActionsBar } from './Function';
11
+ import { ToolActionsBar } from './Tool';
12
12
  import { UserActionsBar } from './User';
13
13
 
14
14
  export const renderActions: Record<LLMRoleType, RenderAction> = {
15
15
  assistant: AssistantActionsBar,
16
- function: FunctionActionsBar,
17
16
  system: DefaultActionsBar,
17
+ tool: ToolActionsBar,
18
18
  user: UserActionsBar,
19
19
  };
20
20
 
@@ -0,0 +1,78 @@
1
+ import { Avatar, Highlighter, Icon } from '@lobehub/ui';
2
+ import isEqual from 'fast-deep-equal';
3
+ import { Loader2, LucideChevronDown, LucideChevronRight, LucideToyBrick } from 'lucide-react';
4
+ import { memo, useState } from 'react';
5
+ import { useTranslation } from 'react-i18next';
6
+ import { Center, Flexbox } from 'react-layout-kit';
7
+
8
+ import { useChatStore } from '@/store/chat';
9
+ import { chatSelectors } from '@/store/chat/slices/message/selectors';
10
+ import { pluginHelpers, useToolStore } from '@/store/tool';
11
+ import { toolSelectors } from '@/store/tool/selectors';
12
+
13
+ import { useStyles } from './style';
14
+
15
+ export interface InspectorProps {
16
+ arguments?: string;
17
+ identifier: string;
18
+ messageId: string;
19
+ }
20
+
21
+ const CallItem = memo<InspectorProps>(
22
+ ({ arguments: requestArgs = '{}', messageId, identifier }) => {
23
+ const { t } = useTranslation('plugin');
24
+ const { styles } = useStyles();
25
+ const [open, setOpen] = useState(false);
26
+ const loading = useChatStore(chatSelectors.isMessageGenerating(messageId));
27
+
28
+ const pluginMeta = useToolStore(toolSelectors.getMetaById(identifier), isEqual);
29
+
30
+ const pluginAvatar = pluginHelpers.getPluginAvatar(pluginMeta);
31
+
32
+ const pluginTitle = pluginHelpers.getPluginTitle(pluginMeta) ?? t('unknownPlugin');
33
+
34
+ const avatar = pluginAvatar ? (
35
+ <Avatar avatar={pluginAvatar} size={32} />
36
+ ) : (
37
+ <Icon icon={LucideToyBrick} />
38
+ );
39
+
40
+ let params;
41
+ try {
42
+ params = JSON.stringify(JSON.parse(requestArgs), null, 2);
43
+ } catch {
44
+ params = requestArgs;
45
+ }
46
+
47
+ return (
48
+ <Flexbox gap={8}>
49
+ <Flexbox
50
+ align={'center'}
51
+ className={styles.container}
52
+ distribution={'space-between'}
53
+ gap={8}
54
+ height={32}
55
+ horizontal
56
+ onClick={() => {
57
+ setOpen(!open);
58
+ }}
59
+ >
60
+ <Flexbox align={'center'} gap={8} horizontal>
61
+ {loading ? (
62
+ <Center height={30} width={24}>
63
+ <Icon icon={Loader2} spin />
64
+ </Center>
65
+ ) : (
66
+ avatar
67
+ )}
68
+ {pluginTitle}
69
+ </Flexbox>
70
+ <Icon icon={open ? LucideChevronDown : LucideChevronRight} />
71
+ </Flexbox>
72
+ {(open || loading) && <Highlighter language={'json'}>{params}</Highlighter>}
73
+ </Flexbox>
74
+ );
75
+ },
76
+ );
77
+
78
+ export default CallItem;
@@ -0,0 +1,25 @@
1
+ import { createStyles } from 'antd-style';
2
+
3
+ export const useStyles = createStyles(({ css, token }) => ({
4
+ container: css`
5
+ cursor: pointer;
6
+
7
+ width: fit-content;
8
+ padding-inline: 4px 6px;
9
+
10
+ color: ${token.colorText};
11
+
12
+ background: ${token.colorFillTertiary};
13
+ border-radius: 8px;
14
+
15
+ &:hover {
16
+ background: ${token.colorFillSecondary};
17
+ }
18
+ `,
19
+ plugin: css`
20
+ display: flex;
21
+ gap: 4px;
22
+ align-items: center;
23
+ width: fit-content;
24
+ `,
25
+ }));
@@ -0,0 +1,47 @@
1
+ import { ReactNode, memo } from 'react';
2
+ import { Flexbox } from 'react-layout-kit';
3
+
4
+ import { LOADING_FLAT } from '@/const/message';
5
+ import { useChatStore } from '@/store/chat';
6
+ import { chatSelectors } from '@/store/chat/selectors';
7
+ import { ChatMessage } from '@/types/message';
8
+
9
+ import { DefaultMessage } from '../Default';
10
+ import ToolCalls from './ToolCalls';
11
+
12
+ export const AssistantMessage = memo<
13
+ ChatMessage & {
14
+ editableContent: ReactNode;
15
+ }
16
+ >(({ id, tools, content, ...props }) => {
17
+ const editing = useChatStore(chatSelectors.isMessageEditing(id));
18
+ const generating = useChatStore(chatSelectors.isMessageGenerating(id));
19
+
20
+ const isToolCallGenerating = generating && (content === LOADING_FLAT || !content) && !!tools;
21
+
22
+ return (
23
+ <Flexbox gap={8} id={id}>
24
+ {(content || editing) && (
25
+ <DefaultMessage
26
+ content={content}
27
+ // we have id above, so don't need to pass it again
28
+ id={undefined as any}
29
+ isToolCallGenerating={isToolCallGenerating}
30
+ {...props}
31
+ />
32
+ )}
33
+ {!editing && tools && (
34
+ <Flexbox gap={8} horizontal>
35
+ {tools.map((toolCall) => (
36
+ <ToolCalls
37
+ arguments={toolCall.arguments}
38
+ identifier={toolCall.identifier}
39
+ key={toolCall.id}
40
+ messageId={id}
41
+ />
42
+ ))}
43
+ </Flexbox>
44
+ )}
45
+ </Flexbox>
46
+ );
47
+ });
@@ -8,8 +8,11 @@ import BubblesLoading from '../components/BubblesLoading';
8
8
  export const DefaultMessage = memo<
9
9
  ChatMessage & {
10
10
  editableContent: ReactNode;
11
+ isToolCallGenerating?: boolean;
11
12
  }
12
- >(({ id, editableContent, content }) => {
13
+ >(({ id, editableContent, content, isToolCallGenerating }) => {
14
+ if (isToolCallGenerating) return;
15
+
13
16
  if (content === LOADING_FLAT) return <BubblesLoading />;
14
17
 
15
18
  return <div id={id}>{editableContent}</div>;