@lobehub/chat 1.81.7 → 1.81.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/chat.json +10 -0
  4. package/locales/bg-BG/chat.json +10 -0
  5. package/locales/de-DE/chat.json +10 -0
  6. package/locales/en-US/chat.json +10 -0
  7. package/locales/es-ES/chat.json +10 -0
  8. package/locales/fa-IR/chat.json +10 -0
  9. package/locales/fr-FR/chat.json +10 -0
  10. package/locales/it-IT/chat.json +10 -0
  11. package/locales/ja-JP/chat.json +10 -0
  12. package/locales/ko-KR/chat.json +10 -0
  13. package/locales/nl-NL/chat.json +10 -0
  14. package/locales/pl-PL/chat.json +10 -0
  15. package/locales/pt-BR/chat.json +10 -0
  16. package/locales/ru-RU/chat.json +10 -0
  17. package/locales/tr-TR/chat.json +10 -0
  18. package/locales/vi-VN/chat.json +10 -0
  19. package/locales/zh-CN/chat.json +10 -0
  20. package/locales/zh-TW/chat.json +10 -0
  21. package/package.json +1 -1
  22. package/packages/electron-client-ipc/src/events/localFile.ts +8 -2
  23. package/packages/electron-client-ipc/src/events/system.ts +3 -0
  24. package/packages/electron-client-ipc/src/types/index.ts +1 -0
  25. package/packages/electron-client-ipc/src/types/localFile.ts +46 -0
  26. package/packages/electron-client-ipc/src/types/system.ts +24 -0
  27. package/packages/file-loaders/src/blackList.ts +9 -0
  28. package/packages/file-loaders/src/index.ts +1 -0
  29. package/packages/file-loaders/src/loaders/pdf/index.test.ts +1 -0
  30. package/packages/file-loaders/src/loaders/pdf/index.ts +1 -7
  31. package/src/components/FileIcon/index.tsx +7 -3
  32. package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx +31 -4
  33. package/src/features/Conversation/Extras/Usage/index.tsx +1 -1
  34. package/src/libs/agent-runtime/anthropic/index.ts +7 -3
  35. package/src/libs/agent-runtime/perplexity/index.test.ts +4 -1
  36. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +17 -8
  37. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +11 -5
  38. package/src/libs/agent-runtime/utils/streams/anthropic.ts +11 -2
  39. package/src/libs/agent-runtime/utils/streams/openai.ts +5 -2
  40. package/src/libs/agent-runtime/utils/streams/protocol.test.ts +67 -1
  41. package/src/libs/agent-runtime/utils/streams/protocol.ts +46 -1
  42. package/src/locales/default/chat.ts +11 -0
  43. package/src/services/electron/localFileService.ts +19 -0
  44. package/src/services/electron/system.ts +21 -0
  45. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +2 -2
  46. package/src/store/chat/slices/builtinTool/actions/search.ts +0 -3
  47. package/src/tools/local-files/Render/ListFiles/index.tsx +24 -17
  48. package/src/tools/local-files/Render/ReadLocalFile/ReadFileView.tsx +28 -28
  49. package/src/tools/local-files/components/FileItem.tsx +9 -11
  50. package/src/tools/local-files/index.ts +60 -2
  51. package/src/tools/local-files/systemRole.ts +53 -13
  52. package/src/tools/local-files/type.ts +19 -1
  53. package/src/tools/web-browsing/systemRole.ts +40 -38
  54. package/src/types/message/base.ts +8 -0
  55. package/src/utils/fetch/fetchSSE.ts +17 -1
package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx

@@ -6,10 +6,11 @@ import { memo } from 'react';
 import { useTranslation } from 'react-i18next';
 import { Center, Flexbox } from 'react-layout-kit';
 
+import InfoTooltip from '@/components/InfoTooltip';
 import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
 import { useGlobalStore } from '@/store/global';
 import { systemStatusSelectors } from '@/store/global/selectors';
-import { ModelTokensUsage } from '@/types/message';
+import { MessageMetadata } from '@/types/message';
 import { formatNumber } from '@/utils/format';
 
 import ModelCard from './ModelCard';
@@ -17,19 +18,19 @@ import TokenProgress, { TokenProgressItem } from './TokenProgress';
 import { getDetailsToken } from './tokens';
 
 interface TokenDetailProps {
+  meta: MessageMetadata;
   model: string;
   provider: string;
-  usage: ModelTokensUsage;
 }
 
-const TokenDetail = memo<TokenDetailProps>(({ usage, model, provider }) => {
+const TokenDetail = memo<TokenDetailProps>(({ meta, model, provider }) => {
   const { t } = useTranslation('chat');
   const theme = useTheme();
 
   const modelCard = useAiInfraStore(aiModelSelectors.getModelCard(model, provider));
   const isShowCredit = useGlobalStore(systemStatusSelectors.isShowCredit) && !!modelCard?.pricing;
 
-  const detailTokens = getDetailsToken(usage, modelCard);
+  const detailTokens = getDetailsToken(meta, modelCard);
   const inputDetails = [
     !!detailTokens.inputAudio && {
       color: theme.cyan9,
@@ -113,6 +114,10 @@ const TokenDetail = memo<TokenDetailProps>(({ usage, model, provider }) => {
          detailTokens.totalTokens!.credit / detailTokens.totalTokens!.token,
          2,
        );
+
+  const tps = meta?.tps ? formatNumber(meta.tps, 2) : undefined;
+  const ttft = meta?.ttft ? formatNumber(meta.ttft / 1000, 2) : undefined;
+
   return (
     <Popover
       arrow={false}
@@ -170,6 +175,28 @@ const TokenDetail = memo<TokenDetailProps>(({ usage, model, provider }) => {
              <div style={{ fontWeight: 500 }}>{averagePricing}</div>
            </Flexbox>
          )}
+          {tps && (
+            <Flexbox align={'center'} gap={4} horizontal justify={'space-between'}>
+              <Flexbox gap={8} horizontal>
+                <div style={{ color: theme.colorTextSecondary }}>
+                  {t('messages.tokenDetails.speed.tps.title')}
+                </div>
+                <InfoTooltip title={t('messages.tokenDetails.speed.tps.tooltip')} />
+              </Flexbox>
+              <div style={{ fontWeight: 500 }}>{tps}</div>
+            </Flexbox>
+          )}
+          {ttft && (
+            <Flexbox align={'center'} gap={4} horizontal justify={'space-between'}>
+              <Flexbox gap={8} horizontal>
+                <div style={{ color: theme.colorTextSecondary }}>
+                  {t('messages.tokenDetails.speed.ttft.title')}
+                </div>
+                <InfoTooltip title={t('messages.tokenDetails.speed.ttft.tooltip')} />
+              </Flexbox>
+              <div style={{ fontWeight: 500 }}>{ttft}s</div>
+            </Flexbox>
+          )}
        </Flexbox>
      </Flexbox>
    </Flexbox>
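The hunk above reads two new speed fields from message metadata. A minimal sketch of that formatting logic in isolation, with illustrative values and a stand-in for the project's `formatNumber` helper; note `ttft` is stored in milliseconds and rendered in seconds:

```ts
// Stand-in for formatNumber from '@/utils/format'; illustrative only.
const formatNumber = (value: number, digits: number) => value.toFixed(digits);

// Hypothetical metadata carrying the new speed fields.
const meta = { totalTokens: 130, tps: 29.876, ttft: 842 };

const tps = meta?.tps ? formatNumber(meta.tps, 2) : undefined; // '29.88'
const ttft = meta?.ttft ? formatNumber(meta.ttft / 1000, 2) : undefined; // '0.84', rendered as '0.84s'
```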
package/src/features/Conversation/Extras/Usage/index.tsx

@@ -31,7 +31,7 @@ const Usage = memo<UsageProps>(({ model, metadata, provider }) => {
      </Center>
 
      {!!metadata.totalTokens && (
-        <TokenDetail model={model as string} provider={provider} usage={metadata} />
+        <TokenDetail meta={metadata} model={model as string} provider={provider} />
      )}
    </Flexbox>
  );
package/src/libs/agent-runtime/anthropic/index.ts

@@ -56,6 +56,7 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
  async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
    try {
      const anthropicPayload = await this.buildAnthropicPayload(payload);
+      const inputStartAt = Date.now();
 
      if (this.isDebug()) {
        console.log('[requestPayload]');
@@ -79,9 +80,12 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
        debugStream(debug.toReadableStream()).catch(console.error);
      }
 
-      return StreamingResponse(AnthropicStream(prod, options?.callback), {
-        headers: options?.headers,
-      });
+      return StreamingResponse(
+        AnthropicStream(prod, { callbacks: options?.callback, inputStartAt }),
+        {
+          headers: options?.headers,
+        },
+      );
    } catch (error) {
      throw this.handleError(error);
    }
package/src/libs/agent-runtime/perplexity/index.test.ts

@@ -225,7 +225,10 @@ describe('LobePerplexityAI', () => {
        stream.push(decoder.decode(value));
      }
 
-      expect(stream).toEqual(
+      // Slice out speed chunk
+      const noSpeedStream = stream.slice(0, -3);
+
+      expect(noSpeedStream).toEqual(
        [
          'id: 506d64fb-e7f2-4d94-b80f-158369e9446d',
          'event: text',
package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts

@@ -201,6 +201,7 @@ export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>
 
    async chat({ responseMode, ...payload }: ChatStreamPayload, options?: ChatCompetitionOptions) {
      try {
+        const inputStartAt = Date.now();
        const postPayload = chatCompletion?.handlePayload
          ? chatCompletion.handlePayload(payload, this._options)
          : ({
@@ -253,10 +254,14 @@
            debugStream(useForDebugStream).catch(console.error);
          }
 
-          const streamHandler = chatCompletion?.handleStream || OpenAIStream;
-          return StreamingResponse(streamHandler(prod, streamOptions), {
-            headers: options?.headers,
-          });
+          return StreamingResponse(
+            chatCompletion?.handleStream
+              ? chatCompletion.handleStream(prod, streamOptions.callbacks)
+              : OpenAIStream(prod, { ...streamOptions, inputStartAt }),
+            {
+              headers: options?.headers,
+            },
+          );
        }
 
        if (debug?.chatCompletion?.()) {
@@ -269,10 +274,14 @@
          chatCompletion?.handleTransformResponseToStream || transformResponseToStream;
        const stream = transformHandler(response as unknown as OpenAI.ChatCompletion);
 
-        const streamHandler = chatCompletion?.handleStream || OpenAIStream;
-        return StreamingResponse(streamHandler(stream, streamOptions), {
-          headers: options?.headers,
-        });
+        return StreamingResponse(
+          chatCompletion?.handleStream
+            ? chatCompletion.handleStream(stream, streamOptions.callbacks)
+            : OpenAIStream(stream, { ...streamOptions, inputStartAt }),
+          {
+            headers: options?.headers,
+          },
+        );
      } catch (error) {
        throw this.handleError(error);
      }
package/src/libs/agent-runtime/utils/streams/anthropic.test.ts

@@ -61,9 +61,11 @@ describe('AnthropicStream', () => {
    const onCompletionMock = vi.fn();
 
    const protocolStream = AnthropicStream(mockAnthropicStream, {
-      onStart: onStartMock,
-      onText: onTextMock,
-      onCompletion: onCompletionMock,
+      callbacks: {
+        onStart: onStartMock,
+        onText: onTextMock,
+        onCompletion: onCompletionMock,
+      },
    });
 
    const decoder = new TextDecoder();
@@ -165,7 +167,9 @@ describe('AnthropicStream', () => {
    const onToolCallMock = vi.fn();
 
    const protocolStream = AnthropicStream(mockReadableStream, {
-      onToolsCalling: onToolCallMock,
+      callbacks: {
+        onToolsCalling: onToolCallMock,
+      },
    });
 
    const decoder = new TextDecoder();
@@ -317,7 +321,9 @@ describe('AnthropicStream', () => {
    const onToolCallMock = vi.fn();
 
    const protocolStream = AnthropicStream(mockReadableStream, {
-      onToolsCalling: onToolCallMock,
+      callbacks: {
+        onToolsCalling: onToolCallMock,
+      },
    });
 
    const decoder = new TextDecoder();
package/src/libs/agent-runtime/utils/streams/anthropic.ts

@@ -12,6 +12,7 @@ import {
  convertIterableToStream,
  createCallbacksTransformer,
  createSSEProtocolTransformer,
+  createTokenSpeedCalculator,
} from './protocol';
 
export const transformAnthropicStream = (
@@ -188,9 +189,14 @@
  }
};
 
+export interface AnthropicStreamOptions {
+  callbacks?: ChatStreamCallbacks;
+  inputStartAt?: number;
+}
+
export const AnthropicStream = (
  stream: Stream<Anthropic.MessageStreamEvent> | ReadableStream,
-  callbacks?: ChatStreamCallbacks,
+  { callbacks, inputStartAt }: AnthropicStreamOptions = {},
) => {
  const streamStack: StreamContext = { id: '' };
@@ -198,6 +204,9 @@ export const AnthropicStream = (
    stream instanceof ReadableStream ? stream : convertIterableToStream(stream);
 
  return readableStream
-    .pipeThrough(createSSEProtocolTransformer(transformAnthropicStream, streamStack))
+    .pipeThrough(
+      createTokenSpeedCalculator(transformAnthropicStream, { inputStartAt, streamStack }),
+    )
+    .pipeThrough(createSSEProtocolTransformer((c) => c, streamStack))
    .pipeThrough(createCallbacksTransformer(callbacks));
};
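For callers, the change above means `AnthropicStream`'s second argument is now an options object rather than the callbacks themselves. A minimal before/after sketch, assuming a `prod` message stream and a `callbacks` object as in the Anthropic provider hunk earlier (declared here only for illustration):

```ts
import { AnthropicStream } from './anthropic';

// Assumed inputs, as named in the provider hunk above.
declare const prod: ReadableStream;
declare const callbacks: any;

// 1.81.7 shape: AnthropicStream(prod, callbacks)
// 1.81.9 shape: an options object, so the request start time can be threaded
// through to the token-speed middleware for the TTFT measurement.
const inputStartAt = Date.now();
const stream = AnthropicStream(prod, { callbacks, inputStartAt });
```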
package/src/libs/agent-runtime/utils/streams/openai.ts

@@ -16,6 +16,7 @@ import {
  createCallbacksTransformer,
  createFirstErrorHandleTransformer,
  createSSEProtocolTransformer,
+  createTokenSpeedCalculator,
  generateToolCallId,
} from './protocol';
 
@@ -218,12 +219,13 @@ export interface OpenAIStreamOptions {
    name: string;
  }) => ILobeAgentRuntimeErrorType | undefined;
  callbacks?: ChatStreamCallbacks;
+  inputStartAt?: number;
  provider?: string;
}
 
export const OpenAIStream = (
  stream: Stream<OpenAI.ChatCompletionChunk> | ReadableStream,
-  { callbacks, provider, bizErrorTypeTransformer }: OpenAIStreamOptions = {},
+  { callbacks, provider, bizErrorTypeTransformer, inputStartAt }: OpenAIStreamOptions = {},
) => {
  const streamStack: StreamContext = { id: '' };
 
@@ -236,7 +238,8 @@ export const OpenAIStream = (
      // provider like huggingface or minimax will return error in the stream,
      // so in the first Transformer, we need to handle the error
      .pipeThrough(createFirstErrorHandleTransformer(bizErrorTypeTransformer, provider))
-      .pipeThrough(createSSEProtocolTransformer(transformOpenAIStream, streamStack))
+      .pipeThrough(createTokenSpeedCalculator(transformOpenAIStream, { inputStartAt, streamStack }))
+      .pipeThrough(createSSEProtocolTransformer((c) => c, streamStack))
      .pipeThrough(createCallbacksTransformer(callbacks))
  );
};
package/src/libs/agent-runtime/utils/streams/protocol.test.ts

@@ -1,6 +1,6 @@
 import { describe, expect, it } from 'vitest';
 
-import { createSSEDataExtractor } from './protocol';
+import { createSSEDataExtractor, createTokenSpeedCalculator } from './protocol';
 
 describe('createSSEDataExtractor', () => {
  // Helper function to convert string to Uint8Array
@@ -135,3 +135,69 @@ describe('createSSEDataExtractor', () => {
    });
  });
});
+
+describe('createTokenSpeedCalculator', async () => {
+  // Mock the param from caller - 1000 to avoid div 0
+  const inputStartAt = Date.now() - 1000;
+
+  // Helper function to process chunks through transformer
+  const processChunk = async (transformer: TransformStream, chunk: any) => {
+    const results: any[] = [];
+    const readable = new ReadableStream({
+      start(controller) {
+        controller.enqueue(chunk);
+        controller.close();
+      },
+    });
+
+    const writable = new WritableStream({
+      write(chunk) {
+        results.push(chunk);
+      },
+    });
+
+    await readable.pipeThrough(transformer).pipeTo(writable);
+
+    return results;
+  };
+
+  it('should calculate token speed correctly', async () => {
+    const chunks = [
+      { data: '', id: 'chatcmpl-BKO1bogylHvMaYfETjTAzrCguYwZy', type: 'text' },
+      { data: 'hi', id: 'chatcmpl-BKO1bogylHvMaYfETjTAzrCguYwZy', type: 'text' },
+      { data: 'stop', id: 'chatcmpl-BKO1bogylHvMaYfETjTAzrCguYwZy', type: 'stop' },
+      {
+        data: {
+          inputTextTokens: 9,
+          outputTextTokens: 1,
+          totalInputTokens: 9,
+          totalOutputTokens: 1,
+          totalTokens: 10,
+        },
+        id: 'chatcmpl-BKO1bogylHvMaYfETjTAzrCguYwZy',
+        type: 'usage',
+      },
+    ];
+
+    const transformer = createTokenSpeedCalculator((v) => v, { inputStartAt });
+    const results = await processChunk(transformer, chunks);
+    expect(results).toHaveLength(chunks.length + 1);
+    const speedChunk = results.slice(-1)[0];
+    expect(speedChunk.id).toBe('output_speed');
+    expect(speedChunk.type).toBe('speed');
+    expect(speedChunk.data.tps).not.toBeNaN();
+    expect(speedChunk.data.ttft).not.toBeNaN();
+  });
+
+  it('should not calculate token speed if no usage', async () => {
+    const chunks = [
+      { data: '', id: 'chatcmpl-BKO1bogylHvMaYfETjTAzrCguYwZy', type: 'text' },
+      { data: 'hi', id: 'chatcmpl-BKO1bogylHvMaYfETjTAzrCguYwZy', type: 'text' },
+      { data: 'stop', id: 'chatcmpl-BKO1bogylHvMaYfETjTAzrCguYwZy', type: 'stop' },
+    ];
+
+    const transformer = createTokenSpeedCalculator((v) => v, { inputStartAt });
+    const results = await processChunk(transformer, chunks);
+    expect(results).toHaveLength(chunks.length);
+  });
+});
package/src/libs/agent-runtime/utils/streams/protocol.ts

@@ -1,4 +1,4 @@
-import { ModelTokensUsage } from '@/types/message';
+import { ModelSpeed, ModelTokensUsage } from '@/types/message';
 import { safeParseJSON } from '@/utils/safeParseJSON';
 
 import { AgentRuntimeErrorType } from '../../error';
@@ -52,6 +52,8 @@ export interface StreamProtocolChunk {
    | 'error'
    // token usage
    | 'usage'
+    // performance monitor
+    | 'speed'
    // unknown data result
    | 'data';
}
@@ -287,3 +289,46 @@ export const createSSEDataExtractor = () =>
      }
    },
  });
+
+export const TOKEN_SPEED_CHUNK_ID = 'output_speed';
+
+/**
+ * Create a middleware to calculate the token generate speed
+ * @requires createSSEProtocolTransformer
+ */
+export const createTokenSpeedCalculator = (
+  transformer: (chunk: any, stack: StreamContext) => StreamProtocolChunk | StreamProtocolChunk[],
+  { streamStack, inputStartAt }: { inputStartAt?: number; streamStack?: StreamContext } = {},
+) => {
+  let outputStartAt: number | undefined;
+
+  const process = (chunk: StreamProtocolChunk) => {
+    let result = [chunk];
+    // if the chunk is the first text chunk, set as output start
+    if (!outputStartAt && chunk.type === 'text') outputStartAt = Date.now();
+    // if the chunk is the stop chunk, set as output finish
+    if (inputStartAt && outputStartAt && chunk.type === 'usage') {
+      const outputTokens = chunk.data?.totalOutputTokens || chunk.data?.outputTextTokens;
+      result.push({
+        data: {
+          tps: (outputTokens / (Date.now() - outputStartAt)) * 1000,
+          ttft: outputStartAt - inputStartAt,
+        } as ModelSpeed,
+        id: TOKEN_SPEED_CHUNK_ID,
+        type: 'speed',
+      });
+    }
+    return result;
+  };
+
+  return new TransformStream({
+    transform(chunk, controller) {
+      let result = transformer(chunk, streamStack || { id: '' });
+      if (!Array.isArray(result)) result = [result];
+      result.forEach((r) => {
+        const processed = process(r);
+        if (processed) processed.forEach((p) => controller.enqueue(p));
+      });
+    },
+  });
+};
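To see the middleware in isolation, here is a minimal sketch following the pattern of the new tests above: an identity transformer, a short chunk sequence ending in a `usage` chunk, and one extra `speed` chunk appended at the end. Ids and token counts are illustrative:

```ts
import { createTokenSpeedCalculator } from './protocol';

const inputStartAt = Date.now() - 1000; // pretend the request went out 1s ago

const transformer = createTokenSpeedCalculator((v) => v, { inputStartAt });

const source = new ReadableStream({
  start(controller) {
    controller.enqueue({ data: 'hi', id: 'chatcmpl-demo', type: 'text' });
    controller.enqueue({ data: 'stop', id: 'chatcmpl-demo', type: 'stop' });
    controller.enqueue({
      data: { totalInputTokens: 9, totalOutputTokens: 1, totalTokens: 10 },
      id: 'chatcmpl-demo',
      type: 'usage',
    });
    controller.close();
  },
});

// Logs the three chunks unchanged, then one trailing chunk:
//   { id: 'output_speed', type: 'speed', data: { tps, ttft } }
// where ttft = outputStartAt - inputStartAt (ms) and
// tps = outputTokens / ms-since-first-token * 1000.
await source.pipeThrough(transformer).pipeTo(
  new WritableStream({ write: (chunk) => console.log(chunk) }),
);
```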
package/src/locales/default/chat.ts

@@ -122,6 +122,17 @@ export default {
    outputText: '文本输出',
    outputTitle: '输出明细',
    reasoning: '深度思考',
+    speed: {
+      tps: {
+        title: 'TPS',
+        tooltip:
+          'Tokens Per Second,TPS。指AI生成内容的平均速度(Token/秒),在接收到首个 Token 后开始计算。',
+      },
+      ttft: {
+        title: 'TTFT',
+        tooltip: 'Time To First Token,TTFT。指从您发送消息到客户端接收到首个 Token 的时间间隔。',
+      },
+    },
    title: '生成明细',
    total: '总计消耗',
  },
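In English, the two tooltips read: TPS (Tokens Per Second) is the average generation speed in tokens per second, measured from the first received token; TTFT (Time To First Token) is the interval from sending the message to the client receiving the first token. A worked example with illustrative numbers:

```ts
// Suppose the request is sent at t = 0 ms, the first token arrives at
// t = 800 ms, and 120 output tokens are complete at t = 4800 ms.
const ttft = 800; // ms, displayed as 0.8s
const tps = (120 / (4800 - 800)) * 1000; // 30 tokens per second
```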
package/src/services/electron/localFileService.ts

@@ -1,12 +1,15 @@
 import {
  ListLocalFileParams,
  LocalFileItem,
+  LocalMoveFilesResultItem,
  LocalReadFileParams,
  LocalReadFileResult,
  LocalReadFilesParams,
  LocalSearchFilesParams,
+  MoveLocalFilesParams,
  OpenLocalFileParams,
  OpenLocalFolderParams,
+  RenameLocalFileParams,
  dispatch,
} from '@lobechat/electron-client-ipc';
 
@@ -34,6 +37,22 @@ class LocalFileService {
  async openLocalFolder(params: OpenLocalFolderParams) {
    return dispatch('openLocalFolder', params);
  }
+
+  async moveLocalFiles(params: MoveLocalFilesParams): Promise<LocalMoveFilesResultItem[]> {
+    return dispatch('moveLocalFiles', params);
+  }
+
+  async renameLocalFile(params: RenameLocalFileParams) {
+    return dispatch('renameLocalFile', params);
+  }
+
+  async openLocalFileOrFolder(path: string, isDirectory: boolean) {
+    if (isDirectory) {
+      return this.openLocalFolder({ isDirectory, path });
+    } else {
+      return this.openLocalFile({ path });
+    }
+  }
}
 
export const localFileService = new LocalFileService();
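A usage sketch for the new convenience wrapper, which simply routes to `openLocalFolder` or `openLocalFile` based on the flag (the paths here are hypothetical):

```ts
import { localFileService } from '@/services/electron/localFileService';

// Open a directory in the system file manager.
await localFileService.openLocalFileOrFolder('/Users/me/Documents', true);

// Open a single file with its default application.
await localFileService.openLocalFileOrFolder('/Users/me/Documents/report.pdf', false);
```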
package/src/services/electron/system.ts (new file)

@@ -0,0 +1,21 @@
+import { ElectronAppState, dispatch } from '@lobechat/electron-client-ipc';
+
+/**
+ * Service class for interacting with Electron's system-level information and actions.
+ */
+class ElectronSystemService {
+  /**
+   * Fetches the application state from the Electron main process.
+   * This includes system information (platform, arch) and user-specific paths.
+   * @returns {Promise<DesktopAppState>} A promise that resolves with the desktop app state.
+   */
+  async getAppState(): Promise<ElectronAppState> {
+    // Calls the underlying IPC function to get data from the main process
+    return dispatch('getDesktopAppState');
+  }
+
+  // Add other system-related service methods here if needed in the future
+}
+
+// Export a singleton instance of the service
+export const electronSystemService = new ElectronSystemService();
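A renderer-side usage sketch of the new service; the import path and the `platform`/`arch` fields are assumptions based on the file location and JSDoc above:

```ts
import { electronSystemService } from '@/services/electron/system';

// Resolve system information and user paths from the Electron main process.
const state = await electronSystemService.getAppState();
console.log(state.platform, state.arch); // fields assumed per the JSDoc above
```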
package/src/store/chat/slices/aiChat/actions/generateAIChat.ts

@@ -576,7 +576,7 @@ export const generateAIChat: StateCreator<
        },
        onFinish: async (
          content,
-          { traceId, observationId, toolCalls, reasoning, grounding, usage },
+          { traceId, observationId, toolCalls, reasoning, grounding, usage, speed },
        ) => {
          // if there is traceId, update it
          if (traceId) {
@@ -611,8 +611,8 @@ export const generateAIChat: StateCreator<
            toolCalls,
            reasoning: !!reasoning ? { ...reasoning, duration } : undefined,
            search: !!grounding?.citations ? grounding : undefined,
-            metadata: usage,
            imageList: finalImages.length > 0 ? finalImages : undefined,
+            metadata: speed ? { ...usage, ...speed } : usage,
          });
        },
        onMessageHandle: async (chunk) => {
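The merge above is what puts `tps` and `ttft` on the saved message. A sketch of the resulting metadata shape, with illustrative values; `usage` comes from the `usage` chunk and `speed` from the new `speed` chunk surfaced through `onFinish`:

```ts
const usage = { totalInputTokens: 9, totalOutputTokens: 120, totalTokens: 129 };
const speed = { tps: 30, ttft: 800 };

// Spreading keeps token usage and speed metrics side by side on metadata.
const metadata = speed ? { ...usage, ...speed } : usage;
// => { totalInputTokens: 9, totalOutputTokens: 120, totalTokens: 129, tps: 30, ttft: 800 }
```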
package/src/store/chat/slices/builtinTool/actions/search.ts

@@ -190,9 +190,6 @@ export const searchSlice: StateCreator<
 
    await get().internal_updateMessageContent(id, JSON.stringify(searchContent));
 
-    // 如果没搜索到结果,那么不触发 ai 总结
-    if (searchContent.length === 0) return;
-
    // 如果 aiSummary 为 true,则会自动触发总结
    return aiSummary;
  },
package/src/tools/local-files/Render/ListFiles/index.tsx

@@ -1,12 +1,10 @@
 import { ListLocalFileParams } from '@lobechat/electron-client-ipc';
-import { ActionIcon } from '@lobehub/ui';
 import { Typography } from 'antd';
 import { createStyles } from 'antd-style';
-import { FolderOpen } from 'lucide-react';
 import React, { memo } from 'react';
-import { useTranslation } from 'react-i18next';
 import { Flexbox } from 'react-layout-kit';
 
+import FileIcon from '@/components/FileIcon';
 import { localFileService } from '@/services/electron/localFileService';
 import { LocalFileListState } from '@/tools/local-files/type';
 import { ChatMessagePluginError } from '@/types/message';
@@ -20,8 +18,21 @@ const useStyles = createStyles(({ css, token, cx }) => ({
    opacity: 1;
    transition: opacity 0.2s ${token.motionEaseInOut};
  `),
+  container: css`
+    cursor: pointer;
+
+    padding-block: 2px;
+    padding-inline: 4px;
+    border-radius: 4px;
+
+    color: ${token.colorTextSecondary};
+
+    :hover {
+      color: ${token.colorText};
+      background: ${token.colorFillTertiary};
+    }
+  `,
  path: css`
-    padding-inline-start: 8px;
    color: ${token.colorTextSecondary};
  `,
}));
@@ -34,25 +45,21 @@
}
 
const ListFiles = memo<ListFilesProps>(({ messageId, pluginError, args, pluginState }) => {
-  const { t } = useTranslation('tool');
-
  const { styles } = useStyles();
  return (
    <>
-      <Flexbox gap={8} horizontal>
+      <Flexbox
+        className={styles.container}
+        gap={8}
+        horizontal
+        onClick={() => {
+          localFileService.openLocalFolder({ isDirectory: true, path: args.path });
+        }}
+      >
+        <FileIcon fileName={args.path} isDirectory size={22} variant={'pure'} />
        <Typography.Text className={styles.path} ellipsis>
          {args.path}
        </Typography.Text>
-        <Flexbox className={styles.actions} gap={8} horizontal style={{ marginLeft: 8 }}>
-          <ActionIcon
-            icon={FolderOpen}
-            onClick={() => {
-              localFileService.openLocalFolder({ isDirectory: true, path: args.path });
-            }}
-            size="small"
-            title={t('localFiles.openFolder')}
-          />
-        </Flexbox>
      </Flexbox>
      <SearchResult
        listResults={pluginState?.listResults}