@lobehub/chat 1.116.4 → 1.117.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/CHANGELOG.md +117 -0
  2. package/changelog/v1.json +21 -0
  3. package/locales/ar/models.json +3 -0
  4. package/locales/bg-BG/models.json +3 -0
  5. package/locales/de-DE/models.json +3 -0
  6. package/locales/en-US/models.json +3 -0
  7. package/locales/es-ES/models.json +3 -0
  8. package/locales/fa-IR/models.json +3 -0
  9. package/locales/fr-FR/models.json +3 -0
  10. package/locales/it-IT/models.json +3 -0
  11. package/locales/ja-JP/models.json +3 -0
  12. package/locales/ko-KR/models.json +3 -0
  13. package/locales/nl-NL/models.json +3 -0
  14. package/locales/pl-PL/models.json +3 -0
  15. package/locales/pt-BR/models.json +3 -0
  16. package/locales/ru-RU/models.json +3 -0
  17. package/locales/tr-TR/models.json +3 -0
  18. package/locales/vi-VN/models.json +3 -0
  19. package/locales/zh-CN/models.json +3 -0
  20. package/locales/zh-TW/models.json +3 -0
  21. package/package.json +1 -2
  22. package/packages/const/src/image.ts +9 -0
  23. package/packages/database/vitest.config.mts +1 -0
  24. package/packages/database/vitest.config.server.mts +1 -0
  25. package/packages/file-loaders/package.json +1 -1
  26. package/packages/model-runtime/src/RouterRuntime/createRuntime.ts +11 -9
  27. package/packages/model-runtime/src/google/createImage.test.ts +657 -0
  28. package/packages/model-runtime/src/google/createImage.ts +152 -0
  29. package/packages/model-runtime/src/google/index.test.ts +0 -328
  30. package/packages/model-runtime/src/google/index.ts +3 -40
  31. package/packages/model-runtime/src/utils/modelParse.ts +2 -1
  32. package/packages/model-runtime/src/utils/openaiCompatibleFactory/createImage.ts +239 -0
  33. package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.test.ts +22 -22
  34. package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.ts +9 -116
  35. package/packages/model-runtime/src/utils/postProcessModelList.ts +55 -0
  36. package/packages/model-runtime/src/utils/streams/google-ai.test.ts +7 -7
  37. package/packages/model-runtime/src/utils/streams/google-ai.ts +15 -2
  38. package/packages/model-runtime/src/utils/streams/openai/openai.test.ts +41 -0
  39. package/packages/model-runtime/src/utils/streams/openai/openai.ts +38 -2
  40. package/packages/model-runtime/src/utils/streams/protocol.test.ts +32 -0
  41. package/packages/model-runtime/src/utils/streams/protocol.ts +7 -3
  42. package/packages/model-runtime/src/utils/usageConverter.test.ts +58 -0
  43. package/packages/model-runtime/src/utils/usageConverter.ts +5 -1
  44. package/packages/utils/vitest.config.mts +1 -0
  45. package/src/components/ChatItem/ChatItem.tsx +183 -0
  46. package/src/components/ChatItem/components/Actions.tsx +25 -0
  47. package/src/components/ChatItem/components/Avatar.tsx +50 -0
  48. package/src/components/ChatItem/components/BorderSpacing.tsx +13 -0
  49. package/src/components/ChatItem/components/ErrorContent.tsx +24 -0
  50. package/src/components/ChatItem/components/Loading.tsx +26 -0
  51. package/src/components/ChatItem/components/MessageContent.tsx +76 -0
  52. package/src/components/ChatItem/components/Title.tsx +43 -0
  53. package/src/components/ChatItem/index.ts +2 -0
  54. package/src/components/ChatItem/style.ts +208 -0
  55. package/src/components/ChatItem/type.ts +80 -0
  56. package/src/config/aiModels/google.ts +42 -22
  57. package/src/config/aiModels/openrouter.ts +33 -0
  58. package/src/config/aiModels/vertexai.ts +4 -4
  59. package/src/features/ChatItem/index.tsx +1 -1
  60. package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx +6 -0
  61. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +38 -0
  62. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +13 -1
  63. package/src/locales/default/chat.ts +1 -0
  64. package/packages/model-runtime/src/UniformRuntime/index.ts +0 -117
@@ -2271,4 +2271,45 @@ describe('OpenAIStream', () => {
2271
2271
  );
2272
2272
  });
2273
2273
  });
2274
+
2275
+ it('should handle base64_image in delta.images (image_url shape)', async () => {
2276
+ const base64 =
2277
+ 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==';
2278
+
2279
+ const mockOpenAIStream = new ReadableStream({
2280
+ start(controller) {
2281
+ controller.enqueue({
2282
+ choices: [
2283
+ {
2284
+ delta: {
2285
+ images: [
2286
+ {
2287
+ type: 'image_url',
2288
+ image_url: { url: base64 },
2289
+ index: 0,
2290
+ },
2291
+ ],
2292
+ },
2293
+ index: 0,
2294
+ },
2295
+ ],
2296
+ id: '6',
2297
+ });
2298
+
2299
+ controller.close();
2300
+ },
2301
+ });
2302
+
2303
+ const protocolStream = OpenAIStream(mockOpenAIStream);
2304
+
2305
+ const decoder = new TextDecoder();
2306
+ const chunks = [];
2307
+
2308
+ // @ts-ignore
2309
+ for await (const chunk of protocolStream) {
2310
+ chunks.push(decoder.decode(chunk, { stream: true }));
2311
+ }
2312
+
2313
+ expect(chunks).toEqual(['id: 6\n', 'event: base64_image\n', `data: "${base64}"\n\n`]);
2314
+ });
2274
2315
  });
@@ -96,6 +96,36 @@ const transformOpenAIStream = (
96
96
  }
97
97
  }
98
98
 
99
+ // Handle image preview chunks (e.g. Gemini 2.5 flash image preview)
100
+ // Example shape:
101
+ // choices[0].delta.images = [{ type: 'image_url', image_url: { url: 'data:image/png;base64,...' }, index: 0 }]
102
+ if (
103
+ (item as any).delta &&
104
+ Array.isArray((item as any).delta.images) &&
105
+ (item as any).delta.images.length > 0
106
+ ) {
107
+ const images = (item as any).delta.images as any[];
108
+
109
+ return images
110
+ .map((img) => {
111
+ // support multiple possible shapes for the url
112
+ const url =
113
+ img?.image_url?.url ||
114
+ img?.image_url?.image_url?.url ||
115
+ img?.url ||
116
+ (typeof img === 'string' ? img : undefined);
117
+
118
+ if (!url) return null;
119
+
120
+ return {
121
+ data: url,
122
+ id: chunk.id,
123
+ type: 'base64_image',
124
+ } as StreamProtocolChunk;
125
+ })
126
+ .filter(Boolean) as StreamProtocolChunk[];
127
+ }
128
+
99
129
  // 给定结束原因
100
130
  if (item.finish_reason) {
101
131
  // one-api 的流式接口,会出现既有 finish_reason ,也有 content 的情况
@@ -192,11 +222,11 @@ const transformOpenAIStream = (
192
222
  if ('content' in item.delta && Array.isArray(item.delta.content)) {
193
223
  return item.delta.content
194
224
  .filter((block: any) => block.type === 'thinking' && Array.isArray(block.thinking))
195
- .map((block: any) =>
225
+ .map((block: any) =>
196
226
  block.thinking
197
227
  .filter((thinkItem: any) => thinkItem.type === 'text' && thinkItem.text)
198
228
  .map((thinkItem: any) => thinkItem.text)
199
- .join('')
229
+ .join(''),
200
230
  )
201
231
  .join('');
202
232
  }
@@ -233,6 +263,12 @@ const transformOpenAIStream = (
233
263
  streamContext.thinkingInContent = false;
234
264
  }
235
265
 
266
+ // 如果 content 是空字符串但 chunk 带有 usage,则优先返回 usage(例如 Gemini image-preview 最终会在单独的 chunk 中返回 usage)
267
+ if (content === '' && chunk.usage) {
268
+ const usage = chunk.usage;
269
+ return { data: convertUsage(usage, provider), id: chunk.id, type: 'usage' };
270
+ }
271
+
236
272
  // 判断是否有 citations 内容,更新 returnedCitation 状态
237
273
  if (!streamContext?.returnedCitation) {
238
274
  const citations =
@@ -200,4 +200,36 @@ describe('createTokenSpeedCalculator', async () => {
200
200
  const results = await processChunk(transformer, chunks);
201
201
  expect(results).toHaveLength(chunks.length);
202
202
  });
203
+
204
+ it('should calculate token speed considering outputImageTokens when totalOutputTokens is missing', async () => {
205
+ const chunks = [
206
+ { data: '', id: 'chatcmpl-image-1', type: 'text' },
207
+ { data: 'hi', id: 'chatcmpl-image-1', type: 'text' },
208
+ { data: 'stop', id: 'chatcmpl-image-1', type: 'stop' },
209
+ {
210
+ data: {
211
+ inputTextTokens: 9,
212
+ outputTextTokens: 1,
213
+ outputImageTokens: 4,
214
+ totalInputTokens: 9,
215
+ // totalOutputTokens intentionally omitted to force summation path
216
+ totalTokens: 13,
217
+ },
218
+ id: 'chatcmpl-image-1',
219
+ type: 'usage',
220
+ },
221
+ ];
222
+
223
+ const transformer = createTokenSpeedCalculator((v) => v, { inputStartAt });
224
+ const results = await processChunk(transformer, chunks);
225
+
226
+ // should push an extra speed chunk
227
+ expect(results).toHaveLength(chunks.length + 1);
228
+ const speedChunk = results.slice(-1)[0];
229
+ expect(speedChunk.id).toBe('output_speed');
230
+ expect(speedChunk.type).toBe('speed');
231
+ // tps and ttft should be numeric (avoid flakiness if interval is 0ms)
232
+ expect(speedChunk.data.tps).not.toBeNaN();
233
+ expect(speedChunk.data.ttft).not.toBeNaN();
234
+ });
203
235
  });
@@ -364,10 +364,14 @@ export const createTokenSpeedCalculator = (
364
364
  }
365
365
  // if the chunk is the stop chunk, set as output finish
366
366
  if (inputStartAt && outputStartAt && chunk.type === 'usage') {
367
- const totalOutputTokens = chunk.data?.totalOutputTokens || chunk.data?.outputTextTokens;
368
- const reasoningTokens = chunk.data?.outputReasoningTokens || 0;
367
+ const totalOutputTokens =
368
+ chunk.data?.totalOutputTokens ??
369
+ (chunk.data?.outputTextTokens ?? 0) + (chunk.data?.outputImageTokens ?? 0);
370
+ const reasoningTokens = chunk.data?.outputReasoningTokens ?? 0;
369
371
  const outputTokens =
370
- (outputThinking ?? false) ? totalOutputTokens : totalOutputTokens - reasoningTokens;
372
+ (outputThinking ?? false)
373
+ ? totalOutputTokens
374
+ : Math.max(0, totalOutputTokens - reasoningTokens);
371
375
  result.push({
372
376
  data: {
373
377
  tps: (outputTokens / (Date.now() - outputStartAt)) * 1000,
@@ -290,4 +290,62 @@ describe('convertUsage', () => {
290
290
  totalTokens: 6550,
291
291
  });
292
292
  });
293
+
294
+ it('should handle output image tokens correctly', () => {
295
+ // Arrange
296
+ const usageWithImage = {
297
+ prompt_tokens: 100,
298
+ completion_tokens: 200,
299
+ completion_tokens_details: {
300
+ image_tokens: 60,
301
+ reasoning_tokens: 30,
302
+ },
303
+ total_tokens: 300,
304
+ } as OpenAI.Completions.CompletionUsage;
305
+
306
+ // Act
307
+ const result = convertUsage(usageWithImage);
308
+
309
+ // Assert
310
+ expect(result).toEqual({
311
+ inputTextTokens: 100,
312
+ totalInputTokens: 100,
313
+ totalOutputTokens: 200,
314
+ outputImageTokens: 60,
315
+ outputReasoningTokens: 30,
316
+ outputTextTokens: 110, // 200 - 60 - 30
317
+ totalTokens: 300,
318
+ });
319
+ });
320
+
321
+ it('should handle response output image tokens correctly for ResponseUsage', () => {
322
+ // Arrange
323
+ const responseUsage = {
324
+ input_tokens: 100,
325
+ input_tokens_details: {
326
+ cached_tokens: 0,
327
+ },
328
+ output_tokens: 200,
329
+ output_tokens_details: {
330
+ image_tokens: 60,
331
+ reasoning_tokens: 30,
332
+ },
333
+ total_tokens: 300,
334
+ } as OpenAI.Responses.ResponseUsage;
335
+
336
+ // Act
337
+ const result = convertResponseUsage(responseUsage);
338
+
339
+ // Assert
340
+ expect(result).toEqual({
341
+ inputTextTokens: 100,
342
+ inputCacheMissTokens: 100, // 100 - 0
343
+ totalInputTokens: 100,
344
+ totalOutputTokens: 200,
345
+ outputImageTokens: 60,
346
+ outputReasoningTokens: 30,
347
+ outputTextTokens: 170, // 200 - 30
348
+ totalTokens: 300,
349
+ });
350
+ });
293
351
  });
@@ -20,12 +20,13 @@ export const convertUsage = (
20
20
  const totalOutputTokens = usage.completion_tokens;
21
21
  const outputReasoning = usage.completion_tokens_details?.reasoning_tokens || 0;
22
22
  const outputAudioTokens = usage.completion_tokens_details?.audio_tokens || 0;
23
+ const outputImageTokens = (usage.completion_tokens_details as any)?.image_tokens || 0;
23
24
 
24
25
  // XAI 的 completion_tokens 不包含 reasoning_tokens,需要特殊处理
25
26
  const outputTextTokens =
26
27
  provider === 'xai'
27
28
  ? totalOutputTokens - outputAudioTokens
28
- : totalOutputTokens - outputReasoning - outputAudioTokens;
29
+ : totalOutputTokens - outputReasoning - outputAudioTokens - outputImageTokens;
29
30
 
30
31
  const totalTokens = inputCitationTokens + usage.total_tokens;
31
32
 
@@ -37,6 +38,7 @@ export const convertUsage = (
37
38
  inputCitationTokens: inputCitationTokens,
38
39
  inputTextTokens: inputTextTokens,
39
40
  outputAudioTokens: outputAudioTokens,
41
+ outputImageTokens: outputImageTokens,
40
42
  outputReasoningTokens: outputReasoning,
41
43
  outputTextTokens: outputTextTokens,
42
44
  rejectedPredictionTokens: usage.completion_tokens_details?.rejected_prediction_tokens,
@@ -75,6 +77,7 @@ export const convertResponseUsage = (usage: OpenAI.Responses.ResponseUsage): Mod
75
77
 
76
78
  // For ResponseUsage, outputTextTokens is totalOutputTokens minus reasoning, as no audio output tokens are specified.
77
79
  const outputTextTokens = totalOutputTokens - outputReasoningTokens;
80
+ const outputImageTokens = (usage.output_tokens_details as any)?.image_tokens || 0;
78
81
 
79
82
  // 3. Construct the comprehensive data object (matching ModelTokensUsage structure)
80
83
  const data = {
@@ -87,6 +90,7 @@ export const convertResponseUsage = (usage: OpenAI.Responses.ResponseUsage): Mod
87
90
  inputCitationTokens: undefined, // Not in ResponseUsage
88
91
  inputTextTokens: inputTextTokens,
89
92
  outputAudioTokens: undefined, // Not in ResponseUsage
93
+ outputImageTokens: outputImageTokens,
90
94
  outputReasoningTokens: outputReasoningTokens,
91
95
  outputTextTokens: outputTextTokens,
92
96
  rejectedPredictionTokens: undefined, // Not in ResponseUsage
@@ -7,6 +7,7 @@ export default defineConfig({
7
7
  /* eslint-disable sort-keys-fix/sort-keys-fix */
8
8
  '@/types': resolve(__dirname, '../types/src'),
9
9
  '@/const': resolve(__dirname, '../const/src'),
10
+ '@/libs/model-runtime': resolve(__dirname, '../model-runtime/src'),
10
11
  '@': resolve(__dirname, '../../src'),
11
12
  /* eslint-enable */
12
13
  },
@@ -0,0 +1,183 @@
1
'use client';

import { useResponsive } from 'antd-style';
import { memo, useEffect, useRef, useState } from 'react';
import { Flexbox } from 'react-layout-kit';

import Actions from './components/Actions';
import Avatar from './components/Avatar';
import BorderSpacing from './components/BorderSpacing';
import ErrorContent from './components/ErrorContent';
import MessageContent from './components/MessageContent';
import Title from './components/Title';
import { useStyles } from './style';
import type { ChatItemProps } from './type';

// Avatar size (px) used on mobile; also reused as the border spacing width below.
const MOBILE_AVATAR_SIZE = 32;

/**
 * A single chat message row: avatar, title/time, message body (editable),
 * error state, and an actions bar.
 *
 * The actions bar is laid out either beside the message ('horizontal') or
 * under it ('vertical'); the mode is chosen at runtime by measuring whether
 * the content plus a reserved actions width still fits the container.
 */
const ChatItem = memo<ChatItemProps>(
  ({
    avatarAddon,
    onAvatarClick,
    avatarProps,
    actions,
    className,
    primary,
    loading,
    message,
    placeholderMessage = '...',
    placement = 'left',
    variant = 'bubble',
    avatar,
    error,
    showTitle,
    time,
    editing,
    onChange,
    onEditingChange,
    messageExtra,
    renderMessage,
    text,
    errorMessage,
    onDoubleClick,
    fontSize,
    aboveMessage,
    belowMessage,
    markdownProps,
    // Minimum width (px) reserved for the Actions bar when deciding the layout mode.
    actionsWrapWidth = 54,
    ...rest
  }) => {
    const { mobile } = useResponsive();
    const { cx, styles } = useStyles({
      editing,
      placement,
      primary,
      showTitle,
      time,
      title: avatar.title,
      variant,
    });

    // Refs used to measure the rendered message content against its container.
    const contentRef = useRef<HTMLDivElement>(null);
    const containerRef = useRef<HTMLDivElement>(null);
    const [layoutMode, setLayoutMode] = useState<'horizontal' | 'vertical'>(
      variant === 'bubble' ? 'horizontal' : 'vertical',
    );

    // Watch content and container sizes with a ResizeObserver to pick the layout mode.
    useEffect(() => {
      // The 'docs' variant is always vertical; no measuring needed.
      if (variant === 'docs') {
        setLayoutMode('vertical');
        return;
      }

      if (!contentRef.current || !containerRef.current) return;

      const observer = new ResizeObserver(() => {
        if (!contentRef.current || !containerRef.current) return;

        const containerWidth = containerRef.current.clientWidth;
        const contentWidth = contentRef.current.scrollWidth; // use scrollWidth for the actual content width

        // Reserve a minimum amount of space for Actions (tune via actionsWrapWidth).

        // Switch to vertical only when content width + reserved Actions width exceeds the container.
        setLayoutMode(contentWidth + actionsWrapWidth > containerWidth ? 'vertical' : 'horizontal');
      });

      observer.observe(contentRef.current);
      observer.observe(containerRef.current);

      return () => observer.disconnect();
    }, [variant, actionsWrapWidth]);

    return (
      <Flexbox
        className={cx(styles.container, className)}
        direction={placement === 'left' ? 'horizontal' : 'horizontal-reverse'}
        gap={mobile ? 6 : 12}
        {...rest}
      >
        <Avatar
          {...avatarProps}
          addon={avatarAddon}
          alt={avatarProps?.alt || avatar.title || 'avatar'}
          avatar={avatar}
          loading={loading}
          onClick={onAvatarClick}
          placement={placement}
          size={mobile ? MOBILE_AVATAR_SIZE : undefined}
          style={{
            marginTop: 6,
            ...avatarProps?.style,
          }}
        />
        <Flexbox
          align={placement === 'left' ? 'flex-start' : 'flex-end'}
          className={styles.messageContainer}
          ref={containerRef}
        >
          <Title avatar={avatar} placement={placement} showTitle={showTitle} time={time} />
          {aboveMessage}
          <Flexbox
            align={placement === 'left' ? 'flex-start' : 'flex-end'}
            className={styles.messageContent}
            data-layout={layoutMode} // data attribute for style targeting
            direction={
              layoutMode === 'horizontal'
                ? placement === 'left'
                  ? 'horizontal'
                  : 'horizontal-reverse'
                : 'vertical'
            }
            gap={8}
          >
            <Flexbox ref={contentRef} width={'100%'}>
              {/* When the message is still the placeholder (or empty) and errored, show only the error. */}
              {error && (message === placeholderMessage || !message) ? (
                <ErrorContent error={error} message={errorMessage} placement={placement} />
              ) : (
                <MessageContent
                  editing={editing}
                  fontSize={fontSize}
                  markdownProps={markdownProps}
                  message={message}
                  messageExtra={
                    <>
                      {error && (
                        <ErrorContent error={error} message={errorMessage} placement={placement} />
                      )}
                      {messageExtra}
                    </>
                  }
                  onChange={onChange}
                  onDoubleClick={onDoubleClick}
                  onEditingChange={onEditingChange}
                  placement={placement}
                  primary={primary}
                  renderMessage={renderMessage}
                  text={text}
                  variant={variant}
                />
              )}
            </Flexbox>
            {actions && (
              <Actions
                actions={actions}
                editing={editing}
                placement={placement}
                variant={variant}
              />
            )}
          </Flexbox>
          {belowMessage}
        </Flexbox>
        {mobile && variant === 'bubble' && <BorderSpacing borderSpacing={MOBILE_AVATAR_SIZE} />}
      </Flexbox>
    );
  },
);

export default ChatItem;

export type { ChatItemProps } from './type';
@@ -0,0 +1,25 @@
1
+ import { type Ref, memo } from 'react';
2
+ import { Flexbox } from 'react-layout-kit';
3
+
4
+ import { useStyles } from '../style';
5
+ import { ChatItemProps } from '../type';
6
+
7
+ export interface ActionsProps {
8
+ actions: ChatItemProps['actions'];
9
+ editing?: boolean;
10
+ placement?: ChatItemProps['placement'];
11
+ ref?: Ref<HTMLDivElement>;
12
+ variant?: ChatItemProps['variant'];
13
+ }
14
+
15
+ const Actions = memo<ActionsProps>(({ actions, placement, variant, editing, ref }) => {
16
+ const { styles } = useStyles({ editing, placement, variant });
17
+
18
+ return (
19
+ <Flexbox align={'flex-start'} className={styles.actions} ref={ref} role="menubar">
20
+ {actions}
21
+ </Flexbox>
22
+ );
23
+ });
24
+
25
+ export default Actions;
@@ -0,0 +1,50 @@
1
import { Avatar as A } from '@lobehub/ui';
import { type CSSProperties, memo } from 'react';
import { Flexbox } from 'react-layout-kit';

import { useStyles } from '../style';
import type { ChatItemProps } from '../type';
import Loading from './Loading';

export interface AvatarProps {
  addon?: ChatItemProps['avatarAddon'];
  alt?: string;
  avatar: ChatItemProps['avatar'];
  loading?: ChatItemProps['loading'];
  onClick?: ChatItemProps['onAvatarClick'];
  placement?: ChatItemProps['placement'];
  size?: number;
  style?: CSSProperties;
  unoptimized?: boolean;
}

/**
 * Chat item avatar with a loading badge overlay. When an `addon` node is
 * provided, the avatar and the addon are stacked in a flex group.
 */
const Avatar = memo<AvatarProps>((props) => {
  const { loading, avatar, placement, unoptimized, addon, onClick, size = 40, style, alt } = props;
  const { styles } = useStyles({ avatarSize: size });

  const core = (
    <div className={styles.avatarContainer} style={style}>
      <A
        alt={alt || avatar.title}
        animation={loading}
        avatar={avatar.avatar}
        background={avatar.backgroundColor}
        onClick={onClick}
        size={size}
        title={avatar.title}
        unoptimized={unoptimized}
      />
      <Loading loading={loading} placement={placement} />
    </div>
  );

  // With an addon, wrap avatar + addon in a vertical group; otherwise render the avatar alone.
  if (addon) {
    return (
      <Flexbox align={'center'} className={styles.avatarGroupContainer} gap={8}>
        {core}
        {addon}
      </Flexbox>
    );
  }

  return core;
});

export default Avatar;
@@ -0,0 +1,13 @@
1
import { memo } from 'react';

export interface BorderSpacingProps {
  borderSpacing?: number;
}

/**
 * A fixed-width, non-flexing spacer. Renders nothing when `borderSpacing`
 * is absent or zero.
 */
const BorderSpacing = memo<BorderSpacingProps>((props) => {
  const width = props.borderSpacing;

  if (!width) return null;

  return <div style={{ flex: 'none', width }} />;
});

export default BorderSpacing;
@@ -0,0 +1,24 @@
1
import { Alert } from '@lobehub/ui';
import { memo } from 'react';
import { Flexbox } from 'react-layout-kit';

import { useStyles } from '../style';
import { ChatItemProps } from '../type';

export interface ErrorContentProps {
  error?: ChatItemProps['error'];
  message?: ChatItemProps['errorMessage'];
  placement?: ChatItemProps['placement'];
}

/**
 * Error banner for a chat item. `error` is spread onto the Alert last, so its
 * fields override the defaults set here; `message` renders as the Alert extra.
 */
const ErrorContent = memo<ErrorContentProps>((props) => {
  const { styles } = useStyles({ placement: props.placement });

  return (
    <Flexbox className={styles.errorContainer}>
      <Alert closable={false} extra={props.message} showIcon type={'error'} {...props.error} />
    </Flexbox>
  );
});

export default ErrorContent;
@@ -0,0 +1,26 @@
1
import { Icon } from '@lobehub/ui';
import { Loader2 } from 'lucide-react';
import { memo } from 'react';
import { Flexbox } from 'react-layout-kit';

import { useStyles } from '../style';
import { ChatItemProps } from '../type';

export interface LoadingProps {
  loading?: ChatItemProps['loading'];
  placement?: ChatItemProps['placement'];
}

/**
 * Small spinner badge shown over the avatar while the message is loading.
 * Renders nothing when `loading` is falsy.
 */
const Loading = memo<LoadingProps>((props) => {
  const { styles } = useStyles({ placement: props.placement });

  if (!props.loading) return null;

  return (
    <Flexbox align={'center'} className={styles.loading} justify={'center'}>
      <Icon icon={Loader2} size={{ size: 12, strokeWidth: 3 }} spin />
    </Flexbox>
  );
});

export default Loading;
@@ -0,0 +1,76 @@
1
import { MarkdownProps } from '@lobehub/ui';
import { EditableMessage } from '@lobehub/ui/chat';
import { useResponsive } from 'antd-style';
import { type ReactNode, memo } from 'react';
import { Flexbox } from 'react-layout-kit';

import { useStyles } from '../style';
import { ChatItemProps } from '../type';

export interface MessageContentProps {
  editing?: ChatItemProps['editing'];
  fontSize?: number;
  markdownProps?: Omit<MarkdownProps, 'className' | 'style' | 'children'>;
  message?: ReactNode;
  messageExtra?: ChatItemProps['messageExtra'];
  onChange?: ChatItemProps['onChange'];
  onDoubleClick?: ChatItemProps['onDoubleClick'];
  onEditingChange?: ChatItemProps['onEditingChange'];
  placement?: ChatItemProps['placement'];
  primary?: ChatItemProps['primary'];
  renderMessage?: ChatItemProps['renderMessage'];
  text?: ChatItemProps['text'];
  variant?: ChatItemProps['variant'];
}

/**
 * The message body of a chat item: an EditableMessage optionally wrapped by a
 * caller-provided `renderMessage`, plus an optional extra section that is
 * hidden while editing.
 */
const MessageContent = memo<MessageContentProps>((props) => {
  const {
    editing,
    onChange,
    onEditingChange,
    text,
    message,
    placement,
    messageExtra,
    renderMessage,
    variant,
    primary,
    onDoubleClick,
    fontSize,
    markdownProps,
  } = props;

  const { cx, styles } = useStyles({ editing, placement, primary, variant });
  const { mobile } = useResponsive();

  const editable = (
    <EditableMessage
      classNames={{ input: styles.editingInput }}
      editButtonSize={'small'}
      editing={editing}
      fontSize={fontSize}
      fullFeaturedCodeBlock
      markdownProps={markdownProps}
      onChange={onChange}
      onEditingChange={onEditingChange}
      // On mobile, editing happens inside a modal.
      openModal={mobile ? editing : undefined}
      text={text}
      value={message ? String(message) : ''}
    />
  );

  const body = renderMessage ? renderMessage(editable) : editable;
  const extra =
    messageExtra && !editing ? <div className={styles.messageExtra}>{messageExtra}</div> : null;

  return (
    <Flexbox
      className={cx(styles.message, editing && styles.editingContainer)}
      onDoubleClick={onDoubleClick}
    >
      {body}
      {extra}
    </Flexbox>
  );
});

export default MessageContent;