@lobehub/chat 1.103.1 → 1.103.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +35 -0
- package/apps/desktop/build/icon-beta.ico +0 -0
- package/apps/desktop/build/icon-dev.ico +0 -0
- package/apps/desktop/build/icon-nightly.ico +0 -0
- package/apps/desktop/build/icon.ico +0 -0
- package/apps/desktop/electron.vite.config.ts +4 -2
- package/apps/desktop/package.json +1 -0
- package/apps/desktop/src/main/appBrowsers.ts +2 -2
- package/apps/desktop/src/main/const/env.ts +5 -4
- package/apps/desktop/src/main/const/store.ts +1 -0
- package/apps/desktop/src/main/const/theme.ts +11 -0
- package/apps/desktop/src/main/controllers/BrowserWindowsCtr.ts +1 -1
- package/apps/desktop/src/main/controllers/NotificationCtr.ts +2 -4
- package/apps/desktop/src/main/controllers/SystemCtr.ts +4 -0
- package/apps/desktop/src/main/controllers/TrayMenuCtr.ts +5 -5
- package/apps/desktop/src/main/controllers/index.ts +1 -1
- package/apps/desktop/src/main/core/App.ts +9 -10
- package/apps/desktop/src/main/core/{Browser.ts → browser/Browser.ts} +129 -88
- package/apps/desktop/src/main/core/{BrowserManager.ts → browser/BrowserManager.ts} +13 -3
- package/apps/desktop/src/main/core/{StaticFileServerManager.ts → infrastructure/StaticFileServerManager.ts} +13 -7
- package/apps/desktop/src/main/core/{StoreManager.ts → infrastructure/StoreManager.ts} +1 -1
- package/apps/desktop/src/main/core/{UpdaterManager.ts → infrastructure/UpdaterManager.ts} +1 -1
- package/apps/desktop/src/main/core/{MenuManager.ts → ui/MenuManager.ts} +2 -2
- package/apps/desktop/src/main/core/{ShortcutManager.ts → ui/ShortcutManager.ts} +7 -1
- package/apps/desktop/src/main/core/{Tray.ts → ui/Tray.ts} +61 -59
- package/apps/desktop/src/main/core/{TrayManager.ts → ui/TrayManager.ts} +5 -5
- package/apps/desktop/src/main/shortcuts/config.ts +2 -2
- package/apps/desktop/src/main/types/store.ts +1 -0
- package/changelog/v1.json +12 -0
- package/docs/development/basic/add-new-image-model.mdx +162 -0
- package/docs/development/basic/add-new-image-model.zh-CN.mdx +162 -0
- package/docs/usage/providers/fal.mdx +1 -1
- package/docs/usage/providers/fal.zh-CN.mdx +1 -1
- package/package.json +66 -66
- package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/Portal.tsx +3 -1
- package/src/app/[variants]/(main)/chat/(workspace)/features/AgentSettings/index.tsx +4 -2
- package/src/app/[variants]/(main)/image/@menu/components/SeedNumberInput/index.tsx +1 -1
- package/src/app/[variants]/(main)/image/features/GenerationFeed/BatchItem.tsx +39 -3
- package/src/app/[variants]/(main)/image/features/GenerationFeed/ReferenceImages.tsx +122 -0
- package/src/config/aiModels/fal.ts +31 -7
- package/src/config/aiModels/openai.ts +10 -1
- package/src/features/ElectronTitlebar/WinControl/index.tsx +85 -90
- package/src/features/ElectronTitlebar/hooks/useWatchThemeUpdate.ts +10 -5
- package/src/features/ImageTopicPanel/index.tsx +0 -1
- package/src/features/PluginDevModal/index.tsx +3 -1
- package/src/features/User/__tests__/UserAvatar.test.tsx +5 -4
- package/src/libs/model-runtime/fal/index.ts +1 -1
- package/src/libs/model-runtime/types/image.ts +1 -1
- package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts +1 -1
- package/src/libs/model-runtime/utils/response.ts +2 -0
- package/src/libs/model-runtime/utils/streams/google-ai.test.ts +46 -0
- package/src/libs/model-runtime/utils/streams/google-ai.ts +4 -4
- package/src/libs/model-runtime/utils/streams/vertex-ai.ts +6 -8
- package/src/libs/standard-parameters/{meta-schema.test.ts → index.test.ts} +1 -1
- package/src/libs/standard-parameters/index.ts +152 -1
- package/src/server/ld.test.ts +4 -3
- package/src/server/routers/async/image.ts +1 -1
- package/src/services/__tests__/chat.test.ts +3 -4
- package/src/store/chat/slices/message/selectors.test.ts +2 -3
- package/src/store/chat/slices/plugin/action.test.ts +2 -1
- package/src/store/image/slices/generationConfig/action.test.ts +2 -2
- package/src/store/image/slices/generationConfig/action.ts +1 -1
- package/src/store/image/slices/generationConfig/hooks.test.ts +2 -2
- package/src/store/image/slices/generationConfig/hooks.ts +1 -4
- package/src/store/image/slices/generationConfig/initialState.ts +2 -2
- package/src/store/image/slices/generationConfig/selectors.test.ts +2 -2
- package/src/store/image/slices/generationConfig/selectors.ts +1 -1
- package/src/store/user/slices/auth/selectors.test.ts +3 -2
- package/src/types/generation/index.ts +1 -0
- package/docs/development/basic/add-new-ai-image-model.mdx +0 -36
- package/docs/development/basic/add-new-ai-image-model.zh-CN.mdx +0 -0
- package/src/config/paramsSchemas/fal/flux-kontext-dev.ts +0 -8
- package/src/config/paramsSchemas/fal/flux-pro-kontext.ts +0 -11
- package/src/config/paramsSchemas/fal/flux-schnell.ts +0 -9
- package/src/config/paramsSchemas/fal/imagen4.ts +0 -10
- package/src/config/paramsSchemas/openai/gpt-image-1.ts +0 -10
- package/src/libs/standard-parameters/meta-schema.ts +0 -147
- /package/apps/desktop/src/main/core/{I18nManager.ts → infrastructure/I18nManager.ts} +0 -0
- /package/apps/desktop/src/main/core/{IoCContainer.ts → infrastructure/IoCContainer.ts} +0 -0
package/src/app/[variants]/(main)/image/features/GenerationFeed/ReferenceImages.tsx

@@ -0,0 +1,122 @@
+'use client';
+
+import { createStyles } from 'antd-style';
+import { memo } from 'react';
+import { Flexbox } from 'react-layout-kit';
+
+import ImageItem from '@/components/ImageItem';
+
+const useStyles = createStyles(({ css, token }) => ({
+  container: css`
+    gap: 8px;
+    margin-block-end: 12px;
+  `,
+  image: css`
+    overflow: hidden;
+    flex-shrink: 0;
+
+    width: 60px;
+    height: 60px;
+    border-radius: ${token.borderRadius}px;
+  `,
+  imageSingle: css`
+    position: relative;
+    transform: rotate(-3deg);
+
+    flex-shrink: 0;
+
+    width: 64px;
+    height: 64px;
+
+    transition: transform 0.2s ease;
+
+    &::before {
+      content: '';
+
+      position: absolute;
+      z-index: -1;
+      inset: -4px;
+
+      border: 1px solid ${token.colorBorder};
+      border-radius: ${token.borderRadius}px;
+
+      background: ${token.colorBgContainer};
+      box-shadow: 0 2px 8px ${token.colorBgMask};
+    }
+
+    &:hover {
+      transform: rotate(-1deg) scale(1.05);
+    }
+  `,
+  imageSingleInner: css`
+    overflow: hidden;
+
+    width: 100%;
+    height: 100%;
+    border-radius: ${token.borderRadiusSM}px;
+
+    background: ${token.colorBgLayout};
+  `,
+}));
+
+interface ReferenceImagesProps {
+  imageUrl?: string | null;
+  imageUrls?: string[];
+  layout?: 'single' | 'multiple';
+}
+
+export const ReferenceImages = memo<ReferenceImagesProps>(({ imageUrl, imageUrls, layout }) => {
+  const { styles } = useStyles();
+
+  // Collect all images
+  const allImages: string[] = [];
+  if (imageUrl) {
+    allImages.push(imageUrl);
+  }
+  if (imageUrls && imageUrls.length > 0) {
+    allImages.push(...imageUrls);
+  }
+
+  // Don't render if no images
+  if (allImages.length === 0) {
+    return null;
+  }
+
+  // Single image layout (no label, with frame effect)
+  if (layout === 'single' && allImages.length === 1) {
+    return (
+      <div className={styles.imageSingle}>
+        <div className={styles.imageSingleInner}>
+          <ImageItem
+            alt="Reference image"
+            preview={{
+              src: allImages[0],
+            }}
+            style={{ height: '100%', width: '100%' }}
+            url={allImages[0]}
+          />
+        </div>
+      </div>
+    );
+  }
+
+  // Multiple images layout
+  return (
+    <Flexbox className={styles.container} horizontal wrap="wrap">
+      {allImages.map((url, index) => (
+        <div className={styles.image} key={`${url}-${index}`}>
+          <ImageItem
+            alt={`Reference image ${index + 1}`}
+            preview={{
+              src: url,
+            }}
+            style={{ height: '100%', width: '100%' }}
+            url={url}
+          />
+        </div>
+      ))}
+    </Flexbox>
+  );
+});
+
+ReferenceImages.displayName = 'ReferenceImages';
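
For orientation, a minimal, hypothetical consumer of the new `ReferenceImages` component. Only the component name and its props (`imageUrl`, `imageUrls`, `layout`) come from the diff above; the wrapper component and its props are invented for illustration.

```tsx
// Hypothetical wrapper; only ReferenceImages and its props are taken from the diff above.
import { ReferenceImages } from './ReferenceImages';

interface BatchHeaderProps {
  prompt: string;
  referenceImageUrls?: string[];
}

// Renders reference thumbnails above the prompt. ReferenceImages returns null when
// no image URLs are supplied, so the wrapper needs no extra guard.
const BatchHeader = ({ prompt, referenceImageUrls }: BatchHeaderProps) => (
  <div>
    <ReferenceImages
      imageUrls={referenceImageUrls}
      layout={referenceImageUrls?.length === 1 ? 'single' : 'multiple'}
    />
    <p>{prompt}</p>
  </div>
);

export default BatchHeader;
```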

package/src/config/aiModels/fal.ts

@@ -1,9 +1,13 @@
+import { ModelParamsSchema } from '@/libs/standard-parameters';
 import { AIImageModelCard } from '@/types/aiModel';
 
-
-
-
-
+export const fluxSchnellParamsSchema: ModelParamsSchema = {
+  height: { default: 1024, max: 1536, min: 512, step: 1 },
+  prompt: { default: '' },
+  seed: { default: null },
+  steps: { default: 4, max: 12, min: 1 },
+  width: { default: 1024, max: 1536, min: 512, step: 1 },
+};
 
 const falImageModels: AIImageModelCard[] = [
   {
@@ -11,7 +15,12 @@ const falImageModels: AIImageModelCard[] = [
     displayName: 'FLUX.1 Kontext Dev',
     enabled: true,
     id: 'flux-kontext/dev',
-    parameters:
+    parameters: {
+      imageUrl: { default: null },
+      prompt: { default: '' },
+      seed: { default: null },
+      steps: { default: 28, max: 50, min: 10 },
+    },
     releasedAt: '2025-06-28',
     type: 'image',
   },
@@ -21,7 +30,15 @@ const falImageModels: AIImageModelCard[] = [
     displayName: 'FLUX.1 Kontext [pro]',
     enabled: true,
     id: 'flux-pro/kontext',
-    parameters:
+    parameters: {
+      aspectRatio: {
+        default: '1:1',
+        enum: ['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21'],
+      },
+      imageUrl: { default: null },
+      prompt: { default: '' },
+      seed: { default: null },
+    },
     releasedAt: '2025-05-01',
     type: 'image',
   },
@@ -41,7 +58,14 @@ const falImageModels: AIImageModelCard[] = [
     enabled: true,
     id: 'imagen4/preview',
     organization: 'Deepmind',
-    parameters:
+    parameters: {
+      aspectRatio: {
+        default: '1:1',
+        enum: ['1:1', '16:9', '9:16', '3:4', '4:3'],
+      },
+      prompt: { default: '' },
+      seed: { default: null },
+    },
     releasedAt: '2025-05-21',
     type: 'image',
   },
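
The inline `parameters` objects above replace the per-model files deleted from `src/config/paramsSchemas/fal/`. As a hedged sketch, a further image model under this shape might be declared as follows; the model id and numeric limits are invented, and only `ModelParamsSchema`, `AIImageModelCard`, and the field shapes come from the diff.

```ts
// Illustrative only: 'example/new-image-model' and its parameter ranges are invented.
import { ModelParamsSchema } from '@/libs/standard-parameters';
import { AIImageModelCard } from '@/types/aiModel';

// Same { default, min, max, step } shape used by fluxSchnellParamsSchema above.
export const exampleParamsSchema: ModelParamsSchema = {
  height: { default: 1024, max: 1536, min: 512, step: 1 },
  prompt: { default: '' },
  seed: { default: null },
  width: { default: 1024, max: 1536, min: 512, step: 1 },
};

// Field set mirrors the fal entries shown in the diff above.
export const exampleImageModel: AIImageModelCard = {
  displayName: 'Example Image Model',
  enabled: false,
  id: 'example/new-image-model',
  parameters: exampleParamsSchema,
  releasedAt: '2025-01-01',
  type: 'image',
};
```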

package/src/config/aiModels/openai.ts

@@ -1,4 +1,4 @@
-import {
+import { ModelParamsSchema } from '@/libs/standard-parameters';
 import {
   AIChatModelCard,
   AIEmbeddingModelCard,
@@ -8,6 +8,15 @@ import {
   AITTSModelCard,
 } from '@/types/aiModel';
 
+export const gptImage1ParamsSchema: ModelParamsSchema = {
+  imageUrls: { default: [] },
+  prompt: { default: '' },
+  size: {
+    default: 'auto',
+    enum: ['auto', '1024x1024', '1536x1024', '1024x1536'],
+  },
+};
+
 export const openaiChatModels: AIChatModelCard[] = [
   {
     abilities: {
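
`gptImage1ParamsSchema` follows the same `{ default, enum, min, max, step }` convention as the fal schemas. A sketch of how default runtime values could be derived from such a schema; `extractDefaults` is a hypothetical helper, not something this diff shows `@/libs/standard-parameters` exporting.

```ts
// Hypothetical helper: the real exports of '@/libs/standard-parameters' beyond
// ModelParamsSchema are not visible in this diff.
import { ModelParamsSchema } from '@/libs/standard-parameters';

const extractDefaults = (schema: ModelParamsSchema): Record<string, unknown> =>
  Object.fromEntries(
    Object.entries(schema).map(
      ([name, config]) => [name, (config as { default?: unknown }).default] as const,
    ),
  );

// With the gptImage1ParamsSchema defined above, this would yield:
// { imageUrls: [], prompt: '', size: 'auto' }
```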

package/src/features/ElectronTitlebar/WinControl/index.tsx

@@ -1,95 +1,90 @@
-[53 removed lines; content not shown in this diff view]
-  container: css`
-    cursor: pointer;
-    display: flex;
-  `,
-  icon,
-  };
-});
+// const useStyles = createStyles(({ css, cx, token }) => {
+//   const icon = css`
+//     display: flex;
+//     align-items: center;
+//     justify-content: center;
+//
+//     width: ${TITLE_BAR_HEIGHT * 1.2}px;
+//     min-height: ${TITLE_BAR_HEIGHT}px;
+//
+//     color: ${token.colorTextSecondary};
+//
+//     transition: all ease-in-out 100ms;
+//
+//     -webkit-app-region: no-drag;
+//
+//     &:hover {
+//       color: ${token.colorText};
+//       background: ${token.colorFillTertiary};
+//     }
+//
+//     &:active {
+//       color: ${token.colorText};
+//       background: ${token.colorFillSecondary};
+//     }
+//   `;
+//   return {
+//     close: cx(
+//       icon,
+//       css`
+//         padding-inline-end: 2px;
+//
+//         &:hover {
+//           color: ${token.colorTextLightSolid};
+//
+//           /* Win11 color value; identical in light and dark mode */
+//           background: #d33328;
+//         }
+//
+//         &:active {
+//           color: ${token.colorTextLightSolid};
+//
+//           /* Win11 color value */
+//           background: #8b2b25;
+//         }
+//       `,
+//     ),
+//     container: css`
+//       cursor: pointer;
+//       display: flex;
+//     `,
+//     icon,
+//   };
+// });
 
 const WinControl = () => {
-[30 removed lines; content not shown in this diff view]
+  return <div style={{ width: 132 }} />;
+
+  // const { styles } = useStyles();
+  //
+  // return (
+  //   <div className={styles.container}>
+  //     <div
+  //       className={styles.icon}
+  //       onClick={() => {
+  //         electronSystemService.minimizeWindow();
+  //       }}
+  //     >
+  //       <Minus absoluteStrokeWidth size={14} strokeWidth={1.2} />
+  //     </div>
+  //     <div
+  //       className={styles.icon}
+  //       onClick={() => {
+  //         electronSystemService.maximizeWindow();
+  //       }}
+  //     >
+  //       <Square absoluteStrokeWidth size={10} strokeWidth={1.2} />
+  //     </div>
+  //     <div
+  //       className={styles.close}
+  //       onClick={() => {
+  //         electronSystemService.closeWindow();
+  //       }}
+  //     >
+  //       <XIcon absoluteStrokeWidth size={14} strokeWidth={1.2} />
+  //     </div>
+  //   </div>
+  // );
 };
 
 export default WinControl;

package/src/features/ElectronTitlebar/hooks/useWatchThemeUpdate.ts

@@ -7,10 +7,14 @@ import { useElectronStore } from '@/store/electron';
 import { useGlobalStore } from '@/store/global';
 
 export const useWatchThemeUpdate = () => {
-  const [systemAppearance, updateElectronAppState] = useElectronStore(
-    s
-
-
+  const [isAppStateInit, systemAppearance, updateElectronAppState, isMac] = useElectronStore(
+    (s) => [
+      s.isAppStateInit,
+      s.appState.systemAppearance,
+      s.updateElectronAppState,
+      s.appState.isMac,
+    ],
+  );
   const switchThemeMode = useGlobalStore((s) => s.switchThemeMode);
 
   const theme = useTheme();
@@ -24,11 +28,12 @@ export const useWatchThemeUpdate = () => {
   });
 
   useEffect(() => {
+    if (!isAppStateInit || !isMac) return;
     document.documentElement.style.background = 'none';
 
     // https://x.com/alanblogsooo/status/1939208908993896684
     const isNotSameTheme = !systemAppearance ? true : theme.appearance !== systemAppearance;
 
     document.body.style.background = rgba(theme.colorBgLayout, isNotSameTheme ? 0.95 : 0.66);
-  }, [theme, systemAppearance]);
+  }, [theme, systemAppearance, isAppStateInit, isMac]);
 };

package/src/features/PluginDevModal/index.tsx

@@ -7,6 +7,8 @@ import { Trans, useTranslation } from 'react-i18next';
 import { Flexbox } from 'react-layout-kit';
 
 import { WIKI_PLUGIN_GUIDE } from '@/const/url';
+import { isDesktop } from '@/const/version';
+import { TITLE_BAR_HEIGHT } from '@/features/ElectronTitlebar';
 import { LobeToolCustomPlugin } from '@/types/tool/plugin';
 
 import MCPManifestForm from './MCPManifestForm';
@@ -112,7 +114,7 @@ const DevModal = memo<DevModalProps>(
         containerMaxWidth={'auto'}
         destroyOnHidden
         footer={footer}
-        height={'100vh'}
+        height={isDesktop ? `calc(100vh - ${TITLE_BAR_HEIGHT}px)` : '100vh'}
         onClose={(e) => {
           e.stopPropagation();
           onOpenChange(false);

package/src/features/User/__tests__/UserAvatar.test.tsx

@@ -1,6 +1,7 @@
 import { act, render, screen } from '@testing-library/react';
 import { afterEach, describe, expect, it, vi } from 'vitest';
 
+import { BRANDING_NAME } from '@/const/branding';
 import { DEFAULT_USER_AVATAR_URL } from '@/const/meta';
 import { useUserStore } from '@/store/user';
 
@@ -63,8 +64,8 @@ describe('UserAvatar', () => {
     });
 
     render(<UserAvatar />);
-    expect(screen.getByAltText(
-    expect(screen.getByAltText(
+    expect(screen.getByAltText(BRANDING_NAME)).toBeInTheDocument();
+    expect(screen.getByAltText(BRANDING_NAME)).toHaveAttribute('src', DEFAULT_USER_AVATAR_URL);
   });
 });
 
@@ -76,8 +77,8 @@ describe('UserAvatar', () => {
     });
 
     render(<UserAvatar />);
-    expect(screen.getByAltText(
-    expect(screen.getByAltText(
+    expect(screen.getByAltText(BRANDING_NAME)).toBeInTheDocument();
+    expect(screen.getByAltText(BRANDING_NAME)).toHaveAttribute('src', DEFAULT_USER_AVATAR_URL);
   });
 });
});

package/src/libs/model-runtime/fal/index.ts

@@ -3,7 +3,7 @@ import debug from 'debug';
 import { pick } from 'lodash-es';
 import { ClientOptions } from 'openai';
 
-import { RuntimeImageGenParamsValue } from '@/libs/standard-parameters/
+import { RuntimeImageGenParamsValue } from '@/libs/standard-parameters/index';
 
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';

package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts

@@ -5,7 +5,7 @@ import OpenAI, { ClientOptions } from 'openai';
 import { Stream } from 'openai/streaming';
 
 import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
-import { RuntimeImageGenParamsValue } from '@/libs/standard-parameters/
+import { RuntimeImageGenParamsValue } from '@/libs/standard-parameters/index';
 import type { ChatModelCard } from '@/types/llm';
 import { getModelPropertyWithFallback } from '@/utils/getFallbackModelProperty';
 

package/src/libs/model-runtime/utils/streams/google-ai.test.ts

@@ -186,6 +186,52 @@ describe('GoogleGenerativeAIStream', () => {
     ]);
   });
 
+  it('should handle token count with cached token count', async () => {
+    vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+
+    const data = {
+      candidates: [{ content: { role: 'model' }, finishReason: 'STOP', index: 0 }],
+      usageMetadata: {
+        promptTokenCount: 15725,
+        candidatesTokenCount: 1053,
+        totalTokenCount: 16778,
+        cachedContentTokenCount: 14286,
+        promptTokensDetails: [{ modality: 'TEXT', tokenCount: 15725 }],
+        cacheTokensDetails: [{ modality: 'TEXT', tokenCount: 14286 }],
+      },
+      modelVersion: 'gemini-2.0-flash-exp',
+    };
+
+    const mockGoogleStream = new ReadableStream({
+      start(controller) {
+        controller.enqueue(data);
+
+        controller.close();
+      },
+    });
+
+    const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual([
+      // stop
+      'id: chat_1\n',
+      'event: stop\n',
+      `data: "STOP"\n\n`,
+      // usage
+      'id: chat_1\n',
+      'event: usage\n',
+      `data: {"inputCachedTokens":14286,"inputTextTokens":15725,"outputTextTokens":1053,"totalInputTokens":15725,"totalOutputTokens":1053,"totalTokens":16778}\n\n`,
+    ]);
+  });
+
   it('should handle stop with content', async () => {
     vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
 

package/src/libs/model-runtime/utils/streams/google-ai.ts

@@ -33,10 +33,10 @@ const transformGoogleGenerativeAIStream = (
       { data: candidate.finishReason, id: context?.id, type: 'stop' },
       {
         data: {
-
-          inputImageTokens: usage.promptTokensDetails?.find((i
+          inputCachedTokens: usage.cachedContentTokenCount,
+          inputImageTokens: usage.promptTokensDetails?.find((i) => i.modality === 'IMAGE')
             ?.tokenCount,
-          inputTextTokens: usage.promptTokensDetails?.find((i
+          inputTextTokens: usage.promptTokensDetails?.find((i) => i.modality === 'TEXT')
             ?.tokenCount,
           outputReasoningTokens: reasoningTokens,
           outputTextTokens,
@@ -79,7 +79,7 @@ const transformGoogleGenerativeAIStream = (
     // First, check whether this is reasoning content (thought: true)
     if (Array.isArray(candidate.content?.parts) && candidate.content.parts.length > 0) {
       for (const part of candidate.content.parts) {
-        if (part && part.text &&
+        if (part && part.text && part.thought === true) {
           return { data: part.text, id: context.id, type: 'reasoning' };
         }
       }

package/src/libs/model-runtime/utils/streams/vertex-ai.ts

@@ -31,13 +31,11 @@ const transformVertexAIStream = (
       { data: candidate.finishReason, id: context?.id, type: 'stop' },
       {
         data: {
-
-          inputImageTokens:
-
-          )
-
-            (i: any) => i.modality === 'TEXT',
-          )?.tokenCount,
+          inputCachedTokens: usage.cachedContentTokenCount,
+          inputImageTokens: usage.promptTokensDetails?.find((i) => i.modality === 'IMAGE')
+            ?.tokenCount,
+          inputTextTokens: usage.promptTokensDetails?.find((i) => i.modality === 'TEXT')
+            ?.tokenCount,
           outputReasoningTokens,
           outputTextTokens,
           totalInputTokens: usage.promptTokenCount,
@@ -56,7 +54,7 @@ const transformVertexAIStream = (
       candidate.content.parts.length > 0
     ) {
       for (const part of candidate.content.parts) {
-        if (part && part.text &&
+        if (part && part.text && part.thought === true) {
           return { data: part.text, id: context.id, type: 'reasoning' };
         }
       }
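
Both the Google AI and Vertex AI transformers now fold `cachedContentTokenCount` into the usage event, which is what the new test above asserts. Below is a standalone sketch of that mapping; the input interface is reconstructed from the fields visible in this diff rather than taken from the Google SDK, and the function is illustrative, not the actual transformer.

```ts
// Field names follow the diff and the test expectations above; illustrative only.
interface GoogleUsageMetadata {
  cachedContentTokenCount?: number;
  candidatesTokenCount?: number;
  promptTokenCount?: number;
  promptTokensDetails?: { modality: string; tokenCount: number }[];
  totalTokenCount?: number;
}

// Maps Google's usageMetadata into the protocol's usage payload
// (reasoning/output-token fields handled by the real transformers are omitted here).
const mapUsage = (usage: GoogleUsageMetadata) => ({
  inputCachedTokens: usage.cachedContentTokenCount,
  inputImageTokens: usage.promptTokensDetails?.find((i) => i.modality === 'IMAGE')?.tokenCount,
  inputTextTokens: usage.promptTokensDetails?.find((i) => i.modality === 'TEXT')?.tokenCount,
  totalInputTokens: usage.promptTokenCount,
  totalOutputTokens: usage.candidatesTokenCount,
  totalTokens: usage.totalTokenCount,
});

// For the metadata in the test above, mapUsage(...) yields
// { inputCachedTokens: 14286, inputTextTokens: 15725, totalInputTokens: 15725,
//   totalOutputTokens: 1053, totalTokens: 16778, inputImageTokens: undefined }
```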