@lobehub/chat 1.18.2 → 1.19.0
This diff shows the changes between two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.
Potentially problematic release: this version of @lobehub/chat might be problematic.
- package/CHANGELOG.md +27 -0
- package/Dockerfile +2 -0
- package/Dockerfile.database +2 -0
- package/locales/ar/chat.json +6 -0
- package/locales/ar/error.json +1 -0
- package/locales/ar/modelProvider.json +7 -0
- package/locales/ar/portal.json +16 -0
- package/locales/bg-BG/chat.json +6 -0
- package/locales/bg-BG/error.json +1 -0
- package/locales/bg-BG/modelProvider.json +7 -0
- package/locales/bg-BG/portal.json +16 -0
- package/locales/de-DE/chat.json +6 -0
- package/locales/de-DE/error.json +1 -0
- package/locales/de-DE/modelProvider.json +7 -0
- package/locales/de-DE/portal.json +16 -0
- package/locales/en-US/chat.json +6 -0
- package/locales/en-US/error.json +1 -0
- package/locales/en-US/modelProvider.json +7 -0
- package/locales/en-US/portal.json +16 -0
- package/locales/es-ES/chat.json +6 -0
- package/locales/es-ES/error.json +1 -0
- package/locales/es-ES/modelProvider.json +7 -0
- package/locales/es-ES/portal.json +16 -0
- package/locales/fr-FR/chat.json +6 -0
- package/locales/fr-FR/error.json +1 -0
- package/locales/fr-FR/modelProvider.json +7 -0
- package/locales/fr-FR/portal.json +16 -0
- package/locales/it-IT/chat.json +6 -0
- package/locales/it-IT/error.json +1 -0
- package/locales/it-IT/modelProvider.json +7 -0
- package/locales/it-IT/portal.json +16 -0
- package/locales/ja-JP/chat.json +6 -0
- package/locales/ja-JP/error.json +1 -0
- package/locales/ja-JP/modelProvider.json +7 -0
- package/locales/ja-JP/portal.json +16 -0
- package/locales/ko-KR/chat.json +6 -0
- package/locales/ko-KR/error.json +1 -0
- package/locales/ko-KR/modelProvider.json +7 -0
- package/locales/ko-KR/portal.json +16 -0
- package/locales/nl-NL/chat.json +6 -0
- package/locales/nl-NL/error.json +1 -0
- package/locales/nl-NL/modelProvider.json +7 -0
- package/locales/nl-NL/portal.json +16 -0
- package/locales/pl-PL/chat.json +6 -0
- package/locales/pl-PL/error.json +1 -0
- package/locales/pl-PL/modelProvider.json +7 -0
- package/locales/pl-PL/portal.json +16 -0
- package/locales/pt-BR/chat.json +6 -0
- package/locales/pt-BR/error.json +1 -0
- package/locales/pt-BR/modelProvider.json +7 -0
- package/locales/pt-BR/portal.json +16 -0
- package/locales/ru-RU/chat.json +6 -0
- package/locales/ru-RU/error.json +1 -0
- package/locales/ru-RU/modelProvider.json +7 -0
- package/locales/ru-RU/portal.json +16 -0
- package/locales/tr-TR/chat.json +6 -0
- package/locales/tr-TR/error.json +1 -0
- package/locales/tr-TR/modelProvider.json +7 -0
- package/locales/tr-TR/portal.json +16 -0
- package/locales/vi-VN/chat.json +6 -0
- package/locales/vi-VN/error.json +1 -0
- package/locales/vi-VN/modelProvider.json +7 -0
- package/locales/vi-VN/portal.json +16 -0
- package/locales/zh-CN/chat.json +6 -0
- package/locales/zh-CN/error.json +2 -1
- package/locales/zh-CN/modelProvider.json +7 -0
- package/locales/zh-CN/portal.json +17 -1
- package/locales/zh-TW/chat.json +6 -0
- package/locales/zh-TW/error.json +1 -0
- package/locales/zh-TW/modelProvider.json +7 -0
- package/locales/zh-TW/portal.json +16 -0
- package/package.json +3 -1
- package/src/app/(main)/chat/(workspace)/@portal/Artifacts/Body/Renderer/HTML.tsx +25 -0
- package/src/app/(main)/chat/(workspace)/@portal/Artifacts/Body/Renderer/React.tsx +30 -0
- package/src/app/(main)/chat/(workspace)/@portal/Artifacts/Body/Renderer/SVG.tsx +114 -0
- package/src/app/(main)/chat/(workspace)/@portal/Artifacts/Body/Renderer/index.tsx +25 -0
- package/src/app/(main)/chat/(workspace)/@portal/Artifacts/Body/index.tsx +79 -0
- package/src/app/(main)/chat/(workspace)/@portal/Artifacts/Header.tsx +69 -0
- package/src/app/(main)/chat/(workspace)/@portal/Artifacts/index.ts +10 -0
- package/src/app/(main)/chat/(workspace)/@portal/Artifacts/useEnable.ts +4 -0
- package/src/app/(main)/chat/(workspace)/@portal/FilePreview/index.ts +2 -1
- package/src/app/(main)/chat/(workspace)/@portal/Home/Body/{Artifacts → Plugins}/index.tsx +1 -1
- package/src/app/(main)/chat/(workspace)/@portal/Home/Body/index.tsx +2 -2
- package/src/app/(main)/chat/(workspace)/@portal/MessageDetail/index.ts +2 -1
- package/src/app/(main)/chat/(workspace)/@portal/Plugins/Body/ToolRender.tsx +1 -1
- package/src/app/(main)/chat/(workspace)/@portal/Plugins/Body/index.tsx +1 -1
- package/src/app/(main)/chat/(workspace)/@portal/Plugins/Footer.tsx +1 -1
- package/src/app/(main)/chat/(workspace)/@portal/Plugins/index.ts +2 -1
- package/src/app/(main)/chat/(workspace)/@portal/Plugins/useEnable.ts +1 -1
- package/src/app/(main)/chat/(workspace)/@portal/_layout/Desktop.tsx +2 -4
- package/src/app/(main)/chat/(workspace)/@portal/features/Body.tsx +27 -0
- package/src/app/(main)/chat/(workspace)/@portal/router.tsx +3 -1
- package/src/app/(main)/chat/(workspace)/@portal/type.ts +7 -0
- package/src/app/(main)/chat/(workspace)/_layout/Desktop/Portal.tsx +3 -2
- package/src/app/(main)/settings/llm/ProviderList/Github/index.tsx +53 -0
- package/src/app/(main)/settings/llm/ProviderList/providers.tsx +6 -1
- package/src/app/api/chat/agentRuntime.ts +14 -0
- package/src/components/SidebarHeader/index.tsx +1 -1
- package/src/config/llm.ts +14 -0
- package/src/config/modelProviders/ai21.ts +37 -0
- package/src/config/modelProviders/anthropic.ts +4 -0
- package/src/config/modelProviders/github.ts +209 -0
- package/src/config/modelProviders/index.ts +8 -0
- package/src/const/layoutTokens.ts +1 -1
- package/src/const/plugin.test.ts +80 -0
- package/src/const/plugin.ts +12 -0
- package/src/const/settings/llm.ts +10 -0
- package/src/features/Conversation/Error/APIKeyForm/index.tsx +4 -0
- package/src/features/Conversation/Messages/Tool/Inspector/index.tsx +1 -1
- package/src/features/Conversation/Messages/Tool/index.tsx +1 -1
- package/src/features/Conversation/components/ChatItem/index.tsx +24 -2
- package/src/features/Conversation/components/ChatItem/utils.test.ts +150 -0
- package/src/features/Conversation/components/ChatItem/utils.ts +28 -0
- package/src/features/Conversation/components/MarkdownElements/LobeArtifact/Render/Icon.tsx +96 -0
- package/src/features/Conversation/components/MarkdownElements/LobeArtifact/Render/index.tsx +129 -0
- package/src/features/Conversation/components/MarkdownElements/LobeArtifact/index.ts +10 -0
- package/src/features/Conversation/components/MarkdownElements/LobeArtifact/rehypePlugin.ts +74 -0
- package/src/features/Conversation/components/MarkdownElements/LobeThinking/Render.tsx +86 -0
- package/src/features/Conversation/components/MarkdownElements/LobeThinking/index.ts +12 -0
- package/src/features/Conversation/components/MarkdownElements/LobeThinking/rehypePlugin.test.ts +124 -0
- package/src/features/Conversation/components/MarkdownElements/LobeThinking/rehypePlugin.ts +51 -0
- package/src/features/Conversation/components/MarkdownElements/index.ts +4 -0
- package/src/features/Conversation/components/MarkdownElements/type.ts +7 -0
- package/src/libs/agent-runtime/AgentRuntime.ts +14 -0
- package/src/libs/agent-runtime/ai21/index.test.ts +255 -0
- package/src/libs/agent-runtime/ai21/index.ts +18 -0
- package/src/libs/agent-runtime/error.ts +2 -0
- package/src/libs/agent-runtime/github/index.test.ts +246 -0
- package/src/libs/agent-runtime/github/index.ts +15 -0
- package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +2215 -28
- package/src/libs/agent-runtime/openrouter/fixtures/models.json +3345 -37
- package/src/libs/agent-runtime/openrouter/index.ts +2 -1
- package/src/libs/agent-runtime/types/type.ts +2 -0
- package/src/locales/default/chat.ts +7 -0
- package/src/locales/default/error.ts +3 -0
- package/src/locales/default/modelProvider.ts +7 -0
- package/src/locales/default/portal.ts +17 -1
- package/src/server/globalConfig/index.ts +14 -0
- package/src/store/chat/slices/message/selectors.ts +30 -28
- package/src/store/chat/slices/portal/action.ts +15 -2
- package/src/store/chat/slices/portal/initialState.ts +11 -0
- package/src/store/chat/slices/portal/selectors.test.ts +29 -7
- package/src/store/chat/slices/portal/selectors.ts +56 -12
- package/src/styles/loading.ts +28 -0
- package/src/tools/artifacts/index.ts +13 -0
- package/src/tools/artifacts/systemRole.ts +338 -0
- package/src/tools/index.ts +6 -0
- package/src/types/user/settings/keyVaults.ts +2 -0
- package/src/utils/clipboard.ts +53 -0
- /package/src/app/(main)/chat/(workspace)/@portal/Home/Body/{Artifacts → Plugins}/ArtifactList/Item/index.tsx +0 -0
- /package/src/app/(main)/chat/(workspace)/@portal/Home/Body/{Artifacts → Plugins}/ArtifactList/Item/style.ts +0 -0
- /package/src/app/(main)/chat/(workspace)/@portal/Home/Body/{Artifacts → Plugins}/ArtifactList/index.tsx +0 -0
package/src/app/api/chat/agentRuntime.ts
CHANGED
@@ -133,6 +133,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {

       return { apiKey, baseURL };
     }
+    case ModelProvider.Github: {
+      const { GITHUB_TOKEN } = getLLMConfig();
+
+      const apiKey = apiKeyManager.pick(payload?.apiKey || GITHUB_TOKEN);
+
+      return { apiKey };
+    }
     case ModelProvider.OpenRouter: {
       const { OPENROUTER_API_KEY } = getLLMConfig();

@@ -230,6 +237,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {

       const apiKey = apiKeyManager.pick(payload?.apiKey || SPARK_API_KEY);

+      return { apiKey };
+    }
+    case ModelProvider.Ai21: {
+      const { AI21_API_KEY } = getLLMConfig();
+
+      const apiKey = apiKeyManager.pick(payload?.apiKey || AI21_API_KEY);
+
       return { apiKey };
     }
   }
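Both new cases follow the key-resolution pattern the surrounding cases already use: a key supplied by the client in the JWT payload wins, otherwise the server-side environment variable is used, with `apiKeyManager.pick` making the final selection. A minimal standalone sketch of just the fallback (hypothetical helper name; the real code routes through `apiKeyManager`):

```ts
// Hypothetical illustration of the fallback above: the client-provided key
// takes precedence over the server-configured one.
const resolveApiKey = (payloadApiKey?: string, envKey?: string): string | undefined =>
  payloadApiKey || envKey;

// e.g. the new GitHub case boils down to:
// resolveApiKey(payload?.apiKey, process.env.GITHUB_TOKEN)
```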
package/src/components/SidebarHeader/index.tsx
CHANGED
@@ -27,7 +27,7 @@ const SidebarHeader = memo<SidebarHeaderProps>(({ title, style, actions }) => {
       paddingInline={16}
       style={style}
     >
-      <Flexbox align={'center'} gap={4} horizontal>
+      <Flexbox align={'center'} gap={4} horizontal width={'100%'}>
        {title}
      </Flexbox>
      <Flexbox align={'center'} gap={2} horizontal>
package/src/config/llm.ts
CHANGED
@@ -53,6 +53,10 @@ export const getLLMConfig = () => {
       GROQ_MODEL_LIST: z.string().optional(),
       GROQ_PROXY_URL: z.string().optional(),

+      ENABLED_GITHUB: z.boolean(),
+      GITHUB_TOKEN: z.string().optional(),
+      GITHUB_MODEL_LIST: z.string().optional(),
+
       ENABLED_OPENROUTER: z.boolean(),
       OPENROUTER_API_KEY: z.string().optional(),
       OPENROUTER_MODEL_LIST: z.string().optional(),
@@ -110,6 +114,9 @@ export const getLLMConfig = () => {

       ENABLED_SPARK: z.boolean(),
       SPARK_API_KEY: z.string().optional(),
+
+      ENABLED_AI21: z.boolean(),
+      AI21_API_KEY: z.string().optional(),
     },
     runtimeEnv: {
       API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -172,6 +179,10 @@ export const getLLMConfig = () => {
       GROQ_MODEL_LIST: process.env.GROQ_MODEL_LIST,
       GROQ_PROXY_URL: process.env.GROQ_PROXY_URL,

+      ENABLED_GITHUB: !!process.env.GITHUB_TOKEN,
+      GITHUB_TOKEN: process.env.GITHUB_TOKEN,
+      GITHUB_MODEL_LIST: process.env.GITHUB_MODEL_LIST,
+
       ENABLED_ZEROONE: !!process.env.ZEROONE_API_KEY,
       ZEROONE_API_KEY: process.env.ZEROONE_API_KEY,
       ZEROONE_MODEL_LIST: process.env.ZEROONE_MODEL_LIST,
@@ -217,6 +228,9 @@ export const getLLMConfig = () => {

       ENABLED_SPARK: !!process.env.SPARK_API_KEY,
       SPARK_API_KEY: process.env.SPARK_API_KEY,
+
+      ENABLED_AI21: !!process.env.AI21_API_KEY,
+      AI21_API_KEY: process.env.AI21_API_KEY,
     },
   });
 };
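Note the convention, shared with the existing providers: `ENABLED_GITHUB` and `ENABLED_AI21` are never set directly; they are derived from whether the corresponding key is present, so exporting `GITHUB_TOKEN` or `AI21_API_KEY` implicitly switches the provider on. A self-contained sketch of that derived-flag pattern with zod:

```ts
import { z } from 'zod';

// Derived-flag pattern: the boolean is computed from the key's presence,
// then validated alongside the key itself.
const schema = z.object({
  ENABLED_GITHUB: z.boolean(),
  GITHUB_TOKEN: z.string().optional(),
});

const config = schema.parse({
  ENABLED_GITHUB: !!process.env.GITHUB_TOKEN,
  GITHUB_TOKEN: process.env.GITHUB_TOKEN,
});
```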
package/src/config/modelProviders/ai21.ts
ADDED
@@ -0,0 +1,37 @@
+import { ModelProviderCard } from '@/types/llm';
+
+// ref https://docs.ai21.com/reference/jamba-15-api-ref
+const Ai21: ModelProviderCard = {
+  chatModels: [
+    {
+      displayName: 'Jamba 1.5 Mini',
+      enabled: true,
+      functionCall: true,
+      id: 'jamba-1.5-mini',
+      pricing: {
+        input: 0.2,
+        output: 0.4,
+      },
+      tokens: 256_000,
+    },
+    {
+      displayName: 'Jamba 1.5 Large',
+      enabled: true,
+      functionCall: true,
+      id: 'jamba-1.5-large',
+      pricing: {
+        input: 2,
+        output: 8,
+      },
+      tokens: 256_000,
+    },
+  ],
+  checkModel: 'jamba-1.5-mini',
+  id: 'ai21',
+  modelList: { showModelFetcher: true },
+  modelsUrl: 'https://docs.ai21.com/reference',
+  name: 'Ai21Labs',
+  url: 'https://studio.ai21.com',
+};
+
+export default Ai21;
package/src/config/modelProviders/github.ts
ADDED
@@ -0,0 +1,209 @@
+import { ModelProviderCard } from '@/types/llm';
+
+// ref:
+// https://github.com/marketplace/models
+const Github: ModelProviderCard = {
+  chatModels: [
+    {
+      description:
+        'A 398B parameters (94B active) multilingual model, offering a 256K long context window, function calling, structured output, and grounded generation.',
+      displayName: 'AI21 Jamba 1.5 Large',
+      functionCall: true,
+      id: 'ai21-jamba-1.5-large',
+      maxOutput: 4096,
+      tokens: 262_144,
+    },
+    {
+      description:
+        'A 52B parameters (12B active) multilingual model, offering a 256K long context window, function calling, structured output, and grounded generation.',
+      displayName: 'AI21 Jamba 1.5 Mini',
+      functionCall: true,
+      id: 'ai21-jamba-1.5-mini',
+      maxOutput: 4096,
+      tokens: 262_144,
+    },
+    {
+      description:
+        'A production-grade Mamba-based LLM model to achieve best-in-class performance, quality, and cost efficiency.',
+      displayName: 'AI21-Jamba-Instruct',
+      id: 'ai21-jamba-instruct',
+      maxOutput: 4096,
+      tokens: 72_000,
+    },
+    {
+      description:
+        'Command R is a scalable generative model targeting RAG and Tool Use to enable production-scale AI for enterprise.',
+      displayName: 'Cohere Command R',
+      id: 'cohere-command-r',
+      maxOutput: 4096,
+      tokens: 131_072,
+    },
+    {
+      description:
+        'Command R+ is a state-of-the-art RAG-optimized model designed to tackle enterprise-grade workloads.',
+      displayName: 'Cohere Command R+',
+      id: 'cohere-command-r-plus',
+      maxOutput: 4096,
+      tokens: 131_072,
+    },
+    {
+      description:
+        'A powerful 70-billion parameter model excelling in reasoning, coding, and broad language applications.',
+      displayName: 'Meta-Llama-3-70B-Instruct',
+      id: 'meta-llama-3-70b-instruct',
+      maxOutput: 4096,
+      tokens: 8192,
+    },
+    {
+      description:
+        'A versatile 8-billion parameter model optimized for dialogue and text generation tasks.',
+      displayName: 'Meta-Llama-3-8B-Instruct',
+      id: 'meta-llama-3-8b-instruct',
+      maxOutput: 4096,
+      tokens: 8192,
+    },
+    {
+      description:
+        'The Llama 3.1 instruction tuned text only models are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.',
+      displayName: 'Meta-Llama-3.1-405B-Instruct',
+      id: 'meta-llama-3.1-405b-instruct',
+      maxOutput: 4096,
+      tokens: 131_072,
+    },
+    {
+      description:
+        'The Llama 3.1 instruction tuned text only models are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.',
+      displayName: 'Meta-Llama-3.1-70B-Instruct',
+      id: 'meta-llama-3.1-70b-instruct',
+      maxOutput: 4096,
+      tokens: 131_072,
+    },
+    {
+      description:
+        'The Llama 3.1 instruction tuned text only models are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.',
+      displayName: 'Meta-Llama-3.1-8B-Instruct',
+      id: 'meta-llama-3.1-8b-instruct',
+      maxOutput: 4096,
+      tokens: 131_072,
+    },
+    {
+      description:
+        "Mistral's flagship model that's ideal for complex tasks that require large reasoning capabilities or are highly specialized (Synthetic Text Generation, Code Generation, RAG, or Agents).",
+      displayName: 'Mistral Large',
+      id: 'mistral-large',
+      maxOutput: 4096,
+      tokens: 33_000,
+    },
+    {
+      description:
+        'Mistral Large (2407) is an advanced Large Language Model (LLM) with state-of-the-art reasoning, knowledge and coding capabilities.',
+      displayName: 'Mistral Large (2407)',
+      id: 'mistral-large-2407',
+      maxOutput: 4096,
+      tokens: 131_072,
+    },
+    {
+      description:
+        'Mistral Nemo is a cutting-edge Language Model (LLM) boasting state-of-the-art reasoning, world knowledge, and coding capabilities within its size category.',
+      displayName: 'Mistral Nemo',
+      id: 'mistral-nemo',
+      maxOutput: 4096,
+      tokens: 131_072,
+    },
+    {
+      description:
+        'Mistral Small can be used on any language-based task that requires high efficiency and low latency.',
+      displayName: 'Mistral Small',
+      id: 'mistral-small',
+      maxOutput: 4096,
+      tokens: 33_000,
+    },
+    {
+      description:
+        "OpenAI's most advanced multimodal model in the GPT-4 family. Can handle both text and image inputs.",
+      displayName: 'OpenAI GPT-4o',
+      enabled: true,
+      functionCall: true,
+      id: 'gpt-4o',
+      maxOutput: 4096,
+      tokens: 128_000,
+      vision: true,
+    },
+    {
+      description: 'An affordable, efficient AI solution for diverse text and image tasks.',
+      displayName: 'OpenAI GPT-4o mini',
+      enabled: true,
+      functionCall: true,
+      id: 'gpt-4o-mini',
+      maxOutput: 4096,
+      tokens: 128_000,
+      vision: true,
+    },
+    {
+      description:
+        'Same Phi-3-medium model, but with a larger context size for RAG or few shot prompting.',
+      displayName: 'Phi-3-medium instruct (128k)',
+      id: 'Phi-3-medium-128k-instruct',
+      maxOutput: 4096,
+      tokens: 131_072,
+    },
+    {
+      description:
+        'A 14B parameters model, proves better quality than Phi-3-mini, with a focus on high-quality, reasoning-dense data.',
+      displayName: 'Phi-3-medium instruct (4k)',
+      id: 'Phi-3-medium-4k-instruct',
+      maxOutput: 4096,
+      tokens: 4096,
+    },
+    {
+      description:
+        'Same Phi-3-mini model, but with a larger context size for RAG or few shot prompting.',
+      displayName: 'Phi-3-mini instruct (128k)',
+      id: 'Phi-3-mini-128k-instruct',
+      maxOutput: 4096,
+      tokens: 131_072,
+    },
+    {
+      description:
+        'Tiniest member of the Phi-3 family. Optimized for both quality and low latency.',
+      displayName: 'Phi-3-mini instruct (4k)',
+      id: 'Phi-3-mini-4k-instruct',
+      maxOutput: 4096,
+      tokens: 4096,
+    },
+    {
+      description:
+        'Same Phi-3-small model, but with a larger context size for RAG or few shot prompting.',
+      displayName: 'Phi-3-small instruct (128k)',
+      id: 'Phi-3-small-128k-instruct',
+      maxOutput: 4096,
+      tokens: 131_072,
+    },
+    {
+      description:
+        'A 7B parameters model, proves better quality than Phi-3-mini, with a focus on high-quality, reasoning-dense data.',
+      displayName: 'Phi-3-small instruct (8k)',
+      id: 'Phi-3-small-8k-instruct',
+      maxOutput: 4096,
+      tokens: 131_072,
+    },
+    {
+      description: 'Refresh of Phi-3-mini model.',
+      displayName: 'Phi-3.5-mini instruct (128k)',
+      id: 'Phi-3-5-mini-instruct',
+      maxOutput: 4096,
+      tokens: 131_072,
+    },
+  ],
+  checkModel: 'Phi-3-mini-4k-instruct',
+  // Ref: https://github.blog/news-insights/product-news/introducing-github-models/
+  description:
+    "With GitHub Models, developers can become AI engineers and build with the industry's leading AI models.",
+  enabled: true,
+  id: 'github',
+  modelList: { showModelFetcher: true },
+  name: 'GitHub',
+  url: 'https://github.com/marketplace/models',
+};
+
+export default Github;
package/src/config/modelProviders/index.ts
CHANGED
@@ -1,5 +1,6 @@
 import { ChatModelCard, ModelProviderCard } from '@/types/llm';

+import Ai21Provider from './ai21';
 import Ai360Provider from './ai360';
 import AnthropicProvider from './anthropic';
 import AzureProvider from './azure';
@@ -7,6 +8,7 @@ import BaichuanProvider from './baichuan';
 import BedrockProvider from './bedrock';
 import DeepSeekProvider from './deepseek';
 import FireworksAIProvider from './fireworksai';
+import GithubProvider from './github';
 import GoogleProvider from './google';
 import GroqProvider from './groq';
 import MinimaxProvider from './minimax';
@@ -35,6 +37,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   DeepSeekProvider.chatModels,
   GoogleProvider.chatModels,
   GroqProvider.chatModels,
+  GithubProvider.chatModels,
   MinimaxProvider.chatModels,
   MistralProvider.chatModels,
   MoonshotProvider.chatModels,
@@ -53,6 +56,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   SiliconCloudProvider.chatModels,
   UpstageProvider.chatModels,
   SparkProvider.chatModels,
+  Ai21Provider.chatModels,
 ].flat();

 export const DEFAULT_MODEL_PROVIDER_LIST = [
@@ -64,6 +68,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   GoogleProvider,
   DeepSeekProvider,
   OpenRouterProvider,
+  GithubProvider,
   GroqProvider,
   NovitaProvider,
   PerplexityProvider,
@@ -71,6 +76,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   TogetherAIProvider,
   FireworksAIProvider,
   UpstageProvider,
+  Ai21Provider,
   QwenProvider,
   SparkProvider,
   ZhiPuProvider,
@@ -93,6 +99,7 @@ export const isProviderDisableBroswerRequest = (id: string) => {
   return !!provider;
 };

+export { default as Ai21ProviderCard } from './ai21';
 export { default as Ai360ProviderCard } from './ai360';
 export { default as AnthropicProviderCard } from './anthropic';
 export { default as AzureProviderCard } from './azure';
@@ -100,6 +107,7 @@ export { default as BaichuanProviderCard } from './baichuan';
 export { default as BedrockProviderCard } from './bedrock';
 export { default as DeepSeekProviderCard } from './deepseek';
 export { default as FireworksAIProviderCard } from './fireworksai';
+export { default as GithubProviderCard } from './github';
 export { default as GoogleProviderCard } from './google';
 export { default as GroqProviderCard } from './groq';
 export { default as MinimaxProviderCard } from './minimax';
package/src/const/layoutTokens.ts
CHANGED
@@ -9,7 +9,7 @@ export const CHAT_TEXTAREA_HEIGHT_MOBILE = 108;
 export const CHAT_SIDEBAR_WIDTH = 280;

 export const CHAT_DOCK_WIDTH = 400;
-export const CHAT_DOCK_TOOL_UI_WIDTH =
+export const CHAT_DOCK_TOOL_UI_WIDTH = 600;

 export const MARKET_SIDEBAR_WIDTH = 400;
 export const FOLDER_WIDTH = 270;
package/src/const/plugin.test.ts
ADDED
@@ -0,0 +1,80 @@
+import { describe, expect, it } from 'vitest';
+
+import { ARTIFACT_TAG_REGEX } from './plugin';
+
+describe('ARTIFACT_TAG_REGEX', () => {
+  it('should match a simple lobeArtifact tag', () => {
+    const input = '<lobeArtifact>Simple content</lobeArtifact>';
+    const matches = input.match(ARTIFACT_TAG_REGEX);
+    expect(matches).toHaveLength(2);
+    expect(matches?.[1]).toBe('Simple content');
+  });
+
+  it('should match a lobeArtifact tag with attributes', () => {
+    const input = '<lobeArtifact type="image">Content with attributes</lobeArtifact>';
+    const matches = input.match(ARTIFACT_TAG_REGEX);
+    expect(matches).toHaveLength(2);
+    expect(matches?.[1]).toBe('Content with attributes');
+  });
+
+  it('should match lobeArtifact tag with multiline content', () => {
+    const input = '<lobeArtifact>\nMultiline\ncontent\n</lobeArtifact>';
+    const matches = input.match(ARTIFACT_TAG_REGEX);
+    expect(matches).toHaveLength(2);
+    expect(matches?.[1]).toBe('\nMultiline\ncontent\n');
+  });
+
+  it('should match an unclosed lobeArtifact tag', () => {
+    const input = '<lobeArtifact>Unclosed tag';
+    const matches = input.match(ARTIFACT_TAG_REGEX);
+    expect(matches).toHaveLength(2);
+    expect(matches?.[1]).toBe('Unclosed tag');
+  });
+
+  it('should not match when there is no lobeArtifact tag', () => {
+    const input = 'This is a text without any lobeArtifact tag';
+    const matches = input.match(ARTIFACT_TAG_REGEX);
+    expect(matches).toBeNull();
+  });
+
+  it('should match', () => {
+    const input = `好的,让我来为您解释"OpenAI"这个词。
+
+<lobeThinking>这个词涉及人工智能领域的一家知名公司,我需要用批判性和幽默的视角来解读它的本质。我会结合当前AI发展的现状,用隐喻的方式来表达。</lobeThinking>
+
+<lobeArtifact identifier="openai-new-interpretation" type="image/svg+xml" title="OpenAI 汉语新解">
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 400 600">
+  <rect width="400" height="600" fill="#f0f0f0"/>
+  <g font-family="Arial, sans-serif">
+    <text x="200" y="50" font-size="24" font-weight="bold" text-anchor="middle" fill="#333">汉语新解</text>
+    <line x1="50" y1="70" x2="350" y2="70" stroke="#333" stroke-width="2"/>
+    <text x="50" y="100" font-size="18" fill="#555">OpenAI</text>
+    <text x="50" y="130" font-size="14" fill="#777">オープンAI / Open AI</text>
+    <text x="50" y="180" font-size="16" fill="#444" width="300">
+      <tspan x="50" dy="0">一家打着"开放"旗号的AI公司,</tspan>
+      <tspan x="50" dy="25">实则是科技世界的现代炼金术士。</tspan>
+      <tspan x="50" dy="25">他们在神秘实验室里炼制数字魔药,</tspan>
+      <tspan x="50" dy="25">声称要拯救世界,却把钥匙藏在高塔顶端。</tspan>
+      <tspan x="50" dy="25">这是一场华丽的默剧,观众屏息以待,</tspan>
+      <tspan x="50" dy="25">而台上的演员正在即兴创作剧本。</tspan>
+    </text>
+    <path d="M50 400 Q200 300 350 400" fill="none" stroke="#666" stroke-width="2"/>
+    <text x="200" y="450" font-size="14" text-anchor="middle" fill="#555">开放的盒子里装着封闭的秘密</text>
+  </g>
+</svg>
+</lobeArtifact>
+
+OpenAI,这个名字听起来多么崇高啊!就像是一位身披白袍、头戴光环的科技天使,降临凡间来拯救我们这些被信息海洋淹没的可怜虫。
+
+但是,亲爱的朋友,让我们摘下那副玫瑰色的眼镜,仔细看看这位"救世主"的真面目。OpenAI,多么讽刺的名字啊!它就像是一个贴着"免费午餐"标签的高级餐厅,当你兴冲冲地推门而入时,才发现菜单上的价格足以让你的钱包瞬间减肥。
+
+这家公司就像是科技世界的现代炼金术士。他们在神秘的实验室里日夜不停地炼制数字魔药,声称要创造出能解决人类所有问题的"哲学家之石"。可惜啊,这些魔法配方都被锁在了高塔顶端,只有少数"有缘人"才能一睹真容。
+
+OpenAI的存在,就像是一场精心编排的默剧。台上的演员们摆出各种夸张的姿势,声称自己正在为人类的未来奋斗;台下的观众们屏息凝神,既期待着奇迹的出现,又担心着潘多拉魔盒被轻易打开。而最讽刺的是,这场戏的剧本还在即兴创作中,连导演自己都不知道下一幕会发生什么。
+
+所以,我亲爱的朋友,下次当你听到"OpenAI"这个词时,请记住:在这个看似开放的盒子里,装着的可能是一堆封闭的秘密。毕竟,在这个光鲜亮丽的科技舞台上,最稀缺的从来就不是智能,而是真正的开放与透明。`;
+
+    const matches = ARTIFACT_TAG_REGEX.exec(input);
+    expect(matches).toHaveLength(2);
+  });
+});
package/src/const/plugin.ts
CHANGED
@@ -1,2 +1,14 @@
 export const PLUGIN_SCHEMA_SEPARATOR = '____';
 export const PLUGIN_SCHEMA_API_MD5_PREFIX = 'MD5HASH_';
+
+export const ARTIFACT_TAG = 'lobeArtifact';
+export const ARTIFACT_THINKING_TAG = 'lobeThinking';
+
+// https://regex101.com/r/TwzTkf/2
+export const ARTIFACT_TAG_REGEX = /<lobeArtifact\b[^>]*>(?<content>[\S\s]*?)(?:<\/lobeArtifact>|$)/;
+
+// https://regex101.com/r/r9gqGg/1
+export const ARTIFACT_TAG_CLOSED_REGEX = /<lobeArtifact\b[^>]*>([\S\s]*?)<\/lobeArtifact>/;
+
+// https://regex101.com/r/AvPA2g/1
+export const ARTIFACT_THINKING_TAG_REGEX = /<lobeThinking\b[^>]*>([\S\s]*?)(?:<\/lobeThinking>|$)/;
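The `(?:</lobeArtifact>|$)` alternation is what separates `ARTIFACT_TAG_REGEX` from its `_CLOSED_` counterpart: the former already captures the body of an artifact that is still streaming in, while the latter only matches once the closing tag has arrived. A quick illustration, mirroring the unclosed-tag case in plugin.test.ts above (assumes it runs next to plugin.ts):

```ts
import { ARTIFACT_TAG_CLOSED_REGEX, ARTIFACT_TAG_REGEX } from './plugin';

const streaming = '<lobeArtifact identifier="demo" type="image/svg+xml"><svg>';

// The open-ended regex captures the partial body as it streams in...
console.log(ARTIFACT_TAG_REGEX.exec(streaming)?.[1]); // '<svg>'

// ...while the closed variant only matches a finished artifact.
console.log(ARTIFACT_TAG_CLOSED_REGEX.test(streaming)); // false
```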
package/src/const/settings/llm.ts
CHANGED
@@ -1,10 +1,12 @@
 import {
+  Ai21ProviderCard,
   Ai360ProviderCard,
   AnthropicProviderCard,
   BaichuanProviderCard,
   BedrockProviderCard,
   DeepSeekProviderCard,
   FireworksAIProviderCard,
+  GithubProviderCard,
   GoogleProviderCard,
   GroqProviderCard,
   MinimaxProviderCard,
@@ -30,6 +32,10 @@ import { ModelProvider } from '@/libs/agent-runtime';
 import { UserModelProviderConfig } from '@/types/user/settings';

 export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
+  ai21: {
+    enabled: false,
+    enabledModels: filterEnabledModels(Ai21ProviderCard),
+  },
   ai360: {
     enabled: false,
     enabledModels: filterEnabledModels(Ai360ProviderCard),
@@ -57,6 +63,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
     enabled: false,
     enabledModels: filterEnabledModels(FireworksAIProviderCard),
   },
+  github: {
+    enabled: false,
+    enabledModels: filterEnabledModels(GithubProviderCard),
+  },
   google: {
     enabled: false,
     enabledModels: filterEnabledModels(GoogleProviderCard),
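`filterEnabledModels` itself is not shown in this hunk; from its usage it plausibly collects the ids of the models a provider card flags as `enabled`. A hypothetical reconstruction, for illustration only:

```ts
import { ModelProviderCard } from '@/types/llm';

// Hypothetical: gather the default-enabled model ids from a provider card.
const filterEnabledModels = (card: ModelProviderCard) =>
  card.chatModels.filter((model) => model.enabled).map((model) => model.id);
```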
package/src/features/Conversation/Error/APIKeyForm/index.tsx
CHANGED
@@ -51,6 +51,10 @@ const APIKeyForm = memo<APIKeyFormProps>(({ id, provider }) => {
       return 'sk-********************************';
     }

+    case ModelProvider.Github: {
+      return 'ghp_*****************************';
+    }
+
     default: {
       return '*********************************';
     }
package/src/features/Conversation/Messages/Tool/Inspector/index.tsx
CHANGED
@@ -52,7 +52,7 @@ const Inspector = memo<InspectorProps>(
     const { styles } = useStyles();
     const [open, setOpen] = useState(false);
     const [isMessageToolUIOpen, openToolUI, togglePortal] = useChatStore((s) => [
-      chatPortalSelectors.
+      chatPortalSelectors.isPluginUIOpen(id)(s),
       s.openToolUI,
       s.togglePortal,
     ]);
package/src/features/Conversation/Messages/Tool/index.tsx
CHANGED
@@ -17,7 +17,7 @@ import Inspector from './Inspector';
 const Message = memo<ChatMessage>(({ id, content, pluginState, plugin }) => {
   const [loading, isMessageToolUIOpen] = useChatStore((s) => [
     chatSelectors.isPluginApiInvoking(id)(s),
-    chatPortalSelectors.
+    chatPortalSelectors.isPluginUIOpen(id)(s),
   ]);
   const { direction } = useContext(ConfigProvider.ConfigContext);
   const { t } = useTranslation('plugin');
package/src/features/Conversation/components/ChatItem/index.tsx
CHANGED
@@ -1,7 +1,7 @@
 import { ChatItem } from '@lobehub/ui';
 import { createStyles } from 'antd-style';
 import isEqual from 'fast-deep-equal';
-import { ReactNode, memo, useCallback } from 'react';
+import { ReactNode, memo, useCallback, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';

 import { useAgentStore } from '@/store/agent';
@@ -22,8 +22,12 @@ import {
   renderMessages,
   useAvatarsClick,
 } from '../../Messages';
+import { markdownElements } from '../MarkdownElements';
 import ActionsBar from './ActionsBar';
 import HistoryDivider from './HistoryDivider';
+import { processWithArtifact } from './utils';
+
+const rehypePlugins = markdownElements.map((element) => element.rehypePlugin);

 const useStyles = createStyles(({ css, prefixCls }) => ({
   loading: css`
@@ -143,6 +147,22 @@ const Item = memo<ChatListItemProps>(({ index, id }) => {
     );
   });

+  // remove line breaks in artifact tag to make the ast transform easier
+  const message =
+    !editing && item?.role === 'assistant' ? processWithArtifact(item?.content) : item?.content;
+
+  const components = useMemo(
+    () =>
+      Object.fromEntries(
+        markdownElements.map((element) => {
+          const Component = element.Component;
+
+          return [element.tag, (props: any) => <Component {...props} id={id} />];
+        }),
+      ),
+    [id],
+  );
+
   return (
     item && (
       <>
@@ -165,9 +185,11 @@ const Item = memo<ChatListItemProps>(({ index, id }) => {
           fontSize={fontSize}
           loading={isProcessing}
           markdownProps={{
+            components,
             customRender: markdownCustomRender,
+            rehypePlugins,
           }}
-          message={
+          message={message}
           messageExtra={<MessageExtra data={item} />}
           onAvatarClick={onAvatarsClick?.(item.role)}
           onChange={(value) => updateMessageContent(item.id, value)}