@lobehub/chat 1.14.2 → 1.14.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/CHANGELOG.md +50 -0
  2. package/README.md +2 -2
  3. package/README.zh-CN.md +2 -2
  4. package/docker-compose/.env.example +35 -0
  5. package/docker-compose/.env.zh-CN.example +35 -0
  6. package/docker-compose/docker-compose.yml +70 -0
  7. package/docker-compose/minio-bucket.config.json +34 -0
  8. package/docs/self-hosting/advanced/auth/next-auth/github.mdx +1 -1
  9. package/docs/self-hosting/advanced/auth/next-auth/github.zh-CN.mdx +4 -4
  10. package/docs/self-hosting/advanced/auth/next-auth/logto.mdx +74 -0
  11. package/docs/self-hosting/advanced/auth/next-auth/logto.zh-CN.mdx +78 -0
  12. package/docs/self-hosting/environment-variables/auth.mdx +21 -0
  13. package/docs/self-hosting/environment-variables/auth.zh-CN.mdx +31 -8
  14. package/docs/self-hosting/server-database/docker-compose.mdx +486 -7
  15. package/docs/self-hosting/server-database/docker-compose.zh-CN.mdx +477 -4
  16. package/docs/self-hosting/server-database/docker.mdx +20 -0
  17. package/docs/self-hosting/server-database/docker.zh-CN.mdx +27 -4
  18. package/docs/usage/features/pwa.zh-CN.mdx +1 -1
  19. package/package.json +2 -2
  20. package/src/app/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Tags.tsx +1 -1
  21. package/src/app/(main)/chat/(workspace)/features/ShareButton/Preview.tsx +1 -1
  22. package/src/app/(main)/chat/@session/features/SessionListContent/List/Item/index.tsx +1 -1
  23. package/src/app/(main)/settings/llm/ProviderList/Azure/index.tsx +1 -14
  24. package/src/app/(main)/settings/llm/ProviderList/Bedrock/index.tsx +1 -10
  25. package/src/app/(main)/settings/llm/ProviderList/Ollama/index.tsx +0 -2
  26. package/src/app/(main)/settings/llm/ProviderList/OpenAI/index.tsx +0 -3
  27. package/src/app/(main)/settings/llm/ProviderList/providers.tsx +28 -174
  28. package/src/app/(main)/settings/llm/components/ProviderConfig/index.tsx +17 -17
  29. package/src/app/(main)/settings/llm/components/ProviderModelList/CustomModelOption.tsx +1 -1
  30. package/src/app/(main)/settings/llm/components/ProviderModelList/Option.tsx +1 -1
  31. package/src/app/(main)/settings/tts/features/{const.ts → const.tsx} +6 -3
  32. package/src/components/ModelSelect/index.tsx +15 -5
  33. package/src/const/url.ts +1 -0
  34. package/src/database/server/models/asyncTask.ts +4 -4
  35. package/src/features/AgentSetting/AgentTTS/options.tsx +19 -0
  36. package/src/features/Conversation/Extras/Assistant.tsx +1 -1
  37. package/src/server/routers/async/caller.ts +2 -0
  38. package/src/server/routers/async/file.ts +72 -46
  39. package/src/server/services/chunk/index.ts +32 -3
  40. package/src/types/asyncTask.ts +23 -4
  41. package/src/components/ModelIcon/index.tsx +0 -114
  42. package/src/components/ModelProviderIcon/index.tsx +0 -148
  43. package/src/components/ModelTag/ModelIcon.tsx +0 -105
  44. package/src/components/ModelTag/index.tsx +0 -13
  45. package/src/features/AgentSetting/AgentTTS/options.ts +0 -16
@@ -15,7 +15,12 @@ import { ModelProvider } from '@/libs/agent-runtime';
15
15
  import { asyncAuthedProcedure, asyncRouter as router } from '@/libs/trpc/async';
16
16
  import { S3 } from '@/server/modules/S3';
17
17
  import { ChunkService } from '@/server/services/chunk';
18
- import { AsyncTaskError, AsyncTaskErrorType, AsyncTaskStatus } from '@/types/asyncTask';
18
+ import {
19
+ AsyncTaskError,
20
+ AsyncTaskErrorType,
21
+ AsyncTaskStatus,
22
+ IAsyncTaskError,
23
+ } from '@/types/asyncTask';
19
24
  import { safeParseJSON } from '@/utils/safeParseJSON';
20
25
 
21
26
  const fileProcedure = asyncAuthedProcedure.use(async (opts) => {
@@ -55,10 +60,12 @@ export const fileRouter = router({
55
60
  try {
56
61
  const timeoutPromise = new Promise((_, reject) => {
57
62
  setTimeout(() => {
58
- reject({
59
- body: { detail: 'embedding task is timeout, please try again' },
60
- name: AsyncTaskErrorType.Timeout,
61
- } as AsyncTaskError);
63
+ reject(
64
+ new AsyncTaskError(
65
+ AsyncTaskErrorType.Timeout,
66
+ 'embedding task is timeout, please try again',
67
+ ),
68
+ );
62
69
  }, ASYNC_TASK_TIMEOUT);
63
70
  });
64
71
 
@@ -76,40 +83,47 @@ export const fileRouter = router({
76
83
  const chunks = await ctx.chunkModel.getChunksTextByFileId(input.fileId);
77
84
  const requestArray = chunk(chunks, CHUNK_SIZE);
78
85
 
79
- await pMap(
80
- requestArray,
81
- async (chunks, index) => {
82
- const agentRuntime = await initAgentRuntimeWithUserPayload(
83
- ModelProvider.OpenAI,
84
- ctx.jwtPayload,
85
- );
86
-
87
- const number = index + 1;
88
- console.log(`执行第 ${number} 个任务`);
89
-
90
- console.time(`任务[${number}]: embeddings`);
91
-
92
- const embeddings = await agentRuntime.embeddings({
93
- dimensions: 1024,
94
- input: chunks.map((c) => c.text),
95
- model: input.model,
96
- });
97
- console.timeEnd(`任务[${number}]: embeddings`);
98
-
99
- const items: NewEmbeddingsItem[] =
100
- embeddings?.map((e) => ({
101
- chunkId: chunks[e.index].id,
102
- embeddings: e.embedding,
103
- fileId: input.fileId,
104
- model: input.model,
105
- })) || [];
86
+ try {
87
+ await pMap(
88
+ requestArray,
89
+ async (chunks, index) => {
90
+ const agentRuntime = await initAgentRuntimeWithUserPayload(
91
+ ModelProvider.OpenAI,
92
+ ctx.jwtPayload,
93
+ );
106
94
 
107
- console.time(`任务[${number}]: insert db`);
108
- await ctx.embeddingModel.bulkCreate(items);
109
- console.timeEnd(`任务[${number}]: insert db`);
110
- },
111
- { concurrency: CONCURRENCY },
112
- );
95
+ const number = index + 1;
96
+ console.log(`执行第 ${number} 个任务`);
97
+
98
+ console.time(`任务[${number}]: embeddings`);
99
+
100
+ const embeddings = await agentRuntime.embeddings({
101
+ dimensions: 1024,
102
+ input: chunks.map((c) => c.text),
103
+ model: input.model,
104
+ });
105
+ console.timeEnd(`任务[${number}]: embeddings`);
106
+
107
+ const items: NewEmbeddingsItem[] =
108
+ embeddings?.map((e) => ({
109
+ chunkId: chunks[e.index].id,
110
+ embeddings: e.embedding,
111
+ fileId: input.fileId,
112
+ model: input.model,
113
+ })) || [];
114
+
115
+ console.time(`任务[${number}]: insert db`);
116
+ await ctx.embeddingModel.bulkCreate(items);
117
+ console.timeEnd(`任务[${number}]: insert db`);
118
+ },
119
+ { concurrency: CONCURRENCY },
120
+ );
121
+ } catch (e) {
122
+ throw {
123
+ message: JSON.stringify(e),
124
+ name: AsyncTaskErrorType.EmbeddingError,
125
+ };
126
+ }
113
127
 
114
128
  const duration = Date.now() - startAt;
115
129
  // update the task status to success
@@ -125,8 +139,9 @@ export const fileRouter = router({
125
139
  return await Promise.race([embeddingPromise(), timeoutPromise]);
126
140
  } catch (e) {
127
141
  console.error('embeddingChunks error', e);
142
+
128
143
  await ctx.asyncTaskModel.update(input.taskId, {
129
- error: e,
144
+ error: new AsyncTaskError((e as Error).name, (e as Error).message),
130
145
  status: AsyncTaskStatus.Error,
131
146
  });
132
147
 
@@ -175,10 +190,12 @@ export const fileRouter = router({
175
190
 
176
191
  const timeoutPromise = new Promise((_, reject) => {
177
192
  setTimeout(() => {
178
- reject({
179
- body: { detail: 'chunking task is timeout, please try again' },
180
- name: AsyncTaskErrorType.Timeout,
181
- } as AsyncTaskError);
193
+ reject(
194
+ new AsyncTaskError(
195
+ AsyncTaskErrorType.Timeout,
196
+ 'chunking task is timeout, please try again',
197
+ ),
198
+ );
182
199
  }, ASYNC_TASK_TIMEOUT);
183
200
  });
184
201
 
@@ -201,6 +218,15 @@ export const fileRouter = router({
201
218
 
202
219
  const duration = Date.now() - startAt;
203
220
 
221
+ // if no chunk found, throw error
222
+ if (chunks.length === 0) {
223
+ throw {
224
+ message:
225
+ 'No chunk found in this file. it may due to current chunking method can not parse file accurately',
226
+ name: AsyncTaskErrorType.NoChunkError,
227
+ };
228
+ }
229
+
204
230
  await ctx.chunkModel.bulkCreate(chunks, input.fileId);
205
231
 
206
232
  if (chunkResult.unstructuredChunks) {
@@ -228,9 +254,9 @@ export const fileRouter = router({
228
254
  } catch (e) {
229
255
  const error = e as any;
230
256
 
231
- const asyncTaskError: AsyncTaskError = error.body
232
- ? { body: safeParseJSON(error.body) ?? error.body, name: error.name }
233
- : { body: { detail: error.message }, name: (error as Error).name };
257
+ const asyncTaskError = error.body
258
+ ? ({ body: safeParseJSON(error.body) ?? error.body, name: error.name } as IAsyncTaskError)
259
+ : new AsyncTaskError((error as Error).name, error.message);
234
260
 
235
261
  console.error('[Chunking Error]', asyncTaskError);
236
262
  await ctx.asyncTaskModel.update(input.taskId, {
@@ -3,7 +3,12 @@ import { AsyncTaskModel } from '@/database/server/models/asyncTask';
3
3
  import { FileModel } from '@/database/server/models/file';
4
4
  import { ChunkContentParams, ContentChunk } from '@/server/modules/ContentChunk';
5
5
  import { createAsyncServerClient } from '@/server/routers/async';
6
- import { AsyncTaskStatus, AsyncTaskType } from '@/types/asyncTask';
6
+ import {
7
+ AsyncTaskError,
8
+ AsyncTaskErrorType,
9
+ AsyncTaskStatus,
10
+ AsyncTaskType,
11
+ } from '@/types/asyncTask';
7
12
 
8
13
  export class ChunkService {
9
14
  private userId: string;
@@ -40,7 +45,19 @@ export class ChunkService {
40
45
  const asyncCaller = await createAsyncServerClient(this.userId, payload);
41
46
 
42
47
  // trigger embedding task asynchronously
43
- await asyncCaller.file.embeddingChunks.mutate({ fileId, taskId: asyncTaskId });
48
+ try {
49
+ await asyncCaller.file.embeddingChunks.mutate({ fileId, taskId: asyncTaskId });
50
+ } catch (e) {
51
+ console.error('[embeddingFileChunks] error:', e);
52
+
53
+ await this.asyncTaskModel.update(asyncTaskId, {
54
+ error: new AsyncTaskError(
55
+ AsyncTaskErrorType.TaskTriggerError,
56
+ 'trigger chunk embedding async task error. Please check your app is public available or check your proxy settings is set correctly.',
57
+ ),
58
+ status: AsyncTaskStatus.Error,
59
+ });
60
+ }
44
61
 
45
62
  return asyncTaskId;
46
63
  }
@@ -67,7 +84,19 @@ export class ChunkService {
67
84
  const asyncCaller = await createAsyncServerClient(this.userId, payload);
68
85
 
69
86
  // trigger parse file task asynchronously
70
- asyncCaller.file.parseFileToChunks.mutate({ fileId: fileId, taskId: asyncTaskId });
87
+ asyncCaller.file.parseFileToChunks
88
+ .mutate({ fileId: fileId, taskId: asyncTaskId })
89
+ .catch(async (e) => {
90
+ console.error('[ParseFileToChunks] error:', e);
91
+
92
+ await this.asyncTaskModel.update(asyncTaskId, {
93
+ error: new AsyncTaskError(
94
+ AsyncTaskErrorType.TaskTriggerError,
95
+ 'trigger file parse async task error. Please check your app is public available or check your proxy settings is set correctly.',
96
+ ),
97
+ status: AsyncTaskStatus.Error,
98
+ });
99
+ });
71
100
 
72
101
  return asyncTaskId;
73
102
  }
@@ -11,21 +11,40 @@ export enum AsyncTaskStatus {
11
11
  }
12
12
 
13
13
  export enum AsyncTaskErrorType {
14
- SDKError = 'SDKError',
14
+ EmbeddingError = 'EmbeddingError',
15
+ /**
16
+ * the chunk parse result it empty
17
+ */
18
+ NoChunkError = 'NoChunkError',
15
19
  ServerError = 'ServerError',
20
+ /**
21
+ * this happens when the task is not trigger successfully
22
+ */
23
+ TaskTriggerError = 'TaskTriggerError',
16
24
  Timeout = 'TaskTimeout',
17
25
  }
18
26
 
19
- export interface AsyncTaskError {
27
+ export interface IAsyncTaskError {
20
28
  body: string | { detail: string };
21
29
  name: string;
22
30
  }
23
31
 
32
+ export class AsyncTaskError implements IAsyncTaskError {
33
+ constructor(name: string, message: string) {
34
+ this.name = name;
35
+ this.body = { detail: message };
36
+ }
37
+
38
+ name: string;
39
+
40
+ body: { detail: string };
41
+ }
42
+
24
43
  export interface FileParsingTask {
25
44
  chunkCount?: number | null;
26
- chunkingError?: AsyncTaskError | null;
45
+ chunkingError?: IAsyncTaskError | null;
27
46
  chunkingStatus?: AsyncTaskStatus | null;
28
- embeddingError?: AsyncTaskError | null;
47
+ embeddingError?: IAsyncTaskError | null;
29
48
  embeddingStatus?: AsyncTaskStatus | null;
30
49
  finishEmbedding?: boolean;
31
50
  }
@@ -1,114 +0,0 @@
1
- import {
2
- Adobe,
3
- Ai21,
4
- Ai360,
5
- AiMass,
6
- Aws,
7
- Aya,
8
- Azure,
9
- Baichuan,
10
- ByteDance,
11
- ChatGLM,
12
- Claude,
13
- CodeGeeX,
14
- Cohere,
15
- Dbrx,
16
- DeepSeek,
17
- FishAudio,
18
- Gemini,
19
- Gemma,
20
- Hunyuan,
21
- LLaVA,
22
- Meta,
23
- Minimax,
24
- Mistral,
25
- Moonshot,
26
- OpenAI,
27
- OpenChat,
28
- OpenRouter,
29
- Perplexity,
30
- Rwkv,
31
- Spark,
32
- Stability,
33
- Stepfun,
34
- Tongyi,
35
- Wenxin,
36
- Yi,
37
- } from '@lobehub/icons';
38
- import { memo } from 'react';
39
-
40
- interface ModelProviderIconProps {
41
- model?: string;
42
- size?: number;
43
- }
44
-
45
- const ModelIcon = memo<ModelProviderIconProps>(({ model: originModel, size = 12 }) => {
46
- if (!originModel) return;
47
-
48
- // lower case the origin model so to better match more model id case
49
- const model = originModel.toLowerCase();
50
-
51
- // currently supported models, maybe not in its own provider
52
- if (model.includes('text-embedding-')) return <OpenAI.Avatar size={size} />;
53
- if (model.includes('gpt-3')) return <OpenAI.Avatar size={size} type={'gpt3'} />;
54
- if (model.includes('gpt-4')) return <OpenAI.Avatar size={size} type={'gpt4'} />;
55
- if (model.includes('glm-') || model.includes('chatglm')) return <ChatGLM.Avatar size={size} />;
56
- if (model.startsWith('codegeex')) return <CodeGeeX.Avatar size={size} />;
57
- if (model.includes('deepseek')) return <DeepSeek.Avatar size={size} />;
58
- if (model.includes('claude')) return <Claude.Avatar size={size} />;
59
- if (model.includes('titan')) return <Aws.Avatar size={size} />;
60
- if (model.includes('llama')) return <Meta.Avatar size={size} />;
61
- if (model.includes('llava')) return <LLaVA.Avatar size={size} />;
62
- if (model.includes('gemini')) return <Gemini.Avatar size={size} />;
63
- if (model.includes('gemma')) return <Gemma.Avatar size={size} />;
64
- if (model.includes('moonshot')) return <Moonshot.Avatar size={size} />;
65
- if (model.includes('qwen')) return <Tongyi.Avatar background={Tongyi.colorPrimary} size={size} />;
66
- if (model.includes('minmax') || model.includes('abab')) return <Minimax.Avatar size={size} />;
67
- if (
68
- model.includes('mistral') ||
69
- model.includes('mixtral') ||
70
- model.includes('codestral') ||
71
- model.includes('mathstral')
72
- )
73
- return <Mistral.Avatar size={size} />;
74
- if (model.includes('pplx') || model.includes('sonar')) return <Perplexity.Avatar size={size} />;
75
- if (model.includes('yi-')) return <Yi.Avatar size={size} />;
76
- if (model.startsWith('openrouter')) return <OpenRouter.Avatar size={size} />; // only for Cinematika and Auto
77
- if (model.startsWith('openchat')) return <OpenChat.Avatar size={size} />;
78
- if (model.includes('aya')) return <Aya.Avatar size={size} />;
79
- if (model.includes('command')) return <Cohere.Avatar size={size} />;
80
- if (model.includes('dbrx')) return <Dbrx.Avatar size={size} />;
81
- if (model.includes('step')) return <Stepfun.Avatar size={size} />;
82
- if (model.includes('taichu')) return <AiMass.Avatar size={size} />;
83
- if (model.includes('360gpt')) return <Ai360.Avatar size={size} />;
84
-
85
- // below: To be supported in providers, move up if supported
86
- if (model.includes('baichuan'))
87
- return <Baichuan.Avatar background={Baichuan.colorPrimary} size={size} />;
88
- if (model.includes('rwkv')) return <Rwkv.Avatar size={size} />;
89
- if (model.includes('ernie')) return <Wenxin.Avatar size={size} />;
90
- if (model.includes('spark')) return <Spark.Avatar size={size} />;
91
- if (model.includes('hunyuan')) return <Hunyuan.Avatar size={size} />;
92
- // ref https://github.com/fishaudio/Bert-VITS2/blob/master/train_ms.py#L702
93
- if (model.startsWith('d_') || model.startsWith('g_') || model.startsWith('wd_'))
94
- return <FishAudio.Avatar size={size} />;
95
- if (model.includes('skylark')) return <ByteDance.Avatar size={size} />;
96
-
97
- if (
98
- model.includes('stable-diffusion') ||
99
- model.includes('stable-video') ||
100
- model.includes('stable-cascade') ||
101
- model.includes('sdxl') ||
102
- model.includes('stablelm') ||
103
- model.startsWith('stable-') ||
104
- model.startsWith('sd3')
105
- )
106
- return <Stability.Avatar size={size} />;
107
-
108
- if (model.includes('phi3') || model.includes('phi-3') || model.includes('wizardlm'))
109
- return <Azure.Avatar size={size} />;
110
- if (model.includes('firefly')) return <Adobe.Avatar size={size} />;
111
- if (model.includes('jamba') || model.includes('j2-')) return <Ai21.Avatar size={size} />;
112
- });
113
-
114
- export default ModelIcon;
@@ -1,148 +0,0 @@
1
- import {
2
- Ai360,
3
- AiMass,
4
- Anthropic,
5
- Azure,
6
- Baichuan,
7
- Bedrock,
8
- DeepSeek,
9
- Google,
10
- Groq,
11
- LobeHub,
12
- Minimax,
13
- Mistral,
14
- Moonshot,
15
- Novita,
16
- Ollama,
17
- OpenAI,
18
- OpenRouter,
19
- Perplexity,
20
- SiliconCloud,
21
- Stepfun,
22
- Together,
23
- Tongyi,
24
- ZeroOne,
25
- Zhipu,
26
- } from '@lobehub/icons';
27
- import { memo } from 'react';
28
- import { Center } from 'react-layout-kit';
29
-
30
- import { ModelProvider } from '@/libs/agent-runtime';
31
-
32
- interface ModelProviderIconProps {
33
- provider?: string;
34
- }
35
-
36
- const ModelProviderIcon = memo<ModelProviderIconProps>(({ provider }) => {
37
- switch (provider) {
38
- case 'lobehub': {
39
- return <LobeHub.Color size={20} />;
40
- }
41
-
42
- case ModelProvider.ZhiPu: {
43
- return <Zhipu size={20} />;
44
- }
45
-
46
- case ModelProvider.Bedrock: {
47
- return <Bedrock size={20} />;
48
- }
49
-
50
- case ModelProvider.DeepSeek: {
51
- return <DeepSeek size={20} />;
52
- }
53
-
54
- case ModelProvider.Google: {
55
- return (
56
- <Center height={20} width={20}>
57
- <Google size={14} />
58
- </Center>
59
- );
60
- }
61
-
62
- case ModelProvider.Azure: {
63
- return (
64
- <Center height={20} width={20}>
65
- <Azure size={14} />
66
- </Center>
67
- );
68
- }
69
-
70
- case ModelProvider.Moonshot: {
71
- return <Moonshot size={20} />;
72
- }
73
-
74
- case ModelProvider.OpenAI: {
75
- return <OpenAI size={20} />;
76
- }
77
-
78
- case ModelProvider.Ollama: {
79
- return <Ollama size={20} />;
80
- }
81
-
82
- case ModelProvider.Perplexity: {
83
- return <Perplexity size={20} />;
84
- }
85
-
86
- case ModelProvider.Minimax: {
87
- return <Minimax size={20} />;
88
- }
89
-
90
- case ModelProvider.Mistral: {
91
- return <Mistral size={20} />;
92
- }
93
-
94
- case ModelProvider.Anthropic: {
95
- return <Anthropic size={20} />;
96
- }
97
-
98
- case ModelProvider.Groq: {
99
- return <Groq size={20} />;
100
- }
101
-
102
- case ModelProvider.OpenRouter: {
103
- return <OpenRouter size={20} />;
104
- }
105
-
106
- case ModelProvider.ZeroOne: {
107
- return <ZeroOne size={20} />;
108
- }
109
-
110
- case ModelProvider.TogetherAI: {
111
- return <Together size={20} />;
112
- }
113
-
114
- case ModelProvider.Qwen: {
115
- return <Tongyi size={20} />;
116
- }
117
-
118
- case ModelProvider.Stepfun: {
119
- return <Stepfun size={20} />;
120
- }
121
-
122
- case ModelProvider.Novita: {
123
- return <Novita size={20} />;
124
- }
125
-
126
- case ModelProvider.Baichuan: {
127
- return <Baichuan size={20} />;
128
- }
129
-
130
- case ModelProvider.Taichu: {
131
- return <AiMass size={20} />;
132
- }
133
-
134
- case ModelProvider.Ai360: {
135
- return <Ai360 size={20} />;
136
- }
137
-
138
- case ModelProvider.SiliconCloud: {
139
- return <SiliconCloud size={20} />;
140
- }
141
-
142
- default: {
143
- return null;
144
- }
145
- }
146
- });
147
-
148
- export default ModelProviderIcon;
@@ -1,105 +0,0 @@
1
- import {
2
- AdobeFirefly,
3
- Ai21,
4
- Ai360,
5
- AiMass,
6
- Aws,
7
- Aya,
8
- Azure,
9
- Baichuan,
10
- ByteDance,
11
- ChatGLM,
12
- Claude,
13
- CodeGeeX,
14
- Cohere,
15
- Dbrx,
16
- DeepSeek,
17
- FishAudio,
18
- Gemini,
19
- Gemma,
20
- Hunyuan,
21
- LLaVA,
22
- Meta,
23
- Minimax,
24
- Mistral,
25
- Moonshot,
26
- OpenAI,
27
- OpenChat,
28
- OpenRouter,
29
- Perplexity,
30
- Rwkv,
31
- Spark,
32
- Stability,
33
- Stepfun,
34
- Tongyi,
35
- Wenxin,
36
- Yi,
37
- } from '@lobehub/icons';
38
- import { memo } from 'react';
39
-
40
- interface ModelIconProps {
41
- model?: string;
42
- size?: number;
43
- }
44
-
45
- const ModelIcon = memo<ModelIconProps>(({ model: originModel, size = 12 }) => {
46
- if (!originModel) return;
47
-
48
- // lower case the origin model so to better match more model id case
49
- const model = originModel.toLowerCase();
50
-
51
- // currently supported models, maybe not in its own provider
52
- if (model.startsWith('gpt')) return <OpenAI size={size} />;
53
- if (model.startsWith('glm') || model.includes('chatglm')) return <ChatGLM size={size} />;
54
- if (model.includes('codegeex')) return <CodeGeeX size={size} />;
55
- if (model.includes('claude')) return <Claude size={size} />;
56
- if (model.includes('deepseek')) return <DeepSeek size={size} />;
57
- if (model.includes('titan')) return <Aws size={size} />;
58
- if (model.includes('llama')) return <Meta size={size} />;
59
- if (model.includes('llava')) return <LLaVA size={size} />;
60
- if (model.includes('gemini')) return <Gemini size={size} />;
61
- if (model.includes('gemma')) return <Gemma.Simple size={size} />;
62
- if (model.includes('moonshot')) return <Moonshot size={size} />;
63
- if (model.includes('qwen')) return <Tongyi size={size} />;
64
- if (model.includes('minmax')) return <Minimax size={size} />;
65
- if (model.includes('abab')) return <Minimax size={size} />;
66
- if (model.includes('mistral') || model.includes('mixtral') || model.includes('codestral') || model.includes('mathstral')) return <Mistral size={size} />;
67
- if (model.includes('pplx') || model.includes('sonar')) return <Perplexity size={size} />;
68
- if (model.includes('yi-')) return <Yi size={size} />;
69
- if (model.startsWith('openrouter')) return <OpenRouter size={size} />; // only for Cinematika and Auto
70
- if (model.startsWith('openchat')) return <OpenChat size={size} />;
71
- if (model.includes('aya')) return <Aya.Avatar size={size} />;
72
- if (model.includes('command')) return <Cohere size={size} />;
73
- if (model.includes('dbrx')) return <Dbrx size={size} />;
74
- if (model.includes('step')) return <Stepfun size={size} />;
75
- if (model.includes('taichu')) return <AiMass size={size} />;
76
- if (model.includes('360gpt')) return <Ai360 size={size} />;
77
-
78
- // below: To be supported in providers, move up if supported
79
- if (model.includes('baichuan')) return <Baichuan size={size} />;
80
- if (model.includes('rwkv')) return <Rwkv size={size} />;
81
- if (model.includes('ernie')) return <Wenxin size={size} />;
82
- if (model.includes('spark')) return <Spark size={size} />;
83
- if (model.includes('hunyuan')) return <Hunyuan size={size} />;
84
- // ref https://github.com/fishaudio/Bert-VITS2/blob/master/train_ms.py#L702
85
- if (model.startsWith('d_') || model.startsWith('g_') || model.startsWith('wd_'))
86
- return <FishAudio size={size} />;
87
- if (model.includes('skylark')) return <ByteDance size={size} />;
88
-
89
- if (
90
- model.includes('stable-diffusion') ||
91
- model.includes('stable-video') ||
92
- model.includes('stable-cascade') ||
93
- model.includes('sdxl') ||
94
- model.includes('stablelm') ||
95
- model.startsWith('stable-') ||
96
- model.startsWith('sd3')
97
- )
98
- return <Stability size={size} />;
99
-
100
- if (model.includes('phi3') || model.includes('phi-3') || model.includes('wizardlm')) return <Azure size={size} />;
101
- if (model.includes('firefly')) return <AdobeFirefly size={size} />;
102
- if (model.includes('jamba') || model.includes('j2-')) return <Ai21 size={size} />;
103
- });
104
-
105
- export default ModelIcon;
@@ -1,13 +0,0 @@
1
- import { Tag } from '@lobehub/ui';
2
- import { memo } from 'react';
3
-
4
- import ModelIcon from './ModelIcon';
5
-
6
- interface ModelTagProps {
7
- model: string;
8
- }
9
- const ModelTag = memo<ModelTagProps>(({ model }) => (
10
- <Tag icon={<ModelIcon model={model} />}>{model}</Tag>
11
- ));
12
-
13
- export default ModelTag;
@@ -1,16 +0,0 @@
1
- import { SelectProps } from 'antd';
2
-
3
- export const ttsOptions: SelectProps['options'] = [
4
- {
5
- label: 'OpenAI',
6
- value: 'openai',
7
- },
8
- {
9
- label: 'Edge Speech',
10
- value: 'edge',
11
- },
12
- {
13
- label: 'Microsoft Speech',
14
- value: 'microsoft',
15
- },
16
- ];