@lobehub/chat 1.21.16 → 1.22.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. package/CHANGELOG.md +33 -0
  2. package/README.zh-CN.md +8 -6
  3. package/docs/usage/providers/wenxin.mdx +4 -3
  4. package/docs/usage/providers/wenxin.zh-CN.mdx +4 -3
  5. package/locales/ar/error.json +1 -0
  6. package/locales/ar/modelProvider.json +7 -0
  7. package/locales/ar/models.json +18 -6
  8. package/locales/ar/providers.json +3 -0
  9. package/locales/bg-BG/error.json +1 -0
  10. package/locales/bg-BG/modelProvider.json +7 -0
  11. package/locales/bg-BG/models.json +18 -6
  12. package/locales/bg-BG/providers.json +3 -0
  13. package/locales/de-DE/error.json +1 -0
  14. package/locales/de-DE/modelProvider.json +7 -0
  15. package/locales/de-DE/models.json +18 -6
  16. package/locales/de-DE/providers.json +3 -0
  17. package/locales/en-US/error.json +1 -0
  18. package/locales/en-US/modelProvider.json +7 -0
  19. package/locales/en-US/models.json +18 -6
  20. package/locales/en-US/providers.json +3 -0
  21. package/locales/es-ES/error.json +1 -0
  22. package/locales/es-ES/modelProvider.json +7 -0
  23. package/locales/es-ES/models.json +18 -6
  24. package/locales/es-ES/providers.json +3 -0
  25. package/locales/fr-FR/error.json +1 -0
  26. package/locales/fr-FR/modelProvider.json +7 -0
  27. package/locales/fr-FR/models.json +17 -5
  28. package/locales/fr-FR/providers.json +3 -0
  29. package/locales/it-IT/error.json +1 -0
  30. package/locales/it-IT/modelProvider.json +7 -0
  31. package/locales/it-IT/models.json +18 -6
  32. package/locales/it-IT/providers.json +3 -0
  33. package/locales/ja-JP/error.json +1 -0
  34. package/locales/ja-JP/modelProvider.json +7 -0
  35. package/locales/ja-JP/models.json +18 -6
  36. package/locales/ja-JP/providers.json +3 -0
  37. package/locales/ko-KR/error.json +1 -0
  38. package/locales/ko-KR/modelProvider.json +7 -0
  39. package/locales/ko-KR/models.json +17 -5
  40. package/locales/ko-KR/providers.json +3 -0
  41. package/locales/nl-NL/error.json +1 -0
  42. package/locales/nl-NL/modelProvider.json +7 -0
  43. package/locales/nl-NL/models.json +17 -5
  44. package/locales/nl-NL/providers.json +3 -0
  45. package/locales/pl-PL/error.json +1 -0
  46. package/locales/pl-PL/modelProvider.json +7 -0
  47. package/locales/pl-PL/models.json +18 -6
  48. package/locales/pl-PL/providers.json +3 -0
  49. package/locales/pt-BR/error.json +1 -0
  50. package/locales/pt-BR/modelProvider.json +7 -0
  51. package/locales/pt-BR/models.json +18 -6
  52. package/locales/pt-BR/providers.json +3 -0
  53. package/locales/ru-RU/error.json +1 -0
  54. package/locales/ru-RU/modelProvider.json +7 -0
  55. package/locales/ru-RU/models.json +18 -6
  56. package/locales/ru-RU/providers.json +3 -0
  57. package/locales/tr-TR/error.json +1 -0
  58. package/locales/tr-TR/modelProvider.json +7 -0
  59. package/locales/tr-TR/models.json +18 -6
  60. package/locales/tr-TR/providers.json +3 -0
  61. package/locales/vi-VN/error.json +1 -0
  62. package/locales/vi-VN/modelProvider.json +7 -0
  63. package/locales/vi-VN/models.json +18 -6
  64. package/locales/vi-VN/providers.json +3 -0
  65. package/locales/zh-CN/error.json +2 -1
  66. package/locales/zh-CN/modelProvider.json +8 -1
  67. package/locales/zh-CN/models.json +16 -4
  68. package/locales/zh-CN/providers.json +3 -0
  69. package/locales/zh-TW/error.json +1 -0
  70. package/locales/zh-TW/modelProvider.json +7 -0
  71. package/locales/zh-TW/models.json +16 -4
  72. package/locales/zh-TW/providers.json +3 -0
  73. package/package.json +5 -3
  74. package/src/app/(main)/settings/llm/ProviderList/HuggingFace/index.tsx +53 -0
  75. package/src/app/(main)/settings/llm/ProviderList/providers.tsx +12 -1
  76. package/src/config/llm.ts +10 -0
  77. package/src/config/modelProviders/huggingface.ts +50 -0
  78. package/src/config/modelProviders/index.ts +4 -0
  79. package/src/const/settings/llm.ts +5 -0
  80. package/src/features/Conversation/Error/index.tsx +1 -0
  81. package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
  82. package/src/libs/agent-runtime/error.ts +1 -0
  83. package/src/libs/agent-runtime/groq/index.ts +1 -1
  84. package/src/libs/agent-runtime/huggingface/index.ts +48 -0
  85. package/src/libs/agent-runtime/types/type.ts +1 -0
  86. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +58 -20
  87. package/src/libs/agent-runtime/utils/streams/openai.test.ts +78 -7
  88. package/src/libs/agent-runtime/utils/streams/openai.ts +38 -5
  89. package/src/libs/agent-runtime/utils/streams/protocol.ts +63 -4
  90. package/src/locales/default/error.ts +2 -2
  91. package/src/locales/default/modelProvider.ts +8 -1
  92. package/src/server/globalConfig/index.ts +12 -1
  93. package/src/server/modules/AgentRuntime/index.ts +10 -0
  94. package/src/services/_url.ts +4 -5
  95. package/src/types/user/settings/keyVaults.ts +1 -0
  96. /package/src/app/(backend)/{api → webapi}/chat/[provider]/route.test.ts +0 -0
  97. /package/src/app/(backend)/{api → webapi}/chat/[provider]/route.ts +0 -0
  98. /package/src/app/(backend)/{api → webapi}/chat/anthropic/route.test.ts +0 -0
  99. /package/src/app/(backend)/{api → webapi}/chat/anthropic/route.ts +0 -0
  100. /package/src/app/(backend)/{api → webapi}/chat/google/route.test.ts +0 -0
  101. /package/src/app/(backend)/{api → webapi}/chat/google/route.ts +0 -0
  102. /package/src/app/(backend)/{api → webapi}/chat/minimax/route.test.ts +0 -0
  103. /package/src/app/(backend)/{api → webapi}/chat/minimax/route.ts +0 -0
  104. /package/src/app/(backend)/{api → webapi}/chat/models/[provider]/route.ts +0 -0
  105. /package/src/app/(backend)/{api → webapi}/chat/openai/route.test.ts +0 -0
  106. /package/src/app/(backend)/{api → webapi}/chat/openai/route.ts +0 -0
  107. /package/src/app/(backend)/{api → webapi}/chat/wenxin/route.test.ts +0 -0
  108. /package/src/app/(backend)/{api → webapi}/chat/wenxin/route.ts +0 -0
package/src/libs/agent-runtime/utils/streams/openai.test.ts

@@ -1,6 +1,9 @@
  import { describe, expect, it, vi } from 'vitest';

+ import { AgentRuntimeErrorType } from '@/libs/agent-runtime';
+
  import { OpenAIStream } from './openai';
+ import { FIRST_CHUNK_ERROR_KEY } from './protocol';

  describe('OpenAIStream', () => {
    it('should transform OpenAI stream to protocol stream', async () => {

@@ -45,10 +48,12 @@ describe('OpenAIStream', () => {
      const onCompletionMock = vi.fn();

      const protocolStream = OpenAIStream(mockOpenAIStream, {
-       onStart: onStartMock,
-       onText: onTextMock,
-       onToken: onTokenMock,
-       onCompletion: onCompletionMock,
+       callbacks: {
+         onStart: onStartMock,
+         onText: onTextMock,
+         onToken: onTokenMock,
+         onCompletion: onCompletionMock,
+       },
      });

      const decoder = new TextDecoder();

@@ -189,7 +194,9 @@ describe('OpenAIStream', () => {
      const onToolCallMock = vi.fn();

      const protocolStream = OpenAIStream(mockOpenAIStream, {
-       onToolCall: onToolCallMock,
+       callbacks: {
+         onToolCall: onToolCallMock,
+       },
      });

      const decoder = new TextDecoder();

@@ -281,6 +288,66 @@ describe('OpenAIStream', () => {
      );
    });

+   it('should handle FIRST_CHUNK_ERROR_KEY', async () => {
+     const mockOpenAIStream = new ReadableStream({
+       start(controller) {
+         controller.enqueue({
+           [FIRST_CHUNK_ERROR_KEY]: true,
+           errorType: AgentRuntimeErrorType.ProviderBizError,
+           message: 'Test error',
+         });
+         controller.close();
+       },
+     });
+
+     const protocolStream = OpenAIStream(mockOpenAIStream);
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual([
+       'id: first_chunk_error\n',
+       'event: error\n',
+       `data: {"body":{"errorType":"ProviderBizError","message":"Test error"},"type":"ProviderBizError"}\n\n`,
+     ]);
+   });
+
+   it('should use bizErrorTypeTransformer', async () => {
+     const mockOpenAIStream = new ReadableStream({
+       start(controller) {
+         controller.enqueue(
+           '%FIRST_CHUNK_ERROR%: ' +
+             JSON.stringify({ message: 'Custom error', name: 'CustomError' }),
+         );
+         controller.close();
+       },
+     });
+
+     const protocolStream = OpenAIStream(mockOpenAIStream, {
+       bizErrorTypeTransformer: () => AgentRuntimeErrorType.PermissionDenied,
+       provider: 'grok',
+     });
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual([
+       'id: first_chunk_error\n',
+       'event: error\n',
+       `data: {"body":{"message":"Custom error","errorType":"PermissionDenied","provider":"grok"},"type":"PermissionDenied"}\n\n`,
+     ]);
+   });
+
    describe('Tools Calling', () => {
      it('should handle OpenAI official tool calls', async () => {
        const mockOpenAIStream = new ReadableStream({

@@ -316,7 +383,9 @@ describe('OpenAIStream', () => {
      const onToolCallMock = vi.fn();

      const protocolStream = OpenAIStream(mockOpenAIStream, {
-       onToolCall: onToolCallMock,
+       callbacks: {
+         onToolCall: onToolCallMock,
+       },
      });

      const decoder = new TextDecoder();

@@ -447,7 +516,9 @@ describe('OpenAIStream', () => {
      const onToolCallMock = vi.fn();

      const protocolStream = OpenAIStream(mockOpenAIStream, {
-       onToolCall: onToolCallMock,
+       callbacks: {
+         onToolCall: onToolCallMock,
+       },
      });

      const decoder = new TextDecoder();
package/src/libs/agent-runtime/utils/streams/openai.ts

@@ -3,14 +3,17 @@ import type { Stream } from 'openai/streaming';

  import { ChatMessageError } from '@/types/message';

+ import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '../../error';
  import { ChatStreamCallbacks } from '../../types';
  import {
+   FIRST_CHUNK_ERROR_KEY,
    StreamProtocolChunk,
    StreamProtocolToolCallChunk,
    StreamStack,
    StreamToolCallChunkData,
    convertIterableToStream,
    createCallbacksTransformer,
+   createFirstErrorHandleTransformer,
    createSSEProtocolTransformer,
    generateToolCallId,
  } from './protocol';

@@ -19,6 +22,21 @@ export const transformOpenAIStream = (
    chunk: OpenAI.ChatCompletionChunk,
    stack?: StreamStack,
  ): StreamProtocolChunk => {
+   // handle the first chunk error
+   if (FIRST_CHUNK_ERROR_KEY in chunk) {
+     delete chunk[FIRST_CHUNK_ERROR_KEY];
+     // @ts-ignore
+     delete chunk['name'];
+     // @ts-ignore
+     delete chunk['stack'];
+
+     const errorData = {
+       body: chunk,
+       type: 'errorType' in chunk ? chunk.errorType : AgentRuntimeErrorType.ProviderBizError,
+     } as ChatMessageError;
+     return { data: errorData, id: 'first_chunk_error', type: 'error' };
+   }
+
    // maybe need another structure to add support for multiple choices

    try {

@@ -97,7 +115,7 @@
          'chat response streaming chunk parse error, please contact your API Provider to fix it.',
        context: { error: { message: err.message, name: err.name }, chunk },
      },
-     type: 'StreamChunkError',
+     type: errorName,
    } as ChatMessageError;
    /* eslint-enable */

@@ -105,16 +123,31 @@
    }
  };

+ export interface OpenAIStreamOptions {
+   bizErrorTypeTransformer?: (error: {
+     message: string;
+     name: string;
+   }) => ILobeAgentRuntimeErrorType | undefined;
+   callbacks?: ChatStreamCallbacks;
+   provider?: string;
+ }
+
  export const OpenAIStream = (
    stream: Stream<OpenAI.ChatCompletionChunk> | ReadableStream,
-   callbacks?: ChatStreamCallbacks,
+   { callbacks, provider, bizErrorTypeTransformer }: OpenAIStreamOptions = {},
  ) => {
    const streamStack: StreamStack = { id: '' };

    const readableStream =
      stream instanceof ReadableStream ? stream : convertIterableToStream(stream);

-   return readableStream
-     .pipeThrough(createSSEProtocolTransformer(transformOpenAIStream, streamStack))
-     .pipeThrough(createCallbacksTransformer(callbacks));
+   return (
+     readableStream
+       // 1. handle the first error if exist
+       // provider like huggingface or minimax will return error in the stream,
+       // so in the first Transformer, we need to handle the error
+       .pipeThrough(createFirstErrorHandleTransformer(bizErrorTypeTransformer, provider))
+       .pipeThrough(createSSEProtocolTransformer(transformOpenAIStream, streamStack))
+       .pipeThrough(createCallbacksTransformer(callbacks))
+   );
  };
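Callers now pass a single OpenAIStreamOptions object rather than a bare callbacks argument. A minimal caller-side sketch of the new shape (the raw stream, the callback bodies, and the error-matching rule are hypothetical; the option names come from the OpenAIStreamOptions interface above, and the relative import paths assume a file directly under src/libs/agent-runtime):

    import OpenAI from 'openai';
    import type { Stream } from 'openai/streaming';

    import { AgentRuntimeErrorType } from './error';
    import { OpenAIStream } from './utils/streams/openai';

    // stands in for the SDK stream from client.chat.completions.create({ ..., stream: true })
    declare const rawStream: Stream<OpenAI.ChatCompletionChunk>;

    const protocolStream = OpenAIStream(rawStream, {
      // callbacks are now nested under `callbacks` instead of being the second argument itself
      callbacks: {
        onStart: () => console.log('stream started'),
        onCompletion: (completion) => console.log('completion:', completion),
      },
      // attached to first-chunk error payloads emitted by the new transformer
      provider: 'huggingface',
      // optional mapping from a raw provider error to a runtime error type
      bizErrorTypeTransformer: (error) =>
        error.message.includes('authorization')
          ? AgentRuntimeErrorType.PermissionDenied
          : undefined,
    });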
package/src/libs/agent-runtime/utils/streams/protocol.ts

@@ -1,7 +1,7 @@
- import { readableFromAsyncIterable } from 'ai';
-
  import { ChatStreamCallbacks } from '@/libs/agent-runtime';

+ import { AgentRuntimeErrorType } from '../../error';
+
  export interface StreamStack {
    id: string;
    tool?: {

@@ -38,17 +38,52 @@ export interface StreamProtocolToolCallChunk {
  export const generateToolCallId = (index: number, functionName?: string) =>
    `${functionName || 'unknown_tool_call'}_${index}`;

- export const chatStreamable = async function* <T>(stream: AsyncIterable<T>) {
+ const chatStreamable = async function* <T>(stream: AsyncIterable<T>) {
    for await (const response of stream) {
      yield response;
    }
  };

+ const ERROR_CHUNK_PREFIX = '%FIRST_CHUNK_ERROR%: ';
  // make the response to the streamable format
  export const convertIterableToStream = <T>(stream: AsyncIterable<T>) => {
-   return readableFromAsyncIterable(chatStreamable(stream));
+   const iterable = chatStreamable(stream);
+
+   // copy from https://github.com/vercel/ai/blob/d3aa5486529e3d1a38b30e3972b4f4c63ea4ae9a/packages/ai/streams/ai-stream.ts#L284
+   // and add an error handle
+   let it = iterable[Symbol.asyncIterator]();
+
+   return new ReadableStream<T>({
+     async cancel(reason) {
+       await it.return?.(reason);
+     },
+     async pull(controller) {
+       const { done, value } = await it.next();
+       if (done) controller.close();
+       else controller.enqueue(value);
+     },
+
+     async start(controller) {
+       try {
+         const { done, value } = await it.next();
+         if (done) controller.close();
+         else controller.enqueue(value);
+       } catch (e) {
+         const error = e as Error;
+
+         controller.enqueue(
+           (ERROR_CHUNK_PREFIX +
+             JSON.stringify({ message: error.message, name: error.name, stack: error.stack })) as T,
+         );
+         controller.close();
+       }
+     },
+   });
  };

+ /**
+  * Create a transformer to convert the response into an SSE format
+  */
  export const createSSEProtocolTransformer = (
    transformer: (chunk: any, stack: StreamStack) => StreamProtocolChunk,
    streamStack?: StreamStack,

@@ -111,3 +146,27 @@ export function createCallbacksTransformer(cb: ChatStreamCallbacks | undefined)
      },
    });
  }
+
+ export const FIRST_CHUNK_ERROR_KEY = '_isFirstChunkError';
+
+ export const createFirstErrorHandleTransformer = (
+   errorHandler?: (errorJson: any) => any,
+   provider?: string,
+ ) => {
+   return new TransformStream({
+     transform(chunk, controller) {
+       if (chunk.toString().startsWith(ERROR_CHUNK_PREFIX)) {
+         const errorData = JSON.parse(chunk.toString().replace(ERROR_CHUNK_PREFIX, ''));
+
+         controller.enqueue({
+           ...errorData,
+           [FIRST_CHUNK_ERROR_KEY]: true,
+           errorType: errorHandler?.(errorData) || AgentRuntimeErrorType.ProviderBizError,
+           provider,
+         });
+       } else {
+         controller.enqueue(chunk);
+       }
+     },
+   });
+ };
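Read together, these protocol.ts changes supply the first-chunk error path used in openai.ts above: if the very first pull of the provider iterable rejects (as HuggingFace or Minimax can when a request is refused), convertIterableToStream enqueues a '%FIRST_CHUNK_ERROR%: {...}' string instead of erroring the stream, and createFirstErrorHandleTransformer turns that marker back into an object tagged with FIRST_CHUNK_ERROR_KEY so transformOpenAIStream can emit an SSE error event. A small sketch of the flow, with a hypothetical failing generator and error-matching rule (imports written as if from a sibling file in utils/streams):

    import { AgentRuntimeErrorType } from '../../error';
    import { convertIterableToStream, createFirstErrorHandleTransformer } from './protocol';

    // hypothetical provider iterable that rejects before yielding anything
    async function* failingProviderStream() {
      throw new Error('Invalid credentials in request');
    }

    const stream = convertIterableToStream(failingProviderStream())
      // converts the '%FIRST_CHUNK_ERROR%: ...' marker chunk into an object carrying
      // FIRST_CHUNK_ERROR_KEY, an errorType and the provider name
      .pipeThrough(
        createFirstErrorHandleTransformer(
          (error) =>
            error.message.includes('credentials')
              ? AgentRuntimeErrorType.PermissionDenied
              : undefined,
          'huggingface',
        ),
      );

    // downstream, transformOpenAIStream sees FIRST_CHUNK_ERROR_KEY on the chunk and emits
    // an `error` SSE event with id `first_chunk_error` (see the tests above)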
package/src/locales/default/error.ts

@@ -80,8 +80,8 @@ export default {
    LocationNotSupportError:
      '很抱歉,你的所在地区不支持此模型服务,可能是由于区域限制或服务未开通。请确认当前地区是否支持使用此服务,或尝试使用切换到其他地区后重试。',
    QuotaLimitReached:
-     '很抱歉,当前 Token 用量或请求次数已达该秘钥的配额(quota)上限,请增加该秘钥的配额或稍后再试',
-
+     '很抱歉,当前 Token 用量或请求次数已达该密钥的配额(quota)上限,请增加该密钥的配额或稍后再试',
+   PermissionDenied: '很抱歉,你没有权限访问该服务,请检查你的密钥是否有访问权限',
    InvalidProviderAPIKey: '{{provider}} API Key 不正确或为空,请检查 {{provider}} API Key 后重试',
    ProviderBizError: '请求 {{provider}} 服务出错,请根据以下信息排查或重试',
    /**
package/src/locales/default/modelProvider.ts

@@ -54,11 +54,18 @@ export default {
    },
    github: {
      personalAccessToken: {
-       desc: '填入你的 Github PAT,点击[这里](https://github.com/settings/tokens) 创建',
+       desc: '填入你的 Github PAT,点击 [这里](https://github.com/settings/tokens) 创建',
        placeholder: 'ghp_xxxxxx',
        title: 'Github PAT',
      },
    },
+   huggingface: {
+     accessToken: {
+       desc: '填入你的 HuggingFace Token,点击 [这里](https://huggingface.co/settings/tokens) 创建',
+       placeholder: 'hf_xxxxxxxxx',
+       title: 'HuggingFace Token',
+     },
+   },
    ollama: {
      checker: {
        desc: '测试代理地址是否正确填写',
package/src/server/globalConfig/index.ts

@@ -9,6 +9,7 @@ import {
    GithubProviderCard,
    GoogleProviderCard,
    GroqProviderCard,
+   HuggingFaceProviderCard,
    HunyuanProviderCard,
    NovitaProviderCard,
    OllamaProviderCard,

@@ -98,6 +99,9 @@ export const getServerGlobalConfig = () => {
    FIREWORKSAI_MODEL_LIST,

    ENABLED_WENXIN,
+
+   ENABLED_HUGGINGFACE,
+   HUGGINGFACE_MODEL_LIST,
  } = getLLMConfig();

  const config: GlobalServerConfig = {

@@ -166,6 +170,14 @@ export const getServerGlobalConfig = () => {
      modelString: GROQ_MODEL_LIST,
    }),
  },
+ huggingface: {
+   enabled: ENABLED_HUGGINGFACE,
+   enabledModels: extractEnabledModels(HUGGINGFACE_MODEL_LIST),
+   serverModelCards: transformToChatModelCards({
+     defaultChatModels: HuggingFaceProviderCard.chatModels,
+     modelString: HUGGINGFACE_MODEL_LIST,
+   }),
+ },
  hunyuan: {
    enabled: ENABLED_HUNYUAN,
    enabledModels: extractEnabledModels(HUNYUAN_MODEL_LIST),

@@ -202,7 +214,6 @@ export const getServerGlobalConfig = () => {
      modelString: OPENAI_MODEL_LIST,
    }),
  },
-
  openrouter: {
    enabled: ENABLED_OPENROUTER,
    enabledModels: extractEnabledModels(OPENROUTER_MODEL_LIST),
package/src/server/modules/AgentRuntime/index.ts

@@ -225,6 +225,16 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {

      return { apiKey, baseURL };
    }
+
+   case ModelProvider.HuggingFace: {
+     const { HUGGINGFACE_PROXY_URL, HUGGINGFACE_API_KEY } = getLLMConfig();
+
+     const apiKey = apiKeyManager.pick(payload?.apiKey || HUGGINGFACE_API_KEY);
+     const baseURL = payload?.endpoint || HUGGINGFACE_PROXY_URL;
+
+     return { apiKey, baseURL };
+   }
+
    case ModelProvider.Upstage: {
      const { UPSTAGE_API_KEY } = getLLMConfig();
package/src/services/_url.ts

@@ -1,5 +1,3 @@
- // TODO: 未来所有核心路由需要迁移到 trpc,部分不需要迁移的则走 webapi
-
  /* eslint-disable sort-keys-fix/sort-keys-fix */
  import { transform } from 'lodash-es';

@@ -17,9 +15,6 @@ const mapWithBasePath = <T extends object>(apis: T): T => {
  };

  export const API_ENDPOINTS = mapWithBasePath({
-   // chat
-   chat: (provider: string) => withBasePath(`/api/chat/${provider}`),
-   chatModels: (provider: string) => withBasePath(`/api/chat/models/${provider}`),
    oauth: '/api/auth',

    proxy: '/webapi/proxy',

@@ -35,6 +30,10 @@ export const API_ENDPOINTS = mapWithBasePath({
    // trace
    trace: '/webapi/trace',

+   // chat
+   chat: (provider: string) => withBasePath(`/webapi/chat/${provider}`),
+   chatModels: (provider: string) => withBasePath(`/webapi/chat/models/${provider}`),
+
    // image
    images: (provider: string) => `/webapi/text-to-image/${provider}`,
package/src/types/user/settings/keyVaults.ts

@@ -33,6 +33,7 @@ export interface UserKeyVaults {
    github?: OpenAICompatibleKeyVault;
    google?: OpenAICompatibleKeyVault;
    groq?: OpenAICompatibleKeyVault;
+   huggingface?: OpenAICompatibleKeyVault;
    hunyuan?: OpenAICompatibleKeyVault;
    lobehub?: any;
    minimax?: OpenAICompatibleKeyVault;
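For reference, the _url.ts change above pairs with the route moves in files 96-108: the chat handlers now live under /webapi rather than /api. Assuming the project's '@/' alias for src and no base path configured, callers resolve the relocated endpoints like this:

    import { API_ENDPOINTS } from '@/services/_url';

    API_ENDPOINTS.chat('openai');            // '/webapi/chat/openai' (previously '/api/chat/openai')
    API_ENDPOINTS.chatModels('huggingface'); // '/webapi/chat/models/huggingface'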