@lobehub/chat 1.106.8 → 1.107.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. package/.env.example +9 -0
  2. package/CHANGELOG.md +25 -0
  3. package/Dockerfile +2 -0
  4. package/Dockerfile.database +2 -0
  5. package/Dockerfile.pglite +2 -0
  6. package/changelog/v1.json +9 -0
  7. package/docs/usage/providers/aihubmix.zh-CN.mdx +101 -0
  8. package/locales/ar/modelProvider.json +1 -0
  9. package/locales/ar/providers.json +3 -0
  10. package/locales/bg-BG/modelProvider.json +1 -0
  11. package/locales/bg-BG/providers.json +3 -0
  12. package/locales/de-DE/modelProvider.json +1 -0
  13. package/locales/de-DE/providers.json +3 -0
  14. package/locales/en-US/modelProvider.json +1 -0
  15. package/locales/en-US/providers.json +3 -0
  16. package/locales/es-ES/modelProvider.json +1 -0
  17. package/locales/es-ES/providers.json +3 -0
  18. package/locales/fa-IR/modelProvider.json +1 -0
  19. package/locales/fa-IR/providers.json +3 -0
  20. package/locales/fr-FR/modelProvider.json +1 -0
  21. package/locales/fr-FR/providers.json +3 -0
  22. package/locales/it-IT/modelProvider.json +1 -0
  23. package/locales/it-IT/providers.json +3 -0
  24. package/locales/ja-JP/modelProvider.json +1 -0
  25. package/locales/ja-JP/providers.json +3 -0
  26. package/locales/ko-KR/modelProvider.json +1 -0
  27. package/locales/ko-KR/providers.json +3 -0
  28. package/locales/nl-NL/modelProvider.json +1 -0
  29. package/locales/nl-NL/providers.json +3 -0
  30. package/locales/pl-PL/modelProvider.json +1 -0
  31. package/locales/pl-PL/providers.json +3 -0
  32. package/locales/pt-BR/modelProvider.json +1 -0
  33. package/locales/pt-BR/providers.json +3 -0
  34. package/locales/ru-RU/modelProvider.json +1 -0
  35. package/locales/ru-RU/providers.json +3 -0
  36. package/locales/tr-TR/modelProvider.json +1 -0
  37. package/locales/tr-TR/providers.json +3 -0
  38. package/locales/vi-VN/modelProvider.json +1 -0
  39. package/locales/vi-VN/providers.json +3 -0
  40. package/locales/zh-CN/modelProvider.json +1 -0
  41. package/locales/zh-CN/providers.json +3 -0
  42. package/locales/zh-TW/modelProvider.json +1 -0
  43. package/locales/zh-TW/providers.json +3 -0
  44. package/package.json +1 -2
  45. package/src/app/(backend)/middleware/auth/index.ts +2 -2
  46. package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +12 -12
  47. package/src/app/(backend)/webapi/chat/[provider]/route.ts +6 -6
  48. package/src/app/(backend)/webapi/chat/vertexai/route.ts +2 -2
  49. package/src/app/(backend)/webapi/models/[provider]/pull/route.ts +2 -2
  50. package/src/app/(backend)/webapi/models/[provider]/route.ts +2 -2
  51. package/src/app/(backend)/webapi/text-to-image/[provider]/route.ts +2 -2
  52. package/src/app/[variants]/(main)/settings/provider/(detail)/github/page.tsx +2 -2
  53. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +17 -2
  54. package/src/config/aiModels/aihubmix.ts +164 -0
  55. package/src/config/aiModels/index.ts +3 -0
  56. package/src/config/llm.ts +6 -0
  57. package/src/config/modelProviders/aihubmix.ts +18 -0
  58. package/src/config/modelProviders/huggingface.ts +1 -0
  59. package/src/config/modelProviders/index.ts +4 -0
  60. package/src/libs/model-runtime/ModelRuntime.test.ts +9 -10
  61. package/src/libs/model-runtime/ModelRuntime.ts +2 -3
  62. package/src/libs/model-runtime/RouterRuntime/baseRuntimeMap.ts +15 -0
  63. package/src/libs/model-runtime/RouterRuntime/createRuntime.ts +193 -0
  64. package/src/libs/model-runtime/RouterRuntime/index.ts +9 -0
  65. package/src/libs/model-runtime/aihubmix/index.ts +118 -0
  66. package/src/libs/model-runtime/index.ts +1 -1
  67. package/src/libs/model-runtime/openrouter/index.ts +2 -2
  68. package/src/libs/model-runtime/runtimeMap.ts +2 -0
  69. package/src/libs/model-runtime/types/type.ts +1 -0
  70. package/src/locales/default/modelProvider.ts +1 -0
  71. package/src/server/modules/{AgentRuntime → ModelRuntime}/index.test.ts +64 -67
  72. package/src/server/modules/{AgentRuntime → ModelRuntime}/index.ts +3 -3
  73. package/src/server/routers/async/file.ts +2 -2
  74. package/src/server/routers/async/image.ts +2 -2
  75. package/src/server/routers/async/ragEval.ts +2 -2
  76. package/src/server/routers/lambda/chunk.ts +3 -3
  77. package/src/services/__tests__/chat.test.ts +21 -21
  78. package/src/services/chat.ts +2 -2
  79. package/src/types/aiProvider.ts +1 -0
  80. package/src/types/llm.ts +4 -0
  81. package/src/types/user/settings/keyVaults.ts +1 -0
  82. package/src/app/[variants]/(main)/settings/provider/(detail)/huggingface/page.tsx +0 -67
  83. package/src/server/modules/{AgentRuntime → ModelRuntime}/apiKeyManager.test.ts +0 -0
  84. package/src/server/modules/{AgentRuntime → ModelRuntime}/apiKeyManager.ts +0 -0
  85. package/src/server/modules/{AgentRuntime → ModelRuntime}/trace.ts +0 -0
package/src/libs/model-runtime/RouterRuntime/createRuntime.ts
@@ -0,0 +1,193 @@
+ /**
+  * @see https://github.com/lobehub/lobe-chat/discussions/6563
+  */
+ import OpenAI, { ClientOptions } from 'openai';
+ import { Stream } from 'openai/streaming';
+
+ import { ILobeAgentRuntimeErrorType } from '@/libs/model-runtime';
+ import { CreateImagePayload, CreateImageResponse } from '@/libs/model-runtime/types/image';
+ import {
+   CreateImageOptions,
+   CustomClientOptions,
+ } from '@/libs/model-runtime/utils/openaiCompatibleFactory';
+ import type { ChatModelCard } from '@/types/llm';
+
+ import { LobeRuntimeAI } from '../BaseAI';
+ import { LobeOpenAI } from '../openai';
+ import {
+   type ChatCompletionErrorPayload,
+   ChatMethodOptions,
+   ChatStreamCallbacks,
+   ChatStreamPayload,
+   EmbeddingsOptions,
+   EmbeddingsPayload,
+   TextToImagePayload,
+   TextToSpeechPayload,
+ } from '../types';
+ import { baseRuntimeMap } from './baseRuntimeMap';
+
+ export interface RuntimeItem {
+   id: string;
+   models?: string[];
+   runtime: LobeRuntimeAI;
+ }
+
+ interface ProviderIniOptions extends Record<string, any> {
+   accessKeyId?: string;
+   accessKeySecret?: string;
+   apiKey?: string;
+   apiVersion?: string;
+   baseURL?: string;
+   baseURLOrAccountID?: string;
+   dangerouslyAllowBrowser?: boolean;
+   region?: string;
+   sessionToken?: string;
+ }
+
+ interface RouterInstance {
+   apiType: keyof typeof baseRuntimeMap;
+   models?: string[];
+   options: ProviderIniOptions;
+   runtime?: typeof LobeOpenAI;
+ }
+
+ type ConstructorOptions<T extends Record<string, any> = any> = ClientOptions & T;
+
+ interface CreateRouterRuntimeOptions<T extends Record<string, any> = any> {
+   apiKey?: string;
+   chatCompletion?: {
+     excludeUsage?: boolean;
+     handleError?: (
+       error: any,
+       options: ConstructorOptions<T>,
+     ) => Omit<ChatCompletionErrorPayload, 'provider'> | undefined;
+     handlePayload?: (
+       payload: ChatStreamPayload,
+       options: ConstructorOptions<T>,
+     ) => OpenAI.ChatCompletionCreateParamsStreaming;
+     handleStream?: (
+       stream: Stream<OpenAI.ChatCompletionChunk> | ReadableStream,
+       { callbacks, inputStartAt }: { callbacks?: ChatStreamCallbacks; inputStartAt?: number },
+     ) => ReadableStream;
+     handleStreamBizErrorType?: (error: {
+       message: string;
+       name: string;
+     }) => ILobeAgentRuntimeErrorType | undefined;
+     handleTransformResponseToStream?: (
+       data: OpenAI.ChatCompletion,
+     ) => ReadableStream<OpenAI.ChatCompletionChunk>;
+     noUserId?: boolean;
+   };
+   constructorOptions?: ConstructorOptions<T>;
+   createImage?: (
+     payload: CreateImagePayload,
+     options: CreateImageOptions,
+   ) => Promise<CreateImageResponse>;
+   customClient?: CustomClientOptions<T>;
+   debug?: {
+     chatCompletion: () => boolean;
+     responses?: () => boolean;
+   };
+   errorType?: {
+     bizError: ILobeAgentRuntimeErrorType;
+     invalidAPIKey: ILobeAgentRuntimeErrorType;
+   };
+   id: string;
+   models?:
+     | ((params: { client: OpenAI }) => Promise<ChatModelCard[]>)
+     | {
+         transformModel?: (model: OpenAI.Model) => ChatModelCard;
+       };
+   responses?: {
+     handlePayload?: (
+       payload: ChatStreamPayload,
+       options: ConstructorOptions<T>,
+     ) => ChatStreamPayload;
+   };
+   routers: RouterInstance[];
+ }
+
+ export const createRouterRuntime = ({
+   id,
+   routers,
+   apiKey: DEFAULT_API_LEY,
+   ...params
+ }: CreateRouterRuntimeOptions) => {
+   return class UniformRuntime implements LobeRuntimeAI {
+     private _runtimes: RuntimeItem[];
+     private _options: ClientOptions & Record<string, any>;
+
+     constructor(options: ClientOptions & Record<string, any> = {}) {
+       const _options = {
+         ...options,
+         apiKey: options.apiKey?.trim() || DEFAULT_API_LEY,
+         baseURL: options.baseURL?.trim(),
+       };
+
+       if (routers.length === 0) {
+         throw new Error('empty providers');
+       }
+
+       this._runtimes = routers.map((router) => {
+         const providerAI = router.runtime ?? baseRuntimeMap[router.apiType] ?? LobeOpenAI;
+
+         const finalOptions = { ...router.options, ...options };
+         // @ts-ignore
+         const runtime: LobeRuntimeAI = new providerAI({ ...params, ...finalOptions, id });
+
+         return { id: router.apiType, models: router.models, runtime };
+       });
+
+       this._options = _options;
+     }
+
+     // Check whether the requested model matches a specific runtime; otherwise default to the last runtime.
+     getRuntimeByModel(model: string) {
+       const runtimeItem =
+         this._runtimes.find((runtime) => runtime.models && runtime.models.includes(model)) ||
+         this._runtimes.at(-1)!;
+
+       return runtimeItem.runtime;
+     }
+
+     async chat(payload: ChatStreamPayload, options?: ChatMethodOptions) {
+       try {
+         const runtime = this.getRuntimeByModel(payload.model);
+
+         return await runtime.chat!(payload, options);
+       } catch (e) {
+         if (this._options.chat?.handleError) {
+           const error = this._options.chat.handleError(e);
+
+           if (error) {
+             throw error;
+           }
+         }
+
+         throw e;
+       }
+     }
+
+     async textToImage(payload: TextToImagePayload) {
+       const runtime = this.getRuntimeByModel(payload.model);
+
+       return runtime.textToImage!(payload);
+     }
+
+     async models() {
+       return this._runtimes[0].runtime.models?.();
+     }
+
+     async embeddings(payload: EmbeddingsPayload, options?: EmbeddingsOptions) {
+       const runtime = this.getRuntimeByModel(payload.model);
+
+       return runtime.embeddings!(payload, options);
+     }
+
+     async textToSpeech(payload: TextToSpeechPayload, options?: EmbeddingsOptions) {
+       const runtime = this.getRuntimeByModel(payload.model);
+
+       return runtime.textToSpeech!(payload, options);
+     }
+   };
+ };
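The factory returns a class (`UniformRuntime`) rather than an instance, and `getRuntimeByModel` falls back to the last router whenever no `models` list contains the requested ID. A minimal, hypothetical usage sketch follows; the provider id, model IDs, and URLs are illustrative assumptions, not values from this diff:

```ts
import { createRouterRuntime } from '@/libs/model-runtime/RouterRuntime';

// Two backends: requests for the listed Claude model hit the anthropic-compatible
// runtime; every other model falls through to the last (openai-compatible) router.
const ExampleRouterRuntime = createRouterRuntime({
  id: 'example-router', // illustrative provider id
  routers: [
    {
      apiType: 'anthropic',
      models: ['claude-3-5-sonnet'],
      options: { baseURL: 'https://gateway.example.com' },
    },
    { apiType: 'openai', options: { baseURL: 'https://gateway.example.com/v1' } },
  ],
});

// Routing happens per request inside the generated class:
const runtime = new ExampleRouterRuntime({ apiKey: 'sk-example' });
// 'claude-3-5-sonnet' resolves to the anthropic router; 'gpt-4o' uses the openai fallback.
await runtime.chat({ messages: [{ content: 'Hi', role: 'user' }], model: 'gpt-4o', temperature: 1 });
```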
package/src/libs/model-runtime/RouterRuntime/index.ts
@@ -0,0 +1,9 @@
+ import { LobeRuntimeAI } from '../BaseAI';
+
+ export interface RuntimeItem {
+   id: string;
+   models?: string[];
+   runtime: LobeRuntimeAI;
+ }
+
+ export { createRouterRuntime } from './createRuntime';
package/src/libs/model-runtime/aihubmix/index.ts
@@ -0,0 +1,118 @@
+ import urlJoin from 'url-join';
+
+ import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+ import AiHubMixModels from '@/config/aiModels/aihubmix';
+ import type { ChatModelCard } from '@/types/llm';
+
+ import { createRouterRuntime } from '../RouterRuntime';
+ import { ModelProvider } from '../types';
+
+ export interface AiHubMixModelCard {
+   created: number;
+   id: string;
+   object: string;
+   owned_by: string;
+ }
+
+ const baseURL = 'https://aihubmix.com';
+
+ export const LobeAiHubMixAI = createRouterRuntime({
+   constructorOptions: {
+     defaultHeaders: {
+       'APP-Code': 'LobeHub',
+     },
+   },
+   debug: {
+     chatCompletion: () => process.env.DEBUG_AIHUBMIX_CHAT_COMPLETION === '1',
+   },
+   id: ModelProvider.AiHubMix,
+   models: async ({ client }) => {
+     const functionCallKeywords = [
+       'gpt-4',
+       'gpt-3.5',
+       'claude',
+       'gemini',
+       'qwen',
+       'deepseek',
+       'llama',
+     ];
+
+     const visionKeywords = [
+       'gpt-4o',
+       'gpt-4-vision',
+       'claude-3',
+       'claude-4',
+       'gemini-pro-vision',
+       'qwen-vl',
+       'llava',
+     ];
+
+     const reasoningKeywords = [
+       'o1',
+       'deepseek-r1',
+       'qwq',
+       'claude-opus-4',
+       'claude-sonnet-4',
+       'claude-3-5-sonnet',
+       'claude-3-5-haiku',
+     ];
+
+     try {
+       const modelsPage = (await client.models.list()) as any;
+       const modelList: AiHubMixModelCard[] = modelsPage.data || [];
+
+       return modelList
+         .map((model) => {
+           const knownModel = AiHubMixModels.find(
+             (m) => model.id.toLowerCase() === m.id.toLowerCase(),
+           );
+
+           const modelId = model.id.toLowerCase();
+
+           return {
+             contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+             displayName: knownModel?.displayName ?? model.id,
+             enabled: knownModel?.enabled || false,
+             functionCall:
+               functionCallKeywords.some((keyword) => modelId.includes(keyword)) ||
+               knownModel?.abilities?.functionCall ||
+               false,
+             id: model.id,
+             reasoning:
+               reasoningKeywords.some((keyword) => modelId.includes(keyword)) ||
+               knownModel?.abilities?.reasoning ||
+               false,
+             vision:
+               visionKeywords.some((keyword) => modelId.includes(keyword)) ||
+               knownModel?.abilities?.vision ||
+               false,
+           };
+         })
+         .filter(Boolean) as ChatModelCard[];
+     } catch (error) {
+       console.warn(
+         'Failed to fetch AiHubMix models. Please ensure your AiHubMix API key is valid:',
+         error,
+       );
+       return [];
+     }
+   },
+   routers: [
+     {
+       apiType: 'anthropic',
+       models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
+         (id) => id.startsWith('claude') || id.startsWith('kimi-k2'),
+       ),
+       options: { baseURL },
+     },
+     {
+       apiType: 'google',
+       models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter((id) => id.startsWith('gemini')),
+       options: { baseURL: urlJoin(baseURL, '/gemini') },
+     },
+     {
+       apiType: 'openai',
+       options: { baseURL: urlJoin(baseURL, '/v1') },
+     },
+   ],
+ });
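The router lists above are built from `LOBE_DEFAULT_MODEL_LIST` by ID prefix at definition time: `claude*` and `kimi-k2*` IDs map to the Anthropic-compatible endpoint at the root `baseURL`, `gemini*` IDs map to `/gemini`, and any other model falls through to the trailing OpenAI-compatible router at `/v1`. A hedged illustration, assuming the listed model IDs exist in `LOBE_DEFAULT_MODEL_LIST`:

```ts
const aihubmix = new LobeAiHubMixAI({ apiKey: 'your-aihubmix-key' });

// 'claude-sonnet-4' → anthropic runtime at https://aihubmix.com
// 'gemini-2.5-pro'  → google runtime at https://aihubmix.com/gemini
// 'gpt-4o'          → openai runtime at https://aihubmix.com/v1 (the fallback router)
await aihubmix.chat({ messages: [{ content: 'Hi', role: 'user' }], model: 'gpt-4o', temperature: 1 });
```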
package/src/libs/model-runtime/index.ts
@@ -10,7 +10,7 @@ export { LobeGroq } from './groq';
  export * from './helpers';
  export { LobeMinimaxAI } from './minimax';
  export { LobeMistralAI } from './mistral';
- export { default as AgentRuntime } from './ModelRuntime';
+ export { ModelRuntime } from './ModelRuntime';
  export { LobeMoonshotAI } from './moonshot';
  export { LobeOllamaAI } from './ollama';
  export { LobeOpenAI } from './openai';
package/src/libs/model-runtime/openrouter/index.ts
@@ -49,8 +49,8 @@ export const LobeOpenRouterAI = createOpenAICompatibleRuntime({
    },
    constructorOptions: {
      defaultHeaders: {
-       'HTTP-Referer': 'https://chat-preview.lobehub.com',
-       'X-Title': 'Lobe Chat',
+       'HTTP-Referer': 'https://lobehub.com',
+       'X-Title': 'LobeHub',
      },
    },
    debug: {
package/src/libs/model-runtime/runtimeMap.ts
@@ -1,5 +1,6 @@
  import { LobeAi21AI } from './ai21';
  import { LobeAi360AI } from './ai360';
+ import { LobeAiHubMixAI } from './aihubmix';
  import { LobeAnthropicAI } from './anthropic';
  import { LobeAzureOpenAI } from './azureOpenai';
  import { LobeAzureAI } from './azureai';
@@ -56,6 +57,7 @@ import { LobeZhipuAI } from './zhipu';
  export const providerRuntimeMap = {
    ai21: LobeAi21AI,
    ai360: LobeAi360AI,
+   aihubmix: LobeAiHubMixAI,
    anthropic: LobeAnthropicAI,
    azure: LobeAzureOpenAI,
    azureai: LobeAzureAI,
package/src/libs/model-runtime/types/type.ts
@@ -30,6 +30,7 @@ export interface CreateChatCompletionOptions {
  export enum ModelProvider {
    Ai21 = 'ai21',
    Ai360 = 'ai360',
+   AiHubMix = 'aihubmix',
    Anthropic = 'anthropic',
    Azure = 'azure',
    AzureAI = 'azureai',
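Together with the `providerRuntimeMap` entry above, the new enum member is what lets the provider be resolved by its key. A small hedged sketch of that lookup; the import paths are assumed from the file list, not shown in this diff:

```ts
import { providerRuntimeMap } from '@/libs/model-runtime/runtimeMap';
import { ModelProvider } from '@/libs/model-runtime/types/type';

// ModelProvider.AiHubMix === 'aihubmix', which is also the key added to providerRuntimeMap.
const RuntimeClass = providerRuntimeMap[ModelProvider.AiHubMix]; // → LobeAiHubMixAI
const instance = new RuntimeClass({ apiKey: 'your-aihubmix-key' });
```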
package/src/locales/default/modelProvider.ts
@@ -192,6 +192,7 @@ export default {
    aesGcm: '您的秘钥与代理地址等将使用 <1>AES-GCM</1> 加密算法进行加密',
    apiKey: {
      desc: '请填写你的 {{name}} API Key',
+     descWithUrl: '请填写你的 {{name}} API Key,<3>点此获取</3>',
      placeholder: '{{name}} API Key',
      title: 'API Key',
    },