@lobehub/chat 1.106.8 → 1.107.0

This diff reflects the changes between publicly available package versions as they appear in their respective public registries; it is provided for informational purposes only.
Files changed (85)
  1. package/.env.example +9 -0
  2. package/CHANGELOG.md +25 -0
  3. package/Dockerfile +2 -0
  4. package/Dockerfile.database +2 -0
  5. package/Dockerfile.pglite +2 -0
  6. package/changelog/v1.json +9 -0
  7. package/docs/usage/providers/aihubmix.zh-CN.mdx +101 -0
  8. package/locales/ar/modelProvider.json +1 -0
  9. package/locales/ar/providers.json +3 -0
  10. package/locales/bg-BG/modelProvider.json +1 -0
  11. package/locales/bg-BG/providers.json +3 -0
  12. package/locales/de-DE/modelProvider.json +1 -0
  13. package/locales/de-DE/providers.json +3 -0
  14. package/locales/en-US/modelProvider.json +1 -0
  15. package/locales/en-US/providers.json +3 -0
  16. package/locales/es-ES/modelProvider.json +1 -0
  17. package/locales/es-ES/providers.json +3 -0
  18. package/locales/fa-IR/modelProvider.json +1 -0
  19. package/locales/fa-IR/providers.json +3 -0
  20. package/locales/fr-FR/modelProvider.json +1 -0
  21. package/locales/fr-FR/providers.json +3 -0
  22. package/locales/it-IT/modelProvider.json +1 -0
  23. package/locales/it-IT/providers.json +3 -0
  24. package/locales/ja-JP/modelProvider.json +1 -0
  25. package/locales/ja-JP/providers.json +3 -0
  26. package/locales/ko-KR/modelProvider.json +1 -0
  27. package/locales/ko-KR/providers.json +3 -0
  28. package/locales/nl-NL/modelProvider.json +1 -0
  29. package/locales/nl-NL/providers.json +3 -0
  30. package/locales/pl-PL/modelProvider.json +1 -0
  31. package/locales/pl-PL/providers.json +3 -0
  32. package/locales/pt-BR/modelProvider.json +1 -0
  33. package/locales/pt-BR/providers.json +3 -0
  34. package/locales/ru-RU/modelProvider.json +1 -0
  35. package/locales/ru-RU/providers.json +3 -0
  36. package/locales/tr-TR/modelProvider.json +1 -0
  37. package/locales/tr-TR/providers.json +3 -0
  38. package/locales/vi-VN/modelProvider.json +1 -0
  39. package/locales/vi-VN/providers.json +3 -0
  40. package/locales/zh-CN/modelProvider.json +1 -0
  41. package/locales/zh-CN/providers.json +3 -0
  42. package/locales/zh-TW/modelProvider.json +1 -0
  43. package/locales/zh-TW/providers.json +3 -0
  44. package/package.json +1 -2
  45. package/src/app/(backend)/middleware/auth/index.ts +2 -2
  46. package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +12 -12
  47. package/src/app/(backend)/webapi/chat/[provider]/route.ts +6 -6
  48. package/src/app/(backend)/webapi/chat/vertexai/route.ts +2 -2
  49. package/src/app/(backend)/webapi/models/[provider]/pull/route.ts +2 -2
  50. package/src/app/(backend)/webapi/models/[provider]/route.ts +2 -2
  51. package/src/app/(backend)/webapi/text-to-image/[provider]/route.ts +2 -2
  52. package/src/app/[variants]/(main)/settings/provider/(detail)/github/page.tsx +2 -2
  53. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +17 -2
  54. package/src/config/aiModels/aihubmix.ts +164 -0
  55. package/src/config/aiModels/index.ts +3 -0
  56. package/src/config/llm.ts +6 -0
  57. package/src/config/modelProviders/aihubmix.ts +18 -0
  58. package/src/config/modelProviders/huggingface.ts +1 -0
  59. package/src/config/modelProviders/index.ts +4 -0
  60. package/src/libs/model-runtime/ModelRuntime.test.ts +9 -10
  61. package/src/libs/model-runtime/ModelRuntime.ts +2 -3
  62. package/src/libs/model-runtime/RouterRuntime/baseRuntimeMap.ts +15 -0
  63. package/src/libs/model-runtime/RouterRuntime/createRuntime.ts +193 -0
  64. package/src/libs/model-runtime/RouterRuntime/index.ts +9 -0
  65. package/src/libs/model-runtime/aihubmix/index.ts +118 -0
  66. package/src/libs/model-runtime/index.ts +1 -1
  67. package/src/libs/model-runtime/openrouter/index.ts +2 -2
  68. package/src/libs/model-runtime/runtimeMap.ts +2 -0
  69. package/src/libs/model-runtime/types/type.ts +1 -0
  70. package/src/locales/default/modelProvider.ts +1 -0
  71. package/src/server/modules/{AgentRuntime → ModelRuntime}/index.test.ts +64 -67
  72. package/src/server/modules/{AgentRuntime → ModelRuntime}/index.ts +3 -3
  73. package/src/server/routers/async/file.ts +2 -2
  74. package/src/server/routers/async/image.ts +2 -2
  75. package/src/server/routers/async/ragEval.ts +2 -2
  76. package/src/server/routers/lambda/chunk.ts +3 -3
  77. package/src/services/__tests__/chat.test.ts +21 -21
  78. package/src/services/chat.ts +2 -2
  79. package/src/types/aiProvider.ts +1 -0
  80. package/src/types/llm.ts +4 -0
  81. package/src/types/user/settings/keyVaults.ts +1 -0
  82. package/src/app/[variants]/(main)/settings/provider/(detail)/huggingface/page.tsx +0 -67
  83. package/src/server/modules/{AgentRuntime → ModelRuntime}/apiKeyManager.test.ts +0 -0
  84. package/src/server/modules/{AgentRuntime → ModelRuntime}/apiKeyManager.ts +0 -0
  85. package/src/server/modules/{AgentRuntime → ModelRuntime}/trace.ts +0 -0
package/src/app/(backend)/middleware/auth/index.ts CHANGED
@@ -9,7 +9,7 @@ import {
   enableClerk,
 } from '@/const/auth';
 import { ClerkAuth } from '@/libs/clerk-auth';
-import { AgentRuntime, AgentRuntimeError, ChatCompletionErrorPayload } from '@/libs/model-runtime';
+import { AgentRuntimeError, ChatCompletionErrorPayload, ModelRuntime } from '@/libs/model-runtime';
 import { validateOIDCJWT } from '@/libs/oidc-provider/jwt';
 import { ChatErrorType } from '@/types/fetch';
 import { createErrorResponse } from '@/utils/errorResponse';
@@ -17,7 +17,7 @@ import { getXorPayload } from '@/utils/server/xor';
 
 import { checkAuthMethod } from './utils';
 
-type CreateRuntime = (jwtPayload: ClientSecretPayload) => AgentRuntime;
+type CreateRuntime = (jwtPayload: ClientSecretPayload) => ModelRuntime;
 type RequestOptions = { createRuntime?: CreateRuntime; params: Promise<{ provider: string }> };
 
 export type RequestHandler = (
package/src/app/(backend)/webapi/chat/[provider]/route.test.ts CHANGED
@@ -4,7 +4,7 @@ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
 import { checkAuthMethod } from '@/app/(backend)/middleware/auth/utils';
 import { LOBE_CHAT_AUTH_HEADER, OAUTH_AUTHORIZED } from '@/const/auth';
-import { AgentRuntime, LobeRuntimeAI } from '@/libs/model-runtime';
+import { LobeRuntimeAI, ModelRuntime } from '@/libs/model-runtime';
 import { ChatErrorType } from '@/types/fetch';
 import { getXorPayload } from '@/utils/server/xor';
 
@@ -57,10 +57,10 @@ afterEach(() => {
 
 describe('POST handler', () => {
   describe('init chat model', () => {
-    it('should initialize AgentRuntime correctly with valid authorization', async () => {
+    it('should initialize ModelRuntime correctly with valid authorization', async () => {
       const mockParams = Promise.resolve({ provider: 'test-provider' });
 
-      // set up mock return values for getJWTPayload and initAgentRuntimeWithUserPayload
+      // set up mock return values for getJWTPayload and initModelRuntimeWithUserPayload
      vi.mocked(getXorPayload).mockReturnValueOnce({
        accessCode: 'test-access-code',
        apiKey: 'test-api-key',
@@ -69,10 +69,10 @@ describe('POST handler', () => {
 
       const mockRuntime: LobeRuntimeAI = { baseURL: 'abc', chat: vi.fn() };
 
-      // migrate to new AgentRuntime init api
+      // migrate to new ModelRuntime init api
       const spy = vi
-        .spyOn(AgentRuntime, 'initializeWithProvider')
-        .mockResolvedValue(new AgentRuntime(mockRuntime));
+        .spyOn(ModelRuntime, 'initializeWithProvider')
+        .mockResolvedValue(new ModelRuntime(mockRuntime));
 
       // call the POST function
       await POST(request as unknown as Request, { params: mockParams });
@@ -111,14 +111,14 @@ describe('POST handler', () => {
       });
 
       const mockParams = Promise.resolve({ provider: 'test-provider' });
-      // set up the mock return value for initAgentRuntimeWithUserPayload
+      // set up the mock return value for initModelRuntimeWithUserPayload
       vi.mocked(getAuth).mockReturnValue({} as any);
       vi.mocked(checkAuthMethod).mockReset();
 
       const mockRuntime: LobeRuntimeAI = { baseURL: 'abc', chat: vi.fn() };
 
-      vi.spyOn(AgentRuntime, 'initializeWithProvider').mockResolvedValue(
-        new AgentRuntime(mockRuntime),
+      vi.spyOn(ModelRuntime, 'initializeWithProvider').mockResolvedValue(
+        new ModelRuntime(mockRuntime),
       );
 
       const request = new Request(new URL('https://test.com'), {
@@ -178,12 +178,12 @@ describe('POST handler', () => {
 
       const mockChatResponse: any = { success: true, message: 'Reply from agent' };
 
-      vi.spyOn(AgentRuntime.prototype, 'chat').mockResolvedValue(mockChatResponse);
+      vi.spyOn(ModelRuntime.prototype, 'chat').mockResolvedValue(mockChatResponse);
 
       const response = await POST(request as unknown as Request, { params: mockParams });
 
       expect(response).toEqual(mockChatResponse);
-      expect(AgentRuntime.prototype.chat).toHaveBeenCalledWith(mockChatPayload, {
+      expect(ModelRuntime.prototype.chat).toHaveBeenCalledWith(mockChatPayload, {
         user: 'abc',
         signal: expect.anything(),
       });
@@ -210,7 +210,7 @@ describe('POST handler', () => {
         errorMessage: 'Something went wrong',
       };
 
-      vi.spyOn(AgentRuntime.prototype, 'chat').mockRejectedValue(mockErrorResponse);
+      vi.spyOn(ModelRuntime.prototype, 'chat').mockRejectedValue(mockErrorResponse);
 
       const response = await POST(request, { params: mockParams });
 
package/src/app/(backend)/webapi/chat/[provider]/route.ts CHANGED
@@ -1,10 +1,10 @@
 import { checkAuth } from '@/app/(backend)/middleware/auth';
 import {
   AGENT_RUNTIME_ERROR_SET,
-  AgentRuntime,
   ChatCompletionErrorPayload,
+  ModelRuntime,
 } from '@/libs/model-runtime';
-import { createTraceOptions, initAgentRuntimeWithUserPayload } from '@/server/modules/AgentRuntime';
+import { createTraceOptions, initModelRuntimeWithUserPayload } from '@/server/modules/ModelRuntime';
 import { ChatErrorType } from '@/types/fetch';
 import { ChatStreamPayload } from '@/types/openai/chat';
 import { createErrorResponse } from '@/utils/errorResponse';
@@ -17,11 +17,11 @@ export const POST = checkAuth(async (req: Request, { params, jwtPayload, createR
 
   try {
     // ============ 1. init chat model ============ //
-    let agentRuntime: AgentRuntime;
+    let modelRuntime: ModelRuntime;
     if (createRuntime) {
-      agentRuntime = createRuntime(jwtPayload);
+      modelRuntime = createRuntime(jwtPayload);
     } else {
-      agentRuntime = await initAgentRuntimeWithUserPayload(provider, jwtPayload);
+      modelRuntime = await initModelRuntimeWithUserPayload(provider, jwtPayload);
     }
 
     // ============ 2. create chat completion ============ //
@@ -36,7 +36,7 @@ export const POST = checkAuth(async (req: Request, { params, jwtPayload, createR
       traceOptions = createTraceOptions(data, { provider, trace: tracePayload });
     }
 
-    return await agentRuntime.chat(data, {
+    return await modelRuntime.chat(data, {
       user: jwtPayload.userId,
       ...traceOptions,
       signal: req.signal,
package/src/app/(backend)/webapi/chat/vertexai/route.ts CHANGED
@@ -1,5 +1,5 @@
 import { checkAuth } from '@/app/(backend)/middleware/auth';
-import { AgentRuntime, ModelProvider } from '@/libs/model-runtime';
+import { ModelProvider, ModelRuntime } from '@/libs/model-runtime';
 import { LobeVertexAI } from '@/libs/model-runtime/vertexai';
 import { safeParseJSON } from '@/utils/safeParseJSON';
 
@@ -28,7 +28,7 @@ export const POST = checkAuth(async (req: Request, { jwtPayload }) =>
       project: !!credentials?.project_id ? credentials?.project_id : process.env.VERTEXAI_PROJECT,
     });
 
-    return new AgentRuntime(instance);
+    return new ModelRuntime(instance);
   },
   params: Promise.resolve({ provider: ModelProvider.VertexAI }),
 }),
package/src/app/(backend)/webapi/models/[provider]/pull/route.ts CHANGED
@@ -1,6 +1,6 @@
 import { checkAuth } from '@/app/(backend)/middleware/auth';
 import { ChatCompletionErrorPayload, PullModelParams } from '@/libs/model-runtime';
-import { initAgentRuntimeWithUserPayload } from '@/server/modules/AgentRuntime';
+import { initModelRuntimeWithUserPayload } from '@/server/modules/ModelRuntime';
 import { ChatErrorType } from '@/types/fetch';
 import { createErrorResponse } from '@/utils/errorResponse';
 
@@ -10,7 +10,7 @@ export const POST = checkAuth(async (req, { params, jwtPayload }) => {
   const { provider } = await params;
 
   try {
-    const agentRuntime = await initAgentRuntimeWithUserPayload(provider, jwtPayload);
+    const agentRuntime = await initModelRuntimeWithUserPayload(provider, jwtPayload);
 
     const data = (await req.json()) as PullModelParams;
 
package/src/app/(backend)/webapi/models/[provider]/route.ts CHANGED
@@ -2,7 +2,7 @@ import { NextResponse } from 'next/server';
 
 import { checkAuth } from '@/app/(backend)/middleware/auth';
 import { ChatCompletionErrorPayload, ModelProvider } from '@/libs/model-runtime';
-import { initAgentRuntimeWithUserPayload } from '@/server/modules/AgentRuntime';
+import { initModelRuntimeWithUserPayload } from '@/server/modules/ModelRuntime';
 import { ChatErrorType } from '@/types/fetch';
 import { createErrorResponse } from '@/utils/errorResponse';
 
@@ -16,7 +16,7 @@ export const GET = checkAuth(async (req, { params, jwtPayload }) => {
   try {
     const hasDefaultApiKey = jwtPayload.apiKey || 'dont-need-api-key-for-model-list';
 
-    const agentRuntime = await initAgentRuntimeWithUserPayload(provider, {
+    const agentRuntime = await initModelRuntimeWithUserPayload(provider, {
       ...jwtPayload,
       apiKey: noNeedAPIKey(provider) ? hasDefaultApiKey : jwtPayload.apiKey,
     });
package/src/app/(backend)/webapi/text-to-image/[provider]/route.ts CHANGED
@@ -3,7 +3,7 @@ import { NextResponse } from 'next/server';
 import { checkAuth } from '@/app/(backend)/middleware/auth';
 import { ChatCompletionErrorPayload } from '@/libs/model-runtime';
 import { TextToImagePayload } from '@/libs/model-runtime/types';
-import { initAgentRuntimeWithUserPayload } from '@/server/modules/AgentRuntime';
+import { initModelRuntimeWithUserPayload } from '@/server/modules/ModelRuntime';
 import { ChatErrorType } from '@/types/fetch';
 import { createErrorResponse } from '@/utils/errorResponse';
 
@@ -52,7 +52,7 @@ export const POST = checkAuth(async (req: Request, { params, jwtPayload }) => {
 
   try {
     // ============ 1. init chat model ============ //
-    const agentRuntime = await initAgentRuntimeWithUserPayload(provider, jwtPayload);
+    const agentRuntime = await initModelRuntimeWithUserPayload(provider, jwtPayload);
 
     // ============ 2. create chat completion ============ //
 
package/src/app/[variants]/(main)/settings/provider/(detail)/github/page.tsx CHANGED
@@ -48,10 +48,10 @@ const useProviderCard = (): ProviderItem => {
        ),
        desc: (
          <Markdown className={styles.markdown} fontSize={12} variant={'chat'}>
-            {t(`github.personalAccessToken.desc`)}
+            {t('github.personalAccessToken.desc')}
          </Markdown>
        ),
-        label: t(`github.personalAccessToken.title`),
+        label: t('github.personalAccessToken.title'),
        name: [KeyVaultsConfigKey, LLMProviderApiTokenKey],
      },
    ],
package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx CHANGED
@@ -97,6 +97,7 @@ const useStyles = createStyles(({ css, prefixCls, responsive, token }) => ({
 
 export interface ProviderConfigProps extends Omit<AiProviderDetailItem, 'enabled' | 'source'> {
   apiKeyItems?: FormItemProps[];
+  apiKeyUrl?: string;
   canDeactivate?: boolean;
   checkErrorRender?: CheckErrorRender;
   className?: string;
@@ -127,6 +128,7 @@ const ProviderConfig = memo<ProviderConfigProps>(
     showAceGcm = true,
     extra,
     source = AiProviderSourceEnum.Builtin,
+    apiKeyUrl,
   }) => {
     const {
       proxyUrl,
@@ -184,7 +186,7 @@ const ProviderConfig = memo<ProviderConfigProps>(
          ) : (
            <FormPassword
              autoComplete={'new-password'}
-              placeholder={t(`providerModels.config.apiKey.placeholder`, { name })}
+              placeholder={t('providerModels.config.apiKey.placeholder', { name })}
              suffix={
                configUpdating && (
                  <Icon icon={Loader2Icon} spin style={{ color: theme.colorTextTertiary }} />
@@ -192,7 +194,20 @@ const ProviderConfig = memo<ProviderConfigProps>(
              }
            />
          ),
-          desc: t(`providerModels.config.apiKey.desc`, { name }),
+          desc: apiKeyUrl ? (
+            <Trans
+              i18nKey="providerModels.config.apiKey.descWithUrl"
+              ns={'modelProvider'}
+              value={{ name }}
+            >
+              请填写你的 {{ name }} API Key,
+              <Link href={apiKeyUrl} target={'_blank'}>
+                点此获取
+              </Link>
+            </Trans>
+          ) : (
+            t(`providerModels.config.apiKey.desc`, { name })
+          ),
          label: t(`providerModels.config.apiKey.title`),
          name: [KeyVaultsConfigKey, LLMProviderApiTokenKey],
        },
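The apiKeyUrl prop added above only switches the API key field's description between the linked descWithUrl translation and the plain desc string. A hypothetical usage sketch (the component path comes from this diff, and the URL value matches the apiKeyUrl added to huggingface.ts later in this diff; the id prop value is illustrative):

import ProviderConfig from '@/app/[variants]/(main)/settings/provider/features/ProviderConfig';

// Hypothetical: with apiKeyUrl set, the key field's description renders a
// "get your key here" link; without it, the plain desc string is shown.
export default () => (
  <ProviderConfig apiKeyUrl={'https://huggingface.co/settings/tokens'} id={'huggingface'} />
);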
@@ -0,0 +1,164 @@
1
+ import { AIChatModelCard } from '@/types/aiModel';
2
+
3
+ const aihubmixModels: AIChatModelCard[] = [
4
+ {
5
+ abilities: {
6
+ functionCall: true,
7
+ reasoning: true,
8
+ },
9
+ contextWindowTokens: 65_536,
10
+ description: 'DeepSeek R1 推理模型,具有强大的推理能力',
11
+ displayName: 'DeepSeek R1',
12
+ enabled: true,
13
+ id: 'DeepSeek-R1',
14
+ type: 'chat',
15
+ },
16
+ {
17
+ abilities: {
18
+ functionCall: true,
19
+ reasoning: true,
20
+ vision: true,
21
+ },
22
+ contextWindowTokens: 200_000,
23
+ description:
24
+ 'Claude Opus 4 是 Anthropic 迄今为止最强大的模型,专为处理复杂、长时间运行的任务而设计。',
25
+ displayName: 'Claude Opus 4',
26
+ enabled: true,
27
+ id: 'claude-opus-4-20250514',
28
+ type: 'chat',
29
+ },
30
+ {
31
+ abilities: {
32
+ functionCall: true,
33
+ reasoning: true,
34
+ vision: true,
35
+ },
36
+ contextWindowTokens: 200_000,
37
+ description:
38
+ 'Claude Sonnet 4 是一款高效且性价比高的模型,作为 Claude Sonnet 3.7 的升级版,适合日常任务和中等复杂度的应用。',
39
+ displayName: 'Claude Sonnet 4',
40
+ enabled: true,
41
+ id: 'claude-sonnet-4-20250514',
42
+ type: 'chat',
43
+ },
44
+ {
45
+ abilities: {
46
+ functionCall: true,
47
+ reasoning: true,
48
+ vision: true,
49
+ },
50
+ contextWindowTokens: 200_000,
51
+ description: 'OpenAI o3 推理模型,具有强大的推理能力',
52
+ displayName: 'o3',
53
+ enabled: true,
54
+ id: 'o3',
55
+ type: 'chat',
56
+ },
57
+ {
58
+ abilities: {
59
+ functionCall: true,
60
+ reasoning: true,
61
+ vision: true,
62
+ },
63
+ contextWindowTokens: 200_000,
64
+ description: 'OpenAI o4-mini 小型推理模型,高效且经济',
65
+ displayName: 'o4-mini',
66
+ enabled: true,
67
+ id: 'o4-mini',
68
+ type: 'chat',
69
+ },
70
+ {
71
+ abilities: {
72
+ functionCall: true,
73
+ vision: true,
74
+ },
75
+ contextWindowTokens: 1_047_576,
76
+ description: 'GPT-4.1 旗舰模型,适用于复杂任务',
77
+ displayName: 'GPT-4.1',
78
+ enabled: true,
79
+ id: 'gpt-4.1',
80
+ type: 'chat',
81
+ },
82
+ {
83
+ abilities: {
84
+ functionCall: true,
85
+ vision: true,
86
+ },
87
+ contextWindowTokens: 1_047_576,
88
+ description: 'GPT-4.1 mini 平衡智能、速度和成本',
89
+ displayName: 'GPT-4.1 mini',
90
+ enabled: true,
91
+ id: 'gpt-4.1-mini',
92
+ type: 'chat',
93
+ },
94
+ {
95
+ abilities: {
96
+ functionCall: true,
97
+ reasoning: true,
98
+ search: true,
99
+ vision: true,
100
+ },
101
+ contextWindowTokens: 1_048_576 + 65_536,
102
+ description:
103
+ 'Gemini 2.5 Pro 是 Google 最先进的思维模型,能够对代码、数学和STEM领域的复杂问题进行推理,以及使用长上下文分析大型数据集、代码库和文档。',
104
+ displayName: 'Gemini 2.5 Pro',
105
+ enabled: true,
106
+ id: 'gemini-2.5-pro',
107
+ maxOutput: 65_536,
108
+ pricing: {
109
+ input: 1.25, // prompts <= 200k tokens
110
+ output: 10, // prompts <= 200k tokens
111
+ },
112
+ releasedAt: '2025-06-17',
113
+ settings: {
114
+ extendParams: ['thinkingBudget'],
115
+ searchImpl: 'params',
116
+ searchProvider: 'google',
117
+ },
118
+ type: 'chat',
119
+ },
120
+ {
121
+ abilities: {
122
+ functionCall: true,
123
+ reasoning: true,
124
+ search: true,
125
+ vision: true,
126
+ },
127
+ contextWindowTokens: 1_000_000,
128
+ description: 'Gemini 2.5 Flash 预览版,快速高效的多模态模型',
129
+ displayName: 'Gemini 2.5 Flash',
130
+ enabled: true,
131
+ id: 'gemini-2.5-flash',
132
+ releasedAt: '2025-06-17',
133
+ settings: {
134
+ extendParams: ['thinkingBudget'],
135
+ searchImpl: 'params',
136
+ searchProvider: 'google',
137
+ },
138
+ type: 'chat',
139
+ },
140
+ {
141
+ abilities: {
142
+ functionCall: true,
143
+ },
144
+ contextWindowTokens: 235_000,
145
+ description: 'Qwen3 235B 大型语言模型',
146
+ displayName: 'Qwen3 235B',
147
+ enabled: true,
148
+ id: 'Qwen/Qwen3-235B-A22B',
149
+ type: 'chat',
150
+ },
151
+ {
152
+ abilities: {
153
+ functionCall: true,
154
+ },
155
+ contextWindowTokens: 32_000,
156
+ description: 'Qwen3 32B 中型语言模型',
157
+ displayName: 'Qwen3 32B',
158
+ enabled: true,
159
+ id: 'Qwen/Qwen3-32B',
160
+ type: 'chat',
161
+ },
162
+ ];
163
+
164
+ export default aihubmixModels;
package/src/config/aiModels/index.ts CHANGED
@@ -2,6 +2,7 @@ import { AiFullModelCard, LobeDefaultAiModelListItem } from '@/types/aiModel';
 
 import { default as ai21 } from './ai21';
 import { default as ai360 } from './ai360';
+import { default as aihubmix } from './aihubmix';
 import { default as anthropic } from './anthropic';
 import { default as azure } from './azure';
 import { default as azureai } from './azureai';
@@ -78,6 +79,7 @@ const buildDefaultModelList = (map: ModelsMap): LobeDefaultAiModelListItem[] =>
 export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
   ai21,
   ai360,
+  aihubmix,
   anthropic,
   azure,
   azureai,
@@ -135,6 +137,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
 
 export { default as ai21 } from './ai21';
 export { default as ai360 } from './ai360';
+export { default as aihubmix } from './aihubmix';
 export { default as anthropic } from './anthropic';
 export { default as azure } from './azure';
 export { default as azureai } from './azureai';
package/src/config/llm.ts CHANGED
@@ -171,6 +171,9 @@ export const getLLMConfig = () => {
 
       ENABLED_V0: z.boolean(),
       V0_API_KEY: z.string().optional(),
+
+      ENABLED_AIHUBMIX: z.boolean(),
+      AIHUBMIX_API_KEY: z.string().optional(),
     },
     runtimeEnv: {
       API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -340,6 +343,9 @@ export const getLLMConfig = () => {
 
       ENABLED_V0: !!process.env.V0_API_KEY,
       V0_API_KEY: process.env.V0_API_KEY,
+
+      ENABLED_AIHUBMIX: !!process.env.AIHUBMIX_API_KEY,
+      AIHUBMIX_API_KEY: process.env.AIHUBMIX_API_KEY,
     },
   });
 };
@@ -0,0 +1,18 @@
1
+ import { ModelProviderCard } from '@/types/llm';
2
+
3
+ const AiHubMix: ModelProviderCard = {
4
+ apiKeyUrl: 'https://lobe.li/9mZhb4T',
5
+ chatModels: [],
6
+ checkModel: 'gpt-4.1-mini',
7
+ description: 'AiHubMix 通过统一的 API 接口提供对多种 AI 模型的访问。',
8
+ id: 'aihubmix',
9
+ modelsUrl: 'https://docs.aihubmix.com/cn/api/Model-List',
10
+ name: 'AiHubMix',
11
+ settings: {
12
+ sdkType: 'router',
13
+ showModelFetcher: true,
14
+ },
15
+ url: 'https://aihubmix.com?utm_source=lobehub',
16
+ };
17
+
18
+ export default AiHubMix;
package/src/config/modelProviders/huggingface.ts CHANGED
@@ -1,6 +1,7 @@
 import { ModelProviderCard } from '@/types/llm';
 
 const HuggingFace: ModelProviderCard = {
+  apiKeyUrl: 'https://huggingface.co/settings/tokens',
   chatModels: [
     {
       contextWindowTokens: 32_768,
package/src/config/modelProviders/index.ts CHANGED
@@ -2,6 +2,7 @@ import { ChatModelCard, ModelProviderCard } from '@/types/llm';
 
 import Ai21Provider from './ai21';
 import Ai360Provider from './ai360';
+import AiHubMixProvider from './aihubmix';
 import AnthropicProvider from './anthropic';
 import AzureProvider from './azure';
 import AzureAIProvider from './azureai';
@@ -94,6 +95,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   TaichuProvider.chatModels,
   CloudflareProvider.chatModels,
   Ai360Provider.chatModels,
+  AiHubMixProvider.chatModels,
   SiliconCloudProvider.chatModels,
   GiteeAIProvider.chatModels,
   UpstageProvider.chatModels,
@@ -163,6 +165,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   GiteeAIProvider,
   TaichuProvider,
   Ai360Provider,
+  AiHubMixProvider,
   Search1APIProvider,
   InfiniAIProvider,
   QiniuProvider,
@@ -179,6 +182,7 @@ export const isProviderDisableBrowserRequest = (id: string) => {
 
 export { default as Ai21ProviderCard } from './ai21';
 export { default as Ai360ProviderCard } from './ai360';
+export { default as AiHubMixProviderCard } from './aihubmix';
 export { default as AnthropicProviderCard } from './anthropic';
 export { default as AzureProviderCard } from './azure';
 export { default as AzureAIProviderCard } from './azureai';
package/src/libs/model-runtime/ModelRuntime.test.ts CHANGED
@@ -6,10 +6,10 @@ import { beforeEach, describe, expect, it, vi } from 'vitest';
 import * as langfuseCfg from '@/config/langfuse';
 import { ClientSecretPayload } from '@/const/auth';
 import { TraceNameMap } from '@/const/trace';
-import { AgentRuntime, ChatStreamPayload, LobeOpenAI, ModelProvider } from '@/libs/model-runtime';
+import { ChatStreamPayload, LobeOpenAI, ModelProvider, ModelRuntime } from '@/libs/model-runtime';
 import { providerRuntimeMap } from '@/libs/model-runtime/runtimeMap';
 import { CreateImagePayload } from '@/libs/model-runtime/types/image';
-import { createTraceOptions } from '@/server/modules/AgentRuntime';
+import { createTraceOptions } from '@/server/modules/ModelRuntime';
 
 import { AgentChatOptions } from './ModelRuntime';
 
@@ -52,7 +52,7 @@ const testRuntime = (providerId: string, payload?: any) => {
   describe(`${providerId} provider runtime`, () => {
     it('should initialize correctly', async () => {
       const jwtPayload: ClientSecretPayload = { apiKey: 'user-key', ...payload };
-      const runtime = await AgentRuntime.initializeWithProvider(providerId, jwtPayload);
+      const runtime = await ModelRuntime.initializeWithProvider(providerId, jwtPayload);
 
       // @ts-ignore
       expect(runtime['_runtime']).toBeInstanceOf(providerRuntimeMap[providerId]);
@@ -64,16 +64,15 @@ const testRuntime = (providerId: string, payload?: any) => {
   });
 };
 
-let mockModelRuntime: AgentRuntime;
+let mockModelRuntime: ModelRuntime;
 beforeEach(async () => {
   const jwtPayload: ClientSecretPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
-  mockModelRuntime = await AgentRuntime.initializeWithProvider(ModelProvider.OpenAI, jwtPayload);
+  mockModelRuntime = await ModelRuntime.initializeWithProvider(ModelProvider.OpenAI, jwtPayload);
 });
 
-describe('AgentRuntime', () => {
+describe('ModelRuntime', () => {
   describe('should initialize with various providers', () => {
     const providers = Object.values(ModelProvider).filter((i) => i !== 'lobehub');
-
     const specialProviderIds = [ModelProvider.VertexAI, ...specialProviders.map((p) => p.id)];
 
     const generalTestProviders = providers.filter(
@@ -87,7 +86,7 @@ describe('AgentRuntime', () => {
     specialProviders.forEach(({ id, payload }) => testRuntime(id, payload));
   });
 
-  describe('AgentRuntime chat method', () => {
+  describe('ModelRuntime chat method', () => {
     it('should run correctly', async () => {
       const payload: ChatStreamPayload = {
         messages: [{ role: 'user', content: 'Hello, world!' }],
@@ -243,7 +242,7 @@ describe('AgentRuntime', () => {
     });
   });
 
-  describe('AgentRuntime createImage method', () => {
+  describe('ModelRuntime createImage method', () => {
     it('should run correctly', async () => {
       const payload: CreateImagePayload = {
         model: 'dall-e-3',
@@ -292,7 +291,7 @@ describe('AgentRuntime', () => {
     });
   });
 
-  describe('AgentRuntime models method', () => {
+  describe('ModelRuntime models method', () => {
     it('should run correctly', async () => {
       const mockModels = [
         { id: 'gpt-4', name: 'GPT-4' },
package/src/libs/model-runtime/ModelRuntime.ts CHANGED
@@ -25,7 +25,7 @@ export interface AgentChatOptions {
   trace?: TracePayload;
 }
 
-class ModelRuntime {
+export class ModelRuntime {
   private _runtime: LobeRuntimeAI;
 
   constructor(runtime: LobeRuntimeAI) {
@@ -113,10 +113,9 @@ class ModelRuntime {
   ) {
     // @ts-expect-error runtime map not include vertex so it will be undefined
     const providerAI = providerRuntimeMap[provider] ?? LobeOpenAI;
+
     const runtimeModel: LobeRuntimeAI = new providerAI(params);
 
     return new ModelRuntime(runtimeModel);
   }
 }
-
-export default ModelRuntime;
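Together with the tests above, this shows the rename keeps the initialization API intact. A minimal usage sketch, assuming an ESM context with top-level await; payload fields are abbreviated, and the method and option names follow the calls shown in this diff:

import { ModelProvider, ModelRuntime } from '@/libs/model-runtime';

// Initialize a runtime for a provider from user credentials, then run a
// chat completion, mirroring ModelRuntime.test.ts and the chat route above.
const runtime = await ModelRuntime.initializeWithProvider(ModelProvider.OpenAI, {
  apiKey: 'user-key',
});

const response = await runtime.chat(
  { messages: [{ content: 'Hello, world!', role: 'user' }], model: 'gpt-4', temperature: 1 },
  { user: 'user-id' },
);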
package/src/libs/model-runtime/RouterRuntime/baseRuntimeMap.ts ADDED
@@ -0,0 +1,15 @@
+import { LobeAnthropicAI } from '../anthropic';
+import { LobeAzureAI } from '../azureai';
+import { LobeCloudflareAI } from '../cloudflare';
+import { LobeFalAI } from '../fal';
+import { LobeGoogleAI } from '../google';
+import { LobeOpenAI } from '../openai';
+
+export const baseRuntimeMap = {
+  anthropic: LobeAnthropicAI,
+  azure: LobeAzureAI,
+  cloudflare: LobeCloudflareAI,
+  fal: LobeFalAI,
+  google: LobeGoogleAI,
+  openai: LobeOpenAI,
+};
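baseRuntimeMap supplies the concrete SDK classes the new RouterRuntime can delegate to; createRuntime.ts (193 lines) is not part of this excerpt, so the following lookup is only an assumed sketch, constructing an entry the same way ModelRuntime instantiates providerRuntimeMap entries above:

import { baseRuntimeMap } from '@/libs/model-runtime/RouterRuntime/baseRuntimeMap';

// Assumed dispatch: pick a base implementation by key and construct it with
// router-level credentials; the actual aihubmix routing logic lives in
// createRuntime.ts, which this diff excerpt does not show.
const AnthropicRuntime = baseRuntimeMap['anthropic'];
const instance = new AnthropicRuntime({ apiKey: process.env.AIHUBMIX_API_KEY });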