@lobehub/chat 1.106.7 → 1.107.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. package/.env.example +9 -0
  2. package/CHANGELOG.md +50 -0
  3. package/Dockerfile +2 -0
  4. package/Dockerfile.database +2 -0
  5. package/Dockerfile.pglite +2 -0
  6. package/changelog/v1.json +18 -0
  7. package/docs/usage/providers/aihubmix.zh-CN.mdx +101 -0
  8. package/locales/ar/modelProvider.json +1 -0
  9. package/locales/ar/providers.json +3 -0
  10. package/locales/bg-BG/modelProvider.json +1 -0
  11. package/locales/bg-BG/providers.json +3 -0
  12. package/locales/de-DE/modelProvider.json +1 -0
  13. package/locales/de-DE/providers.json +3 -0
  14. package/locales/en-US/modelProvider.json +1 -0
  15. package/locales/en-US/providers.json +3 -0
  16. package/locales/es-ES/modelProvider.json +1 -0
  17. package/locales/es-ES/providers.json +3 -0
  18. package/locales/fa-IR/modelProvider.json +1 -0
  19. package/locales/fa-IR/providers.json +3 -0
  20. package/locales/fr-FR/modelProvider.json +1 -0
  21. package/locales/fr-FR/providers.json +3 -0
  22. package/locales/it-IT/modelProvider.json +1 -0
  23. package/locales/it-IT/providers.json +3 -0
  24. package/locales/ja-JP/modelProvider.json +1 -0
  25. package/locales/ja-JP/providers.json +3 -0
  26. package/locales/ko-KR/modelProvider.json +1 -0
  27. package/locales/ko-KR/providers.json +3 -0
  28. package/locales/nl-NL/modelProvider.json +1 -0
  29. package/locales/nl-NL/providers.json +3 -0
  30. package/locales/pl-PL/modelProvider.json +1 -0
  31. package/locales/pl-PL/providers.json +3 -0
  32. package/locales/pt-BR/modelProvider.json +1 -0
  33. package/locales/pt-BR/providers.json +3 -0
  34. package/locales/ru-RU/modelProvider.json +1 -0
  35. package/locales/ru-RU/providers.json +3 -0
  36. package/locales/tr-TR/modelProvider.json +1 -0
  37. package/locales/tr-TR/providers.json +3 -0
  38. package/locales/vi-VN/modelProvider.json +1 -0
  39. package/locales/vi-VN/providers.json +3 -0
  40. package/locales/zh-CN/modelProvider.json +1 -0
  41. package/locales/zh-CN/providers.json +3 -0
  42. package/locales/zh-TW/modelProvider.json +1 -0
  43. package/locales/zh-TW/providers.json +3 -0
  44. package/package.json +1 -2
  45. package/src/app/(backend)/middleware/auth/index.ts +2 -2
  46. package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +12 -12
  47. package/src/app/(backend)/webapi/chat/[provider]/route.ts +6 -6
  48. package/src/app/(backend)/webapi/chat/vertexai/route.ts +2 -2
  49. package/src/app/(backend)/webapi/models/[provider]/pull/route.ts +2 -2
  50. package/src/app/(backend)/webapi/models/[provider]/route.ts +2 -2
  51. package/src/app/(backend)/webapi/text-to-image/[provider]/route.ts +2 -2
  52. package/src/app/[variants]/(main)/settings/provider/(detail)/github/page.tsx +2 -2
  53. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +17 -2
  54. package/src/config/aiModels/aihubmix.ts +164 -0
  55. package/src/config/aiModels/index.ts +3 -0
  56. package/src/config/aiModels/sensenova.ts +82 -3
  57. package/src/config/llm.ts +6 -0
  58. package/src/config/modelProviders/aihubmix.ts +18 -0
  59. package/src/config/modelProviders/huggingface.ts +1 -0
  60. package/src/config/modelProviders/index.ts +4 -0
  61. package/src/libs/model-runtime/ModelRuntime.test.ts +9 -10
  62. package/src/libs/model-runtime/ModelRuntime.ts +2 -3
  63. package/src/libs/model-runtime/RouterRuntime/baseRuntimeMap.ts +15 -0
  64. package/src/libs/model-runtime/RouterRuntime/createRuntime.ts +193 -0
  65. package/src/libs/model-runtime/RouterRuntime/index.ts +9 -0
  66. package/src/libs/model-runtime/aihubmix/index.ts +118 -0
  67. package/src/libs/model-runtime/index.ts +1 -1
  68. package/src/libs/model-runtime/openrouter/index.ts +2 -2
  69. package/src/libs/model-runtime/runtimeMap.ts +2 -0
  70. package/src/libs/model-runtime/sensenova/index.ts +4 -1
  71. package/src/libs/model-runtime/types/type.ts +1 -0
  72. package/src/locales/default/modelProvider.ts +1 -0
  73. package/src/server/modules/{AgentRuntime → ModelRuntime}/index.test.ts +64 -67
  74. package/src/server/modules/{AgentRuntime → ModelRuntime}/index.ts +3 -3
  75. package/src/server/routers/async/file.ts +2 -2
  76. package/src/server/routers/async/image.ts +2 -2
  77. package/src/server/routers/async/ragEval.ts +2 -2
  78. package/src/server/routers/lambda/chunk.ts +3 -3
  79. package/src/services/__tests__/chat.test.ts +21 -21
  80. package/src/services/chat.ts +2 -2
  81. package/src/types/aiProvider.ts +1 -0
  82. package/src/types/llm.ts +4 -0
  83. package/src/types/user/settings/keyVaults.ts +1 -0
  84. package/src/app/[variants]/(main)/settings/provider/(detail)/huggingface/page.tsx +0 -67
  85. package/src/server/modules/{AgentRuntime → ModelRuntime}/apiKeyManager.test.ts +0 -0
  86. package/src/server/modules/{AgentRuntime → ModelRuntime}/apiKeyManager.ts +0 -0
  87. package/src/server/modules/{AgentRuntime → ModelRuntime}/trace.ts +0 -0
@@ -2,6 +2,7 @@ import { ChatModelCard, ModelProviderCard } from '@/types/llm';
2
2
 
3
3
  import Ai21Provider from './ai21';
4
4
  import Ai360Provider from './ai360';
5
+ import AiHubMixProvider from './aihubmix';
5
6
  import AnthropicProvider from './anthropic';
6
7
  import AzureProvider from './azure';
7
8
  import AzureAIProvider from './azureai';
@@ -94,6 +95,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
94
95
  TaichuProvider.chatModels,
95
96
  CloudflareProvider.chatModels,
96
97
  Ai360Provider.chatModels,
98
+ AiHubMixProvider.chatModels,
97
99
  SiliconCloudProvider.chatModels,
98
100
  GiteeAIProvider.chatModels,
99
101
  UpstageProvider.chatModels,
@@ -163,6 +165,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
163
165
  GiteeAIProvider,
164
166
  TaichuProvider,
165
167
  Ai360Provider,
168
+ AiHubMixProvider,
166
169
  Search1APIProvider,
167
170
  InfiniAIProvider,
168
171
  QiniuProvider,
@@ -179,6 +182,7 @@ export const isProviderDisableBrowserRequest = (id: string) => {
179
182
 
180
183
  export { default as Ai21ProviderCard } from './ai21';
181
184
  export { default as Ai360ProviderCard } from './ai360';
185
+ export { default as AiHubMixProviderCard } from './aihubmix';
182
186
  export { default as AnthropicProviderCard } from './anthropic';
183
187
  export { default as AzureProviderCard } from './azure';
184
188
  export { default as AzureAIProviderCard } from './azureai';
@@ -6,10 +6,10 @@ import { beforeEach, describe, expect, it, vi } from 'vitest';
6
6
  import * as langfuseCfg from '@/config/langfuse';
7
7
  import { ClientSecretPayload } from '@/const/auth';
8
8
  import { TraceNameMap } from '@/const/trace';
9
- import { AgentRuntime, ChatStreamPayload, LobeOpenAI, ModelProvider } from '@/libs/model-runtime';
9
+ import { ChatStreamPayload, LobeOpenAI, ModelProvider, ModelRuntime } from '@/libs/model-runtime';
10
10
  import { providerRuntimeMap } from '@/libs/model-runtime/runtimeMap';
11
11
  import { CreateImagePayload } from '@/libs/model-runtime/types/image';
12
- import { createTraceOptions } from '@/server/modules/AgentRuntime';
12
+ import { createTraceOptions } from '@/server/modules/ModelRuntime';
13
13
 
14
14
  import { AgentChatOptions } from './ModelRuntime';
15
15
 
@@ -52,7 +52,7 @@ const testRuntime = (providerId: string, payload?: any) => {
52
52
  describe(`${providerId} provider runtime`, () => {
53
53
  it('should initialize correctly', async () => {
54
54
  const jwtPayload: ClientSecretPayload = { apiKey: 'user-key', ...payload };
55
- const runtime = await AgentRuntime.initializeWithProvider(providerId, jwtPayload);
55
+ const runtime = await ModelRuntime.initializeWithProvider(providerId, jwtPayload);
56
56
 
57
57
  // @ts-ignore
58
58
  expect(runtime['_runtime']).toBeInstanceOf(providerRuntimeMap[providerId]);
@@ -64,16 +64,15 @@ const testRuntime = (providerId: string, payload?: any) => {
64
64
  });
65
65
  };
66
66
 
67
- let mockModelRuntime: AgentRuntime;
67
+ let mockModelRuntime: ModelRuntime;
68
68
  beforeEach(async () => {
69
69
  const jwtPayload: ClientSecretPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
70
- mockModelRuntime = await AgentRuntime.initializeWithProvider(ModelProvider.OpenAI, jwtPayload);
70
+ mockModelRuntime = await ModelRuntime.initializeWithProvider(ModelProvider.OpenAI, jwtPayload);
71
71
  });
72
72
 
73
- describe('AgentRuntime', () => {
73
+ describe('ModelRuntime', () => {
74
74
  describe('should initialize with various providers', () => {
75
75
  const providers = Object.values(ModelProvider).filter((i) => i !== 'lobehub');
76
-
77
76
  const specialProviderIds = [ModelProvider.VertexAI, ...specialProviders.map((p) => p.id)];
78
77
 
79
78
  const generalTestProviders = providers.filter(
@@ -87,7 +86,7 @@ describe('AgentRuntime', () => {
87
86
  specialProviders.forEach(({ id, payload }) => testRuntime(id, payload));
88
87
  });
89
88
 
90
- describe('AgentRuntime chat method', () => {
89
+ describe('ModelRuntime chat method', () => {
91
90
  it('should run correctly', async () => {
92
91
  const payload: ChatStreamPayload = {
93
92
  messages: [{ role: 'user', content: 'Hello, world!' }],
@@ -243,7 +242,7 @@ describe('AgentRuntime', () => {
243
242
  });
244
243
  });
245
244
 
246
- describe('AgentRuntime createImage method', () => {
245
+ describe('ModelRuntime createImage method', () => {
247
246
  it('should run correctly', async () => {
248
247
  const payload: CreateImagePayload = {
249
248
  model: 'dall-e-3',
@@ -292,7 +291,7 @@ describe('AgentRuntime', () => {
292
291
  });
293
292
  });
294
293
 
295
- describe('AgentRuntime models method', () => {
294
+ describe('ModelRuntime models method', () => {
296
295
  it('should run correctly', async () => {
297
296
  const mockModels = [
298
297
  { id: 'gpt-4', name: 'GPT-4' },
@@ -25,7 +25,7 @@ export interface AgentChatOptions {
25
25
  trace?: TracePayload;
26
26
  }
27
27
 
28
- class ModelRuntime {
28
+ export class ModelRuntime {
29
29
  private _runtime: LobeRuntimeAI;
30
30
 
31
31
  constructor(runtime: LobeRuntimeAI) {
@@ -113,10 +113,9 @@ class ModelRuntime {
113
113
  ) {
114
114
  // @ts-expect-error runtime map not include vertex so it will be undefined
115
115
  const providerAI = providerRuntimeMap[provider] ?? LobeOpenAI;
116
+
116
117
  const runtimeModel: LobeRuntimeAI = new providerAI(params);
117
118
 
118
119
  return new ModelRuntime(runtimeModel);
119
120
  }
120
121
  }
121
-
122
- export default ModelRuntime;
@@ -0,0 +1,15 @@
1
+ import { LobeAnthropicAI } from '../anthropic';
2
+ import { LobeAzureAI } from '../azureai';
3
+ import { LobeCloudflareAI } from '../cloudflare';
4
+ import { LobeFalAI } from '../fal';
5
+ import { LobeGoogleAI } from '../google';
6
+ import { LobeOpenAI } from '../openai';
7
+
8
+ export const baseRuntimeMap = {
9
+ anthropic: LobeAnthropicAI,
10
+ azure: LobeAzureAI,
11
+ cloudflare: LobeCloudflareAI,
12
+ fal: LobeFalAI,
13
+ google: LobeGoogleAI,
14
+ openai: LobeOpenAI,
15
+ };
@@ -0,0 +1,193 @@
1
+ /**
2
+ * @see https://github.com/lobehub/lobe-chat/discussions/6563
3
+ */
4
+ import OpenAI, { ClientOptions } from 'openai';
5
+ import { Stream } from 'openai/streaming';
6
+
7
+ import { ILobeAgentRuntimeErrorType } from '@/libs/model-runtime';
8
+ import { CreateImagePayload, CreateImageResponse } from '@/libs/model-runtime/types/image';
9
+ import {
10
+ CreateImageOptions,
11
+ CustomClientOptions,
12
+ } from '@/libs/model-runtime/utils/openaiCompatibleFactory';
13
+ import type { ChatModelCard } from '@/types/llm';
14
+
15
+ import { LobeRuntimeAI } from '../BaseAI';
16
+ import { LobeOpenAI } from '../openai';
17
+ import {
18
+ type ChatCompletionErrorPayload,
19
+ ChatMethodOptions,
20
+ ChatStreamCallbacks,
21
+ ChatStreamPayload,
22
+ EmbeddingsOptions,
23
+ EmbeddingsPayload,
24
+ TextToImagePayload,
25
+ TextToSpeechPayload,
26
+ } from '../types';
27
+ import { baseRuntimeMap } from './baseRuntimeMap';
28
+
29
+ export interface RuntimeItem {
30
+ id: string;
31
+ models?: string[];
32
+ runtime: LobeRuntimeAI;
33
+ }
34
+
35
+ interface ProviderIniOptions extends Record<string, any> {
36
+ accessKeyId?: string;
37
+ accessKeySecret?: string;
38
+ apiKey?: string;
39
+ apiVersion?: string;
40
+ baseURL?: string;
41
+ baseURLOrAccountID?: string;
42
+ dangerouslyAllowBrowser?: boolean;
43
+ region?: string;
44
+ sessionToken?: string;
45
+ }
46
+
47
+ interface RouterInstance {
48
+ apiType: keyof typeof baseRuntimeMap;
49
+ models?: string[];
50
+ options: ProviderIniOptions;
51
+ runtime?: typeof LobeOpenAI;
52
+ }
53
+
54
+ type ConstructorOptions<T extends Record<string, any> = any> = ClientOptions & T;
55
+
56
+ interface CreateRouterRuntimeOptions<T extends Record<string, any> = any> {
57
+ apiKey?: string;
58
+ chatCompletion?: {
59
+ excludeUsage?: boolean;
60
+ handleError?: (
61
+ error: any,
62
+ options: ConstructorOptions<T>,
63
+ ) => Omit<ChatCompletionErrorPayload, 'provider'> | undefined;
64
+ handlePayload?: (
65
+ payload: ChatStreamPayload,
66
+ options: ConstructorOptions<T>,
67
+ ) => OpenAI.ChatCompletionCreateParamsStreaming;
68
+ handleStream?: (
69
+ stream: Stream<OpenAI.ChatCompletionChunk> | ReadableStream,
70
+ { callbacks, inputStartAt }: { callbacks?: ChatStreamCallbacks; inputStartAt?: number },
71
+ ) => ReadableStream;
72
+ handleStreamBizErrorType?: (error: {
73
+ message: string;
74
+ name: string;
75
+ }) => ILobeAgentRuntimeErrorType | undefined;
76
+ handleTransformResponseToStream?: (
77
+ data: OpenAI.ChatCompletion,
78
+ ) => ReadableStream<OpenAI.ChatCompletionChunk>;
79
+ noUserId?: boolean;
80
+ };
81
+ constructorOptions?: ConstructorOptions<T>;
82
+ createImage?: (
83
+ payload: CreateImagePayload,
84
+ options: CreateImageOptions,
85
+ ) => Promise<CreateImageResponse>;
86
+ customClient?: CustomClientOptions<T>;
87
+ debug?: {
88
+ chatCompletion: () => boolean;
89
+ responses?: () => boolean;
90
+ };
91
+ errorType?: {
92
+ bizError: ILobeAgentRuntimeErrorType;
93
+ invalidAPIKey: ILobeAgentRuntimeErrorType;
94
+ };
95
+ id: string;
96
+ models?:
97
+ | ((params: { client: OpenAI }) => Promise<ChatModelCard[]>)
98
+ | {
99
+ transformModel?: (model: OpenAI.Model) => ChatModelCard;
100
+ };
101
+ responses?: {
102
+ handlePayload?: (
103
+ payload: ChatStreamPayload,
104
+ options: ConstructorOptions<T>,
105
+ ) => ChatStreamPayload;
106
+ };
107
+ routers: RouterInstance[];
108
+ }
109
+
110
+ export const createRouterRuntime = ({
111
+ id,
112
+ routers,
113
+ apiKey: DEFAULT_API_LEY,
114
+ ...params
115
+ }: CreateRouterRuntimeOptions) => {
116
+ return class UniformRuntime implements LobeRuntimeAI {
117
+ private _runtimes: RuntimeItem[];
118
+ private _options: ClientOptions & Record<string, any>;
119
+
120
+ constructor(options: ClientOptions & Record<string, any> = {}) {
121
+ const _options = {
122
+ ...options,
123
+ apiKey: options.apiKey?.trim() || DEFAULT_API_LEY,
124
+ baseURL: options.baseURL?.trim(),
125
+ };
126
+
127
+ if (routers.length === 0) {
128
+ throw new Error('empty providers');
129
+ }
130
+
131
+ this._runtimes = routers.map((router) => {
132
+ const providerAI = router.runtime ?? baseRuntimeMap[router.apiType] ?? LobeOpenAI;
133
+
134
+ const finalOptions = { ...router.options, ...options };
135
+ // @ts-ignore
136
+ const runtime: LobeRuntimeAI = new providerAI({ ...params, ...finalOptions, id });
137
+
138
+ return { id: router.apiType, models: router.models, runtime };
139
+ });
140
+
141
+ this._options = _options;
142
+ }
143
+
144
+ // 检查下是否能匹配到特定模型,否则默认使用最后一个 runtime
145
+ getRuntimeByModel(model: string) {
146
+ const runtimeItem =
147
+ this._runtimes.find((runtime) => runtime.models && runtime.models.includes(model)) ||
148
+ this._runtimes.at(-1)!;
149
+
150
+ return runtimeItem.runtime;
151
+ }
152
+
153
+ async chat(payload: ChatStreamPayload, options?: ChatMethodOptions) {
154
+ try {
155
+ const runtime = this.getRuntimeByModel(payload.model);
156
+
157
+ return await runtime.chat!(payload, options);
158
+ } catch (e) {
159
+ if (this._options.chat?.handleError) {
160
+ const error = this._options.chat.handleError(e);
161
+
162
+ if (error) {
163
+ throw error;
164
+ }
165
+ }
166
+
167
+ throw e;
168
+ }
169
+ }
170
+
171
+ async textToImage(payload: TextToImagePayload) {
172
+ const runtime = this.getRuntimeByModel(payload.model);
173
+
174
+ return runtime.textToImage!(payload);
175
+ }
176
+
177
+ async models() {
178
+ return this._runtimes[0].runtime.models?.();
179
+ }
180
+
181
+ async embeddings(payload: EmbeddingsPayload, options?: EmbeddingsOptions) {
182
+ const runtime = this.getRuntimeByModel(payload.model);
183
+
184
+ return runtime.embeddings!(payload, options);
185
+ }
186
+
187
+ async textToSpeech(payload: TextToSpeechPayload, options?: EmbeddingsOptions) {
188
+ const runtime = this.getRuntimeByModel(payload.model);
189
+
190
+ return runtime.textToSpeech!(payload, options);
191
+ }
192
+ };
193
+ };
@@ -0,0 +1,9 @@
1
+ import { LobeRuntimeAI } from '../BaseAI';
2
+
3
+ export interface RuntimeItem {
4
+ id: string;
5
+ models?: string[];
6
+ runtime: LobeRuntimeAI;
7
+ }
8
+
9
+ export { createRouterRuntime } from './createRuntime';
@@ -0,0 +1,118 @@
1
+ import urlJoin from 'url-join';
2
+
3
+ import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
4
+ import AiHubMixModels from '@/config/aiModels/aihubmix';
5
+ import type { ChatModelCard } from '@/types/llm';
6
+
7
+ import { createRouterRuntime } from '../RouterRuntime';
8
+ import { ModelProvider } from '../types';
9
+
10
+ export interface AiHubMixModelCard {
11
+ created: number;
12
+ id: string;
13
+ object: string;
14
+ owned_by: string;
15
+ }
16
+
17
+ const baseURL = 'https://aihubmix.com';
18
+
19
+ export const LobeAiHubMixAI = createRouterRuntime({
20
+ constructorOptions: {
21
+ defaultHeaders: {
22
+ 'APP-Code': 'LobeHub',
23
+ },
24
+ },
25
+ debug: {
26
+ chatCompletion: () => process.env.DEBUG_AIHUBMIX_CHAT_COMPLETION === '1',
27
+ },
28
+ id: ModelProvider.AiHubMix,
29
+ models: async ({ client }) => {
30
+ const functionCallKeywords = [
31
+ 'gpt-4',
32
+ 'gpt-3.5',
33
+ 'claude',
34
+ 'gemini',
35
+ 'qwen',
36
+ 'deepseek',
37
+ 'llama',
38
+ ];
39
+
40
+ const visionKeywords = [
41
+ 'gpt-4o',
42
+ 'gpt-4-vision',
43
+ 'claude-3',
44
+ 'claude-4',
45
+ 'gemini-pro-vision',
46
+ 'qwen-vl',
47
+ 'llava',
48
+ ];
49
+
50
+ const reasoningKeywords = [
51
+ 'o1',
52
+ 'deepseek-r1',
53
+ 'qwq',
54
+ 'claude-opus-4',
55
+ 'claude-sonnet-4',
56
+ 'claude-3-5-sonnet',
57
+ 'claude-3-5-haiku',
58
+ ];
59
+
60
+ try {
61
+ const modelsPage = (await client.models.list()) as any;
62
+ const modelList: AiHubMixModelCard[] = modelsPage.data || [];
63
+
64
+ return modelList
65
+ .map((model) => {
66
+ const knownModel = AiHubMixModels.find(
67
+ (m) => model.id.toLowerCase() === m.id.toLowerCase(),
68
+ );
69
+
70
+ const modelId = model.id.toLowerCase();
71
+
72
+ return {
73
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
74
+ displayName: knownModel?.displayName ?? model.id,
75
+ enabled: knownModel?.enabled || false,
76
+ functionCall:
77
+ functionCallKeywords.some((keyword) => modelId.includes(keyword)) ||
78
+ knownModel?.abilities?.functionCall ||
79
+ false,
80
+ id: model.id,
81
+ reasoning:
82
+ reasoningKeywords.some((keyword) => modelId.includes(keyword)) ||
83
+ knownModel?.abilities?.reasoning ||
84
+ false,
85
+ vision:
86
+ visionKeywords.some((keyword) => modelId.includes(keyword)) ||
87
+ knownModel?.abilities?.vision ||
88
+ false,
89
+ };
90
+ })
91
+ .filter(Boolean) as ChatModelCard[];
92
+ } catch (error) {
93
+ console.warn(
94
+ 'Failed to fetch AiHubMix models. Please ensure your AiHubMix API key is valid:',
95
+ error,
96
+ );
97
+ return [];
98
+ }
99
+ },
100
+ routers: [
101
+ {
102
+ apiType: 'anthropic',
103
+ models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
104
+ (id) => id.startsWith('claude') || id.startsWith('kimi-k2'),
105
+ ),
106
+ options: { baseURL },
107
+ },
108
+ {
109
+ apiType: 'google',
110
+ models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter((id) => id.startsWith('gemini')),
111
+ options: { baseURL: urlJoin(baseURL, '/gemini') },
112
+ },
113
+ {
114
+ apiType: 'openai',
115
+ options: { baseURL: urlJoin(baseURL, '/v1') },
116
+ },
117
+ ],
118
+ });
@@ -10,7 +10,7 @@ export { LobeGroq } from './groq';
10
10
  export * from './helpers';
11
11
  export { LobeMinimaxAI } from './minimax';
12
12
  export { LobeMistralAI } from './mistral';
13
- export { default as AgentRuntime } from './ModelRuntime';
13
+ export { ModelRuntime } from './ModelRuntime';
14
14
  export { LobeMoonshotAI } from './moonshot';
15
15
  export { LobeOllamaAI } from './ollama';
16
16
  export { LobeOpenAI } from './openai';
@@ -49,8 +49,8 @@ export const LobeOpenRouterAI = createOpenAICompatibleRuntime({
49
49
  },
50
50
  constructorOptions: {
51
51
  defaultHeaders: {
52
- 'HTTP-Referer': 'https://chat-preview.lobehub.com',
53
- 'X-Title': 'Lobe Chat',
52
+ 'HTTP-Referer': 'https://lobehub.com',
53
+ 'X-Title': 'LobeHub',
54
54
  },
55
55
  },
56
56
  debug: {
@@ -1,5 +1,6 @@
1
1
  import { LobeAi21AI } from './ai21';
2
2
  import { LobeAi360AI } from './ai360';
3
+ import { LobeAiHubMixAI } from './aihubmix';
3
4
  import { LobeAnthropicAI } from './anthropic';
4
5
  import { LobeAzureOpenAI } from './azureOpenai';
5
6
  import { LobeAzureAI } from './azureai';
@@ -56,6 +57,7 @@ import { LobeZhipuAI } from './zhipu';
56
57
  export const providerRuntimeMap = {
57
58
  ai21: LobeAi21AI,
58
59
  ai360: LobeAi360AI,
60
+ aihubmix: LobeAiHubMixAI,
59
61
  anthropic: LobeAnthropicAI,
60
62
  azure: LobeAzureOpenAI,
61
63
  azureai: LobeAzureAI,
@@ -12,7 +12,7 @@ export const LobeSenseNovaAI = createOpenAICompatibleRuntime({
12
12
  baseURL: 'https://api.sensenova.cn/compatible-mode/v1',
13
13
  chatCompletion: {
14
14
  handlePayload: (payload) => {
15
- const { frequency_penalty, max_tokens, messages, model, temperature, top_p, ...rest } =
15
+ const { frequency_penalty, max_tokens, messages, model, temperature, thinking, top_p, ...rest } =
16
16
  payload;
17
17
 
18
18
  return {
@@ -33,6 +33,9 @@ export const LobeSenseNovaAI = createOpenAICompatibleRuntime({
33
33
  temperature !== undefined && temperature > 0 && temperature <= 2
34
34
  ? temperature
35
35
  : undefined,
36
+ thinking: thinking
37
+ ? (model && model.includes('-V6-5-') && thinking.type === 'enabled' ? { enabled: true } : { enabled: false })
38
+ : undefined,
36
39
  top_p: top_p !== undefined && top_p > 0 && top_p < 1 ? top_p : undefined,
37
40
  } as any;
38
41
  },
@@ -30,6 +30,7 @@ export interface CreateChatCompletionOptions {
30
30
  export enum ModelProvider {
31
31
  Ai21 = 'ai21',
32
32
  Ai360 = 'ai360',
33
+ AiHubMix = 'aihubmix',
33
34
  Anthropic = 'anthropic',
34
35
  Azure = 'azure',
35
36
  AzureAI = 'azureai',
@@ -192,6 +192,7 @@ export default {
192
192
  aesGcm: '您的秘钥与代理地址等将使用 <1>AES-GCM</1> 加密算法进行加密',
193
193
  apiKey: {
194
194
  desc: '请填写你的 {{name}} API Key',
195
+ descWithUrl: '请填写你的 {{name}} API Key,<3>点此获取</3>',
195
196
  placeholder: '{{name}} API Key',
196
197
  title: 'API Key',
197
198
  },