@lobehub/chat 1.107.6 → 1.108.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.env.example CHANGED
@@ -140,6 +140,11 @@ OPENAI_API_KEY=sk-xxxxxxxxx
 
 # INFINIAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 
+
+### 302.AI ###
+
+# AI302_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+
 ### ModelScope ###
 
 # MODELSCOPE_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+## [Version 1.108.0](https://github.com/lobehub/lobe-chat/compare/v1.107.6...v1.108.0)
+
+<sup>Released on **2025-08-05**</sup>
+
+#### ✨ Features
+
+- **misc**: Support 302ai provider.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **misc**: Support 302ai provider, closes [#8362](https://github.com/lobehub/lobe-chat/issues/8362) ([e172055](https://github.com/lobehub/lobe-chat/commit/e172055))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
 ### [Version 1.107.6](https://github.com/lobehub/lobe-chat/compare/v1.107.5...v1.107.6)
 
 <sup>Released on **2025-08-05**</sup>
package/Dockerfile CHANGED
@@ -248,6 +248,8 @@ ENV \
 TENCENT_CLOUD_API_KEY="" TENCENT_CLOUD_MODEL_LIST="" \
 # Infini-AI
 INFINIAI_API_KEY="" INFINIAI_MODEL_LIST=""
+# 302.AI
+AI302_API_KEY="" AI302_MODEL_LIST=""
 
 USER nextjs
 
@@ -289,7 +289,9 @@ ENV \
 # Tencent Cloud
 TENCENT_CLOUD_API_KEY="" TENCENT_CLOUD_MODEL_LIST="" \
 # Infini-AI
-INFINIAI_API_KEY="" INFINIAI_MODEL_LIST=""
+INFINIAI_API_KEY="" INFINIAI_MODEL_LIST="" \
+# 302.AI
+AI302_API_KEY="" AI302_MODEL_LIST=""
 
 USER nextjs
 
package/Dockerfile.pglite CHANGED
@@ -245,7 +245,9 @@ ENV \
 # Tencent Cloud
 TENCENT_CLOUD_API_KEY="" TENCENT_CLOUD_MODEL_LIST="" \
 # Infini-AI
-INFINIAI_API_KEY="" INFINIAI_MODEL_LIST=""
+INFINIAI_API_KEY="" INFINIAI_MODEL_LIST="" \
+# 302.AI
+AI302_API_KEY="" AI302_MODEL_LIST=""
 
 USER nextjs
 
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
 [
+  {
+    "children": {
+      "features": [
+        "Support 302ai provider."
+      ]
+    },
+    "date": "2025-08-05",
+    "version": "1.108.0"
+  },
   {
     "children": {
       "fixes": [
@@ -0,0 +1,45 @@
+---
+title: Using 302.AI in LobeChat
+description: Learn how to configure and use 302.AI's API Key in LobeChat to start conversations and interactions.
+tags:
+  - LobeChat
+  - 302.AI
+  - API Key
+  - Web UI
+---
+
+# Using 302.AI in LobeChat
+
+<Image cover src={'https://file.302.ai/gpt/imgs/20250722/c7a6ee9959a8490fa00481dae0fbb339.jpg'} />
+
+[302.AI](https://www.302.ai/) is a pay-as-you-go AI application platform that provides the most comprehensive AI APIs and AI online applications on the market.
+
+This article will guide you through using 302.AI in LobeChat.
+
+<Steps>
+  ### Step 1: Obtain a [302.AI](https://www.302.ai/) API Key
+
+  - Click `Get Started`, then register and log in to [302.AI](https://www.302.ai/)
+  - Click `API Keys` in the left sidebar
+  - Click `Add API KEY`, then copy and save the generated API key
+
+  <Image alt={'Get API Key'} inStep src={'https://file.302.ai/gpt/imgs/20250722/7a3597061d9a484ca7358867930a8316.jpg'} />
+
+  ### Step 2: Configure 302.AI in LobeChat
+
+  - Open LobeChat's `Settings` interface
+  - Find the `302.AI` configuration item under `Language Models`
+
+  <Image alt={'Enter API Key'} inStep src={'https://file.302.ai/gpt/imgs/20250722/b056ca4e63374668b7e3e093726fa6f0.jpg'} />
+
+  - Enter the API key you obtained
+  - Select a 302.AI model for your AI assistant to start the conversation
+
+  <Image alt={'Select 302.AI model and start conversation'} inStep src={'https://file.302.ai/gpt/imgs/20250722/c7a6ee9959a8490fa00481dae0fbb339.jpg'} />
+
+  <Callout type={'warning'}>
+    During usage, you may need to pay the API service provider; please refer to 302.AI's pricing policy.
+  </Callout>
+</Steps>
+
+You can now use 302.AI's models for conversations in LobeChat.
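The Web UI steps above are all that LobeChat needs. For reference, because 302.AI exposes an OpenAI-compatible endpoint (the new `ai302` runtime later in this diff uses `https://api.302.ai/v1` as its baseURL), the same key can also be exercised directly with the stock `openai` SDK. A minimal sketch, not part of the package; the model id `gpt-4o` is only an example taken from the provider card added below:

```ts
import OpenAI from 'openai';

// Minimal sketch: call 302.AI's OpenAI-compatible endpoint directly.
// Assumes AI302_API_KEY holds the key obtained in Step 1; the baseURL matches
// the one used by the new ai302 runtime further down in this diff.
const client = new OpenAI({
  apiKey: process.env.AI302_API_KEY,
  baseURL: 'https://api.302.ai/v1',
});

async function main() {
  const completion = await client.chat.completions.create({
    messages: [{ content: 'Hello from LobeChat!', role: 'user' }],
    model: 'gpt-4o', // example id; see https://302.ai/pricing/ for the full list
  });
  console.log(completion.choices[0]?.message?.content);
}

main().catch(console.error);
```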
@@ -0,0 +1,45 @@
+---
+title: 在 LobeChat 中使用 302.AI
+description: 学习如何在 LobeChat 中配置和使用 302.AI 的 API Key,以便开始对话和交互。
+tags:
+  - LobeChat
+  - 302.AI
+  - API密钥
+  - Web UI
+---
+
+# 在 LobeChat 中使用 302.AI
+
+<Image cover src={'https://file.302.ai/gpt/imgs/20250722/d346c796faa4443eb0bd4218f84205f6.jpg'} />
+
+[302.AI](https://www.302.ai/) 是一个按需付费的 AI 应用平台,提供市面上最全的 AI API 和 AI 在线应用。
+
+本文将指导你如何在 LobeChat 中使用 302.AI。
+
+<Steps>
+  ### 步骤一:获得 [302.AI](https://www.302.ai/) 的 API Key
+
+  - 点击 `开始使用`,注册并登录 [302.AI](https://www.302.ai/)
+  - 点击左侧的 `API Keys`
+  - 点击 `添加API KEY`,复制并保存生成的 API 密钥
+
+  <Image alt={'获取 API 密钥'} inStep src={'https://file.302.ai/gpt/imgs/20250722/01abd69fd61540489781fd963e504a04.jpg'} />
+
+  ### 步骤二:在 LobeChat 中配置 302.AI
+
+  - 访问 LobeChat 的`设置`界面
+  - 在`语言模型`下找到 `302.AI` 的设置项
+
+  <Image alt={'填入 API 密钥'} inStep src={'https://file.302.ai/gpt/imgs/20250722/5247463e74c742f79bef416bbb0722bf.jpg'} />
+
+  - 填入获得的 API 密钥
+  - 为你的 AI 助手选择一个 302.AI 的模型即可开始对话
+
+  <Image alt={'选择 302.AI 模型并开始对话'} inStep src={'https://file.302.ai/gpt/imgs/20250722/d346c796faa4443eb0bd4218f84205f6.jpg'} />
+
+  <Callout type={'warning'}>
+    在使用过程中你可能需要向 API 服务提供商付费,请参考 302.AI 的相关费用政策。
+  </Callout>
+</Steps>
+
+至此你已经可以在 LobeChat 中使用 302.AI 提供的模型进行对话了。
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.107.6",
+  "version": "1.108.0",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -38,6 +38,7 @@ export interface SearchEngineKeyVaults {
 
 export interface UserKeyVaults extends SearchEngineKeyVaults {
   ai21?: OpenAICompatibleKeyVault;
+  ai302?: OpenAICompatibleKeyVault;
   ai360?: OpenAICompatibleKeyVault;
   aihubmix?: OpenAICompatibleKeyVault;
   anthropic?: OpenAICompatibleKeyVault;
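The `ai302` slot added to `UserKeyVaults` stores the per-user credentials entered in the Settings UI. The `OpenAICompatibleKeyVault` shape is not shown in this diff; the sketch below assumes the usual `apiKey`/`baseURL` pair:

```ts
// Assumed shape (not shown in this diff): an OpenAI-compatible vault entry
// carries an API key and an optional base URL override.
interface OpenAICompatibleKeyVault {
  apiKey?: string;
  baseURL?: string;
}

// What a user's vault could look like after saving a 302.AI key in Settings.
const keyVaults: { ai302?: OpenAICompatibleKeyVault } = {
  ai302: {
    apiKey: 'sk-...',                 // key created on 302.AI
    baseURL: 'https://api.302.ai/v1', // optional override, assumed field
  },
};

console.log(Boolean(keyVaults.ai302?.apiKey)); // true once a key is configured
```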
@@ -2,6 +2,7 @@ import { useMemo } from 'react';
 
 import {
   Ai21ProviderCard,
+  Ai302ProviderCard,
   Ai360ProviderCard,
   AnthropicProviderCard,
   BaichuanProviderCard,
@@ -112,6 +113,7 @@ export const useProviderList = (): ProviderItem[] => {
     GiteeAIProviderCard,
     PPIOProviderCard,
     InfiniAIProviderCard,
+    Ai302ProviderCard,
   ],
   [
     AzureProvider,
@@ -0,0 +1,148 @@
+import { AIChatModelCard } from '@/types/aiModel';
+
+const ai302ChatModels: AIChatModelCard[] = [
+  {
+    contextWindowTokens: 32_000,
+    displayName: 'deepseek-chat',
+    enabled: true,
+    id: 'deepseek-chat',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 128_000,
+    displayName: 'chatgpt-4o-latest',
+    enabled: true,
+    id: 'chatgpt-4o-latest',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 128_000,
+    displayName: 'llama3.3-70b',
+    enabled: true,
+    id: 'llama3.3-70b',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 64_000,
+    displayName: 'deepseek-reasoner',
+    enabled: true,
+    id: 'deepseek-reasoner',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 1_000_000,
+    displayName: 'gemini-2.0-flash',
+    enabled: true,
+    id: 'gemini-2.0-flash',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 200_000,
+    displayName: 'claude-3-7-sonnet-20250219',
+    enabled: true,
+    id: 'claude-3-7-sonnet-20250219',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 200_000,
+    displayName: 'claude-3-7-sonnet-latest',
+    enabled: true,
+    id: 'claude-3-7-sonnet-latest',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 131_072,
+    displayName: 'grok-3-beta',
+    enabled: true,
+    id: 'grok-3-beta',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 131_072,
+    displayName: 'grok-3-mini-beta',
+    enabled: true,
+    id: 'grok-3-mini-beta',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 1_000_000,
+    displayName: 'gpt-4.1',
+    enabled: true,
+    id: 'gpt-4.1',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 200_000,
+    displayName: 'o3',
+    enabled: true,
+    id: 'o3',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 200_000,
+    displayName: 'o4-mini',
+    enabled: true,
+    id: 'o4-mini',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 128_000,
+    displayName: 'qwen3-235b-a22b',
+    enabled: true,
+    id: 'qwen3-235b-a22b',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 128_000,
+    displayName: 'qwen3-32b',
+    enabled: true,
+    id: 'qwen3-32b',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 1_000_000,
+    displayName: 'gemini-2.5-pro-preview-05-06',
+    enabled: true,
+    id: 'gemini-2.5-pro-preview-05-06',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 128_000,
+    displayName: 'llama-4-maverick',
+    enabled: true,
+    id: 'llama-4-maverick',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 1_000_000,
+    displayName: 'gemini-2.5-flash',
+    enabled: true,
+    id: 'gemini-2.5-flash',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 200_000,
+    displayName: 'claude-sonnet-4-20250514',
+    enabled: true,
+    id: 'claude-sonnet-4-20250514',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 200_000,
+    displayName: 'claude-opus-4-20250514',
+    enabled: true,
+    id: 'claude-opus-4-20250514',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 1_000_000,
+    displayName: 'gemini-2.5-pro',
+    enabled: true,
+    id: 'gemini-2.5-pro',
+    type: 'chat',
+  },
+];
+
+export const allModels = [...ai302ChatModels];
+
+export default allModels;
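The new `ai302` model list is plain data, so downstream code can slice it however it needs. A small sketch (using a local stand-in type rather than the real `AIChatModelCard` import) of picking the enabled long-context entries:

```ts
// Local stand-in for the fields used above (the real type is AIChatModelCard).
interface ChatModelEntry {
  contextWindowTokens: number;
  displayName: string;
  enabled: boolean;
  id: string;
  type: 'chat';
}

// Two sample entries copied from the list above.
const ai302ChatModels: ChatModelEntry[] = [
  { contextWindowTokens: 200_000, displayName: 'o3', enabled: true, id: 'o3', type: 'chat' },
  { contextWindowTokens: 1_000_000, displayName: 'gemini-2.5-pro', enabled: true, id: 'gemini-2.5-pro', type: 'chat' },
];

// Enabled models that can hold at least 200k tokens of context.
const longContextIds = ai302ChatModels
  .filter((m) => m.enabled && m.contextWindowTokens >= 200_000)
  .map((m) => m.id);

console.log(longContextIds); // ['o3', 'gemini-2.5-pro']
```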
@@ -1,6 +1,7 @@
 import { AiFullModelCard, LobeDefaultAiModelListItem } from '@/types/aiModel';
 
 import { default as ai21 } from './ai21';
+import { default as ai302 } from './ai302';
 import { default as ai360 } from './ai360';
 import { default as aihubmix } from './aihubmix';
 import { default as anthropic } from './anthropic';
@@ -78,6 +79,7 @@ const buildDefaultModelList = (map: ModelsMap): LobeDefaultAiModelListItem[] =>
 
 export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
   ai21,
+  ai302,
   ai360,
   aihubmix,
   anthropic,
@@ -136,6 +138,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
 });
 
 export { default as ai21 } from './ai21';
+export { default as ai302 } from './ai302';
 export { default as ai360 } from './ai360';
 export { default as aihubmix } from './aihubmix';
 export { default as anthropic } from './anthropic';
package/src/config/llm.ts CHANGED
@@ -172,6 +172,10 @@ export const getLLMConfig = () => {
       ENABLED_V0: z.boolean(),
       V0_API_KEY: z.string().optional(),
 
+
+      ENABLED_AI302: z.boolean(),
+      AI302_API_KEY: z.string().optional(),
+
       ENABLED_AIHUBMIX: z.boolean(),
       AIHUBMIX_API_KEY: z.string().optional(),
     },
@@ -344,6 +348,9 @@ export const getLLMConfig = () => {
       ENABLED_V0: !!process.env.V0_API_KEY,
      V0_API_KEY: process.env.V0_API_KEY,
 
+      ENABLED_AI302: !!process.env.AI302_API_KEY,
+      AI302_API_KEY: process.env.AI302_API_KEY,
+
       ENABLED_AIHUBMIX: !!process.env.AIHUBMIX_API_KEY,
       AIHUBMIX_API_KEY: process.env.AIHUBMIX_API_KEY,
     },
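The config change follows the same convention as every other provider in `llm.ts`: the zod schema declares an `ENABLED_AI302`/`AI302_API_KEY` pair, and the provider is switched on whenever the key is present in the environment. A reduced sketch of just the 302.AI fields:

```ts
import { z } from 'zod';

// Reduced to the two 302.AI fields added above; the real schema in llm.ts
// declares one such pair per provider.
const ai302Schema = z.object({
  AI302_API_KEY: z.string().optional(),
  ENABLED_AI302: z.boolean(),
});

const ai302Config = ai302Schema.parse({
  AI302_API_KEY: process.env.AI302_API_KEY,
  // Enabled exactly when a key is supplied, mirroring ENABLED_AI302: !!process.env.AI302_API_KEY.
  ENABLED_AI302: !!process.env.AI302_API_KEY,
});

console.log(ai302Config.ENABLED_AI302); // true only if AI302_API_KEY is set
```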
@@ -0,0 +1,149 @@
+import { ModelProviderCard } from '@/types/llm';
+
+// ref: https://302.ai/pricing/
+const Ai302: ModelProviderCard = {
+  chatModels: [
+    {
+      contextWindowTokens: 32_000,
+      displayName: 'deepseek-chat',
+      enabled: true,
+      id: 'deepseek-chat',
+    },
+    {
+      contextWindowTokens: 128_000,
+      displayName: 'gpt-4o',
+      enabled: true,
+      id: 'gpt-4o',
+    },
+    {
+      contextWindowTokens: 128_000,
+      displayName: 'chatgpt-4o-latest',
+      enabled: true,
+      id: 'chatgpt-4o-latest',
+    },
+    {
+      contextWindowTokens: 128_000,
+      displayName: 'llama3.3-70b',
+      enabled: true,
+      id: 'llama3.3-70b',
+    },
+    {
+      contextWindowTokens: 64_000,
+      displayName: 'deepseek-reasoner',
+      enabled: true,
+      id: 'deepseek-reasoner',
+    },
+    {
+      contextWindowTokens: 1_000_000,
+      displayName: 'gemini-2.0-flash',
+      enabled: true,
+      id: 'gemini-2.0-flash',
+    },
+    {
+      contextWindowTokens: 200_000,
+      displayName: 'claude-3-7-sonnet-20250219',
+      enabled: true,
+      id: 'claude-3-7-sonnet-20250219',
+    },
+    {
+      contextWindowTokens: 200_000,
+      displayName: 'claude-3-7-sonnet-latest',
+      enabled: true,
+      id: 'claude-3-7-sonnet-latest',
+    },
+    {
+      contextWindowTokens: 131_072,
+      displayName: 'grok-3-beta',
+      enabled: true,
+      id: 'grok-3-beta',
+    },
+    {
+      contextWindowTokens: 131_072,
+      displayName: 'grok-3-mini-beta',
+      enabled: true,
+      id: 'grok-3-mini-beta',
+    },
+    {
+      contextWindowTokens: 1_000_000,
+      displayName: 'gpt-4.1',
+      enabled: true,
+      id: 'gpt-4.1',
+    },
+    {
+      contextWindowTokens: 200_000,
+      displayName: 'o3',
+      enabled: true,
+      id: 'o3',
+    },
+    {
+      contextWindowTokens: 200_000,
+      displayName: 'o4-mini',
+      enabled: true,
+      id: 'o4-mini',
+    },
+    {
+      contextWindowTokens: 128_000,
+      displayName: 'qwen3-235b-a22b',
+      enabled: true,
+      id: 'qwen3-235b-a22b',
+    },
+    {
+      contextWindowTokens: 128_000,
+      displayName: 'qwen3-32b',
+      enabled: true,
+      id: 'qwen3-32b',
+    },
+    {
+      contextWindowTokens: 1_000_000,
+      displayName: 'gemini-2.5-pro-preview-05-06',
+      enabled: true,
+      id: 'gemini-2.5-pro-preview-05-06',
+    },
+    {
+      contextWindowTokens: 128_000,
+      displayName: 'llama-4-maverick',
+      enabled: true,
+      id: 'llama-4-maverick',
+    },
+    {
+      contextWindowTokens: 1_000_000,
+      displayName: 'gemini-2.5-flash',
+      enabled: true,
+      id: 'gemini-2.5-flash',
+    },
+    {
+      contextWindowTokens: 200_000,
+      displayName: 'claude-sonnet-4-20250514',
+      enabled: true,
+      id: 'claude-sonnet-4-20250514',
+    },
+    {
+      contextWindowTokens: 200_000,
+      displayName: 'claude-opus-4-20250514',
+      enabled: true,
+      id: 'claude-opus-4-20250514',
+    },
+    {
+      contextWindowTokens: 1_000_000,
+      displayName: 'gemini-2.5-pro',
+      enabled: true,
+      id: 'gemini-2.5-pro',
+    },
+  ],
+  checkModel: 'gpt-4o',
+  description: '302.AI 是一个按需付费的 AI 应用平台,提供市面上最全的 AI API 和 AI 在线应用',
+  id: 'ai302',
+  modelList: { showModelFetcher: true },
+  modelsUrl: 'https://302.ai/pricing/',
+  name: '302.AI',
+  settings: {
+    proxyUrl: {
+      placeholder: 'https://api.302.ai/v1',
+    },
+    sdkType: 'openai',
+    showModelFetcher: true,
+  },
+  url: 'https://302.ai',
+};
+
+export default Ai302;
@@ -1,6 +1,7 @@
 import { ChatModelCard, ModelProviderCard } from '@/types/llm';
 
 import Ai21Provider from './ai21';
+import Ai302Provider from './ai302';
 import Ai360Provider from './ai360';
 import AiHubMixProvider from './aihubmix';
 import AnthropicProvider from './anthropic';
@@ -169,6 +170,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   Search1APIProvider,
   InfiniAIProvider,
   QiniuProvider,
+  Ai302Provider,
 ];
 
 export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -181,6 +183,7 @@ export const isProviderDisableBrowserRequest = (id: string) => {
 };
 
 export { default as Ai21ProviderCard } from './ai21';
+export { default as Ai302ProviderCard } from './ai302';
 export { default as Ai360ProviderCard } from './ai360';
 export { default as AiHubMixProviderCard } from './aihubmix';
 export { default as AnthropicProviderCard } from './anthropic';
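`Ai302Provider` is appended to `DEFAULT_MODEL_PROVIDER_LIST`, and `filterEnabledModels` (whose body falls outside this hunk) presumably reduces a provider card to the ids of its enabled models. A hypothetical re-implementation, for illustration only:

```ts
// Hypothetical stand-ins; the real types come from '@/types/llm'.
interface ChatModelCardLike {
  enabled?: boolean;
  id: string;
}
interface ModelProviderCardLike {
  chatModels: ChatModelCardLike[];
  id: string;
}

// Illustrative re-implementation of what a filter like filterEnabledModels
// presumably does: keep only the ids of models flagged as enabled.
const filterEnabledModels = (provider: ModelProviderCardLike): string[] =>
  provider.chatModels.filter((m) => m.enabled).map((m) => m.id);

const ai302Like: ModelProviderCardLike = {
  chatModels: [
    { enabled: true, id: 'gpt-4o' },
    { enabled: false, id: 'some-disabled-model' }, // hypothetical entry
  ],
  id: 'ai302',
};

console.log(filterEnabledModels(ai302Like)); // ['gpt-4o']
```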
@@ -0,0 +1,46 @@
+import { AgentRuntimeErrorType } from '../error';
+import { ChatCompletionErrorPayload, ModelProvider } from '../types';
+import { processMultiProviderModelList } from '../utils/modelParse';
+import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
+
+export interface Ai302ModelCard {
+  id: string;
+}
+
+export const Lobe302AI = createOpenAICompatibleRuntime({
+  baseURL: 'https://api.302.ai/v1',
+  chatCompletion: {
+    handleError: (error: any): Omit<ChatCompletionErrorPayload, 'provider'> | undefined => {
+      let errorResponse: Response | undefined;
+      if (error instanceof Response) {
+        errorResponse = error;
+      } else if ('status' in (error as any)) {
+        errorResponse = error as Response;
+      }
+      if (errorResponse && errorResponse.status === 401) {
+        return {
+          error: errorResponse.status,
+          errorType: AgentRuntimeErrorType.InvalidProviderAPIKey,
+        };
+      }
+
+      return {
+        error,
+      };
+    },
+  },
+  debug: {
+    chatCompletion: () => process.env.DEBUG_SILICONCLOUD_CHAT_COMPLETION === '1',
+  },
+  errorType: {
+    bizError: AgentRuntimeErrorType.ProviderBizError,
+    invalidAPIKey: AgentRuntimeErrorType.InvalidProviderAPIKey,
+  },
+  models: async ({ client }) => {
+    const modelsPage = (await client.models.list()) as any;
+    const modelList: Ai302ModelCard[] = modelsPage.data;
+
+    return processMultiProviderModelList(modelList);
+  },
+  provider: ModelProvider.Ai302,
+});
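The `models` callback above hands the raw `/v1/models` response to `processMultiProviderModelList`. Outside of LobeChat, the same listing can be reproduced with the stock `openai` SDK; a minimal sketch assuming the `AI302_API_KEY` environment variable is set:

```ts
import OpenAI from 'openai';

// Minimal sketch of the listing the runtime's `models` callback performs:
// GET /v1/models against 302.AI, then collect the raw model ids.
const client = new OpenAI({
  apiKey: process.env.AI302_API_KEY,
  baseURL: 'https://api.302.ai/v1',
});

async function listAi302ModelIds(): Promise<string[]> {
  const page = await client.models.list(); // page.data holds the model entries
  return page.data.map((model) => model.id);
}

listAi302ModelIds().then((ids) => console.log(ids)).catch(console.error);
```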
@@ -1,4 +1,5 @@
 import { LobeAi21AI } from './ai21';
+import { Lobe302AI } from './ai302';
 import { LobeAi360AI } from './ai360';
 import { LobeAiHubMixAI } from './aihubmix';
 import { LobeAnthropicAI } from './anthropic';
@@ -56,6 +57,7 @@ import { LobeZhipuAI } from './zhipu';
 
 export const providerRuntimeMap = {
   ai21: LobeAi21AI,
+  ai302: Lobe302AI,
   ai360: LobeAi360AI,
   aihubmix: LobeAiHubMixAI,
   anthropic: LobeAnthropicAI,
@@ -29,6 +29,7 @@ export interface CreateChatCompletionOptions {
 
 export enum ModelProvider {
   Ai21 = 'ai21',
+  Ai302 = 'ai302',
   Ai360 = 'ai360',
   AiHubMix = 'aihubmix',
   Anthropic = 'anthropic',