@lobehub/chat 0.155.9 → 0.156.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/CHANGELOG.md +79 -0
  2. package/Dockerfile +1 -1
  3. package/locales/ar/modelProvider.json +2 -17
  4. package/locales/ar/setting.json +4 -1
  5. package/locales/bg-BG/modelProvider.json +2 -17
  6. package/locales/bg-BG/setting.json +4 -1
  7. package/locales/de-DE/modelProvider.json +2 -17
  8. package/locales/de-DE/setting.json +4 -1
  9. package/locales/en-US/modelProvider.json +2 -17
  10. package/locales/en-US/setting.json +4 -1
  11. package/locales/es-ES/modelProvider.json +2 -17
  12. package/locales/es-ES/setting.json +4 -1
  13. package/locales/fr-FR/modelProvider.json +2 -17
  14. package/locales/fr-FR/setting.json +4 -1
  15. package/locales/it-IT/modelProvider.json +2 -17
  16. package/locales/it-IT/setting.json +4 -1
  17. package/locales/ja-JP/modelProvider.json +2 -17
  18. package/locales/ja-JP/setting.json +4 -1
  19. package/locales/ko-KR/modelProvider.json +2 -17
  20. package/locales/ko-KR/setting.json +4 -1
  21. package/locales/nl-NL/modelProvider.json +2 -17
  22. package/locales/nl-NL/setting.json +4 -1
  23. package/locales/pl-PL/modelProvider.json +2 -17
  24. package/locales/pl-PL/setting.json +4 -1
  25. package/locales/pt-BR/modelProvider.json +2 -17
  26. package/locales/pt-BR/setting.json +4 -1
  27. package/locales/ru-RU/modelProvider.json +2 -17
  28. package/locales/ru-RU/setting.json +4 -1
  29. package/locales/tr-TR/modelProvider.json +2 -17
  30. package/locales/tr-TR/setting.json +4 -1
  31. package/locales/vi-VN/modelProvider.json +2 -17
  32. package/locales/vi-VN/setting.json +4 -1
  33. package/locales/zh-CN/error.json +1 -1
  34. package/locales/zh-CN/modelProvider.json +3 -18
  35. package/locales/zh-CN/setting.json +4 -1
  36. package/locales/zh-TW/modelProvider.json +2 -17
  37. package/locales/zh-TW/setting.json +4 -1
  38. package/package.json +1 -1
  39. package/src/app/(main)/settings/llm/Anthropic/index.tsx +3 -1
  40. package/src/app/(main)/settings/llm/Google/index.tsx +3 -1
  41. package/src/app/(main)/settings/llm/Groq/index.tsx +3 -0
  42. package/src/app/(main)/settings/llm/Ollama/index.tsx +8 -4
  43. package/src/app/(main)/settings/llm/OpenAI/index.tsx +5 -1
  44. package/src/app/(main)/settings/llm/Perplexity/index.tsx +3 -0
  45. package/src/app/(main)/settings/llm/components/ProviderConfig/index.tsx +12 -7
  46. package/src/app/api/chat/agentRuntime.ts +30 -35
  47. package/src/config/modelProviders/groq.ts +6 -6
  48. package/src/config/modelProviders/togetherai.ts +0 -22
  49. package/src/config/server/provider.ts +4 -0
  50. package/src/libs/agent-runtime/anthropic/index.ts +28 -13
  51. package/src/libs/agent-runtime/azureOpenai/index.test.ts +53 -0
  52. package/src/libs/agent-runtime/azureOpenai/index.ts +29 -2
  53. package/src/libs/agent-runtime/groq/index.ts +7 -0
  54. package/src/libs/agent-runtime/perplexity/index.test.ts +2 -2
  55. package/src/libs/agent-runtime/perplexity/index.ts +28 -76
  56. package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts +64 -1
  57. package/src/libs/agent-runtime/utils/anthropicHelpers.ts +18 -2
  58. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +12 -1
  59. package/src/locales/default/error.ts +1 -1
  60. package/src/locales/default/modelProvider.ts +2 -18
  61. package/src/locales/default/setting.ts +4 -1
@@ -14,7 +14,9 @@ const AnthropicProvider = memo(() => {
     <ProviderConfig
       checkModel={'claude-3-haiku-20240307'}
       provider={ModelProvider.Anthropic}
-      showEndpoint
+      proxyUrl={{
+        placeholder: 'https://api.anthropic.com',
+      }}
       title={<Anthropic.Text color={isDarkMode ? undefined : Claude.colorPrimary} size={15} />}
     />
   );
@@ -14,7 +14,9 @@ const GoogleProvider = memo(() => {
     <ProviderConfig
       checkModel={'gemini-pro'}
       provider={ModelProvider.Google}
-      showEndpoint
+      proxyUrl={{
+        placeholder: 'https://generativelanguage.googleapis.com',
+      }}
       title={
         <Flexbox align={'center'} gap={8} horizontal>
           <Google.BrandColor size={22} />
@@ -15,6 +15,9 @@ const GroqProvider = memo(() => {
     <ProviderConfig
       checkModel={'gemma-7b-it'}
       provider={ModelProvider.Groq}
+      proxyUrl={{
+        placeholder: 'https://api.groq.com/openai/v1',
+      }}
       title={<Groq.Text color={theme.isDarkMode ? theme.colorText : Groq.colorPrimary} size={20} />}
     />
   );
@@ -10,21 +10,25 @@ import ProviderConfig from '../components/ProviderConfig';
 import Checker from './Checker';
 
 const OllamaProvider = memo(() => {
-  const { t } = useTranslation('setting');
+  const { t } = useTranslation('modelProvider');
 
   return (
     <ProviderConfig
       checkerItem={{
         children: <Checker />,
-        desc: t('llm.checker.ollamaDesc'),
-        label: t('llm.checker.title'),
+        desc: t('ollama.checker.desc'),
+        label: t('ollama.checker.title'),
         minWidth: undefined,
       }}
       modelList={{ showModelFetcher: true }}
       provider={ModelProvider.Ollama}
+      proxyUrl={{
+        desc: t('ollama.endpoint.desc'),
+        placeholder: 'http://127.0.0.1:11434',
+        title: t('ollama.endpoint.title'),
+      }}
       showApiKey={false}
       showBrowserRequest
-      showEndpoint
       title={<Ollama.Combine size={28} />}
     />
   );
@@ -14,9 +14,13 @@ const OpenAIProvider = memo(() => {
     <ProviderConfig
       modelList={{ showModelFetcher: true }}
       provider={'openai'}
+      proxyUrl={
+        showOpenAIProxyUrl && {
+          placeholder: 'https://api.openai.com/v1',
+        }
+      }
       showApiKey={showOpenAIApiKey}
       showBrowserRequest
-      showEndpoint={showOpenAIProxyUrl}
       title={<OpenAI.Combine size={24} />}
     />
   );
@@ -12,6 +12,9 @@ const PerplexityProvider = memo(() => {
     <ProviderConfig
       checkModel={'pplx-7b-chat'}
       provider={ModelProvider.Perplexity}
+      proxyUrl={{
+        placeholder: 'https://api.perplexity.ai',
+      }}
       title={<Perplexity.Combine size={24} type={'color'} />}
     />
   );
@@ -57,9 +57,15 @@ interface ProviderConfigProps {
     showModelFetcher?: boolean;
   };
   provider: GlobalLLMProviderKey;
+  proxyUrl?:
+    | {
+        desc?: string;
+        placeholder: string;
+        title?: string;
+      }
+    | false;
   showApiKey?: boolean;
   showBrowserRequest?: boolean;
-  showEndpoint?: boolean;
   title: ReactNode;
 }
 
@@ -67,7 +73,7 @@ const ProviderConfig = memo<ProviderConfigProps>(
   ({
     apiKeyItems,
     provider,
-    showEndpoint,
+    proxyUrl,
     showApiKey = true,
     checkModel,
     canDeactivate = true,
@@ -112,14 +118,13 @@ const ProviderConfig = memo<ProviderConfigProps>(
       },
     ];
 
+    const showEndpoint = !!proxyUrl;
     const formItems = [
       ...apiKeyItem,
       showEndpoint && {
-        children: (
-          <Input allowClear placeholder={modelT(`${provider}.endpoint.placeholder` as any)} />
-        ),
-        desc: modelT(`${provider}.endpoint.desc` as any),
-        label: modelT(`${provider}.endpoint.title` as any),
+        children: <Input allowClear placeholder={proxyUrl?.placeholder} />,
+        desc: proxyUrl?.desc || t('llm.proxyUrl.desc'),
+        label: proxyUrl?.title || t('llm.proxyUrl.title'),
         name: [LLMProviderConfigKey, provider, LLMProviderBaseUrlKey],
       },
       (showBrowserRequest || (showEndpoint && isProviderEndpointNotEmpty)) && {
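
The three ProviderConfig hunks above retire the boolean `showEndpoint` prop: a provider now opts into the endpoint field by passing a `proxyUrl` object (`placeholder` required; `desc` and `title` optional, falling back to the shared `llm.proxyUrl.*` strings), or hides it with `false`/undefined. A minimal sketch of the new call-site contract, using an invented provider page purely for illustration:

    // Hypothetical call site; memo, ProviderConfig and ModelProvider are
    // assumed to be imported as in the real settings pages above.
    const ExampleProvider = memo(() => (
      <ProviderConfig
        provider={ModelProvider.Perplexity}
        proxyUrl={{
          // placeholder is the only required field
          placeholder: 'https://api.example-proxy.com/v1',
          // desc/title omitted, so t('llm.proxyUrl.desc') / t('llm.proxyUrl.title') apply
        }}
        title={'Example'}
      />
    ));
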
@@ -85,76 +85,71 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
         accessKeySecret = payload?.awsSecretAccessKey;
         region = payload?.awsRegion;
       }
-      return {
-        accessKeyId,
-        accessKeySecret,
-        region,
-      };
+      return { accessKeyId, accessKeySecret, region };
     }
     case ModelProvider.Ollama: {
       const { OLLAMA_PROXY_URL } = getServerConfig();
       const baseURL = payload?.endpoint || OLLAMA_PROXY_URL;
-      return {
-        baseURL,
-      };
+      return { baseURL };
     }
     case ModelProvider.Perplexity: {
-      const { PERPLEXITY_API_KEY } = getServerConfig();
+      const { PERPLEXITY_API_KEY, PERPLEXITY_PROXY_URL } = getServerConfig();
+
       const apiKey = apiKeyManager.pick(payload?.apiKey || PERPLEXITY_API_KEY);
-      return {
-        apiKey,
-      };
+      const baseURL = payload?.endpoint || PERPLEXITY_PROXY_URL;
+
+      return { apiKey, baseURL };
     }
     case ModelProvider.Anthropic: {
       const { ANTHROPIC_API_KEY, ANTHROPIC_PROXY_URL } = getServerConfig();
+
       const apiKey = apiKeyManager.pick(payload?.apiKey || ANTHROPIC_API_KEY);
       const baseURL = payload?.endpoint || ANTHROPIC_PROXY_URL;
-      return {
-        apiKey,
-        baseURL,
-      };
+
+      return { apiKey, baseURL };
     }
     case ModelProvider.Minimax: {
       const { MINIMAX_API_KEY } = getServerConfig();
+
       const apiKey = apiKeyManager.pick(payload?.apiKey || MINIMAX_API_KEY);
-      return {
-        apiKey,
-      };
+
+      return { apiKey };
     }
     case ModelProvider.Mistral: {
       const { MISTRAL_API_KEY } = getServerConfig();
+
       const apiKey = apiKeyManager.pick(payload?.apiKey || MISTRAL_API_KEY);
-      return {
-        apiKey,
-      };
+
+      return { apiKey };
     }
     case ModelProvider.Groq: {
-      const { GROQ_API_KEY } = getServerConfig();
+      const { GROQ_API_KEY, GROQ_PROXY_URL } = getServerConfig();
+
       const apiKey = apiKeyManager.pick(payload?.apiKey || GROQ_API_KEY);
-      return {
-        apiKey,
-      };
+      const baseURL = payload?.endpoint || GROQ_PROXY_URL;
+
+      return { apiKey, baseURL };
     }
     case ModelProvider.OpenRouter: {
       const { OPENROUTER_API_KEY } = getServerConfig();
+
       const apiKey = apiKeyManager.pick(payload?.apiKey || OPENROUTER_API_KEY);
-      return {
-        apiKey,
-      };
+
+      return { apiKey };
     }
     case ModelProvider.TogetherAI: {
       const { TOGETHERAI_API_KEY } = getServerConfig();
+
       const apiKey = apiKeyManager.pick(payload?.apiKey || TOGETHERAI_API_KEY);
-      return {
-        apiKey,
-      };
+
+      return { apiKey };
     }
     case ModelProvider.ZeroOne: {
       const { ZEROONE_API_KEY } = getServerConfig();
+
       const apiKey = apiKeyManager.pick(payload?.apiKey || ZEROONE_API_KEY);
-      return {
-        apiKey,
-      };
+
+      return { apiKey };
     }
   }
 };
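
With these changes, Groq and Perplexity join Ollama and Anthropic in resolving the base URL the same way everywhere: the endpoint from the client JWT payload wins, then the server-side `*_PROXY_URL`, and an `undefined` result lets the runtime keep its built-in default. A reduced sketch of the repeated pattern (the helper name is invented for illustration):

    // Mirrors the `payload?.endpoint || XXX_PROXY_URL` expression used per provider.
    const resolveBaseURL = (
      payloadEndpoint?: string,
      serverProxyUrl?: string,
    ): string | undefined => payloadEndpoint || serverProxyUrl;

    // e.g. for Groq: resolveBaseURL(payload?.endpoint, GROQ_PROXY_URL) returns
    // undefined when neither is set, so the runtime falls back to its default
    // 'https://api.groq.com/openai/v1'.
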
@@ -3,6 +3,12 @@ import { ModelProviderCard } from '@/types/llm';
 // ref https://console.groq.com/docs/models
 const Groq: ModelProviderCard = {
   chatModels: [
+    {
+      displayName: 'LLaMA3-3-70B',
+      enabled: true,
+      id: 'llama3-70b-8192',
+      tokens: 8192,
+    },
     {
       displayName: 'Mixtral-8x7b-Instruct-v0.1',
       enabled: true,
@@ -21,12 +27,6 @@ const Groq: ModelProviderCard = {
       id: 'llama3-8b-8192',
       tokens: 8192,
     },
-    {
-      displayName: 'LLaMA3-3-70B',
-      enabled: true,
-      id: 'llama3-70b-8192',
-      tokens: 8192,
-    },
     {
       displayName: 'LLaMA2-70b-chat',
       id: 'llama2-70b-4096',
@@ -6,90 +6,68 @@ const TogetherAI: ModelProviderCard = {
     {
       displayName: 'Deepseek Coder Instruct (33B)',
       enabled: true,
-      functionCall: false,
       id: 'deepseek-ai/deepseek-coder-33b-instruct',
       tokens: 16_384,
-      vision: false,
     },
     {
       displayName: 'Phind Code LLaMA v2 (34B)',
       enabled: true,
-      functionCall: false,
       id: 'Phind/Phind-CodeLlama-34B-v2',
       tokens: 16_384,
-      vision: false,
     },
     {
       displayName: 'Gemma Instruct (2B)',
       enabled: true,
-      functionCall: false,
       id: 'google/gemma-2b-it',
       tokens: 8192,
-      vision: false,
     },
     {
       displayName: 'LLaMA-2 Chat (13B)',
       enabled: true,
-      functionCall: false,
       id: 'meta-llama/Llama-2-13b-chat-hf',
       tokens: 4096,
-      vision: false,
     },
     {
       displayName: '01-ai Yi Chat (34B)',
       enabled: true,
-      functionCall: false,
       id: 'zero-one-ai/Yi-34B-Chat',
       tokens: 4096,
-      vision: false,
     },
     {
       displayName: 'Mixtral-8x7B Instruct (46.7B)',
       enabled: true,
-      functionCall: false,
       id: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
       tokens: 32_768,
-      vision: false,
     },
     {
       displayName: 'Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B)',
       enabled: true,
-      functionCall: false,
       id: 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
       tokens: 32_768,
-      vision: false,
     },
     {
       displayName: 'Nous Hermes-2 Yi (34B)',
       enabled: true,
-      functionCall: false,
       id: 'NousResearch/Nous-Hermes-2-Yi-34B',
       tokens: 4096,
-      vision: false,
     },
     {
       displayName: 'Qwen 1.5 Chat (7B)',
       enabled: true,
-      functionCall: false,
       id: 'Qwen/Qwen1.5-7B-Chat',
       tokens: 32_768,
-      vision: false,
     },
     {
       displayName: 'Qwen 1.5 Chat (14B)',
       enabled: true,
-      functionCall: false,
       id: 'Qwen/Qwen1.5-14B-Chat',
       tokens: 32_768,
-      vision: false,
     },
     {
       displayName: 'Qwen 1.5 Chat (72B)',
       enabled: true,
-      functionCall: false,
       id: 'Qwen/Qwen1.5-72B-Chat',
       tokens: 32_768,
-      vision: false,
     },
   ],
   id: 'togetherai',
@@ -37,6 +37,7 @@ declare global {
     // Perplexity Provider
     ENABLED_PERPLEXITY?: string;
     PERPLEXITY_API_KEY?: string;
+    PERPLEXITY_PROXY_URL?: string;
 
     // Anthropic Provider
     ENABLED_ANTHROPIC?: string;
@@ -54,6 +55,7 @@ declare global {
     // Groq Provider
     ENABLED_GROQ?: string;
     GROQ_API_KEY?: string;
+    GROQ_PROXY_URL?: string;
 
     // OpenRouter Provider
     ENABLED_OPENROUTER?: string;
@@ -172,6 +174,7 @@ export const getProviderConfig = () => {
 
     ENABLED_PERPLEXITY: !!PERPLEXITY_API_KEY,
     PERPLEXITY_API_KEY,
+    PERPLEXITY_PROXY_URL: process.env.PERPLEXITY_PROXY_URL,
 
     ENABLED_ANTHROPIC: !!ANTHROPIC_API_KEY,
     ANTHROPIC_API_KEY,
@@ -197,6 +200,7 @@ export const getProviderConfig = () => {
     MOONSHOT_PROXY_URL: process.env.MOONSHOT_PROXY_URL,
 
     ENABLED_GROQ: !!GROQ_API_KEY,
+    GROQ_PROXY_URL: process.env.GROQ_PROXY_URL,
     GROQ_API_KEY,
 
     ENABLED_ZEROONE: !!ZEROONE_API_KEY,
@@ -27,21 +27,11 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
   }
 
   async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
-    const { messages, model, max_tokens, temperature, top_p } = payload;
-    const system_message = messages.find((m) => m.role === 'system');
-    const user_messages = messages.filter((m) => m.role !== 'system');
-
     try {
+      const anthropicPayload = this.buildAnthropicPayload(payload);
+
       const response = await this.client.messages.create(
-        {
-          max_tokens: max_tokens || 4096,
-          messages: buildAnthropicMessages(user_messages),
-          model: model,
-          stream: true,
-          system: system_message?.content as string,
-          temperature: temperature,
-          top_p: top_p,
-        },
+        { ...anthropicPayload, stream: true },
         { signal: options?.signal },
       );
 
@@ -71,6 +61,15 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
           provider: ModelProvider.Anthropic,
         });
       }
+
+      case 403: {
+        throw AgentRuntimeError.chat({
+          endpoint: desensitizedEndpoint,
+          error: error as any,
+          errorType: AgentRuntimeErrorType.LocationNotSupportError,
+          provider: ModelProvider.Anthropic,
+        });
+      }
       default: {
         break;
       }
@@ -84,6 +83,22 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
       });
     }
   }
+
+  private buildAnthropicPayload(payload: ChatStreamPayload) {
+    const { messages, model, max_tokens, temperature, top_p } = payload;
+    const system_message = messages.find((m) => m.role === 'system');
+    const user_messages = messages.filter((m) => m.role !== 'system');
+
+    return {
+      max_tokens: max_tokens || 4096,
+      messages: buildAnthropicMessages(user_messages),
+      model: model,
+      stream: true,
+      system: system_message?.content as string,
+      temperature: temperature,
+      top_p: top_p,
+    };
+  }
 }
 
 export default LobeAnthropicAI;
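
The new private `buildAnthropicPayload` factors the old inline request construction out of `chat` without changing behavior: the first `system` message is hoisted to Anthropic's top-level `system` field, the remaining messages go through `buildAnthropicMessages`, and `max_tokens` defaults to 4096. A self-contained restatement of the message split, with a local type standing in for the runtime's richer message type:

    // Illustrative only; the real ChatStreamPayload messages carry more fields.
    type Msg = { content: unknown; role: 'assistant' | 'system' | 'user' };

    const splitSystemMessage = (messages: Msg[]) => ({
      // Anthropic takes the system prompt as a top-level field, not a message
      system: messages.find((m) => m.role === 'system')?.content,
      // everything else stays in the conversation array
      userMessages: messages.filter((m) => m.role !== 'system'),
    });
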
@@ -163,4 +163,57 @@ describe('LobeAzureOpenAI', () => {
       });
     });
   });
+
+  describe('private method', () => {
+
+    describe('tocamelCase', () => {
+      it('should convert string to camel case', () => {
+        const key = 'image_url';
+
+        const camelCaseKey = instance['tocamelCase'](key);
+
+        expect(camelCaseKey).toEqual('imageUrl');
+      });
+    });
+
+    describe('camelCaseKeys', () => {
+      it('should convert object keys to camel case', () => {
+        const obj = {
+          "frequency_penalty": 0,
+          "messages": [
+            {
+              "role": "user",
+              "content": [
+                {
+                  "type": "image_url",
+                  "image_url": {
+                    "url": "<image URL>"
+                  }
+                }
+              ]
+            }
+          ]
+        };
+
+        const newObj = instance['camelCaseKeys'](obj);
+
+        expect(newObj).toEqual({
+          "frequencyPenalty": 0,
+          "messages": [
+            {
+              "role": "user",
+              "content": [
+                {
+                  "type": "image_url",
+                  "imageUrl": {
+                    "url": "<image URL>"
+                  }
+                }
+              ]
+            }
+          ]
+        });
+      });
+    });
+  })
 });
@@ -28,7 +28,8 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
 
   async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
     // ============ 1. preprocess messages ============ //
-    const { messages, model, ...params } = payload;
+    const camelCasePayload = this.camelCaseKeys(payload);
+    const { messages, model, maxTokens = 2048, ...params } = camelCasePayload;
 
     // ============ 2. send api ============ //
 
@@ -36,7 +37,7 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
       const response = await this.client.streamChatCompletions(
         model,
         messages as ChatRequestMessage[],
-        { ...params, abortSignal: options?.signal } as GetChatCompletionsOptions,
+        { ...params, abortSignal: options?.signal, maxTokens } as GetChatCompletionsOptions,
       );
 
       const stream = OpenAIStream(response as any);
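
Combined with the camelCase pass above, a `max_tokens` value from the chat payload arrives here as `maxTokens`, picks up a 2048 default when absent, and is now forwarded explicitly to `streamChatCompletions`. A quick worked example of the destructuring (sample values invented):

    // Effect of the changed lines on a sample, already-camelCased payload:
    const camel = { maxTokens: undefined, messages: [], model: 'gpt-4', topP: 1 };
    const { messages, model, maxTokens = 2048, ...params } = camel;
    console.log(maxTokens); // 2048 -- the default applies because the value was undefined
    console.log(params); // { topP: 1 } -- spread into GetChatCompletionsOptions
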
@@ -77,4 +78,30 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
       });
     }
   }
+
+  // Convert object keys to camel case, copy from `@azure/openai` in `node_modules/@azure/openai/dist/index.cjs`
+  private camelCaseKeys = (obj: any): any => {
+    if (typeof obj !== "object" || !obj) return obj;
+    if (Array.isArray(obj)) {
+      return obj.map((v) => this.camelCaseKeys(v));
+    } else {
+      for (const key of Object.keys(obj)) {
+        const value = obj[key];
+        const newKey = this.tocamelCase(key);
+        if (newKey !== key) {
+          delete obj[key];
+        }
+        obj[newKey] = typeof obj[newKey] === "object" ? this.camelCaseKeys(value) : value;
+      }
+      return obj;
+    }
+  }
+
+  private tocamelCase = (str: string) => {
+    return str
+      .toLowerCase()
+      .replaceAll(/(_[a-z])/g, (group) => group.toUpperCase().replace("_", ""));
+  }
 }
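
As the comment notes, the two helpers are copied verbatim from `@azure/openai`, so the runtime's snake_case wire format is converted into the camelCase option names the SDK expects; the new tests in index.test.ts above pin this behavior down. A standalone copy of the string conversion for illustration (renamed toCamelCase here; the class method is `tocamelCase`):

    const toCamelCase = (str: string) =>
      str.toLowerCase().replaceAll(/(_[a-z])/g, (group) => group.toUpperCase().replace('_', ''));

    console.log(toCamelCase('frequency_penalty')); // 'frequencyPenalty'
    console.log(toCamelCase('image_url')); // 'imageUrl'
    // Caveat of the copied logic: the leading toLowerCase() flattens keys that are
    // already camelCase, e.g. toCamelCase('maxTokens') === 'maxtokens'.
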
@@ -4,6 +4,13 @@ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
 export const LobeGroq = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.groq.com/openai/v1',
+  chatCompletion: {
+    handleError: (error) => {
+      // 403 means the location is not supported
+      if (error.status === 403)
+        return { error, errorType: AgentRuntimeErrorType.LocationNotSupportError };
+    },
+  },
   debug: {
     chatCompletion: () => process.env.DEBUG_GROQ_CHAT_COMPLETION === '1',
   },
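
`chatCompletion.handleError` is a new hook on `LobeOpenAICompatibleFactory` (the factory change itself, file 58 in the list, is not shown here): returning a descriptor overrides the default error mapping, returning nothing falls through to it. Its shape, as inferred from this call site alone and not from the factory's published types:

    // Inferred signature -- treat as an assumption, not documented API.
    type HandleError = (
      error: { status?: number } & Record<string, unknown>,
    ) => { error: unknown; errorType: unknown } | undefined;
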
@@ -2,7 +2,7 @@
 import OpenAI from 'openai';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import { ChatStreamCallbacks } from '@/libs/agent-runtime';
+import { ChatStreamCallbacks, LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
 
 import * as debugStreamModule from '../utils/debugStream';
 import { LobePerplexityAI } from './index';
@@ -15,7 +15,7 @@ const invalidErrorType = 'InvalidPerplexityAPIKey';
 // Mock the console.error to avoid polluting test output
 vi.spyOn(console, 'error').mockImplementation(() => {});
 
-let instance: LobePerplexityAI;
+let instance: LobeOpenAICompatibleRuntime;
 
 beforeEach(() => {
   instance = new LobePerplexityAI({ apiKey: 'test' });