@lobehub/chat 0.155.9 → 0.156.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/CHANGELOG.md +79 -0
  2. package/Dockerfile +1 -1
  3. package/locales/ar/modelProvider.json +2 -17
  4. package/locales/ar/setting.json +4 -1
  5. package/locales/bg-BG/modelProvider.json +2 -17
  6. package/locales/bg-BG/setting.json +4 -1
  7. package/locales/de-DE/modelProvider.json +2 -17
  8. package/locales/de-DE/setting.json +4 -1
  9. package/locales/en-US/modelProvider.json +2 -17
  10. package/locales/en-US/setting.json +4 -1
  11. package/locales/es-ES/modelProvider.json +2 -17
  12. package/locales/es-ES/setting.json +4 -1
  13. package/locales/fr-FR/modelProvider.json +2 -17
  14. package/locales/fr-FR/setting.json +4 -1
  15. package/locales/it-IT/modelProvider.json +2 -17
  16. package/locales/it-IT/setting.json +4 -1
  17. package/locales/ja-JP/modelProvider.json +2 -17
  18. package/locales/ja-JP/setting.json +4 -1
  19. package/locales/ko-KR/modelProvider.json +2 -17
  20. package/locales/ko-KR/setting.json +4 -1
  21. package/locales/nl-NL/modelProvider.json +2 -17
  22. package/locales/nl-NL/setting.json +4 -1
  23. package/locales/pl-PL/modelProvider.json +2 -17
  24. package/locales/pl-PL/setting.json +4 -1
  25. package/locales/pt-BR/modelProvider.json +2 -17
  26. package/locales/pt-BR/setting.json +4 -1
  27. package/locales/ru-RU/modelProvider.json +2 -17
  28. package/locales/ru-RU/setting.json +4 -1
  29. package/locales/tr-TR/modelProvider.json +2 -17
  30. package/locales/tr-TR/setting.json +4 -1
  31. package/locales/vi-VN/modelProvider.json +2 -17
  32. package/locales/vi-VN/setting.json +4 -1
  33. package/locales/zh-CN/error.json +1 -1
  34. package/locales/zh-CN/modelProvider.json +3 -18
  35. package/locales/zh-CN/setting.json +4 -1
  36. package/locales/zh-TW/modelProvider.json +2 -17
  37. package/locales/zh-TW/setting.json +4 -1
  38. package/package.json +1 -1
  39. package/src/app/(main)/settings/llm/Anthropic/index.tsx +3 -1
  40. package/src/app/(main)/settings/llm/Google/index.tsx +3 -1
  41. package/src/app/(main)/settings/llm/Groq/index.tsx +3 -0
  42. package/src/app/(main)/settings/llm/Ollama/index.tsx +8 -4
  43. package/src/app/(main)/settings/llm/OpenAI/index.tsx +5 -1
  44. package/src/app/(main)/settings/llm/Perplexity/index.tsx +3 -0
  45. package/src/app/(main)/settings/llm/components/ProviderConfig/index.tsx +12 -7
  46. package/src/app/api/chat/agentRuntime.ts +30 -35
  47. package/src/config/modelProviders/groq.ts +6 -6
  48. package/src/config/modelProviders/togetherai.ts +0 -22
  49. package/src/config/server/provider.ts +4 -0
  50. package/src/libs/agent-runtime/anthropic/index.ts +28 -13
  51. package/src/libs/agent-runtime/azureOpenai/index.test.ts +53 -0
  52. package/src/libs/agent-runtime/azureOpenai/index.ts +29 -2
  53. package/src/libs/agent-runtime/groq/index.ts +7 -0
  54. package/src/libs/agent-runtime/perplexity/index.test.ts +2 -2
  55. package/src/libs/agent-runtime/perplexity/index.ts +28 -76
  56. package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts +64 -1
  57. package/src/libs/agent-runtime/utils/anthropicHelpers.ts +18 -2
  58. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +12 -1
  59. package/src/locales/default/error.ts +1 -1
  60. package/src/locales/default/modelProvider.ts +2 -18
  61. package/src/locales/default/setting.ts +4 -1
@@ -1,85 +1,37 @@
1
- import { OpenAIStream, StreamingTextResponse } from 'ai';
2
- import OpenAI, { ClientOptions } from 'openai';
1
+ import OpenAI from 'openai';
3
2
 
4
- import { LobeRuntimeAI } from '../BaseAI';
5
3
  import { AgentRuntimeErrorType } from '../error';
6
- import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
7
- import { AgentRuntimeError } from '../utils/createError';
8
- import { debugStream } from '../utils/debugStream';
9
- import { desensitizeUrl } from '../utils/desensitizeUrl';
10
- import { handleOpenAIError } from '../utils/handleOpenAIError';
4
+ import { ChatStreamPayload, ModelProvider } from '../types';
5
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
11
6
 
12
- const DEFAULT_BASE_URL = 'https://api.perplexity.ai';
13
-
14
- export class LobePerplexityAI implements LobeRuntimeAI {
15
- private client: OpenAI;
16
-
17
- baseURL: string;
18
-
19
- constructor({ apiKey, baseURL = DEFAULT_BASE_URL, ...res }: ClientOptions) {
20
- if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidPerplexityAPIKey);
21
-
22
- this.client = new OpenAI({ apiKey, baseURL, ...res });
23
- this.baseURL = this.client.baseURL;
24
- }
25
-
26
- async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
27
- try {
7
+ export const LobePerplexityAI = LobeOpenAICompatibleFactory({
8
+ baseURL: 'https://api.perplexity.ai',
9
+ chatCompletion: {
10
+ handlePayload: (payload: ChatStreamPayload) => {
28
11
  // Set a default frequency penalty value greater than 0
29
- const defaultFrequencyPenalty = 0.1;
30
- const chatPayload = {
31
- ...payload,
32
- frequency_penalty: payload.frequency_penalty || defaultFrequencyPenalty,
33
- };
34
- const response = await this.client.chat.completions.create(
35
- chatPayload as unknown as OpenAI.ChatCompletionCreateParamsStreaming,
36
- { signal: options?.signal },
37
- );
38
- const [prod, debug] = response.tee();
39
-
40
- if (process.env.DEBUG_PERPLEXITY_CHAT_COMPLETION === '1') {
41
- debugStream(debug.toReadableStream()).catch(console.error);
42
- }
43
-
44
- return new StreamingTextResponse(OpenAIStream(prod, options?.callback), {
45
- headers: options?.headers,
46
- });
47
- } catch (error) {
48
- let desensitizedEndpoint = this.baseURL;
12
+ const { presence_penalty, frequency_penalty, ...res } = payload;
49
13
 
50
- if (this.baseURL !== DEFAULT_BASE_URL) {
51
- desensitizedEndpoint = desensitizeUrl(this.baseURL);
52
- }
14
+ let param;
53
15
 
54
- if ('status' in (error as any)) {
55
- switch ((error as Response).status) {
56
- case 401: {
57
- throw AgentRuntimeError.chat({
58
- endpoint: desensitizedEndpoint,
59
- error: error as any,
60
- errorType: AgentRuntimeErrorType.InvalidPerplexityAPIKey,
61
- provider: ModelProvider.Perplexity,
62
- });
63
- }
16
+ // Ensure we only send one of presence_penalty or frequency_penalty
17
+ if (presence_penalty !== 0) {
18
+ param = { presence_penalty };
19
+ } else {
20
+ const defaultFrequencyPenalty = 1;
64
21
 
65
- default: {
66
- break;
67
- }
68
- }
22
+ param = { frequency_penalty: frequency_penalty || defaultFrequencyPenalty };
69
23
  }
70
24
 
71
- const { errorResult, RuntimeError } = handleOpenAIError(error);
72
-
73
- const errorType = RuntimeError || AgentRuntimeErrorType.PerplexityBizError;
74
-
75
- throw AgentRuntimeError.chat({
76
- endpoint: desensitizedEndpoint,
77
- error: errorResult,
78
- errorType,
79
- provider: ModelProvider.Perplexity,
80
- });
81
- }
82
- }
83
- }
84
-
85
- export default LobePerplexityAI;
25
+ console.log(param);
26
+ return { ...res, ...param } as OpenAI.ChatCompletionCreateParamsStreaming;
27
+ },
28
+ },
29
+ debug: {
30
+ chatCompletion: () => process.env.DEBUG_PERPLEXITY_CHAT_COMPLETION === '1',
31
+ },
32
+ errorType: {
33
+ bizError: AgentRuntimeErrorType.PerplexityBizError,
34
+ invalidAPIKey: AgentRuntimeErrorType.InvalidPerplexityAPIKey,
35
+ },
36
+ provider: ModelProvider.Perplexity,
37
+ });
@@ -1,7 +1,11 @@
1
1
  import { describe, expect, it } from 'vitest';
2
2
 
3
3
  import { OpenAIChatMessage, UserMessageContentPart } from '../types/chat';
4
- import { buildAnthropicBlock, buildAnthropicMessage } from './anthropicHelpers';
4
+ import {
5
+ buildAnthropicBlock,
6
+ buildAnthropicMessage,
7
+ buildAnthropicMessages,
8
+ } from './anthropicHelpers';
5
9
  import { parseDataUri } from './uriParser';
6
10
 
7
11
  describe('anthropicHelpers', () => {
@@ -48,4 +52,63 @@ describe('anthropicHelpers', () => {
48
52
  expect(result).toEqual({ content: [{ type: 'text', text: 'Hello!' }], role: 'assistant' });
49
53
  });
50
54
  });
55
+
56
+ describe('buildAnthropicMessages', () => {
57
+ it('should correctly convert OpenAI Messages to Anthropic Messages', () => {
58
+ const messages: OpenAIChatMessage[] = [
59
+ { content: 'Hello', role: 'user' },
60
+ { content: 'Hi', role: 'assistant' },
61
+ ];
62
+
63
+ const result = buildAnthropicMessages(messages);
64
+ expect(result).toHaveLength(2);
65
+ expect(result).toEqual([
66
+ { content: 'Hello', role: 'user' },
67
+ { content: 'Hi', role: 'assistant' },
68
+ ]);
69
+ });
70
+
71
+ it('messages should end with user', () => {
72
+ const messages: OpenAIChatMessage[] = [
73
+ { content: 'Hello', role: 'user' },
74
+ { content: 'Hello', role: 'user' },
75
+ { content: 'Hi', role: 'assistant' },
76
+ ];
77
+
78
+ const contents = buildAnthropicMessages(messages);
79
+
80
+ expect(contents).toHaveLength(4);
81
+ expect(contents).toEqual([
82
+ { content: 'Hello', role: 'user' },
83
+ { content: '_', role: 'assistant' },
84
+ { content: 'Hello', role: 'user' },
85
+ { content: 'Hi', role: 'assistant' },
86
+ ]);
87
+ });
88
+
89
+ it('messages should pair', () => {
90
+ const messages: OpenAIChatMessage[] = [
91
+ { content: 'a', role: 'assistant' },
92
+ { content: 'b', role: 'assistant' },
93
+ { content: 'c', role: 'assistant' },
94
+ { content: 'd', role: 'assistant' },
95
+ { content: '你好', role: 'user' },
96
+ ];
97
+
98
+ const contents = buildAnthropicMessages(messages);
99
+
100
+ expect(contents).toHaveLength(9);
101
+ expect(contents).toEqual([
102
+ { content: '_', role: 'user' },
103
+ { content: 'a', role: 'assistant' },
104
+ { content: '_', role: 'user' },
105
+ { content: 'b', role: 'assistant' },
106
+ { content: '_', role: 'user' },
107
+ { content: 'c', role: 'assistant' },
108
+ { content: '_', role: 'user' },
109
+ { content: 'd', role: 'assistant' },
110
+ { content: '你好', role: 'user' },
111
+ ]);
112
+ });
113
+ });
51
114
  });
@@ -37,5 +37,21 @@ export const buildAnthropicMessage = (
37
37
  };
38
38
 
39
39
  export const buildAnthropicMessages = (
40
- messages: OpenAIChatMessage[],
41
- ): Anthropic.Messages.MessageParam[] => messages.map((message) => buildAnthropicMessage(message));
40
+ oaiMessages: OpenAIChatMessage[],
41
+ ): Anthropic.Messages.MessageParam[] => {
42
+ const messages: Anthropic.Messages.MessageParam[] = [];
43
+ let lastRole = 'assistant';
44
+
45
+ oaiMessages.forEach((message) => {
46
+ const anthropicMessage = buildAnthropicMessage(message);
47
+
48
+ if (lastRole === anthropicMessage.role) {
49
+ messages.push({ content: '_', role: lastRole === 'user' ? 'assistant' : 'user' });
50
+ }
51
+
52
+ lastRole = anthropicMessage.role;
53
+ messages.push(anthropicMessage);
54
+ });
55
+
56
+ return messages;
57
+ };
@@ -6,7 +6,7 @@ import { ChatModelCard } from '@/types/llm';
6
6
 
7
7
  import { LobeRuntimeAI } from '../../BaseAI';
8
8
  import { ILobeAgentRuntimeErrorType } from '../../error';
9
- import { ChatCompetitionOptions, ChatStreamPayload } from '../../types';
9
+ import { ChatCompetitionOptions, ChatCompletionErrorPayload, ChatStreamPayload } from '../../types';
10
10
  import { AgentRuntimeError } from '../createError';
11
11
  import { debugStream } from '../debugStream';
12
12
  import { desensitizeUrl } from '../desensitizeUrl';
@@ -28,6 +28,7 @@ const CHAT_MODELS_BLOCK_LIST = [
28
28
  interface OpenAICompatibleFactoryOptions {
29
29
  baseURL?: string;
30
30
  chatCompletion?: {
31
+ handleError?: (error: any) => Omit<ChatCompletionErrorPayload, 'provider'> | undefined;
31
32
  handlePayload?: (payload: ChatStreamPayload) => OpenAI.ChatCompletionCreateParamsStreaming;
32
33
  };
33
34
  constructorOptions?: ClientOptions;
@@ -113,6 +114,16 @@ export const LobeOpenAICompatibleFactory = ({
113
114
  }
114
115
  }
115
116
 
117
+ if (chatCompletion?.handleError) {
118
+ const errorResult = chatCompletion.handleError(error);
119
+
120
+ if (errorResult)
121
+ throw AgentRuntimeError.chat({
122
+ ...errorResult,
123
+ provider,
124
+ } as ChatCompletionErrorPayload);
125
+ }
126
+
116
127
  const { errorResult, RuntimeError } = handleOpenAIError(error);
117
128
 
118
129
  throw AgentRuntimeError.chat({
@@ -72,7 +72,7 @@ export default {
72
72
  InvalidAccessCode: '密码不正确或为空,请输入正确的访问密码,或者添加自定义 API Key',
73
73
  InvalidClerkUser: '很抱歉,你当前尚未登录,请先登录或注册账号后继续操作',
74
74
  LocationNotSupportError:
75
- '很抱歉,你的所在位置不支持此模型服务,可能是由于地区限制或服务未开通。请确认当前位置是否支持使用此服务,或尝试使用其他位置信息。',
75
+ '很抱歉,你的所在地区不支持此模型服务,可能是由于区域限制或服务未开通。请确认当前地区是否支持使用此服务,或尝试切换到其他地区后重试。',
76
76
 
77
77
  OpenAIBizError: '请求 OpenAI 服务出错,请根据以下信息排查或重试',
78
78
  NoOpenAIAPIKey: 'OpenAI API Key 为空,请添加自定义 OpenAI API Key',
@@ -1,17 +1,11 @@
1
1
  export default {
2
2
  anthropic: {
3
- endpoint: {
4
- desc: '除默认地址外,必须包含 http(s)://',
5
- placeholder: 'https://api.anthropic.com',
6
- title: 'API 代理地址',
7
- },
8
3
  title: 'Anthropic',
9
4
  token: {
10
5
  desc: '填入来自 Anthropic 的 API Key',
11
6
  placeholder: 'Anthropic API Key',
12
7
  title: 'API Key',
13
8
  },
14
-
15
9
  unlock: {
16
10
  description: '输入你的 Anthropic API Key 即可开始会话。应用不会记录你的 API Key',
17
11
  title: '使用自定义 Anthropic API Key',
@@ -69,11 +63,6 @@ export default {
69
63
  },
70
64
  },
71
65
  google: {
72
- endpoint: {
73
- desc: '除默认地址外,必须包含 http(s)://',
74
- placeholder: 'https://generativelanguage.googleapis.com',
75
- title: 'API 代理地址',
76
- },
77
66
  title: 'Google',
78
67
  token: {
79
68
  desc: '填入来自 Google 的 API Key',
@@ -136,6 +125,7 @@ export default {
136
125
  ollama: {
137
126
  checker: {
138
127
  desc: '测试代理地址是否正确填写',
128
+ title: '连通性检查',
139
129
  },
140
130
  customModelName: {
141
131
  desc: '增加自定义模型,多个模型使用逗号(,)隔开',
@@ -144,8 +134,7 @@ export default {
144
134
  },
145
135
  endpoint: {
146
136
  desc: '填入 Ollama 接口代理地址,本地未额外指定可留空',
147
- placeholder: 'http://127.0.0.1:11434',
148
- title: '接口代理地址',
137
+ title: 'Ollama 服务地址',
149
138
  },
150
139
  setup: {
151
140
  cors: {
@@ -176,11 +165,6 @@ export default {
176
165
  title: 'Ollama',
177
166
  },
178
167
  openai: {
179
- endpoint: {
180
- desc: '除默认地址外,必须包含 http(s)://',
181
- placeholder: 'https://api.openai.com/v1',
182
- title: '接口代理地址',
183
- },
184
168
  title: 'OpenAI',
185
169
  token: {
186
170
  desc: '使用自己的 OpenAI Key',
@@ -38,7 +38,6 @@ export default {
38
38
  checker: {
39
39
  button: '检查',
40
40
  desc: '测试 Api Key 与代理地址是否正确填写',
41
- ollamaDesc: '测试代理地址是否正确填写',
42
41
  pass: '检查通过',
43
42
  title: '连通性检查',
44
43
  },
@@ -99,6 +98,10 @@ export default {
99
98
  title: '模型列表',
100
99
  total: '共 {{count}} 个模型可用',
101
100
  },
101
+ proxyUrl: {
102
+ desc: '除默认地址外,必须包含 http(s)://',
103
+ title: 'API 代理地址',
104
+ },
102
105
  waitingForMore: '更多模型正在 <1>计划接入</1> 中,敬请期待 ✨',
103
106
  },
104
107
  ollama: {