@lobehub/chat 0.144.1 → 0.145.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +6 -0
- package/CHANGELOG.md +50 -0
- package/Dockerfile +3 -0
- package/docs/self-hosting/environment-variables/model-provider.mdx +9 -0
- package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +9 -0
- package/locales/ar/common.json +1 -0
- package/locales/ar/error.json +6 -0
- package/locales/ar/setting.json +16 -0
- package/locales/bg-BG/chat.json +1 -1
- package/locales/bg-BG/error.json +1 -1
- package/locales/bg-BG/market.json +1 -1
- package/locales/bg-BG/migration.json +1 -1
- package/locales/bg-BG/plugin.json +1 -1
- package/locales/bg-BG/setting.json +1 -1
- package/locales/bg-BG/tool.json +1 -1
- package/locales/bg-BG/welcome.json +1 -1
- package/locales/de-DE/common.json +1 -0
- package/locales/de-DE/error.json +6 -0
- package/locales/de-DE/setting.json +16 -0
- package/locales/en-US/common.json +1 -0
- package/locales/en-US/error.json +6 -0
- package/locales/en-US/setting.json +16 -0
- package/locales/es-ES/common.json +1 -0
- package/locales/es-ES/error.json +6 -0
- package/locales/es-ES/setting.json +16 -0
- package/locales/fr-FR/common.json +1 -0
- package/locales/fr-FR/error.json +6 -0
- package/locales/fr-FR/setting.json +16 -0
- package/locales/it-IT/common.json +1 -0
- package/locales/it-IT/error.json +6 -0
- package/locales/it-IT/setting.json +16 -0
- package/locales/ja-JP/common.json +1 -0
- package/locales/ja-JP/error.json +6 -0
- package/locales/ja-JP/setting.json +16 -0
- package/locales/ko-KR/common.json +1 -0
- package/locales/ko-KR/error.json +2 -0
- package/locales/ko-KR/setting.json +16 -0
- package/locales/nl-NL/common.json +1 -0
- package/locales/nl-NL/error.json +6 -0
- package/locales/nl-NL/setting.json +16 -0
- package/locales/pl-PL/common.json +1 -0
- package/locales/pl-PL/error.json +6 -0
- package/locales/pl-PL/setting.json +23 -7
- package/locales/pt-BR/common.json +1 -0
- package/locales/pt-BR/error.json +6 -0
- package/locales/pt-BR/setting.json +16 -0
- package/locales/ru-RU/common.json +1 -0
- package/locales/ru-RU/error.json +6 -0
- package/locales/ru-RU/setting.json +16 -0
- package/locales/tr-TR/common.json +1 -0
- package/locales/tr-TR/error.json +6 -0
- package/locales/tr-TR/setting.json +23 -7
- package/locales/vi-VN/common.json +1 -0
- package/locales/vi-VN/error.json +6 -0
- package/locales/vi-VN/setting.json +16 -0
- package/locales/zh-CN/common.json +1 -0
- package/locales/zh-CN/error.json +6 -0
- package/locales/zh-CN/setting.json +16 -0
- package/locales/zh-TW/common.json +1 -0
- package/locales/zh-TW/error.json +6 -0
- package/locales/zh-TW/setting.json +16 -0
- package/package.json +2 -2
- package/src/app/api/chat/[provider]/agentRuntime.test.ts +26 -0
- package/src/app/api/chat/[provider]/agentRuntime.ts +13 -1
- package/src/app/api/chat/google/route.ts +2 -14
- package/src/app/api/config/route.ts +2 -0
- package/src/app/api/errorResponse.test.ts +5 -0
- package/src/app/api/errorResponse.ts +3 -0
- package/src/app/settings/llm/Google/index.tsx +2 -2
- package/src/app/settings/llm/TogetherAI/index.tsx +66 -0
- package/src/app/settings/llm/index.tsx +2 -0
- package/src/components/ModelProviderIcon/index.tsx +5 -0
- package/src/config/modelProviders/google.ts +68 -7
- package/src/config/modelProviders/index.ts +3 -0
- package/src/config/modelProviders/togetherai.ts +86 -0
- package/src/config/modelProviders/zeroone.ts +2 -1
- package/src/config/server/provider.ts +8 -0
- package/src/const/settings.ts +4 -0
- package/src/features/Conversation/Error/APIKeyForm/TogetherAI.tsx +40 -0
- package/src/features/Conversation/Error/APIKeyForm/index.tsx +5 -0
- package/src/features/Conversation/Error/index.tsx +1 -0
- package/src/libs/agent-runtime/error.ts +3 -0
- package/src/libs/agent-runtime/google/index.test.ts +74 -8
- package/src/libs/agent-runtime/google/index.ts +98 -51
- package/src/libs/agent-runtime/index.ts +1 -0
- package/src/libs/agent-runtime/togetherai/index.test.ts +347 -0
- package/src/libs/agent-runtime/togetherai/index.ts +86 -0
- package/src/libs/agent-runtime/types/type.ts +1 -0
- package/src/libs/agent-runtime/zeroone/index.ts +1 -1
- package/src/locales/default/common.ts +1 -0
- package/src/locales/default/error.ts +7 -0
- package/src/locales/default/setting.ts +17 -1
- package/src/services/_auth.test.ts +10 -0
- package/src/services/_auth.ts +4 -0
- package/src/store/global/slices/settings/selectors/modelProvider.test.ts +2 -2
- package/src/store/global/slices/settings/selectors/modelProvider.ts +26 -6
- package/src/types/llm.ts +3 -0
- package/src/types/settings/modelProvider.ts +7 -0
package/src/libs/agent-runtime/google/index.ts

@@ -14,17 +14,6 @@ import { AgentRuntimeError } from '../utils/createError';
 import { debugStream } from '../utils/debugStream';
 import { parseDataUri } from '../utils/uriParser';
 
-type GoogleChatErrors = GoogleChatError[];
-
-interface GoogleChatError {
-  '@type': string;
-  'domain': string;
-  'metadata': {
-    service: string;
-  };
-  'reason': string;
-}
-
 enum HarmCategory {
   HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT',
   HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT',
@@ -47,34 +36,42 @@ export class LobeGoogleAI implements LobeRuntimeAI {
 
   async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
     try {
-      const
+      const model = this.convertModel(payload.model, payload.messages);
+
+      const contents = this.buildGoogleMessages(payload.messages, model);
+
       const geminiStream = await this.client
-        .getGenerativeModel(
-
-
-
-
-
-          model,
-          safetySettings: [
-            {
-              category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
-              threshold: HarmBlockThreshold.BLOCK_NONE,
-            },
-            {
-              category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
-              threshold: HarmBlockThreshold.BLOCK_NONE,
+        .getGenerativeModel(
+          {
+            generationConfig: {
+              maxOutputTokens: payload.max_tokens,
+              temperature: payload.temperature,
+              topP: payload.top_p,
             },
-
-
-
-
-
-
-
-
-
+            model,
+            // avoid wide sensitive words
+            // refs: https://github.com/lobehub/lobe-chat/pull/1418
+            safetySettings: [
+              {
+                category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+                threshold: HarmBlockThreshold.BLOCK_NONE,
+              },
+              {
+                category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+                threshold: HarmBlockThreshold.BLOCK_NONE,
+              },
+              {
+                category: HarmCategory.HARM_CATEGORY_HARASSMENT,
+                threshold: HarmBlockThreshold.BLOCK_NONE,
+              },
+              {
+                category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+                threshold: HarmBlockThreshold.BLOCK_NONE,
+              },
+            ],
+          },
+          { apiVersion: 'v1beta' },
+        )
         .generateContentStream({ contents });
 
       // Convert the response into a friendly text-stream
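For orientation (this note is not part of the package diff): the rewritten call above forwards the OpenAI-style sampling fields from the chat payload into the Google SDK's `generationConfig`, and pins the SDK to the `v1beta` API version. A minimal sketch of that field mapping, with a simplified payload type assumed as a stand-in for `ChatStreamPayload`:

```ts
// Illustrative only: map OpenAI-style payload fields onto the Google SDK's
// generationConfig shape, as the rewritten getGenerativeModel call does.
// `SamplingFields` is an assumed, simplified stand-in for ChatStreamPayload.
interface SamplingFields {
  max_tokens?: number;
  temperature?: number;
  top_p?: number;
}

const toGenerationConfig = (payload: SamplingFields) => ({
  maxOutputTokens: payload.max_tokens,
  temperature: payload.temperature,
  topP: payload.top_p,
});

// toGenerationConfig({ max_tokens: 1024, temperature: 0.7, top_p: 1 })
// => { maxOutputTokens: 1024, temperature: 0.7, topP: 1 }
```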
@@ -127,25 +124,64 @@ export class LobeGoogleAI implements LobeRuntimeAI {
         typeof content === 'string'
           ? [{ text: content }]
           : content.map((c) => this.convertContentToGooglePart(c)),
-      role: message.role === '
+      role: message.role === 'assistant' ? 'model' : 'user',
     };
   };
 
   // convert messages from the Vercel AI SDK Format to the format
   // that is expected by the Google GenAI SDK
-  private buildGoogleMessages = (
-
-    model
-
-
-
-
-
-
-
-
-
-
+  private buildGoogleMessages = (messages: OpenAIChatMessage[], model: string): Content[] => {
+    // if the model is gemini-1.5-pro-latest, we don't need any special handling
+    if (model === 'gemini-1.5-pro-latest') {
+      return messages
+        .filter((message) => message.role !== 'function')
+        .map((msg) => this.convertOAIMessagesToGoogleMessage(msg));
+    }
+
+    const contents: Content[] = [];
+    let lastRole = 'model';
+
+    messages.forEach((message) => {
+      // currently, function messages are filtered out
+      if (message.role === 'function') {
+        return;
+      }
+      const googleMessage = this.convertOAIMessagesToGoogleMessage(message);
+
+      // if the last message and the current message share the same role,
+      // we need to insert an empty message with the opposite role to separate them
+      if (lastRole === googleMessage.role) {
+        contents.push({ parts: [{ text: '' }], role: lastRole === 'user' ? 'model' : 'user' });
+      }
+
+      // add the current message to the contents
+      contents.push(googleMessage);
+
+      // update the last role
+      lastRole = googleMessage.role;
+    });
+
+    // if the last message is a model message, we need to append a user message at the end
+    if (lastRole === 'model') {
+      contents.push({ parts: [{ text: '' }], role: 'user' });
+    }
+
+    return contents;
+  };
+
+  private convertModel = (model: string, messages: OpenAIChatMessage[]) => {
+    let finalModel: string = model;
+
+    if (model.includes('pro-vision')) {
+      // if the messages are all text messages, using the vision model returns an error:
+      // "[400 Bad Request] Add an image to use models/gemini-pro-vision, or switch your model to a text model."
+      const noNeedVision = messages.every((m) => typeof m.content === 'string');
+
+      // so we need to downgrade to gemini-pro
+      if (noNeedVision) finalModel = 'gemini-pro';
+    }
+
+    return finalModel;
   };
 
   private parseErrorMessage(message: string): {
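The padding logic added in `buildGoogleMessages` exists because the Google GenAI chat format expects user and model turns to alternate strictly. As a reading aid (not package code), here is a self-contained sketch of the same idea; `Content` is an assumed simplification of the SDK type:

```ts
// Illustrative sketch of the alternation padding: whenever two consecutive
// messages share a role, an empty opposite-role turn is inserted, and the
// history is forced to end on a user turn before the model replies.
type Content = { parts: { text: string }[]; role: 'user' | 'model' };

const padToAlternatingRoles = (messages: Content[]): Content[] => {
  const contents: Content[] = [];
  let lastRole: 'user' | 'model' = 'model';

  for (const message of messages) {
    if (lastRole === message.role) {
      // insert an empty turn with the opposite role to restore alternation
      contents.push({ parts: [{ text: '' }], role: lastRole === 'user' ? 'model' : 'user' });
    }
    contents.push(message);
    lastRole = message.role;
  }

  // a trailing model turn would leave nothing for the model to answer
  if (lastRole === 'model') contents.push({ parts: [{ text: '' }], role: 'user' });

  return contents;
};

// Two consecutive user messages gain an empty model turn between them:
// padToAlternatingRoles([user('hi'), user('still there?')])
//   => user('hi'), model(''), user('still there?')
```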
@@ -191,3 +227,14 @@ export class LobeGoogleAI implements LobeRuntimeAI {
 }
 
 export default LobeGoogleAI;
+
+type GoogleChatErrors = GoogleChatError[];
+
+interface GoogleChatError {
+  '@type': string;
+  'domain': string;
+  'metadata': {
+    service: string;
+  };
+  'reason': string;
+}
package/src/libs/agent-runtime/index.ts

@@ -11,6 +11,7 @@ export { LobeOllamaAI } from './ollama';
 export { LobeOpenAI } from './openai';
 export { LobeOpenRouterAI } from './openrouter';
 export { LobePerplexityAI } from './perplexity';
+export { LobeTogetherAI } from './togetherai';
 export * from './types';
 export { AgentRuntimeError } from './utils/createError';
 export { LobeZeroOneAI } from './zeroone';
package/src/libs/agent-runtime/togetherai/index.test.ts

@@ -0,0 +1,347 @@
+// @vitest-environment node
+import OpenAI from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import { ChatStreamCallbacks } from '@/libs/agent-runtime';
+
+import * as debugStreamModule from '../utils/debugStream';
+import { LobeTogetherAI } from './index';
+
+const provider = 'togetherai';
+const defaultBaseURL = 'https://api.together.xyz/v1';
+const bizErrorType = 'TogetherAIBizError';
+const invalidErrorType = 'InvalidTogetherAIAPIKey';
+
+// Mock the console.error to avoid polluting test output
+vi.spyOn(console, 'error').mockImplementation(() => {});
+
+let instance: LobeTogetherAI;
+
+beforeEach(() => {
+  instance = new LobeTogetherAI({ apiKey: 'test' });
+
+  // use vi.spyOn to mock the chat.completions.create method
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+    new ReadableStream() as any,
+  );
+});
+
+afterEach(() => {
+  vi.clearAllMocks();
+});
+
+describe('LobeTogetherAI', () => {
+  describe('init', () => {
+    it('should correctly initialize with an API key', async () => {
+      const instance = new LobeTogetherAI({ apiKey: 'test_api_key' });
+      expect(instance).toBeInstanceOf(LobeTogetherAI);
+      expect(instance.baseURL).toEqual(defaultBaseURL);
+    });
+  });
+
+  describe('chat', () => {
+    it('should return a StreamingTextResponse on successful API call', async () => {
+      // Arrange
+      const mockStream = new ReadableStream();
+      const mockResponse = Promise.resolve(mockStream);
+
+      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+
+      // Act
+      const result = await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'mistralai/mistral-7b-instruct:free',
+        temperature: 0,
+      });
+
+      // Assert
+      expect(result).toBeInstanceOf(Response);
+    });
+
+    it('should call TogetherAI API with corresponding options', async () => {
+      // Arrange
+      const mockStream = new ReadableStream();
+      const mockResponse = Promise.resolve(mockStream);
+
+      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+
+      // Act
+      const result = await instance.chat({
+        max_tokens: 1024,
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'mistralai/mistral-7b-instruct:free',
+        temperature: 0.7,
+        top_p: 1,
+      });
+
+      // Assert
+      expect(instance['client'].chat.completions.create).toHaveBeenCalledWith({
+        max_tokens: 1024,
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'mistralai/mistral-7b-instruct:free',
+        temperature: 0.7,
+        top_p: 1,
+      });
+      expect(result).toBeInstanceOf(Response);
+    });
+
+    describe('Error', () => {
+      it('should return TogetherAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+        // Arrange
+        const apiError = new OpenAI.APIError(
+          400,
+          {
+            status: 400,
+            error: {
+              message: 'Bad Request',
+            },
+          },
+          'Error message',
+          {},
+        );
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'mistralai/mistral-7b-instruct:free',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              error: { message: 'Bad Request' },
+              status: 400,
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw AgentRuntimeError with InvalidTogetherAIAPIKey if no apiKey is provided', async () => {
+        try {
+          new LobeTogetherAI({});
+        } catch (e) {
+          expect(e).toEqual({ errorType: invalidErrorType });
+        }
+      });
+
+      it('should return TogetherAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: {
+            message: 'api is undefined',
+          },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'mistralai/mistral-7b-instruct:free',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return TogetherAIBizError with an cause response with desensitize Url', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: { message: 'api is undefined' },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        instance = new LobeTogetherAI({
+          apiKey: 'test',
+
+          baseURL: 'https://api.abc.com/v1',
+        });
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'mistralai/mistral-7b-instruct:free',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: 'https://api.***.com/v1',
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw an InvalidTogetherAIAPIKey error type on 401 status code', async () => {
+        // Mock the API call to simulate a 401 error
+        const error = new Error('Unauthorized') as any;
+        error.status = 401;
+        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'mistralai/mistral-7b-instruct:free',
+            temperature: 0,
+          });
+        } catch (e) {
+          // Expect the chat method to throw an error with InvalidTogetherAIAPIKey
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: new Error('Unauthorized'),
+            errorType: invalidErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+        // Arrange
+        const genericError = new Error('Generic Error');
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'mistralai/mistral-7b-instruct:free',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            errorType: 'AgentRuntimeError',
+            provider,
+            error: {
+              name: genericError.name,
+              cause: genericError.cause,
+              message: genericError.message,
+              stack: genericError.stack,
+            },
+          });
+        }
+      });
+    });
+
+    describe('LobeTogetherAI chat with callback and headers', () => {
+      it('should handle callback and headers correctly', async () => {
+        // mock chat.completions.create to return a readable stream
+        const mockCreateMethod = vi
+          .spyOn(instance['client'].chat.completions, 'create')
+          .mockResolvedValue(
+            new ReadableStream({
+              start(controller) {
+                controller.enqueue({
+                  id: 'chatcmpl-8xDx5AETP8mESQN7UB30GxTN2H1SO',
+                  object: 'chat.completion.chunk',
+                  created: 1709125675,
+                  model: 'mistralai/mistral-7b-instruct:free',
+                  system_fingerprint: 'fp_86156a94a0',
+                  choices: [
+                    { index: 0, delta: { content: 'hello' }, logprobs: null, finish_reason: null },
+                  ],
+                });
+                controller.close();
+              },
+            }) as any,
+          );
+
+        // prepare the callback and headers
+        const mockCallback: ChatStreamCallbacks = {
+          onStart: vi.fn(),
+          onToken: vi.fn(),
+        };
+        const mockHeaders = { 'Custom-Header': 'TestValue' };
+
+        // run the test
+        const result = await instance.chat(
+          {
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'mistralai/mistral-7b-instruct:free',
+            temperature: 0,
+          },
+          { callback: mockCallback, headers: mockHeaders },
+        );
+
+        // verify the callback was invoked
+        await result.text(); // make sure the stream is consumed
+        expect(mockCallback.onStart).toHaveBeenCalled();
+        expect(mockCallback.onToken).toHaveBeenCalledWith('hello');
+
+        // verify the headers were passed through correctly
+        expect(result.headers.get('Custom-Header')).toEqual('TestValue');
+
+        // clean up
+        mockCreateMethod.mockRestore();
+      });
+    });
+
+    describe('DEBUG', () => {
+      it('should call debugStream and return StreamingTextResponse when DEBUG_TOGETHERAI_CHAT_COMPLETION is 1', async () => {
+        // Arrange
+        const mockProdStream = new ReadableStream() as any; // mocked prod stream
+        const mockDebugStream = new ReadableStream({
+          start(controller) {
+            controller.enqueue('Debug stream content');
+            controller.close();
+          },
+        }) as any;
+        mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
+
+        // mock the chat.completions.create return value, including a mocked tee method
+        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+        });
+
+        // save the original environment variable value
+        const originalDebugValue = process.env.DEBUG_TOGETHERAI_CHAT_COMPLETION;
+
+        // mock the environment variable
+        process.env.DEBUG_TOGETHERAI_CHAT_COMPLETION = '1';
+        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+        // run the test
+        // run the function under test and make sure it calls debugStream when the condition is met
+        // (this invocation may need to be adjusted to the actual setup)
+        await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'mistralai/mistral-7b-instruct:free',
+          temperature: 0,
+        });
+
+        // verify debugStream was called
+        expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+        // restore the original environment variable value
+        process.env.DEBUG_TOGETHERAI_CHAT_COMPLETION = originalDebugValue;
+      });
+    });
+  });
+});
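The DEBUG test above hinges on the completion stream being tee'd, so the debug branch can be logged without consuming the branch that backs the HTTP response. A standalone sketch of that pattern (`logChunks` and `completionStream` are illustrative stand-ins, not package code):

```ts
// Illustrative only: split a stream with tee() and log one branch while the
// other remains untouched for the client response.
async function logChunks(stream: ReadableStream) {
  const reader = stream.getReader();
  for (let chunk = await reader.read(); !chunk.done; chunk = await reader.read()) {
    console.log('[debug]', chunk.value); // print each streamed chunk
  }
}

declare const completionStream: ReadableStream; // stand-in for the SDK stream

const [prod, debug] = completionStream.tee();
if (process.env.DEBUG_TOGETHERAI_CHAT_COMPLETION === '1') {
  // fire-and-forget: logging must never block or consume the prod branch
  logChunks(debug).catch(console.error);
}
// `prod` then backs the streaming response returned to the caller.
```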
package/src/libs/agent-runtime/togetherai/index.ts

@@ -0,0 +1,86 @@
+import { OpenAIStream, StreamingTextResponse } from 'ai';
+import OpenAI, { ClientOptions } from 'openai';
+
+import { LobeRuntimeAI } from '../BaseAI';
+import { AgentRuntimeErrorType } from '../error';
+import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
+import { AgentRuntimeError } from '../utils/createError';
+import { debugStream } from '../utils/debugStream';
+import { desensitizeUrl } from '../utils/desensitizeUrl';
+import { handleOpenAIError } from '../utils/handleOpenAIError';
+
+const DEFAULT_BASE_URL = 'https://api.together.xyz/v1';
+
+export class LobeTogetherAI implements LobeRuntimeAI {
+  private client: OpenAI;
+
+  baseURL: string;
+
+  constructor({ apiKey, baseURL = DEFAULT_BASE_URL, ...res }: ClientOptions) {
+    if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidTogetherAIAPIKey);
+
+    this.client = new OpenAI({
+      apiKey,
+      baseURL,
+      defaultHeaders: {
+        'HTTP-Referer': 'https://chat-preview.lobehub.com',
+        'X-Title': 'Lobe Chat',
+      },
+      ...res,
+    });
+    this.baseURL = this.client.baseURL;
+  }
+
+  async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
+    try {
+      const response = await this.client.chat.completions.create(
+        payload as unknown as OpenAI.ChatCompletionCreateParamsStreaming,
+      );
+      const [prod, debug] = response.tee();
+
+      if (process.env.DEBUG_TOGETHERAI_CHAT_COMPLETION === '1') {
+        debugStream(debug.toReadableStream()).catch(console.error);
+      }
+
+      return new StreamingTextResponse(OpenAIStream(prod, options?.callback), {
+        headers: options?.headers,
+      });
+    } catch (error) {
+      let desensitizedEndpoint = this.baseURL;
+
+      if (this.baseURL !== DEFAULT_BASE_URL) {
+        desensitizedEndpoint = desensitizeUrl(this.baseURL);
+      }
+
+      if ('status' in (error as any)) {
+        switch ((error as Response).status) {
+          case 401: {
+            throw AgentRuntimeError.chat({
+              endpoint: desensitizedEndpoint,
+              error: error as any,
+              errorType: AgentRuntimeErrorType.InvalidTogetherAIAPIKey,
+              provider: ModelProvider.TogetherAI,
+            });
+          }
+
+          default: {
+            break;
+          }
+        }
+      }
+
+      const { errorResult, RuntimeError } = handleOpenAIError(error);
+
+      const errorType = RuntimeError || AgentRuntimeErrorType.TogetherAIBizError;
+
+      throw AgentRuntimeError.chat({
+        endpoint: desensitizedEndpoint,
+        error: errorResult,
+        errorType,
+        provider: ModelProvider.TogetherAI,
+      });
+    }
+  }
+}
+
+export default LobeTogetherAI;
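For orientation (not part of the diff itself), a hedged usage sketch of the new runtime; the API key is a placeholder, and the model id mirrors the one used by the tests above:

```ts
import { LobeTogetherAI } from '@/libs/agent-runtime';

// Placeholder key; TogetherAI issues real keys from its dashboard.
const runtime = new LobeTogetherAI({ apiKey: 'together-api-key' });

const main = async () => {
  const response = await runtime.chat({
    messages: [{ content: 'Hello', role: 'user' }],
    model: 'mistralai/mistral-7b-instruct:free',
    temperature: 0.7,
  });

  // `response` is a streaming Response; consume it like any fetch Response.
  console.log(await response.text());
};

main().catch(console.error);
```

Because the class wraps the OpenAI SDK with a custom `baseURL` and spreads the remaining `ClientOptions` through, other OpenAI client options apply here as well.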
package/src/libs/agent-runtime/zeroone/index.ts

@@ -26,7 +26,7 @@ export class LobeZeroOneAI implements LobeRuntimeAI {
   async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
     try {
       const response = await this.client.chat.completions.create(
-        payload as unknown as OpenAI.ChatCompletionCreateParamsStreaming
+        payload as unknown as OpenAI.ChatCompletionCreateParamsStreaming,
       );
       const [prod, debug] = response.tee();
 
package/src/locales/default/error.ts

@@ -88,6 +88,9 @@ export default {
   InvalidOpenRouterAPIKey: 'OpenRouter API Key 不正确或为空,请检查 OpenRouter API Key 后重试',
   OpenRouterBizError: '请求 OpenRouter AI 服务出错,请根据以下信息排查或重试',
 
+  InvalidTogetherAIAPIKey: 'TogetherAI API Key 不正确或为空,请检查 TogetherAI API Key 后重试',
+  TogetherAIBizError: '请求 TogetherAI AI 服务出错,请根据以下信息排查或重试',
+
   ZeroOneBizError: '请求零一万物服务出错,请根据以下信息排查或重试',
   InvalidZeroOneAPIKey: '零一万物 API Key 不正确或为空,请检查零一万物 API Key 后重试',
package/src/locales/default/setting.ts

@@ -145,6 +148,10 @@ export default {
       description: '输入你的 Perplexity API Key 即可开始会话。应用不会记录你的 API Key',
       title: '使用自定义 Perplexity API Key',
     },
+    TogetherAI: {
+      description: '输入你的 TogetherAI API Key 即可开始会话。应用不会记录你的 API Key',
+      title: '使用自定义 TogetherAI API Key',
+    },
     ZeroOne: {
       description: '输入你的零一万物 API Key 即可开始会话。应用不会记录你的 API Key',
       title: '使用自定义零一万物 API Key',