@lobehub/chat 1.55.0 → 1.55.2
This diff shows the content of publicly available package versions as published to their respective public registries, and is provided for informational purposes only.
- package/CHANGELOG.md +58 -0
- package/changelog/v1.json +21 -0
- package/docs/changelog/2025-02-02-deepseek-r1.mdx +4 -4
- package/docs/self-hosting/advanced/auth/next-auth/casdoor.mdx +4 -3
- package/docs/self-hosting/advanced/auth/next-auth/casdoor.zh-CN.mdx +4 -3
- package/docs/self-hosting/advanced/auth/next-auth/logto.mdx +9 -9
- package/docs/self-hosting/advanced/auth/next-auth/logto.zh-CN.mdx +9 -9
- package/docs/self-hosting/advanced/model-list.zh-CN.mdx +6 -6
- package/docs/self-hosting/advanced/observability/langfuse.zh-CN.mdx +70 -0
- package/docs/self-hosting/environment-variables/auth.mdx +3 -3
- package/docs/self-hosting/environment-variables/auth.zh-CN.mdx +3 -2
- package/docs/self-hosting/platform/btpanel.mdx +1 -1
- package/docs/self-hosting/platform/btpanel.zh-CN.mdx +1 -1
- package/docs/self-hosting/platform/tencentcloud-lighthouse.mdx +33 -0
- package/docs/self-hosting/platform/tencentcloud-lighthouse.zh-CN.mdx +31 -0
- package/docs/self-hosting/server-database.mdx +1 -1
- package/docs/self-hosting/start.zh-CN.mdx +3 -1
- package/docs/usage/providers/wenxin.mdx +1 -0
- package/docs/usage/providers/wenxin.zh-CN.mdx +1 -0
- package/package.json +1 -3
- package/src/config/aiModels/openrouter.ts +30 -0
- package/src/config/modelProviders/openrouter.ts +9 -0
- package/src/features/Conversation/Messages/Assistant/index.tsx +5 -1
- package/src/libs/agent-runtime/AgentRuntime.test.ts +1 -0
- package/src/libs/agent-runtime/azureOpenai/index.test.ts +47 -9
- package/src/libs/agent-runtime/azureOpenai/index.ts +35 -28
- package/src/libs/agent-runtime/utils/streams/index.ts +0 -1
- package/src/server/modules/AgentRuntime/index.test.ts +3 -1
- package/src/server/routers/lambda/aiModel.test.ts +240 -0
- package/src/store/aiInfra/slices/aiModel/selectors.test.ts +228 -0
- package/src/libs/agent-runtime/utils/streams/azureOpenai.test.ts +0 -536
- package/src/libs/agent-runtime/utils/streams/azureOpenai.ts +0 -83
--- a/package/src/libs/agent-runtime/azureOpenai/index.test.ts
+++ b/package/src/libs/agent-runtime/azureOpenai/index.test.ts
@@ -1,9 +1,9 @@
 // @vitest-environment node
-import {
-import OpenAI from 'openai';
+import { AzureOpenAI } from 'openai';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
 import * as debugStreamModule from '../utils/debugStream';
+import * as openaiCompatibleFactoryModule from '../utils/openaiCompatibleFactory';
 import { LobeAzureOpenAI } from './index';
 
 const bizErrorType = 'ProviderBizError';
@@ -23,7 +23,7 @@ describe('LobeAzureOpenAI', () => {
     );
 
     // Use vi.spyOn to mock the streamChatCompletions method
-    vi.spyOn(instance['client'], '
+    vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
       new ReadableStream() as any,
     );
   });
@@ -48,7 +48,7 @@ describe('LobeAzureOpenAI', () => {
 
       const instance = new LobeAzureOpenAI(endpoint, apikey, apiVersion);
 
-      expect(instance.client).toBeInstanceOf(
+      expect(instance.client).toBeInstanceOf(AzureOpenAI);
       expect(instance.baseURL).toBe(endpoint);
     });
   });
@@ -59,7 +59,7 @@ describe('LobeAzureOpenAI', () => {
       const mockStream = new ReadableStream();
       const mockResponse = Promise.resolve(mockStream);
 
-      (instance['client'].
+      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
 
       // Act
       const result = await instance.chat({
@@ -164,7 +164,9 @@ describe('LobeAzureOpenAI', () => {
           controller.close();
         },
       });
-      vi.spyOn(instance['client'], '
+      vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+        mockStream as any,
+      );
 
       const result = await instance.chat({
         stream: true,
@@ -204,6 +206,42 @@ describe('LobeAzureOpenAI', () => {
         ].map((item) => `${item}\n`),
       );
     });
+
+    it('should handle non-streaming response', async () => {
+      vi.spyOn(openaiCompatibleFactoryModule, 'transformResponseToStream').mockImplementation(
+        () => {
+          return new ReadableStream();
+        },
+      );
+      // Act
+      await instance.chat({
+        stream: false,
+        temperature: 0.6,
+        model: 'gpt-35-turbo-16k',
+        messages: [{ role: 'user', content: '你好' }],
+      });
+
+      // Assert
+      expect(openaiCompatibleFactoryModule.transformResponseToStream).toHaveBeenCalled();
+    });
+  });
+
+  it('should handle o1 series models without streaming', async () => {
+    vi.spyOn(openaiCompatibleFactoryModule, 'transformResponseToStream').mockImplementation(
+      () => {
+        return new ReadableStream();
+      },
+    );
+
+    // Act
+    await instance.chat({
+      temperature: 0.6,
+      model: 'o1-preview',
+      messages: [{ role: 'user', content: '你好' }],
+    });
+
+    // Assert
+    expect(openaiCompatibleFactoryModule.transformResponseToStream).toHaveBeenCalled();
   });
 
   describe('Error', () => {
@@ -214,7 +252,7 @@ describe('LobeAzureOpenAI', () => {
         message: 'Deployment not found',
       };
 
-      (instance['client'].
+      (instance['client'].chat.completions.create as Mock).mockRejectedValue(error);
 
       // Act
       try {
@@ -242,7 +280,7 @@ describe('LobeAzureOpenAI', () => {
       // Arrange
      const genericError = new Error('Generic Error');
 
-      (instance['client'].
+      (instance['client'].chat.completions.create as Mock).mockRejectedValue(genericError);
 
      // Act
      try {
@@ -279,7 +317,7 @@ describe('LobeAzureOpenAI', () => {
       }) as any;
       mockDebugStream.toReadableStream = () => mockDebugStream;
 
-      (instance['client'].
+      (instance['client'].chat.completions.create as Mock).mockResolvedValue({
        tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
       });
 
--- a/package/src/libs/agent-runtime/azureOpenai/index.ts
+++ b/package/src/libs/agent-runtime/azureOpenai/index.ts
@@ -1,26 +1,28 @@
-import {
-
-  ChatRequestMessage,
-  GetChatCompletionsOptions,
-  OpenAIClient,
-} from '@azure/openai';
+import OpenAI, { AzureOpenAI } from 'openai';
+import type { Stream } from 'openai/streaming';
 
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
 import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
 import { AgentRuntimeError } from '../utils/createError';
 import { debugStream } from '../utils/debugStream';
+import { transformResponseToStream } from '../utils/openaiCompatibleFactory';
 import { StreamingResponse } from '../utils/response';
-import {
+import { OpenAIStream } from '../utils/streams';
 
 export class LobeAzureOpenAI implements LobeRuntimeAI {
-  client:
+  client: AzureOpenAI;
 
   constructor(endpoint?: string, apikey?: string, apiVersion?: string) {
     if (!apikey || !endpoint)
       throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
 
-    this.client = new
+    this.client = new AzureOpenAI({
+      apiKey: apikey,
+      apiVersion,
+      dangerouslyAllowBrowser: true,
+      endpoint,
+    });
 
     this.baseURL = endpoint;
   }
@@ -28,28 +30,33 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
   baseURL: string;
 
   async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
-
-
-    const
-
-    // ============ 2. send api ============ //
-
+    const { messages, model, ...params } = payload;
+    // o1 series models on Azure OpenAI does not support streaming currently
+    const enableStreaming = model.startsWith('o1') ? false : (params.stream ?? true);
     try {
-      const response = await this.client.
+      const response = await this.client.chat.completions.create({
+        messages: messages as OpenAI.ChatCompletionMessageParam[],
         model,
-
-
-
-
-      const [debug, prod] = response.tee();
-
-      if (process.env.DEBUG_AZURE_CHAT_COMPLETION === '1') {
-        debugStream(debug).catch(console.error);
-      }
-
-      return StreamingResponse(AzureOpenAIStream(prod, options?.callback), {
-        headers: options?.headers,
+        ...params,
+        max_completion_tokens: 2048,
+        stream: enableStreaming,
+        tool_choice: params.tools ? 'auto' : undefined,
       });
+      if (enableStreaming) {
+        const stream = response as Stream<OpenAI.ChatCompletionChunk>;
+        const [prod, debug] = stream.tee();
+        if (process.env.DEBUG_AZURE_CHAT_COMPLETION === '1') {
+          debugStream(debug.toReadableStream()).catch(console.error);
+        }
+        return StreamingResponse(OpenAIStream(prod, { callbacks: options?.callback }), {
+          headers: options?.headers,
+        });
+      } else {
+        const stream = transformResponseToStream(response as OpenAI.ChatCompletion);
+        return StreamingResponse(OpenAIStream(stream, { callbacks: options?.callback }), {
+          headers: options?.headers,
+        });
+      }
     } catch (e) {
       let error = e as { [key: string]: any; code: string; message: string };
 
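In the rewritten chat method above, the non-streaming branch funnels the completed response through transformResponseToStream so that both branches feed the same OpenAIStream pipeline. A minimal sketch of what such a transform could look like, assuming it simply replays the finished ChatCompletion as a one-chunk stream (hypothetical helper name; the package's real implementation lives in ../utils/openaiCompatibleFactory and may differ):

import type OpenAI from 'openai';

// Hypothetical sketch, not the package's implementation: replay a completed
// ChatCompletion as a single ChatCompletionChunk so downstream streaming code
// can treat streaming and non-streaming responses uniformly.
const toSingleChunkStream = (response: OpenAI.ChatCompletion) =>
  new ReadableStream<OpenAI.ChatCompletionChunk>({
    start(controller) {
      controller.enqueue({
        choices: response.choices.map((choice) => ({
          delta: { content: choice.message.content, role: choice.message.role },
          finish_reason: choice.finish_reason,
          index: choice.index,
        })),
        created: response.created,
        id: response.id,
        model: response.model,
        object: 'chat.completion.chunk',
      });
      controller.close();
    },
  });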
--- a/package/src/server/modules/AgentRuntime/index.test.ts
+++ b/package/src/server/modules/AgentRuntime/index.test.ts
@@ -223,7 +223,9 @@ describe('initAgentRuntimeWithUserPayload method', () => {
   });
 
   it('Azure AI Provider: without apikey', async () => {
-    const jwtPayload: JWTPayload = {
+    const jwtPayload: JWTPayload = {
+      azureApiVersion: 'test-azure-api-version',
+    };
     const runtime = await initAgentRuntimeWithUserPayload(ModelProvider.Azure, jwtPayload);
 
     expect(runtime['_runtime']).toBeInstanceOf(LobeAzureOpenAI);
--- /dev/null
+++ b/package/src/server/routers/lambda/aiModel.test.ts
@@ -0,0 +1,240 @@
+import { describe, expect, it, vi } from 'vitest';
+
+import { AiInfraRepos } from '@/database/repositories/aiInfra';
+import { AiModelModel } from '@/database/server/models/aiModel';
+import { UserModel } from '@/database/server/models/user';
+
+import { aiModelRouter } from './aiModel';
+
+vi.mock('@/database/server/models/aiModel');
+vi.mock('@/database/server/models/user');
+vi.mock('@/database/repositories/aiInfra');
+vi.mock('@/server/globalConfig', () => ({
+  getServerGlobalConfig: vi.fn().mockReturnValue({
+    aiProvider: {},
+  }),
+}));
+vi.mock('@/server/modules/KeyVaultsEncrypt', () => ({
+  KeyVaultsGateKeeper: {
+    initWithEnvKey: vi.fn().mockResolvedValue({
+      encrypt: vi.fn(),
+      decrypt: vi.fn(),
+    }),
+  },
+}));
+
+describe('aiModelRouter', () => {
+  const mockCtx = {
+    userId: 'test-user',
+  };
+
+  it('should create ai model', async () => {
+    const mockCreate = vi.fn().mockResolvedValue({ id: 'model-1' });
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          create: mockCreate,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    const result = await caller.createAiModel({
+      id: 'test-model',
+      providerId: 'test-provider',
+    });
+
+    expect(result).toBe('model-1');
+    expect(mockCreate).toHaveBeenCalledWith({
+      id: 'test-model',
+      providerId: 'test-provider',
+    });
+  });
+
+  it('should get ai model by id', async () => {
+    const mockModel = {
+      id: 'model-1',
+      name: 'Test Model',
+    };
+    const mockFindById = vi.fn().mockResolvedValue(mockModel);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          findById: mockFindById,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    const result = await caller.getAiModelById({ id: 'model-1' });
+
+    expect(result).toEqual(mockModel);
+    expect(mockFindById).toHaveBeenCalledWith('model-1');
+  });
+
+  it('should get ai provider model list', async () => {
+    const mockModelList = [
+      { id: 'model-1', name: 'Model 1' },
+      { id: 'model-2', name: 'Model 2' },
+    ];
+    const mockGetList = vi.fn().mockResolvedValue(mockModelList);
+    vi.mocked(AiInfraRepos).mockImplementation(
+      () =>
+        ({
+          getAiProviderModelList: mockGetList,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    const result = await caller.getAiProviderModelList({ id: 'provider-1' });
+
+    expect(result).toEqual(mockModelList);
+    expect(mockGetList).toHaveBeenCalledWith('provider-1');
+  });
+
+  it('should remove ai model', async () => {
+    const mockDelete = vi.fn().mockResolvedValue(true);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          delete: mockDelete,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.removeAiModel({
+      id: 'model-1',
+      providerId: 'provider-1',
+    });
+
+    expect(mockDelete).toHaveBeenCalledWith('model-1', 'provider-1');
+  });
+
+  it('should update ai model', async () => {
+    const mockUpdate = vi.fn().mockResolvedValue(true);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          update: mockUpdate,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.updateAiModel({
+      id: 'model-1',
+      providerId: 'provider-1',
+      value: {
+        displayName: 'Updated Model',
+      },
+    });
+
+    expect(mockUpdate).toHaveBeenCalledWith('model-1', 'provider-1', {
+      displayName: 'Updated Model',
+    });
+  });
+
+  it('should toggle model enabled status', async () => {
+    const mockToggle = vi.fn().mockResolvedValue(true);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          toggleModelEnabled: mockToggle,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.toggleModelEnabled({
+      id: 'model-1',
+      providerId: 'provider-1',
+      enabled: true,
+    });
+
+    expect(mockToggle).toHaveBeenCalledWith({
+      id: 'model-1',
+      providerId: 'provider-1',
+      enabled: true,
+    });
+  });
+
+  it('should batch toggle ai models', async () => {
+    const mockBatchToggle = vi.fn().mockResolvedValue(true);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          batchToggleAiModels: mockBatchToggle,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.batchToggleAiModels({
+      id: 'provider-1',
+      models: ['model-1', 'model-2'],
+      enabled: true,
+    });
+
+    expect(mockBatchToggle).toHaveBeenCalledWith('provider-1', ['model-1', 'model-2'], true);
+  });
+
+  it('should batch update ai models', async () => {
+    const mockBatchUpdate = vi.fn().mockResolvedValue([]);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          batchUpdateAiModels: mockBatchUpdate,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.batchUpdateAiModels({
+      id: 'provider-1',
+      models: [{ id: 'model-1' }, { id: 'model-2' }],
+    });
+
+    expect(mockBatchUpdate).toHaveBeenCalledWith('provider-1', [
+      { id: 'model-1' },
+      { id: 'model-2' },
+    ]);
+  });
+
+  it('should clear models by provider', async () => {
+    const mockClear = vi.fn().mockResolvedValue(true);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          clearModelsByProvider: mockClear,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.clearModelsByProvider({
+      providerId: 'provider-1',
+    });
+
+    expect(mockClear).toHaveBeenCalledWith('provider-1');
+  });
+
+  it('should clear remote models', async () => {
+    const mockClearRemote = vi.fn().mockResolvedValue(true);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          clearRemoteModels: mockClearRemote,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.clearRemoteModels({
+      providerId: 'provider-1',
+    });
+
+    expect(mockClearRemote).toHaveBeenCalledWith('provider-1');
+  });
+});
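Each of the new router tests above follows the same arrange/act/assert shape: the database model class is swapped for a mock, a direct caller is built with tRPC's createCaller (no HTTP layer involved), and the assertion checks that the procedure forwarded its parsed input to the model layer. A simplified, hypothetical sketch of the procedure shape these tests assume (the real router lives in ./aiModel; everything below the comment markers is an assumption, not the package's code):

import { initTRPC } from '@trpc/server';
import { z } from 'zod';

// Hypothetical, simplified reconstruction of the procedure shape under test.
interface Context {
  userId: string;
}

const t = initTRPC.context<Context>().create();

// Stand-in for the mocked AiModelModel#create in the tests.
const createInDatabase = async (value: { id: string; providerId: string }) => value;

const aiModelRouterSketch = t.router({
  createAiModel: t.procedure
    .input(z.object({ id: z.string(), providerId: z.string() }))
    .mutation(async ({ input }) => {
      // The procedure returns only the created record's id, which would
      // explain why the test expects 'model-1' rather than { id: 'model-1' }.
      const data = await createInDatabase(input);
      return data?.id;
    }),
});

// Usage mirrors the tests: a direct caller, exercised without a server.
const caller = aiModelRouterSketch.createCaller({ userId: 'test-user' });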
--- /dev/null
+++ b/package/src/store/aiInfra/slices/aiModel/selectors.test.ts
@@ -0,0 +1,228 @@
+import { describe, expect, it } from 'vitest';
+
+import { AIProviderStoreState } from '@/store/aiInfra/initialState';
+import { AiModelSourceEnum } from '@/types/aiModel';
+
+import { aiModelSelectors } from './selectors';
+
+describe('aiModelSelectors', () => {
+  const mockState: AIProviderStoreState = {
+    aiProviderModelList: [
+      {
+        id: 'model1',
+        type: 'chat',
+        enabled: true,
+        displayName: 'Model One',
+      },
+      {
+        id: 'model2',
+        type: 'chat',
+        enabled: false,
+        displayName: 'Model Two',
+      },
+      {
+        id: 'model3',
+        type: 'embedding',
+        enabled: true,
+        displayName: 'Model Three',
+      },
+      {
+        id: 'model4',
+        type: 'chat',
+        enabled: true,
+        source: AiModelSourceEnum.Remote,
+        displayName: 'Remote Model',
+      },
+    ],
+    modelSearchKeyword: '',
+    aiModelLoadingIds: ['model2'],
+    enabledAiModels: [
+      {
+        id: 'model1',
+        providerId: 'provider1',
+        abilities: {
+          functionCall: true,
+          vision: true,
+          reasoning: true,
+        },
+        contextWindowTokens: 4000,
+        type: 'chat',
+      },
+      {
+        id: 'model4',
+        providerId: 'provider2',
+        abilities: {
+          functionCall: false,
+          vision: false,
+          reasoning: false,
+        },
+        type: 'chat',
+      },
+    ],
+    activeProviderModelList: [],
+    aiProviderConfigUpdatingIds: [],
+    aiProviderList: [],
+    aiProviderLoadingIds: [],
+    providerSearchKeyword: '',
+    aiProviderRuntimeConfig: {},
+    initAiProviderList: false,
+  };
+
+  describe('aiProviderChatModelListIds', () => {
+    it('should return ids of chat type models', () => {
+      const result = aiModelSelectors.aiProviderChatModelListIds(mockState);
+      expect(result).toEqual(['model1', 'model2', 'model4']);
+    });
+  });
+
+  describe('enabledAiProviderModelList', () => {
+    it('should return enabled models', () => {
+      const result = aiModelSelectors.enabledAiProviderModelList(mockState);
+      expect(result).toHaveLength(3);
+      expect(result.map((m) => m.id)).toEqual(['model1', 'model3', 'model4']);
+    });
+  });
+
+  describe('disabledAiProviderModelList', () => {
+    it('should return disabled models', () => {
+      const result = aiModelSelectors.disabledAiProviderModelList(mockState);
+      expect(result).toHaveLength(1);
+      expect(result[0].id).toBe('model2');
+    });
+  });
+
+  describe('filteredAiProviderModelList', () => {
+    it('should filter models by id', () => {
+      const state = { ...mockState, modelSearchKeyword: 'model1' };
+      const result = aiModelSelectors.filteredAiProviderModelList(state);
+      expect(result).toHaveLength(1);
+      expect(result[0].id).toBe('model1');
+    });
+
+    it('should filter models by display name', () => {
+      const state = { ...mockState, modelSearchKeyword: 'remote' };
+      const result = aiModelSelectors.filteredAiProviderModelList(state);
+      expect(result).toHaveLength(1);
+      expect(result[0].id).toBe('model4');
+    });
+
+    it('should handle empty keyword', () => {
+      const result = aiModelSelectors.filteredAiProviderModelList(mockState);
+      expect(result).toHaveLength(mockState.aiProviderModelList.length);
+    });
+  });
+
+  describe('totalAiProviderModelList', () => {
+    it('should return total number of models', () => {
+      const result = aiModelSelectors.totalAiProviderModelList(mockState);
+      expect(result).toBe(4);
+    });
+  });
+
+  describe('isEmptyAiProviderModelList', () => {
+    it('should return true when list is empty', () => {
+      const state = { ...mockState, aiProviderModelList: [] };
+      const result = aiModelSelectors.isEmptyAiProviderModelList(state);
+      expect(result).toBe(true);
+    });
+
+    it('should return false when list is not empty', () => {
+      const result = aiModelSelectors.isEmptyAiProviderModelList(mockState);
+      expect(result).toBe(false);
+    });
+  });
+
+  describe('hasRemoteModels', () => {
+    it('should return true when remote models exist', () => {
+      const result = aiModelSelectors.hasRemoteModels(mockState);
+      expect(result).toBe(true);
+    });
+
+    it('should return false when no remote models exist', () => {
+      const state = {
+        ...mockState,
+        aiProviderModelList: mockState.aiProviderModelList.filter(
+          (m) => !('source' in m) || m.source !== AiModelSourceEnum.Remote,
+        ),
+      };
+      const result = aiModelSelectors.hasRemoteModels(state);
+      expect(result).toBe(false);
+    });
+  });
+
+  describe('isModelEnabled', () => {
+    it('should return true for enabled model', () => {
+      const result = aiModelSelectors.isModelEnabled('model1')(mockState);
+      expect(result).toBe(true);
+    });
+
+    it('should return false for disabled model', () => {
+      const result = aiModelSelectors.isModelEnabled('model2')(mockState);
+      expect(result).toBe(false);
+    });
+  });
+
+  describe('isModelLoading', () => {
+    it('should return true for loading model', () => {
+      const result = aiModelSelectors.isModelLoading('model2')(mockState);
+      expect(result).toBe(true);
+    });
+
+    it('should return false for non-loading model', () => {
+      const result = aiModelSelectors.isModelLoading('model1')(mockState);
+      expect(result).toBe(false);
+    });
+  });
+
+  describe('getAiModelById', () => {
+    it('should return model by id', () => {
+      const result = aiModelSelectors.getAiModelById('model1')(mockState);
+      expect(result).toBeDefined();
+      expect(result?.id).toBe('model1');
+    });
+
+    it('should return undefined for non-existent model', () => {
+      const result = aiModelSelectors.getAiModelById('nonexistent')(mockState);
+      expect(result).toBeUndefined();
+    });
+  });
+
+  describe('model capability checks', () => {
+    it('should check tool use support', () => {
+      expect(aiModelSelectors.isModelSupportToolUse('model1', 'provider1')(mockState)).toBe(true);
+      expect(aiModelSelectors.isModelSupportToolUse('model4', 'provider2')(mockState)).toBe(false);
+    });
+
+    it('should check vision support', () => {
+      expect(aiModelSelectors.isModelSupportVision('model1', 'provider1')(mockState)).toBe(true);
+      expect(aiModelSelectors.isModelSupportVision('model4', 'provider2')(mockState)).toBe(false);
+    });
+
+    it('should check reasoning support', () => {
+      expect(aiModelSelectors.isModelSupportReasoning('model1', 'provider1')(mockState)).toBe(true);
+      expect(aiModelSelectors.isModelSupportReasoning('model4', 'provider2')(mockState)).toBe(
+        false,
+      );
+    });
+  });
+
+  describe('context window checks', () => {
+    it('should check if model has context window tokens', () => {
+      expect(aiModelSelectors.isModelHasContextWindowToken('model1', 'provider1')(mockState)).toBe(
+        true,
+      );
+      expect(aiModelSelectors.isModelHasContextWindowToken('model4', 'provider2')(mockState)).toBe(
+        false,
+      );
+    });
+
+    it('should get model context window tokens', () => {
+      expect(aiModelSelectors.modelContextWindowTokens('model1', 'provider1')(mockState)).toBe(
+        4000,
+      );
+      expect(
+        aiModelSelectors.modelContextWindowTokens('model4', 'provider2')(mockState),
+      ).toBeUndefined();
+    });
+  });
+});
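The selector tests pin down a small contract: list-level selectors derive from aiProviderModelList, per-capability checks consult enabledAiModels by model and provider id, and parameterized selectors are curried over state. A sketch of a few selectors written against that contract (hypothetical bodies inferred from the expectations above; the real implementations live in ./selectors):

import type { AIProviderStoreState } from '@/store/aiInfra/initialState';

// Hypothetical selector bodies inferred from the test expectations above —
// not the package's actual implementation.
const enabledAiProviderModelList = (s: AIProviderStoreState) =>
  s.aiProviderModelList.filter((model) => model.enabled);

// Parameterized selectors are curried: pass the id first, then the state.
const isModelEnabled = (id: string) => (s: AIProviderStoreState) =>
  enabledAiProviderModelList(s).some((model) => model.id === id);

// Capability checks look the model up in enabledAiModels by id and provider.
const isModelSupportToolUse = (id: string, provider: string) => (s: AIProviderStoreState) =>
  s.enabledAiModels?.find((m) => m.id === id && m.providerId === provider)?.abilities
    ?.functionCall === true;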