@lobehub/chat 1.54.0 → 1.55.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/CHANGELOG.md +58 -0
  2. package/Dockerfile +2 -0
  3. package/Dockerfile.database +2 -0
  4. package/README.ja-JP.md +1 -1
  5. package/README.md +1 -1
  6. package/README.zh-CN.md +1 -1
  7. package/README.zh-TW.md +9 -14
  8. package/changelog/v1.json +21 -0
  9. package/docs/changelog/2024-11-25-november-providers.mdx +1 -1
  10. package/docs/changelog/2024-11-25-november-providers.zh-CN.mdx +1 -1
  11. package/docs/self-hosting/platform/tencentcloud-lighthouse.mdx +33 -0
  12. package/docs/self-hosting/platform/tencentcloud-lighthouse.zh-CN.mdx +33 -0
  13. package/docs/self-hosting/start.zh-CN.mdx +3 -1
  14. package/docs/usage/features/multi-ai-providers.mdx +1 -1
  15. package/docs/usage/features/multi-ai-providers.zh-CN.mdx +1 -1
  16. package/package.json +1 -3
  17. package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +2 -0
  18. package/src/config/aiModels/index.ts +3 -0
  19. package/src/config/aiModels/openrouter.ts +30 -0
  20. package/src/config/aiModels/vllm.ts +94 -0
  21. package/src/config/llm.ts +6 -0
  22. package/src/config/modelProviders/index.ts +4 -0
  23. package/src/config/modelProviders/openrouter.ts +9 -0
  24. package/src/config/modelProviders/vllm.ts +20 -0
  25. package/src/const/url.ts +1 -1
  26. package/src/libs/agent-runtime/AgentRuntime.test.ts +1 -0
  27. package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
  28. package/src/libs/agent-runtime/azureOpenai/index.test.ts +47 -9
  29. package/src/libs/agent-runtime/azureOpenai/index.ts +35 -28
  30. package/src/libs/agent-runtime/types/type.ts +1 -0
  31. package/src/libs/agent-runtime/utils/streams/index.ts +0 -1
  32. package/src/libs/agent-runtime/vllm/index.ts +44 -0
  33. package/src/server/modules/AgentRuntime/index.test.ts +3 -1
  34. package/src/server/routers/lambda/aiModel.test.ts +240 -0
  35. package/src/store/aiInfra/slices/aiModel/selectors.test.ts +228 -0
  36. package/src/types/user/settings/keyVaults.ts +1 -0
  37. package/src/libs/agent-runtime/utils/streams/azureOpenai.test.ts +0 -536
  38. package/src/libs/agent-runtime/utils/streams/azureOpenai.ts +0 -83

package/src/libs/agent-runtime/AgentRuntime.ts

@@ -49,6 +49,7 @@ import {
   TextToSpeechPayload,
 } from './types';
 import { LobeUpstageAI } from './upstage';
+import { LobeVLLMAI } from './vllm';
 import { LobeWenxinAI } from './wenxin';
 import { LobeXAI } from './xai';
 import { LobeZeroOneAI } from './zeroone';
@@ -172,6 +173,7 @@ class AgentRuntime {
     tencentcloud: Partial<ClientOptions>;
     togetherai: Partial<ClientOptions>;
     upstage: Partial<ClientOptions>;
+    vllm: Partial<ClientOptions>;
     wenxin: Partial<ClientOptions>;
     xai: Partial<ClientOptions>;
     zeroone: Partial<ClientOptions>;
@@ -227,6 +229,11 @@ class AgentRuntime {
         break;
       }
 
+      case ModelProvider.VLLM: {
+        runtimeModel = new LobeVLLMAI(params.vllm);
+        break;
+      }
+
       case ModelProvider.Perplexity: {
         runtimeModel = new LobePerplexityAI(params.perplexity);
         break;
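
The three AgentRuntime.ts hunks above wire the new vLLM provider into the import list, the per-provider options type, and the provider switch. A minimal, hypothetical sketch of how that branch would be reached from inside the repo — the `initializeWithProviderOptions` entry point and the option values are assumptions; only the `case ModelProvider.VLLM` branch and `new LobeVLLMAI(params.vllm)` appear in this diff:

```ts
// Hypothetical in-repo usage sketch — not part of the diff above.
// Assumes AgentRuntime exposes a static initializer that takes a provider id plus
// per-provider client options, as the `vllm: Partial<ClientOptions>` field suggests.
import { AgentRuntime } from './AgentRuntime';
import { ModelProvider } from './types';

const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.VLLM, {
  vllm: { apiKey: 'EMPTY', baseURL: 'http://localhost:8000/v1' },
});
```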

package/src/libs/agent-runtime/azureOpenai/index.test.ts

@@ -1,9 +1,9 @@
 // @vitest-environment node
-import { AzureKeyCredential, OpenAIClient } from '@azure/openai';
-import OpenAI from 'openai';
+import { AzureOpenAI } from 'openai';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
 import * as debugStreamModule from '../utils/debugStream';
+import * as openaiCompatibleFactoryModule from '../utils/openaiCompatibleFactory';
 import { LobeAzureOpenAI } from './index';
 
 const bizErrorType = 'ProviderBizError';
@@ -23,7 +23,7 @@ describe('LobeAzureOpenAI', () => {
     );
 
     // Use vi.spyOn to mock the streamChatCompletions method
-    vi.spyOn(instance['client'], 'streamChatCompletions').mockResolvedValue(
+    vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
       new ReadableStream() as any,
     );
   });
@@ -48,7 +48,7 @@ describe('LobeAzureOpenAI', () => {
 
     const instance = new LobeAzureOpenAI(endpoint, apikey, apiVersion);
 
-    expect(instance.client).toBeInstanceOf(OpenAIClient);
+    expect(instance.client).toBeInstanceOf(AzureOpenAI);
     expect(instance.baseURL).toBe(endpoint);
   });
 });
@@ -59,7 +59,7 @@ describe('LobeAzureOpenAI', () => {
     const mockStream = new ReadableStream();
     const mockResponse = Promise.resolve(mockStream);
 
-    (instance['client'].streamChatCompletions as Mock).mockResolvedValue(mockResponse);
+    (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
 
     // Act
     const result = await instance.chat({
@@ -164,7 +164,9 @@ describe('LobeAzureOpenAI', () => {
          controller.close();
        },
      });
-     vi.spyOn(instance['client'], 'streamChatCompletions').mockResolvedValue(mockStream as any);
+     vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+       mockStream as any,
+     );
 
      const result = await instance.chat({
        stream: true,
@@ -204,6 +206,42 @@ describe('LobeAzureOpenAI', () => {
        ].map((item) => `${item}\n`),
      );
    });
+
+    it('should handle non-streaming response', async () => {
+      vi.spyOn(openaiCompatibleFactoryModule, 'transformResponseToStream').mockImplementation(
+        () => {
+          return new ReadableStream();
+        },
+      );
+      // Act
+      await instance.chat({
+        stream: false,
+        temperature: 0.6,
+        model: 'gpt-35-turbo-16k',
+        messages: [{ role: 'user', content: '你好' }],
+      });
+
+      // Assert
+      expect(openaiCompatibleFactoryModule.transformResponseToStream).toHaveBeenCalled();
+    });
+  });
+
+  it('should handle o1 series models without streaming', async () => {
+    vi.spyOn(openaiCompatibleFactoryModule, 'transformResponseToStream').mockImplementation(
+      () => {
+        return new ReadableStream();
+      },
+    );
+
+    // Act
+    await instance.chat({
+      temperature: 0.6,
+      model: 'o1-preview',
+      messages: [{ role: 'user', content: '你好' }],
+    });
+
+    // Assert
+    expect(openaiCompatibleFactoryModule.transformResponseToStream).toHaveBeenCalled();
   });
 
   describe('Error', () => {
@@ -214,7 +252,7 @@ describe('LobeAzureOpenAI', () => {
        message: 'Deployment not found',
      };
 
-     (instance['client'].streamChatCompletions as Mock).mockRejectedValue(error);
+     (instance['client'].chat.completions.create as Mock).mockRejectedValue(error);
 
      // Act
      try {
@@ -242,7 +280,7 @@ describe('LobeAzureOpenAI', () => {
      // Arrange
      const genericError = new Error('Generic Error');
 
-     (instance['client'].streamChatCompletions as Mock).mockRejectedValue(genericError);
+     (instance['client'].chat.completions.create as Mock).mockRejectedValue(genericError);
 
      // Act
      try {
@@ -279,7 +317,7 @@ describe('LobeAzureOpenAI', () => {
      }) as any;
      mockDebugStream.toReadableStream = () => mockDebugStream;
 
-     (instance['client'].streamChatCompletions as Mock).mockResolvedValue({
+     (instance['client'].chat.completions.create as Mock).mockResolvedValue({
        tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
      });
 

package/src/libs/agent-runtime/azureOpenai/index.ts

@@ -1,26 +1,28 @@
-import {
-  AzureKeyCredential,
-  ChatRequestMessage,
-  GetChatCompletionsOptions,
-  OpenAIClient,
-} from '@azure/openai';
+import OpenAI, { AzureOpenAI } from 'openai';
+import type { Stream } from 'openai/streaming';
 
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
 import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
 import { AgentRuntimeError } from '../utils/createError';
 import { debugStream } from '../utils/debugStream';
+import { transformResponseToStream } from '../utils/openaiCompatibleFactory';
 import { StreamingResponse } from '../utils/response';
-import { AzureOpenAIStream } from '../utils/streams';
+import { OpenAIStream } from '../utils/streams';
 
 export class LobeAzureOpenAI implements LobeRuntimeAI {
-  client: OpenAIClient;
+  client: AzureOpenAI;
 
   constructor(endpoint?: string, apikey?: string, apiVersion?: string) {
     if (!apikey || !endpoint)
       throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
 
-    this.client = new OpenAIClient(endpoint, new AzureKeyCredential(apikey), { apiVersion });
+    this.client = new AzureOpenAI({
+      apiKey: apikey,
+      apiVersion,
+      dangerouslyAllowBrowser: true,
+      endpoint,
+    });
 
     this.baseURL = endpoint;
   }
@@ -28,28 +30,33 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
   baseURL: string;
 
   async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
-    // ============ 1. preprocess messages ============ //
-    const camelCasePayload = this.camelCaseKeys(payload);
-    const { messages, model, maxTokens = 2048, ...params } = camelCasePayload;
-
-    // ============ 2. send api ============ //
-
+    const { messages, model, ...params } = payload;
+    // o1 series models on Azure OpenAI do not support streaming currently
+    const enableStreaming = model.startsWith('o1') ? false : (params.stream ?? true);
     try {
-      const response = await this.client.streamChatCompletions(
+      const response = await this.client.chat.completions.create({
+        messages: messages as OpenAI.ChatCompletionMessageParam[],
         model,
-        messages as ChatRequestMessage[],
-        { ...params, abortSignal: options?.signal, maxTokens } as GetChatCompletionsOptions,
-      );
-
-      const [debug, prod] = response.tee();
-
-      if (process.env.DEBUG_AZURE_CHAT_COMPLETION === '1') {
-        debugStream(debug).catch(console.error);
-      }
-
-      return StreamingResponse(AzureOpenAIStream(prod, options?.callback), {
-        headers: options?.headers,
+        ...params,
+        max_completion_tokens: 2048,
+        stream: enableStreaming,
+        tool_choice: params.tools ? 'auto' : undefined,
       });
+      if (enableStreaming) {
+        const stream = response as Stream<OpenAI.ChatCompletionChunk>;
+        const [prod, debug] = stream.tee();
+        if (process.env.DEBUG_AZURE_CHAT_COMPLETION === '1') {
+          debugStream(debug.toReadableStream()).catch(console.error);
+        }
+        return StreamingResponse(OpenAIStream(prod, { callbacks: options?.callback }), {
+          headers: options?.headers,
+        });
+      } else {
+        const stream = transformResponseToStream(response as OpenAI.ChatCompletion);
+        return StreamingResponse(OpenAIStream(stream, { callbacks: options?.callback }), {
+          headers: options?.headers,
+        });
+      }
     } catch (e) {
       let error = e as { [key: string]: any; code: string; message: string };
 
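
The behavioral change to note in the rewritten `chat()` is the streaming gate: o1-series deployments are forced onto the non-streaming path, and the plain `ChatCompletion` response is then converted back into a stream via `transformResponseToStream` so callers always receive a `StreamingResponse`. A standalone restatement of that gate follows; the helper name is illustrative, while the logic mirrors the `enableStreaming` expression added above:

```ts
// Restates the streaming decision added in the hunk above: o1-series models on
// Azure OpenAI are always requested without streaming; other models keep the
// caller's `stream` flag, defaulting to true.
const shouldStream = (model: string, stream?: boolean): boolean =>
  model.startsWith('o1') ? false : (stream ?? true);

shouldStream('o1-preview');              // false → JSON response, then transformResponseToStream
shouldStream('gpt-35-turbo-16k');        // true  → SSE stream, tee'd for optional debug logging
shouldStream('gpt-35-turbo-16k', false); // false → caller explicitly opted out of streaming
```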

package/src/libs/agent-runtime/types/type.ts

@@ -59,6 +59,7 @@ export enum ModelProvider {
   TencentCloud = 'tencentcloud',
   TogetherAI = 'togetherai',
   Upstage = 'upstage',
+  VLLM = 'vllm',
   Wenxin = 'wenxin',
   XAI = 'xai',
   ZeroOne = 'zeroone',

package/src/libs/agent-runtime/utils/streams/index.ts

@@ -1,5 +1,4 @@
 export * from './anthropic';
-export * from './azureOpenai';
 export * from './bedrock';
 export * from './google-ai';
 export * from './ollama';

package/src/libs/agent-runtime/vllm/index.ts

@@ -0,0 +1,44 @@
+import { ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+import type { ChatModelCard } from '@/types/llm';
+
+export interface VLLMModelCard {
+  id: string;
+}
+
+export const LobeVLLMAI = LobeOpenAICompatibleFactory({
+  baseURL: 'http://localhost:8000/v1',
+  debug: {
+    chatCompletion: () => process.env.DEBUG_VLLM_CHAT_COMPLETION === '1',
+  },
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
+    const modelsPage = await client.models.list() as any;
+    const modelList: VLLMModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
+  },
+  provider: ModelProvider.VLLM,
+});
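
Because the provider is produced by `LobeOpenAICompatibleFactory`, it behaves like the other OpenAI-compatible runtimes: an OpenAI-style client pointed at a vLLM server's `/v1` endpoint, with model metadata looked up against `LOBE_DEFAULT_MODEL_LIST`. A hedged usage sketch follows; the constructor options follow the `Partial<ClientOptions>` shape used by AgentRuntime above, and the model id and API key value are placeholders:

```ts
// Hypothetical usage sketch — not part of the diff. A vLLM server started without
// an API key requirement accepts any placeholder value, so 'EMPTY' is a common choice;
// baseURL overrides the factory default shown above when the server runs elsewhere.
const vllm = new LobeVLLMAI({ apiKey: 'EMPTY', baseURL: 'http://localhost:8000/v1' });

const res = await vllm.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'Qwen/Qwen2.5-7B-Instruct', // whichever model the vLLM server was launched with
  temperature: 0.6,
});
```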

package/src/server/modules/AgentRuntime/index.test.ts

@@ -223,7 +223,9 @@ describe('initAgentRuntimeWithUserPayload method', () => {
   });
 
   it('Azure AI Provider: without apikey', async () => {
-    const jwtPayload: JWTPayload = {};
+    const jwtPayload: JWTPayload = {
+      azureApiVersion: 'test-azure-api-version',
+    };
     const runtime = await initAgentRuntimeWithUserPayload(ModelProvider.Azure, jwtPayload);
 
     expect(runtime['_runtime']).toBeInstanceOf(LobeAzureOpenAI);

package/src/server/routers/lambda/aiModel.test.ts

@@ -0,0 +1,240 @@
+import { describe, expect, it, vi } from 'vitest';
+
+import { AiInfraRepos } from '@/database/repositories/aiInfra';
+import { AiModelModel } from '@/database/server/models/aiModel';
+import { UserModel } from '@/database/server/models/user';
+
+import { aiModelRouter } from './aiModel';
+
+vi.mock('@/database/server/models/aiModel');
+vi.mock('@/database/server/models/user');
+vi.mock('@/database/repositories/aiInfra');
+vi.mock('@/server/globalConfig', () => ({
+  getServerGlobalConfig: vi.fn().mockReturnValue({
+    aiProvider: {},
+  }),
+}));
+vi.mock('@/server/modules/KeyVaultsEncrypt', () => ({
+  KeyVaultsGateKeeper: {
+    initWithEnvKey: vi.fn().mockResolvedValue({
+      encrypt: vi.fn(),
+      decrypt: vi.fn(),
+    }),
+  },
+}));
+
+describe('aiModelRouter', () => {
+  const mockCtx = {
+    userId: 'test-user',
+  };
+
+  it('should create ai model', async () => {
+    const mockCreate = vi.fn().mockResolvedValue({ id: 'model-1' });
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          create: mockCreate,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    const result = await caller.createAiModel({
+      id: 'test-model',
+      providerId: 'test-provider',
+    });
+
+    expect(result).toBe('model-1');
+    expect(mockCreate).toHaveBeenCalledWith({
+      id: 'test-model',
+      providerId: 'test-provider',
+    });
+  });
+
+  it('should get ai model by id', async () => {
+    const mockModel = {
+      id: 'model-1',
+      name: 'Test Model',
+    };
+    const mockFindById = vi.fn().mockResolvedValue(mockModel);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          findById: mockFindById,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    const result = await caller.getAiModelById({ id: 'model-1' });
+
+    expect(result).toEqual(mockModel);
+    expect(mockFindById).toHaveBeenCalledWith('model-1');
+  });
+
+  it('should get ai provider model list', async () => {
+    const mockModelList = [
+      { id: 'model-1', name: 'Model 1' },
+      { id: 'model-2', name: 'Model 2' },
+    ];
+    const mockGetList = vi.fn().mockResolvedValue(mockModelList);
+    vi.mocked(AiInfraRepos).mockImplementation(
+      () =>
+        ({
+          getAiProviderModelList: mockGetList,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    const result = await caller.getAiProviderModelList({ id: 'provider-1' });
+
+    expect(result).toEqual(mockModelList);
+    expect(mockGetList).toHaveBeenCalledWith('provider-1');
+  });
+
+  it('should remove ai model', async () => {
+    const mockDelete = vi.fn().mockResolvedValue(true);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          delete: mockDelete,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.removeAiModel({
+      id: 'model-1',
+      providerId: 'provider-1',
+    });
+
+    expect(mockDelete).toHaveBeenCalledWith('model-1', 'provider-1');
+  });
+
+  it('should update ai model', async () => {
+    const mockUpdate = vi.fn().mockResolvedValue(true);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          update: mockUpdate,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.updateAiModel({
+      id: 'model-1',
+      providerId: 'provider-1',
+      value: {
+        displayName: 'Updated Model',
+      },
+    });
+
+    expect(mockUpdate).toHaveBeenCalledWith('model-1', 'provider-1', {
+      displayName: 'Updated Model',
+    });
+  });
+
+  it('should toggle model enabled status', async () => {
+    const mockToggle = vi.fn().mockResolvedValue(true);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          toggleModelEnabled: mockToggle,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.toggleModelEnabled({
+      id: 'model-1',
+      providerId: 'provider-1',
+      enabled: true,
+    });
+
+    expect(mockToggle).toHaveBeenCalledWith({
+      id: 'model-1',
+      providerId: 'provider-1',
+      enabled: true,
+    });
+  });
+
+  it('should batch toggle ai models', async () => {
+    const mockBatchToggle = vi.fn().mockResolvedValue(true);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          batchToggleAiModels: mockBatchToggle,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.batchToggleAiModels({
+      id: 'provider-1',
+      models: ['model-1', 'model-2'],
+      enabled: true,
+    });
+
+    expect(mockBatchToggle).toHaveBeenCalledWith('provider-1', ['model-1', 'model-2'], true);
+  });
+
+  it('should batch update ai models', async () => {
+    const mockBatchUpdate = vi.fn().mockResolvedValue([]);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          batchUpdateAiModels: mockBatchUpdate,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.batchUpdateAiModels({
+      id: 'provider-1',
+      models: [{ id: 'model-1' }, { id: 'model-2' }],
+    });
+
+    expect(mockBatchUpdate).toHaveBeenCalledWith('provider-1', [
+      { id: 'model-1' },
+      { id: 'model-2' },
+    ]);
+  });
+
+  it('should clear models by provider', async () => {
+    const mockClear = vi.fn().mockResolvedValue(true);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          clearModelsByProvider: mockClear,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.clearModelsByProvider({
+      providerId: 'provider-1',
+    });
+
+    expect(mockClear).toHaveBeenCalledWith('provider-1');
+  });
+
+  it('should clear remote models', async () => {
+    const mockClearRemote = vi.fn().mockResolvedValue(true);
+    vi.mocked(AiModelModel).mockImplementation(
+      () =>
+        ({
+          clearRemoteModels: mockClearRemote,
+        }) as any,
+    );
+
+    const caller = aiModelRouter.createCaller(mockCtx);
+
+    await caller.clearRemoteModels({
+      providerId: 'provider-1',
+    });
+
+    expect(mockClearRemote).toHaveBeenCalledWith('provider-1');
+  });
+});