@lobehub/chat 1.29.6 → 1.30.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/.env.example +5 -0
  2. package/CHANGELOG.md +25 -0
  3. package/locales/ar/modelProvider.json +12 -0
  4. package/locales/bg-BG/modelProvider.json +12 -0
  5. package/locales/de-DE/modelProvider.json +12 -0
  6. package/locales/en-US/modelProvider.json +12 -0
  7. package/locales/es-ES/modelProvider.json +12 -0
  8. package/locales/fr-FR/modelProvider.json +12 -0
  9. package/locales/it-IT/modelProvider.json +12 -0
  10. package/locales/ja-JP/modelProvider.json +12 -0
  11. package/locales/ko-KR/modelProvider.json +12 -0
  12. package/locales/nl-NL/modelProvider.json +12 -0
  13. package/locales/pl-PL/modelProvider.json +12 -0
  14. package/locales/pt-BR/modelProvider.json +12 -0
  15. package/locales/ru-RU/modelProvider.json +12 -0
  16. package/locales/tr-TR/modelProvider.json +12 -0
  17. package/locales/vi-VN/modelProvider.json +12 -0
  18. package/locales/zh-CN/modelProvider.json +12 -0
  19. package/locales/zh-TW/modelProvider.json +12 -0
  20. package/package.json +2 -2
  21. package/src/app/(main)/settings/llm/ProviderList/Cloudflare/index.tsx +43 -0
  22. package/src/app/(main)/settings/llm/ProviderList/providers.tsx +4 -0
  23. package/src/config/llm.ts +9 -0
  24. package/src/config/modelProviders/cloudflare.ts +89 -0
  25. package/src/config/modelProviders/index.ts +4 -0
  26. package/src/const/auth.ts +2 -0
  27. package/src/const/settings/llm.ts +5 -0
  28. package/src/libs/agent-runtime/AgentRuntime.ts +7 -1
  29. package/src/libs/agent-runtime/cloudflare/index.test.ts +648 -0
  30. package/src/libs/agent-runtime/cloudflare/index.ts +123 -0
  31. package/src/libs/agent-runtime/types/type.ts +1 -0
  32. package/src/libs/agent-runtime/utils/cloudflareHelpers.test.ts +339 -0
  33. package/src/libs/agent-runtime/utils/cloudflareHelpers.ts +134 -0
  34. package/src/locales/default/modelProvider.ts +13 -1
  35. package/src/server/globalConfig/index.ts +4 -0
  36. package/src/server/modules/AgentRuntime/index.ts +11 -0
  37. package/src/services/_auth.ts +9 -0
  38. package/src/services/chat.ts +7 -0
  39. package/src/store/user/slices/modelList/selectors/keyVaults.ts +2 -0
  40. package/src/store/user/slices/modelList/selectors/modelConfig.ts +2 -0
  41. package/src/types/user/settings/keyVaults.ts +6 -0
@@ -9,6 +9,7 @@ import { LobeAnthropicAI } from './anthropic';
9
9
  import { LobeAzureOpenAI } from './azureOpenai';
10
10
  import { LobeBaichuanAI } from './baichuan';
11
11
  import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
12
+ import { LobeCloudflareAI, LobeCloudflareParams } from './cloudflare';
12
13
  import { LobeDeepSeekAI } from './deepseek';
13
14
  import { LobeFireworksAI } from './fireworksai';
14
15
  import { LobeGithubAI } from './github';
@@ -131,6 +132,7 @@ class AgentRuntime {
131
132
  azure: { apiVersion?: string; apikey?: string; endpoint?: string };
132
133
  baichuan: Partial<ClientOptions>;
133
134
  bedrock: Partial<LobeBedrockAIParams>;
135
+ cloudflare: Partial<LobeCloudflareParams>;
134
136
  deepseek: Partial<ClientOptions>;
135
137
  fireworksai: Partial<ClientOptions>;
136
138
  github: Partial<ClientOptions>;
@@ -321,8 +323,12 @@ class AgentRuntime {
321
323
  runtimeModel = await LobeSenseNovaAI.fromAPIKey(params.sensenova);
322
324
  break;
323
325
  }
324
- }
325
326
 
327
+ case ModelProvider.Cloudflare: {
328
+ runtimeModel = new LobeCloudflareAI(params.cloudflare ?? {});
329
+ break;
330
+ }
331
+ }
326
332
  return new AgentRuntime(runtimeModel);
327
333
  }
328
334
  }
@@ -0,0 +1,648 @@
1
+ // @vitest-environment node
2
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
3
+
4
+ import { ChatCompletionTool } from '@/libs/agent-runtime';
5
+
6
+ import * as debugStreamModule from '../utils/debugStream';
7
+ import { LobeCloudflareAI } from './index';
8
+
9
+ const provider = 'cloudflare';
10
+
11
+ const bizErrorType = 'ProviderBizError';
12
+ const invalidErrorType = 'InvalidProviderAPIKey';
13
+
14
+ // Mock the console.error to avoid polluting test output
15
+ vi.spyOn(console, 'error').mockImplementation(() => {});
16
+
17
+ let instance: LobeCloudflareAI;
18
+ const textEncoder = new TextEncoder();
19
+
20
+ afterEach(() => {
21
+ vi.restoreAllMocks();
22
+ });
23
+
24
+ describe('LobeCloudflareAI', () => {
25
+ const accountID = '80009000a000b000c000d000e000f000';
26
+ describe('init', () => {
27
+ it('should correctly initialize with API key and Account ID', async () => {
28
+ const instance = new LobeCloudflareAI({
29
+ apiKey: 'test_api_key',
30
+ baseURLOrAccountID: accountID,
31
+ });
32
+ expect(instance).toBeInstanceOf(LobeCloudflareAI);
33
+ expect(instance.baseURL).toBe(
34
+ `https://api.cloudflare.com/client/v4/accounts/${accountID}/ai/run/`,
35
+ );
36
+ expect(instance.accountID).toBe(accountID);
37
+ });
38
+
39
+ it('should correctly initialize with API key and Gateway URL', async () => {
40
+ const baseURL = `https://gateway.ai.cloudflare.com/v1/${accountID}/test-gateway/workers-ai`;
41
+ const instance = new LobeCloudflareAI({
42
+ apiKey: 'test_api_key',
43
+ baseURLOrAccountID: baseURL,
44
+ });
45
+ expect(instance).toBeInstanceOf(LobeCloudflareAI);
46
+ expect(instance.baseURL).toBe(baseURL + '/'); // baseURL MUST end with '/'.
47
+ expect(instance.accountID).toBe(accountID);
48
+ });
49
+ });
50
+
51
+ describe('chat', () => {
52
+ beforeEach(() => {
53
+ instance = new LobeCloudflareAI({
54
+ apiKey: 'test_api_key',
55
+ baseURLOrAccountID: accountID,
56
+ });
57
+
58
+ // Mock fetch
59
+ vi.spyOn(globalThis, 'fetch').mockResolvedValue(
60
+ new Response(
61
+ new ReadableStream<Uint8Array>({
62
+ start(controller) {
63
+ controller.enqueue(textEncoder.encode('data: {"response": "Hello, world!"}\n\n'));
64
+ controller.close();
65
+ },
66
+ }),
67
+ ),
68
+ );
69
+ });
70
+
71
+ it('should return a Response on successful API call', async () => {
72
+ const result = await instance.chat({
73
+ messages: [{ content: 'Hello', role: 'user' }],
74
+ model: '@hf/meta-llama/meta-llama-3-8b-instruct',
75
+ temperature: 0,
76
+ });
77
+
78
+ // Assert
79
+ expect(result).toBeInstanceOf(Response);
80
+ });
81
+
82
+ it('should handle text messages correctly', async () => {
83
+ // Arrange
84
+ const textEncoder = new TextEncoder();
85
+ const mockResponse = new Response(
86
+ new ReadableStream<Uint8Array>({
87
+ start(controller) {
88
+ controller.enqueue(textEncoder.encode('data: {"response": "Hello, world!"}\n\n'));
89
+ controller.close();
90
+ },
91
+ }),
92
+ );
93
+ (globalThis.fetch as Mock).mockResolvedValue(mockResponse);
94
+
95
+ // Act
96
+ const result = await instance.chat({
97
+ messages: [{ content: 'Hello', role: 'user' }],
98
+ model: '@hf/meta-llama/meta-llama-3-8b-instruct',
99
+ temperature: 0,
100
+ top_p: 1,
101
+ });
102
+
103
+ // Assert
104
+ expect(globalThis.fetch).toHaveBeenCalledWith(
105
+ // url
106
+ expect.objectContaining({
107
+ pathname: `/client/v4/accounts/${accountID}/ai/run/@hf/meta-llama/meta-llama-3-8b-instruct`,
108
+ }),
109
+ // body
110
+ expect.objectContaining({
111
+ body: expect.any(String),
112
+ method: 'POST',
113
+ }),
114
+ );
115
+
116
+ const fetchCallArgs = (globalThis.fetch as Mock).mock.calls[0];
117
+ const body = JSON.parse(fetchCallArgs[1].body);
118
+ expect(body).toEqual(
119
+ expect.objectContaining({
120
+ //max_tokens: 4096,
121
+ messages: [{ content: 'Hello', role: 'user' }],
122
+ //stream: true,
123
+ temperature: 0,
124
+ top_p: 1,
125
+ }),
126
+ );
127
+
128
+ expect(result).toBeInstanceOf(Response);
129
+ });
130
+
131
+ it('should handle system prompt correctly', async () => {
132
+ // Arrange
133
+ const textEncoder = new TextEncoder();
134
+ const mockResponse = new Response(
135
+ new ReadableStream<Uint8Array>({
136
+ start(controller) {
137
+ controller.enqueue(textEncoder.encode('data: {"response": "Hello, world!"}\n\n'));
138
+ controller.close();
139
+ },
140
+ }),
141
+ );
142
+ (globalThis.fetch as Mock).mockResolvedValue(mockResponse);
143
+
144
+ // Act
145
+ const result = await instance.chat({
146
+ messages: [
147
+ { content: 'You are an awesome greeter', role: 'system' },
148
+ { content: 'Hello', role: 'user' },
149
+ ],
150
+ model: '@hf/meta-llama/meta-llama-3-8b-instruct',
151
+ temperature: 0,
152
+ });
153
+
154
+ // Assert
155
+ expect(globalThis.fetch).toHaveBeenCalledWith(
156
+ // url
157
+ expect.objectContaining({
158
+ pathname: `/client/v4/accounts/${accountID}/ai/run/@hf/meta-llama/meta-llama-3-8b-instruct`,
159
+ }),
160
+ // body
161
+ expect.objectContaining({
162
+ body: expect.any(String),
163
+ method: 'POST',
164
+ }),
165
+ );
166
+
167
+ const fetchCallArgs = (globalThis.fetch as Mock).mock.calls[0];
168
+ const body = JSON.parse(fetchCallArgs[1].body);
169
+ expect(body).toEqual(
170
+ expect.objectContaining({
171
+ //max_tokens: 4096,
172
+ messages: [
173
+ { content: 'You are an awesome greeter', role: 'system' },
174
+ { content: 'Hello', role: 'user' },
175
+ ],
176
+ //stream: true,
177
+ temperature: 0,
178
+ }),
179
+ );
180
+
181
+ expect(result).toBeInstanceOf(Response);
182
+ });
183
+
184
+ it('should call Cloudflare API with supported opions', async () => {
185
+ // Arrange
186
+ const mockResponse = new Response(
187
+ new ReadableStream<Uint8Array>({
188
+ start(controller) {
189
+ controller.enqueue(textEncoder.encode('data: {"response": "Hello, world!"}\n\n'));
190
+ controller.close();
191
+ },
192
+ }),
193
+ );
194
+ (globalThis.fetch as Mock).mockResolvedValue(mockResponse);
195
+
196
+ // Act
197
+ const result = await instance.chat({
198
+ max_tokens: 2048,
199
+ messages: [{ content: 'Hello', role: 'user' }],
200
+ model: '@hf/meta-llama/meta-llama-3-8b-instruct',
201
+ temperature: 0.5,
202
+ top_p: 1,
203
+ });
204
+
205
+ // Assert
206
+ expect(globalThis.fetch).toHaveBeenCalledWith(
207
+ // url
208
+ expect.objectContaining({
209
+ pathname: `/client/v4/accounts/${accountID}/ai/run/@hf/meta-llama/meta-llama-3-8b-instruct`,
210
+ }),
211
+ // body
212
+ expect.objectContaining({
213
+ body: expect.any(String),
214
+ method: 'POST',
215
+ }),
216
+ );
217
+
218
+ const fetchCallArgs = (globalThis.fetch as Mock).mock.calls[0];
219
+ const body = JSON.parse(fetchCallArgs[1].body);
220
+ expect(body).toEqual(
221
+ expect.objectContaining({
222
+ max_tokens: 2048,
223
+ messages: [{ content: 'Hello', role: 'user' }],
224
+ //stream: true,
225
+ temperature: 0.5,
226
+ top_p: 1,
227
+ }),
228
+ );
229
+
230
+ expect(result).toBeInstanceOf(Response);
231
+ });
232
+
233
+ it('should call debugStream in DEBUG mode', async () => {
234
+ // Arrange
235
+ const mockProdStream = new ReadableStream({
236
+ start(controller) {
237
+ controller.enqueue('Hello, world!');
238
+ controller.close();
239
+ },
240
+ }) as any;
241
+ const mockDebugStream = new ReadableStream({
242
+ start(controller) {
243
+ controller.enqueue('Debug stream content');
244
+ controller.close();
245
+ },
246
+ }) as any;
247
+ mockDebugStream.toReadableStream = () => mockDebugStream;
248
+
249
+ (globalThis.fetch as Mock).mockResolvedValue({
250
+ body: {
251
+ tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
252
+ },
253
+ });
254
+
255
+ const originalDebugValue = process.env.DEBUG_CLOUDFLARE_CHAT_COMPLETION;
256
+
257
+ process.env.DEBUG_CLOUDFLARE_CHAT_COMPLETION = '1';
258
+ vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
259
+
260
+ // Act
261
+ await instance.chat({
262
+ messages: [{ content: 'Hello', role: 'user' }],
263
+ model: '@hf/meta-llama/meta-llama-3-8b-instruct',
264
+ temperature: 0,
265
+ });
266
+
267
+ // Assert
268
+ expect(debugStreamModule.debugStream).toHaveBeenCalled();
269
+
270
+ // Cleanup
271
+ process.env.DEBUG_CLOUDFLARE_CHAT_COMPLETION = originalDebugValue;
272
+ });
273
+
274
+ describe('chat with tools', () => {
275
+ it('should call client.beta.tools.messages.create when tools are provided', async () => {
276
+ // Arrange
277
+ const tools: ChatCompletionTool[] = [
278
+ { function: { name: 'tool1', description: 'desc1' }, type: 'function' },
279
+ ];
280
+
281
+ // Act
282
+ await instance.chat({
283
+ messages: [{ content: 'Hello', role: 'user' }],
284
+ model: '@hf/meta-llama/meta-llama-3-8b-instruct',
285
+ temperature: 1,
286
+ tools,
287
+ });
288
+
289
+ // Assert
290
+ expect(globalThis.fetch).toHaveBeenCalled();
291
+
292
+ const fetchCallArgs = (globalThis.fetch as Mock).mock.calls[0];
293
+ const body = JSON.parse(fetchCallArgs[1].body);
294
+ expect(body).toEqual(
295
+ expect.objectContaining({
296
+ tools: tools.map((t) => t.function),
297
+ }),
298
+ );
299
+ });
300
+ });
301
+
302
+ describe('Error', () => {
303
+ it('should throw ProviderBizError error on 400 error', async () => {
304
+ // Arrange
305
+ const apiError = {
306
+ status: 400,
307
+ error: {
308
+ type: 'error',
309
+ error: {
310
+ type: 'authentication_error',
311
+ message: 'invalid x-api-key',
312
+ },
313
+ },
314
+ };
315
+ (globalThis.fetch as Mock).mockRejectedValue(apiError);
316
+
317
+ try {
318
+ // Act
319
+ await instance.chat({
320
+ messages: [{ content: 'Hello', role: 'user' }],
321
+ model: '@hf/meta-llama/meta-llama-3-8b-instruct',
322
+ temperature: 0,
323
+ });
324
+ } catch (e) {
325
+ // Assert
326
+ expect(e).toEqual({
327
+ endpoint: expect.stringMatching(/https:\/\/.+/),
328
+ error: apiError,
329
+ errorType: bizErrorType,
330
+ provider,
331
+ });
332
+ }
333
+ });
334
+
335
+ it('should throw InvalidProviderAPIKey if no accountID is provided', async () => {
336
+ try {
337
+ new LobeCloudflareAI({
338
+ apiKey: 'test',
339
+ });
340
+ } catch (e) {
341
+ expect(e).toEqual({ errorType: invalidErrorType });
342
+ }
343
+ });
344
+
345
+ it('should throw InvalidProviderAPIKey if no apiKey is provided', async () => {
346
+ try {
347
+ new LobeCloudflareAI({
348
+ baseURLOrAccountID: accountID,
349
+ });
350
+ } catch (e) {
351
+ expect(e).toEqual({ errorType: invalidErrorType });
352
+ }
353
+ });
354
+
355
+ it('should not throw Error when apiKey is not provided but baseURL is provided', async () => {
356
+ const customInstance = new LobeCloudflareAI({
357
+ baseURLOrAccountID: 'https://custom.cloudflare.url/',
358
+ });
359
+ expect(customInstance).toBeInstanceOf(LobeCloudflareAI);
360
+ expect(customInstance.apiKey).toBeUndefined();
361
+ expect(customInstance.baseURL).toBe('https://custom.cloudflare.url/');
362
+ });
363
+ });
364
+
365
+ describe('Error handling', () => {
366
+ it('should throw ProviderBizError on other error status codes', async () => {
367
+ // Arrange
368
+ const apiError = { status: 400 };
369
+ (globalThis.fetch as Mock).mockRejectedValue(apiError);
370
+
371
+ // Act & Assert
372
+ await expect(
373
+ instance.chat({
374
+ messages: [{ content: 'Hello', role: 'user' }],
375
+ model: '@hf/meta-llama/meta-llama-3-8b-instruct',
376
+ temperature: 1,
377
+ }),
378
+ ).rejects.toEqual({
379
+ endpoint: expect.stringMatching(/https:\/\/.+/),
380
+ error: apiError,
381
+ errorType: bizErrorType,
382
+ provider,
383
+ });
384
+ });
385
+
386
+ it('should desensitize accountID in error message', async () => {
387
+ // Arrange
388
+ const apiError = { status: 400 };
389
+ const customInstance = new LobeCloudflareAI({
390
+ apiKey: 'test',
391
+ baseURLOrAccountID: accountID,
392
+ });
393
+ (globalThis.fetch as Mock).mockRejectedValue(apiError);
394
+
395
+ // Act & Assert
396
+ await expect(
397
+ customInstance.chat({
398
+ messages: [{ content: 'Hello', role: 'user' }],
399
+ model: '@hf/meta-llama/meta-llama-3-8b-instruct',
400
+ temperature: 0,
401
+ }),
402
+ ).rejects.toEqual({
403
+ endpoint: expect.not.stringContaining(accountID),
404
+ error: apiError,
405
+ errorType: bizErrorType,
406
+ provider,
407
+ });
408
+ });
409
+ });
410
+
411
+ describe('Options', () => {
412
+ it('should pass signal to API call', async () => {
413
+ // Arrange
414
+ const controller = new AbortController();
415
+
416
+ // Act
417
+ await instance.chat(
418
+ {
419
+ messages: [{ content: 'Hello', role: 'user' }],
420
+ model: '@hf/meta-llama/meta-llama-3-8b-instruct',
421
+ temperature: 1,
422
+ },
423
+ { signal: controller.signal },
424
+ );
425
+
426
+ // Assert
427
+ expect(globalThis.fetch).toHaveBeenCalledWith(
428
+ expect.any(URL),
429
+ expect.objectContaining({ signal: controller.signal }),
430
+ );
431
+ });
432
+
433
+ it('should apply callback to the returned stream', async () => {
434
+ // Arrange
435
+ const callback = vi.fn();
436
+
437
+ // Act
438
+ await instance.chat(
439
+ {
440
+ messages: [{ content: 'Hello', role: 'user' }],
441
+ model: '@hf/meta-llama/meta-llama-3-8b-instruct',
442
+ temperature: 0,
443
+ },
444
+ {
445
+ callback: { onStart: callback },
446
+ },
447
+ );
448
+
449
+ // Assert
450
+ expect(callback).toHaveBeenCalled();
451
+ });
452
+
453
+ it('should set headers on the response', async () => {
454
+ // Arrange
455
+ const headers = { 'X-Test-Header': 'test' };
456
+
457
+ // Act
458
+ const result = await instance.chat(
459
+ {
460
+ messages: [{ content: 'Hello', role: 'user' }],
461
+ model: '@hf/meta-llama/meta-llama-3-8b-instruct',
462
+ temperature: 1,
463
+ },
464
+ { headers },
465
+ );
466
+
467
+ // Assert
468
+ expect(result.headers.get('X-Test-Header')).toBe('test');
469
+ });
470
+ });
471
+
472
+ describe('Edge cases', () => {
473
+ it('should handle empty messages array', async () => {
474
+ // Act & Assert
475
+ await expect(
476
+ instance.chat({
477
+ messages: [],
478
+ model: '@hf/meta-llama/meta-llama-3-8b-instruct',
479
+ temperature: 1,
480
+ }),
481
+ ).resolves.toBeInstanceOf(Response);
482
+ });
483
+ });
484
+ });
485
+
486
+ describe('models', () => {
487
+ it('should send request', async () => {
488
+ // Arrange
489
+ const apiKey = 'test_api_key';
490
+ const instance = new LobeCloudflareAI({ apiKey, baseURLOrAccountID: accountID });
491
+
492
+ vi.spyOn(globalThis, 'fetch').mockResolvedValue(
493
+ new Response(
494
+ JSON.stringify({
495
+ result: [
496
+ {
497
+ description: 'Model 1',
498
+ name: 'model1',
499
+ task: { name: 'Text Generation' },
500
+ properties: [{ property_id: 'beta', value: 'false' }],
501
+ },
502
+ {
503
+ description: 'Model 2',
504
+ name: 'model2',
505
+ task: { name: 'Text Generation' },
506
+ properties: [{ property_id: 'beta', value: 'true' }],
507
+ },
508
+ ],
509
+ }),
510
+ ),
511
+ );
512
+
513
+ // Act
514
+ const result = await instance.models();
515
+
516
+ // Assert
517
+ expect(globalThis.fetch).toHaveBeenCalledWith(
518
+ `https://api.cloudflare.com/client/v4/accounts/${accountID}/ai/models/search`,
519
+ {
520
+ headers: {
521
+ 'Authorization': `Bearer ${apiKey}`,
522
+ 'Content-Type': 'application/json',
523
+ },
524
+ method: 'GET',
525
+ },
526
+ );
527
+
528
+ expect(result).toHaveLength(2);
529
+ });
530
+
531
+ it('should set id to name', async () => {
532
+ // Arrange
533
+ const instance = new LobeCloudflareAI({
534
+ apiKey: 'test_api_key',
535
+ baseURLOrAccountID: accountID,
536
+ });
537
+
538
+ vi.spyOn(globalThis, 'fetch').mockResolvedValue(
539
+ new Response(
540
+ JSON.stringify({
541
+ result: [
542
+ {
543
+ id: 'id1',
544
+ name: 'name1',
545
+ task: { name: 'Text Generation' },
546
+ },
547
+ ],
548
+ }),
549
+ ),
550
+ );
551
+
552
+ // Act
553
+ const result = await instance.models();
554
+
555
+ // Assert
556
+ expect(result).toEqual([
557
+ expect.objectContaining({
558
+ displayName: 'name1',
559
+ id: 'name1',
560
+ }),
561
+ ]);
562
+ });
563
+
564
+ it('should filter text generation models', async () => {
565
+ // Arrange
566
+ const instance = new LobeCloudflareAI({
567
+ apiKey: 'test_api_key',
568
+ baseURLOrAccountID: accountID,
569
+ });
570
+
571
+ vi.spyOn(globalThis, 'fetch').mockResolvedValue(
572
+ new Response(
573
+ JSON.stringify({
574
+ result: [
575
+ {
576
+ id: '1',
577
+ name: 'model1',
578
+ task: { name: 'Text Generation' },
579
+ },
580
+ {
581
+ id: '2',
582
+ name: 'model2',
583
+ task: { name: 'Text Classification' },
584
+ },
585
+ ],
586
+ }),
587
+ ),
588
+ );
589
+
590
+ // Act
591
+ const result = await instance.models();
592
+
593
+ // Assert
594
+ expect(result).toEqual([
595
+ expect.objectContaining({
596
+ displayName: 'model1',
597
+ id: 'model1',
598
+ }),
599
+ ]);
600
+ });
601
+
602
+ it('should enable non-beta models and mark beta models', async () => {
603
+ // Arrange
604
+ const instance = new LobeCloudflareAI({
605
+ apiKey: 'test_api_key',
606
+ baseURLOrAccountID: accountID,
607
+ });
608
+
609
+ vi.spyOn(globalThis, 'fetch').mockResolvedValue(
610
+ new Response(
611
+ JSON.stringify({
612
+ result: [
613
+ {
614
+ id: '1',
615
+ name: 'model1',
616
+ task: { name: 'Text Generation' },
617
+ properties: [{ property_id: 'beta', value: 'false' }],
618
+ },
619
+ {
620
+ id: '2',
621
+ name: 'model2',
622
+ task: { name: 'Text Generation' },
623
+ properties: [{ property_id: 'beta', value: 'true' }],
624
+ },
625
+ ],
626
+ }),
627
+ ),
628
+ );
629
+
630
+ // Act
631
+ const result = await instance.models();
632
+
633
+ // Assert
634
+ expect(result).toEqual([
635
+ expect.objectContaining({
636
+ displayName: 'model1',
637
+ enabled: true,
638
+ id: 'model1',
639
+ }),
640
+ expect.objectContaining({
641
+ displayName: 'model2 (Beta)',
642
+ enabled: false,
643
+ id: 'model2',
644
+ }),
645
+ ]);
646
+ });
647
+ });
648
+ });