@lobehub/chat 1.136.13 → 1.137.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190)
  1. package/.cursor/rules/add-setting-env.mdc +175 -0
  2. package/.cursor/rules/db-migrations.mdc +25 -0
  3. package/.env.example +7 -0
  4. package/CHANGELOG.md +50 -0
  5. package/Dockerfile +3 -2
  6. package/Dockerfile.database +15 -3
  7. package/Dockerfile.pglite +3 -2
  8. package/changelog/v1.json +18 -0
  9. package/docs/development/database-schema.dbml +1 -0
  10. package/docs/self-hosting/advanced/feature-flags.mdx +25 -15
  11. package/docs/self-hosting/advanced/feature-flags.zh-CN.mdx +25 -15
  12. package/docs/self-hosting/environment-variables/basic.mdx +12 -0
  13. package/docs/self-hosting/environment-variables/basic.zh-CN.mdx +12 -0
  14. package/locales/ar/setting.json +8 -0
  15. package/locales/bg-BG/setting.json +8 -0
  16. package/locales/de-DE/setting.json +8 -0
  17. package/locales/en-US/setting.json +8 -0
  18. package/locales/es-ES/setting.json +8 -0
  19. package/locales/fa-IR/setting.json +8 -0
  20. package/locales/fr-FR/setting.json +8 -0
  21. package/locales/it-IT/setting.json +8 -0
  22. package/locales/ja-JP/setting.json +8 -0
  23. package/locales/ko-KR/setting.json +8 -0
  24. package/locales/nl-NL/setting.json +8 -0
  25. package/locales/pl-PL/setting.json +8 -0
  26. package/locales/pt-BR/setting.json +8 -0
  27. package/locales/ru-RU/setting.json +8 -0
  28. package/locales/tr-TR/setting.json +8 -0
  29. package/locales/vi-VN/setting.json +8 -0
  30. package/locales/zh-CN/setting.json +8 -0
  31. package/locales/zh-TW/setting.json +8 -0
  32. package/package.json +1 -1
  33. package/packages/agent-runtime/examples/tools-calling.ts +4 -3
  34. package/packages/agent-runtime/src/core/__tests__/runtime.test.ts +559 -29
  35. package/packages/agent-runtime/src/core/runtime.ts +171 -43
  36. package/packages/agent-runtime/src/types/instruction.ts +32 -6
  37. package/packages/agent-runtime/src/types/runtime.ts +2 -2
  38. package/packages/agent-runtime/src/types/state.ts +1 -8
  39. package/packages/agent-runtime/vitest.config.mts +14 -0
  40. package/packages/const/src/settings/image.ts +8 -0
  41. package/packages/const/src/settings/index.ts +3 -0
  42. package/packages/context-engine/src/__tests__/pipeline.test.ts +485 -0
  43. package/packages/context-engine/src/base/__tests__/BaseProcessor.test.ts +381 -0
  44. package/packages/context-engine/src/base/__tests__/BaseProvider.test.ts +392 -0
  45. package/packages/context-engine/src/processors/__tests__/MessageCleanup.test.ts +346 -0
  46. package/packages/context-engine/src/processors/__tests__/ToolCall.test.ts +552 -0
  47. package/packages/database/migrations/0038_add_image_user_settings.sql +1 -0
  48. package/packages/database/migrations/meta/0038_snapshot.json +7580 -0
  49. package/packages/database/migrations/meta/_journal.json +7 -0
  50. package/packages/database/src/core/migrations.json +6 -0
  51. package/packages/database/src/models/user.ts +3 -1
  52. package/packages/database/src/schemas/user.ts +1 -0
  53. package/packages/file-loaders/src/loaders/docx/index.test.ts +0 -1
  54. package/packages/file-loaders/src/loaders/excel/__snapshots__/index.test.ts.snap +30 -0
  55. package/packages/file-loaders/src/loaders/excel/index.test.ts +8 -0
  56. package/packages/file-loaders/src/loaders/pptx/index.test.ts +25 -0
  57. package/packages/file-loaders/src/utils/parser-utils.test.ts +155 -0
  58. package/packages/file-loaders/vitest.config.mts +8 -0
  59. package/packages/model-runtime/CLAUDE.md +5 -0
  60. package/packages/model-runtime/docs/test-coverage.md +706 -0
  61. package/packages/model-runtime/src/core/ModelRuntime.test.ts +231 -0
  62. package/packages/model-runtime/src/core/RouterRuntime/createRuntime.ts +1 -1
  63. package/packages/model-runtime/src/core/openaiCompatibleFactory/createImage.test.ts +799 -0
  64. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.test.ts +188 -4
  65. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.ts +41 -10
  66. package/packages/model-runtime/src/core/streams/openai/__snapshots__/responsesStream.test.ts.snap +439 -0
  67. package/packages/model-runtime/src/core/streams/openai/openai.test.ts +789 -0
  68. package/packages/model-runtime/src/core/streams/openai/responsesStream.test.ts +551 -0
  69. package/packages/model-runtime/src/core/usageConverters/utils/computeChatCost.test.ts +230 -0
  70. package/packages/model-runtime/src/core/usageConverters/utils/computeImageCost.test.ts +334 -37
  71. package/packages/model-runtime/src/providerTestUtils.ts +148 -145
  72. package/packages/model-runtime/src/providers/ai302/index.test.ts +60 -0
  73. package/packages/model-runtime/src/providers/ai302/index.ts +9 -4
  74. package/packages/model-runtime/src/providers/ai360/index.test.ts +1213 -1
  75. package/packages/model-runtime/src/providers/ai360/index.ts +9 -4
  76. package/packages/model-runtime/src/providers/aihubmix/index.test.ts +73 -0
  77. package/packages/model-runtime/src/providers/aihubmix/index.ts +6 -9
  78. package/packages/model-runtime/src/providers/akashchat/index.test.ts +433 -3
  79. package/packages/model-runtime/src/providers/akashchat/index.ts +12 -7
  80. package/packages/model-runtime/src/providers/anthropic/generateObject.test.ts +183 -29
  81. package/packages/model-runtime/src/providers/anthropic/generateObject.ts +40 -24
  82. package/packages/model-runtime/src/providers/azureai/index.test.ts +102 -0
  83. package/packages/model-runtime/src/providers/baichuan/index.test.ts +416 -26
  84. package/packages/model-runtime/src/providers/baichuan/index.ts +23 -20
  85. package/packages/model-runtime/src/providers/bedrock/index.test.ts +420 -2
  86. package/packages/model-runtime/src/providers/cerebras/index.test.ts +465 -0
  87. package/packages/model-runtime/src/providers/cerebras/index.ts +8 -3
  88. package/packages/model-runtime/src/providers/cohere/index.test.ts +1074 -1
  89. package/packages/model-runtime/src/providers/cohere/index.ts +8 -3
  90. package/packages/model-runtime/src/providers/cometapi/index.test.ts +439 -3
  91. package/packages/model-runtime/src/providers/cometapi/index.ts +8 -3
  92. package/packages/model-runtime/src/providers/deepseek/index.test.ts +116 -1
  93. package/packages/model-runtime/src/providers/deepseek/index.ts +8 -3
  94. package/packages/model-runtime/src/providers/fireworksai/index.test.ts +264 -3
  95. package/packages/model-runtime/src/providers/fireworksai/index.ts +8 -3
  96. package/packages/model-runtime/src/providers/giteeai/index.test.ts +325 -3
  97. package/packages/model-runtime/src/providers/giteeai/index.ts +23 -6
  98. package/packages/model-runtime/src/providers/github/index.test.ts +532 -3
  99. package/packages/model-runtime/src/providers/github/index.ts +8 -3
  100. package/packages/model-runtime/src/providers/groq/index.test.ts +344 -31
  101. package/packages/model-runtime/src/providers/groq/index.ts +8 -3
  102. package/packages/model-runtime/src/providers/higress/index.test.ts +142 -0
  103. package/packages/model-runtime/src/providers/higress/index.ts +8 -3
  104. package/packages/model-runtime/src/providers/huggingface/index.test.ts +612 -1
  105. package/packages/model-runtime/src/providers/huggingface/index.ts +9 -4
  106. package/packages/model-runtime/src/providers/hunyuan/index.test.ts +365 -1
  107. package/packages/model-runtime/src/providers/hunyuan/index.ts +9 -3
  108. package/packages/model-runtime/src/providers/infiniai/index.test.ts +71 -0
  109. package/packages/model-runtime/src/providers/internlm/index.test.ts +369 -2
  110. package/packages/model-runtime/src/providers/internlm/index.ts +10 -5
  111. package/packages/model-runtime/src/providers/jina/index.test.ts +164 -3
  112. package/packages/model-runtime/src/providers/jina/index.ts +8 -3
  113. package/packages/model-runtime/src/providers/lmstudio/index.test.ts +182 -3
  114. package/packages/model-runtime/src/providers/lmstudio/index.ts +8 -3
  115. package/packages/model-runtime/src/providers/mistral/index.test.ts +779 -27
  116. package/packages/model-runtime/src/providers/mistral/index.ts +8 -3
  117. package/packages/model-runtime/src/providers/modelscope/index.test.ts +232 -1
  118. package/packages/model-runtime/src/providers/modelscope/index.ts +8 -3
  119. package/packages/model-runtime/src/providers/moonshot/index.test.ts +489 -2
  120. package/packages/model-runtime/src/providers/moonshot/index.ts +8 -3
  121. package/packages/model-runtime/src/providers/nebius/index.test.ts +381 -3
  122. package/packages/model-runtime/src/providers/nebius/index.ts +8 -3
  123. package/packages/model-runtime/src/providers/newapi/index.test.ts +667 -3
  124. package/packages/model-runtime/src/providers/newapi/index.ts +6 -3
  125. package/packages/model-runtime/src/providers/nvidia/index.test.ts +168 -1
  126. package/packages/model-runtime/src/providers/nvidia/index.ts +12 -7
  127. package/packages/model-runtime/src/providers/ollama/index.test.ts +797 -1
  128. package/packages/model-runtime/src/providers/ollama/index.ts +8 -0
  129. package/packages/model-runtime/src/providers/ollamacloud/index.test.ts +411 -0
  130. package/packages/model-runtime/src/providers/ollamacloud/index.ts +8 -3
  131. package/packages/model-runtime/src/providers/openai/index.test.ts +171 -2
  132. package/packages/model-runtime/src/providers/openai/index.ts +8 -3
  133. package/packages/model-runtime/src/providers/openrouter/index.test.ts +1647 -95
  134. package/packages/model-runtime/src/providers/openrouter/index.ts +12 -7
  135. package/packages/model-runtime/src/providers/qiniu/index.test.ts +294 -1
  136. package/packages/model-runtime/src/providers/qiniu/index.ts +8 -3
  137. package/packages/model-runtime/src/providers/search1api/index.test.ts +1131 -11
  138. package/packages/model-runtime/src/providers/search1api/index.ts +10 -4
  139. package/packages/model-runtime/src/providers/sensenova/index.test.ts +1069 -1
  140. package/packages/model-runtime/src/providers/sensenova/index.ts +8 -3
  141. package/packages/model-runtime/src/providers/siliconcloud/index.test.ts +196 -0
  142. package/packages/model-runtime/src/providers/siliconcloud/index.ts +8 -3
  143. package/packages/model-runtime/src/providers/spark/index.test.ts +293 -1
  144. package/packages/model-runtime/src/providers/spark/index.ts +8 -3
  145. package/packages/model-runtime/src/providers/stepfun/index.test.ts +322 -3
  146. package/packages/model-runtime/src/providers/stepfun/index.ts +8 -3
  147. package/packages/model-runtime/src/providers/tencentcloud/index.test.ts +182 -3
  148. package/packages/model-runtime/src/providers/tencentcloud/index.ts +8 -3
  149. package/packages/model-runtime/src/providers/togetherai/index.test.ts +359 -4
  150. package/packages/model-runtime/src/providers/togetherai/index.ts +12 -5
  151. package/packages/model-runtime/src/providers/v0/index.test.ts +341 -0
  152. package/packages/model-runtime/src/providers/v0/index.ts +20 -6
  153. package/packages/model-runtime/src/providers/vercelaigateway/index.test.ts +710 -0
  154. package/packages/model-runtime/src/providers/vercelaigateway/index.ts +19 -13
  155. package/packages/model-runtime/src/providers/vllm/index.test.ts +45 -1
  156. package/packages/model-runtime/src/providers/volcengine/index.test.ts +75 -0
  157. package/packages/model-runtime/src/providers/wenxin/index.test.ts +144 -1
  158. package/packages/model-runtime/src/providers/wenxin/index.ts +8 -3
  159. package/packages/model-runtime/src/providers/xai/index.test.ts +105 -1
  160. package/packages/model-runtime/src/providers/xinference/index.test.ts +70 -1
  161. package/packages/model-runtime/src/providers/zeroone/index.test.ts +327 -3
  162. package/packages/model-runtime/src/providers/zeroone/index.ts +23 -6
  163. package/packages/model-runtime/src/providers/zhipu/index.test.ts +908 -236
  164. package/packages/model-runtime/src/providers/zhipu/index.ts +8 -3
  165. package/packages/model-runtime/src/types/structureOutput.ts +5 -1
  166. package/packages/model-runtime/vitest.config.mts +7 -1
  167. package/packages/types/src/aiChat.ts +20 -2
  168. package/packages/types/src/serverConfig.ts +7 -1
  169. package/packages/types/src/tool/index.ts +1 -0
  170. package/packages/types/src/tool/tool.ts +33 -0
  171. package/packages/types/src/user/settings/image.ts +3 -0
  172. package/packages/types/src/user/settings/index.ts +3 -0
  173. package/src/app/[variants]/(main)/settings/_layout/SettingsContent.tsx +3 -0
  174. package/src/app/[variants]/(main)/settings/hooks/useCategory.tsx +8 -3
  175. package/src/app/[variants]/(main)/settings/image/index.tsx +74 -0
  176. package/src/components/FormInput/FormSliderWithInput.tsx +40 -0
  177. package/src/components/FormInput/index.ts +1 -0
  178. package/src/envs/image.ts +27 -0
  179. package/src/features/Conversation/Messages/Assistant/index.tsx +1 -1
  180. package/src/features/Conversation/Messages/User/index.tsx +2 -2
  181. package/src/hooks/useFetchAiImageConfig.ts +12 -17
  182. package/src/locales/default/setting.ts +8 -0
  183. package/src/server/globalConfig/index.ts +5 -0
  184. package/src/server/routers/lambda/aiChat.ts +2 -0
  185. package/src/store/global/initialState.ts +1 -0
  186. package/src/store/image/slices/generationConfig/action.test.ts +17 -0
  187. package/src/store/image/slices/generationConfig/action.ts +18 -21
  188. package/src/store/image/slices/generationConfig/initialState.ts +3 -2
  189. package/src/store/user/slices/common/action.ts +1 -0
  190. package/src/store/user/slices/settings/selectors/settings.ts +3 -0
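The largest single change in this release is the rewritten OpenRouter provider test suite (item 133, excerpted below). Among other behaviours, the new tests pin down how the chat payload derives `reasoning.max_tokens` from the request's `thinking.budget_tokens`, `max_tokens`, and the model's known `maxOutput`. The following is a minimal sketch of that derivation, inferred only from the test expectations in the excerpt; the helper and constant names are illustrative and not part of the package.

// Illustrative sketch only - inferred from the test expectations below,
// not copied from the package source. Names and constants are hypothetical.
interface ThinkingConfig {
  budget_tokens: number;
  type: 'enabled' | 'disabled';
}

const DEFAULT_REASONING_BUDGET = 1024; // used when budget_tokens is falsy
const DEFAULT_MAX_OUTPUT = 32_000; // fallback when neither max_tokens nor the model's maxOutput is known

const resolveReasoning = (
  thinking?: ThinkingConfig,
  maxTokens?: number, // max_tokens from the chat payload
  modelMaxOutput?: number, // maxOutput from the model-bank entry, if any
): { max_tokens?: number } => {
  // The tests expect an empty reasoning object when thinking is absent or disabled.
  if (!thinking || thinking.type !== 'enabled') return {};

  const budget = thinking.budget_tokens || DEFAULT_REASONING_BUDGET;
  const ceiling = (maxTokens ?? modelMaxOutput ?? DEFAULT_MAX_OUTPUT) - 1;

  // e.g. budget 2000 with max_tokens 1000 -> 999; budget 50000 with no limits -> 31999
  return { max_tokens: Math.min(budget, ceiling) };
};

// Matches the "cap reasoning tokens to max_tokens - 1" case asserted below:
// resolveReasoning({ type: 'enabled', budget_tokens: 2000 }, 1000) => { max_tokens: 999 }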
package/packages/model-runtime/src/providers/openrouter/index.test.ts

@@ -1,10 +1,9 @@
  // @vitest-environment node
  import { LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
- import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

  import { testProvider } from '../../providerTestUtils';
- import models from './fixtures/models.json';
- import { LobeOpenRouterAI } from './index';
+ import { LobeOpenRouterAI, params } from './index';

  const provider = 'openrouter';
  const defaultBaseURL = 'https://openrouter.ai/api/v1';
@@ -21,159 +20,1712 @@ testProvider({
  });

  // Mock the console.error to avoid polluting test output
- vi.spyOn(console, 'error').mockImplementation(() => { });
+ vi.spyOn(console, 'error').mockImplementation(() => {});

  let instance: LobeOpenAICompatibleRuntime;

  beforeEach(() => {
  instance = new LobeOpenRouterAI({ apiKey: 'test' });

- // Replace the instance's client with a complete fake client, to avoid initialization-order issues when accessing deeply nested properties
- instance['client'] = {
- chat: {
- completions: {
- create: vi.fn().mockResolvedValue(Promise.resolve(new ReadableStream())),
- },
- },
- models: {
- list: vi.fn().mockResolvedValue({ data: [] }),
- },
- } as any;
+ // Use vi.spyOn to mock the chat.completions.create method
+ vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+ new ReadableStream() as any,
+ );
  });

  afterEach(() => {
- vi.restoreAllMocks();
+ vi.clearAllMocks();
  });

- describe('LobeOpenRouterAI', () => {
- describe('init', () => {
- it('should correctly initialize with a custom base URL', async () => {
- const inst = new LobeOpenRouterAI({
- apiKey: 'test_api_key',
- baseURL: 'https://api.abc.com/v1',
- });
- expect(inst).toBeInstanceOf(LobeOpenRouterAI);
- expect(inst.baseURL).toEqual('https://api.abc.com/v1');
+ describe('LobeOpenRouterAI - custom features', () => {
+ describe('Params Export', () => {
+ it('should export params object', () => {
+ expect(params).toBeDefined();
+ expect(params.provider).toBe('openrouter');
+ expect(params.baseURL).toBe('https://openrouter.ai/api/v1');
+ });
+
+ it('should have chatCompletion configuration', () => {
+ expect(params.chatCompletion).toBeDefined();
+ expect(params.chatCompletion.handlePayload).toBeDefined();
+ });
+
+ it('should have constructorOptions with headers', () => {
+ expect(params.constructorOptions).toBeDefined();
+ expect(params.constructorOptions.defaultHeaders).toBeDefined();
+ expect(params.constructorOptions.defaultHeaders['HTTP-Referer']).toBe('https://lobehub.com');
+ expect(params.constructorOptions.defaultHeaders['X-Title']).toBe('LobeHub');
+ });
+
+ it('should have debug configuration', () => {
+ expect(params.debug).toBeDefined();
+ expect(params.debug.chatCompletion).toBeDefined();
+ });
+
+ it('should have models function', () => {
+ expect(params.models).toBeDefined();
+ expect(typeof params.models).toBe('function');
+ });
+ });
+
+ describe('Debug Configuration', () => {
+ it('should disable debug by default', () => {
+ delete process.env.DEBUG_OPENROUTER_CHAT_COMPLETION;
+ const result = params.debug.chatCompletion();
+ expect(result).toBe(false);
+ });
+
+ it('should enable debug when env is set', () => {
+ process.env.DEBUG_OPENROUTER_CHAT_COMPLETION = '1';
+ const result = params.debug.chatCompletion();
+ expect(result).toBe(true);
+ delete process.env.DEBUG_OPENROUTER_CHAT_COMPLETION;
+ });
+ });
+
+ describe('Constructor Options', () => {
+ it('should set default headers', () => {
+ const instance = new LobeOpenRouterAI({ apiKey: 'test' });
+ expect(instance).toBeDefined();
+ // Headers are set in constructorOptions but not directly accessible
+ // We can verify by checking that the instance was created successfully
+ });
+
+ it('should use custom base URL when provided', () => {
+ const customBaseURL = 'https://custom.openrouter.ai/api/v1';
+ const instance = new LobeOpenRouterAI({ apiKey: 'test', baseURL: customBaseURL });
+ expect(instance.baseURL).toBe(customBaseURL);
  });
  });

- describe('chat', () => {
- it('should return a StreamingTextResponse on successful API call', async () => {
- // Arrange
- const mockStream = new ReadableStream();
- const mockResponse = Promise.resolve(mockStream);
+ describe('handlePayload', () => {
+ it('should default stream to true', async () => {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'mistralai/mistral-7b-instruct:free',
+ });

- (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({ stream: true }),
+ expect.anything(),
+ );
+ });

- // Act
- const result = await instance.chat({
+ it('should preserve stream value when explicitly set to false', async () => {
+ await instance.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'mistralai/mistral-7b-instruct:free',
- temperature: 0,
+ stream: false,
  });

- // Assert
- expect(result).toBeInstanceOf(Response);
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({ stream: false }),
+ expect.anything(),
+ );
  });

- it('should call OpenRouter API with corresponding options', async () => {
- // Arrange
- const mockStream = new ReadableStream();
- const mockResponse = Promise.resolve(mockStream);
+ it('should append :online to model when enabledSearch is true', async () => {
+ await instance.chat({
+ messages: [{ content: 'Search for something', role: 'user' }],
+ model: 'openai/gpt-4',
+ enabledSearch: true,
+ });

- (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({ model: 'openai/gpt-4:online' }),
+ expect.anything(),
+ );
+ });

- // Act
- const result = await instance.chat({
- max_tokens: 1024,
+ it('should not modify model when enabledSearch is false', async () => {
+ await instance.chat({
  messages: [{ content: 'Hello', role: 'user' }],
- model: 'mistralai/mistral-7b-instruct:free',
- temperature: 0.7,
- top_p: 1,
+ model: 'openai/gpt-4',
+ enabledSearch: false,
+ });
+
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({ model: 'openai/gpt-4' }),
+ expect.anything(),
+ );
+ });
+
+ it('should not modify model when enabledSearch is undefined', async () => {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'openai/gpt-4',
+ });
+
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({ model: 'openai/gpt-4' }),
+ expect.anything(),
+ );
+ });
+
+ it('should add empty reasoning object when thinking is not enabled', async () => {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'openai/gpt-4',
+ });
+
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({ reasoning: {} }),
+ expect.anything(),
+ );
+ });
+
+ it('should add reasoning with default 1024 tokens when thinking is enabled without budget', async () => {
+ await instance.chat({
+ messages: [{ content: 'Think about this', role: 'user' }],
+ model: 'openai/gpt-4',
+ thinking: { type: 'enabled', budget_tokens: 1024 },
  });

- // Assert
  expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
  expect.objectContaining({
- max_tokens: 1024,
- messages: [{ content: 'Hello', role: 'user' }],
- stream: true,
- model: 'mistralai/mistral-7b-instruct:free',
- temperature: 0.7,
- top_p: 1,
+ reasoning: { max_tokens: 1024 },
+ }),
+ expect.anything(),
+ );
+ });
+
+ it('should use budget_tokens when provided and within limits', async () => {
+ await instance.chat({
+ messages: [{ content: 'Think about this', role: 'user' }],
+ model: 'openai/gpt-4',
+ thinking: { type: 'enabled', budget_tokens: 2000 },
+ });
+
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({
+ reasoning: { max_tokens: 2000 },
+ }),
+ expect.anything(),
+ );
+ });
+
+ it('should cap reasoning tokens to max_tokens - 1 when budget exceeds max_tokens', async () => {
+ await instance.chat({
+ messages: [{ content: 'Think about this', role: 'user' }],
+ model: 'openai/gpt-4',
+ max_tokens: 1000,
+ thinking: { type: 'enabled', budget_tokens: 2000 },
+ });
+
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({
+ reasoning: { max_tokens: 999 }, // min(2000, 1000 - 1) = 999
+ }),
+ expect.anything(),
+ );
+ });
+
+ it('should use model maxOutput when no max_tokens provided', async () => {
+ // Mock OpenRouterModels to have a specific maxOutput
+ const { openrouter } = await import('model-bank');
+ const modelWithMaxOutput = openrouter.find((m) => m.maxOutput !== undefined);
+
+ if (modelWithMaxOutput) {
+ await instance.chat({
+ messages: [{ content: 'Think about this', role: 'user' }],
+ model: modelWithMaxOutput.id,
+ thinking: { type: 'enabled', budget_tokens: 50000 },
+ });
+
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({
+ reasoning: expect.objectContaining({ max_tokens: expect.any(Number) }),
+ }),
+ expect.anything(),
+ );
+ }
+ });
+
+ it('should use default 32000 when no max_tokens or model maxOutput available', async () => {
+ await instance.chat({
+ messages: [{ content: 'Think about this', role: 'user' }],
+ model: 'unknown/model-without-config',
+ thinking: { type: 'enabled', budget_tokens: 50000 },
+ });
+
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({
+ reasoning: { max_tokens: 31999 }, // min(50000, 32000 - 1) = 31999
  }),
- { headers: { Accept: '*/*' } },
+ expect.anything(),
  );
- expect(result).toBeInstanceOf(Response);
  });

- it('should add reasoning field when thinking is enabled', async () => {
- // Arrange
- const mockStream = new ReadableStream();
- const mockResponse = Promise.resolve(mockStream);
+ it('should combine enabledSearch and thinking features', async () => {
+ await instance.chat({
+ messages: [{ content: 'Search and think', role: 'user' }],
+ model: 'openai/gpt-4',
+ enabledSearch: true,
+ thinking: { type: 'enabled', budget_tokens: 1500 },
+ });

- (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({
+ model: 'openai/gpt-4:online',
+ reasoning: { max_tokens: 1500 },
+ }),
+ expect.anything(),
+ );
+ });

- // Act
- const result = await instance.chat({
+ it('should preserve other payload properties', async () => {
+ await instance.chat({
  messages: [{ content: 'Hello', role: 'user' }],
- model: 'mistralai/mistral-7b-instruct:free',
+ model: 'openai/gpt-4',
  temperature: 0.7,
- thinking: {
- type: 'enabled',
- budget_tokens: 1500,
- },
+ max_tokens: 1000,
+ top_p: 0.9,
  });

- // Assert
  expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
  expect.objectContaining({
  messages: [{ content: 'Hello', role: 'user' }],
- model: 'mistralai/mistral-7b-instruct:free',
- reasoning: {
- max_tokens: 1500,
- },
+ model: 'openai/gpt-4',
  temperature: 0.7,
+ max_tokens: 1000,
+ top_p: 0.9,
+ }),
+ expect.anything(),
+ );
+ });
+
+ it('should handle thinking type disabled', async () => {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'openai/gpt-4',
+ thinking: { type: 'disabled', budget_tokens: 0 },
+ });
+
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({ reasoning: {} }),
+ expect.anything(),
+ );
+ });
+
+ it('should handle undefined thinking', async () => {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'openai/gpt-4',
+ thinking: undefined,
+ });
+
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({ reasoning: {} }),
+ expect.anything(),
+ );
+ });
+
+ it('should cap reasoning tokens to 1 when max_tokens is 2', async () => {
+ await instance.chat({
+ messages: [{ content: 'Think about this', role: 'user' }],
+ model: 'openai/gpt-4',
+ max_tokens: 2,
+ thinking: { type: 'enabled', budget_tokens: 2000 },
+ });
+
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({
+ reasoning: { max_tokens: 1 }, // min(2000, 2 - 1) = 1
+ }),
+ expect.anything(),
+ );
+ });
+
+ it('should use budget_tokens when lower than default 1024', async () => {
+ await instance.chat({
+ messages: [{ content: 'Think about this', role: 'user' }],
+ model: 'openai/gpt-4',
+ thinking: { type: 'enabled', budget_tokens: 512 },
+ });
+
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({
+ reasoning: { max_tokens: 512 },
+ }),
+ expect.anything(),
+ );
+ });
+
+ it('should handle 0 budget_tokens (falsy, falls back to 1024)', async () => {
+ await instance.chat({
+ messages: [{ content: 'Think about this', role: 'user' }],
+ model: 'openai/gpt-4',
+ thinking: { type: 'enabled', budget_tokens: 0 },
+ });
+
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({
+ reasoning: { max_tokens: 1024 }, // 0 is falsy, falls back to 1024
+ }),
+ expect.anything(),
+ );
+ });
+
+ it('should handle negative budget_tokens', async () => {
+ await instance.chat({
+ messages: [{ content: 'Think about this', role: 'user' }],
+ model: 'openai/gpt-4',
+ thinking: { type: 'enabled', budget_tokens: -100 },
+ });
+
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({
+ reasoning: { max_tokens: -100 },
  }),
- { headers: { Accept: '*/*' } },
+ expect.anything(),
  );
- expect(result).toBeInstanceOf(Response);
+ });
+ });
+
+ describe('models', () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
  });

- it('should handle fetch failure gracefully', async () => {
- // mock the models.list method
- (instance['client'].models.list as Mock).mockResolvedValue({ data: models });
+ it('should fetch and process models successfully', async () => {
+ const mockModels = [
+ {
+ id: 'openai/gpt-4',
+ canonical_slug: 'openai/gpt-4',
+ name: 'OpenAI: GPT-4',
+ created: 1679587200,
+ description: 'GPT-4 model',
+ context_length: 8192,
+ architecture: {
+ modality: 'text->text',
+ input_modalities: ['text'],
+ output_modalities: ['text'],
+ tokenizer: 'gpt-4',
+ instruct_type: null,
+ },
+ pricing: {
+ prompt: '0.00003',
+ completion: '0.00006',
+ },
+ top_provider: {
+ context_length: 8192,
+ max_completion_tokens: 4096,
+ is_moderated: false,
+ },
+ supported_parameters: ['tools', 'temperature'],
+ },
+ ];

- // Mock a failed fetch response
  vi.stubGlobal(
  'fetch',
  vi.fn().mockResolvedValue({
- ok: false,
+ ok: true,
+ json: async () => ({ data: mockModels }),
  }),
  );

- const list = await instance.models();
+ const models = await params.models();

- // Verify that, in the current implementation, an empty list is returned when the model fetch response is not ok
  expect(fetch).toHaveBeenCalledWith('https://openrouter.ai/api/v1/models');
- expect(list.length).toBe(0);
- expect(list).toEqual([]);
+ expect(models.length).toBeGreaterThan(0);
  });

- it('should handle fetch error gracefully', async () => {
- // mock the models.list method
- (instance['client'].models.list as Mock).mockResolvedValue({ data: models });
+ it('should handle display name with colon - remove prefix', async () => {
+ const mockModels = [
+ {
+ id: 'anthropic/claude-3-opus',
+ canonical_slug: 'anthropic/claude-3-opus',
+ name: 'Anthropic: Claude 3 Opus',
+ created: 1679587200,
+ context_length: 200000,
+ architecture: {
+ modality: 'text->text',
+ input_modalities: ['text', 'image'],
+ output_modalities: ['text'],
+ tokenizer: 'claude',
+ instruct_type: null,
+ },
+ pricing: {
+ prompt: '0.000015',
+ completion: '0.000075',
+ },
+ top_provider: {
+ context_length: 200000,
+ max_completion_tokens: 4096,
+ is_moderated: false,
+ },
+ supported_parameters: ['tools', 'reasoning'],
+ },
+ ];

- // In the test environment, the fetch implementation needs to be replaced first so the error is actually caught
- vi.spyOn(globalThis, 'fetch').mockImplementation(() => {
- throw new Error('Network error');
- });
+ vi.stubGlobal(
+ 'fetch',
+ vi.fn().mockResolvedValue({
+ ok: true,
+ json: async () => ({ data: mockModels }),
+ }),
+ );
+
+ const models = await params.models();
+
+ const claudeModel = models.find((m) => m.id === 'anthropic/claude-3-opus');
+ expect(claudeModel?.displayName).toBe('Claude 3 Opus');
+ });
+
475
+ it('should preserve DeepSeek prefix when suffix does not contain deepseek', async () => {
476
+ const mockModels = [
477
+ {
478
+ id: 'deepseek/deepseek-chat',
479
+ canonical_slug: 'deepseek/deepseek-chat',
480
+ name: 'DeepSeek: Chat',
481
+ created: 1679587200,
482
+ context_length: 32768,
483
+ architecture: {
484
+ modality: 'text->text',
485
+ input_modalities: ['text'],
486
+ output_modalities: ['text'],
487
+ tokenizer: 'deepseek',
488
+ instruct_type: null,
489
+ },
490
+ pricing: {
491
+ prompt: '0.00000014',
492
+ completion: '0.00000028',
493
+ },
494
+ top_provider: {
495
+ context_length: 32768,
496
+ max_completion_tokens: 4096,
497
+ is_moderated: false,
498
+ },
499
+ supported_parameters: ['tools'],
500
+ },
501
+ ];
502
+
503
+ vi.stubGlobal(
504
+ 'fetch',
505
+ vi.fn().mockResolvedValue({
506
+ ok: true,
507
+ json: async () => ({ data: mockModels }),
508
+ }),
509
+ );
510
+
511
+ const models = await params.models();
512
+
513
+ const deepseekModel = models.find((m) => m.id === 'deepseek/deepseek-chat');
514
+ expect(deepseekModel?.displayName).toBe('DeepSeek: Chat');
515
+ });
516
+
517
+ it('should remove DeepSeek prefix when suffix contains deepseek', async () => {
518
+ const mockModels = [
519
+ {
520
+ id: 'deepseek/deepseek-r1',
521
+ canonical_slug: 'deepseek/deepseek-r1',
522
+ name: 'DeepSeek: DeepSeek R1',
523
+ created: 1679587200,
524
+ context_length: 64000,
525
+ architecture: {
526
+ modality: 'text->text',
527
+ input_modalities: ['text'],
528
+ output_modalities: ['text'],
529
+ tokenizer: 'deepseek',
530
+ instruct_type: null,
531
+ },
532
+ pricing: {
533
+ prompt: '0.00000055',
534
+ completion: '0.0000022',
535
+ },
536
+ top_provider: {
537
+ context_length: 64000,
538
+ max_completion_tokens: 8192,
539
+ is_moderated: false,
540
+ },
541
+ supported_parameters: ['reasoning'],
542
+ },
543
+ ];
544
+
545
+ vi.stubGlobal(
546
+ 'fetch',
547
+ vi.fn().mockResolvedValue({
548
+ ok: true,
549
+ json: async () => ({ data: mockModels }),
550
+ }),
551
+ );
552
+
553
+ const models = await params.models();
554
+
555
+ const deepseekModel = models.find((m) => m.id === 'deepseek/deepseek-r1');
556
+ expect(deepseekModel?.displayName).toBe('DeepSeek R1');
557
+ });
558
+
559
+ it('should append (free) to display name for free models', async () => {
560
+ const mockModels = [
561
+ {
562
+ id: 'free/model',
563
+ canonical_slug: 'free/model',
564
+ name: 'Provider: Free Model',
565
+ created: 1679587200,
566
+ context_length: 4096,
567
+ architecture: {
568
+ modality: 'text->text',
569
+ input_modalities: ['text'],
570
+ output_modalities: ['text'],
571
+ tokenizer: 'default',
572
+ instruct_type: null,
573
+ },
574
+ pricing: {
575
+ prompt: '0',
576
+ completion: '0',
577
+ },
578
+ top_provider: {
579
+ context_length: 4096,
580
+ max_completion_tokens: 2048,
581
+ is_moderated: false,
582
+ },
583
+ supported_parameters: [],
584
+ },
585
+ ];
586
+
587
+ vi.stubGlobal(
588
+ 'fetch',
589
+ vi.fn().mockResolvedValue({
590
+ ok: true,
591
+ json: async () => ({ data: mockModels }),
592
+ }),
593
+ );
594
+
595
+ const models = await params.models();
596
+
597
+ const freeModel = models.find((m) => m.id === 'free/model');
598
+ expect(freeModel?.displayName).toBe('Free Model (free)');
599
+ });
600
+
601
+ it('should not append (free) if already present in name', async () => {
602
+ const mockModels = [
603
+ {
604
+ id: 'free/model',
605
+ canonical_slug: 'free/model',
606
+ name: 'Provider: Free Model (free)',
607
+ created: 1679587200,
608
+ context_length: 4096,
609
+ architecture: {
610
+ modality: 'text->text',
611
+ input_modalities: ['text'],
612
+ output_modalities: ['text'],
613
+ tokenizer: 'default',
614
+ instruct_type: null,
615
+ },
616
+ pricing: {
617
+ prompt: '0',
618
+ completion: '0',
619
+ },
620
+ top_provider: {
621
+ context_length: 4096,
622
+ max_completion_tokens: 2048,
623
+ is_moderated: false,
624
+ },
625
+ supported_parameters: [],
626
+ },
627
+ ];
628
+
629
+ vi.stubGlobal(
630
+ 'fetch',
631
+ vi.fn().mockResolvedValue({
632
+ ok: true,
633
+ json: async () => ({ data: mockModels }),
634
+ }),
635
+ );
636
+
637
+ const models = await params.models();
638
+
639
+ const freeModel = models.find((m) => m.id === 'free/model');
640
+ expect(freeModel?.displayName).toBe('Free Model (free)');
641
+ expect(freeModel?.displayName).not.toBe('Free Model (free) (free)');
642
+ });
643
+
644
+ it('should detect vision capability from input_modalities', async () => {
645
+ const mockModels = [
646
+ {
647
+ id: 'vision/model',
648
+ canonical_slug: 'vision/model',
649
+ name: 'Vision Model',
650
+ created: 1679587200,
651
+ context_length: 8192,
652
+ architecture: {
653
+ modality: 'text+image->text',
654
+ input_modalities: ['text', 'image'],
655
+ output_modalities: ['text'],
656
+ tokenizer: 'default',
657
+ instruct_type: null,
658
+ },
659
+ pricing: {
660
+ prompt: '0.00001',
661
+ completion: '0.00002',
662
+ },
663
+ top_provider: {
664
+ context_length: 8192,
665
+ max_completion_tokens: 4096,
666
+ is_moderated: false,
667
+ },
668
+ supported_parameters: [],
669
+ },
670
+ ];
671
+
672
+ vi.stubGlobal(
673
+ 'fetch',
674
+ vi.fn().mockResolvedValue({
675
+ ok: true,
676
+ json: async () => ({ data: mockModels }),
677
+ }),
678
+ );
679
+
680
+ const models = await params.models();
681
+
682
+ const visionModel = models.find((m) => m.id === 'vision/model');
683
+ expect(visionModel?.vision).toBe(true);
684
+ });
685
+
686
+ it('should detect function call from supported_parameters', async () => {
687
+ const mockModels = [
688
+ {
689
+ id: 'function/model',
690
+ canonical_slug: 'function/model',
691
+ name: 'Function Model',
692
+ created: 1679587200,
693
+ context_length: 8192,
694
+ architecture: {
695
+ modality: 'text->text',
696
+ input_modalities: ['text'],
697
+ output_modalities: ['text'],
698
+ tokenizer: 'default',
699
+ instruct_type: null,
700
+ },
701
+ pricing: {
702
+ prompt: '0.00001',
703
+ completion: '0.00002',
704
+ },
705
+ top_provider: {
706
+ context_length: 8192,
707
+ max_completion_tokens: 4096,
708
+ is_moderated: false,
709
+ },
710
+ supported_parameters: ['tools', 'temperature'],
711
+ },
712
+ ];
713
+
714
+ vi.stubGlobal(
715
+ 'fetch',
716
+ vi.fn().mockResolvedValue({
717
+ ok: true,
718
+ json: async () => ({ data: mockModels }),
719
+ }),
720
+ );
721
+
722
+ const models = await params.models();
723
+
724
+ const functionModel = models.find((m) => m.id === 'function/model');
725
+ expect(functionModel?.functionCall).toBe(true);
726
+ });
727
+
728
+ it('should detect reasoning from supported_parameters', async () => {
729
+ const mockModels = [
730
+ {
731
+ id: 'reasoning/model',
732
+ canonical_slug: 'reasoning/model',
733
+ name: 'Reasoning Model',
734
+ created: 1679587200,
735
+ context_length: 8192,
736
+ architecture: {
737
+ modality: 'text->text',
738
+ input_modalities: ['text'],
739
+ output_modalities: ['text'],
740
+ tokenizer: 'default',
741
+ instruct_type: null,
742
+ },
743
+ pricing: {
744
+ prompt: '0.00001',
745
+ completion: '0.00002',
746
+ },
747
+ top_provider: {
748
+ context_length: 8192,
749
+ max_completion_tokens: 4096,
750
+ is_moderated: false,
751
+ },
752
+ supported_parameters: ['reasoning', 'temperature'],
753
+ },
754
+ ];
755
+
756
+ vi.stubGlobal(
757
+ 'fetch',
758
+ vi.fn().mockResolvedValue({
759
+ ok: true,
760
+ json: async () => ({ data: mockModels }),
761
+ }),
762
+ );
763
+
764
+ const models = await params.models();
765
+
766
+ const reasoningModel = models.find((m) => m.id === 'reasoning/model');
767
+ expect(reasoningModel?.reasoning).toBe(true);
768
+ });
769
+
770
+ it('should format pricing correctly', async () => {
771
+ const mockModels = [
772
+ {
773
+ id: 'pricing/model',
774
+ canonical_slug: 'pricing/model',
775
+ name: 'Pricing Model',
776
+ created: 1679587200,
777
+ context_length: 8192,
778
+ architecture: {
779
+ modality: 'text->text',
780
+ input_modalities: ['text'],
781
+ output_modalities: ['text'],
782
+ tokenizer: 'default',
783
+ instruct_type: null,
784
+ },
785
+ pricing: {
786
+ prompt: '0.00001',
787
+ completion: '0.00002',
788
+ input_cache_read: '0.000001',
789
+ input_cache_write: '0.0000015',
790
+ },
791
+ top_provider: {
792
+ context_length: 8192,
793
+ max_completion_tokens: 4096,
794
+ is_moderated: false,
795
+ },
796
+ supported_parameters: [],
797
+ },
798
+ ];
799
+
800
+ vi.stubGlobal(
801
+ 'fetch',
802
+ vi.fn().mockResolvedValue({
803
+ ok: true,
804
+ json: async () => ({ data: mockModels }),
805
+ }),
806
+ );
807
+
808
+ const models = await params.models();
809
+
810
+ const pricingModel = models.find((m) => m.id === 'pricing/model');
811
+ expect(pricingModel?.pricing).toBeDefined();
812
+ // Pricing is converted to units array by processMultiProviderModelList
813
+ expect(pricingModel?.pricing?.units).toBeDefined();
814
+ expect(pricingModel?.pricing?.units).toBeInstanceOf(Array);
815
+ expect(pricingModel?.pricing?.units?.length).toBe(4);
816
+ // Check that the units contain the correct pricing information
817
+ const inputUnit = pricingModel?.pricing?.units?.find((u) => u.name === 'textInput');
818
+ const outputUnit = pricingModel?.pricing?.units?.find((u) => u.name === 'textOutput');
819
+ const cachedInputUnit = pricingModel?.pricing?.units?.find(
820
+ (u) => u.name === 'textInput_cacheRead',
821
+ );
822
+ const writeCacheInputUnit = pricingModel?.pricing?.units?.find(
823
+ (u) => u.name === 'textInput_cacheWrite',
824
+ );
825
+ expect(inputUnit?.strategy).toBe('fixed');
826
+ expect(outputUnit?.strategy).toBe('fixed');
827
+ expect(cachedInputUnit?.strategy).toBe('fixed');
828
+ expect(writeCacheInputUnit?.strategy).toBe('fixed');
829
+ if (inputUnit?.strategy === 'fixed') expect(inputUnit.rate).toBe(10);
830
+ if (outputUnit?.strategy === 'fixed') expect(outputUnit.rate).toBe(20);
831
+ if (cachedInputUnit?.strategy === 'fixed') expect(cachedInputUnit.rate).toBe(1);
832
+ if (writeCacheInputUnit?.strategy === 'fixed') expect(writeCacheInputUnit.rate).toBe(1.5);
833
+ });
834
+
835
+ it('should handle undefined pricing fields', async () => {
836
+ const mockModels = [
837
+ {
838
+ id: 'no-cache-pricing/model',
839
+ canonical_slug: 'no-cache-pricing/model',
840
+ name: 'No Cache Pricing Model',
841
+ created: 1679587200,
842
+ context_length: 8192,
843
+ architecture: {
844
+ modality: 'text->text',
845
+ input_modalities: ['text'],
846
+ output_modalities: ['text'],
847
+ tokenizer: 'default',
848
+ instruct_type: null,
849
+ },
850
+ pricing: {
851
+ prompt: '0.00001',
852
+ completion: '0.00002',
853
+ },
854
+ top_provider: {
855
+ context_length: 8192,
856
+ max_completion_tokens: 4096,
857
+ is_moderated: false,
858
+ },
859
+ supported_parameters: [],
860
+ },
861
+ ];
862
+
863
+ vi.stubGlobal(
864
+ 'fetch',
865
+ vi.fn().mockResolvedValue({
866
+ ok: true,
867
+ json: async () => ({ data: mockModels }),
868
+ }),
869
+ );
870
+
871
+ const models = await params.models();
872
+
873
+ const noCacheModel = models.find((m) => m.id === 'no-cache-pricing/model');
874
+ expect(noCacheModel?.pricing?.units).toBeDefined();
875
+ // Should only have input and output units, no cache units
876
+ expect(noCacheModel?.pricing?.units?.length).toBe(2);
877
+ const cachedInputUnit = noCacheModel?.pricing?.units?.find(
878
+ (u) => u.name === 'textInput_cacheRead',
879
+ );
880
+ const writeCacheInputUnit = noCacheModel?.pricing?.units?.find(
881
+ (u) => u.name === 'textInput_cacheWrite',
882
+ );
883
+ expect(cachedInputUnit).toBeUndefined();
884
+ expect(writeCacheInputUnit).toBeUndefined();
885
+ });
886
+
887
+ it('should handle -1 pricing as undefined', async () => {
888
+ const mockModels = [
889
+ {
890
+ id: 'invalid-pricing/model',
891
+ canonical_slug: 'invalid-pricing/model',
892
+ name: 'Invalid Pricing Model',
893
+ created: 1679587200,
894
+ context_length: 8192,
895
+ architecture: {
896
+ modality: 'text->text',
897
+ input_modalities: ['text'],
898
+ output_modalities: ['text'],
899
+ tokenizer: 'default',
900
+ instruct_type: null,
901
+ },
902
+ pricing: {
903
+ prompt: '-1',
904
+ completion: '-1',
905
+ },
906
+ top_provider: {
907
+ context_length: 8192,
908
+ max_completion_tokens: 4096,
909
+ is_moderated: false,
910
+ },
911
+ supported_parameters: [],
912
+ },
913
+ ];
914
+
915
+ vi.stubGlobal(
916
+ 'fetch',
917
+ vi.fn().mockResolvedValue({
918
+ ok: true,
919
+ json: async () => ({ data: mockModels }),
920
+ }),
921
+ );
922
+
923
+ const models = await params.models();
924
+
925
+ const invalidPricingModel = models.find((m) => m.id === 'invalid-pricing/model');
926
+ // -1 pricing is converted to undefined by formatPrice, so no pricing units should be present
927
+ expect(invalidPricingModel?.pricing).toBeUndefined();
928
+ });
929
+
930
+ it('should use top_provider context_length if available', async () => {
931
+ const mockModels = [
932
+ {
933
+ id: 'context/model',
934
+ canonical_slug: 'context/model',
935
+ name: 'Context Model',
936
+ created: 1679587200,
937
+ context_length: 4096,
938
+ architecture: {
939
+ modality: 'text->text',
940
+ input_modalities: ['text'],
941
+ output_modalities: ['text'],
942
+ tokenizer: 'default',
943
+ instruct_type: null,
944
+ },
945
+ pricing: {
946
+ prompt: '0.00001',
947
+ completion: '0.00002',
948
+ },
949
+ top_provider: {
950
+ context_length: 8192,
951
+ max_completion_tokens: 4096,
952
+ is_moderated: false,
953
+ },
954
+ supported_parameters: [],
955
+ },
956
+ ];
957
+
958
+ vi.stubGlobal(
959
+ 'fetch',
960
+ vi.fn().mockResolvedValue({
961
+ ok: true,
962
+ json: async () => ({ data: mockModels }),
963
+ }),
964
+ );
965
+
966
+ const models = await params.models();
967
+
968
+ const contextModel = models.find((m) => m.id === 'context/model');
969
+ expect(contextModel?.contextWindowTokens).toBe(8192);
970
+ });
971
+
972
+ it('should fallback to model context_length when top_provider is not available', async () => {
973
+ const mockModels = [
974
+ {
975
+ id: 'fallback-context/model',
976
+ canonical_slug: 'fallback-context/model',
977
+ name: 'Fallback Context Model',
978
+ created: 1679587200,
979
+ context_length: 4096,
980
+ architecture: {
981
+ modality: 'text->text',
982
+ input_modalities: ['text'],
983
+ output_modalities: ['text'],
984
+ tokenizer: 'default',
985
+ instruct_type: null,
986
+ },
987
+ pricing: {
988
+ prompt: '0.00001',
989
+ completion: '0.00002',
990
+ },
991
+ top_provider: {
992
+ context_length: 0,
993
+ max_completion_tokens: 4096,
994
+ is_moderated: false,
995
+ },
996
+ supported_parameters: [],
997
+ },
998
+ ];
999
+
1000
+ vi.stubGlobal(
1001
+ 'fetch',
1002
+ vi.fn().mockResolvedValue({
1003
+ ok: true,
1004
+ json: async () => ({ data: mockModels }),
1005
+ }),
1006
+ );
1007
+
1008
+ const models = await params.models();
1009
+
1010
+ const fallbackModel = models.find((m) => m.id === 'fallback-context/model');
1011
+ expect(fallbackModel?.contextWindowTokens).toBe(4096);
1012
+ });
1013
+
1014
+ it('should set maxOutput from top_provider when available', async () => {
1015
+ const mockModels = [
1016
+ {
1017
+ id: 'maxoutput/model',
1018
+ canonical_slug: 'maxoutput/model',
1019
+ name: 'Max Output Model',
1020
+ created: 1679587200,
1021
+ context_length: 8192,
1022
+ architecture: {
1023
+ modality: 'text->text',
1024
+ input_modalities: ['text'],
1025
+ output_modalities: ['text'],
1026
+ tokenizer: 'default',
1027
+ instruct_type: null,
1028
+ },
1029
+ pricing: {
1030
+ prompt: '0.00001',
1031
+ completion: '0.00002',
1032
+ },
1033
+ top_provider: {
1034
+ context_length: 8192,
1035
+ max_completion_tokens: 4096,
1036
+ is_moderated: false,
1037
+ },
1038
+ supported_parameters: [],
1039
+ },
1040
+ ];
1041
+
1042
+ vi.stubGlobal(
1043
+ 'fetch',
1044
+ vi.fn().mockResolvedValue({
1045
+ ok: true,
1046
+ json: async () => ({ data: mockModels }),
1047
+ }),
1048
+ );
1049
+
1050
+ const models = await params.models();
1051
+
1052
+ const maxOutputModel = models.find((m) => m.id === 'maxoutput/model');
1053
+ expect(maxOutputModel?.maxOutput).toBe(4096);
1054
+ });
1055
+
1056
+ it('should set maxOutput to undefined when top_provider value is null', async () => {
1057
+ const mockModels = [
1058
+ {
1059
+ id: 'null-maxoutput/model',
1060
+ canonical_slug: 'null-maxoutput/model',
1061
+ name: 'Null Max Output Model',
1062
+ created: 1679587200,
1063
+ context_length: 8192,
1064
+ architecture: {
1065
+ modality: 'text->text',
1066
+ input_modalities: ['text'],
1067
+ output_modalities: ['text'],
1068
+ tokenizer: 'default',
1069
+ instruct_type: null,
1070
+ },
1071
+ pricing: {
1072
+ prompt: '0.00001',
1073
+ completion: '0.00002',
1074
+ },
1075
+ top_provider: {
1076
+ context_length: 8192,
1077
+ max_completion_tokens: null,
1078
+ is_moderated: false,
1079
+ },
1080
+ supported_parameters: [],
1081
+ },
1082
+ ];
1083
+
1084
+ vi.stubGlobal(
1085
+ 'fetch',
1086
+ vi.fn().mockResolvedValue({
1087
+ ok: true,
1088
+ json: async () => ({ data: mockModels }),
1089
+ }),
1090
+ );
1091
+
1092
+ const models = await params.models();
1093
+
1094
+ const nullMaxOutputModel = models.find((m) => m.id === 'null-maxoutput/model');
1095
+ expect(nullMaxOutputModel?.maxOutput).toBeUndefined();
1096
+ });
1097
+
1098
+ it('should format releasedAt from created timestamp', async () => {
1099
+ const mockModels = [
1100
+ {
1101
+ id: 'released/model',
1102
+ canonical_slug: 'released/model',
1103
+ name: 'Released Model',
1104
+ created: 1679587200, // 2023-03-23
1105
+ context_length: 8192,
1106
+ architecture: {
1107
+ modality: 'text->text',
1108
+ input_modalities: ['text'],
1109
+ output_modalities: ['text'],
1110
+ tokenizer: 'default',
1111
+ instruct_type: null,
1112
+ },
1113
+ pricing: {
1114
+ prompt: '0.00001',
1115
+ completion: '0.00002',
1116
+ },
1117
+ top_provider: {
1118
+ context_length: 8192,
1119
+ max_completion_tokens: 4096,
1120
+ is_moderated: false,
1121
+ },
1122
+ supported_parameters: [],
1123
+ },
1124
+ ];
1125
+
1126
+ vi.stubGlobal(
1127
+ 'fetch',
1128
+ vi.fn().mockResolvedValue({
1129
+ ok: true,
1130
+ json: async () => ({ data: mockModels }),
1131
+ }),
1132
+ );
1133
+
1134
+ const models = await params.models();
1135
+
1136
+ const releasedModel = models.find((m) => m.id === 'released/model');
1137
+ expect(releasedModel?.releasedAt).toBe('2023-03-23');
1138
+ });
1139
+
1140
+ it('should handle empty model list from API', async () => {
1141
+ vi.stubGlobal(
1142
+ 'fetch',
1143
+ vi.fn().mockResolvedValue({
1144
+ ok: true,
1145
+ json: async () => ({ data: [] }),
1146
+ }),
1147
+ );
1148
+
1149
+ const models = await params.models();
1150
+
1151
+ expect(models).toEqual([]);
1152
+ });
1153
+
1154
+ it('should return empty array when fetch fails', async () => {
1155
+ vi.stubGlobal(
1156
+ 'fetch',
1157
+ vi.fn().mockResolvedValue({
1158
+ ok: false,
1159
+ }),
1160
+ );
1161
+
1162
+ const models = await params.models();
1163
+
1164
+ expect(models).toEqual([]);
1165
+ });
1166
+
1167
+ it('should return empty array when fetch throws error', async () => {
1168
+ vi.stubGlobal('fetch', vi.fn().mockRejectedValue(new Error('Network error')));
1169
+
1170
+ const models = await params.models();
1171
+
1172
+ expect(models).toEqual([]);
1173
+ expect(console.error).toHaveBeenCalledWith(
1174
+ 'Failed to fetch OpenRouter frontend models:',
1175
+ expect.any(Error),
1176
+ );
1177
+ });
1178
+
1179
+ it('should handle models with missing optional fields', async () => {
1180
+ const mockModels = [
1181
+ {
1182
+ id: 'minimal/model',
1183
+ canonical_slug: 'minimal/model',
1184
+ name: 'Minimal Model',
1185
+ created: 1679587200,
1186
+ context_length: 4096,
1187
+ architecture: {
1188
+ modality: 'text->text',
1189
+ input_modalities: [],
1190
+ output_modalities: ['text'],
1191
+ tokenizer: 'default',
1192
+ instruct_type: null,
1193
+ },
1194
+ pricing: {
1195
+ prompt: '0.00001',
1196
+ completion: '0.00002',
1197
+ },
1198
+ top_provider: {
1199
+ context_length: 4096,
1200
+ max_completion_tokens: 2048,
1201
+ is_moderated: false,
1202
+ },
1203
+ supported_parameters: [],
1204
+ },
1205
+ ];
1206
+
1207
+ vi.stubGlobal(
1208
+ 'fetch',
1209
+ vi.fn().mockResolvedValue({
1210
+ ok: true,
1211
+ json: async () => ({ data: mockModels }),
1212
+ }),
1213
+ );
1214
+
1215
+ const models = await params.models();
1216
+
1217
+ const minimalModel = models.find((m) => m.id === 'minimal/model');
1218
+ expect(minimalModel).toBeDefined();
1219
+ expect(minimalModel?.vision).toBe(false);
1220
+ expect(minimalModel?.functionCall).toBe(false);
1221
+ expect(minimalModel?.reasoning).toBe(false);
1222
+ });
1223
+
1224
+ it('should handle model name without colon', async () => {
1225
+ const mockModels = [
1226
+ {
1227
+ id: 'simple/model',
1228
+ canonical_slug: 'simple/model',
1229
+ name: 'Simple Model Name',
1230
+ created: 1679587200,
1231
+ context_length: 4096,
1232
+ architecture: {
1233
+ modality: 'text->text',
1234
+ input_modalities: ['text'],
1235
+ output_modalities: ['text'],
1236
+ tokenizer: 'default',
1237
+ instruct_type: null,
1238
+ },
1239
+ pricing: {
1240
+ prompt: '0.00001',
1241
+ completion: '0.00002',
1242
+ },
1243
+ top_provider: {
1244
+ context_length: 4096,
1245
+ max_completion_tokens: 2048,
1246
+ is_moderated: false,
1247
+ },
1248
+ supported_parameters: [],
1249
+ },
1250
+ ];
1251
+
1252
+ vi.stubGlobal(
1253
+ 'fetch',
1254
+ vi.fn().mockResolvedValue({
1255
+ ok: true,
1256
+ json: async () => ({ data: mockModels }),
1257
+ }),
1258
+ );
1259
+
1260
+ const models = await params.models();
1261
+
1262
+ const simpleModel = models.find((m) => m.id === 'simple/model');
1263
+ expect(simpleModel?.displayName).toBe('Simple Model Name');
1264
+ });
1265
+
1266
+ it('should process multiple models correctly', async () => {
1267
+ const mockModels = [
1268
+ {
1269
+ id: 'model-1',
1270
+ canonical_slug: 'model-1',
1271
+ name: 'Provider: Model 1',
1272
+ created: 1679587200,
1273
+ context_length: 4096,
1274
+ architecture: {
1275
+ modality: 'text->text',
1276
+ input_modalities: ['text'],
1277
+ output_modalities: ['text'],
1278
+ tokenizer: 'default',
1279
+ instruct_type: null,
1280
+ },
1281
+ pricing: { prompt: '0.00001', completion: '0.00002' },
1282
+ top_provider: {
1283
+ context_length: 4096,
1284
+ max_completion_tokens: 2048,
1285
+ is_moderated: false,
1286
+ },
1287
+ supported_parameters: ['tools'],
1288
+ },
1289
+ {
1290
+ id: 'model-2',
1291
+ canonical_slug: 'model-2',
1292
+ name: 'Provider: Model 2',
1293
+ created: 1679587200,
1294
+ context_length: 8192,
1295
+ architecture: {
1296
+ modality: 'text+image->text',
1297
+ input_modalities: ['text', 'image'],
1298
+ output_modalities: ['text'],
1299
+ tokenizer: 'default',
1300
+ instruct_type: null,
1301
+ },
1302
+ pricing: { prompt: '0.00002', completion: '0.00004' },
1303
+ top_provider: {
1304
+ context_length: 8192,
1305
+ max_completion_tokens: 4096,
1306
+ is_moderated: false,
1307
+ },
1308
+ supported_parameters: ['reasoning'],
1309
+ },
1310
+ ];
1311
+
1312
+ vi.stubGlobal(
1313
+ 'fetch',
1314
+ vi.fn().mockResolvedValue({
1315
+ ok: true,
1316
+ json: async () => ({ data: mockModels }),
1317
+ }),
1318
+ );
1319
+
1320
+ const models = await params.models();
1321
+
1322
+ expect(models.length).toBeGreaterThanOrEqual(2);
1323
+ const model1 = models.find((m) => m.id === 'model-1');
1324
+ const model2 = models.find((m) => m.id === 'model-2');
1325
+
1326
+ expect(model1?.functionCall).toBe(true);
1327
+ expect(model1?.vision).toBe(false);
1328
+ expect(model2?.reasoning).toBe(true);
1329
+ expect(model2?.vision).toBe(true);
1330
+ });
1331
+
1332
+    it('should handle both tools and reasoning in supported_parameters', async () => {
+      const mockModels = [
+        {
+          id: 'advanced/model',
+          canonical_slug: 'advanced/model',
+          name: 'Advanced Model',
+          created: 1679587200,
+          context_length: 128000,
+          architecture: {
+            modality: 'text+image->text',
+            input_modalities: ['text', 'image'],
+            output_modalities: ['text'],
+            tokenizer: 'default',
+            instruct_type: null,
+          },
+          pricing: {
+            prompt: '0.00003',
+            completion: '0.00009',
+          },
+          top_provider: {
+            context_length: 128000,
+            max_completion_tokens: 8192,
+            is_moderated: false,
+          },
+          supported_parameters: ['tools', 'reasoning', 'temperature'],
+        },
+      ];
+
+      vi.stubGlobal(
+        'fetch',
+        vi.fn().mockResolvedValue({
+          ok: true,
+          json: async () => ({ data: mockModels }),
+        }),
+      );
+
+      const models = await params.models();
+
+      const advancedModel = models.find((m) => m.id === 'advanced/model');
+      expect(advancedModel?.functionCall).toBe(true);
+      expect(advancedModel?.reasoning).toBe(true);
+      expect(advancedModel?.vision).toBe(true);
+    });
+
+    it('should handle empty input_modalities array', async () => {
+      const mockModels = [
+        {
+          id: 'empty-modalities/model',
+          canonical_slug: 'empty-modalities/model',
+          name: 'Empty Modalities Model',
+          created: 1679587200,
+          context_length: 4096,
+          architecture: {
+            modality: 'text->text',
+            input_modalities: [],
+            output_modalities: ['text'],
+            tokenizer: 'default',
+            instruct_type: null,
+          },
+          pricing: {
+            prompt: '0.00001',
+            completion: '0.00002',
+          },
+          top_provider: {
+            context_length: 4096,
+            max_completion_tokens: 2048,
+            is_moderated: false,
+          },
+          supported_parameters: [],
+        },
+      ];
+
+      vi.stubGlobal(
+        'fetch',
+        vi.fn().mockResolvedValue({
+          ok: true,
+          json: async () => ({ data: mockModels }),
+        }),
+      );
+
+      const models = await params.models();
+
+      const emptyModel = models.find((m) => m.id === 'empty-modalities/model');
+      expect(emptyModel?.vision).toBe(false);
+    });
+
+    it('should handle null pricing fields (converts to 0)', async () => {
+      const mockModels = [
+        {
+          id: 'null-pricing/model',
+          canonical_slug: 'null-pricing/model',
+          name: 'Null Pricing Model',
+          created: 1679587200,
+          context_length: 4096,
+          architecture: {
+            modality: 'text->text',
+            input_modalities: ['text'],
+            output_modalities: ['text'],
+            tokenizer: 'default',
+            instruct_type: null,
+          },
+          pricing: {
+            prompt: null,
+            completion: null,
+          },
+          top_provider: {
+            context_length: 4096,
+            max_completion_tokens: 2048,
+            is_moderated: false,
+          },
+          supported_parameters: [],
+        },
+      ];
+
+      vi.stubGlobal(
+        'fetch',
+        vi.fn().mockResolvedValue({
+          ok: true,
+          json: async () => ({ data: mockModels }),
+        }),
+      );
+
+      const models = await params.models();
+
+      const nullPricingModel = models.find((m) => m.id === 'null-pricing/model');
+      // null is converted to 0 by formatPrice, which is valid pricing
+      expect(nullPricingModel?.pricing).toBeDefined();
+      const inputUnit = nullPricingModel?.pricing?.units?.find((u) => u.name === 'textInput');
+      const outputUnit = nullPricingModel?.pricing?.units?.find((u) => u.name === 'textOutput');
+      if (inputUnit?.strategy === 'fixed') expect(inputUnit.rate).toBe(0);
+      if (outputUnit?.strategy === 'fixed') expect(outputUnit.rate).toBe(0);
+    });
+
+    it('should handle zero pricing (free model)', async () => {
+      const mockModels = [
+        {
+          id: 'zero-pricing/model',
+          canonical_slug: 'zero-pricing/model',
+          name: 'Zero Pricing Model',
+          created: 1679587200,
+          context_length: 4096,
+          architecture: {
+            modality: 'text->text',
+            input_modalities: ['text'],
+            output_modalities: ['text'],
+            tokenizer: 'default',
+            instruct_type: null,
+          },
+          pricing: {
+            prompt: '0',
+            completion: '0',
+          },
+          top_provider: {
+            context_length: 4096,
+            max_completion_tokens: 2048,
+            is_moderated: false,
+          },
+          supported_parameters: [],
+        },
+      ];
+
+      vi.stubGlobal(
+        'fetch',
+        vi.fn().mockResolvedValue({
+          ok: true,
+          json: async () => ({ data: mockModels }),
+        }),
+      );
+
+      const models = await params.models();
+
+      const zeroPricingModel = models.find((m) => m.id === 'zero-pricing/model');
+      expect(zeroPricingModel?.pricing).toBeDefined();
+      // Zero is valid pricing
+      const inputUnit = zeroPricingModel?.pricing?.units?.find((u) => u.name === 'textInput');
+      const outputUnit = zeroPricingModel?.pricing?.units?.find((u) => u.name === 'textOutput');
+      if (inputUnit?.strategy === 'fixed') expect(inputUnit.rate).toBe(0);
+      if (outputUnit?.strategy === 'fixed') expect(outputUnit.rate).toBe(0);
+    });
+
+    it('should handle mixed zero and non-zero pricing', async () => {
+      const mockModels = [
+        {
+          id: 'mixed-free/model',
+          canonical_slug: 'mixed-free/model',
+          name: 'Mixed Free Model',
+          created: 1679587200,
+          context_length: 4096,
+          architecture: {
+            modality: 'text->text',
+            input_modalities: ['text'],
+            output_modalities: ['text'],
+            tokenizer: 'default',
+            instruct_type: null,
+          },
+          pricing: {
+            prompt: '0',
+            completion: '0.00001',
+          },
+          top_provider: {
+            context_length: 4096,
+            max_completion_tokens: 2048,
+            is_moderated: false,
+          },
+          supported_parameters: [],
+        },
+      ];
+
+      vi.stubGlobal(
+        'fetch',
+        vi.fn().mockResolvedValue({
+          ok: true,
+          json: async () => ({ data: mockModels }),
+        }),
+      );
+
+      const models = await params.models();
+
+      const mixedModel = models.find((m) => m.id === 'mixed-free/model');
+      // Input or output is 0, so should be marked as free
+      expect(mixedModel?.displayName).toContain('(free)');
+    });
+
+    it('should handle very large pricing values', async () => {
+      const mockModels = [
+        {
+          id: 'expensive/model',
+          canonical_slug: 'expensive/model',
+          name: 'Expensive Model',
+          created: 1679587200,
+          context_length: 4096,
+          architecture: {
+            modality: 'text->text',
+            input_modalities: ['text'],
+            output_modalities: ['text'],
+            tokenizer: 'default',
+            instruct_type: null,
+          },
+          pricing: {
+            prompt: '1.5',
+            completion: '3.0',
+          },
+          top_provider: {
+            context_length: 4096,
+            max_completion_tokens: 2048,
+            is_moderated: false,
+          },
+          supported_parameters: [],
+        },
+      ];
+
+      vi.stubGlobal(
+        'fetch',
+        vi.fn().mockResolvedValue({
+          ok: true,
+          json: async () => ({ data: mockModels }),
+        }),
+      );
+
+      const models = await params.models();
+
+      const expensiveModel = models.find((m) => m.id === 'expensive/model');
+      expect(expensiveModel?.pricing?.units).toBeDefined();
+      const inputUnit = expensiveModel?.pricing?.units?.find((u) => u.name === 'textInput');
+      const outputUnit = expensiveModel?.pricing?.units?.find((u) => u.name === 'textOutput');
+      if (inputUnit?.strategy === 'fixed') expect(inputUnit.rate).toBeGreaterThan(1000000);
+      if (outputUnit?.strategy === 'fixed') expect(outputUnit.rate).toBeGreaterThan(1000000);
+    });
+  });
+
+  describe('formatPrice utility', () => {
+    // Test formatPrice indirectly through models function
+    it('should handle undefined price', async () => {
+      const mockModels = [
+        {
+          id: 'test/model',
+          canonical_slug: 'test/model',
+          name: 'Test Model',
+          created: 1679587200,
+          context_length: 4096,
+          architecture: {
+            modality: 'text->text',
+            input_modalities: ['text'],
+            output_modalities: ['text'],
+            tokenizer: 'default',
+            instruct_type: null,
+          },
+          pricing: {
+            prompt: undefined,
+            completion: undefined,
+          },
+          top_provider: {
+            context_length: 4096,
+            max_completion_tokens: 2048,
+            is_moderated: false,
+          },
+          supported_parameters: [],
+        },
+      ];
+
+      vi.stubGlobal(
+        'fetch',
+        vi.fn().mockResolvedValue({
+          ok: true,
+          json: async () => ({ data: mockModels }),
+        }),
+      );
+
+      const models = await params.models();
+
+      const testModel = models.find((m) => m.id === 'test/model');
+      expect(testModel?.pricing).toBeUndefined();
+    });
+
+    it('should handle string -1 as undefined price', async () => {
+      const mockModels = [
+        {
+          id: 'invalid-price/model',
+          canonical_slug: 'invalid-price/model',
+          name: 'Invalid Price Model',
+          created: 1679587200,
+          context_length: 4096,
+          architecture: {
+            modality: 'text->text',
+            input_modalities: ['text'],
+            output_modalities: ['text'],
+            tokenizer: 'default',
+            instruct_type: null,
+          },
+          pricing: {
+            prompt: '-1',
+            completion: '-1',
+          },
+          top_provider: {
+            context_length: 4096,
+            max_completion_tokens: 2048,
+            is_moderated: false,
+          },
+          supported_parameters: [],
+        },
+      ];
+
+      vi.stubGlobal(
+        'fetch',
+        vi.fn().mockResolvedValue({
+          ok: true,
+          json: async () => ({ data: mockModels }),
+        }),
+      );
+
+      const models = await params.models();
+
+      const invalidPriceModel = models.find((m) => m.id === 'invalid-price/model');
+      expect(invalidPriceModel?.pricing).toBeUndefined();
+    });
+
+    it('should format very small price values correctly', async () => {
+      const mockModels = [
+        {
+          id: 'micro-price/model',
+          canonical_slug: 'micro-price/model',
+          name: 'Micro Price Model',
+          created: 1679587200,
+          context_length: 4096,
+          architecture: {
+            modality: 'text->text',
+            input_modalities: ['text'],
+            output_modalities: ['text'],
+            tokenizer: 'default',
+            instruct_type: null,
+          },
+          pricing: {
+            prompt: '0.0000001',
+            completion: '0.0000002',
+          },
+          top_provider: {
+            context_length: 4096,
+            max_completion_tokens: 2048,
+            is_moderated: false,
+          },
+          supported_parameters: [],
+        },
+      ];
+
+      vi.stubGlobal(
+        'fetch',
+        vi.fn().mockResolvedValue({
+          ok: true,
+          json: async () => ({ data: mockModels }),
+        }),
+      );

-      const list = await instance.models();
+      const models = await params.models();

-      // Verify that in the current implementation, when the frontend fetch throws, an empty list is returned
-      expect(list.length).toBe(0);
-      expect(list).toEqual([]);
+      const microPriceModel = models.find((m) => m.id === 'micro-price/model');
+      expect(microPriceModel?.pricing?.units).toBeDefined();
+      expect(microPriceModel?.pricing?.units?.length).toBe(2);
     });
   });
 });