@lobehub/chat 1.136.13 → 1.137.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190)
  1. package/.cursor/rules/add-setting-env.mdc +175 -0
  2. package/.cursor/rules/db-migrations.mdc +25 -0
  3. package/.env.example +7 -0
  4. package/CHANGELOG.md +50 -0
  5. package/Dockerfile +3 -2
  6. package/Dockerfile.database +15 -3
  7. package/Dockerfile.pglite +3 -2
  8. package/changelog/v1.json +18 -0
  9. package/docs/development/database-schema.dbml +1 -0
  10. package/docs/self-hosting/advanced/feature-flags.mdx +25 -15
  11. package/docs/self-hosting/advanced/feature-flags.zh-CN.mdx +25 -15
  12. package/docs/self-hosting/environment-variables/basic.mdx +12 -0
  13. package/docs/self-hosting/environment-variables/basic.zh-CN.mdx +12 -0
  14. package/locales/ar/setting.json +8 -0
  15. package/locales/bg-BG/setting.json +8 -0
  16. package/locales/de-DE/setting.json +8 -0
  17. package/locales/en-US/setting.json +8 -0
  18. package/locales/es-ES/setting.json +8 -0
  19. package/locales/fa-IR/setting.json +8 -0
  20. package/locales/fr-FR/setting.json +8 -0
  21. package/locales/it-IT/setting.json +8 -0
  22. package/locales/ja-JP/setting.json +8 -0
  23. package/locales/ko-KR/setting.json +8 -0
  24. package/locales/nl-NL/setting.json +8 -0
  25. package/locales/pl-PL/setting.json +8 -0
  26. package/locales/pt-BR/setting.json +8 -0
  27. package/locales/ru-RU/setting.json +8 -0
  28. package/locales/tr-TR/setting.json +8 -0
  29. package/locales/vi-VN/setting.json +8 -0
  30. package/locales/zh-CN/setting.json +8 -0
  31. package/locales/zh-TW/setting.json +8 -0
  32. package/package.json +1 -1
  33. package/packages/agent-runtime/examples/tools-calling.ts +4 -3
  34. package/packages/agent-runtime/src/core/__tests__/runtime.test.ts +559 -29
  35. package/packages/agent-runtime/src/core/runtime.ts +171 -43
  36. package/packages/agent-runtime/src/types/instruction.ts +32 -6
  37. package/packages/agent-runtime/src/types/runtime.ts +2 -2
  38. package/packages/agent-runtime/src/types/state.ts +1 -8
  39. package/packages/agent-runtime/vitest.config.mts +14 -0
  40. package/packages/const/src/settings/image.ts +8 -0
  41. package/packages/const/src/settings/index.ts +3 -0
  42. package/packages/context-engine/src/__tests__/pipeline.test.ts +485 -0
  43. package/packages/context-engine/src/base/__tests__/BaseProcessor.test.ts +381 -0
  44. package/packages/context-engine/src/base/__tests__/BaseProvider.test.ts +392 -0
  45. package/packages/context-engine/src/processors/__tests__/MessageCleanup.test.ts +346 -0
  46. package/packages/context-engine/src/processors/__tests__/ToolCall.test.ts +552 -0
  47. package/packages/database/migrations/0038_add_image_user_settings.sql +1 -0
  48. package/packages/database/migrations/meta/0038_snapshot.json +7580 -0
  49. package/packages/database/migrations/meta/_journal.json +7 -0
  50. package/packages/database/src/core/migrations.json +6 -0
  51. package/packages/database/src/models/user.ts +3 -1
  52. package/packages/database/src/schemas/user.ts +1 -0
  53. package/packages/file-loaders/src/loaders/docx/index.test.ts +0 -1
  54. package/packages/file-loaders/src/loaders/excel/__snapshots__/index.test.ts.snap +30 -0
  55. package/packages/file-loaders/src/loaders/excel/index.test.ts +8 -0
  56. package/packages/file-loaders/src/loaders/pptx/index.test.ts +25 -0
  57. package/packages/file-loaders/src/utils/parser-utils.test.ts +155 -0
  58. package/packages/file-loaders/vitest.config.mts +8 -0
  59. package/packages/model-runtime/CLAUDE.md +5 -0
  60. package/packages/model-runtime/docs/test-coverage.md +706 -0
  61. package/packages/model-runtime/src/core/ModelRuntime.test.ts +231 -0
  62. package/packages/model-runtime/src/core/RouterRuntime/createRuntime.ts +1 -1
  63. package/packages/model-runtime/src/core/openaiCompatibleFactory/createImage.test.ts +799 -0
  64. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.test.ts +188 -4
  65. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.ts +41 -10
  66. package/packages/model-runtime/src/core/streams/openai/__snapshots__/responsesStream.test.ts.snap +439 -0
  67. package/packages/model-runtime/src/core/streams/openai/openai.test.ts +789 -0
  68. package/packages/model-runtime/src/core/streams/openai/responsesStream.test.ts +551 -0
  69. package/packages/model-runtime/src/core/usageConverters/utils/computeChatCost.test.ts +230 -0
  70. package/packages/model-runtime/src/core/usageConverters/utils/computeImageCost.test.ts +334 -37
  71. package/packages/model-runtime/src/providerTestUtils.ts +148 -145
  72. package/packages/model-runtime/src/providers/ai302/index.test.ts +60 -0
  73. package/packages/model-runtime/src/providers/ai302/index.ts +9 -4
  74. package/packages/model-runtime/src/providers/ai360/index.test.ts +1213 -1
  75. package/packages/model-runtime/src/providers/ai360/index.ts +9 -4
  76. package/packages/model-runtime/src/providers/aihubmix/index.test.ts +73 -0
  77. package/packages/model-runtime/src/providers/aihubmix/index.ts +6 -9
  78. package/packages/model-runtime/src/providers/akashchat/index.test.ts +433 -3
  79. package/packages/model-runtime/src/providers/akashchat/index.ts +12 -7
  80. package/packages/model-runtime/src/providers/anthropic/generateObject.test.ts +183 -29
  81. package/packages/model-runtime/src/providers/anthropic/generateObject.ts +40 -24
  82. package/packages/model-runtime/src/providers/azureai/index.test.ts +102 -0
  83. package/packages/model-runtime/src/providers/baichuan/index.test.ts +416 -26
  84. package/packages/model-runtime/src/providers/baichuan/index.ts +23 -20
  85. package/packages/model-runtime/src/providers/bedrock/index.test.ts +420 -2
  86. package/packages/model-runtime/src/providers/cerebras/index.test.ts +465 -0
  87. package/packages/model-runtime/src/providers/cerebras/index.ts +8 -3
  88. package/packages/model-runtime/src/providers/cohere/index.test.ts +1074 -1
  89. package/packages/model-runtime/src/providers/cohere/index.ts +8 -3
  90. package/packages/model-runtime/src/providers/cometapi/index.test.ts +439 -3
  91. package/packages/model-runtime/src/providers/cometapi/index.ts +8 -3
  92. package/packages/model-runtime/src/providers/deepseek/index.test.ts +116 -1
  93. package/packages/model-runtime/src/providers/deepseek/index.ts +8 -3
  94. package/packages/model-runtime/src/providers/fireworksai/index.test.ts +264 -3
  95. package/packages/model-runtime/src/providers/fireworksai/index.ts +8 -3
  96. package/packages/model-runtime/src/providers/giteeai/index.test.ts +325 -3
  97. package/packages/model-runtime/src/providers/giteeai/index.ts +23 -6
  98. package/packages/model-runtime/src/providers/github/index.test.ts +532 -3
  99. package/packages/model-runtime/src/providers/github/index.ts +8 -3
  100. package/packages/model-runtime/src/providers/groq/index.test.ts +344 -31
  101. package/packages/model-runtime/src/providers/groq/index.ts +8 -3
  102. package/packages/model-runtime/src/providers/higress/index.test.ts +142 -0
  103. package/packages/model-runtime/src/providers/higress/index.ts +8 -3
  104. package/packages/model-runtime/src/providers/huggingface/index.test.ts +612 -1
  105. package/packages/model-runtime/src/providers/huggingface/index.ts +9 -4
  106. package/packages/model-runtime/src/providers/hunyuan/index.test.ts +365 -1
  107. package/packages/model-runtime/src/providers/hunyuan/index.ts +9 -3
  108. package/packages/model-runtime/src/providers/infiniai/index.test.ts +71 -0
  109. package/packages/model-runtime/src/providers/internlm/index.test.ts +369 -2
  110. package/packages/model-runtime/src/providers/internlm/index.ts +10 -5
  111. package/packages/model-runtime/src/providers/jina/index.test.ts +164 -3
  112. package/packages/model-runtime/src/providers/jina/index.ts +8 -3
  113. package/packages/model-runtime/src/providers/lmstudio/index.test.ts +182 -3
  114. package/packages/model-runtime/src/providers/lmstudio/index.ts +8 -3
  115. package/packages/model-runtime/src/providers/mistral/index.test.ts +779 -27
  116. package/packages/model-runtime/src/providers/mistral/index.ts +8 -3
  117. package/packages/model-runtime/src/providers/modelscope/index.test.ts +232 -1
  118. package/packages/model-runtime/src/providers/modelscope/index.ts +8 -3
  119. package/packages/model-runtime/src/providers/moonshot/index.test.ts +489 -2
  120. package/packages/model-runtime/src/providers/moonshot/index.ts +8 -3
  121. package/packages/model-runtime/src/providers/nebius/index.test.ts +381 -3
  122. package/packages/model-runtime/src/providers/nebius/index.ts +8 -3
  123. package/packages/model-runtime/src/providers/newapi/index.test.ts +667 -3
  124. package/packages/model-runtime/src/providers/newapi/index.ts +6 -3
  125. package/packages/model-runtime/src/providers/nvidia/index.test.ts +168 -1
  126. package/packages/model-runtime/src/providers/nvidia/index.ts +12 -7
  127. package/packages/model-runtime/src/providers/ollama/index.test.ts +797 -1
  128. package/packages/model-runtime/src/providers/ollama/index.ts +8 -0
  129. package/packages/model-runtime/src/providers/ollamacloud/index.test.ts +411 -0
  130. package/packages/model-runtime/src/providers/ollamacloud/index.ts +8 -3
  131. package/packages/model-runtime/src/providers/openai/index.test.ts +171 -2
  132. package/packages/model-runtime/src/providers/openai/index.ts +8 -3
  133. package/packages/model-runtime/src/providers/openrouter/index.test.ts +1647 -95
  134. package/packages/model-runtime/src/providers/openrouter/index.ts +12 -7
  135. package/packages/model-runtime/src/providers/qiniu/index.test.ts +294 -1
  136. package/packages/model-runtime/src/providers/qiniu/index.ts +8 -3
  137. package/packages/model-runtime/src/providers/search1api/index.test.ts +1131 -11
  138. package/packages/model-runtime/src/providers/search1api/index.ts +10 -4
  139. package/packages/model-runtime/src/providers/sensenova/index.test.ts +1069 -1
  140. package/packages/model-runtime/src/providers/sensenova/index.ts +8 -3
  141. package/packages/model-runtime/src/providers/siliconcloud/index.test.ts +196 -0
  142. package/packages/model-runtime/src/providers/siliconcloud/index.ts +8 -3
  143. package/packages/model-runtime/src/providers/spark/index.test.ts +293 -1
  144. package/packages/model-runtime/src/providers/spark/index.ts +8 -3
  145. package/packages/model-runtime/src/providers/stepfun/index.test.ts +322 -3
  146. package/packages/model-runtime/src/providers/stepfun/index.ts +8 -3
  147. package/packages/model-runtime/src/providers/tencentcloud/index.test.ts +182 -3
  148. package/packages/model-runtime/src/providers/tencentcloud/index.ts +8 -3
  149. package/packages/model-runtime/src/providers/togetherai/index.test.ts +359 -4
  150. package/packages/model-runtime/src/providers/togetherai/index.ts +12 -5
  151. package/packages/model-runtime/src/providers/v0/index.test.ts +341 -0
  152. package/packages/model-runtime/src/providers/v0/index.ts +20 -6
  153. package/packages/model-runtime/src/providers/vercelaigateway/index.test.ts +710 -0
  154. package/packages/model-runtime/src/providers/vercelaigateway/index.ts +19 -13
  155. package/packages/model-runtime/src/providers/vllm/index.test.ts +45 -1
  156. package/packages/model-runtime/src/providers/volcengine/index.test.ts +75 -0
  157. package/packages/model-runtime/src/providers/wenxin/index.test.ts +144 -1
  158. package/packages/model-runtime/src/providers/wenxin/index.ts +8 -3
  159. package/packages/model-runtime/src/providers/xai/index.test.ts +105 -1
  160. package/packages/model-runtime/src/providers/xinference/index.test.ts +70 -1
  161. package/packages/model-runtime/src/providers/zeroone/index.test.ts +327 -3
  162. package/packages/model-runtime/src/providers/zeroone/index.ts +23 -6
  163. package/packages/model-runtime/src/providers/zhipu/index.test.ts +908 -236
  164. package/packages/model-runtime/src/providers/zhipu/index.ts +8 -3
  165. package/packages/model-runtime/src/types/structureOutput.ts +5 -1
  166. package/packages/model-runtime/vitest.config.mts +7 -1
  167. package/packages/types/src/aiChat.ts +20 -2
  168. package/packages/types/src/serverConfig.ts +7 -1
  169. package/packages/types/src/tool/index.ts +1 -0
  170. package/packages/types/src/tool/tool.ts +33 -0
  171. package/packages/types/src/user/settings/image.ts +3 -0
  172. package/packages/types/src/user/settings/index.ts +3 -0
  173. package/src/app/[variants]/(main)/settings/_layout/SettingsContent.tsx +3 -0
  174. package/src/app/[variants]/(main)/settings/hooks/useCategory.tsx +8 -3
  175. package/src/app/[variants]/(main)/settings/image/index.tsx +74 -0
  176. package/src/components/FormInput/FormSliderWithInput.tsx +40 -0
  177. package/src/components/FormInput/index.ts +1 -0
  178. package/src/envs/image.ts +27 -0
  179. package/src/features/Conversation/Messages/Assistant/index.tsx +1 -1
  180. package/src/features/Conversation/Messages/User/index.tsx +2 -2
  181. package/src/hooks/useFetchAiImageConfig.ts +12 -17
  182. package/src/locales/default/setting.ts +8 -0
  183. package/src/server/globalConfig/index.ts +5 -0
  184. package/src/server/routers/lambda/aiChat.ts +2 -0
  185. package/src/store/global/initialState.ts +1 -0
  186. package/src/store/image/slices/generationConfig/action.test.ts +17 -0
  187. package/src/store/image/slices/generationConfig/action.ts +18 -21
  188. package/src/store/image/slices/generationConfig/initialState.ts +3 -2
  189. package/src/store/user/slices/common/action.ts +1 -0
  190. package/src/store/user/slices/settings/selectors/settings.ts +3 -0
@@ -0,0 +1,710 @@
1
+ // @vitest-environment node
2
+ import { ModelProvider } from 'model-bank';
3
+ import { beforeEach, describe, expect, it, vi } from 'vitest';
4
+
5
+ import { testProvider } from '../../providerTestUtils';
6
+ import { LobeVercelAIGatewayAI, VercelAIGatewayModelCard, formatPrice, params } from './index';
7
+
8
+ testProvider({
9
+ Runtime: LobeVercelAIGatewayAI,
10
+ bizErrorType: 'ProviderBizError',
11
+ chatDebugEnv: 'DEBUG_VERCELAIGATEWAY_CHAT_COMPLETION',
12
+ chatModel: 'gpt-4o',
13
+ defaultBaseURL: 'https://ai-gateway.vercel.sh/v1',
14
+ invalidErrorType: 'InvalidProviderAPIKey',
15
+ provider: ModelProvider.VercelAIGateway,
16
+ test: {
17
+ skipAPICall: true,
18
+ skipErrorHandle: true,
19
+ },
20
+ });
21
+
22
+ describe('LobeVercelAIGatewayAI - custom features', () => {
23
+ let instance: InstanceType<typeof LobeVercelAIGatewayAI>;
24
+
25
+ beforeEach(() => {
26
+ instance = new LobeVercelAIGatewayAI({ apiKey: 'test_api_key' });
27
+ vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
28
+ new ReadableStream() as any,
29
+ );
30
+ });
31
+
32
+ describe('params export', () => {
33
+ it('should export params object', () => {
34
+ expect(params).toBeDefined();
35
+ expect(params.provider).toBe(ModelProvider.VercelAIGateway);
36
+ expect(params.baseURL).toBe('https://ai-gateway.vercel.sh/v1');
37
+ });
38
+
39
+ it('should have constructor options with default headers', () => {
40
+ expect(params.constructorOptions).toBeDefined();
41
+ expect(params.constructorOptions?.defaultHeaders).toEqual({
42
+ 'http-referer': 'https://lobehub.com',
43
+ 'x-title': 'LobeHub',
44
+ });
45
+ });
46
+ });
47
+
48
+ describe('debug configuration', () => {
49
+ it('should disable debug by default', () => {
50
+ delete process.env.DEBUG_VERCELAIGATEWAY_CHAT_COMPLETION;
51
+ const result = params.debug.chatCompletion();
52
+ expect(result).toBe(false);
53
+ });
54
+
55
+ it('should enable debug when env is set', () => {
56
+ process.env.DEBUG_VERCELAIGATEWAY_CHAT_COMPLETION = '1';
57
+ const result = params.debug.chatCompletion();
58
+ expect(result).toBe(true);
59
+ delete process.env.DEBUG_VERCELAIGATEWAY_CHAT_COMPLETION;
60
+ });
61
+
62
+ it('should disable debug when env is not "1"', () => {
63
+ process.env.DEBUG_VERCELAIGATEWAY_CHAT_COMPLETION = '0';
64
+ const result = params.debug.chatCompletion();
65
+ expect(result).toBe(false);
66
+ delete process.env.DEBUG_VERCELAIGATEWAY_CHAT_COMPLETION;
67
+ });
68
+ });
69
+
70
+ describe('handlePayload', () => {
71
+ it('should add reasoning_effort to providerOptions.openai', async () => {
72
+ await instance.chat({
73
+ messages: [{ content: 'Hello', role: 'user' }],
74
+ model: 'o1-preview',
75
+ reasoning_effort: 'high',
76
+ });
77
+
78
+ const calledPayload = (instance['client'].chat.completions.create as any).mock.calls[0][0];
79
+ expect(calledPayload.providerOptions?.openai?.reasoningEffort).toBe('high');
80
+ expect(calledPayload.providerOptions?.openai?.reasoningSummary).toBe('auto');
81
+ });
82
+
83
+ it('should handle both reasoning_effort and verbosity', async () => {
84
+ await instance.chat({
85
+ messages: [{ content: 'Hello', role: 'user' }],
86
+ model: 'o1-preview',
87
+ reasoning_effort: 'medium',
88
+ verbosity: 'low',
89
+ });
90
+
91
+ const calledPayload = (instance['client'].chat.completions.create as any).mock.calls[0][0];
92
+ expect(calledPayload.providerOptions?.openai?.reasoningEffort).toBe('medium');
93
+ expect(calledPayload.providerOptions?.openai?.textVerbosity).toBe('low');
94
+ });
95
+
96
+ it('should handle verbosity without reasoning_effort', async () => {
97
+ await instance.chat({
98
+ messages: [{ content: 'Hello', role: 'user' }],
99
+ model: 'gpt-4o',
100
+ verbosity: 'high',
101
+ });
102
+
103
+ const calledPayload = (instance['client'].chat.completions.create as any).mock.calls[0][0];
104
+ expect(calledPayload.providerOptions?.openai?.textVerbosity).toBe('high');
105
+ expect(calledPayload.providerOptions?.openai?.reasoningEffort).toBeUndefined();
106
+ });
107
+
108
+ it('should not add providerOptions when no special parameters', async () => {
109
+ await instance.chat({
110
+ messages: [{ content: 'Hello', role: 'user' }],
111
+ model: 'gpt-4o',
112
+ });
113
+
114
+ const calledPayload = (instance['client'].chat.completions.create as any).mock.calls[0][0];
115
+ expect(calledPayload.providerOptions).toEqual({});
116
+ });
117
+
118
+ it('should preserve other payload properties', async () => {
119
+ await instance.chat({
120
+ max_tokens: 1000,
121
+ messages: [{ content: 'Hello', role: 'user' }],
122
+ model: 'o1-preview',
123
+ reasoning_effort: 'high',
124
+ temperature: 0.7,
125
+ });
126
+
127
+ const calledPayload = (instance['client'].chat.completions.create as any).mock.calls[0][0];
128
+ expect(calledPayload.model).toBe('o1-preview');
129
+ expect(calledPayload.temperature).toBe(0.7);
130
+ expect(calledPayload.max_tokens).toBe(1000);
131
+ expect(calledPayload.reasoning_effort).toBeUndefined();
132
+ });
133
+
134
+ it('should handle different reasoning_effort values', async () => {
135
+ const effortValues = ['low', 'medium', 'high'] as const;
136
+
137
+ for (const effort of effortValues) {
138
+ vi.clearAllMocks();
139
+ await instance.chat({
140
+ messages: [{ content: 'Hello', role: 'user' }],
141
+ model: 'o1-preview',
142
+ reasoning_effort: effort,
143
+ } as any);
144
+
145
+ const calledPayload = (instance['client'].chat.completions.create as any).mock.calls[0][0];
146
+ expect(calledPayload.providerOptions?.openai?.reasoningEffort).toBe(effort);
147
+ }
148
+ });
149
+ });
150
+
151
+ describe('models function', () => {
152
+ it('should fetch and process models successfully', async () => {
153
+ const mockModelData: VercelAIGatewayModelCard[] = [
154
+ {
155
+ context_window: 128_000,
156
+ description: 'GPT-4o model',
157
+ id: 'gpt-4o',
158
+ max_tokens: 4096,
159
+ name: 'GPT-4o',
160
+ pricing: {
161
+ input: '0.000005',
162
+ output: '0.000015',
163
+ },
164
+ tags: ['tool-use', 'vision'],
165
+ type: 'chat',
166
+ },
167
+ {
168
+ context_window: 200_000,
169
+ id: 'claude-3-5-sonnet',
170
+ name: 'Claude 3.5 Sonnet',
171
+ pricing: {
172
+ input: 0.000_003,
173
+ input_cache_read: 0.000_000_3,
174
+ input_cache_write: 0.000_003_75,
175
+ output: 0.000_015,
176
+ },
177
+ tags: ['reasoning'],
178
+ type: 'chat',
179
+ },
180
+ ];
181
+
182
+ const mockClient = {
183
+ models: {
184
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
185
+ },
186
+ };
187
+
188
+ const models = await params.models({ client: mockClient as any });
189
+
190
+ expect(mockClient.models.list).toHaveBeenCalled();
191
+ expect(models).toBeDefined();
192
+ expect(Array.isArray(models)).toBe(true);
193
+ });
194
+
195
+ it('should handle free models with (free) suffix', async () => {
196
+ const mockModelData: VercelAIGatewayModelCard[] = [
197
+ {
198
+ id: 'free-model',
199
+ name: 'Free Model',
200
+ pricing: {
201
+ input: '0',
202
+ output: '0',
203
+ },
204
+ tags: [],
205
+ },
206
+ ];
207
+
208
+ const mockClient = {
209
+ models: {
210
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
211
+ },
212
+ };
213
+
214
+ const models = await params.models({ client: mockClient as any });
215
+ const freeModel = models.find((m) => m.id === 'free-model');
216
+
217
+ expect(freeModel).toBeDefined();
218
+ expect(freeModel?.displayName).toContain('(free)');
219
+ });
220
+
221
+ it('should handle models with numeric pricing', async () => {
222
+ const mockModelData: VercelAIGatewayModelCard[] = [
223
+ {
224
+ id: 'numeric-price-model',
225
+ pricing: {
226
+ input: 0.000_003,
227
+ output: 0.000_015,
228
+ },
229
+ tags: [],
230
+ },
231
+ ];
232
+
233
+ const mockClient = {
234
+ models: {
235
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
236
+ },
237
+ };
238
+
239
+ const models = await params.models({ client: mockClient as any });
240
+ const model = models.find((m) => m.id === 'numeric-price-model');
241
+
242
+ expect(model).toBeDefined();
243
+ expect(model?.pricing).toBeDefined();
244
+ expect(model?.pricing?.units).toBeDefined();
245
+ expect(Array.isArray(model?.pricing?.units)).toBe(true);
246
+ });
247
+
248
+ it('should handle models with missing pricing', async () => {
249
+ const mockModelData: VercelAIGatewayModelCard[] = [
250
+ {
251
+ id: 'no-pricing-model',
252
+ tags: [],
253
+ },
254
+ ];
255
+
256
+ const mockClient = {
257
+ models: {
258
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
259
+ },
260
+ };
261
+
262
+ const models = await params.models({ client: mockClient as any });
263
+ const model = models.find((m) => m.id === 'no-pricing-model');
264
+
265
+ expect(model).toBeDefined();
266
+ expect((model?.pricing as any)?.input).toBeUndefined();
267
+ expect((model?.pricing as any)?.output).toBeUndefined();
268
+ });
269
+
270
+ it('should detect function call capability from tags', async () => {
271
+ const mockModelData: VercelAIGatewayModelCard[] = [
272
+ {
273
+ id: 'tool-model',
274
+ tags: ['tool-use'],
275
+ },
276
+ {
277
+ id: 'no-tool-model',
278
+ tags: [],
279
+ },
280
+ ];
281
+
282
+ const mockClient = {
283
+ models: {
284
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
285
+ },
286
+ };
287
+
288
+ const models = await params.models({ client: mockClient as any });
289
+ const toolModel = models.find((m) => m.id === 'tool-model');
290
+ const noToolModel = models.find((m) => m.id === 'no-tool-model');
291
+
292
+ expect(toolModel?.functionCall).toBe(true);
293
+ expect(noToolModel?.functionCall).toBe(false);
294
+ });
295
+
296
+ it('should detect vision capability from tags', async () => {
297
+ const mockModelData: VercelAIGatewayModelCard[] = [
298
+ {
299
+ id: 'vision-model',
300
+ tags: ['vision'],
301
+ },
302
+ {
303
+ id: 'no-vision-model',
304
+ tags: [],
305
+ },
306
+ ];
307
+
308
+ const mockClient = {
309
+ models: {
310
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
311
+ },
312
+ };
313
+
314
+ const models = await params.models({ client: mockClient as any });
315
+ const visionModel = models.find((m) => m.id === 'vision-model');
316
+ const noVisionModel = models.find((m) => m.id === 'no-vision-model');
317
+
318
+ expect(visionModel?.vision).toBe(true);
319
+ expect(noVisionModel?.vision).toBe(false);
320
+ });
321
+
322
+ it('should detect reasoning capability from tags', async () => {
323
+ const mockModelData: VercelAIGatewayModelCard[] = [
324
+ {
325
+ id: 'reasoning-model',
326
+ tags: ['reasoning'],
327
+ },
328
+ {
329
+ id: 'no-reasoning-model',
330
+ tags: [],
331
+ },
332
+ ];
333
+
334
+ const mockClient = {
335
+ models: {
336
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
337
+ },
338
+ };
339
+
340
+ const models = await params.models({ client: mockClient as any });
341
+ const reasoningModel = models.find((m) => m.id === 'reasoning-model');
342
+ const noReasoningModel = models.find((m) => m.id === 'no-reasoning-model');
343
+
344
+ expect(reasoningModel?.reasoning).toBe(true);
345
+ expect(noReasoningModel?.reasoning).toBe(false);
346
+ });
347
+
348
+ it('should handle embedding type models', async () => {
349
+ const mockModelData: VercelAIGatewayModelCard[] = [
350
+ {
351
+ id: 'text-embedding-3-small',
352
+ tags: [],
353
+ type: 'embedding',
354
+ },
355
+ {
356
+ id: 'gpt-4o',
357
+ tags: [],
358
+ type: 'chat',
359
+ },
360
+ ];
361
+
362
+ const mockClient = {
363
+ models: {
364
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
365
+ },
366
+ };
367
+
368
+ const models = await params.models({ client: mockClient as any });
369
+ const embeddingModel = models.find((m) => m.id === 'text-embedding-3-small');
370
+ const chatModel = models.find((m) => m.id === 'gpt-4o');
371
+
372
+ expect(embeddingModel?.type).toBe('embedding');
373
+ expect(chatModel?.type).toBe('chat');
374
+ });
375
+
376
+ it('should handle models with cache pricing', async () => {
377
+ const mockModelData: VercelAIGatewayModelCard[] = [
378
+ {
379
+ id: 'cache-model',
380
+ pricing: {
381
+ input: '0.000005',
382
+ input_cache_read: '0.0000005',
383
+ input_cache_write: '0.00000625',
384
+ output: '0.000015',
385
+ },
386
+ tags: [],
387
+ },
388
+ ];
389
+
390
+ const mockClient = {
391
+ models: {
392
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
393
+ },
394
+ };
395
+
396
+ const models = await params.models({ client: mockClient as any });
397
+ const model = models.find((m) => m.id === 'cache-model');
398
+
399
+ expect(model?.pricing?.units).toBeDefined();
400
+ expect(Array.isArray(model?.pricing?.units)).toBe(true);
401
+ // Check for cache pricing units
402
+ const cacheReadUnit = model?.pricing?.units?.find(
403
+ (u: any) => u.name === 'textInput_cacheRead',
404
+ );
405
+ const cacheWriteUnit = model?.pricing?.units?.find(
406
+ (u: any) => u.name === 'textInput_cacheWrite',
407
+ );
408
+ expect(cacheReadUnit).toBeDefined();
409
+ expect(cacheWriteUnit).toBeDefined();
410
+ });
411
+
412
+ it('should handle missing model name with fallback to id', async () => {
413
+ const mockModelData: VercelAIGatewayModelCard[] = [
414
+ {
415
+ id: 'model-without-name',
416
+ tags: [],
417
+ },
418
+ ];
419
+
420
+ const mockClient = {
421
+ models: {
422
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
423
+ },
424
+ };
425
+
426
+ const models = await params.models({ client: mockClient as any });
427
+ const model = models.find((m) => m.id === 'model-without-name');
428
+
429
+ expect(model?.displayName).toBe('model-without-name');
430
+ });
431
+
432
+ it('should handle invalid tags (non-array)', async () => {
433
+ const mockModelData: VercelAIGatewayModelCard[] = [
434
+ {
435
+ id: 'invalid-tags-model',
436
+ tags: 'not-an-array' as any,
437
+ },
438
+ ];
439
+
440
+ const mockClient = {
441
+ models: {
442
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
443
+ },
444
+ };
445
+
446
+ const models = await params.models({ client: mockClient as any });
447
+ const model = models.find((m) => m.id === 'invalid-tags-model');
448
+
449
+ expect(model?.functionCall).toBe(false);
450
+ expect(model?.vision).toBe(false);
451
+ expect(model?.reasoning).toBe(false);
452
+ });
453
+
454
+ it('should handle empty model list', async () => {
455
+ const mockClient = {
456
+ models: {
457
+ list: vi.fn().mockResolvedValue({ data: [] }),
458
+ },
459
+ };
460
+
461
+ const models = await params.models({ client: mockClient as any });
462
+
463
+ expect(models).toEqual([]);
464
+ });
465
+
466
+ it('should handle null model list', async () => {
467
+ const mockClient = {
468
+ models: {
469
+ list: vi.fn().mockResolvedValue({ data: null }),
470
+ },
471
+ };
472
+
473
+ const models = await params.models({ client: mockClient as any });
474
+
475
+ expect(models).toEqual([]);
476
+ });
477
+
478
+ it('should set contextWindowTokens correctly', async () => {
479
+ const mockModelData: VercelAIGatewayModelCard[] = [
480
+ {
481
+ context_window: 128_000,
482
+ id: 'model-with-context',
483
+ tags: [],
484
+ },
485
+ {
486
+ id: 'model-without-context',
487
+ tags: [],
488
+ },
489
+ ];
490
+
491
+ const mockClient = {
492
+ models: {
493
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
494
+ },
495
+ };
496
+
497
+ const models = await params.models({ client: mockClient as any });
498
+ const withContext = models.find((m) => m.id === 'model-with-context');
499
+ const withoutContext = models.find((m) => m.id === 'model-without-context');
500
+
501
+ expect(withContext?.contextWindowTokens).toBe(128_000);
502
+ expect(withoutContext?.contextWindowTokens).toBeUndefined();
503
+ });
504
+
505
+ it('should set maxOutput correctly', async () => {
506
+ const mockModelData: VercelAIGatewayModelCard[] = [
507
+ {
508
+ id: 'model-with-max-tokens',
509
+ max_tokens: 4096,
510
+ tags: [],
511
+ },
512
+ {
513
+ id: 'model-with-string-max-tokens',
514
+ max_tokens: '8192' as any,
515
+ tags: [],
516
+ },
517
+ {
518
+ id: 'model-without-max-tokens',
519
+ tags: [],
520
+ },
521
+ ];
522
+
523
+ const mockClient = {
524
+ models: {
525
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
526
+ },
527
+ };
528
+
529
+ const models = await params.models({ client: mockClient as any });
530
+ const withMaxTokens = models.find((m) => m.id === 'model-with-max-tokens');
531
+ const withStringMaxTokens = models.find((m) => m.id === 'model-with-string-max-tokens');
532
+ const withoutMaxTokens = models.find((m) => m.id === 'model-without-max-tokens');
533
+
534
+ expect(withMaxTokens?.maxOutput).toBe(4096);
535
+ expect(withStringMaxTokens?.maxOutput).toBeUndefined();
536
+ expect(withoutMaxTokens?.maxOutput).toBeUndefined();
537
+ });
538
+
539
+ it('should handle invalid pricing values', async () => {
540
+ const mockModelData: VercelAIGatewayModelCard[] = [
541
+ {
542
+ id: 'invalid-pricing-model',
543
+ pricing: {
544
+ input: 'not-a-number',
545
+ output: 'also-not-a-number',
546
+ },
547
+ tags: [],
548
+ },
549
+ ];
550
+
551
+ const mockClient = {
552
+ models: {
553
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
554
+ },
555
+ };
556
+
557
+ const models = await params.models({ client: mockClient as any });
558
+ const model = models.find((m) => m.id === 'invalid-pricing-model');
559
+
560
+ expect((model?.pricing as any)?.input).toBeUndefined();
561
+ expect((model?.pricing as any)?.output).toBeUndefined();
562
+ });
563
+
564
+ it('should handle mixed valid and invalid pricing', async () => {
565
+ const mockModelData: VercelAIGatewayModelCard[] = [
566
+ {
567
+ id: 'mixed-pricing-model',
568
+ pricing: {
569
+ input: '0.000005',
570
+ output: 'invalid',
571
+ },
572
+ tags: [],
573
+ },
574
+ ];
575
+
576
+ const mockClient = {
577
+ models: {
578
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
579
+ },
580
+ };
581
+
582
+ const models = await params.models({ client: mockClient as any });
583
+ const model = models.find((m) => m.id === 'mixed-pricing-model');
584
+
585
+ expect(model?.pricing?.units).toBeDefined();
586
+ // Only input should have valid pricing
587
+ const inputUnit = model?.pricing?.units?.find((u: any) => u.name === 'textInput');
588
+ const outputUnit = model?.pricing?.units?.find((u: any) => u.name === 'textOutput');
589
+ expect(inputUnit).toBeDefined();
590
+ expect(outputUnit).toBeUndefined();
591
+ });
592
+
593
+ it('should handle created timestamp as number', async () => {
594
+ const mockModelData: VercelAIGatewayModelCard[] = [
595
+ {
596
+ created: 1_700_000_000,
597
+ id: 'model-with-timestamp', // Valid timestamp in 2023
598
+ tags: [],
599
+ },
600
+ ];
601
+
602
+ const mockClient = {
603
+ models: {
604
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
605
+ },
606
+ };
607
+
608
+ const models = await params.models({ client: mockClient as any });
609
+ const model = models.find((m) => m.id === 'model-with-timestamp');
610
+
611
+ // processModelCard converts created timestamp to releasedAt (date string)
612
+ expect(model?.releasedAt).toBeDefined();
613
+ expect(typeof model?.releasedAt).toBe('string');
614
+ });
615
+
616
+ it('should handle created timestamp as string', async () => {
617
+ const mockModelData: VercelAIGatewayModelCard[] = [
618
+ {
619
+ created: '2024-01-01',
620
+ id: 'model-with-string-timestamp',
621
+ tags: [],
622
+ },
623
+ ];
624
+
625
+ const mockClient = {
626
+ models: {
627
+ list: vi.fn().mockResolvedValue({ data: mockModelData }),
628
+ },
629
+ };
630
+
631
+ const models = await params.models({ client: mockClient as any });
632
+ const model = models.find((m) => m.id === 'model-with-string-timestamp');
633
+
634
+ // processModelCard converts created string to releasedAt
635
+ expect(model?.releasedAt).toBe('2024-01-01');
636
+ });
637
+
638
+ it('should handle API errors gracefully', async () => {
639
+ const mockClient = {
640
+ models: {
641
+ list: vi.fn().mockRejectedValue(new Error('API Error')),
642
+ },
643
+ };
644
+
645
+ await expect(params.models({ client: mockClient as any })).rejects.toThrow('API Error');
646
+ });
647
+ });
648
+
649
+ describe('formatPrice utility', () => {
650
+ it('should convert numeric price to per-million tokens', () => {
651
+ const result = formatPrice(0.000_005);
652
+ expect(result).toBe(5);
653
+ });
654
+
655
+ it('should convert string price to per-million tokens', () => {
656
+ const result = formatPrice('0.000003');
657
+ expect(result).toBe(3);
658
+ });
659
+
660
+ it('should handle zero price', () => {
661
+ const result = formatPrice(0);
662
+ expect(result).toBe(0);
663
+ });
664
+
665
+ it('should handle string zero price', () => {
666
+ const result = formatPrice('0');
667
+ expect(result).toBe(0);
668
+ });
669
+
670
+ it('should return undefined for undefined', () => {
671
+ const result = formatPrice(undefined);
672
+ expect(result).toBeUndefined();
673
+ });
674
+
675
+ it('should return undefined for null', () => {
676
+ const result = formatPrice(null as any);
677
+ expect(result).toBeUndefined();
678
+ });
679
+
680
+ it('should return undefined for invalid string', () => {
681
+ const result = formatPrice('not-a-number');
682
+ expect(result).toBeUndefined();
683
+ });
684
+
685
+ it('should handle empty string as zero', () => {
686
+ const result = formatPrice('');
687
+ expect(result).toBe(0);
688
+ });
689
+
690
+ it('should handle very small prices with precision', () => {
691
+ const result = formatPrice(0.000_000_1);
692
+ expect(result).toBe(0.1);
693
+ });
694
+
695
+ it('should handle large prices', () => {
696
+ const result = formatPrice(0.001);
697
+ expect(result).toBe(1000);
698
+ });
699
+
700
+ it('should use 5 significant digits precision', () => {
701
+ const result = formatPrice(0.000_012_345);
702
+ expect(result).toBe(12.345);
703
+ });
704
+
705
+ it('should handle scientific notation strings', () => {
706
+ const result = formatPrice('1.5e-5');
707
+ expect(result).toBe(15);
708
+ });
709
+ });
710
+ });