@lobehub/chat 1.136.13 → 1.137.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190) hide show
  1. package/.cursor/rules/add-setting-env.mdc +175 -0
  2. package/.cursor/rules/db-migrations.mdc +25 -0
  3. package/.env.example +7 -0
  4. package/CHANGELOG.md +50 -0
  5. package/Dockerfile +3 -2
  6. package/Dockerfile.database +15 -3
  7. package/Dockerfile.pglite +3 -2
  8. package/changelog/v1.json +18 -0
  9. package/docs/development/database-schema.dbml +1 -0
  10. package/docs/self-hosting/advanced/feature-flags.mdx +25 -15
  11. package/docs/self-hosting/advanced/feature-flags.zh-CN.mdx +25 -15
  12. package/docs/self-hosting/environment-variables/basic.mdx +12 -0
  13. package/docs/self-hosting/environment-variables/basic.zh-CN.mdx +12 -0
  14. package/locales/ar/setting.json +8 -0
  15. package/locales/bg-BG/setting.json +8 -0
  16. package/locales/de-DE/setting.json +8 -0
  17. package/locales/en-US/setting.json +8 -0
  18. package/locales/es-ES/setting.json +8 -0
  19. package/locales/fa-IR/setting.json +8 -0
  20. package/locales/fr-FR/setting.json +8 -0
  21. package/locales/it-IT/setting.json +8 -0
  22. package/locales/ja-JP/setting.json +8 -0
  23. package/locales/ko-KR/setting.json +8 -0
  24. package/locales/nl-NL/setting.json +8 -0
  25. package/locales/pl-PL/setting.json +8 -0
  26. package/locales/pt-BR/setting.json +8 -0
  27. package/locales/ru-RU/setting.json +8 -0
  28. package/locales/tr-TR/setting.json +8 -0
  29. package/locales/vi-VN/setting.json +8 -0
  30. package/locales/zh-CN/setting.json +8 -0
  31. package/locales/zh-TW/setting.json +8 -0
  32. package/package.json +1 -1
  33. package/packages/agent-runtime/examples/tools-calling.ts +4 -3
  34. package/packages/agent-runtime/src/core/__tests__/runtime.test.ts +559 -29
  35. package/packages/agent-runtime/src/core/runtime.ts +171 -43
  36. package/packages/agent-runtime/src/types/instruction.ts +32 -6
  37. package/packages/agent-runtime/src/types/runtime.ts +2 -2
  38. package/packages/agent-runtime/src/types/state.ts +1 -8
  39. package/packages/agent-runtime/vitest.config.mts +14 -0
  40. package/packages/const/src/settings/image.ts +8 -0
  41. package/packages/const/src/settings/index.ts +3 -0
  42. package/packages/context-engine/src/__tests__/pipeline.test.ts +485 -0
  43. package/packages/context-engine/src/base/__tests__/BaseProcessor.test.ts +381 -0
  44. package/packages/context-engine/src/base/__tests__/BaseProvider.test.ts +392 -0
  45. package/packages/context-engine/src/processors/__tests__/MessageCleanup.test.ts +346 -0
  46. package/packages/context-engine/src/processors/__tests__/ToolCall.test.ts +552 -0
  47. package/packages/database/migrations/0038_add_image_user_settings.sql +1 -0
  48. package/packages/database/migrations/meta/0038_snapshot.json +7580 -0
  49. package/packages/database/migrations/meta/_journal.json +7 -0
  50. package/packages/database/src/core/migrations.json +6 -0
  51. package/packages/database/src/models/user.ts +3 -1
  52. package/packages/database/src/schemas/user.ts +1 -0
  53. package/packages/file-loaders/src/loaders/docx/index.test.ts +0 -1
  54. package/packages/file-loaders/src/loaders/excel/__snapshots__/index.test.ts.snap +30 -0
  55. package/packages/file-loaders/src/loaders/excel/index.test.ts +8 -0
  56. package/packages/file-loaders/src/loaders/pptx/index.test.ts +25 -0
  57. package/packages/file-loaders/src/utils/parser-utils.test.ts +155 -0
  58. package/packages/file-loaders/vitest.config.mts +8 -0
  59. package/packages/model-runtime/CLAUDE.md +5 -0
  60. package/packages/model-runtime/docs/test-coverage.md +706 -0
  61. package/packages/model-runtime/src/core/ModelRuntime.test.ts +231 -0
  62. package/packages/model-runtime/src/core/RouterRuntime/createRuntime.ts +1 -1
  63. package/packages/model-runtime/src/core/openaiCompatibleFactory/createImage.test.ts +799 -0
  64. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.test.ts +188 -4
  65. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.ts +41 -10
  66. package/packages/model-runtime/src/core/streams/openai/__snapshots__/responsesStream.test.ts.snap +439 -0
  67. package/packages/model-runtime/src/core/streams/openai/openai.test.ts +789 -0
  68. package/packages/model-runtime/src/core/streams/openai/responsesStream.test.ts +551 -0
  69. package/packages/model-runtime/src/core/usageConverters/utils/computeChatCost.test.ts +230 -0
  70. package/packages/model-runtime/src/core/usageConverters/utils/computeImageCost.test.ts +334 -37
  71. package/packages/model-runtime/src/providerTestUtils.ts +148 -145
  72. package/packages/model-runtime/src/providers/ai302/index.test.ts +60 -0
  73. package/packages/model-runtime/src/providers/ai302/index.ts +9 -4
  74. package/packages/model-runtime/src/providers/ai360/index.test.ts +1213 -1
  75. package/packages/model-runtime/src/providers/ai360/index.ts +9 -4
  76. package/packages/model-runtime/src/providers/aihubmix/index.test.ts +73 -0
  77. package/packages/model-runtime/src/providers/aihubmix/index.ts +6 -9
  78. package/packages/model-runtime/src/providers/akashchat/index.test.ts +433 -3
  79. package/packages/model-runtime/src/providers/akashchat/index.ts +12 -7
  80. package/packages/model-runtime/src/providers/anthropic/generateObject.test.ts +183 -29
  81. package/packages/model-runtime/src/providers/anthropic/generateObject.ts +40 -24
  82. package/packages/model-runtime/src/providers/azureai/index.test.ts +102 -0
  83. package/packages/model-runtime/src/providers/baichuan/index.test.ts +416 -26
  84. package/packages/model-runtime/src/providers/baichuan/index.ts +23 -20
  85. package/packages/model-runtime/src/providers/bedrock/index.test.ts +420 -2
  86. package/packages/model-runtime/src/providers/cerebras/index.test.ts +465 -0
  87. package/packages/model-runtime/src/providers/cerebras/index.ts +8 -3
  88. package/packages/model-runtime/src/providers/cohere/index.test.ts +1074 -1
  89. package/packages/model-runtime/src/providers/cohere/index.ts +8 -3
  90. package/packages/model-runtime/src/providers/cometapi/index.test.ts +439 -3
  91. package/packages/model-runtime/src/providers/cometapi/index.ts +8 -3
  92. package/packages/model-runtime/src/providers/deepseek/index.test.ts +116 -1
  93. package/packages/model-runtime/src/providers/deepseek/index.ts +8 -3
  94. package/packages/model-runtime/src/providers/fireworksai/index.test.ts +264 -3
  95. package/packages/model-runtime/src/providers/fireworksai/index.ts +8 -3
  96. package/packages/model-runtime/src/providers/giteeai/index.test.ts +325 -3
  97. package/packages/model-runtime/src/providers/giteeai/index.ts +23 -6
  98. package/packages/model-runtime/src/providers/github/index.test.ts +532 -3
  99. package/packages/model-runtime/src/providers/github/index.ts +8 -3
  100. package/packages/model-runtime/src/providers/groq/index.test.ts +344 -31
  101. package/packages/model-runtime/src/providers/groq/index.ts +8 -3
  102. package/packages/model-runtime/src/providers/higress/index.test.ts +142 -0
  103. package/packages/model-runtime/src/providers/higress/index.ts +8 -3
  104. package/packages/model-runtime/src/providers/huggingface/index.test.ts +612 -1
  105. package/packages/model-runtime/src/providers/huggingface/index.ts +9 -4
  106. package/packages/model-runtime/src/providers/hunyuan/index.test.ts +365 -1
  107. package/packages/model-runtime/src/providers/hunyuan/index.ts +9 -3
  108. package/packages/model-runtime/src/providers/infiniai/index.test.ts +71 -0
  109. package/packages/model-runtime/src/providers/internlm/index.test.ts +369 -2
  110. package/packages/model-runtime/src/providers/internlm/index.ts +10 -5
  111. package/packages/model-runtime/src/providers/jina/index.test.ts +164 -3
  112. package/packages/model-runtime/src/providers/jina/index.ts +8 -3
  113. package/packages/model-runtime/src/providers/lmstudio/index.test.ts +182 -3
  114. package/packages/model-runtime/src/providers/lmstudio/index.ts +8 -3
  115. package/packages/model-runtime/src/providers/mistral/index.test.ts +779 -27
  116. package/packages/model-runtime/src/providers/mistral/index.ts +8 -3
  117. package/packages/model-runtime/src/providers/modelscope/index.test.ts +232 -1
  118. package/packages/model-runtime/src/providers/modelscope/index.ts +8 -3
  119. package/packages/model-runtime/src/providers/moonshot/index.test.ts +489 -2
  120. package/packages/model-runtime/src/providers/moonshot/index.ts +8 -3
  121. package/packages/model-runtime/src/providers/nebius/index.test.ts +381 -3
  122. package/packages/model-runtime/src/providers/nebius/index.ts +8 -3
  123. package/packages/model-runtime/src/providers/newapi/index.test.ts +667 -3
  124. package/packages/model-runtime/src/providers/newapi/index.ts +6 -3
  125. package/packages/model-runtime/src/providers/nvidia/index.test.ts +168 -1
  126. package/packages/model-runtime/src/providers/nvidia/index.ts +12 -7
  127. package/packages/model-runtime/src/providers/ollama/index.test.ts +797 -1
  128. package/packages/model-runtime/src/providers/ollama/index.ts +8 -0
  129. package/packages/model-runtime/src/providers/ollamacloud/index.test.ts +411 -0
  130. package/packages/model-runtime/src/providers/ollamacloud/index.ts +8 -3
  131. package/packages/model-runtime/src/providers/openai/index.test.ts +171 -2
  132. package/packages/model-runtime/src/providers/openai/index.ts +8 -3
  133. package/packages/model-runtime/src/providers/openrouter/index.test.ts +1647 -95
  134. package/packages/model-runtime/src/providers/openrouter/index.ts +12 -7
  135. package/packages/model-runtime/src/providers/qiniu/index.test.ts +294 -1
  136. package/packages/model-runtime/src/providers/qiniu/index.ts +8 -3
  137. package/packages/model-runtime/src/providers/search1api/index.test.ts +1131 -11
  138. package/packages/model-runtime/src/providers/search1api/index.ts +10 -4
  139. package/packages/model-runtime/src/providers/sensenova/index.test.ts +1069 -1
  140. package/packages/model-runtime/src/providers/sensenova/index.ts +8 -3
  141. package/packages/model-runtime/src/providers/siliconcloud/index.test.ts +196 -0
  142. package/packages/model-runtime/src/providers/siliconcloud/index.ts +8 -3
  143. package/packages/model-runtime/src/providers/spark/index.test.ts +293 -1
  144. package/packages/model-runtime/src/providers/spark/index.ts +8 -3
  145. package/packages/model-runtime/src/providers/stepfun/index.test.ts +322 -3
  146. package/packages/model-runtime/src/providers/stepfun/index.ts +8 -3
  147. package/packages/model-runtime/src/providers/tencentcloud/index.test.ts +182 -3
  148. package/packages/model-runtime/src/providers/tencentcloud/index.ts +8 -3
  149. package/packages/model-runtime/src/providers/togetherai/index.test.ts +359 -4
  150. package/packages/model-runtime/src/providers/togetherai/index.ts +12 -5
  151. package/packages/model-runtime/src/providers/v0/index.test.ts +341 -0
  152. package/packages/model-runtime/src/providers/v0/index.ts +20 -6
  153. package/packages/model-runtime/src/providers/vercelaigateway/index.test.ts +710 -0
  154. package/packages/model-runtime/src/providers/vercelaigateway/index.ts +19 -13
  155. package/packages/model-runtime/src/providers/vllm/index.test.ts +45 -1
  156. package/packages/model-runtime/src/providers/volcengine/index.test.ts +75 -0
  157. package/packages/model-runtime/src/providers/wenxin/index.test.ts +144 -1
  158. package/packages/model-runtime/src/providers/wenxin/index.ts +8 -3
  159. package/packages/model-runtime/src/providers/xai/index.test.ts +105 -1
  160. package/packages/model-runtime/src/providers/xinference/index.test.ts +70 -1
  161. package/packages/model-runtime/src/providers/zeroone/index.test.ts +327 -3
  162. package/packages/model-runtime/src/providers/zeroone/index.ts +23 -6
  163. package/packages/model-runtime/src/providers/zhipu/index.test.ts +908 -236
  164. package/packages/model-runtime/src/providers/zhipu/index.ts +8 -3
  165. package/packages/model-runtime/src/types/structureOutput.ts +5 -1
  166. package/packages/model-runtime/vitest.config.mts +7 -1
  167. package/packages/types/src/aiChat.ts +20 -2
  168. package/packages/types/src/serverConfig.ts +7 -1
  169. package/packages/types/src/tool/index.ts +1 -0
  170. package/packages/types/src/tool/tool.ts +33 -0
  171. package/packages/types/src/user/settings/image.ts +3 -0
  172. package/packages/types/src/user/settings/index.ts +3 -0
  173. package/src/app/[variants]/(main)/settings/_layout/SettingsContent.tsx +3 -0
  174. package/src/app/[variants]/(main)/settings/hooks/useCategory.tsx +8 -3
  175. package/src/app/[variants]/(main)/settings/image/index.tsx +74 -0
  176. package/src/components/FormInput/FormSliderWithInput.tsx +40 -0
  177. package/src/components/FormInput/index.ts +1 -0
  178. package/src/envs/image.ts +27 -0
  179. package/src/features/Conversation/Messages/Assistant/index.tsx +1 -1
  180. package/src/features/Conversation/Messages/User/index.tsx +2 -2
  181. package/src/hooks/useFetchAiImageConfig.ts +12 -17
  182. package/src/locales/default/setting.ts +8 -0
  183. package/src/server/globalConfig/index.ts +5 -0
  184. package/src/server/routers/lambda/aiChat.ts +2 -0
  185. package/src/store/global/initialState.ts +1 -0
  186. package/src/store/image/slices/generationConfig/action.test.ts +17 -0
  187. package/src/store/image/slices/generationConfig/action.ts +18 -21
  188. package/src/store/image/slices/generationConfig/initialState.ts +3 -2
  189. package/src/store/user/slices/common/action.ts +1 -0
  190. package/src/store/user/slices/settings/selectors/settings.ts +3 -0
@@ -1,19 +1,1139 @@
1
1
  // @vitest-environment node
2
- import { ModelProvider } from 'model-bank';
2
+ import { LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
3
+ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
3
4
 
4
5
  import { testProvider } from '../../providerTestUtils';
5
- import { LobeSearch1API } from './index';
6
-
7
- const provider = ModelProvider.Search1API;
8
- const defaultBaseURL = 'https://api.search1api.com/v1';
6
+ import { LobeSearch1API, params } from './index';
9
7
 
10
8
  testProvider({
9
+ provider: 'search1api',
10
+ defaultBaseURL: 'https://api.search1api.com/v1',
11
+ chatModel: 'gpt-4o-mini',
11
12
  Runtime: LobeSearch1API,
12
- provider,
13
- defaultBaseURL,
14
13
  chatDebugEnv: 'DEBUG_SEARCH1API_CHAT_COMPLETION',
15
- chatModel: 'gpt-4o-mini',
16
- test: {
17
- skipAPICall: true,
18
- },
14
+ });
15
+
16
+ // Mock the console.error to avoid polluting test output
17
+ vi.spyOn(console, 'error').mockImplementation(() => {});
18
+
19
+ let instance: LobeOpenAICompatibleRuntime;
20
+
21
+ beforeEach(() => {
22
+ instance = new LobeSearch1API({ apiKey: 'test' });
23
+
24
+ // Use vi.spyOn to mock chat.completions.create method
25
+ vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
26
+ new ReadableStream() as any,
27
+ );
28
+ });
29
+
30
+ afterEach(() => {
31
+ vi.clearAllMocks();
32
+ });
33
+
34
+ describe('LobeSearch1API - custom features', () => {
35
+ describe('Debug Configuration', () => {
36
+ it('should disable debug by default', () => {
37
+ delete process.env.DEBUG_SEARCH1API_CHAT_COMPLETION;
38
+ const result = params.debug.chatCompletion();
39
+ expect(result).toBe(false);
40
+ });
41
+
42
+ it('should enable debug when env is set', () => {
43
+ process.env.DEBUG_SEARCH1API_CHAT_COMPLETION = '1';
44
+ const result = params.debug.chatCompletion();
45
+ expect(result).toBe(true);
46
+ delete process.env.DEBUG_SEARCH1API_CHAT_COMPLETION;
47
+ });
48
+ });
49
+
50
+ describe('handlePayload', () => {
51
+ describe('presence_penalty handling', () => {
52
+ it('should use presence_penalty when it is non-zero', async () => {
53
+ await instance.chat({
54
+ messages: [{ content: 'Hello', role: 'user' }],
55
+ model: 'gpt-4o-mini',
56
+ presence_penalty: 0.5,
57
+ });
58
+
59
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
60
+ expect.objectContaining({
61
+ presence_penalty: 0.5,
62
+ }),
63
+ expect.anything(),
64
+ );
65
+ });
66
+
67
+ it('should use presence_penalty when it is negative', async () => {
68
+ await instance.chat({
69
+ messages: [{ content: 'Hello', role: 'user' }],
70
+ model: 'gpt-4o-mini',
71
+ presence_penalty: -0.5,
72
+ });
73
+
74
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
75
+ expect.objectContaining({
76
+ presence_penalty: -0.5,
77
+ }),
78
+ expect.anything(),
79
+ );
80
+ });
81
+
82
+ it('should use presence_penalty when it is 1', async () => {
83
+ await instance.chat({
84
+ messages: [{ content: 'Hello', role: 'user' }],
85
+ model: 'gpt-4o-mini',
86
+ presence_penalty: 1,
87
+ });
88
+
89
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
90
+ expect.objectContaining({
91
+ presence_penalty: 1,
92
+ }),
93
+ expect.anything(),
94
+ );
95
+ });
96
+
97
+ it('should not include frequency_penalty when presence_penalty is non-zero', async () => {
98
+ await instance.chat({
99
+ messages: [{ content: 'Hello', role: 'user' }],
100
+ model: 'gpt-4o-mini',
101
+ presence_penalty: 0.5,
102
+ frequency_penalty: 0.8,
103
+ });
104
+
105
+ const call = (instance['client'].chat.completions.create as any).mock.calls[0][0];
106
+ expect(call.presence_penalty).toBe(0.5);
107
+ expect(call.frequency_penalty).toBeUndefined();
108
+ });
109
+ });
110
+
111
+ describe('frequency_penalty handling', () => {
112
+ it('should use frequency_penalty when presence_penalty is 0', async () => {
113
+ await instance.chat({
114
+ messages: [{ content: 'Hello', role: 'user' }],
115
+ model: 'gpt-4o-mini',
116
+ presence_penalty: 0,
117
+ frequency_penalty: 0.8,
118
+ });
119
+
120
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
121
+ expect.objectContaining({
122
+ frequency_penalty: 0.8,
123
+ }),
124
+ expect.anything(),
125
+ );
126
+ });
127
+
128
+ it('should use default frequency_penalty of 1 when presence_penalty is 0 and frequency_penalty is not provided', async () => {
129
+ await instance.chat({
130
+ messages: [{ content: 'Hello', role: 'user' }],
131
+ model: 'gpt-4o-mini',
132
+ presence_penalty: 0,
133
+ });
134
+
135
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
136
+ expect.objectContaining({
137
+ frequency_penalty: 1,
138
+ }),
139
+ expect.anything(),
140
+ );
141
+ });
142
+
143
+ it('should use default frequency_penalty of 1 when presence_penalty is undefined and frequency_penalty is not provided', async () => {
144
+ await instance.chat({
145
+ messages: [{ content: 'Hello', role: 'user' }],
146
+ model: 'gpt-4o-mini',
147
+ });
148
+
149
+ const call = (instance['client'].chat.completions.create as any).mock.calls[0][0];
150
+ // presence_penalty is undefined (not 0), so no frequency_penalty is set
151
+ expect(call.presence_penalty).toBeUndefined();
152
+ });
153
+
154
+ it('should use frequency_penalty of 0 when explicitly set with presence_penalty 0', async () => {
155
+ await instance.chat({
156
+ messages: [{ content: 'Hello', role: 'user' }],
157
+ model: 'gpt-4o-mini',
158
+ presence_penalty: 0,
159
+ frequency_penalty: 0,
160
+ });
161
+
162
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
163
+ expect.objectContaining({
164
+ frequency_penalty: 1, // 0 is falsy, so default to 1
165
+ }),
166
+ expect.anything(),
167
+ );
168
+ });
169
+
170
+ it('should not include presence_penalty when presence_penalty is 0', async () => {
171
+ await instance.chat({
172
+ messages: [{ content: 'Hello', role: 'user' }],
173
+ model: 'gpt-4o-mini',
174
+ presence_penalty: 0,
175
+ frequency_penalty: 0.8,
176
+ });
177
+
178
+ const call = (instance['client'].chat.completions.create as any).mock.calls[0][0];
179
+ expect(call.frequency_penalty).toBe(0.8);
180
+ expect(call.presence_penalty).toBeUndefined();
181
+ });
182
+ });
183
+
184
+ describe('temperature handling', () => {
185
+ it('should preserve temperature when it is less than 2', async () => {
186
+ await instance.chat({
187
+ messages: [{ content: 'Hello', role: 'user' }],
188
+ model: 'gpt-4o-mini',
189
+ temperature: 0.7,
190
+ });
191
+
192
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
193
+ expect.objectContaining({
194
+ temperature: 0.7,
195
+ }),
196
+ expect.anything(),
197
+ );
198
+ });
199
+
200
+ it('should preserve temperature when it is 0', async () => {
201
+ await instance.chat({
202
+ messages: [{ content: 'Hello', role: 'user' }],
203
+ model: 'gpt-4o-mini',
204
+ temperature: 0,
205
+ });
206
+
207
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
208
+ expect.objectContaining({
209
+ temperature: 0,
210
+ }),
211
+ expect.anything(),
212
+ );
213
+ });
214
+
215
+ it('should preserve temperature when it is 1', async () => {
216
+ await instance.chat({
217
+ messages: [{ content: 'Hello', role: 'user' }],
218
+ model: 'gpt-4o-mini',
219
+ temperature: 1,
220
+ });
221
+
222
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
223
+ expect.objectContaining({
224
+ temperature: 1,
225
+ }),
226
+ expect.anything(),
227
+ );
228
+ });
229
+
230
+ it('should preserve temperature when it is 1.99', async () => {
231
+ await instance.chat({
232
+ messages: [{ content: 'Hello', role: 'user' }],
233
+ model: 'gpt-4o-mini',
234
+ temperature: 1.99,
235
+ });
236
+
237
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
238
+ expect.objectContaining({
239
+ temperature: 1.99,
240
+ }),
241
+ expect.anything(),
242
+ );
243
+ });
244
+
245
+ it('should set temperature to undefined when it is 2', async () => {
246
+ await instance.chat({
247
+ messages: [{ content: 'Hello', role: 'user' }],
248
+ model: 'gpt-4o-mini',
249
+ temperature: 2,
250
+ });
251
+
252
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
253
+ expect.objectContaining({
254
+ temperature: undefined,
255
+ }),
256
+ expect.anything(),
257
+ );
258
+ });
259
+
260
+ it('should set temperature to undefined when it is greater than 2', async () => {
261
+ await instance.chat({
262
+ messages: [{ content: 'Hello', role: 'user' }],
263
+ model: 'gpt-4o-mini',
264
+ temperature: 2.5,
265
+ });
266
+
267
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
268
+ expect.objectContaining({
269
+ temperature: undefined,
270
+ }),
271
+ expect.anything(),
272
+ );
273
+ });
274
+
275
+ it('should set temperature to undefined when it is not provided', async () => {
276
+ await instance.chat({
277
+ messages: [{ content: 'Hello', role: 'user' }],
278
+ model: 'gpt-4o-mini',
279
+ });
280
+
281
+ const call = (instance['client'].chat.completions.create as any).mock.calls[0][0];
282
+ expect(call.temperature).toBeUndefined();
283
+ });
284
+ });
285
+
286
+ describe('stream handling', () => {
287
+ it('should default stream to true when not specified', async () => {
288
+ await instance.chat({
289
+ messages: [{ content: 'Hello', role: 'user' }],
290
+ model: 'gpt-4o-mini',
291
+ });
292
+
293
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
294
+ expect.objectContaining({
295
+ stream: true,
296
+ }),
297
+ expect.anything(),
298
+ );
299
+ });
300
+
301
+ it('should preserve stream value when explicitly set to false', async () => {
302
+ await instance.chat({
303
+ messages: [{ content: 'Hello', role: 'user' }],
304
+ model: 'gpt-4o-mini',
305
+ stream: false,
306
+ });
307
+
308
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
309
+ expect.objectContaining({
310
+ stream: false,
311
+ }),
312
+ expect.anything(),
313
+ );
314
+ });
315
+
316
+ it('should preserve stream value when explicitly set to true', async () => {
317
+ await instance.chat({
318
+ messages: [{ content: 'Hello', role: 'user' }],
319
+ model: 'gpt-4o-mini',
320
+ stream: true,
321
+ });
322
+
323
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
324
+ expect.objectContaining({
325
+ stream: true,
326
+ }),
327
+ expect.anything(),
328
+ );
329
+ });
330
+ });
331
+
332
+ describe('other properties preservation', () => {
333
+ it('should preserve other payload properties', async () => {
334
+ await instance.chat({
335
+ messages: [{ content: 'Hello', role: 'user' }],
336
+ model: 'gpt-4o-mini',
337
+ max_tokens: 100,
338
+ top_p: 0.9,
339
+ });
340
+
341
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
342
+ expect.objectContaining({
343
+ messages: [{ content: 'Hello', role: 'user' }],
344
+ model: 'gpt-4o-mini',
345
+ max_tokens: 100,
346
+ top_p: 0.9,
347
+ }),
348
+ expect.anything(),
349
+ );
350
+ });
351
+
352
+ it('should preserve tools in payload', async () => {
353
+ await instance.chat({
354
+ messages: [{ content: 'Hello', role: 'user' }],
355
+ model: 'gpt-4o-mini',
356
+ tools: [
357
+ {
358
+ type: 'function' as const,
359
+ function: { name: 'tool1', description: '', parameters: {} },
360
+ },
361
+ ],
362
+ });
363
+
364
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
365
+ expect.objectContaining({
366
+ tools: [
367
+ {
368
+ type: 'function' as const,
369
+ function: { name: 'tool1', description: '', parameters: {} },
370
+ },
371
+ ],
372
+ }),
373
+ expect.anything(),
374
+ );
375
+ });
376
+
377
+ it('should preserve messages with multiple roles', async () => {
378
+ const messages = [
379
+ { content: 'Hello', role: 'user' as const },
380
+ { content: 'Hi there', role: 'assistant' as const },
381
+ { content: 'How are you?', role: 'user' as const },
382
+ ];
383
+
384
+ await instance.chat({
385
+ messages,
386
+ model: 'gpt-4o-mini',
387
+ });
388
+
389
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
390
+ expect.objectContaining({
391
+ messages,
392
+ }),
393
+ expect.anything(),
394
+ );
395
+ });
396
+ });
397
+
398
+ describe('combined parameter scenarios', () => {
399
+ it('should handle presence_penalty with temperature < 2', async () => {
400
+ await instance.chat({
401
+ messages: [{ content: 'Hello', role: 'user' }],
402
+ model: 'gpt-4o-mini',
403
+ presence_penalty: 0.5,
404
+ temperature: 0.8,
405
+ });
406
+
407
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
408
+ expect.objectContaining({
409
+ presence_penalty: 0.5,
410
+ temperature: 0.8,
411
+ }),
412
+ expect.anything(),
413
+ );
414
+ });
415
+
416
+ it('should handle presence_penalty with temperature >= 2', async () => {
417
+ await instance.chat({
418
+ messages: [{ content: 'Hello', role: 'user' }],
419
+ model: 'gpt-4o-mini',
420
+ presence_penalty: 0.5,
421
+ temperature: 2.5,
422
+ });
423
+
424
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
425
+ expect.objectContaining({
426
+ presence_penalty: 0.5,
427
+ temperature: undefined,
428
+ }),
429
+ expect.anything(),
430
+ );
431
+ });
432
+
433
+ it('should handle frequency_penalty with temperature < 2', async () => {
434
+ await instance.chat({
435
+ messages: [{ content: 'Hello', role: 'user' }],
436
+ model: 'gpt-4o-mini',
437
+ presence_penalty: 0,
438
+ frequency_penalty: 0.8,
439
+ temperature: 0.7,
440
+ });
441
+
442
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
443
+ expect.objectContaining({
444
+ frequency_penalty: 0.8,
445
+ temperature: 0.7,
446
+ }),
447
+ expect.anything(),
448
+ );
449
+ });
450
+
451
+ it('should handle all parameters together', async () => {
452
+ await instance.chat({
453
+ messages: [{ content: 'Hello', role: 'user' }],
454
+ model: 'gpt-4o-mini',
455
+ presence_penalty: 0.5,
456
+ frequency_penalty: 0.8, // Should be ignored
457
+ temperature: 1.5,
458
+ max_tokens: 200,
459
+ top_p: 0.95,
460
+ stream: false,
461
+ });
462
+
463
+ const call = (instance['client'].chat.completions.create as any).mock.calls[0][0];
464
+ expect(call.presence_penalty).toBe(0.5);
465
+ expect(call.frequency_penalty).toBeUndefined();
466
+ expect(call.temperature).toBe(1.5);
467
+ expect(call.max_tokens).toBe(200);
468
+ expect(call.top_p).toBe(0.95);
469
+ expect(call.stream).toBe(false);
470
+ });
471
+ });
472
+ });
473
+
474
+ describe('handlePayload edge cases', () => {
475
+ it('should handle negative temperature values', async () => {
476
+ await instance.chat({
477
+ messages: [{ content: 'Hello', role: 'user' }],
478
+ model: 'gpt-4o-mini',
479
+ temperature: -1,
480
+ });
481
+
482
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
483
+ expect.objectContaining({
484
+ temperature: -1,
485
+ }),
486
+ expect.anything(),
487
+ );
488
+ });
489
+
490
+ it('should handle very large temperature values', async () => {
491
+ await instance.chat({
492
+ messages: [{ content: 'Hello', role: 'user' }],
493
+ model: 'gpt-4o-mini',
494
+ temperature: 100,
495
+ });
496
+
497
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
498
+ expect.objectContaining({
499
+ temperature: undefined,
500
+ }),
501
+ expect.anything(),
502
+ );
503
+ });
504
+
505
+ it('should handle edge case temperature exactly at 2', async () => {
506
+ await instance.chat({
507
+ messages: [{ content: 'Hello', role: 'user' }],
508
+ model: 'gpt-4o-mini',
509
+ temperature: 2.0,
510
+ });
511
+
512
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
513
+ expect.objectContaining({
514
+ temperature: undefined,
515
+ }),
516
+ expect.anything(),
517
+ );
518
+ });
519
+
520
+ it('should handle negative presence_penalty', async () => {
521
+ await instance.chat({
522
+ messages: [{ content: 'Hello', role: 'user' }],
523
+ model: 'gpt-4o-mini',
524
+ presence_penalty: -2,
525
+ });
526
+
527
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
528
+ expect.objectContaining({
529
+ presence_penalty: -2,
530
+ }),
531
+ expect.anything(),
532
+ );
533
+ });
534
+
535
+ it('should handle negative frequency_penalty', async () => {
536
+ await instance.chat({
537
+ messages: [{ content: 'Hello', role: 'user' }],
538
+ model: 'gpt-4o-mini',
539
+ presence_penalty: 0,
540
+ frequency_penalty: -1,
541
+ });
542
+
543
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
544
+ expect.objectContaining({
545
+ frequency_penalty: -1,
546
+ }),
547
+ expect.anything(),
548
+ );
549
+ });
550
+
551
+ it('should handle very small positive presence_penalty', async () => {
552
+ await instance.chat({
553
+ messages: [{ content: 'Hello', role: 'user' }],
554
+ model: 'gpt-4o-mini',
555
+ presence_penalty: 0.001,
556
+ });
557
+
558
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
559
+ expect.objectContaining({
560
+ presence_penalty: 0.001,
561
+ }),
562
+ expect.anything(),
563
+ );
564
+ });
565
+
566
+ it('should handle empty messages array', async () => {
567
+ await instance.chat({
568
+ messages: [],
569
+ model: 'gpt-4o-mini',
570
+ });
571
+
572
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
573
+ expect.objectContaining({
574
+ messages: [],
575
+ }),
576
+ expect.anything(),
577
+ );
578
+ });
579
+
580
+ it('should handle system messages', async () => {
581
+ await instance.chat({
582
+ messages: [
583
+ { content: 'You are a helpful assistant', role: 'system' },
584
+ { content: 'Hello', role: 'user' },
585
+ ],
586
+ model: 'gpt-4o-mini',
587
+ });
588
+
589
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
590
+ expect.objectContaining({
591
+ messages: [
592
+ { content: 'You are a helpful assistant', role: 'system' },
593
+ { content: 'Hello', role: 'user' },
594
+ ],
595
+ }),
596
+ expect.anything(),
597
+ );
598
+ });
599
+
600
+ it('should handle response_format parameter', async () => {
601
+ await instance.chat({
602
+ messages: [{ content: 'Hello', role: 'user' }],
603
+ model: 'gpt-4o-mini',
604
+ response_format: { type: 'json_object' },
605
+ });
606
+
607
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
608
+ expect.objectContaining({
609
+ response_format: { type: 'json_object' },
610
+ }),
611
+ expect.anything(),
612
+ );
613
+ });
614
+
615
+ it('should handle seed parameter', async () => {
616
+ await instance.chat({
617
+ messages: [{ content: 'Hello', role: 'user' }],
618
+ model: 'gpt-4o-mini',
619
+ seed: 12345,
620
+ } as any);
621
+
622
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
623
+ expect.objectContaining({
624
+ seed: 12345,
625
+ }),
626
+ expect.anything(),
627
+ );
628
+ });
629
+
630
+ it('should handle stop parameter as string', async () => {
631
+ await instance.chat({
632
+ messages: [{ content: 'Hello', role: 'user' }],
633
+ model: 'gpt-4o-mini',
634
+ stop: 'STOP',
635
+ } as any);
636
+
637
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
638
+ expect.objectContaining({
639
+ stop: 'STOP',
640
+ }),
641
+ expect.anything(),
642
+ );
643
+ });
644
+
645
+ it('should handle stop parameter as array', async () => {
646
+ await instance.chat({
647
+ messages: [{ content: 'Hello', role: 'user' }],
648
+ model: 'gpt-4o-mini',
649
+ stop: ['STOP', 'END'],
650
+ } as any);
651
+
652
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
653
+ expect.objectContaining({
654
+ stop: ['STOP', 'END'],
655
+ }),
656
+ expect.anything(),
657
+ );
658
+ });
659
+
660
+ it('should handle logit_bias parameter', async () => {
661
+ await instance.chat({
662
+ messages: [{ content: 'Hello', role: 'user' }],
663
+ model: 'gpt-4o-mini',
664
+ logit_bias: { '50256': -100 },
665
+ } as any);
666
+
667
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
668
+ expect.objectContaining({
669
+ logit_bias: { '50256': -100 },
670
+ }),
671
+ expect.anything(),
672
+ );
673
+ });
674
+
675
+ it('should handle tool_choice parameter', async () => {
676
+ await instance.chat({
677
+ messages: [{ content: 'Hello', role: 'user' }],
678
+ model: 'gpt-4o-mini',
679
+ tools: [
680
+ {
681
+ type: 'function' as const,
682
+ function: { name: 'get_weather', description: '', parameters: {} },
683
+ },
684
+ ],
685
+ tool_choice: 'auto',
686
+ });
687
+
688
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
689
+ expect.objectContaining({
690
+ tool_choice: 'auto',
691
+ }),
692
+ expect.anything(),
693
+ );
694
+ });
695
+
696
+ it('should handle parallel_tool_calls parameter', async () => {
697
+ await instance.chat({
698
+ messages: [{ content: 'Hello', role: 'user' }],
699
+ model: 'gpt-4o-mini',
700
+ tools: [
701
+ {
702
+ type: 'function' as const,
703
+ function: { name: 'tool1', description: '', parameters: {} },
704
+ },
705
+ ],
706
+ parallel_tool_calls: false,
707
+ } as any);
708
+
709
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
710
+ expect.objectContaining({
711
+ parallel_tool_calls: false,
712
+ }),
713
+ expect.anything(),
714
+ );
715
+ });
716
+ });
717
+
718
// Tests for the provider's `models` factory. The assertions below show it
// lists models via the OpenAI-compatible `/models` endpoint and merges each
// entry with metadata (displayName, abilities, context window) from
// LOBE_DEFAULT_MODEL_LIST, falling back to disabled/feature-less defaults
// for unknown ids.
describe('models', () => {
  // Minimal stand-in for the SDK client; only `models.list` is exercised.
  const mockClient = {
    models: {
      list: vi.fn(),
    },
  };

  beforeEach(() => {
    vi.clearAllMocks();
  });

  it('should fetch and process models from API', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: 'gpt-4o-mini' }, { id: 'gpt-4o' }, { id: 'claude-3-5-sonnet-20241022' }],
    });

    const models = await params.models({ client: mockClient as any });

    expect(models).toHaveLength(3);
    expect(models[0]).toMatchObject({
      id: 'gpt-4o-mini',
    });
    expect(models[1]).toMatchObject({
      id: 'gpt-4o',
    });
    expect(models[2]).toMatchObject({
      id: 'claude-3-5-sonnet-20241022',
    });
  });

  it('should merge with known model list for all properties', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: 'gpt-4o-mini' }],
    });

    const models = await params.models({ client: mockClient as any });

    expect(models).toHaveLength(1);
    // Should have properties from LOBE_DEFAULT_MODEL_LIST
    expect(models[0].displayName).toBeDefined();
    expect(models[0].contextWindowTokens).toBeDefined();
    expect(models[0].functionCall).toBe(true);
    expect(models[0].vision).toBe(true);
    // Check that enabled is defined
    expect(models[0].enabled).toBeDefined();
  });

  it('should handle case-insensitive model matching', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: 'GPT-4O-MINI' }],
    });

    const models = await params.models({ client: mockClient as any });

    // The original casing is kept on the returned id...
    expect(models).toHaveLength(1);
    expect(models[0].id).toBe('GPT-4O-MINI');
    // Should match with lowercase in LOBE_DEFAULT_MODEL_LIST
    expect(models[0].displayName).toBeDefined();
    expect(models[0].enabled).toBeDefined();
  });

  it('should handle models not in known model list', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: 'unknown-custom-model' }],
    });

    const models = await params.models({ client: mockClient as any });

    // Unknown ids fall back to a disabled model with no abilities.
    expect(models).toHaveLength(1);
    expect(models[0]).toMatchObject({
      id: 'unknown-custom-model',
      displayName: undefined,
      enabled: false,
      contextWindowTokens: undefined,
      functionCall: false,
      vision: false,
      reasoning: false,
    });
  });

  it('should handle empty model list', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [],
    });

    const models = await params.models({ client: mockClient as any });

    expect(models).toEqual([]);
  });

  it('should preserve all known model abilities', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [
        { id: 'gpt-4o-mini' },
        { id: 'claude-3-5-sonnet-20241022' },
        { id: 'deepseek-chat' },
      ],
    });

    const models = await params.models({ client: mockClient as any });

    // Every returned model carries the full ability/metadata shape.
    expect(models.length).toBe(3);
    models.forEach((model) => {
      expect(model).toHaveProperty('functionCall');
      expect(model).toHaveProperty('vision');
      expect(model).toHaveProperty('reasoning');
      expect(model).toHaveProperty('contextWindowTokens');
      expect(model).toHaveProperty('displayName');
      expect(model).toHaveProperty('enabled');
    });
  });

  it('should handle mix of known and unknown models', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [
        { id: 'gpt-4o-mini' },
        { id: 'unknown-model-1' },
        { id: 'claude-3-5-sonnet-20241022' },
        { id: 'unknown-model-2' },
      ],
    });

    const models = await params.models({ client: mockClient as any });

    expect(models).toHaveLength(4);

    // Known models should have displayName
    expect(models[0].displayName).toBeDefined();
    expect(models[2].displayName).toBeDefined();

    // Unknown models should have undefined displayName
    expect(models[1].displayName).toBeUndefined();
    expect(models[3].displayName).toBeUndefined();
  });

  it('should preserve model id exactly as returned from API', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [
        { id: 'Model-With-Mixed-CASE' },
        { id: 'model-with-dashes' },
        { id: 'model_with_underscores' },
      ],
    });

    const models = await params.models({ client: mockClient as any });

    expect(models).toHaveLength(3);
    expect(models[0].id).toBe('Model-With-Mixed-CASE');
    expect(models[1].id).toBe('model-with-dashes');
    expect(models[2].id).toBe('model_with_underscores');
  });

  it('should handle models with special characters in id', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: 'model:v1' }, { id: 'model@latest' }, { id: 'model/variant' }],
    });

    const models = await params.models({ client: mockClient as any });

    expect(models).toHaveLength(3);
    expect(models[0].id).toBe('model:v1');
    expect(models[1].id).toBe('model@latest');
    expect(models[2].id).toBe('model/variant');
  });

  it('should return models with correct structure', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: 'test-model' }],
    });

    const models = await params.models({ client: mockClient as any });

    expect(models[0]).toHaveProperty('id');
    expect(models[0]).toHaveProperty('contextWindowTokens');
    expect(models[0]).toHaveProperty('displayName');
    expect(models[0]).toHaveProperty('enabled');
    expect(models[0]).toHaveProperty('functionCall');
    expect(models[0]).toHaveProperty('vision');
    expect(models[0]).toHaveProperty('reasoning');
  });

  it('should filter out falsy values', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: 'valid-model' }, null, undefined, { id: 'another-valid-model' }],
    });

    const models = await params.models({ client: mockClient as any });

    // Should only include valid models
    expect(models.length).toBeGreaterThan(0);
    models.forEach((model) => {
      expect(model).toBeTruthy();
      expect(model.id).toBeTruthy();
    });
  });

  it('should handle vision models from known list', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: 'gpt-4o' }],
    });

    const models = await params.models({ client: mockClient as any });

    expect(models).toHaveLength(1);
    expect(models[0].vision).toBe(true);
  });

  it('should handle reasoning models from known list', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: 'deepseek-reasoner' }],
    });

    const models = await params.models({ client: mockClient as any });

    expect(models).toHaveLength(1);
    // Check if the model has reasoning capability based on known list
    expect(models[0]).toHaveProperty('reasoning');
  });

  it('should filter out models without id', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [
        { id: 'valid-model' },
        { id: '' }, // Empty id
        { id: null }, // Null id
        { id: undefined }, // Undefined id
        { name: 'no-id-model' }, // Missing id
        { id: 'another-valid-model' },
      ],
    });

    const models = await params.models({ client: mockClient as any });

    // Should only include models with valid ids
    expect(models.length).toBe(2);
    expect(models[0].id).toBe('valid-model');
    expect(models[1].id).toBe('another-valid-model');
  });

  it('should handle API errors gracefully', async () => {
    mockClient.models.list.mockRejectedValue(new Error('Network error'));

    // Should throw the error (no error handling in the implementation)
    await expect(params.models({ client: mockClient as any })).rejects.toThrow('Network error');
  });

  it('should handle malformed API response', async () => {
    mockClient.models.list.mockResolvedValue({
      data: null,
    });

    // This will throw an error when trying to access .filter on null
    await expect(params.models({ client: mockClient as any })).rejects.toThrow();
  });

  it('should handle API response without data field', async () => {
    mockClient.models.list.mockResolvedValue({});

    // This will throw an error when trying to access .data
    await expect(params.models({ client: mockClient as any })).rejects.toThrow();
  });

  it('should preserve model order from API', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: 'model-z' }, { id: 'model-a' }, { id: 'model-m' }],
    });

    const models = await params.models({ client: mockClient as any });

    // No alphabetical or other re-sorting is applied.
    expect(models).toHaveLength(3);
    expect(models[0].id).toBe('model-z');
    expect(models[1].id).toBe('model-a');
    expect(models[2].id).toBe('model-m');
  });

  it('should handle large number of models', async () => {
    const largeModelList = Array.from({ length: 100 }, (_, i) => ({ id: `model-${i}` }));
    mockClient.models.list.mockResolvedValue({
      data: largeModelList,
    });

    const models = await params.models({ client: mockClient as any });

    expect(models).toHaveLength(100);
    expect(models[0].id).toBe('model-0');
    expect(models[99].id).toBe('model-99');
  });

  it('should handle models with only id field', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: 'minimal-model' }],
    });

    const models = await params.models({ client: mockClient as any });

    expect(models).toHaveLength(1);
    expect(models[0]).toMatchObject({
      id: 'minimal-model',
      displayName: undefined,
      enabled: false,
      contextWindowTokens: undefined,
      functionCall: false,
      vision: false,
      reasoning: false,
    });
  });

  it('should handle function call models correctly', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: 'gpt-4o-mini' }],
    });

    const models = await params.models({ client: mockClient as any });

    expect(models).toHaveLength(1);
    expect(models[0].functionCall).toBe(true);
  });

  it('should handle models with whitespace in id', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [
        { id: ' model-with-spaces ' },
        { id: '\tmodel-with-tab\t' },
        { id: '\nmodel-with-newline\n' },
      ],
    });

    const models = await params.models({ client: mockClient as any });

    // Should preserve the whitespace in the id
    expect(models).toHaveLength(3);
    expect(models[0].id).toBe(' model-with-spaces ');
    expect(models[1].id).toBe('\tmodel-with-tab\t');
    expect(models[2].id).toBe('\nmodel-with-newline\n');
  });

  it('should handle models with numeric ids', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: '12345' }, { id: '67890' }],
    });

    const models = await params.models({ client: mockClient as any });

    expect(models).toHaveLength(2);
    expect(models[0].id).toBe('12345');
    expect(models[1].id).toBe('67890');
  });

  it('should handle duplicate model ids', async () => {
    mockClient.models.list.mockResolvedValue({
      data: [{ id: 'duplicate-model' }, { id: 'duplicate-model' }, { id: 'unique-model' }],
    });

    const models = await params.models({ client: mockClient as any });

    // Should include duplicates (no deduplication in implementation)
    expect(models).toHaveLength(3);
    expect(models[0].id).toBe('duplicate-model');
    expect(models[1].id).toBe('duplicate-model');
    expect(models[2].id).toBe('unique-model');
  });
});
1080
+
1081
+ describe('Runtime instantiation', () => {
1082
+ it('should create runtime instance with apiKey', () => {
1083
+ const runtime = new LobeSearch1API({ apiKey: 'test-key' });
1084
+ expect(runtime).toBeDefined();
1085
+ expect(runtime).toBeInstanceOf(LobeSearch1API);
1086
+ });
1087
+
1088
+ it('should create runtime instance with baseURL', () => {
1089
+ const runtime = new LobeSearch1API({
1090
+ apiKey: 'test-key',
1091
+ baseURL: 'https://custom.api.com/v1',
1092
+ });
1093
+ expect(runtime).toBeDefined();
1094
+ });
1095
+
1096
+ it('should create runtime instance with all options', () => {
1097
+ const runtime = new LobeSearch1API({
1098
+ apiKey: 'test-key',
1099
+ baseURL: 'https://custom.api.com/v1',
1100
+ dangerouslyAllowBrowser: true,
1101
+ });
1102
+ expect(runtime).toBeDefined();
1103
+ });
1104
+ });
1105
+
1106
+ describe('Provider configuration', () => {
1107
+ it('should have correct provider ID', () => {
1108
+ expect(params.provider).toBe('search1api');
1109
+ });
1110
+
1111
+ it('should have correct baseURL', () => {
1112
+ expect(params.baseURL).toBe('https://api.search1api.com/v1');
1113
+ });
1114
+
1115
+ it('should export params object', () => {
1116
+ expect(params).toBeDefined();
1117
+ expect(params).toHaveProperty('baseURL');
1118
+ expect(params).toHaveProperty('chatCompletion');
1119
+ expect(params).toHaveProperty('debug');
1120
+ expect(params).toHaveProperty('models');
1121
+ expect(params).toHaveProperty('provider');
1122
+ });
1123
+
1124
+ it('should have chatCompletion.handlePayload function', () => {
1125
+ expect(params.chatCompletion.handlePayload).toBeDefined();
1126
+ expect(typeof params.chatCompletion.handlePayload).toBe('function');
1127
+ });
1128
+
1129
+ it('should have debug.chatCompletion function', () => {
1130
+ expect(params.debug.chatCompletion).toBeDefined();
1131
+ expect(typeof params.debug.chatCompletion).toBe('function');
1132
+ });
1133
+
1134
+ it('should have models function', () => {
1135
+ expect(params.models).toBeDefined();
1136
+ expect(typeof params.models).toBe('function');
1137
+ });
1138
+ });
19
1139
  });