kimi-vercel-ai-sdk-provider 0.2.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "kimi-vercel-ai-sdk-provider",
  "description": "Kimi (Moonshot AI) provider for Vercel AI SDK v6",
- "version": "0.2.0",
+ "version": "0.4.0",
  "sideEffects": false,
  "main": "./dist/index.js",
  "module": "./dist/index.mjs",
@@ -15,6 +15,9 @@
  "scripts": {
  "build": "tsup",
  "clean": "del-cli dist *.tsbuildinfo",
+ "format": "biome format --write",
+ "lint": "biome lint --write",
+ "lint:fix": "biome lint --write",
  "type-check": "tsc --noEmit",
  "test": "vitest run",
  "test:watch": "vitest",
@@ -1,9 +1,27 @@
- import { describe, expect, it, vi, beforeEach, afterEach } from 'vitest';
+ import type { LanguageModelV3StreamPart } from '@ai-sdk/provider';
+ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
  import { createKimiCode } from '../code';

  // Mock fetch for API testing
  const mockFetch = vi.fn();

+ async function readStreamParts<T>(stream: ReadableStream<T>) {
+ const reader = stream.getReader();
+ const parts: T[] = [];
+
+ while (true) {
+ const { value, done } = await reader.read();
+ if (done) {
+ break;
+ }
+ if (value !== undefined) {
+ parts.push(value);
+ }
+ }
+
+ return parts;
+ }
+
  describe('KimiCodeLanguageModel Integration', () => {
  const originalEnv = process.env;

@@ -147,7 +165,7 @@ describe('KimiCodeLanguageModel Integration', () => {

  const result = await model.doGenerate({
  prompt: [{ role: 'user', content: [{ type: 'text', text: 'Read the index file' }] }],
-
+
  tools: [
  {
  type: 'function',
@@ -198,8 +216,7 @@ describe('KimiCodeLanguageModel Integration', () => {
  });

  await model.doGenerate({
- prompt: [{ role: 'user', content: [{ type: 'text', text: 'Think hard' }] }],
-
+ prompt: [{ role: 'user', content: [{ type: 'text', text: 'Think hard' }] }]
  });

  const body = JSON.parse(mockFetch.mock.calls[0][1].body);
@@ -239,8 +256,7 @@ describe('KimiCodeLanguageModel Integration', () => {
  const model = provider('kimi-for-coding');

  const result = await model.doGenerate({
- prompt: [{ role: 'user', content: [{ type: 'text', text: 'Test' }] }],
-
+ prompt: [{ role: 'user', content: [{ type: 'text', text: 'Test' }] }]
  });

  expect(result.usage.inputTokens.total).toBe(100);
@@ -273,8 +289,7 @@ describe('KimiCodeLanguageModel Integration', () => {

  await expect(
  model.doGenerate({
- prompt: [{ role: 'user', content: [{ type: 'text', text: 'Test' }] }],
-
+ prompt: [{ role: 'user', content: [{ type: 'text', text: 'Test' }] }]
  })
  ).rejects.toThrow('Invalid API key');
  });
@@ -305,7 +320,7 @@ describe('KimiCodeLanguageModel Integration', () => {

  const result = await model.doGenerate({
  prompt: [{ role: 'user', content: [{ type: 'text', text: 'Write a long story' }] }],
-
+
  maxOutputTokens: 100
  });

@@ -350,14 +365,10 @@ describe('KimiCodeLanguageModel Integration', () => {
  const model = provider('kimi-for-coding');

  const result = await model.doStream({
- prompt: [{ role: 'user', content: [{ type: 'text', text: 'Say hello' }] }],
-
+ prompt: [{ role: 'user', content: [{ type: 'text', text: 'Say hello' }] }]
  });

- const parts: any[] = [];
- for await (const part of result.stream) {
- parts.push(part);
- }
+ const parts = await readStreamParts<LanguageModelV3StreamPart>(result.stream);

  // Verify streaming worked
  expect(parts.length).toBeGreaterThan(0);
@@ -409,14 +420,10 @@ describe('KimiCodeLanguageModel Integration', () => {
  const model = provider('kimi-k2-thinking');

  const result = await model.doStream({
- prompt: [{ role: 'user', content: [{ type: 'text', text: 'Think about this' }] }],
-
+ prompt: [{ role: 'user', content: [{ type: 'text', text: 'Think about this' }] }]
  });

- const parts: any[] = [];
- for await (const part of result.stream) {
- parts.push(part);
- }
+ const parts = await readStreamParts<LanguageModelV3StreamPart>(result.stream);

  const partTypes = parts.map((p) => p.type);
  expect(partTypes).toContain('reasoning-start');
@@ -463,7 +470,7 @@ describe('KimiCodeLanguageModel Integration', () => {

  const result = await model.doStream({
  prompt: [{ role: 'user', content: [{ type: 'text', text: 'Read the file' }] }],
-
+
  tools: [
  {
  type: 'function',
@@ -474,10 +481,7 @@ describe('KimiCodeLanguageModel Integration', () => {
  ]
  });

- const parts: any[] = [];
- for await (const part of result.stream) {
- parts.push(part);
- }
+ const parts = await readStreamParts<LanguageModelV3StreamPart>(result.stream);

  const partTypes = parts.map((p) => p.type);
  expect(partTypes).toContain('tool-input-start');
@@ -485,9 +489,12 @@ describe('KimiCodeLanguageModel Integration', () => {
  expect(partTypes).toContain('tool-input-end');
  expect(partTypes).toContain('tool-call');

- const toolCallPart = parts.find((p) => p.type === 'tool-call');
- expect(toolCallPart.toolName).toBe('read_file');
- expect(toolCallPart.input).toContain('/src/index.ts');
+ const toolCallPart = parts.find(
+ (p): p is Extract<LanguageModelV3StreamPart, { type: 'tool-call' }> => p.type === 'tool-call'
+ );
+ expect(toolCallPart).toBeDefined();
+ expect(toolCallPart?.toolName).toBe('read_file');
+ expect(toolCallPart?.input).toContain('/src/index.ts');
  });
  });

@@ -517,8 +524,7 @@ describe('KimiCodeLanguageModel Integration', () => {
  const model = provider('kimi-for-coding');

  await model.doGenerate({
- prompt: [{ role: 'user', content: [{ type: 'text', text: 'Test' }] }],
-
+ prompt: [{ role: 'user', content: [{ type: 'text', text: 'Test' }] }]
  });

  const headers = mockFetch.mock.calls[0][1].headers;
@@ -1,12 +1,11 @@
- import { describe, expect, it, vi, beforeEach, afterEach } from 'vitest';
+ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
  import {
- createKimiCode,
- kimiCode,
- KimiCodeLanguageModel,
- KIMI_CODE_BASE_URL,
  KIMI_CODE_DEFAULT_MODEL,
  KIMI_CODE_THINKING_MODEL,
- inferKimiCodeCapabilities
+ KimiCodeLanguageModel,
+ createKimiCode,
+ inferKimiCodeCapabilities,
+ kimiCode
  } from '../code';

  describe('createKimiCode', () => {
@@ -171,9 +171,7 @@ describe('toAnthropicThinking', () => {

  describe('convertToKimiCodePrompt', () => {
  it('should convert simple user message', async () => {
- const result = await convertToKimiCodePrompt([
- { role: 'user', content: [{ type: 'text', text: 'Hello!' }] }
- ]);
+ const result = await convertToKimiCodePrompt([{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }]);

  expect(result.system).toBeUndefined();
  expect(result.messages).toEqual([{ role: 'user', content: 'Hello!' }]);
@@ -0,0 +1,310 @@
+ /**
+ * Tests for file content caching.
+ */
+
+ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+ import {
+ FileCache,
+ type FileCacheEntry,
+ clearDefaultFileCache,
+ generateCacheKey,
+ generateContentHash,
+ getDefaultFileCache,
+ setDefaultFileCache
+ } from '../files';
+
+ describe('FileCache', () => {
+ describe('basic operations', () => {
+ it('should store and retrieve entries', () => {
+ const cache = new FileCache();
+ const entry: FileCacheEntry = {
+ fileId: 'file_123',
+ content: 'extracted text',
+ createdAt: Date.now(),
+ purpose: 'file-extract'
+ };
+
+ cache.set('hash123', entry);
+ const retrieved = cache.get('hash123');
+
+ expect(retrieved).toEqual(entry);
+ });
+
+ it('should return undefined for missing entries', () => {
+ const cache = new FileCache();
+
+ expect(cache.get('nonexistent')).toBeUndefined();
+ });
+
+ it('should delete entries', () => {
+ const cache = new FileCache();
+ const entry: FileCacheEntry = {
+ fileId: 'file_123',
+ createdAt: Date.now(),
+ purpose: 'file-extract'
+ };
+
+ cache.set('hash123', entry);
+ expect(cache.has('hash123')).toBe(true);
+
+ cache.delete('hash123');
+ expect(cache.has('hash123')).toBe(false);
+ });
+
+ it('should clear all entries', () => {
+ const cache = new FileCache();
+
+ cache.set('hash1', { fileId: 'f1', createdAt: Date.now(), purpose: 'file-extract' });
+ cache.set('hash2', { fileId: 'f2', createdAt: Date.now(), purpose: 'image' });
+ cache.set('hash3', { fileId: 'f3', createdAt: Date.now(), purpose: 'video' });
+
+ expect(cache.size).toBe(3);
+
+ cache.clear();
+ expect(cache.size).toBe(0);
+ });
+
+ it('should report correct size', () => {
+ const cache = new FileCache();
+
+ expect(cache.size).toBe(0);
+
+ cache.set('hash1', { fileId: 'f1', createdAt: Date.now(), purpose: 'file-extract' });
+ expect(cache.size).toBe(1);
+
+ cache.set('hash2', { fileId: 'f2', createdAt: Date.now(), purpose: 'file-extract' });
+ expect(cache.size).toBe(2);
+ });
+ });
+
+ describe('LRU eviction', () => {
+ it('should evict oldest entries when at capacity', () => {
+ const cache = new FileCache({ maxSize: 3 });
+
+ cache.set('hash1', { fileId: 'f1', createdAt: Date.now(), purpose: 'file-extract' });
+ cache.set('hash2', { fileId: 'f2', createdAt: Date.now(), purpose: 'file-extract' });
+ cache.set('hash3', { fileId: 'f3', createdAt: Date.now(), purpose: 'file-extract' });
+
+ expect(cache.size).toBe(3);
+
+ // Add a 4th entry, should evict hash1
+ cache.set('hash4', { fileId: 'f4', createdAt: Date.now(), purpose: 'file-extract' });
+
+ expect(cache.size).toBe(3);
+ expect(cache.has('hash1')).toBe(false);
+ expect(cache.has('hash2')).toBe(true);
+ expect(cache.has('hash3')).toBe(true);
+ expect(cache.has('hash4')).toBe(true);
+ });
+
+ it('should update LRU order on get', () => {
+ const cache = new FileCache({ maxSize: 3 });
+
+ cache.set('hash1', { fileId: 'f1', createdAt: Date.now(), purpose: 'file-extract' });
+ cache.set('hash2', { fileId: 'f2', createdAt: Date.now(), purpose: 'file-extract' });
+ cache.set('hash3', { fileId: 'f3', createdAt: Date.now(), purpose: 'file-extract' });
+
+ // Access hash1 to make it recently used
+ cache.get('hash1');
+
+ // Add a 4th entry, should evict hash2 (not hash1)
+ cache.set('hash4', { fileId: 'f4', createdAt: Date.now(), purpose: 'file-extract' });
+
+ expect(cache.has('hash1')).toBe(true);
+ expect(cache.has('hash2')).toBe(false);
+ expect(cache.has('hash3')).toBe(true);
+ expect(cache.has('hash4')).toBe(true);
+ });
+
+ it('should handle updating existing entries', () => {
+ const cache = new FileCache({ maxSize: 3 });
+
+ cache.set('hash1', { fileId: 'f1', createdAt: Date.now(), purpose: 'file-extract' });
+ cache.set('hash2', { fileId: 'f2', createdAt: Date.now(), purpose: 'file-extract' });
+ cache.set('hash3', { fileId: 'f3', createdAt: Date.now(), purpose: 'file-extract' });
+
+ // Update hash1
+ cache.set('hash1', { fileId: 'f1-updated', createdAt: Date.now(), purpose: 'file-extract' });
+
+ expect(cache.size).toBe(3);
+ expect(cache.get('hash1')?.fileId).toBe('f1-updated');
+ });
+ });
+
+ describe('TTL expiration', () => {
+ beforeEach(() => {
+ vi.useFakeTimers();
+ });
+
+ afterEach(() => {
+ vi.useRealTimers();
+ });
+
+ it('should expire entries after TTL', () => {
+ const cache = new FileCache({ ttlMs: 1000 }); // 1 second TTL
+
+ cache.set('hash1', { fileId: 'f1', createdAt: Date.now(), purpose: 'file-extract' });
+
+ expect(cache.get('hash1')).toBeDefined();
+
+ // Advance time past TTL
+ vi.advanceTimersByTime(1500);
+
+ expect(cache.get('hash1')).toBeUndefined();
+ });
+
+ it('should not expire entries before TTL', () => {
+ const cache = new FileCache({ ttlMs: 1000 });
+
+ cache.set('hash1', { fileId: 'f1', createdAt: Date.now(), purpose: 'file-extract' });
+
+ // Advance time but not past TTL
+ vi.advanceTimersByTime(500);
+
+ expect(cache.get('hash1')).toBeDefined();
+ });
+
+ it('should prune expired entries', () => {
+ const cache = new FileCache({ ttlMs: 1000 });
+
+ cache.set('hash1', { fileId: 'f1', createdAt: Date.now(), purpose: 'file-extract' });
+ cache.set('hash2', { fileId: 'f2', createdAt: Date.now(), purpose: 'file-extract' });
+
+ vi.advanceTimersByTime(1500);
+
+ cache.set('hash3', { fileId: 'f3', createdAt: Date.now(), purpose: 'file-extract' });
+
+ const pruned = cache.prune();
+
+ expect(pruned).toBe(2);
+ expect(cache.size).toBe(1);
+ expect(cache.has('hash3')).toBe(true);
+ });
+ });
+
+ describe('default options', () => {
+ it('should use default maxSize of 100', () => {
+ const cache = new FileCache();
+
+ // Add 100 entries
+ for (let i = 0; i < 100; i++) {
+ cache.set(`hash${i}`, { fileId: `f${i}`, createdAt: Date.now(), purpose: 'file-extract' });
+ }
+
+ expect(cache.size).toBe(100);
+
+ // Add one more, should evict
+ cache.set('hash100', { fileId: 'f100', createdAt: Date.now(), purpose: 'file-extract' });
+ expect(cache.size).toBe(100);
+ });
+
+ it('should use default TTL of 1 hour', () => {
+ vi.useFakeTimers();
+
+ const cache = new FileCache();
+ cache.set('hash1', { fileId: 'f1', createdAt: Date.now(), purpose: 'file-extract' });
+
+ // 59 minutes - should still be valid
+ vi.advanceTimersByTime(59 * 60 * 1000);
+ expect(cache.get('hash1')).toBeDefined();
+
+ // 61 minutes - should be expired
+ vi.advanceTimersByTime(2 * 60 * 1000);
+ expect(cache.get('hash1')).toBeUndefined();
+
+ vi.useRealTimers();
+ });
+ });
+ });
+
+ describe('generateContentHash', () => {
+ it('should generate consistent hashes for same content', () => {
+ const data = new Uint8Array([1, 2, 3, 4, 5]);
+
+ const hash1 = generateContentHash(data);
+ const hash2 = generateContentHash(data);
+
+ expect(hash1).toBe(hash2);
+ });
+
+ it('should generate different hashes for different content', () => {
+ const data1 = new Uint8Array([1, 2, 3]);
+ const data2 = new Uint8Array([4, 5, 6]);
+
+ const hash1 = generateContentHash(data1);
+ const hash2 = generateContentHash(data2);
+
+ expect(hash1).not.toBe(hash2);
+ });
+
+ it('should work with string input', () => {
+ const hash1 = generateContentHash('hello world');
+ const hash2 = generateContentHash('hello world');
+ const hash3 = generateContentHash('goodbye world');
+
+ expect(hash1).toBe(hash2);
+ expect(hash1).not.toBe(hash3);
+ });
+
+ it('should return 8-character hex string', () => {
+ const hash = generateContentHash('test');
+
+ expect(hash).toMatch(/^[0-9a-f]{8}$/);
+ });
+ });
+
+ describe('generateCacheKey', () => {
+ it('should include content hash, size, and filename', () => {
+ const data = new Uint8Array([1, 2, 3, 4, 5]);
+ const key = generateCacheKey(data, 'document.pdf');
+
+ expect(key).toContain('_5_'); // size
+ expect(key).toContain('document.pdf');
+ });
+
+ it('should normalize filename', () => {
+ const data = new Uint8Array([1, 2, 3]);
+ const key = generateCacheKey(data, 'My Document (1).PDF');
+
+ expect(key).toContain('my_document__1_.pdf');
+ });
+
+ it('should generate different keys for same content with different names', () => {
+ const data = new Uint8Array([1, 2, 3, 4, 5]);
+
+ const key1 = generateCacheKey(data, 'file1.pdf');
+ const key2 = generateCacheKey(data, 'file2.pdf');
+
+ expect(key1).not.toBe(key2);
+ });
+ });
+
+ describe('Default Cache', () => {
+ afterEach(() => {
+ clearDefaultFileCache();
+ setDefaultFileCache(null);
+ });
+
+ it('should create default cache on first access', () => {
+ const cache1 = getDefaultFileCache();
+ const cache2 = getDefaultFileCache();
+
+ expect(cache1).toBe(cache2);
+ });
+
+ it('should allow setting custom default cache', () => {
+ const customCache = new FileCache({ maxSize: 10 });
+ setDefaultFileCache(customCache);
+
+ expect(getDefaultFileCache()).toBe(customCache);
+ });
+
+ it('should clear default cache', () => {
+ const cache = getDefaultFileCache();
+ cache.set('test', { fileId: 'f1', createdAt: Date.now(), purpose: 'file-extract' });
+
+ clearDefaultFileCache();
+
+ expect(cache.size).toBe(0);
+ });
+ });
@@ -0,0 +1,120 @@
+ /**
+ * Tests for model configuration features:
+ * - Temperature locking for thinking models
+ * - Default max_tokens
+ * - Model capability inference
+ */
+
+ import { describe, expect, it } from 'vitest';
+ import {
+ STANDARD_MODEL_DEFAULT_MAX_TOKENS,
+ THINKING_MODEL_DEFAULT_MAX_TOKENS,
+ THINKING_MODEL_TEMPERATURE,
+ inferModelCapabilities
+ } from '../core';
+
+ describe('Model Configuration', () => {
+ describe('inferModelCapabilities', () => {
+ it('should detect thinking models by suffix', () => {
+ const caps = inferModelCapabilities('kimi-k2.5-thinking');
+
+ expect(caps.thinking).toBe(true);
+ expect(caps.alwaysThinking).toBe(true);
+ });
+
+ it('should detect non-thinking models', () => {
+ const caps = inferModelCapabilities('kimi-k2.5');
+
+ expect(caps.thinking).toBe(false);
+ expect(caps.alwaysThinking).toBe(false);
+ });
+
+ it('should detect K2.5 models for video support', () => {
+ const k25Caps = inferModelCapabilities('kimi-k2.5');
+ const k2Caps = inferModelCapabilities('kimi-k2-turbo');
+
+ expect(k25Caps.videoInput).toBe(true);
+ expect(k2Caps.videoInput).toBe(false);
+ });
+
+ it('should support alternative K2.5 naming', () => {
+ const caps = inferModelCapabilities('kimi-k2-5-thinking');
+
+ expect(caps.videoInput).toBe(true);
+ expect(caps.thinking).toBe(true);
+ });
+ });
+
+ describe('Temperature Locking', () => {
+ it('should set locked temperature for thinking models', () => {
+ const caps = inferModelCapabilities('kimi-k2.5-thinking');
+
+ expect(caps.temperatureLocked).toBe(true);
+ expect(caps.defaultTemperature).toBe(THINKING_MODEL_TEMPERATURE);
+ expect(caps.defaultTemperature).toBe(1.0);
+ });
+
+ it('should not lock temperature for standard models', () => {
+ const caps = inferModelCapabilities('kimi-k2.5');
+
+ expect(caps.temperatureLocked).toBe(false);
+ expect(caps.defaultTemperature).toBeUndefined();
+ });
+
+ it('should use correct constant value', () => {
+ expect(THINKING_MODEL_TEMPERATURE).toBe(1.0);
+ });
+ });
+
+ describe('Default Max Tokens', () => {
+ it('should set higher default for thinking models', () => {
+ const caps = inferModelCapabilities('kimi-k2.5-thinking');
+
+ expect(caps.defaultMaxOutputTokens).toBe(THINKING_MODEL_DEFAULT_MAX_TOKENS);
+ expect(caps.defaultMaxOutputTokens).toBe(32768);
+ });
+
+ it('should set standard default for regular models', () => {
+ const caps = inferModelCapabilities('kimi-k2.5');
+
+ expect(caps.defaultMaxOutputTokens).toBe(STANDARD_MODEL_DEFAULT_MAX_TOKENS);
+ expect(caps.defaultMaxOutputTokens).toBe(4096);
+ });
+
+ it('should use correct constant values', () => {
+ expect(THINKING_MODEL_DEFAULT_MAX_TOKENS).toBe(32768);
+ expect(STANDARD_MODEL_DEFAULT_MAX_TOKENS).toBe(4096);
+ });
+ });
+
+ describe('All models have common capabilities', () => {
+ const testModels = ['kimi-k2.5', 'kimi-k2.5-thinking', 'kimi-k2-turbo', 'kimi-k2-thinking'];
+
+ for (const modelId of testModels) {
+ it(`${modelId} should have imageInput support`, () => {
+ const caps = inferModelCapabilities(modelId);
+ expect(caps.imageInput).toBe(true);
+ });
+
+ it(`${modelId} should have 256k context`, () => {
+ const caps = inferModelCapabilities(modelId);
+ expect(caps.maxContextSize).toBe(256_000);
+ });
+
+ it(`${modelId} should support tool calling`, () => {
+ const caps = inferModelCapabilities(modelId);
+ expect(caps.toolCalling).toBe(true);
+ });
+
+ it(`${modelId} should support JSON mode`, () => {
+ const caps = inferModelCapabilities(modelId);
+ expect(caps.jsonMode).toBe(true);
+ });
+
+ it(`${modelId} should support structured outputs`, () => {
+ const caps = inferModelCapabilities(modelId);
+ expect(caps.structuredOutputs).toBe(true);
+ });
+ }
+ });
+ });
@@ -42,8 +42,9 @@ describe('createKimi', () => {
  it('should throw when called with new', () => {
  const provider = createKimi();
  // The error can be either our custom message or the native constructor error
- // biome-ignore lint/suspicious/noExplicitAny: testing edge case requires any
- expect(() => new (provider as any)('kimi-k2.5')).toThrow();
+ // Test that the provider function throws when used as a constructor
+ const ProviderAsConstructor = provider as unknown as new (modelId: string) => unknown;
+ expect(() => new ProviderAsConstructor('kimi-k2.5')).toThrow();
  });

  it('should throw for unsupported model types', () => {