genai-lite 0.2.0 → 0.2.1

This diff shows the changes between publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -0,0 +1,303 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const LLMService_1 = require("./LLMService");
+ describe('LLMService.prepareMessage', () => {
+     let mockApiKeyProvider;
+     let service;
+     beforeEach(() => {
+         jest.clearAllMocks();
+         mockApiKeyProvider = jest.fn().mockResolvedValue('test-api-key');
+         service = new LLMService_1.LLMService(mockApiKeyProvider);
+     });
+     describe('Input validation', () => {
+         it('should require either template or messages', async () => {
+             const result = await service.prepareMessage({});
+             expect(result).toMatchObject({
+                 object: 'error',
+                 error: {
+                     message: 'Either template or messages must be provided',
+                     code: 'INVALID_INPUT',
+                     type: 'validation_error',
+                 },
+             });
+         });
+         it('should require either presetId or both providerId and modelId', async () => {
+             const result = await service.prepareMessage({
+                 template: 'Test template',
+                 providerId: 'openai',
+                 // Missing modelId
+             });
+             expect(result).toMatchObject({
+                 object: 'error',
+                 error: {
+                     message: 'Either presetId or both providerId and modelId must be provided',
+                     code: 'INVALID_MODEL_SELECTION',
+                     type: 'validation_error',
+                 },
+             });
+         });
+     });
+     describe('Preset resolution', () => {
+         it('should resolve model info from presetId', async () => {
+             const result = await service.prepareMessage({
+                 template: 'Test {{ model_id }}',
+                 presetId: 'openai-gpt-4.1-default',
+             });
+             expect(result).toMatchObject({
+                 messages: [{ role: 'user', content: 'Test gpt-4.1' }],
+                 modelContext: {
+                     model_id: 'gpt-4.1',
+                     provider_id: 'openai',
+                     thinking_enabled: false,
+                     thinking_available: false,
+                 },
+             });
+         });
+         it('should handle non-existent presetId', async () => {
+             const result = await service.prepareMessage({
+                 template: 'Test',
+                 presetId: 'non-existent-preset',
+             });
+             expect(result).toMatchObject({
+                 object: 'error',
+                 error: {
+                     message: 'Preset not found: non-existent-preset',
+                     code: 'PRESET_NOT_FOUND',
+                     type: 'validation_error',
+                 },
+             });
+         });
+         it('should merge preset settings with user settings', async () => {
+             const result = await service.prepareMessage({
+                 template: 'Test',
+                 presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
+                 settings: { temperature: 0.5 }, // Override preset temperature
+             });
+             expect(result).not.toHaveProperty('error');
+             // The settings are merged internally and will be used when sendMessage is called
+         });
+     });
+     describe('Model resolution from providerId/modelId', () => {
+         it('should resolve model info from providerId and modelId', async () => {
+             const result = await service.prepareMessage({
+                 template: 'Model: {{ model_id }}, Provider: {{ provider_id }}',
+                 providerId: 'anthropic',
+                 modelId: 'claude-3-5-sonnet-20241022',
+             });
+             expect(result).toMatchObject({
+                 messages: [{
+                     role: 'user',
+                     content: 'Model: claude-3-5-sonnet-20241022, Provider: anthropic'
+                 }],
+                 modelContext: {
+                     model_id: 'claude-3-5-sonnet-20241022',
+                     provider_id: 'anthropic',
+                     thinking_enabled: false,
+                     thinking_available: false,
+                 },
+             });
+         });
+         it('should handle invalid model', async () => {
+             const result = await service.prepareMessage({
+                 template: 'Test',
+                 providerId: 'openai',
+                 modelId: 'non-existent-model',
+             });
+             expect(result).toMatchObject({
+                 object: 'error',
+                 error: {
+                     message: 'Unsupported model: non-existent-model for provider: openai',
+                     code: 'UNSUPPORTED_MODEL',
+                     type: 'validation_error',
+                 },
+             });
+         });
+     });
+     describe('Template rendering with model context', () => {
+         it('should inject model context into template', async () => {
+             const result = await service.prepareMessage({
+                 template: `{{ thinking_enabled ? \`Please think step by step about this:\` : \`Please analyze this:\` }}
+ Model: {{ model_id }}
+ Provider: {{ provider_id }}
+ Thinking available: {{ thinking_available }}`,
+                 presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
+             });
+             expect(result).toMatchObject({
+                 messages: [{
+                     role: 'user',
+                     content: expect.stringContaining('Please think step by step about this:'),
+                 }],
+                 modelContext: {
+                     thinking_enabled: true,
+                     thinking_available: true,
+                     model_id: 'claude-3-7-sonnet-20250219',
+                     provider_id: 'anthropic',
+                 },
+             });
+             const content = result.messages[0].content;
+             expect(content).toContain('Model: claude-3-7-sonnet-20250219');
+             expect(content).toContain('Provider: anthropic');
+             expect(content).toContain('Thinking available: true');
+         });
+         it('should handle reasoning effort in model context', async () => {
+             const result = await service.prepareMessage({
+                 template: 'Effort: {{ reasoning_effort ? `{{reasoning_effort}}` : `not set` }}',
+                 providerId: 'anthropic',
+                 modelId: 'claude-3-7-sonnet-20250219',
+                 settings: {
+                     reasoning: {
+                         enabled: true,
+                         effort: 'high',
+                     },
+                 },
+             });
+             expect(result).toMatchObject({
+                 messages: [{ role: 'user', content: 'Effort: high' }],
+                 modelContext: {
+                     reasoning_effort: 'high',
+                 },
+             });
+         });
+         it('should handle reasoning maxTokens in model context', async () => {
+             const result = await service.prepareMessage({
+                 template: 'Max tokens: {{ reasoning_max_tokens ? `{{reasoning_max_tokens}}` : `default` }}',
+                 providerId: 'anthropic',
+                 modelId: 'claude-3-7-sonnet-20250219',
+                 settings: {
+                     reasoning: {
+                         enabled: true,
+                         maxTokens: 5000,
+                     },
+                 },
+             });
+             expect(result).toMatchObject({
+                 messages: [{ role: 'user', content: 'Max tokens: 5000' }],
+                 modelContext: {
+                     reasoning_max_tokens: 5000,
+                 },
+             });
+         });
+         it('should combine template variables with model context', async () => {
+             const result = await service.prepareMessage({
+                 template: `
+ Task: {{ task }}
+ Model: {{ model_id }}
+ {{ thinking_enabled ? "Use reasoning to solve this." : "Provide a direct answer." }}`,
+                 variables: {
+                     task: 'Calculate fibonacci(10)',
+                 },
+                 presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
+             });
+             const content = result.messages[0].content;
+             expect(content).toContain('Task: Calculate fibonacci(10)');
+             expect(content).toContain('Model: claude-3-7-sonnet-20250219');
+             expect(content).toContain('Use reasoning to solve this.');
+         });
+         it('should handle template rendering errors', async () => {
+             // This will cause an actual error in template rendering by using a variable that throws on toString()
+             const errorObject = {
+                 toString: () => {
+                     throw new Error('Test template error');
+                 }
+             };
+             const result = await service.prepareMessage({
+                 template: '{{ data }}',
+                 variables: { data: errorObject },
+                 providerId: 'openai',
+                 modelId: 'gpt-4.1',
+             });
+             expect(result).toMatchObject({
+                 object: 'error',
+                 error: {
+                     message: expect.stringContaining('Template rendering failed'),
+                     code: 'TEMPLATE_ERROR',
+                     type: 'validation_error',
+                 },
+             });
+         });
+     });
+     describe('Pre-built messages', () => {
+         it('should return pre-built messages with model context', async () => {
+             const messages = [
+                 { role: 'system', content: 'You are a helpful assistant.' },
+                 { role: 'user', content: 'Hello!' },
+             ];
+             const result = await service.prepareMessage({
+                 messages,
+                 providerId: 'openai',
+                 modelId: 'o4-mini',
+             });
+             expect(result).toMatchObject({
+                 messages,
+                 modelContext: {
+                     model_id: 'o4-mini',
+                     provider_id: 'openai',
+                     thinking_enabled: true, // o4-mini always has reasoning enabled
+                     thinking_available: true,
+                 },
+             });
+         });
+     });
+     describe('Thinking/reasoning models', () => {
+         it('should detect thinking capabilities for supported models', async () => {
+             const testCases = [
+                 {
+                     presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
+                     expected: { thinking_enabled: true, thinking_available: true },
+                 },
+                 {
+                     presetId: 'anthropic-claude-3-5-sonnet-20241022-default',
+                     expected: { thinking_enabled: false, thinking_available: false },
+                 },
+                 {
+                     presetId: 'google-gemini-2.5-flash-thinking',
+                     expected: { thinking_enabled: true, thinking_available: true },
+                 },
+                 {
+                     presetId: 'openai-o4-mini-default',
+                     expected: { thinking_enabled: true, thinking_available: true }, // Always on
+                 },
+             ];
+             for (const testCase of testCases) {
+                 const result = await service.prepareMessage({
+                     template: 'Test',
+                     presetId: testCase.presetId,
+                 });
+                 expect(result).toMatchObject({
+                     modelContext: expect.objectContaining(testCase.expected),
+                 });
+             }
+         });
+     });
+     describe('Custom presets', () => {
+         it('should work with custom presets', async () => {
+             const customPresets = [
+                 {
+                     id: 'custom-gpt4-thinking',
+                     displayName: 'Custom GPT-4',
+                     providerId: 'openai',
+                     modelId: 'gpt-4.1',
+                     settings: {
+                         temperature: 0.3,
+                         reasoning: { enabled: true },
+                     },
+                 },
+             ];
+             const customService = new LLMService_1.LLMService(mockApiKeyProvider, {
+                 presets: customPresets,
+                 presetMode: 'extend',
+             });
+             const result = await customService.prepareMessage({
+                 template: 'Thinking: {{ thinking_enabled }}',
+                 presetId: 'custom-gpt4-thinking',
+             });
+             expect(result).toMatchObject({
+                 messages: [{ role: 'user', content: 'Thinking: false' }], // GPT-4.1 doesn't support reasoning
+                 modelContext: {
+                     thinking_enabled: false,
+                     thinking_available: false,
+                 },
+             });
+         });
+     });
+ });
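
Note on the test surface above: prepareMessage resolves a preset (or an explicit providerId/modelId pair) into a rendered message list plus a modelContext object, without calling the provider. A minimal TypeScript usage sketch, assuming LLMService is re-exported from the package root and that the API-key callback simply maps a provider id to a key (both are assumptions, modeled on the mocks in the tests):

    import { LLMService } from 'genai-lite';

    // Assumed key-provider shape, mirroring the mocked async provider above.
    const service = new LLMService(async (providerId: string) =>
        process.env[`${providerId.toUpperCase()}_API_KEY`] ?? '');

    const prepared = await service.prepareMessage({
        template: 'Model: {{ model_id }}, thinking: {{ thinking_enabled }}',
        presetId: 'openai-gpt-4.1-default', // a preset id used by the tests above
    });
    if ('error' in prepared) {
        console.error(prepared.error.code, prepared.error.message);
    } else {
        console.log(prepared.messages, prepared.modelContext);
    }
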
@@ -0,0 +1 @@
+ export {};
@@ -0,0 +1,153 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const LLMService_1 = require("./LLMService");
+ describe('LLMService.sendMessage with presetId', () => {
+     let mockApiKeyProvider;
+     let service;
+     beforeEach(() => {
+         jest.clearAllMocks();
+         mockApiKeyProvider = jest.fn().mockResolvedValue('test-api-key');
+         service = new LLMService_1.LLMService(mockApiKeyProvider);
+     });
+     describe('Preset resolution in sendMessage', () => {
+         it('should send message using presetId', async () => {
+             const request = {
+                 presetId: 'openai-gpt-4.1-default',
+                 messages: [{ role: 'user', content: 'Hello' }],
+             };
+             const response = await service.sendMessage(request);
+             // The request will fail because we don't have a real API key,
+             // but we can verify it tried with the correct provider/model
+             expect(response).toMatchObject({
+                 provider: 'openai',
+                 model: 'gpt-4.1',
+                 object: 'error',
+             });
+         });
+         it('should override preset settings with request settings', async () => {
+             const request = {
+                 presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
+                 messages: [{ role: 'user', content: 'Test' }],
+                 settings: {
+                     temperature: 0.2, // Override preset temperature
+                     reasoning: {
+                         enabled: false, // Disable reasoning despite thinking preset
+                     },
+                 },
+             };
+             const response = await service.sendMessage(request);
+             expect(response).toMatchObject({
+                 provider: 'anthropic',
+                 model: 'claude-3-7-sonnet-20250219',
+                 object: 'error',
+             });
+         });
+         it('should handle invalid presetId', async () => {
+             const request = {
+                 presetId: 'non-existent-preset',
+                 messages: [{ role: 'user', content: 'Test' }],
+             };
+             const response = await service.sendMessage(request);
+             expect(response).toMatchObject({
+                 object: 'error',
+                 error: {
+                     message: 'Preset not found: non-existent-preset',
+                     code: 'PRESET_NOT_FOUND',
+                     type: 'validation_error',
+                 },
+             });
+         });
+         it('should allow either presetId or providerId/modelId', async () => {
+             // Test with providerId/modelId (existing behavior)
+             const request1 = {
+                 providerId: 'openai',
+                 modelId: 'gpt-4.1',
+                 messages: [{ role: 'user', content: 'Test' }],
+             };
+             const response1 = await service.sendMessage(request1);
+             expect(response1).toMatchObject({
+                 provider: 'openai',
+                 model: 'gpt-4.1',
+             });
+             // Test with presetId (new behavior)
+             const request2 = {
+                 presetId: 'openai-gpt-4.1-default',
+                 messages: [{ role: 'user', content: 'Test' }],
+             };
+             const response2 = await service.sendMessage(request2);
+             expect(response2).toMatchObject({
+                 provider: 'openai',
+                 model: 'gpt-4.1',
+             });
+         });
+         it('should prefer presetId when both are provided', async () => {
+             const request = {
+                 presetId: 'anthropic-claude-3-5-sonnet-20241022-default',
+                 providerId: 'openai', // These will be ignored
+                 modelId: 'gpt-4.1', // These will be ignored
+                 messages: [{ role: 'user', content: 'Test' }],
+             };
+             const response = await service.sendMessage(request);
+             // Should use the preset's provider/model
+             expect(response).toMatchObject({
+                 provider: 'anthropic',
+                 model: 'claude-3-5-sonnet-20241022',
+             });
+         });
+         it('should handle preset with model that was removed from config', async () => {
+             // Create a service with a preset pointing to a non-existent model
+             const customService = new LLMService_1.LLMService(mockApiKeyProvider, {
+                 presets: [{
+                     id: 'broken-preset',
+                     displayName: 'Broken',
+                     providerId: 'openai',
+                     modelId: 'non-existent-model',
+                     settings: {},
+                 }],
+                 presetMode: 'extend',
+             });
+             const request = {
+                 presetId: 'broken-preset',
+                 messages: [{ role: 'user', content: 'Test' }],
+             };
+             const response = await customService.sendMessage(request);
+             expect(response).toMatchObject({
+                 object: 'error',
+                 error: {
+                     message: 'Model not found for preset: broken-preset',
+                     code: 'MODEL_NOT_FOUND',
+                     type: 'validation_error',
+                 },
+             });
+         });
+     });
+     describe('Settings merge with presets', () => {
+         it('should apply preset settings correctly', async () => {
+             const request = {
+                 presetId: 'google-gemini-2.5-flash',
+                 messages: [{ role: 'user', content: 'Test' }],
+             };
+             const response = await service.sendMessage(request);
+             // The preset includes geminiSafetySettings which should be applied
+             expect(response).toMatchObject({
+                 provider: 'gemini',
+                 model: 'gemini-2.5-flash',
+             });
+         });
+         it('should merge preset reasoning settings with request settings', async () => {
+             const request = {
+                 presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
+                 messages: [{ role: 'user', content: 'Complex problem' }],
+                 settings: {
+                     maxTokens: 2000, // Add to preset settings
+                 },
+             };
+             const response = await service.sendMessage(request);
+             expect(response).toMatchObject({
+                 provider: 'anthropic',
+                 model: 'claude-3-7-sonnet-20250219',
+             });
+             // Settings would be merged internally with both reasoning enabled and maxTokens
+         });
+     });
+ });
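
These sendMessage tests rely on the call failing at the provider (no real API key) while still echoing the resolved provider and model. A hedged TypeScript sketch of the call shape they exercise, reusing the service value from the sketch above:

    // Send through a preset, overriding one setting per request; every field
    // shown here appears in the tests above.
    const response = await service.sendMessage({
        presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
        messages: [{ role: 'user', content: 'Summarize the change log.' }],
        settings: { temperature: 0.2 }, // takes precedence over the preset's value
    });
    if (response.object === 'error') {
        console.error(`${response.provider}/${response.model}:`, response.error.message);
    }
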
@@ -237,6 +237,89 @@ describe('LLMService', () => {
              expect(errorResponse.error.code).toBe('INVALID_SETTINGS');
              expect(errorResponse.error.message).toContain('topP must be a number between 0 and 1');
          });
+         it('should reject reasoning settings for non-reasoning models', async () => {
+             const request = {
+                 providerId: 'openai',
+                 modelId: 'gpt-4.1', // This model doesn't support reasoning
+                 messages: [{ role: 'user', content: 'Hello' }],
+                 settings: {
+                     reasoning: {
+                         enabled: true
+                     }
+                 }
+             };
+             const response = await service.sendMessage(request);
+             expect(response.object).toBe('error');
+             const errorResponse = response;
+             expect(errorResponse.error.code).toBe('reasoning_not_supported');
+             expect(errorResponse.error.message).toContain('does not support reasoning/thinking');
+         });
+         it('should reject reasoning with effort for non-reasoning models', async () => {
+             const request = {
+                 providerId: 'openai',
+                 modelId: 'gpt-4.1',
+                 messages: [{ role: 'user', content: 'Hello' }],
+                 settings: {
+                     reasoning: {
+                         effort: 'high'
+                     }
+                 }
+             };
+             const response = await service.sendMessage(request);
+             expect(response.object).toBe('error');
+             const errorResponse = response;
+             expect(errorResponse.error.code).toBe('reasoning_not_supported');
+         });
+         it('should reject reasoning with maxTokens for non-reasoning models', async () => {
+             const request = {
+                 providerId: 'openai',
+                 modelId: 'gpt-4.1',
+                 messages: [{ role: 'user', content: 'Hello' }],
+                 settings: {
+                     reasoning: {
+                         maxTokens: 5000
+                     }
+                 }
+             };
+             const response = await service.sendMessage(request);
+             expect(response.object).toBe('error');
+             const errorResponse = response;
+             expect(errorResponse.error.code).toBe('reasoning_not_supported');
+         });
+         it('should allow disabled reasoning for non-reasoning models', async () => {
+             const request = {
+                 providerId: 'openai',
+                 modelId: 'gpt-4.1',
+                 messages: [{ role: 'user', content: 'Hello' }],
+                 settings: {
+                     reasoning: {
+                         enabled: false
+                     }
+                 }
+             };
+             // This should pass validation but will fail at the adapter level since we don't have a real API key
+             const response = await service.sendMessage(request);
+             // Should not be a reasoning validation error
+             const errorResponse = response;
+             expect(errorResponse.error.code).not.toBe('reasoning_not_supported');
+         });
+         it('should allow reasoning with exclude=true for non-reasoning models', async () => {
+             const request = {
+                 providerId: 'openai',
+                 modelId: 'gpt-4.1',
+                 messages: [{ role: 'user', content: 'Hello' }],
+                 settings: {
+                     reasoning: {
+                         exclude: true
+                     }
+                 }
+             };
+             // This should pass validation
+             const response = await service.sendMessage(request);
+             // Should not be a reasoning validation error
+             const errorResponse = response;
+             expect(errorResponse.error.code).not.toBe('reasoning_not_supported');
+         });
      });
  });
  describe('getProviders', () => {
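
Taken together, the five new tests pin down a validation rule: on a model without reasoning support, any "active" reasoning field fails fast with reasoning_not_supported, while inert settings (enabled: false, or exclude: true) pass through to the adapter. A TypeScript restatement of that rule as the tests imply it (a sketch, not the library's actual validator):

    interface ReasoningSettings {
        enabled?: boolean;
        effort?: 'low' | 'medium' | 'high';
        maxTokens?: number;
        exclude?: boolean;
    }

    // Inert per the tests: exclude=true, or enabled=false with no effort/maxTokens.
    function reasoningIsInert(r?: ReasoningSettings): boolean {
        if (!r) return true;
        if (r.exclude === true) return true;
        return r.enabled === false && r.effort === undefined && r.maxTokens === undefined;
    }
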
@@ -57,6 +57,42 @@ class AnthropicClientAdapter {
              stop_sequences: request.settings.stopSequences,
          }),
      };
+     // Handle reasoning/thinking configuration for Claude models
+     if (request.settings.reasoning && !request.settings.reasoning.exclude) {
+         const reasoning = request.settings.reasoning;
+         let budgetTokens;
+         // Convert reasoning settings to Anthropic's thinking format
+         if (reasoning.maxTokens !== undefined) {
+             budgetTokens = Math.max(reasoning.maxTokens, 1024); // Minimum 1024
+         }
+         else if (reasoning.effort) {
+             // Convert effort levels to token budgets
+             // Max budget for Anthropic is 32000
+             const maxBudget = 32000;
+             switch (reasoning.effort) {
+                 case 'high':
+                     budgetTokens = Math.floor(maxBudget * 0.8);
+                     break;
+                 case 'medium':
+                     budgetTokens = Math.floor(maxBudget * 0.5);
+                     break;
+                 case 'low':
+                     budgetTokens = Math.floor(maxBudget * 0.2);
+                     break;
+             }
+         }
+         else if (reasoning.enabled !== false) {
+             // Use default budget
+             budgetTokens = 10000;
+         }
+         if (budgetTokens !== undefined) {
+             // Add thinking configuration to the request
+             messageParams.thinking = {
+                 type: "enabled",
+                 budget_tokens: Math.min(budgetTokens, 32000) // Cap at max
+             };
+         }
+     }
      console.log(`Making Anthropic API call for model: ${request.modelId}`);
      console.log(`Anthropic API parameters:`, {
          model: messageParams.model,
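
The effort-to-budget mapping added above works out to fixed budgets: high gives Math.floor(32000 * 0.8) = 25600 thinking tokens, medium gives 16000, and low gives 6400; an explicit maxTokens is raised to at least 1024 and finally capped at 32000, and reasoning enabled without either field falls back to 10000. A standalone TypeScript restatement of that arithmetic (a sketch mirroring the adapter code shown, not a function the package exports; the adapter only reaches it when reasoning is set and exclude is false):

    type Effort = 'low' | 'medium' | 'high';

    function anthropicBudgetTokens(r: { enabled?: boolean; effort?: Effort; maxTokens?: number }): number | undefined {
        const MAX = 32000; // Anthropic budget cap used above
        if (r.maxTokens !== undefined) return Math.min(Math.max(r.maxTokens, 1024), MAX);
        if (r.effort) return Math.floor(MAX * { high: 0.8, medium: 0.5, low: 0.2 }[r.effort]);
        if (r.enabled !== false) return 10000; // default budget
        return undefined;
    }

    // anthropicBudgetTokens({ effort: 'high' })   === 25600
    // anthropicBudgetTokens({ effort: 'medium' }) === 16000
    // anthropicBudgetTokens({ effort: 'low' })    === 6400
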
@@ -195,23 +231,41 @@ class AnthropicClientAdapter {
      if (!contentBlock || contentBlock.type !== "text") {
          throw new Error("Invalid completion structure from Anthropic API");
      }
+     // Extract thinking/reasoning content if available
+     let reasoning;
+     let reasoning_details;
+     // Check for thinking content in the response
+     if (completion.thinking_content) {
+         reasoning = completion.thinking_content;
+     }
+     // Check for reasoning details that need to be preserved
+     if (completion.reasoning_details) {
+         reasoning_details = completion.reasoning_details;
+     }
      // Map Anthropic's stop reason to our standard format
      const finishReason = this.mapAnthropicStopReason(completion.stop_reason);
+     const choice = {
+         message: {
+             role: "assistant",
+             content: contentBlock.text,
+         },
+         finish_reason: finishReason,
+         index: 0,
+     };
+     // Include reasoning if available and not excluded
+     if (reasoning && request.settings.reasoning && !request.settings.reasoning.exclude) {
+         choice.reasoning = reasoning;
+     }
+     // Always include reasoning_details if present (for tool use continuation)
+     if (reasoning_details) {
+         choice.reasoning_details = reasoning_details;
+     }
      return {
          id: completion.id,
          provider: request.providerId,
          model: completion.model || request.modelId,
          created: Math.floor(Date.now() / 1000), // Anthropic doesn't provide created timestamp
-         choices: [
-             {
-                 message: {
-                     role: "assistant",
-                     content: contentBlock.text,
-                 },
-                 finish_reason: finishReason,
-                 index: 0,
-             },
-         ],
+         choices: [choice],
          usage: completion.usage
              ? {
                  prompt_tokens: completion.usage.input_tokens,
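
With the choice now built as a mutable object, callers can read optional reasoning and reasoning_details fields off each choice. A consumption sketch in TypeScript (field names come from the adapter code above; the choice type itself is an assumption):

    interface ChatChoice {
        message: { role: string; content: string };
        finish_reason: string;
        index: number;
        reasoning?: string;          // present when thinking content was returned and not excluded
        reasoning_details?: unknown; // preserved for tool-use continuation
    }

    function logChoice(choice: ChatChoice): void {
        if (choice.reasoning) console.log('reasoning:', choice.reasoning);
        console.log('answer:', choice.message.content);
    }
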
@@ -36,7 +36,13 @@ describe('AnthropicClientAdapter', () => {
              stopSequences: [],
              user: 'test-user',
              geminiSafetySettings: [],
-             supportsSystemMessage: true
+             supportsSystemMessage: true,
+             reasoning: {
+                 enabled: false,
+                 effort: undefined,
+                 maxTokens: undefined,
+                 exclude: false
+             }
          }
      };
  });