genai-lite 0.4.0 → 0.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +47 -37
- package/dist/llm/LLMService.d.ts +29 -2
- package/dist/llm/LLMService.js +80 -36
- package/dist/llm/config.js +4 -4
- package/dist/llm/services/SettingsManager.js +17 -11
- package/dist/llm/types.d.ts +81 -22
- package/dist/prompting/parser.d.ts +2 -2
- package/dist/prompting/parser.js +2 -2
- package/package.json +1 -1
- package/dist/llm/LLMService.createMessages.test.d.ts +0 -4
- package/dist/llm/LLMService.createMessages.test.js +0 -364
- package/dist/llm/LLMService.original.d.ts +0 -147
- package/dist/llm/LLMService.original.js +0 -656
- package/dist/llm/LLMService.prepareMessage.test.d.ts +0 -1
- package/dist/llm/LLMService.prepareMessage.test.js +0 -303
- package/dist/llm/LLMService.presets.test.d.ts +0 -1
- package/dist/llm/LLMService.presets.test.js +0 -210
- package/dist/llm/LLMService.sendMessage.preset.test.d.ts +0 -1
- package/dist/llm/LLMService.sendMessage.preset.test.js +0 -153
- package/dist/llm/LLMService.test.d.ts +0 -1
- package/dist/llm/LLMService.test.js +0 -639
- package/dist/llm/clients/AnthropicClientAdapter.test.d.ts +0 -1
- package/dist/llm/clients/AnthropicClientAdapter.test.js +0 -273
- package/dist/llm/clients/GeminiClientAdapter.test.d.ts +0 -1
- package/dist/llm/clients/GeminiClientAdapter.test.js +0 -405
- package/dist/llm/clients/LlamaCppClientAdapter.test.d.ts +0 -1
- package/dist/llm/clients/LlamaCppClientAdapter.test.js +0 -447
- package/dist/llm/clients/LlamaCppServerClient.test.d.ts +0 -1
- package/dist/llm/clients/LlamaCppServerClient.test.js +0 -294
- package/dist/llm/clients/MockClientAdapter.test.d.ts +0 -1
- package/dist/llm/clients/MockClientAdapter.test.js +0 -250
- package/dist/llm/clients/OpenAIClientAdapter.test.d.ts +0 -1
- package/dist/llm/clients/OpenAIClientAdapter.test.js +0 -258
- package/dist/llm/clients/adapterErrorUtils.test.d.ts +0 -1
- package/dist/llm/clients/adapterErrorUtils.test.js +0 -123
- package/dist/llm/config.test.d.ts +0 -1
- package/dist/llm/config.test.js +0 -176
- package/dist/llm/services/AdapterRegistry.test.d.ts +0 -1
- package/dist/llm/services/AdapterRegistry.test.js +0 -239
- package/dist/llm/services/ModelResolver.test.d.ts +0 -1
- package/dist/llm/services/ModelResolver.test.js +0 -179
- package/dist/llm/services/PresetManager.test.d.ts +0 -1
- package/dist/llm/services/PresetManager.test.js +0 -210
- package/dist/llm/services/RequestValidator.test.d.ts +0 -1
- package/dist/llm/services/RequestValidator.test.js +0 -159
- package/dist/llm/services/SettingsManager.test.d.ts +0 -1
- package/dist/llm/services/SettingsManager.test.js +0 -266
- package/dist/prompting/builder.d.ts +0 -38
- package/dist/prompting/builder.js +0 -63
- package/dist/prompting/builder.test.d.ts +0 -4
- package/dist/prompting/builder.test.js +0 -109
- package/dist/prompting/content.test.d.ts +0 -4
- package/dist/prompting/content.test.js +0 -212
- package/dist/prompting/parser.test.d.ts +0 -4
- package/dist/prompting/parser.test.js +0 -464
- package/dist/prompting/template.test.d.ts +0 -1
- package/dist/prompting/template.test.js +0 -250
- package/dist/providers/fromEnvironment.test.d.ts +0 -1
- package/dist/providers/fromEnvironment.test.js +0 -59
package/dist/llm/LLMService.prepareMessage.test.js

```diff
@@ -1,303 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-const LLMService_1 = require("./LLMService");
-describe('LLMService.prepareMessage', () => {
-    let mockApiKeyProvider;
-    let service;
-    beforeEach(() => {
-        jest.clearAllMocks();
-        mockApiKeyProvider = jest.fn().mockResolvedValue('test-api-key');
-        service = new LLMService_1.LLMService(mockApiKeyProvider);
-    });
-    describe('Input validation', () => {
-        it('should require either template or messages', async () => {
-            const result = await service.prepareMessage({});
-            expect(result).toMatchObject({
-                object: 'error',
-                error: {
-                    message: 'Either template or messages must be provided',
-                    code: 'INVALID_INPUT',
-                    type: 'validation_error',
-                },
-            });
-        });
-        it('should require either presetId or both providerId and modelId', async () => {
-            const result = await service.prepareMessage({
-                template: 'Test template',
-                providerId: 'openai',
-                // Missing modelId
-            });
-            expect(result).toMatchObject({
-                object: 'error',
-                error: {
-                    message: 'Either presetId or both providerId and modelId must be provided',
-                    code: 'INVALID_MODEL_SELECTION',
-                    type: 'validation_error',
-                },
-            });
-        });
-    });
-    describe('Preset resolution', () => {
-        it('should resolve model info from presetId', async () => {
-            const result = await service.prepareMessage({
-                template: 'Test {{ model_id }}',
-                presetId: 'openai-gpt-4.1-default',
-            });
-            expect(result).toMatchObject({
-                messages: [{ role: 'user', content: 'Test gpt-4.1' }],
-                modelContext: {
-                    model_id: 'gpt-4.1',
-                    provider_id: 'openai',
-                    thinking_enabled: false,
-                    thinking_available: false,
-                },
-            });
-        });
-        it('should handle non-existent presetId', async () => {
-            const result = await service.prepareMessage({
-                template: 'Test',
-                presetId: 'non-existent-preset',
-            });
-            expect(result).toMatchObject({
-                object: 'error',
-                error: {
-                    message: 'Preset not found: non-existent-preset',
-                    code: 'PRESET_NOT_FOUND',
-                    type: 'validation_error',
-                },
-            });
-        });
-        it('should merge preset settings with user settings', async () => {
-            const result = await service.prepareMessage({
-                template: 'Test',
-                presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
-                settings: { temperature: 0.5 }, // Override preset temperature
-            });
-            expect(result).not.toHaveProperty('error');
-            // The settings are merged internally and will be used when sendMessage is called
-        });
-    });
-    describe('Model resolution from providerId/modelId', () => {
-        it('should resolve model info from providerId and modelId', async () => {
-            const result = await service.prepareMessage({
-                template: 'Model: {{ model_id }}, Provider: {{ provider_id }}',
-                providerId: 'anthropic',
-                modelId: 'claude-3-5-sonnet-20241022',
-            });
-            expect(result).toMatchObject({
-                messages: [{
-                    role: 'user',
-                    content: 'Model: claude-3-5-sonnet-20241022, Provider: anthropic'
-                }],
-                modelContext: {
-                    model_id: 'claude-3-5-sonnet-20241022',
-                    provider_id: 'anthropic',
-                    thinking_enabled: false,
-                    thinking_available: false,
-                },
-            });
-        });
-        it('should handle invalid model', async () => {
-            const result = await service.prepareMessage({
-                template: 'Test',
-                providerId: 'openai',
-                modelId: 'non-existent-model',
-            });
-            expect(result).toMatchObject({
-                object: 'error',
-                error: {
-                    message: 'Unsupported model: non-existent-model for provider: openai',
-                    code: 'UNSUPPORTED_MODEL',
-                    type: 'validation_error',
-                },
-            });
-        });
-    });
-    describe('Template rendering with model context', () => {
-        it('should inject model context into template', async () => {
-            const result = await service.prepareMessage({
-                template: `{{ thinking_enabled ? \`Please think step by step about this:\` : \`Please analyze this:\` }}
-Model: {{ model_id }}
-Provider: {{ provider_id }}
-Thinking available: {{ thinking_available }}`,
-                presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
-            });
-            expect(result).toMatchObject({
-                messages: [{
-                    role: 'user',
-                    content: expect.stringContaining('Please think step by step about this:'),
-                }],
-                modelContext: {
-                    thinking_enabled: true,
-                    thinking_available: true,
-                    model_id: 'claude-3-7-sonnet-20250219',
-                    provider_id: 'anthropic',
-                },
-            });
-            const content = result.messages[0].content;
-            expect(content).toContain('Model: claude-3-7-sonnet-20250219');
-            expect(content).toContain('Provider: anthropic');
-            expect(content).toContain('Thinking available: true');
-        });
-        it('should handle reasoning effort in model context', async () => {
-            const result = await service.prepareMessage({
-                template: 'Effort: {{ reasoning_effort ? `{{reasoning_effort}}` : `not set` }}',
-                providerId: 'anthropic',
-                modelId: 'claude-3-7-sonnet-20250219',
-                settings: {
-                    reasoning: {
-                        enabled: true,
-                        effort: 'high',
-                    },
-                },
-            });
-            expect(result).toMatchObject({
-                messages: [{ role: 'user', content: 'Effort: high' }],
-                modelContext: {
-                    reasoning_effort: 'high',
-                },
-            });
-        });
-        it('should handle reasoning maxTokens in model context', async () => {
-            const result = await service.prepareMessage({
-                template: 'Max tokens: {{ reasoning_max_tokens ? `{{reasoning_max_tokens}}` : `default` }}',
-                providerId: 'anthropic',
-                modelId: 'claude-3-7-sonnet-20250219',
-                settings: {
-                    reasoning: {
-                        enabled: true,
-                        maxTokens: 5000,
-                    },
-                },
-            });
-            expect(result).toMatchObject({
-                messages: [{ role: 'user', content: 'Max tokens: 5000' }],
-                modelContext: {
-                    reasoning_max_tokens: 5000,
-                },
-            });
-        });
-        it('should combine template variables with model context', async () => {
-            const result = await service.prepareMessage({
-                template: `
-Task: {{ task }}
-Model: {{ model_id }}
-{{ thinking_enabled ? "Use reasoning to solve this." : "Provide a direct answer." }}`,
-                variables: {
-                    task: 'Calculate fibonacci(10)',
-                },
-                presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
-            });
-            const content = result.messages[0].content;
-            expect(content).toContain('Task: Calculate fibonacci(10)');
-            expect(content).toContain('Model: claude-3-7-sonnet-20250219');
-            expect(content).toContain('Use reasoning to solve this.');
-        });
-        it('should handle template rendering errors', async () => {
-            // This will cause an actual error in template rendering by using a variable that throws on toString()
-            const errorObject = {
-                toString: () => {
-                    throw new Error('Test template error');
-                }
-            };
-            const result = await service.prepareMessage({
-                template: '{{ data }}',
-                variables: { data: errorObject },
-                providerId: 'openai',
-                modelId: 'gpt-4.1',
-            });
-            expect(result).toMatchObject({
-                object: 'error',
-                error: {
-                    message: expect.stringContaining('Template rendering failed'),
-                    code: 'TEMPLATE_ERROR',
-                    type: 'validation_error',
-                },
-            });
-        });
-    });
-    describe('Pre-built messages', () => {
-        it('should return pre-built messages with model context', async () => {
-            const messages = [
-                { role: 'system', content: 'You are a helpful assistant.' },
-                { role: 'user', content: 'Hello!' },
-            ];
-            const result = await service.prepareMessage({
-                messages,
-                providerId: 'openai',
-                modelId: 'o4-mini',
-            });
-            expect(result).toMatchObject({
-                messages,
-                modelContext: {
-                    model_id: 'o4-mini',
-                    provider_id: 'openai',
-                    thinking_enabled: true, // o4-mini always has reasoning enabled
-                    thinking_available: true,
-                },
-            });
-        });
-    });
-    describe('Thinking/reasoning models', () => {
-        it('should detect thinking capabilities for supported models', async () => {
-            const testCases = [
-                {
-                    presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
-                    expected: { thinking_enabled: true, thinking_available: true },
-                },
-                {
-                    presetId: 'anthropic-claude-3-5-sonnet-20241022-default',
-                    expected: { thinking_enabled: false, thinking_available: false },
-                },
-                {
-                    presetId: 'google-gemini-2.5-flash-thinking',
-                    expected: { thinking_enabled: true, thinking_available: true },
-                },
-                {
-                    presetId: 'openai-o4-mini-default',
-                    expected: { thinking_enabled: true, thinking_available: true }, // Always on
-                },
-            ];
-            for (const testCase of testCases) {
-                const result = await service.prepareMessage({
-                    template: 'Test',
-                    presetId: testCase.presetId,
-                });
-                expect(result).toMatchObject({
-                    modelContext: expect.objectContaining(testCase.expected),
-                });
-            }
-        });
-    });
-    describe('Custom presets', () => {
-        it('should work with custom presets', async () => {
-            const customPresets = [
-                {
-                    id: 'custom-gpt4-thinking',
-                    displayName: 'Custom GPT-4',
-                    providerId: 'openai',
-                    modelId: 'gpt-4.1',
-                    settings: {
-                        temperature: 0.3,
-                        reasoning: { enabled: true },
-                    },
-                },
-            ];
-            const customService = new LLMService_1.LLMService(mockApiKeyProvider, {
-                presets: customPresets,
-                presetMode: 'extend',
-            });
-            const result = await customService.prepareMessage({
-                template: 'Thinking: {{ thinking_enabled }}',
-                presetId: 'custom-gpt4-thinking',
-            });
-            expect(result).toMatchObject({
-                messages: [{ role: 'user', content: 'Thinking: false' }], // GPT-4.1 doesn't support reasoning
-                modelContext: {
-                    thinking_enabled: false,
-                    thinking_available: false,
-                },
-            });
-        });
-    });
-});
```
A removed test declaration stub (one of the `*.test.d.ts` files, each a single line):

```diff
@@ -1 +0,0 @@
-export {};
```
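The 303-line hunk above is the compiled `prepareMessage` test suite; it documents the template syntax (`{{ model_id }}`, `{{ thinking_enabled ? ... : ... }}`), the model-context variables the service injects, and the validation error codes. A minimal usage sketch, assuming `LLMService` is re-exported from the package root; the preset id and error codes come from the fixtures above:

```typescript
// Sketch of the prepareMessage flow the removed suite covered. The key-provider
// shape (an async function resolving to an API key string) mirrors the mocks above.
import { LLMService } from 'genai-lite';

async function main() {
  const service = new LLMService(async () => 'your-api-key');

  const prepared = await service.prepareMessage({
    // model_id and thinking_enabled are injected by the service;
    // task comes from the variables map.
    template: '{{ thinking_enabled ? `Think step by step.` : `Answer directly.` }} Task: {{ task }} ({{ model_id }})',
    variables: { task: 'Calculate fibonacci(10)' },
    presetId: 'openai-gpt-4.1-default', // built-in preset id from the fixtures above
  });

  if ('error' in prepared) {
    // e.g. PRESET_NOT_FOUND or TEMPLATE_ERROR, per the assertions above
    console.error(prepared.error.code, prepared.error.message);
  } else {
    console.log(prepared.messages, prepared.modelContext);
  }
}

main().catch(console.error);
```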
package/dist/llm/LLMService.presets.test.js

```diff
@@ -1,210 +0,0 @@
-"use strict";
-var __importDefault = (this && this.__importDefault) || function (mod) {
-    return (mod && mod.__esModule) ? mod : { "default": mod };
-};
-Object.defineProperty(exports, "__esModule", { value: true });
-const LLMService_1 = require("./LLMService");
-const presets_json_1 = __importDefault(require("../config/presets.json"));
-describe('LLMService Presets', () => {
-    let mockApiKeyProvider;
-    beforeEach(() => {
-        jest.clearAllMocks();
-        mockApiKeyProvider = jest.fn().mockResolvedValue('test-api-key');
-    });
-    describe('Default behavior', () => {
-        it('should load default presets when no options provided', async () => {
-            const service = new LLMService_1.LLMService(mockApiKeyProvider);
-            const presets = service.getPresets();
-            expect(presets).toHaveLength(presets_json_1.default.length);
-            expect(presets).toEqual(expect.arrayContaining(presets_json_1.default.map(preset => expect.objectContaining({
-                id: preset.id,
-                displayName: preset.displayName,
-                providerId: preset.providerId,
-                modelId: preset.modelId
-            }))));
-        });
-        it('should return a copy of presets to prevent external modification', async () => {
-            const service = new LLMService_1.LLMService(mockApiKeyProvider);
-            const presets1 = service.getPresets();
-            const presets2 = service.getPresets();
-            expect(presets1).not.toBe(presets2); // Different array instances
-            expect(presets1).toEqual(presets2); // Same content
-            // Modifying returned array should not affect service
-            presets1.push({
-                id: 'test-preset',
-                displayName: 'Test',
-                providerId: 'openai',
-                modelId: 'gpt-4',
-                settings: {}
-            });
-            const presets3 = service.getPresets();
-            expect(presets3).toHaveLength(presets_json_1.default.length);
-        });
-    });
-    describe('Extend mode', () => {
-        it('should add new presets to defaults in extend mode', async () => {
-            const customPresets = [
-                {
-                    id: 'custom-preset-1',
-                    displayName: 'Custom Preset 1',
-                    providerId: 'openai',
-                    modelId: 'gpt-4',
-                    settings: { temperature: 0.5 }
-                }
-            ];
-            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
-                presets: customPresets,
-                presetMode: 'extend'
-            });
-            const presets = service.getPresets();
-            expect(presets).toHaveLength(presets_json_1.default.length + 1);
-            expect(presets).toContainEqual(expect.objectContaining({
-                id: 'custom-preset-1',
-                displayName: 'Custom Preset 1'
-            }));
-        });
-        it('should override default presets with same ID in extend mode', async () => {
-            const existingPresetId = presets_json_1.default[0].id;
-            const customPresets = [
-                {
-                    id: existingPresetId,
-                    displayName: 'Overridden Preset',
-                    providerId: 'anthropic',
-                    modelId: 'claude-3-5-sonnet-20241022',
-                    settings: { temperature: 0.8 }
-                }
-            ];
-            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
-                presets: customPresets,
-                presetMode: 'extend'
-            });
-            const presets = service.getPresets();
-            const overriddenPreset = presets.find(p => p.id === existingPresetId);
-            expect(presets).toHaveLength(presets_json_1.default.length);
-            expect(overriddenPreset).toBeDefined();
-            expect(overriddenPreset?.displayName).toBe('Overridden Preset');
-            expect(overriddenPreset?.providerId).toBe('anthropic');
-        });
-        it('should use extend mode by default when mode not specified', async () => {
-            const customPresets = [
-                {
-                    id: 'custom-preset-default',
-                    displayName: 'Custom Default',
-                    providerId: 'gemini',
-                    modelId: 'gemini-2.0-flash',
-                    settings: { temperature: 0.3 }
-                }
-            ];
-            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
-                presets: customPresets
-                // presetMode not specified, should default to 'extend'
-            });
-            const presets = service.getPresets();
-            expect(presets).toHaveLength(presets_json_1.default.length + 1);
-        });
-    });
-    describe('Replace mode', () => {
-        it('should use only custom presets in replace mode', async () => {
-            const customPresets = [
-                {
-                    id: 'replace-preset-1',
-                    displayName: 'Replace Preset 1',
-                    providerId: 'openai',
-                    modelId: 'gpt-4',
-                    settings: { temperature: 0.5 }
-                },
-                {
-                    id: 'replace-preset-2',
-                    displayName: 'Replace Preset 2',
-                    providerId: 'anthropic',
-                    modelId: 'claude-3-5-sonnet-20241022',
-                    settings: { temperature: 0.3 }
-                }
-            ];
-            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
-                presets: customPresets,
-                presetMode: 'replace'
-            });
-            const presets = service.getPresets();
-            expect(presets).toHaveLength(2);
-            expect(presets).toEqual(expect.arrayContaining([
-                expect.objectContaining({ id: 'replace-preset-1' }),
-                expect.objectContaining({ id: 'replace-preset-2' })
-            ]));
-            // Should not contain any default presets
-            const defaultPresetIds = presets_json_1.default.map(p => p.id);
-            const actualPresetIds = presets.map(p => p.id);
-            expect(actualPresetIds).not.toContain(expect.arrayContaining(defaultPresetIds));
-        });
-        it('should return empty array when replace mode with no custom presets', async () => {
-            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
-                presets: [],
-                presetMode: 'replace'
-            });
-            const presets = service.getPresets();
-            expect(presets).toHaveLength(0);
-        });
-        it('should handle undefined presets array in replace mode', async () => {
-            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
-                presetMode: 'replace'
-                // presets not provided
-            });
-            const presets = service.getPresets();
-            expect(presets).toHaveLength(0);
-        });
-    });
-    describe('Edge cases', () => {
-        it('should handle duplicate IDs within custom presets', async () => {
-            const customPresets = [
-                {
-                    id: 'duplicate-id',
-                    displayName: 'First Preset',
-                    providerId: 'openai',
-                    modelId: 'gpt-4',
-                    settings: { temperature: 0.5 }
-                },
-                {
-                    id: 'duplicate-id',
-                    displayName: 'Second Preset',
-                    providerId: 'anthropic',
-                    modelId: 'claude-3-5-sonnet-20241022',
-                    settings: { temperature: 0.3 }
-                }
-            ];
-            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
-                presets: customPresets,
-                presetMode: 'replace'
-            });
-            const presets = service.getPresets();
-            const duplicatePresets = presets.filter(p => p.id === 'duplicate-id');
-            // Last one should win
-            expect(duplicatePresets).toHaveLength(1);
-            expect(duplicatePresets[0].displayName).toBe('Second Preset');
-        });
-        it('should handle presets with complex settings including gemini safety settings', async () => {
-            const customPresets = [
-                {
-                    id: 'gemini-complex',
-                    displayName: 'Gemini Complex',
-                    providerId: 'gemini',
-                    modelId: 'gemini-2.0-flash',
-                    settings: {
-                        temperature: 0.5,
-                        maxTokens: 2000,
-                        geminiSafetySettings: [
-                            { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_NONE' },
-                            { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_MEDIUM_AND_ABOVE' }
-                        ]
-                    }
-                }
-            ];
-            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
-                presets: customPresets,
-                presetMode: 'replace'
-            });
-            const presets = service.getPresets();
-            expect(presets).toHaveLength(1);
-            expect(presets[0].settings.geminiSafetySettings).toHaveLength(2);
-        });
-    });
-});
```
Another removed test declaration stub (`*.test.d.ts`):

```diff
@@ -1 +0,0 @@
-export {};
```
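The presets suite above pins down the constructor's preset handling: `extend` (the default when `presetMode` is omitted) merges custom presets over the built-in `config/presets.json` list, with a custom preset overriding a built-in one of the same id, while `replace` keeps only what you pass in. A sketch under the same root-export assumption; `team-default` is a hypothetical preset id:

```typescript
// Sketch of the constructor's preset modes as documented by the removed suite.
import { LLMService } from 'genai-lite';

const keyProvider = async () => 'your-api-key';

// 'extend' (also the default): built-in presets plus the custom ones;
// per the tests above, the last entry with a duplicate id wins.
const extended = new LLMService(keyProvider, {
  presets: [{
    id: 'team-default', // hypothetical id
    displayName: 'Team Default',
    providerId: 'openai',
    modelId: 'gpt-4.1',
    settings: { temperature: 0.3 },
  }],
  presetMode: 'extend',
});
console.log(extended.getPresets().length); // built-in count + 1

// 'replace': only the custom presets survive; an empty or omitted list
// leaves the service with no presets at all.
const replaced = new LLMService(keyProvider, { presetMode: 'replace' });
console.log(replaced.getPresets()); // []
```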
package/dist/llm/LLMService.sendMessage.preset.test.js

```diff
@@ -1,153 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-const LLMService_1 = require("./LLMService");
-describe('LLMService.sendMessage with presetId', () => {
-    let mockApiKeyProvider;
-    let service;
-    beforeEach(() => {
-        jest.clearAllMocks();
-        mockApiKeyProvider = jest.fn().mockResolvedValue('test-api-key');
-        service = new LLMService_1.LLMService(mockApiKeyProvider);
-    });
-    describe('Preset resolution in sendMessage', () => {
-        it('should send message using presetId', async () => {
-            const request = {
-                presetId: 'openai-gpt-4.1-default',
-                messages: [{ role: 'user', content: 'Hello' }],
-            };
-            const response = await service.sendMessage(request);
-            // The request will fail because we don't have a real API key,
-            // but we can verify it tried with the correct provider/model
-            expect(response).toMatchObject({
-                provider: 'openai',
-                model: 'gpt-4.1',
-                object: 'error',
-            });
-        });
-        it('should override preset settings with request settings', async () => {
-            const request = {
-                presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
-                messages: [{ role: 'user', content: 'Test' }],
-                settings: {
-                    temperature: 0.2, // Override preset temperature
-                    reasoning: {
-                        enabled: false, // Disable reasoning despite thinking preset
-                    },
-                },
-            };
-            const response = await service.sendMessage(request);
-            expect(response).toMatchObject({
-                provider: 'anthropic',
-                model: 'claude-3-7-sonnet-20250219',
-                object: 'error',
-            });
-        });
-        it('should handle invalid presetId', async () => {
-            const request = {
-                presetId: 'non-existent-preset',
-                messages: [{ role: 'user', content: 'Test' }],
-            };
-            const response = await service.sendMessage(request);
-            expect(response).toMatchObject({
-                object: 'error',
-                error: {
-                    message: 'Preset not found: non-existent-preset',
-                    code: 'PRESET_NOT_FOUND',
-                    type: 'validation_error',
-                },
-            });
-        });
-        it('should allow either presetId or providerId/modelId', async () => {
-            // Test with providerId/modelId (existing behavior)
-            const request1 = {
-                providerId: 'openai',
-                modelId: 'gpt-4.1',
-                messages: [{ role: 'user', content: 'Test' }],
-            };
-            const response1 = await service.sendMessage(request1);
-            expect(response1).toMatchObject({
-                provider: 'openai',
-                model: 'gpt-4.1',
-            });
-            // Test with presetId (new behavior)
-            const request2 = {
-                presetId: 'openai-gpt-4.1-default',
-                messages: [{ role: 'user', content: 'Test' }],
-            };
-            const response2 = await service.sendMessage(request2);
-            expect(response2).toMatchObject({
-                provider: 'openai',
-                model: 'gpt-4.1',
-            });
-        });
-        it('should prefer presetId when both are provided', async () => {
-            const request = {
-                presetId: 'anthropic-claude-3-5-sonnet-20241022-default',
-                providerId: 'openai', // These will be ignored
-                modelId: 'gpt-4.1', // These will be ignored
-                messages: [{ role: 'user', content: 'Test' }],
-            };
-            const response = await service.sendMessage(request);
-            // Should use the preset's provider/model
-            expect(response).toMatchObject({
-                provider: 'anthropic',
-                model: 'claude-3-5-sonnet-20241022',
-            });
-        });
-        it('should handle preset with model that was removed from config', async () => {
-            // Create a service with a preset pointing to a non-existent model
-            const customService = new LLMService_1.LLMService(mockApiKeyProvider, {
-                presets: [{
-                    id: 'broken-preset',
-                    displayName: 'Broken',
-                    providerId: 'openai',
-                    modelId: 'non-existent-model',
-                    settings: {},
-                }],
-                presetMode: 'extend',
-            });
-            const request = {
-                presetId: 'broken-preset',
-                messages: [{ role: 'user', content: 'Test' }],
-            };
-            const response = await customService.sendMessage(request);
-            expect(response).toMatchObject({
-                object: 'error',
-                error: {
-                    message: 'Model not found for preset: broken-preset',
-                    code: 'MODEL_NOT_FOUND',
-                    type: 'validation_error',
-                },
-            });
-        });
-    });
-    describe('Settings merge with presets', () => {
-        it('should apply preset settings correctly', async () => {
-            const request = {
-                presetId: 'google-gemini-2.5-flash',
-                messages: [{ role: 'user', content: 'Test' }],
-            };
-            const response = await service.sendMessage(request);
-            // The preset includes geminiSafetySettings which should be applied
-            expect(response).toMatchObject({
-                provider: 'gemini',
-                model: 'gemini-2.5-flash',
-            });
-        });
-        it('should merge preset reasoning settings with request settings', async () => {
-            const request = {
-                presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
-                messages: [{ role: 'user', content: 'Complex problem' }],
-                settings: {
-                    maxTokens: 2000, // Add to preset settings
-                },
-            };
-            const response = await service.sendMessage(request);
-            expect(response).toMatchObject({
-                provider: 'anthropic',
-                model: 'claude-3-7-sonnet-20250219',
-            });
-            // Settings would be merged internally with both reasoning enabled and maxTokens
-        });
-    });
-});
```
A final removed test declaration stub (`*.test.d.ts`):

```diff
@@ -1 +0,0 @@
-export {};
```
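The `sendMessage` suite above documents request-time preset resolution: `presetId` wins over an explicit `providerId`/`modelId` pair, request settings merge over the preset's settings, and a preset pointing at a model missing from the config yields a `MODEL_NOT_FOUND` validation error. A final sketch, again assuming the root export:

```typescript
// Sketch of request-time preset resolution per the removed sendMessage suite.
import { LLMService } from 'genai-lite';

async function main() {
  const service = new LLMService(async () => 'your-api-key');

  const response = await service.sendMessage({
    presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking', // built-in preset from the fixtures
    providerId: 'openai', // ignored: presetId takes precedence
    modelId: 'gpt-4.1',   // ignored for the same reason
    messages: [{ role: 'user', content: 'Complex problem' }],
    settings: { maxTokens: 2000 }, // merged over the preset's own settings
  });

  // Per the assertions above, even error responses carry the resolved
  // provider ('anthropic') and model ('claude-3-7-sonnet-20250219').
  console.log(response.provider, response.model, response.object);
}

main().catch(console.error);
```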