genai-lite 0.1.0 → 0.1.3
This diff shows the content of publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- package/README.md +194 -1
- package/dist/config/presets.json +222 -0
- package/dist/index.d.ts +2 -0
- package/dist/llm/LLMService.d.ts +25 -1
- package/dist/llm/LLMService.js +34 -1
- package/dist/llm/LLMService.presets.test.d.ts +1 -0
- package/dist/llm/LLMService.presets.test.js +210 -0
- package/dist/llm/LLMService.test.d.ts +1 -0
- package/dist/llm/LLMService.test.js +279 -0
- package/dist/llm/clients/AnthropicClientAdapter.test.d.ts +1 -0
- package/dist/llm/clients/AnthropicClientAdapter.test.js +263 -0
- package/dist/llm/clients/GeminiClientAdapter.test.d.ts +1 -0
- package/dist/llm/clients/GeminiClientAdapter.test.js +281 -0
- package/dist/llm/clients/MockClientAdapter.test.d.ts +1 -0
- package/dist/llm/clients/MockClientAdapter.test.js +240 -0
- package/dist/llm/clients/OpenAIClientAdapter.test.d.ts +1 -0
- package/dist/llm/clients/OpenAIClientAdapter.test.js +248 -0
- package/dist/llm/clients/adapterErrorUtils.test.d.ts +1 -0
- package/dist/llm/clients/adapterErrorUtils.test.js +123 -0
- package/dist/llm/config.test.d.ts +1 -0
- package/dist/llm/config.test.js +159 -0
- package/dist/providers/fromEnvironment.test.d.ts +1 -0
- package/dist/providers/fromEnvironment.test.js +46 -0
- package/dist/types/presets.d.ts +19 -0
- package/dist/types/presets.js +2 -0
- package/dist/utils/index.d.ts +1 -0
- package/dist/utils/index.js +17 -0
- package/dist/utils/prompt.d.ts +6 -0
- package/dist/utils/prompt.js +55 -0
- package/dist/utils/prompt.test.d.ts +1 -0
- package/dist/utils/prompt.test.js +115 -0
- package/package.json +22 -4
- package/src/config/presets.json +222 -0
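
The headline change in this range is preset support in `LLMService`: a bundled `presets.json`, a `getPresets()` accessor, and constructor options for extending or replacing the defaults. Below is a minimal sketch of that surface, inferred from the test expectations in the diff that follows — the preset field names come directly from the tests, while the `fromEnvironment` root export is an assumption based on the `index.d.ts` change:

```ts
// Sketch only: inferred from the diffed tests, not from package docs.
import { LLMService, fromEnvironment } from 'genai-lite'; // fromEnvironment export assumed

const service = new LLMService(fromEnvironment, {
  presets: [
    {
      id: 'my-preset',              // hypothetical preset, for illustration
      displayName: 'My Preset',
      providerId: 'openai',
      modelId: 'gpt-4.1',
      settings: { temperature: 0.2, maxTokens: 1000 },
    },
  ],
  presetMode: 'extend', // the default; 'replace' drops the bundled presets
});

// Bundled presets plus the custom one; same-id customs override defaults.
console.log(service.getPresets().map(p => p.id));
```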
package/dist/llm/LLMService.presets.test.js

```diff
@@ -0,0 +1,210 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+const LLMService_1 = require("./LLMService");
+const presets_json_1 = __importDefault(require("../config/presets.json"));
+describe('LLMService Presets', () => {
+    let mockApiKeyProvider;
+    beforeEach(() => {
+        jest.clearAllMocks();
+        mockApiKeyProvider = jest.fn().mockResolvedValue('test-api-key');
+    });
+    describe('Default behavior', () => {
+        it('should load default presets when no options provided', async () => {
+            const service = new LLMService_1.LLMService(mockApiKeyProvider);
+            const presets = service.getPresets();
+            expect(presets).toHaveLength(presets_json_1.default.length);
+            expect(presets).toEqual(expect.arrayContaining(presets_json_1.default.map(preset => expect.objectContaining({
+                id: preset.id,
+                displayName: preset.displayName,
+                providerId: preset.providerId,
+                modelId: preset.modelId
+            }))));
+        });
+        it('should return a copy of presets to prevent external modification', async () => {
+            const service = new LLMService_1.LLMService(mockApiKeyProvider);
+            const presets1 = service.getPresets();
+            const presets2 = service.getPresets();
+            expect(presets1).not.toBe(presets2); // Different array instances
+            expect(presets1).toEqual(presets2); // Same content
+            // Modifying returned array should not affect service
+            presets1.push({
+                id: 'test-preset',
+                displayName: 'Test',
+                providerId: 'openai',
+                modelId: 'gpt-4',
+                settings: {}
+            });
+            const presets3 = service.getPresets();
+            expect(presets3).toHaveLength(presets_json_1.default.length);
+        });
+    });
+    describe('Extend mode', () => {
+        it('should add new presets to defaults in extend mode', async () => {
+            const customPresets = [
+                {
+                    id: 'custom-preset-1',
+                    displayName: 'Custom Preset 1',
+                    providerId: 'openai',
+                    modelId: 'gpt-4',
+                    settings: { temperature: 0.5 }
+                }
+            ];
+            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
+                presets: customPresets,
+                presetMode: 'extend'
+            });
+            const presets = service.getPresets();
+            expect(presets).toHaveLength(presets_json_1.default.length + 1);
+            expect(presets).toContainEqual(expect.objectContaining({
+                id: 'custom-preset-1',
+                displayName: 'Custom Preset 1'
+            }));
+        });
+        it('should override default presets with same ID in extend mode', async () => {
+            const existingPresetId = presets_json_1.default[0].id;
+            const customPresets = [
+                {
+                    id: existingPresetId,
+                    displayName: 'Overridden Preset',
+                    providerId: 'anthropic',
+                    modelId: 'claude-3-5-sonnet-20241022',
+                    settings: { temperature: 0.8 }
+                }
+            ];
+            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
+                presets: customPresets,
+                presetMode: 'extend'
+            });
+            const presets = service.getPresets();
+            const overriddenPreset = presets.find(p => p.id === existingPresetId);
+            expect(presets).toHaveLength(presets_json_1.default.length);
+            expect(overriddenPreset).toBeDefined();
+            expect(overriddenPreset?.displayName).toBe('Overridden Preset');
+            expect(overriddenPreset?.providerId).toBe('anthropic');
+        });
+        it('should use extend mode by default when mode not specified', async () => {
+            const customPresets = [
+                {
+                    id: 'custom-preset-default',
+                    displayName: 'Custom Default',
+                    providerId: 'gemini',
+                    modelId: 'gemini-2.0-flash',
+                    settings: { temperature: 0.3 }
+                }
+            ];
+            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
+                presets: customPresets
+                // presetMode not specified, should default to 'extend'
+            });
+            const presets = service.getPresets();
+            expect(presets).toHaveLength(presets_json_1.default.length + 1);
+        });
+    });
+    describe('Replace mode', () => {
+        it('should use only custom presets in replace mode', async () => {
+            const customPresets = [
+                {
+                    id: 'replace-preset-1',
+                    displayName: 'Replace Preset 1',
+                    providerId: 'openai',
+                    modelId: 'gpt-4',
+                    settings: { temperature: 0.5 }
+                },
+                {
+                    id: 'replace-preset-2',
+                    displayName: 'Replace Preset 2',
+                    providerId: 'anthropic',
+                    modelId: 'claude-3-5-sonnet-20241022',
+                    settings: { temperature: 0.3 }
+                }
+            ];
+            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
+                presets: customPresets,
+                presetMode: 'replace'
+            });
+            const presets = service.getPresets();
+            expect(presets).toHaveLength(2);
+            expect(presets).toEqual(expect.arrayContaining([
+                expect.objectContaining({ id: 'replace-preset-1' }),
+                expect.objectContaining({ id: 'replace-preset-2' })
+            ]));
+            // Should not contain any default presets
+            const defaultPresetIds = presets_json_1.default.map(p => p.id);
+            const actualPresetIds = presets.map(p => p.id);
+            expect(actualPresetIds).not.toContain(expect.arrayContaining(defaultPresetIds));
+        });
+        it('should return empty array when replace mode with no custom presets', async () => {
+            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
+                presets: [],
+                presetMode: 'replace'
+            });
+            const presets = service.getPresets();
+            expect(presets).toHaveLength(0);
+        });
+        it('should handle undefined presets array in replace mode', async () => {
+            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
+                presetMode: 'replace'
+                // presets not provided
+            });
+            const presets = service.getPresets();
+            expect(presets).toHaveLength(0);
+        });
+    });
+    describe('Edge cases', () => {
+        it('should handle duplicate IDs within custom presets', async () => {
+            const customPresets = [
+                {
+                    id: 'duplicate-id',
+                    displayName: 'First Preset',
+                    providerId: 'openai',
+                    modelId: 'gpt-4',
+                    settings: { temperature: 0.5 }
+                },
+                {
+                    id: 'duplicate-id',
+                    displayName: 'Second Preset',
+                    providerId: 'anthropic',
+                    modelId: 'claude-3-5-sonnet-20241022',
+                    settings: { temperature: 0.3 }
+                }
+            ];
+            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
+                presets: customPresets,
+                presetMode: 'replace'
+            });
+            const presets = service.getPresets();
+            const duplicatePresets = presets.filter(p => p.id === 'duplicate-id');
+            // Last one should win
+            expect(duplicatePresets).toHaveLength(1);
+            expect(duplicatePresets[0].displayName).toBe('Second Preset');
+        });
+        it('should handle presets with complex settings including gemini safety settings', async () => {
+            const customPresets = [
+                {
+                    id: 'gemini-complex',
+                    displayName: 'Gemini Complex',
+                    providerId: 'gemini',
+                    modelId: 'gemini-2.0-flash',
+                    settings: {
+                        temperature: 0.5,
+                        maxTokens: 2000,
+                        geminiSafetySettings: [
+                            { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_NONE' },
+                            { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_MEDIUM_AND_ABOVE' }
+                        ]
+                    }
+                }
+            ];
+            const service = new LLMService_1.LLMService(mockApiKeyProvider, {
+                presets: customPresets,
+                presetMode: 'replace'
+            });
+            const presets = service.getPresets();
+            expect(presets).toHaveLength(1);
+            expect(presets[0].settings.geminiSafetySettings).toHaveLength(2);
+        });
+    });
+});
```
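
Taken together, the override, duplicate-id, and empty-`replace` cases above pin down the merge semantics: last preset with a given id wins, and `extend` layers custom presets over the bundled defaults. A short sketch of logic consistent with those expectations (an illustration, not the package's actual implementation):

```ts
// Merge semantics implied by the tests above; names are illustrative.
interface ModelPreset {
  id: string;
  displayName: string;
  providerId: string;
  modelId: string;
  settings: Record<string, unknown>;
}

function resolvePresets(
  defaults: ModelPreset[],
  custom: ModelPreset[] = [],          // undefined presets => [] in replace mode
  mode: 'extend' | 'replace' = 'extend',
): ModelPreset[] {
  const source = mode === 'replace' ? custom : [...defaults, ...custom];
  const byId = new Map<string, ModelPreset>();
  for (const preset of source) byId.set(preset.id, preset); // last one wins
  return [...byId.values()];
}
```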
package/dist/llm/LLMService.test.d.ts

```diff
@@ -0,0 +1 @@
+export {};
```
package/dist/llm/LLMService.test.js

```diff
@@ -0,0 +1,279 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+const LLMService_1 = require("./LLMService");
+describe('LLMService', () => {
+    let service;
+    let mockApiKeyProvider;
+    beforeEach(() => {
+        // Reset all mocks
+        jest.clearAllMocks();
+        // Create mock API key provider
+        mockApiKeyProvider = jest.fn(async (providerId) => `mock-key-for-${providerId}`);
+        // Create service instance
+        service = new LLMService_1.LLMService(mockApiKeyProvider);
+    });
+    describe('constructor and initialization', () => {
+        it('should initialize with the provided API key provider', () => {
+            expect(service).toBeDefined();
+            // The service should be ready to use
+        });
+        it('should lazy-load client adapters on first use', async () => {
+            mockApiKeyProvider.mockResolvedValueOnce('sk-test-key-12345678901234567890');
+            // First request should create the adapter
+            const request = {
+                providerId: 'openai',
+                modelId: 'gpt-4.1',
+                messages: [{ role: 'user', content: 'Hello' }]
+            };
+            await service.sendMessage(request);
+            // Verify API key provider was called
+            expect(mockApiKeyProvider).toHaveBeenCalledWith('openai');
+        });
+    });
+    describe('sendMessage', () => {
+        describe('request validation', () => {
+            it('should return validation error for unsupported provider', async () => {
+                const request = {
+                    providerId: 'unsupported-provider',
+                    modelId: 'some-model',
+                    messages: [{ role: 'user', content: 'Hello' }]
+                };
+                const response = await service.sendMessage(request);
+                expect(response.object).toBe('error');
+                const errorResponse = response;
+                expect(errorResponse.error.code).toBe('UNSUPPORTED_PROVIDER');
+                expect(errorResponse.error.message).toContain('Unsupported provider');
+            });
+            it('should return validation error for unsupported model', async () => {
+                const request = {
+                    providerId: 'openai',
+                    modelId: 'unsupported-model',
+                    messages: [{ role: 'user', content: 'Hello' }]
+                };
+                const response = await service.sendMessage(request);
+                expect(response.object).toBe('error');
+                const errorResponse = response;
+                expect(errorResponse.error.code).toBe('UNSUPPORTED_MODEL');
+                expect(errorResponse.error.message).toContain('Unsupported model');
+            });
+            it('should return validation error for empty messages', async () => {
+                const request = {
+                    providerId: 'openai',
+                    modelId: 'gpt-4.1',
+                    messages: []
+                };
+                const response = await service.sendMessage(request);
+                expect(response.object).toBe('error');
+                const errorResponse = response;
+                expect(errorResponse.error.code).toBe('INVALID_REQUEST');
+                expect(errorResponse.error.message).toContain('Request must contain at least one message');
+            });
+            it('should return validation error for invalid message role', async () => {
+                const request = {
+                    providerId: 'openai',
+                    modelId: 'gpt-4.1',
+                    messages: [{ role: 'invalid', content: 'Hello' }]
+                };
+                const response = await service.sendMessage(request);
+                expect(response.object).toBe('error');
+                const errorResponse = response;
+                expect(errorResponse.error.code).toBe('INVALID_MESSAGE_ROLE');
+                expect(errorResponse.error.message).toContain('Invalid message role');
+            });
+            it('should return validation error for empty message content', async () => {
+                const request = {
+                    providerId: 'openai',
+                    modelId: 'gpt-4.1',
+                    messages: [{ role: 'user', content: '' }]
+                };
+                const response = await service.sendMessage(request);
+                expect(response.object).toBe('error');
+                const errorResponse = response;
+                expect(errorResponse.error.code).toBe('INVALID_MESSAGE');
+                expect(errorResponse.error.message).toContain('Message at index 0 must have both');
+            });
+        });
+        describe('API key handling', () => {
+            it('should return error when API key provider returns null', async () => {
+                mockApiKeyProvider.mockResolvedValueOnce(null);
+                const request = {
+                    providerId: 'openai',
+                    modelId: 'gpt-4.1',
+                    messages: [{ role: 'user', content: 'Hello' }]
+                };
+                const response = await service.sendMessage(request);
+                expect(response.object).toBe('error');
+                const errorResponse = response;
+                expect(errorResponse.error.code).toBe('API_KEY_ERROR');
+                expect(errorResponse.error.message).toContain('API key for provider');
+            });
+            it('should return error when API key provider throws', async () => {
+                mockApiKeyProvider.mockRejectedValueOnce(new Error('Key provider error'));
+                const request = {
+                    providerId: 'openai',
+                    modelId: 'gpt-4.1',
+                    messages: [{ role: 'user', content: 'Hello' }]
+                };
+                const response = await service.sendMessage(request);
+                expect(response.object).toBe('error');
+                const errorResponse = response;
+                expect(errorResponse.error.code).toBe('PROVIDER_ERROR');
+                expect(errorResponse.error.message).toContain('Key provider error');
+            });
+            it('should return error for invalid API key format', async () => {
+                mockApiKeyProvider.mockResolvedValueOnce('invalid-key');
+                const request = {
+                    providerId: 'openai',
+                    modelId: 'gpt-4.1',
+                    messages: [{ role: 'user', content: 'Hello' }]
+                };
+                const response = await service.sendMessage(request);
+                // OpenAI adapter expects keys starting with 'sk-'
+                expect(response.object).toBe('error');
+                const errorResponse = response;
+                expect(errorResponse.error.code).toBe('INVALID_API_KEY');
+            });
+        });
+        describe('adapter routing', () => {
+            it('should route request to correct adapter based on provider', async () => {
+                mockApiKeyProvider.mockResolvedValueOnce('sk-test-key-12345678901234567890');
+                const request = {
+                    providerId: 'openai',
+                    modelId: 'gpt-4.1',
+                    messages: [{ role: 'user', content: 'Test routing' }]
+                };
+                const response = await service.sendMessage(request);
+                // This will fail with a network error since we're not mocking the actual API
+                expect(response.object).toBe('error');
+                const errorResponse = response;
+                // We should get a network error or similar since we're not mocking the HTTP request
+                expect(errorResponse.provider).toBe('openai');
+            });
+            it('should reuse existing adapter for same provider', async () => {
+                const request = {
+                    providerId: 'mock',
+                    modelId: 'mock-model',
+                    messages: [{ role: 'user', content: 'First request' }]
+                };
+                // First request
+                await service.sendMessage(request);
+                // Second request to same provider
+                request.messages = [{ role: 'user', content: 'Second request' }];
+                await service.sendMessage(request);
+                // API key provider should be called for each request with mock provider
+                expect(mockApiKeyProvider).toHaveBeenCalledTimes(0); // Mock provider doesn't need API keys
+            });
+        });
+        describe('settings management', () => {
+            it('should apply default settings when none provided', async () => {
+                mockApiKeyProvider.mockResolvedValueOnce('sk-test-key-12345678901234567890');
+                const request = {
+                    providerId: 'openai',
+                    modelId: 'gpt-4.1',
+                    messages: [{ role: 'user', content: 'Hello' }]
+                };
+                const response = await service.sendMessage(request);
+                // We'll get a network error but can still verify the request was attempted
+                expect(response.object).toBe('error');
+                expect(mockApiKeyProvider).toHaveBeenCalledWith('openai');
+            });
+            it('should merge user settings with defaults', async () => {
+                mockApiKeyProvider.mockResolvedValueOnce('sk-test-key-12345678901234567890');
+                const request = {
+                    providerId: 'openai',
+                    modelId: 'gpt-4.1',
+                    messages: [{ role: 'user', content: 'Hello' }],
+                    settings: {
+                        temperature: 0.9,
+                        maxTokens: 500
+                    }
+                };
+                const response = await service.sendMessage(request);
+                // We'll get a network error but the settings should still be validated
+                expect(response.object).toBe('error');
+            });
+            it('should validate temperature setting', async () => {
+                const request = {
+                    providerId: 'openai',
+                    modelId: 'gpt-4.1',
+                    messages: [{ role: 'user', content: 'Hello' }],
+                    settings: {
+                        temperature: 2.5 // Out of range
+                    }
+                };
+                const response = await service.sendMessage(request);
+                expect(response.object).toBe('error');
+                const errorResponse = response;
+                expect(errorResponse.error.code).toBe('INVALID_SETTINGS');
+                expect(errorResponse.error.message).toContain('temperature must be a number between');
+            });
+            it('should validate maxTokens setting', async () => {
+                const request = {
+                    providerId: 'openai',
+                    modelId: 'gpt-4.1',
+                    messages: [{ role: 'user', content: 'Hello' }],
+                    settings: {
+                        maxTokens: 0 // Invalid
+                    }
+                };
+                const response = await service.sendMessage(request);
+                expect(response.object).toBe('error');
+                const errorResponse = response;
+                expect(errorResponse.error.code).toBe('INVALID_SETTINGS');
+                expect(errorResponse.error.message).toContain('maxTokens must be an integer between');
+            });
+            it('should validate topP setting', async () => {
+                const request = {
+                    providerId: 'openai',
+                    modelId: 'gpt-4.1',
+                    messages: [{ role: 'user', content: 'Hello' }],
+                    settings: {
+                        topP: -0.1 // Out of range
+                    }
+                };
+                const response = await service.sendMessage(request);
+                expect(response.object).toBe('error');
+                const errorResponse = response;
+                expect(errorResponse.error.code).toBe('INVALID_SETTINGS');
+                expect(errorResponse.error.message).toContain('topP must be a number between 0 and 1');
+            });
+        });
+    });
+    describe('getProviders', () => {
+        it('should return all supported providers', async () => {
+            const providers = await service.getProviders();
+            expect(providers).toHaveLength(4);
+            expect(providers.find(p => p.id === 'openai')).toBeDefined();
+            expect(providers.find(p => p.id === 'anthropic')).toBeDefined();
+            expect(providers.find(p => p.id === 'gemini')).toBeDefined();
+            expect(providers.find(p => p.id === 'mistral')).toBeDefined();
+        });
+        it('should include provider metadata', async () => {
+            const providers = await service.getProviders();
+            const openai = providers.find(p => p.id === 'openai');
+            expect(openai).toMatchObject({
+                id: 'openai',
+                name: 'OpenAI'
+            });
+        });
+    });
+    describe('getModels', () => {
+        it('should return all models for a provider', async () => {
+            const models = await service.getModels('openai');
+            expect(models.length).toBeGreaterThan(0);
+            expect(models.some(m => m.id.includes('gpt-4'))).toBe(true);
+            expect(models.some(m => m.id.includes('o4-mini'))).toBe(true);
+        });
+        it('should return empty array for invalid provider', async () => {
+            const models = await service.getModels('invalid-provider');
+            expect(models).toEqual([]);
+        });
+        it('should include model metadata', async () => {
+            const models = await service.getModels('openai');
+            const gpt4 = models.find(m => m.id === 'gpt-4.1');
+            expect(gpt4).toBeDefined();
+            expect(gpt4.contextWindow).toBeGreaterThan(0);
+            expect(gpt4.maxTokens).toBeGreaterThan(0);
+        });
+    });
+});
```
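
Note the error contract these tests rely on: `sendMessage` resolves rather than throws, and failures are signalled by `object === 'error'` together with an `error.code`/`error.message` pair and the originating `provider`. A consumer-side sketch of that contract follows — the error fields mirror the assertions above, while the `fromEnvironment` import and the success branch's payload shape are assumptions not shown in this diff:

```ts
import { LLMService, fromEnvironment } from 'genai-lite'; // fromEnvironment export assumed

const service = new LLMService(fromEnvironment);
const response = await service.sendMessage({
  providerId: 'openai',
  modelId: 'gpt-4.1',
  messages: [{ role: 'user', content: 'Hello' }],
});

if (response.object === 'error') {
  // Codes seen in the tests: UNSUPPORTED_PROVIDER, UNSUPPORTED_MODEL,
  // INVALID_REQUEST, INVALID_MESSAGE_ROLE, INVALID_MESSAGE, API_KEY_ERROR,
  // PROVIDER_ERROR, INVALID_API_KEY, INVALID_SETTINGS.
  console.error(`[${response.provider}] ${response.error.code}: ${response.error.message}`);
} else {
  console.log(response); // successful completion payload (shape not shown in this diff)
}
```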
package/dist/llm/clients/AnthropicClientAdapter.test.d.ts

```diff
@@ -0,0 +1 @@
+export {};
```