genai-lite 0.1.1 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +99 -1
- package/dist/config/presets.json +222 -0
- package/dist/index.d.ts +2 -0
- package/dist/llm/LLMService.d.ts +25 -1
- package/dist/llm/LLMService.js +34 -1
- package/dist/llm/LLMService.presets.test.d.ts +1 -0
- package/dist/llm/LLMService.presets.test.js +210 -0
- package/dist/llm/LLMService.test.d.ts +1 -0
- package/dist/llm/LLMService.test.js +279 -0
- package/dist/llm/clients/AnthropicClientAdapter.test.d.ts +1 -0
- package/dist/llm/clients/AnthropicClientAdapter.test.js +263 -0
- package/dist/llm/clients/GeminiClientAdapter.test.d.ts +1 -0
- package/dist/llm/clients/GeminiClientAdapter.test.js +281 -0
- package/dist/llm/clients/MockClientAdapter.test.d.ts +1 -0
- package/dist/llm/clients/MockClientAdapter.test.js +240 -0
- package/dist/llm/clients/OpenAIClientAdapter.test.d.ts +1 -0
- package/dist/llm/clients/OpenAIClientAdapter.test.js +248 -0
- package/dist/llm/clients/adapterErrorUtils.test.d.ts +1 -0
- package/dist/llm/clients/adapterErrorUtils.test.js +123 -0
- package/dist/llm/config.test.d.ts +1 -0
- package/dist/llm/config.test.js +159 -0
- package/dist/providers/fromEnvironment.test.d.ts +1 -0
- package/dist/providers/fromEnvironment.test.js +46 -0
- package/dist/types/presets.d.ts +19 -0
- package/dist/types/presets.js +2 -0
- package/dist/utils/prompt.test.d.ts +1 -0
- package/dist/utils/prompt.test.js +115 -0
- package/package.json +9 -4
- package/src/config/presets.json +222 -0
|
@@ -0,0 +1,281 @@
|
|
|
1
|
+
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
/**
 * Unit tests for GeminiClientAdapter (compiled Jest output).
 *
 * The '@google/genai' SDK is mocked entirely, so no network traffic occurs:
 * each test installs a fake `models.generateContent` implementation and then
 * asserts on (a) how the adapter formats outgoing requests and (b) how it
 * normalizes Gemini responses/errors into the library's common response shape.
 *
 * Fix applied: removed the unused locals `mockGetGenerativeModel` and
 * `mockModel` — leftovers from the older `getGenerativeModel(...)` SDK
 * surface; the current mock only ever wires up `models.generateContent`.
 */
const genai_1 = require("@google/genai");
const GeminiClientAdapter_1 = require("./GeminiClientAdapter");
const types_1 = require("./types");
// Mock the entire '@google/genai' module
jest.mock('@google/genai');
// Cast the mocked module to allow setting up mock implementations
const MockGoogleGenAI = genai_1.GoogleGenAI;
describe('GeminiClientAdapter', () => {
    let adapter;
    let mockGenerateContent;
    let basicRequest;
    beforeEach(() => {
        // Reset mocks before each test
        MockGoogleGenAI.mockClear();
        mockGenerateContent = jest.fn();
        // Mock the models.generateContent method
        MockGoogleGenAI.mockImplementation(() => ({
            models: {
                generateContent: mockGenerateContent
            }
        }));
        adapter = new GeminiClientAdapter_1.GeminiClientAdapter();
        // Baseline request shared by every test; individual tests mutate
        // `messages`/`settings` in place before calling sendMessage.
        basicRequest = {
            providerId: 'gemini',
            modelId: 'gemini-2.5-pro',
            messages: [{ role: 'user', content: 'Hello' }],
            settings: {
                temperature: 0.7,
                maxTokens: 100,
                topP: 1,
                frequencyPenalty: 0,
                presencePenalty: 0,
                stopSequences: [],
                user: 'test-user',
                geminiSafetySettings: [],
                supportsSystemMessage: true
            }
        };
    });
    describe('sendMessage', () => {
        it('should format the request correctly and call the Gemini API', async () => {
            // Setup mock response - Gemini API returns the raw response without nesting
            mockGenerateContent.mockResolvedValueOnce({
                text: () => 'Hello! How can I help you today?',
                candidates: [{
                        finishReason: 'STOP',
                        content: {
                            parts: [{ text: 'Hello! How can I help you today?' }],
                            role: 'model'
                        }
                    }],
                usageMetadata: {
                    promptTokenCount: 10,
                    candidatesTokenCount: 20,
                    totalTokenCount: 30
                }
            });
            const response = await adapter.sendMessage(basicRequest, 'test-api-key');
            // Verify GoogleGenAI was instantiated with the API key
            expect(MockGoogleGenAI).toHaveBeenCalledWith({ apiKey: 'test-api-key' });
            // Verify generateContent was called
            expect(mockGenerateContent).toHaveBeenCalledTimes(1);
            const callArgs = mockGenerateContent.mock.calls[0][0];
            expect(callArgs.model).toBe('gemini-2.5-pro');
            expect(callArgs.contents).toHaveLength(1);
            expect(callArgs.contents[0].role).toBe('user');
            // Verify the response was normalized to the OpenAI-style shape
            expect(response.object).toBe('chat.completion');
            const successResponse = response;
            expect(successResponse.provider).toBe('gemini');
            expect(successResponse.model).toBe('gemini-2.5-pro');
            expect(successResponse.choices[0].message.content).toBe('Hello! How can I help you today?');
            expect(successResponse.usage?.total_tokens).toBe(30);
        });
        it('should handle system messages correctly', async () => {
            basicRequest.messages = [
                { role: 'system', content: 'You are a helpful assistant.' },
                { role: 'user', content: 'Hello' }
            ];
            mockGenerateContent.mockResolvedValueOnce({
                text: () => 'Hello!',
                candidates: [{
                        finishReason: 'STOP',
                        content: { parts: [{ text: 'Hello!' }], role: 'model' }
                    }],
                usageMetadata: { promptTokenCount: 15, candidatesTokenCount: 5, totalTokenCount: 20 }
            });
            await adapter.sendMessage(basicRequest, 'test-api-key');
            // System message should be passed as systemInstruction, not as a turn
            expect(mockGenerateContent).toHaveBeenCalledWith({
                model: 'gemini-2.5-pro',
                contents: [{
                        role: 'user',
                        parts: [{ text: 'Hello' }]
                    }],
                config: {
                    temperature: 0.7,
                    maxOutputTokens: 100,
                    topP: 1,
                    safetySettings: [],
                    systemInstruction: 'You are a helpful assistant.'
                }
            });
        });
        it('should handle multi-turn conversations with role mapping', async () => {
            basicRequest.messages = [
                { role: 'user', content: 'Hello' },
                { role: 'assistant', content: 'Hi there!' },
                { role: 'user', content: 'How are you?' }
            ];
            mockGenerateContent.mockResolvedValueOnce({
                text: () => "I'm doing well!",
                candidates: [{
                        finishReason: 'STOP',
                        content: { parts: [{ text: "I'm doing well!" }], role: 'model' }
                    }],
                usageMetadata: { promptTokenCount: 20, candidatesTokenCount: 10, totalTokenCount: 30 }
            });
            await adapter.sendMessage(basicRequest, 'test-api-key');
            // Verify role mapping: assistant -> model
            expect(mockGenerateContent).toHaveBeenCalledWith({
                model: 'gemini-2.5-pro',
                contents: [
                    { role: 'user', parts: [{ text: 'Hello' }] },
                    { role: 'model', parts: [{ text: 'Hi there!' }] },
                    { role: 'user', parts: [{ text: 'How are you?' }] }
                ],
                config: {
                    temperature: 0.7,
                    maxOutputTokens: 100,
                    topP: 1,
                    safetySettings: []
                }
            });
        });
        it('should handle stop sequences', async () => {
            basicRequest.settings.stopSequences = ['END', 'STOP'];
            mockGenerateContent.mockResolvedValueOnce({
                text: () => 'Response',
                candidates: [{
                        finishReason: 'STOP',
                        content: { parts: [{ text: 'Response' }], role: 'model' }
                    }]
            });
            await adapter.sendMessage(basicRequest, 'test-api-key');
            // Stop sequences are forwarded inside `config`
            expect(mockGenerateContent).toHaveBeenCalledWith(expect.objectContaining({
                config: expect.objectContaining({
                    stopSequences: ['END', 'STOP']
                })
            }));
        });
        it('should handle safety settings', async () => {
            basicRequest.settings.geminiSafetySettings = [
                { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_NONE' }
            ];
            mockGenerateContent.mockResolvedValueOnce({
                text: () => 'Response',
                candidates: [{
                        finishReason: 'STOP',
                        content: { parts: [{ text: 'Response' }], role: 'model' }
                    }]
            });
            await adapter.sendMessage(basicRequest, 'test-api-key');
            // Provider-specific safety settings are forwarded untouched
            expect(mockGenerateContent).toHaveBeenCalledWith(expect.objectContaining({
                config: expect.objectContaining({
                    safetySettings: [
                        { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_NONE' }
                    ]
                })
            }));
        });
        it('should map finish reasons correctly', async () => {
            // Table-driven check of Gemini finishReason -> normalized finish_reason
            const finishReasons = [
                { gemini: 'STOP', expected: 'stop' },
                { gemini: 'MAX_TOKENS', expected: 'length' },
                { gemini: 'SAFETY', expected: 'content_filter' },
                { gemini: 'RECITATION', expected: 'content_filter' },
                { gemini: 'OTHER', expected: 'other' },
                { gemini: 'UNKNOWN', expected: 'other' }
            ];
            for (const { gemini, expected } of finishReasons) {
                mockGenerateContent.mockResolvedValueOnce({
                    text: () => 'Response',
                    candidates: [{
                            finishReason: gemini,
                            content: { parts: [{ text: 'Response' }], role: 'model' }
                        }]
                });
                const response = await adapter.sendMessage(basicRequest, 'test-api-key');
                const successResponse = response;
                expect(successResponse.choices[0].finish_reason).toBe(expected);
            }
        });
        describe('error handling', () => {
            // Each case rejects the mocked API call and asserts the adapter maps
            // the raw error onto a stable ADAPTER_ERROR_CODES value + error type.
            it('should handle API key errors', async () => {
                const apiError = new Error('API key not valid');
                mockGenerateContent.mockRejectedValueOnce(apiError);
                const response = await adapter.sendMessage(basicRequest, 'invalid-key');
                const errorResponse = response;
                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.INVALID_API_KEY);
                expect(errorResponse.error.type).toBe('authentication_error');
            });
            it('should handle safety/content filter errors', async () => {
                const apiError = new Error('Response was blocked due to safety reasons');
                mockGenerateContent.mockRejectedValueOnce(apiError);
                const response = await adapter.sendMessage(basicRequest, 'test-key');
                const errorResponse = response;
                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.CONTENT_FILTER);
                expect(errorResponse.error.type).toBe('content_filter_error');
            });
            it('should handle quota exceeded errors', async () => {
                const apiError = new Error('API rate limit exceeded');
                mockGenerateContent.mockRejectedValueOnce(apiError);
                const response = await adapter.sendMessage(basicRequest, 'test-key');
                const errorResponse = response;
                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.RATE_LIMIT_EXCEEDED);
                expect(errorResponse.error.type).toBe('rate_limit_error');
            });
            it('should handle model not found errors', async () => {
                const apiError = new Error('Model not found');
                // HTTP-style status attached to the error object, as the SDK does
                apiError.status = 404;
                mockGenerateContent.mockRejectedValueOnce(apiError);
                const response = await adapter.sendMessage(basicRequest, 'test-key');
                const errorResponse = response;
                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.MODEL_NOT_FOUND);
                expect(errorResponse.error.type).toBe('invalid_request_error');
            });
            it('should handle permission errors', async () => {
                const apiError = new Error('Invalid API key provided');
                mockGenerateContent.mockRejectedValueOnce(apiError);
                const response = await adapter.sendMessage(basicRequest, 'test-key');
                const errorResponse = response;
                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.INVALID_API_KEY);
                expect(errorResponse.error.type).toBe('authentication_error');
            });
            it('should handle generic errors', async () => {
                const apiError = new Error('Unknown error');
                mockGenerateContent.mockRejectedValueOnce(apiError);
                const response = await adapter.sendMessage(basicRequest, 'test-key');
                const errorResponse = response;
                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.UNKNOWN_ERROR);
                expect(errorResponse.error.message).toContain('Unknown error');
            });
            it('should handle empty response as success with empty content', async () => {
                mockGenerateContent.mockResolvedValueOnce({
                    text: () => '',
                    candidates: []
                });
                const response = await adapter.sendMessage(basicRequest, 'test-key');
                // Empty responses are returned as success with empty content
                const successResponse = response;
                expect(successResponse.object).toBe('chat.completion');
                expect(successResponse.choices[0].message.content).toBe('');
            });
        });
    });
    describe('validateApiKey', () => {
        it('should validate API key format', () => {
            // Gemini API keys must start with 'AIza' and be at least 35 chars
            expect(adapter.validateApiKey('AIzaSyABCDEFGHIJKLMNOPQRSTUVWXYZ123456')).toBe(true);
            expect(adapter.validateApiKey('AIzaABCDEFGHIJKLMNOPQRSTUVWXYZ12345')).toBe(true);
            // Invalid formats
            expect(adapter.validateApiKey('')).toBe(false);
            expect(adapter.validateApiKey('short')).toBe(false); // Too short
            expect(adapter.validateApiKey('abcdef123456')).toBe(false); // Wrong prefix
        });
    });
    describe('getAdapterInfo', () => {
        it('should return correct adapter information', () => {
            const info = adapter.getAdapterInfo();
            expect(info.providerId).toBe('gemini');
            expect(info.name).toBe('Gemini Client Adapter');
            expect(info.version).toBeDefined();
            // supportedModels is not part of the interface
        });
    });
});
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// Marks this declaration file as a module; it intentionally exports nothing.
export {};
|
|
@@ -0,0 +1,240 @@
|
|
|
1
|
+
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
/**
 * Unit tests for MockClientAdapter (compiled Jest output).
 *
 * The mock adapter synthesizes responses entirely in-process: it dispatches on
 * the first message's content (trigger strings such as 'error_rate_limit' or
 * 'test_temperature') and on request settings. These tests pin down that
 * dispatch behavior plus API-key validation and adapter metadata.
 */
const MockClientAdapter_1 = require("./MockClientAdapter");
const types_1 = require("./types");
describe('MockClientAdapter', () => {
    let mockAdapter;
    let request;
    beforeEach(() => {
        mockAdapter = new MockClientAdapter_1.MockClientAdapter('openai');
        // Baseline request; individual tests tweak messages/settings in place.
        request = {
            providerId: 'openai',
            modelId: 'mock-model',
            messages: [{ role: 'user', content: 'Hello' }],
            settings: {
                temperature: 0.7,
                maxTokens: 100,
                topP: 1,
                frequencyPenalty: 0,
                presencePenalty: 0,
                stopSequences: [],
                user: 'test-user',
                geminiSafetySettings: [],
                supportsSystemMessage: true
            }
        };
    });
    describe('sendMessage', () => {
        it('should return a successful response for basic messages', async () => {
            const res = await mockAdapter.sendMessage(request, 'test-key');
            expect(res.object).toBe('chat.completion');
            expect('error' in res).toBe(false);
            const success = res;
            expect(success.provider).toBe('openai');
            expect(success.model).toBe('mock-model');
            expect(success.choices).toHaveLength(1);
            expect(success.choices[0].message.role).toBe('assistant');
            expect(success.choices[0].message.content).toContain('Hello');
            expect(success.usage).toBeDefined();
            expect(success.usage?.total_tokens).toBeGreaterThan(0);
        });
        describe('error simulations', () => {
            // Each trigger string makes the adapter fabricate a specific error
            // shape; assertions pin code, type, and HTTP-style status.
            it('should simulate invalid API key error', async () => {
                request.messages[0].content = 'error_invalid_key';
                const res = await mockAdapter.sendMessage(request, 'test-key');
                expect(res.object).toBe('error');
                const failure = res;
                expect(failure.error.code).toBe(types_1.ADAPTER_ERROR_CODES.INVALID_API_KEY);
                expect(failure.error.type).toBe('authentication_error');
                expect(failure.error.status).toBe(401);
            });
            it('should simulate rate limit error', async () => {
                request.messages[0].content = 'error_rate_limit';
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const failure = res;
                expect(failure.error.code).toBe(types_1.ADAPTER_ERROR_CODES.RATE_LIMIT_EXCEEDED);
                expect(failure.error.type).toBe('rate_limit_error');
                expect(failure.error.status).toBe(429);
            });
            it('should simulate insufficient credits error', async () => {
                request.messages[0].content = 'error_credits';
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const failure = res;
                expect(failure.error.code).toBe(types_1.ADAPTER_ERROR_CODES.INSUFFICIENT_CREDITS);
                expect(failure.error.type).toBe('rate_limit_error');
                expect(failure.error.status).toBe(402);
            });
            it('should simulate context length exceeded error', async () => {
                request.messages[0].content = 'error_context_length';
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const failure = res;
                expect(failure.error.code).toBe(types_1.ADAPTER_ERROR_CODES.CONTEXT_LENGTH_EXCEEDED);
                expect(failure.error.type).toBe('invalid_request_error');
                expect(failure.error.status).toBe(400);
            });
            it('should simulate model not found error', async () => {
                request.messages[0].content = 'error_model_not_found';
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const failure = res;
                expect(failure.error.code).toBe(types_1.ADAPTER_ERROR_CODES.MODEL_NOT_FOUND);
                expect(failure.error.type).toBe('invalid_request_error');
                expect(failure.error.status).toBe(404);
            });
            it('should simulate content filter error', async () => {
                request.messages[0].content = 'error_content_filter';
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const failure = res;
                expect(failure.error.code).toBe(types_1.ADAPTER_ERROR_CODES.CONTENT_FILTER);
                expect(failure.error.type).toBe('content_filter_error');
                expect(failure.error.status).toBe(400);
            });
            it('should simulate network error', async () => {
                request.messages[0].content = 'error_network';
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const failure = res;
                expect(failure.error.code).toBe(types_1.ADAPTER_ERROR_CODES.NETWORK_ERROR);
                expect(failure.error.type).toBe('connection_error');
                // Status is not included when it's 0
                expect(failure.error.status).toBeUndefined();
            });
            it('should simulate generic provider error', async () => {
                request.messages[0].content = 'error_generic';
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const failure = res;
                expect(failure.error.code).toBe(types_1.ADAPTER_ERROR_CODES.PROVIDER_ERROR);
                expect(failure.error.type).toBe('server_error');
                expect(failure.error.status).toBe(500);
            });
        });
        describe('temperature effects', () => {
            // The 'test_temperature' trigger makes the reply describe the
            // temperature band the request falls into.
            it('should generate low temperature response', async () => {
                request.messages[0].content = 'test_temperature';
                request.settings.temperature = 0.2;
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const success = res;
                expect(success.choices[0].message.content).toContain('Low temperature');
                expect(success.choices[0].message.content).toContain('deterministic');
            });
            it('should generate high temperature response', async () => {
                request.messages[0].content = 'test_temperature';
                request.settings.temperature = 0.9;
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const success = res;
                expect(success.choices[0].message.content).toContain('High temperature');
                expect(success.choices[0].message.content).toContain('creative');
            });
            it('should generate moderate temperature response', async () => {
                request.messages[0].content = 'test_temperature';
                request.settings.temperature = 0.5;
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const success = res;
                expect(success.choices[0].message.content).toContain('Moderate temperature');
                expect(success.choices[0].message.content).toContain('balances');
            });
        });
        describe('settings effects', () => {
            it('should respect maxTokens limit', async () => {
                request.messages[0].content = 'long detailed response please';
                request.settings.maxTokens = 10; // Very low limit
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const success = res;
                const text = success.choices[0].message.content;
                const wordCount = text.split(' ').length;
                expect(wordCount).toBeLessThanOrEqual(10); // Should be truncated
                expect(text).toContain('...');
            });
            it('should respect stop sequences', async () => {
                request.messages[0].content = 'Hello world! This is a test. STOP More content here.';
                request.settings.stopSequences = ['STOP'];
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const success = res;
                const text = success.choices[0].message.content;
                expect(text).not.toContain('More content here');
            });
            it('should generate settings test response', async () => {
                request.messages[0].content = 'test_settings';
                request.settings.stopSequences = []; // Empty stop sequences to avoid truncation
                request.settings.frequencyPenalty = 0.5;
                request.settings.presencePenalty = -0.5;
                request.settings.maxTokens = 500;
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const success = res;
                const text = success.choices[0].message.content;
                expect(text).toContain('Temperature: 0.7');
                expect(text).toContain('Max Tokens: 500');
                expect(text).toContain('Stop Sequences: none'); // When empty, it shows "none"
                expect(text).toContain('Frequency Penalty: 0.5');
                expect(text).toContain('Presence Penalty: -0.5');
            });
        });
        describe('content-based responses', () => {
            it('should generate greeting response', async () => {
                request.messages[0].content = 'hello';
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const success = res;
                expect(success.choices[0].message.content).toContain('Hello!');
                expect(success.choices[0].message.content).toContain('mock LLM assistant');
            });
            it('should generate weather response', async () => {
                request.messages[0].content = 'What is the weather today?';
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const success = res;
                expect(success.choices[0].message.content).toContain('weather');
                expect(success.choices[0].message.content).toContain('72°F');
            });
            it('should generate code response', async () => {
                request.messages[0].content = 'Show me some code';
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const success = res;
                expect(success.choices[0].message.content).toContain('```javascript');
                expect(success.choices[0].message.content).toContain('mockFunction');
            });
            it('should generate long response', async () => {
                request.messages[0].content = 'Give me a long detailed explanation';
                request.settings.maxTokens = 1000; // Allow long response
                const res = await mockAdapter.sendMessage(request, 'test-key');
                const success = res;
                const text = success.choices[0].message.content;
                expect(text.length).toBeGreaterThan(500);
                expect(text).toContain('Error Simulation');
                expect(text).toContain('Variable Length');
            });
        });
        it('should generate unique IDs for each response', async () => {
            const first = await mockAdapter.sendMessage(request, 'test-key');
            const second = await mockAdapter.sendMessage(request, 'test-key');
            expect(first.id).not.toBe(second.id);
        });
        it('should calculate token usage', async () => {
            request.messages[0].content = 'Calculate tokens for this message';
            const res = await mockAdapter.sendMessage(request, 'test-key');
            const success = res;
            expect(success.usage?.prompt_tokens).toBeGreaterThan(0);
            expect(success.usage?.completion_tokens).toBeGreaterThan(0);
            expect(success.usage?.total_tokens).toBe((success.usage?.prompt_tokens ?? 0) + (success.usage?.completion_tokens ?? 0));
        });
    });
    describe('validateApiKey', () => {
        it('should return true for non-empty API keys', () => {
            expect(mockAdapter.validateApiKey('valid-key')).toBe(true);
            expect(mockAdapter.validateApiKey('a')).toBe(true);
        });
        it('should return false for empty API keys', () => {
            expect(mockAdapter.validateApiKey('')).toBe(false);
        });
    });
    describe('getAdapterInfo', () => {
        it('should return correct adapter information', () => {
            const adapterInfo = mockAdapter.getAdapterInfo();
            expect(adapterInfo.providerId).toBe('openai');
            expect(adapterInfo.name).toBe('Mock Client Adapter');
            expect(adapterInfo.version).toBe('1.0.0');
            expect(adapterInfo.supportedModels).toEqual(['mock-model-1', 'mock-model-2']);
        });
        it('should use custom provider ID', () => {
            const anthropicAdapter = new MockClientAdapter_1.MockClientAdapter('anthropic');
            const adapterInfo = anthropicAdapter.getAdapterInfo();
            expect(adapterInfo.providerId).toBe('anthropic');
        });
    });
});
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// Marks this declaration file as a module; it intentionally exports nothing.
export {};
|