genai-lite 0.4.0 → 0.4.1
This diff compares publicly available package versions as published to their respective registries and is provided for informational purposes only.
- package/README.md +47 -37
- package/dist/llm/LLMService.d.ts +29 -2
- package/dist/llm/LLMService.js +67 -36
- package/dist/llm/config.js +4 -4
- package/dist/llm/services/SettingsManager.js +17 -11
- package/dist/llm/types.d.ts +81 -22
- package/dist/prompting/parser.d.ts +2 -2
- package/dist/prompting/parser.js +2 -2
- package/package.json +1 -1
- package/dist/llm/LLMService.createMessages.test.d.ts +0 -4
- package/dist/llm/LLMService.createMessages.test.js +0 -364
- package/dist/llm/LLMService.original.d.ts +0 -147
- package/dist/llm/LLMService.original.js +0 -656
- package/dist/llm/LLMService.prepareMessage.test.d.ts +0 -1
- package/dist/llm/LLMService.prepareMessage.test.js +0 -303
- package/dist/llm/LLMService.presets.test.d.ts +0 -1
- package/dist/llm/LLMService.presets.test.js +0 -210
- package/dist/llm/LLMService.sendMessage.preset.test.d.ts +0 -1
- package/dist/llm/LLMService.sendMessage.preset.test.js +0 -153
- package/dist/llm/LLMService.test.d.ts +0 -1
- package/dist/llm/LLMService.test.js +0 -639
- package/dist/llm/clients/AnthropicClientAdapter.test.d.ts +0 -1
- package/dist/llm/clients/AnthropicClientAdapter.test.js +0 -273
- package/dist/llm/clients/GeminiClientAdapter.test.d.ts +0 -1
- package/dist/llm/clients/GeminiClientAdapter.test.js +0 -405
- package/dist/llm/clients/LlamaCppClientAdapter.test.d.ts +0 -1
- package/dist/llm/clients/LlamaCppClientAdapter.test.js +0 -447
- package/dist/llm/clients/LlamaCppServerClient.test.d.ts +0 -1
- package/dist/llm/clients/LlamaCppServerClient.test.js +0 -294
- package/dist/llm/clients/MockClientAdapter.test.d.ts +0 -1
- package/dist/llm/clients/MockClientAdapter.test.js +0 -250
- package/dist/llm/clients/OpenAIClientAdapter.test.d.ts +0 -1
- package/dist/llm/clients/OpenAIClientAdapter.test.js +0 -258
- package/dist/llm/clients/adapterErrorUtils.test.d.ts +0 -1
- package/dist/llm/clients/adapterErrorUtils.test.js +0 -123
- package/dist/llm/config.test.d.ts +0 -1
- package/dist/llm/config.test.js +0 -176
- package/dist/llm/services/AdapterRegistry.test.d.ts +0 -1
- package/dist/llm/services/AdapterRegistry.test.js +0 -239
- package/dist/llm/services/ModelResolver.test.d.ts +0 -1
- package/dist/llm/services/ModelResolver.test.js +0 -179
- package/dist/llm/services/PresetManager.test.d.ts +0 -1
- package/dist/llm/services/PresetManager.test.js +0 -210
- package/dist/llm/services/RequestValidator.test.d.ts +0 -1
- package/dist/llm/services/RequestValidator.test.js +0 -159
- package/dist/llm/services/SettingsManager.test.d.ts +0 -1
- package/dist/llm/services/SettingsManager.test.js +0 -266
- package/dist/prompting/builder.d.ts +0 -38
- package/dist/prompting/builder.js +0 -63
- package/dist/prompting/builder.test.d.ts +0 -4
- package/dist/prompting/builder.test.js +0 -109
- package/dist/prompting/content.test.d.ts +0 -4
- package/dist/prompting/content.test.js +0 -212
- package/dist/prompting/parser.test.d.ts +0 -4
- package/dist/prompting/parser.test.js +0 -464
- package/dist/prompting/template.test.d.ts +0 -1
- package/dist/prompting/template.test.js +0 -250
- package/dist/providers/fromEnvironment.test.d.ts +0 -1
- package/dist/providers/fromEnvironment.test.js +0 -59
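Most of the removals above are compiled Jest test files, the prompting builder, and the LLMService.original build being dropped from the published dist/ output; the functional changes sit in the modified files (README, package.json, LLMService, config, SettingsManager, types, and the prompting parser). For orientation, the adapter tests removed in this release (reproduced below) exercise a request shape like the following. This is a minimal sketch lifted from those tests, not a documented 0.4.1 API; field availability should be checked against the updated types.d.ts in this diff.

```ts
// Sketch only: this request shape is copied from the removed adapter tests
// (AnthropicClientAdapter.test.js / GeminiClientAdapter.test.js, shown below).
const exampleAdapterRequest = {
  providerId: 'anthropic',
  modelId: 'claude-3-5-sonnet-20241022',
  messages: [{ role: 'user', content: 'Hello' }],
  settings: {
    temperature: 0.7,
    maxTokens: 100,
    topP: 1,
    stopSequences: [] as string[],
    // Reasoning / thinking-extraction controls exercised by the removed tests:
    reasoning: { enabled: false, exclude: false },
    thinkingExtraction: { enabled: true, tag: 'thinking' },
  },
};
// The adapters in those tests expose sendMessage(request, apiKey),
// validateApiKey(key) and getAdapterInfo(), e.g.
//   const response = await adapter.sendMessage(exampleAdapterRequest, apiKey);
```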
package/dist/llm/clients/AnthropicClientAdapter.test.js
@@ -1,273 +0,0 @@
-"use strict";
-var __importDefault = (this && this.__importDefault) || function (mod) {
-    return (mod && mod.__esModule) ? mod : { "default": mod };
-};
-Object.defineProperty(exports, "__esModule", { value: true });
-const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
-const AnthropicClientAdapter_1 = require("./AnthropicClientAdapter");
-const types_1 = require("./types");
-// Mock the entire '@anthropic-ai/sdk' module
-jest.mock('@anthropic-ai/sdk');
-// Cast the mocked module to allow setting up mock implementations
-const MockAnthropic = sdk_1.default;
-describe('AnthropicClientAdapter', () => {
-    let adapter;
-    let mockCreate;
-    let basicRequest;
-    beforeEach(() => {
-        // Reset mocks before each test
-        MockAnthropic.mockClear();
-        mockCreate = jest.fn();
-        // Mock the messages.create method
-        MockAnthropic.prototype.messages = {
-            create: mockCreate,
-        };
-        adapter = new AnthropicClientAdapter_1.AnthropicClientAdapter();
-        basicRequest = {
-            providerId: 'anthropic',
-            modelId: 'claude-3-5-sonnet-20241022',
-            messages: [{ role: 'user', content: 'Hello' }],
-            settings: {
-                temperature: 0.7,
-                maxTokens: 100,
-                topP: 1,
-                frequencyPenalty: 0,
-                presencePenalty: 0,
-                stopSequences: [],
-                user: 'test-user',
-                geminiSafetySettings: [],
-                supportsSystemMessage: true,
-                reasoning: {
-                    enabled: false,
-                    effort: undefined,
-                    maxTokens: undefined,
-                    exclude: false
-                },
-                thinkingExtraction: {
-                    enabled: true,
-                    tag: 'thinking'
-                }
-            }
-        };
-    });
-    describe('sendMessage', () => {
-        it('should format the request correctly and call the Anthropic API', async () => {
-            // Setup mock response
-            mockCreate.mockResolvedValueOnce({
-                id: 'msg_123',
-                type: 'message',
-                role: 'assistant',
-                model: 'claude-3-5-sonnet-20241022',
-                content: [{
-                    type: 'text',
-                    text: 'Hello! How can I help you today?'
-                }],
-                stop_reason: 'end_turn',
-                stop_sequence: null,
-                usage: {
-                    input_tokens: 10,
-                    output_tokens: 20
-                }
-            });
-            const response = await adapter.sendMessage(basicRequest, 'test-api-key');
-            // Verify Anthropic was instantiated with the API key
-            expect(MockAnthropic).toHaveBeenCalledWith({
-                apiKey: 'test-api-key',
-                baseURL: undefined
-            });
-            // Verify the create method was called with correct parameters
-            expect(mockCreate).toHaveBeenCalledWith({
-                model: 'claude-3-5-sonnet-20241022',
-                messages: [{ role: 'user', content: 'Hello' }],
-                max_tokens: 100,
-                temperature: 0.7,
-                top_p: 1
-            });
-            // Verify the response
-            expect(response.object).toBe('chat.completion');
-            const successResponse = response;
-            expect(successResponse.id).toBe('msg_123');
-            expect(successResponse.provider).toBe('anthropic');
-            expect(successResponse.model).toBe('claude-3-5-sonnet-20241022');
-            expect(successResponse.choices[0].message.content).toBe('Hello! How can I help you today?');
-            expect(successResponse.usage?.total_tokens).toBe(30);
-        });
-        it('should handle system messages by merging into first user message', async () => {
-            basicRequest.messages = [
-                { role: 'system', content: 'You are a helpful assistant.' },
-                { role: 'user', content: 'Hello' }
-            ];
-            mockCreate.mockResolvedValueOnce({
-                id: 'msg_123',
-                type: 'message',
-                role: 'assistant',
-                model: 'claude-3-5-sonnet-20241022',
-                content: [{ type: 'text', text: 'Hello!' }],
-                stop_reason: 'end_turn',
-                usage: { input_tokens: 15, output_tokens: 5 }
-            });
-            await adapter.sendMessage(basicRequest, 'test-api-key');
-            // System message should be sent as separate system parameter
-            expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({
-                system: 'You are a helpful assistant.',
-                messages: [{
-                    role: 'user',
-                    content: 'Hello'
-                }]
-            }));
-        });
-        it('should handle stop sequences correctly', async () => {
-            basicRequest.settings.stopSequences = ['END', 'STOP'];
-            mockCreate.mockResolvedValueOnce({
-                id: 'msg_123',
-                type: 'message',
-                role: 'assistant',
-                model: 'claude-3-5-sonnet-20241022',
-                content: [{ type: 'text', text: 'Response' }],
-                stop_reason: 'end_turn',
-                usage: { input_tokens: 10, output_tokens: 10 }
-            });
-            await adapter.sendMessage(basicRequest, 'test-api-key');
-            expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({
-                stop_sequences: ['END', 'STOP']
-            }));
-        });
-        it('should handle multi-turn conversations', async () => {
-            basicRequest.messages = [
-                { role: 'user', content: 'Hello' },
-                { role: 'assistant', content: 'Hi there!' },
-                { role: 'user', content: 'How are you?' }
-            ];
-            mockCreate.mockResolvedValueOnce({
-                id: 'msg_123',
-                type: 'message',
-                role: 'assistant',
-                model: 'claude-3-5-sonnet-20241022',
-                content: [{ type: 'text', text: "I'm doing well, thanks!" }],
-                stop_reason: 'end_turn',
-                usage: { input_tokens: 20, output_tokens: 10 }
-            });
-            await adapter.sendMessage(basicRequest, 'test-api-key');
-            expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({
-                messages: [
-                    { role: 'user', content: 'Hello' },
-                    { role: 'assistant', content: 'Hi there!' },
-                    { role: 'user', content: 'How are you?' }
-                ]
-            }));
-        });
-        it('should map stop_reason correctly', async () => {
-            const stopReasons = [
-                { anthropic: 'end_turn', expected: 'stop' },
-                { anthropic: 'max_tokens', expected: 'length' },
-                { anthropic: 'stop_sequence', expected: 'stop' },
-                { anthropic: 'unknown_reason', expected: 'other' }
-            ];
-            for (const { anthropic, expected } of stopReasons) {
-                mockCreate.mockResolvedValueOnce({
-                    id: 'msg_123',
-                    type: 'message',
-                    role: 'assistant',
-                    model: 'claude-3-5-sonnet-20241022',
-                    content: [{ type: 'text', text: 'Response' }],
-                    stop_reason: anthropic,
-                    usage: { input_tokens: 10, output_tokens: 10 }
-                });
-                const response = await adapter.sendMessage(basicRequest, 'test-api-key');
-                const successResponse = response;
-                expect(successResponse.choices[0].finish_reason).toBe(expected);
-            }
-        });
-        describe('error handling', () => {
-            it('should handle authentication errors (401)', async () => {
-                const apiError = new Error('Invalid API key');
-                apiError.status = 401;
-                mockCreate.mockRejectedValueOnce(apiError);
-                const response = await adapter.sendMessage(basicRequest, 'invalid-key');
-                expect(response.object).toBe('error');
-                const errorResponse = response;
-                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.INVALID_API_KEY);
-                expect(errorResponse.error.type).toBe('authentication_error');
-            });
-            it('should handle rate limit errors (429)', async () => {
-                const apiError = new Error('Rate limit exceeded');
-                apiError.status = 429;
-                mockCreate.mockRejectedValueOnce(apiError);
-                const response = await adapter.sendMessage(basicRequest, 'test-key');
-                const errorResponse = response;
-                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.RATE_LIMIT_EXCEEDED);
-                expect(errorResponse.error.type).toBe('rate_limit_error');
-            });
-            it('should handle context length errors', async () => {
-                // Create a mock error that simulates Anthropic.APIError
-                const apiError = Object.assign(new Error('Message is too long'), {
-                    status: 400,
-                    constructor: { name: 'APIError' }
-                });
-                Object.setPrototypeOf(apiError, sdk_1.default.APIError.prototype);
-                mockCreate.mockRejectedValueOnce(apiError);
-                const response = await adapter.sendMessage(basicRequest, 'test-key');
-                const errorResponse = response;
-                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.CONTEXT_LENGTH_EXCEEDED);
-                expect(errorResponse.error.type).toBe('invalid_request_error');
-            });
-            it('should handle invalid model errors', async () => {
-                const apiError = new Error('Model not found');
-                apiError.status = 404;
-                mockCreate.mockRejectedValueOnce(apiError);
-                const response = await adapter.sendMessage(basicRequest, 'test-key');
-                const errorResponse = response;
-                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.MODEL_NOT_FOUND);
-                expect(errorResponse.error.type).toBe('invalid_request_error');
-            });
-            it('should handle credit errors', async () => {
-                const apiError = new Error('Insufficient credits');
-                apiError.status = 402;
-                mockCreate.mockRejectedValueOnce(apiError);
-                const response = await adapter.sendMessage(basicRequest, 'test-key');
-                const errorResponse = response;
-                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.INSUFFICIENT_CREDITS);
-                expect(errorResponse.error.type).toBe('rate_limit_error');
-            });
-            it('should handle server errors (500)', async () => {
-                const apiError = new Error('Internal server error');
-                apiError.status = 500;
-                mockCreate.mockRejectedValueOnce(apiError);
-                const response = await adapter.sendMessage(basicRequest, 'test-key');
-                const errorResponse = response;
-                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.PROVIDER_ERROR);
-                expect(errorResponse.error.type).toBe('server_error');
-            });
-            it('should handle network errors', async () => {
-                const networkError = new Error('Network error');
-                networkError.code = 'ENOTFOUND';
-                mockCreate.mockRejectedValueOnce(networkError);
-                const response = await adapter.sendMessage(basicRequest, 'test-key');
-                const errorResponse = response;
-                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.NETWORK_ERROR);
-                expect(errorResponse.error.type).toBe('connection_error');
-            });
-        });
-    });
-    describe('validateApiKey', () => {
-        it('should validate API key format', () => {
-            // Valid Anthropic API key format - must start with 'sk-ant-' and be at least 30 chars
-            expect(adapter.validateApiKey('sk-ant-api01-test123456789012345')).toBe(true);
-            expect(adapter.validateApiKey('sk-ant-api03-test123456789012345')).toBe(true);
-            // Invalid formats
-            expect(adapter.validateApiKey('invalid')).toBe(false);
-            expect(adapter.validateApiKey('')).toBe(false);
-            expect(adapter.validateApiKey('sk-test')).toBe(false); // OpenAI format
-            expect(adapter.validateApiKey('sk-ant-test123')).toBe(false); // Too short
-        });
-    });
-    describe('getAdapterInfo', () => {
-        it('should return correct adapter information', () => {
-            const info = adapter.getAdapterInfo();
-            expect(info.providerId).toBe('anthropic');
-            expect(info.name).toBe('Anthropic Client Adapter');
-            expect(info.version).toBeDefined();
-            // supportedModels is not part of the interface
-        });
-    });
-});
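The error-handling expectations in the removed Anthropic tests amount to a status-to-error-code table; the sketch below restates it as data for readability and is not the adapter's actual implementation.

```ts
// Mapping asserted by the removed AnthropicClientAdapter tests above. The
// `code` strings are the ADAPTER_ERROR_CODES constant names referenced by the
// tests, not necessarily their runtime values.
const anthropicErrorMapping: Record<string, { code: string; type: string }> = {
  '401':       { code: 'INVALID_API_KEY',      type: 'authentication_error' },
  '402':       { code: 'INSUFFICIENT_CREDITS', type: 'rate_limit_error' },
  '404':       { code: 'MODEL_NOT_FOUND',      type: 'invalid_request_error' },
  '429':       { code: 'RATE_LIMIT_EXCEEDED',  type: 'rate_limit_error' },
  '500':       { code: 'PROVIDER_ERROR',       type: 'server_error' },
  'ENOTFOUND': { code: 'NETWORK_ERROR',        type: 'connection_error' },
};
// A 400 Anthropic APIError whose message indicates an over-long prompt maps to
// CONTEXT_LENGTH_EXCEEDED / invalid_request_error in the same tests.
```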
@@ -1 +0,0 @@
-export {};
package/dist/llm/clients/GeminiClientAdapter.test.js
@@ -1,405 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-const genai_1 = require("@google/genai");
-const GeminiClientAdapter_1 = require("./GeminiClientAdapter");
-const types_1 = require("./types");
-// Mock the entire '@google/genai' module
-jest.mock('@google/genai');
-// Cast the mocked module to allow setting up mock implementations
-const MockGoogleGenAI = genai_1.GoogleGenAI;
-describe('GeminiClientAdapter', () => {
-    let adapter;
-    let mockGenerateContent;
-    let mockGetGenerativeModel;
-    let mockModel;
-    let basicRequest;
-    beforeEach(() => {
-        // Reset mocks before each test
-        MockGoogleGenAI.mockClear();
-        mockGenerateContent = jest.fn();
-        // Mock the models.generateContent method
-        MockGoogleGenAI.mockImplementation(() => ({
-            models: {
-                generateContent: mockGenerateContent
-            }
-        }));
-        adapter = new GeminiClientAdapter_1.GeminiClientAdapter();
-        basicRequest = {
-            providerId: 'gemini',
-            modelId: 'gemini-2.5-pro',
-            messages: [{ role: 'user', content: 'Hello' }],
-            settings: {
-                temperature: 0.7,
-                maxTokens: 100,
-                topP: 1,
-                frequencyPenalty: 0,
-                presencePenalty: 0,
-                stopSequences: [],
-                user: 'test-user',
-                geminiSafetySettings: [],
-                supportsSystemMessage: true,
-                reasoning: {
-                    enabled: false,
-                    effort: undefined,
-                    maxTokens: undefined,
-                    exclude: false
-                },
-                thinkingExtraction: {
-                    enabled: true,
-                    tag: 'thinking'
-                }
-            }
-        };
-    });
-    describe('sendMessage', () => {
-        it('should format the request correctly and call the Gemini API', async () => {
-            // Setup mock response - Gemini API returns the raw response without nesting
-            mockGenerateContent.mockResolvedValueOnce({
-                text: () => 'Hello! How can I help you today?',
-                candidates: [{
-                    finishReason: 'STOP',
-                    content: {
-                        parts: [{ text: 'Hello! How can I help you today?' }],
-                        role: 'model'
-                    }
-                }],
-                usageMetadata: {
-                    promptTokenCount: 10,
-                    candidatesTokenCount: 20,
-                    totalTokenCount: 30
-                }
-            });
-            const response = await adapter.sendMessage(basicRequest, 'test-api-key');
-            // Verify GoogleGenAI was instantiated with the API key
-            expect(MockGoogleGenAI).toHaveBeenCalledWith({ apiKey: 'test-api-key' });
-            // Verify generateContent was called
-            expect(mockGenerateContent).toHaveBeenCalledTimes(1);
-            const callArgs = mockGenerateContent.mock.calls[0][0];
-            expect(callArgs.model).toBe('gemini-2.5-pro');
-            expect(callArgs.contents).toHaveLength(1);
-            expect(callArgs.contents[0].role).toBe('user');
-            // Verify the response
-            expect(response.object).toBe('chat.completion');
-            const successResponse = response;
-            expect(successResponse.provider).toBe('gemini');
-            expect(successResponse.model).toBe('gemini-2.5-pro');
-            expect(successResponse.choices[0].message.content).toBe('Hello! How can I help you today?');
-            expect(successResponse.usage?.total_tokens).toBe(30);
-        });
-        it('should handle system messages correctly', async () => {
-            basicRequest.messages = [
-                { role: 'system', content: 'You are a helpful assistant.' },
-                { role: 'user', content: 'Hello' }
-            ];
-            mockGenerateContent.mockResolvedValueOnce({
-                text: () => 'Hello!',
-                candidates: [{
-                    finishReason: 'STOP',
-                    content: { parts: [{ text: 'Hello!' }], role: 'model' }
-                }],
-                usageMetadata: { promptTokenCount: 15, candidatesTokenCount: 5, totalTokenCount: 20 }
-            });
-            await adapter.sendMessage(basicRequest, 'test-api-key');
-            // System message should be passed as systemInstruction
-            expect(mockGenerateContent).toHaveBeenCalledWith({
-                model: 'gemini-2.5-pro',
-                contents: [{
-                    role: 'user',
-                    parts: [{ text: 'Hello' }]
-                }],
-                config: {
-                    temperature: 0.7,
-                    maxOutputTokens: 100,
-                    topP: 1,
-                    safetySettings: [],
-                    systemInstruction: 'You are a helpful assistant.'
-                }
-            });
-        });
-        it('should handle multi-turn conversations with role mapping', async () => {
-            basicRequest.messages = [
-                { role: 'user', content: 'Hello' },
-                { role: 'assistant', content: 'Hi there!' },
-                { role: 'user', content: 'How are you?' }
-            ];
-            mockGenerateContent.mockResolvedValueOnce({
-                text: () => "I'm doing well!",
-                candidates: [{
-                    finishReason: 'STOP',
-                    content: { parts: [{ text: "I'm doing well!" }], role: 'model' }
-                }],
-                usageMetadata: { promptTokenCount: 20, candidatesTokenCount: 10, totalTokenCount: 30 }
-            });
-            await adapter.sendMessage(basicRequest, 'test-api-key');
-            // Verify role mapping: assistant -> model
-            expect(mockGenerateContent).toHaveBeenCalledWith({
-                model: 'gemini-2.5-pro',
-                contents: [
-                    { role: 'user', parts: [{ text: 'Hello' }] },
-                    { role: 'model', parts: [{ text: 'Hi there!' }] },
-                    { role: 'user', parts: [{ text: 'How are you?' }] }
-                ],
-                config: {
-                    temperature: 0.7,
-                    maxOutputTokens: 100,
-                    topP: 1,
-                    safetySettings: []
-                }
-            });
-        });
-        it('should handle stop sequences', async () => {
-            basicRequest.settings.stopSequences = ['END', 'STOP'];
-            mockGenerateContent.mockResolvedValueOnce({
-                text: () => 'Response',
-                candidates: [{
-                    finishReason: 'STOP',
-                    content: { parts: [{ text: 'Response' }], role: 'model' }
-                }]
-            });
-            await adapter.sendMessage(basicRequest, 'test-api-key');
-            expect(mockGenerateContent).toHaveBeenCalledWith(expect.objectContaining({
-                config: expect.objectContaining({
-                    stopSequences: ['END', 'STOP']
-                })
-            }));
-        });
-        it('should handle safety settings', async () => {
-            basicRequest.settings.geminiSafetySettings = [
-                { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_NONE' }
-            ];
-            mockGenerateContent.mockResolvedValueOnce({
-                text: () => 'Response',
-                candidates: [{
-                    finishReason: 'STOP',
-                    content: { parts: [{ text: 'Response' }], role: 'model' }
-                }]
-            });
-            await adapter.sendMessage(basicRequest, 'test-api-key');
-            expect(mockGenerateContent).toHaveBeenCalledWith(expect.objectContaining({
-                config: expect.objectContaining({
-                    safetySettings: [
-                        { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_NONE' }
-                    ]
-                })
-            }));
-        });
-        it('should map finish reasons correctly', async () => {
-            const finishReasons = [
-                { gemini: 'STOP', expected: 'stop' },
-                { gemini: 'MAX_TOKENS', expected: 'length' },
-                { gemini: 'SAFETY', expected: 'content_filter' },
-                { gemini: 'RECITATION', expected: 'content_filter' },
-                { gemini: 'OTHER', expected: 'other' },
-                { gemini: 'UNKNOWN', expected: 'other' }
-            ];
-            for (const { gemini, expected } of finishReasons) {
-                mockGenerateContent.mockResolvedValueOnce({
-                    text: () => 'Response',
-                    candidates: [{
-                        finishReason: gemini,
-                        content: { parts: [{ text: 'Response' }], role: 'model' }
-                    }]
-                });
-                const response = await adapter.sendMessage(basicRequest, 'test-api-key');
-                const successResponse = response;
-                expect(successResponse.choices[0].finish_reason).toBe(expected);
-            }
-        });
-        describe('reasoning/thinking configuration', () => {
-            it('should add thinking config when reasoning is enabled with maxTokens', async () => {
-                const requestWithReasoning = {
-                    ...basicRequest,
-                    settings: {
-                        ...basicRequest.settings,
-                        reasoning: {
-                            enabled: true,
-                            maxTokens: 5000,
-                            effort: undefined,
-                            exclude: false
-                        }
-                    }
-                };
-                mockGenerateContent.mockResolvedValueOnce({
-                    text: () => 'Response with thinking',
-                    candidates: [{
-                        finishReason: 'STOP',
-                        content: {
-                            parts: [{ text: 'Response with thinking' }]
-                        }
-                    }],
-                    usageMetadata: {}
-                });
-                await adapter.sendMessage(requestWithReasoning, 'test-api-key');
-                const callArgs = mockGenerateContent.mock.calls[0][0];
-                expect(callArgs.config.thinkingConfig).toEqual({
-                    thinkingBudget: 5000,
-                    includeThoughts: true
-                });
-            });
-            it('should convert effort levels to thinking budget', async () => {
-                const requestWithEffort = {
-                    ...basicRequest,
-                    settings: {
-                        ...basicRequest.settings,
-                        reasoning: {
-                            enabled: true,
-                            effort: 'high',
-                            maxTokens: undefined,
-                            exclude: false
-                        }
-                    }
-                };
-                mockGenerateContent.mockResolvedValueOnce({
-                    text: () => 'Response',
-                    candidates: [{
-                        finishReason: 'STOP',
-                        content: {
-                            parts: [{ text: 'Response' }]
-                        }
-                    }],
-                    usageMetadata: {}
-                });
-                await adapter.sendMessage(requestWithEffort, 'test-api-key');
-                const callArgs = mockGenerateContent.mock.calls[0][0];
-                // For gemini-2.5-pro (not flash), max budget is 65536, high effort = 80%
-                expect(callArgs.config.thinkingConfig?.thinkingBudget).toBe(Math.floor(65536 * 0.8));
-            });
-            it('should use dynamic budget (-1) when reasoning enabled without specific settings', async () => {
-                const requestWithBasicReasoning = {
-                    ...basicRequest,
-                    settings: {
-                        ...basicRequest.settings,
-                        reasoning: {
-                            enabled: true,
-                            effort: undefined,
-                            maxTokens: undefined,
-                            exclude: false
-                        }
-                    }
-                };
-                mockGenerateContent.mockResolvedValueOnce({
-                    text: () => 'Response',
-                    candidates: [{
-                        finishReason: 'STOP',
-                        content: {
-                            parts: [{ text: 'Response' }]
-                        }
-                    }],
-                    usageMetadata: {}
-                });
-                await adapter.sendMessage(requestWithBasicReasoning, 'test-api-key');
-                const callArgs = mockGenerateContent.mock.calls[0][0];
-                expect(callArgs.config.thinkingConfig?.thinkingBudget).toBe(-1);
-            });
-            it('should exclude thinking config when reasoning.exclude is true', async () => {
-                const requestWithExclude = {
-                    ...basicRequest,
-                    settings: {
-                        ...basicRequest.settings,
-                        reasoning: {
-                            enabled: true,
-                            maxTokens: 5000,
-                            effort: undefined,
-                            exclude: true
-                        }
-                    }
-                };
-                mockGenerateContent.mockResolvedValueOnce({
-                    text: () => 'Response',
-                    candidates: [{
-                        finishReason: 'STOP',
-                        content: {
-                            parts: [{ text: 'Response' }]
-                        }
-                    }],
-                    usageMetadata: {}
-                });
-                await adapter.sendMessage(requestWithExclude, 'test-api-key');
-                const callArgs = mockGenerateContent.mock.calls[0][0];
-                expect(callArgs.config.thinkingConfig).toBeUndefined();
-            });
-        });
-        describe('error handling', () => {
-            it('should handle API key errors', async () => {
-                const apiError = new Error('API key not valid');
-                mockGenerateContent.mockRejectedValueOnce(apiError);
-                const response = await adapter.sendMessage(basicRequest, 'invalid-key');
-                const errorResponse = response;
-                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.INVALID_API_KEY);
-                expect(errorResponse.error.type).toBe('authentication_error');
-            });
-            it('should handle safety/content filter errors', async () => {
-                const apiError = new Error('Response was blocked due to safety reasons');
-                mockGenerateContent.mockRejectedValueOnce(apiError);
-                const response = await adapter.sendMessage(basicRequest, 'test-key');
-                const errorResponse = response;
-                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.CONTENT_FILTER);
-                expect(errorResponse.error.type).toBe('content_filter_error');
-            });
-            it('should handle quota exceeded errors', async () => {
-                const apiError = new Error('API rate limit exceeded');
-                mockGenerateContent.mockRejectedValueOnce(apiError);
-                const response = await adapter.sendMessage(basicRequest, 'test-key');
-                const errorResponse = response;
-                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.RATE_LIMIT_EXCEEDED);
-                expect(errorResponse.error.type).toBe('rate_limit_error');
-            });
-            it('should handle model not found errors', async () => {
-                const apiError = new Error('Model not found');
-                apiError.status = 404;
-                mockGenerateContent.mockRejectedValueOnce(apiError);
-                const response = await adapter.sendMessage(basicRequest, 'test-key');
-                const errorResponse = response;
-                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.MODEL_NOT_FOUND);
-                expect(errorResponse.error.type).toBe('invalid_request_error');
-            });
-            it('should handle permission errors', async () => {
-                const apiError = new Error('Invalid API key provided');
-                mockGenerateContent.mockRejectedValueOnce(apiError);
-                const response = await adapter.sendMessage(basicRequest, 'test-key');
-                const errorResponse = response;
-                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.INVALID_API_KEY);
-                expect(errorResponse.error.type).toBe('authentication_error');
-            });
-            it('should handle generic errors', async () => {
-                const apiError = new Error('Unknown error');
-                mockGenerateContent.mockRejectedValueOnce(apiError);
-                const response = await adapter.sendMessage(basicRequest, 'test-key');
-                const errorResponse = response;
-                expect(errorResponse.error.code).toBe(types_1.ADAPTER_ERROR_CODES.UNKNOWN_ERROR);
-                expect(errorResponse.error.message).toContain('Unknown error');
-            });
-            it('should handle empty response as success with empty content', async () => {
-                mockGenerateContent.mockResolvedValueOnce({
-                    text: () => '',
-                    candidates: []
-                });
-                const response = await adapter.sendMessage(basicRequest, 'test-key');
-                // Empty responses are returned as success with empty content
-                const successResponse = response;
-                expect(successResponse.object).toBe('chat.completion');
-                expect(successResponse.choices[0].message.content).toBe('');
-            });
-        });
-    });
-    describe('validateApiKey', () => {
-        it('should validate API key format', () => {
-            // Gemini API keys must start with 'AIza' and be at least 35 chars
-            expect(adapter.validateApiKey('AIzaSyABCDEFGHIJKLMNOPQRSTUVWXYZ123456')).toBe(true);
-            expect(adapter.validateApiKey('AIzaABCDEFGHIJKLMNOPQRSTUVWXYZ12345')).toBe(true);
-            // Invalid formats
-            expect(adapter.validateApiKey('')).toBe(false);
-            expect(adapter.validateApiKey('short')).toBe(false); // Too short
-            expect(adapter.validateApiKey('abcdef123456')).toBe(false); // Wrong prefix
-        });
-    });
-    describe('getAdapterInfo', () => {
-        it('should return correct adapter information', () => {
-            const info = adapter.getAdapterInfo();
-            expect(info.providerId).toBe('gemini');
-            expect(info.name).toBe('Gemini Client Adapter');
-            expect(info.version).toBeDefined();
-            // supportedModels is not part of the interface
-        });
-    });
-});
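The removed Gemini tests also pin down how reasoning settings translate into the thinkingConfig sent to @google/genai. The sketch below restates only the cases asserted above (explicit maxTokens, effort 'high' on gemini-2.5-pro, the dynamic default, and exclude); it is not the adapter's actual code.

```ts
// Restatement of the thinking-budget behaviour asserted by the removed
// GeminiClientAdapter tests above; a sketch under those assumptions only.
interface ReasoningSettings {
  enabled: boolean;
  maxTokens?: number;
  effort?: 'low' | 'medium' | 'high';
  exclude?: boolean;
}

function thinkingBudgetFor(reasoning: ReasoningSettings, maxModelBudget = 65536): number | undefined {
  if (!reasoning.enabled || reasoning.exclude) return undefined;            // no thinkingConfig is sent
  if (reasoning.maxTokens !== undefined) return reasoning.maxTokens;        // explicit budget, e.g. 5000
  if (reasoning.effort === 'high') return Math.floor(maxModelBudget * 0.8); // 52428 for gemini-2.5-pro
  // 'low'/'medium' effort and non-pro models are not asserted by the tests above.
  return -1;                                                                // dynamic budget otherwise
}

// Example: thinkingBudgetFor({ enabled: true }) === -1
```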