genai-lite 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,281 @@
+ "use strict";
+ // AI Summary: Anthropic client adapter for making real API calls to Anthropic's messages endpoint.
+ // Handles Claude-specific request formatting, response parsing, and error mapping to standardized format.
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.AnthropicClientAdapter = void 0;
+ const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
+ const types_1 = require("./types");
+ const adapterErrorUtils_1 = require("./adapterErrorUtils");
+ /**
+  * Client adapter for Anthropic API integration
+  *
+  * This adapter:
+  * - Formats requests according to Anthropic's messages API requirements
+  * - Handles Claude-specific system message positioning and formatting
+  * - Maps Anthropic responses to standardized LLMResponse format
+  * - Converts Anthropic errors to standardized LLMFailureResponse format
+  * - Manages Claude-specific settings and constraints
+  */
+ class AnthropicClientAdapter {
+     /**
+      * Creates a new Anthropic client adapter
+      *
+      * @param config Optional configuration for the adapter
+      * @param config.baseURL Custom base URL for Anthropic-compatible APIs
+      */
+     constructor(config) {
+         this.baseURL = config?.baseURL;
+     }
+     /**
+      * Sends a chat message to Anthropic's API
+      *
+      * @param request - The internal LLM request with applied settings
+      * @param apiKey - The decrypted Anthropic API key
+      * @returns Promise resolving to success or failure response
+      */
+     async sendMessage(request, apiKey) {
+         try {
+             // Initialize Anthropic client
+             const anthropic = new sdk_1.default({
+                 apiKey,
+                 ...(this.baseURL && { baseURL: this.baseURL }),
+             });
+             // Format messages for Anthropic API (Claude has specific requirements)
+             const { messages, systemMessage } = this.formatMessagesForAnthropic(request);
+             // Prepare API call parameters
+             const messageParams = {
+                 model: request.modelId,
+                 messages: messages,
+                 max_tokens: request.settings.maxTokens,
+                 temperature: request.settings.temperature,
+                 top_p: request.settings.topP,
+                 ...(systemMessage && { system: systemMessage }),
+                 ...(request.settings.stopSequences.length > 0 && {
+                     stop_sequences: request.settings.stopSequences,
+                 }),
+             };
+             console.log(`Making Anthropic API call for model: ${request.modelId}`);
+             console.log(`Anthropic API parameters:`, {
+                 model: messageParams.model,
+                 temperature: messageParams.temperature,
+                 max_tokens: messageParams.max_tokens,
+                 top_p: messageParams.top_p,
+                 hasSystem: !!messageParams.system,
+                 messageCount: messages.length,
+                 hasStopSequences: !!messageParams.stop_sequences,
+             });
+             // Make the API call
+             const completion = await anthropic.messages.create(messageParams);
+             console.log(`Anthropic API call successful, response ID: ${completion.id}`);
+             // Convert to standardized response format
+             return this.createSuccessResponse(completion, request);
+         }
+         catch (error) {
+             console.error("Anthropic API error:", error);
+             return this.createErrorResponse(error, request);
+         }
+     }
+     /**
+      * Validates Anthropic API key format
+      *
+      * @param apiKey - The API key to validate
+      * @returns True if the key format appears valid
+      */
+     validateApiKey(apiKey) {
+         // Anthropic API keys typically start with 'sk-ant-' and are at least 30 characters long
+         return apiKey.startsWith("sk-ant-") && apiKey.length >= 30;
+     }
+     /**
+      * Gets adapter information
+      */
+     getAdapterInfo() {
+         return {
+             providerId: "anthropic",
+             name: "Anthropic Client Adapter",
+             version: "1.0.0",
+         };
+     }
+     /**
+      * Formats messages for Anthropic API with proper system message handling
+      *
+      * @param request - The internal LLM request
+      * @returns Formatted messages and system message for Anthropic
+      */
+     formatMessagesForAnthropic(request) {
+         const messages = [];
+         let systemMessage = request.systemMessage;
+         // Process conversation messages
+         for (const message of request.messages) {
+             if (message.role === "system") {
+                 // Anthropic handles system messages separately.
+                 // If we already have a system message, append to it.
+                 if (systemMessage) {
+                     systemMessage += "\n\n" + message.content;
+                 }
+                 else {
+                     systemMessage = message.content;
+                 }
+             }
+             else if (message.role === "user") {
+                 messages.push({
+                     role: "user",
+                     content: message.content,
+                 });
+             }
+             else if (message.role === "assistant") {
+                 messages.push({
+                     role: "assistant",
+                     content: message.content,
+                 });
+             }
+         }
+         // Anthropic requires messages to start with the 'user' role,
+         // so if the first message is not from the user, adjust the order.
+         if (messages.length > 0 && messages[0].role !== "user") {
+             console.warn("Anthropic API requires first message to be from user. Adjusting message order.");
+             // Find the first user message and move it to the front, or create a default one
+             const firstUserIndex = messages.findIndex((msg) => msg.role === "user");
+             if (firstUserIndex > 0) {
+                 const firstUserMessage = messages.splice(firstUserIndex, 1)[0];
+                 messages.unshift(firstUserMessage);
+             }
+             else if (firstUserIndex === -1) {
+                 // No user message found, create a default one
+                 messages.unshift({
+                     role: "user",
+                     content: "Please respond based on the previous context.",
+                 });
+             }
+         }
+         // Ensure alternating user/assistant pattern (Anthropic requirement)
+         const cleanedMessages = this.ensureAlternatingRoles(messages);
+         return {
+             messages: cleanedMessages,
+             systemMessage,
+         };
+     }
+     /**
+      * Ensures messages alternate between user and assistant roles as required by Anthropic
+      *
+      * @param messages - Original messages array
+      * @returns Cleaned messages with proper alternating pattern
+      */
+     ensureAlternatingRoles(messages) {
+         if (messages.length === 0)
+             return messages;
+         const cleanedMessages = [];
+         let expectedRole = "user";
+         for (const message of messages) {
+             if (message.role === expectedRole) {
+                 cleanedMessages.push(message);
+                 expectedRole = expectedRole === "user" ? "assistant" : "user";
+             }
+             else if (message.role === "user" || message.role === "assistant") {
+                 // If roles don't alternate properly, we might need to combine messages
+                 // or insert a placeholder. For now, we skip non-alternating messages
+                 // and log a warning.
+                 console.warn(`Skipping message with unexpected role: expected ${expectedRole}, got ${message.role}`);
+             }
+         }
+         return cleanedMessages;
+     }
+     /**
+      * Creates a standardized success response from Anthropic's response
+      *
+      * @param completion - Raw Anthropic completion response
+      * @param request - Original request for context
+      * @returns Standardized LLM response
+      */
+     createSuccessResponse(completion, request) {
+         // Anthropic returns content as an array of content blocks
+         const contentBlock = completion.content[0];
+         if (!contentBlock || contentBlock.type !== "text") {
+             throw new Error("Invalid completion structure from Anthropic API");
+         }
+         // Map Anthropic's stop reason to our standard format
+         const finishReason = this.mapAnthropicStopReason(completion.stop_reason);
+         return {
+             id: completion.id,
+             provider: request.providerId,
+             model: completion.model || request.modelId,
+             created: Math.floor(Date.now() / 1000), // Anthropic doesn't provide a created timestamp
+             choices: [
+                 {
+                     message: {
+                         role: "assistant",
+                         content: contentBlock.text,
+                     },
+                     finish_reason: finishReason,
+                     index: 0,
+                 },
+             ],
+             usage: completion.usage
+                 ? {
+                     prompt_tokens: completion.usage.input_tokens,
+                     completion_tokens: completion.usage.output_tokens,
+                     total_tokens: completion.usage.input_tokens + completion.usage.output_tokens,
+                 }
+                 : undefined,
+             object: "chat.completion",
+         };
+     }
+     /**
+      * Maps Anthropic stop reasons to standardized format
+      *
+      * @param anthropicReason - The stop reason from Anthropic
+      * @returns Standardized finish reason
+      */
+     mapAnthropicStopReason(anthropicReason) {
+         if (!anthropicReason)
+             return null;
+         const reasonMap = {
+             end_turn: "stop",
+             max_tokens: "length",
+             stop_sequence: "stop",
+             content_filter: "content_filter",
+             tool_use: "tool_calls",
+         };
+         return reasonMap[anthropicReason] || "other";
+     }
+     /**
+      * Creates a standardized error response from Anthropic errors
+      *
+      * @param error - The error from Anthropic API
+      * @param request - Original request for context
+      * @returns Standardized LLM failure response
+      */
+     createErrorResponse(error, request) {
+         // Use shared error mapping utility for common error patterns
+         const initialProviderMessage = error instanceof sdk_1.default.APIError ? error.message : undefined;
+         let { errorCode, errorMessage, errorType, status } = (0, adapterErrorUtils_1.getCommonMappedErrorDetails)(error, initialProviderMessage);
+         // Apply Anthropic-specific refinements for 400 errors based on message content
+         if (error instanceof sdk_1.default.APIError && status === 400) {
+             if (error.message.toLowerCase().includes("context length") ||
+                 error.message.toLowerCase().includes("too long")) {
+                 errorCode = types_1.ADAPTER_ERROR_CODES.CONTEXT_LENGTH_EXCEEDED;
+             }
+             else if (error.message.toLowerCase().includes("content policy") ||
+                 error.message.toLowerCase().includes("safety")) {
+                 errorCode = types_1.ADAPTER_ERROR_CODES.CONTENT_FILTER;
+                 errorType = "content_filter_error";
+             }
+             // For other 400 errors, use the default mapping from the utility (PROVIDER_ERROR)
+         }
+         return {
+             provider: request.providerId,
+             model: request.modelId,
+             error: {
+                 message: errorMessage,
+                 code: errorCode,
+                 type: errorType,
+                 ...(status && { status }),
+                 providerError: error,
+             },
+             object: "error",
+         };
+     }
+ }
+ exports.AnthropicClientAdapter = AnthropicClientAdapter;
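
For orientation, here is a minimal usage sketch of the adapter above, in TypeScript. This is not part of the package diff: the import path, model ID, and the literal request shape are assumptions; the sketch only uses the fields that sendMessage actually reads (providerId, modelId, messages, settings), while the authoritative InternalLLMChatRequest type lives in the package's ./types module, which this diff does not include.

import { AnthropicClientAdapter } from "genai-lite"; // hypothetical import path

async function main(): Promise<void> {
  const adapter = new AnthropicClientAdapter();
  // Hypothetical request literal mirroring what sendMessage reads above.
  const request = {
    providerId: "anthropic",
    modelId: "claude-3-haiku-20240307", // illustrative model ID
    messages: [{ role: "user", content: "Say hello in one sentence." }],
    settings: { maxTokens: 256, temperature: 0.7, topP: 1, stopSequences: [] },
  };
  const result = await adapter.sendMessage(request, process.env.ANTHROPIC_API_KEY ?? "");
  if (result.object === "error") {
    // LLMFailureResponse branch, as built by createErrorResponse above
    console.error(result.error.code, result.error.message);
  } else {
    // LLMResponse branch, as built by createSuccessResponse above
    console.log(result.choices[0].message.content);
  }
}

main();

Note that success and failure are discriminated on the object field ("chat.completion" vs. "error"), exactly as the two response builders above construct them.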
@@ -0,0 +1,83 @@
+ import type { LLMResponse, LLMFailureResponse } from "../types";
+ import type { ILLMClientAdapter, InternalLLMChatRequest } from "./types";
+ /**
+  * Client adapter for Google Gemini API integration
+  *
+  * This adapter:
+  * - Formats requests according to Gemini's generative AI API requirements
+  * - Handles Gemini-specific safety settings and system instructions
+  * - Maps Gemini responses to standardized LLMResponse format
+  * - Converts Gemini errors to standardized LLMFailureResponse format
+  * - Manages Gemini-specific settings and constraints
+  */
+ export declare class GeminiClientAdapter implements ILLMClientAdapter {
+     private baseURL?;
+     /**
+      * Creates a new Gemini client adapter
+      *
+      * @param config Optional configuration for the adapter
+      * @param config.baseURL Custom base URL (unused for Gemini but kept for consistency)
+      */
+     constructor(config?: {
+         baseURL?: string;
+     });
+     /**
+      * Sends a chat message to Gemini's API
+      *
+      * @param request - The internal LLM request with applied settings
+      * @param apiKey - The decrypted Gemini API key
+      * @returns Promise resolving to success or failure response
+      */
+     sendMessage(request: InternalLLMChatRequest, apiKey: string): Promise<LLMResponse | LLMFailureResponse>;
+     /**
+      * Validates Gemini API key format
+      *
+      * @param apiKey - The API key to validate
+      * @returns True if the key format appears valid
+      */
+     validateApiKey(apiKey: string): boolean;
+     /**
+      * Gets adapter information
+      */
+     getAdapterInfo(): {
+         providerId: "gemini";
+         name: string;
+         version: string;
+     };
+     /**
+      * Formats the internal LLM request for Gemini API
+      *
+      * @param request - The internal LLM request
+      * @returns Formatted request components for Gemini
+      */
+     private formatInternalRequestToGemini;
+     /**
+      * Creates a standardized success response from Gemini's response
+      *
+      * @param response - Raw Gemini response
+      * @param request - Original request for context
+      * @returns Standardized LLM response
+      */
+     private createSuccessResponse;
+     /**
+      * Maps Gemini finish reasons to standardized format
+      *
+      * @param geminiReason - The finish reason from Gemini
+      * @returns Standardized finish reason
+      */
+     private mapGeminiFinishReason;
+     /**
+      * Creates a standardized error response from Gemini errors
+      *
+      * @param error - The error from Gemini API
+      * @param request - Original request for context
+      * @returns Standardized LLM failure response
+      */
+     private createErrorResponse;
+     /**
+      * Generates a unique response ID
+      *
+      * @returns A unique response ID string
+      */
+     private generateResponseId;
+ }
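
The declaration above, together with the Anthropic adapter earlier in this diff, implements ILLMClientAdapter from ./types, which this diff does not include. Inferring only from the methods the adapters expose, the shared contract looks roughly like the following sketch; whether validateApiKey and getAdapterInfo are optional is an assumption, not something the diff confirms.

import type { LLMResponse, LLMFailureResponse } from "../types";
import type { InternalLLMChatRequest } from "./types";

// Inferred sketch of the shared adapter contract; the authoritative
// definition lives in the package's ./types module (not in this diff).
interface ILLMClientAdapter {
  sendMessage(
    request: InternalLLMChatRequest,
    apiKey: string
  ): Promise<LLMResponse | LLMFailureResponse>;
  validateApiKey?(apiKey: string): boolean; // optionality assumed
  getAdapterInfo?(): { providerId: string; name: string; version: string }; // optionality assumed
}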
@@ -0,0 +1,266 @@
+ "use strict";
+ // AI Summary: Gemini client adapter for making real API calls to Google's Gemini LLM APIs.
+ // Handles Gemini-specific request formatting, safety settings, response parsing, and error mapping.
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.GeminiClientAdapter = void 0;
+ const genai_1 = require("@google/genai");
+ const types_1 = require("./types");
+ const adapterErrorUtils_1 = require("./adapterErrorUtils");
+ /**
+  * Client adapter for Google Gemini API integration
+  *
+  * This adapter:
+  * - Formats requests according to Gemini's generative AI API requirements
+  * - Handles Gemini-specific safety settings and system instructions
+  * - Maps Gemini responses to standardized LLMResponse format
+  * - Converts Gemini errors to standardized LLMFailureResponse format
+  * - Manages Gemini-specific settings and constraints
+  */
+ class GeminiClientAdapter {
+     /**
+      * Creates a new Gemini client adapter
+      *
+      * @param config Optional configuration for the adapter
+      * @param config.baseURL Custom base URL (unused for Gemini but kept for consistency)
+      */
+     constructor(config) {
+         this.baseURL = config?.baseURL;
+     }
+     /**
+      * Sends a chat message to Gemini's API
+      *
+      * @param request - The internal LLM request with applied settings
+      * @param apiKey - The decrypted Gemini API key
+      * @returns Promise resolving to success or failure response
+      */
+     async sendMessage(request, apiKey) {
+         try {
+             // Initialize Gemini client
+             const genAI = new genai_1.GoogleGenAI({ apiKey });
+             // Format the request for Gemini API
+             const { contents, generationConfig, safetySettings, systemInstruction } = this.formatInternalRequestToGemini(request);
+             console.log(`Making Gemini API call for model: ${request.modelId}`);
+             console.log(`Gemini API parameters:`, {
+                 model: request.modelId,
+                 temperature: generationConfig.temperature,
+                 maxOutputTokens: generationConfig.maxOutputTokens,
+                 hasSystemInstruction: !!systemInstruction,
+                 contentsLength: contents.length,
+                 safetySettingsCount: safetySettings?.length || 0,
+             });
+             // Generate content using the modern API
+             const result = await genAI.models.generateContent({
+                 model: request.modelId,
+                 contents: contents,
+                 config: {
+                     ...generationConfig,
+                     safetySettings: safetySettings,
+                     ...(systemInstruction && { systemInstruction: systemInstruction }),
+                 },
+             });
+             console.log(`Gemini API call successful, processing response`);
+             // Convert to standardized response format
+             return this.createSuccessResponse(result, request);
+         }
+         catch (error) {
+             console.error("Gemini API error:", error);
+             return this.createErrorResponse(error, request);
+         }
+     }
+     /**
+      * Validates Gemini API key format
+      *
+      * @param apiKey - The API key to validate
+      * @returns True if the key format appears valid
+      */
+     validateApiKey(apiKey) {
+         // Gemini API keys typically start with 'AIza' and are around 39 characters long
+         return (typeof apiKey === "string" &&
+             apiKey.startsWith("AIza") &&
+             apiKey.length >= 35);
+     }
+     /**
+      * Gets adapter information
+      */
+     getAdapterInfo() {
+         return {
+             providerId: "gemini",
+             name: "Gemini Client Adapter",
+             version: "1.0.0",
+         };
+     }
+     /**
+      * Formats the internal LLM request for Gemini API
+      *
+      * @param request - The internal LLM request
+      * @returns Formatted request components for Gemini
+      */
+     formatInternalRequestToGemini(request) {
+         const contents = [];
+         let systemInstruction = request.systemMessage;
+         // Process messages - separate system messages and build conversation contents
+         for (const message of request.messages) {
+             if (message.role === "system") {
+                 // Gemini handles system messages as systemInstruction
+                 if (systemInstruction) {
+                     systemInstruction += "\n\n" + message.content;
+                 }
+                 else {
+                     systemInstruction = message.content;
+                 }
+             }
+             else if (message.role === "user") {
+                 contents.push({
+                     role: "user",
+                     parts: [{ text: message.content }],
+                 });
+             }
+             else if (message.role === "assistant") {
+                 // Map assistant to model for Gemini
+                 contents.push({
+                     role: "model",
+                     parts: [{ text: message.content }],
+                 });
+             }
+         }
+         // Build generation config
+         const generationConfig = {
+             maxOutputTokens: request.settings.maxTokens,
+             temperature: request.settings.temperature,
+             ...(request.settings.topP && { topP: request.settings.topP }),
+             ...(request.settings.stopSequences &&
+                 request.settings.stopSequences.length > 0 && {
+                 stopSequences: request.settings.stopSequences,
+             }),
+         };
+         // Map safety settings from Athanor format to Gemini SDK format
+         const safetySettings = request.settings.geminiSafetySettings?.map((setting) => ({
+             category: setting.category,
+             threshold: setting.threshold,
+         }));
+         return {
+             contents,
+             generationConfig,
+             safetySettings,
+             systemInstruction,
+         };
+     }
+     /**
+      * Creates a standardized success response from Gemini's response
+      *
+      * @param response - Raw Gemini response
+      * @param request - Original request for context
+      * @returns Standardized LLM response
+      */
+     createSuccessResponse(response, request) {
+         // Extract content from the response object
+         const candidate = response.candidates?.[0];
+         const content = candidate?.content?.parts?.[0]?.text || "";
+         // Extract usage data if available
+         const usageMetadata = response.usageMetadata || {};
+         const finishReason = this.mapGeminiFinishReason(candidate?.finishReason || null);
+         return {
+             id: this.generateResponseId(),
+             provider: request.providerId,
+             model: response.modelUsed || request.modelId,
+             created: Math.floor(Date.now() / 1000),
+             choices: [
+                 {
+                     message: {
+                         role: "assistant",
+                         content: content,
+                     },
+                     finish_reason: finishReason,
+                     index: 0,
+                 },
+             ],
+             usage: usageMetadata
+                 ? {
+                     prompt_tokens: usageMetadata.promptTokenCount || 0,
+                     completion_tokens: usageMetadata.candidatesTokenCount || 0,
+                     total_tokens: usageMetadata.totalTokenCount || 0,
+                 }
+                 : undefined,
+             object: "chat.completion",
+         };
+     }
+     /**
+      * Maps Gemini finish reasons to standardized format
+      *
+      * @param geminiReason - The finish reason from Gemini
+      * @returns Standardized finish reason
+      */
+     mapGeminiFinishReason(geminiReason) {
+         if (!geminiReason)
+             return null;
+         const reasonMap = {
+             STOP: "stop",
+             MAX_TOKENS: "length",
+             SAFETY: "content_filter",
+             RECITATION: "content_filter",
+             PROHIBITED_CONTENT: "content_filter",
+             SPII: "content_filter",
+             BLOCKLIST: "content_filter",
+             LANGUAGE: "other",
+             OTHER: "other",
+             MALFORMED_FUNCTION_CALL: "function_call_error",
+         };
+         return reasonMap[geminiReason] || "other";
+     }
+     /**
+      * Creates a standardized error response from Gemini errors
+      *
+      * @param error - The error from Gemini API
+      * @param request - Original request for context
+      * @returns Standardized LLM failure response
+      */
+     createErrorResponse(error, request) {
+         // Use shared error mapping utility for common error patterns
+         const initialProviderMessage = error?.message;
+         let { errorCode, errorMessage, errorType, status } = (0, adapterErrorUtils_1.getCommonMappedErrorDetails)(error, initialProviderMessage);
+         // Apply Gemini-specific refinements for certain error types
+         if (error && error.message) {
+             const message = error.message.toLowerCase();
+             if (message.includes("context length") || message.includes("too long")) {
+                 errorCode = types_1.ADAPTER_ERROR_CODES.CONTEXT_LENGTH_EXCEEDED;
+                 errorType = "invalid_request_error";
+             }
+             else if (message.includes("safety") || message.includes("blocked")) {
+                 errorCode = types_1.ADAPTER_ERROR_CODES.CONTENT_FILTER;
+                 errorType = "content_filter_error";
+             }
+             else if (message.includes("api key") ||
+                 message.includes("authentication")) {
+                 errorCode = types_1.ADAPTER_ERROR_CODES.INVALID_API_KEY;
+                 errorType = "authentication_error";
+             }
+             else if (message.includes("quota") || message.includes("limit")) {
+                 errorCode = types_1.ADAPTER_ERROR_CODES.RATE_LIMIT_EXCEEDED;
+                 errorType = "rate_limit_error";
+             }
+         }
+         return {
+             provider: request.providerId,
+             model: request.modelId,
+             error: {
+                 message: errorMessage,
+                 code: errorCode,
+                 type: errorType,
+                 ...(status && { status }),
+                 providerError: error,
+             },
+             object: "error",
+         };
+     }
+     /**
+      * Generates a unique response ID
+      *
+      * @returns A unique response ID string
+      */
+     generateResponseId() {
+         return `gemini-${Date.now()}-${Math.random()
+             .toString(36)
+             .substring(2, 15)}`;
+     }
+ }
+ exports.GeminiClientAdapter = GeminiClientAdapter;
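
The geminiSafetySettings pass-through in formatInternalRequestToGemini implies request settings shaped roughly as in the sketch below. The field values are assumptions for illustration: the category and threshold strings follow the HarmCategory and HarmBlockThreshold enum names used by @google/genai, and the authoritative settings type is defined elsewhere in the package, not in this diff.

// Hypothetical settings fragment as consumed by formatInternalRequestToGemini.
const settings = {
  maxTokens: 512,
  temperature: 0.4,
  topP: 0.95,
  stopSequences: [],
  geminiSafetySettings: [
    // Enum-style strings matching @google/genai's HarmCategory/HarmBlockThreshold
    { category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_MEDIUM_AND_ABOVE" },
    { category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "BLOCK_ONLY_HIGH" },
  ],
};

Each entry is copied field-for-field onto the safetySettings array that sendMessage forwards inside the generateContent config, so the internal format stays a thin mirror of the SDK's.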
@@ -0,0 +1,69 @@
+ import type { LLMResponse, LLMFailureResponse, ApiProviderId } from "../types";
+ import type { ILLMClientAdapter, InternalLLMChatRequest } from "./types";
+ /**
+  * Mock client adapter for testing LLM functionality
+  *
+  * This adapter simulates various LLM provider responses without making real API calls.
+  * It's useful for:
+  * - Testing the LLM service flow
+  * - Development when API keys are not available
+  * - Simulating error conditions
+  * - Performance testing without API costs
+  */
+ export declare class MockClientAdapter implements ILLMClientAdapter {
+     private providerId;
+     constructor(providerId?: ApiProviderId);
+     /**
+      * Sends a mock message response based on request content
+      *
+      * @param request - The LLM request
+      * @param apiKey - The API key (ignored for mock)
+      * @returns Promise resolving to mock response
+      */
+     sendMessage(request: InternalLLMChatRequest, apiKey: string): Promise<LLMResponse | LLMFailureResponse>;
+     /**
+      * Validates API key format (always returns true for mock)
+      */
+     validateApiKey(apiKey: string): boolean;
+     /**
+      * Gets adapter information
+      */
+     getAdapterInfo(): {
+         providerId: string;
+         name: string;
+         version: string;
+         supportedModels: string[];
+     };
+     /**
+      * Creates a successful mock response
+      */
+     private createSuccessResponse;
+     /**
+      * Generates a response that demonstrates temperature effects
+      */
+     private generateTemperatureTestResponse;
+     /**
+      * Generates a response that shows current settings
+      */
+     private generateSettingsTestResponse;
+     /**
+      * Applies mock temperature effects to response content
+      */
+     private applyTemperatureEffects;
+     /**
+      * Creates an error response
+      */
+     private createErrorResponse;
+     /**
+      * Maps error codes to error types
+      */
+     private getErrorType;
+     /**
+      * Generates a longer mock response for testing
+      */
+     private generateLongResponse;
+     /**
+      * Simulates network delay with random variation
+      */
+     private simulateDelay;
+ }
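
Because MockClientAdapter resolves without network access, it can stand in for a real adapter in tests. A brief sketch of that use, with the usual caveats: the import path, providerId value, and request literal are assumptions, and only the declaration above (constructor, sendMessage, apiKey ignored) is taken from the diff.

import { MockClientAdapter } from "genai-lite"; // hypothetical import path

async function exerciseMock(): Promise<void> {
  const mock = new MockClientAdapter("anthropic"); // providerId is optional per the declaration
  const result = await mock.sendMessage(
    {
      providerId: "anthropic",
      modelId: "claude-3-haiku-20240307", // illustrative
      messages: [{ role: "user", content: "ping" }],
      settings: { maxTokens: 64, temperature: 0, topP: 1, stopSequences: [] },
    },
    "not-a-real-key" // ignored by the mock, per the sendMessage doc comment
  );
  // The mock returns the same standardized shapes as the real adapters,
  // so tests can discriminate on result.object just as production code does.
  console.log(result.object);
}

exerciseMock();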