genai-lite 0.8.2 → 0.8.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -24,6 +24,7 @@ export declare class GenaiElectronImageAdapter implements ImageProviderAdapter {
  private baseURL;
  private timeout;
  private pollInterval;
+ private logger;
  constructor(config?: ImageProviderAdapterConfig);
  /**
  * Generates images using genai-electron's async API with progress polling
@@ -19,7 +19,6 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.GenaiElectronImageAdapter = void 0;
  const errorUtils_1 = require("../../shared/adapters/errorUtils");
  const defaultLogger_1 = require("../../logging/defaultLogger");
- const logger = (0, defaultLogger_1.createDefaultLogger)();
  /**
  * Adapter for genai-electron's local diffusion image generation
  */
@@ -37,6 +36,7 @@ class GenaiElectronImageAdapter {
  this.baseURL = config?.baseURL || 'http://localhost:8081';
  this.timeout = config?.timeout || 120000; // 120 seconds for diffusion
  this.pollInterval = 500; // Poll every 500ms
+ this.logger = config?.logger ?? (0, defaultLogger_1.createDefaultLogger)();
  }
  /**
  * Generates images using genai-electron's async API with progress polling
@@ -46,7 +46,7 @@ class GenaiElectronImageAdapter {
  try {
  // Build request payload
  const payload = this.buildRequestPayload(resolvedPrompt, request, settings);
- logger.debug(`GenaiElectron Image API: Starting generation`, {
+ this.logger.debug(`GenaiElectron Image API: Starting generation`, {
  prompt: resolvedPrompt.substring(0, 100),
  count: payload.count,
  dimensions: `${payload.width}x${payload.height}`,
@@ -54,15 +54,15 @@ class GenaiElectronImageAdapter {
  });
  // Start generation (returns immediately with ID)
  const generationId = await this.startGeneration(payload);
- logger.info(`GenaiElectron Image API: Generation started with ID: ${generationId}`);
+ this.logger.info(`GenaiElectron Image API: Generation started with ID: ${generationId}`);
  // Poll for completion
  const result = await this.pollForCompletion(generationId, settings.diffusion?.onProgress);
- logger.info(`GenaiElectron Image API: Generation complete (${result.timeTaken}ms)`);
+ this.logger.info(`GenaiElectron Image API: Generation complete (${result.timeTaken}ms)`);
  // Convert to ImageGenerationResponse
  return this.convertToResponse(result, request);
  }
  catch (error) {
- logger.error('GenaiElectron Image API error:', error);
+ this.logger.error('GenaiElectron Image API error:', error);
  throw this.handleError(error, request);
  }
  }
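Taken together, the hunks above mean the image adapter now reads an optional logger off its config and only falls back to createDefaultLogger() when none is supplied. A minimal usage sketch in TypeScript; the import paths are assumptions, and the Logger shape (debug/info/warn/error) is inferred from the calls visible in this diff rather than taken from the package's type definitions:

import { GenaiElectronImageAdapter } from "genai-lite"; // assumed export path
import type { Logger } from "genai-lite"; // assumed export path

// Hypothetical host-application logger satisfying the assumed Logger shape.
const appLogger: Logger = {
  debug: (message, meta) => console.debug("[images]", message, meta ?? ""),
  info: (message, meta) => console.info("[images]", message, meta ?? ""),
  warn: (message, meta) => console.warn("[images]", message, meta ?? ""),
  error: (message, meta) => console.error("[images]", message, meta ?? ""),
};

// Without the logger field, the adapter falls back to createDefaultLogger().
const imageAdapter = new GenaiElectronImageAdapter({
  baseURL: "http://localhost:8081", // matches the default shown above
  timeout: 120000,
  logger: appLogger,
});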
@@ -21,6 +21,7 @@ export declare class OpenAIImageAdapter implements ImageProviderAdapter {
  readonly supports: ImageProviderCapabilities;
  private baseURL?;
  private timeout;
+ private logger;
  constructor(config?: ImageProviderAdapterConfig);
  /**
  * Validates OpenAI API key format
@@ -21,7 +21,6 @@ exports.OpenAIImageAdapter = void 0;
  const openai_1 = __importDefault(require("openai"));
  const errorUtils_1 = require("../../shared/adapters/errorUtils");
  const defaultLogger_1 = require("../../logging/defaultLogger");
- const logger = (0, defaultLogger_1.createDefaultLogger)();
  /**
  * Prompt length limits per model
  */
@@ -47,6 +46,7 @@ class OpenAIImageAdapter {
  };
  this.baseURL = config?.baseURL;
  this.timeout = config?.timeout || 60000;
+ this.logger = config?.logger ?? (0, defaultLogger_1.createDefaultLogger)();
  }
  /**
  * Validates OpenAI API key format
@@ -96,7 +96,7 @@ class OpenAIImageAdapter {
  // dall-e-2/dall-e-3: use traditional parameters
  this.addDalleParams(params, settings);
  }
- logger.debug(`OpenAI Image API call for model: ${request.modelId}`, {
+ this.logger.debug(`OpenAI Image API call for model: ${request.modelId}`, {
  model: params.model,
  promptLength: resolvedPrompt.length,
  n: params.n,
@@ -107,12 +107,12 @@ class OpenAIImageAdapter {
  if (!response.data || response.data.length === 0) {
  throw new Error('OpenAI API returned no images in response');
  }
- logger.info(`OpenAI Image API call successful, generated ${response.data.length} images`);
+ this.logger.info(`OpenAI Image API call successful, generated ${response.data.length} images`);
  // Process response
  return await this.processResponse(response, request, isGptImageModel);
  }
  catch (error) {
- logger.error('OpenAI Image API error:', error);
+ this.logger.error('OpenAI Image API error:', error);
  throw this.handleError(error, request);
  }
  }
@@ -44,6 +44,7 @@ class ImageService {
  this.adapterRegistry.registerAdapter('openai-images', new OpenAIImageAdapter_1.OpenAIImageAdapter({
  baseURL: openaiBaseURL,
  timeout: openaiConfig.timeout,
+ logger: this.logger,
  }));
  // Register genai-electron-images adapter
  const electronConfig = config_1.IMAGE_ADAPTER_CONFIGS['genai-electron-images'];
@@ -51,6 +52,7 @@ class ImageService {
  this.adapterRegistry.registerAdapter('genai-electron-images', new GenaiElectronImageAdapter_1.GenaiElectronImageAdapter({
  baseURL: electronBaseURL,
  timeout: electronConfig.timeout,
+ logger: this.logger,
  }));
  // Register custom adapters if provided
  if (options.adapters) {
@@ -42,7 +42,7 @@ class LLMService {
  adapterConstructors: config_1.ADAPTER_CONSTRUCTORS,
  adapterConfigs: config_1.ADAPTER_CONFIGS,
  }, this.logger);
- this.requestValidator = new RequestValidator_1.RequestValidator();
+ this.requestValidator = new RequestValidator_1.RequestValidator(this.logger);
  this.settingsManager = new SettingsManager_1.SettingsManager(this.logger);
  this.modelResolver = new ModelResolver_1.ModelResolver(this.presetManager, this.adapterRegistry, this.logger);
  }
@@ -1,5 +1,6 @@
  import type { LLMResponse, LLMFailureResponse } from "../types";
  import type { ILLMClientAdapter, InternalLLMChatRequest } from "./types";
+ import type { Logger } from "../../logging/types";
  /**
  * Client adapter for Anthropic API integration
  *
@@ -12,14 +13,17 @@ import type { ILLMClientAdapter, InternalLLMChatRequest } from "./types";
  */
  export declare class AnthropicClientAdapter implements ILLMClientAdapter {
  private baseURL?;
+ private logger;
  /**
  * Creates a new Anthropic client adapter
  *
  * @param config Optional configuration for the adapter
  * @param config.baseURL Custom base URL for Anthropic-compatible APIs
+ * @param config.logger Custom logger instance
  */
  constructor(config?: {
  baseURL?: string;
+ logger?: Logger;
  });
  /**
  * Sends a chat message to Anthropic's API
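Per the updated declaration, the constructor now accepts an optional logger alongside baseURL. A hedged sketch of how a caller might pass one; the export paths are assumptions, and the Logger methods shown are simply the ones exercised throughout this diff:

import { AnthropicClientAdapter } from "genai-lite"; // assumed export path
import type { Logger } from "genai-lite"; // assumed export path

// Hypothetical logger that silences debug/info output but keeps warnings and errors.
const quietLogger: Logger = {
  debug: () => {},
  info: () => {},
  warn: (message, meta) => console.warn(message, meta ?? ""),
  error: (message, meta) => console.error(message, meta ?? ""),
};

const anthropicAdapter = new AnthropicClientAdapter({
  logger: quietLogger, // omit to fall back to createDefaultLogger()
});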
@@ -11,7 +11,6 @@ const types_1 = require("./types");
  const errorUtils_1 = require("../../shared/adapters/errorUtils");
  const systemMessageUtils_1 = require("../../shared/adapters/systemMessageUtils");
  const defaultLogger_1 = require("../../logging/defaultLogger");
- const logger = (0, defaultLogger_1.createDefaultLogger)();
  /**
  * Client adapter for Anthropic API integration
  *
@@ -28,9 +27,11 @@ class AnthropicClientAdapter {
  *
  * @param config Optional configuration for the adapter
  * @param config.baseURL Custom base URL for Anthropic-compatible APIs
+ * @param config.logger Custom logger instance
  */
  constructor(config) {
  this.baseURL = config?.baseURL;
+ this.logger = config?.logger ?? (0, defaultLogger_1.createDefaultLogger)();
  }
  /**
  * Sends a chat message to Anthropic's API
@@ -115,8 +116,8 @@ class AnthropicClientAdapter {
  };
  }
  }
- logger.info(`Making Anthropic API call for model: ${request.modelId}`);
- logger.debug(`Anthropic API parameters:`, {
+ this.logger.info(`Making Anthropic API call for model: ${request.modelId}`);
+ this.logger.debug(`Anthropic API parameters:`, {
  model: messageParams.model,
  temperature: messageParams.temperature,
  max_tokens: messageParams.max_tokens,
@@ -139,13 +140,13 @@ class AnthropicClientAdapter {
  else {
  completion = await anthropic.messages.create(messageParams);
  }
- logger.info(`Anthropic API call successful, response ID: ${completion.id}`);
+ this.logger.info(`Anthropic API call successful, response ID: ${completion.id}`);
  // Convert to standardized response format
  // Cast to any to handle beta response type differences
  return this.createSuccessResponse(completion, request);
  }
  catch (error) {
- logger.error("Anthropic API error:", error);
+ this.logger.error("Anthropic API error:", error);
  return this.createErrorResponse(error, request);
  }
  }
@@ -245,7 +246,7 @@ class AnthropicClientAdapter {
  const modifiedIndex = (0, systemMessageUtils_1.prependSystemToFirstUserMessage)(simpleMessages, combinedSystemContent, request.settings.systemMessageFallback);
  if (modifiedIndex !== -1) {
  messages[modifiedIndex].content = simpleMessages[modifiedIndex].content;
- logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
+ this.logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
  }
  // Don't set systemMessage - it stays undefined
  }
@@ -253,7 +254,7 @@ class AnthropicClientAdapter {
  // Anthropic requires messages to start with 'user' role
  // If the first message is not from user, we need to handle this
  if (messages.length > 0 && messages[0].role !== "user") {
- logger.warn("Anthropic API requires first message to be from user. Adjusting message order.");
+ this.logger.warn("Anthropic API requires first message to be from user. Adjusting message order.");
  // Find the first user message and move it to the front, or create a default one
  const firstUserIndex = messages.findIndex((msg) => msg.role === "user");
  if (firstUserIndex > 0) {
@@ -295,7 +296,7 @@ class AnthropicClientAdapter {
  // If roles don't alternate properly, we might need to combine messages
  // or insert a placeholder. For now, we'll skip non-alternating messages
  // and log a warning.
- logger.warn(`Skipping message with unexpected role: expected ${expectedRole}, got ${message.role}`);
+ this.logger.warn(`Skipping message with unexpected role: expected ${expectedRole}, got ${message.role}`);
  }
  }
  return cleanedMessages;
@@ -1,5 +1,6 @@
  import type { LLMResponse, LLMFailureResponse } from "../types";
  import type { ILLMClientAdapter, InternalLLMChatRequest } from "./types";
+ import type { Logger } from "../../logging/types";
  /**
  * Client adapter for Google Gemini API integration
  *
@@ -12,14 +13,17 @@ import type { ILLMClientAdapter, InternalLLMChatRequest } from "./types";
  */
  export declare class GeminiClientAdapter implements ILLMClientAdapter {
  private baseURL?;
+ private logger;
  /**
  * Creates a new Gemini client adapter
  *
  * @param config Optional configuration for the adapter
  * @param config.baseURL Custom base URL (unused for Gemini but kept for consistency)
+ * @param config.logger Custom logger instance
  */
  constructor(config?: {
  baseURL?: string;
+ logger?: Logger;
  });
  /**
  * Sends a chat message to Gemini's API
@@ -8,7 +8,6 @@ const types_1 = require("./types");
  const errorUtils_1 = require("../../shared/adapters/errorUtils");
  const systemMessageUtils_1 = require("../../shared/adapters/systemMessageUtils");
  const defaultLogger_1 = require("../../logging/defaultLogger");
- const logger = (0, defaultLogger_1.createDefaultLogger)();
  /**
  * Client adapter for Google Gemini API integration
  *
@@ -25,9 +24,11 @@ class GeminiClientAdapter {
  *
  * @param config Optional configuration for the adapter
  * @param config.baseURL Custom base URL (unused for Gemini but kept for consistency)
+ * @param config.logger Custom logger instance
  */
  constructor(config) {
  this.baseURL = config?.baseURL;
+ this.logger = config?.logger ?? (0, defaultLogger_1.createDefaultLogger)();
  }
  /**
  * Sends a chat message to Gemini's API
@@ -42,8 +43,8 @@ class GeminiClientAdapter {
  const genAI = new genai_1.GoogleGenAI({ apiKey });
  // Format the request for Gemini API
  const { contents, generationConfig, safetySettings, systemInstruction } = this.formatInternalRequestToGemini(request);
- logger.info(`Making Gemini API call for model: ${request.modelId}`);
- logger.debug(`Gemini API parameters:`, {
+ this.logger.info(`Making Gemini API call for model: ${request.modelId}`);
+ this.logger.debug(`Gemini API parameters:`, {
  model: request.modelId,
  temperature: generationConfig.temperature,
  maxOutputTokens: generationConfig.maxOutputTokens,
@@ -61,12 +62,12 @@ class GeminiClientAdapter {
  ...(systemInstruction && { systemInstruction: systemInstruction }),
  },
  });
- logger.info(`Gemini API call successful, processing response`);
+ this.logger.info(`Gemini API call successful, processing response`);
  // Convert to standardized response format
  return this.createSuccessResponse(result, request);
  }
  catch (error) {
- logger.error("Gemini API error:", error);
+ this.logger.error("Gemini API error:", error);
  return this.createErrorResponse(error, request);
  }
  }
@@ -142,7 +143,7 @@ class GeminiClientAdapter {
  if (modifiedIndex !== -1) {
  // Update the actual contents array
  contents[modifiedIndex].parts[0].text = simpleContents[modifiedIndex].content;
- logger.debug(`Model ${request.modelId} doesn't support system instructions - prepended to first user message`);
+ this.logger.debug(`Model ${request.modelId} doesn't support system instructions - prepended to first user message`);
  }
  // Don't set systemInstruction - it stays undefined
  }
@@ -1,6 +1,7 @@
  import type { LLMResponse, LLMFailureResponse, ModelInfo } from "../types";
  import type { ILLMClientAdapter, InternalLLMChatRequest } from "./types";
  import { LlamaCppServerClient } from "./LlamaCppServerClient";
+ import type { Logger } from "../../logging/types";
  /**
  * Configuration options for LlamaCppClientAdapter
  */
@@ -9,6 +10,8 @@ export interface LlamaCppClientConfig {
  baseURL?: string;
  /** Whether to check server health before sending requests (default: false) */
  checkHealth?: boolean;
+ /** Logger instance for adapter logging */
+ logger?: Logger;
  }
  /**
  * Client adapter for llama.cpp server integration
@@ -51,6 +54,7 @@ export declare class LlamaCppClientAdapter implements ILLMClientAdapter {
  private serverClient;
  private cachedModelCapabilities;
  private detectionAttempted;
+ private logger;
  /**
  * Creates a new llama.cpp client adapter
  *
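LlamaCppClientConfig now documents the logger field next to baseURL and checkHealth, so the same injection works for the local llama.cpp adapter. A sketch under the same assumptions about export paths and the Logger interface:

import { LlamaCppClientAdapter } from "genai-lite"; // assumed export path

const llamaAdapter = new LlamaCppClientAdapter({
  baseURL: "http://localhost:8080", // default shown in the adapter's constructor
  checkHealth: true, // opt in to the pre-request health check
  logger: {
    // Forward adapter output to the host's console, tagged for readability.
    debug: (message, meta) => console.debug("[llama.cpp]", message, meta ?? ""),
    info: (message, meta) => console.info("[llama.cpp]", message, meta ?? ""),
    warn: (message, meta) => console.warn("[llama.cpp]", message, meta ?? ""),
    error: (message, meta) => console.error("[llama.cpp]", message, meta ?? ""),
  },
});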
@@ -13,7 +13,6 @@ const systemMessageUtils_1 = require("../../shared/adapters/systemMessageUtils")
  const LlamaCppServerClient_1 = require("./LlamaCppServerClient");
  const config_1 = require("../config");
  const defaultLogger_1 = require("../../logging/defaultLogger");
- const logger = (0, defaultLogger_1.createDefaultLogger)();
  /**
  * Client adapter for llama.cpp server integration
  *
@@ -61,6 +60,7 @@ class LlamaCppClientAdapter {
  this.baseURL = config?.baseURL || 'http://localhost:8080';
  this.checkHealth = config?.checkHealth || false;
  this.serverClient = new LlamaCppServerClient_1.LlamaCppServerClient(this.baseURL);
+ this.logger = config?.logger ?? (0, defaultLogger_1.createDefaultLogger)();
  }
  /**
  * Gets model capabilities by detecting the loaded GGUF model
@@ -81,10 +81,10 @@ class LlamaCppClientAdapter {
  }
  // Attempt detection
  try {
- logger.debug(`Detecting model capabilities from llama.cpp server at ${this.baseURL}`);
+ this.logger.debug(`Detecting model capabilities from llama.cpp server at ${this.baseURL}`);
  const { data } = await this.serverClient.getModels();
  if (!data || data.length === 0) {
- logger.warn('No models loaded in llama.cpp server');
+ this.logger.warn('No models loaded in llama.cpp server');
  this.detectionAttempted = true;
  return null;
  }
@@ -94,15 +94,15 @@ class LlamaCppClientAdapter {
  this.cachedModelCapabilities = capabilities;
  this.detectionAttempted = true;
  if (capabilities) {
- logger.debug(`Cached model capabilities for: ${ggufFilename}`);
+ this.logger.debug(`Cached model capabilities for: ${ggufFilename}`);
  }
  else {
- logger.debug(`No known pattern matched for: ${ggufFilename}`);
+ this.logger.debug(`No known pattern matched for: ${ggufFilename}`);
  }
  return capabilities;
  }
  catch (error) {
- logger.warn('Failed to detect model capabilities:', error);
+ this.logger.warn('Failed to detect model capabilities:', error);
  this.detectionAttempted = true;
  return null;
  }
@@ -116,7 +116,7 @@ class LlamaCppClientAdapter {
  clearModelCache() {
  this.cachedModelCapabilities = null;
  this.detectionAttempted = false;
- logger.debug('Cleared model capabilities cache');
+ this.logger.debug('Cleared model capabilities cache');
  }
  /**
  * Sends a chat message to llama.cpp server
@@ -145,7 +145,7 @@ class LlamaCppClientAdapter {
  }
  }
  catch (healthError) {
- logger.warn('Health check failed, proceeding with request anyway:', healthError);
+ this.logger.warn('Health check failed, proceeding with request anyway:', healthError);
  }
  }
  // Initialize OpenAI client with llama.cpp base URL
@@ -182,19 +182,19 @@ class LlamaCppClientAdapter {
  schema: so.schema,
  };
  }
- logger.debug(`llama.cpp API parameters:`, {
+ this.logger.debug(`llama.cpp API parameters:`, {
  baseURL: this.baseURL,
  model: completionParams.model,
  temperature: completionParams.temperature,
  max_tokens: completionParams.max_tokens,
  top_p: completionParams.top_p,
  });
- logger.info(`Making llama.cpp API call for model: ${request.modelId}`);
+ this.logger.info(`Making llama.cpp API call for model: ${request.modelId}`);
  // Make the API call
  const completion = await openai.chat.completions.create(completionParams);
  // Type guard to ensure we have a non-streaming response
  if ('id' in completion && 'choices' in completion) {
- logger.info(`llama.cpp API call successful, response ID: ${completion.id}`);
+ this.logger.info(`llama.cpp API call successful, response ID: ${completion.id}`);
  return this.createSuccessResponse(completion, request);
  }
  else {
@@ -202,7 +202,7 @@ class LlamaCppClientAdapter {
  }
  }
  catch (error) {
- logger.error("llama.cpp API error:", error);
+ this.logger.error("llama.cpp API error:", error);
  // Clear cache on connection errors so we re-detect on next request
  const errorMessage = error?.message || String(error);
  if (errorMessage.includes("ECONNREFUSED") ||
@@ -296,7 +296,7 @@ class LlamaCppClientAdapter {
  const modifiedIndex = (0, systemMessageUtils_1.prependSystemToFirstUserMessage)(simpleMessages, combinedSystemContent, request.settings.systemMessageFallback);
  if (modifiedIndex !== -1) {
  messages[modifiedIndex].content = simpleMessages[modifiedIndex].content;
- logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
+ this.logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
  }
  }
  }
@@ -1,11 +1,14 @@
  import type { LLMResponse, LLMFailureResponse } from "../types";
  import type { ILLMClientAdapter, InternalLLMChatRequest } from "./types";
+ import type { Logger } from "../../logging/types";
  /**
  * Configuration options for MistralClientAdapter
  */
  export interface MistralClientConfig {
  /** Base URL of the Mistral API (default: https://api.mistral.ai) */
  baseURL?: string;
+ /** Logger instance for adapter logging */
+ logger?: Logger;
  }
  /**
  * Client adapter for Mistral AI API integration
@@ -35,6 +38,7 @@ export interface MistralClientConfig {
  */
  export declare class MistralClientAdapter implements ILLMClientAdapter {
  private baseURL;
+ private logger;
  /**
  * Creates a new Mistral client adapter
  *
@@ -8,7 +8,6 @@ const types_1 = require("./types");
  const errorUtils_1 = require("../../shared/adapters/errorUtils");
  const systemMessageUtils_1 = require("../../shared/adapters/systemMessageUtils");
  const defaultLogger_1 = require("../../logging/defaultLogger");
- const logger = (0, defaultLogger_1.createDefaultLogger)();
  /**
  * Client adapter for Mistral AI API integration
  *
@@ -43,6 +42,7 @@ class MistralClientAdapter {
  */
  constructor(config) {
  this.baseURL = config?.baseURL || process.env.MISTRAL_API_BASE_URL || 'https://api.mistral.ai';
+ this.logger = config?.logger ?? (0, defaultLogger_1.createDefaultLogger)();
  }
  /**
  * Sends a chat message to Mistral API
@@ -60,14 +60,14 @@ class MistralClientAdapter {
  });
  // Format messages for Mistral API
  const messages = this.formatMessages(request);
- logger.debug(`Mistral API parameters:`, {
+ this.logger.debug(`Mistral API parameters:`, {
  baseURL: this.baseURL,
  model: request.modelId,
  temperature: request.settings.temperature,
  max_tokens: request.settings.maxTokens,
  top_p: request.settings.topP,
  });
- logger.info(`Making Mistral API call for model: ${request.modelId}`);
+ this.logger.info(`Making Mistral API call for model: ${request.modelId}`);
  // Build request options
  const requestOptions = {
  model: request.modelId,
@@ -84,13 +84,13 @@ class MistralClientAdapter {
  // Mistral only supports json_object mode, no schema validation
  if (request.settings.structuredOutput?.schema && request.settings.structuredOutput.enabled !== false) {
  requestOptions.responseFormat = { type: 'json_object' };
- logger.warn(`Mistral does not support JSON schema validation. ` +
+ this.logger.warn(`Mistral does not support JSON schema validation. ` +
  `Using json_object mode - schema validation will be client-side only.`);
  }
  // Make the API call
  const completion = await mistral.chat.complete(requestOptions);
  if (completion && completion.choices && completion.choices.length > 0) {
- logger.info(`Mistral API call successful, response ID: ${completion.id}`);
+ this.logger.info(`Mistral API call successful, response ID: ${completion.id}`);
  return this.createSuccessResponse(completion, request);
  }
  else {
@@ -98,7 +98,7 @@ class MistralClientAdapter {
  }
  }
  catch (error) {
- logger.error("Mistral API error:", error);
+ this.logger.error("Mistral API error:", error);
  return this.createErrorResponse(error, request);
  }
  }
@@ -170,7 +170,7 @@ class MistralClientAdapter {
  // Model doesn't support system messages - prepend to first user message
  const modifiedIndex = (0, systemMessageUtils_1.prependSystemToFirstUserMessage)(messages, combinedSystemContent, request.settings.systemMessageFallback);
  if (modifiedIndex !== -1) {
- logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
+ this.logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
  }
  }
  }
@@ -1,5 +1,6 @@
  import type { LLMResponse, LLMFailureResponse } from "../types";
  import type { ILLMClientAdapter, InternalLLMChatRequest } from "./types";
+ import type { Logger } from "../../logging/types";
  /**
  * Client adapter for OpenAI API integration
  *
@@ -11,14 +12,17 @@ import type { ILLMClientAdapter, InternalLLMChatRequest } from "./types";
  */
  export declare class OpenAIClientAdapter implements ILLMClientAdapter {
  private baseURL?;
+ private logger;
  /**
  * Creates a new OpenAI client adapter
  *
  * @param config Optional configuration for the adapter
  * @param config.baseURL Custom base URL for OpenAI-compatible APIs
+ * @param config.logger Custom logger instance
  */
  constructor(config?: {
  baseURL?: string;
+ logger?: Logger;
  });
  /**
  * Sends a chat message to OpenAI's API
@@ -11,7 +11,6 @@ const types_1 = require("./types");
  const errorUtils_1 = require("../../shared/adapters/errorUtils");
  const systemMessageUtils_1 = require("../../shared/adapters/systemMessageUtils");
  const defaultLogger_1 = require("../../logging/defaultLogger");
- const logger = (0, defaultLogger_1.createDefaultLogger)();
  /**
  * Client adapter for OpenAI API integration
  *
@@ -27,9 +26,11 @@ class OpenAIClientAdapter {
  *
  * @param config Optional configuration for the adapter
  * @param config.baseURL Custom base URL for OpenAI-compatible APIs
+ * @param config.logger Custom logger instance
  */
  constructor(config) {
  this.baseURL = config?.baseURL;
+ this.logger = config?.logger ?? (0, defaultLogger_1.createDefaultLogger)();
  }
  /**
  * Sends a chat message to OpenAI's API
@@ -95,7 +96,7 @@ class OpenAIClientAdapter {
  }
  };
  }
- logger.debug(`OpenAI API parameters:`, {
+ this.logger.debug(`OpenAI API parameters:`, {
  model: completionParams.model,
  temperature: completionParams.temperature,
  max_completion_tokens: completionParams.max_completion_tokens,
@@ -105,12 +106,12 @@ class OpenAIClientAdapter {
  presence_penalty: completionParams.presence_penalty,
  hasUser: !!completionParams.user,
  });
- logger.info(`Making OpenAI API call for model: ${request.modelId}`);
+ this.logger.info(`Making OpenAI API call for model: ${request.modelId}`);
  // Make the API call
  const completion = await openai.chat.completions.create(completionParams);
  // Type guard to ensure we have a non-streaming response
  if ('id' in completion && 'choices' in completion) {
- logger.info(`OpenAI API call successful, response ID: ${completion.id}`);
+ this.logger.info(`OpenAI API call successful, response ID: ${completion.id}`);
  // Convert to standardized response format
  return this.createSuccessResponse(completion, request);
  }
@@ -119,7 +120,7 @@ class OpenAIClientAdapter {
  }
  }
  catch (error) {
- logger.error("OpenAI API error:", error);
+ this.logger.error("OpenAI API error:", error);
  return this.createErrorResponse(error, request);
  }
  }
@@ -224,7 +225,7 @@ class OpenAIClientAdapter {
  const modifiedIndex = (0, systemMessageUtils_1.prependSystemToFirstUserMessage)(simpleMessages, combinedSystemContent, request.settings.systemMessageFallback);
  if (modifiedIndex !== -1) {
  messages[modifiedIndex].content = simpleMessages[modifiedIndex].content;
- logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
+ this.logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
  }
  }
  }
@@ -1,5 +1,6 @@
  import type { LLMResponse, LLMFailureResponse } from "../types";
  import type { ILLMClientAdapter, InternalLLMChatRequest } from "./types";
+ import type { Logger } from "../../logging/types";
  /**
  * Configuration options for OpenRouterClientAdapter
  */
@@ -10,6 +11,8 @@ export interface OpenRouterClientConfig {
  httpReferer?: string;
  /** Your app's display name for rankings (optional) */
  siteTitle?: string;
+ /** Logger instance for adapter logging */
+ logger?: Logger;
  }
  /**
  * Client adapter for OpenRouter API integration
@@ -45,6 +48,7 @@ export declare class OpenRouterClientAdapter implements ILLMClientAdapter {
  private baseURL;
  private httpReferer?;
  private siteTitle?;
+ private logger;
  /**
  * Creates a new OpenRouter client adapter
  *
@@ -11,7 +11,6 @@ const types_1 = require("./types");
  const errorUtils_1 = require("../../shared/adapters/errorUtils");
  const systemMessageUtils_1 = require("../../shared/adapters/systemMessageUtils");
  const defaultLogger_1 = require("../../logging/defaultLogger");
- const logger = (0, defaultLogger_1.createDefaultLogger)();
  /**
  * Client adapter for OpenRouter API integration
  *
@@ -52,6 +51,7 @@ class OpenRouterClientAdapter {
  this.baseURL = config?.baseURL || 'https://openrouter.ai/api/v1';
  this.httpReferer = config?.httpReferer || process.env.OPENROUTER_HTTP_REFERER;
  this.siteTitle = config?.siteTitle || process.env.OPENROUTER_SITE_TITLE;
+ this.logger = config?.logger ?? (0, defaultLogger_1.createDefaultLogger)();
  }
  /**
  * Sends a chat message to OpenRouter API
@@ -126,7 +126,7 @@ class OpenRouterClientAdapter {
  }
  };
  }
- logger.debug(`OpenRouter API parameters:`, {
+ this.logger.debug(`OpenRouter API parameters:`, {
  baseURL: this.baseURL,
  model: completionParams.model,
  temperature: completionParams.temperature,
@@ -134,12 +134,12 @@ class OpenRouterClientAdapter {
  top_p: completionParams.top_p,
  hasProviderRouting: !!completionParams.provider,
  });
- logger.info(`Making OpenRouter API call for model: ${request.modelId}`);
+ this.logger.info(`Making OpenRouter API call for model: ${request.modelId}`);
  // Make the API call
  const completion = await openai.chat.completions.create(completionParams);
  // Type guard to ensure we have a non-streaming response
  if ('id' in completion && 'choices' in completion) {
- logger.info(`OpenRouter API call successful, response ID: ${completion.id}`);
+ this.logger.info(`OpenRouter API call successful, response ID: ${completion.id}`);
  return this.createSuccessResponse(completion, request);
  }
  else {
@@ -147,7 +147,7 @@ class OpenRouterClientAdapter {
  }
  }
  catch (error) {
- logger.error("OpenRouter API error:", error);
+ this.logger.error("OpenRouter API error:", error);
  return this.createErrorResponse(error, request);
  }
  }
@@ -223,7 +223,7 @@ class OpenRouterClientAdapter {
  const modifiedIndex = (0, systemMessageUtils_1.prependSystemToFirstUserMessage)(simpleMessages, combinedSystemContent, request.settings.systemMessageFallback);
  if (modifiedIndex !== -1) {
  messages[modifiedIndex].content = simpleMessages[modifiedIndex].content;
- logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
+ this.logger.debug(`Model ${request.modelId} doesn't support system messages - prepended to first user message`);
  }
  }
  }
@@ -1,8 +1,16 @@
  import type { LLMChatRequest, LLMChatRequestWithPreset, LLMFailureResponse, LLMSettings, ModelInfo, StructuredOutputSettings } from "../types";
+ import type { Logger } from "../../logging/types";
  /**
  * Validates LLM requests including structure, messages, and settings
  */
  export declare class RequestValidator {
+ private logger;
+ /**
+ * Creates a new RequestValidator
+ *
+ * @param logger Optional logger instance
+ */
+ constructor(logger?: Logger);
  /**
  * Validates basic LLM request structure
  *
@@ -2,12 +2,19 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.RequestValidator = void 0;
  const defaultLogger_1 = require("../../logging/defaultLogger");
- const logger = (0, defaultLogger_1.createDefaultLogger)();
  const config_1 = require("../config");
  /**
  * Validates LLM requests including structure, messages, and settings
  */
  class RequestValidator {
+ /**
+ * Creates a new RequestValidator
+ *
+ * @param logger Optional logger instance
+ */
+ constructor(logger) {
+ this.logger = logger ?? (0, defaultLogger_1.createDefaultLogger)();
+ }
  /**
  * Validates basic LLM request structure
  *
@@ -156,7 +163,7 @@ class RequestValidator {
  }
  // Warn (but don't error) if strict mode requested but not supported
  if (structuredOutput.strict !== false && modelInfo.structuredOutput.strictMode === false) {
- logger.warn(`Model ${request.modelId} does not support strict mode for structured output. ` +
+ this.logger.warn(`Model ${request.modelId} does not support strict mode for structured output. ` +
  `Schema validation will be client-side only.`);
  }
  }
@@ -50,7 +50,9 @@ class AdapterRegistry {
  if (AdapterClass) {
  try {
  const adapterConfig = adapterConfigs[provider.id];
- const adapterInstance = new AdapterClass(adapterConfig);
+ // Inject logger into adapter config
+ const configWithLogger = { ...adapterConfig, logger: this.logger };
+ const adapterInstance = new AdapterClass(configWithLogger);
  this.registerAdapter(providerId, adapterInstance);
  registeredCount++;
  successfullyRegisteredProviders.push(provider.id);
@@ -337,6 +337,8 @@ export interface ImageProviderAdapterConfig {
  timeout?: number;
  /** Whether to check health before requests */
  checkHealth?: boolean;
+ /** Logger instance for adapter logging */
+ logger?: Logger;
  }
  /**
  * Interface that all image provider adapters must implement
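With logger now part of ImageProviderAdapterConfig, the same optional field is available to every image adapter, including custom ones registered through ImageService. A no-op logger is one way a host might suppress adapter logging entirely; this is a sketch, and the export paths are assumptions:

import { OpenAIImageAdapter } from "genai-lite"; // assumed export path
import type { Logger } from "genai-lite"; // assumed export path

// No-op logger: discards everything the adapter would log.
const silentLogger: Logger = {
  debug: () => {},
  info: () => {},
  warn: () => {},
  error: () => {},
};

const openaiImages = new OpenAIImageAdapter({
  timeout: 60000, // matches the constructor default shown earlier
  logger: silentLogger,
});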
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "genai-lite",
- "version": "0.8.2",
+ "version": "0.8.3",
  "description": "A lightweight, portable toolkit for interacting with various Generative AI APIs.",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",