genai-lite 0.6.1 → 0.7.0
- package/README.md +21 -0
- package/dist/adapters/image/GenaiElectronImageAdapter.js +6 -4
- package/dist/adapters/image/OpenAIImageAdapter.js +5 -3
- package/dist/config/llm-presets.json +0 -44
- package/dist/image/ImageService.d.ts +1 -0
- package/dist/image/ImageService.js +13 -10
- package/dist/index.d.ts +2 -0
- package/dist/index.js +5 -1
- package/dist/llm/LLMService.d.ts +6 -0
- package/dist/llm/LLMService.js +20 -17
- package/dist/llm/clients/AnthropicClientAdapter.js +8 -6
- package/dist/llm/clients/GeminiClientAdapter.js +6 -4
- package/dist/llm/clients/LlamaCppClientAdapter.js +13 -11
- package/dist/llm/clients/OpenAIClientAdapter.js +6 -4
- package/dist/llm/config.js +3 -27
- package/dist/llm/services/ModelResolver.d.ts +3 -1
- package/dist/llm/services/ModelResolver.js +5 -3
- package/dist/llm/services/SettingsManager.d.ts +3 -0
- package/dist/llm/services/SettingsManager.js +24 -20
- package/dist/logging/defaultLogger.d.ts +35 -0
- package/dist/logging/defaultLogger.js +94 -0
- package/dist/logging/index.d.ts +2 -0
- package/dist/logging/index.js +7 -0
- package/dist/logging/types.d.ts +23 -0
- package/dist/logging/types.js +2 -0
- package/dist/prompting/parser.js +4 -1
- package/dist/shared/services/AdapterRegistry.d.ts +4 -1
- package/dist/shared/services/AdapterRegistry.js +12 -9
- package/dist/types/image.d.ts +5 -0
- package/package.json +1 -1
- package/src/config/llm-presets.json +0 -44
package/README.md
CHANGED
@@ -14,6 +14,7 @@ A lightweight, portable Node.js/TypeScript library providing a unified interface
 - 🛡️ **Provider Normalization** - Consistent responses across different AI APIs
 - 🎨 **Configurable Model Presets** - Built-in presets with full customization options
 - 🎭 **Template Engine** - Sophisticated templating with conditionals and variable substitution
+- 📊 **Configurable Logging** - Debug mode, custom loggers (pino, winston), and silent mode for tests
 
 ## Installation
 
@@ -112,6 +113,7 @@ Comprehensive documentation is available in the **[`genai-lite-docs`](./genai-li
 
 ### Utilities & Advanced
 - **[Prompting Utilities](./genai-lite-docs/prompting-utilities.md)** - Template engine, token counting, content parsing
+- **[Logging](./genai-lite-docs/logging.md)** - Configure logging and debugging
 - **[TypeScript Reference](./genai-lite-docs/typescript-reference.md)** - Type definitions
 
 ### Provider Reference
@@ -154,6 +156,25 @@ const llmService = new LLMService(myKeyProvider);
 
 See **[Core Concepts](./genai-lite-docs/core-concepts.md#api-key-management)** for detailed examples including Electron integration.
 
+## Logging Configuration
+
+Control logging verbosity via environment variable or service options:
+
+```bash
+# Environment variable (applies to all services)
+export GENAI_LITE_LOG_LEVEL=debug # Options: silent, error, warn, info, debug
+```
+
+```typescript
+// Per-service configuration
+const llmService = new LLMService(fromEnvironment, {
+  logLevel: 'debug', // Override env var
+  logger: customPinoLogger // Inject pino/winston/etc.
+});
+```
+
+See **[Logging](./genai-lite-docs/logging.md)** for custom logger integration and testing patterns.
+
 ## Example Applications
 
 The library includes two complete demo applications showcasing all features:
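For the "silent mode for tests" bullet above, the release also exports a `silentLogger` (visible in the `index.d.ts` changes below). A minimal sketch of both silencing paths, using only names that appear in this diff; that `silentLogger` is a pure no-op is an assumption from its name:

```typescript
import { LLMService, fromEnvironment, silentLogger } from 'genai-lite';

// Option A: filter everything out via the level option
const quietService = new LLMService(fromEnvironment, { logLevel: 'silent' });

// Option B: inject the exported no-op logger (assumed behavior)
const mutedService = new LLMService(fromEnvironment, { logger: silentLogger });
```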
package/dist/adapters/image/GenaiElectronImageAdapter.js
CHANGED
@@ -18,6 +18,8 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.GenaiElectronImageAdapter = void 0;
 const errorUtils_1 = require("../../shared/adapters/errorUtils");
+const defaultLogger_1 = require("../../logging/defaultLogger");
+const logger = (0, defaultLogger_1.createDefaultLogger)();
 /**
  * Adapter for genai-electron's local diffusion image generation
  */
@@ -44,7 +46,7 @@ class GenaiElectronImageAdapter {
         try {
             // Build request payload
             const payload = this.buildRequestPayload(resolvedPrompt, request, settings);
-
+            logger.debug(`GenaiElectron Image API: Starting generation`, {
                 prompt: resolvedPrompt.substring(0, 100),
                 count: payload.count,
                 dimensions: `${payload.width}x${payload.height}`,
@@ -52,15 +54,15 @@ class GenaiElectronImageAdapter {
             });
             // Start generation (returns immediately with ID)
             const generationId = await this.startGeneration(payload);
-
+            logger.info(`GenaiElectron Image API: Generation started with ID: ${generationId}`);
             // Poll for completion
             const result = await this.pollForCompletion(generationId, settings.diffusion?.onProgress);
-
+            logger.info(`GenaiElectron Image API: Generation complete (${result.timeTaken}ms)`);
             // Convert to ImageGenerationResponse
             return this.convertToResponse(result, request);
         }
         catch (error) {
-
+            logger.error('GenaiElectron Image API error:', error);
             throw this.handleError(error, request);
         }
     }
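A pattern repeated in every adapter in this release: the logger is created at module level by calling `createDefaultLogger()` with no arguments, so per the `LLMServiceOptions` doc comment later in this diff (which names the `GENAI_LITE_LOG_LEVEL` fallback), adapter-level output is evidently governed by the environment variable rather than by a per-service `logger` option. A hedged sketch of controlling it; the before-import timing is an assumption about when the module-level call runs:

```typescript
// Assumption: GENAI_LITE_LOG_LEVEL must be set before genai-lite's modules
// first load, because the adapters call createDefaultLogger() at import time.
process.env.GENAI_LITE_LOG_LEVEL = 'debug';

// Dynamic import (ESM) so the assignment above runs first
const { LLMService, fromEnvironment } = await import('genai-lite');
const service = new LLMService(fromEnvironment);
```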
package/dist/adapters/image/OpenAIImageAdapter.js
CHANGED
@@ -20,6 +20,8 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAIImageAdapter = void 0;
 const openai_1 = __importDefault(require("openai"));
 const errorUtils_1 = require("../../shared/adapters/errorUtils");
+const defaultLogger_1 = require("../../logging/defaultLogger");
+const logger = (0, defaultLogger_1.createDefaultLogger)();
 /**
  * Prompt length limits per model
  */
@@ -94,7 +96,7 @@ class OpenAIImageAdapter {
             // dall-e-2/dall-e-3: use traditional parameters
             this.addDalleParams(params, settings);
         }
-
+        logger.debug(`OpenAI Image API call for model: ${request.modelId}`, {
             model: params.model,
             promptLength: resolvedPrompt.length,
             n: params.n,
@@ -105,12 +107,12 @@ class OpenAIImageAdapter {
         if (!response.data || response.data.length === 0) {
             throw new Error('OpenAI API returned no images in response');
         }
-
+        logger.info(`OpenAI Image API call successful, generated ${response.data.length} images`);
         // Process response
         return await this.processResponse(response, request, isGptImageModel);
     }
     catch (error) {
-
+        logger.error('OpenAI Image API error:', error);
         throw this.handleError(error, request);
     }
 }
package/dist/config/llm-presets.json
CHANGED
@@ -369,50 +369,6 @@
                 }
             }
         },
-        {
-            "id": "google-gemini-2.0-flash-default",
-            "displayName": "Google - Gemini 2.0 Flash",
-            "description": "Default preset for Gemini 2.0 Flash.",
-            "providerId": "gemini",
-            "modelId": "gemini-2.0-flash",
-            "settings": {
-                "temperature": 0.7,
-                "geminiSafetySettings": [
-                    { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
-                    {
-                        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
-                        "threshold": "BLOCK_NONE"
-                    },
-                    {
-                        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
-                        "threshold": "BLOCK_NONE"
-                    },
-                    { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
-                ]
-            }
-        },
-        {
-            "id": "google-gemini-2.0-flash-lite-default",
-            "displayName": "Google - Gemini 2.0 Flash Lite",
-            "description": "Default preset for Gemini 2.0 Flash Lite.",
-            "providerId": "gemini",
-            "modelId": "gemini-2.0-flash-lite",
-            "settings": {
-                "temperature": 0.7,
-                "geminiSafetySettings": [
-                    { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
-                    {
-                        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
-                        "threshold": "BLOCK_NONE"
-                    },
-                    {
-                        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
-                        "threshold": "BLOCK_NONE"
-                    },
-                    { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
-                ]
-            }
-        },
         {
             "id": "google-gemma-3-27b-default",
             "displayName": "Google - Gemma 3 27B (Free)",
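Both Gemini 2.0 Flash default presets are gone from the built-in list (the file summary shows the same 44-line removal in `package/src/config/llm-presets.json`). Since `LLMServiceOptions` later in this diff accepts custom `presets` with a documented 'extend' default mode, a removed preset can plausibly be re-supplied at construction time; a hedged sketch rebuilding one entry from the JSON deleted above, assuming it satisfies the `ModelPreset` type:

```typescript
import { LLMService, fromEnvironment } from 'genai-lite';

// Reconstructed verbatim from the preset removed in this release
const gemini20FlashPreset = {
  id: 'google-gemini-2.0-flash-default',
  displayName: 'Google - Gemini 2.0 Flash',
  description: 'Default preset for Gemini 2.0 Flash.',
  providerId: 'gemini',
  modelId: 'gemini-2.0-flash',
  settings: {
    temperature: 0.7,
    geminiSafetySettings: [
      { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_NONE' },
      { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'BLOCK_NONE' },
      { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_NONE' },
      { category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_NONE' },
    ],
  },
};

// presetMode defaults to 'extend', so this merges with the built-ins
const service = new LLMService(fromEnvironment, { presets: [gemini20FlashPreset] });
```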
package/dist/image/ImageService.js
CHANGED
@@ -10,6 +10,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.ImageService = void 0;
+const defaultLogger_1 = require("../logging/defaultLogger");
 const config_1 = require("./config");
 const image_presets_json_1 = __importDefault(require("../config/image-presets.json"));
 const PresetManager_1 = require("../shared/services/PresetManager");
@@ -28,13 +29,15 @@ const defaultImagePresets = image_presets_json_1.default;
 class ImageService {
     constructor(getApiKey, options = {}) {
         this.getApiKey = getApiKey;
+        // Initialize logger - custom logger takes precedence over logLevel
+        this.logger = options.logger ?? (0, defaultLogger_1.createDefaultLogger)(options.logLevel);
         // Initialize helper services
         this.presetManager = new PresetManager_1.PresetManager(defaultImagePresets, options.presets, options.presetMode);
         // Initialize adapter registry with fallback
         this.adapterRegistry = new AdapterRegistry_1.AdapterRegistry({
             supportedProviders: config_1.SUPPORTED_IMAGE_PROVIDERS,
             fallbackAdapter: new MockImageAdapter_1.MockImageAdapter(),
-        });
+        }, this.logger);
         // Register OpenAI adapter
         const openaiConfig = config_1.IMAGE_ADAPTER_CONFIGS['openai-images'];
         const openaiBaseURL = options.baseUrls?.['openai-images'] || openaiConfig.baseURL;
@@ -58,7 +61,7 @@ class ImageService {
         this.requestValidator = new ImageRequestValidator_1.ImageRequestValidator();
         this.settingsResolver = new ImageSettingsResolver_1.ImageSettingsResolver();
         this.modelResolver = new ImageModelResolver_1.ImageModelResolver(this.presetManager);
-
+        this.logger.debug('ImageService: Initialized with OpenAI Images and genai-electron adapters');
     }
     /**
      * Generates images based on the request
@@ -67,7 +70,7 @@ class ImageService {
      * @returns Promise resolving to response or error
      */
     async generateImage(request) {
-
+        this.logger.info('ImageService.generateImage called');
         try {
             // Resolve model information
             const resolved = this.modelResolver.resolve(request);
@@ -114,18 +117,18 @@ class ImageService {
             };
         }
         // Generate images
-
+        this.logger.info(`ImageService: Calling adapter for provider: ${providerId}`);
         const response = await adapter.generate({
             request: fullRequest,
             resolvedPrompt,
             settings: resolvedSettings,
             apiKey,
         });
-
+        this.logger.info('ImageService: Image generation completed successfully');
         return response;
     }
     catch (error) {
-
+        this.logger.error('ImageService: Error during image generation:', error);
         return {
             object: 'error',
             providerId: providerId,
@@ -142,7 +145,7 @@ class ImageService {
         }
     }
     catch (error) {
-
+        this.logger.error('ImageService: Unexpected error:', error);
         const req = request;
         return {
             object: 'error',
@@ -163,7 +166,7 @@ class ImageService {
      * @returns Promise resolving to array of provider information
      */
     async getProviders() {
-
+        this.logger.debug('ImageService.getProviders called');
         return [...config_1.SUPPORTED_IMAGE_PROVIDERS];
     }
     /**
@@ -173,9 +176,9 @@ class ImageService {
      * @returns Promise resolving to array of model information
      */
     async getModels(providerId) {
-
+        this.logger.debug(`ImageService.getModels called for provider: ${providerId}`);
         const models = (0, config_1.getImageModelsByProvider)(providerId);
-
+        this.logger.debug(`ImageService: Found ${models.length} models for provider: ${providerId}`);
         return [...models];
     }
     /**
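ImageService now accepts the same two logging options as LLMService and resolves them with the `options.logger ?? createDefaultLogger(options.logLevel)` line above. A minimal sketch, assuming `ImageService` takes the same key-provider argument that `LLMService` does:

```typescript
import { ImageService, fromEnvironment, silentLogger } from 'genai-lite';

// Default logger, filtered to 'debug' (overrides the env var per the README)
const verboseImages = new ImageService(fromEnvironment, { logLevel: 'debug' });

// Injected logger wins; logLevel would be ignored
const quietImages = new ImageService(fromEnvironment, { logger: silentLogger });
```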
package/dist/index.d.ts
CHANGED
@@ -17,3 +17,5 @@ export { parseStructuredContent, parseRoleTags, extractInitialTaggedContent, par
 export type { TemplateMetadata } from "./prompting/parser";
 export { createFallbackModelInfo, detectGgufCapabilities, KNOWN_GGUF_MODELS } from "./llm/config";
 export type { GgufModelPattern } from "./llm/config";
+export type { Logger, LogLevel, LoggingConfig } from "./logging/types";
+export { createDefaultLogger, DEFAULT_LOG_LEVEL, silentLogger } from "./logging/defaultLogger";
package/dist/index.js
CHANGED
@@ -14,7 +14,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.KNOWN_GGUF_MODELS = exports.detectGgufCapabilities = exports.createFallbackModelInfo = exports.parseTemplateWithMetadata = exports.extractInitialTaggedContent = exports.parseRoleTags = exports.parseStructuredContent = exports.extractRandomVariables = exports.getSmartPreview = exports.countTokens = exports.renderTemplate = exports.ImageService = exports.LlamaCppServerClient = exports.LlamaCppClientAdapter = exports.fromEnvironment = exports.LLMService = void 0;
+exports.silentLogger = exports.DEFAULT_LOG_LEVEL = exports.createDefaultLogger = exports.KNOWN_GGUF_MODELS = exports.detectGgufCapabilities = exports.createFallbackModelInfo = exports.parseTemplateWithMetadata = exports.extractInitialTaggedContent = exports.parseRoleTags = exports.parseStructuredContent = exports.extractRandomVariables = exports.getSmartPreview = exports.countTokens = exports.renderTemplate = exports.ImageService = exports.LlamaCppServerClient = exports.LlamaCppClientAdapter = exports.fromEnvironment = exports.LLMService = void 0;
 // --- LLM Service ---
 var LLMService_1 = require("./llm/LLMService");
 Object.defineProperty(exports, "LLMService", { enumerable: true, get: function () { return LLMService_1.LLMService; } });
@@ -50,3 +50,7 @@ var config_1 = require("./llm/config");
 Object.defineProperty(exports, "createFallbackModelInfo", { enumerable: true, get: function () { return config_1.createFallbackModelInfo; } });
 Object.defineProperty(exports, "detectGgufCapabilities", { enumerable: true, get: function () { return config_1.detectGgufCapabilities; } });
 Object.defineProperty(exports, "KNOWN_GGUF_MODELS", { enumerable: true, get: function () { return config_1.KNOWN_GGUF_MODELS; } });
+var defaultLogger_1 = require("./logging/defaultLogger");
+Object.defineProperty(exports, "createDefaultLogger", { enumerable: true, get: function () { return defaultLogger_1.createDefaultLogger; } });
+Object.defineProperty(exports, "DEFAULT_LOG_LEVEL", { enumerable: true, get: function () { return defaultLogger_1.DEFAULT_LOG_LEVEL; } });
+Object.defineProperty(exports, "silentLogger", { enumerable: true, get: function () { return defaultLogger_1.silentLogger; } });
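The `Logger` and `LogLevel` types come from the new `logging/types.d.ts`, whose body isn't expanded in this diff; every call site shown here uses exactly `debug`, `info`, `warn`, and `error`, each taking a message plus optional metadata. A structural sketch under that assumption:

```typescript
import type { Logger } from 'genai-lite';

// Assumed shape: four methods taking (message, ...meta), inferred from the call sites in this diff
const timestamped: Logger = {
  debug: (msg, ...meta) => console.debug(new Date().toISOString(), msg, ...meta),
  info: (msg, ...meta) => console.info(new Date().toISOString(), msg, ...meta),
  warn: (msg, ...meta) => console.warn(new Date().toISOString(), msg, ...meta),
  error: (msg, ...meta) => console.error(new Date().toISOString(), msg, ...meta),
};
```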
package/dist/llm/LLMService.d.ts
CHANGED
@@ -1,4 +1,5 @@
 import type { ApiKeyProvider, PresetMode } from '../types';
+import type { Logger, LogLevel } from '../logging/types';
 import type { LLMChatRequest, LLMChatRequestWithPreset, LLMResponse, LLMFailureResponse, ProviderInfo, ModelInfo, ApiProviderId, LLMSettings, ModelContext, LLMMessage } from "./types";
 import type { ModelPreset } from "../types/presets";
 export type { PresetMode };
@@ -10,6 +11,10 @@ export interface LLMServiceOptions {
     presets?: ModelPreset[];
     /** The strategy for integrating custom presets. Defaults to 'extend'. */
     presetMode?: PresetMode;
+    /** Log level for filtering messages. Defaults to GENAI_LITE_LOG_LEVEL env var or 'warn'. */
+    logLevel?: LogLevel;
+    /** Custom logger implementation. If provided, logLevel is ignored. */
+    logger?: Logger;
 }
 /**
  * Result from createMessages method
@@ -35,6 +40,7 @@ export interface CreateMessagesResult {
  */
 export declare class LLMService {
     private getApiKey;
+    private logger;
     private presetManager;
     private adapterRegistry;
     private requestValidator;
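The doc comments pin down the precedence: `logger` wins and `logLevel` is then ignored. The README names pino and winston as candidates; a hedged sketch of bridging pino's `(obj, msg)` argument order to the `(message, meta)` order used at the call sites in this diff (the adapter object is illustrative, not a documented integration):

```typescript
import pino from 'pino';
import { LLMService, fromEnvironment, type Logger } from 'genai-lite';

const base = pino({ level: 'debug' });

// pino puts structured data first; genai-lite's call sites pass (message, meta)
const logger: Logger = {
  debug: (msg, meta) => base.debug(meta ?? {}, String(msg)),
  info: (msg, meta) => base.info(meta ?? {}, String(msg)),
  warn: (msg, meta) => base.warn(meta ?? {}, String(msg)),
  error: (msg, meta) => base.error(meta ?? {}, String(msg)),
};

const service = new LLMService(fromEnvironment, { logger }); // logLevel would be ignored here
```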
package/dist/llm/LLMService.js
CHANGED
@@ -6,6 +6,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.LLMService = void 0;
+const defaultLogger_1 = require("../logging/defaultLogger");
 const config_1 = require("./config");
 const template_1 = require("../prompting/template");
 const parser_1 = require("../prompting/parser");
@@ -31,17 +32,19 @@ const ModelResolver_1 = require("./services/ModelResolver");
 class LLMService {
     constructor(getApiKey, options = {}) {
         this.getApiKey = getApiKey;
-        // Initialize
+        // Initialize logger - custom logger takes precedence over logLevel
+        this.logger = options.logger ?? (0, defaultLogger_1.createDefaultLogger)(options.logLevel);
+        // Initialize services with logger
         this.presetManager = new PresetManager_1.PresetManager(llm_presets_json_1.default, options.presets, options.presetMode);
         this.adapterRegistry = new AdapterRegistry_1.AdapterRegistry({
             supportedProviders: config_1.SUPPORTED_PROVIDERS,
             fallbackAdapter: new MockClientAdapter_1.MockClientAdapter(),
             adapterConstructors: config_1.ADAPTER_CONSTRUCTORS,
             adapterConfigs: config_1.ADAPTER_CONFIGS,
-        });
+        }, this.logger);
         this.requestValidator = new RequestValidator_1.RequestValidator();
-        this.settingsManager = new SettingsManager_1.SettingsManager();
-        this.modelResolver = new ModelResolver_1.ModelResolver(this.presetManager, this.adapterRegistry);
+        this.settingsManager = new SettingsManager_1.SettingsManager(this.logger);
+        this.modelResolver = new ModelResolver_1.ModelResolver(this.presetManager, this.adapterRegistry, this.logger);
     }
     /**
      * Gets list of supported LLM providers
@@ -49,7 +52,7 @@ class LLMService {
      * @returns Promise resolving to array of provider information
      */
     async getProviders() {
-
+        this.logger.debug("LLMService.getProviders called");
         return [...config_1.SUPPORTED_PROVIDERS]; // Return a copy to prevent external modification
     }
     /**
@@ -59,14 +62,14 @@ class LLMService {
      * @returns Promise resolving to array of model information
      */
     async getModels(providerId) {
-
+        this.logger.debug(`LLMService.getModels called for provider: ${providerId}`);
         // Validate provider exists
         const models = (0, config_1.getModelsByProvider)(providerId);
         if (models.length === 0) {
-
+            this.logger.warn(`Requested models for unsupported provider: ${providerId}`);
             return [];
         }
-
+        this.logger.debug(`Found ${models.length} models for provider: ${providerId}`);
         return [...models]; // Return a copy to prevent external modification
     }
     /**
@@ -76,7 +79,7 @@ class LLMService {
      * @returns Promise resolving to either success or failure response
      */
     async sendMessage(request) {
-
+        this.logger.info(`LLMService.sendMessage called with presetId: ${request.presetId}, provider: ${request.providerId}, model: ${request.modelId}`);
         try {
             // Resolve model information from preset or direct IDs
             const resolved = await this.modelResolver.resolve(request);
@@ -130,7 +133,7 @@ class LLMService {
                 ...resolvedRequest,
                 settings: filteredSettings,
             };
-
+            this.logger.debug(`Processing LLM request with (potentially filtered) settings:`, {
                 provider: providerId,
                 model: modelId,
                 settings: filteredSettings,
@@ -166,7 +169,7 @@ class LLMService {
                     object: "error",
                 };
             }
-
+            this.logger.info(`Making LLM request with ${clientAdapter.constructor.name} for provider: ${providerId}`);
             const result = await clientAdapter.sendMessage(internalRequest, apiKey);
             // Post-process for thinking tag fallback
             // This feature extracts reasoning from XML tags when native reasoning is not active.
@@ -186,7 +189,7 @@ class LLMService {
             const { extracted, remaining } = (0, parser_1.extractInitialTaggedContent)(choice.message.content, tagName);
             if (extracted !== null) {
                 // Success: thinking tag found
-
+                this.logger.debug(`Extracted <${tagName}> block from response.`);
                 // Handle the edge case: append to existing reasoning if present (e.g., native reasoning + thinking tags)
                 const existingReasoning = choice.reasoning || '';
                 if (existingReasoning) {
@@ -234,11 +237,11 @@ class LLMService {
                 }
             }
         }
-
+        this.logger.info(`LLM request completed successfully for model: ${modelId}`);
         return result;
     }
     catch (error) {
-
+        this.logger.error("Error in LLMService.sendMessage:", error);
         return {
             provider: providerId,
             model: modelId,
@@ -255,7 +258,7 @@ class LLMService {
         }
     }
     catch (error) {
-
+        this.logger.error("Error in LLMService.sendMessage (outer):", error);
         return {
             provider: request.providerId || request.presetId || 'unknown',
             model: request.modelId || request.presetId || 'unknown',
@@ -328,7 +331,7 @@ class LLMService {
      * ```
      */
     async createMessages(options) {
-
+        this.logger.debug('LLMService.createMessages called');
         // NEW: Step 1 - Parse the template for metadata and content
         const { metadata, content: templateContent } = (0, parser_1.parseTemplateWithMetadata)(options.template);
         // Validate the settings from the template
@@ -345,7 +348,7 @@ class LLMService {
         });
         if (resolved.error) {
             // If resolution fails, proceed without model context
-
+            this.logger.warn('Model resolution failed, proceeding without model context:', resolved.error);
         }
         else {
             const { providerId, modelId, modelInfo, settings } = resolved;
package/dist/llm/clients/AnthropicClientAdapter.js
CHANGED
@@ -9,6 +9,8 @@ exports.AnthropicClientAdapter = void 0;
 const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
 const types_1 = require("./types");
 const errorUtils_1 = require("../../shared/adapters/errorUtils");
+const defaultLogger_1 = require("../../logging/defaultLogger");
+const logger = (0, defaultLogger_1.createDefaultLogger)();
 /**
  * Client adapter for Anthropic API integration
  *
@@ -93,8 +95,8 @@ class AnthropicClientAdapter {
             };
         }
     }
-
-
+    logger.info(`Making Anthropic API call for model: ${request.modelId}`);
+    logger.debug(`Anthropic API parameters:`, {
         model: messageParams.model,
         temperature: messageParams.temperature,
         max_tokens: messageParams.max_tokens,
@@ -105,12 +107,12 @@ class AnthropicClientAdapter {
     });
     // Make the API call
     const completion = await anthropic.messages.create(messageParams);
-
+    logger.info(`Anthropic API call successful, response ID: ${completion.id}`);
     // Convert to standardized response format
     return this.createSuccessResponse(completion, request);
 }
 catch (error) {
-
+    logger.error("Anthropic API error:", error);
     return this.createErrorResponse(error, request);
 }
@@ -171,7 +173,7 @@ class AnthropicClientAdapter {
     // Anthropic requires messages to start with 'user' role
     // If the first message is not from user, we need to handle this
     if (messages.length > 0 && messages[0].role !== "user") {
-
+        logger.warn("Anthropic API requires first message to be from user. Adjusting message order.");
         // Find the first user message and move it to the front, or create a default one
         const firstUserIndex = messages.findIndex((msg) => msg.role === "user");
         if (firstUserIndex > 0) {
@@ -213,7 +215,7 @@ class AnthropicClientAdapter {
         // If roles don't alternate properly, we might need to combine messages
         // or insert a placeholder. For now, we'll skip non-alternating messages
         // and log a warning.
-
+        logger.warn(`Skipping message with unexpected role: expected ${expectedRole}, got ${message.role}`);
     }
 }
 return cleanedMessages;
package/dist/llm/clients/GeminiClientAdapter.js
CHANGED
@@ -6,6 +6,8 @@ exports.GeminiClientAdapter = void 0;
 const genai_1 = require("@google/genai");
 const types_1 = require("./types");
 const errorUtils_1 = require("../../shared/adapters/errorUtils");
+const defaultLogger_1 = require("../../logging/defaultLogger");
+const logger = (0, defaultLogger_1.createDefaultLogger)();
 /**
  * Client adapter for Google Gemini API integration
  *
@@ -39,8 +41,8 @@ class GeminiClientAdapter {
         const genAI = new genai_1.GoogleGenAI({ apiKey });
         // Format the request for Gemini API
         const { contents, generationConfig, safetySettings, systemInstruction } = this.formatInternalRequestToGemini(request);
-
-
+        logger.info(`Making Gemini API call for model: ${request.modelId}`);
+        logger.debug(`Gemini API parameters:`, {
             model: request.modelId,
             temperature: generationConfig.temperature,
             maxOutputTokens: generationConfig.maxOutputTokens,
@@ -58,12 +60,12 @@ class GeminiClientAdapter {
                 ...(systemInstruction && { systemInstruction: systemInstruction }),
             },
         });
-
+        logger.info(`Gemini API call successful, processing response`);
         // Convert to standardized response format
         return this.createSuccessResponse(result, request);
     }
     catch (error) {
-
+        logger.error("Gemini API error:", error);
         return this.createErrorResponse(error, request);
     }
 }
package/dist/llm/clients/LlamaCppClientAdapter.js
CHANGED
@@ -11,6 +11,8 @@ const types_1 = require("./types");
 const errorUtils_1 = require("../../shared/adapters/errorUtils");
 const LlamaCppServerClient_1 = require("./LlamaCppServerClient");
 const config_1 = require("../config");
+const defaultLogger_1 = require("../../logging/defaultLogger");
+const logger = (0, defaultLogger_1.createDefaultLogger)();
 /**
  * Client adapter for llama.cpp server integration
  *
@@ -78,10 +80,10 @@ class LlamaCppClientAdapter {
         }
         // Attempt detection
         try {
-
+            logger.debug(`Detecting model capabilities from llama.cpp server at ${this.baseURL}`);
             const { data } = await this.serverClient.getModels();
             if (!data || data.length === 0) {
-
+                logger.warn('No models loaded in llama.cpp server');
                 this.detectionAttempted = true;
                 return null;
             }
@@ -91,15 +93,15 @@ class LlamaCppClientAdapter {
         this.cachedModelCapabilities = capabilities;
         this.detectionAttempted = true;
         if (capabilities) {
-
+            logger.debug(`Cached model capabilities for: ${ggufFilename}`);
         }
         else {
-
+            logger.debug(`No known pattern matched for: ${ggufFilename}`);
         }
         return capabilities;
     }
     catch (error) {
-
+        logger.warn('Failed to detect model capabilities:', error);
         this.detectionAttempted = true;
         return null;
     }
@@ -113,7 +115,7 @@ class LlamaCppClientAdapter {
     clearModelCache() {
         this.cachedModelCapabilities = null;
         this.detectionAttempted = false;
-
+        logger.debug('Cleared model capabilities cache');
     }
     /**
      * Sends a chat message to llama.cpp server
@@ -142,7 +144,7 @@ class LlamaCppClientAdapter {
             }
         }
         catch (healthError) {
-
+            logger.warn('Health check failed, proceeding with request anyway:', healthError);
         }
     }
     // Initialize OpenAI client with llama.cpp base URL
@@ -170,19 +172,19 @@ class LlamaCppClientAdapter {
             presence_penalty: request.settings.presencePenalty,
         }),
     };
-
+    logger.debug(`llama.cpp API parameters:`, {
         baseURL: this.baseURL,
         model: completionParams.model,
         temperature: completionParams.temperature,
         max_tokens: completionParams.max_tokens,
         top_p: completionParams.top_p,
     });
-
+    logger.info(`Making llama.cpp API call for model: ${request.modelId}`);
     // Make the API call
     const completion = await openai.chat.completions.create(completionParams);
     // Type guard to ensure we have a non-streaming response
     if ('id' in completion && 'choices' in completion) {
-
+        logger.info(`llama.cpp API call successful, response ID: ${completion.id}`);
         return this.createSuccessResponse(completion, request);
     }
     else {
@@ -190,7 +192,7 @@ class LlamaCppClientAdapter {
     }
     }
     catch (error) {
-
+        logger.error("llama.cpp API error:", error);
         // Clear cache on connection errors so we re-detect on next request
         const errorMessage = error?.message || String(error);
         if (errorMessage.includes("ECONNREFUSED") ||