genai-lite 0.1.1 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/README.md +202 -1
  2. package/dist/config/presets.json +222 -0
  3. package/dist/index.d.ts +3 -0
  4. package/dist/index.js +4 -1
  5. package/dist/llm/LLMService.d.ts +25 -1
  6. package/dist/llm/LLMService.js +34 -1
  7. package/dist/llm/LLMService.presets.test.d.ts +1 -0
  8. package/dist/llm/LLMService.presets.test.js +210 -0
  9. package/dist/llm/LLMService.test.d.ts +1 -0
  10. package/dist/llm/LLMService.test.js +279 -0
  11. package/dist/llm/clients/AnthropicClientAdapter.test.d.ts +1 -0
  12. package/dist/llm/clients/AnthropicClientAdapter.test.js +263 -0
  13. package/dist/llm/clients/GeminiClientAdapter.test.d.ts +1 -0
  14. package/dist/llm/clients/GeminiClientAdapter.test.js +281 -0
  15. package/dist/llm/clients/MockClientAdapter.test.d.ts +1 -0
  16. package/dist/llm/clients/MockClientAdapter.test.js +240 -0
  17. package/dist/llm/clients/OpenAIClientAdapter.test.d.ts +1 -0
  18. package/dist/llm/clients/OpenAIClientAdapter.test.js +248 -0
  19. package/dist/llm/clients/adapterErrorUtils.test.d.ts +1 -0
  20. package/dist/llm/clients/adapterErrorUtils.test.js +123 -0
  21. package/dist/llm/config.test.d.ts +1 -0
  22. package/dist/llm/config.test.js +159 -0
  23. package/dist/providers/fromEnvironment.test.d.ts +1 -0
  24. package/dist/providers/fromEnvironment.test.js +46 -0
  25. package/dist/types/presets.d.ts +19 -0
  26. package/dist/types/presets.js +2 -0
  27. package/dist/utils/index.d.ts +1 -0
  28. package/dist/utils/index.js +1 -0
  29. package/dist/utils/prompt.test.d.ts +1 -0
  30. package/dist/utils/prompt.test.js +115 -0
  31. package/dist/utils/templateEngine.d.ts +15 -0
  32. package/dist/utils/templateEngine.js +194 -0
  33. package/dist/utils/templateEngine.test.d.ts +1 -0
  34. package/dist/utils/templateEngine.test.js +134 -0
  35. package/package.json +9 -4
  36. package/src/config/presets.json +222 -0
package/README.md CHANGED
@@ -10,6 +10,8 @@ A lightweight, portable Node.js/TypeScript library providing a unified interface
  - 🎯 **TypeScript First** - Full type safety and IntelliSense support
  - ⚡ **Lightweight** - Minimal dependencies, focused functionality
  - 🛡️ **Provider Normalization** - Consistent responses across different AI APIs
+ - 🎨 **Configurable Model Presets** - Built-in presets with full customization options
+ - 🎭 **Template Engine** - Sophisticated templating with conditionals and variable substitution
 
  ## Installation
 
@@ -135,6 +137,80 @@ const providers = await llmService.getProviders();
 
  // Get models for a specific provider
  const models = await llmService.getModels('anthropic');
+
+ // Get configured model presets
+ const presets = llmService.getPresets();
+ ```
+
+ ### Model Presets
+
+ genai-lite includes a built-in set of model presets for common use cases. You can use these defaults, extend them with your own, or replace them entirely.
+
+ #### Using Default Presets
+
+ ```typescript
+ const llmService = new LLMService(fromEnvironment);
+
+ // Get all default presets
+ const presets = llmService.getPresets();
+ // Returns presets like:
+ // - anthropic-claude-3-5-sonnet-20241022-default
+ // - openai-gpt-4.1-default
+ // - google-gemini-2.5-pro
+ // ... and more
+ ```
+
+ #### Extending Default Presets
+
+ ```typescript
+ import { LLMService, fromEnvironment, ModelPreset } from 'genai-lite';
+
+ const customPresets: ModelPreset[] = [
+   {
+     id: 'my-creative-preset',
+     displayName: 'Creative Writing Assistant',
+     providerId: 'openai',
+     modelId: 'gpt-4.1',
+     settings: {
+       temperature: 0.9,
+       maxTokens: 2000,
+       topP: 0.95
+     }
+   }
+ ];
+
+ const llmService = new LLMService(fromEnvironment, {
+   presets: customPresets,
+   presetMode: 'extend' // Default behavior - adds to existing presets
+ });
+ ```
+
+ #### Replacing Default Presets
+
+ For applications that need full control over available presets:
+
+ ```typescript
+ const applicationPresets: ModelPreset[] = [
+   {
+     id: 'app-gpt4-default',
+     displayName: 'GPT-4 Standard',
+     providerId: 'openai',
+     modelId: 'gpt-4.1',
+     settings: { temperature: 0.7 }
+   },
+   {
+     id: 'app-claude-creative',
+     displayName: 'Claude Creative',
+     providerId: 'anthropic',
+     modelId: 'claude-3-5-sonnet-20241022',
+     settings: { temperature: 0.8, maxTokens: 4000 }
+   }
+ ];
+
+ const llmService = new LLMService(fromEnvironment, {
+   presets: applicationPresets,
+   presetMode: 'replace' // Use ONLY these presets, ignore defaults
+ });
  ```
 
  ### Error Handling
@@ -211,7 +287,10 @@ import type {
    LLMResponse,
    LLMFailureResponse,
    LLMSettings,
-   ApiKeyProvider
+   ApiKeyProvider,
+   ModelPreset,
+   LLMServiceOptions,
+   PresetMode
  } from 'genai-lite';
  ```
 
@@ -310,6 +389,108 @@ const response = await llm.sendMessage({
  });
  ```
 
+ ### Template Engine
+
+ Generate dynamic prompts and content using the built-in template engine that supports variable substitution and conditional logic:
+
+ ```typescript
+ import { renderTemplate } from 'genai-lite/utils';
+
+ // Simple variable substitution
+ const greeting = renderTemplate('Hello, {{ name }}!', { name: 'World' });
+ // Result: "Hello, World!"
+
+ // Conditional rendering with ternary syntax
+ const prompt = renderTemplate(
+   'Analyze this {{ language }} code:\n{{ hasContext ? `Context: {{context}}\n` : `` }}```\n{{ code }}\n```',
+   {
+     language: 'TypeScript',
+     hasContext: true,
+     context: 'React component for user authentication',
+     code: 'export const Login = () => { ... }'
+   }
+ );
+ // Result includes the context line when hasContext is true
+
+ // Complex template with multiple conditionals
+ const complexTemplate = `
+ System: You are a {{ role }} assistant.
+ {{ hasExpertise ? `Expertise: {{expertise}}` : `General knowledge assistant` }}
+
+ Task: {{ task }}
+ {{ hasFiles ? `
+ Files to analyze:
+ {{ fileList }}` : `` }}
+ {{ requiresOutput ? `
+ Expected output format:
+ {{ outputFormat }}` : `` }}
+ `;
+
+ const result = renderTemplate(complexTemplate, {
+   role: 'coding',
+   hasExpertise: true,
+   expertise: 'TypeScript, React, Node.js',
+   task: 'Review the code for best practices',
+   hasFiles: true,
+   fileList: '- src/index.ts\n- src/utils.ts',
+   requiresOutput: false
+ });
+ ```
+
+ Template syntax supports:
+ - **Simple substitution**: `{{ variableName }}`
+ - **Ternary conditionals**: `{{ condition ? `true result` : `false result` }}`
+ - **Nested variables**: `{{ show ? `Name: {{name}}` : `Anonymous` }}`
+ - **Multi-line strings**: Use backticks to preserve formatting
+ - **Intelligent newline handling**: Empty results remove trailing newlines
+
+ ### Example: Building Dynamic LLM Prompts
+
+ Combine the template engine with other utilities for powerful prompt generation:
+
+ ```typescript
+ import { LLMService, fromEnvironment } from 'genai-lite';
+ import { renderTemplate, countTokens } from 'genai-lite/utils';
+
+ const llm = new LLMService(fromEnvironment);
+
+ // Define a reusable prompt template
+ const codeReviewTemplate = `
+ You are an expert {{ language }} developer.
+
+ {{ hasGuidelines ? `Follow these coding guidelines:
+ {{ guidelines }}
+
+ ` : `` }}Review the following code:
+ \`\`\`{{ language }}
+ {{ code }}
+ \`\`\`
+
+ {{ hasFocus ? `Focus on: {{ focusAreas }}` : `Provide a comprehensive review covering all aspects.` }}
+ `;
+
+ // Render the prompt with specific values
+ const prompt = renderTemplate(codeReviewTemplate, {
+   language: 'TypeScript',
+   hasGuidelines: true,
+   guidelines: '- Use functional components\n- Prefer composition over inheritance',
+   code: sourceCode,
+   hasFocus: true,
+   focusAreas: 'performance optimizations and error handling'
+ });
+
+ // Check token count before sending
+ const tokenCount = countTokens(prompt, 'gpt-4.1-mini');
+ console.log(`Prompt uses ${tokenCount} tokens`);
+
+ // Send to LLM
+ const response = await llm.sendMessage({
+   providerId: 'openai',
+   modelId: 'gpt-4.1-mini',
+   messages: [{ role: 'user', content: prompt }]
+ });
+ ```
+
  ## Contributing
 
  Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change.
@@ -327,6 +508,26 @@ npm run build
  npm test
  ```
 
+ ### End-to-End Testing
+
+ The project includes an end-to-end test suite that makes real API calls to providers. These tests are separate from the main unit test suite and are not run in CI by default.
+
+ To run these tests locally, you must first provide API keys as environment variables with the `E2E_` prefix:
+
+ ```bash
+ export E2E_OPENAI_API_KEY="sk-..."
+ export E2E_ANTHROPIC_API_KEY="sk-ant-..."
+ export E2E_GEMINI_API_KEY="AIza..."
+ ```
+
+ Then, run the E2E test script:
+
+ ```bash
+ npm run test:e2e
+ ```
+
+ The tests will automatically skip any provider for which an API key is not found.
+
  ## License
 
  This project is licensed under the MIT License - see the LICENSE file for details.
package/dist/config/presets.json ADDED
@@ -0,0 +1,222 @@
+ [
+   {
+     "id": "anthropic-claude-sonnet-4-20250514-default",
+     "displayName": "Anthropic - Claude Sonnet 4",
+     "description": "Default preset for Claude Sonnet 4.",
+     "providerId": "anthropic",
+     "modelId": "claude-sonnet-4-20250514",
+     "settings": {
+       "temperature": 0.3
+     }
+   },
+   {
+     "id": "anthropic-claude-opus-4-20250514-default",
+     "displayName": "Anthropic - Claude Opus 4",
+     "description": "Default preset for Claude Opus 4.",
+     "providerId": "anthropic",
+     "modelId": "claude-opus-4-20250514",
+     "settings": {
+       "temperature": 0.3
+     }
+   },
+   {
+     "id": "anthropic-claude-3-7-sonnet-20250219-default",
+     "displayName": "Anthropic - Claude 3.7 Sonnet",
+     "description": "Default preset for Claude 3.7 Sonnet.",
+     "providerId": "anthropic",
+     "modelId": "claude-3-7-sonnet-20250219",
+     "settings": {
+       "temperature": 0.3
+     }
+   },
+   {
+     "id": "anthropic-claude-3-5-sonnet-20241022-default",
+     "displayName": "Anthropic - Claude 3.5 Sonnet",
+     "description": "Default preset for Claude 3.5 Sonnet.",
+     "providerId": "anthropic",
+     "modelId": "claude-3-5-sonnet-20241022",
+     "settings": {
+       "temperature": 0.3
+     }
+   },
+   {
+     "id": "anthropic-claude-3-5-haiku-20241022-default",
+     "displayName": "Anthropic - Claude 3.5 Haiku",
+     "description": "Default preset for Claude 3.5 Haiku.",
+     "providerId": "anthropic",
+     "modelId": "claude-3-5-haiku-20241022",
+     "settings": {
+       "temperature": 0.3
+     }
+   },
+   {
+     "id": "google-gemini-2.5-pro",
+     "displayName": "Google - Gemini 2.5 Pro",
+     "description": "Default preset for Gemini 2.5 Pro.",
+     "providerId": "gemini",
+     "modelId": "gemini-2.5-pro",
+     "settings": {
+       "temperature": 0.3,
+       "geminiSafetySettings": [
+         { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
+         {
+           "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+           "threshold": "BLOCK_NONE"
+         },
+         {
+           "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+           "threshold": "BLOCK_NONE"
+         },
+         { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
+       ]
+     }
+   },
+   {
+     "id": "google-gemini-2.5-flash",
+     "displayName": "Google - Gemini 2.5 Flash",
+     "description": "Default preset for Gemini 2.5 Flash.",
+     "providerId": "gemini",
+     "modelId": "gemini-2.5-flash",
+     "settings": {
+       "temperature": 0.3,
+       "geminiSafetySettings": [
+         { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
+         {
+           "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+           "threshold": "BLOCK_NONE"
+         },
+         {
+           "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+           "threshold": "BLOCK_NONE"
+         },
+         { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
+       ]
+     }
+   },
+   {
+     "id": "google-gemini-2.5-flash-lite-preview",
+     "displayName": "Google - Gemini 2.5 Flash-Lite Preview",
+     "description": "Default preset for Gemini 2.5 Flash-Lite.",
+     "providerId": "gemini",
+     "modelId": "gemini-2.5-flash-lite-preview-06-17",
+     "settings": {
+       "temperature": 0.3,
+       "geminiSafetySettings": [
+         { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
+         {
+           "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+           "threshold": "BLOCK_NONE"
+         },
+         {
+           "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+           "threshold": "BLOCK_NONE"
+         },
+         { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
+       ]
+     }
+   },
+   {
+     "id": "google-gemini-2.0-flash-default",
+     "displayName": "Google - Gemini 2.0 Flash",
+     "description": "Default preset for Gemini 2.0 Flash.",
+     "providerId": "gemini",
+     "modelId": "gemini-2.0-flash",
+     "settings": {
+       "temperature": 0.3,
+       "geminiSafetySettings": [
+         { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
+         {
+           "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+           "threshold": "BLOCK_NONE"
+         },
+         {
+           "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+           "threshold": "BLOCK_NONE"
+         },
+         { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
+       ]
+     }
+   },
+   {
+     "id": "google-gemini-2.0-flash-lite-default",
+     "displayName": "Google - Gemini 2.0 Flash Lite",
+     "description": "Default preset for Gemini 2.0 Flash Lite.",
+     "providerId": "gemini",
+     "modelId": "gemini-2.0-flash-lite",
+     "settings": {
+       "temperature": 0.3,
+       "geminiSafetySettings": [
+         { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
+         {
+           "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+           "threshold": "BLOCK_NONE"
+         },
+         {
+           "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+           "threshold": "BLOCK_NONE"
+         },
+         { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
+       ]
+     }
+   },
+   {
+     "id": "openai-o4-mini-default",
+     "displayName": "OpenAI - o4-mini",
+     "description": "Default preset for o4-mini.",
+     "providerId": "openai",
+     "modelId": "o4-mini",
+     "settings": {
+       "temperature": 1.0
+     }
+   },
+   {
+     "id": "openai-gpt-4.1-default",
+     "displayName": "OpenAI - GPT-4.1",
+     "description": "Default preset for GPT-4.1.",
+     "providerId": "openai",
+     "modelId": "gpt-4.1",
+     "settings": {
+       "temperature": 0.3
+     }
+   },
+   {
+     "id": "openai-gpt-4.1-mini-default",
+     "displayName": "OpenAI - GPT-4.1 Mini",
+     "description": "Default preset for GPT-4.1 Mini.",
+     "providerId": "openai",
+     "modelId": "gpt-4.1-mini",
+     "settings": {
+       "temperature": 0.3
+     }
+   },
+   {
+     "id": "openai-gpt-4.1-nano-default",
+     "displayName": "OpenAI - GPT-4.1 Nano",
+     "description": "Default preset for GPT-4.1 Nano.",
+     "providerId": "openai",
+     "modelId": "gpt-4.1-nano",
+     "settings": {
+       "temperature": 0.3
+     }
+   },
+   {
+     "id": "mistral-codestral-2501-default",
+     "displayName": "Mistral AI - Codestral",
+     "description": "Default preset for Codestral.",
+     "providerId": "mistral",
+     "modelId": "codestral-2501",
+     "settings": {
+       "temperature": 0.3
+     }
+   },
+   {
+     "id": "mistral-devstral-small-2505-default",
+     "displayName": "Mistral AI - Devstral Small",
+     "description": "Default preset for Devstral Small.",
+     "providerId": "mistral",
+     "modelId": "devstral-small-2505",
+     "settings": {
+       "temperature": 0.3
+     }
+   }
+ ]
package/dist/index.d.ts CHANGED
@@ -1,5 +1,8 @@
  export type { ApiKeyProvider } from "./types";
  export { LLMService } from "./llm/LLMService";
+ export type { LLMServiceOptions, PresetMode } from "./llm/LLMService";
+ export type { ModelPreset } from "./types/presets";
  export * from "./llm/types";
  export * from "./llm/clients/types";
  export { fromEnvironment } from "./providers/fromEnvironment";
+ export { renderTemplate } from "./utils/templateEngine";
package/dist/index.js CHANGED
@@ -14,7 +14,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.fromEnvironment = exports.LLMService = void 0;
+ exports.renderTemplate = exports.fromEnvironment = exports.LLMService = void 0;
  // --- LLM Service ---
  var LLMService_1 = require("./llm/LLMService");
  Object.defineProperty(exports, "LLMService", { enumerable: true, get: function () { return LLMService_1.LLMService; } });
@@ -25,3 +25,6 @@ __exportStar(require("./llm/clients/types"), exports);
  // --- API Key Providers ---
  var fromEnvironment_1 = require("./providers/fromEnvironment");
  Object.defineProperty(exports, "fromEnvironment", { enumerable: true, get: function () { return fromEnvironment_1.fromEnvironment; } });
+ // --- Utilities ---
+ var templateEngine_1 = require("./utils/templateEngine");
+ Object.defineProperty(exports, "renderTemplate", { enumerable: true, get: function () { return templateEngine_1.renderTemplate; } });
package/dist/llm/LLMService.d.ts CHANGED
@@ -1,6 +1,22 @@
  import type { ApiKeyProvider } from '../types';
  import type { LLMChatRequest, LLMResponse, LLMFailureResponse, ProviderInfo, ModelInfo, ApiProviderId } from "./types";
  import type { ILLMClientAdapter } from "./clients/types";
+ import type { ModelPreset } from "../types/presets";
+ /**
+ * Defines how custom presets interact with the default presets.
+ * 'replace': Use only the custom presets provided. The default set is ignored.
+ * 'extend': Use the default presets, and add/override them with the custom presets. This is the default behavior.
+ */
+ export type PresetMode = 'replace' | 'extend';
+ /**
+ * Options for configuring the LLMService
+ */
+ export interface LLMServiceOptions {
+ /** An array of custom presets to integrate. */
+ presets?: ModelPreset[];
+ /** The strategy for integrating custom presets. Defaults to 'extend'. */
+ presetMode?: PresetMode;
+ }
  /**
  * Main process service for LLM operations
  *
@@ -10,12 +26,14 @@ import type { ILLMClientAdapter } from "./clients/types";
  * - Validates requests and applies default settings
  * - Routes requests to appropriate provider adapters
  * - Handles errors and provides standardized responses
+ * - Provides configurable model presets for common use cases
  */
  export declare class LLMService {
  private getApiKey;
  private clientAdapters;
  private mockClientAdapter;
- constructor(getApiKey: ApiKeyProvider);
+ private presets;
+ constructor(getApiKey: ApiKeyProvider, options?: LLMServiceOptions);
  /**
  * Gets list of supported LLM providers
  *
@@ -83,4 +101,10 @@ export declare class LLMService {
  availableProviders: string[];
  unavailableProviders: string[];
  };
+ /**
+ * Gets all configured model presets
+ *
+ * @returns Array of model presets
+ */
+ getPresets(): ModelPreset[];
  }
package/dist/llm/LLMService.js CHANGED
@@ -1,10 +1,14 @@
  "use strict";
  // AI Summary: Main process service for LLM operations, integrating with ApiKeyProvider for secure key access.
  // Orchestrates LLM requests through provider-specific client adapters with proper error handling.
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+ return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.LLMService = void 0;
  const MockClientAdapter_1 = require("./clients/MockClientAdapter");
  const config_1 = require("./config");
+ const presets_json_1 = __importDefault(require("../config/presets.json"));
  /**
  * Main process service for LLM operations
  *
@@ -14,12 +18,33 @@ const config_1 = require("./config");
  * - Validates requests and applies default settings
  * - Routes requests to appropriate provider adapters
  * - Handles errors and provides standardized responses
+ * - Provides configurable model presets for common use cases
  */
  class LLMService {
- constructor(getApiKey) {
+ constructor(getApiKey, options = {}) {
  this.getApiKey = getApiKey;
  this.clientAdapters = new Map();
  this.mockClientAdapter = new MockClientAdapter_1.MockClientAdapter();
+ // Initialize presets based on mode
+ const finalPresets = new Map();
+ const customPresets = options.presets || [];
+ const mode = options.presetMode || 'extend';
+ if (mode === 'replace') {
+ // Replace Mode: Only use custom presets.
+ for (const preset of customPresets) {
+ finalPresets.set(preset.id, preset);
+ }
+ }
+ else {
+ // Extend Mode: Load defaults first, then add/override.
+ for (const preset of presets_json_1.default) {
+ finalPresets.set(preset.id, preset);
+ }
+ for (const preset of customPresets) {
+ finalPresets.set(preset.id, preset);
+ }
+ }
+ this.presets = Array.from(finalPresets.values());
  // Dynamically register client adapters based on configuration
  let registeredCount = 0;
  const successfullyRegisteredProviders = [];
@@ -406,5 +431,13 @@ class LLMService {
  unavailableProviders,
  };
  }
+ /**
+ * Gets all configured model presets
+ *
+ * @returns Array of model presets
+ */
+ getPresets() {
+ return [...this.presets]; // Return a copy to prevent external modification
+ }
  }
  exports.LLMService = LLMService;
@@ -0,0 +1 @@
+ export {};
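
For readers evaluating this upgrade, the sketch below shows how the pieces added in this diff (constructor options with `presets`/`presetMode`, `getPresets()`, and the new `renderTemplate` export) fit together. It is a minimal illustration based only on the API surface visible above: the `docs-summarizer` preset ID is hypothetical, and because the diff does not show how a preset's `settings` are applied to a request, that step is omitted here.

```typescript
import { LLMService, fromEnvironment, ModelPreset } from 'genai-lite';
import { renderTemplate } from 'genai-lite/utils';

async function main() {
  // Extend the bundled defaults with one custom preset ('extend' is the default mode).
  const customPresets: ModelPreset[] = [
    {
      id: 'docs-summarizer', // hypothetical preset ID, not part of the bundled defaults
      displayName: 'Docs Summarizer',
      providerId: 'openai',
      modelId: 'gpt-4.1-mini',
      settings: { temperature: 0.2 }
    }
  ];

  const llm = new LLMService(fromEnvironment, {
    presets: customPresets,
    presetMode: 'extend'
  });

  // getPresets() returns the bundled defaults plus the custom entry above.
  const preset = llm.getPresets().find((p) => p.id === 'docs-summarizer');
  if (!preset) return;

  // Build the prompt with the template engine exported in this release.
  const prompt = renderTemplate(
    'Summarize this {{ kind }}:\n{{ text }}',
    { kind: 'release note', text: 'Added configurable model presets and a template engine.' }
  );

  // Route the request using the preset's provider/model pair.
  const response = await llm.sendMessage({
    providerId: preset.providerId,
    modelId: preset.modelId,
    messages: [{ role: 'user', content: prompt }]
  });
  console.log(response);
}

main();
```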